Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
assembler_x64.h
Go to the documentation of this file.
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
13#error Do not include assembler_x64.h directly; use assembler.h instead.
14#endif
15
16#include <functional>
17
18#include "platform/assert.h"
19#include "platform/utils.h"
21#include "vm/constants.h"
22#include "vm/constants_x86.h"
23#include "vm/hash_map.h"
24#include "vm/pointer_tagging.h"
25
26namespace dart {
27
28// Forward declarations.
29class FlowGraphCompiler;
30class RegisterSet;
31
32namespace compiler {
33
34class Immediate : public ValueObject {
35 public:
36 explicit Immediate(int64_t value) : value_(value) {}
37
38 Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}
39
40 int64_t value() const { return value_; }
41
42 bool is_int8() const { return Utils::IsInt(8, value_); }
43 bool is_uint8() const { return Utils::IsUint(8, value_); }
44 bool is_int16() const { return Utils::IsInt(16, value_); }
45 bool is_uint16() const { return Utils::IsUint(16, value_); }
46 bool is_int32() const { return Utils::IsInt(32, value_); }
47 bool is_uint32() const { return Utils::IsUint(32, value_); }
48
49 private:
50 const int64_t value_;
51
52 // TODO(5411081): Add DISALLOW_COPY_AND_ASSIGN(Immediate) once the mac
53 // build issue is resolved.
54 // And remove the unnecessary copy constructor.
55};
56
57class Operand : public ValueObject {
58 public:
59 uint8_t rex() const { return rex_; }
60
61 uint8_t mod() const { return (encoding_at(0) >> 6) & 3; }
62
63 Register rm() const {
64 int rm_rex = (rex_ & REX_B) << 3;
65 return static_cast<Register>(rm_rex + (encoding_at(0) & 7));
66 }
67
69 return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
70 }
71
72 Register index() const {
73 int index_rex = (rex_ & REX_X) << 2;
74 return static_cast<Register>(index_rex + ((encoding_at(1) >> 3) & 7));
75 }
76
77 Register base() const {
78 int base_rex = (rex_ & REX_B) << 3;
79 return static_cast<Register>(base_rex + (encoding_at(1) & 7));
80 }
81
82 int8_t disp8() const {
83 ASSERT(length_ >= 2);
84 return static_cast<int8_t>(encoding_[length_ - 1]);
85 }
86
87 int32_t disp32() const {
88 ASSERT(length_ >= 5);
89 return bit_copy<int32_t>(encoding_[length_ - 4]);
90 }
91
92 Operand(const Operand& other)
93 : ValueObject(), length_(other.length_), rex_(other.rex_) {
94 memmove(&encoding_[0], &other.encoding_[0], other.length_);
95 }
96
97 Operand& operator=(const Operand& other) {
98 length_ = other.length_;
99 rex_ = other.rex_;
100 memmove(&encoding_[0], &other.encoding_[0], other.length_);
101 return *this;
102 }
103
104 bool Equals(const Operand& other) const {
105 if (length_ != other.length_) return false;
106 if (rex_ != other.rex_) return false;
107 for (uint8_t i = 0; i < length_; i++) {
108 if (encoding_[i] != other.encoding_[i]) return false;
109 }
110 return true;
111 }
112
113 protected:
114 Operand() : length_(0), rex_(REX_NONE) {} // Needed by subclass Address.
115
116 void SetModRM(int mod, Register rm) {
117 ASSERT((mod & ~3) == 0);
118 if ((rm > 7) && !((rm == R12) && (mod != 3))) {
119 rex_ |= REX_B;
120 }
121 encoding_[0] = (mod << 6) | (rm & 7);
122 length_ = 1;
123 }
124
126 ASSERT(length_ == 1);
127 ASSERT((scale & ~3) == 0);
128 if (base > 7) {
129 ASSERT((rex_ & REX_B) == 0); // Must not have REX.B already set.
130 rex_ |= REX_B;
131 }
132 if (index > 7) rex_ |= REX_X;
133 encoding_[1] = (scale << 6) | ((index & 7) << 3) | (base & 7);
134 length_ = 2;
135 }
136
137 void SetDisp8(int8_t disp) {
138 ASSERT(length_ == 1 || length_ == 2);
139 encoding_[length_++] = static_cast<uint8_t>(disp);
140 }
141
142 void SetDisp32(int32_t disp) {
143 ASSERT(length_ == 1 || length_ == 2);
144 memmove(&encoding_[length_], &disp, sizeof(disp));
145 length_ += sizeof(disp);
146 }
147
148 private:
149 uint8_t length_;
150 uint8_t rex_;
151 uint8_t encoding_[6];
152
153 explicit Operand(Register reg) : rex_(REX_NONE) { SetModRM(3, reg); }
154
155 // Get the operand encoding byte at the given index.
156 uint8_t encoding_at(intptr_t index) const {
157 ASSERT(index >= 0 && index < length_);
158 return encoding_[index];
159 }
160
161 // Returns whether or not this operand is really the given register in
162 // disguise. Used from the assembler to generate better encodings.
163 bool IsRegister(Register reg) const {
164 return ((reg > 7 ? 1 : 0) == (rex_ & REX_B)) // REX.B match.
165 && ((encoding_at(0) & 0xF8) == 0xC0) // Addressing mode is register.
166 && ((encoding_at(0) & 0x07) == reg); // Register codes match.
167 }
168
169 friend class Assembler;
170};
171
172class Address : public Operand {
173 public:
174 Address(Register base, int32_t disp) {
175 if ((disp == 0) && ((base & 7) != RBP)) {
176 SetModRM(0, base);
177 if ((base & 7) == RSP) {
179 }
180 } else if (Utils::IsInt(8, disp)) {
181 SetModRM(1, base);
182 if ((base & 7) == RSP) {
184 }
185 SetDisp8(disp);
186 } else {
187 SetModRM(2, base);
188 if ((base & 7) == RSP) {
190 }
191 SetDisp32(disp);
192 }
193 }
194
195 // This addressing mode does not exist.
197
199 ASSERT(index != RSP); // Illegal addressing mode.
200 ASSERT(scale != TIMES_16); // Unsupported scale factor.
201 SetModRM(0, RSP);
203 SetDisp32(disp);
204 }
205
206 // This addressing mode does not exist.
208
210 ASSERT(index != RSP); // Illegal addressing mode.
211 ASSERT(scale != TIMES_16); // Unsupported scale factor.
212 if ((disp == 0) && ((base & 7) != RBP)) {
213 SetModRM(0, RSP);
215 } else if (Utils::IsInt(8, disp)) {
216 SetModRM(1, RSP);
218 SetDisp8(disp);
219 } else {
220 SetModRM(2, RSP);
222 SetDisp32(disp);
223 }
224 }
225
226 // This addressing mode does not exist.
228
229 Address(const Address& other) : Operand(other) {}
230
231 Address& operator=(const Address& other) {
232 Operand::operator=(other);
233 return *this;
234 }
235
236 static Address AddressRIPRelative(int32_t disp) {
237 return Address(RIPRelativeDisp(disp));
238 }
239 static Address AddressBaseImm32(Register base, int32_t disp) {
240 return Address(base, disp, true);
241 }
242
243 // This addressing mode does not exist.
245
246 private:
247 Address(Register base, int32_t disp, bool fixed) {
248 ASSERT(fixed);
249 SetModRM(2, base);
250 if ((base & 7) == RSP) {
252 }
253 SetDisp32(disp);
254 }
255
256 struct RIPRelativeDisp {
257 explicit RIPRelativeDisp(int32_t disp) : disp_(disp) {}
258 const int32_t disp_;
259 };
260
261 explicit Address(const RIPRelativeDisp& disp) {
262 SetModRM(0, static_cast<Register>(0x5));
263 SetDisp32(disp.disp_);
264 }
265};
266
267class FieldAddress : public Address {
268 public:
270 : Address(base, disp - kHeapObjectTag) {}
271
272 // This addressing mode does not exist.
274
277
278 // This addressing mode does not exist.
280
281 FieldAddress(const FieldAddress& other) : Address(other) {}
282
284 Address::operator=(other);
285 return *this;
286 }
287};
288
289#if !defined(DART_COMPRESSED_POINTERS)
290#define OBJ(op) op##q
291#else
292#define OBJ(op) op##l
293#endif
294
295class Assembler : public AssemblerBase {
296 public:
298 intptr_t far_branch_level = 0);
299
301
302 /*
303 * Emit Machine Instructions.
304 */
305 void call(Register reg) { EmitUnaryL(reg, 0xFF, 2); }
306 void call(const Address& address) { EmitUnaryL(address, 0xFF, 2); }
307 void call(Label* label);
308 void call(const ExternalLabel* label);
309
310 void pushq(Register reg);
311 void pushq(const Address& address) { EmitUnaryL(address, 0xFF, 6); }
312 void pushq(const Immediate& imm);
313 void PushImmediate(const Immediate& imm) { pushq(imm); }
314 void PushImmediate(int64_t value) { PushImmediate(Immediate(value)); }
315
316 void popq(Register reg);
317 void popq(const Address& address) { EmitUnaryL(address, 0x8F, 0); }
318
319 void setcc(Condition condition, ByteRegister dst);
320
322 void ExitFullSafepoint(bool ignore_unwind_in_progress);
323 void TransitionGeneratedToNative(Register destination_address,
324 Register new_exit_frame,
325 Register new_exit_through_ffi,
326 bool enter_safepoint);
327 void TransitionNativeToGenerated(bool leave_safepoint,
328 bool ignore_unwind_in_progress = false);
329
330// Register-register, register-address and address-register instructions.
331#define RR(width, name, ...) \
332 void name(Register dst, Register src) { Emit##width(dst, src, __VA_ARGS__); }
333#define RA(width, name, ...) \
334 void name(Register dst, const Address& src) { \
335 Emit##width(dst, src, __VA_ARGS__); \
336 }
337#define RAB(name, ...) \
338 void name(ByteRegister dst, const Address& src) { \
339 EmitB(dst, src, __VA_ARGS__); \
340 }
341#define AR(width, name, ...) \
342 void name(const Address& dst, Register src) { \
343 Emit##width(src, dst, __VA_ARGS__); \
344 }
345#define ARB(name, ...) \
346 void name(const Address& dst, ByteRegister src) { \
347 EmitB(src, dst, __VA_ARGS__); \
348 }
349#define REGULAR_INSTRUCTION(name, ...) \
350 RA(W, name##w, __VA_ARGS__) \
351 RA(L, name##l, __VA_ARGS__) \
352 RA(Q, name##q, __VA_ARGS__) \
353 RR(W, name##w, __VA_ARGS__) \
354 RR(L, name##l, __VA_ARGS__) \
355 RR(Q, name##q, __VA_ARGS__)
357 REGULAR_INSTRUCTION(xchg, 0x87)
358 REGULAR_INSTRUCTION(imul, 0xAF, 0x0F)
359 REGULAR_INSTRUCTION(bsf, 0xBC, 0x0F)
360 REGULAR_INSTRUCTION(bsr, 0xBD, 0x0F)
361 REGULAR_INSTRUCTION(popcnt, 0xB8, 0x0F, 0xF3)
362 REGULAR_INSTRUCTION(lzcnt, 0xBD, 0x0F, 0xF3)
363#undef REGULAR_INSTRUCTION
364 RA(Q, movsxd, 0x63)
365 RR(Q, movsxd, 0x63)
366 ARB(movb, 0x88)
367 AR(L, movl, 0x89)
368 AR(Q, movq, 0x89)
369 AR(W, movw, 0x89)
370 RAB(movb, 0x8A)
371 RA(L, movl, 0x8B)
372 RA(Q, movq, 0x8B)
373 RR(L, movl, 0x8B)
374 RA(Q, leaq, 0x8D)
375 RA(L, leal, 0x8D)
376 AR(L, cmpxchgl, 0xB1, 0x0F)
377 AR(Q, cmpxchgq, 0xB1, 0x0F)
378 RA(L, cmpxchgl, 0xB1, 0x0F)
379 RA(Q, cmpxchgq, 0xB1, 0x0F)
380 RR(L, cmpxchgl, 0xB1, 0x0F)
381 RR(Q, cmpxchgq, 0xB1, 0x0F)
382 RA(Q, movzxb, 0xB6, 0x0F)
383 RR(Q, movzxb, 0xB6, 0x0F)
384 RA(Q, movzxw, 0xB7, 0x0F)
385 RR(Q, movzxw, 0xB7, 0x0F)
386 RA(Q, movsxb, 0xBE, 0x0F)
387 RR(Q, movsxb, 0xBE, 0x0F)
388 RA(Q, movsxw, 0xBF, 0x0F)
389 RR(Q, movsxw, 0xBF, 0x0F)
390#define DECLARE_CMOV(name, code) \
391 RR(Q, cmov##name##q, 0x40 + code, 0x0F) \
392 RR(L, cmov##name##l, 0x40 + code, 0x0F) \
393 RA(Q, cmov##name##q, 0x40 + code, 0x0F) \
394 RA(L, cmov##name##l, 0x40 + code, 0x0F)
396#undef DECLARE_CMOV
397#undef AA
398#undef RA
399#undef AR
400
401#define SIMPLE(name, ...) \
402 void name() { EmitSimple(__VA_ARGS__); }
403 SIMPLE(cpuid, 0x0F, 0xA2)
404 SIMPLE(fcos, 0xD9, 0xFF)
405 SIMPLE(fincstp, 0xD9, 0xF7)
406 SIMPLE(fsin, 0xD9, 0xFE)
407 SIMPLE(lock, 0xF0)
408 SIMPLE(rep_movsb, 0xF3, 0xA4)
409 SIMPLE(rep_movsw, 0xF3, 0x66, 0xA5)
410 SIMPLE(rep_movsd, 0xF3, 0xA5)
411 SIMPLE(rep_movsq, 0xF3, 0x48, 0xA5)
412#undef SIMPLE
413// XmmRegister operations with another register or an address.
414#define XX(width, name, ...) \
415 void name(XmmRegister dst, XmmRegister src) { \
416 Emit##width(dst, src, __VA_ARGS__); \
417 }
418#define XA(width, name, ...) \
419 void name(XmmRegister dst, const Address& src) { \
420 Emit##width(dst, src, __VA_ARGS__); \
421 }
422#define AX(width, name, ...) \
423 void name(const Address& dst, XmmRegister src) { \
424 Emit##width(src, dst, __VA_ARGS__); \
425 }
426 // We could add movupd here, but movups does the same and is shorter.
427 XA(L, movups, 0x10, 0x0F);
428 XA(L, movsd, 0x10, 0x0F, 0xF2)
429 XA(L, movss, 0x10, 0x0F, 0xF3)
430 AX(L, movups, 0x11, 0x0F);
431 AX(L, movsd, 0x11, 0x0F, 0xF2)
432 AX(L, movss, 0x11, 0x0F, 0xF3)
433 XX(L, movhlps, 0x12, 0x0F)
434 XX(L, unpcklps, 0x14, 0x0F)
435 XX(L, unpcklpd, 0x14, 0x0F, 0x66)
436 XX(L, unpckhps, 0x15, 0x0F)
437 XX(L, unpckhpd, 0x15, 0x0F, 0x66)
438 XX(L, movlhps, 0x16, 0x0F)
439 XX(L, movaps, 0x28, 0x0F)
440 XX(L, comisd, 0x2F, 0x0F, 0x66)
441#define DECLARE_XMM(name, code) \
442 XX(L, name##ps, 0x50 + code, 0x0F) \
443 XA(L, name##ps, 0x50 + code, 0x0F) \
444 AX(L, name##ps, 0x50 + code, 0x0F) \
445 XX(L, name##pd, 0x50 + code, 0x0F, 0x66) \
446 XA(L, name##pd, 0x50 + code, 0x0F, 0x66) \
447 AX(L, name##pd, 0x50 + code, 0x0F, 0x66) \
448 XX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
449 XA(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
450 AX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
451 XX(L, name##ss, 0x50 + code, 0x0F, 0xF3) \
452 XA(L, name##ss, 0x50 + code, 0x0F, 0xF3) \
453 AX(L, name##ss, 0x50 + code, 0x0F, 0xF3)
455#undef DECLARE_XMM
456 XX(L, cvtps2pd, 0x5A, 0x0F)
457 XX(L, cvtpd2ps, 0x5A, 0x0F, 0x66)
458 XX(L, cvtsd2ss, 0x5A, 0x0F, 0xF2)
459 XX(L, cvtss2sd, 0x5A, 0x0F, 0xF3)
460 XX(L, pxor, 0xEF, 0x0F, 0x66)
461 XX(L, subpl, 0xFA, 0x0F, 0x66)
462 XX(L, addpl, 0xFE, 0x0F, 0x66)
463#undef XX
464#undef AX
465#undef XA
466
467#define DECLARE_CMPPS(name, code) \
468 void cmpps##name(XmmRegister dst, XmmRegister src) { \
469 EmitL(dst, src, 0xC2, 0x0F); \
470 AssemblerBuffer::EnsureCapacity ensured(&buffer_); \
471 EmitUint8(code); \
472 }
474#undef DECLARE_CMPPS
475
476#define DECLARE_SIMPLE(name, opcode) \
477 void name() { EmitSimple(opcode); }
479#undef DECLARE_SIMPLE
480
481 void movl(Register dst, const Immediate& imm);
482 void movl(const Address& dst, const Immediate& imm);
483
484 void movb(const Address& dst, const Immediate& imm);
485
486 void movw(Register dst, const Address& src);
487 void movw(const Address& dst, const Immediate& imm);
488
489 void movq(Register dst, const Immediate& imm);
490 void movq(const Address& dst, const Immediate& imm);
491
492 // Destination and source are reversed for some reason.
493 void movq(Register dst, XmmRegister src) {
494 EmitQ(src, dst, 0x7E, 0x0F, 0x66);
495 }
496 void movl(Register dst, XmmRegister src) {
497 EmitL(src, dst, 0x7E, 0x0F, 0x66);
498 }
500 EmitL(src, dst, 0x11, 0x0F, 0xF3);
501 }
503 EmitL(src, dst, 0x11, 0x0F, 0xF2);
504 }
505
506 // Use the reversed operand order and the 0x89 bytecode instead of the
507 // obvious 0x88 encoding for this some, because it is expected by gdb64 older
508 // than 7.3.1-gg5 when disassembling a function's prologue (movq rbp, rsp)
509 // for proper unwinding of Dart frames (use --generate_gdb_symbols and -O0).
510 void movq(Register dst, Register src) { EmitQ(src, dst, 0x89); }
511
512 void movq(XmmRegister dst, Register src) {
513 EmitQ(dst, src, 0x6E, 0x0F, 0x66);
514 }
515
516 void movd(XmmRegister dst, Register src) {
517 EmitL(dst, src, 0x6E, 0x0F, 0x66);
518 }
520 EmitQ(dst, src, 0x2A, 0x0F, 0xF2);
521 }
523 EmitL(dst, src, 0x2A, 0x0F, 0xF2);
524 }
526 EmitQ(dst, src, 0x2C, 0x0F, 0xF2);
527 }
529 EmitL(dst, src, 0x2C, 0x0F, 0xF2);
530 }
532 EmitL(dst, src, 0x50, 0x0F, 0x66);
533 }
534 void movmskps(Register dst, XmmRegister src) { EmitL(dst, src, 0x50, 0x0F); }
536 EmitL(dst, src, 0xD7, 0x0F, 0x66);
537 }
538
539 void btl(Register dst, Register src) { EmitL(src, dst, 0xA3, 0x0F); }
540 void btq(Register dst, Register src) { EmitQ(src, dst, 0xA3, 0x0F); }
541
546
547 void set1ps(XmmRegister dst, Register tmp, const Immediate& imm);
548 void shufps(XmmRegister dst, XmmRegister src, const Immediate& mask);
549
552 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& mask);
553
555 kRoundToNearest = 0x0,
556 kRoundDown = 0x1,
557 kRoundUp = 0x2,
558 kRoundToZero = 0x3
559 };
561
563 const Immediate& imm,
565 void CompareImmediate(const Address& address,
566 const Immediate& imm,
569 int64_t immediate,
570 OperandSize width = kEightBytes) override {
571 return CompareImmediate(reg, Immediate(immediate), width);
572 }
573
  // 32-bit test, implemented as a 64-bit test against the zero-extended
  // immediate.  NOTE(review): SF/PF then reflect the 64-bit result (SF is
  // always clear here), not the low 32 bits as a true testl would set —
  // confirm callers only rely on ZF.
  void testl(Register reg, const Immediate& imm) {
    testq(reg, Immediate(imm.value() & 0xFFFFFFFF));
  }
577 void testb(const Address& address, const Immediate& imm);
578 void testb(const Address& address, Register reg);
579
580 void testq(Register reg, const Immediate& imm);
582 const Immediate& imm,
584
585 void AndImmediate(Register dst, const Immediate& imm);
586 void AndImmediate(Register dst, int64_t value) override {
588 }
  // dst = src & value: copies src into dst, then masks dst in place.
  void AndImmediate(Register dst, Register src, int64_t value) {
    MoveRegister(dst, src);
    AndImmediate(dst, value);
  }
594 Register src1,
595 Register src2 = kNoRegister) override;
596 void OrImmediate(Register dst, const Immediate& imm);
597 void OrImmediate(Register dst, int64_t value) {
599 }
600 void XorImmediate(Register dst, const Immediate& imm);
601 void LslImmediate(Register dst, int32_t shift) {
602 shlq(dst, Immediate(shift));
603 }
604 void LslRegister(Register dst, Register shift) override;
605 void LsrImmediate(Register dst, int32_t shift) override {
606 shrq(dst, Immediate(shift));
607 }
608
  // Double-precision shift left (SHLD): shifts dst left, filling the
  // vacated low bits from the high bits of src.  The variable-count
  // encoding hard-codes CL, hence the RCX assert.
  void shldq(Register dst, Register src, Register shifter) {
    ASSERT(shifter == RCX);
    EmitQ(src, dst, 0xA5, 0x0F);
  }
  // Double-precision shift right (SHRD): shifts dst right, filling the
  // vacated high bits from the low bits of src.  The variable-count
  // encoding hard-codes CL, hence the RCX assert.
  void shrdq(Register dst, Register src, Register shifter) {
    ASSERT(shifter == RCX);
    EmitQ(src, dst, 0xAD, 0x0F);
  }
617
618#define DECLARE_ALU(op, c) \
619 void op##w(Register dst, Register src) { EmitW(dst, src, c * 8 + 3); } \
620 void op##l(Register dst, Register src) { EmitL(dst, src, c * 8 + 3); } \
621 void op##q(Register dst, Register src) { EmitQ(dst, src, c * 8 + 3); } \
622 void op##w(Register dst, const Address& src) { EmitW(dst, src, c * 8 + 3); } \
623 void op##l(Register dst, const Address& src) { EmitL(dst, src, c * 8 + 3); } \
624 void op##q(Register dst, const Address& src) { EmitQ(dst, src, c * 8 + 3); } \
625 void op##w(const Address& dst, Register src) { EmitW(src, dst, c * 8 + 1); } \
626 void op##l(const Address& dst, Register src) { EmitL(src, dst, c * 8 + 1); } \
627 void op##q(const Address& dst, Register src) { EmitQ(src, dst, c * 8 + 1); } \
628 void op##l(Register dst, const Immediate& imm) { AluL(c, dst, imm); } \
629 void op##q(Register dst, const Immediate& imm) { \
630 AluQ(c, c * 8 + 3, dst, imm); \
631 } \
632 void op##b(const Address& dst, const Immediate& imm) { AluB(c, dst, imm); } \
633 void op##w(const Address& dst, const Immediate& imm) { AluW(c, dst, imm); } \
634 void op##l(const Address& dst, const Immediate& imm) { AluL(c, dst, imm); } \
635 void op##q(const Address& dst, const Immediate& imm) { \
636 AluQ(c, c * 8 + 3, dst, imm); \
637 }
638
640
641#undef DECLARE_ALU
642#undef ALU_OPS
643
644 void cqo();
645
646#define REGULAR_UNARY(name, opcode, modrm) \
647 void name##q(Register reg) { EmitUnaryQ(reg, opcode, modrm); } \
648 void name##l(Register reg) { EmitUnaryL(reg, opcode, modrm); } \
649 void name##q(const Address& address) { EmitUnaryQ(address, opcode, modrm); } \
650 void name##l(const Address& address) { EmitUnaryL(address, opcode, modrm); }
651 REGULAR_UNARY(not, 0xF7, 2)
652 REGULAR_UNARY(neg, 0xF7, 3)
653 REGULAR_UNARY(mul, 0xF7, 4)
654 REGULAR_UNARY(imul, 0xF7, 5)
655 REGULAR_UNARY(div, 0xF7, 6)
656 REGULAR_UNARY(idiv, 0xF7, 7)
657 REGULAR_UNARY(inc, 0xFF, 0)
658 REGULAR_UNARY(dec, 0xFF, 1)
659#undef REGULAR_UNARY
660
661 void imull(Register reg, const Immediate& imm);
662
663 void imulq(Register dst, const Immediate& imm);
665 const Immediate& imm,
668 int64_t imm,
669 OperandSize width = kEightBytes) override {
670 MulImmediate(reg, Immediate(imm), width);
671 }
672
673 void shll(Register reg, const Immediate& imm);
674 void shll(Register operand, Register shifter);
675 void shrl(Register reg, const Immediate& imm);
676 void shrl(Register operand, Register shifter);
677 void sarl(Register reg, const Immediate& imm);
678 void sarl(Register operand, Register shifter);
679 void shldl(Register dst, Register src, const Immediate& imm);
680
681 void shlq(Register reg, const Immediate& imm);
682 void shlq(Register operand, Register shifter);
683 void shrq(Register reg, const Immediate& imm);
684 void shrq(Register operand, Register shifter);
685 void sarq(Register reg, const Immediate& imm);
686 void sarq(Register operand, Register shifter);
687 void shldq(Register dst, Register src, const Immediate& imm);
688
689 void btq(Register base, int bit);
690
691 void enter(const Immediate& imm);
692
693 void fldl(const Address& src);
694 void fstpl(const Address& dst);
695
696 void ffree(intptr_t value);
697
698 // 'size' indicates size in bytes and must be in the range 1..8.
699 void nop(int size = 1);
700
701 void j(Condition condition, Label* label, JumpDistance distance = kFarJump);
702 void jmp(Register reg) { EmitUnaryL(reg, 0xFF, 4); }
703 void jmp(const Address& address) { EmitUnaryL(address, 0xFF, 4); }
704 void jmp(Label* label, JumpDistance distance = kFarJump);
705 void jmp(const ExternalLabel* label);
706 void jmp(const Code& code);
707
708 // Issue memory to memory move through a TMP register.
709 // TODO(koda): Assert that these are not used for heap objects.
  // Copies one 64-bit word from [src] to [dst], clobbering TMP.
  void MoveMemoryToMemory(const Address& dst, const Address& src) {
    movq(TMP, src);
    movq(dst, TMP);
  }
714
  // Swaps the contents of reg and the 64-bit word at [mem], using TMP as
  // scratch (no atomicity implied; plain movq sequence).
  void Exchange(Register reg, const Address& mem) {
    movq(TMP, mem);
    movq(mem, reg);
    movq(reg, TMP);
  }
720
  // Swaps two 64-bit memory words using only TMP, via the XOR-swap trick:
  //   TMP = m1; TMP ^= m2          (TMP = m1^m2)
  //   m1 ^= TMP                    (m1 becomes old m2)
  //   m2 ^= TMP                    (m2 becomes old m1)
  // Leaves TMP = m1^m2 and clobbers flags (xorq sets them).
  void Exchange(const Address& mem1, const Address& mem2) {
    movq(TMP, mem1);
    xorq(TMP, mem2);
    xorq(mem1, TMP);
    xorq(mem2, TMP);
  }
727
728 // Methods for High-level operations and implemented on all architectures.
729 void Ret() { ret(); }
730
731 // Sets the return address to [value] as if there was a call.
732 // On X64 pushes [value].
734
737 void BranchIf(Condition condition,
738 Label* label,
739 JumpDistance distance = kFarJump) {
740 j(condition, label, distance);
741 }
743 Label* label,
744 JumpDistance distance = kFarJump) {
745 cmpq(src, Immediate(0));
746 j(ZERO, label, distance);
747 }
749 intptr_t bit_number,
750 Condition condition,
751 Label* label,
752 JumpDistance distance = kFarJump) {
753 testq(rn, Immediate(1 << bit_number));
754 j(condition, label, distance);
755 }
756
757 void ExtendValue(Register dst, Register src, OperandSize sz) override;
760
762 PushRegister(r1);
763 PushRegister(r0);
764 }
766 PopRegister(r0);
767 PopRegister(r1);
768 }
769
773
774 // Methods for adding/subtracting an immediate value that may be loaded from
775 // the constant pool.
776 // TODO(koda): Assert that these are not used for heap objects.
778 const Immediate& imm,
781 int64_t value,
784 }
785 void AddRegisters(Register dest, Register src) { addq(dest, src); }
786 // [dest] = [src] << [scale] + [value].
788 Register src,
790 int32_t value) {
791 leaq(dest, Address(src, scale, value));
792 }
793 void AddImmediate(Register dest, Register src, int64_t value);
794 void AddImmediate(const Address& address, const Immediate& imm);
796 const Immediate& imm,
798 void SubImmediate(const Address& address, const Immediate& imm);
799 void SubRegisters(Register dest, Register src) { subq(dest, src); }
800
801 void Drop(intptr_t stack_elements, Register tmp = TMP);
802
803 bool constant_pool_allowed() const { return constant_pool_allowed_; }
804 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
805
806 // Unlike movq this can affect the flags or use the constant pool.
807 void LoadImmediate(Register reg, const Immediate& imm);
808 void LoadImmediate(Register reg, int64_t immediate) override {
809 LoadImmediate(reg, Immediate(immediate));
810 }
811 void LoadSImmediate(FpuRegister dst, float immediate);
812 void LoadDImmediate(FpuRegister dst, double immediate);
814
818 void LoadObject(Register dst, const Object& obj);
820 Register dst,
821 const Object& obj,
825 const ExternalLabel* label,
827 void JmpPatchable(const Code& code, Register pp);
828 void Jmp(const Code& code, Register pp = PP);
829 void J(Condition condition, const Code& code, Register pp);
831 const Code& code,
835 void Call(const Code& stub_entry,
838
839 // Emit a call that shares its object pool entries with other calls
840 // that have the same equivalence marker.
841 void CallWithEquivalence(const Code& code,
842 const Object& equivalence,
844
845 void Call(Address target) { call(target); }
846
847 // Unaware of write barrier (use StoreInto* methods for storing to objects).
848 // TODO(koda): Add StackAddress/HeapAddress types to prevent misuse.
849 void StoreObject(const Address& dst,
850 const Object& obj,
851 OperandSize size = kWordBytes);
852 void PushObject(const Object& object);
853 void CompareObject(Register reg, const Object& object);
854
855#if defined(DART_COMPRESSED_POINTERS)
856 void LoadCompressed(Register dest, const Address& slot) override;
857#endif
858 void StoreBarrier(Register object, // Object we are storing into.
859 Register value, // Value we are storing.
860 CanBeSmi can_be_smi,
861 Register scratch) override;
862 void ArrayStoreBarrier(Register object, // Object we are storing into.
863 Register slot, // Slot into which we are storing.
864 Register value, // Value we are storing.
865 CanBeSmi can_be_smi,
866 Register scratch) override;
868
870 Register object,
871 const Address& dest,
872 const Object& value,
873 MemoryOrder memory_order = kRelaxedNonAtomic,
874 OperandSize size = kWordBytes) override;
875
876 // Stores a non-tagged value into a heap object.
878 const Address& dest,
879 Register value);
880
881 // Stores a Smi value into a heap object field that always contains a Smi.
882 void StoreIntoSmiField(const Address& dest, Register value);
883 void ZeroInitSmiField(const Address& dest);
885 // Increments a Smi field. Leaves flags in same state as an 'addq'.
886 void IncrementCompressedSmiField(const Address& dest, int64_t increment);
887
890
  // Atomic 64-bit compare-and-swap: LOCK CMPXCHG compares RAX with
  // [address]; if equal, stores reg there, else loads [address] into RAX.
  // ZF reports success.
  void LockCmpxchgq(const Address& address, Register reg) {
    lock();
    cmpxchgq(address, reg);
  }
895
  // Atomic 32-bit compare-and-swap: LOCK CMPXCHG compares EAX with
  // [address]; if equal, stores reg there, else loads [address] into EAX.
  // ZF reports success.
  void LockCmpxchgl(const Address& address, Register reg) {
    lock();
    cmpxchgl(address, reg);
  }
900
901 void PushRegisters(const RegisterSet& registers);
902 void PopRegisters(const RegisterSet& registers);
903
904 void PushRegistersInOrder(std::initializer_list<Register> regs);
905
907
908 void EnterFrame(intptr_t frame_space);
910 void ReserveAlignedFrameSpace(intptr_t frame_space);
911
912 // In debug mode, generates code to verify that:
913 // FP + kExitLinkSlotFromFp == SP
914 //
915 // Triggers breakpoint otherwise.
916 // Clobbers RAX.
918
919 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope,
920 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
921
922 // Call runtime function. Reserves shadow space on the stack before calling
923 // if platform ABI requires that.
924 void CallCFunction(Register reg, bool restore_rsp = false);
925 void CallCFunction(Address address, bool restore_rsp = false);
926
929
931 Register temp,
932 intptr_t low,
933 intptr_t high,
934 RangeCheckCondition condition,
935 Label* target) override;
936
937 // Loading and comparing classes of objects.
940
942 intptr_t class_id,
943 Register scratch = kNoRegister);
944
947
949 Register src,
950 Register scratch,
951 bool can_be_null = false) override;
952
953#if defined(DART_COMPRESSED_POINTERS)
954 void ExtendNonNegativeSmi(Register dst) override {
955 // Zero-extends and is a smaller instruction to output than sign
956 // extension (movsxd).
957 orl(dst, dst);
958 }
959#endif
960
961 // CheckClassIs fused with optimistic SmiUntag.
962 // Value in the register object is untagged optimistically.
963 void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label* smi);
964
965 // Misc. functionality.
  // Tags reg as a Smi by shifting left one bit (reg += reg).  OBJ selects
  // the 64-bit or 32-bit (compressed-pointer) form; matches the
  // kSmiTagSize shift that SmiUntag reverses.
  void SmiTag(Register reg) override { OBJ(add)(reg, reg); }
967
  // Untags a Smi in place: arithmetic right shift by kSmiTagSize keeps the
  // sign of negative Smis.
  void SmiUntag(Register reg) { OBJ(sar)(reg, Immediate(kSmiTagSize)); }
  // Untags src into dst (copies first when the registers differ).
  void SmiUntag(Register dst, Register src) {
    if (dst != src) {
      OBJ(mov)(dst, src);
    }
    OBJ(sar)(dst, Immediate(kSmiTagSize));
  }
975
977#if !defined(DART_COMPRESSED_POINTERS)
979#else
980 // This is shorter than
981 // shlq reg, 32
982 // sraq reg, 33
984 movsxd(reg, reg);
985#endif
986 }
987
989#if !defined(DART_COMPRESSED_POINTERS)
990 if (dst != src) {
991 movq(dst, src);
992 }
994#else
995 movsxd(dst, src);
997#endif
998 }
999
1000 // Truncates upper bits.
1002
1004
1006 Label* label,
1007 JumpDistance distance = kFarJump) {
1009 j(NOT_ZERO, label, distance);
1010 }
1011
1013 Label* label,
1014 JumpDistance distance = kFarJump) override {
1016 j(ZERO, label, distance);
1017 }
1018
1019 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
1021 Register reg2,
1022 intptr_t offset,
1024 Register temp,
1025 Label* equals) override;
1026
1027 void Align(int alignment, intptr_t offset);
1028 void Bind(Label* label) override;
1029 // Unconditional jump to a given label.
1030 void Jump(Label* label, JumpDistance distance = kFarJump) {
1031 jmp(label, distance);
1032 }
1033 // Unconditional jump to a given address in register.
1034 void Jump(Register target) { jmp(target); }
1035 // Unconditional jump to a given address in memory.
1036 void Jump(const Address& address) { jmp(address); }
1037
1038 // Arch-specific LoadFromOffset to choose the right operation for [sz].
1039 void Load(Register dst,
1040 const Address& address,
1041 OperandSize sz = kEightBytes) override;
1043 Register base,
1044 int32_t payload_offset,
1045 Register index,
1047 OperandSize sz = kEightBytes) override {
1048 Load(dst, FieldAddress(base, index, scale, payload_offset), sz);
1049 }
1050#if defined(DART_COMPRESSED_POINTERS)
1052 Register base,
1053 int32_t offset,
1054 Register index) override {
1057 }
1058#endif
1059 void Store(Register src,
1060 const Address& address,
1061 OperandSize sz = kEightBytes) override;
  // Stores constant 0 to [address].  temp is unused on x64 — a zero
  // immediate can be stored directly — but kept for cross-architecture
  // signature parity.
  void StoreZero(const Address& address, Register temp = kNoRegister) {
    movq(address, Immediate(0));
  }
1065 void LoadFromStack(Register dst, intptr_t depth);
1066 void StoreToStack(Register src, intptr_t depth);
1067 void CompareToStack(Register src, intptr_t depth);
1069 movq(dst, Address(base, offset));
1070 }
1072 OBJ(mov)(dst, Address(base, offset));
1073 }
1075 movq(Address(base, offset), src);
1076 }
1077
1079 movups(dst, Address(base, offset));
1080 }
1082 movups(Address(base, offset), dst);
1083 }
1085 if (src != dst) {
1086 movaps(dst, src);
1087 }
1088 }
1089
1091 movss(dst, Address(base, offset));
1092 }
1094 movsd(dst, Address(base, offset));
1095 }
1097 movsd(Address(base, offset), src);
1098 }
1100 if (src != dst) {
1101 movaps(dst, src);
1102 }
1103 }
1104
1105#if defined(TARGET_USES_THREAD_SANITIZER)
1106 void TsanLoadAcquire(Address addr);
1107 void TsanStoreRelease(Address addr);
1108#endif
1109
1111 const Address& address,
1112 OperandSize size = kEightBytes) override {
1113 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
1114 // with other loads).
1115 Load(dst, address, size);
1116#if defined(TARGET_USES_THREAD_SANITIZER)
1117 TsanLoadAcquire(address);
1118#endif
1119 }
1120#if defined(DART_COMPRESSED_POINTERS)
1121 void LoadAcquireCompressed(Register dst, const Address& address) override {
1122 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
1123 // with other loads).
1124 LoadCompressed(dst, address);
1125#if defined(TARGET_USES_THREAD_SANITIZER)
1126 TsanLoadAcquire(address);
1127#endif
1128 }
1129#endif
1131 const Address& address,
1132 OperandSize size = kWordBytes) override {
1133 // On intel stores have store-release behavior (i.e. stores are not
1134 // re-ordered with other stores).
1135 Store(src, address, size);
1136#if defined(TARGET_USES_THREAD_SANITIZER)
1137 TsanStoreRelease(address);
1138#endif
1139 }
1140
1142 Address address,
1143 OperandSize size = kEightBytes) override {
1144 ASSERT(size == kEightBytes || size == kFourBytes);
1145 if (size == kFourBytes) {
1146 cmpl(value, address);
1147 } else {
1148 cmpq(value, address);
1149 }
1150 }
1151
1154
1155 // Set up a Dart frame on entry with a frame pointer and PC information to
1156 // enable easy access to the RawInstruction object of code corresponding
1157 // to this frame.
1158 // The dart frame layout is as follows:
1159 // ....
1160 // locals space <=== RSP
1161 // saved PP
1162 // code object (used to derive the RawInstruction Object of the dart code)
1163 // saved RBP <=== RBP
1164 // ret PC
1165 // .....
1166 // This code sets this up with the sequence:
1167 // pushq rbp
1168 // movq rbp, rsp
1169 // call L
1170 // L: <code to adjust saved pc if there is any intrinsification code>
1171 // ...
1172 // pushq r15
1173 // .....
1174 void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
1176
1177 // Set up a Dart frame for a function compiled for on-stack replacement.
1178 // The frame layout is a normal Dart frame, but the frame is partially set
1179 // up on entry (it is the frame of the unoptimized code).
1180 void EnterOsrFrame(intptr_t extra_size);
1181
1182 // Set up a stub frame so that the stack traversal code can easily identify
1183 // a stub frame.
1184 // The stub frame layout is as follows:
1185 // .... <=== RSP
1186 // pc (used to derive the RawInstruction Object of the stub)
1187 // saved RBP <=== RBP
1188 // ret PC
1189 // .....
1190 // This code sets this up with the sequence:
1191 // pushq rbp
1192 // movq rbp, rsp
1193 // pushq immediate(0)
1194 // .....
1197
1198 // Set up a frame for calling a C function.
1199 // Automatically save the pinned registers in Dart which are not callee-
1200 // saved in the native calling convention.
1201 // Use together with CallCFunction.
1202 void EnterCFrame(intptr_t frame_space);
1204
1208
1209 void CombineHashes(Register dst, Register other) override;
1210 void FinalizeHashForSize(intptr_t bit_size,
1211 Register dst,
1212 Register scratch = TMP) override;
1213
1214 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
1215 // which will allocate in the runtime where tracing occurs.
1217 Label* trace,
1218 Register temp_reg = kNoRegister,
1220
1222 Label* trace,
1223 Register temp_reg = kNoRegister,
1225
1226 void TryAllocateObject(intptr_t cid,
1227 intptr_t instance_size,
1228 Label* failure,
1229 JumpDistance distance,
1230 Register instance_reg,
1231 Register temp) override;
1232
1233 void TryAllocateArray(intptr_t cid,
1234 intptr_t instance_size,
1235 Label* failure,
1236 JumpDistance distance,
1238 Register end_address,
1239 Register temp);
1240
1242#if defined(DEBUG)
1243 Label okay;
1244 cmpl(Address(top, 0), Immediate(kAllocationCanary));
1245 j(EQUAL, &okay, Assembler::kNearJump);
1246 Stop("Allocation canary");
1247 Bind(&okay);
1248#endif
1249 }
1251#if defined(DEBUG)
1253#endif
1254 }
1255
1256 // Copy [size] bytes from [src] address to [dst] address.
1257 // [size] should be a multiple of word size.
1258 // Clobbers [src], [dst], [size] and [temp] registers.
1259 // X64 requires fixed registers for memory copying:
1260 // [src] = RSI, [dst] = RDI, [size] = RCX.
1262 Register dst,
1263 Register size,
1264 Register temp = kNoRegister);
1265
1266 // This emits an PC-relative call of the form "callq *[rip+<offset>]". The
1267 // offset is not yet known and needs therefore relocation to the right place
1268 // before the code can be used.
1269 //
1270 // The necessary information for the "linker" (i.e. the relocation
1271 // information) is stored in [UntaggedCode::static_calls_target_table_]: an
1272 // entry of the form
1273 //
1274 // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
1275 //
1276 // will be used during relocation to fix the offset.
1277 //
1278 // The provided [offset_into_target] will be added to calculate the final
1279 // destination. It can be used e.g. for calling into the middle of a
1280 // function.
1281 void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);
1282
1283 // This emits an PC-relative tail call of the form "jmp *[rip+<offset>]".
1284 //
1285 // See also above for the pc-relative call.
1286 void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);
1287
1288 // Debugging and bringup support.
1289 void Breakpoint() override { int3(); }
1290
1291 static bool AddressCanHoldConstantIndex(const Object& constant,
1292 bool is_external,
1293 intptr_t cid,
1294 intptr_t index_scale);
1295
1296 static Address ElementAddressForIntIndex(bool is_external,
1297 intptr_t cid,
1298 intptr_t index_scale,
1299 Register array,
1300 intptr_t index);
1301 static Address ElementAddressForRegIndex(bool is_external,
1302 intptr_t cid,
1303 intptr_t index_scale,
1304 bool index_unboxed,
1305 Register array,
1306 Register index);
1307
1309 Register field,
1310 Register scratch) {
1312 scratch, compiler::FieldAddress(
1313 field, target::Field::host_offset_or_field_id_offset()));
1314 const intptr_t field_table_offset =
1315 compiler::target::Thread::field_table_values_offset();
1316 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
1317 static_assert(kSmiTagShift == 1, "adjust scale factor");
1318 leaq(address, Address(address, scratch, TIMES_HALF_WORD_SIZE, 0));
1319 }
1320
1323 Register offset_in_words_as_smi) override {
1324 static_assert(kSmiTagShift == 1, "adjust scale factor");
1325 leaq(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_4, 0));
1326 }
1327
1328#if defined(DART_COMPRESSED_POINTERS)
1330 Register address,
1332 Register offset_in_words_as_smi) override {
1333 static_assert(kSmiTagShift == 1, "adjust scale factor");
1334 leaq(address, FieldAddress(instance, offset_in_words_as_smi,
1336 }
1337#endif
1338
1341 int32_t offset) override {
1342 leaq(address, FieldAddress(instance, offset));
1343 }
1344
1346
1347 // On some other platforms, we draw a distinction between safe and unsafe
1348 // smis.
  // Unlike some other platforms (see the comment above), x64 draws no
  // safe/unsafe distinction: every object is "safe", and a value is a safe
  // Smi exactly when it is a Smi at all.
  static bool IsSafe(const Object& object) { return true; }
  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
1351
1352 void LoadWordFromPoolIndex(Register dst, intptr_t index);
1353 void StoreWordToPoolIndex(Register src, intptr_t index);
1354
1355 private:
1356 bool constant_pool_allowed_;
1357
1358 void CallCodeThroughPool(intptr_t target_code_pool_index,
1359 CodeEntryKind entry_kind);
1360
1361 bool CanLoadFromObjectPool(const Object& object) const;
1362 void LoadObjectHelper(
1363 Register dst,
1364 const Object& obj,
1365 bool is_unique,
1368
1369 void AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm);
1370 void AluB(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1371 void AluW(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1372 void AluL(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1373 void AluQ(uint8_t modrm_opcode,
1374 uint8_t opcode,
1375 Register dst,
1376 const Immediate& imm);
1377 void AluQ(uint8_t modrm_opcode,
1378 uint8_t opcode,
1379 const Address& dst,
1380 const Immediate& imm);
1381
1382 void EmitSimple(int opcode, int opcode2 = -1, int opcode3 = -1);
1383 void EmitUnaryQ(Register reg, int opcode, int modrm_code);
1384 void EmitUnaryL(Register reg, int opcode, int modrm_code);
1385 void EmitUnaryQ(const Address& address, int opcode, int modrm_code);
1386 void EmitUnaryL(const Address& address, int opcode, int modrm_code);
1387 // The prefixes are in reverse order due to the rules of default arguments in
1388 // C++.
1389 void EmitQ(int reg,
1390 const Address& address,
1391 int opcode,
1392 int prefix2 = -1,
1393 int prefix1 = -1);
1394 void EmitL(int reg,
1395 const Address& address,
1396 int opcode,
1397 int prefix2 = -1,
1398 int prefix1 = -1);
1399 void EmitW(Register reg,
1400 const Address& address,
1401 int opcode,
1402 int prefix2 = -1,
1403 int prefix1 = -1);
1404 void EmitQ(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1);
1405 void EmitL(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1);
1406 void EmitW(Register dst,
1407 Register src,
1408 int opcode,
1409 int prefix2 = -1,
1410 int prefix1 = -1);
1411 void EmitB(int reg, const Address& address, int opcode);
1412 void CmpPS(XmmRegister dst, XmmRegister src, int condition);
1413
1414 inline void EmitUint8(uint8_t value);
1415 inline void EmitInt32(int32_t value);
1416 inline void EmitUInt32(uint32_t value);
1417 inline void EmitInt64(int64_t value);
1418
1419 inline void EmitRegisterREX(Register reg,
1420 uint8_t rex,
1421 bool force_emit = false);
1422 inline void EmitOperandREX(int rm, const Operand& operand, uint8_t rex);
1423 inline void EmitRegisterOperand(int rm, int reg);
1424 inline void EmitFixup(AssemblerFixup* fixup);
1425 inline void EmitOperandSizeOverride();
1426 inline void EmitRegRegRex(int reg, int base, uint8_t rex = REX_NONE);
1427 void EmitOperand(int rm, const Operand& operand);
1428 void EmitImmediate(const Immediate& imm);
1429 void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
1430 void EmitSignExtendedInt8(int rm,
1431 const Operand& operand,
1432 const Immediate& immediate);
1433 void EmitLabel(Label* label, intptr_t instruction_size);
1434 void EmitLabelLink(Label* label);
1435 void EmitNearLabelLink(Label* label);
1436
1437 void EmitGenericShift(bool wide, int rm, Register reg, const Immediate& imm);
1438 void EmitGenericShift(bool wide, int rm, Register operand, Register shifter);
1439
1440 enum BarrierFilterMode {
1441 // Filter falls through into the barrier update code. Target label
1442 // is a "after-store" label.
1443 kJumpToNoUpdate,
1444
1445 // Filter falls through to the "after-store" code. Target label
1446 // is barrier update code label.
1447 kJumpToBarrier,
1448 };
1449
1450 void StoreIntoArrayBarrier(Register object,
1451 Register slot,
1452 Register value,
1453 CanBeSmi can_be_smi = kValueCanBeSmi);
1454
1455 // Unaware of write barrier (use StoreInto* methods for storing to objects).
1456 void MoveImmediate(const Address& dst,
1457 const Immediate& imm,
1458 OperandSize size = kWordBytes);
1459
1460 friend class dart::FlowGraphCompiler;
1461 std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
1462 std::function<void()> generate_invoke_array_write_barrier_;
1463
1466};
1467
// Appends a single raw byte to the instruction buffer.
inline void Assembler::EmitUint8(uint8_t value) {
  buffer_.Emit<uint8_t>(value);
}
1471
// Appends a 32-bit signed value (e.g. a displacement or immediate) to the
// instruction buffer.
inline void Assembler::EmitInt32(int32_t value) {
  buffer_.Emit<int32_t>(value);
}
1475
// Appends a 32-bit unsigned value to the instruction buffer.
inline void Assembler::EmitUInt32(uint32_t value) {
  buffer_.Emit<uint32_t>(value);
}
1479
// Appends a 64-bit signed value (e.g. a movq immediate) to the instruction
// buffer.
inline void Assembler::EmitInt64(int64_t value) {
  buffer_.Emit<int64_t>(value);
}
1483
1484inline void Assembler::EmitRegisterREX(Register reg, uint8_t rex, bool force) {
1485 ASSERT(reg != kNoRegister && reg <= R15);
1486 ASSERT(rex == REX_NONE || rex == REX_W);
1487 rex |= (reg > 7 ? REX_B : REX_NONE);
1488 if (rex != REX_NONE || force) EmitUint8(REX_PREFIX | rex);
1489}
1490
1491inline void Assembler::EmitOperandREX(int rm,
1492 const Operand& operand,
1493 uint8_t rex) {
1494 rex |= (rm > 7 ? REX_R : REX_NONE) | operand.rex();
1495 if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex);
1496}
1497
1498inline void Assembler::EmitRegRegRex(int reg, int base, uint8_t rex) {
1499 ASSERT(reg != kNoRegister && reg <= R15);
1500 ASSERT(base != kNoRegister && base <= R15);
1501 ASSERT(rex == REX_NONE || rex == REX_W);
1502 if (reg > 7) rex |= REX_R;
1503 if (base > 7) rex |= REX_B;
1504 if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex);
1505}
1506
// Records a fixup at the current buffer position; the fixup is applied when
// the buffer is finalized (presumably to patch in addresses not yet known —
// see AssemblerFixup).
inline void Assembler::EmitFixup(AssemblerFixup* fixup) {
  buffer_.EmitFixup(fixup);
}
1510
// Emits the 0x66 operand-size override prefix, selecting 16-bit operands for
// the following instruction.
inline void Assembler::EmitOperandSizeOverride() {
  EmitUint8(0x66);
}
1514
1515} // namespace compiler
1516} // namespace dart
1517
1518#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
int count
static bool equals(T *a, T *b)
#define DECLARE_ALU(op, opcode, opcode2, modrm_opcode)
#define AX(width, name,...)
#define RAB(name,...)
#define RR(width, name,...)
#define XX(width, name,...)
#define DECLARE_SIMPLE(name, opcode)
#define SIMPLE(name,...)
#define REGULAR_INSTRUCTION(name,...)
#define AR(width, name,...)
#define XA(width, name,...)
#define OBJ(op)
#define DECLARE_XMM(name, code)
#define DECLARE_CMPPS(name, code)
#define DECLARE_CMOV(name, code)
#define ARB(name,...)
#define REGULAR_UNARY(name, opcode, modrm)
static bool IsInt(intptr_t N, T value)
Definition utils.h:298
static bool IsUint(intptr_t N, T value)
Definition utils.h:313
static Address AddressBaseImm32(Register base, int32_t disp)
Address(Register base, Register index, ScaleFactor scale, int32_t disp)
Address(Register index, ScaleFactor scale, int32_t disp)
static Address AddressRIPRelative(int32_t disp)
Address(Register base, Register r)
Address & operator=(const Address &other)
static Address AddressBaseImm32(Register base, Register r)
Address(Register base, int32_t disp)
Address(Register base, Register index, ScaleFactor scale, Register r)
Address(Register index, ScaleFactor scale, Register r)
Address(const Address &other)
void LoadCompressedSmi(Register dst, const Address &address)
void Stop(const char *message)
void LoadCompressed(Register dst, const Address &address)
ObjectPoolBuilder & object_pool_builder()
void LoadCompressedFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi)
void LoadIndexedCompressed(Register dst, Register base, int32_t offset, Register index)
void ExtendNonNegativeSmi(Register dst)
void LoadAcquireCompressed(Register dst, const Address &address)
void EmitFixup(AssemblerFixup *fixup)
void PushRegistersInOrder(std::initializer_list< Register > regs)
void PopRegisterPair(Register r0, Register r1)
void MoveUnboxedDouble(FpuRegister dst, FpuRegister src)
void jmp(Register reg)
void LoadClassId(Register result, Register object)
void StoreObject(const Address &dst, const Object &obj, OperandSize size=kWordBytes)
void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label *smi)
void sarq(Register reg, const Immediate &imm)
void LoadPoolPointer(Register pp=PP)
void Call(Address target)
void unpcklpd(XmmRegister dst, XmmRegister src)
static Address ElementAddressForRegIndex(bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
bool CanLoadFromObjectPool(const Object &object) const
void CompareClassId(Register object, intptr_t class_id, Register scratch=kNoRegister)
void call(Label *label)
void PushRegisters(const RegisterSet &registers)
void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset)
void Jmp(const Code &code, Register pp=PP)
void CompareObject(Register reg, const Object &object)
void ZeroInitCompressedSmiField(const Address &dest)
void movsd(XmmRegister dst, XmmRegister src)
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src)
void LoadUniqueObject(Register dst, const Object &obj, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void abspd(XmmRegister dst, XmmRegister src)
void LoadObject(Register dst, const Object &obj)
void CombineHashes(Register dst, Register other) override
void BranchIfSmi(Register reg, Label *label, JumpDistance distance=kFarJump) override
void LoadTaggedClassIdMayBeSmi(Register result, Register object)
void notps(XmmRegister dst, XmmRegister src)
void movups(XmmRegister dst, const Address &src)
void AddImmediate(const Address &address, const Immediate &imm)
void LoadStaticFieldAddress(Register address, Register field, Register scratch)
void Load(Register reg, const Address &address, OperandSize type, Condition cond)
void BranchIfZero(Register src, Label *label, JumpDistance distance=kFarJump)
void unpckhpd(XmmRegister dst, XmmRegister src)
void movq(const Address &dst, XmmRegister src)
void LoadQImmediate(FpuRegister dst, simd128_value_t immediate)
void PushRegisterPair(Register r0, Register r1)
void StoreZero(const Address &address, Register temp=kNoRegister)
void shlq(Register reg, const Immediate &imm)
void Call(const Code &stub_entry, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void pxor(XmmRegister dst, XmmRegister src)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
void jmp(const ExternalLabel *label)
void movlhps(XmmRegister dst, XmmRegister src)
void ZeroInitSmiField(const Address &dest)
void movq(Register dst, Register src)
void LoadImmediate(Register reg, int64_t immediate) override
void LoadDImmediate(FpuRegister dst, double immediate)
void movl(const Address &dst, const Immediate &imm)
void movd(XmmRegister dst, Register src)
void movl(Register dst, const Immediate &src)
void DoubleNegate(XmmRegister dst, XmmRegister src)
void j(Condition condition, Label *label, JumpDistance distance=kFarJump)
void SmiUntag(Register reg)
void absps(XmmRegister dst, XmmRegister src)
void CompareImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void Jump(Label *label, JumpDistance distance=kFarJump)
void AddImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void shldq(Register dst, Register src, const Immediate &imm)
void JmpPatchable(const Code &code, Register pp)
void LoadFromStack(Register dst, intptr_t depth)
void movmskpd(Register dst, XmmRegister src)
void cvtsi2sdq(XmmRegister dst, Register src)
void leal(Register dst, const Address &src)
void shufps(XmmRegister dst, XmmRegister src, const Immediate &mask)
void shll(Register reg, const Immediate &imm)
void movb(const Address &dst, const Immediate &imm)
void shufpd(XmmRegister dst, XmmRegister src, const Immediate &mask)
void ExtendValue(Register dst, Register src, OperandSize sz) override
void CompareImmediate(Register rn, int32_t value, Condition cond)
void LoadIndexedPayload(Register dst, Register base, int32_t payload_offset, Register index, ScaleFactor scale, OperandSize sz=kEightBytes) override
void PushRegister(Register r)
void LoadMemoryValue(Register dst, Register base, int32_t offset)
void MulImmediate(Register reg, int64_t imm, OperandSize width=kEightBytes) override
void pushq(Register reg)
void CompareImmediate(const Address &address, const Immediate &imm, OperandSize width=kEightBytes)
void imulq(Register dst, const Immediate &imm)
void cvtpd2ps(XmmRegister dst, XmmRegister src)
void set1ps(XmmRegister dst, Register tmp, const Immediate &imm)
void roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode)
void set_constant_pool_allowed(bool b)
void sarq(Register operand, Register shifter)
void cvtps2pd(XmmRegister dst, XmmRegister src)
void subpl(XmmRegister dst, XmmRegister src)
void movw(Register dst, const Address &src)
void zerowps(XmmRegister dst, XmmRegister src)
void OrImmediate(Register dst, int64_t value)
void PushImmediate(const Immediate &imm)
void movmskps(Register dst, XmmRegister src)
void jmp(Label *label, JumpDistance distance=kFarJump)
void Align(int alignment, intptr_t offset)
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_external, intptr_t cid, intptr_t index_scale)
void StoreObjectIntoObjectNoBarrier(Register object, const Address &dest, const Object &value, MemoryOrder memory_order=kRelaxedNonAtomic, OperandSize size=kWordBytes) override
void MaybeTraceAllocation(Register cid, Label *trace, Register temp_reg=kNoRegister, JumpDistance distance=JumpDistance::kFarJump)
void EnterDartFrame(intptr_t frame_size, Register new_pp=kNoRegister)
void LoadClassById(Register result, Register class_id)
void cvtss2sd(XmmRegister dst, XmmRegister src)
void movzxw(Register dst, Register src)
void PushValueAtOffset(Register base, int32_t offset)
void LoadIsolate(Register dst)
void nop(int size=1)
void MaybeTraceAllocation(intptr_t cid, Label *trace, Register temp_reg=kNoRegister, JumpDistance distance=JumpDistance::kFarJump)
void imull(Register reg, const Immediate &imm)
void movsd(XmmRegister dst, const Address &src)
void PushImmediate(int64_t value)
void AndImmediate(Register dst, int64_t value) override
void OrImmediate(Register dst, const Immediate &imm)
void CheckAllocationCanary(Register top)
void XorImmediate(Register dst, const Immediate &imm)
void shll(Register operand, Register shifter)
void mov(Register rd, Operand o, Condition cond=AL)
void SetReturnAddress(Register value)
void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target=0)
void CallCFunction(Address address, bool restore_rsp=false)
void LoadInt32FromBoxOrSmi(Register result, Register value) override
void Exchange(Register reg, const Address &mem)
void BranchIf(Condition condition, Label *label, JumpDistance distance=kFarJump)
void EnterOsrFrame(intptr_t extra_size)
void comisd(XmmRegister a, XmmRegister b)
void btl(Register dst, Register src)
void PushObject(const Object &object)
void AndRegisters(Register dst, Register src1, Register src2=kNoRegister) override
void testb(const Address &address, const Immediate &imm)
void sarl(Register reg, const Immediate &imm)
void jmp(const Code &code)
void Exchange(const Address &mem1, const Address &mem2)
void cmp(Register rn, Operand o, Condition cond=AL)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
XA(L, movups, 0x10, 0x0F)
void popq(Register reg)
void StoreMemoryValue(Register src, Register base, int32_t offset)
void TransitionGeneratedToNative(Register destination_address, Register new_exit_frame, Register new_exit_through_ffi, bool enter_safepoint)
void CompareImmediate(Register reg, int64_t immediate, OperandSize width=kEightBytes) override
void pushq(const Immediate &imm)
void StoreToStack(Register src, intptr_t depth)
void TestImmediate(Register dst, const Immediate &imm, OperandSize width=kEightBytes)
void J(Condition condition, const Code &code, Register pp)
void CallWithEquivalence(const Code &code, const Object &equivalence, CodeEntryKind entry_kind=CodeEntryKind::kNormal)
void movw(const Address &dst, const Immediate &imm)
void popq(const Address &address)
void SmiUntagAndSignExtend(Register dst, Register src)
void SubRegisters(Register dest, Register src)
void PopRegisters(const RegisterSet &registers)
void StoreInternalPointer(Register object, const Address &dest, Register value)
void CompareRegisters(Register a, Register b)
void MulImmediate(Register reg, int32_t imm, OperandSize width=kFourBytes) override
void AndImmediate(Register dst, const Immediate &imm)
void cvttsd2sil(Register dst, XmmRegister src)
void AddRegisters(Register dest, Register src)
void testq(Register reg, const Immediate &imm)
void Bind(Label *label) override
void ReserveAlignedFrameSpace(intptr_t frame_space)
void negateps(XmmRegister dst, XmmRegister src)
void btq(Register base, int bit)
void AddImmediate(Register dest, Register src, int64_t value)
void SubImmediate(const Address &address, const Immediate &imm)
void LockCmpxchgq(const Address &address, Register reg)
void movq(const Address &dst, const Immediate &imm)
void RangeCheck(Register value, Register temp, intptr_t low, intptr_t high, RangeCheckCondition condition, Label *target) override
void enter(const Immediate &imm)
bool constant_pool_allowed() const
void setcc(Condition condition, ByteRegister dst)
void PopRegister(Register r)
void CallCFunction(Register reg, bool restore_rsp=false)
void AddImmediate(Register reg, int64_t value, OperandSize width=kEightBytes)
void ArrayStoreBarrier(Register object, Register slot, Register value, CanBeSmi can_be_smi, Register scratch) override
void testb(const Address &address, Register reg)
void btq(Register dst, Register src)
void LslRegister(Register dst, Register shift) override
void add(Register rd, Register rn, Operand o, Condition cond=AL)
void shrl(Register operand, Register shifter)
void CompareWithMemoryValue(Register value, Address address, OperandSize size=kEightBytes) override
void shrdq(Register dst, Register src, Register shifter)
void mul(Register rd, Register rn, Register rm, Condition cond=AL)
void TryAllocateArray(intptr_t cid, intptr_t instance_size, Label *failure, JumpDistance distance, Register instance, Register end_address, Register temp)
void WriteAllocationCanary(Register top)
void addpl(XmmRegister dst, XmmRegister src)
void Load(Register dst, const Address &address, OperandSize sz=kEightBytes) override
void CallRuntime(const RuntimeEntry &entry, intptr_t argument_count)
void ExtractInstanceSizeFromTags(Register result, Register tags)
void LockCmpxchgl(const Address &address, Register reg)
void CopyMemoryWords(Register src, Register dst, Register size, Register temp=kNoRegister)
void CompareToStack(Register src, intptr_t depth)
void LoadImmediate(Register rd, Immediate value, Condition cond=AL)
void shrq(Register operand, Register shifter)
void MulImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
static Address VMTagAddress()
void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void cvtsd2ss(XmmRegister dst, XmmRegister src)
void CompareObjectRegisters(Register a, Register b)
void Jump(const Address &address)
static bool IsSafeSmi(const Object &object)
void movsxb(Register dst, ByteRegister src)
void SmiUntagAndSignExtend(Register reg)
void LoadAcquire(Register dst, const Address &address, OperandSize size=kEightBytes) override
void shldq(Register dst, Register src, Register shifter)
void cvtsi2sdl(XmmRegister dst, Register src)
void EnterCFrame(intptr_t frame_space)
void StoreBarrier(Register object, Register value, CanBeSmi can_be_smi, Register scratch) override
void call(const Address &address)
void TryAllocateObject(intptr_t cid, intptr_t instance_size, Label *failure, JumpDistance distance, Register instance_reg, Register temp) override
void AddScaled(Register dest, Register src, ScaleFactor scale, int32_t value)
void shlq(Register operand, Register shifter)
void LoadSImmediate(FpuRegister dst, float immediate)
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override
void IncrementCompressedSmiField(const Address &dest, int64_t increment)
void cvttsd2siq(Register dst, XmmRegister src)
void CompareWords(Register reg1, Register reg2, intptr_t offset, Register count, Register temp, Label *equals) override
void StoreWordToPoolIndex(Register src, intptr_t index)
void pushq(const Address &address)
void shldl(Register dst, Register src, const Immediate &imm)
void testl(Register reg, const Immediate &imm)
void movl(Register dst, const Immediate &imm)
void pmovmskb(Register dst, XmmRegister src)
void movhlps(XmmRegister dst, XmmRegister src)
void movb(Register dst, const Address &src)
void BranchIfNotSmi(Register reg, Label *label, JumpDistance distance=kFarJump)
void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target=0)
void negatepd(XmmRegister dst, XmmRegister src)
void LoadInt64FromBoxOrSmi(Register result, Register value) override
void Store(Register src, const Address &address, OperandSize sz=kEightBytes) override
void SubImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void ExitFullSafepoint(bool ignore_unwind_in_progress)
void LslImmediate(Register dst, int32_t shift)
void EnterFrame(intptr_t frame_space)
void shrq(Register reg, const Immediate &imm)
static bool IsSafe(const Object &object)
void jmp(const Address &address)
void BranchIfBit(Register rn, intptr_t bit_number, Condition condition, Label *label, JumpDistance distance=kFarJump)
void Drop(intptr_t stack_elements, Register tmp=TMP)
void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void StoreIntoSmiField(const Address &dest, Register value)
void Jump(Register target)
void call(const ExternalLabel *label)
void CallPatchable(const Code &code, CodeEntryKind entry_kind=CodeEntryKind::kNormal, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void LoadClassIdMayBeSmi(Register result, Register object)
void sarl(Register operand, Register shifter)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void shrl(Register reg, const Immediate &imm)
void movss(XmmRegister dst, XmmRegister src)
void LoadIsolateGroup(Register dst)
void LoadFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi) override
void AndImmediate(Register dst, Register src, int64_t value)
void BranchOnMonomorphicCheckedEntryJIT(Label *label)
void movl(Register dst, XmmRegister src)
void Store(Register reg, const Address &address, OperandSize type, Condition cond)
void movzxb(Register dst, ByteRegister src)
void movss(XmmRegister dst, const Address &src)
void movq(Register dst, XmmRegister src)
void cmpxchgl(const Address &address, Register reg)
void MoveRegister(Register rd, Register rm, Condition cond)
void TransitionNativeToGenerated(bool leave_safepoint, bool ignore_unwind_in_progress=false)
void LoadWordFromPoolIndex(Register dst, intptr_t index)
void DoubleAbs(XmmRegister dst, XmmRegister src)
void LoadFieldAddressForOffset(Register address, Register instance, int32_t offset) override
void neg(Register rd, Register rm)
void StoreUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void ExtractClassIdFromTags(Register result, Register tags)
void movq(XmmRegister dst, Register src)
void fldl(const Address &src)
void unpckhps(XmmRegister dst, XmmRegister src)
void movq(Register dst, const Immediate &imm)
void FinalizeHashForSize(intptr_t bit_size, Register dst, Register scratch=TMP) override
void SmiUntag(Register dst, Register src)
void EnsureHasClassIdInDEBUG(intptr_t cid, Register src, Register scratch, bool can_be_null=false) override
void call(Register reg)
void LoadDispatchTable(Register dst)
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
void LsrImmediate(Register dst, int32_t shift) override
void MoveMemoryToMemory(const Address &dst, const Address &src)
void LoadNativeEntry(Register dst, const ExternalLabel *label, ObjectPoolBuilderEntry::Patchability patchable)
static Address ElementAddressForIntIndex(bool is_external, intptr_t cid, intptr_t index_scale, Register array, intptr_t index)
void LoadCompressedMemoryValue(Register dst, Register base, int32_t offset)
void movsxw(Register dst, Register src)
void StoreRelease(Register src, const Address &address, OperandSize size=kWordBytes) override
void LoadImmediate(Register reg, const Immediate &imm)
void fstpl(const Address &dst)
void SmiTag(Register reg) override
void LoadUnboxedSingle(FpuRegister dst, Register base, int32_t offset)
void unpcklps(XmmRegister dst, XmmRegister src)
void ffree(intptr_t value)
void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override
void movaps(XmmRegister dst, XmmRegister src)
FieldAddress & operator=(const FieldAddress &other)
FieldAddress(Register base, Register r)
FieldAddress(Register base, Register index, ScaleFactor scale, Register r)
FieldAddress(const FieldAddress &other)
FieldAddress(Register base, Register index, ScaleFactor scale, int32_t disp)
FieldAddress(Register base, int32_t disp)
Immediate(const Immediate &other)
void div(Register rd, Register rs1, Register rs2)
bool Equals(const Operand &other) const
void SetSIB(ScaleFactor scale, Register index, Register base)
Register base() const
int32_t disp32() const
Register index() const
Operand(const Operand &other)
void SetDisp8(int8_t disp)
Operand & operator=(const Operand &other)
void SetModRM(int mod, Register rm)
Register rm() const
void SetDisp32(int32_t disp)
ScaleFactor scale() const
#define XMM_ALU_CODES(F)
#define X86_CONDITIONAL_SUFFIXES(F)
#define XMM_CONDITIONAL_CODES(F)
#define X86_ALU_CODES(F)
#define X86_ZERO_OPERAND_1_BYTE_INSTRUCTIONS(F)
#define ASSERT(E)
VkInstance instance
Definition main.cc:48
static bool b
struct MyStruct a[10]
uint8_t value
GAsyncResult * result
uint32_t * target
int argument_count
Definition fuchsia.cc:52
bool IsSmi(int64_t v)
constexpr OperandSize kWordBytes
@ TIMES_COMPRESSED_HALF_WORD_SIZE
@ TIMES_COMPRESSED_WORD_SIZE
const Register THR
static constexpr intptr_t kAllocationCanary
Definition globals.h:181
@ kHeapObjectTag
@ kNoRegister
const Register TMP
const intptr_t cid
const Register PP
#define DISALLOW_ALLOCATION()
Definition globals.h:604
#define DISALLOW_COPY_AND_ASSIGN(TypeName)
Definition globals.h:581
int32_t width
const Scalar scale
Point offset