Flutter Engine
The Flutter Engine
assembler_x64.h
Go to the documentation of this file.
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
13#error Do not include assembler_x64.h directly; use assembler.h instead.
14#endif
15
16#include <functional>
17
18#include "platform/assert.h"
19#include "platform/utils.h"
21#include "vm/constants.h"
22#include "vm/constants_x86.h"
23#include "vm/hash_map.h"
24#include "vm/pointer_tagging.h"
25
26namespace dart {
27
28// Forward declarations.
29class FlowGraphCompiler;
30class RegisterSet;
31
32namespace compiler {
33
// An integer immediate operand. Wraps a 64-bit value and exposes width
// predicates so the assembler can choose the shortest instruction encoding.
class Immediate : public ValueObject {
 public:
  explicit Immediate(int64_t value) : value_(value) {}

  // Hand-written copy constructor; see the TODO at the bottom of the class.
  Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}

  int64_t value() const { return value_; }

  // Whether the value fits in the given signed/unsigned bit width.
  bool is_int8() const { return Utils::IsInt(8, value_); }
  bool is_uint8() const { return Utils::IsUint(8, value_); }
  bool is_int16() const { return Utils::IsInt(16, value_); }
  bool is_uint16() const { return Utils::IsUint(16, value_); }
  bool is_int32() const { return Utils::IsInt(32, value_); }
  bool is_uint32() const { return Utils::IsUint(32, value_); }

 private:
  const int64_t value_;

  // TODO(5411081): Add DISALLOW_COPY_AND_ASSIGN(Immediate) once the mac
  // build issue is resolved.
  // And remove the unnecessary copy constructor.
};
56
57class Operand : public ValueObject {
58 public:
59 uint8_t rex() const { return rex_; }
60
61 uint8_t mod() const { return (encoding_at(0) >> 6) & 3; }
62
63 Register rm() const {
64 int rm_rex = (rex_ & REX_B) << 3;
65 return static_cast<Register>(rm_rex + (encoding_at(0) & 7));
66 }
67
69 return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
70 }
71
72 Register index() const {
73 int index_rex = (rex_ & REX_X) << 2;
74 return static_cast<Register>(index_rex + ((encoding_at(1) >> 3) & 7));
75 }
76
77 Register base() const {
78 int base_rex = (rex_ & REX_B) << 3;
79 return static_cast<Register>(base_rex + (encoding_at(1) & 7));
80 }
81
82 int8_t disp8() const {
83 ASSERT(length_ >= 2);
84 return static_cast<int8_t>(encoding_[length_ - 1]);
85 }
86
87 int32_t disp32() const {
88 ASSERT(length_ >= 5);
89 return bit_copy<int32_t>(encoding_[length_ - 4]);
90 }
91
92 Operand(const Operand& other)
93 : ValueObject(), length_(other.length_), rex_(other.rex_) {
94 memmove(&encoding_[0], &other.encoding_[0], other.length_);
95 }
96
97 Operand& operator=(const Operand& other) {
98 length_ = other.length_;
99 rex_ = other.rex_;
100 memmove(&encoding_[0], &other.encoding_[0], other.length_);
101 return *this;
102 }
103
104 bool Equals(const Operand& other) const {
105 if (length_ != other.length_) return false;
106 if (rex_ != other.rex_) return false;
107 for (uint8_t i = 0; i < length_; i++) {
108 if (encoding_[i] != other.encoding_[i]) return false;
109 }
110 return true;
111 }
112
113 protected:
114 Operand() : length_(0), rex_(REX_NONE) {} // Needed by subclass Address.
115
116 void SetModRM(int mod, Register rm) {
117 ASSERT((mod & ~3) == 0);
118 if ((rm > 7) && !((rm == R12) && (mod != 3))) {
119 rex_ |= REX_B;
120 }
121 encoding_[0] = (mod << 6) | (rm & 7);
122 length_ = 1;
123 }
124
126 ASSERT(length_ == 1);
127 ASSERT((scale & ~3) == 0);
128 if (base > 7) {
129 ASSERT((rex_ & REX_B) == 0); // Must not have REX.B already set.
130 rex_ |= REX_B;
131 }
132 if (index > 7) rex_ |= REX_X;
133 encoding_[1] = (scale << 6) | ((index & 7) << 3) | (base & 7);
134 length_ = 2;
135 }
136
137 void SetDisp8(int8_t disp) {
138 ASSERT(length_ == 1 || length_ == 2);
139 encoding_[length_++] = static_cast<uint8_t>(disp);
140 }
141
142 void SetDisp32(int32_t disp) {
143 ASSERT(length_ == 1 || length_ == 2);
144 memmove(&encoding_[length_], &disp, sizeof(disp));
145 length_ += sizeof(disp);
146 }
147
148 private:
149 uint8_t length_;
150 uint8_t rex_;
151 uint8_t encoding_[6];
152
153 explicit Operand(Register reg) : rex_(REX_NONE) { SetModRM(3, reg); }
154
155 // Get the operand encoding byte at the given index.
156 uint8_t encoding_at(intptr_t index) const {
157 ASSERT(index >= 0 && index < length_);
158 return encoding_[index];
159 }
160
161 // Returns whether or not this operand is really the given register in
162 // disguise. Used from the assembler to generate better encodings.
163 bool IsRegister(Register reg) const {
164 return ((reg > 7 ? 1 : 0) == (rex_ & REX_B)) // REX.B match.
165 && ((encoding_at(0) & 0xF8) == 0xC0) // Addressing mode is register.
166 && ((encoding_at(0) & 0x07) == reg); // Register codes match.
167 }
168
169 friend class Assembler;
170};
171
172class Address : public Operand {
173 public:
174 Address(Register base, int32_t disp) {
175 if ((disp == 0) && ((base & 7) != RBP)) {
176 SetModRM(0, base);
177 if ((base & 7) == RSP) {
179 }
180 } else if (Utils::IsInt(8, disp)) {
181 SetModRM(1, base);
182 if ((base & 7) == RSP) {
184 }
185 SetDisp8(disp);
186 } else {
187 SetModRM(2, base);
188 if ((base & 7) == RSP) {
190 }
191 SetDisp32(disp);
192 }
193 }
194
195 // This addressing mode does not exist.
197
199 ASSERT(index != RSP); // Illegal addressing mode.
200 ASSERT(scale != TIMES_16); // Unsupported scale factor.
201 SetModRM(0, RSP);
203 SetDisp32(disp);
204 }
205
206 // This addressing mode does not exist.
208
210 ASSERT(index != RSP); // Illegal addressing mode.
211 ASSERT(scale != TIMES_16); // Unsupported scale factor.
212 if ((disp == 0) && ((base & 7) != RBP)) {
213 SetModRM(0, RSP);
215 } else if (Utils::IsInt(8, disp)) {
216 SetModRM(1, RSP);
218 SetDisp8(disp);
219 } else {
220 SetModRM(2, RSP);
222 SetDisp32(disp);
223 }
224 }
225
226 // This addressing mode does not exist.
228
229 Address(const Address& other) : Operand(other) {}
230
231 Address& operator=(const Address& other) {
232 Operand::operator=(other);
233 return *this;
234 }
235
236 static Address AddressRIPRelative(int32_t disp) {
237 return Address(RIPRelativeDisp(disp));
238 }
239 static Address AddressBaseImm32(Register base, int32_t disp) {
240 return Address(base, disp, true);
241 }
242
243 // This addressing mode does not exist.
245
246 private:
247 Address(Register base, int32_t disp, bool fixed) {
248 ASSERT(fixed);
249 SetModRM(2, base);
250 if ((base & 7) == RSP) {
252 }
253 SetDisp32(disp);
254 }
255
256 struct RIPRelativeDisp {
257 explicit RIPRelativeDisp(int32_t disp) : disp_(disp) {}
258 const int32_t disp_;
259 };
260
261 explicit Address(const RIPRelativeDisp& disp) {
262 SetModRM(0, static_cast<Register>(0x5));
263 SetDisp32(disp.disp_);
264 }
265};
266
267class FieldAddress : public Address {
268 public:
270 : Address(base, disp - kHeapObjectTag) {}
271
272 // This addressing mode does not exist.
274
276 : Address(base, index, scale, disp - kHeapObjectTag) {}
277
278 // This addressing mode does not exist.
280
281 FieldAddress(const FieldAddress& other) : Address(other) {}
282
284 Address::operator=(other);
285 return *this;
286 }
287};
288
289#if !defined(DART_COMPRESSED_POINTERS)
290#define OBJ(op) op##q
291#else
292#define OBJ(op) op##l
293#endif
294
295class Assembler : public AssemblerBase {
296 public:
298 intptr_t far_branch_level = 0);
299
301
302 /*
303 * Emit Machine Instructions.
304 */
305 void call(Register reg) { EmitUnaryL(reg, 0xFF, 2); }
306 void call(const Address& address) { EmitUnaryL(address, 0xFF, 2); }
307 void call(Label* label);
308 void call(const ExternalLabel* label);
309
310 void pushq(Register reg);
311 void pushq(const Address& address) { EmitUnaryL(address, 0xFF, 6); }
312 void pushq(const Immediate& imm);
313 void PushImmediate(const Immediate& imm) { pushq(imm); }
315
316 void popq(Register reg);
317 void popq(const Address& address) { EmitUnaryL(address, 0x8F, 0); }
318
319 void setcc(Condition condition, ByteRegister dst);
320
322 void ExitFullSafepoint(bool ignore_unwind_in_progress);
323 void TransitionGeneratedToNative(Register destination_address,
324 Register new_exit_frame,
325 Register new_exit_through_ffi,
326 bool enter_safepoint);
327 void TransitionNativeToGenerated(bool leave_safepoint,
328 bool ignore_unwind_in_progress = false,
329 bool set_tag = true);
330
331// Register-register, register-address and address-register instructions.
332#define RR(width, name, ...) \
333 void name(Register dst, Register src) { Emit##width(dst, src, __VA_ARGS__); }
334#define RA(width, name, ...) \
335 void name(Register dst, const Address& src) { \
336 Emit##width(dst, src, __VA_ARGS__); \
337 }
338#define RAB(name, ...) \
339 void name(ByteRegister dst, const Address& src) { \
340 EmitB(dst, src, __VA_ARGS__); \
341 }
342#define AR(width, name, ...) \
343 void name(const Address& dst, Register src) { \
344 Emit##width(src, dst, __VA_ARGS__); \
345 }
346#define ARB(name, ...) \
347 void name(const Address& dst, ByteRegister src) { \
348 EmitB(src, dst, __VA_ARGS__); \
349 }
350#define REGULAR_INSTRUCTION(name, ...) \
351 RA(W, name##w, __VA_ARGS__) \
352 RA(L, name##l, __VA_ARGS__) \
353 RA(Q, name##q, __VA_ARGS__) \
354 RR(W, name##w, __VA_ARGS__) \
355 RR(L, name##l, __VA_ARGS__) \
356 RR(Q, name##q, __VA_ARGS__)
358 REGULAR_INSTRUCTION(xchg, 0x87)
359 REGULAR_INSTRUCTION(imul, 0xAF, 0x0F)
360 REGULAR_INSTRUCTION(bsf, 0xBC, 0x0F)
361 REGULAR_INSTRUCTION(bsr, 0xBD, 0x0F)
362 REGULAR_INSTRUCTION(popcnt, 0xB8, 0x0F, 0xF3)
363 REGULAR_INSTRUCTION(lzcnt, 0xBD, 0x0F, 0xF3)
364#undef REGULAR_INSTRUCTION
365 RA(Q, movsxd, 0x63)
366 RR(Q, movsxd, 0x63)
367 ARB(movb, 0x88)
368 AR(L, movl, 0x89)
369 AR(Q, movq, 0x89)
370 AR(W, movw, 0x89)
371 RAB(movb, 0x8A)
372 RA(L, movl, 0x8B)
373 RA(Q, movq, 0x8B)
374 RR(L, movl, 0x8B)
375 RA(Q, leaq, 0x8D)
376 RA(L, leal, 0x8D)
377 AR(L, cmpxchgl, 0xB1, 0x0F)
378 AR(Q, cmpxchgq, 0xB1, 0x0F)
379 RA(L, cmpxchgl, 0xB1, 0x0F)
380 RA(Q, cmpxchgq, 0xB1, 0x0F)
381 RR(L, cmpxchgl, 0xB1, 0x0F)
382 RR(Q, cmpxchgq, 0xB1, 0x0F)
383 RA(Q, movzxb, 0xB6, 0x0F)
384 RR(Q, movzxb, 0xB6, 0x0F)
385 RA(Q, movzxw, 0xB7, 0x0F)
386 RR(Q, movzxw, 0xB7, 0x0F)
387 RA(Q, movsxb, 0xBE, 0x0F)
388 RR(Q, movsxb, 0xBE, 0x0F)
389 RA(Q, movsxw, 0xBF, 0x0F)
390 RR(Q, movsxw, 0xBF, 0x0F)
391#define DECLARE_CMOV(name, code) \
392 RR(Q, cmov##name##q, 0x40 + code, 0x0F) \
393 RR(L, cmov##name##l, 0x40 + code, 0x0F) \
394 RA(Q, cmov##name##q, 0x40 + code, 0x0F) \
395 RA(L, cmov##name##l, 0x40 + code, 0x0F)
397#undef DECLARE_CMOV
398#undef AA
399#undef RA
400#undef AR
401
402#define SIMPLE(name, ...) \
403 void name() { EmitSimple(__VA_ARGS__); }
404 SIMPLE(cpuid, 0x0F, 0xA2)
405 SIMPLE(fcos, 0xD9, 0xFF)
406 SIMPLE(fincstp, 0xD9, 0xF7)
407 SIMPLE(fsin, 0xD9, 0xFE)
408 SIMPLE(lock, 0xF0)
409 SIMPLE(rep_movsb, 0xF3, 0xA4)
410 SIMPLE(rep_movsw, 0xF3, 0x66, 0xA5)
411 SIMPLE(rep_movsd, 0xF3, 0xA5)
412 SIMPLE(rep_movsq, 0xF3, 0x48, 0xA5)
413#undef SIMPLE
414// XmmRegister operations with another register or an address.
415#define XX(width, name, ...) \
416 void name(XmmRegister dst, XmmRegister src) { \
417 Emit##width(dst, src, __VA_ARGS__); \
418 }
419#define XA(width, name, ...) \
420 void name(XmmRegister dst, const Address& src) { \
421 Emit##width(dst, src, __VA_ARGS__); \
422 }
423#define AX(width, name, ...) \
424 void name(const Address& dst, XmmRegister src) { \
425 Emit##width(src, dst, __VA_ARGS__); \
426 }
427 // We could add movupd here, but movups does the same and is shorter.
428 XA(L, movups, 0x10, 0x0F);
429 XA(L, movsd, 0x10, 0x0F, 0xF2)
430 XA(L, movss, 0x10, 0x0F, 0xF3)
431 AX(L, movups, 0x11, 0x0F);
432 AX(L, movsd, 0x11, 0x0F, 0xF2)
433 AX(L, movss, 0x11, 0x0F, 0xF3)
434 XX(L, movhlps, 0x12, 0x0F)
435 XX(L, unpcklps, 0x14, 0x0F)
436 XX(L, unpcklpd, 0x14, 0x0F, 0x66)
437 XX(L, unpckhps, 0x15, 0x0F)
438 XX(L, unpckhpd, 0x15, 0x0F, 0x66)
439 XX(L, movlhps, 0x16, 0x0F)
440 XX(L, movaps, 0x28, 0x0F)
441 XX(L, comisd, 0x2F, 0x0F, 0x66)
442#define DECLARE_XMM(name, code) \
443 XX(L, name##ps, 0x50 + code, 0x0F) \
444 XA(L, name##ps, 0x50 + code, 0x0F) \
445 AX(L, name##ps, 0x50 + code, 0x0F) \
446 XX(L, name##pd, 0x50 + code, 0x0F, 0x66) \
447 XA(L, name##pd, 0x50 + code, 0x0F, 0x66) \
448 AX(L, name##pd, 0x50 + code, 0x0F, 0x66) \
449 XX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
450 XA(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
451 AX(L, name##sd, 0x50 + code, 0x0F, 0xF2) \
452 XX(L, name##ss, 0x50 + code, 0x0F, 0xF3) \
453 XA(L, name##ss, 0x50 + code, 0x0F, 0xF3) \
454 AX(L, name##ss, 0x50 + code, 0x0F, 0xF3)
456#undef DECLARE_XMM
457 XX(L, cvtps2pd, 0x5A, 0x0F)
458 XX(L, cvtpd2ps, 0x5A, 0x0F, 0x66)
459 XX(L, cvtsd2ss, 0x5A, 0x0F, 0xF2)
460 XX(L, cvtss2sd, 0x5A, 0x0F, 0xF3)
461 XX(L, pxor, 0xEF, 0x0F, 0x66)
462 XX(L, subpl, 0xFA, 0x0F, 0x66)
463 XX(L, addpl, 0xFE, 0x0F, 0x66)
464#undef XX
465#undef AX
466#undef XA
467
468#define DECLARE_CMPPS(name, code) \
469 void cmpps##name(XmmRegister dst, XmmRegister src) { \
470 EmitL(dst, src, 0xC2, 0x0F); \
471 AssemblerBuffer::EnsureCapacity ensured(&buffer_); \
472 EmitUint8(code); \
473 }
475#undef DECLARE_CMPPS
476
477#define DECLARE_SIMPLE(name, opcode) \
478 void name() { EmitSimple(opcode); }
480#undef DECLARE_SIMPLE
481
482 void movl(Register dst, const Immediate& imm);
483 void movl(const Address& dst, const Immediate& imm);
484
485 void movb(const Address& dst, const Immediate& imm);
486
487 void movw(Register dst, const Address& src);
488 void movw(const Address& dst, const Immediate& imm);
489
490 void movq(Register dst, const Immediate& imm);
491 void movq(const Address& dst, const Immediate& imm);
492
493 // Destination and source are reversed for some reason.
495 EmitQ(src, dst, 0x7E, 0x0F, 0x66);
496 }
498 EmitL(src, dst, 0x7E, 0x0F, 0x66);
499 }
501 EmitL(src, dst, 0x11, 0x0F, 0xF3);
502 }
504 EmitL(src, dst, 0x11, 0x0F, 0xF2);
505 }
506
  // Use the reversed operand order and the 0x89 bytecode instead of the
  // obvious 0x8B encoding for this instruction, because it is expected by
  // gdb64 older than 7.3.1-gg5 when disassembling a function's prologue
  // (movq rbp, rsp) for proper unwinding of Dart frames (use
  // --generate_gdb_symbols and -O0).
  void movq(Register dst, Register src) { EmitQ(src, dst, 0x89); }
512
514 EmitQ(dst, src, 0x6E, 0x0F, 0x66);
515 }
516
518 EmitL(dst, src, 0x6E, 0x0F, 0x66);
519 }
521 EmitQ(dst, src, 0x2A, 0x0F, 0xF2);
522 }
524 EmitL(dst, src, 0x2A, 0x0F, 0xF2);
525 }
527 EmitQ(dst, src, 0x2C, 0x0F, 0xF2);
528 }
530 EmitL(dst, src, 0x2C, 0x0F, 0xF2);
531 }
533 EmitL(dst, src, 0x50, 0x0F, 0x66);
534 }
535 void movmskps(Register dst, XmmRegister src) { EmitL(dst, src, 0x50, 0x0F); }
537 EmitL(dst, src, 0xD7, 0x0F, 0x66);
538 }
539
540 void btl(Register dst, Register src) { EmitL(src, dst, 0xA3, 0x0F); }
541 void btq(Register dst, Register src) { EmitQ(src, dst, 0xA3, 0x0F); }
542
547
548 void set1ps(XmmRegister dst, Register tmp, const Immediate& imm);
550
554
556 kRoundToNearest = 0x0,
557 kRoundDown = 0x1,
558 kRoundUp = 0x2,
559 kRoundToZero = 0x3
560 };
562
564 const Immediate& imm,
566 void CompareImmediate(const Address& address,
567 const Immediate& imm,
570 int64_t immediate,
571 OperandSize width = kEightBytes) override {
572 return CompareImmediate(reg, Immediate(immediate), width);
573 }
574
  // 32-bit test: the immediate is masked to its low 32 bits so the testq
  // below sees a zero-extended value rather than a sign-extended one.
  void testl(Register reg, const Immediate& imm) {
    testq(reg, Immediate(imm.value() & 0xFFFFFFFF));
  }
578 void testb(const Address& address, const Immediate& imm);
579 void testb(const Address& address, Register reg);
580
581 void testq(Register reg, const Immediate& imm);
583 const Immediate& imm,
585
587 void AndImmediate(Register dst, int64_t value) override {
589 }
593 }
595 Register src1,
596 Register src2 = kNoRegister) override;
598 void OrImmediate(Register dst, int64_t value) {
600 }
602 void LslImmediate(Register dst, int32_t shift) {
603 shlq(dst, Immediate(shift));
604 }
605 void LslRegister(Register dst, Register shift) override;
606 void LsrImmediate(Register dst, int32_t shift) override {
607 shrq(dst, Immediate(shift));
608 }
609
611 ASSERT(shifter == RCX);
612 EmitQ(src, dst, 0xA5, 0x0F);
613 }
615 ASSERT(shifter == RCX);
616 EmitQ(src, dst, 0xAD, 0x0F);
617 }
618
// Declares the full form family for one classic x86 group-1 ALU op.
// 'c' is the op's group index (add/or/adc/sbb/and/sub/xor/cmp per the x86
// encoding order): opcode c*8+3 is the reg <- reg/mem form, c*8+1 the
// mem <- reg form, and the immediate forms go through AluB/AluW/AluL/AluQ.
#define DECLARE_ALU(op, c) \
  void op##w(Register dst, Register src) { EmitW(dst, src, c * 8 + 3); } \
  void op##l(Register dst, Register src) { EmitL(dst, src, c * 8 + 3); } \
  void op##q(Register dst, Register src) { EmitQ(dst, src, c * 8 + 3); } \
  void op##w(Register dst, const Address& src) { EmitW(dst, src, c * 8 + 3); } \
  void op##l(Register dst, const Address& src) { EmitL(dst, src, c * 8 + 3); } \
  void op##q(Register dst, const Address& src) { EmitQ(dst, src, c * 8 + 3); } \
  void op##w(const Address& dst, Register src) { EmitW(src, dst, c * 8 + 1); } \
  void op##l(const Address& dst, Register src) { EmitL(src, dst, c * 8 + 1); } \
  void op##q(const Address& dst, Register src) { EmitQ(src, dst, c * 8 + 1); } \
  void op##l(Register dst, const Immediate& imm) { AluL(c, dst, imm); } \
  void op##q(Register dst, const Immediate& imm) { \
    AluQ(c, c * 8 + 3, dst, imm); \
  } \
  void op##b(const Address& dst, const Immediate& imm) { AluB(c, dst, imm); } \
  void op##w(const Address& dst, const Immediate& imm) { AluW(c, dst, imm); } \
  void op##l(const Address& dst, const Immediate& imm) { AluL(c, dst, imm); } \
  void op##q(const Address& dst, const Immediate& imm) { \
    AluQ(c, c * 8 + 3, dst, imm); \
  }
639
641
642#undef DECLARE_ALU
643#undef ALU_OPS
644
645 void cqo();
646
647#define REGULAR_UNARY(name, opcode, modrm) \
648 void name##q(Register reg) { EmitUnaryQ(reg, opcode, modrm); } \
649 void name##l(Register reg) { EmitUnaryL(reg, opcode, modrm); } \
650 void name##q(const Address& address) { EmitUnaryQ(address, opcode, modrm); } \
651 void name##l(const Address& address) { EmitUnaryL(address, opcode, modrm); }
652 REGULAR_UNARY(not, 0xF7, 2)
653 REGULAR_UNARY(neg, 0xF7, 3)
654 REGULAR_UNARY(mul, 0xF7, 4)
655 REGULAR_UNARY(imul, 0xF7, 5)
656 REGULAR_UNARY(div, 0xF7, 6)
657 REGULAR_UNARY(idiv, 0xF7, 7)
658 REGULAR_UNARY(inc, 0xFF, 0)
659 REGULAR_UNARY(dec, 0xFF, 1)
660#undef REGULAR_UNARY
661
662 void imull(Register reg, const Immediate& imm);
663
664 void imulq(Register dst, const Immediate& imm);
666 const Immediate& imm,
669 int64_t imm,
670 OperandSize width = kEightBytes) override {
671 MulImmediate(reg, Immediate(imm), width);
672 }
673
674 void shll(Register reg, const Immediate& imm);
675 void shll(Register operand, Register shifter);
676 void shrl(Register reg, const Immediate& imm);
677 void shrl(Register operand, Register shifter);
678 void sarl(Register reg, const Immediate& imm);
679 void sarl(Register operand, Register shifter);
681
682 void shlq(Register reg, const Immediate& imm);
683 void shlq(Register operand, Register shifter);
684 void shrq(Register reg, const Immediate& imm);
685 void shrq(Register operand, Register shifter);
686 void sarq(Register reg, const Immediate& imm);
687 void sarq(Register operand, Register shifter);
689
690 void btq(Register base, int bit);
691
692 void enter(const Immediate& imm);
693
694 void fldl(const Address& src);
695 void fstpl(const Address& dst);
696
697 void ffree(intptr_t value);
698
699 // 'size' indicates size in bytes and must be in the range 1..8.
700 void nop(int size = 1);
701
702 void j(Condition condition, Label* label, JumpDistance distance = kFarJump);
703 void jmp(Register reg) { EmitUnaryL(reg, 0xFF, 4); }
704 void jmp(const Address& address) { EmitUnaryL(address, 0xFF, 4); }
706 void jmp(const ExternalLabel* label);
707 void jmp(const Code& code);
708
  // Issue memory to memory move through a TMP register.
  // TODO(koda): Assert that these are not used for heap objects.
  void MoveMemoryToMemory(const Address& dst, const Address& src) {
    movq(TMP, src);  // TMP := *src
    movq(dst, TMP);  // *dst := TMP
  }
715
  // Swaps the contents of [reg] and [mem]. Clobbers TMP.
  void Exchange(Register reg, const Address& mem) {
    movq(TMP, mem);
    movq(mem, reg);
    movq(reg, TMP);
  }
721
  // Swaps two memory locations via the XOR trick, needing only one scratch
  // register. Clobbers TMP and the condition flags (xorq sets them).
  void Exchange(const Address& mem1, const Address& mem2) {
    movq(TMP, mem1);  // TMP = m1
    xorq(TMP, mem2);  // TMP = m1 ^ m2
    xorq(mem1, TMP);  // m1 = m1 ^ (m1 ^ m2) = old m2
    xorq(mem2, TMP);  // m2 = m2 ^ (m1 ^ m2) = old m1
  }
728
729 // Methods for High-level operations and implemented on all architectures.
730 void Ret() { ret(); }
731
732 // Sets the return address to [value] as if there was a call.
733 // On X64 pushes [value].
735
738 void BranchIf(Condition condition,
739 Label* label,
741 j(condition, label, distance);
742 }
744 Label* label,
746 cmpq(src, Immediate(0));
747 j(ZERO, label, distance);
748 }
750 intptr_t bit_number,
751 Condition condition,
752 Label* label,
754 testq(rn, Immediate(1 << bit_number));
755 j(condition, label, distance);
756 }
757
761
763 PushRegister(r1);
764 PushRegister(r0);
765 }
767 PopRegister(r0);
768 PopRegister(r1);
769 }
770
773 }
774
775 // Methods for adding/subtracting an immediate value that may be loaded from
776 // the constant pool.
777 // TODO(koda): Assert that these are not used for heap objects.
779 const Immediate& imm,
782 int64_t value,
785 }
789 Register index,
791 int32_t disp) override {
792 if (base == kNoRegister) {
793 leaq(dest, Address(index, scale, disp));
794 } else {
795 leaq(dest, Address(base, index, scale, disp));
796 }
797 }
799 void AddImmediate(const Address& address, const Immediate& imm);
801 const Immediate& imm,
803 void SubImmediate(const Address& address, const Immediate& imm);
805
806 void Drop(intptr_t stack_elements, Register tmp = TMP);
807
808 bool constant_pool_allowed() const { return constant_pool_allowed_; }
809 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
810
811 // Unlike movq this can affect the flags or use the constant pool.
812 void LoadImmediate(Register reg, const Immediate& imm);
813 void LoadImmediate(Register reg, int64_t immediate) override {
814 LoadImmediate(reg, Immediate(immediate));
815 }
816 void LoadSImmediate(FpuRegister dst, float immediate);
817 void LoadDImmediate(FpuRegister dst, double immediate);
819
823 void LoadObject(Register dst, const Object& obj);
826 const Object& obj,
830 const ExternalLabel* label,
832 void JmpPatchable(const Code& code, Register pp);
833 void Jmp(const Code& code, Register pp = PP);
834 void J(Condition condition, const Code& code, Register pp);
836 const Code& code,
840 void Call(const Code& stub_entry,
843
844 // Emit a call that shares its object pool entries with other calls
845 // that have the same equivalence marker.
847 const Object& equivalence,
849
851
852 // Unaware of write barrier (use StoreInto* methods for storing to objects).
853 // TODO(koda): Add StackAddress/HeapAddress types to prevent misuse.
855 const Object& obj,
857 void PushObject(const Object& object);
858 void CompareObject(Register reg, const Object& object);
859
860#if defined(DART_COMPRESSED_POINTERS)
861 void LoadCompressed(Register dest, const Address& slot) override;
862#endif
863 void StoreBarrier(Register object, // Object we are storing into.
864 Register value, // Value we are storing.
865 CanBeSmi can_be_smi,
866 Register scratch) override;
867 void ArrayStoreBarrier(Register object, // Object we are storing into.
868 Register slot, // Slot into which we are storing.
869 Register value, // Value we are storing.
870 CanBeSmi can_be_smi,
871 Register scratch) override;
873
875 Register object,
876 const Address& dest,
877 const Object& value,
878 MemoryOrder memory_order = kRelaxedNonAtomic,
879 OperandSize size = kWordBytes) override;
880
881 // Stores a non-tagged value into a heap object.
883 const Address& dest,
885
886 // Stores a Smi value into a heap object field that always contains a Smi.
890 // Increments a Smi field. Leaves flags in same state as an 'addq'.
891 void IncrementCompressedSmiField(const Address& dest, int64_t increment);
892
895
  // Atomic 64-bit compare-and-swap (LOCK-prefixed cmpxchgq). Per the x86
  // semantics: compares RAX with [address]; if equal, stores reg there,
  // otherwise loads the memory value into RAX. ZF is set on success.
  void LockCmpxchgq(const Address& address, Register reg) {
    lock();
    cmpxchgq(address, reg);
  }
900
  // Atomic 32-bit compare-and-swap (LOCK-prefixed cmpxchgl). Per the x86
  // semantics: compares EAX with [address]; if equal, stores reg there,
  // otherwise loads the memory value into EAX. ZF is set on success.
  void LockCmpxchgl(const Address& address, Register reg) {
    lock();
    cmpxchgl(address, reg);
  }
905
906 void PushRegisters(const RegisterSet& registers);
907 void PopRegisters(const RegisterSet& registers);
908
909 void PushRegistersInOrder(std::initializer_list<Register> regs);
910
912
913 void EnterFrame(intptr_t frame_space);
915 void ReserveAlignedFrameSpace(intptr_t frame_space);
916
917 // In debug mode, generates code to verify that:
918 // FP + kExitLinkSlotFromFp == SP
919 //
920 // Triggers breakpoint otherwise.
921 // Clobbers RAX.
923
924 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope,
925 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
926
927 // Call runtime function. Reserves shadow space on the stack before calling
928 // if platform ABI requires that.
929 void CallCFunction(Register reg, bool restore_rsp = false);
930 void CallCFunction(Address address, bool restore_rsp = false);
931
934
936 Register temp,
937 intptr_t low,
938 intptr_t high,
939 RangeCheckCondition condition,
940 Label* target) override;
941
942 // Loading and comparing classes of objects.
945
947 intptr_t class_id,
948 Register scratch = kNoRegister);
949
952
955 Register scratch,
956 bool can_be_null = false) override;
957
958#if defined(DART_COMPRESSED_POINTERS)
959 void ExtendNonNegativeSmi(Register dst) override {
960 // Zero-extends and is a smaller instruction to output than sign
961 // extension (movsxd).
962 orl(dst, dst);
963 }
964#endif
965
966 // CheckClassIs fused with optimistic SmiUntag.
967 // Value in the register object is untagged optimistically.
968 void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label* smi);
969
  // Misc. functionality.
  // Tags a Smi in place: adding the register to itself is a shift left by
  // one (assumes kSmiTagSize == 1 — matching the sar below); using add
  // leaves overflow visible in the flags.
  void SmiTag(Register reg) override { OBJ(add)(reg, reg); }

  // Untags a Smi in place with an arithmetic shift right by kSmiTagSize.
  void SmiUntag(Register reg) { OBJ(sar)(reg, Immediate(kSmiTagSize)); }
975 if (dst != src) {
976 OBJ(mov)(dst, src);
977 }
979 }
980
982#if !defined(DART_COMPRESSED_POINTERS)
984#else
985 // This is shorter than
986 // shlq reg, 32
987 // sraq reg, 33
989 movsxd(reg, reg);
990#endif
991 }
992
994#if !defined(DART_COMPRESSED_POINTERS)
995 if (dst != src) {
996 movq(dst, src);
997 }
999#else
1000 movsxd(dst, src);
1002#endif
1003 }
1004
1005 // Truncates upper bits.
1007
1009
1011 Label* label,
1014 j(NOT_ZERO, label, distance);
1015 }
1016
1018 Label* label,
1019 JumpDistance distance = kFarJump) override {
1021 j(ZERO, label, distance);
1022 }
1023
1024 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
1026 Register reg2,
1027 intptr_t offset,
1029 Register temp,
1030 Label* equals) override;
1031
1032 void Align(int alignment, intptr_t offset);
1033 void Bind(Label* label) override;
1034 // Unconditional jump to a given label.
1036 jmp(label, distance);
1037 }
1038 // Unconditional jump to a given address in register.
1040 // Unconditional jump to a given address in memory.
1041 void Jump(const Address& address) { jmp(address); }
1042
1043 // Arch-specific LoadFromOffset to choose the right operation for [sz].
1045 const Address& address,
1046 OperandSize sz = kEightBytes) override;
1048 Register base,
1049 int32_t payload_offset,
1050 Register index,
1052 OperandSize sz = kEightBytes) override {
1053 Load(dst, FieldAddress(base, index, scale, payload_offset), sz);
1054 }
1055#if defined(DART_COMPRESSED_POINTERS)
1057 Register base,
1058 int32_t offset,
1059 Register index) override {
1062 }
1063#endif
1065 const Address& address,
1066 OperandSize sz = kEightBytes) override;
1067 void StoreZero(const Address& address, Register temp = kNoRegister) {
1068 movq(address, Immediate(0));
1069 }
1070 void LoadFromStack(Register dst, intptr_t depth);
1071 void StoreToStack(Register src, intptr_t depth);
1072 void CompareToStack(Register src, intptr_t depth);
1075 }
1078 }
1081 }
1082
1085 }
1088 }
1090 if (src != dst) {
1091 movaps(dst, src);
1092 }
1093 }
1094
1097 }
1100 }
1103 }
1105 if (src != dst) {
1106 movaps(dst, src);
1107 }
1108 }
1109
1112
1114 const Address& address,
1115 OperandSize size = kEightBytes) override {
1116 // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
1117 // with other loads).
1118 Load(dst, address, size);
1119 if (FLAG_target_thread_sanitizer) {
1120 TsanLoadAcquire(address);
1121 }
1122 }
#if defined(DART_COMPRESSED_POINTERS)
  void LoadAcquireCompressed(Register dst, const Address& address) override {
    // On intel loads have load-acquire behavior (i.e. loads are not re-ordered
    // with other loads), so no extra fence is emitted.
    LoadCompressed(dst, address);
    if (FLAG_target_thread_sanitizer) {
      // TSAN cannot observe the hardware ordering guarantee, so tell it
      // explicitly that this load has acquire semantics.
      TsanLoadAcquire(address);
    }
  }
#endif
1134 const Address& address,
1135 OperandSize size = kWordBytes) override {
1136 // On intel stores have store-release behavior (i.e. stores are not
1137 // re-ordered with other stores).
1138 Store(src, address, size);
1139 if (FLAG_target_thread_sanitizer) {
1140 TsanStoreRelease(address);
1141 }
1142 }
1143
1145 Address address,
1146 OperandSize size = kEightBytes) override {
1148 if (size == kFourBytes) {
1149 cmpl(value, address);
1150 } else {
1151 cmpq(value, address);
1152 }
1153 }
1154
1157
1158 // Set up a Dart frame on entry with a frame pointer and PC information to
1159 // enable easy access to the RawInstruction object of code corresponding
1160 // to this frame.
1161 // The dart frame layout is as follows:
1162 // ....
1163 // locals space <=== RSP
1164 // saved PP
1165 // code object (used to derive the RawInstruction Object of the dart code)
1166 // saved RBP <=== RBP
1167 // ret PC
1168 // .....
1169 // This code sets this up with the sequence:
1170 // pushq rbp
1171 // movq rbp, rsp
1172 // call L
1173 // L: <code to adjust saved pc if there is any intrinsification code>
1174 // ...
1175 // pushq r15
1176 // .....
1177 void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
1179
1180 // Set up a Dart frame for a function compiled for on-stack replacement.
1181 // The frame layout is a normal Dart frame, but the frame is partially set
1182 // up on entry (it is the frame of the unoptimized code).
1183 void EnterOsrFrame(intptr_t extra_size);
1184
1185 // Set up a stub frame so that the stack traversal code can easily identify
1186 // a stub frame.
1187 // The stub frame layout is as follows:
1188 // .... <=== RSP
1189 // pc (used to derive the RawInstruction Object of the stub)
1190 // saved RBP <=== RBP
1191 // ret PC
1192 // .....
1193 // This code sets this up with the sequence:
1194 // pushq rbp
1195 // movq rbp, rsp
1196 // pushq immediate(0)
1197 // .....
1200
1201 // Set up a frame for calling a C function.
1202 // Automatically save the pinned registers in Dart which are not callee-
1203 // saved in the native calling convention.
1204 // Use together with CallCFunction.
1205 void EnterCFrame(intptr_t frame_space);
1207
1211
1212 void CombineHashes(Register dst, Register other) override;
1213 void FinalizeHashForSize(intptr_t bit_size,
1214 Register dst,
1215 Register scratch = TMP) override;
1216
1217 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
1218 // which will allocate in the runtime where tracing occurs.
1220 Label* trace,
1221 Register temp_reg = kNoRegister,
1222 JumpDistance distance = JumpDistance::kFarJump);
1223
1225 Label* trace,
1226 Register temp_reg = kNoRegister,
1227 JumpDistance distance = JumpDistance::kFarJump);
1228
1229 void TryAllocateObject(intptr_t cid,
1230 intptr_t instance_size,
1231 Label* failure,
1233 Register instance_reg,
1234 Register temp) override;
1235
1236 void TryAllocateArray(intptr_t cid,
1237 intptr_t instance_size,
1238 Label* failure,
1241 Register end_address,
1242 Register temp);
1243
1245#if defined(DEBUG)
1246 Label okay;
1247 cmpl(Address(top, 0), Immediate(kAllocationCanary));
1248 j(EQUAL, &okay, Assembler::kNearJump);
1249 Stop("Allocation canary");
1250 Bind(&okay);
1251#endif
1252 }
1254#if defined(DEBUG)
1256#endif
1257 }
1258
1259 // Copy [size] bytes from [src] address to [dst] address.
1260 // [size] should be a multiple of word size.
1261 // Clobbers [src], [dst], [size] and [temp] registers.
1262 // X64 requires fixed registers for memory copying:
1263 // [src] = RSI, [dst] = RDI, [size] = RCX.
1265 Register dst,
1266 Register size,
1267 Register temp = kNoRegister);
1268
1269 // This emits a PC-relative call of the form "callq *[rip+<offset>]". The
1270 // offset is not yet known and needs therefore relocation to the right place
1271 // before the code can be used.
1272 //
1273 // The necessary information for the "linker" (i.e. the relocation
1274 // information) is stored in [UntaggedCode::static_calls_target_table_]: an
1275 // entry of the form
1276 //
1277 // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
1278 //
1279 // will be used during relocation to fix the offset.
1280 //
1281 // The provided [offset_into_target] will be added to calculate the final
1282 // destination. It can be used e.g. for calling into the middle of a
1283 // function.
1284 void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);
1285
1286 // This emits a PC-relative tail call of the form "jmp *[rip+<offset>]".
1287 //
1288 // See also above for the pc-relative call.
1289 void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);
1290
1291 // Debugging and bringup support.
1292 void Breakpoint() override { int3(); }
1293
1294 static bool AddressCanHoldConstantIndex(const Object& constant,
1295 bool is_external,
1296 intptr_t cid,
1297 intptr_t index_scale);
1298
1299 static Address ElementAddressForIntIndex(bool is_external,
1300 intptr_t cid,
1301 intptr_t index_scale,
1302 Register array,
1303 intptr_t index);
1304 static Address ElementAddressForRegIndex(bool is_external,
1305 intptr_t cid,
1306 intptr_t index_scale,
1307 bool index_unboxed,
1308 Register array,
1309 Register index);
1310
1312 Register field,
1313 Register scratch,
1314 bool is_shared) {
1316 scratch, compiler::FieldAddress(
1318 const intptr_t field_table_offset =
1321 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
1322 static_assert(kSmiTagShift == 1, "adjust scale factor");
1323 leaq(address, Address(address, scratch, TIMES_HALF_WORD_SIZE, 0));
1324 }
1325
1328 Register offset_in_words_as_smi) override {
1329 static_assert(kSmiTagShift == 1, "adjust scale factor");
1330 leaq(address, FieldAddress(instance, offset_in_words_as_smi, TIMES_4, 0));
1331 }
1332
1333#if defined(DART_COMPRESSED_POINTERS)
1335 Register address,
1337 Register offset_in_words_as_smi) override {
1338 static_assert(kSmiTagShift == 1, "adjust scale factor");
1339 leaq(address, FieldAddress(instance, offset_in_words_as_smi,
1341 }
1342#endif
1343
1346 int32_t offset) override {
1347 leaq(address, FieldAddress(instance, offset));
1348 }
1349
1351
1352 // On some other platforms, we draw a distinction between safe and unsafe
1353 // smis.
1354 static bool IsSafe(const Object& object) { return true; }
1355 static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
1356
1357 void LoadWordFromPoolIndex(Register dst, intptr_t index);
1358 void StoreWordToPoolIndex(Register src, intptr_t index);
1359
1360 private:
1361 bool constant_pool_allowed_;
1362
1363 void CallCodeThroughPool(intptr_t target_code_pool_index,
1364 CodeEntryKind entry_kind);
1365
1366 bool CanLoadFromObjectPool(const Object& object) const;
1367 void LoadObjectHelper(
1368 Register dst,
1369 const Object& obj,
1370 bool is_unique,
1373
1374 void AluL(uint8_t modrm_opcode, Register dst, const Immediate& imm);
1375 void AluB(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1376 void AluW(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1377 void AluL(uint8_t modrm_opcode, const Address& dst, const Immediate& imm);
1378 void AluQ(uint8_t modrm_opcode,
1379 uint8_t opcode,
1380 Register dst,
1381 const Immediate& imm);
1382 void AluQ(uint8_t modrm_opcode,
1383 uint8_t opcode,
1384 const Address& dst,
1385 const Immediate& imm);
1386
1387 void EmitSimple(int opcode, int opcode2 = -1, int opcode3 = -1);
1388 void EmitUnaryQ(Register reg, int opcode, int modrm_code);
1389 void EmitUnaryL(Register reg, int opcode, int modrm_code);
1390 void EmitUnaryQ(const Address& address, int opcode, int modrm_code);
1391 void EmitUnaryL(const Address& address, int opcode, int modrm_code);
1392 // The prefixes are in reverse order due to the rules of default arguments in
1393 // C++.
1394 void EmitQ(int reg,
1395 const Address& address,
1396 int opcode,
1397 int prefix2 = -1,
1398 int prefix1 = -1);
1399 void EmitL(int reg,
1400 const Address& address,
1401 int opcode,
1402 int prefix2 = -1,
1403 int prefix1 = -1);
1404 void EmitW(Register reg,
1405 const Address& address,
1406 int opcode,
1407 int prefix2 = -1,
1408 int prefix1 = -1);
1409 void EmitQ(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1);
1410 void EmitL(int dst, int src, int opcode, int prefix2 = -1, int prefix1 = -1);
1411 void EmitW(Register dst,
1412 Register src,
1413 int opcode,
1414 int prefix2 = -1,
1415 int prefix1 = -1);
1416 void EmitB(int reg, const Address& address, int opcode);
1417 void CmpPS(XmmRegister dst, XmmRegister src, int condition);
1418
1419 inline void EmitUint8(uint8_t value);
1420 inline void EmitInt32(int32_t value);
1421 inline void EmitUInt32(uint32_t value);
1422 inline void EmitInt64(int64_t value);
1423
1424 inline void EmitRegisterREX(Register reg,
1425 uint8_t rex,
1426 bool force_emit = false);
1427 inline void EmitOperandREX(int rm, const Operand& operand, uint8_t rex);
1428 inline void EmitRegisterOperand(int rm, int reg);
1429 inline void EmitFixup(AssemblerFixup* fixup);
1430 inline void EmitOperandSizeOverride();
1431 inline void EmitRegRegRex(int reg, int base, uint8_t rex = REX_NONE);
1432 void EmitOperand(int rm, const Operand& operand);
1433 void EmitImmediate(const Immediate& imm);
1434 void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
1435 void EmitSignExtendedInt8(int rm,
1436 const Operand& operand,
1437 const Immediate& immediate);
1438 void EmitLabel(Label* label, intptr_t instruction_size);
1439 void EmitLabelLink(Label* label);
1440 void EmitNearLabelLink(Label* label);
1441
1442 void EmitGenericShift(bool wide, int rm, Register reg, const Immediate& imm);
1443 void EmitGenericShift(bool wide, int rm, Register operand, Register shifter);
1444
1445 enum BarrierFilterMode {
1446 // Filter falls through into the barrier update code. Target label
1447 // is an "after-store" label.
1448 kJumpToNoUpdate,
1449
1450 // Filter falls through to the "after-store" code. Target label
1451 // is the barrier update code label.
1452 kJumpToBarrier,
1453 };
1454
1455 void StoreIntoArrayBarrier(Register object,
1456 Register slot,
1458 CanBeSmi can_be_smi = kValueCanBeSmi);
1459
1460 // Unaware of write barrier (use StoreInto* methods for storing to objects).
1461 void MoveImmediate(const Address& dst,
1462 const Immediate& imm,
1464
1465 friend class dart::FlowGraphCompiler;
1466 std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
1467 std::function<void()> generate_invoke_array_write_barrier_;
1468
1469 DISALLOW_ALLOCATION();
1470 DISALLOW_COPY_AND_ASSIGN(Assembler);
1471};
1472
1473inline void Assembler::EmitUint8(uint8_t value) {
1474 buffer_.Emit<uint8_t>(value);
1475}
1476
1477inline void Assembler::EmitInt32(int32_t value) {
1478 buffer_.Emit<int32_t>(value);
1479}
1480
1481inline void Assembler::EmitUInt32(uint32_t value) {
1482 buffer_.Emit<uint32_t>(value);
1483}
1484
1485inline void Assembler::EmitInt64(int64_t value) {
1486 buffer_.Emit<int64_t>(value);
1487}
1488
1489inline void Assembler::EmitRegisterREX(Register reg, uint8_t rex, bool force) {
1490 ASSERT(reg != kNoRegister && reg <= R15);
1491 ASSERT(rex == REX_NONE || rex == REX_W);
1492 rex |= (reg > 7 ? REX_B : REX_NONE);
1493 if (rex != REX_NONE || force) EmitUint8(REX_PREFIX | rex);
1494}
1495
1496inline void Assembler::EmitOperandREX(int rm,
1497 const Operand& operand,
1498 uint8_t rex) {
1499 rex |= (rm > 7 ? REX_R : REX_NONE) | operand.rex();
1500 if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex);
1501}
1502
1503inline void Assembler::EmitRegRegRex(int reg, int base, uint8_t rex) {
1504 ASSERT(reg != kNoRegister && reg <= R15);
1505 ASSERT(base != kNoRegister && base <= R15);
1506 ASSERT(rex == REX_NONE || rex == REX_W);
1507 if (reg > 7) rex |= REX_R;
1508 if (base > 7) rex |= REX_B;
1509 if (rex != REX_NONE) EmitUint8(REX_PREFIX | rex);
1510}
1511
1512inline void Assembler::EmitFixup(AssemblerFixup* fixup) {
1513 buffer_.EmitFixup(fixup);
1514}
1515
1516inline void Assembler::EmitOperandSizeOverride() {
1517 EmitUint8(0x66);
1518}
1519
1520} // namespace compiler
1521} // namespace dart
1522
1523#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_
int count
Definition: FontMgrTest.cpp:50
bool equals(SkDrawable *a, SkDrawable *b)
#define DECLARE_ALU(op, c)
#define RAB(name,...)
#define RR(width, name,...)
#define XX(width, name,...)
#define DECLARE_SIMPLE(name, opcode)
#define SIMPLE(name,...)
#define REGULAR_INSTRUCTION(name,...)
#define AR(width, name,...)
#define OBJ(op)
#define DECLARE_XMM(name, code)
#define DECLARE_CMPPS(name, code)
#define DECLARE_CMOV(name, code)
#define ARB(name,...)
#define REGULAR_UNARY(name, opcode, modrm)
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static bool IsUint(intptr_t N, T value)
Definition: utils.h:328
static Address AddressBaseImm32(Register base, int32_t disp)
Address(Register base, Register index, ScaleFactor scale, int32_t disp)
Address(Register index, ScaleFactor scale, int32_t disp)
static Address AddressRIPRelative(int32_t disp)
Address(Register base, Register r)
Address & operator=(const Address &other)
static Address AddressBaseImm32(Register base, Register r)
Address(Register base, int32_t disp)
Address(Register base, Register index, ScaleFactor scale, Register r)
Address(Register index, ScaleFactor scale, Register r)
Address(const Address &other)
void LoadCompressedSmi(Register dst, const Address &address)
void Stop(const char *message)
void LoadCompressed(Register dst, const Address &address)
ObjectPoolBuilder & object_pool_builder()
void LoadCompressedFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi)
void LoadIndexedCompressed(Register dst, Register base, int32_t offset, Register index)
void ExtendNonNegativeSmi(Register dst)
void LoadAcquireCompressed(Register dst, const Address &address)
void EmitFixup(AssemblerFixup *fixup)
void PushRegistersInOrder(std::initializer_list< Register > regs)
void PopRegisterPair(Register r0, Register r1)
void MoveUnboxedDouble(FpuRegister dst, FpuRegister src)
void jmp(Register reg)
void LoadClassId(Register result, Register object)
void StoreObject(const Address &dst, const Object &obj, OperandSize size=kWordBytes)
void SmiUntagOrCheckClass(Register object, intptr_t class_id, Label *smi)
void sarq(Register reg, const Immediate &imm)
void LoadPoolPointer(Register pp=PP)
void Call(Address target)
void unpcklpd(XmmRegister dst, XmmRegister src)
static Address ElementAddressForRegIndex(bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
bool CanLoadFromObjectPool(const Object &object) const
void CompareClassId(Register object, intptr_t class_id, Register scratch=kNoRegister)
void call(Label *label)
void PushRegisters(const RegisterSet &registers)
void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset)
void Jmp(const Code &code, Register pp=PP)
void CompareObject(Register reg, const Object &object)
void ZeroInitCompressedSmiField(const Address &dest)
void movsd(XmmRegister dst, XmmRegister src)
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src)
void LoadUniqueObject(Register dst, const Object &obj, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void abspd(XmmRegister dst, XmmRegister src)
void LoadObject(Register dst, const Object &obj)
void CombineHashes(Register dst, Register other) override
void BranchIfSmi(Register reg, Label *label, JumpDistance distance=kFarJump) override
void LoadTaggedClassIdMayBeSmi(Register result, Register object)
void notps(XmmRegister dst, XmmRegister src)
void movups(XmmRegister dst, const Address &src)
void AddImmediate(const Address &address, const Immediate &imm)
void TsanLoadAcquire(Register addr)
void Load(Register reg, const Address &address, OperandSize type, Condition cond)
void BranchIfZero(Register src, Label *label, JumpDistance distance=kFarJump)
void unpckhpd(XmmRegister dst, XmmRegister src)
void movq(const Address &dst, XmmRegister src)
void LoadQImmediate(FpuRegister dst, simd128_value_t immediate)
void PushRegisterPair(Register r0, Register r1)
void StoreZero(const Address &address, Register temp=kNoRegister)
void shlq(Register reg, const Immediate &imm)
void Call(const Code &stub_entry, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void pxor(XmmRegister dst, XmmRegister src)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
void jmp(const ExternalLabel *label)
void movlhps(XmmRegister dst, XmmRegister src)
void ZeroInitSmiField(const Address &dest)
void movq(Register dst, Register src)
void LoadImmediate(Register reg, int64_t immediate) override
void LoadDImmediate(FpuRegister dst, double immediate)
void movl(const Address &dst, const Immediate &imm)
void movd(XmmRegister dst, Register src)
void movl(Register dst, const Immediate &src)
void DoubleNegate(XmmRegister dst, XmmRegister src)
void j(Condition condition, Label *label, JumpDistance distance=kFarJump)
void SmiUntag(Register reg)
void absps(XmmRegister dst, XmmRegister src)
void CompareImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void Jump(Label *label, JumpDistance distance=kFarJump)
void AddImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void shldq(Register dst, Register src, const Immediate &imm)
void JmpPatchable(const Code &code, Register pp)
void LoadFromStack(Register dst, intptr_t depth)
void movmskpd(Register dst, XmmRegister src)
void cvtsi2sdq(XmmRegister dst, Register src)
void leal(Register dst, const Address &src)
void shufps(XmmRegister dst, XmmRegister src, const Immediate &mask)
void shll(Register reg, const Immediate &imm)
void movb(const Address &dst, const Immediate &imm)
void shufpd(XmmRegister dst, XmmRegister src, const Immediate &mask)
void ExtendValue(Register dst, Register src, OperandSize sz) override
void CompareImmediate(Register rn, int32_t value, Condition cond)
void LoadIndexedPayload(Register dst, Register base, int32_t payload_offset, Register index, ScaleFactor scale, OperandSize sz=kEightBytes) override
void PushRegister(Register r)
void LoadMemoryValue(Register dst, Register base, int32_t offset)
void MulImmediate(Register reg, int64_t imm, OperandSize width=kEightBytes) override
void pushq(Register reg)
void CompareImmediate(const Address &address, const Immediate &imm, OperandSize width=kEightBytes)
void b(Label *label, Condition cond=AL)
void imulq(Register dst, const Immediate &imm)
void cvtpd2ps(XmmRegister dst, XmmRegister src)
void set1ps(XmmRegister dst, Register tmp, const Immediate &imm)
void roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode)
void set_constant_pool_allowed(bool b)
void sarq(Register operand, Register shifter)
void cvtps2pd(XmmRegister dst, XmmRegister src)
void subpl(XmmRegister dst, XmmRegister src)
void movw(Register dst, const Address &src)
void zerowps(XmmRegister dst, XmmRegister src)
void OrImmediate(Register dst, int64_t value)
void PushImmediate(const Immediate &imm)
void movmskps(Register dst, XmmRegister src)
void jmp(Label *label, JumpDistance distance=kFarJump)
void TsanStoreRelease(Address addr)
void Align(int alignment, intptr_t offset)
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_external, intptr_t cid, intptr_t index_scale)
void StoreObjectIntoObjectNoBarrier(Register object, const Address &dest, const Object &value, MemoryOrder memory_order=kRelaxedNonAtomic, OperandSize size=kWordBytes) override
void MaybeTraceAllocation(Register cid, Label *trace, Register temp_reg=kNoRegister, JumpDistance distance=JumpDistance::kFarJump)
void EnterDartFrame(intptr_t frame_size, Register new_pp=kNoRegister)
void LoadClassById(Register result, Register class_id)
void cvtss2sd(XmmRegister dst, XmmRegister src)
void movzxw(Register dst, Register src)
void PushValueAtOffset(Register base, int32_t offset)
void LoadIsolate(Register dst)
void nop(int size=1)
void MaybeTraceAllocation(intptr_t cid, Label *trace, Register temp_reg=kNoRegister, JumpDistance distance=JumpDistance::kFarJump)
void imull(Register reg, const Immediate &imm)
void movsd(XmmRegister dst, const Address &src)
void PushImmediate(int64_t value)
void AndImmediate(Register dst, int64_t value) override
void OrImmediate(Register dst, const Immediate &imm)
void CheckAllocationCanary(Register top)
void XorImmediate(Register dst, const Immediate &imm)
void shll(Register operand, Register shifter)
void mov(Register rd, Operand o, Condition cond=AL)
void SetReturnAddress(Register value)
void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target=0)
void CallCFunction(Address address, bool restore_rsp=false)
void LoadInt32FromBoxOrSmi(Register result, Register value) override
void Exchange(Register reg, const Address &mem)
void BranchIf(Condition condition, Label *label, JumpDistance distance=kFarJump)
void EnterOsrFrame(intptr_t extra_size)
void comisd(XmmRegister a, XmmRegister b)
void TsanLoadAcquire(Address addr)
void btl(Register dst, Register src)
void PushObject(const Object &object)
void AndRegisters(Register dst, Register src1, Register src2=kNoRegister) override
void testb(const Address &address, const Immediate &imm)
void sarl(Register reg, const Immediate &imm)
void jmp(const Code &code)
void Exchange(const Address &mem1, const Address &mem2)
void cmp(Register rn, Operand o, Condition cond=AL)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
XA(L, movups, 0x10, 0x0F)
void popq(Register reg)
void StoreMemoryValue(Register src, Register base, int32_t offset)
void TransitionGeneratedToNative(Register destination_address, Register new_exit_frame, Register new_exit_through_ffi, bool enter_safepoint)
void CompareImmediate(Register reg, int64_t immediate, OperandSize width=kEightBytes) override
void pushq(const Immediate &imm)
void StoreToStack(Register src, intptr_t depth)
void TestImmediate(Register dst, const Immediate &imm, OperandSize width=kEightBytes)
void J(Condition condition, const Code &code, Register pp)
void AddScaled(Register dest, Register base, Register index, ScaleFactor scale, int32_t disp) override
void CallWithEquivalence(const Code &code, const Object &equivalence, CodeEntryKind entry_kind=CodeEntryKind::kNormal)
void movw(const Address &dst, const Immediate &imm)
void LoadStaticFieldAddress(Register address, Register field, Register scratch, bool is_shared)
void popq(const Address &address)
void SmiUntagAndSignExtend(Register dst, Register src)
void SubRegisters(Register dest, Register src)
void PopRegisters(const RegisterSet &registers)
void StoreInternalPointer(Register object, const Address &dest, Register value)
void CompareRegisters(Register a, Register b)
void MulImmediate(Register reg, int32_t imm, OperandSize width=kFourBytes) override
void AndImmediate(Register dst, const Immediate &imm)
void cvttsd2sil(Register dst, XmmRegister src)
void AddRegisters(Register dest, Register src)
void testq(Register reg, const Immediate &imm)
void Bind(Label *label) override
void ReserveAlignedFrameSpace(intptr_t frame_space)
void negateps(XmmRegister dst, XmmRegister src)
void btq(Register base, int bit)
void AddImmediate(Register dest, Register src, int64_t value)
void SubImmediate(const Address &address, const Immediate &imm)
void LockCmpxchgq(const Address &address, Register reg)
void movq(const Address &dst, const Immediate &imm)
void RangeCheck(Register value, Register temp, intptr_t low, intptr_t high, RangeCheckCondition condition, Label *target) override
void enter(const Immediate &imm)
bool constant_pool_allowed() const
void setcc(Condition condition, ByteRegister dst)
void PopRegister(Register r)
void CallCFunction(Register reg, bool restore_rsp=false)
void AddImmediate(Register reg, int64_t value, OperandSize width=kEightBytes)
void ArrayStoreBarrier(Register object, Register slot, Register value, CanBeSmi can_be_smi, Register scratch) override
void testb(const Address &address, Register reg)
void btq(Register dst, Register src)
void LslRegister(Register dst, Register shift) override
void add(Register rd, Register rn, Operand o, Condition cond=AL)
void shrl(Register operand, Register shifter)
void CompareWithMemoryValue(Register value, Address address, OperandSize size=kEightBytes) override
void shrdq(Register dst, Register src, Register shifter)
void mul(Register rd, Register rn, Register rm, Condition cond=AL)
void TryAllocateArray(intptr_t cid, intptr_t instance_size, Label *failure, JumpDistance distance, Register instance, Register end_address, Register temp)
void WriteAllocationCanary(Register top)
void addpl(XmmRegister dst, XmmRegister src)
void Load(Register dst, const Address &address, OperandSize sz=kEightBytes) override
void CallRuntime(const RuntimeEntry &entry, intptr_t argument_count)
void ExtractInstanceSizeFromTags(Register result, Register tags)
void LockCmpxchgl(const Address &address, Register reg)
void CopyMemoryWords(Register src, Register dst, Register size, Register temp=kNoRegister)
void CompareToStack(Register src, intptr_t depth)
void LoadImmediate(Register rd, Immediate value, Condition cond=AL)
void shrq(Register operand, Register shifter)
void MulImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
static Address VMTagAddress()
void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void cvtsd2ss(XmmRegister dst, XmmRegister src)
void CompareObjectRegisters(Register a, Register b)
void Jump(const Address &address)
static bool IsSafeSmi(const Object &object)
void movsxb(Register dst, ByteRegister src)
void TransitionNativeToGenerated(bool leave_safepoint, bool ignore_unwind_in_progress=false, bool set_tag=true)
void SmiUntagAndSignExtend(Register reg)
void LoadAcquire(Register dst, const Address &address, OperandSize size=kEightBytes) override
void shldq(Register dst, Register src, Register shifter)
void cvtsi2sdl(XmmRegister dst, Register src)
void EnterCFrame(intptr_t frame_space)
void StoreBarrier(Register object, Register value, CanBeSmi can_be_smi, Register scratch) override
void call(const Address &address)
void TryAllocateObject(intptr_t cid, intptr_t instance_size, Label *failure, JumpDistance distance, Register instance_reg, Register temp) override
void shlq(Register operand, Register shifter)
void LoadSImmediate(FpuRegister dst, float immediate)
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override
void IncrementCompressedSmiField(const Address &dest, int64_t increment)
void cvttsd2siq(Register dst, XmmRegister src)
void CompareWords(Register reg1, Register reg2, intptr_t offset, Register count, Register temp, Label *equals) override
void StoreWordToPoolIndex(Register src, intptr_t index)
void pushq(const Address &address)
void shldl(Register dst, Register src, const Immediate &imm)
void testl(Register reg, const Immediate &imm)
void movl(Register dst, const Immediate &imm)
void pmovmskb(Register dst, XmmRegister src)
void movhlps(XmmRegister dst, XmmRegister src)
void movb(Register dst, const Address &src)
void BranchIfNotSmi(Register reg, Label *label, JumpDistance distance=kFarJump)
void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target=0)
void negatepd(XmmRegister dst, XmmRegister src)
void LoadInt64FromBoxOrSmi(Register result, Register value) override
void Store(Register src, const Address &address, OperandSize sz=kEightBytes) override
void SubImmediate(Register reg, const Immediate &imm, OperandSize width=kEightBytes)
void ExitFullSafepoint(bool ignore_unwind_in_progress)
void LslImmediate(Register dst, int32_t shift)
void EnterFrame(intptr_t frame_space)
void shrq(Register reg, const Immediate &imm)
static bool IsSafe(const Object &object)
void jmp(const Address &address)
void BranchIfBit(Register rn, intptr_t bit_number, Condition condition, Label *label, JumpDistance distance=kFarJump)
void Drop(intptr_t stack_elements, Register tmp=TMP)
void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void StoreIntoSmiField(const Address &dest, Register value)
void Jump(Register target)
void call(const ExternalLabel *label)
void CallPatchable(const Code &code, CodeEntryKind entry_kind=CodeEntryKind::kNormal, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void LoadClassIdMayBeSmi(Register result, Register object)
void sarl(Register operand, Register shifter)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void shrl(Register reg, const Immediate &imm)
void movss(XmmRegister dst, XmmRegister src)
void LoadIsolateGroup(Register dst)
void LoadFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi) override
void AndImmediate(Register dst, Register src, int64_t value)
void BranchOnMonomorphicCheckedEntryJIT(Label *label)
void movl(Register dst, XmmRegister src)
void Store(Register reg, const Address &address, OperandSize type, Condition cond)
void movzxb(Register dst, ByteRegister src)
void movss(XmmRegister dst, const Address &src)
void movq(Register dst, XmmRegister src)
void cmpxchgl(const Address &address, Register reg)
void MoveRegister(Register rd, Register rm, Condition cond)
void Breakpoint() override
void TsanStoreRelease(Register addr)
AX(L, movups, 0x11, 0x0F)
void LoadWordFromPoolIndex(Register dst, intptr_t index)
void DoubleAbs(XmmRegister dst, XmmRegister src)
void LoadFieldAddressForOffset(Register address, Register instance, int32_t offset) override
void neg(Register rd, Register rm)
void StoreUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void ExtractClassIdFromTags(Register result, Register tags)
void movq(XmmRegister dst, Register src)
void fldl(const Address &src)
void unpckhps(XmmRegister dst, XmmRegister src)
void movq(Register dst, const Immediate &imm)
void FinalizeHashForSize(intptr_t bit_size, Register dst, Register scratch=TMP) override
void SmiUntag(Register dst, Register src)
void EnsureHasClassIdInDEBUG(intptr_t cid, Register src, Register scratch, bool can_be_null=false) override
void call(Register reg)
void LoadDispatchTable(Register dst)
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
void LsrImmediate(Register dst, int32_t shift) override
void MoveMemoryToMemory(const Address &dst, const Address &src)
void LoadNativeEntry(Register dst, const ExternalLabel *label, ObjectPoolBuilderEntry::Patchability patchable)
static Address ElementAddressForIntIndex(bool is_external, intptr_t cid, intptr_t index_scale, Register array, intptr_t index)
void LoadCompressedMemoryValue(Register dst, Register base, int32_t offset)
void movsxw(Register dst, Register src)
void StoreRelease(Register src, const Address &address, OperandSize size=kWordBytes) override
void LoadImmediate(Register reg, const Immediate &imm)
void fstpl(const Address &dst)
void SmiTag(Register reg) override
void LoadUnboxedSingle(FpuRegister dst, Register base, int32_t offset)
void unpcklps(XmmRegister dst, XmmRegister src)
void ffree(intptr_t value)
void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override
void movaps(XmmRegister dst, XmmRegister src)
FieldAddress & operator=(const FieldAddress &other)
FieldAddress(Register base, Register r)
FieldAddress(Register base, Register index, ScaleFactor scale, Register r)
FieldAddress(const FieldAddress &other)
FieldAddress(Register base, Register index, ScaleFactor scale, int32_t disp)
FieldAddress(Register base, int32_t disp)
Immediate(const Immediate &other)
Definition: assembler_x64.h:38
Immediate(int64_t value)
Definition: assembler_x64.h:36
void div(Register rd, Register rs1, Register rs2)
bool Equals(const Operand &other) const
void SetSIB(ScaleFactor scale, Register index, Register base)
uint8_t mod() const
Definition: assembler_x64.h:61
Register base() const
Definition: assembler_x64.h:77
uint8_t rex() const
Definition: assembler_x64.h:59
int32_t disp32() const
Definition: assembler_x64.h:87
Register index() const
Definition: assembler_x64.h:72
Operand(const Operand &other)
Definition: assembler_x64.h:92
void SetDisp8(int8_t disp)
Operand & operator=(const Operand &other)
Definition: assembler_x64.h:97
void SetModRM(int mod, Register rm)
Register rm() const
Definition: assembler_x64.h:63
void SetDisp32(int32_t disp)
int8_t disp8() const
Definition: assembler_x64.h:82
ScaleFactor scale() const
Definition: assembler_x64.h:68
static word host_offset_or_field_id_offset()
static word shared_field_table_values_offset()
static word field_table_values_offset()
#define XMM_ALU_CODES(F)
Definition: constants_x86.h:95
#define X86_CONDITIONAL_SUFFIXES(F)
#define XMM_CONDITIONAL_CODES(F)
#define X86_ALU_CODES(F)
Definition: constants_x86.h:85
#define X86_ZERO_OPERAND_1_BYTE_INSTRUCTIONS(F)
Definition: constants_x86.h:65
#define ASSERT(E)
VkInstance instance
Definition: main.cc:48
struct MyStruct a[10]
uint8_t value
GAsyncResult * result
uint32_t * target
Dart_NativeFunction function
Definition: fuchsia.cc:51
int argument_count
Definition: fuchsia.cc:52
bool IsSmi(int64_t v)
Definition: runtime_api.cc:31
constexpr OperandSize kWordBytes
Definition: dart_vm.cc:33
@ TIMES_16
@ TIMES_COMPRESSED_HALF_WORD_SIZE
@ TIMES_COMPRESSED_WORD_SIZE
const Register THR
static constexpr intptr_t kAllocationCanary
Definition: globals.h:181
@ NOT_ZERO
@ kNoRegister
Definition: constants_arm.h:99
const Register TMP
const intptr_t cid
@ REX_PREFIX
@ REX_NONE
const Register PP
@ kSmiTagSize
@ kHeapObjectTag
@ kSmiTagMask
@ kSmiTagShift
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive mode
Definition: switches.h:228
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
dst
Definition: cp.py:12
dest
Definition: zip.py:79
int32_t width
const Scalar scale
SeparatedVector2 offset