Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
assembler_arm64.h
Go to the documentation of this file.
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
13#error Do not include assembler_arm64.h directly; use assembler.h instead.
14#endif
15
16#include <functional>
17
18#include "platform/assert.h"
19#include "platform/utils.h"
20#include "vm/class_id.h"
22#include "vm/constants.h"
23#include "vm/hash_map.h"
24#include "vm/simulator.h"
25
26namespace dart {
27
28// Forward declarations.
29class FlowGraphCompiler;
30class RuntimeEntry;
31class RegisterSet;
32
33namespace compiler {
34
35static inline int Log2OperandSizeBytes(OperandSize os) {
36 switch (os) {
37 case kByte:
38 case kUnsignedByte:
39 return 0;
40 case kTwoBytes:
42 return 1;
43 case kFourBytes:
45 case kSWord:
46 return 2;
47 case kEightBytes:
48 case kDWord:
49 return 3;
50 case kQWord:
51 return 4;
52 default:
54 break;
55 }
56 return -1;
57}
58
59static inline bool IsSignedOperand(OperandSize os) {
60 switch (os) {
61 case kByte:
62 case kTwoBytes:
63 case kFourBytes:
64 return true;
65 case kUnsignedByte:
68 case kEightBytes:
69 case kSWord:
70 case kDWord:
71 case kQWord:
72 return false;
73 default:
75 break;
76 }
77 return false;
78}
79class Immediate : public ValueObject {
80 public:
81 explicit Immediate(int64_t value) : value_(value) {}
82
83 Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}
84 Immediate& operator=(const Immediate& other) {
85 value_ = other.value_;
86 return *this;
87 }
88
89 private:
90 int64_t value_;
91
92 int64_t value() const { return value_; }
93
94 friend class Assembler;
95};
96
97class Arm64Encode : public AllStatic {
98 public:
99 static inline uint32_t Rd(Register rd) {
100 ASSERT(rd <= ZR);
101 return static_cast<uint32_t>(ConcreteRegister(rd)) << kRdShift;
102 }
103
104 static inline uint32_t Rm(Register rm) {
105 ASSERT(rm <= ZR);
106 return static_cast<uint32_t>(ConcreteRegister(rm)) << kRmShift;
107 }
108
109 static inline uint32_t Rn(Register rn) {
110 ASSERT(rn <= ZR);
111 return static_cast<uint32_t>(ConcreteRegister(rn)) << kRnShift;
112 }
113
114 static inline uint32_t Ra(Register ra) {
115 ASSERT(ra <= ZR);
116 return static_cast<uint32_t>(ConcreteRegister(ra)) << kRaShift;
117 }
118
119 static inline uint32_t Rs(Register rs) {
120 ASSERT(rs <= ZR);
121 return static_cast<uint32_t>(ConcreteRegister(rs)) << kRsShift;
122 }
123
124 static inline uint32_t Rt(Register rt) {
125 ASSERT(rt <= ZR);
126 return static_cast<uint32_t>(ConcreteRegister(rt)) << kRtShift;
127 }
128
129 static inline uint32_t Rt2(Register rt2) {
130 ASSERT(rt2 <= ZR);
131 return static_cast<uint32_t>(ConcreteRegister(rt2)) << kRt2Shift;
132 }
133};
134
135class Address : public ValueObject {
136 public:
137 Address(const Address& other)
138 : ValueObject(),
139 type_(other.type_),
140 base_(other.base_),
141 offset_(other.offset_) {}
142
143 Address& operator=(const Address& other) {
144 type_ = other.type_;
145 base_ = other.base_;
146 offset_ = other.offset_;
147 return *this;
148 }
149
161
162 // If we are doing pre-/post-indexing, and the base and result registers are
163 // the same, then the result is unpredictable. This kind of instruction is
164 // actually illegal on some microarchitectures.
166 if (type() == PreIndex || type() == PostIndex || type() == PairPreIndex ||
167 type() == PairPostIndex) {
168 return ConcreteRegister(base()) != ConcreteRegister(r);
169 }
170 return true;
171 }
172
173 // Offset is in bytes.
174 explicit Address(Register rn, int32_t offset = 0, AddressType at = Offset) {
175 ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR));
176 type_ = at;
177 base_ = rn;
178 offset_ = offset;
179 }
180
181 // This addressing mode does not exist.
182 Address(Register rn, Register offset, AddressType at) = delete;
183
184 static bool CanHoldOffset(int32_t offset,
185 AddressType at = Offset,
187 if (at == Offset) {
188 // Offset fits in 12 bit unsigned and has right alignment for sz,
189 // or fits in 9 bit signed offset with no alignment restriction.
190 const int32_t scale = Log2OperandSizeBytes(sz);
191 return (Utils::IsUint(12 + scale, offset) &&
192 (offset == ((offset >> scale) << scale))) ||
193 (Utils::IsInt(9, offset));
194 } else if (at == PCOffset) {
195 return Utils::IsInt(21, offset) && (offset == ((offset >> 2) << 2));
196 } else if ((at == PreIndex) || (at == PostIndex)) {
197 return Utils::IsInt(9, offset);
198 } else {
199 ASSERT((at == PairOffset) || (at == PairPreIndex) ||
200 (at == PairPostIndex));
201 const int32_t scale = Log2OperandSizeBytes(sz);
202 return (Utils::IsInt(7 + scale, offset) &&
203 (static_cast<uint32_t>(offset) ==
204 ((static_cast<uint32_t>(offset) >> scale) << scale)));
205 }
206 }
207
208 // PC-relative load address.
209 static Address PC(int32_t pc_off) {
210 ASSERT(CanHoldOffset(pc_off, PCOffset));
211 Address addr;
212 addr.base_ = kNoRegister;
213 addr.type_ = PCOffset;
214 addr.offset_ = pc_off;
215 return addr;
216 }
217
219 int32_t offset = 0,
220 AddressType at = PairOffset) {
221 return Address(rn, offset, at);
222 }
223
224 // This addressing mode does not exist.
225 static Address PC(Register r) = delete;
226
231
232 // Base register rn with offset rm. rm is sign-extended according to ext.
233 // If ext is UXTX, rm may be optionally scaled by the
234 // Log2OperandSize (specified by the instruction).
236 Register rm,
237 Extend ext = UXTX,
239 ASSERT((rn != R31) && (rn != ZR));
240 ASSERT((rm != R31) && (rm != CSP));
241 // Can only scale when ext = UXTX.
242 ASSERT((scale != Scaled) || (ext == UXTX));
243 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
244 type_ = Reg;
245 base_ = rn;
246 // Use offset_ to store pre-encoded scale, extend and rm.
247 offset_ = ((scale == Scaled) ? B12 : 0) | Arm64Encode::Rm(rm) |
248 (static_cast<int32_t>(ext) << kExtendTypeShift);
249 }
250
252
253 private:
254 uint32_t encoding(OperandSize sz) const {
255 const int32_t offset = offset_;
256 const int32_t scale = Log2OperandSizeBytes(sz);
257 ASSERT((type_ == Reg) || CanHoldOffset(offset, type_, sz));
258 switch (type_) {
259 case Offset:
260 if (Utils::IsUint(12 + scale, offset) &&
261 (offset == ((offset >> scale) << scale))) {
262 return B24 | ((offset >> scale) << kImm12Shift) |
263 Arm64Encode::Rn(base_);
264 } else if (Utils::IsInt(9, offset)) {
265 return ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
266 } else {
267 FATAL("Offset %d is out of range\n", offset);
268 }
269 case PreIndex:
270 case PostIndex: {
271 ASSERT(Utils::IsInt(9, offset));
272 int32_t idx = (type_ == PostIndex) ? B10 : (B11 | B10);
273 return idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
274 }
275 case PairOffset:
276 case PairPreIndex:
277 case PairPostIndex: {
278 ASSERT(Utils::IsInt(7 + scale, offset) &&
279 (static_cast<uint32_t>(offset) ==
280 ((static_cast<uint32_t>(offset) >> scale) << scale)));
281 int32_t idx = 0;
282 switch (type_) {
283 case PairPostIndex:
284 idx = B23;
285 break;
286 case PairPreIndex:
287 idx = B24 | B23;
288 break;
289 case PairOffset:
290 idx = B24;
291 break;
292 default:
293 UNREACHABLE();
294 break;
295 }
296 return idx |
297 ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) &
298 kImm7Mask) |
299 Arm64Encode::Rn(base_);
300 }
301 case PCOffset:
302 return (((offset >> 2) << kImm19Shift) & kImm19Mask);
303 case Reg:
304 // Offset contains pre-encoded scale, extend and rm.
305 return B21 | B11 | Arm64Encode::Rn(base_) | offset;
306 case Unknown:
307 UNREACHABLE();
308 }
309 return 0;
310 }
311
312 AddressType type() const { return type_; }
313 Register base() const { return base_; }
314 int32_t offset() const { return offset_; }
315
316 Address() : type_(Unknown), base_(kNoRegister), offset_(0) {}
317
318 AddressType type_;
319 Register base_;
320 int32_t offset_;
321
322 friend class Assembler;
323};
324
325class FieldAddress : public Address {
326 public:
327 static bool CanHoldOffset(int32_t offset,
328 AddressType at = Offset,
331 }
332
334 : Address(base, disp - kHeapObjectTag) {}
335
336 // This addressing mode does not exist.
338
339 FieldAddress(const FieldAddress& other) : Address(other) {}
340
342 Address::operator=(other);
343 return *this;
344 }
345};
346
347class Operand : public ValueObject {
348 public:
356
357 // Data-processing operand - Uninitialized.
358 Operand() : encoding_(-1), type_(Unknown) {}
359
360 // Data-processing operands - Copy constructor.
361 Operand(const Operand& other)
362 : ValueObject(), encoding_(other.encoding_), type_(other.type_) {}
363
364 Operand& operator=(const Operand& other) {
365 type_ = other.type_;
366 encoding_ = other.encoding_;
367 return *this;
368 }
369
370 explicit Operand(Register rm) {
371 ASSERT((rm != R31) && (rm != CSP));
372 encoding_ = Arm64Encode::Rm(rm);
373 type_ = Shifted;
374 }
375
376 Operand(Register rm, Shift shift, int32_t imm) {
377 ASSERT(Utils::IsUint(6, imm));
378 ASSERT((rm != R31) && (rm != CSP));
379 encoding_ = (imm << kImm6Shift) | Arm64Encode::Rm(rm) |
380 (static_cast<int32_t>(shift) << kShiftTypeShift);
381 type_ = Shifted;
382 }
383
384 // This operand type does not exist.
386
387 Operand(Register rm, Extend extend, int32_t imm) {
388 ASSERT(Utils::IsUint(3, imm));
389 ASSERT((rm != R31) && (rm != CSP));
390 encoding_ = B21 | Arm64Encode::Rm(rm) |
391 (static_cast<int32_t>(extend) << kExtendTypeShift) |
392 ((imm & 0x7) << kImm3Shift);
393 type_ = Extended;
394 }
395
396 // This operand type does not exist.
398
399 explicit Operand(int32_t imm) {
400 if (Utils::IsUint(12, imm)) {
401 encoding_ = imm << kImm12Shift;
402 } else {
403 // imm only has bits in [12, 24) set.
404 ASSERT(((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12)));
405 encoding_ = B22 | ((imm >> 12) << kImm12Shift);
406 }
407 type_ = Immediate;
408 }
409
410 // Encodes the value of an immediate for a logical operation.
411 // Since these values are difficult to craft by hand, instead pass the
412 // logical mask to the function IsImmLogical to get n, imm_s, and
413 // imm_r. Takes s before r like DecodeBitMasks from Appendix G but unlike
414 // the disassembly of the *bfm instructions.
415 Operand(uint8_t n, int8_t imm_s, int8_t imm_r) {
416 ASSERT((n == 1) || (n == 0));
417 ASSERT(Utils::IsUint(6, imm_s) && Utils::IsUint(6, imm_r));
418 type_ = BitfieldImm;
419 encoding_ = (static_cast<int32_t>(n) << kNShift) |
420 (static_cast<int32_t>(imm_s) << kImmSShift) |
421 (static_cast<int32_t>(imm_r) << kImmRShift);
422 }
423
424 // Test if a given value can be encoded in the immediate field of a logical
425 // instruction.
426 // If it can be encoded, the function returns true, and values pointed to by
427 // n, imm_s and imm_r are updated with immediates encoded in the format
428 // required by the corresponding fields in the logical instruction.
429 // If it can't be encoded, the function returns false, and the operand is
430 // undefined.
431 static bool IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op);
432
433 // An immediate imm can be an operand to add/sub when the return value is
434 // Immediate, or a logical operation over sz bits when the return value is
435 // BitfieldImm. If the return value is Unknown, then the immediate can't be
436 // used as an operand in either instruction. The encoded operand is written
437 // to op.
438 static OperandType CanHold(int64_t imm, uint8_t sz, Operand* op) {
439 ASSERT(op != nullptr);
440 ASSERT((sz == kXRegSizeInBits) || (sz == kWRegSizeInBits));
441 if (Utils::IsUint(12, imm)) {
442 op->encoding_ = imm << kImm12Shift;
443 op->type_ = Immediate;
444 } else if (((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12))) {
445 op->encoding_ = B22 | ((imm >> 12) << kImm12Shift);
446 op->type_ = Immediate;
447 } else if (IsImmLogical(imm, sz, op)) {
448 op->type_ = BitfieldImm;
449 } else {
450 op->encoding_ = 0;
451 op->type_ = Unknown;
452 }
453 return op->type_;
454 }
455
456 private:
457 uint32_t encoding() const { return encoding_; }
458 OperandType type() const { return type_; }
459
460 uint32_t encoding_;
461 OperandType type_;
462
463 friend class Assembler;
464};
465
466class Assembler : public AssemblerBase {
467 public:
469 intptr_t far_branch_level = 0);
471
472 void PushRegister(Register r) { Push(r); }
473 void PopRegister(Register r) { Pop(r); }
474
476
477 void PushRegisterPair(Register r0, Register r1) { PushPair(r0, r1); }
478 void PopRegisterPair(Register r0, Register r1) { PopPair(r0, r1); }
479
480 void PushRegisters(const RegisterSet& registers);
481 void PopRegisters(const RegisterSet& registers);
482
483 void PushRegistersInOrder(std::initializer_list<Register> regs);
484
485 // Push all registers which are callee-saved according to the ARM64 ABI.
487
488 // Pop all registers which are callee-saved according to the ARM64 ABI.
490
491 void ExtendValue(Register rd, Register rn, OperandSize sz) override;
493 Register rn,
494 OperandSize sz = kEightBytes) override;
495
496 void Drop(intptr_t stack_elements) {
497 ASSERT(stack_elements >= 0);
498 if (stack_elements > 0) {
499 AddImmediate(SP, SP, stack_elements * target::kWordSize);
500 }
501 }
502
504
505 void Align(intptr_t alignment, intptr_t offset);
506
507 void Bind(Label* label) override;
508 // Unconditional jump to a given label. [distance] is ignored on ARM.
509 void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
510 // Unconditional jump to a given address in register.
511 void Jump(Register target) { br(target); }
512 // Unconditional jump to a given address in memory. Clobbers TMP.
513 void Jump(const Address& address) {
514 ldr(TMP, address);
515 br(TMP);
516 }
517
524
525#if defined(TARGET_USES_THREAD_SANITIZER)
526 void TsanLoadAcquire(Register addr);
527 void TsanStoreRelease(Register addr);
528#endif
529
531 const Address& address,
532 OperandSize size = kEightBytes) override {
533 // ldar does not feature an address operand.
534 ASSERT(address.type() == Address::AddressType::Offset);
535 Register src = address.base();
536 if (address.offset() != 0) {
537 AddImmediate(TMP2, src, address.offset());
538 src = TMP2;
539 }
540 ldar(dst, src, size);
541#if defined(TARGET_USES_THREAD_SANITIZER)
542 TsanLoadAcquire(src);
543#endif
544 }
545
546#if defined(DART_COMPRESSED_POINTERS)
547 void LoadAcquireCompressed(Register dst, const Address& address) override {
548 LoadAcquire(dst, address, kObjectBytes);
549 add(dst, dst, Operand(HEAP_BITS, LSL, 32));
550 }
551#endif
552
554 const Address& address,
555 OperandSize size = kEightBytes) override {
556 // stlr does not feature an address operand.
557 ASSERT(address.type() == Address::AddressType::Offset);
558 Register dst = address.base();
559 if (address.offset() != 0) {
560 AddImmediate(TMP2, dst, address.offset());
561 dst = TMP2;
562 }
563 stlr(src, dst, size);
564#if defined(TARGET_USES_THREAD_SANITIZER)
565 TsanStoreRelease(dst);
566#endif
567 }
568
570 Address address,
571 OperandSize sz = kEightBytes) override {
572 Load(TMP, address, sz);
573 cmp(value, Operand(TMP), sz);
574 }
575
576 bool use_far_branches() const {
577 return FLAG_use_far_branches || use_far_branches_;
578 }
579
580 void set_use_far_branches(bool b) { use_far_branches_ = b; }
581
582 // Debugging and bringup support.
583 void Breakpoint() override { brk(0); }
584
586 if (prologue_offset_ == -1) {
588 }
589 }
590
591 void ReserveAlignedFrameSpace(intptr_t frame_space);
592
593 // In debug mode, this generates code to check that:
594 // FP + kExitLinkSlotFromEntryFp == SP
595 // or triggers breakpoint otherwise.
597
598 // Instruction pattern from entrypoint is used in Dart frame prologs
599 // to set up the frame and save a PC which can be used to figure out the
600 // RawInstruction object corresponding to the code running in the frame.
601 static constexpr intptr_t kEntryPointToPcMarkerOffset = 0;
602 static intptr_t EntryPointToPcMarkerOffset() {
604 }
605
606 // Emit data (e.g encoded instruction or immediate) in instruction stream.
607 void Emit(int32_t value);
608 void Emit64(int64_t value);
609
610 // On some other platforms, we draw a distinction between safe and unsafe
611 // smis.
612 static bool IsSafe(const Object& object) { return true; }
613 static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
614
615 // Addition and subtraction.
616 // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
617 // For an unmodified rm in this case, use Operand(rm, UXTX, 0);
619 AddSubHelper(sz, false, false, rd, rn, o);
620 }
622 AddSubHelper(sz, true, false, rd, rn, o);
623 }
625 AddSubHelper(sz, false, true, rd, rn, o);
626 }
628 AddSubHelper(sz, true, true, rd, rn, o);
629 }
630 void addw(Register rd, Register rn, Operand o) { add(rd, rn, o, kFourBytes); }
631 void addsw(Register rd, Register rn, Operand o) {
632 adds(rd, rn, o, kFourBytes);
633 }
634 void subw(Register rd, Register rn, Operand o) { sub(rd, rn, o, kFourBytes); }
635 void subsw(Register rd, Register rn, Operand o) {
636 subs(rd, rn, o, kFourBytes);
637 }
638
639 // Addition and subtraction with carry.
640 void adc(Register rd, Register rn, Register rm) {
641 AddSubWithCarryHelper(kEightBytes, false, false, rd, rn, rm);
642 }
643 void adcs(Register rd, Register rn, Register rm) {
644 AddSubWithCarryHelper(kEightBytes, true, false, rd, rn, rm);
645 }
646 void adcw(Register rd, Register rn, Register rm) {
647 AddSubWithCarryHelper(kFourBytes, false, false, rd, rn, rm);
648 }
649 void adcsw(Register rd, Register rn, Register rm) {
650 AddSubWithCarryHelper(kFourBytes, true, false, rd, rn, rm);
651 }
652 void sbc(Register rd, Register rn, Register rm) {
653 AddSubWithCarryHelper(kEightBytes, false, true, rd, rn, rm);
654 }
655 void sbcs(Register rd, Register rn, Register rm) {
656 AddSubWithCarryHelper(kEightBytes, true, true, rd, rn, rm);
657 }
658 void sbcw(Register rd, Register rn, Register rm) {
659 AddSubWithCarryHelper(kFourBytes, false, true, rd, rn, rm);
660 }
661 void sbcsw(Register rd, Register rn, Register rm) {
662 AddSubWithCarryHelper(kFourBytes, true, true, rd, rn, rm);
663 }
664
665 // PC relative immediate add. imm is in bytes.
666 void adr(Register rd, const Immediate& imm) { EmitPCRelOp(ADR, rd, imm); }
667
668 // Bitfield operations.
669 // Bitfield move.
670 // If s >= r then Rd[s-r:0] := Rn[s:r], else Rd[bitwidth+s-r:bitwidth-r] :=
671 // Rn[s:0].
672 void bfm(Register rd,
673 Register rn,
674 int r_imm,
675 int s_imm,
676 OperandSize size = kEightBytes) {
677 EmitBitfieldOp(BFM, rd, rn, r_imm, s_imm, size);
678 }
679
680 // Signed bitfield move.
681 void sbfm(Register rd,
682 Register rn,
683 int r_imm,
684 int s_imm,
685 OperandSize size = kEightBytes) {
686 EmitBitfieldOp(SBFM, rd, rn, r_imm, s_imm, size);
687 }
688
689 // Unsigned bitfield move.
690 void ubfm(Register rd,
691 Register rn,
692 int r_imm,
693 int s_imm,
694 OperandSize size = kEightBytes) {
695 EmitBitfieldOp(UBFM, rd, rn, r_imm, s_imm, size);
696 }
697
698 // Bitfield insert. Takes the low width bits and replaces bits in rd with
699 // them, starting at low_bit.
700 void bfi(Register rd,
701 Register rn,
702 int low_bit,
703 int width,
704 OperandSize size = kEightBytes) {
705 int wordsize = size == kEightBytes ? 64 : 32;
706 EmitBitfieldOp(BFM, rd, rn, -low_bit & (wordsize - 1), width - 1, size);
707 }
708
709 // Bitfield extract and insert low. Takes width bits, starting at low_bit and
710 // replaces the low width bits of rd with them.
712 Register rn,
713 int low_bit,
714 int width,
715 OperandSize size = kEightBytes) {
716 EmitBitfieldOp(BFM, rd, rn, low_bit, low_bit + width - 1, size);
717 }
718
719 // Signed bitfield insert in zero. Takes the low width bits, sign extends
720 // them and writes them to rd, starting at low_bit, and zeroing bits below
721 // that.
723 Register rn,
724 int low_bit,
725 int width,
726 OperandSize size = kEightBytes) {
727 int wordsize = size == kEightBytes ? 64 : 32;
728 EmitBitfieldOp(SBFM, rd, rn, (wordsize - low_bit) & (wordsize - 1),
729 width - 1, size);
730 }
731
732 // Signed bitfield extract. Takes width bits, starting at low_bit, sign
733 // extends them and writes them to rd, starting at the lowest bit.
734 void sbfx(Register rd,
735 Register rn,
736 int low_bit,
737 int width,
738 OperandSize size = kEightBytes) {
739 EmitBitfieldOp(SBFM, rd, rn, low_bit, low_bit + width - 1, size);
740 }
741
742 // Unsigned bitfield insert in zero. Takes the low width bits and writes
743 // them to rd, starting at low_bit, and zeroing bits above and below.
745 Register rn,
746 int low_bit,
747 int width,
748 OperandSize size = kEightBytes) {
749 int wordsize = size == kEightBytes ? 64 : 32;
750 ASSERT(width > 0);
751 ASSERT(low_bit < wordsize);
752 EmitBitfieldOp(UBFM, rd, rn, (-low_bit) & (wordsize - 1), width - 1, size);
753 }
754
755 // Unsigned bitfield extract. Takes the width bits, starting at low_bit and
756 // writes them to the low bits of rd zeroing bits above.
757 void ubfx(Register rd,
758 Register rn,
759 int low_bit,
760 int width,
761 OperandSize size = kEightBytes) {
762 EmitBitfieldOp(UBFM, rd, rn, low_bit, low_bit + width - 1, size);
763 }
764
765 // Sign extend byte->64 bit.
766 void sxtb(Register rd, Register rn) {
767 EmitBitfieldOp(SBFM, rd, rn, 0, 7, kEightBytes);
768 }
769
770 // Sign extend halfword->64 bit.
771 void sxth(Register rd, Register rn) {
772 EmitBitfieldOp(SBFM, rd, rn, 0, 15, kEightBytes);
773 }
774
775 // Sign extend word->64 bit.
776 void sxtw(Register rd, Register rn) {
777 EmitBitfieldOp(SBFM, rd, rn, 0, 31, kEightBytes);
778 }
779
780 // Zero/unsigned extend byte->64 bit.
781 void uxtb(Register rd, Register rn) {
782 EmitBitfieldOp(UBFM, rd, rn, 0, 7, kEightBytes);
783 }
784
785 // Zero/unsigned extend halfword->64 bit.
786 void uxth(Register rd, Register rn) {
787 EmitBitfieldOp(UBFM, rd, rn, 0, 15, kEightBytes);
788 }
789
790 // Zero/unsigned extend word->64 bit.
791 void uxtw(Register rd, Register rn) {
792 EmitBitfieldOp(UBFM, rd, rn, 0, 31, kEightBytes);
793 }
794
795 // Logical immediate operations.
796 void andi(Register rd,
797 Register rn,
798 const Immediate& imm,
800 ASSERT(sz == kEightBytes || sz == kFourBytes);
802 Operand imm_op;
803 const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
804 ASSERT(immok);
805 EmitLogicalImmOp(ANDI, rd, rn, imm_op, sz);
806 }
807 void orri(Register rd,
808 Register rn,
809 const Immediate& imm,
811 ASSERT(sz == kEightBytes || sz == kFourBytes);
813 Operand imm_op;
814 const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
815 ASSERT(immok);
816 EmitLogicalImmOp(ORRI, rd, rn, imm_op, sz);
817 }
818 void eori(Register rd,
819 Register rn,
820 const Immediate& imm,
822 ASSERT(sz == kEightBytes || sz == kFourBytes);
824 Operand imm_op;
825 const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
826 ASSERT(immok);
827 EmitLogicalImmOp(EORI, rd, rn, imm_op, sz);
828 }
830 Register rn,
831 const Immediate& imm,
833 ASSERT(sz == kEightBytes || sz == kFourBytes);
835 Operand imm_op;
836 const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
837 ASSERT(immok);
838 EmitLogicalImmOp(ANDIS, rd, rn, imm_op, sz);
839 }
840
841 // Logical (shifted) register operations.
843 EmitLogicalShiftOp(AND, rd, rn, o, sz);
844 }
846 EmitLogicalShiftOp(BIC, rd, rn, o, sz);
847 }
849 EmitLogicalShiftOp(ORR, rd, rn, o, sz);
850 }
852 EmitLogicalShiftOp(ORN, rd, rn, o, sz);
853 }
855 EmitLogicalShiftOp(EOR, rd, rn, o, sz);
856 }
858 EmitLogicalShiftOp(EON, rd, rn, o, sz);
859 }
861 EmitLogicalShiftOp(ANDS, rd, rn, o, sz);
862 }
864 EmitLogicalShiftOp(BICS, rd, rn, o, sz);
865 }
866 void andw_(Register rd, Register rn, Operand o) {
867 and_(rd, rn, o, kFourBytes);
868 }
869 void orrw(Register rd, Register rn, Operand o) { orr(rd, rn, o, kFourBytes); }
870 void ornw(Register rd, Register rn, Operand o) { orn(rd, rn, o, kFourBytes); }
871 void eorw(Register rd, Register rn, Operand o) { eor(rd, rn, o, kFourBytes); }
872
873 // Count leading zero bits.
874 void clz(Register rd, Register rn) {
875 EmitMiscDP1Source(CLZ, rd, rn, kEightBytes);
876 }
877 void clzw(Register rd, Register rn) {
878 EmitMiscDP1Source(CLZ, rd, rn, kFourBytes);
879 }
880
881 // Reverse bits.
882 void rbit(Register rd, Register rn) {
883 EmitMiscDP1Source(RBIT, rd, rn, kEightBytes);
884 }
885
886 // Misc. arithmetic.
887 void udiv(Register rd,
888 Register rn,
889 Register rm,
891 EmitMiscDP2Source(UDIV, rd, rn, rm, sz);
892 }
893 void sdiv(Register rd,
894 Register rn,
895 Register rm,
897 EmitMiscDP2Source(SDIV, rd, rn, rm, sz);
898 }
899 void lslv(Register rd,
900 Register rn,
901 Register rm,
903 EmitMiscDP2Source(LSLV, rd, rn, rm, sz);
904 }
905 void lsrv(Register rd,
906 Register rn,
907 Register rm,
909 EmitMiscDP2Source(LSRV, rd, rn, rm, sz);
910 }
911 void asrv(Register rd,
912 Register rn,
913 Register rm,
915 EmitMiscDP2Source(ASRV, rd, rn, rm, sz);
916 }
917 void sdivw(Register rd, Register rn, Register rm) {
918 sdiv(rd, rn, rm, kFourBytes);
919 }
920 void lslvw(Register rd, Register rn, Register rm) {
921 lslv(rd, rn, rm, kFourBytes);
922 }
923 void lsrvw(Register rd, Register rn, Register rm) {
924 lsrv(rd, rn, rm, kFourBytes);
925 }
926 void asrvw(Register rd, Register rn, Register rm) {
927 asrv(rd, rn, rm, kFourBytes);
928 }
929 void madd(Register rd,
930 Register rn,
931 Register rm,
932 Register ra,
934 EmitMiscDP3Source(MADD, rd, rn, rm, ra, sz);
935 }
936 void msub(Register rd,
937 Register rn,
938 Register rm,
939 Register ra,
941 EmitMiscDP3Source(MSUB, rd, rn, rm, ra, sz);
942 }
943 // Signed Multiply High
944 // rd <- (rn * rm)[127:64]
946 Register rn,
947 Register rm,
949 EmitMiscDP3Source(SMULH, rd, rn, rm, R31, sz);
950 }
951 // Unsigned Multiply High
952 // rd <- (rn * rm)[127:64]
954 Register rn,
955 Register rm,
957 EmitMiscDP3Source(UMULH, rd, rn, rm, R31, sz);
958 }
960 Register rn,
961 Register rm,
962 Register ra,
964 EmitMiscDP3Source(UMADDL, rd, rn, rm, ra, sz);
965 }
966 // Unsigned Multiply Long
967 // rd:uint64 <- rn:uint32 * rm:uint32
969 Register rn,
970 Register rm,
972 EmitMiscDP3Source(UMADDL, rd, rn, rm, ZR, sz);
973 }
975 Register rn,
976 Register rm,
977 Register ra,
979 EmitMiscDP3Source(SMADDL, rd, rn, rm, ra, sz);
980 }
981 // Signed Multiply Long
982 // rd:int64 <- rn:int32 * rm:int32
984 Register rn,
985 Register rm,
987 EmitMiscDP3Source(SMADDL, rd, rn, rm, ZR, sz);
988 }
989
990 // Move wide immediate.
991 void movk(Register rd, const Immediate& imm, int hw_idx) {
992 ASSERT(rd != CSP);
993 const Register crd = ConcreteRegister(rd);
994 EmitMoveWideOp(MOVK, crd, imm, hw_idx, kEightBytes);
995 }
996 void movn(Register rd, const Immediate& imm, int hw_idx) {
997 ASSERT(rd != CSP);
998 const Register crd = ConcreteRegister(rd);
999 EmitMoveWideOp(MOVN, crd, imm, hw_idx, kEightBytes);
1000 }
1001 void movz(Register rd, const Immediate& imm, int hw_idx) {
1002 ASSERT(rd != CSP);
1003 const Register crd = ConcreteRegister(rd);
1004 EmitMoveWideOp(MOVZ, crd, imm, hw_idx, kEightBytes);
1005 }
1006
1007 // Loads and Stores.
1009 ASSERT((rt != CSP) && (rt != R31));
1010 ASSERT((a.type() != Address::PairOffset) &&
1011 (a.type() != Address::PairPostIndex) &&
1012 (a.type() != Address::PairPreIndex));
1013 if (a.type() == Address::PCOffset) {
1014 ASSERT(sz == kEightBytes);
1015 EmitLoadRegLiteral(LDRpc, rt, a, sz);
1016 } else {
1017 if (IsSignedOperand(sz)) {
1018 EmitLoadStoreReg(LDRS, rt, a, sz);
1019 } else {
1020 EmitLoadStoreReg(LDR, rt, a, sz);
1021 }
1022 }
1023 }
1025 ASSERT((rt != CSP) && (rt != R31));
1026 ASSERT((a.type() != Address::PairOffset) &&
1027 (a.type() != Address::PairPostIndex) &&
1028 (a.type() != Address::PairPreIndex));
1029 EmitLoadStoreReg(STR, rt, a, sz);
1030 }
1031
1033 ASSERT((rt != CSP) && (rt != R31));
1034 ASSERT((a.type() == Address::PairOffset) ||
1035 (a.type() == Address::PairPostIndex) ||
1036 (a.type() == Address::PairPreIndex));
1037 EmitLoadStoreRegPair(LDP, rt, rt2, a, sz);
1038 }
1040 ASSERT((rt != CSP) && (rt != R31));
1041 ASSERT((a.type() == Address::PairOffset) ||
1042 (a.type() == Address::PairPostIndex) ||
1043 (a.type() == Address::PairPreIndex));
1044 EmitLoadStoreRegPair(STP, rt, rt2, a, sz);
1045 }
1047 ASSERT((a.type() == Address::PairOffset) ||
1048 (a.type() == Address::PairPostIndex) ||
1049 (a.type() == Address::PairPreIndex));
1050 EmitLoadStoreVRegPair(FLDP, rt, rt2, a, sz);
1051 }
1053 ASSERT((a.type() == Address::PairOffset) ||
1054 (a.type() == Address::PairPostIndex) ||
1055 (a.type() == Address::PairPreIndex));
1056 EmitLoadStoreVRegPair(FSTP, rt, rt2, a, sz);
1057 }
1058
1060 // rt = value
1061 // rn = address
1062 EmitLoadStoreExclusive(LDXR, R31, rn, rt, size);
1063 }
1065 Register rt,
1066 Register rn,
1067 OperandSize size = kEightBytes) {
1068 // rs = status (1 = failure, 0 = success)
1069 // rt = value
1070 // rn = address
1071 EmitLoadStoreExclusive(STXR, rs, rn, rt, size);
1072 }
1073 void clrex() {
1074 const int32_t encoding = static_cast<int32_t>(CLREX);
1075 Emit(encoding);
1076 }
1077
1079 EmitLoadStoreExclusive(LDAR, R31, rn, rt, sz);
1080 }
1081
1083 EmitLoadStoreExclusive(STLR, R31, rn, rt, sz);
1084 }
1085
1087 Register rt,
1088 Register rn,
1089 OperandSize sz = kEightBytes) {
1090 // rs = value in
1091 // rt = value out
1092 // rn = address
1093 EmitAtomicMemory(LDCLR, rs, rn, rt, sz);
1094 }
1096 Register rt,
1097 Register rn,
1098 OperandSize sz = kEightBytes) {
1099 // rs = value in
1100 // rt = value out
1101 // rn = address
1102 EmitAtomicMemory(LDSET, rs, rn, rt, sz);
1103 }
1104
1105 // Conditional select.
1106 void csel(Register rd, Register rn, Register rm, Condition cond) {
1107 EmitConditionalSelect(CSEL, rd, rn, rm, cond, kEightBytes);
1108 }
1110 Register rn,
1111 Register rm,
1112 Condition cond,
1113 OperandSize sz = kEightBytes) {
1114 EmitConditionalSelect(CSINC, rd, rn, rm, cond, sz);
1115 }
1116 void cinc(Register rd, Register rn, Condition cond) {
1117 csinc(rd, rn, rn, InvertCondition(cond));
1118 }
1119 void cset(Register rd, Condition cond) {
1120 csinc(rd, ZR, ZR, InvertCondition(cond));
1121 }
1122 void csinv(Register rd, Register rn, Register rm, Condition cond) {
1123 EmitConditionalSelect(CSINV, rd, rn, rm, cond, kEightBytes);
1124 }
1125 void cinv(Register rd, Register rn, Condition cond) {
1126 csinv(rd, rn, rn, InvertCondition(cond));
1127 }
1128 void csetm(Register rd, Condition cond) {
1129 csinv(rd, ZR, ZR, InvertCondition(cond));
1130 }
1131 void csneg(Register rd, Register rn, Register rm, Condition cond) {
1132 EmitConditionalSelect(CSNEG, rd, rn, rm, cond, kEightBytes);
1133 }
1134 void cneg(Register rd, Register rn, Condition cond) {
1135 EmitConditionalSelect(CSNEG, rd, rn, rn, InvertCondition(cond),
1136 kEightBytes);
1137 }
1138
1139 // Comparison.
1140 // rn cmp o.
1141 // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
1142 // For an unmodified rm in this case, use Operand(rm, UXTX, 0);
1144 subs(ZR, rn, o, sz);
1145 }
1146 void cmpw(Register rn, Operand o) { cmp(rn, o, kFourBytes); }
1147 // rn cmp -o.
1149 adds(ZR, rn, o, sz);
1150 }
1151
1153 if (rn == CSP) {
1154 // UXTX 0 on a 64-bit register (rm) is a nop, but forces R31 to be
1155 // interpreted as CSP.
1156 cmp(CSP, Operand(rm, UXTX, 0));
1157 } else {
1158 cmp(rn, Operand(rm));
1159 }
1160 }
1161
1163 ASSERT(rn != CSP);
1164 cmp(rn, Operand(rm), kObjectBytes);
1165 }
1166
1167 // Conditional branch.
1168 void b(Label* label, Condition cond = AL) {
1169 if (cond == AL) {
1170 EmitUnconditionalBranch(B, label);
1171 } else {
1172 EmitConditionalBranch(BCOND, cond, label);
1173 }
1174 }
1175
1176 void b(int32_t offset) { EmitUnconditionalBranchOp(B, offset); }
1177 void bl(int32_t offset) {
1178 // CLOBBERS_LR uses __ to access the assembler.
1179#define __ this->
1180 CLOBBERS_LR(EmitUnconditionalBranchOp(BL, offset));
1181#undef __
1182 }
1183
1184 // Branches to the given label if the condition holds.
1185 // [distance] is ignored on ARM.
1186 void BranchIf(Condition condition,
1187 Label* label,
1188 JumpDistance distance = kFarJump) {
1189 b(label, condition);
1190 }
1192 Label* label,
1193 JumpDistance distance = kFarJump) {
1194 cbz(label, rn);
1195 }
1197 intptr_t bit_number,
1198 Condition condition,
1199 Label* label,
1200 JumpDistance distance = kFarJump) {
1201 if (condition == ZERO) {
1202 tbz(label, rn, bit_number);
1203 } else if (condition == NOT_ZERO) {
1204 tbnz(label, rn, bit_number);
1205 } else {
1206 UNREACHABLE();
1207 }
1208 }
1209
1210 void cbz(Label* label, Register rt, OperandSize sz = kEightBytes) {
1211 EmitCompareAndBranch(CBZ, rt, label, sz);
1212 }
1213
1214 void cbnz(Label* label, Register rt, OperandSize sz = kEightBytes) {
1215 EmitCompareAndBranch(CBNZ, rt, label, sz);
1216 }
1217
1218 // Generate 64/32-bit compare with zero and branch when condition allows to
1219 // use a single instruction: cbz/cbnz/tbz/tbnz.
1222 Condition cond,
1223 Label* label,
1225
1226 // Test bit and branch if zero.
1227 void tbz(Label* label, Register rt, intptr_t bit_number) {
1228 EmitTestAndBranch(TBZ, rt, bit_number, label);
1229 }
1230 void tbnz(Label* label, Register rt, intptr_t bit_number) {
1231 EmitTestAndBranch(TBNZ, rt, bit_number, label);
1232 }
1233
1234 // Branch, link, return.
1235 void br(Register rn) { EmitUnconditionalBranchRegOp(BR, rn); }
1236 void blr(Register rn) {
1237 // CLOBBERS_LR uses __ to access the assembler.
1238#define __ this->
1239 CLOBBERS_LR(EmitUnconditionalBranchRegOp(BLR, rn));
1240#undef __
1241 }
1243 if (rn == kNoRegister2) {
1244 // READS_RETURN_ADDRESS_FROM_LR uses __ to access the assembler.
1245#define __ this->
1246 READS_RETURN_ADDRESS_FROM_LR(rn = LR);
1247#undef __
1248 }
1249 EmitUnconditionalBranchRegOp(RET, rn);
1250 }
1251
1252 // Breakpoint.
1253 void brk(uint16_t imm) { EmitExceptionGenOp(BRK, imm); }
1254
1255 // Double floating point.
1256 bool fmovdi(VRegister vd, double immd) {
1257 int64_t imm64 = bit_cast<int64_t, double>(immd);
1258 const uint8_t bit7 = imm64 >> 63;
1259 const uint8_t bit6 = (~(imm64 >> 62)) & 0x1;
1260 const uint8_t bit54 = (imm64 >> 52) & 0x3;
1261 const uint8_t bit30 = (imm64 >> 48) & 0xf;
1262 const uint8_t imm8 = (bit7 << 7) | (bit6 << 6) | (bit54 << 4) | bit30;
1263 const int64_t expimm8 = Instr::VFPExpandImm(imm8);
1264 if (imm64 != expimm8) {
1265 return false;
1266 }
1267 EmitFPImm(FMOVDI, vd, imm8);
1268 return true;
1269 }
1271 ASSERT(rn != R31);
1272 ASSERT(rn != CSP);
1273 const Register crn = ConcreteRegister(rn);
1274 EmitFPIntCvtOp(FMOVSR, static_cast<Register>(vd), crn, kFourBytes);
1275 }
1277 ASSERT(rd != R31);
1278 ASSERT(rd != CSP);
1279 const Register crd = ConcreteRegister(rd);
1280 EmitFPIntCvtOp(FMOVRS, crd, static_cast<Register>(vn), kFourBytes);
1281 }
1283 ASSERT(rn != R31);
1284 ASSERT(rn != CSP);
1285 const Register crn = ConcreteRegister(rn);
1286 EmitFPIntCvtOp(FMOVDR, static_cast<Register>(vd), crn);
1287 }
1289 ASSERT(rd != R31);
1290 ASSERT(rd != CSP);
1291 const Register crd = ConcreteRegister(rd);
1292 EmitFPIntCvtOp(FMOVRD, crd, static_cast<Register>(vn));
1293 }
1295 ASSERT(rn != R31);
1296 ASSERT(rn != CSP);
1297 const Register crn = ConcreteRegister(rn);
1298 EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn);
1299 }
1301 ASSERT(rn != R31);
1302 ASSERT(rn != CSP);
1303 const Register crn = ConcreteRegister(rn);
1304 EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kFourBytes);
1305 }
1307 ASSERT(rd != R31);
1308 ASSERT(rd != CSP);
1309 const Register crd = ConcreteRegister(rd);
1310 EmitFPIntCvtOp(FCVTZS_D, crd, static_cast<Register>(vn));
1311 }
1313 ASSERT(rd != R31);
1314 ASSERT(rd != CSP);
1315 const Register crd = ConcreteRegister(rd);
1316 EmitFPIntCvtOp(FCVTZS_D, crd, static_cast<Register>(vn), kFourBytes);
1317 }
1319 ASSERT(rd != R31);
1320 ASSERT(rd != CSP);
1321 const Register crd = ConcreteRegister(rd);
1322 EmitFPIntCvtOp(FCVTMS_D, crd, static_cast<Register>(vn));
1323 }
1325 ASSERT(rd != R31);
1326 ASSERT(rd != CSP);
1327 const Register crd = ConcreteRegister(rd);
1328 EmitFPIntCvtOp(FCVTMS_D, crd, static_cast<Register>(vn), kFourBytes);
1329 }
1331 ASSERT(rd != R31);
1332 ASSERT(rd != CSP);
1333 const Register crd = ConcreteRegister(rd);
1334 EmitFPIntCvtOp(FCVTPS_D, crd, static_cast<Register>(vn));
1335 }
1337 ASSERT(rd != R31);
1338 ASSERT(rd != CSP);
1339 const Register crd = ConcreteRegister(rd);
1340 EmitFPIntCvtOp(FCVTPS_D, crd, static_cast<Register>(vn), kFourBytes);
1341 }
1342 void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); }
1343 void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); }
1344 void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); }
1345 void fsqrtd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FSQRTD, vd, vn); }
1346 void fcvtsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTSD, vd, vn); }
1347 void fcvtds(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTDS, vd, vn); }
1349 ASSERT(a.type() != Address::PCOffset);
1350 EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kQWord);
1351 }
1353 ASSERT(a.type() != Address::PCOffset);
1354 EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kQWord);
1355 }
1357 ASSERT(a.type() != Address::PCOffset);
1358 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kDWord);
1359 }
1361 ASSERT(a.type() != Address::PCOffset);
1362 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kDWord);
1363 }
1365 ASSERT(a.type() != Address::PCOffset);
1366 EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kSWord);
1367 }
1369 ASSERT(a.type() != Address::PCOffset);
1370 EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kSWord);
1371 }
1372 void fcmpd(VRegister vn, VRegister vm) { EmitFPCompareOp(FCMPD, vn, vm); }
1373 void fcmpdz(VRegister vn) { EmitFPCompareOp(FCMPZD, vn, V0); }
1375 EmitFPTwoSourceOp(FMULD, vd, vn, vm);
1376 }
1378 EmitFPTwoSourceOp(FDIVD, vd, vn, vm);
1379 }
1381 EmitFPTwoSourceOp(FADDD, vd, vn, vm);
1382 }
1384 EmitFPTwoSourceOp(FSUBD, vd, vn, vm);
1385 }
1386
1387 // SIMD operations.
1389 EmitSIMDThreeSameOp(VAND, vd, vn, vm);
1390 }
1392 EmitSIMDThreeSameOp(VORR, vd, vn, vm);
1393 }
1395 EmitSIMDThreeSameOp(VEOR, vd, vn, vm);
1396 }
1398 EmitSIMDThreeSameOp(VADDW, vd, vn, vm);
1399 }
1401 EmitSIMDThreeSameOp(VADDX, vd, vn, vm);
1402 }
1404 EmitSIMDThreeSameOp(VSUBW, vd, vn, vm);
1405 }
1407 EmitSIMDThreeSameOp(VSUBX, vd, vn, vm);
1408 }
1410 EmitSIMDThreeSameOp(VADDS, vd, vn, vm);
1411 }
1413 EmitSIMDThreeSameOp(VADDD, vd, vn, vm);
1414 }
1416 EmitSIMDThreeSameOp(VSUBS, vd, vn, vm);
1417 }
1419 EmitSIMDThreeSameOp(VSUBD, vd, vn, vm);
1420 }
1422 EmitSIMDThreeSameOp(VMULS, vd, vn, vm);
1423 }
1425 EmitSIMDThreeSameOp(VMULD, vd, vn, vm);
1426 }
1428 EmitSIMDThreeSameOp(VDIVS, vd, vn, vm);
1429 }
1431 EmitSIMDThreeSameOp(VDIVD, vd, vn, vm);
1432 }
1434 EmitSIMDThreeSameOp(VCEQS, vd, vn, vm);
1435 }
1437 EmitSIMDThreeSameOp(VCEQD, vd, vn, vm);
1438 }
1440 EmitSIMDThreeSameOp(VCGTS, vd, vn, vm);
1441 }
1443 EmitSIMDThreeSameOp(VCGTD, vd, vn, vm);
1444 }
1446 EmitSIMDThreeSameOp(VCGES, vd, vn, vm);
1447 }
1449 EmitSIMDThreeSameOp(VCGED, vd, vn, vm);
1450 }
1452 EmitSIMDThreeSameOp(VMINS, vd, vn, vm);
1453 }
1455 EmitSIMDThreeSameOp(VMIND, vd, vn, vm);
1456 }
1458 EmitSIMDThreeSameOp(VMAXS, vd, vn, vm);
1459 }
1461 EmitSIMDThreeSameOp(VMAXD, vd, vn, vm);
1462 }
1464 EmitSIMDThreeSameOp(VRECPSS, vd, vn, vm);
1465 }
1467 EmitSIMDThreeSameOp(VRSQRTSS, vd, vn, vm);
1468 }
1469 void vnot(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNOT, vd, vn); }
1470 void vabss(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSS, vd, vn); }
1471 void vabsd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSD, vd, vn); }
1472 void vnegs(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGS, vd, vn); }
1473 void vnegd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGD, vd, vn); }
1474 void vsqrts(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTS, vd, vn); }
1475 void vsqrtd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTD, vd, vn); }
1477 EmitSIMDTwoRegOp(VRECPES, vd, vn);
1478 }
1480 EmitSIMDTwoRegOp(VRSQRTES, vd, vn);
1481 }
1482 void vdupw(VRegister vd, Register rn) {
1483 const VRegister vn = static_cast<VRegister>(rn);
1484 EmitSIMDCopyOp(VDUPI, vd, vn, kFourBytes, 0, 0);
1485 }
1486 void vdupx(VRegister vd, Register rn) {
1487 const VRegister vn = static_cast<VRegister>(rn);
1488 EmitSIMDCopyOp(VDUPI, vd, vn, kEightBytes, 0, 0);
1489 }
1490 void vdups(VRegister vd, VRegister vn, int32_t idx) {
1491 EmitSIMDCopyOp(VDUP, vd, vn, kSWord, 0, idx);
1492 }
1493 void vdupd(VRegister vd, VRegister vn, int32_t idx) {
1494 EmitSIMDCopyOp(VDUP, vd, vn, kDWord, 0, idx);
1495 }
1496 void vinsw(VRegister vd, int32_t didx, Register rn) {
1497 const VRegister vn = static_cast<VRegister>(rn);
1498 EmitSIMDCopyOp(VINSI, vd, vn, kFourBytes, 0, didx);
1499 }
1500 void vinsx(VRegister vd, int32_t didx, Register rn) {
1501 const VRegister vn = static_cast<VRegister>(rn);
1502 EmitSIMDCopyOp(VINSI, vd, vn, kEightBytes, 0, didx);
1503 }
1504 void vinss(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
1505 EmitSIMDCopyOp(VINS, vd, vn, kSWord, sidx, didx);
1506 }
1507 void vinsd(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
1508 EmitSIMDCopyOp(VINS, vd, vn, kDWord, sidx, didx);
1509 }
1510 void vmovrs(Register rd, VRegister vn, int32_t sidx) {
1511 const VRegister vd = static_cast<VRegister>(rd);
1512 EmitSIMDCopyOp(VMOVW, vd, vn, kFourBytes, 0, sidx);
1513 }
1514 void vmovrd(Register rd, VRegister vn, int32_t sidx) {
1515 const VRegister vd = static_cast<VRegister>(rd);
1516 EmitSIMDCopyOp(VMOVX, vd, vn, kEightBytes, 0, sidx);
1517 }
1518
1519 // Aliases.
1520 void mov(Register rd, Register rn) {
1521 if ((rd == CSP) || (rn == CSP)) {
1522 add(rd, rn, Operand(0));
1523 } else {
1524 orr(rd, ZR, Operand(rn));
1525 }
1526 }
1527 void movw(Register rd, Register rn) {
1528 if ((rd == CSP) || (rn == CSP)) {
1529 addw(rd, rn, Operand(0));
1530 } else {
1531 orrw(rd, ZR, Operand(rn));
1532 }
1533 }
1534 void vmov(VRegister vd, VRegister vn) { vorr(vd, vn, vn); }
1535 void mvn_(Register rd, Register rm) { orn(rd, ZR, Operand(rm)); }
1536 void mvnw(Register rd, Register rm) { ornw(rd, ZR, Operand(rm)); }
1537 void neg(Register rd, Register rm) { sub(rd, ZR, Operand(rm)); }
1539 subs(rd, ZR, Operand(rm), sz);
1540 }
1541 void negsw(Register rd, Register rm) { negs(rd, rm, kFourBytes); }
1542 void mul(Register rd, Register rn, Register rm) {
1543 madd(rd, rn, rm, ZR, kEightBytes);
1544 }
1545 void mulw(Register rd, Register rn, Register rm) {
1546 madd(rd, rn, rm, ZR, kFourBytes);
1547 }
1548 void Push(Register reg) {
1549 ASSERT(reg != PP); // Only push PP with TagAndPushPP().
1550 str(reg, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1551 }
1552 void Pop(Register reg) {
1553 ASSERT(reg != PP); // Only pop PP with PopAndUntagPP().
1554 ldr(reg, Address(SP, 1 * target::kWordSize, Address::PostIndex));
1555 }
1556 void PushPair(Register low, Register high) {
1557 stp(low, high, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
1558 }
1559 void PopPair(Register low, Register high) {
1560 ldp(low, high, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
1561 }
1564 }
1567 }
1570 }
1573 }
1576 }
1577 void PopQuad(VRegister reg) {
1579 }
1581 fstp(low, high, Address(SP, -2 * kDoubleSize, Address::PairPreIndex),
1582 kDWord);
1583 }
1586 kDWord);
1587 }
1589 fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex), kQWord);
1590 }
1593 }
1595 // Add the heap object tag back to PP before putting it on the stack.
1597 str(TMP, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1598 }
1601 // Add the heap object tag back to PP before putting it on the stack.
1603 stp(TMP2, CODE_REG,
1604 Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
1605 }
1607 ldr(PP, Address(SP, 1 * target::kWordSize, Address::PostIndex));
1609 // The caller of PopAndUntagPP() must explicitly allow use of popped PP.
1611 }
1613 ands(ZR, rn, o, sz);
1614 }
1615 void tsti(Register rn, const Immediate& imm, OperandSize sz = kEightBytes) {
1616 andis(ZR, rn, imm, sz);
1617 }
1618
1620 Register rn,
1621 int32_t shift,
1622 OperandSize sz = kEightBytes) {
1623 const int32_t reg_size =
1625 ASSERT((shift >= 0) && (shift < reg_size));
1626 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1, sz);
1627 }
1628 void LslImmediate(Register rd, int32_t shift, OperandSize sz = kEightBytes) {
1629 LslImmediate(rd, rd, shift, sz);
1630 }
1631 void LslRegister(Register dst, Register shift) override {
1632 lslv(dst, dst, shift);
1633 }
1635 Register rn,
1636 int shift,
1637 OperandSize sz = kEightBytes) {
1638 const int reg_size =
1640 ASSERT((shift >= 0) && (shift < reg_size));
1641 ubfm(rd, rn, shift, reg_size - 1, sz);
1642 }
1643 void LsrImmediate(Register rd, int32_t shift) override {
1644 LsrImmediate(rd, rd, shift);
1645 }
1647 Register rn,
1648 int shift,
1649 OperandSize sz = kEightBytes) {
1650 const int reg_size =
1652 ASSERT((shift >= 0) && (shift < reg_size));
1653 sbfm(rd, rn, shift, reg_size - 1, sz);
1654 }
1655
1658
1659 void SmiUntag(Register reg) { SmiUntag(reg, reg); }
1660 void SmiUntag(Register dst, Register src) {
1661 sbfm(dst, src, kSmiTagSize, target::kSmiBits + 1);
1662 }
1663 void SmiTag(Register reg) override { SmiTag(reg, reg); }
1664 void SmiTag(Register dst, Register src) {
1665 LslImmediate(dst, src, kSmiTagSize);
1666 }
1667
1669 COMPILE_ASSERT(kSmiTag == 0);
1670 adds(reg, reg, compiler::Operand(reg)); // SmiTag
1671 // If the value doesn't fit in a smi, the tagging changes the sign,
1672 // which causes the overflow flag to be set.
1673 b(label, OVERFLOW);
1674#if defined(DART_COMPRESSED_POINTERS)
1675 cmp(reg, compiler::Operand(reg, SXTW, 0));
1676 b(label, NOT_EQUAL);
1677#endif // defined(DART_COMPRESSED_POINTERS)
1678 }
1679
1680 // Truncates upper bits.
1682 if (result == value) {
1683 ASSERT(TMP != value);
1685 value = TMP;
1686 }
1687 ASSERT(value != result);
1690 Utils::Minimum(static_cast<intptr_t>(32), compiler::target::kSmiBits));
1692 LoadFieldFromOffset(result, value, compiler::target::Mint::value_offset(),
1694 Bind(&done);
1695 }
1696
1698 if (result == value) {
1699 ASSERT(TMP != value);
1701 value = TMP;
1702 }
1703 ASSERT(value != result);
1707 LoadFieldFromOffset(result, value, target::Mint::value_offset());
1708 Bind(&done);
1709 }
1710
1711 // For ARM, the near argument is ignored.
1713 Label* label,
1714 JumpDistance distance = kFarJump) {
1715 tbnz(label, reg, kSmiTag);
1716 }
1717
1718 // For ARM, the near argument is ignored.
1720 Label* label,
1721 JumpDistance distance = kFarJump) override {
1722 tbz(label, reg, kSmiTag);
1723 }
1724
1725 void BranchLink(const Code& code,
1731
1733 const Code& code,
1738 snapshot_behavior);
1739 }
1740
1741 // Emit a call that shares its object pool entries with other calls
1742 // that have the same equivalence marker.
1744 const Code& code,
1745 const Object& equivalence,
1747
1748 void Call(Address target) {
1749 // CLOBBERS_LR uses __ to access the assembler.
1750#define __ this->
1751 CLOBBERS_LR({
1752 ldr(LR, target);
1753 blr(LR);
1754 });
1755#undef __
1756 }
1757 void Call(const Code& code) { BranchLink(code); }
1758
1759 // Clobbers LR.
1762#define __ this->
1763 CLOBBERS_LR({ blr(target); });
1764#undef __
1765 }
1766
1767 void AddImmediate(Register dest, int64_t imm) {
1768 AddImmediate(dest, dest, imm);
1769 }
1770
1771 // Macros accepting a pp Register argument may attempt to load values from
1772 // the object pool when possible. Unless you are sure that the untagged object
1773 // pool pointer is in another register, or that it is not available at all,
1774 // PP should be passed for pp. `dest` can be TMP2, `rn` cannot. `dest` can be
1775 // TMP.
1777 Register rn,
1778 int64_t imm,
1781 Register rn,
1782 int64_t imm,
1785 add(dest, dest, Operand(src));
1786 }
1787 // [dest] = [src] << [scale] + [value].
1789 Register src,
1791 int32_t value) {
1792 if (scale == 0) {
1793 AddImmediate(dest, src, value);
1794 } else {
1795 orr(dest, ZR, Operand(src, LSL, scale));
1796 AddImmediate(dest, dest, value);
1797 }
1798 }
1800 Register rn,
1801 int64_t imm,
1804 sub(dest, dest, Operand(src));
1805 }
1807 int64_t imm,
1808 OperandSize width = kEightBytes) override {
1810 if (Utils::IsPowerOfTwo(imm)) {
1812 } else {
1813 LoadImmediate(TMP, imm);
1814 if (width == kFourBytes) {
1815 mulw(reg, reg, TMP);
1816 } else {
1817 mul(reg, reg, TMP);
1818 }
1819 }
1820 }
1822 Register rn,
1823 int64_t imm,
1825 void AndImmediate(Register rd, int64_t imm) override {
1826 AndImmediate(rd, rd, imm);
1827 }
1829 Register src1,
1830 Register src2 = kNoRegister) override {
1831 ASSERT(src1 != src2); // Likely a mistake.
1832 if (src2 == kNoRegister) {
1833 src2 = dst;
1834 }
1835 and_(dst, src2, Operand(src1));
1836 }
1838 Register rn,
1839 int64_t imm,
1841 void OrImmediate(Register rd, int64_t imm) { OrImmediate(rd, rd, imm); }
1843 Register rn,
1844 int64_t imm,
1846 void TestImmediate(Register rn, int64_t imm, OperandSize sz = kEightBytes);
1848 int64_t imm,
1849 OperandSize sz = kEightBytes) override;
1850
1852 int32_t offset,
1853 OperandSize sz,
1854 Address::AddressType addr_type);
1855 void Load(Register dest,
1856 const Address& address,
1857 OperandSize sz = kEightBytes) override;
1858 // For loading indexed payloads out of tagged objects like Arrays. If the
1859 // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of
1860 // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed.
1862 Register base,
1863 int32_t payload_offset,
1864 Register index,
1866 OperandSize sz = kEightBytes) override {
1867 add(dest, base, Operand(index, LSL, scale));
1868 LoadFromOffset(dest, dest, payload_offset - kHeapObjectTag, sz);
1869 }
1870#if defined(DART_COMPRESSED_POINTERS)
1872 Register base,
1873 int32_t offset,
1874 Register index) override {
1877 }
1878#endif
1888
1889 void LoadFromStack(Register dst, intptr_t depth);
1890 void StoreToStack(Register src, intptr_t depth);
1891 void CompareToStack(Register src, intptr_t depth);
1892
1893 void Store(Register src,
1894 const Address& address,
1895 OperandSize sz = kEightBytes) override;
1896 void StoreZero(const Address& address, Register temp = kNoRegister) {
1897 Store(ZR, address);
1898 }
1899
1901 Register high,
1902 Register base,
1903 int32_t offset,
1905
1915
1918 }
1920 StoreDToOffset(src, base, offset);
1921 }
1923 if (src != dst) {
1924 fmovdd(dst, src);
1925 }
1926 }
1927
1935 if (src != dst) {
1936 vmov(dst, src);
1937 }
1938 }
1939
1940#if defined(DART_COMPRESSED_POINTERS)
1941 void LoadCompressed(Register dest, const Address& slot) override;
1942#endif
1943
1945 Register value,
1946 CanBeSmi can_value_be_smi,
1947 Register scratch) override;
1949 Register slot,
1950 Register value,
1951 CanBeSmi can_value_be_smi,
1952 Register scratch) override;
1954
1956 Register object,
1957 const Address& address,
1958 const Object& value,
1959 MemoryOrder memory_order = kRelaxedNonAtomic,
1960 OperandSize size = kWordBytes) override;
1961
1962 // Stores a non-tagged value into a heap object.
1964 const Address& dest,
1965 Register value);
1966
1967 // Object pool, loading from pool, etc.
1969
1970 bool constant_pool_allowed() const { return constant_pool_allowed_; }
1971 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
1972
1973 compiler::LRState lr_state() const { return lr_state_; }
1974 void set_lr_state(compiler::LRState state) { lr_state_ = state; }
1975
1976 bool CanLoadFromObjectPool(const Object& object) const;
1978 const ExternalLabel* label,
1982
1983 // Note: the function never clobbers TMP, TMP2 scratch registers.
1984 void LoadObject(Register dst, const Object& obj);
1985 // Note: the function never clobbers TMP, TMP2 scratch registers.
1986 void LoadUniqueObject(Register dst, const Object& obj);
1987 // Note: the function never clobbers TMP, TMP2 scratch registers.
1988 void LoadImmediate(Register reg, int64_t imm) override;
1990 LoadImmediate(reg, imm.value());
1991 }
1992
1993 void LoadSImmediate(VRegister reg, float immd);
1994 void LoadDImmediate(VRegister reg, double immd);
1996
1997 // Load word from pool from the given offset using encoding that
1998 // InstructionPattern::DecodeLoadWordFromPool can decode.
1999 //
2000 // Note: the function never clobbers TMP, TMP2 scratch registers.
2001 void LoadWordFromPoolIndex(Register dst, intptr_t index, Register pp = PP);
2002
2003 // Store word to pool at the given offset.
2004 //
2005 // Note: clobbers TMP.
2006 void StoreWordToPoolIndex(Register src, intptr_t index, Register pp = PP);
2007
2009 Register upper,
2010 intptr_t index);
2011
2012 void PushObject(const Object& object) {
2013 if (IsSameObject(compiler::NullObject(), object)) {
2014 Push(NULL_REG);
2015 } else {
2016 LoadObject(TMP, object);
2017 Push(TMP);
2018 }
2019 }
2020 void PushImmediate(int64_t immediate) {
2021 LoadImmediate(TMP, immediate);
2022 Push(TMP);
2023 }
2024 void PushImmediate(Immediate immediate) { PushImmediate(immediate.value()); }
2025 void CompareObject(Register reg, const Object& object);
2026
2029
2031 Register temp,
2032 intptr_t low,
2033 intptr_t high,
2034 RangeCheckCondition condition,
2035 Label* target) override;
2036
2040 intptr_t class_id,
2041 Register scratch = kNoRegister);
2042 // Note: input and output registers must be different.
2046 Register src,
2047 Register scratch,
2048 bool can_be_null = false) override;
2049
2050 // Reserve specifies how much space to reserve for the Dart stack.
2051 void SetupDartSP(intptr_t reserve = 4096);
2054
2055 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
2057 Register reg2,
2058 intptr_t offset,
2060 Register temp,
2061 Label* equals) override;
2062
2063 void EnterFrame(intptr_t frame_size);
2065 void Ret() { ret(); }
2066
2067 // Sets the return address to [value] as if there was a call.
2068 // On ARM64 sets LR.
2070
2071 // Emit code to transition between generated mode and native mode.
2072 //
2073 // These require and ensure that CSP and SP are equal and aligned and require
2074 // a scratch register (in addition to TMP/TMP2).
2075
2076 void TransitionGeneratedToNative(Register destination_address,
2077 Register new_exit_frame,
2078 Register new_exit_through_ffi,
2079 bool enter_safepoint);
2081 bool exit_safepoint,
2082 bool ignore_unwind_in_progress = false);
2084 void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);
2085
2088
2089 // Restores the values of the registers that are blocked to cache some values
2090 // e.g. HEAP_BITS and NULL_REG.
2092
2094
2095 void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
2096 void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
2098
2099 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope,
2100 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
2101
2102 // Set up a stub frame so that the stack traversal code can easily identify
2103 // a stub frame.
2106
2107 // Set up a frame for calling a C function.
2108 // Automatically save the pinned registers in Dart which are not callee-
2109 // saved in the native calling convention.
2110 // Use together with CallCFunction.
2111 void EnterCFrame(intptr_t frame_space);
2113
2117
2118 void CombineHashes(Register hash, Register other) override;
2119 void FinalizeHashForSize(intptr_t bit_size,
2120 Register hash,
2121 Register scratch = TMP) override;
2122
2123 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
2124 // which will allocate in the runtime where tracing occurs.
2126 Label* trace,
2127 Register temp_reg,
2129
2131 Label* trace,
2132 Register temp_reg,
2134
2135 void TryAllocateObject(intptr_t cid,
2136 intptr_t instance_size,
2137 Label* failure,
2138 JumpDistance distance,
2139 Register instance_reg,
2140 Register top_reg) override;
2141
2142 void TryAllocateArray(intptr_t cid,
2143 intptr_t instance_size,
2144 Label* failure,
2146 Register end_address,
2147 Register temp1,
2148 Register temp2);
2149
2151#if defined(DEBUG)
2152 Label okay;
2153 ldr(tmp, Address(top, 0));
2155 b(&okay, EQUAL);
2156 Stop("Allocation canary");
2157 Bind(&okay);
2158#endif
2159 }
2161#if defined(DEBUG)
2162 ASSERT(top != TMP);
2163 LoadImmediate(TMP, kAllocationCanary);
2164 str(TMP, Address(top, 0));
2165#endif
2166 }
2167
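// Canary sketch (illustrative): the two helpers above pair up in DEBUG
// builds, so a clobbered word past the allocation top is caught early:
//
//   __ WriteAllocationCanary(top);  // store kAllocationCanary at [top]
//   ...
//   __ CheckAllocationCanary(top);  // Stop("Allocation canary") on mismatch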
2168 // Copy [size] bytes from [src] address to [dst] address.
2169 // [size] should be a multiple of word size.
2170 // Clobbers [src], [dst], [size] and [temp] registers.
2171 void CopyMemoryWords(Register src,
2172 Register dst,
2173 Register size,
2174 Register temp);
2175
2176 // This emits a PC-relative call of the form "bl <offset>". The offset
2177 // is not yet known and therefore needs relocation to the right place before
2178 // the code can be used.
2179 //
2180 // The necessary information for the "linker" (i.e. the relocation
2181 // information) is stored in [UntaggedCode::static_calls_target_table_]: an
2182 // entry of the form
2183 //
2184 // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
2185 //
2186 // will be used during relocation to fix the offset.
2187 //
2188 // The provided [offset_into_target] will be added to calculate the final
2189 // destination. It can be used e.g. for calling into the middle of a
2190 // function.
2191 void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);
2192
2193 // This emits a PC-relative tail call of the form "b <offset>".
2194 //
2195 // See also the PC-relative call above.
2196 void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);
2197
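// Sketch (illustrative): the call is emitted with a placeholder offset plus
// a static-calls-table entry, and the relocator later rewrites the "bl":
//
//   __ GenerateUnRelocatedPcRelativeCall();  // later patched to "bl <target>"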
2198 static bool AddressCanHoldConstantIndex(const Object& constant,
2199 bool is_external,
2200 intptr_t cid,
2201 intptr_t index_scale);
2202
2203 Address ElementAddressForIntIndex(bool is_external,
2204 intptr_t cid,
2205 intptr_t index_scale,
2206 Register array,
2207 intptr_t index) const;
2208 void ComputeElementAddressForIntIndex(Register address,
2209 bool is_external,
2210 intptr_t cid,
2211 intptr_t index_scale,
2212 Register array,
2213 intptr_t index);
2214 Address ElementAddressForRegIndex(bool is_external,
2215 intptr_t cid,
2216 intptr_t index_scale,
2217 bool index_unboxed,
2218 Register array,
2219 Register index,
2220 Register temp);
2221
2222 // Special version of ElementAddressForRegIndex for the case when cid and
2223 // operand size for the target load don't match (e.g. when loading a few
2224 // elements of the array with one load).
2225 Address ElementAddressForRegIndexWithSize(bool is_external,
2226 intptr_t cid,
2227 OperandSize size,
2228 intptr_t index_scale,
2229 bool index_unboxed,
2230 Register array,
2231 Register index,
2232 Register temp);
2233
2234 void ComputeElementAddressForRegIndex(Register address,
2235 bool is_external,
2236 intptr_t cid,
2237 intptr_t index_scale,
2238 bool index_unboxed,
2239 Register array,
2240 Register index);
2241
2242 void LoadStaticFieldAddress(Register address,
2243 Register field,
2244 Register scratch);
2245
2246#if defined(DART_COMPRESSED_POINTERS)
2247 void LoadCompressedFieldAddressForRegOffset(
2248 Register address,
2249 Register instance,
2250 Register offset_in_words_as_smi) override;
2251#endif
2252
2253 void LoadFieldAddressForRegOffset(Register address,
2254 Register instance,
2255 Register offset_in_words_as_smi) override;
2256
2257 void LoadFieldAddressForOffset(Register address,
2258 Register instance,
2259 int32_t offset) override {
2260 AddImmediate(address, instance, offset - kHeapObjectTag);
2261 }
2262
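// Worked example (illustrative; assumes kHeapObjectTag == 1 for tagged heap
// pointers): a field at untagged offset 16 of a tagged instance pointer is
// addressed as instance + 16 - 1, which is what the body above computes.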
2263 // Returns object data offset for address calculation; for heap objects also
2264 // accounts for the tag.
2265 static int32_t HeapDataOffset(bool is_external, intptr_t cid) {
2266 return is_external
2267 ? 0
2268 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
2269 }
2270
2271 static int32_t EncodeImm26BranchOffset(int64_t imm, int32_t instr) {
2272 const int32_t imm32 = static_cast<int32_t>(imm);
2273 const int32_t off = (((imm32 >> 2) << kImm26Shift) & kImm26Mask);
2274 return (instr & ~kImm26Mask) | off;
2275 }
2276
2277 static int64_t DecodeImm26BranchOffset(int32_t instr) {
2278 const int32_t off = (((instr & kImm26Mask) >> kImm26Shift) << 6) >> 4;
2279 return static_cast<int64_t>(off);
2280 }
2281
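// Round-trip sketch (illustrative; assumes imm26 occupies bits [25:0], i.e.
// kImm26Shift == 0): the encoder stores a word offset, and the decoder
// sign-extends it back into a byte offset:
//
//   int32_t instr = EncodeImm26BranchOffset(-8, 0);  // field = 0x3fffffe
//   // DecodeImm26BranchOffset(instr) == -8: (imm26 << 6) >> 4 is the
//   // sign-extended field multiplied by 4.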
2282 private:
2283 bool use_far_branches_;
2284
2285 bool constant_pool_allowed_;
2286
2287 compiler::LRState lr_state_ = compiler::LRState::OnEntry();
2288
2289 // Note: this function never clobbers the TMP and TMP2 scratch registers.
2290 void LoadObjectHelper(Register dst, const Object& obj, bool is_unique);
2291
2292 void AddSubHelper(OperandSize os,
2293 bool set_flags,
2294 bool subtract,
2295 Register rd,
2296 Register rn,
2297 Operand o) {
2298 ASSERT((rd != R31) && (rn != R31));
2299 const Register crd = ConcreteRegister(rd);
2300 const Register crn = ConcreteRegister(rn);
2301 if (o.type() == Operand::Immediate) {
2302 ASSERT(rn != ZR);
2303 EmitAddSubImmOp(subtract ? SUBI : ADDI, crd, crn, o, os, set_flags);
2304 } else if (o.type() == Operand::Shifted) {
2305 ASSERT((rd != CSP) && (rn != CSP));
2306 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags);
2307 } else {
2308 ASSERT(o.type() == Operand::Extended);
2309 ASSERT((rd != CSP) && (rn != ZR));
2310 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags);
2311 }
2312 }
2313
2314 void AddSubWithCarryHelper(OperandSize sz,
2315 bool set_flags,
2316 bool subtract,
2317 Register rd,
2318 Register rn,
2319 Register rm) {
2320 ASSERT((rd != R31) && (rn != R31) && (rm != R31));
2321 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2322 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2323 const int32_t s = set_flags ? B29 : 0;
2324 const int32_t op = subtract ? SBC : ADC;
2325 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2326 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm);
2327 Emit(encoding);
2328 }
2329
2330 void EmitAddSubImmOp(AddSubImmOp op,
2331 Register rd,
2332 Register rn,
2333 Operand o,
2334 OperandSize sz,
2335 bool set_flags) {
2336 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2337 (sz == kUnsignedFourBytes));
2338 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2339 const int32_t s = set_flags ? B29 : 0;
2340 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2341 Arm64Encode::Rn(rn) | o.encoding();
2342 Emit(encoding);
2343 }
2344
2345 // Follows the *bfm instructions in taking r before s (unlike the Operand
2346 // constructor, which follows DecodeBitMasks from Appendix G).
2347 void EmitBitfieldOp(BitfieldOp op,
2348 Register rd,
2349 Register rn,
2350 int r_imm,
2351 int s_imm,
2352 OperandSize size) {
2353 if (size != kEightBytes) {
2354 ASSERT(size == kFourBytes);
2355 ASSERT(r_imm < 32 && s_imm < 32);
2356 } else {
2357 ASSERT(r_imm < 64 && s_imm < 64);
2358 }
2359 const int32_t instr = op | (size == kEightBytes ? Bitfield64 : 0);
2360 const int32_t encoding = instr | Operand(0, s_imm, r_imm).encoding() |
2361 Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
2362 Emit(encoding);
2363 }
2364
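// Mapping sketch (illustrative): the public bitfield helpers reduce to the
// *bfm primitives; e.g. ubfx(rd, rn, lsb, width) is ubfm(rd, rn,
// /*r_imm=*/lsb, /*s_imm=*/lsb + width - 1), which lands here with r
// encoded before s.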
2365 void EmitLogicalImmOp(LogicalImmOp op,
2366 Register rd,
2367 Register rn,
2368 Operand o,
2369 OperandSize sz) {
2370 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2371 (sz == kUnsignedFourBytes));
2372 ASSERT((rd != R31) && (rn != R31));
2373 ASSERT(rn != CSP);
2374 ASSERT((op == ANDIS) || (rd != ZR)); // op != ANDIS => rd != ZR.
2375 ASSERT((op != ANDIS) || (rd != CSP)); // op == ANDIS => rd != CSP.
2376 ASSERT(o.type() == Operand::BitfieldImm);
2377 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2378 const int32_t encoding =
2379 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
2380 Emit(encoding);
2381 }
2382
2383 void EmitLogicalShiftOp(LogicalShiftOp op,
2384 Register rd,
2385 Register rn,
2386 Operand o,
2387 OperandSize sz) {
2388 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2389 (sz == kUnsignedFourBytes));
2390 ASSERT((rd != R31) && (rn != R31));
2391 ASSERT((rd != CSP) && (rn != CSP));
2392 ASSERT(o.type() == Operand::Shifted);
2393 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2394 const int32_t encoding =
2395 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
2396 Emit(encoding);
2397 }
2398
2399 void EmitAddSubShiftExtOp(AddSubShiftExtOp op,
2400 Register rd,
2401 Register rn,
2402 Operand o,
2403 OperandSize sz,
2404 bool set_flags) {
2405 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2406 (sz == kUnsignedFourBytes));
2407 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2408 const int32_t s = set_flags ? B29 : 0;
2409 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2410 Arm64Encode::Rn(rn) | o.encoding();
2411 Emit(encoding);
2412 }
2413
2414 int32_t BindImm26Branch(int64_t position, int64_t dest);
2415 int32_t BindImm19Branch(int64_t position, int64_t dest);
2416 int32_t BindImm14Branch(int64_t position, int64_t dest);
2417
2418 int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) {
2419 if (!CanEncodeImm19BranchOffset(imm)) {
2420 ASSERT(!use_far_branches());
2421 BailoutWithBranchOffsetError();
2422 }
2423 const int32_t imm32 = static_cast<int32_t>(imm);
2424 const int32_t off =
2425 ((static_cast<uint32_t>(imm32 >> 2) << kImm19Shift) & kImm19Mask);
2426 return (instr & ~kImm19Mask) | off;
2427 }
2428
2429 int64_t DecodeImm19BranchOffset(int32_t instr) {
2430 int32_t insns = (static_cast<uint32_t>(instr) & kImm19Mask) >> kImm19Shift;
2431 const int32_t off = static_cast<int32_t>(insns << 13) >> 11;
2432 return static_cast<int64_t>(off);
2433 }
2434
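// Worked example (illustrative): imm19 holds a word offset in bits [23:5];
// the decode shifts the field up to the sign bit and back down, so a field
// of all ones ((insns << 13) >> 11) yields -4, i.e. one instruction back.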
2435 int32_t EncodeImm14BranchOffset(int64_t imm, int32_t instr) {
2436 if (!CanEncodeImm14BranchOffset(imm)) {
2437 ASSERT(!use_far_branches());
2438 BailoutWithBranchOffsetError();
2439 }
2440 const int32_t imm32 = static_cast<int32_t>(imm);
2441 const int32_t off =
2442 ((static_cast<uint32_t>(imm32 >> 2) << kImm14Shift) & kImm14Mask);
2443 return (instr & ~kImm14Mask) | off;
2444 }
2445
2446 int64_t DecodeImm14BranchOffset(int32_t instr) {
2447 int32_t insns = (static_cast<uint32_t>(instr) & kImm14Mask) >> kImm14Shift;
2448 const int32_t off = static_cast<int32_t>(insns << 18) >> 16;
2449 return static_cast<int64_t>(off);
2450 }
2451
2452 bool IsUnconditionalBranch(int32_t instr) {
2453 return (instr & UnconditionalBranchMask) ==
2454 (UnconditionalBranchFixed & UnconditionalBranchMask);
2455 }
2456
2457 bool IsConditionalBranch(int32_t instr) {
2458 return (instr & ConditionalBranchMask) ==
2459 (ConditionalBranchFixed & ConditionalBranchMask);
2460 }
2461
2462 bool IsCompareAndBranch(int32_t instr) {
2463 return (instr & CompareAndBranchMask) ==
2464 (CompareAndBranchFixed & CompareAndBranchMask);
2465 }
2466
2467 bool IsTestAndBranch(int32_t instr) {
2468 return (instr & TestAndBranchMask) ==
2469 (TestAndBranchFixed & TestAndBranchMask);
2470 }
2471
2472 Condition DecodeImm19BranchCondition(int32_t instr) {
2473 if (IsConditionalBranch(instr)) {
2474 return static_cast<Condition>((instr & kCondMask) >> kCondShift);
2475 }
2476 ASSERT(IsCompareAndBranch(instr));
2477 return (instr & B24) ? EQ : NE; // cbz : cbnz
2478 }
2479
2480 int32_t EncodeImm19BranchCondition(Condition cond, int32_t instr) {
2481 if (IsConditionalBranch(instr)) {
2482 const int32_t c_imm = static_cast<int32_t>(cond);
2483 return (instr & ~kCondMask) | (c_imm << kCondShift);
2484 }
2485 ASSERT(IsCompareAndBranch(instr));
2486 return (instr & ~B24) | (cond == EQ ? B24 : 0); // cbz : cbnz
2487 }
2488
2489 Condition DecodeImm14BranchCondition(int32_t instr) {
2490 ASSERT(IsTestAndBranch(instr));
2491 return (instr & B24) ? EQ : NE; // tbz : tbnz
2492 }
2493
2494 int32_t EncodeImm14BranchCondition(Condition cond, int32_t instr) {
2495 ASSERT(IsTestAndBranch(instr));
2496 return (instr & ~B24) | (cond == EQ ? B24 : 0); // tbz : tbnz
2497 }
2498
2499 void EmitCompareAndBranchOp(CompareAndBranchOp op,
2500 Register rt,
2501 int64_t imm,
2502 OperandSize sz) {
2503 // EncodeImm19BranchOffset will longjump out if the offset does not fit in
2504 // 19 bits.
2505 const int32_t encoded_offset = EncodeImm19BranchOffset(imm, 0);
2506 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2507 (sz == kUnsignedFourBytes));
2508 ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0));
2509 ASSERT((rt != CSP) && (rt != R31));
2510 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2511 const int32_t encoding = op | size | Arm64Encode::Rt(rt) | encoded_offset;
2512 Emit(encoding);
2513 }
2514
2515 void EmitTestAndBranchOp(TestAndBranchOp op,
2516 Register rt,
2517 intptr_t bit_number,
2518 int64_t imm) {
2519 // EncodeImm14BranchOffset will longjump out if the offset does not fit in
2520 // 14 bits.
2521 const int32_t encoded_offset = EncodeImm14BranchOffset(imm, 0);
2522 ASSERT((bit_number >= 0) && (bit_number <= 63));
2523 ASSERT(Utils::IsInt(16, imm) && ((imm & 0x3) == 0));
2524 ASSERT((rt != CSP) && (rt != R31));
2525 const Register crt = ConcreteRegister(rt);
2526 int32_t bit_number_low = bit_number & 0x1f;
2527 int32_t bit_number_hi = (bit_number & 0x20) >> 5;
2528 const int32_t encoding =
2529 op | (bit_number_low << 19) | (bit_number_hi << 31) |
2530 (static_cast<int32_t>(crt) << kRtShift) | encoded_offset;
2531 Emit(encoding);
2532 }
2533
2534 void EmitConditionalBranchOp(ConditionalBranchOp op,
2535 Condition cond,
2536 int64_t imm) {
2537 ASSERT(cond != AL);
2538 const int32_t off = EncodeImm19BranchOffset(imm, 0);
2539 const int32_t encoding =
2540 op | (static_cast<int32_t>(cond) << kCondShift) | off;
2541 Emit(encoding);
2542 }
2543
2544 bool CanEncodeImm19BranchOffset(int64_t offset) {
2545 ASSERT(Utils::IsAligned(offset, 4));
2546 return Utils::IsInt(21, offset);
2547 }
2548
2549 bool CanEncodeImm14BranchOffset(int64_t offset) {
2550 ASSERT(Utils::IsAligned(offset, 4));
2551 return Utils::IsInt(16, offset);
2552 }
2553
2554 void EmitConditionalBranch(ConditionalBranchOp op,
2555 Condition cond,
2556 Label* label) {
2557 ASSERT(cond != AL);
2558 if (label->IsBound()) {
2559 const int64_t dest = label->Position() - buffer_.Size();
2560 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
2561 EmitConditionalBranchOp(op, InvertCondition(cond),
2562 2 * Instr::kInstrSize);
2563 // Make a new dest that takes the new position into account after the
2564 // inverted test.
2565 const int64_t dest = label->Position() - buffer_.Size();
2566 b(dest);
2567 } else {
2568 EmitConditionalBranchOp(op, cond, dest);
2569 }
2570 label->UpdateLRState(lr_state());
2571 } else {
2572 const int64_t position = buffer_.Size();
2573 if (use_far_branches()) {
2574 // When cond is AL, this guard branch will be rewritten as a nop when
2575 // the label is bound. We don't write it as a nop initially because it
2576 // makes the decoding code in Bind simpler.
2577 EmitConditionalBranchOp(op, InvertCondition(cond),
2578 2 * Instr::kInstrSize);
2579 b(label->position_);
2580 } else {
2581 EmitConditionalBranchOp(op, cond, label->position_);
2582 }
2583 label->LinkTo(position, lr_state());
2584 }
2585 }
2586
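// Far-branch sketch (illustrative): when the target is out of imm19 range,
// the inverted-guard pair emitted above behaves like a single "b.cond L":
//
//   b.!cond +8   // skip the far branch when the original condition fails
//   b       L    // unconditional branch with imm26 range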
2587 void EmitCompareAndBranch(CompareAndBranchOp op,
2588 Register rt,
2589 Label* label,
2590 OperandSize sz) {
2591 if (label->IsBound()) {
2592 const int64_t dest = label->Position() - buffer_.Size();
2593 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
2594 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt,
2595 2 * Instr::kInstrSize, sz);
2596 // Make a new dest that takes the new position into account after the
2597 // inverted test.
2598 const int64_t dest = label->Position() - buffer_.Size();
2599 b(dest);
2600 } else {
2601 EmitCompareAndBranchOp(op, rt, dest, sz);
2602 }
2603 label->UpdateLRState(lr_state());
2604 } else {
2605 const int64_t position = buffer_.Size();
2606 if (use_far_branches()) {
2607 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt,
2608 2 * Instr::kInstrSize, sz);
2609 b(label->position_);
2610 } else {
2611 EmitCompareAndBranchOp(op, rt, label->position_, sz);
2612 }
2613 label->LinkTo(position, lr_state());
2614 }
2615 }
2616
2617 void EmitTestAndBranch(TestAndBranchOp op,
2618 Register rt,
2619 intptr_t bit_number,
2620 Label* label) {
2621 if (label->IsBound()) {
2622 const int64_t dest = label->Position() - buffer_.Size();
2623 if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
2624 EmitTestAndBranchOp(op == TBZ ? TBNZ : TBZ, rt, bit_number,
2625 2 * Instr::kInstrSize);
2626 // Make a new dest that takes the new position into account after the
2627 // inverted test.
2628 const int64_t dest = label->Position() - buffer_.Size();
2629 b(dest);
2630 } else {
2631 EmitTestAndBranchOp(op, rt, bit_number, dest);
2632 }
2633 label->UpdateLRState(lr_state());
2634 } else {
2635 int64_t position = buffer_.Size();
2636 if (use_far_branches()) {
2637 EmitTestAndBranchOp(op == TBZ ? TBNZ : TBZ, rt, bit_number,
2638 2 * Instr::kInstrSize);
2639 b(label->position_);
2640 } else {
2641 EmitTestAndBranchOp(op, rt, bit_number, label->position_);
2642 }
2643 label->LinkTo(position, lr_state());
2644 }
2645 }
2646
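// Sketch (illustrative): the same inversion serves cbz/cbnz and tbz/tbnz;
// an out-of-range "cbz x0, L" becomes:
//
//   cbnz x0, +8   // inverted guard
//   b    L        // imm26-range branch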
2647 bool CanEncodeImm26BranchOffset(int64_t offset) {
2648 ASSERT(Utils::IsAligned(offset, 4));
2649 return Utils::IsInt(26, offset);
2650 }
2651
2652 void EmitUnconditionalBranchOp(UnconditionalBranchOp op, int64_t offset) {
2653 ASSERT(CanEncodeImm26BranchOffset(offset));
2654 const int32_t off = ((offset >> 2) << kImm26Shift) & kImm26Mask;
2655 const int32_t encoding = op | off;
2656 Emit(encoding);
2657 }
2658
2659 void EmitUnconditionalBranch(UnconditionalBranchOp op, Label* label) {
2660 if (label->IsBound()) {
2661 const int64_t dest = label->Position() - buffer_.Size();
2662 EmitUnconditionalBranchOp(op, dest);
2663 label->UpdateLRState(lr_state());
2664 } else {
2665 const int64_t position = buffer_.Size();
2666 EmitUnconditionalBranchOp(op, label->position_);
2667 label->LinkTo(position, lr_state());
2668 }
2669 }
2670
2671 void EmitUnconditionalBranchRegOp(UnconditionalBranchRegOp op, Register rn) {
2672 ASSERT((rn != CSP) && (rn != R31));
2673 const int32_t encoding = op | Arm64Encode::Rn(rn);
2674 Emit(encoding);
2675 }
2676
2677 static int32_t ExceptionGenOpEncoding(ExceptionGenOp op, uint16_t imm) {
2678 return op | (static_cast<int32_t>(imm) << kImm16Shift);
2679 }
2680
2681 void EmitExceptionGenOp(ExceptionGenOp op, uint16_t imm) {
2682 Emit(ExceptionGenOpEncoding(op, imm));
2683 }
2684
2685 void EmitMoveWideOp(MoveWideOp op,
2686 Register rd,
2687 const Immediate& imm,
2688 int hw_idx,
2689 OperandSize sz) {
2690 ASSERT((hw_idx >= 0) && (hw_idx <= 3));
2691 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2692 (sz == kUnsignedFourBytes));
2693 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2694 const int32_t encoding =
2695 op | size | Arm64Encode::Rd(rd) |
2696 (static_cast<int32_t>(hw_idx) << kHWShift) |
2697 (static_cast<int32_t>(imm.value() & 0xffff) << kImm16Shift);
2698 Emit(encoding);
2699 }
2700
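// Sketch (illustrative): a 64-bit constant is materialized 16 bits at a
// time, hw_idx selecting the halfword (cf. LoadImmediate):
//
//   movz x0, #0xdead, lsl #0    // hw_idx 0
//   movk x0, #0xbeef, lsl #16   // hw_idx 1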
2701 void EmitLoadStoreExclusive(LoadStoreExclusiveOp op,
2702 Register rs,
2703 Register rn,
2704 Register rt,
2705 OperandSize sz = kEightBytes) {
2706 ASSERT(sz == kEightBytes || sz == kFourBytes);
2707 const int32_t size = B31 | (sz == kEightBytes ? B30 : 0);
2708
2709 ASSERT((rs != kNoRegister) && (rs != CSP));
2710 ASSERT((rn != kNoRegister) && (rn != ZR));
2711 ASSERT((rt != kNoRegister) && (rt != CSP));
2712
2713 const int32_t encoding = op | size | Arm64Encode::Rs(rs) |
2714 (static_cast<int32_t>(R31) << kRt2Shift) | Arm64Encode::Rn(rn) |
2715 Arm64Encode::Rt(rt);
2716 Emit(encoding);
2717 }
2718
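// Sketch (illustrative): ldxr/stxr form the classic retry loop; the status
// register written by stxr is 0 on success:
//
//   retry:
//     ldxr x1, [x2]       // load-exclusive
//     stxr w3, x0, [x2]   // store-exclusive, w3 = status
//     cbnz w3, retry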
2719 void EmitAtomicMemory(AtomicMemoryOp op,
2720 Register rs,
2721 Register rn,
2722 Register rt,
2723 OperandSize sz = kEightBytes) {
2724 ASSERT(sz == kEightBytes || sz == kFourBytes);
2725 const int32_t size = B31 | (sz == kEightBytes ? B30 : 0);
2726
2727 ASSERT((rs != kNoRegister) && (rs != CSP));
2728 ASSERT((rn != kNoRegister) && (rn != ZR));
2729 ASSERT((rt != kNoRegister) && (rt != CSP));
2730
2731 const int32_t encoding = op | size | Arm64Encode::Rs(rs) |
2732 Arm64Encode::Rn(rn) | Arm64Encode::Rt(rt);
2733 Emit(encoding);
2734 }
2735
2736 void EmitLoadStoreReg(LoadStoreRegOp op,
2737 Register rt,
2738 Address a,
2739 OperandSize sz) {
2740 // Unpredictable, illegal on some microarchitectures.
2741 ASSERT((op != LDR && op != STR && op != LDRS) || a.can_writeback_to(rt));
2742
2743 const int32_t size = Log2OperandSizeBytes(sz);
2744 const int32_t encoding =
2745 op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding(sz);
2746 Emit(encoding);
2747 }
2748
2749 void EmitLoadRegLiteral(LoadRegLiteralOp op,
2750 Register rt,
2751 Address a,
2752 OperandSize sz) {
2753 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2754 (sz == kUnsignedFourBytes));
2755 ASSERT((rt != CSP) && (rt != R31));
2756 const int32_t size = (sz == kEightBytes) ? B30 : 0;
2757 const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding(sz);
2758 Emit(encoding);
2759 }
2760
2761 void EmitLoadStoreRegPair(LoadStoreRegPairOp op,
2762 Register rt,
2763 Register rt2,
2764 Address a,
2765 OperandSize sz) {
2766 // Unpredictable, illegal on some microarchitectures.
2767 ASSERT(a.can_writeback_to(rt) && a.can_writeback_to(rt2));
2768 ASSERT(op != LDP || rt != rt2);
2769
2770 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2771 (sz == kUnsignedFourBytes));
2772 ASSERT((rt != CSP) && (rt != R31));
2773 ASSERT((rt2 != CSP) && (rt2 != R31));
2774 int32_t opc = 0;
2775 switch (sz) {
2776 case kEightBytes:
2777 opc = B31;
2778 break;
2779 case kFourBytes:
2780 opc = op == LDP ? B30 : 0;
2781 break;
2782 case kUnsignedFourBytes:
2783 opc = 0;
2784 break;
2785 default:
2786 UNREACHABLE();
2787 break;
2788 }
2789 const int32_t encoding =
2790 opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding(sz);
2791 Emit(encoding);
2792 }
2793
2794 void EmitLoadStoreVRegPair(LoadStoreRegPairOp op,
2795 VRegister rt,
2796 VRegister rt2,
2797 Address a,
2798 OperandSize sz) {
2799 ASSERT(op != FLDP || rt != rt2);
2800 ASSERT((sz == kSWord) || (sz == kDWord) || (sz == kQWord));
2801 int32_t opc = 0;
2802 switch (sz) {
2803 case kSWord:
2804 opc = 0;
2805 break;
2806 case kDWord:
2807 opc = B30;
2808 break;
2809 case kQWord:
2810 opc = B31;
2811 break;
2812 default:
2813 UNREACHABLE();
2814 break;
2815 }
2816 const int32_t encoding =
2817 opc | op | Arm64Encode::Rt(static_cast<Register>(rt)) |
2818 Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding(sz);
2819 Emit(encoding);
2820 }
2821
2822 void EmitPCRelOp(PCRelOp op, Register rd, const Immediate& imm) {
2823 ASSERT(Utils::IsInt(21, imm.value()));
2824 ASSERT((rd != R31) && (rd != CSP));
2825 const int32_t loimm = (imm.value() & 0x3) << 29;
2826 const int32_t hiimm =
2827 (static_cast<uint32_t>(imm.value() >> 2) << kImm19Shift) & kImm19Mask;
2828 const int32_t encoding = op | loimm | hiimm | Arm64Encode::Rd(rd);
2829 Emit(encoding);
2830 }
2831
2832 void EmitMiscDP1Source(MiscDP1SourceOp op,
2833 Register rd,
2834 Register rn,
2835 OperandSize sz) {
2836 ASSERT((rd != CSP) && (rn != CSP));
2837 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2838 (sz == kUnsignedFourBytes));
2839 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2840 const int32_t encoding =
2841 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
2842 Emit(encoding);
2843 }
2844
2845 void EmitMiscDP2Source(MiscDP2SourceOp op,
2846 Register rd,
2847 Register rn,
2848 Register rm,
2849 OperandSize sz) {
2850 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2851 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2852 (sz == kUnsignedFourBytes));
2853 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2854 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2855 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm);
2856 Emit(encoding);
2857 }
2858
2859 void EmitMiscDP3Source(MiscDP3SourceOp op,
2860 Register rd,
2861 Register rn,
2862 Register rm,
2863 Register ra,
2864 OperandSize sz) {
2865 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP) && (ra != CSP));
2866 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2867 (sz == kUnsignedFourBytes));
2868 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2869 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2870 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
2871 Arm64Encode::Ra(ra);
2872 Emit(encoding);
2873 }
2874
2875 void EmitConditionalSelect(ConditionalSelectOp op,
2876 Register rd,
2877 Register rn,
2878 Register rm,
2879 Condition cond,
2880 OperandSize sz) {
2881 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2882 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2883 (sz == kUnsignedFourBytes));
2884 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2885 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2886 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
2887 (static_cast<int32_t>(cond) << kSelCondShift);
2888 Emit(encoding);
2889 }
2890
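// Sketch (illustrative): conditional select replaces a branch:
//
//   csel x0, x1, x2, eq   // x0 = (flags indicate EQ) ? x1 : x2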
2891 void EmitFPImm(FPImmOp op, VRegister vd, uint8_t imm8) {
2892 const int32_t encoding =
2893 op | (static_cast<int32_t>(vd) << kVdShift) | (imm8 << kImm8Shift);
2894 Emit(encoding);
2895 }
2896
2897 void EmitFPIntCvtOp(FPIntCvtOp op,
2898 Register rd,
2899 Register rn,
2900 OperandSize sz = kEightBytes) {
2901 ASSERT((sz == kEightBytes) || (sz == kFourBytes));
2902 const int32_t sfield = (sz == kEightBytes) ? B31 : 0;
2903 const int32_t encoding =
2904 op | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | sfield;
2905 Emit(encoding);
2906 }
2907
2908 void EmitFPOneSourceOp(FPOneSourceOp op, VRegister vd, VRegister vn) {
2909 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2910 (static_cast<int32_t>(vn) << kVnShift);
2911 Emit(encoding);
2912 }
2913
2914 void EmitFPTwoSourceOp(FPTwoSourceOp op,
2915 VRegister vd,
2916 VRegister vn,
2917 VRegister vm) {
2918 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2919 (static_cast<int32_t>(vn) << kVnShift) |
2920 (static_cast<int32_t>(vm) << kVmShift);
2921 Emit(encoding);
2922 }
2923
2924 void EmitFPCompareOp(FPCompareOp op, VRegister vn, VRegister vm) {
2925 const int32_t encoding = op | (static_cast<int32_t>(vn) << kVnShift) |
2926 (static_cast<int32_t>(vm) << kVmShift);
2927 Emit(encoding);
2928 }
2929
2930 void EmitSIMDThreeSameOp(SIMDThreeSameOp op,
2931 VRegister vd,
2932 VRegister vn,
2933 VRegister vm) {
2934 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2935 (static_cast<int32_t>(vn) << kVnShift) |
2936 (static_cast<int32_t>(vm) << kVmShift);
2937 Emit(encoding);
2938 }
2939
2940 void EmitSIMDCopyOp(SIMDCopyOp op,
2941 VRegister vd,
2942 VRegister vn,
2943 OperandSize sz,
2944 int32_t idx4,
2945 int32_t idx5) {
2946 const int32_t shift = Log2OperandSizeBytes(sz);
2947 const int32_t imm5 = ((idx5 << (shift + 1)) | (1 << shift)) & 0x1f;
2948 const int32_t imm4 = (idx4 << shift) & 0xf;
2949 const int32_t encoding = op | (imm5 << kImm5Shift) | (imm4 << kImm4Shift) |
2950 (static_cast<int32_t>(vd) << kVdShift) |
2951 (static_cast<int32_t>(vn) << kVnShift);
2952 Emit(encoding);
2953 }
2954
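// Worked example (illustrative): for a 4-byte lane (shift == 2) and lane
// index 3, imm5 == ((3 << 3) | (1 << 2)) == 0b11100; the lowest set bit
// encodes the lane size and the bits above it carry the index.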
2955 void EmitSIMDTwoRegOp(SIMDTwoRegOp op, VRegister vd, VRegister vn) {
2956 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2957 (static_cast<int32_t>(vn) << kVnShift);
2958 Emit(encoding);
2959 }
2960
2961 void BranchLink(intptr_t target_code_pool_index, CodeEntryKind entry_kind);
2962
2963 friend class dart::FlowGraphCompiler;
2964 std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
2965 std::function<void()> generate_invoke_array_write_barrier_;
2966
2967 DISALLOW_ALLOCATION();
2968 DISALLOW_COPY_AND_ASSIGN(Assembler);
2969};
2970
2971} // namespace compiler
2972} // namespace dart
2973
2974#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_