assembler_arm64.h
// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_
#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_

#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif  // defined(DART_PRECOMPILED_RUNTIME)

#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
#error Do not include assembler_arm64.h directly; use assembler.h instead.
#endif

#include <functional>

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/class_id.h"
#include "vm/constants.h"
#include "vm/hash_map.h"
#include "vm/simulator.h"

namespace dart {

// Forward declarations.
class FlowGraphCompiler;
class RuntimeEntry;
class RegisterSet;

namespace compiler {
static inline int Log2OperandSizeBytes(OperandSize os) {
  switch (os) {
    case kByte:
    case kUnsignedByte:
      return 0;
    case kTwoBytes:
    case kUnsignedTwoBytes:
      return 1;
    case kFourBytes:
    case kUnsignedFourBytes:
    case kSWord:
      return 2;
    case kEightBytes:
    case kDWord:
      return 3;
    case kQWord:
      return 4;
    default:
      UNREACHABLE();
      break;
  }
  return -1;
}
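// For example, Log2OperandSizeBytes(kFourBytes) == 2 and
// Log2OperandSizeBytes(kQWord) == 4; these scales line up with the shift
// amounts used by the scaled addressing modes in the Address class below.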

static inline bool IsSignedOperand(OperandSize os) {
  switch (os) {
    case kByte:
    case kTwoBytes:
    case kFourBytes:
      return true;
    case kUnsignedByte:
    case kUnsignedTwoBytes:
    case kUnsignedFourBytes:
    case kEightBytes:
    case kSWord:
    case kDWord:
    case kQWord:
      return false;
    default:
      UNREACHABLE();
      break;
  }
  return false;
}
class Immediate : public ValueObject {
 public:
  explicit Immediate(int64_t value) : value_(value) {}

  Immediate(const Immediate& other) : ValueObject(), value_(other.value_) {}
  Immediate& operator=(const Immediate& other) {
    value_ = other.value_;
    return *this;
  }

 private:
  int64_t value_;

  int64_t value() const { return value_; }

  friend class Assembler;
};
class Arm64Encode : public AllStatic {
 public:
  static inline uint32_t Rd(Register rd) {
    ASSERT(rd <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rd)) << kRdShift;
  }

  static inline uint32_t Rm(Register rm) {
    ASSERT(rm <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rm)) << kRmShift;
  }

  static inline uint32_t Rn(Register rn) {
    ASSERT(rn <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rn)) << kRnShift;
  }

  static inline uint32_t Ra(Register ra) {
    ASSERT(ra <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(ra)) << kRaShift;
  }

  static inline uint32_t Rs(Register rs) {
    ASSERT(rs <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rs)) << kRsShift;
  }

  static inline uint32_t Rt(Register rt) {
    ASSERT(rt <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rt)) << kRtShift;
  }

  static inline uint32_t Rt2(Register rt2) {
    ASSERT(rt2 <= ZR);
    return static_cast<uint32_t>(ConcreteRegister(rt2)) << kRt2Shift;
  }
};
class Address : public ValueObject {
 public:
  Address(const Address& other)
      : ValueObject(),
        type_(other.type_),
        base_(other.base_),
        offset_(other.offset_) {}

  Address& operator=(const Address& other) {
    type_ = other.type_;
    base_ = other.base_;
    offset_ = other.offset_;
    return *this;
  }

  enum AddressType {
    Offset,
    PreIndex,
    PostIndex,
    PairOffset,
    PairPreIndex,
    PairPostIndex,
    Reg,
    PCOffset,
    Unknown,
  };

  // If we are doing pre-/post-indexing, and the base and result registers are
  // the same, then the result is unpredictable. This kind of instruction is
  // actually illegal on some microarchitectures.
  bool can_writeback_to(Register r) const {
    if (type() == PreIndex || type() == PostIndex || type() == PairPreIndex ||
        type() == PairPostIndex) {
      return ConcreteRegister(base()) != ConcreteRegister(r);
    }
    return true;
  }

  // Offset is in bytes.
  explicit Address(Register rn, int32_t offset = 0, AddressType at = Offset) {
    ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR));
    type_ = at;
    base_ = rn;
    offset_ = offset;
  }

  // This addressing mode does not exist.
  Address(Register rn, Register offset, AddressType at) = delete;

  static bool CanHoldOffset(int32_t offset,
                            AddressType at = Offset,
                            OperandSize sz = kEightBytes) {
    if (at == Offset) {
      // Offset fits in 12 bit unsigned and has right alignment for sz,
      // or fits in 9 bit signed offset with no alignment restriction.
      const int32_t scale = Log2OperandSizeBytes(sz);
      return (Utils::IsUint(12 + scale, offset) &&
              (offset == ((offset >> scale) << scale))) ||
             (Utils::IsInt(9, offset));
    } else if (at == PCOffset) {
      return Utils::IsInt(21, offset) && (offset == ((offset >> 2) << 2));
    } else if ((at == PreIndex) || (at == PostIndex)) {
      return Utils::IsInt(9, offset);
    } else {
      ASSERT((at == PairOffset) || (at == PairPreIndex) ||
             (at == PairPostIndex));
      const int32_t scale = Log2OperandSizeBytes(sz);
      return (Utils::IsInt(7 + scale, offset) &&
              (static_cast<uint32_t>(offset) ==
               ((static_cast<uint32_t>(offset) >> scale) << scale)));
    }
  }
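  // For example (illustrative), with sz == kEightBytes (scale == 3)
  // CanHoldOffset(32760) holds because 32760 == 4095 * 8 fits the scaled
  // unsigned 12-bit immediate form, while CanHoldOffset(-256) holds via the
  // unscaled signed 9-bit form.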

  // PC-relative load address.
  static Address PC(int32_t pc_off) {
    ASSERT(CanHoldOffset(pc_off, PCOffset));
    Address addr;
    addr.base_ = kNoRegister;
    addr.type_ = PCOffset;
    addr.offset_ = pc_off;
    return addr;
  }

  static Address Pair(Register rn,
                      int32_t offset = 0,
                      AddressType at = PairOffset) {
    return Address(rn, offset, at);
  }

  // This addressing mode does not exist.
  static Address PC(Register r) = delete;

  enum Scaling {
    Unscaled,
    Scaled,
  };

  // Base register rn with offset rm. rm is sign-extended according to ext.
  // If ext is UXTX, rm may be optionally scaled by the
  // Log2OperandSize (specified by the instruction).
  Address(Register rn,
          Register rm,
          Extend ext = UXTX,
          Scaling scale = Unscaled) {
    ASSERT((rn != R31) && (rn != ZR));
    ASSERT((rm != R31) && (rm != CSP));
    // Can only scale when ext = UXTX.
    ASSERT((scale != Scaled) || (ext == UXTX));
    ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
    type_ = Reg;
    base_ = rn;
    // Use offset_ to store pre-encoded scale, extend and rm.
    offset_ = ((scale == Scaled) ? B12 : 0) | Arm64Encode::Rm(rm) |
              (static_cast<int32_t>(ext) << kExtendTypeShift);
  }
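  // For example (illustrative use, not from the original source), an address
  // of the form "[x1, x2, lsl #3]" for an 8-byte load would be written as:
  //
  //   __ ldr(R0, Address(R1, R2, UXTX, Address::Scaled), kEightBytes);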

  static OperandSize OperandSizeFor(intptr_t cid);

 private:
  uint32_t encoding(OperandSize sz) const {
    const int32_t offset = offset_;
    const int32_t scale = Log2OperandSizeBytes(sz);
    ASSERT((type_ == Reg) || CanHoldOffset(offset, type_, sz));
    switch (type_) {
      case Offset:
        if (Utils::IsUint(12 + scale, offset) &&
            (offset == ((offset >> scale) << scale))) {
          return B24 | ((offset >> scale) << kImm12Shift) |
                 Arm64Encode::Rn(base_);
        } else if (Utils::IsInt(9, offset)) {
          return ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
        } else {
          FATAL("Offset %d is out of range\n", offset);
        }
      case PreIndex:
      case PostIndex: {
        ASSERT(Utils::IsInt(9, offset));
        int32_t idx = (type_ == PostIndex) ? B10 : (B11 | B10);
        return idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
      }
      case PairOffset:
      case PairPreIndex:
      case PairPostIndex: {
        ASSERT(Utils::IsInt(7 + scale, offset) &&
               (static_cast<uint32_t>(offset) ==
                ((static_cast<uint32_t>(offset) >> scale) << scale)));
        int32_t idx = 0;
        switch (type_) {
          case PairPostIndex:
            idx = B23;
            break;
          case PairPreIndex:
            idx = B24 | B23;
            break;
          case PairOffset:
            idx = B24;
            break;
          default:
            UNREACHABLE();
            break;
        }
        return idx |
               ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) &
                kImm7Mask) |
               Arm64Encode::Rn(base_);
      }
      case PCOffset:
        return (((offset >> 2) << kImm19Shift) & kImm19Mask);
      case Reg:
        // Offset contains pre-encoded scale, extend and rm.
        return B21 | B11 | Arm64Encode::Rn(base_) | offset;
      case Unknown:
        UNREACHABLE();
    }
    return 0;
  }

  AddressType type() const { return type_; }
  Register base() const { return base_; }
  int32_t offset() const { return offset_; }

  Address() : type_(Unknown), base_(kNoRegister), offset_(0) {}

  AddressType type_;
  Register base_;
  int32_t offset_;

  friend class Assembler;
};
class FieldAddress : public Address {
 public:
  static bool CanHoldOffset(int32_t offset,
                            AddressType at = Offset,
                            OperandSize sz = kEightBytes) {
    return Address::CanHoldOffset(offset - kHeapObjectTag, at, sz);
  }

  FieldAddress(Register base, int32_t disp)
      : Address(base, disp - kHeapObjectTag) {}

  // This addressing mode does not exist.
  FieldAddress(Register base, Register disp) = delete;

  FieldAddress(const FieldAddress& other) : Address(other) {}

  FieldAddress& operator=(const FieldAddress& other) {
    Address::operator=(other);
    return *this;
  }
};
class Operand : public ValueObject {
 public:
  enum OperandType {
    Shifted,
    Extended,
    Immediate,
    BitfieldImm,
    Unknown,
  };

  // Data-processing operand - Uninitialized.
  Operand() : encoding_(-1), type_(Unknown) {}

  // Data-processing operands - Copy constructor.
  Operand(const Operand& other)
      : ValueObject(), encoding_(other.encoding_), type_(other.type_) {}

  Operand& operator=(const Operand& other) {
    type_ = other.type_;
    encoding_ = other.encoding_;
    return *this;
  }

  explicit Operand(Register rm) {
    ASSERT((rm != R31) && (rm != CSP));
    encoding_ = Arm64Encode::Rm(rm);
    type_ = Shifted;
  }

  Operand(Register rm, Shift shift, int32_t imm) {
    ASSERT(Utils::IsUint(6, imm));
    ASSERT((rm != R31) && (rm != CSP));
    encoding_ = (imm << kImm6Shift) | Arm64Encode::Rm(rm) |
                (static_cast<int32_t>(shift) << kShiftTypeShift);
    type_ = Shifted;
  }

  // This operand type does not exist.
  Operand(Register rm, Shift shift, Register r) = delete;

  Operand(Register rm, Extend extend, int32_t imm) {
    ASSERT(Utils::IsUint(3, imm));
    ASSERT((rm != R31) && (rm != CSP));
    encoding_ = B21 | Arm64Encode::Rm(rm) |
                (static_cast<int32_t>(extend) << kExtendTypeShift) |
                ((imm & 0x7) << kImm3Shift);
    type_ = Extended;
  }

  // This operand type does not exist.
  Operand(Register rm, Extend extend, Register r) = delete;

  explicit Operand(int32_t imm) {
    if (Utils::IsUint(12, imm)) {
      encoding_ = imm << kImm12Shift;
    } else {
      // imm only has bits in [12, 24) set.
      ASSERT(((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12)));
      encoding_ = B22 | ((imm >> 12) << kImm12Shift);
    }
    type_ = Immediate;
  }

  // Encodes the value of an immediate for a logical operation.
  // Since these values are difficult to craft by hand, instead pass the
  // logical mask to the function IsImmLogical to get n, imm_s, and
  // imm_r. Takes s before r like DecodeBitMasks from Appendix G but unlike
  // the disassembly of the *bfm instructions.
  Operand(uint8_t n, int8_t imm_s, int8_t imm_r) {
    ASSERT((n == 1) || (n == 0));
    ASSERT(Utils::IsUint(6, imm_s) && Utils::IsUint(6, imm_r));
    type_ = BitfieldImm;
    encoding_ = (static_cast<int32_t>(n) << kNShift) |
                (static_cast<int32_t>(imm_s) << kImmSShift) |
                (static_cast<int32_t>(imm_r) << kImmRShift);
  }

  // Test if a given value can be encoded in the immediate field of a logical
  // instruction.
  // If it can be encoded, the function returns true, and values pointed to by
  // n, imm_s and imm_r are updated with immediates encoded in the format
  // required by the corresponding fields in the logical instruction.
  // If it can't be encoded, the function returns false, and the operand is
  // undefined.
  static bool IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op);
  // An immediate imm can be an operand to add/sub when the return value is
  // Immediate, or a logical operation over sz bits when the return value is
  // BitfieldImm. If the return value is Unknown, then the immediate can't be
  // used as an operand in either instruction. The encoded operand is written
  // to op.
  static OperandType CanHold(int64_t imm, uint8_t sz, Operand* op) {
    ASSERT(op != nullptr);
    ASSERT((sz == kXRegSizeInBits) || (sz == kWRegSizeInBits));
    if (Utils::IsUint(12, imm)) {
      op->encoding_ = imm << kImm12Shift;
      op->type_ = Immediate;
    } else if (((imm & 0xfff) == 0) && (Utils::IsUint(12, imm >> 12))) {
      op->encoding_ = B22 | ((imm >> 12) << kImm12Shift);
      op->type_ = Immediate;
    } else if (IsImmLogical(imm, sz, op)) {
      op->type_ = BitfieldImm;
    } else {
      op->encoding_ = 0;
      op->type_ = Unknown;
    }
    return op->type_;
  }
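  // For example (illustrative), CanHold(0xFF, kXRegSizeInBits, &op) returns
  // Immediate (fits in 12 bits), CanHold(0x1000, ...) returns Immediate via
  // the shifted-by-12 form, and CanHold(0x5555555555555555, ...) returns
  // BitfieldImm since the value is a repeating bit pattern.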

 private:
  uint32_t encoding() const { return encoding_; }
  OperandType type() const { return type_; }

  uint32_t encoding_;
  OperandType type_;

  friend class Assembler;
};
class Assembler : public AssemblerBase {
 public:
  explicit Assembler(ObjectPoolBuilder* object_pool_builder,
                     intptr_t far_branch_level = 0);
  ~Assembler() {}

  void PushRegister(Register r) { Push(r); }
  void PopRegister(Register r) { Pop(r); }

  void PushRegisterPair(Register r0, Register r1) { PushPair(r0, r1); }
  void PopRegisterPair(Register r0, Register r1) { PopPair(r0, r1); }

  void PushRegisters(const RegisterSet& registers);
  void PopRegisters(const RegisterSet& registers);

  void PushRegistersInOrder(std::initializer_list<Register> regs);

  // Push all registers which are callee-saved according to the ARM64 ABI.
  void PushNativeCalleeSavedRegisters();

  // Pop all registers which are callee-saved according to the ARM64 ABI.
  void PopNativeCalleeSavedRegisters();

  void ExtendValue(Register rd, Register rn, OperandSize sz) override;
  void ExtendAndSmiTagValue(Register rd,
                            Register rn,
                            OperandSize sz = kEightBytes) override;

  void Drop(intptr_t stack_elements) {
    ASSERT(stack_elements >= 0);
    if (stack_elements > 0) {
      AddImmediate(SP, SP, stack_elements * target::kWordSize);
    }
  }

  void Align(intptr_t alignment, intptr_t offset);

  void Bind(Label* label) override;
  // Unconditional jump to a given label. [distance] is ignored on ARM.
  void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
  // Unconditional jump to a given address in register.
  void Jump(Register target) { br(target); }
  // Unconditional jump to a given address in memory. Clobbers TMP.
  void Jump(const Address& address) {
    ldr(TMP, address);
    br(TMP);
  }

  void LoadAcquire(Register dst,
                   const Address& address,
                   OperandSize size = kEightBytes) override {
    // ldar does not feature an address operand.
    ASSERT(address.type() == Address::AddressType::Offset);
    Register src = address.base();
    if (address.offset() != 0) {
      AddImmediate(TMP2, src, address.offset());
      src = TMP2;
    }
    ldar(dst, src, size);
    if (FLAG_target_thread_sanitizer) {
      TsanLoadAcquire(src);
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void LoadAcquireCompressed(Register dst, const Address& address) override {
    LoadAcquire(dst, address, kObjectBytes);
    add(dst, dst, Operand(HEAP_BITS, LSL, 32));
  }
#endif
  void StoreRelease(Register src,
                    const Address& address,
                    OperandSize size = kEightBytes) override {
    // stlr does not feature an address operand.
    ASSERT(address.type() == Address::AddressType::Offset);
    Register dst = address.base();
    if (address.offset() != 0) {
      AddImmediate(TMP2, dst, address.offset());
      dst = TMP2;
    }
    stlr(src, dst, size);
    if (FLAG_target_thread_sanitizer) {
      TsanStoreRelease(dst);
    }
  }

  void CompareWithMemoryValue(Register value,
                              Address address,
                              OperandSize sz = kEightBytes) override {
    Load(TMP, address, sz);
    cmp(value, Operand(TMP), sz);
  }

  bool use_far_branches() const {
    return FLAG_use_far_branches || use_far_branches_;
  }

  void set_use_far_branches(bool b) { use_far_branches_ = b; }

  // Debugging and bringup support.
  void Breakpoint() override { brk(0); }

  void SetPrologueOffset() {
    if (prologue_offset_ == -1) {
      prologue_offset_ = CodeSize();
    }
  }
  void ReserveAlignedFrameSpace(intptr_t frame_space);

  // In debug mode, this generates code to check that:
  // FP + kExitLinkSlotFromEntryFp == SP
  // or triggers breakpoint otherwise.
  void EmitEntryFrameVerification();

  // Instruction pattern from entrypoint is used in Dart frame prologs
  // to set up the frame and save a PC which can be used to figure out the
  // RawInstruction object corresponding to the code running in the frame.
  static constexpr intptr_t kEntryPointToPcMarkerOffset = 0;
  static intptr_t EntryPointToPcMarkerOffset() {
    return kEntryPointToPcMarkerOffset;
  }

  // Emit data (e.g. encoded instruction or immediate) in instruction stream.
  void Emit(int32_t value);
  void Emit64(int64_t value);

  // On some other platforms, we draw a distinction between safe and unsafe
  // smis.
  static bool IsSafe(const Object& object) { return true; }
  static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
  // Addition and subtraction.
  // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
  // For an unmodified rm in this case, use Operand(rm, UXTX, 0);
  void add(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    AddSubHelper(sz, false, false, rd, rn, o);
  }
  void adds(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    AddSubHelper(sz, true, false, rd, rn, o);
  }
  void sub(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    AddSubHelper(sz, false, true, rd, rn, o);
  }
  void subs(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    AddSubHelper(sz, true, true, rd, rn, o);
  }
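  // For example (illustrative), "add csp, csp, x0" must use the extended
  // operand form so that R31 is interpreted as CSP rather than ZR:
  //
  //   __ add(CSP, CSP, Operand(R0, UXTX, 0));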
  void addw(Register rd, Register rn, Operand o) { add(rd, rn, o, kFourBytes); }
  void addsw(Register rd, Register rn, Operand o) {
    adds(rd, rn, o, kFourBytes);
  }
  void subw(Register rd, Register rn, Operand o) { sub(rd, rn, o, kFourBytes); }
  void subsw(Register rd, Register rn, Operand o) {
    subs(rd, rn, o, kFourBytes);
  }

  // Addition and subtraction with carry.
  void adc(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kEightBytes, false, false, rd, rn, rm);
  }
  void adcs(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kEightBytes, true, false, rd, rn, rm);
  }
  void adcw(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kFourBytes, false, false, rd, rn, rm);
  }
  void adcsw(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kFourBytes, true, false, rd, rn, rm);
  }
  void sbc(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kEightBytes, false, true, rd, rn, rm);
  }
  void sbcs(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kEightBytes, true, true, rd, rn, rm);
  }
  void sbcw(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kFourBytes, false, true, rd, rn, rm);
  }
  void sbcsw(Register rd, Register rn, Register rm) {
    AddSubWithCarryHelper(kFourBytes, true, true, rd, rn, rm);
  }

  // PC relative immediate add. imm is in bytes.
  void adr(Register rd, const Immediate& imm) { EmitPCRelOp(ADR, rd, imm); }

  // Bitfield operations.
  // Bitfield move.
  // If s >= r then Rd[s-r:0] := Rn[s:r], else Rd[bitwidth+s-r:bitwidth-r] :=
  // Rn[s:0].
  void bfm(Register rd,
           Register rn,
           int r_imm,
           int s_imm,
           OperandSize size = kEightBytes) {
    EmitBitfieldOp(BFM, rd, rn, r_imm, s_imm, size);
  }

  // Signed bitfield move.
  void sbfm(Register rd,
            Register rn,
            int r_imm,
            int s_imm,
            OperandSize size = kEightBytes) {
    EmitBitfieldOp(SBFM, rd, rn, r_imm, s_imm, size);
  }

  // Unsigned bitfield move.
  void ubfm(Register rd,
            Register rn,
            int r_imm,
            int s_imm,
            OperandSize size = kEightBytes) {
    EmitBitfieldOp(UBFM, rd, rn, r_imm, s_imm, size);
  }

  // Bitfield insert. Takes the low width bits and replaces bits in rd with
  // them, starting at low_bit.
  void bfi(Register rd,
           Register rn,
           int low_bit,
           int width,
           OperandSize size = kEightBytes) {
    int wordsize = size == kEightBytes ? 64 : 32;
    EmitBitfieldOp(BFM, rd, rn, -low_bit & (wordsize - 1), width - 1, size);
  }
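  // For example (illustrative), bfi(R0, R1, 8, 4) inserts the low 4 bits of
  // x1 into x0 at bits [11:8], leaving the other bits of x0 unchanged.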

  // Bitfield extract and insert low. Takes width bits, starting at low_bit and
  // replaces the low width bits of rd with them.
  void bfxil(Register rd,
             Register rn,
             int low_bit,
             int width,
             OperandSize size = kEightBytes) {
    EmitBitfieldOp(BFM, rd, rn, low_bit, low_bit + width - 1, size);
  }

  // Signed bitfield insert in zero. Takes the low width bits, sign extends
  // them and writes them to rd, starting at low_bit, and zeroing bits below
  // that.
  void sbfiz(Register rd,
             Register rn,
             int low_bit,
             int width,
             OperandSize size = kEightBytes) {
    int wordsize = size == kEightBytes ? 64 : 32;
    EmitBitfieldOp(SBFM, rd, rn, (wordsize - low_bit) & (wordsize - 1),
                   width - 1, size);
  }

  // Signed bitfield extract. Takes width bits, starting at low_bit, sign
  // extends them and writes them to rd, starting at the lowest bit.
  void sbfx(Register rd,
            Register rn,
            int low_bit,
            int width,
            OperandSize size = kEightBytes) {
    EmitBitfieldOp(SBFM, rd, rn, low_bit, low_bit + width - 1, size);
  }

  // Unsigned bitfield insert in zero. Takes the low width bits and writes
  // them to rd, starting at low_bit, and zeroing bits above and below.
  void ubfiz(Register rd,
             Register rn,
             int low_bit,
             int width,
             OperandSize size = kEightBytes) {
    int wordsize = size == kEightBytes ? 64 : 32;
    ASSERT(width > 0);
    ASSERT(low_bit < wordsize);
    EmitBitfieldOp(UBFM, rd, rn, (-low_bit) & (wordsize - 1), width - 1, size);
  }

  // Unsigned bitfield extract. Takes the width bits, starting at low_bit and
  // writes them to the low bits of rd zeroing bits above.
  void ubfx(Register rd,
            Register rn,
            int low_bit,
            int width,
            OperandSize size = kEightBytes) {
    EmitBitfieldOp(UBFM, rd, rn, low_bit, low_bit + width - 1, size);
  }
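  // For example (illustrative), ubfx(R0, R1, 16, 8) extracts bits [23:16] of
  // x1 into the low byte of x0; sbfx does the same but sign-extends the
  // extracted field.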

  // Sign extend byte->64 bit.
  void sxtb(Register rd, Register rn) {
    EmitBitfieldOp(SBFM, rd, rn, 0, 7, kEightBytes);
  }

  // Sign extend halfword->64 bit.
  void sxth(Register rd, Register rn) {
    EmitBitfieldOp(SBFM, rd, rn, 0, 15, kEightBytes);
  }

  // Sign extend word->64 bit.
  void sxtw(Register rd, Register rn) {
    EmitBitfieldOp(SBFM, rd, rn, 0, 31, kEightBytes);
  }

  // Zero/unsigned extend byte->64 bit.
  void uxtb(Register rd, Register rn) {
    EmitBitfieldOp(UBFM, rd, rn, 0, 7, kEightBytes);
  }

  // Zero/unsigned extend halfword->64 bit.
  void uxth(Register rd, Register rn) {
    EmitBitfieldOp(UBFM, rd, rn, 0, 15, kEightBytes);
  }

  // Zero/unsigned extend word->64 bit.
  void uxtw(Register rd, Register rn) {
    EmitBitfieldOp(UBFM, rd, rn, 0, 31, kEightBytes);
  }
  // Logical immediate operations.
  void andi(Register rd,
            Register rn,
            const Immediate& imm,
            OperandSize sz = kEightBytes) {
    ASSERT(sz == kEightBytes || sz == kFourBytes);
    const int width = sz == kEightBytes ? kXRegSizeInBits : kWRegSizeInBits;
    Operand imm_op;
    const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
    ASSERT(immok);
    EmitLogicalImmOp(ANDI, rd, rn, imm_op, sz);
  }
  void orri(Register rd,
            Register rn,
            const Immediate& imm,
            OperandSize sz = kEightBytes) {
    ASSERT(sz == kEightBytes || sz == kFourBytes);
    const int width = sz == kEightBytes ? kXRegSizeInBits : kWRegSizeInBits;
    Operand imm_op;
    const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
    ASSERT(immok);
    EmitLogicalImmOp(ORRI, rd, rn, imm_op, sz);
  }
  void eori(Register rd,
            Register rn,
            const Immediate& imm,
            OperandSize sz = kEightBytes) {
    ASSERT(sz == kEightBytes || sz == kFourBytes);
    const int width = sz == kEightBytes ? kXRegSizeInBits : kWRegSizeInBits;
    Operand imm_op;
    const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
    ASSERT(immok);
    EmitLogicalImmOp(EORI, rd, rn, imm_op, sz);
  }
  void andis(Register rd,
             Register rn,
             const Immediate& imm,
             OperandSize sz = kEightBytes) {
    ASSERT(sz == kEightBytes || sz == kFourBytes);
    const int width = sz == kEightBytes ? kXRegSizeInBits : kWRegSizeInBits;
    Operand imm_op;
    const bool immok = Operand::IsImmLogical(imm.value(), width, &imm_op);
    ASSERT(immok);
    EmitLogicalImmOp(ANDIS, rd, rn, imm_op, sz);
  }

  // Logical (shifted) register operations.
  void and_(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(AND, rd, rn, o, sz);
  }
  void bic(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(BIC, rd, rn, o, sz);
  }
  void orr(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(ORR, rd, rn, o, sz);
  }
  void orn(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(ORN, rd, rn, o, sz);
  }
  void eor(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(EOR, rd, rn, o, sz);
  }
  void eon(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(EON, rd, rn, o, sz);
  }
  void ands(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(ANDS, rd, rn, o, sz);
  }
  void bics(Register rd, Register rn, Operand o, OperandSize sz = kEightBytes) {
    EmitLogicalShiftOp(BICS, rd, rn, o, sz);
  }
  void andw_(Register rd, Register rn, Operand o) {
    and_(rd, rn, o, kFourBytes);
  }
  void orrw(Register rd, Register rn, Operand o) { orr(rd, rn, o, kFourBytes); }
  void ornw(Register rd, Register rn, Operand o) { orn(rd, rn, o, kFourBytes); }
  void eorw(Register rd, Register rn, Operand o) { eor(rd, rn, o, kFourBytes); }

  // Count leading zero bits.
  void clz(Register rd, Register rn) {
    EmitMiscDP1Source(CLZ, rd, rn, kEightBytes);
  }
  void clzw(Register rd, Register rn) {
    EmitMiscDP1Source(CLZ, rd, rn, kFourBytes);
  }

  // Reverse bits.
  void rbit(Register rd, Register rn) {
    EmitMiscDP1Source(RBIT, rd, rn, kEightBytes);
  }

  // Misc. arithmetic.
  void udiv(Register rd,
            Register rn,
            Register rm,
            OperandSize sz = kEightBytes) {
    EmitMiscDP2Source(UDIV, rd, rn, rm, sz);
  }
  void sdiv(Register rd,
            Register rn,
            Register rm,
            OperandSize sz = kEightBytes) {
    EmitMiscDP2Source(SDIV, rd, rn, rm, sz);
  }
  void lslv(Register rd,
            Register rn,
            Register rm,
            OperandSize sz = kEightBytes) {
    EmitMiscDP2Source(LSLV, rd, rn, rm, sz);
  }
  void lsrv(Register rd,
            Register rn,
            Register rm,
            OperandSize sz = kEightBytes) {
    EmitMiscDP2Source(LSRV, rd, rn, rm, sz);
  }
  void asrv(Register rd,
            Register rn,
            Register rm,
            OperandSize sz = kEightBytes) {
    EmitMiscDP2Source(ASRV, rd, rn, rm, sz);
  }
  void sdivw(Register rd, Register rn, Register rm) {
    sdiv(rd, rn, rm, kFourBytes);
  }
  void lslvw(Register rd, Register rn, Register rm) {
    lslv(rd, rn, rm, kFourBytes);
  }
  void lsrvw(Register rd, Register rn, Register rm) {
    lsrv(rd, rn, rm, kFourBytes);
  }
  void asrvw(Register rd, Register rn, Register rm) {
    asrv(rd, rn, rm, kFourBytes);
  }
  void madd(Register rd,
            Register rn,
            Register rm,
            Register ra,
            OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(MADD, rd, rn, rm, ra, sz);
  }
  void msub(Register rd,
            Register rn,
            Register rm,
            Register ra,
            OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(MSUB, rd, rn, rm, ra, sz);
  }
  // Signed Multiply High
  // rd <- (rn * rm)[127:64]
  void smulh(Register rd,
             Register rn,
             Register rm,
             OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(SMULH, rd, rn, rm, R31, sz);
  }
  // Unsigned Multiply High
  // rd <- (rn * rm)[127:64]
  void umulh(Register rd,
             Register rn,
             Register rm,
             OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(UMULH, rd, rn, rm, R31, sz);
  }
  void umaddl(Register rd,
              Register rn,
              Register rm,
              Register ra,
              OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(UMADDL, rd, rn, rm, ra, sz);
  }
  // Unsigned Multiply Long
  // rd:uint64 <- rn:uint32 * rm:uint32
  void umull(Register rd,
             Register rn,
             Register rm,
             OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(UMADDL, rd, rn, rm, ZR, sz);
  }
  void smaddl(Register rd,
              Register rn,
              Register rm,
              Register ra,
              OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(SMADDL, rd, rn, rm, ra, sz);
  }
  // Signed Multiply Long
  // rd:int64 <- rn:int32 * rm:int32
  void smull(Register rd,
             Register rn,
             Register rm,
             OperandSize sz = kEightBytes) {
    EmitMiscDP3Source(SMADDL, rd, rn, rm, ZR, sz);
  }

  // Move wide immediate.
  void movk(Register rd, const Immediate& imm, int hw_idx) {
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitMoveWideOp(MOVK, crd, imm, hw_idx, kEightBytes);
  }
  void movn(Register rd, const Immediate& imm, int hw_idx) {
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitMoveWideOp(MOVN, crd, imm, hw_idx, kEightBytes);
  }
  void movz(Register rd, const Immediate& imm, int hw_idx) {
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitMoveWideOp(MOVZ, crd, imm, hw_idx, kEightBytes);
  }

  // Loads and Stores.
  void ldr(Register rt, Address a, OperandSize sz = kEightBytes) {
    ASSERT((rt != CSP) && (rt != R31));
    ASSERT((a.type() != Address::PairOffset) &&
           (a.type() != Address::PairPostIndex) &&
           (a.type() != Address::PairPreIndex));
    if (a.type() == Address::PCOffset) {
      ASSERT(sz == kEightBytes);
      EmitLoadRegLiteral(LDRpc, rt, a, sz);
    } else {
      if (IsSignedOperand(sz)) {
        EmitLoadStoreReg(LDRS, rt, a, sz);
      } else {
        EmitLoadStoreReg(LDR, rt, a, sz);
      }
    }
  }
  void str(Register rt, Address a, OperandSize sz = kEightBytes) {
    ASSERT((rt != CSP) && (rt != R31));
    ASSERT((a.type() != Address::PairOffset) &&
           (a.type() != Address::PairPostIndex) &&
           (a.type() != Address::PairPreIndex));
    EmitLoadStoreReg(STR, rt, a, sz);
  }

  void ldp(Register rt, Register rt2, Address a, OperandSize sz = kEightBytes) {
    ASSERT((rt != CSP) && (rt != R31));
    ASSERT((a.type() == Address::PairOffset) ||
           (a.type() == Address::PairPostIndex) ||
           (a.type() == Address::PairPreIndex));
    EmitLoadStoreRegPair(LDP, rt, rt2, a, sz);
  }
  void stp(Register rt, Register rt2, Address a, OperandSize sz = kEightBytes) {
    ASSERT((rt != CSP) && (rt != R31));
    ASSERT((a.type() == Address::PairOffset) ||
           (a.type() == Address::PairPostIndex) ||
           (a.type() == Address::PairPreIndex));
    EmitLoadStoreRegPair(STP, rt, rt2, a, sz);
  }
  void fldp(VRegister rt, VRegister rt2, Address a, OperandSize sz) {
    ASSERT((a.type() == Address::PairOffset) ||
           (a.type() == Address::PairPostIndex) ||
           (a.type() == Address::PairPreIndex));
    EmitLoadStoreVRegPair(FLDP, rt, rt2, a, sz);
  }
  void fstp(VRegister rt, VRegister rt2, Address a, OperandSize sz) {
    ASSERT((a.type() == Address::PairOffset) ||
           (a.type() == Address::PairPostIndex) ||
           (a.type() == Address::PairPreIndex));
    EmitLoadStoreVRegPair(FSTP, rt, rt2, a, sz);
  }

  void ldxr(Register rt, Register rn, OperandSize size = kEightBytes) {
    // rt = value
    // rn = address
    EmitLoadStoreExclusive(LDXR, R31, rn, rt, size);
  }
  void stxr(Register rs,
            Register rt,
            Register rn,
            OperandSize size = kEightBytes) {
    // rs = status (1 = failure, 0 = success)
    // rt = value
    // rn = address
    ASSERT(rs != rt);
    ASSERT((rs != rn) || (rs == ZR));
    EmitLoadStoreExclusive(STXR, rs, rn, rt, size);
  }
  void clrex() {
    const int32_t encoding = static_cast<int32_t>(CLREX);
    Emit(encoding);
  }

  void ldar(Register rt, Register rn, OperandSize sz = kEightBytes) {
    EmitLoadStoreExclusive(LDAR, R31, rn, rt, sz);
  }

  void stlr(Register rt, Register rn, OperandSize sz = kEightBytes) {
    EmitLoadStoreExclusive(STLR, R31, rn, rt, sz);
  }

  void ldclr(Register rs,
             Register rt,
             Register rn,
             OperandSize sz = kEightBytes) {
    // rs = value in
    // rt = value out
    // rn = address
    EmitAtomicMemory(LDCLR, rs, rn, rt, sz);
  }
  void ldset(Register rs,
             Register rt,
             Register rn,
             OperandSize sz = kEightBytes) {
    // rs = value in
    // rt = value out
    // rn = address
    EmitAtomicMemory(LDSET, rs, rn, rt, sz);
  }

  // Conditional select.
  void csel(Register rd, Register rn, Register rm, Condition cond) {
    EmitConditionalSelect(CSEL, rd, rn, rm, cond, kEightBytes);
  }
  void csinc(Register rd,
             Register rn,
             Register rm,
             Condition cond,
             OperandSize sz = kEightBytes) {
    EmitConditionalSelect(CSINC, rd, rn, rm, cond, sz);
  }
  void cinc(Register rd, Register rn, Condition cond) {
    csinc(rd, rn, rn, InvertCondition(cond));
  }
  void cset(Register rd, Condition cond) {
    csinc(rd, ZR, ZR, InvertCondition(cond));
  }
  void csinv(Register rd, Register rn, Register rm, Condition cond) {
    EmitConditionalSelect(CSINV, rd, rn, rm, cond, kEightBytes);
  }
  void cinv(Register rd, Register rn, Condition cond) {
    csinv(rd, rn, rn, InvertCondition(cond));
  }
  void csetm(Register rd, Condition cond) {
    csinv(rd, ZR, ZR, InvertCondition(cond));
  }
  void csneg(Register rd, Register rn, Register rm, Condition cond) {
    EmitConditionalSelect(CSNEG, rd, rn, rm, cond, kEightBytes);
  }
  void cneg(Register rd, Register rn, Condition cond) {
    EmitConditionalSelect(CSNEG, rd, rn, rn, InvertCondition(cond),
                          kEightBytes);
  }
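  // For example (illustrative), after a compare the sequence
  //
  //   __ cset(R0, EQ);
  //
  // materializes the comparison result as 0 or 1 in x0; it is encoded as
  // "csinc x0, zr, zr, ne" via the inverted condition, as above.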

  // Comparison.
  // rn cmp o.
  // For add and sub, to use CSP for rn, o must be of type Operand::Extend.
  // For an unmodified rm in this case, use Operand(rm, UXTX, 0);
  void cmp(Register rn, Operand o, OperandSize sz = kEightBytes) {
    subs(ZR, rn, o, sz);
  }
  void cmpw(Register rn, Operand o) { cmp(rn, o, kFourBytes); }
  // rn cmp -o.
  void cmn(Register rn, Operand o, OperandSize sz = kEightBytes) {
    adds(ZR, rn, o, sz);
  }

  void CompareRegisters(Register rn, Register rm) override {
    if (rn == CSP) {
      // UXTX 0 on a 64-bit register (rm) is a nop, but forces R31 to be
      // interpreted as CSP.
      cmp(CSP, Operand(rm, UXTX, 0));
    } else {
      cmp(rn, Operand(rm));
    }
  }

  void CompareObjectRegisters(Register rn, Register rm) override {
    ASSERT(rn != CSP);
    cmp(rn, Operand(rm), kObjectBytes);
  }

  // Conditional branch.
  void b(Label* label, Condition cond = AL) {
    if (cond == AL) {
      EmitUnconditionalBranch(B, label);
    } else {
      EmitConditionalBranch(BCOND, cond, label);
    }
  }

  void b(int32_t offset) { EmitUnconditionalBranchOp(B, offset); }
  void bl(int32_t offset) {
    // CLOBBERS_LR uses __ to access the assembler.
#define __ this->
    CLOBBERS_LR(EmitUnconditionalBranchOp(BL, offset));
#undef __
  }

  // Branches to the given label if the condition holds.
  // [distance] is ignored on ARM.
  void BranchIf(Condition condition,
                Label* label,
                JumpDistance distance = kFarJump) {
    b(label, condition);
  }
  void BranchIfZero(Register rn,
                    Label* label,
                    JumpDistance distance = kFarJump) {
    cbz(label, rn);
  }
  void BranchIfBit(Register rn,
                   intptr_t bit_number,
                   Condition condition,
                   Label* label,
                   JumpDistance distance = kFarJump) {
    if (condition == ZERO) {
      tbz(label, rn, bit_number);
    } else if (condition == NOT_ZERO) {
      tbnz(label, rn, bit_number);
    } else {
      UNREACHABLE();
    }
  }

  void cbz(Label* label, Register rt, OperandSize sz = kEightBytes) {
    EmitCompareAndBranch(CBZ, rt, label, sz);
  }

  void cbnz(Label* label, Register rt, OperandSize sz = kEightBytes) {
    EmitCompareAndBranch(CBNZ, rt, label, sz);
  }

  // Generate 64/32-bit compare with zero and branch when condition allows to
  // use a single instruction: cbz/cbnz/tbz/tbnz.
  void CompareWithZeroAndBranch(Register rn,
                                Condition cond,
                                Label* label,
                                OperandSize sz = kEightBytes);

  // Test bit and branch if zero.
  void tbz(Label* label, Register rt, intptr_t bit_number) {
    EmitTestAndBranch(TBZ, rt, bit_number, label);
  }
  void tbnz(Label* label, Register rt, intptr_t bit_number) {
    EmitTestAndBranch(TBNZ, rt, bit_number, label);
  }

  // Branch, link, return.
  void br(Register rn) { EmitUnconditionalBranchRegOp(BR, rn); }
  void blr(Register rn) {
    // CLOBBERS_LR uses __ to access the assembler.
#define __ this->
    CLOBBERS_LR(EmitUnconditionalBranchRegOp(BLR, rn));
#undef __
  }
  void ret(Register rn = kNoRegister2) {
    if (rn == kNoRegister2) {
      // READS_RETURN_ADDRESS_FROM_LR uses __ to access the assembler.
#define __ this->
      READS_RETURN_ADDRESS_FROM_LR(rn = LR);
#undef __
    }
    EmitUnconditionalBranchRegOp(RET, rn);
  }

  // Breakpoint.
  void brk(uint16_t imm) { EmitExceptionGenOp(BRK, imm); }

  // Double floating point.
  bool fmovdi(VRegister vd, double immd) {
    int64_t imm64 = bit_cast<int64_t, double>(immd);
    const uint8_t bit7 = imm64 >> 63;
    const uint8_t bit6 = (~(imm64 >> 62)) & 0x1;
    const uint8_t bit54 = (imm64 >> 52) & 0x3;
    const uint8_t bit30 = (imm64 >> 48) & 0xf;
    const uint8_t imm8 = (bit7 << 7) | (bit6 << 6) | (bit54 << 4) | bit30;
    const int64_t expimm8 = Instr::VFPExpandImm(imm8);
    if (imm64 != expimm8) {
      return false;
    }
    EmitFPImm(FMOVDI, vd, imm8);
    return true;
  }
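  // For example (illustrative), fmovdi(V0, 1.0) succeeds because 1.0 fits the
  // 8-bit VFP immediate format, while fmovdi(V0, 0.1) returns false and the
  // caller must materialize the constant some other way.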
  void fmovsr(VRegister vd, Register rn) {
    ASSERT(rn != R31);
    ASSERT(rn != CSP);
    const Register crn = ConcreteRegister(rn);
    EmitFPIntCvtOp(FMOVSR, static_cast<Register>(vd), crn, kFourBytes);
  }
  void fmovrs(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FMOVRS, crd, static_cast<Register>(vn), kFourBytes);
  }
  void fmovdr(VRegister vd, Register rn) {
    ASSERT(rn != R31);
    ASSERT(rn != CSP);
    const Register crn = ConcreteRegister(rn);
    EmitFPIntCvtOp(FMOVDR, static_cast<Register>(vd), crn);
  }
  void fmovrd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FMOVRD, crd, static_cast<Register>(vn));
  }
  void scvtfdx(VRegister vd, Register rn) {
    ASSERT(rn != R31);
    ASSERT(rn != CSP);
    const Register crn = ConcreteRegister(rn);
    EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn);
  }
  void scvtfdw(VRegister vd, Register rn) {
    ASSERT(rn != R31);
    ASSERT(rn != CSP);
    const Register crn = ConcreteRegister(rn);
    EmitFPIntCvtOp(SCVTFD, static_cast<Register>(vd), crn, kFourBytes);
  }
  void fcvtzsxd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTZS_D, crd, static_cast<Register>(vn));
  }
  void fcvtzswd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTZS_D, crd, static_cast<Register>(vn), kFourBytes);
  }
  void fcvtmsxd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTMS_D, crd, static_cast<Register>(vn));
  }
  void fcvtmswd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTMS_D, crd, static_cast<Register>(vn), kFourBytes);
  }
  void fcvtpsxd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTPS_D, crd, static_cast<Register>(vn));
  }
  void fcvtpswd(Register rd, VRegister vn) {
    ASSERT(rd != R31);
    ASSERT(rd != CSP);
    const Register crd = ConcreteRegister(rd);
    EmitFPIntCvtOp(FCVTPS_D, crd, static_cast<Register>(vn), kFourBytes);
  }
  void fmovdd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FMOVDD, vd, vn); }
  void fabsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FABSD, vd, vn); }
  void fnegd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FNEGD, vd, vn); }
  void fsqrtd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FSQRTD, vd, vn); }
  void fcvtsd(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTSD, vd, vn); }
  void fcvtds(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTDS, vd, vn); }
  void fldrq(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kQWord);
  }
  void fstrq(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kQWord);
  }
  void fldrd(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kDWord);
  }
  void fstrd(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kDWord);
  }
  void fldrs(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FLDR, static_cast<Register>(vt), a, kSWord);
  }
  void fstrs(VRegister vt, Address a) {
    ASSERT(a.type() != Address::PCOffset);
    EmitLoadStoreReg(FSTR, static_cast<Register>(vt), a, kSWord);
  }
  void fcmpd(VRegister vn, VRegister vm) { EmitFPCompareOp(FCMPD, vn, vm); }
  void fcmpdz(VRegister vn) { EmitFPCompareOp(FCMPZD, vn, V0); }
  void fmuld(VRegister vd, VRegister vn, VRegister vm) {
    EmitFPTwoSourceOp(FMULD, vd, vn, vm);
  }
  void fdivd(VRegister vd, VRegister vn, VRegister vm) {
    EmitFPTwoSourceOp(FDIVD, vd, vn, vm);
  }
  void faddd(VRegister vd, VRegister vn, VRegister vm) {
    EmitFPTwoSourceOp(FADDD, vd, vn, vm);
  }
  void fsubd(VRegister vd, VRegister vn, VRegister vm) {
    EmitFPTwoSourceOp(FSUBD, vd, vn, vm);
  }

  // SIMD operations.
  void vand(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VAND, vd, vn, vm);
  }
  void vorr(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VORR, vd, vn, vm);
  }
  void veor(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VEOR, vd, vn, vm);
  }
  void vaddw(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VADDW, vd, vn, vm);
  }
  void vaddx(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VADDX, vd, vn, vm);
  }
  void vsubw(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VSUBW, vd, vn, vm);
  }
  void vsubx(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VSUBX, vd, vn, vm);
  }
  void vadds(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VADDS, vd, vn, vm);
  }
  void vaddd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VADDD, vd, vn, vm);
  }
  void vsubs(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VSUBS, vd, vn, vm);
  }
  void vsubd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VSUBD, vd, vn, vm);
  }
  void vmuls(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMULS, vd, vn, vm);
  }
  void vmuld(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMULD, vd, vn, vm);
  }
  void vdivs(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VDIVS, vd, vn, vm);
  }
  void vdivd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VDIVD, vd, vn, vm);
  }
  void vceqs(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCEQS, vd, vn, vm);
  }
  void vceqd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCEQD, vd, vn, vm);
  }
  void vcgts(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCGTS, vd, vn, vm);
  }
  void vcgtd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCGTD, vd, vn, vm);
  }
  void vcges(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCGES, vd, vn, vm);
  }
  void vcged(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VCGED, vd, vn, vm);
  }
  void vmins(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMINS, vd, vn, vm);
  }
  void vmind(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMIND, vd, vn, vm);
  }
  void vmaxs(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMAXS, vd, vn, vm);
  }
  void vmaxd(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VMAXD, vd, vn, vm);
  }
  void vrecpss(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VRECPSS, vd, vn, vm);
  }
  void vrsqrtss(VRegister vd, VRegister vn, VRegister vm) {
    EmitSIMDThreeSameOp(VRSQRTSS, vd, vn, vm);
  }
  void vnot(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNOT, vd, vn); }
  void vabss(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSS, vd, vn); }
  void vabsd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VABSD, vd, vn); }
  void vnegs(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGS, vd, vn); }
  void vnegd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VNEGD, vd, vn); }
  void vsqrts(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTS, vd, vn); }
  void vsqrtd(VRegister vd, VRegister vn) { EmitSIMDTwoRegOp(VSQRTD, vd, vn); }
  void vrecpes(VRegister vd, VRegister vn) {
    EmitSIMDTwoRegOp(VRECPES, vd, vn);
  }
  void vrsqrtes(VRegister vd, VRegister vn) {
    EmitSIMDTwoRegOp(VRSQRTES, vd, vn);
  }
  void vdupw(VRegister vd, Register rn) {
    const VRegister vn = static_cast<VRegister>(rn);
    EmitSIMDCopyOp(VDUPI, vd, vn, kFourBytes, 0, 0);
  }
  void vdupx(VRegister vd, Register rn) {
    const VRegister vn = static_cast<VRegister>(rn);
    EmitSIMDCopyOp(VDUPI, vd, vn, kEightBytes, 0, 0);
  }
  void vdups(VRegister vd, VRegister vn, int32_t idx) {
    EmitSIMDCopyOp(VDUP, vd, vn, kSWord, 0, idx);
  }
  void vdupd(VRegister vd, VRegister vn, int32_t idx) {
    EmitSIMDCopyOp(VDUP, vd, vn, kDWord, 0, idx);
  }
  void vinsw(VRegister vd, int32_t didx, Register rn) {
    const VRegister vn = static_cast<VRegister>(rn);
    EmitSIMDCopyOp(VINSI, vd, vn, kFourBytes, 0, didx);
  }
  void vinsx(VRegister vd, int32_t didx, Register rn) {
    const VRegister vn = static_cast<VRegister>(rn);
    EmitSIMDCopyOp(VINSI, vd, vn, kEightBytes, 0, didx);
  }
  void vinss(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
    EmitSIMDCopyOp(VINS, vd, vn, kSWord, sidx, didx);
  }
  void vinsd(VRegister vd, int32_t didx, VRegister vn, int32_t sidx) {
    EmitSIMDCopyOp(VINS, vd, vn, kDWord, sidx, didx);
  }
  void vmovrs(Register rd, VRegister vn, int32_t sidx) {
    const VRegister vd = static_cast<VRegister>(rd);
    EmitSIMDCopyOp(VMOVW, vd, vn, kFourBytes, 0, sidx);
  }
  void vmovrd(Register rd, VRegister vn, int32_t sidx) {
    const VRegister vd = static_cast<VRegister>(rd);
    EmitSIMDCopyOp(VMOVX, vd, vn, kEightBytes, 0, sidx);
  }

  // Aliases.
  void mov(Register rd, Register rn) {
    if ((rd == CSP) || (rn == CSP)) {
      add(rd, rn, Operand(0));
    } else {
      orr(rd, ZR, Operand(rn));
    }
  }
  void movw(Register rd, Register rn) {
    if ((rd == CSP) || (rn == CSP)) {
      addw(rd, rn, Operand(0));
    } else {
      orrw(rd, ZR, Operand(rn));
    }
  }
  void vmov(VRegister vd, VRegister vn) { vorr(vd, vn, vn); }
  void mvn_(Register rd, Register rm) { orn(rd, ZR, Operand(rm)); }
  void mvnw(Register rd, Register rm) { ornw(rd, ZR, Operand(rm)); }
  void neg(Register rd, Register rm) { sub(rd, ZR, Operand(rm)); }
  void negs(Register rd, Register rm, OperandSize sz = kEightBytes) {
    subs(rd, ZR, Operand(rm), sz);
  }
  void negsw(Register rd, Register rm) { negs(rd, rm, kFourBytes); }
  void mul(Register rd, Register rn, Register rm) {
    madd(rd, rn, rm, ZR, kEightBytes);
  }
  void mulw(Register rd, Register rn, Register rm) {
    madd(rd, rn, rm, ZR, kFourBytes);
  }
  void Push(Register reg) {
    ASSERT(reg != PP);  // Only push PP with TagAndPushPP().
    str(reg, Address(SP, -1 * target::kWordSize, Address::PreIndex));
  }
  void Pop(Register reg) {
    ASSERT(reg != PP);  // Only pop PP with PopAndUntagPP().
    ldr(reg, Address(SP, 1 * target::kWordSize, Address::PostIndex));
  }
  void PushPair(Register low, Register high) {
    stp(low, high, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
  }
  void PopPair(Register low, Register high) {
    ldp(low, high, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
  }
  void PushFloat(VRegister reg) {
    fstrs(reg, Address(SP, -1 * kFloatSize, Address::PreIndex));
  }
  void PushDouble(VRegister reg) {
    fstrd(reg, Address(SP, -1 * kDoubleSize, Address::PreIndex));
  }
  void PushQuad(VRegister reg) {
    fstrq(reg, Address(SP, -1 * kQuadSize, Address::PreIndex));
  }
  void PopFloat(VRegister reg) {
    fldrs(reg, Address(SP, 1 * kFloatSize, Address::PostIndex));
  }
  void PopDouble(VRegister reg) {
    fldrd(reg, Address(SP, 1 * kDoubleSize, Address::PostIndex));
  }
  void PopQuad(VRegister reg) {
    fldrq(reg, Address(SP, 1 * kQuadSize, Address::PostIndex));
  }
  void PushDoublePair(VRegister low, VRegister high) {
    fstp(low, high, Address(SP, -2 * kDoubleSize, Address::PairPreIndex),
         kDWord);
  }
  void PopDoublePair(VRegister low, VRegister high) {
    fldp(low, high, Address(SP, 2 * kDoubleSize, Address::PairPostIndex),
         kDWord);
  }
  void PushQuadPair(VRegister low, VRegister high) {
    fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex), kQWord);
  }
  void PopQuadPair(VRegister low, VRegister high) {
    fldp(low, high, Address(SP, 2 * kQuadSize, Address::PairPostIndex), kQWord);
  }
  void TagAndPushPP() {
    // Add the heap object tag back to PP before putting it on the stack.
    add(TMP, PP, Operand(kHeapObjectTag));
    str(TMP, Address(SP, -1 * target::kWordSize, Address::PreIndex));
  }
  void TagAndPushPPAndPcMarker() {
    COMPILE_ASSERT(CODE_REG != TMP2);
    // Add the heap object tag back to PP before putting it on the stack.
    add(TMP2, PP, Operand(kHeapObjectTag));
    stp(TMP2, CODE_REG,
        Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
  }
  void PopAndUntagPP() {
    ldr(PP, Address(SP, 1 * target::kWordSize, Address::PostIndex));
    sub(PP, PP, Operand(kHeapObjectTag));
    // The caller of PopAndUntagPP() must explicitly allow use of popped PP.
    set_constant_pool_allowed(false);
  }
  void tst(Register rn, Operand o, OperandSize sz = kEightBytes) {
    ands(ZR, rn, o, sz);
  }
  void tsti(Register rn, const Immediate& imm, OperandSize sz = kEightBytes) {
    andis(ZR, rn, imm, sz);
  }

  void LslImmediate(Register rd,
                    Register rn,
                    int32_t shift,
                    OperandSize sz = kEightBytes) {
    const int32_t reg_size =
        (sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
    ASSERT((shift >= 0) && (shift < reg_size));
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1, sz);
  }
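  // For example (illustrative), LslImmediate(R0, R1, 3) encodes
  // "lsl x0, x1, #3" as "ubfm x0, x1, #61, #60", the standard alias
  // expansion for a 64-bit left shift by 3.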
  void LslImmediate(Register rd, int32_t shift, OperandSize sz = kEightBytes) {
    LslImmediate(rd, rd, shift, sz);
  }
  void LslRegister(Register dst, Register shift) override {
    lslv(dst, dst, shift);
  }
  void LsrImmediate(Register rd,
                    Register rn,
                    int shift,
                    OperandSize sz = kEightBytes) {
    const int reg_size =
        (sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
    ASSERT((shift >= 0) && (shift < reg_size));
    ubfm(rd, rn, shift, reg_size - 1, sz);
  }
  void LsrImmediate(Register rd, int32_t shift) override {
    LsrImmediate(rd, rd, shift);
  }
  void AsrImmediate(Register rd,
                    Register rn,
                    int shift,
                    OperandSize sz = kEightBytes) {
    const int reg_size =
        (sz == kEightBytes) ? kXRegSizeInBits : kWRegSizeInBits;
    ASSERT((shift >= 0) && (shift < reg_size));
    sbfm(rd, rn, shift, reg_size - 1, sz);
  }

  void VRecps(VRegister vd, VRegister vn);
  void VRSqrts(VRegister vd, VRegister vn);

  void SmiUntag(Register reg) { SmiUntag(reg, reg); }
  void SmiUntag(Register dst, Register src) {
    sbfm(dst, src, kSmiTagSize, target::kSmiBits + 1);
  }
  void SmiTag(Register reg) override { SmiTag(reg, reg); }
  void SmiTag(Register dst, Register src) {
    LslImmediate(dst, src, kSmiTagSize);
  }

  void SmiTagAndBranchIfOverflow(Register reg, Label* label) {
    COMPILE_ASSERT(kSmiTag == 0);
    adds(reg, reg, compiler::Operand(reg));  // SmiTag
    // If the value doesn't fit in a smi, the tagging changes the sign,
    // which causes the overflow flag to be set.
    b(label, OVERFLOW);
#if defined(DART_COMPRESSED_POINTERS)
    cmp(reg, compiler::Operand(reg, SXTW, 0));
    b(label, NOT_EQUAL);
#endif  // defined(DART_COMPRESSED_POINTERS)
  }
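  // For example (illustrative), with uncompressed pointers the helper above
  // computes "adds x0, x0, x0" and takes the slow path when the doubled value
  // overflows, i.e. when the original value does not fit in a Smi.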

  // Truncates upper bits.
  void LoadInt32FromBoxOrSmi(Register result, Register value) override {
    if (result == value) {
      ASSERT(TMP != value);
      MoveRegister(TMP, value);
      value = TMP;
    }
    ASSERT(value != result);
    Label done;
    sbfx(result, value, kSmiTagSize,
         Utils::Minimum(static_cast<intptr_t>(32), compiler::target::kSmiBits));
    BranchIfSmi(value, &done);
    LoadFieldFromOffset(result, value, target::Mint::value_offset(),
                        kFourBytes);
    Bind(&done);
  }

  void LoadInt64FromBoxOrSmi(Register result, Register value) override {
    if (result == value) {
      ASSERT(TMP != value);
      MoveRegister(TMP, value);
      value = TMP;
    }
    ASSERT(value != result);
    Label done;
    SmiUntag(result, value);
    BranchIfSmi(value, &done);
    LoadFieldFromOffset(result, value, target::Mint::value_offset());
    Bind(&done);
  }

  // For ARM, the near argument is ignored.
  void BranchIfNotSmi(Register reg,
                      Label* label,
                      JumpDistance distance = kFarJump) override {
    tbnz(label, reg, kSmiTag);
  }

  // For ARM, the near argument is ignored.
  void BranchIfSmi(Register reg,
                   Label* label,
                   JumpDistance distance = kFarJump) override {
    tbz(label, reg, kSmiTag);
  }

  void BranchLink(const Code& code,
                  ObjectPoolBuilderEntry::Patchability patchable =
                      ObjectPoolBuilderEntry::kNotPatchable,
                  CodeEntryKind entry_kind = CodeEntryKind::kNormal,
                  ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
                      ObjectPoolBuilderEntry::kSnapshotable);

  void BranchLinkPatchable(
      const Code& code,
      CodeEntryKind entry_kind = CodeEntryKind::kNormal,
      ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
          ObjectPoolBuilderEntry::kSnapshotable) {
    BranchLink(code, ObjectPoolBuilderEntry::kPatchable, entry_kind,
               snapshot_behavior);
  }

  // Emit a call that shares its object pool entries with other calls
  // that have the same equivalence marker.
  void BranchLinkWithEquivalence(
      const Code& code,
      const Object& equivalence,
      CodeEntryKind entry_kind = CodeEntryKind::kNormal);

  void Call(Address target) {
    // CLOBBERS_LR uses __ to access the assembler.
#define __ this->
    CLOBBERS_LR({
      ldr(LR, target);
      blr(LR);
    });
#undef __
  }
  void Call(const Code& code) { BranchLink(code); }

  // Clobbers LR.
  void Call(Register target) {
    // CLOBBERS_LR uses __ to access the assembler.
#define __ this->
    CLOBBERS_LR({ blr(target); });
#undef __
  }

  void AddImmediate(Register dest, int64_t imm) {
    AddImmediate(dest, dest, imm);
  }

  // Macros accepting a pp Register argument may attempt to load values from
  // the object pool when possible. Unless you are sure that the untagged object
  // pool pointer is in another register, or that it is not available at all,
  // PP should be passed for pp. `dest` can be TMP2, `rn` cannot. `dest` can be
  // TMP.
  void AddImmediate(Register dest,
                    Register rn,
                    int64_t imm,
                    OperandSize sz = kEightBytes) override;
  void AddImmediateSetFlags(Register dest,
                            Register rn,
                            int64_t imm,
                            OperandSize sz = kEightBytes) override;
  void AddRegisters(Register dest, Register src) {
    add(dest, dest, Operand(src));
  }
  void AddScaled(Register dest,
                 Register base,
                 Register index,
                 ScaleFactor scale,
                 int32_t disp) override {
    if (base == kNoRegister || base == ZR) {
      if (scale == TIMES_1) {
        AddImmediate(dest, index, disp);
      } else {
        orr(dest, ZR, Operand(index, LSL, scale));
        AddImmediate(dest, disp);
      }
    } else {
      add(dest, base, compiler::Operand(index, LSL, scale));
      AddImmediate(dest, disp);
    }
  }
  void SubImmediateSetFlags(Register dest,
                            Register rn,
                            int64_t imm,
                            OperandSize sz = kEightBytes) override;
  void SubRegisters(Register dest, Register src) {
    sub(dest, dest, Operand(src));
  }
  void MulImmediate(Register reg,
                    int64_t imm,
                    OperandSize width = kEightBytes) override {
    ASSERT(width == kFourBytes || width == kEightBytes);
    if (Utils::IsPowerOfTwo(imm)) {
      LslImmediate(reg, Utils::ShiftForPowerOfTwo(imm), width);
    } else {
      LoadImmediate(TMP, imm);
      if (width == kFourBytes) {
        mulw(reg, reg, TMP);
      } else {
        mul(reg, reg, TMP);
      }
    }
  }
  void AndImmediate(Register rd,
                    Register rn,
                    int64_t imm,
                    OperandSize sz = kEightBytes) override;
  void AndImmediate(Register rd, int64_t imm) override {
    AndImmediate(rd, rd, imm);
  }
  void AndRegisters(Register dst,
                    Register src1,
                    Register src2 = kNoRegister) override {
    ASSERT(src1 != src2);  // Likely a mistake.
    if (src2 == kNoRegister) {
      src2 = dst;
    }
    and_(dst, src2, Operand(src1));
  }
  void OrImmediate(Register rd,
                   Register rn,
                   int64_t imm,
                   OperandSize sz = kEightBytes);
  void OrImmediate(Register rd, int64_t imm) { OrImmediate(rd, rd, imm); }
  void XorImmediate(Register rd,
                    Register rn,
                    int64_t imm,
                    OperandSize sz = kEightBytes);
  void TestImmediate(Register rn, int64_t imm, OperandSize sz = kEightBytes);
  void CompareImmediate(Register rn,
                        int64_t imm,
                        OperandSize sz = kEightBytes) override;

  Address PrepareLargeOffset(Register base,
                             int32_t offset,
                             OperandSize sz,
                             Address::AddressType addr_type);
  void Load(Register dest,
            const Address& address,
            OperandSize sz = kEightBytes) override;
  // For loading indexed payloads out of tagged objects like Arrays. If the
  // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of
  // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed.
  void LoadIndexedPayload(Register dest,
                          Register base,
                          int32_t payload_offset,
                          Register index,
                          ScaleFactor scale,
                          OperandSize sz = kEightBytes) override {
    add(dest, base, Operand(index, LSL, scale));
    LoadFromOffset(dest, dest, payload_offset - kHeapObjectTag, sz);
  }
#if defined(DART_COMPRESSED_POINTERS)
  void LoadIndexedCompressed(Register dest,
                             Register base,
                             int32_t offset,
                             Register index) override {
    add(dest, base, Operand(index, LSL, TIMES_COMPRESSED_WORD_SIZE));
    LoadCompressed(dest, FieldAddress(dest, offset));
  }
#endif

  void LoadFromStack(Register dst, intptr_t depth);
  void StoreToStack(Register src, intptr_t depth);
  void CompareToStack(Register src, intptr_t depth);

  void Store(Register src,
             const Address& address,
             OperandSize sz = kEightBytes) override;
  void StoreZero(const Address& address, Register temp = kNoRegister) {
    Store(ZR, address);
  }

  void StorePairToOffset(Register low,
                         Register high,
                         Register base,
                         int32_t offset,
                         OperandSize sz = kEightBytes);

  void MoveUnboxedDouble(VRegister dst, VRegister src) {
    if (src != dst) {
      fmovdd(dst, src);
    }
  }

  void MoveUnboxedSimd128(VRegister dst, VRegister src) {
    if (src != dst) {
      vmov(dst, src);
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void LoadCompressed(Register dest, const Address& slot) override;
#endif

  void StoreBarrier(Register object,
                    Register value,
                    CanBeSmi can_value_be_smi,
                    Register scratch) override;
  void ArrayStoreBarrier(Register object,
                         Register slot,
                         Register value,
                         CanBeSmi can_value_be_smi,
                         Register scratch) override;

  void StoreObjectIntoObjectNoBarrier(
      Register object,
      const Address& address,
      const Object& value,
      MemoryOrder memory_order = kRelaxedNonAtomic,
      OperandSize size = kWordBytes) override;

  // Stores a non-tagged value into a heap object.
  void StoreIntoSmiField(Register object,
                         const Address& dest,
                         Register value);

  // Object pool, loading from pool, etc.
  void LoadPoolPointer(Register pp = PP);

  bool constant_pool_allowed() const { return constant_pool_allowed_; }
  void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }

  compiler::LRState lr_state() const { return lr_state_; }
  void set_lr_state(compiler::LRState state) { lr_state_ = state; }

  bool CanLoadFromObjectPool(const Object& object) const;
  void LoadNativeEntry(Register dst,
                       const ExternalLabel* label,
                       ObjectPoolBuilderEntry::Patchability patchable);

  // Note: the function never clobbers TMP, TMP2 scratch registers.
  void LoadObject(Register dst, const Object& obj);
  // Note: the function never clobbers TMP, TMP2 scratch registers.
  void LoadUniqueObject(Register dst, const Object& obj);
  // Note: the function never clobbers TMP, TMP2 scratch registers.
  void LoadImmediate(Register reg, int64_t imm) override;
  void LoadImmediate(Register reg, Immediate imm) {
    LoadImmediate(reg, imm.value());
  }

  void LoadSImmediate(VRegister reg, float immd);
  void LoadDImmediate(VRegister reg, double immd);
  void LoadQImmediate(VRegister reg, simd128_value_t immq);

  // Load word from pool from the given offset using encoding that
  // InstructionPattern::DecodeLoadWordFromPool can decode.
  //
  // Note: the function never clobbers TMP, TMP2 scratch registers.
  void LoadWordFromPoolIndex(Register dst, intptr_t index, Register pp = PP);

  // Store word to pool at the given offset.
  //
  // Note: clobbers TMP.
  void StoreWordToPoolIndex(Register src, intptr_t index, Register pp = PP);

  void LoadDoubleWordFromPoolIndex(Register lower,
                                   Register upper,
                                   intptr_t index);

  void PushObject(const Object& object) {
    if (IsSameObject(compiler::NullObject(), object)) {
      Push(NULL_REG);
    } else {
      LoadObject(TMP, object);
      Push(TMP);
    }
  }
  void PushImmediate(int64_t immediate) {
    LoadImmediate(TMP, immediate);
    Push(TMP);
  }
  void PushImmediate(Immediate immediate) { PushImmediate(immediate.value()); }
  void CompareObject(Register reg, const Object& object);

  void RangeCheck(Register value,
                  Register temp,
                  intptr_t low,
                  intptr_t high,
                  RangeCheckCondition condition,
                  Label* target) override;

  void LoadClassId(Register result, Register object);
  void LoadClassById(Register result, Register class_id);
  void CompareClassId(Register object,
                      intptr_t class_id,
                      Register scratch = kNoRegister);
  // Note: input and output registers must be different.
  void LoadClassIdMayBeSmi(Register result, Register object);
  void LoadTaggedClassIdMayBeSmi(Register result, Register object);
  void EnsureHasClassIdInDEBUG(intptr_t cid,
                               Register src,
                               Register scratch,
                               bool can_be_null = false) override;
  // Reserve specifies how much space to reserve for the Dart stack.
  void SetupDartSP(intptr_t reserve = 4096);
  void SetupCSPFromThread(Register thr);
  void RestoreCSP();

  void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
  void CompareWords(Register reg1,
                    Register reg2,
                    intptr_t offset,
                    Register count,
                    Register temp,
                    Label* equals) override;

  void EnterFrame(intptr_t frame_size);
  void LeaveFrame();
  void Ret() { ret(); }
  // Sets the return address to [value] as if there was a call.
  // On ARM64 sets LR.
  void SetReturnAddress(Register value);

  // Emit code to transition between generated mode and native mode.
  //
  // These require and ensure that CSP and SP are equal and aligned and require
  // a scratch register (in addition to TMP/TMP2).

  void TransitionGeneratedToNative(Register destination_address,
                                   Register new_exit_frame,
                                   Register new_exit_through_ffi,
                                   bool enter_safepoint);
  void TransitionNativeToGenerated(Register scratch,
                                   bool exit_safepoint,
                                   bool ignore_unwind_in_progress = false,
                                   bool set_tag = true);
  void EnterFullSafepoint(Register scratch);
  void ExitFullSafepoint(Register scratch, bool ignore_unwind_in_progress);

  void CheckCodePointer();
  void RestoreCodePointer();

  // Restores the values of the registers that are blocked to cache some values
  // e.g. HEAP_BITS and NULL_REG.
  void RestorePinnedRegisters();

  void SetupGlobalPoolAndDispatchTable();

  void EnterDartFrame(intptr_t frame_size, Register new_pp = kNoRegister);
  void EnterOsrFrame(intptr_t extra_size, Register new_pp = kNoRegister);
  void LeaveDartFrame();
  // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
  void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);

  // Set up a stub frame so that the stack traversal code can easily identify
  // a stub frame.
  void EnterStubFrame();
  void LeaveStubFrame();

  // Set up a frame for calling a C function.
  // Automatically save the pinned registers in Dart which are not callee-
  // saved in the native calling convention.
  // Use together with CallCFunction.
  void EnterCFrame(intptr_t frame_space);
  void LeaveCFrame();

  void MonomorphicCheckedEntryJIT();
  void MonomorphicCheckedEntryAOT();
  void BranchOnMonomorphicCheckedEntryJIT(Label* label);

  void CombineHashes(Register hash, Register other) override;
  void FinalizeHashForSize(intptr_t bit_size,
                           Register hash,
                           Register scratch = TMP) override;
  // If allocation tracing for |cid| is enabled, will jump to |trace| label,
  // which will allocate in the runtime where tracing occurs.
  void MaybeTraceAllocation(intptr_t cid,
                            Label* trace,
                            Register temp_reg,
                            JumpDistance distance = JumpDistance::kFarJump);

  void MaybeTraceAllocation(Register cid,
                            Label* trace,
                            Register temp_reg,
                            JumpDistance distance = JumpDistance::kFarJump);

  void TryAllocateObject(intptr_t cid,
                         intptr_t instance_size,
                         Label* failure,
                         JumpDistance distance,
                         Register instance_reg,
                         Register top_reg) override;

  void TryAllocateArray(intptr_t cid,
                        intptr_t instance_size,
                        Label* failure,
                        Register instance,
                        Register end_address,
                        Register temp1,
                        Register temp2);

  void CheckAllocationCanary(Register top, Register tmp = TMP) {
#if defined(DEBUG)
    Label okay;
    ldr(tmp, Address(top, 0));
    CompareImmediate(tmp, kAllocationCanary);
    b(&okay, EQUAL);
    Stop("Allocation canary");
    Bind(&okay);
#endif
  }
  void WriteAllocationCanary(Register top) {
#if defined(DEBUG)
    ASSERT(top != TMP);
    LoadImmediate(TMP, kAllocationCanary);
    str(TMP, Address(top, 0));
#endif
  }

  // Copy [size] bytes from [src] address to [dst] address.
  // [size] should be a multiple of word size.
  // Clobbers [src], [dst], [size] and [temp] registers.
  void CopyMemoryWords(Register src,
                       Register dst,
                       Register size,
                       Register temp);

  // This emits a PC-relative call of the form "bl <offset>". The offset
  // is not yet known and needs therefore relocation to the right place before
  // the code can be used.
  //
  // The necessary information for the "linker" (i.e. the relocation
  // information) is stored in [UntaggedCode::static_calls_target_table_]: an
  // entry of the form
  //
  // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
  //
  // will be used during relocation to fix the offset.
  //
  // The provided [offset_into_target] will be added to calculate the final
  // destination. It can be used e.g. for calling into the middle of a
  // function.
  void GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target = 0);

  // This emits a PC-relative tail call of the form "b <offset>".
  //
  // See also above for the pc-relative call.
  void GenerateUnRelocatedPcRelativeTailCall(intptr_t offset_into_target = 0);

  static bool AddressCanHoldConstantIndex(const Object& constant,
                                          bool is_external,
                                          intptr_t cid,
                                          intptr_t index_scale);
2210 intptr_t cid,
2211 intptr_t index_scale,
2212 Register array,
2213 intptr_t index) const;
2214 void ComputeElementAddressForIntIndex(Register address,
2215 bool is_external,
2216 intptr_t cid,
2217 intptr_t index_scale,
2218 Register array,
2219 intptr_t index);
2220 Address ElementAddressForRegIndex(bool is_external,
2221 intptr_t cid,
2222 intptr_t index_scale,
2223 bool index_unboxed,
2224 Register array,
2225 Register index,
2226 Register temp);
2227
2228 // Special version of ElementAddressForRegIndex for the case when cid and
2229 // operand size for the target load don't match (e.g. when loading a few
2230 // elements of the array with one load).
2231 Address ElementAddressForRegIndexWithSize(bool is_external,
2232 intptr_t cid,
2233 OperandSize size,
2234 intptr_t index_scale,
2235 bool index_unboxed,
2236 Register array,
2237 Register index,
2238 Register temp);
2239
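For instance (an assumed use case): a hashing loop over one-byte elements can pass the one-byte cid together with OperandSize kUnsignedFourBytes to fetch four elements per load; the index arithmetic still scales by the cid's element size while the load itself is wider.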
2240 void ComputeElementAddressForRegIndex(Register address,
2241 bool is_external,
2242 intptr_t cid,
2243 intptr_t index_scale,
2244 bool index_unboxed,
2245 Register array,
2246 Register index);
2247
2248 void LoadStaticFieldAddress(Register address,
2249 Register field,
2250 Register scratch,
2251 bool is_shared);
2252
2253#if defined(DART_COMPRESSED_POINTERS)
2254 void LoadCompressedFieldAddressForRegOffset(
2255 Register address,
2256 Register instance,
2257 Register offset_in_words_as_smi) override;
2258#endif
2259
2260 void LoadFieldAddressForRegOffset(Register address,
2261 Register instance,
2262 Register offset_in_words_as_smi) override;
2263
2264 void LoadFieldAddressForOffset(Register address,
2265 Register instance,
2266 int32_t offset) override {
2267 AddImmediate(address, instance, offset - kHeapObjectTag);
2268 }
2269
2270 // Returns object data offset for address calculation; for heap objects also
2271 // accounts for the tag.
2272 static int32_t HeapDataOffset(bool is_external, intptr_t cid) {
2273 return is_external
2274 ? 0
2275 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
2276 }
2277
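For example: for an internal (is_external == false) typed-data object, the result is target::Instance::DataOffsetFor(cid) - kHeapObjectTag, so adding it to the tagged pointer yields the untagged address of element 0; an external object's register already holds the raw payload address, hence the 0.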
2278 static int32_t EncodeImm26BranchOffset(int64_t imm, int32_t instr) {
2279 const int32_t imm32 = static_cast<int32_t>(imm);
2280 const int32_t off = (((imm32 >> 2) << kImm26Shift) & kImm26Mask);
2281 return (instr & ~kImm26Mask) | off;
2282 }
2283
2284 static int64_t DecodeImm26BranchOffset(int32_t instr) {
2285 const int32_t off = (((instr & kImm26Mask) >> kImm26Shift) << 6) >> 4;
2286 return static_cast<int64_t>(off);
2287 }
2288
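The decode is a standard two-shift sign extension: shifting the 26-bit field up by 6 parks its sign bit at bit 31, and the arithmetic shift right by 4 sign-extends while restoring the *4 byte scaling. For instance, a field of 0x3fffffe (-2 instructions) yields ((0x3fffffe << 6) >> 4) = -8 bytes.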
2289 private:
2290 bool use_far_branches_;
2291
2292 bool constant_pool_allowed_;
2293
2294 compiler::LRState lr_state_ = compiler::LRState::OnEntry();
2295
2296 // Note: this function never clobbers the TMP and TMP2 scratch registers.
2297 void LoadObjectHelper(Register dst, const Object& obj, bool is_unique);
2298
2299 void AddSubHelper(OperandSize os,
2300 bool set_flags,
2301 bool subtract,
2302 Register rd,
2303 Register rn,
2304 Operand o) {
2305 ASSERT((rd != R31) && (rn != R31));
2306 const Register crd = ConcreteRegister(rd);
2307 const Register crn = ConcreteRegister(rn);
2308 if (o.type() == Operand::Immediate) {
2309 ASSERT(rn != ZR);
2310 EmitAddSubImmOp(subtract ? SUBI : ADDI, crd, crn, o, os, set_flags);
2311 } else if (o.type() == Operand::Shifted) {
2312 ASSERT((rd != CSP) && (rn != CSP));
2313 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags);
2314 } else {
2315 ASSERT(o.type() == Operand::Extended);
2316 ASSERT((rd != CSP) && (rn != ZR));
2317 EmitAddSubShiftExtOp(subtract ? SUB : ADD, crd, crn, o, os, set_flags);
2318 }
2319 }
2320
2321 void AddSubWithCarryHelper(OperandSize sz,
2322 bool set_flags,
2323 bool subtract,
2324 Register rd,
2325 Register rn,
2326 Register rm) {
2327 ASSERT((rd != R31) && (rn != R31) && (rm != R31));
2328 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2329 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2330 const int32_t s = set_flags ? B29 : 0;
2331 const int32_t op = subtract ? SBC : ADC;
2332 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2333 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm);
2334 Emit(encoding);
2335 }
2336
2337 void EmitAddSubImmOp(AddSubImmOp op,
2338 Register rd,
2339 Register rn,
2340 Operand o,
2341 OperandSize sz,
2342 bool set_flags) {
2343 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2344 (sz == kUnsignedFourBytes));
2345 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2346 const int32_t s = set_flags ? B29 : 0;
2347 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2348 Arm64Encode::Rn(rn) | o.encoding();
2349 Emit(encoding);
2350 }
2351
2352 // Follows the *bfm instructions in taking r before s (unlike the Operand
2353 // constructor, which follows DecodeBitMasks from Appendix G).
2354 void EmitBitfieldOp(BitfieldOp op,
2355 Register rd,
2356 Register rn,
2357 int r_imm,
2358 int s_imm,
2359 OperandSize size) {
2360 if (size != kEightBytes) {
2361 ASSERT(size == kFourBytes);
2362 ASSERT(r_imm < 32 && s_imm < 32);
2363 } else {
2364 ASSERT(r_imm < 64 && s_imm < 64);
2365 }
2366 const int32_t instr = op | (size == kEightBytes ? Bitfield64 : 0);
2367 const int32_t encoding = instr | Operand(0, s_imm, r_imm).encoding() |
2368 Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
2369 Emit(encoding);
2370 }
2371
2372 void EmitLogicalImmOp(LogicalImmOp op,
2373 Register rd,
2374 Register rn,
2375 Operand o,
2376 OperandSize sz) {
2377 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2378 (sz == kUnsignedFourBytes));
2379 ASSERT((rd != R31) && (rn != R31));
2380 ASSERT(rn != CSP);
2381 ASSERT((op == ANDIS) || (rd != ZR)); // op != ANDIS => rd != ZR.
2382 ASSERT((op != ANDIS) || (rd != CSP)); // op == ANDIS => rd != CSP.
2383 ASSERT(o.type() == Operand::BitfieldImm);
2384 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2385 const int32_t encoding =
2386 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
2387 Emit(encoding);
2388 }
2389
2390 void EmitLogicalShiftOp(LogicalShiftOp op,
2391 Register rd,
2392 Register rn,
2393 Operand o,
2394 OperandSize sz) {
2395 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2396 (sz == kUnsignedFourBytes));
2397 ASSERT((rd != R31) && (rn != R31));
2398 ASSERT((rd != CSP) && (rn != CSP));
2399 ASSERT(o.type() == Operand::Shifted);
2400 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2401 const int32_t encoding =
2402 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | o.encoding();
2403 Emit(encoding);
2404 }
2405
2406 void EmitAddSubShiftExtOp(AddSubShiftExtOp op,
2407 Register rd,
2408 Register rn,
2409 Operand o,
2410 OperandSize sz,
2411 bool set_flags) {
2412 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2413 (sz == kUnsignedFourBytes));
2414 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2415 const int32_t s = set_flags ? B29 : 0;
2416 const int32_t encoding = op | size | s | Arm64Encode::Rd(rd) |
2417 Arm64Encode::Rn(rn) | o.encoding();
2418 Emit(encoding);
2419 }
2420
2421 int32_t BindImm26Branch(int64_t position, int64_t dest);
2422 int32_t BindImm19Branch(int64_t position, int64_t dest);
2423 int32_t BindImm14Branch(int64_t position, int64_t dest);
2424
2425 int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) {
2426 if (!CanEncodeImm19BranchOffset(imm)) {
2427 ASSERT(!use_far_branches());
2428 BailoutWithBranchOffsetError();
2429 }
2430 const int32_t imm32 = static_cast<int32_t>(imm);
2431 const int32_t off =
2432 ((static_cast<uint32_t>(imm32 >> 2) << kImm19Shift) & kImm19Mask);
2433 return (instr & ~kImm19Mask) | off;
2434 }
2435
2436 int64_t DecodeImm19BranchOffset(int32_t instr) {
2437 int32_t insns = (static_cast<uint32_t>(instr) & kImm19Mask) >> kImm19Shift;
2438 const int32_t off = static_cast<int32_t>(insns << 13) >> 11;
2439 return static_cast<int64_t>(off);
2440 }
2441
2442 int32_t EncodeImm14BranchOffset(int64_t imm, int32_t instr) {
2443 if (!CanEncodeImm14BranchOffset(imm)) {
2444 ASSERT(!use_far_branches());
2445 BailoutWithBranchOffsetError();
2446 }
2447 const int32_t imm32 = static_cast<int32_t>(imm);
2448 const int32_t off =
2449 ((static_cast<uint32_t>(imm32 >> 2) << kImm14Shift) & kImm14Mask);
2450 return (instr & ~kImm14Mask) | off;
2451 }
2452
2453 int64_t DecodeImm14BranchOffset(int32_t instr) {
2454 int32_t insns = (static_cast<uint32_t>(instr) & kImm14Mask) >> kImm14Shift;
2455 const int32_t off = static_cast<int32_t>(insns << 18) >> 16;
2456 return static_cast<int64_t>(off);
2457 }
2458
2459 bool IsUnconditionalBranch(int32_t instr) {
2460 return (instr & UnconditionalBranchMask) ==
2461 (UnconditionalBranchFixed & UnconditionalBranchMask);
2462 }
2463
2464 bool IsConditionalBranch(int32_t instr) {
2465 return (instr & ConditionalBranchMask) ==
2466 (ConditionalBranchFixed & ConditionalBranchMask);
2467 }
2468
2469 bool IsCompareAndBranch(int32_t instr) {
2470 return (instr & CompareAndBranchMask) ==
2471 (CompareAndBranchFixed & CompareAndBranchMask);
2472 }
2473
2474 bool IsTestAndBranch(int32_t instr) {
2475 return (instr & TestAndBranchMask) ==
2476 (TestAndBranchFixed & TestAndBranchMask);
2477 }
2478
2479 Condition DecodeImm19BranchCondition(int32_t instr) {
2480 if (IsConditionalBranch(instr)) {
2481 return static_cast<Condition>((instr & kCondMask) >> kCondShift);
2482 }
2483 ASSERT(IsCompareAndBranch(instr));
2484 return (instr & B24) ? EQ : NE; // cbz : cbnz
2485 }
2486
2487 int32_t EncodeImm19BranchCondition(Condition cond, int32_t instr) {
2488 if (IsConditionalBranch(instr)) {
2489 const int32_t c_imm = static_cast<int32_t>(cond);
2490 return (instr & ~kCondMask) | (c_imm << kCondShift);
2491 }
2492 ASSERT(IsCompareAndBranch(instr));
2493 return (instr & ~B24) | (cond == EQ ? B24 : 0); // cbz : cbnz
2494 }
2495
2496 Condition DecodeImm14BranchCondition(int32_t instr) {
2497 ASSERT(IsTestAndBranch(instr));
2498 return (instr & B24) ? EQ : NE; // tbz : tbnz
2499 }
2500
2501 int32_t EncodeImm14BranchCondition(Condition cond, int32_t instr) {
2502 ASSERT(IsTestAndBranch(instr));
2503 return (instr & ~B24) | (cond == EQ ? B24 : 0); // tbz : tbnz
2504 }
2505
2506 void EmitCompareAndBranchOp(CompareAndBranchOp op,
2507 Register rt,
2508 int64_t imm,
2509 OperandSize sz) {
2510 // EncodeImm19BranchOffset will longjump out if the offset does not fit in
2511 // 19 bits.
2512 const int32_t encoded_offset = EncodeImm19BranchOffset(imm, 0);
2513 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2514 (sz == kUnsignedFourBytes));
2515 ASSERT(Utils::IsInt(21, imm) && ((imm & 0x3) == 0));
2516 ASSERT((rt != CSP) && (rt != R31));
2517 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2518 const int32_t encoding = op | size | Arm64Encode::Rt(rt) | encoded_offset;
2519 Emit(encoding);
2520 }
2521
2522 void EmitTestAndBranchOp(TestAndBranchOp op,
2523 Register rt,
2524 intptr_t bit_number,
2525 int64_t imm) {
2526 // EncodeImm14BranchOffset will longjump out if the offset does not fit in
2527 // 14 bits.
2528 const int32_t encoded_offset = EncodeImm14BranchOffset(imm, 0);
2529 ASSERT((bit_number >= 0) && (bit_number <= 63));
2530 ASSERT(Utils::IsInt(16, imm) && ((imm & 0x3) == 0));
2531 ASSERT((rt != CSP) && (rt != R31));
2532 const Register crt = ConcreteRegister(rt);
2533 int32_t bit_number_low = bit_number & 0x1f;
2534 int32_t bit_number_hi = (bit_number & 0x20) >> 5;
2535 const int32_t encoding =
2536 op | (bit_number_low << 19) | (bit_number_hi << 31) |
2537 (static_cast<int32_t>(crt) << kRtShift) | encoded_offset;
2538 Emit(encoding);
2539 }
2540
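Worked example for the split above: TBZ/TBNZ encode the tested bit as b5:b40, so bit_number 37 gives bit_number_low = 37 & 0x1f = 5 (the b40 field at bit 19) and bit_number_hi = (37 & 0x20) >> 5 = 1 (the b5 field at bit 31, which also marks a 64-bit test).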
2541 void EmitConditionalBranchOp(ConditionalBranchOp op,
2542 Condition cond,
2543 int64_t imm) {
2544 ASSERT(cond != AL);
2545 const int32_t off = EncodeImm19BranchOffset(imm, 0);
2546 const int32_t encoding =
2547 op | (static_cast<int32_t>(cond) << kCondShift) | off;
2548 Emit(encoding);
2549 }
2550
2551 bool CanEncodeImm19BranchOffset(int64_t offset) {
2552 ASSERT(Utils::IsAligned(offset, 4));
2553 return Utils::IsInt(21, offset);
2554 }
2555
2556 bool CanEncodeImm14BranchOffset(int64_t offset) {
2557 ASSERT(Utils::IsAligned(offset, 4));
2558 return Utils::IsInt(16, offset);
2559 }
2560
2561 void EmitConditionalBranch(ConditionalBranchOp op,
2562 Condition cond,
2563 Label* label) {
2564 ASSERT(cond != AL);
2565 if (label->IsBound()) {
2566 const int64_t dest = label->Position() - buffer_.Size();
2567 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
2568 EmitConditionalBranchOp(op, InvertCondition(cond),
2569 2 * Instr::kInstrSize);
2570 // Make a new dest that takes the new position into account after the
2571 // inverted test.
2572 const int64_t dest = label->Position() - buffer_.Size();
2573 b(dest);
2574 } else {
2575 EmitConditionalBranchOp(op, cond, dest);
2576 }
2577 label->UpdateLRState(lr_state());
2578 } else {
2579 const int64_t position = buffer_.Size();
2580 if (use_far_branches()) {
2581 // When cond is AL, this guard branch will be rewritten as a nop when
2582 // the label is bound. We don't write it as a nop initially because it
2583 // makes the decoding code in Bind simpler.
2584 EmitConditionalBranchOp(op, InvertCondition(cond),
2585 2 * Instr::kInstrSize);
2586 b(label->position_);
2587 } else {
2588 EmitConditionalBranchOp(op, cond, label->position_);
2589 }
2590 label->LinkTo(position, lr_state());
2591 }
2592 }
2593
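Sketch of the far-branch expansion above (instruction spellings illustrative only): when the target is outside the ±1 MB imm19 range, a single b.cond is emitted as

b.!cond #+8    ; inverted guard skips the next instruction
b       #dest  ; unconditional branch with the ±128 MB imm26 range

so only the two-instruction hop has to fit in 19 bits.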
2594 void EmitCompareAndBranch(CompareAndBranchOp op,
2595 Register rt,
2596 Label* label,
2597 OperandSize sz) {
2598 if (label->IsBound()) {
2599 const int64_t dest = label->Position() - buffer_.Size();
2600 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
2601 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt,
2602 2 * Instr::kInstrSize, sz);
2603 // Make a new dest that takes the new position into account after the
2604 // inverted test.
2605 const int64_t dest = label->Position() - buffer_.Size();
2606 b(dest);
2607 } else {
2608 EmitCompareAndBranchOp(op, rt, dest, sz);
2609 }
2610 label->UpdateLRState(lr_state());
2611 } else {
2612 const int64_t position = buffer_.Size();
2613 if (use_far_branches()) {
2614 EmitCompareAndBranchOp(op == CBZ ? CBNZ : CBZ, rt,
2615 2 * Instr::kInstrSize, sz);
2616 b(label->position_);
2617 } else {
2618 EmitCompareAndBranchOp(op, rt, label->position_, sz);
2619 }
2620 label->LinkTo(position, lr_state());
2621 }
2622 }
2623
2624 void EmitTestAndBranch(TestAndBranchOp op,
2625 Register rt,
2626 intptr_t bit_number,
2627 Label* label) {
2628 if (label->IsBound()) {
2629 const int64_t dest = label->Position() - buffer_.Size();
2630 if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
2631 EmitTestAndBranchOp(op == TBZ ? TBNZ : TBZ, rt, bit_number,
2632 2 * Instr::kInstrSize);
2633 // Make a new dest that takes the new position into account after the
2634 // inverted test.
2635 const int64_t dest = label->Position() - buffer_.Size();
2636 b(dest);
2637 } else {
2638 EmitTestAndBranchOp(op, rt, bit_number, dest);
2639 }
2640 label->UpdateLRState(lr_state());
2641 } else {
2642 int64_t position = buffer_.Size();
2643 if (use_far_branches()) {
2644 EmitTestAndBranchOp(op == TBZ ? TBNZ : TBZ, rt, bit_number,
2645 2 * Instr::kInstrSize);
2646 b(label->position_);
2647 } else {
2648 EmitTestAndBranchOp(op, rt, bit_number, label->position_);
2649 }
2650 label->LinkTo(position, lr_state());
2651 }
2652 }
2653
2654 bool CanEncodeImm26BranchOffset(int64_t offset) {
2655 ASSERT(Utils::IsAligned(offset, 4));
2656 return Utils::IsInt(26, offset);
2657 }
2658
2659 void EmitUnconditionalBranchOp(UnconditionalBranchOp op, int64_t offset) {
2660 ASSERT(CanEncodeImm26BranchOffset(offset));
2661 const int32_t off = ((offset >> 2) << kImm26Shift) & kImm26Mask;
2662 const int32_t encoding = op | off;
2663 Emit(encoding);
2664 }
2665
2666 void EmitUnconditionalBranch(UnconditionalBranchOp op, Label* label) {
2667 if (label->IsBound()) {
2668 const int64_t dest = label->Position() - buffer_.Size();
2669 EmitUnconditionalBranchOp(op, dest);
2670 label->UpdateLRState(lr_state());
2671 } else {
2672 const int64_t position = buffer_.Size();
2673 EmitUnconditionalBranchOp(op, label->position_);
2674 label->LinkTo(position, lr_state());
2675 }
2676 }
2677
2678 void EmitUnconditionalBranchRegOp(UnconditionalBranchRegOp op, Register rn) {
2679 ASSERT((rn != CSP) && (rn != R31));
2680 const int32_t encoding = op | Arm64Encode::Rn(rn);
2681 Emit(encoding);
2682 }
2683
2684 static int32_t ExceptionGenOpEncoding(ExceptionGenOp op, uint16_t imm) {
2685 return op | (static_cast<int32_t>(imm) << kImm16Shift);
2686 }
2687
2688 void EmitExceptionGenOp(ExceptionGenOp op, uint16_t imm) {
2689 Emit(ExceptionGenOpEncoding(op, imm));
2690 }
2691
2692 void EmitMoveWideOp(MoveWideOp op,
2693 Register rd,
2694 const Immediate& imm,
2695 int hw_idx,
2696 OperandSize sz) {
2697 ASSERT((hw_idx >= 0) && (hw_idx <= 3));
2698 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2699 (sz == kUnsignedFourBytes));
2700 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2701 const int32_t encoding =
2702 op | size | Arm64Encode::Rd(rd) |
2703 (static_cast<int32_t>(hw_idx) << kHWShift) |
2704 (static_cast<int32_t>(imm.value() & 0xffff) << kImm16Shift);
2705 Emit(encoding);
2706 }
2707
2708 void EmitLoadStoreExclusive(LoadStoreExclusiveOp op,
2709 Register rs,
2710 Register rn,
2711 Register rt,
2712 OperandSize sz = kEightBytes) {
2713 ASSERT(sz == kEightBytes || sz == kFourBytes);
2714 const int32_t size = B31 | (sz == kEightBytes ? B30 : 0);
2715
2716 ASSERT((rs != kNoRegister) && (rs != CSP));
2717 ASSERT((rn != kNoRegister) && (rn != ZR));
2718 ASSERT((rt != kNoRegister) && (rt != CSP));
2719
2720 const int32_t encoding = op | size | Arm64Encode::Rs(rs) |
2721 Arm64Encode::Rn(rn) |
2722 Arm64Encode::Rt(rt);
2723 Emit(encoding);
2724 }
2725
2726 void EmitAtomicMemory(AtomicMemoryOp op,
2727 Register rs,
2728 Register rn,
2729 Register rt,
2730 OperandSize sz = kEightBytes) {
2731 ASSERT(sz == kEightBytes || sz == kFourBytes);
2732 const int32_t size = B31 | (sz == kEightBytes ? B30 : 0);
2733
2734 ASSERT((rs != kNoRegister) && (rs != CSP));
2735 ASSERT((rn != kNoRegister) && (rn != ZR));
2736 ASSERT((rt != kNoRegister) && (rt != CSP));
2737
2738 const int32_t encoding = op | size | Arm64Encode::Rs(rs) |
2739 Arm64Encode::Rn(rn) | Arm64Encode::Rt(rt);
2740 Emit(encoding);
2741 }
2742
2743 void EmitLoadStoreReg(LoadStoreRegOp op,
2744 Register rt,
2745 Address a,
2746 OperandSize sz) {
2747 // Unpredictable, illegal on some microarchitectures.
2748 ASSERT((op != LDR && op != STR && op != LDRS) || a.can_writeback_to(rt));
2749
2750 const int32_t size = Log2OperandSizeBytes(sz);
2751 const int32_t encoding =
2752 op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding(sz);
2753 Emit(encoding);
2754 }
2755
2756 void EmitLoadRegLiteral(LoadRegLiteralOp op,
2757 Register rt,
2758 Address a,
2759 OperandSize sz) {
2760 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2761 (sz == kUnsignedFourBytes));
2762 ASSERT((rt != CSP) && (rt != R31));
2763 const int32_t size = (sz == kEightBytes) ? B30 : 0;
2764 const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding(sz);
2765 Emit(encoding);
2766 }
2767
2768 void EmitLoadStoreRegPair(LoadStoreRegPairOp op,
2769 Register rt,
2770 Register rt2,
2771 Address a,
2772 OperandSize sz) {
2773 // Unpredictable, illegal on some microarchitectures.
2774 ASSERT(a.can_writeback_to(rt) && a.can_writeback_to(rt2));
2775 ASSERT(op != LDP || rt != rt2);
2776
2777 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2778 (sz == kUnsignedFourBytes));
2779 ASSERT((rt != CSP) && (rt != R31));
2780 ASSERT((rt2 != CSP) && (rt2 != R31));
2781 int32_t opc = 0;
2782 switch (sz) {
2783 case kEightBytes:
2784 opc = B31;
2785 break;
2786 case kFourBytes:
2787 opc = op == LDP ? B30 : 0;
2788 break;
2789 case kUnsignedFourBytes:
2790 opc = 0;
2791 break;
2792 default:
2793 UNREACHABLE();
2794 break;
2795 }
2796 const int32_t encoding =
2797 opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding(sz);
2798 Emit(encoding);
2799 }
2800
2801 void EmitLoadStoreVRegPair(LoadStoreRegPairOp op,
2802 VRegister rt,
2803 VRegister rt2,
2804 Address a,
2805 OperandSize sz) {
2806 ASSERT(op != FLDP || rt != rt2);
2807 ASSERT((sz == kSWord) || (sz == kDWord) || (sz == kQWord));
2808 int32_t opc = 0;
2809 switch (sz) {
2810 case kSWord:
2811 opc = 0;
2812 break;
2813 case kDWord:
2814 opc = B30;
2815 break;
2816 case kQWord:
2817 opc = B31;
2818 break;
2819 default:
2820 UNREACHABLE();
2821 break;
2822 }
2823 const int32_t encoding =
2824 opc | op | Arm64Encode::Rt(static_cast<Register>(rt)) |
2825 Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding(sz);
2826 Emit(encoding);
2827 }
2828
2829 void EmitPCRelOp(PCRelOp op, Register rd, const Immediate& imm) {
2830 ASSERT(Utils::IsInt(21, imm.value()));
2831 ASSERT((rd != R31) && (rd != CSP));
2832 const int32_t loimm = (imm.value() & 0x3) << 29;
2833 const int32_t hiimm =
2834 (static_cast<uint32_t>(imm.value() >> 2) << kImm19Shift) & kImm19Mask;
2835 const int32_t encoding = op | loimm | hiimm | Arm64Encode::Rd(rd);
2836 Emit(encoding);
2837 }
2838
2839 void EmitMiscDP1Source(MiscDP1SourceOp op,
2840 Register rd,
2841 Register rn,
2842 OperandSize sz) {
2843 ASSERT((rd != CSP) && (rn != CSP));
2844 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2845 (sz == kUnsignedFourBytes));
2846 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2847 const int32_t encoding =
2848 op | size | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn);
2849 Emit(encoding);
2850 }
2851
2852 void EmitMiscDP2Source(MiscDP2SourceOp op,
2853 Register rd,
2854 Register rn,
2855 Register rm,
2856 OperandSize sz) {
2857 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2858 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2859 (sz == kUnsignedFourBytes));
2860 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2861 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2862 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm);
2863 Emit(encoding);
2864 }
2865
2866 void EmitMiscDP3Source(MiscDP3SourceOp op,
2867 Register rd,
2868 Register rn,
2869 Register rm,
2870 Register ra,
2871 OperandSize sz) {
2872 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP) && (ra != CSP));
2873 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2874 (sz == kUnsignedFourBytes));
2875 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2876 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2877 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
2878 Arm64Encode::Ra(ra);
2879 Emit(encoding);
2880 }
2881
2882 void EmitConditionalSelect(ConditionalSelectOp op,
2883 Register rd,
2884 Register rn,
2885 Register rm,
2886 Condition cond,
2887 OperandSize sz) {
2888 ASSERT((rd != CSP) && (rn != CSP) && (rm != CSP));
2889 ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
2890 (sz == kUnsignedFourBytes));
2891 const int32_t size = (sz == kEightBytes) ? B31 : 0;
2892 const int32_t encoding = op | size | Arm64Encode::Rd(rd) |
2893 Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
2894 (static_cast<int32_t>(cond) << kSelCondShift);
2895 Emit(encoding);
2896 }
2897
2898 void EmitFPImm(FPImmOp op, VRegister vd, uint8_t imm8) {
2899 const int32_t encoding =
2900 op | (static_cast<int32_t>(vd) << kVdShift) | (imm8 << kImm8Shift);
2901 Emit(encoding);
2902 }
2903
2904 void EmitFPIntCvtOp(FPIntCvtOp op,
2905 Register rd,
2906 Register rn,
2907 OperandSize sz = kEightBytes) {
2908 ASSERT((sz == kEightBytes) || (sz == kFourBytes));
2909 const int32_t sfield = (sz == kEightBytes) ? B31 : 0;
2910 const int32_t encoding =
2911 op | Arm64Encode::Rd(rd) | Arm64Encode::Rn(rn) | sfield;
2912 Emit(encoding);
2913 }
2914
2915 void EmitFPOneSourceOp(FPOneSourceOp op, VRegister vd, VRegister vn) {
2916 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2917 (static_cast<int32_t>(vn) << kVnShift);
2918 Emit(encoding);
2919 }
2920
2921 void EmitFPTwoSourceOp(FPTwoSourceOp op,
2922 VRegister vd,
2923 VRegister vn,
2924 VRegister vm) {
2925 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2926 (static_cast<int32_t>(vn) << kVnShift) |
2927 (static_cast<int32_t>(vm) << kVmShift);
2928 Emit(encoding);
2929 }
2930
2931 void EmitFPCompareOp(FPCompareOp op, VRegister vn, VRegister vm) {
2932 const int32_t encoding = op | (static_cast<int32_t>(vn) << kVnShift) |
2933 (static_cast<int32_t>(vm) << kVmShift);
2934 Emit(encoding);
2935 }
2936
2937 void EmitSIMDThreeSameOp(SIMDThreeSameOp op,
2938 VRegister vd,
2939 VRegister vn,
2940 VRegister vm) {
2941 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2942 (static_cast<int32_t>(vn) << kVnShift) |
2943 (static_cast<int32_t>(vm) << kVmShift);
2944 Emit(encoding);
2945 }
2946
2947 void EmitSIMDCopyOp(SIMDCopyOp op,
2948 VRegister vd,
2949 VRegister vn,
2950 OperandSize sz,
2951 int32_t idx4,
2952 int32_t idx5) {
2953 const int32_t shift = Log2OperandSizeBytes(sz);
2954 const int32_t imm5 = ((idx5 << (shift + 1)) | (1 << shift)) & 0x1f;
2955 const int32_t imm4 = (idx4 << shift) & 0xf;
2956 const int32_t encoding = op | (imm5 << kImm5Shift) | (imm4 << kImm4Shift) |
2957 (static_cast<int32_t>(vd) << kVdShift) |
2958 (static_cast<int32_t>(vn) << kVnShift);
2959 Emit(encoding);
2960 }
2961
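Worked example for the index packing above: for four-byte elements (shift = 2) and idx5 = 3, imm5 = ((3 << 3) | (1 << 2)) & 0x1f = 0x1c; the position of the lowest set bit in imm5 encodes the element size, and the bits above it carry the index.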
2962 void EmitSIMDTwoRegOp(SIMDTwoRegOp op, VRegister vd, VRegister vn) {
2963 const int32_t encoding = op | (static_cast<int32_t>(vd) << kVdShift) |
2964 (static_cast<int32_t>(vn) << kVnShift);
2965 Emit(encoding);
2966 }
2967
2968 void BranchLink(intptr_t target_code_pool_index, CodeEntryKind entry_kind);
2969
2970 friend class dart::FlowGraphCompiler;
2971 std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
2972 std::function<void()> generate_invoke_array_write_barrier_;
2973
2974 DISALLOW_ALLOCATION();
2975 DISALLOW_COPY_AND_ASSIGN(Assembler);
2976};
2977
2978} // namespace compiler
2979} // namespace dart
2980
2981#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_