6#if defined(TARGET_ARCH_ARM64)
8#define SHOULD_NOT_INCLUDE_RUNTIME
22DEFINE_FLAG(
bool, use_far_branches,
false,
"Always use far branches");
37 case kUnboxedFloat32x4:
38 case kUnboxedFloat64x2:
46 intptr_t far_branch_level)
47 : AssemblerBase(object_pool_builder),
48 use_far_branches_(far_branch_level != 0),
49 constant_pool_allowed_(
false) {
50 generate_invoke_write_barrier_wrapper_ = [&](
Register reg) {
52 target::Thread::write_barrier_wrappers_thread_offset(reg)));
54 generate_invoke_array_write_barrier_ = [&]() {
56 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
60void Assembler::Emit(int32_t value) {
61 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
62 buffer_.Emit<int32_t>(
value);
65void Assembler::Emit64(int64_t value) {
66 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
67 buffer_.Emit<int64_t>(
value);
70int32_t Assembler::BindImm26Branch(int64_t position, int64_t dest) {
71 ASSERT(CanEncodeImm26BranchOffset(dest));
72 const int32_t
next = buffer_.Load<int32_t>(position);
73 const int32_t encoded = EncodeImm26BranchOffset(dest,
next);
74 buffer_.Store<int32_t>(position, encoded);
75 return DecodeImm26BranchOffset(
next);
78int32_t Assembler::BindImm19Branch(int64_t position, int64_t dest) {
79 if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
84 const int32_t guard_branch =
85 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
88 const int32_t far_branch =
89 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
90 const Condition c = DecodeImm19BranchCondition(guard_branch);
94 const int32_t
next = DecodeImm26BranchOffset(far_branch);
98 const int64_t
offset =
dest - Instr::kInstrSize;
101 const int32_t encoded_branch = EncodeImm26BranchOffset(
offset, far_branch);
104 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
106 }
else if (use_far_branches() && CanEncodeImm19BranchOffset(dest)) {
111 const int32_t guard_branch =
112 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
115 const int32_t far_branch =
116 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
119 const int32_t
next = DecodeImm26BranchOffset(far_branch);
122 int32_t encoded_guard_branch = EncodeImm19BranchOffset(dest, guard_branch);
123 const Condition c = DecodeImm19BranchCondition(encoded_guard_branch);
124 encoded_guard_branch =
125 EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch);
128 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
129 encoded_guard_branch);
130 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
131 Instr::kNopInstruction);
134 const int32_t
next = buffer_.Load<int32_t>(position);
135 const int32_t encoded = EncodeImm19BranchOffset(dest,
next);
136 buffer_.Store<int32_t>(position, encoded);
137 return DecodeImm19BranchOffset(
next);
141int32_t Assembler::BindImm14Branch(int64_t position, int64_t dest) {
142 if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
147 const int32_t guard_branch =
148 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
151 const int32_t far_branch =
152 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
153 const Condition c = DecodeImm14BranchCondition(guard_branch);
157 const int32_t
next = DecodeImm26BranchOffset(far_branch);
161 const int64_t
offset =
dest - Instr::kInstrSize;
164 const int32_t encoded_branch = EncodeImm26BranchOffset(
offset, far_branch);
167 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
169 }
else if (use_far_branches() && CanEncodeImm14BranchOffset(dest)) {
174 const int32_t guard_branch =
175 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
178 const int32_t far_branch =
179 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
182 const int32_t
next = DecodeImm26BranchOffset(far_branch);
185 int32_t encoded_guard_branch = EncodeImm14BranchOffset(dest, guard_branch);
186 const Condition c = DecodeImm14BranchCondition(encoded_guard_branch);
187 encoded_guard_branch =
188 EncodeImm14BranchCondition(InvertCondition(c), encoded_guard_branch);
191 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
192 encoded_guard_branch);
193 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
194 Instr::kNopInstruction);
197 const int32_t
next = buffer_.Load<int32_t>(position);
198 const int32_t encoded = EncodeImm14BranchOffset(dest,
next);
199 buffer_.Store<int32_t>(position, encoded);
200 return DecodeImm14BranchOffset(
next);
204void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
207 if (rd == rn)
return;
230void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
233 return sbfm(rd, rn, kBFMTagRotate, target::kSmiBits + 1);
235 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
237 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt32 - 1);
239 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
241 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt16 - 1);
243 return ubfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
245 return sbfm(rd, rn, kBFMTagRotate, kBitsPerInt8 - 1);
252void Assembler::Bind(Label* label) {
253 ASSERT(!label->IsBound());
254 const intptr_t bound_pc = buffer_.Size();
256 while (label->IsLinked()) {
257 const int64_t position = label->Position();
258 const int64_t
dest = bound_pc - position;
259 const int32_t instr = buffer_.Load<int32_t>(position);
260 if (IsTestAndBranch(instr)) {
261 label->position_ = BindImm14Branch(position, dest);
262 }
else if (IsConditionalBranch(instr) || IsCompareAndBranch(instr)) {
263 label->position_ = BindImm19Branch(position, dest);
264 }
else if (IsUnconditionalBranch(instr)) {
265 label->position_ = BindImm26Branch(position, dest);
270 label->BindTo(bound_pc, lr_state());
273void Assembler::Align(intptr_t alignment, intptr_t
offset) {
274 ASSERT(Utils::IsPowerOfTwo(alignment));
275 intptr_t
pos =
offset + buffer_.GetPosition();
276 intptr_t mod =
pos & (alignment - 1);
280 intptr_t bytes_needed = alignment - mod;
281 ASSERT((bytes_needed % Instr::kInstrSize) == 0);
282 while (bytes_needed > 0) {
284 bytes_needed -= Instr::kInstrSize;
286 ASSERT(((
offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
289#if defined(TARGET_USES_THREAD_SANITIZER)
290void Assembler::TsanLoadAcquire(Register addr) {
291 LeafRuntimeScope rt(
this, 0,
true);
292 MoveRegister(R0, addr);
293 rt.Call(kTsanLoadAcquireRuntimeEntry, 1);
296void Assembler::TsanStoreRelease(Register addr) {
297 LeafRuntimeScope rt(
this, 0,
true);
298 MoveRegister(R0, addr);
299 rt.Call(kTsanStoreReleaseRuntimeEntry, 1);
303static int CountLeadingZeros(uint64_t value,
int width) {
304 if (
width == 64)
return Utils::CountLeadingZeros64(value);
305 if (
width == 32)
return Utils::CountLeadingZeros32(value);
310static int CountOneBits(uint64_t value,
int width) {
312 value &= (0xffffffffffffffffULL >> (64 -
width));
314 value = ((
value >> 1) & 0x5555555555555555) + (
value & 0x5555555555555555);
315 value = ((
value >> 2) & 0x3333333333333333) + (
value & 0x3333333333333333);
316 value = ((
value >> 4) & 0x0f0f0f0f0f0f0f0f) + (
value & 0x0f0f0f0f0f0f0f0f);
317 value = ((
value >> 8) & 0x00ff00ff00ff00ff) + (
value & 0x00ff00ff00ff00ff);
318 value = ((
value >> 16) & 0x0000ffff0000ffff) + (
value & 0x0000ffff0000ffff);
319 value = ((
value >> 32) & 0x00000000ffffffff) + (
value & 0x00000000ffffffff);
331bool Operand::IsImmLogical(uint64_t value, uint8_t
width, Operand* imm_op) {
332 ASSERT(imm_op !=
nullptr);
334 if (
width == kWRegSizeInBits) {
335 value &= 0xffffffffUL;
361 if ((value == 0) || (value == 0xffffffffffffffffULL) ||
362 ((
width == kWRegSizeInBits) && (value == 0xffffffff))) {
366 int lead_zero = CountLeadingZeros(value,
width);
367 int lead_one = CountLeadingZeros(~value,
width);
368 int trail_zero = Utils::CountTrailingZerosWord(value);
369 int trail_one = Utils::CountTrailingZerosWord(~value);
370 int set_bits = CountOneBits(value,
width);
377 int imm_s_mask = 0x3F;
384 imm_r = (
value & 3) - 1;
385 *imm_op = Operand(n, imm_s, imm_r);
389 n = (
width == 64) ? 1 : 0;
390 imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
391 if ((lead_zero + set_bits) ==
width) {
394 imm_r = (lead_zero > 0) ? (
width - trail_zero) : lead_one;
399 if (lead_zero + trail_zero + set_bits ==
width) {
400 *imm_op = Operand(n, imm_s, imm_r);
406 if (lead_one + trail_one + (
width - set_bits) ==
width) {
407 *imm_op = Operand(n, imm_s, imm_r);
414 uint64_t mask = (1ULL << (
width >> 1)) - 1;
415 if ((value & mask) == ((
value >> (
width >> 1)) & mask)) {
427void Assembler::LoadPoolPointer(Register pp) {
429 ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
437 sub(pp, pp, Operand(kHeapObjectTag));
438 set_constant_pool_allowed(pp == PP);
441void Assembler::LoadWordFromPoolIndex(Register dst,
444 ASSERT((pp != PP) || constant_pool_allowed());
448 const uint32_t
offset = target::ObjectPool::element_offset(index);
449 const uint32_t upper20 =
offset & 0xfffff000;
450 if (Address::CanHoldOffset(
offset)) {
451 ldr(dst, Address(pp,
offset));
452 }
else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
453 Operand::Immediate) {
454 const uint32_t lower12 =
offset & 0x00000fff;
455 ASSERT(Address::CanHoldOffset(lower12));
457 ldr(dst, Address(dst, lower12));
459 const uint16_t offset_low = Utils::Low16Bits(
offset);
460 const uint16_t offset_high = Utils::High16Bits(
offset);
461 movz(dst, Immediate(offset_low), 0);
462 movk(dst, Immediate(offset_high), 1);
463 ldr(dst, Address(pp, dst));
467void Assembler::StoreWordToPoolIndex(Register src,
470 ASSERT((pp != PP) || constant_pool_allowed());
474 const uint32_t
offset = target::ObjectPool::element_offset(index);
475 const uint32_t upper20 =
offset & 0xfffff000;
476 if (Address::CanHoldOffset(
offset)) {
477 str(src, Address(pp,
offset));
478 }
else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
479 Operand::Immediate) {
480 const uint32_t lower12 =
offset & 0x00000fff;
481 ASSERT(Address::CanHoldOffset(lower12));
483 str(src, Address(TMP, lower12));
485 const uint16_t offset_low = Utils::Low16Bits(
offset);
486 const uint16_t offset_high = Utils::High16Bits(
offset);
487 movz(TMP, Immediate(offset_low), 0);
488 movk(TMP, Immediate(offset_high), 1);
489 str(src, Address(pp, TMP));
493void Assembler::LoadDoubleWordFromPoolIndex(Register lower,
498 ASSERT(constant_pool_allowed());
499 ASSERT(lower != PP && upper != PP);
503 const uint32_t
offset = target::ObjectPool::element_offset(index);
505 const uint32_t upper20 =
offset & 0xfffff000;
506 const uint32_t lower12 =
offset & 0x00000fff;
507 if (Address::CanHoldOffset(
offset, Address::PairOffset)) {
508 ldp(lower, upper, Address(PP,
offset, Address::PairOffset));
509 }
else if (Operand::CanHold(
offset, kXRegSizeInBits, &op) ==
510 Operand::Immediate) {
512 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
513 }
else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
514 Operand::Immediate &&
515 Address::CanHoldOffset(lower12, Address::PairOffset)) {
517 ldp(lower, upper, Address(TMP, lower12, Address::PairOffset));
519 const uint32_t lower12 =
offset & 0xfff;
520 const uint32_t higher12 =
offset & 0xfff000;
522 Operand op_high, op_low;
523 bool ok = Operand::CanHold(higher12, kXRegSizeInBits, &op_high) ==
524 Operand::Immediate &&
525 Operand::CanHold(lower12, kXRegSizeInBits, &op_low) ==
529 add(TMP, PP, op_high);
530 add(TMP, TMP, op_low);
531 ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
535bool Assembler::CanLoadFromObjectPool(
const Object&
object)
const {
537 if (!constant_pool_allowed()) {
546void Assembler::LoadNativeEntry(
548 const ExternalLabel* label,
549 ObjectPoolBuilderEntry::Patchability patchable) {
550 const intptr_t index =
551 object_pool_builder().FindNativeFunction(label, patchable);
552 LoadWordFromPoolIndex(dst, index);
555void Assembler::LoadIsolate(Register dst) {
556 ldr(dst, Address(THR, target::Thread::isolate_offset()));
559void Assembler::LoadIsolateGroup(Register rd) {
560 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
563void Assembler::LoadObjectHelper(Register dst,
564 const Object&
object,
574 if (
IsSameObject(CastHandle<Object>(compiler::TrueObject()),
object)) {
575 AddImmediate(dst, NULL_REG, kTrueOffsetFromNull);
578 if (
IsSameObject(CastHandle<Object>(compiler::FalseObject()),
object)) {
579 AddImmediate(dst, NULL_REG, kFalseOffsetFromNull);
583 if (target::CanLoadFromThread(
object, &
offset)) {
584 ldr(dst, Address(THR,
offset));
587 if (target::IsSmi(
object)) {
588 LoadImmediate(dst, target::ToRawSmi(
object));
593 const intptr_t index =
594 is_unique ? object_pool_builder().AddObject(
595 object, ObjectPoolBuilderEntry::kPatchable)
596 : object_pool_builder().FindObject(
597 object, ObjectPoolBuilderEntry::kNotPatchable);
598 LoadWordFromPoolIndex(dst, index);
601void Assembler::LoadObject(Register dst,
const Object&
object) {
602 LoadObjectHelper(dst,
object,
false);
605void Assembler::LoadUniqueObject(Register dst,
const Object&
object) {
606 LoadObjectHelper(dst,
object,
true);
609void Assembler::LoadFromStack(Register dst, intptr_t depth) {
611 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
614void Assembler::StoreToStack(Register src, intptr_t depth) {
616 StoreToOffset(src, SPREG, depth * target::kWordSize);
619void Assembler::CompareToStack(Register src, intptr_t depth) {
620 LoadFromStack(TMP, depth);
621 CompareRegisters(src, TMP);
624void Assembler::CompareObject(Register reg,
const Object&
object) {
627 CompareObjectRegisters(reg, NULL_REG);
628 }
else if (target::IsSmi(
object)) {
629 CompareImmediate(reg, target::ToRawSmi(
object), kObjectBytes);
631 LoadObject(TMP,
object);
632 CompareObjectRegisters(reg, TMP);
636void Assembler::LoadImmediate(Register reg, int64_t imm) {
639 movz(reg, Immediate(0), 0);
645 Operand::OperandType ot;
646 ot = Operand::CanHold(imm, kXRegSizeInBits, &op);
647 if (ot == Operand::BitfieldImm) {
648 orri(reg, ZR, Immediate(imm));
653 const uint32_t w0 = Utils::Low32Bits(imm);
654 const uint32_t w1 = Utils::High32Bits(imm);
655 const uint16_t h0 = Utils::Low16Bits(w0);
656 const uint16_t h1 = Utils::High16Bits(w0);
657 const uint16_t h2 = Utils::Low16Bits(w1);
658 const uint16_t h3 = Utils::High16Bits(w1);
661 if (w1 == 0xffffffff) {
663 movn(reg, Immediate(~h0), 0);
665 movn(reg, Immediate(~h1), 1);
666 movk(reg, Immediate(h0), 0);
674 movn(reg, Immediate(~h2), 2);
676 movk(reg, Immediate(h1), 1);
679 movk(reg, Immediate(h0), 0);
685 if ((w1 != 0) && constant_pool_allowed()) {
686 const intptr_t index = object_pool_builder().FindImmediate(imm);
687 LoadWordFromPoolIndex(reg, index);
691 bool initialized =
false;
693 movz(reg, Immediate(h0), 0);
698 movk(reg, Immediate(h1), 1);
700 movz(reg, Immediate(h1), 1);
706 movk(reg, Immediate(h2), 2);
708 movz(reg, Immediate(h2), 2);
714 movk(reg, Immediate(h3), 3);
716 movz(reg, Immediate(h3), 3);
721void Assembler::LoadSImmediate(VRegister vd,
float imms) {
722 int32_t imm32 = bit_cast<int32_t, float>(imms);
725 }
else if (constant_pool_allowed()) {
726 intptr_t index = object_pool_builder().FindImmediate(imm32);
727 intptr_t
offset = target::ObjectPool::element_offset(index);
728 LoadSFromOffset(vd, PP,
offset);
730 LoadImmediate(TMP, imm32);
735void Assembler::LoadDImmediate(VRegister vd,
double immd) {
736 if (fmovdi(vd, immd))
return;
738 int64_t imm64 = bit_cast<int64_t, double>(immd);
741 }
else if (constant_pool_allowed()) {
742 intptr_t index = object_pool_builder().FindImmediate64(imm64);
743 intptr_t
offset = target::ObjectPool::element_offset(index);
744 LoadDFromOffset(vd, PP,
offset);
746 LoadImmediate(TMP, imm64);
751void Assembler::LoadQImmediate(VRegister vd, simd128_value_t immq) {
752 ASSERT(constant_pool_allowed());
753 intptr_t index = object_pool_builder().FindImmediate128(immq);
754 intptr_t
offset = target::ObjectPool::element_offset(index);
755 LoadQFromOffset(vd, PP,
offset);
758void Assembler::BranchLink(intptr_t target_code_pool_index,
759 CodeEntryKind entry_kind) {
767 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
768 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
772void Assembler::BranchLink(
774 ObjectPoolBuilderEntry::Patchability patchable,
775 CodeEntryKind entry_kind,
776 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
777 const intptr_t index = object_pool_builder().FindObject(
779 BranchLink(index, entry_kind);
782void Assembler::BranchLinkWithEquivalence(
const Code&
target,
783 const Object& equivalence,
784 CodeEntryKind entry_kind) {
785 const intptr_t index =
787 BranchLink(index, entry_kind);
790void Assembler::AddImmediate(Register dest,
794 ASSERT(sz == kEightBytes || sz == kFourBytes);
803 if (Operand::CanHold(imm,
width, &op) == Operand::Immediate) {
804 add(dest, rn, op, sz);
805 }
else if (Operand::CanHold(-
static_cast<uint64_t
>(imm),
width, &op) ==
806 Operand::Immediate) {
807 sub(dest, rn, op, sz);
811 LoadImmediate(TMP2, imm);
812 add(dest, rn, Operand(TMP2), sz);
816void Assembler::AddImmediateSetFlags(Register dest,
820 ASSERT(sz == kEightBytes || sz == kFourBytes);
823 if (Operand::CanHold(imm,
width, &op) == Operand::Immediate) {
825 adds(dest, rn, op, sz);
826 }
else if (Operand::CanHold(-
static_cast<uint64_t
>(imm),
width, &op) ==
827 Operand::Immediate) {
829 subs(dest, rn, op, sz);
833 LoadImmediate(TMP2, imm);
834 adds(dest, rn, Operand(TMP2), sz);
838void Assembler::SubImmediateSetFlags(Register dest,
842 ASSERT(sz == kEightBytes || sz == kFourBytes);
845 if (Operand::CanHold(imm,
width, &op) == Operand::Immediate) {
847 subs(dest, rn, op, sz);
848 }
else if (Operand::CanHold(-
static_cast<uint64_t
>(imm),
width, &op) ==
849 Operand::Immediate) {
851 adds(dest, rn, op, sz);
855 LoadImmediate(TMP2, imm);
856 subs(dest, rn, Operand(TMP2), sz);
860void Assembler::AndImmediate(Register rd,
864 ASSERT(sz == kEightBytes || sz == kFourBytes);
867 if (Operand::IsImmLogical(imm,
width, &imm_op)) {
868 andi(rd, rn, Immediate(imm), sz);
870 LoadImmediate(TMP, imm);
871 and_(rd, rn, Operand(TMP), sz);
875void Assembler::OrImmediate(Register rd,
879 ASSERT(sz == kEightBytes || sz == kFourBytes);
882 if (Operand::IsImmLogical(imm,
width, &imm_op)) {
883 orri(rd, rn, Immediate(imm), sz);
885 LoadImmediate(TMP, imm);
886 orr(rd, rn, Operand(TMP), sz);
890void Assembler::XorImmediate(Register rd,
894 ASSERT(sz == kEightBytes || sz == kFourBytes);
897 if (Operand::IsImmLogical(imm,
width, &imm_op)) {
898 eori(rd, rn, Immediate(imm), sz);
900 LoadImmediate(TMP, imm);
901 eor(rd, rn, Operand(TMP), sz);
905void Assembler::TestImmediate(Register rn, int64_t imm, OperandSize sz) {
906 ASSERT(sz == kEightBytes || sz == kFourBytes);
909 if (Operand::IsImmLogical(imm,
width, &imm_op)) {
910 tsti(rn, Immediate(imm), sz);
912 LoadImmediate(TMP, imm);
913 tst(rn, Operand(TMP), sz);
917void Assembler::CompareImmediate(Register rn, int64_t imm, OperandSize sz) {
918 ASSERT(sz == kEightBytes || sz == kFourBytes);
921 if (Operand::CanHold(imm,
width, &op) == Operand::Immediate) {
923 }
else if (Operand::CanHold(-
static_cast<uint64_t
>(imm),
width, &op) ==
924 Operand::Immediate) {
928 LoadImmediate(TMP2, imm);
929 cmp(rn, Operand(TMP2), sz);
933Address Assembler::PrepareLargeOffset(Register
base,
936 Address::AddressType addr_type) {
937 ASSERT(addr_type == Address::AddressType::Offset ||
938 addr_type == Address::AddressType::PairOffset);
939 if (Address::CanHoldOffset(
offset, addr_type, sz)) {
944 const uint32_t upper20 =
offset & 0xfffff000;
945 const uint32_t lower12 =
offset & 0x00000fff;
947 (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
948 Address::CanHoldOffset(lower12, addr_type, sz)) {
950 return Address(TMP2, lower12, addr_type);
952 LoadImmediate(TMP2,
offset);
953 if (addr_type == Address::AddressType::Offset) {
954 return Address(
base, TMP2);
956 add(TMP2, TMP2, Operand(
base));
957 return Address(TMP2, 0, Address::AddressType::PairOffset);
961void Assembler::Load(Register dst,
const Address& addr, OperandSize sz) {
962 if (
addr.type() == Address::AddressType::Offset ||
963 addr.type() == Address::AddressType::PairOffset) {
964 ldr(dst, PrepareLargeOffset(
addr.base(),
addr.offset(), sz,
addr.type()),
972void Assembler::LoadSFromOffset(VRegister dest, Register
base, int32_t
offset) {
973 auto const type = Address::AddressType::Offset;
977void Assembler::LoadDFromOffset(VRegister dest, Register
base, int32_t
offset) {
978 auto const type = Address::AddressType::Offset;
982void Assembler::LoadQFromOffset(VRegister dest, Register
base, int32_t
offset) {
983 auto const type = Address::AddressType::Offset;
987void Assembler::Store(Register src,
const Address& addr, OperandSize sz) {
988 if (
addr.type() == Address::AddressType::Offset ||
989 addr.type() == Address::AddressType::PairOffset) {
990 str(src, PrepareLargeOffset(
addr.base(),
addr.offset(), sz,
addr.type()),
998void Assembler::StorePairToOffset(Register low,
1003 auto const type = Address::AddressType::PairOffset;
1007void Assembler::StoreSToOffset(VRegister src, Register
base, int32_t
offset) {
1008 auto const type = Address::AddressType::Offset;
1012void Assembler::StoreDToOffset(VRegister src, Register
base, int32_t
offset) {
1013 auto const type = Address::AddressType::Offset;
1017void Assembler::StoreQToOffset(VRegister src, Register
base, int32_t
offset) {
1018 auto const type = Address::AddressType::Offset;
1022void Assembler::VRecps(VRegister vd, VRegister vn) {
1029 vrecpss(VTMP, vn, vd);
1030 vmuls(vd, vd, VTMP);
1031 vrecpss(VTMP, vn, vd);
1032 vmuls(vd, vd, VTMP);
1035void Assembler::VRSqrts(VRegister vd, VRegister vn) {
1043 vmuls(VTMP, vd, vd);
1044 vrsqrtss(VTMP, vn, VTMP);
1045 vmuls(vd, vd, VTMP);
1047 vmuls(VTMP, vd, vd);
1048 vrsqrtss(VTMP, vn, VTMP);
1049 vmuls(vd, vd, VTMP);
1052#if defined(DART_COMPRESSED_POINTERS)
1053void Assembler::LoadCompressed(Register dest,
const Address& slot) {
1054 Load(dest, slot, kUnsignedFourBytes);
1055 add(dest, dest, Operand(HEAP_BITS, LSL, 32));
1059void Assembler::StoreBarrier(Register
object,
1061 CanBeSmi can_be_smi,
1063 const bool spill_lr = lr_state().LRContainsReturnAddress();
1066 ASSERT(
object != scratch);
1067 ASSERT(value != scratch);
1074 ASSERT(scratch != kNoRegister);
1084 if (can_be_smi == kValueCanBeSmi) {
1085 BranchIfSmi(value, &
done);
1089 BranchIfNotSmi(value, &passed_check, kNearJump);
1091 Bind(&passed_check);
1094 ldr(scratch, FieldAddress(
object, target::Object::tags_offset()),
1096 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1098 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1099 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1103 SPILLS_LR_TO_FRAME(Push(
LR));
1106 if (value != kWriteBarrierValueReg) {
1109 if (
object != kWriteBarrierValueReg) {
1110 Push(kWriteBarrierValueReg);
1114 objectForCall = (
value ==
R2) ? R3 :
R2;
1115 PushPair(kWriteBarrierValueReg, objectForCall);
1116 mov(objectForCall,
object);
1118 mov(kWriteBarrierValueReg, value);
1121 generate_invoke_write_barrier_wrapper_(objectForCall);
1123 if (value != kWriteBarrierValueReg) {
1124 if (
object != kWriteBarrierValueReg) {
1125 Pop(kWriteBarrierValueReg);
1127 PopPair(kWriteBarrierValueReg, objectForCall);
1131 RESTORES_LR_FROM_FRAME(Pop(
LR));
1136void Assembler::ArrayStoreBarrier(Register
object,
1139 CanBeSmi can_be_smi,
1141 const bool spill_lr = lr_state().LRContainsReturnAddress();
1144 ASSERT(
object != scratch);
1147 ASSERT(value != scratch);
1156 ASSERT(scratch != kNoRegister);
1166 if (can_be_smi == kValueCanBeSmi) {
1167 BranchIfSmi(value, &
done);
1171 BranchIfNotSmi(value, &passed_check, kNearJump);
1173 Bind(&passed_check);
1176 ldr(scratch, FieldAddress(
object, target::Object::tags_offset()),
1178 ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1180 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1181 tst(scratch, Operand(HEAP_BITS, LSR, 32));
1184 SPILLS_LR_TO_FRAME(Push(
LR));
1186 if ((
object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1187 (slot != kWriteBarrierSlotReg)) {
1193 generate_invoke_array_write_barrier_();
1195 RESTORES_LR_FROM_FRAME(Pop(
LR));
1200void Assembler::StoreObjectIntoObjectNoBarrier(Register
object,
1201 const Address& address,
1202 const Object& value,
1203 MemoryOrder memory_order,
1210 }
else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
1217 LoadObject(src, value);
1219 if (memory_order == kRelease) {
1220 StoreRelease(src, address, size);
1222 Store(src, address, size);
1226void Assembler::VerifyStoreNeedsNoWriteBarrier(Register
object,
1234 BranchIfSmi(value, &
done, kNearJump);
1235 ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
1236 tbz(&
done, TMP, target::UntaggedObject::kNewBit);
1237 ldr(TMP, FieldAddress(
object, target::Object::tags_offset()), kUnsignedByte);
1238 tbz(&
done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
1239 Stop(
"Write barrier is required");
1243void Assembler::StoreInternalPointer(Register
object,
1244 const Address& dest,
1249void Assembler::ExtractClassIdFromTags(Register
result, Register tags) {
1250 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
1251 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
1252 ubfx(
result, tags, target::UntaggedObject::kClassIdTagPos,
1253 target::UntaggedObject::kClassIdTagSize);
1256void Assembler::ExtractInstanceSizeFromTags(Register
result, Register tags) {
1257 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
1258 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
1259 ubfx(
result, tags, target::UntaggedObject::kSizeTagPos,
1260 target::UntaggedObject::kSizeTagSize);
1261 LslImmediate(
result,
result, target::ObjectAlignment::kObjectAlignmentLog2);
1264void Assembler::LoadClassId(Register
result, Register
object) {
1265 ldr(
result, FieldAddress(
object, target::Object::tags_offset()));
1269void Assembler::LoadClassById(Register
result, Register class_id) {
1272 const intptr_t table_offset =
1273 target::IsolateGroup::cached_class_table_table_offset();
1275 LoadIsolateGroup(
result);
1277 ldr(
result, Address(
result, class_id, UXTX, Address::Scaled));
1280void Assembler::CompareClassId(Register
object,
1283 LoadClassId(TMP,
object);
1284 CompareImmediate(TMP, class_id);
1287void Assembler::LoadClassIdMayBeSmi(Register
result, Register
object) {
1290 LoadImmediate(
result, kSmiCid);
1291 BranchIfSmi(
object, &
done);
1292 LoadClassId(
result,
object);
1296void Assembler::LoadTaggedClassIdMayBeSmi(Register
result, Register
object) {
1298 LoadClassIdMayBeSmi(TMP,
object);
1302 LoadImmediate(
result, target::ToRawSmi(kSmiCid));
1303 BranchIfSmi(
object, &
done);
1304 LoadClassId(
result,
object);
1310void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
1315 Comment(
"Check that object in register has cid %" Pd "", cid);
1317 LoadClassIdMayBeSmi(scratch, src);
1318 CompareImmediate(scratch, cid);
1319 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
1321 CompareImmediate(scratch, kNullCid);
1322 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
1330void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1333 if (frame_space != 0) {
1334 AddImmediate(SP, -frame_space);
1336 if (OS::ActivationFrameAlignment() > 1) {
1337 andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1341void Assembler::EmitEntryFrameVerification() {
1344 ASSERT(!constant_pool_allowed());
1345 LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
1347 add(TMP, TMP, Operand(FPREG));
1348 cmp(TMP, Operand(SPREG));
1357void Assembler::RestoreCodePointer() {
1359 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1363void Assembler::RestorePinnedRegisters() {
1365 compiler::Address(THR, target::Thread::write_barrier_mask_offset()));
1366 LslImmediate(HEAP_BITS, HEAP_BITS, 32);
1367 ldr(NULL_REG, compiler::Address(THR, target::Thread::object_null_offset()));
1368#if defined(DART_COMPRESSED_POINTERS)
1369 ldr(TMP, compiler::Address(THR, target::Thread::heap_base_offset()));
1370 orr(HEAP_BITS, HEAP_BITS, Operand(TMP, LSR, 32));
1374void Assembler::SetupGlobalPoolAndDispatchTable() {
1375 ASSERT(FLAG_precompiled_mode);
1376 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1377 sub(PP, PP, Operand(kHeapObjectTag));
1378 ldr(DISPATCH_TABLE_REG,
1379 Address(THR, target::Thread::dispatch_table_array_offset()));
1382void Assembler::CheckCodePointer() {
1384 if (!FLAG_check_code_pointer) {
1387 Comment(
"CheckCodePointer");
1388 Label cid_ok, instructions_ok;
1390 CompareClassId(CODE_REG, kCodeCid);
1395 const intptr_t entry_offset =
1396 CodeSize() + target::Instructions::HeaderSize() -
kHeapObjectTag;
1397 adr(R0, Immediate(-entry_offset));
1398 ldr(TMP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1399 cmp(R0, Operand(TMP));
1400 b(&instructions_ok, EQ);
1402 Bind(&instructions_ok);
1417void Assembler::SetupDartSP(intptr_t reserve ) {
1420 AddImmediate(CSP, CSP, -Utils::RoundUp(reserve, 16));
1423void Assembler::SetupCSPFromThread(Register thr) {
1429 ldr(TMP, Address(thr, target::Thread::saved_stack_limit_offset()));
1430 AddImmediate(CSP, TMP, -4096);
1437void Assembler::RestoreCSP() {
1441void Assembler::SetReturnAddress(Register value) {
1442 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(
LR, value));
1445void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1446 AsrImmediate(reg, reg, shift);
1449void Assembler::CompareWords(Register reg1,
1457 AddImmediate(reg1,
offset - kHeapObjectTag);
1458 AddImmediate(reg2,
offset - kHeapObjectTag);
1462 BranchIfZero(
count,
equals, Assembler::kNearJump);
1463 AddImmediate(
count, -1);
1464 ldr(temp, Address(reg1, 8, Address::PostIndex));
1465 ldr(TMP, Address(reg2, 8, Address::PostIndex));
1466 cmp(temp, Operand(TMP));
1467 BranchIf(
EQUAL, &loop, Assembler::kNearJump);
1470void Assembler::EnterFrame(intptr_t frame_size) {
1471 SPILLS_LR_TO_FRAME(PushPair(FP,
LR));
1474 if (frame_size > 0) {
1475 sub(SP, SP, Operand(frame_size));
1479void Assembler::LeaveFrame() {
1481 RESTORES_LR_FROM_FRAME(PopPair(FP,
LR));
1484void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
1485 ASSERT(!constant_pool_allowed());
1489 if (!FLAG_precompiled_mode) {
1490 TagAndPushPPAndPcMarker();
1493 if (new_pp == kNoRegister) {
1499 set_constant_pool_allowed(
true);
1502 if (frame_size > 0) {
1503 AddImmediate(SP, -frame_size);
1512void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
1513 ASSERT(!constant_pool_allowed());
1514 Comment(
"EnterOsrFrame");
1515 RestoreCodePointer();
1518 if (extra_size > 0) {
1519 AddImmediate(SP, -extra_size);
1523void Assembler::LeaveDartFrame() {
1524 if (!FLAG_precompiled_mode) {
1528 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
1529 sub(PP, PP, Operand(kHeapObjectTag));
1531 set_constant_pool_allowed(
false);
1535void Assembler::EnterFullSafepoint(Register
state) {
1544 Label slow_path,
done, retry;
1549 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1550 add(addr, THR, Operand(addr));
1553 cmp(
state, Operand(target::Thread::full_safepoint_state_unacquired()));
1556 movz(
state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
1557 stxr(TMP,
state, addr);
1565 ldr(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
1566 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1572void Assembler::TransitionGeneratedToNative(Register destination,
1573 Register new_exit_frame,
1574 Register new_exit_through_ffi,
1575 bool enter_safepoint) {
1577 StoreToOffset(new_exit_frame, THR,
1578 target::Thread::top_exit_frame_info_offset());
1580 StoreToOffset(new_exit_through_ffi, THR,
1581 target::Thread::exit_through_ffi_offset());
1582 Register tmp = new_exit_through_ffi;
1585 StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
1586 LoadImmediate(tmp, target::Thread::native_execution_state());
1587 StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
1589 if (enter_safepoint) {
1590 EnterFullSafepoint(tmp);
1594void Assembler::ExitFullSafepoint(Register
state,
1595 bool ignore_unwind_in_progress) {
1603 Label slow_path,
done, retry;
1608 movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
1609 add(addr, THR, Operand(addr));
1612 cmp(
state, Operand(target::Thread::full_safepoint_state_acquired()));
1615 movz(
state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
1616 stxr(TMP,
state, addr);
1624 if (ignore_unwind_in_progress) {
1628 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
1630 ldr(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
1632 ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
1638void Assembler::TransitionNativeToGenerated(Register
state,
1639 bool exit_safepoint,
1640 bool ignore_unwind_in_progress) {
1641 if (exit_safepoint) {
1642 ExitFullSafepoint(
state, ignore_unwind_in_progress);
1645 ASSERT(!ignore_unwind_in_progress);
1648 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
1649 LoadImmediate(
state, target::Thread::full_safepoint_state_acquired());
1650 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
1651 and_(TMP, TMP, Operand(
state));
1660 LoadImmediate(
state, target::Thread::vm_tag_dart_id());
1661 StoreToOffset(
state, THR, target::Thread::vm_tag_offset());
1662 LoadImmediate(
state, target::Thread::generated_execution_state());
1663 StoreToOffset(
state, THR, target::Thread::execution_state_offset());
1666 StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
1667 LoadImmediate(
state, 0);
1668 StoreToOffset(
state, THR, target::Thread::exit_through_ffi_offset());
// Calls a non-leaf runtime entry through the call_to_runtime trampoline.
// R5 carries the runtime-entry function pointer (loaded from the Thread);
// the trampoline entry point is also read off THR. Leaf entries must go
// through LeafRuntimeScope instead.
1671void Assembler::CallRuntime(
const RuntimeEntry& entry,
 1673 ASSERT(!entry.is_leaf());
 1676 ldr(R5, compiler::Address(THR, entry.OffsetFromThread()));
 1678 Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
// Register set preserved around leaf runtime calls: all Dart-volatile CPU
// registers plus every FPU register.
1683static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
 1684 kAllFpuRegistersList);
// Shorthand used by LeafRuntimeScope methods below to emit via assembler_.
 1687#define __ assembler_->
// RAII scope for leaf runtime calls: sets up a frame (frame setup lines are
// elided in this extracted view), optionally pushes the volatile register
// set, and reserves `frame_size` bytes of ABI-aligned stack space.
1689LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
 1690 intptr_t frame_size,
 1691 bool preserve_registers)
 1692 : assembler_(assembler), preserve_registers_(preserve_registers) {
 1693 __ Comment(
 "EnterCallRuntimeFrame");
 1696 if (preserve_registers) {
 1697 __ PushRegisters(kRuntimeCallSavedRegisters);
 1708 __ ReserveAlignedFrameSpace(frame_size);
// Performs the leaf runtime call: tags the thread's vm_tag with the entry's
// address, makes the call (elided between lines 1722 and 1724), then restores
// the Dart tag and re-derives CSP from the Thread.
1711void LeafRuntimeScope::Call(
const RuntimeEntry& entry,
 1721 __ ldr(TMP, compiler::Address(THR, entry.OffsetFromThread()));
 1722 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
 // Back from C code: restore the Dart VM tag.
 1724 __ LoadImmediate(TMP, VMTag::kDartTagId);
 1725 __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
 // C code may have clobbered CSP; restore it from the thread state.
 1726 __ SetupCSPFromThread(THR);
// Tears down the leaf-runtime frame. When registers were preserved, SP is
// repositioned just below the saved-register block (computed from FP) before
// popping; the FPU-size term of kPushedRegistersSize is elided in this view.
1729LeafRuntimeScope::~LeafRuntimeScope() {
 1730 if (preserve_registers_) {
 1734 const intptr_t kPushedRegistersSize =
 1735 kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
 1737 __ AddImmediate(SP, FP, -kPushedRegistersSize);
 1738 __ PopRegisters(kRuntimeCallSavedRegisters);
// Frame helpers for stubs and C calls. NOTE(review): the bodies of
// EnterStubFrame/LeaveStubFrame/LeaveCFrame were elided by extraction —
// only the signatures survive; do not infer their contents from here.
1748void Assembler::EnterStubFrame() {
 1752void Assembler::LeaveStubFrame() {
 1756void Assembler::EnterCFrame(intptr_t frame_space) {
 // EnterCFrame ends by reserving ABI-aligned space for outgoing arguments.
 1766 ReserveAlignedFrameSpace(frame_space);
 1769void Assembler::LeaveCFrame() {
// Emits the JIT monomorphic checked entry: verifies the receiver's class id
// (R0 = receiver, R5 = ICData-like cache array) against the cached cid,
// bumps the usage count, and falls through to the normal entry on a hit.
// Far branches are disabled so the entry has a fixed, patchable size.
// NOTE(review): several emitted instructions are elided in this view
// (e.g. the branches around `miss`/`immediate`).
1776void Assembler::MonomorphicCheckedEntryJIT() {
 1777 has_monomorphic_entry_ =
 true;
 // Fixed-size entry: temporarily force near branches, restored at the end.
 1778 const bool saved_use_far_branches = use_far_branches();
 1779 set_use_far_branches(
 false);
 1780 const intptr_t
 start = CodeSize();
 1782 Label immediate, miss;
 // Miss handler entry point comes from the per-thread stub table.
 1784 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
 1787 Comment(
 "MonomorphicCheckedEntry");
 // Entry must land exactly at the architected monomorphic-entry offset.
 1789 target::Instructions::kMonomorphicEntryOffsetJIT);
 // Cache layout: element 0 = expected cid (as Smi), element 1 = hit count.
 1791 const intptr_t cid_offset = target::Array::element_offset(0);
 1792 const intptr_t count_offset = target::Array::element_offset(1);
 1795 ldr(R1, FieldAddress(R5, cid_offset), kObjectBytes);
 1796 ldr(R2, FieldAddress(R5, count_offset), kObjectBytes);
 1797 LoadClassIdMayBeSmi(IP0, R0);
 1798 add(R2, R2, Operand(target::ToRawSmi(1)), kObjectBytes);
 // Compare cached cid (Smi) with receiver cid shifted into Smi form.
 1799 cmp(R1, Operand(IP0, LSL, 1), kObjectBytes);
 1801 str(R2, FieldAddress(R5, count_offset), kObjectBytes);
 // R4 = 0: signals "no arguments descriptor" to the callee — TODO confirm.
 1802 LoadImmediate(R4, 0);
 1806 target::Instructions::kPolymorphicEntryOffsetJIT);
 1808 set_use_far_branches(saved_use_far_branches);
// AOT variant of the monomorphic checked entry: like the JIT version but
// compares R5 (expected cid, Smi-tagged by the LSL 1) directly against the
// receiver's class id — no count update, no Smi receiver handling
// (LoadClassId, not LoadClassIdMayBeSmi). Emitted instructions between the
// numbered lines are elided in this extracted view.
1813void Assembler::MonomorphicCheckedEntryAOT() {
 1814 has_monomorphic_entry_ =
 true;
 1815 bool saved_use_far_branches = use_far_branches();
 1816 set_use_far_branches(
 false);
 1818 const intptr_t
 start = CodeSize();
 1820 Label immediate, miss;
 1822 ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
 1825 Comment(
 "MonomorphicCheckedEntry");
 1827 target::Instructions::kMonomorphicEntryOffsetAOT);
 1828 LoadClassId(IP0, R0);
 // R5 holds the expected cid; LSL 1 matches its Smi tagging.
 1829 cmp(R5, Operand(IP0, LSL, 1), kObjectBytes);
 1834 target::Instructions::kPolymorphicEntryOffsetAOT);
 1836 set_use_far_branches(saved_use_far_branches);
// Pads the instruction stream up to the monomorphic entry offset, emits a
// branch to `label` (elided between the two loops), then pads again to the
// polymorphic entry offset. Loop bodies (the padding instructions) are
// elided in this extracted view.
1839void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
 1840 has_monomorphic_entry_ =
 true;
 1841 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
 1845 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
// Folds `other` into the running 32-bit hash in `hash`. Only the initial
// add survives extraction; the usual shift/xor mixing steps are elided.
1850void Assembler::CombineHashes(Register
 hash, Register other) {
 // 32-bit add: hash += other (kFourBytes keeps arithmetic in W registers).
 1852 add(
 hash,
 hash, Operand(other), kFourBytes);
// Finalizes a combined hash to `bit_size` bits (final avalanche steps are
// elided in this view), then masks it down when fewer than 32 bits were
// requested.
1859void Assembler::FinalizeHashForSize(intptr_t bit_size,
 1865 ASSERT(bit_size <= kBitsPerInt32);
 1871 if (bit_size < kBitsPerInt32) {
 // Keep only the low bit_size bits of the result.
 1874 andis(
 hash,
 hash, Immediate(Utils::NBitMask(bit_size)));
// Branches to `trace` when allocation tracing is enabled for class `cid`:
// walks IsolateGroup -> ClassTable -> per-class tracing-state slot and tests
// the loaded byte with cbnz. `temp_reg` is clobbered.
1883void Assembler::MaybeTraceAllocation(intptr_t cid,
 1886 JumpDistance distance) {
 1889 LoadIsolateGroup(temp_reg);
 1890 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
 1893 target::ClassTable::allocation_tracing_state_table_offset()));
 // Slot offset is computed statically from the compile-time cid.
 1894 LoadFromOffset(temp_reg, temp_reg,
 1895 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
 // Non-zero tracing state => take the trace path.
 1897 cbnz(trace, temp_reg);
// Register-cid variant of MaybeTraceAllocation: same table walk, but the
// class id arrives in a register, so it is added to the table base and the
// slot is read at the offset for cid 0. `temp_reg` is clobbered.
1900void Assembler::MaybeTraceAllocation(Register cid,
 1903 JumpDistance distance) {
 1905 LoadIsolateGroup(temp_reg);
 1906 ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
 1909 target::ClassTable::allocation_tracing_state_table_offset()));
 // Dynamic cid: index the tracing-state table by register.
 1910 AddRegisters(temp_reg, cid);
 1911 LoadFromOffset(temp_reg, temp_reg,
 1912 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
 1914 cbnz(trace, temp_reg);
// Inline new-space bump allocation of a fixed-size object of class `cid`.
// On success `instance_reg` holds the tagged instance with its header tags
// stored; on failure (inline alloc disabled, size not new-space-allocatable,
// or heap exhausted — that branch is elided here) control reaches `failure`.
// NOTE(review): branches to `failure` and the tracing check are elided by
// extraction; comments describe only what the visible lines do.
1918void Assembler::TryAllocateObject(intptr_t cid,
 1919 intptr_t instance_size,
 1921 JumpDistance distance,
 1922 Register instance_reg,
 1923 Register temp_reg) {
 1924 ASSERT(failure !=
 nullptr);
 1925 ASSERT(instance_size != 0);
 1926 ASSERT(instance_reg != temp_reg);
 1927 ASSERT(temp_reg != kNoRegister);
 1928 ASSERT(Utils::IsAligned(instance_size,
 1929 target::ObjectAlignment::kObjectAlignment));
 1930 if (FLAG_inline_alloc &&
 1931 target::Heap::IsAllocatableInNewSpace(instance_size)) {
 // ldp below relies on top/end being adjacent words in the Thread.
 1936 RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
 1937 target::Thread::end_offset());
 // instance_reg = heap top, temp_reg = heap end, loaded in one pair load.
 1938 ldp(instance_reg, temp_reg,
 1939 Address(THR, target::Thread::top_offset(), Address::PairOffset));
 1944 AddImmediate(instance_reg, instance_size);
 // Compare new top against end; the branch to `failure` is elided here.
 1947 cmp(temp_reg, Operand(instance_reg));
 1949 CheckAllocationCanary(instance_reg, temp_reg);
 // Commit the bump: publish the new top pointer.
 1953 str(instance_reg, Address(THR, target::Thread::top_offset()));
 // Rewind to the object start and apply the heap-object tag.
 1955 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
 1957 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
 1958 LoadImmediate(temp_reg, tags);
 1959 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
// Inline new-space allocation of a variable-length object: like
// TryAllocateObject but also yields `end_address` (one past the new object)
// for the caller to initialize the payload. Failure branches and the
// tracing check are elided in this extracted view.
1965void Assembler::TryAllocateArray(intptr_t cid,
 1966 intptr_t instance_size,
 1969 Register end_address,
 1972 if (FLAG_inline_alloc &&
 1973 target::Heap::IsAllocatableInNewSpace(instance_size)) {
 // instance = current heap top; end_address = top + instance_size.
 1979 ldr(
 instance, Address(THR, target::Thread::top_offset()));
 // SetFlags: overflow of the addition must also route to failure (branch elided).
 1980 AddImmediateSetFlags(end_address,
 instance, instance_size);
 1986 ldr(temp2, Address(THR, target::Thread::end_offset()));
 1987 cmp(end_address, Operand(temp2));
 1989 CheckAllocationCanary(
 instance, temp2);
 // Commit: publish the new top, then tag the object and write its header.
 1993 str(end_address, Address(THR, target::Thread::top_offset()));
 1999 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
 2000 LoadImmediate(temp2, tags);
 2001 str(temp2, FieldAddress(
 instance, target::Object::tags_offset()));
// Emits a word-at-a-time forward copy loop: post-indexed load from `src`,
// post-indexed store to `dst`, decrement `size` by one word, loop while
// non-zero. Clobbers src/dst/size/temp; assumes size is a non-zero multiple
// of the word size (zero would wrap — precondition presumably checked by
// callers; elided here).
2007void Assembler::CopyMemoryWords(Register src,
 2014 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
 2015 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
 2016 __ subs(size, size, Operand(target::kWordSize));
 2017 __ b(&loop, NOT_ZERO);
// Emits a BL with zero displacement, then patches the just-emitted
// instruction in the buffer with the (still unrelocated) distance to the
// target; final fixup happens at relocation time.
2021void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
 2023 EmitUnconditionalBranchOp(BL, 0);
 // Pattern wraps the last kLengthInBytes of the buffer (the BL just emitted).
 2025 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
 2026 PcRelativeCallPattern::kLengthInBytes);
 2027 pattern.set_distance(offset_into_target);
// Tail-call twin of GenerateUnRelocatedPcRelativeCall: emits an
// unconditional B (no link) and records the unrelocated distance in the
// emitted instruction for later relocation.
2030void Assembler::GenerateUnRelocatedPcRelativeTailCall(
 2031 intptr_t offset_into_target) {
 2033 EmitUnconditionalBranchOp(
 B, 0);
 2034 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
 2035 PcRelativeTailCallPattern::kLengthInBytes);
 2036 pattern.set_distance(offset_into_target);
// Returns whether an element access at a compile-time-constant Smi index can
// be encoded as a direct Address: computes the byte offset
// (index * scale + header adjustment) and checks it fits in 32 bits and in
// the addressing-mode immediate for the element's operand size.
2039bool Assembler::AddressCanHoldConstantIndex(
const Object& constant,
 2042 intptr_t index_scale) {
 // Non-Smi constants can never be folded into an address.
 2043 if (!IsSafeSmi(constant))
 return false;
 2044 const int64_t index = target::SmiValue(constant);
 2045 const int64_t
 offset = index * index_scale + HeapDataOffset(is_external, cid);
 // Offsets beyond int32 can't be encoded (early-false body elided here).
 2046 if (!Utils::IsInt(32,
 offset)) {
 2049 return Address::CanHoldOffset(
 static_cast<int32_t
>(
 offset), Address::Offset,
 2050 Address::OperandSizeFor(cid));
// Builds the Address for a constant-index element access:
// array + index * scale + header adjustment. Callers must have validated
// encodability (see AddressCanHoldConstantIndex); the ASSERT re-checks it.
2053Address Assembler::ElementAddressForIntIndex(
bool is_external,
 2055 intptr_t index_scale,
 2057 intptr_t index)
const {
 2058 const int64_t
 offset = index * index_scale + HeapDataOffset(is_external, cid);
 2061 ASSERT(Address::CanHoldOffset(
 offset, Address::Offset, size));
 2062 return Address(array,
 static_cast<int32_t
>(
 offset));
// Materializes the element address for a constant index into `address`
// (register form of ElementAddressForIntIndex, for when an Address operand
// is not enough).
2065void Assembler::ComputeElementAddressForIntIndex(Register address,
 2068 intptr_t index_scale,
 2071 const int64_t
 offset = index * index_scale + HeapDataOffset(is_external, cid);
 2072 AddImmediate(address, array,
 offset);
// Convenience wrapper: delegates to ElementAddressForRegIndexWithSize using
// the operand size implied by the element class id.
2075Address Assembler::ElementAddressForRegIndex(
bool is_external,
 2077 intptr_t index_scale,
 2082 return ElementAddressForRegIndexWithSize(
 2083 is_external, cid, Address::OperandSizeFor(cid), index_scale,
 2084 index_unboxed, array, index, temp);
// Builds an Address for a register-index element access. The total shift
// combines the element scale with Smi-untagging (when the index is boxed).
// Under compressed pointers a boxed index is a 32-bit value and must be
// sign-extended (SXTW); otherwise full 64-bit (UXTX) forms are used.
// Three shapes: zero offset+shift -> scaled-register Address; negative net
// shift -> arithmetic right shift into `temp`; positive -> left shift.
// NOTE(review): some else-branch braces are elided by extraction.
2087Address Assembler::ElementAddressForRegIndexWithSize(
bool is_external,
 2090 intptr_t index_scale,
 // Boxed (Smi) index carries a tag bit: net shift is reduced by kSmiTagShift.
 2096 const intptr_t boxing_shift = index_unboxed ? 0 : -
 kSmiTagShift;
 2097 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
 2098 const int32_t
 offset = HeapDataOffset(is_external, cid);
 2099#if !defined(DART_COMPRESSED_POINTERS)
 2100 const bool index_is_32bit =
 false;
 // Compressed pointers: a boxed Smi index is only 32 bits wide.
 2102 const bool index_is_32bit = !index_unboxed;
 2106 if ((
 offset == 0) && (shift == 0)) {
 2107 if (index_is_32bit) {
 2108 return Address(array, index, SXTW, Address::Unscaled);
 2110 return Address(array, index, UXTX, Address::Unscaled);
 2112 }
else if (shift < 0) {
 // Negative net shift (element smaller than Smi tag): shift right by 1.
 2114 if (index_is_32bit) {
 2115 AsrImmediate(temp, index, 1, kFourBytes);
 2116 add(temp, array, Operand(temp, SXTW, 0));
 2118 add(temp, array, Operand(index, ASR, 1));
 2121 if (index_is_32bit) {
 2122 add(temp, array, Operand(index, SXTW, shift));
 2124 add(temp, array, Operand(index, LSL, shift));
 2127 ASSERT(Address::CanHoldOffset(
 offset, Address::Offset, size));
 2128 return Address(temp,
 offset);
// Register-destination counterpart of ElementAddressForRegIndexWithSize:
// computes array + scaled index into `address`, then adds the data-header
// offset. Same shift/sign-extension logic as above; the ASR branch here
// sign-extension difference between the two arms (lines 2157/2159) is not
// visible because intermediate lines were elided by extraction.
2131void Assembler::ComputeElementAddressForRegIndex(Register address,
 2134 intptr_t index_scale,
 2139 const intptr_t boxing_shift = index_unboxed ? 0 : -
 kSmiTagShift;
 2140 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
 2141 const int32_t
 offset = HeapDataOffset(is_external, cid);
 2142#if !defined(DART_COMPRESSED_POINTERS)
 2143 const bool index_is_32bit =
 false;
 2145 const bool index_is_32bit = !index_unboxed;
 2148 if (index_is_32bit) {
 2149 add(address, array, Operand(index, SXTW, 0));
 2151 add(address, array, Operand(index));
 2153 }
else if (shift < 0) {
 2155 if (index_is_32bit) {
 2157 add(address, array, Operand(index, ASR, 1));
 2159 add(address, array, Operand(index, ASR, 1));
 2162 if (index_is_32bit) {
 2163 add(address, array, Operand(index, SXTW, shift));
 2165 add(address, array, Operand(index, LSL, shift));
 // Finally fold in the heap-data header offset.
 2169 AddImmediate(address,
 offset);
// Computes the address of a static field's slot in the thread's field table:
// loads the field's id (a Smi) from the Field object, loads the field-table
// base off THR, then indexes by id scaled to word size (Smi tag folded into
// the shift).
2173void Assembler::LoadStaticFieldAddress(Register address,
 2176 LoadCompressedSmiFieldFromOffset(
 2177 scratch, field, target::Field::host_offset_or_field_id_offset());
 2178 const intptr_t field_table_offset =
 2179 compiler::target::Thread::field_table_values_offset();
 2180 LoadMemoryValue(address, THR,
 static_cast<int32_t
>(field_table_offset));
 // address += field_id << (wordsize-log2 - smi-tag): Smi-untag and scale at once.
 2181 add(address, address,
 2182 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
 // Compressed-pointer variant: computes the address of a field given its
 // offset in compressed words as a Smi, then strips the heap-object tag.
 // The initial add into `address` (line between 2189 and 2191) is elided
 // in this extracted view.
2185#if defined(DART_COMPRESSED_POINTERS)
 2186void Assembler::LoadCompressedFieldAddressForRegOffset(
 2189 Register offset_in_compressed_words_as_smi) {
 2191 Operand(offset_in_compressed_words_as_smi, LSL,
 2192 target::kCompressedWordSizeLog2 - kSmiTagShift));
 2193 AddImmediate(address, -kHeapObjectTag);
// Computes a field's untagged address from a word-offset-as-Smi: scales the
// Smi by word size (untagging via the shift) and subtracts the heap tag.
// The add that combines base and scaled offset is partially elided here.
2197void Assembler::LoadFieldAddressForRegOffset(Register address,
 2199 Register offset_in_words_as_smi) {
 2201 Operand(offset_in_words_as_smi, LSL,
 2202 target::kWordSizeLog2 - kSmiTagShift));
 2203 AddImmediate(address, -kHeapObjectTag);
// Pushes the FPU registers (as quad pairs) then the CPU registers (as pairs)
// of `regs`, iterating from the highest register number down so pops can
// mirror the order. Odd counts are handled by the single-push tails (their
// push lines are elided in this extracted view); `prev`/`vprev` buffer one
// pending register until its pair partner is found.
2206void Assembler::PushRegisters(
const RegisterSet& regs) {
 2210 for (intptr_t i = kNumberOfVRegisters - 1; i >= 0; --i) {
 2212 if (regs.ContainsFpuRegister(fpu_reg)) {
 2213 if (vprev != kNoVRegister) {
 2214 PushQuadPair(fpu_reg, vprev);
 // Leftover odd FPU register (single push elided).
 2221 if (vprev != kNoVRegister) {
 2228 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
 2230 if (regs.ContainsRegister(reg)) {
 2231 if (
 prev != kNoRegister) {
 2232 PushPair(reg,
 prev);
 // Leftover odd CPU register (single push elided).
 2239 if (
 prev != kNoRegister) {
// Mirror of PushRegisters: pops CPU registers first, then FPU registers,
// iterating upward. `pop_single` flags an odd count so the first register
// is popped alone before pair-pops resume; most loop headers and the single
// pop bodies are elided in this extracted view.
2244void Assembler::PopRegisters(
const RegisterSet& regs) {
 2245 bool pop_single = (regs.CpuRegisterCount() & 1) == 1;
 2249 if (regs.ContainsRegister(reg)) {
 2255 }
else if (
 prev != kNoRegister) {
 // FPU phase: same odd-count handling for quad registers.
 2265 pop_single = (regs.FpuRegisterCount() & 1) == 1;
 2270 if (regs.ContainsFpuRegister(fpu_reg)) {
 2274 }
else if (vprev != kNoVRegister) {
 2275 PopQuadPair(vprev, fpu_reg);
 // All buffered registers must have been consumed.
 2282 ASSERT(vprev == kNoVRegister);
// Pushes registers in the exact order given, pairing consecutive entries
// with PushPair (note operand order: the pending/earlier register ends up
// deeper on the stack). A trailing unpaired register is pushed singly
// (that push line is elided in this view).
2285void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
 2288 for (Register reg : regs) {
 2289 if (pending_reg != kNoRegister) {
 2290 PushPair(reg, pending_reg);
 2296 if (pending_reg != kNoRegister) {
// Saves the native-ABI callee-saved CPU registers (paired pre-indexed
// stores, with a single str for an odd leftover) and then the callee-saved
// FPU registers as double pairs. Loop headers and several stores are elided
// in this extracted view — see PopNativeCalleeSavedRegisters for the mirror.
2301void Assembler::PushNativeCalleeSavedRegisters() {
 2309 if (
 prev != kNoRegister) {
 // Pair store with pre-decrement: two words pushed at once.
 2311 Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
 2317 if (
 prev != kNoRegister) {
 // Odd leftover CPU register: single pre-indexed store.
 2318 str(
 prev, Address(SP, -1 * target::kWordSize, Address::PreIndex));
 2325 if (vprev != kNoVRegister) {
 2326 PushDoublePair(r, vprev);
 2332 if (vprev != kNoVRegister) {
// Restores what PushNativeCalleeSavedRegisters saved, in reverse: FPU
// double pairs first, then CPU registers via single post-indexed ldr or
// pair post-indexed loads. Most loop structure is elided in this
// extracted view.
2337void Assembler::PopNativeCalleeSavedRegisters() {
 2346 }
else if (vprev != kNoVRegister) {
 2347 PopDoublePair(vprev, r);
 // Odd CPU register restored with a single post-indexed load.
 2364 ldr(r, Address(SP, 1 * target::kWordSize, Address::PostIndex));
 2366 }
else if (
 prev != kNoRegister) {
 // Pair load with post-increment: two words popped at once.
 2368 Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
// Predicate: whether a compare-against-zero + conditional branch on (rn, cond)
// can be strength-reduced to a single CBZ/CBNZ/TBZ/TBNZ.
// NOTE(review): the body was elided by extraction; only the signature remains.
2376bool Assembler::CanGenerateCbzTbz(Register rn, Condition cond) {
// Emits the fused compare-zero-and-branch chosen by CanGenerateCbzTbz:
// CBZ/CBNZ for (in)equality with zero, TBZ/TBNZ on the sign bit for
// sign-based conditions. The switch over `cond` selecting between these
// cases is elided in this extracted view.
2393void Assembler::GenerateCbzTbz(Register rn,
 2397 ASSERT((sz == kEightBytes) || (sz == kFourBytes));
 // Sign bit position depends on operand width (X vs W register).
 2398 const int32_t sign_bit = sz ==
 kEightBytes ? 63 : 31;
 2405 cbnz(label, rn, sz);
 2409 tbnz(label, rn, sign_bit);
 2413 tbz(label, rn, sign_bit);
// Branches based on whether `value` lies in [low, high]: subtracts `low`
// into `to_check` and does one unsigned compare against (high - low), so a
// single LS/HI condition covers both bounds (values below `low` wrap to
// large unsigned numbers). The final conditional branch using `cc` is
// elided in this extracted view.
2421void Assembler::RangeCheck(Register value,
 2425 RangeCheckCondition condition,
 // Unsigned trick: in-range iff (value - low) <= (high - low) unsigned.
 2427 auto cc = condition == kIfInRange ?
 LS :
HI;
 2429 AddImmediate(to_check, value, -low);
 2430 CompareImmediate(to_check, high - low);
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static float next(float f)
static float prev(float f)
static bool ok(int result)
static uint32_t hash(const SkShaderBase::GradientInfo &v)
static bool equals(T *a, T *b)
#define DEBUG_ASSERT(cond)
#define ASSERT_EQUAL(expected, actual)
#define RELEASE_ASSERT(cond)
#define COMPILE_ASSERT(expr)
static OperandSize OperandSizeFor(intptr_t cid)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
#define DECLARE_FLAG(type, name)
#define DEFINE_FLAG(type, name, default_value, comment)
bool IsOriginalObject(const Object &object)
bool IsInOldSpace(const Object &obj)
bool IsSameObject(const Object &a, const Object &b)
const Object & ToObject(const Code &handle)
const QRegister kAbiLastPreservedFpuReg
const int kXRegSizeInBits
const Register kAbiLastPreservedCpuReg
const int kAbiPreservedCpuRegCount
const Register kAbiFirstPreservedCpuReg
const int kAbiPreservedFpuRegCount
const int kWRegSizeInBits
constexpr intptr_t kBitsPerInt64
const int kFpuRegisterSize
const QRegister kAbiFirstPreservedFpuReg
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
static compiler::OperandSize OperandSize(Representation rep)
static Representation RepresentationOfArrayElement(classid_t cid)
constexpr bool kTargetUsesThreadSanitizer
#define NOT_IN_PRODUCT(code)