#if defined(TARGET_ARCH_ARM64)

#define SHOULD_NOT_INCLUDE_RUNTIME

DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");

    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:

Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                     intptr_t far_branch_level)
    : AssemblerBase(object_pool_builder),
      use_far_branches_(far_branch_level != 0),
      constant_pool_allowed_(false) {
  generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
    Call(Address(THR,
                 target::Thread::write_barrier_wrappers_thread_offset(reg)));
  };
  generate_invoke_array_write_barrier_ = [&]() {
    Call(Address(
        THR, target::Thread::array_write_barrier_entry_point_offset()));
  };
}
void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

void Assembler::Emit64(int64_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int64_t>(value);
}
int32_t Assembler::BindImm26Branch(int64_t position, int64_t dest) {
  const int32_t next = buffer_.Load<int32_t>(position);
  const int32_t encoded = EncodeImm26BranchOffset(dest, next);
  buffer_.Store<int32_t>(position, encoded);
  return DecodeImm26BranchOffset(next);
}
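// Unbound labels thread a linked list through the branch instructions that
// target them: each branch's offset field temporarily stores the link to the
// previous branch on the same label. Binding walks that list, patches each
// instruction with the real offset, and returns the decoded link so the walk
// can continue.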
int32_t Assembler::BindImm19Branch(int64_t position, int64_t dest) {
  if (use_far_branches() && !CanEncodeImm19BranchOffset(dest)) {
    // Far branch case: leave the guarding conditional branch alone and patch
    // the unconditional far branch that follows it.
    const int32_t guard_branch =
        buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
    const int32_t far_branch =
        buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
    const Condition c = DecodeImm19BranchCondition(guard_branch);
    // Grab the link to the next branch on this label.
    const int32_t next = DecodeImm26BranchOffset(far_branch);
    // dest is relative to the guarding branch; make it relative to the far
    // branch that follows it.
    const int64_t offset = dest - Instr::kInstrSize;
    const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
    buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
    return next;
  } else if (use_far_branches() && CanEncodeImm19BranchOffset(dest)) {
    // A far branch was assembled but is not needed: retarget the guarding
    // branch at dest, flip its condition back, and nop out the far branch.
    const int32_t guard_branch =
        buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
    const int32_t far_branch =
        buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
    const int32_t next = DecodeImm26BranchOffset(far_branch);
    int32_t encoded_guard_branch = EncodeImm19BranchOffset(dest, guard_branch);
    const Condition c = DecodeImm19BranchCondition(encoded_guard_branch);
    encoded_guard_branch =
        EncodeImm19BranchCondition(InvertCondition(c), encoded_guard_branch);
    buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
                           encoded_guard_branch);
    buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
                           Instr::kNopInstruction);
    return next;
  } else {
    const int32_t next = buffer_.Load<int32_t>(position);
    const int32_t encoded = EncodeImm19BranchOffset(dest, next);
    buffer_.Store<int32_t>(position, encoded);
    return DecodeImm19BranchOffset(next);
  }
}
int32_t Assembler::BindImm14Branch(int64_t position, int64_t dest) {
  if (use_far_branches() && !CanEncodeImm14BranchOffset(dest)) {
    const int32_t guard_branch =
        buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
    const int32_t far_branch =
        buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
    const Condition c = DecodeImm14BranchCondition(guard_branch);
    const int32_t next = DecodeImm26BranchOffset(far_branch);
    const int64_t offset = dest - Instr::kInstrSize;
    const int32_t encoded_branch = EncodeImm26BranchOffset(offset, far_branch);
    buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, encoded_branch);
    return next;
  } else if (use_far_branches() && CanEncodeImm14BranchOffset(dest)) {
    const int32_t guard_branch =
        buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
    const int32_t far_branch =
        buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
    const int32_t next = DecodeImm26BranchOffset(far_branch);
    int32_t encoded_guard_branch = EncodeImm14BranchOffset(dest, guard_branch);
    const Condition c = DecodeImm14BranchCondition(encoded_guard_branch);
    encoded_guard_branch =
        EncodeImm14BranchCondition(InvertCondition(c), encoded_guard_branch);
    buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize,
                           encoded_guard_branch);
    buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
                           Instr::kNopInstruction);
    return next;
  } else {
    const int32_t next = buffer_.Load<int32_t>(position);
    const int32_t encoded = EncodeImm14BranchOffset(dest, next);
    buffer_.Store<int32_t>(position, encoded);
    return DecodeImm14BranchOffset(next);
  }
}
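// AArch64 PC-relative branches carry different immediate widths: B/BL use a
// 26-bit offset, conditional branches and CBZ/CBNZ a 19-bit offset, and
// TBZ/TBNZ only a 14-bit offset. Bind() below picks the matching fixup
// routine based on the instruction kind it finds at each linked position.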
  if (rd == rn) return;
void Assembler::Bind(Label* label) {
  ASSERT(!label->IsBound());
  const intptr_t bound_pc = buffer_.Size();

  while (label->IsLinked()) {
    const int64_t position = label->Position();
    const int64_t dest = bound_pc - position;
    const int32_t instr = buffer_.Load<int32_t>(position);
    if (IsTestAndBranch(instr)) {
      label->position_ = BindImm14Branch(position, dest);
    } else if (IsConditionalBranch(instr) || IsCompareAndBranch(instr)) {
      label->position_ = BindImm19Branch(position, dest);
    } else if (IsUnconditionalBranch(instr)) {
      label->position_ = BindImm26Branch(position, dest);
    }
  }
  label->BindTo(bound_pc, lr_state());
}
  ASSERT(Utils::IsPowerOfTwo(alignment));
  intptr_t pos = offset + buffer_.GetPosition();
  intptr_t mod = pos & (alignment - 1);

  ASSERT(((offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
  LeafRuntimeScope rt(this, 0, true);
  rt.Call(kTsanLoadAcquireRuntimeEntry, 1);

  LeafRuntimeScope rt(this, 0, true);
  rt.Call(kTsanStoreReleaseRuntimeEntry, 1);
static int CountLeadingZeros(uint64_t value, int width) {
  if (width == 64) return Utils::CountLeadingZeros64(value);
  if (width == 32) return Utils::CountLeadingZeros32(value);
}

static int CountOneBits(uint64_t value, int width) {
  value &= (0xffffffffffffffffULL >> (64 - width));

  value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
  value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
  value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
  value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
  value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
  value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
}
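// CountOneBits is a classic parallel population count: each step adds
// neighbouring bit groups in place (1-bit pairs, then 2-bit pairs, and so
// on), so after the last step the value holds the total number of set bits.
// For example, for value = 0b1101 the first step yields per-pair counts
// {1, 2} and the second step sums them to 3.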
bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
  ASSERT(imm_op != nullptr);
    value &= 0xffffffffUL;

  if ((value == 0) || (value == 0xffffffffffffffffULL) ||

  int lead_zero = CountLeadingZeros(value, width);
  int lead_one = CountLeadingZeros(~value, width);
  int trail_zero = Utils::CountTrailingZerosWord(value);
  int trail_one = Utils::CountTrailingZerosWord(~value);
  int imm_s_mask = 0x3F;

    imm_r = (value & 3) - 1;
    *imm_op = Operand(n, imm_s, imm_r);

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
    imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    if (lead_zero + trail_zero + set_bits == width) {
      *imm_op = Operand(n, imm_s, imm_r);
    if (lead_one + trail_one + (width - set_bits) == width) {
      *imm_op = Operand(n, imm_s, imm_r);
    uint64_t mask = (1ULL << (width >> 1)) - 1;
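// AArch64 logical instructions (AND/ORR/EOR/TST) cannot take arbitrary
// immediates: the value must be a repeating power-of-two sized element
// consisting of a single contiguous run of ones, possibly rotated. The
// (n, imm_s, imm_r) triple computed above encodes the element size, run
// length and rotation; values that do not fit this shape fall back to
// materializing the constant in a temporary register.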
void Assembler::LoadPoolPointer(Register pp) {
  ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
  set_constant_pool_allowed(pp == PP);
}

void Assembler::LoadWordFromPoolIndex(Register dst,
                                      intptr_t index,
                                      Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  const uint32_t offset = target::ObjectPool::element_offset(index);
  const uint32_t upper20 = offset & 0xfffff000;
  if (Address::CanHoldOffset(offset)) {
  } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    const uint32_t lower12 = offset & 0x00000fff;
    ASSERT(Address::CanHoldOffset(lower12));
    ldr(dst, Address(dst, lower12));
  } else {
    const uint16_t offset_low = Utils::Low16Bits(offset);
    const uint16_t offset_high = Utils::High16Bits(offset);
    movz(dst, Immediate(offset_low), 0);
    movk(dst, Immediate(offset_high), 1);
    ldr(dst, Address(pp, dst));
  }
}
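// Object pool entries are addressed as a byte offset from PP. Small offsets
// fit directly in the load's addressing mode; mid-sized offsets split into a
// 12-bit low part plus an ADD of the aligned high part; anything larger is
// materialized with MOVZ/MOVK and used as a register offset.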
void Assembler::StoreWordToPoolIndex(Register src,
                                     intptr_t index,
                                     Register pp) {
  ASSERT((pp != PP) || constant_pool_allowed());
  const uint32_t offset = target::ObjectPool::element_offset(index);
  const uint32_t upper20 = offset & 0xfffff000;
  if (Address::CanHoldOffset(offset)) {
  } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    const uint32_t lower12 = offset & 0x00000fff;
    ASSERT(Address::CanHoldOffset(lower12));
    str(src, Address(TMP, lower12));
  } else {
    const uint16_t offset_low = Utils::Low16Bits(offset);
    const uint16_t offset_high = Utils::High16Bits(offset);
    movz(TMP, Immediate(offset_low), 0);
    movk(TMP, Immediate(offset_high), 1);
    str(src, Address(pp, TMP));
  }
}
void Assembler::LoadDoubleWordFromPoolIndex(Register lower,
                                            Register upper,
                                            intptr_t index) {
  ASSERT(constant_pool_allowed());
  const uint32_t offset = target::ObjectPool::element_offset(index);
  const uint32_t upper20 = offset & 0xfffff000;
  const uint32_t lower12 = offset & 0x00000fff;
  if (Address::CanHoldOffset(offset, Address::PairOffset)) {
  } else if (Operand::CanHold(offset, kXRegSizeInBits, &op) ==
             Operand::Immediate) {
    ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
  } else if (Operand::CanHold(upper20, kXRegSizeInBits, &op) ==
                 Operand::Immediate &&
             Address::CanHoldOffset(lower12, Address::PairOffset)) {
    ldp(lower, upper, Address(TMP, lower12, Address::PairOffset));
  } else {
    const uint32_t lower12 = offset & 0xfff;
    const uint32_t higher12 = offset & 0xfff000;

    Operand op_high, op_low;
    add(TMP, PP, op_high);
    ldp(lower, upper, Address(TMP, 0, Address::PairOffset));
  }
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  if (!constant_pool_allowed()) {
    return false;
  }

void Assembler::LoadNativeEntry(
    Register dst,
    const ExternalLabel* label,
    ObjectPoolBuilderEntry::Patchability patchable) {
  const intptr_t index =
      object_pool_builder().FindNativeFunction(label, patchable);
  LoadWordFromPoolIndex(dst, index);
}

void Assembler::LoadIsolate(Register dst) {
  ldr(dst, Address(THR, target::Thread::isolate_offset()));
}

void Assembler::LoadIsolateGroup(Register rd) {
  ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
}

void Assembler::LoadObjectHelper(Register dst,
                                 const Object& object,
                                 bool is_unique) {
  const intptr_t index =
      is_unique ? object_pool_builder().AddObject(
                      object, ObjectPoolBuilderEntry::kPatchable)
                : object_pool_builder().FindObject(
                      object, ObjectPoolBuilderEntry::kNotPatchable);
  LoadWordFromPoolIndex(dst, index);
}
void Assembler::LoadObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, /*is_unique=*/false);
}

void Assembler::LoadUniqueObject(Register dst, const Object& object) {
  LoadObjectHelper(dst, object, /*is_unique=*/true);
}

void Assembler::LoadFromStack(Register dst, intptr_t depth) {

void Assembler::StoreToStack(Register src, intptr_t depth) {

void Assembler::CompareToStack(Register src, intptr_t depth) {
  LoadFromStack(TMP, depth);
  CompareRegisters(src, TMP);
}

void Assembler::CompareObject(Register reg, const Object& object) {
    CompareObjectRegisters(reg, NULL_REG);
    LoadObject(TMP, object);
    CompareObjectRegisters(reg, TMP);
void Assembler::LoadImmediate(Register reg, int64_t imm) {
    movz(reg, Immediate(0), 0);

  Operand::OperandType ot;
  if (ot == Operand::BitfieldImm) {
    orri(reg, ZR, Immediate(imm));

  const uint32_t w0 = Utils::Low32Bits(imm);
  const uint32_t w1 = Utils::High32Bits(imm);
  const uint16_t h0 = Utils::Low16Bits(w0);
  const uint16_t h1 = Utils::High16Bits(w0);
  const uint16_t h2 = Utils::Low16Bits(w1);
  const uint16_t h3 = Utils::High16Bits(w1);

  if (w1 == 0xffffffff) {
      movn(reg, Immediate(~h0), 0);
      movn(reg, Immediate(~h1), 1);
      movk(reg, Immediate(h0), 0);
      movn(reg, Immediate(~h2), 2);
      movk(reg, Immediate(h1), 1);
      movk(reg, Immediate(h0), 0);

  if ((w1 != 0) && constant_pool_allowed()) {
    const intptr_t index = object_pool_builder().FindImmediate(imm);
    LoadWordFromPoolIndex(reg, index);

  bool initialized = false;
    movz(reg, Immediate(h0), 0);
      movk(reg, Immediate(h1), 1);
      movz(reg, Immediate(h1), 1);
      movk(reg, Immediate(h2), 2);
      movz(reg, Immediate(h2), 2);
      movk(reg, Immediate(h3), 3);
      movz(reg, Immediate(h3), 3);
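// LoadImmediate tries progressively more expensive strategies: MOVZ for
// trivial values, a logical (bitfield) immediate via ORR, an object-pool load
// when a pool is available, and finally building the value 16 bits at a time
// with MOVZ/MOVK (or MOVN/MOVK for values whose upper bits are all ones).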
void Assembler::LoadSImmediate(VRegister vd, float imms) {
  int32_t imm32 = bit_cast<int32_t, float>(imms);
  } else if (constant_pool_allowed()) {
    intptr_t index = object_pool_builder().FindImmediate(imm32);
    intptr_t offset = target::ObjectPool::element_offset(index);
    LoadImmediate(TMP, imm32);

void Assembler::LoadDImmediate(VRegister vd, double immd) {
  if (fmovdi(vd, immd)) return;

  int64_t imm64 = bit_cast<int64_t, double>(immd);
  } else if (constant_pool_allowed()) {
    intptr_t index = object_pool_builder().FindImmediate64(imm64);
    intptr_t offset = target::ObjectPool::element_offset(index);
    LoadImmediate(TMP, imm64);

void Assembler::LoadQImmediate(VRegister vd, simd128_value_t immq) {
  ASSERT(constant_pool_allowed());
  intptr_t index = object_pool_builder().FindImmediate128(immq);
  intptr_t offset = target::ObjectPool::element_offset(index);
void Assembler::BranchLink(intptr_t target_code_pool_index,
  LoadWordFromPoolIndex(code_reg, target_code_pool_index);
  Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
}

void Assembler::BranchLink(
    ObjectPoolBuilderEntry::Patchability patchable,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  const intptr_t index = object_pool_builder().FindObject(
  BranchLink(index, entry_kind);
}

void Assembler::BranchLinkWithEquivalence(const Code& target,
                                          const Object& equivalence,
  const intptr_t index =
  BranchLink(index, entry_kind);
}
  if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
    add(dest, rn, op, sz);
  } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
             Operand::Immediate) {
    sub(dest, rn, op, sz);
  } else {
    LoadImmediate(TMP2, imm);

  if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
    adds(dest, rn, op, sz);
  } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
             Operand::Immediate) {
    subs(dest, rn, op, sz);
  } else {
    LoadImmediate(TMP2, imm);

  if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
    subs(dest, rn, op, sz);
  } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
             Operand::Immediate) {
    adds(dest, rn, op, sz);
  } else {
    LoadImmediate(TMP2, imm);

void Assembler::AndImmediate(Register rd,
  if (Operand::IsImmLogical(imm, width, &imm_op)) {
    andi(rd, rn, Immediate(imm), sz);
  } else {
    LoadImmediate(TMP, imm);
    and_(rd, rn, Operand(TMP), sz);
  }

void Assembler::OrImmediate(Register rd,
  if (Operand::IsImmLogical(imm, width, &imm_op)) {
    orri(rd, rn, Immediate(imm), sz);
  } else {
    LoadImmediate(TMP, imm);
    orr(rd, rn, Operand(TMP), sz);
  }

void Assembler::XorImmediate(Register rd,
  if (Operand::IsImmLogical(imm, width, &imm_op)) {
    eori(rd, rn, Immediate(imm), sz);
  } else {
    LoadImmediate(TMP, imm);
    eor(rd, rn, Operand(TMP), sz);
  }

  if (Operand::IsImmLogical(imm, width, &imm_op)) {
    tsti(rn, Immediate(imm), sz);
  } else {
    LoadImmediate(TMP, imm);
    tst(rn, Operand(TMP), sz);
  }

  if (Operand::CanHold(imm, width, &op) == Operand::Immediate) {
  } else if (Operand::CanHold(-static_cast<uint64_t>(imm), width, &op) ==
             Operand::Immediate) {
  } else {
    LoadImmediate(TMP2, imm);
    cmp(rn, Operand(TMP2), sz);
  }
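// Immediate arithmetic and comparisons try the operand encodings in order:
// the immediate itself, then its negation with the opposite operation (an
// out-of-range ADD can become an in-range SUB and vice versa), and only then
// fall back to materializing the constant in TMP/TMP2.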
                                      Address::AddressType addr_type) {
  ASSERT(addr_type == Address::AddressType::Offset ||
         addr_type == Address::AddressType::PairOffset);
  if (Address::CanHoldOffset(offset, addr_type, sz)) {

  const uint32_t upper20 = offset & 0xfffff000;
  const uint32_t lower12 = offset & 0x00000fff;
      (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
      Address::CanHoldOffset(lower12, addr_type, sz)) {
    return Address(TMP2, lower12, addr_type);

  if (addr_type == Address::AddressType::Offset) {
  return Address(TMP2, 0, Address::AddressType::PairOffset);

  if (addr.type() == Address::AddressType::Offset ||
      addr.type() == Address::AddressType::PairOffset) {
    ldr(dst, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),

  auto const type = Address::AddressType::Offset;
  auto const type = Address::AddressType::Offset;
  auto const type = Address::AddressType::Offset;

  if (addr.type() == Address::AddressType::Offset ||
      addr.type() == Address::AddressType::PairOffset) {
    str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),

void Assembler::StorePairToOffset(Register low,
  auto const type = Address::AddressType::PairOffset;

  auto const type = Address::AddressType::Offset;
  auto const type = Address::AddressType::Offset;
  auto const type = Address::AddressType::Offset;
  vrecpss(VTMP, vn, vd);
  vmuls(vd, vd, VTMP);
  vrecpss(VTMP, vn, vd);
  vmuls(vd, vd, VTMP);

  vmuls(VTMP, vd, vd);
  vmuls(vd, vd, VTMP);
  vmuls(VTMP, vd, vd);
  vmuls(vd, vd, VTMP);
#if defined(DART_COMPRESSED_POINTERS)
void Assembler::LoadCompressed(Register dest, const Address& slot) {

void Assembler::StoreBarrier(Register object,
                             CanBeSmi can_be_smi,
  const bool spill_lr = lr_state().LRContainsReturnAddress();
  ASSERT(object != scratch);

  if (can_be_smi == kValueCanBeSmi) {
    BranchIfNotSmi(value, &passed_check, kNearJump);
    Bind(&passed_check);
  }

  ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
      Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
    SPILLS_LR_TO_FRAME(Push(LR));
  mov(objectForCall, object);
  generate_invoke_write_barrier_wrapper_(objectForCall);
    RESTORES_LR_FROM_FRAME(Pop(LR));
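// The write-barrier filter: a store only needs the slow path when the stored
// value is a new-space (or evacuation-candidate) object and the receiver is
// old and not yet in the remembered set. The relevant tag bits of both
// objects are combined (via kBarrierOverlapShift) so that a single test
// decides whether to call the out-of-line write-barrier stub.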
void Assembler::ArrayStoreBarrier(Register object,
                                  CanBeSmi can_be_smi,
  const bool spill_lr = lr_state().LRContainsReturnAddress();
  ASSERT(object != scratch);

  if (can_be_smi == kValueCanBeSmi) {
    BranchIfNotSmi(value, &passed_check, kNearJump);
    Bind(&passed_check);
  }

  ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
      Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
    SPILLS_LR_TO_FRAME(Push(LR));
  generate_invoke_array_write_barrier_();
    RESTORES_LR_FROM_FRAME(Pop(LR));
void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
                                               const Address& address,
                                               const Object& value,
                                               MemoryOrder memory_order,
  if (memory_order == kRelease) {
    StoreRelease(src, address, size);

void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
  tbz(&done, TMP, target::UntaggedObject::kNewOrEvacuationCandidateBit);
  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
  tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
  Stop("Write barrier is required");
void Assembler::StoreInternalPointer(Register object,
                                     const Address& dest,

  ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
  ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
  ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
       target::UntaggedObject::kClassIdTagSize);

  ASSERT(target::UntaggedObject::kSizeTagPos == 8);
  ASSERT(target::UntaggedObject::kSizeTagSize == 4);
  ubfx(result, tags, target::UntaggedObject::kSizeTagPos,
       target::UntaggedObject::kSizeTagSize);

  ldr(result, FieldAddress(object, target::Object::tags_offset()));

  const intptr_t table_offset =
      target::IsolateGroup::cached_class_table_table_offset();
  LoadIsolateGroup(result);

void Assembler::CompareClassId(Register object,
  LoadClassId(TMP, object);
  CompareImmediate(TMP, class_id);

  LoadImmediate(result, kSmiCid);
  BranchIfSmi(object, &done);
  LoadClassId(result, object);

  LoadClassIdMayBeSmi(TMP, object);

  BranchIfSmi(object, &done);
  LoadClassId(result, object);

void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
  Comment("Check that object in register has cid %" Pd "", cid);
  LoadClassIdMayBeSmi(scratch, src);
  CompareImmediate(scratch, cid);
  CompareImmediate(scratch, kNullCid);
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  if (frame_space != 0) {
    AddImmediate(SP, -frame_space);
  }
  if (OS::ActivationFrameAlignment() > 1) {
    andi(SP, SP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
  }
}

void Assembler::EmitEntryFrameVerification() {
  ASSERT(!constant_pool_allowed());

void Assembler::RestoreCodePointer() {

void Assembler::RestorePinnedRegisters() {
      compiler::Address(THR, target::Thread::write_barrier_mask_offset()));
  ldr(NULL_REG, compiler::Address(THR, target::Thread::object_null_offset()));
#if defined(DART_COMPRESSED_POINTERS)
  ldr(TMP, compiler::Address(THR, target::Thread::heap_base_offset()));

void Assembler::SetupGlobalPoolAndDispatchTable() {
  ASSERT(FLAG_precompiled_mode);
  ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
  ldr(DISPATCH_TABLE_REG,
      Address(THR, target::Thread::dispatch_table_array_offset()));
}
void Assembler::CheckCodePointer() {
  if (!FLAG_check_code_pointer) {
    return;
  }
  Comment("CheckCodePointer");
  Label cid_ok, instructions_ok;
  CompareClassId(CODE_REG, kCodeCid);

  const intptr_t entry_offset =
  adr(R0, Immediate(-entry_offset));
  ldr(TMP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
  cmp(R0, Operand(TMP));
  b(&instructions_ok, EQ);
  Bind(&instructions_ok);
void Assembler::SetupDartSP(intptr_t reserve) {

void Assembler::SetupCSPFromThread(Register thr) {
  ldr(TMP, Address(thr, target::Thread::saved_stack_limit_offset()));
  AddImmediate(CSP, TMP, -4096);
}

void Assembler::RestoreCSP() {

  RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));

void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
  AsrImmediate(reg, reg, shift);
}

void Assembler::CompareWords(Register reg1,
  BranchIfZero(count, equals, Assembler::kNearJump);
  AddImmediate(count, -1);
  ldr(temp, Address(reg1, 8, Address::PostIndex));
  ldr(TMP, Address(reg2, 8, Address::PostIndex));
  cmp(temp, Operand(TMP));
  BranchIf(EQUAL, &loop, Assembler::kNearJump);
void Assembler::EnterFrame(intptr_t frame_size) {
  SPILLS_LR_TO_FRAME(PushPair(FP, LR));
  if (frame_size > 0) {
    sub(SP, SP, Operand(frame_size));
  }
}

void Assembler::LeaveFrame() {
  RESTORES_LR_FROM_FRAME(PopPair(FP, LR));
}

void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
  ASSERT(!constant_pool_allowed());
  if (!FLAG_precompiled_mode) {
    TagAndPushPPAndPcMarker();
  }
  set_constant_pool_allowed(true);
  if (frame_size > 0) {
    AddImmediate(SP, -frame_size);
  }
}

void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
  ASSERT(!constant_pool_allowed());
  Comment("EnterOsrFrame");
  RestoreCodePointer();
  if (extra_size > 0) {
    AddImmediate(SP, -extra_size);
  }
}

void Assembler::LeaveDartFrame() {
  if (!FLAG_precompiled_mode) {
  set_constant_pool_allowed(false);
  Label slow_path, done, retry;
  if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
  movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
  cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));
  movz(state, Immediate(target::Thread::full_safepoint_state_acquired()), 0);
  if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
  ldr(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
  ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));

void Assembler::TransitionGeneratedToNative(Register destination,
                                            bool enter_safepoint) {
  StoreToOffset(new_exit_frame, THR,
                target::Thread::top_exit_frame_info_offset());
  StoreToOffset(new_exit_through_ffi, THR,
                target::Thread::exit_through_ffi_offset());
  Register tmp = new_exit_through_ffi;
  StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
  LoadImmediate(tmp, target::Thread::native_execution_state());
  StoreToOffset(tmp, THR, target::Thread::execution_state_offset());
  if (enter_safepoint) {
    EnterFullSafepoint(tmp);
  }
}
                                  bool ignore_unwind_in_progress) {
  Label slow_path, done, retry;
  if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
  movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
  cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));
  movz(state, Immediate(target::Thread::full_safepoint_state_unacquired()), 0);
  if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
  if (ignore_unwind_in_progress) {
    ldr(addr,
        Address(THR,
                target::Thread::
                    exit_safepoint_ignore_unwind_in_progress_stub_offset()));
  } else {
    ldr(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
  }
  ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));

                                            bool exit_safepoint,
                                            bool ignore_unwind_in_progress,
  if (exit_safepoint) {
    ExitFullSafepoint(state, ignore_unwind_in_progress);
  } else {
    ASSERT(!ignore_unwind_in_progress);
  }
  ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
  LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
  ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));

  LoadImmediate(state, target::Thread::vm_tag_dart_id());
  StoreToOffset(state, THR, target::Thread::vm_tag_offset());
  LoadImmediate(state, target::Thread::generated_execution_state());
  StoreToOffset(state, THR, target::Thread::execution_state_offset());

  StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
  LoadImmediate(state, 0);
  StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
void Assembler::CallRuntime(const RuntimeEntry& entry,
  ASSERT(!entry.is_leaf());
  ldr(R5, compiler::Address(THR, entry.OffsetFromThread()));
  Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));

#define __ assembler_->

LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
                                   intptr_t frame_size,
                                   bool preserve_registers)
    : assembler_(assembler), preserve_registers_(preserve_registers) {
  __ Comment("EnterCallRuntimeFrame");
  if (preserve_registers) {
    __ PushRegisters(kRuntimeCallSavedRegisters);
  }
  __ ReserveAlignedFrameSpace(frame_size);
}

void LeafRuntimeScope::Call(const RuntimeEntry& entry,
  __ ldr(TMP, compiler::Address(THR, entry.OffsetFromThread()));
  __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
  __ LoadImmediate(TMP, VMTag::kDartTagId);
  __ str(TMP, compiler::Address(THR, target::Thread::vm_tag_offset()));
  __ SetupCSPFromThread(THR);

LeafRuntimeScope::~LeafRuntimeScope() {
  if (preserve_registers_) {
    const intptr_t kPushedRegistersSize =
    __ AddImmediate(SP, FP, -kPushedRegistersSize);
    __ PopRegisters(kRuntimeCallSavedRegisters);
  }
}
void Assembler::EnterStubFrame() {

void Assembler::LeaveStubFrame() {

void Assembler::EnterCFrame(intptr_t frame_space) {
  ReserveAlignedFrameSpace(frame_space);
}

void Assembler::LeaveCFrame() {

void Assembler::MonomorphicCheckedEntryJIT() {
  has_monomorphic_entry_ = true;
  const bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);
  const intptr_t start = CodeSize();

  Label immediate, miss;
  ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));

  Comment("MonomorphicCheckedEntry");
         target::Instructions::kMonomorphicEntryOffsetJIT);

  const intptr_t cid_offset = target::Array::element_offset(0);
  const intptr_t count_offset = target::Array::element_offset(1);
  LoadClassIdMayBeSmi(IP0, R0);
  LoadImmediate(R4, 0);
         target::Instructions::kPolymorphicEntryOffsetJIT);

  set_use_far_branches(saved_use_far_branches);
}

void Assembler::MonomorphicCheckedEntryAOT() {
  has_monomorphic_entry_ = true;
  bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);
  const intptr_t start = CodeSize();

  Label immediate, miss;
  ldr(IP0, Address(THR, target::Thread::switchable_call_miss_entry_offset()));

  Comment("MonomorphicCheckedEntry");
         target::Instructions::kMonomorphicEntryOffsetAOT);
  LoadClassId(IP0, R0);
         target::Instructions::kPolymorphicEntryOffsetAOT);

  set_use_far_branches(saved_use_far_branches);
}
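// The monomorphic checked entry is a fixed-size prologue emitted at a known
// offset from the instruction start (kMonomorphicEntryOffsetJIT/AOT): it
// loads the receiver's class id, compares it against the expected class id
// from the call's data, and falls through to the regular (polymorphic) entry
// on a match, or tail-calls the switchable-call miss stub otherwise. Far
// branches are disabled while emitting it so its size stays fixed.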
void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
  has_monomorphic_entry_ = true;
  while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
  while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {

void Assembler::FinalizeHashForSize(intptr_t bit_size,
  andis(hash, hash, Immediate(Utils::NBitMask(bit_size)));

void Assembler::MaybeTraceAllocation(intptr_t cid,
  LoadIsolateGroup(temp_reg);
  ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
          target::ClassTable::allocation_tracing_state_table_offset()));
  LoadFromOffset(temp_reg, temp_reg,
                 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
  cbnz(trace, temp_reg);

  LoadIsolateGroup(temp_reg);
  ldr(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
          target::ClassTable::allocation_tracing_state_table_offset()));
  AddRegisters(temp_reg, cid);
  LoadFromOffset(temp_reg, temp_reg,
                 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
  cbnz(trace, temp_reg);
void Assembler::TryAllocateObject(intptr_t cid,
                                  intptr_t instance_size,
  ASSERT(failure != nullptr);
  ASSERT(instance_size != 0);
  ASSERT(instance_reg != temp_reg);
  ASSERT(Utils::IsAligned(instance_size,
  if (FLAG_inline_alloc &&
                  target::Thread::end_offset());
    ldp(instance_reg, temp_reg,
        Address(THR, target::Thread::top_offset(), Address::PairOffset));
    AddImmediate(instance_reg, instance_size);
    cmp(temp_reg, Operand(instance_reg));
    CheckAllocationCanary(instance_reg, temp_reg);
    str(instance_reg, Address(THR, target::Thread::top_offset()));
    LoadImmediate(temp_reg, tags);
    Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));

void Assembler::TryAllocateArray(intptr_t cid,
                                 intptr_t instance_size,
  if (FLAG_inline_alloc &&
    ldr(instance, Address(THR, target::Thread::top_offset()));
    AddImmediateSetFlags(end_address, instance, instance_size);
    ldr(temp2, Address(THR, target::Thread::end_offset()));
    cmp(end_address, Operand(temp2));
    CheckAllocationCanary(instance, temp2);
    str(end_address, Address(THR, target::Thread::top_offset()));
    LoadImmediate(temp2, tags);
    str(temp2, FieldAddress(instance, target::Object::tags_offset()));
void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
  EmitUnconditionalBranchOp(BL, 0);
  PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                PcRelativeCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}

void Assembler::GenerateUnRelocatedPcRelativeTailCall(
    intptr_t offset_into_target) {
  EmitUnconditionalBranchOp(B, 0);
  PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                    PcRelativeTailCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}
bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
                                            intptr_t index_scale) {
  if (!IsSafeSmi(constant)) return false;
  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
  if (!Utils::IsInt(32, offset)) {
    return false;
  }
  return Address::CanHoldOffset(static_cast<int32_t>(offset), Address::Offset,
                                Address::OperandSizeFor(cid));
}

Address Assembler::ElementAddressForIntIndex(bool is_external,
                                             intptr_t index_scale,
                                             intptr_t index) const {
  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
  return Address(array, static_cast<int32_t>(offset));
}

void Assembler::ComputeElementAddressForIntIndex(Register address,
                                                 intptr_t index_scale,
  const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
  AddImmediate(address, array, offset);
}

Address Assembler::ElementAddressForRegIndex(bool is_external,
                                             intptr_t index_scale,
  return ElementAddressForRegIndexWithSize(
      is_external, cid, Address::OperandSizeFor(cid), index_scale,
      index_unboxed, array, index, temp);
}

Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
                                                     intptr_t index_scale,
  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
  const int32_t offset = HeapDataOffset(is_external, cid);
#if !defined(DART_COMPRESSED_POINTERS)
  const bool index_is_32bit = false;
#else
  const bool index_is_32bit = !index_unboxed;
#endif

  if ((offset == 0) && (shift == 0)) {
    if (index_is_32bit) {
      return Address(array, index, SXTW, Address::Unscaled);
    } else {
      return Address(array, index, UXTX, Address::Unscaled);
    }
  } else if (shift < 0) {
    if (index_is_32bit) {
      add(temp, array, Operand(temp, SXTW, 0));
    } else {
      add(temp, array, Operand(index, ASR, 1));
    }
  } else {
    if (index_is_32bit) {
      add(temp, array, Operand(index, SXTW, shift));
    } else {
      add(temp, array, Operand(index, LSL, shift));
    }
  }
  return Address(temp, offset);
}
void Assembler::ComputeElementAddressForRegIndex(Register address,
                                                 intptr_t index_scale,
  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
  const int32_t offset = HeapDataOffset(is_external, cid);
#if !defined(DART_COMPRESSED_POINTERS)
  const bool index_is_32bit = false;
#else
  const bool index_is_32bit = !index_unboxed;
#endif

  if (shift == 0) {
    if (index_is_32bit) {
      add(address, array, Operand(index, SXTW, 0));
    } else {
      add(address, array, Operand(index));
    }
  } else if (shift < 0) {
    if (index_is_32bit) {
      add(address, array, Operand(index, ASR, 1));
    } else {
      add(address, array, Operand(index, ASR, 1));
    }
  } else {
    if (index_is_32bit) {
      add(address, array, Operand(index, SXTW, shift));
    } else {
      add(address, array, Operand(index, LSL, shift));
    }
  }
  AddImmediate(address, offset);
}
void Assembler::LoadStaticFieldAddress(Register address,
  LoadCompressedSmiFieldFromOffset(
      scratch, field, target::Field::host_offset_or_field_id_offset());
  const intptr_t field_table_offset =
      is_shared ? compiler::target::Thread::shared_field_table_values_offset()
  LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
  add(address, address,

#if defined(DART_COMPRESSED_POINTERS)
void Assembler::LoadCompressedFieldAddressForRegOffset(
    Register offset_in_compressed_words_as_smi) {
      Operand(offset_in_compressed_words_as_smi, LSL,

void Assembler::LoadFieldAddressForRegOffset(Register address,
      Operand(offset_in_words_as_smi, LSL,
void Assembler::PushRegisters(const RegisterSet& regs) {
    if (regs.ContainsFpuRegister(fpu_reg)) {
        PushQuadPair(fpu_reg, vprev);
    if (regs.ContainsRegister(reg)) {
        PushPair(reg, prev);

void Assembler::PopRegisters(const RegisterSet& regs) {
  bool pop_single = (regs.CpuRegisterCount() & 1) == 1;
    if (regs.ContainsRegister(reg)) {
  pop_single = (regs.FpuRegisterCount() & 1) == 1;
    if (regs.ContainsFpuRegister(fpu_reg)) {
        PopQuadPair(vprev, fpu_reg);

void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
      PushPair(reg, pending_reg);

void Assembler::PushNativeCalleeSavedRegisters() {
      PushDoublePair(r, vprev);

void Assembler::PopNativeCalleeSavedRegisters() {
      PopDoublePair(vprev, r);
void Assembler::GenerateCbzTbz(Register rn,
  const int32_t sign_bit = sz == kEightBytes ? 63 : 31;
      cbnz(label, rn, sz);
      tbnz(label, rn, sign_bit);
      tbz(label, rn, sign_bit);
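// GenerateCbzTbz picks the cheapest single-instruction branch for common
// comparisons against zero: CBZ/CBNZ for (in)equality with zero, and TBZ/TBNZ
// on the sign bit (bit 63 or 31 depending on operand size) for negative /
// non-negative tests, avoiding a separate CMP.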
                           RangeCheckCondition condition,
  auto cc = condition == kIfInRange ? LS : HI;
  AddImmediate(to_check, value, -low);
  CompareImmediate(to_check, high - low);
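// The range check uses the classic unsigned-compare trick: after subtracting
// the lower bound, a single unsigned comparison against (high - low) answers
// "low <= value <= high" (LS) or its negation (HI), so no second compare or
// branch is needed.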