#if defined(TARGET_ARCH_ARM)

#define SHOULD_NOT_INCLUDE_RUNTIME

#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) &&   \
    !defined(DART_HOST_OS_IOS) && !defined(DART_HOST_OS_MACOS)
#error ARM cross-compile only supported on Linux, Android, iOS, and Mac
#endif

Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
                     intptr_t far_branch_level)
    : AssemblerBase(object_pool_builder),
      use_far_branches_(far_branch_level != 0),
      constant_pool_allowed_(false) {
  generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
    Call(Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)),
         cond);
  };
  generate_invoke_array_write_barrier_ = [&](Condition cond) {
    Call(Address(THR, target::Thread::array_write_barrier_entry_point_offset()),
         cond);
  };
}
uint32_t Address::encoding3() const {
  if (kind_ == Immediate) {

  ASSERT(kind_ == IndexRegister);

uint32_t Address::vencoding() const {
  ASSERT(kind_ == Immediate);
  int mode = encoding_ & ((8 | 4 | 1) << 21);
  uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);

void Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}
      ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | o.encoding();

  BailoutIfInvalidBranchOffset(offset);
  Emit(Assembler::EncodeBranchOffset(offset, encoding));

  ASSERT(!ad.has_writeback() || (ad.rn() != rd));
      (ad.kind() == Address::Immediate ? 0 : B25) |
      (load ? L : 0) | (byte ? B : 0) | ArmEncode::Rd(rd) |
void Assembler::EmitMemOpAddressMode3(Condition cond,
  ASSERT(!ad.has_writeback() || (ad.rn() != rd));
      ArmEncode::Rd(rd) | ad.encoding3();

void Assembler::EmitMultiMemOp(Condition cond,
  ASSERT(!Address::has_writeback(am) || !(regs & (1 << base)));
      am | (load ? L : 0) | ArmEncode::Rn(base) | regs;

void Assembler::EmitShiftImmediate(Condition cond,
      static_cast<int32_t>(rm);

void Assembler::EmitShiftRegister(Condition cond,
      static_cast<int32_t>(rm);
  EmitType01(cond, o.type(), AND, 0, rn, rd, o);

  EmitType01(cond, o.type(), AND, 1, rn, rd, o);

  EmitType01(cond, o.type(), EOR, 0, rn, rd, o);

  EmitType01(cond, o.type(), SUB, 0, rn, rd, o);

  EmitType01(cond, o.type(), RSB, 0, rn, rd, o);

  EmitType01(cond, o.type(), RSB, 1, rn, rd, o);

  EmitType01(cond, o.type(), ADD, 0, rn, rd, o);

  EmitType01(cond, o.type(), ADD, 1, rn, rd, o);

  EmitType01(cond, o.type(), SUB, 1, rn, rd, o);

  EmitType01(cond, o.type(), ADC, 0, rn, rd, o);

  EmitType01(cond, o.type(), ADC, 1, rn, rd, o);

  EmitType01(cond, o.type(), SBC, 0, rn, rd, o);

  EmitType01(cond, o.type(), SBC, 1, rn, rd, o);

  EmitType01(cond, o.type(), RSC, 0, rn, rd, o);

  EmitType01(cond, o.type(), TST, 1, rn, R0, o);

  EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);

  EmitType01(cond, o.type(), CMP, 1, rn, R0, o);

  EmitType01(cond, o.type(), CMN, 1, rn, R0, o);

  EmitType01(cond, o.type(), ORR, 0, rn, rd, o);

  EmitType01(cond, o.type(), ORR, 1, rn, rd, o);

  EmitType01(cond, o.type(), MOV, 0, R0, rd, o);

  EmitType01(cond, o.type(), MOV, 1, R0, rd, o);

  EmitType01(cond, o.type(), BIC, 0, rn, rd, o);

  EmitType01(cond, o.type(), BIC, 1, rn, rd, o);

  EmitType01(cond, o.type(), MVN, 0, R0, rd, o);

  EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
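// All of the ALU wrappers above funnel into EmitType01, which packs the
// classic ARM data-processing layout: cond[31:28], I[25], opcode[24:21],
// S[20], Rn[19:16], Rd[15:12], operand2[11:0]. Worked example (hand-computed
// illustration, not VM output): add(R0, R1, Operand(R2)) is
//   (AL = 0xE) << 28 | (ADD = 0x4) << 21 | (R1 << 16) | (R0 << 12) | R2
//   = 0xE0810002,
// which disassembles as "add r0, r1, r2".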
      B22 | B21 | (0xf << 16) | ArmEncode::Rd(rd) | (0xf << 8) |
      B4 | static_cast<int32_t>(rm);

      ArmEncode::Rd(rd) | (0xf << 8) | B5 | B4 |
      static_cast<int32_t>(rm);

      ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |

      B22 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |

  int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |
                     ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | ArmEncode::Rs(rs) |
                     B7 | B4 | ArmEncode::Rm(rm);
  EmitMulOp(cond, 0, R0, rd, rn, rm);

  EmitMulOp(cond, B20, R0, rd, rn, rm);

  EmitMulOp(cond, B21, ra, rd, rn, rm);

  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);

void Assembler::smull(Register rd_lo,
  EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);

void Assembler::umull(Register rd_lo,
  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);

void Assembler::umlal(Register rd_lo,
  EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);

void Assembler::umaal(Register rd_lo,
  EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);

  ASSERT(TargetCPUFeatures::integer_division_supported());
  int32_t encoding = opcode | (static_cast<int32_t>(cond) << kConditionShift) |

  EmitDivOp(cond, 0, rd, rn, rm);

  EmitDivOp(cond, B21, rd, rn, rm);
  EmitMemOp(cond, true, false, rd, ad);

  EmitMemOp(cond, false, false, rd, ad);

  EmitMemOp(cond, true, true, rd, ad);

  EmitMemOp(cond, false, true, rd, ad);

  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);

  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);

  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);

  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);

  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));

  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));

  EmitMultiMemOp(cond, am, true, base, regs);

  EmitMultiMemOp(cond, am, false, base, regs);
void Assembler::dmb() {

static int32_t BitFieldExtractEncoding(bool sign_extend,
  const int32_t widthm1 = width - 1;
      (static_cast<int32_t>(rd) << kRdShift) |
  Label slow_path, done, retry;
  if (FLAG_use_slow_path) {

  LoadImmediate(addr, target::Thread::safepoint_state_offset());

  cmp(state, Operand(target::Thread::full_safepoint_state_unacquired()));

  mov(state, Operand(target::Thread::full_safepoint_state_acquired()));

  cmp(TMP, Operand(0));

  if (!FLAG_use_slow_path) {

  ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
  ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
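// EnterFullSafepoint is effectively an atomic compare-and-swap on the
// safepoint state word: the cmp(TMP, Operand(0)) above tests the
// store-exclusive result (0 means success), looping back on failure and
// diverting to the enter_safepoint stub on contention. A sketch of the retry
// shape, assuming the elided lines use ldrex/strex as ARMv7 requires:
//
//   retry:
//     ldrex state, [addr]          ; load-linked state word
//     cmp   state, #unacquired
//     bne   slow_path              ; contended -> call stub
//     mov   state, #acquired
//     strex TMP, state, [addr]     ; store-conditional
//     cmp   TMP, #0
//     bne   retry                  ; reservation lost -> try again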
void Assembler::TransitionGeneratedToNative(Register destination_address,
                                            bool enter_safepoint) {
  StoreToOffset(exit_frame_fp, THR,
                target::Thread::top_exit_frame_info_offset());
  StoreToOffset(exit_through_ffi, THR,
                target::Thread::exit_through_ffi_offset());
  StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
  LoadImmediate(tmp1, target::Thread::native_execution_state());
  StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
  if (enter_safepoint) {
    EnterFullSafepoint(tmp1, tmp2);
void Assembler::ExitFullSafepoint(Register tmp1,
                                  bool ignore_unwind_in_progress) {
  Label slow_path, done, retry;
  if (FLAG_use_slow_path) {

  LoadImmediate(addr, target::Thread::safepoint_state_offset());

  cmp(state, Operand(target::Thread::full_safepoint_state_acquired()));

  mov(state, Operand(target::Thread::full_safepoint_state_unacquired()));

  cmp(TMP, Operand(0));

  if (!FLAG_use_slow_path) {

  if (ignore_unwind_in_progress) {
            exit_safepoint_ignore_unwind_in_progress_stub_offset()));
  } else {
    ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
  }
  ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
void Assembler::TransitionNativeToGenerated(Register addr,
                                            bool ignore_unwind_in_progress,
  if (exit_safepoint) {
    ExitFullSafepoint(addr, state, ignore_unwind_in_progress);
  } else {
    ASSERT(!ignore_unwind_in_progress);

    ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
    LoadImmediate(state, target::Thread::full_safepoint_state_acquired());
    ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
  }

  LoadImmediate(state, target::Thread::vm_tag_dart_id());
  StoreToOffset(state, THR, target::Thread::vm_tag_offset());

  LoadImmediate(state, target::Thread::generated_execution_state());
  StoreToOffset(state, THR, target::Thread::execution_state_offset());

  LoadImmediate(state, 0);
  StoreToOffset(state, THR, target::Thread::top_exit_frame_info_offset());
  StoreToOffset(state, THR, target::Thread::exit_through_ffi_offset());
void Assembler::clrex() {
      B21 | B20 | (0xff << 12) | B4 | 0xf;
      B26 | B25 | ((static_cast<int32_t>(sn) >> 1) * B16) |
      (static_cast<int32_t>(rt) * B12) | B11 | B9 |
      ((static_cast<int32_t>(sn) & 1) * B7) | B4;

      B26 | B25 | B20 | ((static_cast<int32_t>(sn) >> 1) * B16) |
      (static_cast<int32_t>(rt) * B12) | B11 | B9 |
      ((static_cast<int32_t>(sn) & 1) * B7) | B4;

      B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
      (static_cast<int32_t>(rt) * B12) | B11 | B9 |
      ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
      (static_cast<int32_t>(sm) >> 1);

      (static_cast<int32_t>(rt) * B12) | B11 | B9 |
      ((static_cast<int32_t>(sm) & 1) * B5) | B4 |
      (static_cast<int32_t>(sm) >> 1);

      B11 | B9 | B8 | ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) | B4;

      B26 | B22 | (static_cast<int32_t>(rt2) * B16) |
      (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
      ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
      (static_cast<int32_t>(dm) & 0xf);

      (static_cast<int32_t>(rt) * B12) | B11 | B9 | B8 |
      ((static_cast<int32_t>(dm) >> 4) * B5) | B4 |
      (static_cast<int32_t>(dm) & 0xf);
      B26 | B24 | B20 | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |

      B26 | B24 | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sd) >> 1) * B12) | B11 | B9 |

      B26 | B24 | B20 | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |

      B26 | B24 | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) | B11 | B9 | B8 |
void Assembler::EmitMultiVSMemOp(Condition cond,
      ArmEncode::Rn(base) |
      ((static_cast<int32_t>(start) & 0x1) != 0 ? D : 0) |
      ((static_cast<int32_t>(start) >> 1) << 12) | count;

void Assembler::EmitMultiVDMemOp(Condition cond,
  const int notArmv5te = 0;
      ((static_cast<int32_t>(start) & 0x10) != 0 ? D : 0) |
      ((static_cast<int32_t>(start) & 0xf) << 12) | (count << 1) | notArmv5te;

  EmitMultiVSMemOp(cond, am, true, base, first, last - first + 1);

  EmitMultiVSMemOp(cond, am, false, base, first, last - first + 1);

  EmitMultiVDMemOp(cond, am, true, base, first, count);

  EmitMultiVDMemOp(cond, am, false, base, first, count);
void Assembler::EmitVFPsss(Condition cond,
      B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sn) >> 1) * B16) |
      ((static_cast<int32_t>(sd) >> 1) * B12) |
      ((static_cast<int32_t>(sn) & 1) * B7) |
      ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);

void Assembler::EmitVFPddd(Condition cond,
      B9 | B8 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);
  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
  if (((imm32 & ((1 << 19) - 1)) == 0) &&
      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
                   ((imm32 >> 19) & ((1 << 6) - 1));
    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd,

  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
                   ((imm64 >> 48) & ((1 << 6) - 1));
    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
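// vmovs/vmovd only accept VFP "modified immediates": an 8-bit abcdefgh
// pattern whose expansion requires the mantissa tail to be zero and the
// exponent bits to be b...b with a leading NOT(b) -- exactly what the two
// range checks above test. Hand-checked float example: 1.0f = 0x3F800000
// has zero low 19 bits and bits[30:25] = 0b011111, so it is encodable with
//   imm8 = (bit31 << 7) | (bit29 << 6) | bits[24:19]
//        = 0x00 | 0x40 | 0x30 = 0x70,
// i.e. "vmov.f32 sd, #1.0" carries imm8 = 0x70.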
  EmitVFPsss(cond, B21 | B20, sd, sn, sm);

  EmitVFPddd(cond, B21 | B20, dd, dn, dm);

  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);

  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);

  EmitVFPsss(cond, B21, sd, sn, sm);

  EmitVFPddd(cond, B21, dd, dn, dm);

  EmitVFPsss(cond, 0, sd, sn, sm);

  EmitVFPddd(cond, 0, dd, dn, dm);

  EmitVFPsss(cond, B6, sd, sn, sm);

  EmitVFPddd(cond, B6, dd, dn, dm);

  EmitVFPsss(cond, B23, sd, sn, sm);

  EmitVFPddd(cond, B23, dd, dn, dm);
void Assembler::EmitVFPsd(Condition cond,
      B9 | opcode | ((static_cast<int32_t>(sd) & 1) * B22) |
      ((static_cast<int32_t>(sd) >> 1) * B12) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);

void Assembler::EmitVFPds(Condition cond,
      B9 | opcode | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(sm) & 1) * B5) | (static_cast<int32_t>(sm) >> 1);

      (static_cast<int32_t>(rd) * B12) | B11 | B9 | B4;
void Assembler::EmitSIMDqqq(int32_t opcode,
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
      opcode | ((sz & 0x3) * B20) |
      ((static_cast<int32_t>(qd * 2) >> 4) * B22) |
      ((static_cast<int32_t>(qn * 2) & 0xf) * B16) |
      ((static_cast<int32_t>(qd * 2) & 0xf) * B12) |
      ((static_cast<int32_t>(qn * 2) >> 4) * B7) |
      ((static_cast<int32_t>(qm * 2) >> 4) * B5) |
      (static_cast<int32_t>(qm * 2) & 0xf);

void Assembler::EmitSIMDddd(int32_t opcode,
  ASSERT(TargetCPUFeatures::neon_supported());
  int sz = ShiftOfOperandSize(size);
      opcode | ((sz & 0x3) * B20) | ((static_cast<int32_t>(dd) >> 4) * B22) |
      ((static_cast<int32_t>(dn) & 0xf) * B16) |
      ((static_cast<int32_t>(dd) & 0xf) * B12) |
      ((static_cast<int32_t>(dn) >> 4) * B7) |
      ((static_cast<int32_t>(dm) >> 4) * B5) | (static_cast<int32_t>(dm) & 0xf);

  EmitSIMDqqq(B11, sz, qd, qn, qm);

  EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);

  EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);

  EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);

  EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);

  EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
  ASSERT((idx >= 0) && (idx < 8));
  code = 1 | (idx << 1);

  ASSERT((idx >= 0) && (idx < 4));
  code = 2 | (idx << 2);

  ASSERT((idx >= 0) && (idx < 2));
  code = 4 | (idx << 3);

  EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);

  EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);

  EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);

  EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);

  EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
void Assembler::bkpt(uint16_t imm16) {
  Emit(BkptEncoding(imm16));

  EmitBranch(cond, label, false);

void Assembler::bl(Label* label, Condition cond) {
  EmitBranch(cond, label, true);

      B21 | (0xfff << 8) | B4 | ArmEncode::Rm(rm);

      B21 | (0xfff << 8) | B5 | B4 | ArmEncode::Rm(rm);

void Assembler::MarkExceptionHandler(Label* label) {
  EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
  EmitBranch(AL, label, false);

void Assembler::Drop(intptr_t stack_elements) {
  ASSERT(stack_elements >= 0);
  if (stack_elements > 0) {
void Assembler::LoadWordFromPoolIndex(Register rd,
  ASSERT((pp != PP) || constant_pool_allowed());
  int32_t offset_mask = 0;
    ldr(rd, Address(pp, offset), cond);

    int32_t offset_hi = offset & ~offset_mask;
    uint32_t offset_lo = offset & offset_mask;
    if (Operand::CanHold(offset_hi, &o)) {
      add(rd, pp, o, cond);
    } else {
      LoadImmediate(rd, offset_hi, cond);
      add(rd, pp, Operand(rd), cond);
    }
    ldr(rd, Address(rd, offset_lo), cond);

  ASSERT((pp != PP) || constant_pool_allowed());
  int32_t offset_mask = 0;
    int32_t offset_hi = offset & ~offset_mask;
    uint32_t offset_lo = offset & offset_mask;
    if (Operand::CanHold(offset_hi, &o)) {
      add(TMP, pp, o, cond);
    } else {
      LoadImmediate(TMP, offset_hi, cond);
      add(TMP, pp, Operand(TMP), cond);
    }
    str(value, Address(TMP, offset_lo), cond);
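// When a pool offset does not fit the addressing-mode immediate, it is split
// into a high part an add can materialize and a low part the load/store
// absorbs. Illustrative math (assuming offset_mask = 0xfff for a 12-bit ldr):
//   offset    = 0x12345
//   offset_hi = 0x12345 & ~0xfff = 0x12000   // add rd, pp, #0x12000
//   offset_lo = 0x12345 &  0xfff = 0x345     // ldr rd, [rd, #0x345]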
void Assembler::CheckCodePointer() {
  if (!FLAG_check_code_pointer) {
    return;
  }
  Comment("CheckCodePointer");
  Label cid_ok, instructions_ok;

  const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
  mov(R0, Operand(PC));
  ldr(IP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
  cmp(R0, Operand(IP));
  b(&instructions_ok, EQ);
  Bind(&instructions_ok);

void Assembler::RestoreCodePointer() {

void Assembler::LoadPoolPointer(Register reg) {
  ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
  set_constant_pool_allowed(reg == PP);

void Assembler::SetupGlobalPoolAndDispatchTable() {
  ASSERT(FLAG_precompiled_mode);
  ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
      Address(THR, target::Thread::dispatch_table_array_offset()));

void Assembler::LoadIsolate(Register rd) {
  ldr(rd, Address(THR, target::Thread::isolate_offset()));

void Assembler::LoadIsolateGroup(Register rd) {
  ldr(rd, Address(THR, target::Thread::isolate_group_offset()));

bool Assembler::CanLoadFromObjectPool(const Object& object) const {
  if (!constant_pool_allowed()) {
void Assembler::LoadObjectHelper(
    const Object& object,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
          ? object_pool_builder().AddObject(
                object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
          : object_pool_builder().FindObject(
                object, ObjectPoolBuilderEntry::kNotPatchable,
  LoadWordFromPoolIndex(rd, index, pp, cond);

  LoadObjectHelper(rd, object, cond, false, PP);

void Assembler::LoadUniqueObject(
    const Object& object,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  LoadObjectHelper(rd, object, cond, true, PP,

void Assembler::LoadNativeEntry(Register rd,
                                const ExternalLabel* label,
                                ObjectPoolBuilderEntry::Patchability patchable,
  const intptr_t index =
      object_pool_builder().FindNativeFunction(label, patchable);
  LoadWordFromPoolIndex(rd, index, PP, cond);

void Assembler::PushObject(const Object& object) {
  LoadObject(IP, object);

void Assembler::CompareObject(Register rn, const Object& object) {
  LoadObject(IP, object);
  cmp(rn, Operand(IP));

  ASSERT((*used & (1 << reg)) == 0);
  *used |= (1 << reg);
      static_cast<Register>(Utils::CountTrailingZerosWord(free)),
void Assembler::StoreBarrier(Register object,
                             CanBeSmi can_be_smi,
  ASSERT(object != scratch);

  if (can_be_smi == kValueCanBeSmi) {
    BranchIfNotSmi(value, &passed_check, kNearJump);
    Bind(&passed_check);
  }

  const bool preserve_lr = lr_state().LRContainsReturnAddress();
    SPILLS_LR_TO_FRAME(Push(LR));

  ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
  ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
       Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
  ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
  tst(scratch, Operand(LR));

  Label restore_and_done;
  b(&restore_and_done, ZERO);
  mov(objectForCall, Operand(object));
  generate_invoke_write_barrier_wrapper_(AL, objectForCall);
  Bind(&restore_and_done);

  generate_invoke_write_barrier_wrapper_(NE, object);

  RESTORES_LR_FROM_FRAME(Pop(LR));
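// The fast-path filter above is the generational write-barrier check: it
// combines the value's header tags (new-space / evacuation candidate?) with
// the object's tags shifted by kBarrierOverlapShift (old and not yet
// remembered?) plus the thread's barrier mask, so a single tst decides
// whether the out-of-line wrapper must run. Sketch of the shape,
// reconstructed from the fragments above:
//
//   ldrb scratch, [object, #tags]
//   ldrb LR,      [value,  #tags]
//   and  scratch, LR, scratch, LSR #kBarrierOverlapShift
//   ldr  LR, [THR, #write_barrier_mask]
//   tst  scratch, LR
//   ; non-zero -> invoke the write-barrier wrapper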
void Assembler::ArrayStoreBarrier(Register object,
                                  CanBeSmi can_be_smi,
  ASSERT(object != scratch);

  if (can_be_smi == kValueCanBeSmi) {
    BranchIfNotSmi(value, &passed_check, kNearJump);
    Bind(&passed_check);
  }

  const bool preserve_lr = lr_state().LRContainsReturnAddress();
    SPILLS_LR_TO_FRAME(Push(LR));

  ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
  ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
       Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
  ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
  tst(scratch, Operand(LR));

  generate_invoke_array_write_barrier_(NE);

  RESTORES_LR_FROM_FRAME(Pop(LR));
void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
                                               const Address& dest,
                                               const Object& value,
                                               MemoryOrder memory_order,
  int32_t ignored = 0;
  if (!Address::CanHoldStoreOffset(size, dest.offset(), &ignored)) {

  LoadObject(scratch, value);
  if (memory_order == kRelease) {
    StoreRelease(scratch, dest);
  } else {
    Store(scratch, dest);
  }
  if (scratch != TMP) {

void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
  ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
  tst(TMP, Operand(1 << target::UntaggedObject::kNewOrEvacuationCandidateBit));
  ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
  tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
  Stop("Write barrier is required");

void Assembler::StoreInternalPointer(Register object,
                                     const Address& dest,

void Assembler::InitializeFieldsNoBarrier(Register object,
  ASSERT(value_odd == value_even + 1);

void Assembler::InitializeFieldsNoBarrierUnrolled(Register object,
                                                  intptr_t begin_offset,
                                                  intptr_t end_offset,
  ASSERT(value_odd == value_even + 1);
  intptr_t current_offset = begin_offset;
    strd(value_even, value_odd, base, current_offset);
  while (current_offset < end_offset) {
    str(value_even, Address(base, current_offset));

  Stop("New value must be Smi.");
  ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
  ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
  ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
       target::UntaggedObject::kClassIdTagSize, cond);

  ASSERT(target::UntaggedObject::kSizeTagPos == 8);
  ASSERT(target::UntaggedObject::kSizeTagSize == 4);
      Operand(target::UntaggedObject::kSizeTagPos -
      (Utils::NBitMask(target::UntaggedObject::kSizeTagSize)

  ldr(result, FieldAddress(object, target::Object::tags_offset()), cond);

  const intptr_t table_offset =
      target::IsolateGroup::cached_class_table_table_offset();
  LoadIsolateGroup(result);

void Assembler::CompareClassId(Register object,
  LoadClassId(scratch, object);
  CompareImmediate(scratch, class_id);

  LoadImmediate(result, kSmiCid, EQ);

  LoadClassIdMayBeSmi(result, object);

void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
  Comment("Check that object in register has cid %" Pd "", cid);
  LoadClassIdMayBeSmi(scratch, src);
  CompareImmediate(scratch, cid);

  CompareImmediate(scratch, kNullCid);
void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) {
  if (!CanEncodeBranchDistance(offset)) {
    ASSERT(!use_far_branches());

int32_t Assembler::EncodeBranchOffset(int32_t offset, int32_t inst) {
  offset -= Instr::kPCReadOffset;

int Assembler::DecodeBranchOffset(int32_t inst) {

static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
  int32_t offset = 0;
  offset |= (movt & 0xf0000) << 12;
  offset |= (movt & 0xfff) << 16;
  offset |= (movw & 0xf0000) >> 4;
  offset |= movw & 0xfff;
  return offset;
}
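// movw keeps imm4 in bits[19:16] and imm12 in bits[11:0]; movt supplies the
// upper halfword in the same layout, so the decode above just reshuffles the
// fields. Hand-computed example for a patched target of 0x00123456:
//   movw ip, #0x3456  ->  0xE303C456
//   movt ip, #0x0012  ->  0xE340C012
//   (0xE340C012 & 0xfff) << 16   = 0x00120000
//   (0xE303C456 & 0xf0000) >> 4  = 0x00003000
//   (0xE303C456 & 0xfff)         = 0x00000456
// which ORs back together to 0x00123456.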
class PatchFarBranch : public AssemblerFixup {
  void Process(const MemoryRegion& region, intptr_t position) {
    ProcessARMv7(region, position);
  }

  void ProcessARMv7(const MemoryRegion& region, intptr_t position) {
    const int32_t movw = region.Load<int32_t>(position);
    const int32_t movt = region.Load<int32_t>(position + Instr::kInstrSize);
    const int32_t bx = region.Load<int32_t>(position + 2 * Instr::kInstrSize);
    if (((movt & 0xfff0f000) == 0xe340c000) &&
        ((movw & 0xfff0f000) == 0xe300c000)) {
      const int32_t offset = DecodeARMv7LoadImmediate(movt, movw);
      const uint16_t dest_high = Utils::High16Bits(dest);
      const uint16_t dest_low = Utils::Low16Bits(dest);
      const int32_t patched_movt =
          0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
      const int32_t patched_movw =
          0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
      region.Store<int32_t>(position, patched_movw);
      region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
      return;
    }

    ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
  }

  virtual bool IsPointerOffset() const { return false; }
};

  buffer_.EmitFixup(new PatchFarBranch());
  LoadPatchableImmediate(IP, offset);
void Assembler::EmitBranch(Condition cond, Label* label, bool link) {
  if (label->IsBound()) {
    const int32_t dest = label->Position() - buffer_.Size();
    if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
      EmitFarBranch(cond, label->Position(), link);
  } else {
    label->UpdateLRState(lr_state());
    const intptr_t position = buffer_.Size();
    if (use_far_branches()) {
      const int32_t dest = label->position_;
    } else {
      EmitType5(cond, label->position_, link);
    }
    label->LinkTo(position, lr_state());
  }
}

void Assembler::BindARMv7(Label* label) {
  ASSERT(!label->IsBound());
  intptr_t bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    const int32_t position = label->Position();
    int32_t dest = bound_pc - position;
    if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
      const int32_t movw =
          buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
      const int32_t movt =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
      dest = buffer_.Size();
      const uint16_t dest_high = Utils::High16Bits(dest);
      const uint16_t dest_low = Utils::Low16Bits(dest);
      const int32_t patched_movt =
          0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
      const int32_t patched_movw =
          0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
      buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
      label->position_ = DecodeARMv7LoadImmediate(movt, movw);
    } else if (use_far_branches() && CanEncodeBranchDistance(dest)) {
      const int32_t movw =
          buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
      const int32_t movt =
          buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
      const int32_t branch =
          buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
      const int32_t cond = branch & 0xf0000000;
      const int32_t link = (branch & 0x20) << 19;
      const int32_t new_branch = cond | link | 0x0a000000;
      const int32_t encoded = EncodeBranchOffset(dest, new_branch);
      buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded);
      buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
                             Instr::kNopInstruction);
      buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
                             Instr::kNopInstruction);
      label->position_ = DecodeARMv7LoadImmediate(movt, movw);
    } else {
      BailoutIfInvalidBranchOffset(dest);
      int32_t next = buffer_.Load<int32_t>(position);
      int32_t encoded = Assembler::EncodeBranchOffset(dest, next);
      buffer_.Store<int32_t>(position, encoded);
      label->position_ = Assembler::DecodeBranchOffset(next);
    }
  }
  label->BindTo(bound_pc, lr_state());
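// With far branches enabled, every unresolved branch site is a three-word
// pattern: movw ip, #lo16; movt ip, #hi16; b<cond>(l)x ip. BindARMv7 walks
// the label's link chain (threaded through those immediates, which is why
// DecodeARMv7LoadImmediate recovers the next link) and rewrites each site:
// still-far targets get the movw/movt pair patched with the real
// destination, while targets that turned out to be near collapse into a
// single conditional b/bl followed by two nops.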
  auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
    case kUnboxedDouble:
    case kUnboxedInt32x4:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:

    int32_t* offset_mask) {
      *offset_mask = 0xff;
      return Utils::MagnitudeIsUint(8, offset);

      *offset_mask = 0xfff;
      return Utils::MagnitudeIsUint(12, offset);

      *offset_mask = 0x3fc;
      return (Utils::MagnitudeIsUint(10, offset) &&
              Utils::IsAligned(offset, 4));

    int32_t* offset_mask) {
      *offset_mask = 0xff;
      return Utils::MagnitudeIsUint(8, offset);

      *offset_mask = 0xfff;
      return Utils::MagnitudeIsUint(12, offset);

      *offset_mask = 0x3fc;
      return (Utils::MagnitudeIsUint(10, offset) &&
              Utils::IsAligned(offset, 4));

bool Address::CanHoldImmediateOffset(bool is_load,
  int32_t offset_mask = 0;
    return CanHoldLoadOffset(OperandSizeFor(cid), offset, &offset_mask);
    return CanHoldStoreOffset(OperandSizeFor(cid), offset, &offset_mask);
  stm(DB_W, SP, regs, cond);

  ldm(IA_W, SP, regs, cond);

  vstmd(DB_W, SP, dreg, 2, cond);

  vldmd(IA_W, SP, dreg, 2, cond);

void Assembler::PushRegisters(const RegisterSet& regs) {
  const intptr_t fpu_regs_count = regs.FpuRegisterCount();
  if (fpu_regs_count > 0) {
    mov(TMP, Operand(SP));
    if (regs.ContainsFpuRegister(fpu_reg)) {

    if (regs.ContainsRegister(reg)) {
      reg_list |= (1 << reg);
  if (reg_list != 0) {

void Assembler::PopRegisters(const RegisterSet& regs) {
    if (regs.ContainsRegister(reg)) {
      reg_list |= (1 << reg);
  if (reg_list != 0) {

  const intptr_t fpu_regs_count = regs.FpuRegisterCount();
  if (fpu_regs_count > 0) {
    if (regs.ContainsFpuRegister(fpu_reg)) {
void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
  intptr_t num_pending_regs = 0;
    if (reg >= lowest_pending_reg) {
      ASSERT(pending_regs != 0);
      if (num_pending_regs > 1) {
        PushList(pending_regs);
      } else {
        Push(lowest_pending_reg);
      }
      num_pending_regs = 0;
    }
    pending_regs |= (1 << reg);
    lowest_pending_reg = reg;

  if (pending_regs != 0) {
    if (num_pending_regs > 1) {
      PushList(pending_regs);
    } else {
      Push(lowest_pending_reg);
    }
  }
}
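// stm (and thus PushList) always stores the set in ascending register order,
// so only a strictly descending run of requested registers can be batched
// into one instruction; an ascending neighbor flushes the pending batch.
// Illustrative call: PushRegistersInOrder({R3, R1, R0, R2}) batches the
// descending run R3, R1, R0 into PushList((1 << R0) | (1 << R1) | (1 << R3))
// -- higher-numbered registers land at higher addresses, preserving the
// requested order -- and then emits a separate Push(R2).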
void Assembler::PushNativeCalleeSavedRegisters() {

void Assembler::PopNativeCalleeSavedRegisters() {

void Assembler::ExtendValue(Register rd,
  if (rd == rm) return;
  return mov(rd, Operand(rm), cond);

                    const Operand& shift_imm,
  ASSERT(shift_imm.type() == 1);
  ASSERT(shift_imm.encoding() != 0);
  mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);

  mov(rd, Operand(rm, LSL, rs), cond);

                    const Operand& shift_imm,
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  mov(rd, Operand(rm, LSR, shift), cond);

  mov(rd, Operand(rm, LSR, rs), cond);

                    const Operand& shift_imm,
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  mov(rd, Operand(rm, ASR, shift), cond);

                     const Operand& shift_imm,
  ASSERT(shift_imm.type() == 1);
  uint32_t shift = shift_imm.encoding();
  movs(rd, Operand(rm, ASR, shift), cond);

  mov(rd, Operand(rm, ASR, rs), cond);

                    const Operand& shift_imm,
  ASSERT(shift_imm.type() == 1);
  ASSERT(shift_imm.encoding() != 0);
  mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);

  mov(rd, Operand(rm, ROR, rs), cond);

  mov(rd, Operand(rm, ROR, 0), cond);  // ROR #0 encodes RRX.

  Asr(rd, rm, Operand(31), cond);
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);
  vrecpsqs(QTMP, qm, qd);
  vmulqs(qd, qd, QTMP);

  vmulqs(QTMP, qd, qd);
  vmulqs(qd, qd, QTMP);
  vmulqs(QTMP, qd, qd);
  vmulqs(qd, qd, QTMP);

  VreciprocalSqrtqs(qd, qm);
  Vreciprocalqs(qd, qm);

  Vreciprocalqs(qd, qm);
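// NEON's vrecpe/vrsqrte estimates are only ~9 bits accurate, so the helpers
// above refine them with Newton-Raphson steps. vrecps computes (2.0 - a*b),
// giving the iteration
//   x1 = x0 * (2 - d*x0),  x2 = x1 * (2 - d*x1)
// which roughly doubles the accurate bits each step; vrsqrts computes
// (3.0 - a*b) / 2 and plays the same role for reciprocal square roots.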
void Assembler::Branch(const Address& address, Condition cond) {
  ldr(PC, address, cond);
}

void Assembler::BranchLink(intptr_t target_code_pool_index,
  LoadWordFromPoolIndex(code_reg, target_code_pool_index, PP, AL);
  Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));

void Assembler::BranchLink(
    ObjectPoolBuilderEntry::Patchability patchable,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  const intptr_t index = object_pool_builder().FindObject(
  BranchLink(index, entry_kind);

void Assembler::BranchLinkPatchable(
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind,

void Assembler::BranchLinkWithEquivalence(const Code& target,
                                          const Object& equivalence,
  const intptr_t index =
  BranchLink(index, entry_kind);

void Assembler::BranchLink(const ExternalLabel* label) {
  LoadImmediate(LR, label->address());
void Assembler::LoadPatchableImmediate(Register rd,
  const uint16_t value_low = Utils::Low16Bits(value);
  const uint16_t value_high = Utils::High16Bits(value);
  movw(rd, value_low, cond);
  movt(rd, value_high, cond);

void Assembler::LoadDecodableImmediate(Register rd,
  movw(rd, Utils::Low16Bits(value), cond);
  const uint16_t value_high = Utils::High16Bits(value);
  if (value_high != 0) {
    movt(rd, value_high, cond);
  }

  LoadImmediate(rd, value.value(), cond);

  if (Operand::CanHold(value, &o)) {
  } else if (Operand::CanHold(~value, &o)) {
  } else {
    LoadDecodableImmediate(rd, value, cond);
  }
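// Operand::CanHold succeeds only for ARM's "modified immediate": an 8-bit
// value rotated right by an even amount. Illustrative cases (hand-checked):
//   0x000000FF  -> imm8 = 0xFF, rot = 0   (encodable as-is)
//   0xFF000000  -> imm8 = 0xFF, rot = 8   (encodable as-is)
//   0xFFFFFF00  -> complement 0xFF is encodable, so the mvn branch fires
//   0x00123456  -> nothing fits; falls through to movw/movt via
//                  LoadDecodableImmediate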
  if (!vmovs(sd, value, cond)) {
    const int index = sd & 1;
    LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
    vmovdr(dd, index, IP, cond);
  }

void Assembler::LoadDImmediate(DRegister dd,
  if (vmovd(dd, value, cond)) return;

  if (constant_pool_allowed()) {
    int64_t imm64 = bit_cast<int64_t, double>(value);
    intptr_t index = object_pool_builder().FindImmediate64(imm64);
    LoadDFromOffset(dd, PP, offset, cond);
  } else {
    int64_t imm64 = bit_cast<int64_t, double>(value);
    LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
    LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
    vmovdrr(dd, IP, scratch, cond);
  }

void Assembler::LoadQImmediate(QRegister qd, simd128_value_t value) {
  ASSERT(constant_pool_allowed());
  intptr_t index = object_pool_builder().FindImmediate128(value);
Address Assembler::PrepareLargeLoadOffset(const Address& address,
  if (address.kind() != Address::Immediate) {
  int32_t offset = address.offset();
  int32_t offset_mask = 0;
  if (Address::CanHoldLoadOffset(size, offset, &offset_mask)) {
  auto mode = address.mode();
  AddImmediate(temp, base, offset & ~offset_mask, cond);

Address Assembler::PrepareLargeStoreOffset(const Address& address,
  if (address.kind() != Address::Immediate) {
  int32_t offset = address.offset();
  int32_t offset_mask = 0;
  if (Address::CanHoldStoreOffset(size, offset, &offset_mask)) {
  auto mode = address.mode();
  AddImmediate(temp, base, offset & ~offset_mask, cond);

    const Address& address,
  const Address& addr = PrepareLargeLoadOffset(address, size, cond);
      ldrsb(reg, addr, cond);
      ldrb(reg, addr, cond);
      ldrsh(reg, addr, cond);
      ldrh(reg, addr, cond);
      ldr(reg, addr, cond);

void Assembler::LoadFromStack(Register dst, intptr_t depth) {

void Assembler::StoreToStack(Register src, intptr_t depth) {

void Assembler::CompareToStack(Register src, intptr_t depth) {
  LoadFromStack(TMP, depth);
  CompareRegisters(src, TMP);

    const Address& address,
  const Address& addr = PrepareLargeStoreOffset(address, size, cond);
      strb(reg, addr, cond);
      strh(reg, addr, cond);
      str(reg, addr, cond);

void Assembler::LoadSFromOffset(SRegister reg,
  vldrs(reg, PrepareLargeLoadOffset(Address(base, offset), kSWord, cond), cond);

void Assembler::StoreSToOffset(SRegister reg,

void Assembler::LoadDFromOffset(DRegister reg,
  vldrd(reg, PrepareLargeLoadOffset(Address(base, offset), kDWord, cond), cond);

void Assembler::StoreDToOffset(DRegister reg,

void Assembler::LoadMultipleDFromOffset(DRegister first,

void Assembler::StoreMultipleDToOffset(DRegister first,
void Assembler::AddImmediate(Register rd,
    mov(rd, Operand(rn), cond);

  if (Operand::CanHold(value, &o)) {
    add(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    sub(rd, rn, o, cond);
  } else {
    if (Operand::CanHold(~value, &o)) {
      add(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      sub(rd, rn, Operand(IP), cond);
    } else if (value > 0) {
      LoadDecodableImmediate(IP, value, cond);
      add(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, -value, cond);
      sub(rd, rn, Operand(IP), cond);
    }
  }
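// AddImmediate escalates through progressively more expensive encodings:
// an immediate add, a sub of the negated value, an mvn of a complement into
// IP, and finally a full movw/movt materialization. Illustrative: for
// value = 0x12345 none of 0x12345, -0x12345, or their complements is a
// modified immediate, so the emitted sequence is
//   movw ip, #0x2345
//   movt ip, #0x0001
//   add  rd, rn, ip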
void Assembler::AddImmediateSetFlags(Register rd,
  if (Operand::CanHold(value, &o)) {
    adds(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    subs(rd, rn, o, cond);
  } else {
    if (Operand::CanHold(~value, &o)) {
      adds(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      subs(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      adds(rd, rn, Operand(IP), cond);
    }
  }

void Assembler::SubImmediate(Register rd,
  AddImmediate(rd, rn, -value, cond);

void Assembler::SubImmediateSetFlags(Register rd,
  if (Operand::CanHold(value, &o)) {
    subs(rd, rn, o, cond);
  } else if (Operand::CanHold(-value, &o)) {
    adds(rd, rn, o, cond);
  } else {
    if (Operand::CanHold(~value, &o)) {
      subs(rd, rn, Operand(IP), cond);
    } else if (Operand::CanHold(~(-value), &o)) {
      adds(rd, rn, Operand(IP), cond);
    } else {
      LoadDecodableImmediate(IP, value, cond);
      subs(rd, rn, Operand(IP), cond);
    }
  }
void Assembler::AndImmediate(Register rd,
  if (Operand::CanHold(imm, &o)) {
    and_(rd, rs, Operand(o), cond);
  } else {
    LoadImmediate(TMP, imm, cond);
    and_(rd, rs, Operand(TMP), cond);
  }

void Assembler::AndImmediateSetFlags(Register rd,
  if (Operand::CanHold(imm, &o)) {
    ands(rd, rs, Operand(o), cond);
  } else {
    LoadImmediate(TMP, imm, cond);
    ands(rd, rs, Operand(TMP), cond);
  }

void Assembler::OrImmediate(Register rd,
  if (Operand::CanHold(imm, &o)) {
    orr(rd, rs, Operand(o), cond);
  } else {
    LoadImmediate(TMP, imm, cond);
    orr(rd, rs, Operand(TMP), cond);
  }

  if (Operand::CanHold(value, &o)) {
  } else {
    LoadImmediate(IP, value, cond);
    cmp(rn, Operand(IP), cond);
  }

  if (Operand::CanHold(imm, &o)) {
  } else {
    LoadImmediate(IP, imm);
    tst(rn, Operand(IP), cond);
  }
  if (TargetCPUFeatures::integer_division_supported()) {

  vmovsr(stmpl, left);
  vcvtdi(tmpl, stmpl);
  vmovsr(stmpr, right);
  vcvtdi(tmpr, stmpr);
  vdivd(tmpr, tmpl, tmpr);
  vcvtid(stmpr, tmpr);
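// On cores without hardware sdiv/udiv, IntegerDivide leans on the VFP unit:
// both 32-bit operands are converted to double (exact, since a double's
// 53-bit mantissa holds any int32), divided with vdivd, and converted back
// with vcvtid, whose truncation toward zero matches integer-division
// semantics.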
static int NumRegsBelowFP(RegList regs) {
  for (int i = 0; i < FP; i++) {
    if ((regs & (1 << i)) != 0) {

void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
  Asr(reg, reg, Operand(shift));
}

void Assembler::CompareWords(Register reg1,
  BranchIfZero(count, equals, Assembler::kNearJump);
  AddImmediate(count, -1);
  ldr(temp, Address(reg1, 4, Address::PostIndex));
  ldr(TMP, Address(reg2, 4, Address::PostIndex));
  cmp(temp, Operand(TMP));
  BranchIf(EQUAL, &loop, Assembler::kNearJump);
void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
  if (prologue_offset_ == -1) {
    prologue_offset_ = CodeSize();
  }
  if ((regs & (1 << FP)) != 0) {
    add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
  }
  if (frame_size != 0) {
    AddImmediate(SP, -frame_size);
  }
}

void Assembler::LeaveFrame(RegList regs, bool allow_pop_pc) {
  ASSERT(allow_pop_pc || (regs & (1 << PC)) == 0);
  if ((regs & (1 << FP)) != 0) {
    sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
  }

  READS_RETURN_ADDRESS_FROM_LR(bx(LR, cond));

  RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(LR, value));

void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  AddImmediate(SP, -frame_space);
  if (OS::ActivationFrameAlignment() > 1) {
    bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
  }
}
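// bic with (alignment - 1) rounds SP down to the required boundary, which is
// the correct direction on a descending stack. Example with the common
// 8-byte ABI alignment: SP = 0x...F74C -> bic sp, sp, #7 -> 0x...F748.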
void Assembler::EmitEntryFrameVerification(Register scratch) {
  ASSERT(!constant_pool_allowed());
  add(scratch, scratch, Operand(FPREG));
  cmp(scratch, Operand(SPREG));

void Assembler::CallRuntime(const RuntimeEntry& entry,
  ASSERT(!entry.is_leaf());
  LoadFromOffset(R9, THR, entry.OffsetFromThread());
  ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));

#define __ assembler_->

#if defined(VFPv3_D32)
static const RegisterSet kVolatileFpuRegisters(0, 0xFF0F);
#else
static const RegisterSet kVolatileFpuRegisters(0, 0x000F);
#endif
LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
                                   intptr_t frame_size,
                                   bool preserve_registers)
    : assembler_(assembler), preserve_registers_(preserve_registers) {
  __ Comment("EnterCallRuntimeFrame");
  if (preserve_registers) {
    SPILLS_LR_TO_FRAME(__ EnterFrame(
    __ PushRegisters(kVolatileFpuRegisters);
  } else {
    SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
  }
  __ ReserveAlignedFrameSpace(frame_size);
}

void LeafRuntimeScope::Call(const RuntimeEntry& entry,
  __ LoadFromOffset(TMP, THR, entry.OffsetFromThread());
         compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
  __ LoadImmediate(TMP, VMTag::kDartTagId);
         compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));

LeafRuntimeScope::~LeafRuntimeScope() {
  if (preserve_registers_) {
    const intptr_t kPushedFpuRegisterSize =
    const intptr_t kPushedRegistersSize =
    __ AddImmediate(SP, FP, -kPushedRegistersSize);
    __ PopRegisters(kVolatileFpuRegisters);
        (1 << FP) | (1 << LR)));
  } else {
    RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
  }
}
void Assembler::EnterDartFrame(intptr_t frame_size, bool load_pool_pointer) {
  ASSERT(!constant_pool_allowed());
  if (!FLAG_precompiled_mode) {
    SPILLS_LR_TO_FRAME(
        EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0));
    if (load_pool_pointer) LoadPoolPointer();
  } else {
    SPILLS_LR_TO_FRAME(EnterFrame((1 << FP) | (1 << LR), 0));
    set_constant_pool_allowed(true);
  }
  AddImmediate(SP, -frame_size);
}

void Assembler::EnterOsrFrame(intptr_t extra_size) {
  ASSERT(!constant_pool_allowed());
  Comment("EnterOsrFrame");
  RestoreCodePointer();
  AddImmediate(SP, -extra_size);
}

void Assembler::LeaveDartFrame() {
  if (!FLAG_precompiled_mode) {
  set_constant_pool_allowed(false);
  RESTORES_LR_FROM_FRAME(LeaveFrame((1 << FP) | (1 << LR)));
}

void Assembler::LeaveDartFrameAndReturn() {
  if (!FLAG_precompiled_mode) {
  set_constant_pool_allowed(false);
  LeaveFrame((1 << FP) | (1 << PC), true);
}

void Assembler::EnterStubFrame() {

void Assembler::LeaveStubFrame() {

void Assembler::EnterCFrame(intptr_t frame_space) {
  EnterFrame(1 << FP, 0);
  ReserveAlignedFrameSpace(frame_space);
}

void Assembler::LeaveCFrame() {
  LeaveFrame(1 << FP);
}
void Assembler::MonomorphicCheckedEntryJIT() {
  has_monomorphic_entry_ = true;
#if defined(TESTING) || defined(DEBUG)
  bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);
#endif
  intptr_t start = CodeSize();

  Comment("MonomorphicCheckedEntry");
         target::Instructions::kMonomorphicEntryOffsetJIT);

  const intptr_t cid_offset = target::Array::element_offset(0);
  const intptr_t count_offset = target::Array::element_offset(1);

  ldr(R1, FieldAddress(R9, cid_offset));
  ldr(R2, FieldAddress(R9, count_offset));
  LoadClassIdMayBeSmi(IP, R0);
  Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
  str(R2, FieldAddress(R9, count_offset));
  LoadImmediate(R4, 0);

         target::Instructions::kPolymorphicEntryOffsetJIT);
#if defined(TESTING) || defined(DEBUG)
  set_use_far_branches(saved_use_far_branches);
#endif
}

void Assembler::MonomorphicCheckedEntryAOT() {
  has_monomorphic_entry_ = true;
#if defined(TESTING) || defined(DEBUG)
  bool saved_use_far_branches = use_far_branches();
  set_use_far_branches(false);
#endif
  intptr_t start = CodeSize();

  Comment("MonomorphicCheckedEntry");
         target::Instructions::kMonomorphicEntryOffsetAOT);

  LoadClassId(IP, R0);
  Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);

         target::Instructions::kPolymorphicEntryOffsetAOT);
#if defined(TESTING) || defined(DEBUG)
  set_use_far_branches(saved_use_far_branches);
#endif
}

void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
  has_monomorphic_entry_ = true;
  while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {

  while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
void Assembler::FinalizeHashForSize(intptr_t bit_size,

void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
  ldrb(TMP, Address(stats_addr_reg, 0));
  cmp(TMP, Operand(0));

void Assembler::MaybeTraceAllocation(intptr_t cid,
  LoadAllocationTracingStateAddress(temp_reg, cid);
  MaybeTraceAllocation(temp_reg, trace);

  LoadAllocationTracingStateAddress(temp_reg, cid);
  MaybeTraceAllocation(temp_reg, trace);

  LoadIsolateGroup(dest);
  ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
      target::ClassTable::allocation_tracing_state_table_offset()));
      target::ClassTable::AllocationTracingStateSlotOffsetFor(0));

void Assembler::LoadAllocationTracingStateAddress(Register dest, intptr_t cid) {
  LoadIsolateGroup(dest);
  ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
      target::ClassTable::allocation_tracing_state_table_offset()));
      target::ClassTable::AllocationTracingStateSlotOffsetFor(cid));
void Assembler::TryAllocateObject(intptr_t cid,
                                  intptr_t instance_size,
  ASSERT(failure != nullptr);
  ASSERT(instance_reg != temp_reg);
  ASSERT(instance_size != 0);
  ASSERT(Utils::IsAligned(instance_size,
  if (FLAG_inline_alloc &&
    ldr(instance_reg, Address(THR, target::Thread::top_offset()));
    AddImmediate(instance_reg, instance_size);
    ldr(IP, Address(THR, target::Thread::end_offset()));
    cmp(IP, Operand(instance_reg));
    CheckAllocationCanary(instance_reg, temp_reg);
    str(instance_reg, Address(THR, target::Thread::top_offset()));
    LoadImmediate(temp_reg, tags);
    str(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
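// TryAllocateObject is the inlined bump-pointer fast path over the thread's
// TLAB: load top, bump by the compile-time-constant (aligned) size, compare
// against end, then publish the new top and store the precomputed header
// tags. Sketch of the emitted shape (branch condition illustrative):
//
//   ldr  instance, [THR, #top]
//   add  instance, instance, #instance_size
//   ldr  ip, [THR, #end]
//   cmp  ip, instance
//   blo  <failure>                ; TLAB exhausted -> slow-path allocation
//   str  instance, [THR, #top]
//   ... write tags, bias instance by the heap-object tag ...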
void Assembler::TryAllocateArray(intptr_t cid,
                                 intptr_t instance_size,
  if (FLAG_inline_alloc &&
    ldr(instance, Address(THR, target::Thread::top_offset()));
    AddImmediateSetFlags(end_address, instance, instance_size);
    ldr(temp2, Address(THR, target::Thread::end_offset()));
    cmp(end_address, Operand(temp2));
    CheckAllocationCanary(instance, temp2);
    str(end_address, Address(THR, target::Thread::top_offset()));
    LoadImmediate(temp2, tags);
        FieldAddress(instance, target::Object::tags_offset()));

  __ cmp(size, Operand(0));
void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
                                                  intptr_t offset_into_target) {
  EmitType5(cond, 0x686868, true);

  PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                PcRelativeCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}

void Assembler::GenerateUnRelocatedPcRelativeTailCall(
    intptr_t offset_into_target) {
  EmitType5(cond, 0x686868, false);

  PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
                                    PcRelativeTailCallPattern::kLengthInBytes);
  pattern.set_distance(offset_into_target);
}
bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
                                            intptr_t index_scale,
  ASSERT(needs_base != nullptr);
  auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
  if ((rep == kUnboxedInt32x4) || (rep == kUnboxedFloat32x4) ||
      (rep == kUnboxedFloat64x2)) {

  if (!IsSafeSmi(constant)) return false;
  const intptr_t offset_base =
  const int64_t offset = index * index_scale + offset_base;
  if (!Utils::IsInt(32, offset)) return false;
  if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
    *needs_base = false;
  if (Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base)) {

Address Assembler::ElementAddressForIntIndex(bool is_load,
                                             intptr_t index_scale,
  const int64_t offset_base =
      offset_base + static_cast<int64_t>(index) * index_scale;
  if (Address::CanHoldImmediateOffset(is_load, cid, offset)) {
    return Address(array, static_cast<int32_t>(offset));
  }
  ASSERT(Address::CanHoldImmediateOffset(is_load, cid, offset - offset_base));
  AddImmediate(temp, array, static_cast<int32_t>(offset_base));
  return Address(temp, static_cast<int32_t>(offset - offset_base));
void Assembler::LoadElementAddressForIntIndex(Register address,
                                              intptr_t index_scale,
  const int64_t offset_base =
      offset_base + static_cast<int64_t>(index) * index_scale;
  AddImmediate(address, array, offset);

Address Assembler::ElementAddressForRegIndex(bool is_load,
                                             intptr_t index_scale,
  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
    add(base, array, Operand(index, ASR, 1));
    add(base, array, Operand(index, LSL, shift));

    return Address(array, index, ASR, 1);
    return Address(array, index, LSL, shift);

  int32_t offset_mask = 0;
  if ((is_load && !Address::CanHoldLoadOffset(size, offset, &offset_mask)) ||
      (!is_load && !Address::CanHoldStoreOffset(size, offset, &offset_mask))) {
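// Dart smis carry their payload shifted left by kSmiTagShift (1), so a boxed
// index register already holds index*2 and the net address shift becomes
// ShiftForPowerOfTwo(index_scale) - 1. Illustrative cases:
//   index_scale = 2, boxed: shift =  0 -> add base, array, index
//   index_scale = 8, boxed: shift =  2 -> add base, array, index, LSL #2
//   index_scale = 1, boxed: shift = -1 -> add base, array, index, ASR #1
// The ASR #1 special case is thus the smi untag folded into the address
// arithmetic.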
void Assembler::LoadElementAddressForRegIndex(Register address,
                                              intptr_t index_scale,
  const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
  const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
    add(address, array, Operand(index, ASR, 1));
    add(address, array, Operand(index, LSL, shift));
  AddImmediate(address, offset);

void Assembler::LoadStaticFieldAddress(Register address,
  LoadFieldFromOffset(scratch, field,
                      target::Field::host_offset_or_field_id_offset());
  const intptr_t field_table_offset =
      is_shared ? compiler::target::Thread::shared_field_table_values_offset()
  LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
  add(address, address,

void Assembler::LoadFieldAddressForRegOffset(Register address,
      Operand(offset_in_words_as_smi, LSL,
void Assembler::LoadHalfWordUnaligned(Register dst,
  ldrsb(tmp, Address(addr, 1));

void Assembler::LoadHalfWordUnsignedUnaligned(Register dst,
  ldrb(tmp, Address(addr, 1));

void Assembler::StoreHalfWordUnaligned(Register src,
  Lsr(tmp, src, Operand(8));
  strb(tmp, Address(addr, 1));

  ldrb(tmp, Address(addr, 1));
  ldrb(tmp, Address(addr, 2));
  ldrb(tmp, Address(addr, 3));

  Lsr(tmp, src, Operand(8));
  strb(tmp, Address(addr, 1));
  Lsr(tmp, src, Operand(16));
  strb(tmp, Address(addr, 2));
  Lsr(tmp, src, Operand(24));
  strb(tmp, Address(addr, 3));
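// These helpers exist because unaligned ldr/ldrh/str/strh are not universally
// safe across the targeted ARM configurations, so word and halfword accesses
// are decomposed into byte operations reassembled little-endian. The 32-bit
// load above has the shape:
//
//   ldrb dst, [addr, #0]
//   ldrb tmp, [addr, #1];  orr dst, dst, tmp, LSL #8
//   ldrb tmp, [addr, #2];  orr dst, dst, tmp, LSL #16
//   ldrb tmp, [addr, #3];  orr dst, dst, tmp, LSL #24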
    RangeCheckCondition condition,
  auto cc = condition == kIfInRange ? LS : HI;
  AddImmediate(to_check, value, -low);
  CompareImmediate(to_check, high - low);
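// Classic unsigned range-check trick: after to_check = value - low, one
// unsigned comparison against (high - low) answers low <= value <= high,
// because values below low wrap around to huge unsigned numbers. kIfInRange
// therefore maps to LS (unsigned <=) and kIfNotInRange to HI. Example:
// low = 10, high = 20, value = 7 gives 7 - 10 = 0xFFFFFFFD, which compares
// HI against 10 -> out of range.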