6#if defined(TARGET_ARCH_ARM)
8#define SHOULD_NOT_INCLUDE_RUNTIME
18#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \
19 !defined(DART_HOST_OS_IOS) && !defined(DART_HOST_OS_MACOS)
20#error ARM cross-compile only supported on Linux, Android, iOS, and Mac
34 intptr_t far_branch_level)
35 : AssemblerBase(object_pool_builder),
36 use_far_branches_(far_branch_level != 0),
37 constant_pool_allowed_(
false) {
40 Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)),
43 generate_invoke_array_write_barrier_ = [&](
Condition cond) {
44 Call(Address(THR, target::Thread::array_write_barrier_entry_point_offset()),
49uint32_t Address::encoding3()
const {
50 if (kind_ == Immediate) {
53 return (encoding_ & ~kOffset12Mask) |
B22 | ((
offset & 0xf0) << 4) |
56 ASSERT(kind_ == IndexRegister);
60uint32_t Address::vencoding()
const {
61 ASSERT(kind_ == Immediate);
65 int mode = encoding_ & ((8 | 4 | 1) << 21);
66 ASSERT((mode == Offset) || (mode == NegOffset));
67 uint32_t vencoding = (encoding_ & (0xf <<
kRnShift)) | (
offset >> 2);
74void Assembler::Emit(int32_t value) {
75 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
76 buffer_.Emit<int32_t>(
value);
// Emits an ARM type-0/1 data-processing instruction (register or immediate
// operand form).
// NOTE(review): garbled extraction — the remaining parameters (type, opcode,
// set_cc, rn, rd, o), the opcode/set-cc encoding terms, and the trailing
// Emit(encoding) call are missing from this fragment; restore before use.
79void Assembler::EmitType01(Condition cond,
87  ASSERT(cond != kNoCondition);
  // Instruction word: condition | type | ... | Rn | Rd | shifter operand.
89      static_cast<int32_t
>(cond) << kConditionShift |
type << kTypeShift |
91      ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | o.encoding();
95void Assembler::EmitType5(Condition cond, int32_t
offset,
bool link) {
96 ASSERT(cond != kNoCondition);
97 int32_t encoding =
static_cast<int32_t
>(cond) << kConditionShift |
98 5 << kTypeShift | (link ? 1 : 0) <<
kLinkShift;
99 BailoutIfInvalidBranchOffset(
offset);
100 Emit(Assembler::EncodeBranchOffset(
offset, encoding));
103void Assembler::EmitMemOp(Condition cond,
108 ASSERT(rd != kNoRegister);
109 ASSERT(cond != kNoCondition);
111 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
113 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B26 |
114 (ad.kind() == Address::Immediate ? 0 :
B25) |
115 (
load ? L : 0) | (byte ?
B : 0) | ArmEncode::Rd(rd) |
120void Assembler::EmitMemOpAddressMode3(Condition cond,
124 ASSERT(rd != kNoRegister);
125 ASSERT(cond != kNoCondition);
127 ASSERT(!ad.has_writeback() || (ad.rn() != rd));
129 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
mode |
130 ArmEncode::Rd(rd) | ad.encoding3();
134void Assembler::EmitMultiMemOp(Condition cond,
140 ASSERT(cond != kNoCondition);
142 ASSERT(!Address::has_writeback(am) || !(regs & (1 <<
base)));
143 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
144 am | (
load ?
L : 0) | ArmEncode::Rn(
base) | regs;
// Emits a shift-by-immediate data-processing encoding (LSL/LSR/ASR/ROR with
// an immediate shift amount).
// NOTE(review): garbled extraction — the remaining parameters, the Rd and
// operand encoding terms, and the trailing Emit(encoding) call are missing
// from this fragment; restore before use.
148void Assembler::EmitShiftImmediate(Condition cond,
153  ASSERT(cond != kNoCondition);
155  int32_t encoding =
static_cast<int32_t
>(cond) << kConditionShift |
  // `opcode` selects the shift kind; `rm` is the shifted source register.
158      static_cast<int32_t
>(opcode) << kShiftShift |
159      static_cast<int32_t
>(rm);
// Emits a shift-by-register data-processing encoding; B4 distinguishes the
// register-shift form from the immediate-shift form.
// NOTE(review): garbled extraction — the remaining parameters, the Rd/Rs
// encoding terms, and the trailing Emit(encoding) call are missing from this
// fragment; restore before use.
163void Assembler::EmitShiftRegister(Condition cond,
168  ASSERT(cond != kNoCondition);
170  int32_t encoding =
static_cast<int32_t
>(cond) << kConditionShift |
  // `opcode` selects the shift kind; `rm` is the shifted source register.
173      static_cast<int32_t
>(opcode) << kShiftShift | B4 |
174      static_cast<int32_t
>(rm);
178void Assembler::and_(Register rd, Register rn, Operand o, Condition cond) {
179 EmitType01(cond, o.type(), AND, 0, rn, rd, o);
182void Assembler::ands(Register rd, Register rn, Operand o, Condition cond) {
183 EmitType01(cond, o.type(), AND, 1, rn, rd, o);
186void Assembler::eor(Register rd, Register rn, Operand o, Condition cond) {
187 EmitType01(cond, o.type(), EOR, 0, rn, rd, o);
190void Assembler::sub(Register rd, Register rn, Operand o, Condition cond) {
191 EmitType01(cond, o.type(), SUB, 0, rn, rd, o);
194void Assembler::rsb(Register rd, Register rn, Operand o, Condition cond) {
195 EmitType01(cond, o.type(), RSB, 0, rn, rd, o);
198void Assembler::rsbs(Register rd, Register rn, Operand o, Condition cond) {
199 EmitType01(cond, o.type(), RSB, 1, rn, rd, o);
202void Assembler::add(Register rd, Register rn, Operand o, Condition cond) {
203 EmitType01(cond, o.type(), ADD, 0, rn, rd, o);
206void Assembler::adds(Register rd, Register rn, Operand o, Condition cond) {
207 EmitType01(cond, o.type(), ADD, 1, rn, rd, o);
210void Assembler::subs(Register rd, Register rn, Operand o, Condition cond) {
211 EmitType01(cond, o.type(), SUB, 1, rn, rd, o);
214void Assembler::adc(Register rd, Register rn, Operand o, Condition cond) {
215 EmitType01(cond, o.type(), ADC, 0, rn, rd, o);
218void Assembler::adcs(Register rd, Register rn, Operand o, Condition cond) {
219 EmitType01(cond, o.type(), ADC, 1, rn, rd, o);
222void Assembler::sbc(Register rd, Register rn, Operand o, Condition cond) {
223 EmitType01(cond, o.type(), SBC, 0, rn, rd, o);
226void Assembler::sbcs(Register rd, Register rn, Operand o, Condition cond) {
227 EmitType01(cond, o.type(), SBC, 1, rn, rd, o);
230void Assembler::rsc(Register rd, Register rn, Operand o, Condition cond) {
231 EmitType01(cond, o.type(), RSC, 0, rn, rd, o);
234void Assembler::tst(Register rn, Operand o, Condition cond) {
235 EmitType01(cond, o.type(), TST, 1, rn, R0, o);
238void Assembler::teq(Register rn, Operand o, Condition cond) {
239 EmitType01(cond, o.type(), TEQ, 1, rn, R0, o);
242void Assembler::cmp(Register rn, Operand o, Condition cond) {
243 EmitType01(cond, o.type(), CMP, 1, rn, R0, o);
246void Assembler::cmn(Register rn, Operand o, Condition cond) {
247 EmitType01(cond, o.type(), CMN, 1, rn, R0, o);
250void Assembler::orr(Register rd, Register rn, Operand o, Condition cond) {
251 EmitType01(cond, o.type(), ORR, 0, rn, rd, o);
254void Assembler::orrs(Register rd, Register rn, Operand o, Condition cond) {
255 EmitType01(cond, o.type(), ORR, 1, rn, rd, o);
258void Assembler::mov(Register rd, Operand o, Condition cond) {
259 EmitType01(cond, o.type(), MOV, 0, R0, rd, o);
262void Assembler::movs(Register rd, Operand o, Condition cond) {
263 EmitType01(cond, o.type(), MOV, 1, R0, rd, o);
266void Assembler::bic(Register rd, Register rn, Operand o, Condition cond) {
267 EmitType01(cond, o.type(), BIC, 0, rn, rd, o);
270void Assembler::bics(Register rd, Register rn, Operand o, Condition cond) {
271 EmitType01(cond, o.type(), BIC, 1, rn, rd, o);
274void Assembler::mvn_(Register rd, Operand o, Condition cond) {
275 EmitType01(cond, o.type(), MVN, 0, R0, rd, o);
278void Assembler::mvns(Register rd, Operand o, Condition cond) {
279 EmitType01(cond, o.type(), MVN, 1, R0, rd, o);
282void Assembler::clz(Register rd, Register rm, Condition cond) {
283 ASSERT(rd != kNoRegister);
284 ASSERT(rm != kNoRegister);
285 ASSERT(cond != kNoCondition);
288 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B24 |
289 B22 |
B21 | (0xf << 16) | ArmEncode::Rd(rd) | (0xf << 8) |
290 B4 |
static_cast<int32_t
>(rm);
// RBIT: rd = bit-reversed rm.
// NOTE(review): garbled extraction — part of the opcode bit pattern (between
// B26 and the Rd term) and the trailing Emit(encoding) call are missing from
// this fragment; restore before use.
294void Assembler::rbit(Register rd, Register rm, Condition cond) {
295  ASSERT(rd != kNoRegister);
296  ASSERT(rm != kNoRegister);
297  ASSERT(cond != kNoCondition);
300  int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B26 |
302      ArmEncode::Rd(rd) | (0xf << 8) | B5 | B4 |
303      static_cast<int32_t
>(rm);
307void Assembler::movw(Register rd, uint16_t imm16, Condition cond) {
308 ASSERT(cond != kNoCondition);
309 int32_t encoding =
static_cast<int32_t
>(cond) << kConditionShift | B25 | B24 |
310 ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
315void Assembler::movt(Register rd, uint16_t imm16, Condition cond) {
316 ASSERT(cond != kNoCondition);
317 int32_t encoding =
static_cast<int32_t
>(cond) << kConditionShift | B25 | B24 |
318 B22 | ((imm16 >> 12) << 16) | ArmEncode::Rd(rd) |
// Shared emitter for the multiply family (MUL/MLA/MLS/UMULL/SMULL/UMLAL/
// UMAAL); `opcode` carries the variant-selecting bits and B7|B4 marks the
// multiply instruction space.
// NOTE(review): garbled extraction — the remaining parameters (opcode, rs,
// rd, rn, rm) and the trailing Emit(encoding) call are missing from this
// fragment; restore before use.
323void Assembler::EmitMulOp(Condition cond,
329  ASSERT(rd != kNoRegister);
330  ASSERT(rn != kNoRegister);
331  ASSERT(rm != kNoRegister);
332  ASSERT(rs != kNoRegister);
333  ASSERT(cond != kNoCondition);
334  int32_t encoding = opcode | (
static_cast<int32_t
>(cond) << kConditionShift) |
335                     ArmEncode::Rn(rn) | ArmEncode::Rd(rd) | ArmEncode::Rs(rs) |
336                     B7 |
B4 | ArmEncode::Rm(rm);
340void Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
342 EmitMulOp(cond, 0, R0, rd, rn, rm);
346void Assembler::muls(Register rd, Register rn, Register rm, Condition cond) {
347 EmitMulOp(cond, B20, R0, rd, rn, rm);
350void Assembler::mla(Register rd,
357 EmitMulOp(cond, B21, ra, rd, rn, rm);
360void Assembler::mls(Register rd,
367 EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
370void Assembler::smull(Register rd_lo,
376 EmitMulOp(cond, B23 | B22, rd_lo, rd_hi, rn, rm);
379void Assembler::umull(Register rd_lo,
385 EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
388void Assembler::umlal(Register rd_lo,
394 EmitMulOp(cond, B23 | B21, rd_lo, rd_hi, rn, rm);
397void Assembler::umaal(Register rd_lo,
406 EmitMulOp(AL, B22, rd_lo, rd_hi, rn, rm);
// Shared emitter for SDIV/UDIV; `opcode` selects signed vs unsigned. Only
// legal when the target CPU supports hardware integer division.
// NOTE(review): garbled extraction — the remaining parameters (opcode, rd,
// rn, rm), part of the fixed opcode bit pattern, and the trailing
// Emit(encoding) call are missing from this fragment; restore before use.
409void Assembler::EmitDivOp(Condition cond,
414  ASSERT(TargetCPUFeatures::integer_division_supported());
415  ASSERT(rd != kNoRegister);
416  ASSERT(rn != kNoRegister);
417  ASSERT(rm != kNoRegister);
418  ASSERT(cond != kNoCondition);
419  int32_t encoding = opcode | (
static_cast<int32_t
>(cond) << kConditionShift) |
420      (
static_cast<int32_t
>(rn) << kDivRnShift) |
421      (
static_cast<int32_t
>(rd) << kDivRdShift) |
B26 |
B25 |
423      (
static_cast<int32_t
>(rm) << kDivRmShift);
427void Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
428 EmitDivOp(cond, 0, rd, rn, rm);
431void Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
432 EmitDivOp(cond, B21, rd, rn, rm);
435void Assembler::ldr(Register rd, Address ad, Condition cond) {
436 EmitMemOp(cond,
true,
false, rd, ad);
439void Assembler::str(Register rd, Address ad, Condition cond) {
440 EmitMemOp(cond,
false,
false, rd, ad);
443void Assembler::ldrb(Register rd, Address ad, Condition cond) {
444 EmitMemOp(cond,
true,
true, rd, ad);
447void Assembler::strb(Register rd, Address ad, Condition cond) {
448 EmitMemOp(cond,
false,
true, rd, ad);
451void Assembler::ldrh(Register rd, Address ad, Condition cond) {
452 EmitMemOpAddressMode3(cond, L | B7 |
H | B4, rd, ad);
455void Assembler::strh(Register rd, Address ad, Condition cond) {
456 EmitMemOpAddressMode3(cond, B7 |
H | B4, rd, ad);
459void Assembler::ldrsb(Register rd, Address ad, Condition cond) {
460 EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
463void Assembler::ldrsh(Register rd, Address ad, Condition cond) {
464 EmitMemOpAddressMode3(cond, L | B7 | B6 |
H | B4, rd, ad);
467void Assembler::ldrd(Register rd,
474 EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn,
offset));
477void Assembler::strd(Register rd,
484 EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn,
offset));
487void Assembler::ldm(BlockAddressMode am,
492 EmitMultiMemOp(cond, am,
true,
base, regs);
495void Assembler::stm(BlockAddressMode am,
500 EmitMultiMemOp(cond, am,
false,
base, regs);
503void Assembler::ldrex(Register rt, Register rn, Condition cond) {
504 ASSERT(rn != kNoRegister);
505 ASSERT(rt != kNoRegister);
506 ASSERT(cond != kNoCondition);
507 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B24 |
508 B23 |
L | (
static_cast<int32_t
>(rn) << kLdrExRnShift) |
509 (
static_cast<int32_t
>(rt) << kLdrExRtShift) |
B11 |
B10 |
514void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
515 ASSERT(rn != kNoRegister);
516 ASSERT(rd != kNoRegister);
517 ASSERT(rt != kNoRegister);
518 ASSERT(cond != kNoCondition);
519 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B24 |
520 B23 | (
static_cast<int32_t
>(rn) << kStrExRnShift) |
521 (
static_cast<int32_t
>(rd) << kStrExRdShift) |
B11 |
B10 |
523 (
static_cast<int32_t
>(rt) << kStrExRtShift);
527void Assembler::dmb() {
529 Emit(kDataMemoryBarrier);
532static int32_t BitFieldExtractEncoding(
bool sign_extend,
538 ASSERT(rn != kNoRegister && rn != PC);
539 ASSERT(rd != kNoRegister && rd != PC);
540 ASSERT(cond != kNoCondition);
541 ASSERT(Utils::IsUint(kBitFieldExtractLSBBits, lsb));
544 const int32_t widthm1 =
width - 1;
545 ASSERT(Utils::IsUint(kBitFieldExtractWidthBits, widthm1));
546 return (
static_cast<int32_t
>(cond) << kConditionShift) |
B26 |
B25 |
B24 |
548 (widthm1 << kBitFieldExtractWidthShift) |
549 (
static_cast<int32_t
>(rd) << kRdShift) |
554void Assembler::sbfx(Register rd,
560 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb,
width, cond));
563void Assembler::ubfx(Register rd,
569 Emit(BitFieldExtractEncoding(sign_extend, rd, rn, lsb,
width, cond));
572void Assembler::EnterFullSafepoint(Register addr, Register
state) {
575 Label slow_path,
done, retry;
576 if (FLAG_use_slow_path) {
580 LoadImmediate(addr, target::Thread::safepoint_state_offset());
581 add(addr, THR, Operand(addr));
584 cmp(
state, Operand(target::Thread::full_safepoint_state_unacquired()));
587 mov(
state, Operand(target::Thread::full_safepoint_state_acquired()));
588 strex(TMP,
state, addr);
589 cmp(TMP, Operand(0));
592 if (!FLAG_use_slow_path) {
597 ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
598 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
604void Assembler::TransitionGeneratedToNative(Register destination_address,
605 Register exit_frame_fp,
606 Register exit_through_ffi,
608 bool enter_safepoint) {
610 StoreToOffset(exit_frame_fp, THR,
611 target::Thread::top_exit_frame_info_offset());
613 StoreToOffset(exit_through_ffi, THR,
614 target::Thread::exit_through_ffi_offset());
618 StoreToOffset(destination_address, THR, target::Thread::vm_tag_offset());
619 LoadImmediate(tmp1, target::Thread::native_execution_state());
620 StoreToOffset(tmp1, THR, target::Thread::execution_state_offset());
622 if (enter_safepoint) {
623 EnterFullSafepoint(tmp1, tmp2);
627void Assembler::ExitFullSafepoint(Register tmp1,
629 bool ignore_unwind_in_progress) {
635 Label slow_path,
done, retry;
636 if (FLAG_use_slow_path) {
640 LoadImmediate(addr, target::Thread::safepoint_state_offset());
641 add(addr, THR, Operand(addr));
644 cmp(
state, Operand(target::Thread::full_safepoint_state_acquired()));
647 mov(
state, Operand(target::Thread::full_safepoint_state_unacquired()));
648 strex(TMP,
state, addr);
649 cmp(TMP, Operand(0));
652 if (!FLAG_use_slow_path) {
657 if (ignore_unwind_in_progress) {
661 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
663 ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
665 ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
671void Assembler::TransitionNativeToGenerated(Register addr,
674 bool ignore_unwind_in_progress) {
675 if (exit_safepoint) {
676 ExitFullSafepoint(addr,
state, ignore_unwind_in_progress);
679 ASSERT(!ignore_unwind_in_progress);
682 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
683 LoadImmediate(
state, target::Thread::full_safepoint_state_acquired());
684 ldr(TMP, Address(THR, target::Thread::safepoint_state_offset()));
685 ands(TMP, TMP, Operand(
state));
694 LoadImmediate(
state, target::Thread::vm_tag_dart_id());
695 StoreToOffset(
state, THR, target::Thread::vm_tag_offset());
696 LoadImmediate(
state, target::Thread::generated_execution_state());
697 StoreToOffset(
state, THR, target::Thread::execution_state_offset());
700 LoadImmediate(
state, 0);
701 StoreToOffset(
state, THR, target::Thread::top_exit_frame_info_offset());
702 StoreToOffset(
state, THR, target::Thread::exit_through_ffi_offset());
705void Assembler::clrex() {
707 B21 | B20 | (0xff << 12) |
B4 | 0xf;
711void Assembler::nop(Condition cond) {
712 ASSERT(cond != kNoCondition);
713 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B25 |
718void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
719 ASSERT(sn != kNoSRegister);
720 ASSERT(rt != kNoRegister);
723 ASSERT(cond != kNoCondition);
724 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
725 B26 |
B25 | ((
static_cast<int32_t
>(sn) >> 1) *
B16) |
726 (
static_cast<int32_t
>(rt) *
B12) | B11 | B9 |
727 ((
static_cast<int32_t
>(sn) & 1) * B7) |
B4;
731void Assembler::vmovrs(Register rt, SRegister sn, Condition cond) {
732 ASSERT(sn != kNoSRegister);
733 ASSERT(rt != kNoRegister);
736 ASSERT(cond != kNoCondition);
737 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
738 B26 |
B25 |
B20 | ((
static_cast<int32_t
>(sn) >> 1) *
B16) |
739 (
static_cast<int32_t
>(rt) *
B12) | B11 | B9 |
740 ((
static_cast<int32_t
>(sn) & 1) * B7) |
B4;
744void Assembler::vmovsrr(SRegister sm,
748 ASSERT(sm != kNoSRegister);
750 ASSERT(rt != kNoRegister);
753 ASSERT(rt2 != kNoRegister);
756 ASSERT(cond != kNoCondition);
757 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
758 B26 |
B22 | (
static_cast<int32_t
>(rt2) * B16) |
759 (
static_cast<int32_t
>(rt) * B12) |
B11 |
B9 |
760 ((
static_cast<int32_t
>(sm) & 1) *
B5) | B4 |
761 (
static_cast<int32_t
>(sm) >> 1);
765void Assembler::vmovrrs(Register rt,
769 ASSERT(sm != kNoSRegister);
771 ASSERT(rt != kNoRegister);
774 ASSERT(rt2 != kNoRegister);
778 ASSERT(cond != kNoCondition);
779 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
780 B26 |
B22 |
B20 | (
static_cast<int32_t
>(rt2) * B16) |
781 (
static_cast<int32_t
>(rt) * B12) |
B11 |
B9 |
782 ((
static_cast<int32_t
>(sm) & 1) *
B5) | B4 |
783 (
static_cast<int32_t
>(sm) >> 1);
787void Assembler::vmovdr(DRegister dn,
int i, Register rt, Condition cond) {
788 ASSERT((i == 0) || (i == 1));
789 ASSERT(rt != kNoRegister);
792 ASSERT(dn != kNoDRegister);
793 ASSERT(cond != kNoCondition);
794 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
795 B26 |
B25 | (i *
B21) | (
static_cast<int32_t
>(rt) *
B12) |
796 B11 | B9 | B8 | ((
static_cast<int32_t
>(dn) >> 4) * B7) |
797 ((
static_cast<int32_t
>(dn) & 0xf) *
B16) | B4;
801void Assembler::vmovdrr(DRegister dm,
805 ASSERT(dm != kNoDRegister);
806 ASSERT(rt != kNoRegister);
809 ASSERT(rt2 != kNoRegister);
812 ASSERT(cond != kNoCondition);
813 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
814 B26 |
B22 | (
static_cast<int32_t
>(rt2) * B16) |
815 (
static_cast<int32_t
>(rt) * B12) |
B11 |
B9 |
B8 |
816 ((
static_cast<int32_t
>(dm) >> 4) *
B5) | B4 |
817 (
static_cast<int32_t
>(dm) & 0xf);
821void Assembler::vmovrrd(Register rt,
825 ASSERT(dm != kNoDRegister);
826 ASSERT(rt != kNoRegister);
829 ASSERT(rt2 != kNoRegister);
833 ASSERT(cond != kNoCondition);
834 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
835 B26 |
B22 |
B20 | (
static_cast<int32_t
>(rt2) * B16) |
836 (
static_cast<int32_t
>(rt) * B12) |
B11 |
B9 |
B8 |
837 ((
static_cast<int32_t
>(dm) >> 4) *
B5) | B4 |
838 (
static_cast<int32_t
>(dm) & 0xf);
842void Assembler::vldrs(SRegister sd, Address ad, Condition cond) {
843 ASSERT(sd != kNoSRegister);
844 ASSERT(cond != kNoCondition);
845 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
846 B26 |
B24 |
B20 | ((
static_cast<int32_t
>(sd) & 1) *
B22) |
847 ((
static_cast<int32_t
>(sd) >> 1) * B12) |
B11 |
B9 |
852void Assembler::vstrs(SRegister sd, Address ad, Condition cond) {
853 ASSERT(
static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
854 ASSERT(sd != kNoSRegister);
855 ASSERT(cond != kNoCondition);
856 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
857 B26 |
B24 | ((
static_cast<int32_t
>(sd) & 1) *
B22) |
858 ((
static_cast<int32_t
>(sd) >> 1) * B12) |
B11 |
B9 |
863void Assembler::vldrd(DRegister dd, Address ad, Condition cond) {
864 ASSERT(dd != kNoDRegister);
865 ASSERT(cond != kNoCondition);
866 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
867 B26 |
B24 |
B20 | ((
static_cast<int32_t
>(dd) >> 4) *
B22) |
868 ((
static_cast<int32_t
>(dd) & 0xf) * B12) |
B11 |
B9 |
B8 |
873void Assembler::vstrd(DRegister dd, Address ad, Condition cond) {
874 ASSERT(
static_cast<Register>(ad.encoding_ & (0xf << kRnShift)) != PC);
875 ASSERT(dd != kNoDRegister);
876 ASSERT(cond != kNoCondition);
877 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
878 B26 |
B24 | ((
static_cast<int32_t
>(dd) >> 4) *
B22) |
879 ((
static_cast<int32_t
>(dd) & 0xf) * B12) |
B11 |
B9 |
B8 |
884void Assembler::EmitMultiVSMemOp(Condition cond,
891 ASSERT(cond != kNoCondition);
895 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
897 ArmEncode::Rn(
base) |
898 ((
static_cast<int32_t
>(
start) & 0x1) != 0 ?
D : 0) |
899 ((
static_cast<int32_t
>(
start) >> 1) << 12) |
count;
903void Assembler::EmitMultiVDMemOp(Condition cond,
910 ASSERT(cond != kNoCondition);
913 const int notArmv5te = 0;
916 (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
B26 |
B11 |
B9 |
918 ((
static_cast<int32_t
>(
start) & 0x10) != 0 ?
D : 0) |
919 ((
static_cast<int32_t
>(
start) & 0xf) << 12) | (
count << 1) | notArmv5te;
923void Assembler::vldms(BlockAddressMode am,
928 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
930 EmitMultiVSMemOp(cond, am,
true,
base, first, last - first + 1);
933void Assembler::vstms(BlockAddressMode am,
938 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
940 EmitMultiVSMemOp(cond, am,
false,
base, first, last - first + 1);
943void Assembler::vldmd(BlockAddressMode am,
948 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
951 EmitMultiVDMemOp(cond, am,
true,
base, first,
count);
954void Assembler::vstmd(BlockAddressMode am,
959 ASSERT((am == IA) || (am == IA_W) || (am == DB_W));
962 EmitMultiVDMemOp(cond, am,
false,
base, first,
count);
965void Assembler::EmitVFPsss(Condition cond,
970 ASSERT(sd != kNoSRegister);
971 ASSERT(sn != kNoSRegister);
972 ASSERT(sm != kNoSRegister);
973 ASSERT(cond != kNoCondition);
975 (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
B26 |
B25 |
B11 |
976 B9 | opcode | ((
static_cast<int32_t
>(sd) & 1) *
B22) |
977 ((
static_cast<int32_t
>(sn) >> 1) * B16) |
978 ((
static_cast<int32_t
>(sd) >> 1) *
B12) |
979 ((
static_cast<int32_t
>(sn) & 1) * B7) |
980 ((
static_cast<int32_t
>(sm) & 1) *
B5) | (
static_cast<int32_t
>(sm) >> 1);
984void Assembler::EmitVFPddd(Condition cond,
989 ASSERT(dd != kNoDRegister);
990 ASSERT(dn != kNoDRegister);
991 ASSERT(dm != kNoDRegister);
992 ASSERT(cond != kNoCondition);
994 (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
B26 |
B25 |
B11 |
995 B9 |
B8 | opcode | ((
static_cast<int32_t
>(dd) >> 4) *
B22) |
996 ((
static_cast<int32_t
>(dn) & 0xf) * B16) |
997 ((
static_cast<int32_t
>(dd) & 0xf) *
B12) |
998 ((
static_cast<int32_t
>(dn) >> 4) * B7) |
999 ((
static_cast<int32_t
>(dm) >> 4) *
B5) | (
static_cast<int32_t
>(dm) & 0xf);
1003void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
1004 EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
1007void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
1008 EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
1011bool Assembler::vmovs(SRegister sd,
float s_imm, Condition cond) {
1012 uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
1013 if (((imm32 & ((1 << 19) - 1)) == 0) &&
1014 ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
1015 (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) - 1)))) {
1016 uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
1017 ((imm32 >> 19) & ((1 << 6) - 1));
1018 EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | (imm8 & 0xf), sd,
1025bool Assembler::vmovd(DRegister dd,
double d_imm, Condition cond) {
1026 uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
1027 if (((imm64 & ((1LL << 48) - 1)) == 0) &&
1028 ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
1029 (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) - 1)))) {
1030 uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
1031 ((imm64 >> 48) & ((1 << 6) - 1));
1032 EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4) * B16) | B8 | (imm8 & 0xf),
1039void Assembler::vadds(SRegister sd,
1043 EmitVFPsss(cond, B21 | B20, sd, sn, sm);
1046void Assembler::vaddd(DRegister dd,
1050 EmitVFPddd(cond, B21 | B20, dd, dn, dm);
1053void Assembler::vsubs(SRegister sd,
1057 EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
1060void Assembler::vsubd(DRegister dd,
1064 EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
1067void Assembler::vmuls(SRegister sd,
1071 EmitVFPsss(cond, B21, sd, sn, sm);
1074void Assembler::vmuld(DRegister dd,
1078 EmitVFPddd(cond, B21, dd, dn, dm);
1081void Assembler::vmlas(SRegister sd,
1085 EmitVFPsss(cond, 0, sd, sn, sm);
1088void Assembler::vmlad(DRegister dd,
1092 EmitVFPddd(cond, 0, dd, dn, dm);
1095void Assembler::vmlss(SRegister sd,
1099 EmitVFPsss(cond, B6, sd, sn, sm);
1102void Assembler::vmlsd(DRegister dd,
1106 EmitVFPddd(cond, B6, dd, dn, dm);
1109void Assembler::vdivs(SRegister sd,
1113 EmitVFPsss(cond, B23, sd, sn, sm);
1116void Assembler::vdivd(DRegister dd,
1120 EmitVFPddd(cond, B23, dd, dn, dm);
1123void Assembler::vabss(SRegister sd, SRegister sm, Condition cond) {
1124 EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
1127void Assembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
1128 EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
1131void Assembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
1132 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
1135void Assembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
1136 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
1139void Assembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
1140 EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
1143void Assembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
1144 EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
1147void Assembler::EmitVFPsd(Condition cond,
1151 ASSERT(sd != kNoSRegister);
1152 ASSERT(dm != kNoDRegister);
1153 ASSERT(cond != kNoCondition);
1155 (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
B26 |
B25 |
B11 |
1156 B9 | opcode | ((
static_cast<int32_t
>(sd) & 1) *
B22) |
1157 ((
static_cast<int32_t
>(sd) >> 1) * B12) |
1158 ((
static_cast<int32_t
>(dm) >> 4) *
B5) | (
static_cast<int32_t
>(dm) & 0xf);
1162void Assembler::EmitVFPds(Condition cond,
1166 ASSERT(dd != kNoDRegister);
1167 ASSERT(sm != kNoSRegister);
1168 ASSERT(cond != kNoCondition);
1170 (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
B26 |
B25 |
B11 |
1171 B9 | opcode | ((
static_cast<int32_t
>(dd) >> 4) *
B22) |
1172 ((
static_cast<int32_t
>(dd) & 0xf) * B12) |
1173 ((
static_cast<int32_t
>(sm) & 1) *
B5) | (
static_cast<int32_t
>(sm) >> 1);
1177void Assembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
1178 EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
1181void Assembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
1182 EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
1185void Assembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
1186 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
1189void Assembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
1190 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
1193void Assembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
1194 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
1197void Assembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
1198 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
1201void Assembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
1202 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
1205void Assembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
1206 EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
1209void Assembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
1210 EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
1213void Assembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
1214 EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
1217void Assembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
1218 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
1221void Assembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
1222 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
1225void Assembler::vcmpsz(SRegister sd, Condition cond) {
1226 EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
1229void Assembler::vcmpdz(DRegister dd, Condition cond) {
1230 EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
1233void Assembler::vmrs(Register rd, Condition cond) {
1234 ASSERT(cond != kNoCondition);
1235 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B27 |
1237 (
static_cast<int32_t
>(rd) * B12) |
B11 |
B9 |
B4;
1241void Assembler::vmstat(Condition cond) {
1245static inline int ShiftOfOperandSize(OperandSize size) {
1270void Assembler::EmitSIMDqqq(int32_t opcode,
1275 ASSERT(TargetCPUFeatures::neon_supported());
1276 int sz = ShiftOfOperandSize(size);
1279 opcode | ((sz & 0x3) * B20) |
1280 ((
static_cast<int32_t
>(qd * 2) >> 4) *
B22) |
1281 ((
static_cast<int32_t
>(qn * 2) & 0xf) * B16) |
1282 ((
static_cast<int32_t
>(qd * 2) & 0xf) *
B12) |
1283 ((
static_cast<int32_t
>(qn * 2) >> 4) * B7) |
1284 ((
static_cast<int32_t
>(qm * 2) >> 4) *
B5) |
1285 (
static_cast<int32_t
>(qm * 2) & 0xf);
1289void Assembler::EmitSIMDddd(int32_t opcode,
1294 ASSERT(TargetCPUFeatures::neon_supported());
1295 int sz = ShiftOfOperandSize(size);
1298 opcode | ((sz & 0x3) * B20) | ((
static_cast<int32_t
>(dd) >> 4) *
B22) |
1299 ((
static_cast<int32_t
>(dn) & 0xf) * B16) |
1300 ((
static_cast<int32_t
>(dd) & 0xf) *
B12) |
1301 ((
static_cast<int32_t
>(dn) >> 4) * B7) |
1302 ((
static_cast<int32_t
>(dm) >> 4) *
B5) | (
static_cast<int32_t
>(dm) & 0xf);
1306void Assembler::vmovq(QRegister qd, QRegister qm) {
1307 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qm, qm);
1310void Assembler::vaddqi(OperandSize sz,
1314 EmitSIMDqqq(B11, sz, qd, qn, qm);
1317void Assembler::vaddqs(QRegister qd, QRegister qn, QRegister qm) {
1318 EmitSIMDqqq(B11 | B10 | B8, kSWord, qd, qn, qm);
1321void Assembler::vsubqi(OperandSize sz,
1325 EmitSIMDqqq(B24 | B11, sz, qd, qn, qm);
1328void Assembler::vsubqs(QRegister qd, QRegister qn, QRegister qm) {
1329 EmitSIMDqqq(B21 | B11 | B10 | B8, kSWord, qd, qn, qm);
1332void Assembler::vmulqi(OperandSize sz,
1336 EmitSIMDqqq(B11 | B8 | B4, sz, qd, qn, qm);
1339void Assembler::vmulqs(QRegister qd, QRegister qn, QRegister qm) {
1340 EmitSIMDqqq(B24 | B11 | B10 | B8 | B4, kSWord, qd, qn, qm);
1343void Assembler::vshlqi(OperandSize sz,
1347 EmitSIMDqqq(B25 | B10, sz, qd, qn, qm);
1350void Assembler::vshlqu(OperandSize sz,
1354 EmitSIMDqqq(B25 | B24 | B10, sz, qd, qn, qm);
1357void Assembler::veorq(QRegister qd, QRegister qn, QRegister qm) {
1358 EmitSIMDqqq(B24 | B8 | B4, kByte, qd, qn, qm);
1361void Assembler::vorrq(QRegister qd, QRegister qn, QRegister qm) {
1362 EmitSIMDqqq(B21 | B8 | B4, kByte, qd, qn, qm);
1365void Assembler::vornq(QRegister qd, QRegister qn, QRegister qm) {
1366 EmitSIMDqqq(B21 | B20 | B8 | B4, kByte, qd, qn, qm);
1369void Assembler::vandq(QRegister qd, QRegister qn, QRegister qm) {
1370 EmitSIMDqqq(B8 | B4, kByte, qd, qn, qm);
1373void Assembler::vmvnq(QRegister qd, QRegister qm) {
1374 EmitSIMDqqq(B25 | B24 | B23 | B10 | B8 | B7, kWordPair, qd, Q0, qm);
1377void Assembler::vminqs(QRegister qd, QRegister qn, QRegister qm) {
1378 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1381void Assembler::vmaxqs(QRegister qd, QRegister qn, QRegister qm) {
1382 EmitSIMDqqq(B11 | B10 | B9 | B8, kSWord, qd, qn, qm);
1385void Assembler::vabsqs(QRegister qd, QRegister qm) {
1386 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8, kSWord, qd, Q0,
1390void Assembler::vnegqs(QRegister qd, QRegister qm) {
1391 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B16 | B10 | B9 | B8 | B7, kSWord,
1395void Assembler::vrecpeqs(QRegister qd, QRegister qm) {
1396 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8, kSWord, qd,
1400void Assembler::vrecpsqs(QRegister qd, QRegister qn, QRegister qm) {
1401 EmitSIMDqqq(B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1404void Assembler::vrsqrteqs(QRegister qd, QRegister qm) {
1405 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B16 | B10 | B8 | B7, kSWord,
1409void Assembler::vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm) {
1410 EmitSIMDqqq(B21 | B11 | B10 | B9 | B8 | B4, kSWord, qd, qn, qm);
1413void Assembler::vdup(OperandSize sz, QRegister qd, DRegister dm,
int idx) {
1414 ASSERT((sz != kDWord) && (sz != kSWord) && (sz != kWordPair));
1420 ASSERT((idx >= 0) && (idx < 8));
1421 code = 1 | (idx << 1);
1426 ASSERT((idx >= 0) && (idx < 4));
1427 code = 2 | (idx << 2);
1432 ASSERT((idx >= 0) && (idx < 2));
1433 code = 4 | (idx << 3);
1441 EmitSIMDddd(B24 | B23 | B11 | B10 | B6, kWordPair,
1443 static_cast<DRegister>(code & 0xf), dm);
1446void Assembler::vtbl(DRegister dd, DRegister dn,
int len, DRegister dm) {
1447 ASSERT((len >= 1) && (len <= 4));
1448 EmitSIMDddd(B24 | B23 | B11 | ((len - 1) * B8), kWordPair, dd, dn, dm);
1451void Assembler::vzipqw(QRegister qd, QRegister qm) {
1452 EmitSIMDqqq(B24 | B23 | B21 | B20 | B19 | B17 | B8 | B7, kByte, qd, Q0, qm);
1455void Assembler::vceqqi(OperandSize sz,
1459 EmitSIMDqqq(B24 | B11 | B4, sz, qd, qn, qm);
1462void Assembler::vceqqs(QRegister qd, QRegister qn, QRegister qm) {
1463 EmitSIMDqqq(B11 | B10 | B9, kSWord, qd, qn, qm);
1466void Assembler::vcgeqi(OperandSize sz,
1470 EmitSIMDqqq(B9 | B8 | B4, sz, qd, qn, qm);
1473void Assembler::vcugeqi(OperandSize sz,
1477 EmitSIMDqqq(B24 | B9 | B8 | B4, sz, qd, qn, qm);
1480void Assembler::vcgeqs(QRegister qd, QRegister qn, QRegister qm) {
1481 EmitSIMDqqq(B24 | B11 | B10 | B9, kSWord, qd, qn, qm);
1484void Assembler::vcgtqi(OperandSize sz,
1488 EmitSIMDqqq(B9 | B8, sz, qd, qn, qm);
1491void Assembler::vcugtqi(OperandSize sz,
1495 EmitSIMDqqq(B24 | B9 | B8, sz, qd, qn, qm);
1498void Assembler::vcgtqs(QRegister qd, QRegister qn, QRegister qm) {
1499 EmitSIMDqqq(B24 | B21 | B11 | B10 | B9, kSWord, qd, qn, qm);
1502void Assembler::bkpt(uint16_t imm16) {
1503 Emit(BkptEncoding(imm16));
1506void Assembler::b(Label* label, Condition cond) {
1507 EmitBranch(cond, label,
false);
1510void Assembler::bl(Label* label, Condition cond) {
1511 EmitBranch(cond, label,
true);
1514void Assembler::bx(Register rm, Condition cond) {
1515 ASSERT(rm != kNoRegister);
1516 ASSERT(cond != kNoCondition);
1517 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B24 |
1518 B21 | (0xfff << 8) | B4 | ArmEncode::Rm(rm);
1522void Assembler::blx(Register rm, Condition cond) {
1523 ASSERT(rm != kNoRegister);
1524 ASSERT(cond != kNoCondition);
1525 int32_t encoding = (
static_cast<int32_t
>(cond) << kConditionShift) |
B24 |
1526 B21 | (0xfff << 8) | B5 | B4 | ArmEncode::Rm(rm);
1530void Assembler::MarkExceptionHandler(Label* label) {
1531 EmitType01(AL, 1, TST, 1, PC, R0, Operand(0));
1534 EmitBranch(AL, label,
false);
1538void Assembler::Drop(intptr_t stack_elements) {
1539 ASSERT(stack_elements >= 0);
1540 if (stack_elements > 0) {
1541 AddImmediate(SP, stack_elements * target::kWordSize);
1546void Assembler::LoadWordFromPoolIndex(Register rd,
1550 ASSERT((pp != PP) || constant_pool_allowed());
1555 int32_t offset_mask = 0;
1556 if (Address::CanHoldLoadOffset(kFourBytes,
offset, &offset_mask)) {
1557 ldr(rd, Address(pp,
offset), cond);
1559 int32_t offset_hi =
offset & ~offset_mask;
1560 uint32_t offset_lo =
offset & offset_mask;
1563 if (Operand::CanHold(offset_hi, &o)) {
1564 add(rd, pp, o, cond);
1566 LoadImmediate(rd, offset_hi, cond);
1567 add(rd, pp, Operand(rd), cond);
1569 ldr(rd, Address(rd, offset_lo), cond);
1573void Assembler::StoreWordToPoolIndex(Register value,
1577 ASSERT((pp != PP) || constant_pool_allowed());
1582 int32_t offset_mask = 0;
1583 if (Address::CanHoldLoadOffset(kFourBytes,
offset, &offset_mask)) {
1584 str(value, Address(pp,
offset), cond);
1586 int32_t offset_hi =
offset & ~offset_mask;
1587 uint32_t offset_lo =
offset & offset_mask;
1590 if (Operand::CanHold(offset_hi, &o)) {
1591 add(TMP, pp, o, cond);
1593 LoadImmediate(TMP, offset_hi, cond);
1594 add(TMP, pp, Operand(TMP), cond);
1596 str(value, Address(TMP, offset_lo), cond);
1600void Assembler::CheckCodePointer() {
1602 if (!FLAG_check_code_pointer) {
1605 Comment(
"CheckCodePointer");
1606 Label cid_ok, instructions_ok;
1609 CompareClassId(CODE_REG, kCodeCid, R0);
1614 const intptr_t
offset = CodeSize() + Instr::kPCReadOffset +
1616 mov(R0, Operand(PC));
1617 AddImmediate(R0, -
offset);
1618 ldr(IP, FieldAddress(CODE_REG, target::Code::instructions_offset()));
1619 cmp(R0, Operand(IP));
1620 b(&instructions_ok, EQ);
1622 Bind(&instructions_ok);
1628void Assembler::RestoreCodePointer() {
1630 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
1634void Assembler::LoadPoolPointer(Register reg) {
1637 ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
1638 set_constant_pool_allowed(reg == PP);
1641void Assembler::SetupGlobalPoolAndDispatchTable() {
1642 ASSERT(FLAG_precompiled_mode);
1643 ldr(PP, Address(THR, target::Thread::global_object_pool_offset()));
1644 ldr(DISPATCH_TABLE_REG,
1645 Address(THR, target::Thread::dispatch_table_array_offset()));
1648void Assembler::LoadIsolate(Register rd) {
1649 ldr(rd, Address(THR, target::Thread::isolate_offset()));
1652void Assembler::LoadIsolateGroup(Register rd) {
1653 ldr(rd, Address(THR, target::Thread::isolate_group_offset()));
1656bool Assembler::CanLoadFromObjectPool(
const Object&
object)
const {
1658 if (!constant_pool_allowed()) {
1667void Assembler::LoadObjectHelper(
1669 const Object&
object,
1673 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1678 if (target::CanLoadFromThread(
object, &
offset)) {
1681 ldr(rd, Address(THR,
offset), cond);
1684 if (target::IsSmi(
object)) {
1686 LoadImmediate(rd, target::ToRawSmi(
object), cond);
1695 ? object_pool_builder().AddObject(
1696 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1697 : object_pool_builder().FindObject(
1698 object, ObjectPoolBuilderEntry::kNotPatchable,
1700 LoadWordFromPoolIndex(rd, index, pp, cond);
1703void Assembler::LoadObject(Register rd,
const Object&
object, Condition cond) {
1704 LoadObjectHelper(rd,
object, cond,
false, PP);
1707void Assembler::LoadUniqueObject(
1709 const Object&
object,
1711 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1712 LoadObjectHelper(rd,
object, cond,
true, PP,
1716void Assembler::LoadNativeEntry(Register rd,
1717 const ExternalLabel* label,
1718 ObjectPoolBuilderEntry::Patchability patchable,
1720 const intptr_t index =
1721 object_pool_builder().FindNativeFunction(label, patchable);
1722 LoadWordFromPoolIndex(rd, index, PP, cond);
1725void Assembler::PushObject(
const Object&
object) {
1727 LoadObject(IP,
object);
1731void Assembler::CompareObject(Register rn,
const Object&
object) {
1734 if (target::IsSmi(
object)) {
1735 CompareImmediate(rn, target::ToRawSmi(
object));
1737 LoadObject(IP,
object);
1738 cmp(rn, Operand(IP));
1742Register UseRegister(Register reg, RegList* used) {
1747 ASSERT((*used & (1 << reg)) == 0);
1748 *used |= (1 << reg);
1752Register AllocateRegister(RegList* used) {
1757 static_cast<Register>(Utils::CountTrailingZerosWord(free)),
1761void Assembler::StoreBarrier(Register
object,
1763 CanBeSmi can_be_smi,
1769 ASSERT(
object != scratch);
1770 ASSERT(value != scratch);
1771 ASSERT(scratch != kNoRegister);
1781 if (can_be_smi == kValueCanBeSmi) {
1782 BranchIfSmi(value, &
done, kNearJump);
1786 BranchIfNotSmi(value, &passed_check, kNearJump);
1788 Bind(&passed_check);
1791 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1793 SPILLS_LR_TO_FRAME(Push(
LR));
1796 ldrb(scratch, FieldAddress(
object, target::Object::tags_offset()));
1797 ldrb(
LR, FieldAddress(value, target::Object::tags_offset()));
1799 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1800 ldr(
LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1801 tst(scratch, Operand(
LR));
1803 if (value != kWriteBarrierValueReg) {
1806 Label restore_and_done;
1807 b(&restore_and_done, ZERO);
1809 if (
object != kWriteBarrierValueReg) {
1810 Push(kWriteBarrierValueReg);
1814 objectForCall = (
value ==
R2) ? R3 :
R2;
1815 PushList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1816 mov(objectForCall, Operand(
object));
1818 mov(kWriteBarrierValueReg, Operand(value));
1819 generate_invoke_write_barrier_wrapper_(AL, objectForCall);
1821 if (
object != kWriteBarrierValueReg) {
1822 Pop(kWriteBarrierValueReg);
1824 PopList((1 << kWriteBarrierValueReg) | (1 << objectForCall));
1826 Bind(&restore_and_done);
1828 generate_invoke_write_barrier_wrapper_(NE,
object);
1831 RESTORES_LR_FROM_FRAME(Pop(
LR));
1836void Assembler::ArrayStoreBarrier(Register
object,
1839 CanBeSmi can_be_smi,
1844 ASSERT(
object != scratch);
1845 ASSERT(value != scratch);
1847 ASSERT(scratch != kNoRegister);
1857 if (can_be_smi == kValueCanBeSmi) {
1858 BranchIfSmi(value, &
done, kNearJump);
1862 BranchIfNotSmi(value, &passed_check, kNearJump);
1864 Bind(&passed_check);
1867 const bool preserve_lr = lr_state().LRContainsReturnAddress();
1869 SPILLS_LR_TO_FRAME(Push(
LR));
1873 ldrb(scratch, FieldAddress(
object, target::Object::tags_offset()));
1874 ldrb(
LR, FieldAddress(value, target::Object::tags_offset()));
1876 Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
1877 ldr(
LR, Address(THR, target::Thread::write_barrier_mask_offset()));
1878 tst(scratch, Operand(
LR));
1882 (slot != kWriteBarrierSlotReg)) {
1888 generate_invoke_array_write_barrier_(NE);
1890 RESTORES_LR_FROM_FRAME(Pop(
LR));
1895void Assembler::StoreObjectIntoObjectNoBarrier(Register
object,
1896 const Address& dest,
1897 const Object& value,
1898 MemoryOrder memory_order,
1903 int32_t ignored = 0;
1905 if (!Address::CanHoldStoreOffset(size,
dest.offset(), &ignored)) {
1915 LoadObject(scratch, value);
1916 if (memory_order == kRelease) {
1917 StoreRelease(scratch, dest);
1919 Store(scratch, dest);
1921 if (scratch != TMP) {
1926void Assembler::VerifyStoreNeedsNoWriteBarrier(Register
object,
1934 BranchIfSmi(value, &
done, kNearJump);
1935 ldrb(TMP, FieldAddress(value, target::Object::tags_offset()));
1936 tst(TMP, Operand(1 << target::UntaggedObject::kNewBit));
1938 ldrb(TMP, FieldAddress(
object, target::Object::tags_offset()));
1939 tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1941 Stop(
"Write barrier is required");
1945void Assembler::StoreInternalPointer(Register
object,
1946 const Address& dest,
1951void Assembler::InitializeFieldsNoBarrier(Register
object,
1954 Register value_even,
1955 Register value_odd) {
1956 ASSERT(value_odd == value_even + 1);
1959 AddImmediate(
begin, 2 * target::kWordSize);
1961 strd(value_even, value_odd,
begin, -2 * target::kWordSize, LS);
1963 str(value_even, Address(
begin, -2 * target::kWordSize), HI);
1966void Assembler::InitializeFieldsNoBarrierUnrolled(Register
object,
1968 intptr_t begin_offset,
1969 intptr_t end_offset,
1970 Register value_even,
1971 Register value_odd) {
1972 ASSERT(value_odd == value_even + 1);
1973 intptr_t current_offset = begin_offset;
1974 while (current_offset + target::kWordSize < end_offset) {
1975 strd(value_even, value_odd,
base, current_offset);
1976 current_offset += 2 * target::kWordSize;
1978 while (current_offset < end_offset) {
1979 str(value_even, Address(
base, current_offset));
1980 current_offset += target::kWordSize;
1984void Assembler::StoreIntoSmiField(
const Address& dest, Register value) {
1987 tst(value, Operand(kHeapObjectTag));
1989 Stop(
"New value must be Smi.");
1995void Assembler::ExtractClassIdFromTags(Register
result,
1998 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
1999 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2000 ubfx(
result, tags, target::UntaggedObject::kClassIdTagPos,
2001 target::UntaggedObject::kClassIdTagSize, cond);
2004void Assembler::ExtractInstanceSizeFromTags(Register
result, Register tags) {
2005 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2006 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2008 Operand(target::UntaggedObject::kSizeTagPos -
2009 target::ObjectAlignment::kObjectAlignmentLog2),
2012 (Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2013 << target::ObjectAlignment::kObjectAlignmentLog2));
2016void Assembler::LoadClassId(Register
result, Register
object, Condition cond) {
2017 ldr(
result, FieldAddress(
object, target::Object::tags_offset()), cond);
2021void Assembler::LoadClassById(Register
result, Register class_id) {
2024 const intptr_t table_offset =
2025 target::IsolateGroup::cached_class_table_table_offset();
2027 LoadIsolateGroup(
result);
2029 ldr(
result, Address(
result, class_id, LSL, target::kWordSizeLog2));
2032void Assembler::CompareClassId(Register
object,
2035 LoadClassId(scratch,
object);
2036 CompareImmediate(scratch, class_id);
2039void Assembler::LoadClassIdMayBeSmi(Register
result, Register
object) {
2040 tst(
object, Operand(kSmiTagMask));
2041 LoadClassId(
result,
object, NE);
2042 LoadImmediate(
result, kSmiCid, EQ);
2045void Assembler::LoadTaggedClassIdMayBeSmi(Register
result, Register
object) {
2046 LoadClassIdMayBeSmi(
result,
object);
2050void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2055 Comment(
"Check that object in register has cid %" Pd "", cid);
2057 LoadClassIdMayBeSmi(scratch, src);
2058 CompareImmediate(scratch, cid);
2059 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
2061 CompareImmediate(scratch, kNullCid);
2062 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
2069void Assembler::BailoutIfInvalidBranchOffset(int32_t
offset) {
2070 if (!CanEncodeBranchDistance(
offset)) {
2071 ASSERT(!use_far_branches());
2076int32_t Assembler::EncodeBranchOffset(int32_t
offset, int32_t inst) {
2078 offset -= Instr::kPCReadOffset;
2083 return (inst & ~kBranchOffsetMask) |
offset;
2086int Assembler::DecodeBranchOffset(int32_t inst) {
2088 return ((((inst & kBranchOffsetMask) << 8) >> 6) + Instr::kPCReadOffset);
2091static int32_t DecodeARMv7LoadImmediate(int32_t movt, int32_t movw) {
2093 offset |= (movt & 0xf0000) << 12;
2094 offset |= (movt & 0xfff) << 16;
2095 offset |= (movw & 0xf0000) >> 4;
2100class PatchFarBranch :
public AssemblerFixup {
2104 void Process(
const MemoryRegion& region, intptr_t position) {
2105 ProcessARMv7(region, position);
2109 void ProcessARMv7(
const MemoryRegion& region, intptr_t position) {
2110 const int32_t movw =
region.Load<int32_t>(position);
2111 const int32_t movt =
region.Load<int32_t>(position + Instr::kInstrSize);
2112 const int32_t bx =
region.Load<int32_t>(position + 2 * Instr::kInstrSize);
2114 if (((movt & 0xfff0f000) == 0xe340c000) &&
2115 ((movw & 0xfff0f000) == 0xe300c000)) {
2116 const int32_t
offset = DecodeARMv7LoadImmediate(movt, movw);
2118 const uint16_t dest_high = Utils::High16Bits(dest);
2119 const uint16_t dest_low = Utils::Low16Bits(dest);
2120 const int32_t patched_movt =
2121 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2122 const int32_t patched_movw =
2123 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2125 region.Store<int32_t>(position, patched_movw);
2126 region.Store<int32_t>(position + Instr::kInstrSize, patched_movt);
2133 ASSERT((movt == Instr::kNopInstruction) && (bx == Instr::kNopInstruction));
2136 virtual bool IsPointerOffset()
const {
return false; }
2139void Assembler::EmitFarBranch(Condition cond, int32_t
offset,
bool link) {
2140 buffer_.EmitFixup(
new PatchFarBranch());
2141 LoadPatchableImmediate(IP,
offset);
2149void Assembler::EmitBranch(Condition cond, Label* label,
bool link) {
2150 if (label->IsBound()) {
2151 const int32_t
dest = label->Position() - buffer_.Size();
2152 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2153 EmitFarBranch(cond, label->Position(), link);
2155 EmitType5(cond, dest, link);
2157 label->UpdateLRState(lr_state());
2159 const intptr_t position = buffer_.Size();
2160 if (use_far_branches()) {
2161 const int32_t
dest = label->position_;
2162 EmitFarBranch(cond, dest, link);
2165 EmitType5(cond, label->position_, link);
2167 label->LinkTo(position, lr_state());
2171void Assembler::BindARMv7(Label* label) {
2172 ASSERT(!label->IsBound());
2173 intptr_t bound_pc = buffer_.Size();
2174 while (label->IsLinked()) {
2175 const int32_t position = label->Position();
2176 int32_t
dest = bound_pc - position;
2177 if (use_far_branches() && !CanEncodeBranchDistance(dest)) {
2181 const int32_t movw =
2182 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2183 const int32_t movt =
2184 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2188 dest = buffer_.Size();
2189 const uint16_t dest_high = Utils::High16Bits(dest);
2190 const uint16_t dest_low = Utils::Low16Bits(dest);
2191 const int32_t patched_movt =
2192 0xe340c000 | ((dest_high >> 12) << 16) | (dest_high & 0xfff);
2193 const int32_t patched_movw =
2194 0xe300c000 | ((dest_low >> 12) << 16) | (dest_low & 0xfff);
2197 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, patched_movw);
2198 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize, patched_movt);
2199 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2200 }
else if (use_far_branches() && CanEncodeBranchDistance(dest)) {
2204 const int32_t movw =
2205 buffer_.Load<int32_t>(position + 0 * Instr::kInstrSize);
2206 const int32_t movt =
2207 buffer_.Load<int32_t>(position + 1 * Instr::kInstrSize);
2208 const int32_t branch =
2209 buffer_.Load<int32_t>(position + 2 * Instr::kInstrSize);
2212 const int32_t cond = branch & 0xf0000000;
2213 const int32_t
link = (branch & 0x20) << 19;
2216 const int32_t new_branch = cond |
link | 0x0a000000;
2217 const int32_t encoded = EncodeBranchOffset(dest, new_branch);
2220 buffer_.Store<int32_t>(position + 0 * Instr::kInstrSize, encoded);
2221 buffer_.Store<int32_t>(position + 1 * Instr::kInstrSize,
2222 Instr::kNopInstruction);
2223 buffer_.Store<int32_t>(position + 2 * Instr::kInstrSize,
2224 Instr::kNopInstruction);
2226 label->position_ = DecodeARMv7LoadImmediate(movt, movw);
2228 BailoutIfInvalidBranchOffset(dest);
2229 int32_t
next = buffer_.Load<int32_t>(position);
2230 int32_t encoded = Assembler::EncodeBranchOffset(dest,
next);
2231 buffer_.Store<int32_t>(position, encoded);
2232 label->position_ = Assembler::DecodeBranchOffset(
next);
2235 label->BindTo(bound_pc, lr_state());
2238void Assembler::Bind(Label* label) {
2242OperandSize Address::OperandSizeFor(intptr_t cid) {
2243 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
2249 case kUnboxedDouble:
2251 case kUnboxedInt32x4:
2252 case kUnboxedFloat32x4:
2253 case kUnboxedFloat64x2:
2256 return RepresentationUtils::OperandSize(rep);
2260bool Address::CanHoldLoadOffset(OperandSize size,
2262 int32_t* offset_mask) {
2268 *offset_mask = 0xff;
2269 return Utils::MagnitudeIsUint(8,
offset);
2274 *offset_mask = 0xfff;
2275 return Utils::MagnitudeIsUint(12,
offset);
2279 *offset_mask = 0x3fc;
2281 return (Utils::MagnitudeIsUint(10,
offset) &&
2282 Utils::IsAligned(
offset, 4));
2295bool Address::CanHoldStoreOffset(OperandSize size,
2297 int32_t* offset_mask) {
2302 *offset_mask = 0xff;
2303 return Utils::MagnitudeIsUint(8,
offset);
2309 *offset_mask = 0xfff;
2310 return Utils::MagnitudeIsUint(12,
offset);
2314 *offset_mask = 0x3fc;
2316 return (Utils::MagnitudeIsUint(10,
offset) &&
2317 Utils::IsAligned(
offset, 4));
2330bool Address::CanHoldImmediateOffset(
bool is_load,
2333 int32_t offset_mask = 0;
2335 return CanHoldLoadOffset(OperandSizeFor(cid),
offset, &offset_mask);
2337 return CanHoldStoreOffset(OperandSizeFor(cid),
offset, &offset_mask);
2341void Assembler::Push(Register rd, Condition cond) {
2342 str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond);
2345void Assembler::Pop(Register rd, Condition cond) {
2346 ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond);
2349void Assembler::PushList(RegList regs, Condition cond) {
2350 stm(DB_W, SP, regs, cond);
2353void Assembler::PopList(RegList regs, Condition cond) {
2354 ldm(IA_W, SP, regs, cond);
2357void Assembler::PushQuad(FpuRegister reg, Condition cond) {
2359 vstmd(DB_W, SP, dreg, 2, cond);
2362void Assembler::PopQuad(FpuRegister reg, Condition cond) {
2364 vldmd(IA_W, SP, dreg, 2, cond);
2367void Assembler::PushRegisters(
const RegisterSet& regs) {
2368 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2369 if (fpu_regs_count > 0) {
2370 AddImmediate(SP, -(fpu_regs_count * kFpuRegisterSize));
2374 mov(TMP, Operand(SP));
2377 if (regs.ContainsFpuRegister(fpu_reg)) {
2379 ASSERT(
d + 1 == OddDRegisterOf(fpu_reg));
2380 vstmd(IA_W, IP,
d, 2);
2391 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2393 if (regs.ContainsRegister(reg)) {
2394 reg_list |= (1 << reg);
2397 if (reg_list != 0) {
2402void Assembler::PopRegisters(
const RegisterSet& regs) {
2404 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
2406 if (regs.ContainsRegister(reg)) {
2407 reg_list |= (1 << reg);
2410 if (reg_list != 0) {
2414 const intptr_t fpu_regs_count = regs.FpuRegisterCount();
2415 if (fpu_regs_count > 0) {
2420 if (regs.ContainsFpuRegister(fpu_reg)) {
2422 ASSERT(
d + 1 == OddDRegisterOf(fpu_reg));
2423 vldmd(IA_W, SP,
d, 2);
2431void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2436 intptr_t num_pending_regs = 0;
2437 for (Register reg : regs) {
2438 if (reg >= lowest_pending_reg) {
2439 ASSERT(pending_regs != 0);
2440 if (num_pending_regs > 1) {
2441 PushList(pending_regs);
2443 Push(lowest_pending_reg);
2446 num_pending_regs = 0;
2448 pending_regs |= (1 << reg);
2449 lowest_pending_reg = reg;
2452 if (pending_regs != 0) {
2453 if (num_pending_regs > 1) {
2454 PushList(pending_regs);
2456 Push(lowest_pending_reg);
2461void Assembler::PushNativeCalleeSavedRegisters() {
2463 PushList(kAbiPreservedCpuRegs);
2465 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2466 ASSERT(2 * kAbiPreservedFpuRegCount < 16);
2468 vstmd(DB_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2471void Assembler::PopNativeCalleeSavedRegisters() {
2472 const DRegister firstd = EvenDRegisterOf(kAbiFirstPreservedFpuReg);
2475 vldmd(IA_W, SP, firstd, 2 * kAbiPreservedFpuRegCount);
2477 PopList(kAbiPreservedCpuRegs);
2480void Assembler::ExtendValue(Register rd,
2487 if (rd == rm)
return;
2488 return mov(rd, Operand(rm), cond);
2490 return ubfx(rd, rm, 0, kBitsPerInt16, cond);
2492 return sbfx(rd, rm, 0, kBitsPerInt16, cond);
2494 return ubfx(rd, rm, 0, kBitsPerInt8, cond);
2496 return sbfx(rd, rm, 0, kBitsPerInt8, cond);
2503void Assembler::Lsl(Register rd,
2505 const Operand& shift_imm,
2507 ASSERT(shift_imm.type() == 1);
2508 ASSERT(shift_imm.encoding() != 0);
2509 mov(rd, Operand(rm, LSL, shift_imm.encoding()), cond);
2512void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond) {
2513 mov(rd, Operand(rm, LSL, rs), cond);
2516void Assembler::Lsr(Register rd,
2518 const Operand& shift_imm,
2520 ASSERT(shift_imm.type() == 1);
2521 uint32_t shift = shift_imm.encoding();
2526 mov(rd, Operand(rm, LSR, shift), cond);
2529void Assembler::Lsr(Register rd, Register rm, Register rs, Condition cond) {
2530 mov(rd, Operand(rm, LSR, rs), cond);
2533void Assembler::Asr(Register rd,
2535 const Operand& shift_imm,
2537 ASSERT(shift_imm.type() == 1);
2538 uint32_t shift = shift_imm.encoding();
2543 mov(rd, Operand(rm, ASR, shift), cond);
2546void Assembler::Asrs(Register rd,
2548 const Operand& shift_imm,
2550 ASSERT(shift_imm.type() == 1);
2551 uint32_t shift = shift_imm.encoding();
2556 movs(rd, Operand(rm, ASR, shift), cond);
2559void Assembler::Asr(Register rd, Register rm, Register rs, Condition cond) {
2560 mov(rd, Operand(rm, ASR, rs), cond);
2563void Assembler::Ror(Register rd,
2565 const Operand& shift_imm,
2567 ASSERT(shift_imm.type() == 1);
2568 ASSERT(shift_imm.encoding() != 0);
2569 mov(rd, Operand(rm, ROR, shift_imm.encoding()), cond);
2572void Assembler::Ror(Register rd, Register rm, Register rs, Condition cond) {
2573 mov(rd, Operand(rm, ROR, rs), cond);
2576void Assembler::Rrx(Register rd, Register rm, Condition cond) {
2577 mov(rd, Operand(rm, ROR, 0), cond);
2580void Assembler::SignFill(Register rd, Register rm, Condition cond) {
2581 Asr(rd, rm, Operand(31), cond);
2584void Assembler::Vreciprocalqs(QRegister qd, QRegister qm) {
2591 vrecpsqs(QTMP, qm, qd);
2592 vmulqs(qd, qd, QTMP);
2593 vrecpsqs(QTMP, qm, qd);
2594 vmulqs(qd, qd, QTMP);
2597void Assembler::VreciprocalSqrtqs(QRegister qd, QRegister qm) {
2605 vmulqs(QTMP, qd, qd);
2606 vrsqrtsqs(QTMP, qm, QTMP);
2607 vmulqs(qd, qd, QTMP);
2609 vmulqs(QTMP, qd, qd);
2610 vrsqrtsqs(QTMP, qm, QTMP);
2611 vmulqs(qd, qd, QTMP);
2614void Assembler::Vsqrtqs(QRegister qd, QRegister qm, QRegister temp) {
2619 if (temp != kNoQRegister) {
2624 VreciprocalSqrtqs(qd, qm);
2626 Vreciprocalqs(qd, qm);
2629void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
2634 Vreciprocalqs(qd, qm);
2638void Assembler::Branch(
const Address& address, Condition cond) {
2639 ldr(PC, address, cond);
2642void Assembler::BranchLink(intptr_t target_code_pool_index,
2643 CodeEntryKind entry_kind) {
2651 LoadWordFromPoolIndex(code_reg, target_code_pool_index, PP, AL);
2652 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
2656void Assembler::BranchLink(
2658 ObjectPoolBuilderEntry::Patchability patchable,
2659 CodeEntryKind entry_kind,
2660 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2665 const intptr_t index = object_pool_builder().FindObject(
2667 BranchLink(index, entry_kind);
2670void Assembler::BranchLinkPatchable(
2672 CodeEntryKind entry_kind,
2673 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2674 BranchLink(
target, ObjectPoolBuilderEntry::kPatchable, entry_kind,
2678void Assembler::BranchLinkWithEquivalence(
const Code&
target,
2679 const Object& equivalence,
2680 CodeEntryKind entry_kind) {
2685 const intptr_t index =
2687 BranchLink(index, entry_kind);
2690void Assembler::BranchLink(
const ExternalLabel* label) {
2692 LoadImmediate(
LR, label->address());
2697void Assembler::BranchLinkOffset(Register
base, int32_t
offset) {
2704void Assembler::LoadPatchableImmediate(Register rd,
2707 const uint16_t value_low = Utils::Low16Bits(value);
2708 const uint16_t value_high = Utils::High16Bits(value);
2709 movw(rd, value_low, cond);
2710 movt(rd, value_high, cond);
2713void Assembler::LoadDecodableImmediate(Register rd,
2716 movw(rd, Utils::Low16Bits(value), cond);
2717 const uint16_t value_high = Utils::High16Bits(value);
2718 if (value_high != 0) {
2719 movt(rd, value_high, cond);
2723void Assembler::LoadImmediate(Register rd, Immediate value, Condition cond) {
2724 LoadImmediate(rd,
value.value(), cond);
2727void Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
2729 if (Operand::CanHold(value, &o)) {
2731 }
else if (Operand::CanHold(~value, &o)) {
2734 LoadDecodableImmediate(rd, value, cond);
2738void Assembler::LoadSImmediate(SRegister sd,
float value, Condition cond) {
2739 if (!vmovs(sd, value, cond)) {
2741 const int index = sd & 1;
2742 LoadImmediate(IP, bit_cast<int32_t, float>(value), cond);
2743 vmovdr(dd, index, IP, cond);
2747void Assembler::LoadDImmediate(DRegister dd,
2753 if (vmovd(dd, value, cond))
return;
2755 int64_t imm64 = bit_cast<int64_t, double>(value);
2756 if (constant_pool_allowed()) {
2757 intptr_t index = object_pool_builder().FindImmediate64(imm64);
2760 LoadDFromOffset(dd, PP,
offset, cond);
2763 ASSERT(scratch != kNoRegister);
2764 int64_t imm64 = bit_cast<int64_t, double>(value);
2765 LoadImmediate(IP, Utils::Low32Bits(imm64), cond);
2766 LoadImmediate(scratch, Utils::High32Bits(imm64), cond);
2767 vmovdrr(dd, IP, scratch, cond);
2771void Assembler::LoadQImmediate(QRegister qd, simd128_value_t value) {
2772 ASSERT(constant_pool_allowed());
2773 intptr_t index = object_pool_builder().FindImmediate128(value);
2775 LoadMultipleDFromOffset(EvenDRegisterOf(qd), 2, PP,
offset);
2778Address Assembler::PrepareLargeLoadOffset(
const Address& address,
2781 ASSERT(size != kWordPair);
2782 if (address.kind() != Address::Immediate) {
2785 int32_t
offset = address.offset();
2786 int32_t offset_mask = 0;
2787 if (Address::CanHoldLoadOffset(size,
offset, &offset_mask)) {
2790 auto mode = address.mode();
2794 mode =
static_cast<Address::Mode
>(
mode ^
U);
2798 ASSERT((mode | U) != Address::PostIndex);
2802 ASSERT(
base != TMP || address.has_writeback());
2804 AddImmediate(temp,
base,
offset & ~offset_mask, cond);
2810Address Assembler::PrepareLargeStoreOffset(
const Address& address,
2813 ASSERT(size != kWordPair);
2814 if (address.kind() != Address::Immediate) {
2817 int32_t
offset = address.offset();
2818 int32_t offset_mask = 0;
2819 if (Address::CanHoldStoreOffset(size,
offset, &offset_mask)) {
2822 auto mode = address.mode();
2826 mode =
static_cast<Address::Mode
>(
mode ^
U);
2830 ASSERT((mode | U) != Address::PostIndex);
2834 ASSERT(
base != TMP || address.has_writeback());
2836 AddImmediate(temp,
base,
offset & ~offset_mask, cond);
2842void Assembler::Load(Register reg,
2843 const Address& address,
2846 const Address&
addr = PrepareLargeLoadOffset(address, size, cond);
2849 ldrsb(reg, addr, cond);
2852 ldrb(reg, addr, cond);
2855 ldrsh(reg, addr, cond);
2858 ldrh(reg, addr, cond);
2862 ldr(reg, addr, cond);
2869void Assembler::LoadFromStack(Register dst, intptr_t depth) {
2871 LoadFromOffset(dst, SPREG, depth * target::kWordSize);
2874void Assembler::StoreToStack(Register src, intptr_t depth) {
2876 StoreToOffset(src, SPREG, depth * target::kWordSize);
2879void Assembler::CompareToStack(Register src, intptr_t depth) {
2880 LoadFromStack(TMP, depth);
2881 CompareRegisters(src, TMP);
2884void Assembler::Store(Register reg,
2885 const Address& address,
2888 const Address&
addr = PrepareLargeStoreOffset(address, size, cond);
2892 strb(reg, addr, cond);
2896 strh(reg, addr, cond);
2900 str(reg, addr, cond);
2907void Assembler::LoadSFromOffset(SRegister reg,
2911 vldrs(reg, PrepareLargeLoadOffset(Address(
base,
offset), kSWord, cond), cond);
2914void Assembler::StoreSToOffset(SRegister reg,
2918 vstrs(reg, PrepareLargeStoreOffset(Address(
base,
offset), kSWord, cond),
2922void Assembler::LoadDFromOffset(DRegister reg,
2926 vldrd(reg, PrepareLargeLoadOffset(Address(
base,
offset), kDWord, cond), cond);
2929void Assembler::StoreDToOffset(DRegister reg,
2933 vstrd(reg, PrepareLargeStoreOffset(Address(
base,
offset), kDWord, cond),
2937void Assembler::LoadMultipleDFromOffset(DRegister first,
2943 vldmd(IA, IP, first,
count);
2946void Assembler::StoreMultipleDToOffset(DRegister first,
2952 vstmd(IA, IP, first,
count);
2955void Assembler::AddImmediate(Register rd,
2961 mov(rd, Operand(rn), cond);
2969 if (Operand::CanHold(value, &o)) {
2970 add(rd, rn, o, cond);
2971 }
else if (Operand::CanHold(-value, &o)) {
2972 sub(rd, rn, o, cond);
2975 if (Operand::CanHold(~value, &o)) {
2977 add(rd, rn, Operand(IP), cond);
2978 }
else if (Operand::CanHold(~(-value), &o)) {
2980 sub(rd, rn, Operand(IP), cond);
2981 }
else if (value > 0) {
2982 LoadDecodableImmediate(IP, value, cond);
2983 add(rd, rn, Operand(IP), cond);
2985 LoadDecodableImmediate(IP, -value, cond);
2986 sub(rd, rn, Operand(IP), cond);
2991void Assembler::AddImmediateSetFlags(Register rd,
2996 if (Operand::CanHold(value, &o)) {
2998 adds(rd, rn, o, cond);
2999 }
else if (Operand::CanHold(-value, &o)) {
3000 ASSERT(value != kMinInt32);
3001 subs(rd, rn, o, cond);
3004 if (Operand::CanHold(~value, &o)) {
3006 adds(rd, rn, Operand(IP), cond);
3007 }
else if (Operand::CanHold(~(-value), &o)) {
3008 ASSERT(value != kMinInt32);
3010 subs(rd, rn, Operand(IP), cond);
3012 LoadDecodableImmediate(IP, value, cond);
3013 adds(rd, rn, Operand(IP), cond);
3018void Assembler::SubImmediate(Register rd,
3022 AddImmediate(rd, rn, -value, cond);
3025void Assembler::SubImmediateSetFlags(Register rd,
3030 if (Operand::CanHold(value, &o)) {
3032 subs(rd, rn, o, cond);
3033 }
else if (Operand::CanHold(-value, &o)) {
3034 ASSERT(value != kMinInt32);
3035 adds(rd, rn, o, cond);
3038 if (Operand::CanHold(~value, &o)) {
3040 subs(rd, rn, Operand(IP), cond);
3041 }
else if (Operand::CanHold(~(-value), &o)) {
3042 ASSERT(value != kMinInt32);
3044 adds(rd, rn, Operand(IP), cond);
3046 LoadDecodableImmediate(IP, value, cond);
3047 subs(rd, rn, Operand(IP), cond);
3052void Assembler::AndImmediate(Register rd,
3057 if (Operand::CanHold(imm, &o)) {
3058 and_(rd, rs, Operand(o), cond);
3060 LoadImmediate(TMP, imm, cond);
3061 and_(rd, rs, Operand(TMP), cond);
3065void Assembler::AndImmediateSetFlags(Register rd,
3070 if (Operand::CanHold(imm, &o)) {
3071 ands(rd, rs, Operand(o), cond);
3073 LoadImmediate(TMP, imm, cond);
3074 ands(rd, rs, Operand(TMP), cond);
3078void Assembler::OrImmediate(Register rd,
3083 if (Operand::CanHold(imm, &o)) {
3084 orr(rd, rs, Operand(o), cond);
3086 LoadImmediate(TMP, imm, cond);
3087 orr(rd, rs, Operand(TMP), cond);
3091void Assembler::CompareImmediate(Register rn, int32_t value, Condition cond) {
3093 if (Operand::CanHold(value, &o)) {
3097 LoadImmediate(IP, value, cond);
3098 cmp(rn, Operand(IP), cond);
3102void Assembler::TestImmediate(Register rn, int32_t imm, Condition cond) {
3104 if (Operand::CanHold(imm, &o)) {
3107 LoadImmediate(IP, imm);
3108 tst(rn, Operand(IP), cond);
3112void Assembler::IntegerDivide(Register
result,
3118 if (TargetCPUFeatures::integer_division_supported()) {
3121 SRegister stmpl = EvenSRegisterOf(tmpl);
3122 SRegister stmpr = EvenSRegisterOf(tmpr);
3123 vmovsr(stmpl,
left);
3124 vcvtdi(tmpl, stmpl);
3125 vmovsr(stmpr,
right);
3126 vcvtdi(tmpr, stmpr);
3127 vdivd(tmpr, tmpl, tmpr);
3128 vcvtid(stmpr, tmpr);
3133static int NumRegsBelowFP(RegList regs) {
3135 for (
int i = 0; i <
FP; i++) {
3136 if ((regs & (1 << i)) != 0) {
3143void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
3144 Asr(reg, reg, Operand(shift));
3147void Assembler::CompareWords(Register reg1,
3155 AddImmediate(reg1,
offset - kHeapObjectTag);
3156 AddImmediate(reg2,
offset - kHeapObjectTag);
3160 BranchIfZero(
count,
equals, Assembler::kNearJump);
3161 AddImmediate(
count, -1);
3162 ldr(temp, Address(reg1, 4, Address::PostIndex));
3163 ldr(TMP, Address(reg2, 4, Address::PostIndex));
3164 cmp(temp, Operand(TMP));
3165 BranchIf(
EQUAL, &loop, Assembler::kNearJump);
3168void Assembler::EnterFrame(RegList regs, intptr_t frame_size) {
3169 if (prologue_offset_ == -1) {
3170 prologue_offset_ = CodeSize();
3173 if ((regs & (1 << FP)) != 0) {
3175 add(FP, SP, Operand(4 * NumRegsBelowFP(regs)));
3177 if (frame_size != 0) {
3178 AddImmediate(SP, -frame_size);
3182void Assembler::LeaveFrame(RegList regs,
bool allow_pop_pc) {
3183 ASSERT(allow_pop_pc || (regs & (1 << PC)) == 0);
3184 if ((regs & (1 << FP)) != 0) {
3186 sub(SP, FP, Operand(4 * NumRegsBelowFP(regs)));
3191void Assembler::Ret(Condition cond ) {
3192 READS_RETURN_ADDRESS_FROM_LR(bx(
LR, cond));
3195void Assembler::SetReturnAddress(Register value) {
3196 RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(MoveRegister(
LR, value));
3199void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
3202 AddImmediate(SP, -frame_space);
3203 if (OS::ActivationFrameAlignment() > 1) {
3204 bic(SP, SP, Operand(OS::ActivationFrameAlignment() - 1));
3208void Assembler::EmitEntryFrameVerification(Register scratch) {
3211 ASSERT(!constant_pool_allowed());
3212 LoadImmediate(scratch, target::frame_layout.exit_link_slot_from_entry_fp *
3214 add(scratch, scratch, Operand(FPREG));
3215 cmp(scratch, Operand(SPREG));
3224void Assembler::CallRuntime(
const RuntimeEntry& entry,
3226 ASSERT(!entry.is_leaf());
3229 LoadFromOffset(R9, THR, entry.OffsetFromThread());
3231 ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
3237#define __ assembler_->
3239#if defined(VFPv3_D32)
3240static const RegisterSet kVolatileFpuRegisters(0, 0xFF0F);
3242static const RegisterSet kVolatileFpuRegisters(0, 0x000F);
3245LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
3246 intptr_t frame_size,
3247 bool preserve_registers)
3248 : assembler_(assembler), preserve_registers_(preserve_registers) {
3249 __ Comment(
"EnterCallRuntimeFrame");
3250 if (preserve_registers) {
3252 SPILLS_LR_TO_FRAME(
__ EnterFrame(
3253 kDartVolatileCpuRegs | (1 << PP) | (1 << FP) | (1 <<
LR), 0));
3256 __ PushRegisters(kVolatileFpuRegisters);
3258 SPILLS_LR_TO_FRAME(
__ EnterFrame((1 << FP) | (1 <<
LR), 0));
3265 __ ReserveAlignedFrameSpace(frame_size);
3268void LeafRuntimeScope::Call(
const RuntimeEntry& entry,
3271 __ LoadFromOffset(TMP, THR, entry.OffsetFromThread());
3273 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3275 __ LoadImmediate(TMP, VMTag::kDartTagId);
3277 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
3280LeafRuntimeScope::~LeafRuntimeScope() {
3281 if (preserve_registers_) {
3285 const intptr_t kPushedFpuRegisterSize =
3292 const intptr_t kPushedRegistersSize =
3294 __ AddImmediate(SP, FP, -kPushedRegistersSize);
3296 __ PopRegisters(kVolatileFpuRegisters);
3299 RESTORES_LR_FROM_FRAME(
__ LeaveFrame(kDartVolatileCpuRegs | (1 << PP) |
3300 (1 << FP) | (1 <<
LR)));
3302 RESTORES_LR_FROM_FRAME(
__ LeaveFrame((1 << FP) | (1 <<
LR)));
3310void Assembler::EnterDartFrame(intptr_t frame_size,
bool load_pool_pointer) {
3311 ASSERT(!constant_pool_allowed());
3318 if (!FLAG_precompiled_mode) {
3320 EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 <<
LR), 0));
3323 if (load_pool_pointer) LoadPoolPointer();
3325 SPILLS_LR_TO_FRAME(EnterFrame((1 << FP) | (1 <<
LR), 0));
3327 set_constant_pool_allowed(
true);
3330 AddImmediate(SP, -frame_size);
3338void Assembler::EnterOsrFrame(intptr_t extra_size) {
3339 ASSERT(!constant_pool_allowed());
3340 Comment(
"EnterOsrFrame");
3341 RestoreCodePointer();
3344 AddImmediate(SP, -extra_size);
3347void Assembler::LeaveDartFrame() {
3348 if (!FLAG_precompiled_mode) {
3349 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3350 target::kWordSize));
3352 set_constant_pool_allowed(
false);
3356 RESTORES_LR_FROM_FRAME(LeaveFrame((1 << FP) | (1 <<
LR)));
3359void Assembler::LeaveDartFrameAndReturn() {
3360 if (!FLAG_precompiled_mode) {
3361 ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
3362 target::kWordSize));
3364 set_constant_pool_allowed(
false);
3368 LeaveFrame((1 << FP) | (1 << PC),
true);
3371void Assembler::EnterStubFrame() {
3375void Assembler::LeaveStubFrame() {
3379void Assembler::EnterCFrame(intptr_t frame_space) {
3384 EnterFrame(1 << FP, 0);
3385 ReserveAlignedFrameSpace(frame_space);
3388void Assembler::LeaveCFrame() {
3389 LeaveFrame(1 << FP);
3394void Assembler::MonomorphicCheckedEntryJIT() {
3395 has_monomorphic_entry_ =
true;
3396#if defined(TESTING) || defined(DEBUG)
3397 bool saved_use_far_branches = use_far_branches();
3398 set_use_far_branches(
false);
3400 intptr_t
start = CodeSize();
3402 Comment(
"MonomorphicCheckedEntry");
3404 target::Instructions::kMonomorphicEntryOffsetJIT);
3406 const intptr_t cid_offset = target::Array::element_offset(0);
3407 const intptr_t count_offset = target::Array::element_offset(1);
3410 ldr(R1, FieldAddress(R9, cid_offset));
3411 ldr(R2, FieldAddress(R9, count_offset));
3412 LoadClassIdMayBeSmi(IP, R0);
3413 add(R2, R2, Operand(target::ToRawSmi(1)));
3414 cmp(R1, Operand(IP, LSL, 1));
3415 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3416 str(R2, FieldAddress(R9, count_offset));
3417 LoadImmediate(R4, 0);
3421 target::Instructions::kPolymorphicEntryOffsetJIT);
3423#if defined(TESTING) || defined(DEBUG)
3424 set_use_far_branches(saved_use_far_branches);
3430void Assembler::MonomorphicCheckedEntryAOT() {
3431 has_monomorphic_entry_ =
true;
3432#if defined(TESTING) || defined(DEBUG)
3433 bool saved_use_far_branches = use_far_branches();
3434 set_use_far_branches(
false);
3436 intptr_t
start = CodeSize();
3438 Comment(
"MonomorphicCheckedEntry");
3440 target::Instructions::kMonomorphicEntryOffsetAOT);
3442 LoadClassId(IP, R0);
3443 cmp(R9, Operand(IP, LSL, 1));
3444 Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()), NE);
3448 target::Instructions::kPolymorphicEntryOffsetAOT);
3450#if defined(TESTING) || defined(DEBUG)
3451 set_use_far_branches(saved_use_far_branches);
3455void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
3456 has_monomorphic_entry_ =
true;
3457 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
3461 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
3466void Assembler::CombineHashes(Register
hash, Register other) {
3475void Assembler::FinalizeHashForSize(intptr_t bit_size,
3481 ASSERT(bit_size <= kBitsPerInt32);
3488 if (bit_size < kBitsPerInt32) {
3490 AndImmediateSetFlags(
hash,
hash, Utils::NBitMask(bit_size), NOT_ZERO);
3493 LoadImmediate(
hash, 1, ZERO);
3497void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
3498 ASSERT(stats_addr_reg != kNoRegister);
3499 ASSERT(stats_addr_reg != TMP);
3500 ldrb(TMP, Address(stats_addr_reg, 0));
3501 cmp(TMP, Operand(0));
3505void Assembler::MaybeTraceAllocation(intptr_t cid,
3508 JumpDistance distance) {
3509 LoadAllocationTracingStateAddress(temp_reg, cid);
3510 MaybeTraceAllocation(temp_reg, trace);
3513void Assembler::MaybeTraceAllocation(Register cid,
3516 JumpDistance distance) {
3517 LoadAllocationTracingStateAddress(temp_reg, cid);
3518 MaybeTraceAllocation(temp_reg, trace);
3521void Assembler::LoadAllocationTracingStateAddress(Register dest, Register cid) {
3522 ASSERT(dest != kNoRegister);
3525 LoadIsolateGroup(dest);
3526 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3529 target::ClassTable::allocation_tracing_state_table_offset()));
3530 AddScaled(cid, cid, TIMES_1,
3531 target::ClassTable::AllocationTracingStateSlotOffsetFor(0));
3532 AddRegisters(dest, cid);
3535void Assembler::LoadAllocationTracingStateAddress(Register dest, intptr_t cid) {
3536 ASSERT(dest != kNoRegister);
3540 LoadIsolateGroup(dest);
3541 ldr(dest, Address(dest, target::IsolateGroup::class_table_offset()));
3544 target::ClassTable::allocation_tracing_state_table_offset()));
3546 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid));
3550void Assembler::TryAllocateObject(intptr_t cid,
3551 intptr_t instance_size,
3553 JumpDistance distance,
3554 Register instance_reg,
3555 Register temp_reg) {
3556 ASSERT(failure !=
nullptr);
3557 ASSERT(instance_reg != kNoRegister);
3558 ASSERT(instance_reg != temp_reg);
3559 ASSERT(instance_reg != IP);
3560 ASSERT(temp_reg != kNoRegister);
3562 ASSERT(instance_size != 0);
3563 ASSERT(Utils::IsAligned(instance_size,
3564 target::ObjectAlignment::kObjectAlignment));
3565 if (FLAG_inline_alloc &&
3566 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3567 ldr(instance_reg, Address(THR, target::Thread::top_offset()));
3569 AddImmediate(instance_reg, instance_size);
3571 ldr(IP, Address(THR, target::Thread::end_offset()));
3572 cmp(IP, Operand(instance_reg));
3575 CheckAllocationCanary(instance_reg, temp_reg);
3580 NOT_IN_PRODUCT(LoadAllocationTracingStateAddress(temp_reg, cid));
3585 str(instance_reg, Address(THR, target::Thread::top_offset()));
3587 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
3589 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3590 LoadImmediate(temp_reg, tags);
3591 str(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
3597void Assembler::TryAllocateArray(intptr_t cid,
3598 intptr_t instance_size,
3601 Register end_address,
3604 if (FLAG_inline_alloc &&
3605 target::Heap::IsAllocatableInNewSpace(instance_size)) {
3608 ldr(
instance, Address(THR, target::Thread::top_offset()));
3609 AddImmediateSetFlags(end_address,
instance, instance_size);
3615 ldr(temp2, Address(THR, target::Thread::end_offset()));
3616 cmp(end_address, Operand(temp2));
3618 CheckAllocationCanary(
instance, temp2);
3627 str(end_address, Address(THR, target::Thread::top_offset()));
3632 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
3633 LoadImmediate(temp2, tags);
3635 FieldAddress(
instance, target::Object::tags_offset()));
3641void Assembler::CopyMemoryWords(Register src,
3646 __ cmp(size, Operand(0));
3649 __ ldr(temp, Address(src, target::kWordSize, Address::PostIndex));
3650 __ str(temp, Address(dst, target::kWordSize, Address::PostIndex));
3651 __ subs(size, size, Operand(target::kWordSize));
3652 __ b(&loop, NOT_ZERO);
3656void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
3657 intptr_t offset_into_target) {
3659 EmitType5(cond, 0x686868,
true);
3661 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
3662 PcRelativeCallPattern::kLengthInBytes);
3663 pattern.set_distance(offset_into_target);
3666void Assembler::GenerateUnRelocatedPcRelativeTailCall(
3668 intptr_t offset_into_target) {
3670 EmitType5(cond, 0x686868,
false);
3672 PcRelativeTailCallPattern pattern(buffer_.contents() + buffer_.Size() -
3673 PcRelativeTailCallPattern::kLengthInBytes);
3674 pattern.set_distance(offset_into_target);
3677bool Assembler::AddressCanHoldConstantIndex(
const Object& constant,
3681 intptr_t index_scale,
3683 ASSERT(needs_base !=
nullptr);
3684 auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
3685 if ((rep == kUnboxedInt32x4) || (rep == kUnboxedFloat32x4) ||
3686 (rep == kUnboxedFloat64x2)) {
3691 if (!IsSafeSmi(constant))
return false;
3692 const int64_t index = target::SmiValue(constant);
3693 const intptr_t offset_base =
3696 const int64_t
offset = index * index_scale + offset_base;
3698 if (Address::CanHoldImmediateOffset(is_load, cid,
offset)) {
3699 *needs_base =
false;
3702 if (Address::CanHoldImmediateOffset(is_load, cid,
offset - offset_base)) {
3710Address Assembler::ElementAddressForIntIndex(
bool is_load,
3713 intptr_t index_scale,
3717 const int64_t offset_base =
3721 offset_base +
static_cast<int64_t
>(index) * index_scale;
3724 if (Address::CanHoldImmediateOffset(is_load, cid,
offset)) {
3725 return Address(array,
static_cast<int32_t
>(
offset));
3727 ASSERT(Address::CanHoldImmediateOffset(is_load, cid,
offset - offset_base));
3728 AddImmediate(temp, array,
static_cast<int32_t
>(offset_base));
3729 return Address(temp,
static_cast<int32_t
>(
offset - offset_base));
3733void Assembler::LoadElementAddressForIntIndex(Register address,
3737 intptr_t index_scale,
3740 const int64_t offset_base =
3744 offset_base +
static_cast<int64_t
>(index) * index_scale;
3746 AddImmediate(address, array,
offset);
3749Address Assembler::ElementAddressForRegIndex(
bool is_load,
3752 intptr_t index_scale,
3757 const intptr_t boxing_shift = index_unboxed ? 0 : -
kSmiTagShift;
3758 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3760 is_external ? 0 : (target::Instance::DataOffsetFor(cid) -
kHeapObjectTag);
3765 if ((
offset != 0) || (is_load && (size == kByte || size == kUnsignedByte)) ||
3766 (size == kTwoBytes) || (size == kUnsignedTwoBytes) || (size == kSWord) ||
3767 (size == kDWord) || (size == kRegList)) {
3770 add(
base, array, Operand(index, ASR, 1));
3772 add(
base, array, Operand(index, LSL, shift));
3777 return Address(array, index, ASR, 1);
3779 return Address(array, index, LSL, shift);
3782 int32_t offset_mask = 0;
3783 if ((is_load && !Address::CanHoldLoadOffset(size,
offset, &offset_mask)) ||
3784 (!is_load && !Address::CanHoldStoreOffset(size,
offset, &offset_mask))) {
3791void Assembler::LoadElementAddressForRegIndex(Register address,
3795 intptr_t index_scale,
3800 const intptr_t boxing_shift = index_unboxed ? 0 : -
kSmiTagShift;
3801 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
3803 is_external ? 0 : (target::Instance::DataOffsetFor(cid) -
kHeapObjectTag);
3806 add(address, array, Operand(index, ASR, 1));
3808 add(address, array, Operand(index, LSL, shift));
3811 AddImmediate(address,
offset);
3815void Assembler::LoadStaticFieldAddress(Register address,
3818 LoadFieldFromOffset(scratch, field,
3819 target::Field::host_offset_or_field_id_offset());
3820 const intptr_t field_table_offset =
3821 compiler::target::Thread::field_table_values_offset();
3822 LoadMemoryValue(address, THR,
static_cast<int32_t
>(field_table_offset));
3823 add(address, address,
3824 Operand(scratch, LSL, target::kWordSizeLog2 - kSmiTagShift));
3827void Assembler::LoadFieldAddressForRegOffset(Register address,
3829 Register offset_in_words_as_smi) {
3831 Operand(offset_in_words_as_smi, LSL,
3832 target::kWordSizeLog2 - kSmiTagShift));
3833 AddImmediate(address, -kHeapObjectTag);
3836void Assembler::LoadHalfWordUnaligned(Register dst,
3840 ldrb(dst, Address(addr, 0));
3841 ldrsb(tmp, Address(addr, 1));
3842 orr(dst, dst, Operand(tmp, LSL, 8));
3845void Assembler::LoadHalfWordUnsignedUnaligned(Register dst,
3849 ldrb(dst, Address(addr, 0));
3850 ldrb(tmp, Address(addr, 1));
3851 orr(dst, dst, Operand(tmp, LSL, 8));
3854void Assembler::StoreHalfWordUnaligned(Register src,
3857 strb(src, Address(addr, 0));
3858 Lsr(tmp, src, Operand(8));
3859 strb(tmp, Address(addr, 1));
3862void Assembler::LoadWordUnaligned(Register dst, Register addr, Register tmp) {
3864 ldrb(dst, Address(addr, 0));
3865 ldrb(tmp, Address(addr, 1));
3866 orr(dst, dst, Operand(tmp, LSL, 8));
3867 ldrb(tmp, Address(addr, 2));
3868 orr(dst, dst, Operand(tmp, LSL, 16));
3869 ldrb(tmp, Address(addr, 3));
3870 orr(dst, dst, Operand(tmp, LSL, 24));
3873void Assembler::StoreWordUnaligned(Register src, Register addr, Register tmp) {
3874 strb(src, Address(addr, 0));
3875 Lsr(tmp, src, Operand(8));
3876 strb(tmp, Address(addr, 1));
3877 Lsr(tmp, src, Operand(16));
3878 strb(tmp, Address(addr, 2));
3879 Lsr(tmp, src, Operand(24));
3880 strb(tmp, Address(addr, 3));
3883void Assembler::RangeCheck(Register value,
3887 RangeCheckCondition condition,
3889 auto cc = condition == kIfInRange ?
LS :
HI;
3891 AddImmediate(to_check, value, -low);
3892 CompareImmediate(to_check, high - low);
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static float next(float f)
static void B2(DFData *curr, int width)
static void B1(DFData *curr, int width)
static bool ok(int result)
static uint32_t hash(const SkShaderBase::GradientInfo &v)
static bool equals(T *a, T *b)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
#define DEBUG_ASSERT(cond)
#define ASSERT_EQUAL(expected, actual)
#define RELEASE_ASSERT(cond)
#define COMPILE_ASSERT(expr)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
static const char * begin(const StringSlice &s)
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
#define DECLARE_FLAG(type, name)
ClipOpAndAA opAA SkRegion region
void BailoutWithBranchOffsetError()
bool IsOriginalObject(const Object &object)
bool IsInOldSpace(const Object &obj)
const Object & ToObject(const Code &handle)
const int kDartVolatileCpuRegCount
const Register kWriteBarrierValueReg
const int kNumberOfFpuRegisters
intx_t sign_extend(int32_t x)
@ kBitFieldExtractLSBShift
@ kBitFieldExtractRnShift
const int kFpuRegisterSize
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive mode
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
#define NOT_IN_PRODUCT(code)