7#if defined(TARGET_ARCH_X64)
9#define SHOULD_NOT_INCLUDE_RUNTIME
25 intptr_t far_branch_level)
26 : AssemblerBase(object_pool_builder), constant_pool_allowed_(
false) {
28 ASSERT(far_branch_level == 0);
30 generate_invoke_write_barrier_wrapper_ = [&](
Register reg) {
32 target::Thread::write_barrier_wrappers_thread_offset(reg)));
34 generate_invoke_array_write_barrier_ = [&]() {
36 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
// NOTE(review): This excerpt is a lossy extraction — the original file's line
// numbers are fused into the code text and many interior lines (closing
// braces, parameters, opcode bytes) are missing. Code is left byte-identical;
// only comments are added. All claims below are hedged accordingly.

// Emits a call to a code-local Label; the opcode byte (presumably 0xE8,
// call rel32) is on a line not visible in this excerpt. TODO confirm.
40void Assembler::call(Label* label) {
41 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
44 EmitLabel(label,
kSize);
// Loads a native-function entry into `dst` (parameter line missing here)
// via the object pool: find/insert the native function, then load by index.
47void Assembler::LoadNativeEntry(
49 const ExternalLabel* label,
50 ObjectPoolBuilderEntry::Patchability patchable) {
51 const intptr_t index =
52 object_pool_builder().FindNativeFunction(label, patchable);
53 LoadWordFromPoolIndex(dst, index);
// Calls an absolute external address: materializes the 64-bit address in TMP
// (REX.W + 0xB8|reg = mov r64, imm64) then presumably calls through TMP
// (the call instruction line is not visible). TODO confirm.
56void Assembler::call(
const ExternalLabel* label) {
58 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
59 EmitRegisterREX(TMP, REX_W);
60 EmitUint8(0xB8 | (TMP & 7));
61 EmitInt64(label->address());
// Loads a Code object from the pool into a code register (register choice is
// on missing lines) and calls its entry point for the given entry kind.
66void Assembler::CallCodeThroughPool(intptr_t target_code_pool_index,
67 CodeEntryKind entry_kind) {
74 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
75 call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
// Patchable call: adds the target Code object to the pool as kPatchable
// (so the pool slot can be rewritten later) and calls through the pool.
// The `const Code& target` parameter line appears to be missing.
78void Assembler::CallPatchable(
80 CodeEntryKind entry_kind,
81 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
82 ASSERT(constant_pool_allowed());
83 const intptr_t idx = object_pool_builder().AddObject(
84 ToObject(
target), ObjectPoolBuilderEntry::kPatchable, snapshot_behavior);
85 CallCodeThroughPool(idx, entry_kind);
// Call with an equivalence object for pool deduplication; the FindObject
// call computing `idx` is on lines missing from this excerpt.
88void Assembler::CallWithEquivalence(
const Code&
target,
89 const Object& equivalence,
90 CodeEntryKind entry_kind) {
91 ASSERT(constant_pool_allowed());
94 CallCodeThroughPool(idx, entry_kind);
// Fragment of another Call overload — its signature's first lines are
// missing; it finds the target in the pool and calls the normal entry.
99 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
100 ASSERT(constant_pool_allowed());
101 const intptr_t idx = object_pool_builder().FindObject(
104 CallCodeThroughPool(idx, CodeEntryKind::kNormal);
// push r64: opcode 0x50+rd with optional REX.B for r8-r15.
107void Assembler::pushq(Register reg) {
108 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
109 EmitRegisterREX(reg, REX_NONE);
110 EmitUint8(0x50 | (reg & 7));
// push imm: chooses imm8 or imm32 form depending on the immediate's range;
// the opcode bytes (presumably 0x6A / 0x68) are on missing lines.
113void Assembler::pushq(
const Immediate& imm) {
115 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
117 EmitUint8(imm.value() & 0xFF);
118 }
else if (imm.is_int32()) {
119 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// pop r64: opcode 0x58+rd.
128void Assembler::popq(Register reg) {
129 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
130 EmitRegisterREX(reg, REX_NONE);
131 EmitUint8(0x58 | (reg & 7));
// setcc: 0x0F 0x90+cc, ModRM 0xC0+reg; a REX prefix is required to address
// the low byte of r8-r15 (and of RSP/RBP/RSI/RDI).
134void Assembler::setcc(Condition condition, ByteRegister dst) {
135 ASSERT(dst != kNoByteRegister);
136 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
138 EmitUint8(REX_PREFIX | (((dst & 0x08) != 0) ? REX_B :
REX_NONE));
141 EmitUint8(0x90 + condition);
142 EmitUint8(0xC0 + (dst & 0x07));
// Safepoint entry/exit and generated<->native transitions. NOTE(review):
// lossy extraction — several statements and closing braces are missing;
// comments hedge accordingly.

// Tries to acquire the full safepoint state with a lock-prefixed cmpxchg on
// Thread::safepoint_state; on contention (missing branch lines) it calls the
// enter-safepoint stub loaded from the Thread object. RAX/TMP are clobbered.
145void Assembler::EnterFullSafepoint() {
150 Label
done, slow_path;
158 movq(RAX, Immediate(target::Thread::full_safepoint_state_unacquired()));
159 movq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
160 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
163 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
170 movq(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
171 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
// Transition generated -> native: records the exit frame and exit-through-ffi
// marker on the Thread, sets the VM tag to the destination and the execution
// state to "native", then optionally enters the full safepoint.
181void Assembler::TransitionGeneratedToNative(Register destination_address,
182 Register new_exit_frame,
183 Register new_exit_through_ffi,
184 bool enter_safepoint) {
186 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
189 movq(compiler::Address(THR,
190 compiler::target::Thread::exit_through_ffi_offset()),
191 new_exit_through_ffi);
193 movq(Assembler::VMTagAddress(), destination_address);
194 movq(Address(THR, target::Thread::execution_state_offset()),
195 Immediate(target::Thread::native_execution_state()));
197 if (enter_safepoint) {
198 EnterFullSafepoint();
// Mirror of EnterFullSafepoint: releases the safepoint state via lock
// cmpxchg; the slow path calls one of two exit stubs depending on whether
// unwind-in-progress should be ignored.
202void Assembler::ExitFullSafepoint(
bool ignore_unwind_in_progress) {
207 Label
done, slow_path;
217 movq(RAX, Immediate(target::Thread::full_safepoint_state_acquired()));
218 movq(TMP, Immediate(target::Thread::full_safepoint_state_unacquired()));
219 LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
222 cmpq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
229 if (ignore_unwind_in_progress) {
233 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
235 movq(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
237 movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
// Transition native -> generated: optionally leaves the safepoint (or, when
// not leaving, appears to verify via safepoint_state that none is held),
// restores the Dart VM tag and "generated" execution state, and clears the
// exit frame and exit-through-ffi slots on the Thread.
247void Assembler::TransitionNativeToGenerated(
bool leave_safepoint,
248 bool ignore_unwind_in_progress) {
249 if (leave_safepoint) {
250 ExitFullSafepoint(ignore_unwind_in_progress);
253 ASSERT(!ignore_unwind_in_progress);
256 movq(TMP, Address(THR, target::Thread::safepoint_state_offset()));
257 andq(TMP, Immediate(target::Thread::full_safepoint_state_acquired()));
265 movq(Assembler::VMTagAddress(), Immediate(target::Thread::vm_tag_dart_id()));
266 movq(Address(THR, target::Thread::execution_state_offset()),
267 Immediate(target::Thread::generated_execution_state()));
270 movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
272 movq(compiler::Address(THR,
273 compiler::target::Thread::exit_through_ffi_offset()),
274 compiler::Immediate(0));
// Generic opcode emitters for memory operands. NOTE(review): lossy extraction;
// the `int opcode` parameters and opcode-emitting lines are mostly missing.

// Quad-word (REX.W) opcode with a memory operand.
277void Assembler::EmitQ(
int reg,
278 const Address& address,
283 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
287 EmitOperandREX(reg, address, REX_W);
292 EmitOperand(reg & 7, address);
// Long (32-bit, no REX.W) opcode with a memory operand.
295void Assembler::EmitL(
int reg,
296 const Address& address,
301 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
305 EmitOperandREX(reg, address, REX_NONE);
310 EmitOperand(reg & 7, address);
// Word (16-bit) opcode: 0x66 operand-size override + no REX.W.
313void Assembler::EmitW(Register reg,
314 const Address& address,
318 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
322 EmitOperandSizeOverride();
323 EmitOperandREX(reg, address, REX_NONE);
328 EmitOperand(reg & 7, address);
// Byte opcode: registers >= 8 (or the 0x10-tagged byte registers, per the
// `& ~0x10` mask) appear to force a REX prefix so the low byte is addressed.
331void Assembler::EmitB(
int reg,
const Address& address,
int opcode) {
332 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
333 EmitOperandREX(reg & ~0x10, address, reg >= 8 ? REX_PREFIX :
REX_NONE);
335 EmitOperand(reg & 7, address);
// mov r32, imm32 encoded via the /0 ModRM form (opcode byte on missing line).
338void Assembler::movl(Register dst,
const Immediate& imm) {
339 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
340 Operand operand(dst);
341 EmitOperandREX(0, operand, REX_NONE);
343 EmitOperand(0, operand);
// mov m32, imm32 — body not visible in this excerpt.
348void Assembler::movl(
const Address& dst,
const Immediate& imm) {
// mov m8, imm8.
353void Assembler::movb(
const Address& dst,
const Immediate& imm) {
354 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
355 EmitOperandREX(0, dst, REX_NONE);
359 EmitUint8(imm.value() & 0xFF);
// 16-bit loads into a register are deliberately unsupported: callers must
// pick zero- or sign-extension explicitly.
362void Assembler::movw(Register dst,
const Address& src) {
366 FATAL(
"Use movzxw or movsxw instead.");
// mov m16, imm16: 0x66 prefix, then the two immediate bytes little-endian.
369void Assembler::movw(
const Address& dst,
const Immediate& imm) {
370 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
371 EmitOperandSizeOverride();
372 EmitOperandREX(0, dst, REX_NONE);
375 EmitUint8(imm.value() & 0xFF);
376 EmitUint8((imm.value() >> 8) & 0xFF);
// NOTE(review): lossy extraction; several opcode/parameter lines missing.

// mov r64, imm: picks the shortest encoding — zero-extended mov r32,imm32
// (0xB8+rd) for uint32, sign-extended C7 /0 imm32 for int32, else the full
// REX.W 0xB8+rd imm64.
379void Assembler::movq(Register dst,
const Immediate& imm) {
380 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
381 if (imm.is_uint32()) {
384 EmitRegisterREX(dst, REX_NONE);
385 EmitUint8(0xB8 | (dst & 7));
386 EmitUInt32(imm.value());
387 }
else if (imm.is_int32()) {
389 Operand operand(dst);
390 EmitOperandREX(0, operand, REX_W);
392 EmitOperand(0, operand);
396 EmitRegisterREX(dst, REX_W);
397 EmitUint8(0xB8 | (dst & 7));
// mov m64, imm32 (sign-extended); the non-int32 fallback (presumably via
// TMP) is on missing lines. TODO confirm.
402void Assembler::movq(
const Address& dst,
const Immediate& imm) {
403 if (imm.is_int32()) {
404 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
405 EmitOperandREX(0, dst, REX_W);
// Emits up to three raw opcode bytes (emission lines not visible).
415void Assembler::EmitSimple(
int opcode,
int opcode2,
int opcode3) {
416 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// Register-register forms of the generic emitters, with optional prefixes.
426void Assembler::EmitQ(
int dst,
int src,
int opcode,
int prefix2,
int prefix1) {
429 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
433 EmitRegRegRex(dst, src, REX_W);
438 EmitRegisterOperand(dst & 7, src);
441void Assembler::EmitL(
int dst,
int src,
int opcode,
int prefix2,
int prefix1) {
444 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
448 EmitRegRegRex(dst, src);
453 EmitRegisterOperand(dst & 7, src);
456void Assembler::EmitW(Register dst,
463 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
467 EmitOperandSizeOverride();
468 EmitRegRegRex(dst, src);
473 EmitRegisterOperand(dst & 7, src);
// Unary XMM ops built from constants stored off the Thread: load the
// constant's address into TMP and apply the bitwise op (xorps/andps/...).
// The dst==src vs dst!=src split implied by lines 480/482 is partially
// missing from this excerpt.
476#define UNARY_XMM_WITH_CONSTANT(name, constant, op) \
477 void Assembler::name(XmmRegister dst, XmmRegister src) { \
478 movq(TMP, Address(THR, target::Thread::constant##_address_offset())); \
480 op(dst, Address(TMP, 0)); \
482 movups(dst, Address(TMP, 0)); \
491UNARY_XMM_WITH_CONSTANT(notps, float_not, xorps)
493UNARY_XMM_WITH_CONSTANT(negateps, float_negate, xorps)
495UNARY_XMM_WITH_CONSTANT(absps, float_absolute, andps)
497UNARY_XMM_WITH_CONSTANT(zerowps, float_zerow, andps)
499UNARY_XMM_WITH_CONSTANT(negatepd, double_negate, xorpd)
501UNARY_XMM_WITH_CONSTANT(abspd, double_abs, andpd)
503UNARY_XMM_WITH_CONSTANT(DoubleNegate, double_negate, xorpd)
505UNARY_XMM_WITH_CONSTANT(DoubleAbs, double_abs, andpd)
507#undef UNARY_XMM_WITH_CONSTANT
// SSE compare/shuffle/round helpers and x87 remnants. NOTE(review): lossy
// extraction; some statements missing.

// cmpps xmm, xmm, imm8 (0x0F 0xC2) with the predicate byte appended.
509void Assembler::CmpPS(XmmRegister dst, XmmRegister src,
int condition) {
510 EmitL(dst, src, 0xC2, 0x0F);
511 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
512 EmitUint8(condition);
// Broadcasts an immediate float into all four lanes; the load of the scalar
// (via tmp1, per the parameter) is on missing lines, then shufps(0) splats.
515void Assembler::set1ps(XmmRegister dst, Register tmp1,
const Immediate& imm) {
521 shufps(dst, dst, Immediate(0x0));
// shufps xmm, xmm, imm8 (0x0F 0xC6).
524void Assembler::shufps(XmmRegister dst, XmmRegister src,
const Immediate& imm) {
525 EmitL(dst, src, 0xC6, 0x0F);
526 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
528 EmitUint8(imm.value());
// shufpd xmm, xmm, imm8 (0x66 0x0F 0xC6).
531void Assembler::shufpd(XmmRegister dst, XmmRegister src,
const Immediate& imm) {
532 EmitL(dst, src, 0xC6, 0x0F, 0x66);
533 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
535 EmitUint8(imm.value());
// roundsd: the |0x8 on the mode byte presumably sets the "use imm, not MXCSR"
// / precision-exception-suppress bit — TODO confirm against the SDM.
538void Assembler::roundsd(XmmRegister dst, XmmRegister src, RoundingMode mode) {
541 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
543 EmitRegRegRex(dst, src);
547 EmitRegisterOperand(dst & 7, src);
549 EmitUint8(
static_cast<uint8_t
>(mode) | 0x8);
// x87: load / store-and-pop a double (opcode lines missing).
552void Assembler::fldl(
const Address& src) {
553 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
558void Assembler::fstpl(
const Address& dst) {
559 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
// ffree st(value): 0xDD 0xC0+i.
564void Assembler::ffree(intptr_t value) {
566 EmitSimple(0xDD, 0xC0 + value);
// Compare/test/ALU immediate helpers. NOTE(review): lossy extraction — the
// common pattern appears to be "use the int32 encoding when it fits,
// otherwise materialize in TMP", but the fall-through lines are missing.

// cmp reg, imm for the given operand width; large 64-bit immediates go via TMP.
569void Assembler::CompareImmediate(Register reg,
570 const Immediate& imm,
572 if (
width == kEightBytes) {
573 if (imm.is_int32()) {
577 LoadImmediate(TMP, imm);
// cmp mem, imm, same fit-or-TMP strategy.
586void Assembler::CompareImmediate(
const Address& address,
587 const Immediate& imm,
589 if (
width == kEightBytes) {
590 if (imm.is_int32()) {
593 LoadImmediate(TMP, imm);
// test m8, imm8.
602void Assembler::testb(
const Address& address,
const Immediate& imm) {
603 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
604 EmitOperandREX(0, address, REX_NONE);
606 EmitOperand(0, address);
608 EmitUint8(imm.value() & 0xFF);
// test m8, r8.
611void Assembler::testb(
const Address& address, Register reg) {
612 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
613 EmitOperandREX(reg, address, REX_NONE);
615 EmitOperand(reg & 7, address);
// test reg, imm: byte form for uint8 (REX forced so SPL/BPL/SIL/DIL byte
// registers encode correctly), imm32 form for uint32, REX.W form otherwise.
618void Assembler::testq(Register reg,
const Immediate& imm) {
619 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
620 if (imm.is_uint8()) {
625 EmitRegisterREX(reg, REX_NONE,
true);
631 EmitUint8(0xC0 + (reg & 7));
633 EmitUint8(imm.value() & 0xFF);
634 }
else if (imm.is_uint32()) {
638 EmitRegisterREX(reg, REX_NONE);
640 EmitUint8(0xC0 | (reg & 7));
642 EmitUInt32(imm.value());
646 EmitRegisterREX(reg, REX_W);
651 EmitUint8(0xC0 | (reg & 7));
// Width-aware test; wide immediates go via TMP.
657void Assembler::TestImmediate(Register dst,
658 const Immediate& imm,
660 if (
width == kEightBytes) {
661 if (imm.is_int32() || imm.is_uint32()) {
665 LoadImmediate(TMP, imm);
// Generic ALU group (add/or/adc/sbb/and/sub/xor/cmp selected by
// modrm_opcode) — 32-bit register form.
674void Assembler::AluL(uint8_t modrm_opcode, Register dst,
const Immediate& imm) {
675 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
676 EmitRegisterREX(dst, REX_NONE);
677 EmitComplex(modrm_opcode, Operand(dst), imm);
// Byte ALU op on memory (dst parameter line missing).
680void Assembler::AluB(uint8_t modrm_opcode,
682 const Immediate& imm) {
683 ASSERT(imm.is_uint8() || imm.is_int8());
684 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
685 EmitOperandREX(modrm_opcode, dst, REX_NONE);
687 EmitOperand(modrm_opcode, dst);
688 EmitUint8(imm.value() & 0xFF);
// 16-bit ALU op: sign-extended imm8 form when it fits, else imm16.
691void Assembler::AluW(uint8_t modrm_opcode,
693 const Immediate& imm) {
694 ASSERT(imm.is_int16() || imm.is_uint16());
695 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
696 EmitOperandSizeOverride();
697 EmitOperandREX(modrm_opcode, dst, REX_NONE);
699 EmitSignExtendedInt8(modrm_opcode, dst, imm);
702 EmitOperand(modrm_opcode, dst);
703 EmitUint8(imm.value() & 0xFF);
704 EmitUint8((imm.value() >> 8) & 0xFF);
// 32-bit ALU op on memory.
708void Assembler::AluL(uint8_t modrm_opcode,
710 const Immediate& imm) {
712 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
713 EmitOperandREX(modrm_opcode, dst, REX_NONE);
714 EmitComplex(modrm_opcode, dst, imm);
// 64-bit ALU op on a register. The modrm_opcode==4 (AND) + uint32 special
// case appears to use the zero-extending 32-bit form; wide immediates fall
// back to TMP + the two-register opcode.
717void Assembler::AluQ(uint8_t modrm_opcode,
720 const Immediate& imm) {
721 Operand operand(dst);
722 if (modrm_opcode == 4 && imm.is_uint32()) {
724 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
725 EmitRegisterREX(dst, REX_NONE);
729 EmitSignExtendedInt8(modrm_opcode, operand, imm);
735 EmitOperand(modrm_opcode, operand);
737 EmitUInt32(imm.value());
739 }
else if (imm.is_int32()) {
740 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
741 EmitRegisterREX(dst, REX_W);
742 EmitComplex(modrm_opcode, operand, imm);
746 EmitQ(dst, TMP, opcode);
// 64-bit ALU op on memory; wide immediates via TMP.
750void Assembler::AluQ(uint8_t modrm_opcode,
753 const Immediate& imm) {
754 if (imm.is_int32()) {
755 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
756 EmitOperandREX(modrm_opcode, dst, REX_W);
757 EmitComplex(modrm_opcode, dst, imm);
760 EmitQ(TMP, dst, opcode);
// Macro-ops over the ALU primitives. NOTE(review): lossy extraction; the
// direct-encoding branches between the is_int32 checks and the TMP fallbacks
// are largely missing.

// AND with a wide immediate: direct form when it fits, else via TMP.
764void Assembler::AndImmediate(Register dst,
const Immediate& imm) {
765 if (imm.is_int32() || imm.is_uint32()) {
769 LoadImmediate(TMP, imm);
// dst = src1 & src2 with move-coalescing when dst aliases an input;
// src2 == kNoRegister appears to be a supported degenerate case.
774void Assembler::AndRegisters(Register dst, Register src1, Register src2) {
776 if (src2 == kNoRegister) {
781 }
else if (dst == src2) {
// Variable shift-left; shlq requires the count in RCX, so dst==RCX is
// handled by shifting a copy in TMP.
789void Assembler::LslRegister(Register dst, Register shift) {
793 shlq(dst == RCX ? TMP :
dst,
RCX);
// OR / XOR with a wide immediate: direct when int32, else via TMP.
800void Assembler::OrImmediate(Register dst,
const Immediate& imm) {
801 if (imm.is_int32()) {
805 LoadImmediate(TMP, imm);
810void Assembler::XorImmediate(Register dst,
const Immediate& imm) {
811 if (imm.is_int32()) {
815 LoadImmediate(TMP, imm);
// cqo: sign-extend RAX into RDX:RAX (REX.W 0x99; opcode line missing).
820void Assembler::cqo() {
821 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
822 EmitRegisterREX(RAX, REX_W);
// Unary group ops (not/neg/mul/div... selected by modrm_code), register and
// memory forms, 64- and 32-bit widths.
826void Assembler::EmitUnaryQ(Register reg,
int opcode,
int modrm_code) {
827 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
828 EmitRegisterREX(reg, REX_W);
830 EmitOperand(modrm_code, Operand(reg));
833void Assembler::EmitUnaryL(Register reg,
int opcode,
int modrm_code) {
834 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
835 EmitRegisterREX(reg, REX_NONE);
837 EmitOperand(modrm_code, Operand(reg));
840void Assembler::EmitUnaryQ(
const Address& address,
int opcode,
int modrm_code) {
841 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
842 Operand operand(address);
843 EmitOperandREX(modrm_code, operand, REX_W);
845 EmitOperand(modrm_code, operand);
848void Assembler::EmitUnaryL(
const Address& address,
int opcode,
int modrm_code) {
849 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
850 Operand operand(address);
851 EmitOperandREX(modrm_code, operand, REX_NONE);
853 EmitOperand(modrm_code, operand);
// Multiply and shift helpers. NOTE(review): lossy extraction; opcode bytes
// and some else-branches missing.

// imul r32, r32, imm (0x69 form presumably; opcode line missing).
856void Assembler::imull(Register reg,
const Immediate& imm) {
857 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
858 Operand operand(reg);
859 EmitOperandREX(reg, operand, REX_NONE);
861 EmitOperand(reg & 7, Operand(reg));
// imul r64, r64, imm32; the non-int32 fallback is on missing lines.
865void Assembler::imulq(Register reg,
const Immediate& imm) {
866 if (imm.is_int32()) {
867 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
868 Operand operand(reg);
869 EmitOperandREX(reg, operand, REX_W);
871 EmitOperand(reg & 7, Operand(reg));
// Strength-reduces multiply-by-power-of-two to a shift, width-aware;
// otherwise uses imul with the immediate (general path partially missing).
880void Assembler::MulImmediate(Register reg,
881 const Immediate& imm,
884 if (Utils::IsPowerOfTwo(imm.value())) {
885 const intptr_t shift = Utils::ShiftForPowerOfTwo(imm.value());
886 if (
width == kFourBytes) {
887 shll(reg, Immediate(shift));
889 shlq(reg, Immediate(shift));
891 }
else if (imm.is_int32()) {
892 if (
width == kFourBytes) {
// Shift family: thin wrappers over EmitGenericShift. First bool selects
// 64-bit (true) vs 32-bit (false); the int is the ModRM /opcode extension
// (4 = shl, 5 = shr, 7 = sar).
905void Assembler::shll(Register reg,
const Immediate& imm) {
906 EmitGenericShift(
false, 4, reg, imm);
909void Assembler::shll(Register operand, Register shifter) {
910 EmitGenericShift(
false, 4, operand, shifter);
913void Assembler::shrl(Register reg,
const Immediate& imm) {
914 EmitGenericShift(
false, 5, reg, imm);
917void Assembler::shrl(Register operand, Register shifter) {
918 EmitGenericShift(
false, 5, operand, shifter);
921void Assembler::sarl(Register reg,
const Immediate& imm) {
922 EmitGenericShift(
false, 7, reg, imm);
925void Assembler::sarl(Register operand, Register shifter) {
926 EmitGenericShift(
false, 7, operand, shifter);
// shld r32, r32, imm8 (0x0F 0xA4); note src/dst order is swapped for EmitL.
929void Assembler::shldl(Register dst, Register src,
const Immediate& imm) {
930 EmitL(src, dst, 0xA4, 0x0F);
931 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
933 EmitUint8(imm.value() & 0xFF);
936void Assembler::shlq(Register reg,
const Immediate& imm) {
937 EmitGenericShift(
true, 4, reg, imm);
940void Assembler::shlq(Register operand, Register shifter) {
941 EmitGenericShift(
true, 4, operand, shifter);
944void Assembler::shrq(Register reg,
const Immediate& imm) {
945 EmitGenericShift(
true, 5, reg, imm);
948void Assembler::shrq(Register operand, Register shifter) {
949 EmitGenericShift(
true, 5, operand, shifter);
952void Assembler::sarq(Register reg,
const Immediate& imm) {
953 EmitGenericShift(
true, 7, reg, imm);
956void Assembler::sarq(Register operand, Register shifter) {
957 EmitGenericShift(
true, 7, operand, shifter);
// shld r64, r64, imm8.
960void Assembler::shldq(Register dst, Register src,
const Immediate& imm) {
961 EmitQ(src, dst, 0xA4, 0x0F);
962 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
964 EmitUint8(imm.value() & 0xFF);
// bt r64, imm8 (/4): REX.W only needed when the bit index is >= 32.
967void Assembler::btq(Register
base,
int bit) {
968 ASSERT(bit >= 0 && bit < 64);
969 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
970 Operand operand(
base);
971 EmitOperandREX(4, operand, bit >= 32 ? REX_W :
REX_NONE);
974 EmitOperand(4, operand);
// Frame ops and control flow. NOTE(review): lossy extraction; opcode lines
// and some else-branches missing.

// enter imm16, 0 — frame-allocation instruction (opcode line missing).
978void Assembler::enter(
const Immediate& imm) {
979 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
982 EmitUint8(imm.value() & 0xFF);
983 EmitUint8((imm.value() >> 8) & 0xFF);
// Multi-byte nop of 1..MAX_NOP_SIZE bytes (the per-size encodings, orig
// lines ~991-1048, are not visible in this excerpt).
987void Assembler::nop(
int size) {
988 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
990 ASSERT(0 < size && size <= MAX_NOP_SIZE);
// Conditional jump: bound labels get the short (0x70+cc rel8) form when the
// displacement fits, else the near (0x0F 0x80+cc rel32) form; unbound labels
// are linked for later fixup in Bind().
1049void Assembler::j(Condition condition, Label* label, JumpDistance distance) {
1050 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1051 if (label->IsBound()) {
1052 const int kShortSize = 2;
1053 const int kLongSize = 6;
1054 intptr_t
offset = label->Position() - buffer_.Size();
1056 if (Utils::IsInt(8,
offset - kShortSize)) {
1057 EmitUint8(0x70 + condition);
1058 EmitUint8((
offset - kShortSize) & 0xFF);
1061 EmitUint8(0x80 + condition);
1062 EmitInt32(
offset - kLongSize);
1064 }
else if (distance == kNearJump) {
1065 EmitUint8(0x70 + condition);
1066 EmitNearLabelLink(label);
1069 EmitUint8(0x80 + condition);
1070 EmitLabelLink(label);
// Conditional jump to a Code target: inverts the condition (condition ^ 1)
// to skip over an unconditional Jmp (the Jmp/Bind lines are missing).
1074void Assembler::J(Condition condition,
const Code&
target, Register pp) {
1077 j(
static_cast<Condition>(condition ^ 1), &no_jump, kNearJump);
// Unconditional jump to a label; same short/near selection as j() but with
// 2/5-byte encodings (opcode bytes 0xEB/0xE9 on missing lines).
1082void Assembler::jmp(Label* label, JumpDistance distance) {
1083 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1084 if (label->IsBound()) {
1085 const int kShortSize = 2;
1086 const int kLongSize = 5;
1087 intptr_t
offset = label->Position() - buffer_.Size();
1089 if (Utils::IsInt(8,
offset - kShortSize)) {
1091 EmitUint8((
offset - kShortSize) & 0xFF);
1094 EmitInt32(
offset - kLongSize);
1096 }
else if (distance == kNearJump) {
1098 EmitNearLabelLink(label);
1101 EmitLabelLink(label);
// Jump to an absolute external address via TMP (mov r64, imm64 then an
// indirect jmp on a missing line).
1105void Assembler::jmp(
const ExternalLabel* label) {
1107 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
1108 EmitRegisterREX(TMP, REX_W);
1109 EmitUint8(0xB8 | (TMP & 7));
1110 EmitInt64(label->address());
// Patchable tail-jump: pool-allocates the target Code, loads CODE_REG from
// the pool slot (untagging with kHeapObjectTag) and jumps via its entry
// point (final jmp on a missing line).
1115void Assembler::JmpPatchable(
const Code&
target, Register pp) {
1116 ASSERT((pp != PP) || constant_pool_allowed());
1117 const intptr_t idx = object_pool_builder().AddObject(
1119 const int32_t
offset = target::ObjectPool::element_offset(idx);
1120 movq(CODE_REG, Address(pp,
offset - kHeapObjectTag));
1121 movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
// Non-patchable tail-jump: FindObject (deduplicating) instead of AddObject,
// FieldAddress handles the tag.
1125void Assembler::Jmp(
const Code&
target, Register pp) {
1126 ASSERT((pp != PP) || constant_pool_allowed());
1127 const intptr_t idx = object_pool_builder().FindObject(
1129 const int32_t
offset = target::ObjectPool::element_offset(idx);
1130 movq(CODE_REG, FieldAddress(pp,
offset));
1131 jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
// Stack access and add-immediate macro-ops. NOTE(review): lossy extraction.

// Compare two registers (the cmpq body line is missing).
1134void Assembler::CompareRegisters(Register
a, Register
b) {
// Stack-slot accessors addressed off SPREG in word-size units.
1138void Assembler::LoadFromStack(Register dst, intptr_t depth) {
1140 movq(dst, Address(SPREG, depth * target::kWordSize));
1143void Assembler::StoreToStack(Register src, intptr_t depth) {
1145 movq(Address(SPREG, depth * target::kWordSize), src);
1148void Assembler::CompareToStack(Register src, intptr_t depth) {
1150 cmpq(src, Address(SPREG, depth * target::kWordSize));
// Width dispatch for register-to-register extension; the switch/case labels
// selecting each mov variant are on missing lines.
1153void Assembler::ExtendValue(Register to, Register from, OperandSize sz) {
1156 if (to == from)
return;
1157 return movq(to, from);
1159 return movl(to, from);
1161 return movsxd(to, from);
1163 return movzxw(to, from);
1165 return movsxw(to, from);
1167 return movzxb(to, from);
1169 return movsxb(to, from);
// Push/pop wrappers (bodies on missing lines; presumably pushq/popq).
1176void Assembler::PushRegister(Register r) {
1180void Assembler::PopRegister(Register r) {
// Add immediate to a register, width-aware. Negative values appear to be
// routed through SubImmediate(-value); kMinInt64 can't be negated so it
// stays on the add path.
1184void Assembler::AddImmediate(Register reg,
1185 const Immediate& imm,
1186 OperandSize
width) {
1188 const int64_t
value = imm.value();
1192 if ((value > 0) || (value == kMinInt64)) {
1194 if (
width == kFourBytes) {
1200 if (imm.is_int32() || (
width == kFourBytes && imm.is_uint32())) {
1201 if (
width == kFourBytes) {
1209 LoadImmediate(TMP, imm);
1214 SubImmediate(reg, Immediate(-value),
width);
// Three-operand add: uses leaq for an int32 displacement, otherwise loads
// the immediate and (presumably) adds — the addq line is missing.
1218void Assembler::AddImmediate(Register dest, Register src, int64_t value) {
1220 AddImmediate(dest, value);
1224 MoveRegister(dest, src);
1227 if (Utils::IsInt(32, value)) {
1228 leaq(dest, Address(src, value));
1231 LoadImmediate(dest, value);
// Add immediate to a memory operand; same negate-and-subtract strategy.
1235void Assembler::AddImmediate(
const Address& address,
const Immediate& imm) {
1236 const int64_t
value = imm.value();
1240 if ((value > 0) || (value == kMinInt64)) {
1244 if (imm.is_int32()) {
1247 LoadImmediate(TMP, imm);
1252 SubImmediate(address, Immediate(-value));
// Subtract-immediate, stack drop, and object-pool access. NOTE(review):
// lossy extraction.

// Subtract immediate from a register; negative values (except the
// non-negatable kMinInt64/kMinInt32 sentinels) are forwarded to AddImmediate.
1256void Assembler::SubImmediate(Register reg,
1257 const Immediate& imm,
1258 OperandSize
width) {
1260 const int64_t
value = imm.value();
1264 if ((value > 0) || (value == kMinInt64) ||
1265 (value == kMinInt32 &&
width == kFourBytes)) {
1267 if (
width == kFourBytes) {
1273 if (imm.is_int32()) {
1274 if (
width == kFourBytes) {
1282 LoadImmediate(TMP, imm);
1287 AddImmediate(reg, Immediate(-value),
width);
// Memory-operand variant of the same strategy.
1291void Assembler::SubImmediate(
const Address& address,
const Immediate& imm) {
1292 const int64_t
value = imm.value();
1296 if ((value > 0) || (value == kMinInt64)) {
1300 if (imm.is_int32()) {
1303 LoadImmediate(TMP, imm);
1308 AddImmediate(address, Immediate(-value));
// Drops stack elements: individual pops for <= 4 (pop body line missing),
// otherwise a single RSP adjustment.
1312void Assembler::Drop(intptr_t stack_elements, Register tmp) {
1313 ASSERT(stack_elements >= 0);
1314 if (stack_elements <= 4) {
1315 for (intptr_t i = 0; i < stack_elements; i++) {
1320 addq(RSP, Immediate(stack_elements * target::kWordSize));
// Pool eligibility check (remaining conditions on missing lines).
1323bool Assembler::CanLoadFromObjectPool(
const Object&
object)
const {
1325 if (!constant_pool_allowed()) {
// Load/store a raw word at a pool index, addressed off PP with the heap tag
// folded into FieldAddress.
1334void Assembler::LoadWordFromPoolIndex(Register dst, intptr_t idx) {
1335 ASSERT(constant_pool_allowed());
1338 movq(dst, FieldAddress(PP, target::ObjectPool::element_offset(idx)));
1341void Assembler::StoreWordToPoolIndex(Register src, intptr_t idx) {
1342 ASSERT(constant_pool_allowed());
1345 movq(FieldAddress(PP, target::ObjectPool::element_offset(idx)), src);
// Box-or-Smi unboxing and object loads. NOTE(review): lossy extraction;
// the compressed/uncompressed #else bodies are interleaved and incomplete.

// Unboxes an int64: Smi fast path (untag + sign-extend, NOT_CARRY from the
// untag shift appears to signal "was a Smi") with a Mint-field load on the
// slow path; split by DART_COMPRESSED_POINTERS.
1348void Assembler::LoadInt64FromBoxOrSmi(Register
result, Register value) {
1349 compiler::Label
done;
1350#if !defined(DART_COMPRESSED_POINTERS)
1353 j(NOT_CARRY, &
done, compiler::Assembler::kNearJump);
1357 target::Mint::value_offset()));
1361 MoveRegister(TMP, value);
1367 SmiUntagAndSignExtend(
result, value);
1368 j(NOT_CARRY, &
done, compiler::Assembler::kNearJump);
1369 movq(
result, compiler::FieldAddress(value, target::Mint::value_offset()));
// 32-bit variant of the same pattern.
1374void Assembler::LoadInt32FromBoxOrSmi(Register
result, Register value) {
1375 compiler::Label
done;
1376#if !defined(DART_COMPRESSED_POINTERS)
1379 j(NOT_CARRY, &
done, compiler::Assembler::kNearJump);
1383 compiler::target::Mint::value_offset()));
1387 MoveRegister(TMP, value);
1393 SmiUntagAndSignExtend(
result, value);
1394 j(NOT_CARRY, &
done, compiler::Assembler::kNearJump);
1396 compiler::FieldAddress(value, compiler::target::Mint::value_offset()));
// Thread-state accessors.
1401void Assembler::LoadIsolate(Register dst) {
1402 movq(dst, Address(THR, target::Thread::isolate_offset()));
1405void Assembler::LoadIsolateGroup(Register dst) {
1406 movq(dst, Address(THR, target::Thread::isolate_group_offset()));
1409void Assembler::LoadDispatchTable(Register dst) {
1410 movq(dst, Address(THR, target::Thread::dispatch_table_array_offset()));
// Core object-load dispatch: (1) Thread-cached objects load straight off
// THR; (2) Smis become immediates; (3) everything else goes through the
// object pool, patchable entries via AddObject, unique ones via FindObject.
// (The `dst` and `is_unique` parameter lines are missing.)
1413void Assembler::LoadObjectHelper(
1415 const Object&
object,
1417 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1423 if (target::CanLoadFromThread(
object, &
offset)) {
1424 movq(dst, Address(THR,
offset));
1427 if (target::IsSmi(
object)) {
1428 LoadImmediate(dst, Immediate(target::ToRawSmi(
object)));
1433 const intptr_t index =
1435 ? object_pool_builder().AddObject(
1436 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
1437 : object_pool_builder().FindObject(
1438 object, ObjectPoolBuilderEntry::kNotPatchable,
1440 LoadWordFromPoolIndex(dst, index);
// Non-unique vs unique front-ends over LoadObjectHelper.
1443void Assembler::LoadObject(Register dst,
const Object&
object) {
1444 LoadObjectHelper(dst,
object,
false);
1447void Assembler::LoadUniqueObject(
1449 const Object&
object,
1450 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
1451 LoadObjectHelper(dst,
object,
true, snapshot_behavior);
// Object stores/pushes/compares and immediate loads. NOTE(review): lossy
// extraction; each helper repeats the Thread-cache / Smi / pool three-way
// dispatch seen in LoadObjectHelper.

// Store an Object constant to memory, via THR cache, Smi immediate, or a
// pool load into TMP.
1454void Assembler::StoreObject(
const Address& dst,
1455 const Object&
object,
1458 ASSERT(size == kWordBytes || size == kObjectBytes);
1460 intptr_t offset_from_thread;
1461 if (target::CanLoadFromThread(
object, &offset_from_thread)) {
1462 movq(TMP, Address(THR, offset_from_thread));
1463 Store(TMP, dst, size);
1464 }
else if (target::IsSmi(
object)) {
1465 MoveImmediate(dst, Immediate(target::ToRawSmi(
object)), size);
1467 LoadObject(TMP,
object);
1468 Store(TMP, dst, size);
// Push an Object constant (final pushq of TMP on a missing line).
1472void Assembler::PushObject(
const Object&
object) {
1475 intptr_t offset_from_thread;
1476 if (target::CanLoadFromThread(
object, &offset_from_thread)) {
1477 pushq(Address(THR, offset_from_thread));
1478 }
else if (target::IsSmi(
object)) {
1479 PushImmediate(Immediate(target::ToRawSmi(
object)));
1481 LoadObject(TMP,
object);
// Compare a register against an Object constant; OBJ(cmp) presumably picks
// the compressed/uncompressed cmp variant — TODO confirm.
1486void Assembler::CompareObject(Register reg,
const Object&
object) {
1489 intptr_t offset_from_thread;
1490 if (target::CanLoadFromThread(
object, &offset_from_thread)) {
1491 OBJ(cmp)(reg, Address(THR, offset_from_thread));
1492 }
else if (target::IsSmi(
object)) {
1493 CompareImmediate(reg, Immediate(target::ToRawSmi(
object)), kObjectBytes);
1496 const intptr_t idx = object_pool_builder().FindObject(
1497 object, ObjectPoolBuilderEntry::kNotPatchable);
1498 const int32_t
offset = target::ObjectPool::element_offset(idx);
1499 OBJ(cmp)(reg, Address(PP,
offset - kHeapObjectTag));
// Load immediate: xor for zero (missing line), direct mov when int32 or the
// pool is unavailable, else a pool load.
1503void Assembler::LoadImmediate(Register reg,
const Immediate& imm) {
1504 if (imm.value() == 0) {
1506 }
else if (imm.is_int32() || !constant_pool_allowed()) {
1509 const intptr_t idx = object_pool_builder().FindImmediate(imm.value());
1510 LoadWordFromPoolIndex(reg, idx);
// Store an immediate to memory, width-aware; wide values go via TMP.
1514void Assembler::MoveImmediate(
const Address& dst,
1515 const Immediate& imm,
1517 if (imm.is_int32()) {
1518 if (size == kFourBytes) {
1521 ASSERT(size == kEightBytes);
1525 LoadImmediate(TMP, imm);
1526 if (size == kFourBytes) {
1529 ASSERT(size == kEightBytes);
// FP immediates: bit-cast and load from the object pool (PP-relative,
// untagged); zero fast paths are on missing lines.
1535void Assembler::LoadSImmediate(FpuRegister dst,
float immediate) {
1536 int32_t
bits = bit_cast<int32_t>(immediate);
1540 intptr_t index = object_pool_builder().FindImmediate(bits);
1542 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1546void Assembler::LoadDImmediate(FpuRegister dst,
double immediate) {
1547 int64_t
bits = bit_cast<int64_t>(immediate);
1551 intptr_t index = object_pool_builder().FindImmediate64(bits);
1553 dst, PP, target::ObjectPool::element_offset(index) - kHeapObjectTag);
1557void Assembler::LoadQImmediate(FpuRegister dst, simd128_value_t immediate) {
1558 intptr_t index = object_pool_builder().FindImmediate128(immediate);
1559 movups(dst, Address(PP, target::ObjectPool::element_offset(index) -
// Compressed-pointer decompression: add the heap base after the (missing)
// 32-bit load of the compressed slot.
1563#if defined(DART_COMPRESSED_POINTERS)
1564void Assembler::LoadCompressed(Register dest,
const Address& slot) {
1566 addq(dest, Address(THR, target::Thread::heap_base_offset()));
// Write barriers. NOTE(review): lossy extraction — the barrier logic is
// order-sensitive and several lines (loads, calls, Bind()s) are missing, so
// comments below describe only what the visible lines establish.

// Generational/incremental store barrier: skip when the stored value is a
// Smi (if allowed), then test the overlap of the object's header barrier
// bits against the thread's write-barrier mask and the value's header; the
// slow path marshals object/value into the fixed barrier registers (saving/
// restoring around the wrapper call) and invokes the write-barrier wrapper.
1570void Assembler::StoreBarrier(Register
object,
1572 CanBeSmi can_be_smi,
1576 ASSERT(
object != scratch);
1577 ASSERT(value != scratch);
1578 ASSERT(scratch != kNoRegister);
1588 if (can_be_smi == kValueCanBeSmi) {
1589 BranchIfSmi(value, &
done, kNearJump);
// Debug-only sanity check: value must not be a Smi here.
1593 BranchIfNotSmi(value, &passed_check, kNearJump);
1595 Bind(&passed_check);
1599 FieldAddress(
object, target::Object::tags_offset()));
1600 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1601 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1602 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1603 j(ZERO, &
done, kNearJump);
1606 if (value != kWriteBarrierValueReg) {
1609 pushq(kWriteBarrierValueReg);
1610 if (
object == kWriteBarrierValueReg) {
1614 pushq(object_for_call);
1615 movq(object_for_call,
object);
1617 movq(kWriteBarrierValueReg, value);
1619 generate_invoke_write_barrier_wrapper_(object_for_call);
1620 if (value != kWriteBarrierValueReg) {
1621 if (
object == kWriteBarrierValueReg) {
1622 popq(object_for_call);
1624 popq(kWriteBarrierValueReg);
// Array-store variant: same header-bit check, but the slow path requires
// object/value/slot already in (or moved into) the fixed barrier registers
// before calling the array write-barrier stub.
1629void Assembler::ArrayStoreBarrier(Register
object,
1632 CanBeSmi can_be_smi,
1634 ASSERT(
object != scratch);
1635 ASSERT(value != scratch);
1637 ASSERT(scratch != kNoRegister);
1647 if (can_be_smi == kValueCanBeSmi) {
1648 BranchIfSmi(value, &
done, kNearJump);
1652 BranchIfNotSmi(value, &passed_check, kNearJump);
1654 Bind(&passed_check);
1658 FieldAddress(
object, target::Object::tags_offset()));
1659 shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
1660 andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
1661 testb(FieldAddress(value, target::Object::tags_offset()), scratch);
1662 j(ZERO, &
done, kNearJump);
1664 if ((
object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
1665 (slot != kWriteBarrierSlotReg)) {
1672 generate_invoke_array_write_barrier_();
// Debug check that a barrier-less store is safe: stops unless the value is
// a Smi, not new-space, or the object is not an old-and-not-remembered one.
1677void Assembler::VerifyStoreNeedsNoWriteBarrier(Register
object,
1685 BranchIfSmi(value, &
done, kNearJump);
1686 testb(FieldAddress(value, target::Object::tags_offset()),
1687 Immediate(1 << target::UntaggedObject::kNewBit));
1688 j(ZERO, &
done, Assembler::kNearJump);
1689 testb(FieldAddress(
object, target::Object::tags_offset()),
1690 Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1691 j(ZERO, &
done, Assembler::kNearJump);
1692 Stop(
"Write barrier is required");
// Barrier-less store of an Object constant; kRelease ordering forces the
// value through TMP so the release store path is used.
1696void Assembler::StoreObjectIntoObjectNoBarrier(Register
object,
1697 const Address& dest,
1698 const Object& value,
1699 MemoryOrder memory_order,
1701 if (memory_order == kRelease) {
1702 LoadObject(TMP, value);
1703 StoreIntoObjectNoBarrier(
object, dest, TMP, memory_order, size);
1705 StoreObject(dest, value, size);
// Raw-pointer store (body line missing).
1709void Assembler::StoreInternalPointer(Register
object,
1710 const Address& dest,
// Debug-checked Smi store: value's tag bit must be clear.
1715void Assembler::StoreIntoSmiField(
const Address& dest, Register value) {
1718 testq(value, Immediate(kHeapObjectTag));
1720 Stop(
"New value must be Smi.");
// Smi-field initialization/increment helpers (store lines missing).
1726void Assembler::ZeroInitSmiField(
const Address& dest) {
1727 Immediate zero(target::ToRawSmi(0));
1731void Assembler::ZeroInitCompressedSmiField(
const Address& dest) {
1732 Immediate zero(target::ToRawSmi(0));
1736void Assembler::IncrementCompressedSmiField(
const Address& dest,
1737 int64_t increment) {
1740 Immediate inc_imm(target::ToRawSmi(increment));
1744void Assembler::Bind(Label* label) {
1745 intptr_t bound = buffer_.Size();
1746 ASSERT(!label->IsBound());
1747 while (label->IsLinked()) {
1748 intptr_t position = label->LinkPosition();
1749 intptr_t
next = buffer_.Load<int32_t>(position);
1750 buffer_.Store<int32_t>(position, bound - (position + 4));
1751 label->position_ =
next;
1753 while (label->HasNear()) {
1754 intptr_t position = label->NearPosition();
1755 intptr_t
offset = bound - (position + 1);
1757 buffer_.Store<int8_t>(position,
offset);
1759 label->BindTo(bound);
1762void Assembler::Load(Register reg,
const Address& address, OperandSize sz) {
1765 return movsxb(reg, address);
1767 return movzxb(reg, address);
1769 return movsxw(reg, address);
1771 return movzxw(reg, address);
1773 return movsxd(reg, address);
1775 return movl(reg, address);
1777 return movq(reg, address);
1784void Assembler::Store(Register reg,
const Address& address, OperandSize sz) {
1791 return movw(address, reg);
1794 return movl(address, reg);
1796 return movq(address, reg);
1803void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
1804 sarq(reg, Immediate(shift));
1807void Assembler::CompareWords(Register reg1,
1816 j(LESS,
equals, Assembler::kNearJump);
1818 movq(temp, FieldAddress(reg1,
count, TIMES_8,
offset));
1819 cmpq(temp, FieldAddress(reg2,
count, TIMES_8,
offset));
1820 BranchIf(
EQUAL, &loop, Assembler::kNearJump);
1823void Assembler::EnterFrame(intptr_t frame_size) {
1824 if (prologue_offset_ == -1) {
1825 prologue_offset_ = CodeSize();
1826 Comment(
"PrologueOffset = %" Pd "", CodeSize());
1829 intptr_t check_offset = CodeSize();
1834 ProloguePattern pp(CodeAddress(check_offset));
1837 if (frame_size != 0) {
1838 Immediate frame_space(frame_size);
1839 subq(RSP, frame_space);
1843void Assembler::LeaveFrame() {
1848void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
1851 if (frame_space != 0) {
1852 subq(RSP, Immediate(frame_space));
1854 if (OS::ActivationFrameAlignment() > 1) {
1855 andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
1859void Assembler::EmitEntryFrameVerification() {
1862 leaq(RAX, Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
1863 target::kWordSize));
1866 Stop(
"target::frame_layout.exit_link_slot_from_entry_fp mismatch");
1871void Assembler::PushRegisters(
const RegisterSet& register_set) {
1872 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1873 if (xmm_regs_count > 0) {
1874 AddImmediate(RSP, Immediate(-xmm_regs_count * kFpuRegisterSize));
1880 if (register_set.ContainsFpuRegister(xmm_reg)) {
1881 movups(Address(RSP,
offset), xmm_reg);
1890 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1892 if (register_set.ContainsRegister(reg)) {
1898void Assembler::PopRegisters(
const RegisterSet& register_set) {
1901 if (register_set.ContainsRegister(reg)) {
1906 const intptr_t xmm_regs_count = register_set.FpuRegisterCount();
1907 if (xmm_regs_count > 0) {
1912 if (register_set.ContainsFpuRegister(xmm_reg)) {
1913 movups(xmm_reg, Address(RSP,
offset));
1918 AddImmediate(RSP, Immediate(
offset));
1922void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
1923 for (Register reg : regs) {
1928static const RegisterSet kVolatileRegisterSet(
1929 CallingConventions::kVolatileCpuRegisters,
1930 CallingConventions::kVolatileXmmRegisters);
1932void Assembler::CallCFunction(Register reg,
bool restore_rsp) {
1934 if (CallingConventions::kShadowSpaceBytes != 0) {
1935 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1939 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1940 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1943void Assembler::CallCFunction(Address address,
bool restore_rsp) {
1945 if (CallingConventions::kShadowSpaceBytes != 0) {
1946 subq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1950 if (restore_rsp && CallingConventions::kShadowSpaceBytes != 0) {
1951 addq(RSP, Immediate(CallingConventions::kShadowSpaceBytes));
1955void Assembler::CallRuntime(
const RuntimeEntry& entry,
1957 ASSERT(!entry.is_leaf());
1960 movq(RBX, compiler::Address(THR, entry.OffsetFromThread()));
1962 call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
1965#define __ assembler_->
1967LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
1968 intptr_t frame_size,
1969 bool preserve_registers)
1970 : assembler_(assembler), preserve_registers_(preserve_registers) {
1971 __ Comment(
"EnterCallRuntimeFrame");
1974 if (preserve_registers_) {
1976 __ PushRegisters(kVolatileRegisterSet);
1979 ASSERT(IsCalleeSavedRegister(THR));
1980 ASSERT(IsCalleeSavedRegister(PP));
1981 ASSERT(IsCalleeSavedRegister(CODE_REG));
1984 __ ReserveAlignedFrameSpace(frame_size);
1987void LeafRuntimeScope::Call(
const RuntimeEntry& entry,
1991 COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
1992 __ movq(RAX, compiler::Address(THR, entry.OffsetFromThread()));
1993 __ movq(compiler::Assembler::VMTagAddress(), RAX);
1994 __ CallCFunction(RAX);
1995 __ movq(compiler::Assembler::VMTagAddress(),
1996 compiler::Immediate(VMTag::kDartTagId));
1999LeafRuntimeScope::~LeafRuntimeScope() {
2000 if (preserve_registers_) {
2004 const intptr_t kPushedCpuRegistersCount =
2005 RegisterSet::RegisterCount(CallingConventions::kVolatileCpuRegisters);
2006 const intptr_t kPushedXmmRegistersCount =
2007 RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
2008 const intptr_t kPushedRegistersSize =
2009 kPushedCpuRegistersCount * target::kWordSize +
2012 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2015 __ PopRegisters(kVolatileRegisterSet);
2017 const intptr_t kPushedRegistersSize =
2018 (target::frame_layout.dart_fixed_frame_size - 2) *
2020 __ leaq(RSP, Address(RBP, -kPushedRegistersSize));
2026#if defined(TARGET_USES_THREAD_SANITIZER)
2027void Assembler::TsanLoadAcquire(Address addr) {
2028 LeafRuntimeScope rt(
this, 0,
true);
2029 leaq(CallingConventions::kArg1Reg, addr);
2030 rt.Call(kTsanLoadAcquireRuntimeEntry, 1);
2033void Assembler::TsanStoreRelease(Address addr) {
2034 LeafRuntimeScope rt(
this, 0,
true);
2035 leaq(CallingConventions::kArg1Reg, addr);
2036 rt.Call(kTsanStoreReleaseRuntimeEntry, 1);
2040void Assembler::RestoreCodePointer() {
2042 Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
2045void Assembler::LoadPoolPointer(Register pp) {
2048 movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
2049 set_constant_pool_allowed(pp == PP);
2052void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
2053 ASSERT(!constant_pool_allowed());
2055 if (!FLAG_precompiled_mode) {
2058 if (new_pp == kNoRegister) {
2059 LoadPoolPointer(PP);
2064 set_constant_pool_allowed(
true);
2065 if (frame_size != 0) {
2066 subq(RSP, Immediate(frame_size));
2070void Assembler::LeaveDartFrame() {
2072 if (!FLAG_precompiled_mode) {
2073 movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
2074 target::kWordSize)));
2076 set_constant_pool_allowed(
false);
2080void Assembler::CheckCodePointer() {
2082 if (!FLAG_check_code_pointer) {
2085 Comment(
"CheckCodePointer");
2086 Label cid_ok, instructions_ok;
2088 LoadClassId(RAX, CODE_REG);
2089 cmpq(RAX, Immediate(kCodeCid));
2094 const intptr_t kRIPRelativeLeaqSize = 7;
2095 const intptr_t header_to_entry_offset =
2097 const intptr_t header_to_rip_offset =
2098 CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
2099 leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
2100 ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
2102 cmpq(RAX, FieldAddress(CODE_REG, target::Code::instructions_offset()));
2103 j(
EQUAL, &instructions_ok);
2105 Bind(&instructions_ok);
2115void Assembler::EnterOsrFrame(intptr_t extra_size) {
2116 ASSERT(!constant_pool_allowed());
2117 if (prologue_offset_ == -1) {
2118 Comment(
"PrologueOffset = %" Pd "", CodeSize());
2119 prologue_offset_ = CodeSize();
2121 RestoreCodePointer();
2124 if (extra_size != 0) {
2125 subq(RSP, Immediate(extra_size));
2129void Assembler::EnterStubFrame() {
2130 EnterDartFrame(0, kNoRegister);
2133void Assembler::LeaveStubFrame() {
2137void Assembler::EnterCFrame(intptr_t frame_space) {
2143 ReserveAlignedFrameSpace(frame_space);
2146void Assembler::LeaveCFrame() {
2152void Assembler::MonomorphicCheckedEntryJIT() {
2153 has_monomorphic_entry_ =
true;
2154 intptr_t
start = CodeSize();
2155 Label have_cid, miss;
2157 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2163 Comment(
"MonomorphicCheckedEntry");
2165 target::Instructions::kMonomorphicEntryOffsetJIT);
2166 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2168 const intptr_t cid_offset = target::Array::element_offset(0);
2169 const intptr_t count_offset = target::Array::element_offset(1);
2171 LoadTaggedClassIdMayBeSmi(RAX, RDX);
2173 OBJ(cmp)(
RAX, FieldAddress(RBX, cid_offset));
2174 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2175 OBJ(add)(FieldAddress(RBX, count_offset), Immediate(target::ToRawSmi(1)));
2177#if defined(DART_COMPRESSED_POINTERS)
2185 target::Instructions::kPolymorphicEntryOffsetJIT);
2186 ASSERT(((CodeSize() -
start) & kSmiTagMask) == kSmiTag);
2191void Assembler::MonomorphicCheckedEntryAOT() {
2192 has_monomorphic_entry_ =
true;
2193 intptr_t
start = CodeSize();
2194 Label have_cid, miss;
2196 jmp(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
2202 Comment(
"MonomorphicCheckedEntry");
2204 target::Instructions::kMonomorphicEntryOffsetAOT);
2205 ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
2208 LoadClassId(RAX, RDX);
2210 j(NOT_EQUAL, &miss, Assembler::kNearJump);
2214#if defined(DART_COMPRESSED_POINTERS)
2220 target::Instructions::kPolymorphicEntryOffsetAOT);
2221 ASSERT(((CodeSize() -
start) & kSmiTagMask) == kSmiTag);
2224void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
2225 has_monomorphic_entry_ =
true;
2226 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
2230 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
2235void Assembler::CombineHashes(Register dst, Register other) {
2240 shll(other, Immediate(10));
2244 shrl(other, Immediate(6));
2248void Assembler::FinalizeHashForSize(intptr_t bit_size,
2254 ASSERT(bit_size <= kBitsPerInt32);
2255 ASSERT(scratch != kNoRegister);
2258 shll(scratch, Immediate(3));
2262 shrl(scratch, Immediate(11));
2266 shll(scratch, Immediate(15));
2269 if (bit_size < kBitsPerInt32) {
2270 andl(dst, Immediate(Utils::NBitMask(bit_size)));
2274 j(NOT_ZERO, &
done, kNearJump);
2280void Assembler::MaybeTraceAllocation(Register cid,
2283 JumpDistance distance) {
2284 if (temp_reg == kNoRegister) {
2288 LoadIsolateGroup(temp_reg);
2289 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2293 target::ClassTable::allocation_tracing_state_table_offset()));
2294 cmpb(Address(temp_reg, cid, TIMES_1,
2295 target::ClassTable::AllocationTracingStateSlotOffsetFor(0)),
2299 j(NOT_ZERO, trace, distance);
2302void Assembler::MaybeTraceAllocation(intptr_t cid,
2305 JumpDistance distance) {
2308 if (temp_reg == kNoRegister) {
2311 LoadIsolateGroup(temp_reg);
2312 movq(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
2315 target::ClassTable::allocation_tracing_state_table_offset()));
2316 cmpb(Address(temp_reg,
2317 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid)),
2321 j(NOT_ZERO, trace, distance);
2325void Assembler::TryAllocateObject(intptr_t cid,
2326 intptr_t instance_size,
2328 JumpDistance distance,
2329 Register instance_reg,
2330 Register temp_reg) {
2331 ASSERT(failure !=
nullptr);
2332 ASSERT(instance_size != 0);
2333 ASSERT(Utils::IsAligned(instance_size,
2334 target::ObjectAlignment::kObjectAlignment));
2335 if (FLAG_inline_alloc &&
2336 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2340 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg, distance));
2341 movq(instance_reg, Address(THR, target::Thread::top_offset()));
2342 addq(instance_reg, Immediate(instance_size));
2344 cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
2345 j(ABOVE_EQUAL, failure, distance);
2346 CheckAllocationCanary(instance_reg);
2349 movq(Address(THR, target::Thread::top_offset()), instance_reg);
2350 ASSERT(instance_size >= kHeapObjectTag);
2351 AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
2352 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2353 MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
2360void Assembler::TryAllocateArray(intptr_t cid,
2361 intptr_t instance_size,
2363 JumpDistance distance,
2365 Register end_address,
2367 ASSERT(failure !=
nullptr);
2368 if (FLAG_inline_alloc &&
2369 target::Heap::IsAllocatableInNewSpace(instance_size)) {
2373 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp, distance));
2374 movq(
instance, Address(THR, target::Thread::top_offset()));
2377 addq(end_address, Immediate(instance_size));
2383 cmpq(end_address, Address(THR, target::Thread::end_offset()));
2384 j(ABOVE_EQUAL, failure);
2389 movq(Address(THR, target::Thread::top_offset()), end_address);
2390 addq(
instance, Immediate(kHeapObjectTag));
2394 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
2395 movq(FieldAddress(
instance, target::Object::tags_offset()),
2402void Assembler::CopyMemoryWords(Register src,
2411 cmpq(size, Immediate(0));
2414 movq(temp, Address(src, 0));
2415 addq(src, Immediate(target::kWordSize));
2416 movq(Address(dst, 0), temp);
2417 addq(dst, Immediate(target::kWordSize));
2418 subq(size, Immediate(target::kWordSize));
2419 j(NOT_ZERO, &loop, kNearJump);
2423void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
2424 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2425 buffer_.Emit<uint8_t>(0xe8);
2426 buffer_.Emit<int32_t>(0);
2428 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2429 PcRelativeCallPattern::kLengthInBytes);
2430 pattern.set_distance(offset_into_target);
2433void Assembler::GenerateUnRelocatedPcRelativeTailCall(
2434 intptr_t offset_into_target) {
2435 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2436 buffer_.Emit<uint8_t>(0xe9);
2437 buffer_.Emit<int32_t>(0);
2439 PcRelativeCallPattern pattern(buffer_.contents() + buffer_.Size() -
2440 PcRelativeCallPattern::kLengthInBytes);
2441 pattern.set_distance(offset_into_target);
2444void Assembler::Align(
int alignment, intptr_t
offset) {
2445 ASSERT(Utils::IsPowerOfTwo(alignment));
2446 intptr_t
pos =
offset + buffer_.GetPosition();
2447 int mod =
pos & (alignment - 1);
2451 intptr_t bytes_needed = alignment - mod;
2452 while (bytes_needed > MAX_NOP_SIZE) {
2456 if (bytes_needed != 0) {
2459 ASSERT(((
offset + buffer_.GetPosition()) & (alignment - 1)) == 0);
2462void Assembler::EmitOperand(
int rm,
const Operand& operand) {
2463 ASSERT(rm >= 0 && rm < 8);
2464 const intptr_t
length = operand.length_;
2467 ASSERT((operand.encoding_[0] & 0x38) == 0);
2468 EmitUint8(operand.encoding_[0] + (rm << 3));
2470 for (intptr_t i = 1; i <
length; i++) {
2471 EmitUint8(operand.encoding_[i]);
2475void Assembler::EmitRegisterOperand(
int rm,
int reg) {
2477 operand.SetModRM(3,
static_cast<Register>(reg));
2478 EmitOperand(rm, operand);
2481void Assembler::EmitImmediate(
const Immediate& imm) {
2482 if (imm.is_int32()) {
2483 EmitInt32(
static_cast<int32_t
>(imm.value()));
2485 EmitInt64(imm.value());
2489void Assembler::EmitSignExtendedInt8(
int rm,
2490 const Operand& operand,
2491 const Immediate& immediate) {
2493 EmitOperand(rm, operand);
2494 EmitUint8(immediate.value() & 0xFF);
2497void Assembler::EmitComplex(
int rm,
2498 const Operand& operand,
2499 const Immediate& immediate) {
2500 ASSERT(rm >= 0 && rm < 8);
2501 ASSERT(immediate.is_int32());
2502 if (immediate.is_int8()) {
2503 EmitSignExtendedInt8(rm, operand, immediate);
2504 }
else if (operand.IsRegister(RAX)) {
2506 EmitUint8(0x05 + (rm << 3));
2507 EmitImmediate(immediate);
2510 EmitOperand(rm, operand);
2511 EmitImmediate(immediate);
2515void Assembler::EmitLabel(Label* label, intptr_t instruction_size) {
2516 if (label->IsBound()) {
2517 intptr_t
offset = label->Position() - buffer_.Size();
2519 EmitInt32(
offset - instruction_size);
2521 EmitLabelLink(label);
2525void Assembler::EmitLabelLink(Label* label) {
2526 ASSERT(!label->IsBound());
2527 intptr_t position = buffer_.Size();
2528 EmitInt32(label->position_);
2529 label->LinkTo(position);
2532void Assembler::EmitNearLabelLink(Label* label) {
2533 ASSERT(!label->IsBound());
2534 intptr_t position = buffer_.Size();
2536 label->NearLinkTo(position);
2539void Assembler::EmitGenericShift(
bool wide,
2542 const Immediate& imm) {
2543 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2546 EmitRegisterREX(reg, REX_W);
2548 EmitRegisterREX(reg, REX_NONE);
2550 if (imm.value() == 1) {
2552 EmitOperand(rm, Operand(reg));
2555 EmitOperand(rm, Operand(reg));
2556 EmitUint8(imm.value() & 0xFF);
2560void Assembler::EmitGenericShift(
bool wide,
2564 AssemblerBuffer::EnsureCapacity ensured(&buffer_);
2566 EmitRegisterREX(operand, wide ? REX_W :
REX_NONE);
2568 EmitOperand(rm, Operand(operand));
2571void Assembler::ExtractClassIdFromTags(Register
result, Register tags) {
2572 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2573 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2575 shrl(
result, Immediate(12));
2578void Assembler::ExtractInstanceSizeFromTags(Register
result, Register tags) {
2579 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
2580 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
2582 shrl(
result, Immediate(target::UntaggedObject::kSizeTagPos -
2583 target::ObjectAlignment::kObjectAlignmentLog2));
2585 Immediate(Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
2586 << target::ObjectAlignment::kObjectAlignmentLog2));
2589void Assembler::LoadClassId(Register
result, Register
object) {
2590 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2591 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2592 movl(
result, FieldAddress(
object, target::Object::tags_offset()));
2593 shrl(
result, Immediate(target::UntaggedObject::kClassIdTagPos));
2596void Assembler::LoadClassById(Register
result, Register class_id) {
2598 const intptr_t table_offset =
2599 target::IsolateGroup::cached_class_table_table_offset();
2601 LoadIsolateGroup(
result);
2606void Assembler::CompareClassId(Register
object,
2609 LoadClassId(TMP,
object);
2610 cmpl(TMP, Immediate(class_id));
2613void Assembler::SmiUntagOrCheckClass(Register
object,
2616#if !defined(DART_COMPRESSED_POINTERS)
2617 ASSERT(kSmiTagShift == 1);
2618 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
2619 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
2622 j(NOT_CARRY, is_smi, kNearJump);
2625 movl(TMP, Address(
object, TIMES_2,
2626 target::Object::tags_offset() + kHeapObjectTag));
2627 shrl(TMP, Immediate(target::UntaggedObject::kClassIdTagPos));
2628 cmpl(TMP, Immediate(class_id));
2636void Assembler::LoadClassIdMayBeSmi(Register
result, Register
object) {
2642 testq(
object, Immediate(kSmiTagMask));
2643 j(
EQUAL, &smi, Assembler::kNearJump);
2644 LoadClassId(
result,
object);
2645 jmp(&join, Assembler::kNearJump);
2648 movq(
result, Immediate(kSmiCid));
2652 testq(
object, Immediate(kSmiTagMask));
2653 movq(
result, Immediate(kSmiCid));
2654 j(
EQUAL, &smi, Assembler::kNearJump);
2655 LoadClassId(
result,
object);
2661void Assembler::LoadTaggedClassIdMayBeSmi(Register
result, Register
object) {
2667 testq(
object, Immediate(kSmiTagMask));
2668 j(
EQUAL, &smi, Assembler::kNearJump);
2669 LoadClassId(
result,
object);
2671 jmp(&join, Assembler::kNearJump);
2674 movq(
result, Immediate(target::ToRawSmi(kSmiCid)));
2678 testq(
object, Immediate(kSmiTagMask));
2679 movq(
result, Immediate(target::ToRawSmi(kSmiCid)));
2680 j(
EQUAL, &smi, Assembler::kNearJump);
2681 LoadClassId(
result,
object);
2688void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
2693 Comment(
"Check that object in register has cid %" Pd "", cid);
2695 LoadClassIdMayBeSmi(scratch, src);
2696 CompareImmediate(scratch, cid);
2697 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
2699 CompareImmediate(scratch, kNullCid);
2700 BranchIf(
EQUAL, &matches, Assembler::kNearJump);
2707Address Assembler::VMTagAddress() {
2708 return Address(THR, target::Thread::vm_tag_offset());
2711bool Assembler::AddressCanHoldConstantIndex(
const Object& constant,
2714 intptr_t index_scale) {
2715 if (!IsSafeSmi(constant))
return false;
2716 const int64_t index = target::SmiValue(constant);
2717 const int64_t disp =
2718 index * index_scale +
2719 (is_external ? 0 : target::Instance::DataOffsetFor(cid) -
kHeapObjectTag);
2720 return Utils::IsInt(32, disp);
2723Address Assembler::ElementAddressForIntIndex(
bool is_external,
2725 intptr_t index_scale,
2729 return Address(array, index * index_scale);
2731 const int64_t disp =
static_cast<int64_t
>(index) * index_scale +
2732 target::Instance::DataOffsetFor(cid);
2733 ASSERT(Utils::IsInt(32, disp));
2734 return FieldAddress(array,
static_cast<int32_t
>(disp));
2738Address Assembler::ElementAddressForRegIndex(
bool is_external,
2740 intptr_t index_scale,
2745 return Address(array, index, ToScaleFactor(index_scale, index_unboxed), 0);
2747 return FieldAddress(array, index, ToScaleFactor(index_scale, index_unboxed),
2748 target::Instance::DataOffsetFor(cid));
2752void Assembler::RangeCheck(Register value,
2756 RangeCheckCondition condition,
2760 if (temp != kNoRegister) {
2764 subq(to_check, Immediate(low));
2765 cmpq(to_check, Immediate(high - low));
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static float next(float f)
static bool ok(int result)
static bool equals(T *a, T *b)
#define DEBUG_ASSERT(cond)
#define ASSERT_EQUAL(expected, actual)
#define RELEASE_ASSERT(cond)
#define COMPILE_ASSERT(expr)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
static constexpr int kSize
#define DECLARE_FLAG(type, name)
bool IsOriginalObject(const Object &object)
bool IsInOldSpace(const Object &obj)
const Object & ToObject(const Code &handle)
const int kFpuRegisterSize
ByteRegister ByteRegisterOf(Register reg)
SINT Vec< 2 *N, T > join(const Vec< N, T > &lo, const Vec< N, T > &hi)
constexpr bool kTargetUsesThreadSanitizer
#define NOT_IN_PRODUCT(code)