#if defined(TARGET_ARCH_ARM)

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
void FlowGraphCompiler::ArchSpecificInitialization() {
  if (FLAG_precompiled_mode) {
    auto object_store = isolate_group()->object_store();
    const auto& stub =
        Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }
    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
}
FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so verify the jump labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}

bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return FLAG_unbox_doubles;
}
void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}
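// CreateDeoptInfo walks the chain of deoptimization environments from the
// innermost frame outwards and records, for every frame, the PP, PC marker,
// caller FP, return address, and copies for locals and outgoing arguments;
// the resulting instruction stream is what the deoptimizer replays.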
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  // The real frame starts here.
  builder->MarkFrameStart();

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack for the bottom-most frame, so the GC can find them.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // The values of outgoing arguments can be changed from the inlined call,
    // so they must be read from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; note that outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);

  // Set slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ bkpt(0);
  }
  ASSERT(deopt_env() != nullptr);
  __ Call(compiler::Address(
      THR, compiler::target::Thread::deoptimize_entry_offset()));
  set_pc_offset(assembler->CodeSize());
#undef __
}
#define __ assembler->

void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  __ ldr(TypeTestABI::kScratchReg,
         compiler::FieldAddress(
             reg_with_type,
             compiler::target::AbstractType::type_test_stub_entry_point_offset()));
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ blx(TypeTestABI::kScratchReg);
}

#undef __
#define __ assembler()->
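// GenerateBoolToJump branches to is_true/is_false depending on the Dart bool
// held in bool_register and falls through when the register contains null.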
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ b(&fall_through, EQ);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition = EmitBoolTest(bool_register, labels, /*invert=*/false);
  ASSERT(true_condition != kInvalidCondition);
  __ b(is_true, true_condition);
  __ b(is_false);
  __ Bind(&fall_through);
}
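// The frame entry of JIT-compiled code checks the function's usage counter:
// once it reaches the optimization threshold, control tail-calls the
// optimization entry in the Thread; otherwise an OSR or a regular Dart frame
// is entered.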
void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = R8;
    __ ldr(function_reg, compiler::FieldAddress(
                             CODE_REG, compiler::target::Code::owner_offset()));
    __ ldr(R3, compiler::FieldAddress(
                   function_reg,
                   compiler::target::Function::usage_counter_offset()));
    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ add(R3, R3, compiler::Operand(1));
      __ str(R3, compiler::FieldAddress(
                     function_reg,
                     compiler::target::Function::usage_counter_offset()));
    }
    __ CompareImmediate(R3, GetOptimizationThreshold());
    __ Branch(compiler::Address(
                  THR, compiler::target::Thread::optimize_entry_offset()),
              GE);
  }

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");
    if (flow_graph().IsCompiledForOsr()) {
      const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
      ASSERT(extra_slots >= 0);
      __ EnterOsrFrame(extra_slots * compiler::target::kWordSize);
    } else {
      ASSERT(StackSize() >= 0);
      __ EnterDartFrame(StackSize() * compiler::target::kWordSize);
    }
  } else if (FLAG_precompiled_mode) {
    assembler()->set_constant_pool_allowed(true);
  }
}
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
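// In unoptimized code the prologue null-initializes every stack-allocated
// local; the slot reserved for the arguments descriptor (if any) is seeded
// from ARGS_DESC_REG instead of the null object.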
void FlowGraphCompiler::EmitPrologue() {
  BeginCodeSourceRange(PrologueSource());

  EmitFrameEntry();
  ASSERT(assembler()->constant_pool_allowed());

  // In unoptimized code, initialize (non-argument) stack-allocated slots.
  if (!is_optimizing()) {
    const int num_locals = parsed_function().num_stack_locals();

    intptr_t args_desc_slot = -1;
    if (parsed_function().has_arg_desc_var()) {
      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().arg_desc_var());
    }

    __ Comment("Initialize spill slots");
    if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
      __ LoadObject(R0, Object::null_object());
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
      __ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
    }
  } else if (parsed_function().suspend_state_var() != nullptr &&
             !flow_graph().IsCompiledForOsr()) {
    // Initialize the synthetic :suspend_state variable early as it may be
    // accessed by GC and exception handling before it is set up normally.
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariable(
            parsed_function().suspend_state_var());
    __ LoadObject(R0, Object::null_object());
    __ StoreToOffset(R0, FP, slot_index * compiler::target::kWordSize);
  }

  EndCodeSourceRange(PrologueSource());
}
void FlowGraphCompiler::EmitCallToStub(
    const Code& stub,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    __ BranchLink(stub);
    AddStubCallTarget(stub);
  }
}

void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ ldr(PC, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
    AddStubCallTarget(stub);
  }
}
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
  ASSERT(!stub.IsNull());
  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ ldr(PC, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
    AddStubCallTarget(stub);
  }
}
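// The Generate*Call helpers below emit the call instruction and then record
// the call-site metadata (pc descriptor kind, deopt id and the pending
// deoptimization environment) through EmitCallsiteMetadata.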
void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source,
    const Code& stub,
    UntaggedPcDescriptors::Kind kind,
    LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}
void FlowGraphCompiler::GenerateDartCall(
    intptr_t deopt_id, const InstructionSource& source, const Code& stub,
    UntaggedPcDescriptors::Kind kind, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  __ BranchLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}
void FlowGraphCompiler::GenerateStaticDartCall(
    intptr_t deopt_id, const InstructionSource& source,
    UntaggedPcDescriptors::Kind kind, LocationSummary* locs,
    const Function& target, Code::EntryKind entry_kind) {
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    // Call sites to the same target can share an object pool entry. These
    // call sites are never patched for breakpoints: the function is
    // deoptimized and the unoptimized code with IC calls for static calls is
    // patched instead.
    const auto& stub = StubCode::CallStaticFunction();
    __ BranchLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  // We do not check for overflow when incrementing the edge counter: the
  // function should be optimized long before the counter can overflow.
  ASSERT(!edge_counters_array_.IsNull());
  __ Comment("Edge counter");
  __ LoadObject(R0, edge_counters_array_);
  __ LoadFieldFromOffset(R1, R0,
                         compiler::target::Array::element_offset(edge_id));
  __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
  __ StoreIntoObjectOffsetNoBarrier(
      R0, compiler::target::Array::element_offset(edge_id), R1);
}
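// For the instance-call emitters below, the receiver is loaded into R0 from
// its stack slot and the ICData (or MegamorphicCache) is passed in R9, the
// IC_DATA_REG on ARM, before transferring control to the call stub.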
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub, const ICData& ic_data, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadObject(R9, ic_data);
  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
                   entry_kind);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadUniqueObject(IC_DATA_REG, ic_data);
  __ LoadUniqueObject(CODE_REG, stub);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? compiler::target::Code::entry_point_offset(
                Code::EntryKind::kMonomorphic)
          : compiler::target::Code::entry_point_offset(
                Code::EntryKind::kMonomorphicUnchecked);
  __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP,
                    (args_desc.Count() - 1) * compiler::target::kWordSize);
  // Use the same code pattern as instance calls so the code patcher can
  // parse it.
  __ LoadUniqueObject(IC_DATA_REG, cache);
  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ Call(compiler::FieldAddress(
      CODE_REG, compiler::target::Code::entry_point_offset(
                    Code::EntryKind::kMonomorphic)));
}
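// AOT instance calls use the switchable-call machinery: the call initially
// targets the SwitchableCallMiss stub and carries an UnlinkedCall object in
// R9; the runtime later patches the object-pool entries to faster call states
// (e.g. monomorphic) without recompiling the caller.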
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Load the receiver into R0.
  __ LoadFromOffset(
      R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * compiler::target::kWordSize);
  if (FLAG_precompiled_mode) {
    // The AOT runtime will replace the slot in the object pool with the
    // entry point address.
    const auto snapshot_behavior =
        compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
    CLOBBERS_LR(__ LoadUniqueObject(LR, initial_stub, AL, snapshot_behavior));
  } else {
    __ LoadUniqueObject(CODE_REG, initial_stub);
    const intptr_t entry_point_offset =
        entry_kind == Code::EntryKind::kNormal
            ? compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphic)
            : compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphicUnchecked);
    CLOBBERS_LR(
        __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset)));
  }
  __ LoadUniqueObject(R9, data);
  CLOBBERS_LR(__ blx(LR));

  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(R9, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function, const Array& arguments_descriptor,
    intptr_t size_with_type_args, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  if (function.PrologueNeedsArgumentsDescriptor()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  } else {
    if (!FLAG_precompiled_mode) {
      __ LoadImmediate(ARGS_DESC_REG, 0);  // GC-safe Smi zero.
    }
  }
  GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
                         function, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  const intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
                          compiler::target::kWordSize;
  CLOBBERS_LR({
    if (offset == 0) {
      __ ldr(LR, compiler::Address(DISPATCH_TABLE_REG, cid_reg, LSL,
                                   compiler::target::kWordSizeLog2));
    } else {
      __ add(LR, DISPATCH_TABLE_REG,
             compiler::Operand(cid_reg, LSL, compiler::target::kWordSizeLog2));
      if (!Utils::MagnitudeIsUint(12, offset)) {
        const intptr_t adjust = offset & -(1 << 12);
        __ AddImmediate(LR, LR, adjust);
        __ ldr(LR, compiler::Address(LR, offset - adjust));
      } else {
        __ ldr(LR, compiler::Address(LR, offset));
      }
    }
    __ blx(LR);
  });
}
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg, const Object& obj, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ Push(reg);
    __ PushObject(obj);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ Drop(1);   // Discard constant.
    __ Pop(reg);  // Restore 'reg'.
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    __ Push(left);
    __ Push(right);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ Pop(right);
    __ Pop(left);
  } else {
    __ cmp(left, compiler::Operand(right));
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  __ tst(value,
         compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
  return invert ? NE : EQ;
}
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif
  __ PushRegisters(*locs->live_registers());
}

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}
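// In DEBUG builds, temporaries that are not preserved across a slow-path call
// are overwritten with a recognizable constant so that stale values are never
// silently reused (see ClobberDeadTempRegisters below).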
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ mov(tmp.reg(), compiler::Operand(0xf7));
    }
  }
}
Register FlowGraphCompiler::EmitTestCidRegister() {
  return R2;
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load the receiver into R0.
  __ LoadFromOffset(
      R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  __ tst(R0, compiler::Operand(kSmiTagMask));
  // Jump if the receiver is (not) a Smi.
  __ b(label, if_smi ? EQ : NE);
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != R0);
  __ LoadClassId(class_id_reg, R0);
}
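// EmitMove dispatches on the source and destination location kinds (CPU
// register, FPU register, stack slot, double/quad stack slot, pair location,
// constant) and routes data through DTMP/STMP or an allocated temporary when
// no direct register-to-register form exists.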
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), compiler::Operand(source.reg()));
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      Register temp_reg = allocator->AllocateTemporary();
      __ LoadFromOffset(temp_reg, source.base_reg(), source_offset);
      __ StoreToOffset(temp_reg, destination.base_reg(), dest_offset);
      allocator->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      if (TargetCPUFeatures::neon_supported()) {
        __ vmovq(destination.fpu_reg(), source.fpu_reg());
      } else {
        // Without NEON only the even-numbered D register holds data.
        __ vmovd(EvenDRegisterOf(destination.fpu_reg()),
                 EvenDRegisterOf(source.fpu_reg()));
      }
    } else if (destination.IsStackSlot()) {
      // 32-bit float.
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const SRegister src = EvenSRegisterOf(EvenDRegisterOf(source.fpu_reg()));
      __ StoreSToOffset(src, destination.base_reg(), dest_offset);
    } else if (destination.IsDoubleStackSlot()) {
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister src = EvenDRegisterOf(source.fpu_reg());
      __ StoreDToOffset(src, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
      __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else if (destination.IsStackSlot()) {
      // 32-bit float.
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadSFromOffset(STMP, source.base_reg(), source_offset);
      __ StoreSToOffset(STMP, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
      __ LoadMultipleDFromOffset(dst0, 2, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dtmp0 = DTMP;
      __ LoadMultipleDFromOffset(dtmp0, 2, source.base_reg(), source_offset);
      __ StoreMultipleDToOffset(dtmp0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.Component(i), source.Component(i), allocator);
    }
  } else {
    ASSERT(source.IsConstant());
    if (destination.IsFpuRegister() || destination.IsDoubleStackSlot() ||
        destination.IsStackSlot()) {
      Register tmp = allocator->AllocateTemporary();
      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp,
                                                        source.pair_index());
      allocator->ReleaseTemporary();
    } else {
      source.constant_instruction()->EmitMoveToLocation(this, destination);
    }
  }
}
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const auto& dst_container_type = destination.container_type();
  ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
  ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  const intptr_t src_size = src_payload_type.SizeInBytes();
  const intptr_t dst_size = dst_payload_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    ASSERT(src.num_regs() == 1);
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 4);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        if (src_payload_type.IsSigned()) {
          __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        } else {
          __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        }
      }
    } else if (destination.IsFpuRegisters()) {
      // FPU registers should only contain floats and CPU registers only ints.
      UNIMPLEMENTED();
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      auto const op_size =
          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    // Conversions are not handled here; they use IL convert instructions.
    ASSERT(src_payload_type.Equals(dst_payload_type));

    if (destination.IsRegisters()) {
      // FPU registers should only contain floats and CPU registers only ints.
      UNIMPLEMENTED();
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      switch (dst_size) {
        case 16:
          __ vmovq(dst.fpu_reg(), src.fpu_reg());
          return;
        case 8:
          __ vmovd(dst.fpu_as_d_reg(), src.fpu_as_d_reg());
          return;
        case 4:
          __ vmovs(dst.fpu_as_s_reg(), src.fpu_as_s_reg());
          return;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsStack();
      switch (dst_size) {
        case 8:
          __ StoreDToOffset(src.fpu_as_d_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        case 4:
          __ StoreSToOffset(src.fpu_as_s_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_payload_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_payload_type.Equals(dst_payload_type));
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (dst_size) {
        case 8:
          __ LoadDFromOffset(dst.fpu_as_d_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        case 4:
          __ LoadSFromOffset(dst.fpu_as_s_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();
    }
  }
}
void FlowGraphCompiler::EmitNativeLoad(Register dst,
                                       Register base,
                                       intptr_t offset,
                                       compiler::ffi::PrimitiveType type) {
  // Values with no single-instruction load (e.g. 3-byte-wide types) are
  // assembled from narrower loads; the high part is loaded into TMP and
  // merged in:
  __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
  __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
}

void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  __ b(&skip_reloc);
  InsertBSSRelocation(relocation);
  __ Bind(&skip_reloc);

  // tmp is expected to hold the address of the relocation word at this point;
  // its value is the distance from that word to the start of the BSS section.
  __ ldr(dst, compiler::Address(tmp));
  // tmp = tmp + (bss_start - tmp) = bss_start.
  __ add(tmp, tmp, compiler::Operand(dst));
  // Load the BSS entry itself.
  __ ldr(dst, compiler::Address(tmp));
}
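// ParallelMoveEmitter::EmitSwap exchanges two locations, using IP as the
// scratch register for CPU registers and DTMP/QTMP (plus a scratch FPU
// register scope) for FPU registers and FPU stack slots.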
#define __ compiler_->assembler()->

void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != IP);
    ASSERT(destination.reg() != IP);
    __ mov(IP, compiler::Operand(source.reg()));
    __ mov(source.reg(), compiler::Operand(destination.reg()));
    __ mov(destination.reg(), compiler::Operand(IP));
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    if (TargetCPUFeatures::neon_supported()) {
      const QRegister dst = destination.fpu_reg();
      const QRegister src = source.fpu_reg();
      __ vmovq(QTMP, src);
      __ vmovq(src, dst);
      __ vmovq(dst, QTMP);
    } else {
      const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      const DRegister src = EvenDRegisterOf(source.fpu_reg());
      __ vmovd(DTMP, src);
      __ vmovd(src, dst);
      __ vmovd(dst, DTMP);
    }
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    QRegister qreg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    DRegister reg = EvenDRegisterOf(qreg);
    Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();

    if (double_width) {
      __ LoadDFromOffset(DTMP, base_reg, slot_offset);
      __ StoreDToOffset(reg, base_reg, slot_offset);
      __ vmovd(reg, DTMP);
    } else {
      __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
      __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
      __ vmovq(qreg, QTMP);
    }
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
    __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
                              source_offset);
  } else {
    UNREACHABLE();
  }
}
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   const compiler::Address& mem) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), compiler::Operand(reg));
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  // Assumption: the scratch FPU register is spilled to a stack slot through
  // its even D-register half; the exact instruction form is assumed here.
  __ vstrd(EvenDRegisterOf(reg),
           compiler::Address(SP, -kDoubleSize, compiler::Address::PreIndex));
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  // Assumption: restores what SpillFpuScratch above saved.
  __ vldrd(EvenDRegisterOf(reg),
           compiler::Address(SP, kDoubleSize, compiler::Address::PostIndex));
}

#undef __

#endif  // defined(TARGET_ARCH_ARM)