#if defined(TARGET_ARCH_ARM)
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  if (FLAG_precompiled_mode) {
    auto object_store = isolate_group()->object_store();
    const auto& stub =
        Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }
    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
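
// ~FlowGraphCompiler (excerpt): BlockInfos are zone-allocated, so their
// destructors never run; verify that no block's jump label was left linked.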
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
  intrinsic_mode_ = true;

  intrinsic_mode_ = false;
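
// CompilerDeoptInfo::CreateDeoptInfo translates the deoptimization environment
// into a DeoptInfo: materializations first, then, frame by frame from the
// innermost to the outermost environment, the PP, PC marker, caller FP, return
// address, and copies describing parameters and locals.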
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  // Emit all values that are needed for materialization as part of the
  // expression stack for the bottom-most frame.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // Outgoing arguments of the inlined call are read from the previous
    // (inner) environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);

  // Add slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  if (FLAG_trap_on_deoptimization) {
    __ bkpt(0);
  }
  __ Call(compiler::Address(
      THR, compiler::target::Thread::deoptimize_entry_offset()));
#undef __

#define __ assembler->
void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  // Load the type-test stub entry point out of the type held in reg_with_type
  // (compiler::FieldAddress on type_test_stub_entry_point_offset()), load the
  // SubtypeTestCache at sub_type_cache_index, and call the stub indirectly.
  // ...
}
#undef __

#define __ assembler()->
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ b(&fall_through, EQ);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition = EmitBoolTest(bool_register, labels,
                                          /*invert=*/false);
  __ b(is_true, true_condition);
  __ b(is_false);
  __ Bind(&fall_through);
}
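
// EmitFrameEntry: when the function may (re)optimize, bump its usage counter
// and branch into the optimizing entry once the threshold is reached, then
// enter the Dart frame if one is needed.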
void FlowGraphCompiler::EmitFrameEntry() {
  // Invocation-count check (only emitted when the function can be optimized;
  // the guard and function_reg setup are elided here).
  __ Comment("Invocation Count Check");
  __ ldr(function_reg, compiler::FieldAddress(
                           CODE_REG, compiler::target::Code::owner_offset()));
  __ ldr(R3, compiler::FieldAddress(
                 function_reg,
                 compiler::target::Function::usage_counter_offset()));
  __ add(R3, R3, compiler::Operand(1));
  __ str(R3, compiler::FieldAddress(
                 function_reg,
                 compiler::target::Function::usage_counter_offset()));
  __ CompareImmediate(R3, GetOptimizationThreshold());
  __ Branch(compiler::Address(
                THR, compiler::target::Thread::optimize_entry_offset()),
            GE);

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");
    // ...
  } else if (FLAG_precompiled_mode) {
    // ...
  }
}
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
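
// EmitPrologue (excerpt): after the frame entry, null-initialize the stack
// slots for locals, keeping the cached arguments descriptor (if any) in its
// dedicated slot.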
  const int num_locals = parsed_function().num_stack_locals();
  intptr_t args_desc_slot = -1;
  // ...
  __ Comment("Initialize spill slots");
  if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
    __ LoadObject(R0, Object::null_object());
  }
  for (intptr_t i = 0; i < num_locals; ++i) {
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
    // ...
  }

  // Eagerly null-initialize the synthetic :suspend_state slot (optimized code).
  const intptr_t slot_index =
      compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().suspend_state_var());
  __ LoadObject(R0, Object::null_object());
  // ...
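
// The stub-call emitters below prefer an un-relocated PC-relative (tail) call
// when CanPcRelativeCall(stub) holds (typically precompiled code); otherwise
// they go through CODE_REG and the stub's entry point.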
  // EmitCallToStub:
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  }
  // EmitJumpToStub:
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ ldr(PC, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
  }
  // EmitTailCallToStub:
  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ ldr(PC, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
  }
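
// GeneratePatchableCall / GenerateDartCall / GenerateStaticDartCall emit the
// call and then record call-site metadata together with the pending
// deoptimization environment.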
void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source,
    const Code& stub,
    UntaggedPcDescriptors::Kind kind,
    LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  // ... (patchable branch-and-link to the stub, honoring snapshot_behavior)
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  __ BranchLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    // Call sites to the same target can share object pool entries.
    const auto& stub = StubCode::CallStaticFunction();
    __ BranchLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
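
// EmitEdgeCounter: in unoptimized code, increment the Smi counter for this
// edge in edge_counters_array_ (no write barrier needed for a Smi).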
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  __ Comment("Edge counter");
  __ LoadObject(R0, edge_counters_array_);
  __ LoadFieldFromOffset(R1, R0,
                         compiler::target::Array::element_offset(edge_id));
  __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
  __ StoreIntoObjectOffsetNoBarrier(
      R0, compiler::target::Array::element_offset(edge_id), R1);
}
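
// EmitOptimizedInstanceCall / EmitInstanceCallJIT load the receiver from the
// stack into R0 and invoke the IC stub; the JIT variant calls through
// CODE_REG at the monomorphic entry point.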
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub,
    const ICData& ic_data,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  // ...
}
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  // ...
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  // ...
}
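
// EmitMegamorphicInstanceCall (JIT only): dispatch through the MegamorphicCall
// stub and a MegamorphicCache looked up by selector and arguments descriptor.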
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  // ...
  __ Comment("MegamorphicCall");
  // ... (receiver and MegamorphicCache are loaded for the stub)
  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ Call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
  // ...
}
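
// EmitInstanceCallAOT: an AOT switchable call. The site starts out pointing at
// the SwitchableCallMiss stub; the stub entry is loaded into LR and invoked
// with blx, and the ICData records whether the receiver can be a Smi.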
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // ... (receiver is loaded into R0)
  const auto snapshot_behavior =
      compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
  CLOBBERS_LR(__ LoadUniqueObject(LR, initial_stub, AL, snapshot_behavior));
  __ LoadUniqueObject(R9, data);
  CLOBBERS_LR(__ blx(LR));

  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  // ...
}
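
// EmitUnoptimizedStaticCall loads the ICData into R9 and calls the n-argument
// unoptimized static-call entry stub.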
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(R9, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
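
// EmitOptimizedStaticCall sets up the arguments descriptor register as needed
// and performs a direct (patchable) static call via GenerateStaticDartCall.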
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  // ...
  if (!FLAG_precompiled_mode) {
    __ LoadImmediate(ARGS_DESC_REG, 0);  // GC-safe Smi zero because of stub.
  }
  // ...
}
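
// EmitDispatchTableCall (AOT): load the call target from the dispatch table
// (DISPATCH_TABLE_REG, indexed by class id) at selector_offset; offsets that
// do not fit in a 12-bit immediate are added to LR in two steps.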
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  const intptr_t offset = selector_offset - DispatchTable::kOriginElement;
  // ...
  if (!Utils::MagnitudeIsUint(12, offset)) {
    // Offset does not fit in a 12-bit immediate: add the upper bits first.
    const intptr_t adjust = offset & -(1 << 12);
    __ AddImmediate(LR, LR, adjust);
    __ ldr(LR, compiler::Address(LR, offset - adjust));
  }
  // ...
}
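
// EmitEqualityRegConstCompare / EmitEqualityRegRegCompare: identity compares
// that need a number check call the (un)optimized IdenticalWithNumberCheck
// stub; otherwise a plain compare is emitted. Both return EQ.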
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg, const Object& obj, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    // ...
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    // ...
  } else {
    __ cmp(left, compiler::Operand(right));
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitBoolTest(Register value, BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  // ... (tests the bool's value bit and returns the resulting condition)
}
698 locs->CheckWritableInputs();
699 ClobberDeadTempRegisters(locs);
702 __ PushRegisters(*locs->live_registers());
706 __ PopRegisters(*locs->live_registers());
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ mov(tmp.reg(), compiler::Operand(0xf7));
    }
  }
}
Register FlowGraphCompiler::EmitTestCidRegister() {
  // ...
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // ...
}

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  // ...
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  __ LoadClassId(class_id_reg, R0);
}
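
// EmitMove is the parallel-move resolver's primitive move between registers,
// FPU registers, stack slots, pair locations, and constants, using an
// allocator-provided temporary where a direct move is impossible.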
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;
  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), compiler::Operand(source.reg()));
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      // Memory-to-memory move goes through a temporary register.
      Register temp_reg = allocator->AllocateTemporary();
      __ LoadFromOffset(temp_reg, source.base_reg(), source_offset);
      __ StoreToOffset(temp_reg, destination.base_reg(), dest_offset);
      allocator->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmovq(destination.fpu_reg(), source.fpu_reg());
    } else if (destination.IsStackSlot()) {
      // 32-bit float.
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const SRegister src = EvenSRegisterOf(EvenDRegisterOf(source.fpu_reg()));
      __ StoreSToOffset(src, destination.base_reg(), dest_offset);
    } else if (destination.IsDoubleStackSlot()) {
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister src = EvenDRegisterOf(source.fpu_reg());
      __ StoreDToOffset(src, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
      __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else if (destination.IsStackSlot()) {
      // 32-bit float.
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadSFromOffset(STMP, source.base_reg(), source_offset);
      __ StoreSToOffset(STMP, destination.base_reg(), dest_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
      __ LoadMultipleDFromOffset(dst0, 2, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      const DRegister dtmp0 = DTMP;
      __ LoadMultipleDFromOffset(dtmp0, 2, source.base_reg(), source_offset);
      __ StoreMultipleDToOffset(dtmp0, 2, destination.base_reg(), dest_offset);
    }
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.AsPairLocation()->At(i),
               source.AsPairLocation()->At(i), allocator);
    }
  } else {
    ASSERT(source.IsConstant());
    if (destination.IsFpuRegister() || destination.IsDoubleStackSlot() ||
        destination.IsStackSlot()) {
      Register tmp = allocator->AllocateTemporary();
      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp,
                                                        source.pair_index());
      allocator->ReleaseTemporary();
    } else {
      source.constant_instruction()->EmitMoveToLocation(this, destination);
    }
  }
}
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const auto& dst_container_type = destination.container_type();
  ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
  ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  const intptr_t src_size = src_payload_type.SizeInBytes();
  const intptr_t dst_size = dst_payload_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;
  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 4);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else if (src_payload_type.IsSigned()) {
        // ... (sign-extend src_reg into dst_reg)
      } else {
        // ... (zero-extend src_reg into dst_reg)
      }
    } else if (destination.IsFpuRegisters()) {
      // ...
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      const auto op_size =
          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_payload_type.Equals(dst_payload_type));

    if (destination.IsRegisters()) {
      // ...
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      switch (dst_size) {
        case 16:
          __ vmovq(dst.fpu_reg(), src.fpu_reg());
          return;
        case 8:
          __ vmovd(dst.fpu_as_d_reg(), src.fpu_as_d_reg());
          return;
        case 4:
          __ vmovs(dst.fpu_as_s_reg(), src.fpu_as_s_reg());
          return;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsStack();
      switch (dst_size) {
        case 8:
          __ StoreDToOffset(src.fpu_as_d_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        case 4:
          __ StoreSToOffset(src.fpu_as_s_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_payload_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_payload_type.Equals(dst_payload_type));
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (src_size) {
        case 8:
          __ LoadDFromOffset(dst.fpu_as_d_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        case 4:
          __ LoadSFromOffset(dst.fpu_as_s_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      // ...
    }
  }
}
void FlowGraphCompiler::EmitNativeLoad(Register dst, /* ... */) {
  // ... (loads from memory with the width and signedness implied by the
  // primitive representation passed by EmitNativeMoveArchitecture)
}

void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  // ... (emits the relocation word and computes its address into tmp)
  // tmp holds the address of the BSS relocation.
  __ ldr(dst, compiler::Address(tmp));
  // dst now holds the relocation value; adding it to tmp yields the BSS start.
  __ add(tmp, tmp, compiler::Operand(dst));
  // Load the target entry out of the BSS.
  __ ldr(dst, compiler::Address(tmp));
}
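
// ParallelMoveEmitter::EmitSwap exchanges two locations in place, going
// through IP, DTMP/QTMP, or a scratch FPU register so that no value is lost.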
1071#define __ compiler_->assembler()->
1073void ParallelMoveEmitter::EmitSwap(
const MoveOperands& move) {
1075 const Location destination = move.dest();
1077 if (
source.IsRegister() && destination.IsRegister()) {
1081 __ mov(
source.reg(), compiler::Operand(destination.reg()));
1082 __ mov(destination.reg(), compiler::Operand(
IP));
1083 }
else if (
source.IsRegister() && destination.IsStackSlot()) {
1084 Exchange(
source.reg(), destination.base_reg(),
1085 destination.ToStackSlotOffset());
1086 }
else if (
source.IsStackSlot() && destination.IsRegister()) {
1087 Exchange(destination.reg(),
source.base_reg(),
source.ToStackSlotOffset());
1088 }
else if (
source.IsStackSlot() && destination.IsStackSlot()) {
1089 Exchange(
source.base_reg(),
source.ToStackSlotOffset(),
1090 destination.base_reg(), destination.ToStackSlotOffset());
1091 }
else if (
source.IsFpuRegister() && destination.IsFpuRegister()) {
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    const bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    QRegister qreg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    DRegister reg = EvenDRegisterOf(qreg);
    Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();

    if (double_width) {
      __ LoadDFromOffset(DTMP, base_reg, slot_offset);
      __ StoreDToOffset(reg, base_reg, slot_offset);
      __ vmovd(reg, DTMP);
    } else {
      __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
      __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
      __ vmovq(qreg, QTMP);
    }
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
    DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
    __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
    __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
    __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
                              source_offset);
  } else {
    UNREACHABLE();
  }
}
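
// The remaining ParallelMoveEmitter helpers are small: the offset-based
// Exchange variants swap values through scratch registers, and
// SpillScratch / RestoreScratch push and pop a scratch register.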
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   const compiler::Address& mem) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  UNREACHABLE();
}
void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), compiler::Operand(reg));
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}
void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  // ... (spills the FPU register to the stack)
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  // ... (reloads the FPU register from the stack)
}

#undef __

#endif  // defined(TARGET_ARCH_ARM)