#if defined(TARGET_ARCH_IA32)
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
    ASSERT(!block_info_[i]->jump_label()->HasNear());
  return FLAG_enable_simd_inline;
  intrinsic_mode_ = true;

  intrinsic_mode_ = false;
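// CompilerDeoptInfo::CreateDeoptInfo builds the deoptimization descriptor:
// after a prefix of materializations it appears to walk the environment chain
// from the innermost frame outwards, recording for each frame the return
// address, the copied values, a PC marker and the caller's FP, and finally
// the caller's PC plus the incoming parameters of the outermost frame.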
    const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;

  AllocateOutgoingArguments(deopt_env_);

  Environment* current = deopt_env_;

  EmitMaterializations(deopt_env_, builder);
  builder->AddCallerFp(slot_ix++);

  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);

  builder->AddPcMarker(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddReturnAddress(current->function(),
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);

    builder->AddPcMarker(current->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    current = current->outer();
  ASSERT(previous != nullptr);

  builder->AddCallerPc(slot_ix++);

  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);

  return builder->CreateDeoptInfo(deopt_table);
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->

  if (FLAG_trap_on_deoptimization) {

  __ Call(StubCode::Deoptimize());
#define __ assembler()->
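// The "__" shorthand routes assembler mnemonics through the compiler's
// current assembler: for example, "__ cmpl(reg, imm)" expands to
// "assembler()->cmpl(reg, imm)".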
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  const compiler::Immediate& raw_null =
      compiler::Immediate(static_cast<intptr_t>(Object::null()));
  compiler::Label fall_through;
  __ cmpl(bool_register, raw_null);

  BranchLabels labels = {is_true, is_false, &fall_through};

  __ j(true_condition, is_true);
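// BranchLabels bundles the three jump targets used by the branch emitters:
// where to go when the condition holds, where to go when it does not, and the
// label immediately after the test (the fall-through case).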
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
  const SubtypeTestCache& type_test_cache =
  const auto& stub_entry =
  if (num_inputs >= 7) {
    __ PushObject(Object::null_object());
  if (num_inputs >= 3) {
    __ PushObject(Object::null_object());
  if (num_inputs >= 4) {
    __ PushObject(Object::null_object());
      "Code assumes cache and result register are the same");
                     is_not_instance_lbl);
  return type_test_cache.ptr();
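// On IA32 the subtype-test cache stub appears to take its inputs on the
// stack; inputs that the chosen TypeTestStubKind does not use are pushed as
// null so the stub always sees the same frame layout.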
    CompileType* receiver_type,
    const InstructionSource& source,
    const String& dst_name,
    LocationSummary* locs) {
  const auto& dst_type =
          ? AbstractType::Cast(
          : Object::null_abstract_type();
  if (!dst_type.IsNull()) {
    ASSERT(dst_type.IsFinalized());
    if (dst_type.IsTopTypeForSubtyping()) return;

  compiler::Label is_assignable, runtime_call;

  if (dst_type.IsNull()) {
    __ Comment("AssertAssignable for runtime type");
        StubCode::TypeIsTopTypeForSubtyping(),
        UntaggedPcDescriptors::kOther, locs);
        UntaggedPcDescriptors::kOther, locs);
    test_cache = GenerateCallSubtypeTestStub(kTestTypeMaxArgs, &is_assignable,
    __ Comment("AssertAssignable for compile-time type");
    __ BranchIf(EQUAL, &is_assignable);
      "Expected AssertAssignable to have 4 inputs");

  if (!dst_type.IsNull()) {
    __ PushObject(dst_type);

      UntaggedPcDescriptors::kOther, locs, deopt_id, env);
void FlowGraphCompiler::EmitFrameEntry() {
  __ Comment("Invocation Count Check");
  __ incl(compiler::FieldAddress(function_reg,
                                 Function::usage_counter_offset()));
      compiler::FieldAddress(function_reg, Function::usage_counter_offset()),
      compiler::Immediate(GetOptimizationThreshold()));
  compiler::Label dont_optimize;
  __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
  __ Comment("Enter frame");
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
  return prologue_source;
  intptr_t args_desc_slot = -1;

  __ Comment("Initialize spill slots");
  if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
    const compiler::Immediate& raw_null =
        compiler::Immediate(static_cast<intptr_t>(Object::null()));
    __ movl(EAX, raw_null);

  for (intptr_t i = 0; i < num_locals; ++i) {
    const intptr_t slot_index =
    __ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);

  const intptr_t slot_index =
  __ LoadObject(EAX, Object::null_object());
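// Spill-slot initialization: EAX is loaded once with the raw null immediate
// and then stored into each EBP-relative local slot; value_reg is presumably
// EAX except for the slot that receives the arguments descriptor
// (args_desc_slot).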
  if (stub.InVMIsolateHeap()) {
    const InstructionSource& source,
    LocationSummary* locs,
  __ Call(stub, false, entry_kind);
      pending_deoptimization_env_);
    const InstructionSource& source,
    LocationSummary* locs,
  const auto& stub = StubCode::CallStaticFunction();
  __ Call(stub, true, entry_kind);
      pending_deoptimization_env_);
  AddStaticCallTarget(target, entry_kind);
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
  __ LoadObject(ECX, ic_data);
      UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  __ Comment("Edge counter");
  __ LoadObject(EAX, edge_counters_array_);
  __ IncrementSmiField(
    const ICData& ic_data,
    const InstructionSource& source,
    LocationSummary* locs,
  __ movl(EBX, compiler::Address(
    const ICData& ic_data,
    const InstructionSource& source,
    LocationSummary* locs,
         entry_kind == Code::EntryKind::kUnchecked);
  __ movl(EBX, compiler::Address(
  const intptr_t entry_point_offset =
      pending_deoptimization_env_);
    const Array& arguments_descriptor,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);

  __ Comment("MegamorphicCall");
  __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
  __ call(compiler::FieldAddress(
  ASSERT(!FLAG_precompiled_mode);
    const InstructionSource& source,
    LocationSummary* locs,
    bool receiver_can_be_smi) {
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    const InstructionSource& source,
    LocationSummary* locs,
    int32_t selector_offset,
    const Array& arguments_descriptor) {
    bool needs_number_check,
    const InstructionSource& source,
  ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));

  if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
    ASSERT(!needs_number_check);

  if (needs_number_check) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    __ CompareObject(reg, obj);
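// Equality against a constant: when a number check is required, the
// comparison is delegated to the (un)optimized IdenticalWithNumberCheck
// stubs; otherwise a plain CompareObject / cmpl suffices.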
    bool needs_number_check,
    const InstructionSource& source,
  if (needs_number_check) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    __ cmpl(left, right);
  __ Comment("BoolTest");
  __ testl(value, compiler::Immediate(
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);

  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
        __ movups(compiler::Address(ESP, offset), xmm_reg);

    if (locs->live_registers()->ContainsRegister(reg)) {

    if (locs->live_registers()->ContainsRegister(reg)) {

  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
        __ movups(xmm_reg, compiler::Address(ESP, offset));
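// SaveLiveRegisters / RestoreLiveRegisters (fragments above): live XMM
// registers are spilled to an ESP-relative block with movups, while live
// general-purpose registers are saved one by one (presumably pushed and later
// popped in matching order); the restore path mirrors the save path.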
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movl(tmp.reg(), compiler::Immediate(0xf7));
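// Dead temporaries are overwritten with the marker value 0xf7, presumably so
// that any accidental use of a clobbered temp is easy to spot when debugging.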
Register FlowGraphCompiler::EmitTestCidRegister() {
void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
          compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  __ LoadClassId(class_id_reg, EAX);
    TemporaryRegisterAllocator* tmp) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movl(destination.reg(), source.reg());
      ASSERT(destination.IsStackSlot());
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
    } else if (destination.IsFpuRegister()) {
      ASSERT(destination.IsStackSlot());
      Register scratch = tmp->AllocateTemporary();
      tmp->ReleaseTemporary();
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ movaps(destination.fpu_reg(), source.fpu_reg());
      if (destination.IsDoubleStackSlot()) {
      } else if (destination.IsStackSlot()) {
        ASSERT(destination.IsQuadStackSlot());
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
    } else if (destination.IsStackSlot()) {
      ASSERT(destination.IsDoubleStackSlot());
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      ASSERT(destination.IsQuadStackSlot());
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
    source.constant_instruction()->EmitMoveToLocation(
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();
  ASSERT(src_type.IsFloat() == dst_type.IsFloat());
  ASSERT(src_type.IsInt() == dst_type.IsInt());
  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;
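  // The move below dispatches on the kind of source location (registers, FPU
  // registers, or stack). When the destination is wider than the source, the
  // integer paths sign- or zero-extend: register-to-register widening uses a
  // copy followed by a shift-left/shift-right pair, while loads from the
  // stack use movsx/movzx variants.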
  if (source.IsRegisters()) {
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 4);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
        switch (src_type.AsPrimitive().representation()) {
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ sarl(dst_reg, compiler::Immediate(8));
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ shrl(dst_reg, compiler::Immediate(8));
    } else if (destination.IsFpuRegisters()) {
      ASSERT(destination.IsStack());
      ASSERT(!sign_or_zero_extend);
      const auto& dst = destination.AsStack();
      switch (destination.container_type().SizeInBytes()) {
          __ movl(dst_addr, src_reg);
          __ movw(dst_addr, src_reg);

  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ movaps(dst.fpu_reg(), src.fpu_reg());
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
        __ movsd(dst_addr, src.fpu_reg());
        __ movss(dst_addr, src.fpu_reg());

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      if (!sign_or_zero_extend) {
        __ movl(dst_reg, src_addr);
        switch (src_type.AsPrimitive().representation()) {
            __ movsxb(dst_reg, src_addr);
            __ movsxw(dst_reg, src_addr);
            __ movzxb(dst_reg, src_addr);
            __ movzxw(dst_reg, src_addr);
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
        __ movsd(dst.fpu_reg(), src_addr);
        __ movss(dst.fpu_reg(), src_addr);
      ASSERT(destination.IsStack());
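// Stack sources use width-specific loads: movsxb/movsxw for signed widening,
// movzxb/movzxw for unsigned widening, movl for same-width integer loads, and
// movsd/movss for doubles and floats.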
#define __ compiler_->assembler()->
void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgl(destination.reg(), source.reg());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
  } else if (source.IsStackSlot() && destination.IsRegister()) {
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ movaps(source.fpu_reg(), destination.fpu_reg());
    __ movaps(destination.fpu_reg(), FpuTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    const compiler::Address& slot_address =
      __ movsd(slot_address, reg);
      __ movups(slot_address, reg);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const compiler::Address& source_slot_address =
    const compiler::Address& destination_slot_address =
    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movsd(FpuTMP, source_slot_address);
    __ movsd(ensure_scratch.reg(), destination_slot_address);
    __ movsd(destination_slot_address, FpuTMP);
    __ movsd(source_slot_address, ensure_scratch.reg());
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const compiler::Address& source_slot_address =
    const compiler::Address& destination_slot_address =
    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movups(FpuTMP, source_slot_address);
    __ movups(ensure_scratch.reg(), destination_slot_address);
    __ movups(destination_slot_address, FpuTMP);
    __ movups(source_slot_address, ensure_scratch.reg());
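// Swap strategy: register/register pairs use xchgl, FPU register pairs go
// through FpuTMP, and memory/memory swaps stage both sides in scratch
// registers (a ScratchFpuRegisterScope for double/quad slots) before writing
// them back crosswise.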
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  ScratchRegisterScope ensure_scratch(this, kNoRegister);
  __ MoveMemoryToMemory(dst, src, ensure_scratch.reg());

void ParallelMoveEmitter::Exchange(Register reg,
                                   const compiler::Address& mem) {
  ScratchRegisterScope ensure_scratch(this, reg);
  __ movl(ensure_scratch.reg(), mem);
  __ movl(reg, ensure_scratch.reg());

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  ScratchRegisterScope ensure_scratch1(this, kNoRegister);
  ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg());
  __ movl(ensure_scratch1.reg(), mem1);
  __ movl(ensure_scratch2.reg(), mem2);
  __ movl(mem2, ensure_scratch1.reg());
  __ movl(mem1, ensure_scratch2.reg());

void ParallelMoveEmitter::Exchange(Register reg,
                                   intptr_t stack_offset) {

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   intptr_t stack_offset2) {

void ParallelMoveEmitter::SpillScratch(Register reg) {

void ParallelMoveEmitter::RestoreScratch(Register reg) {

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ movups(compiler::Address(ESP, 0), reg);

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ movups(reg, compiler::Address(ESP, 0));
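// SpillFpuScratch / RestoreFpuScratch park an XMM register at the top of the
// stack with movups; the full implementation presumably adjusts ESP by
// kFpuRegisterSize around these stores.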