#if defined(TARGET_ARCH_IA32)

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
    ASSERT(!block_info_[i]->jump_label()->HasNear());
  }

  return FLAG_enable_simd_inline;

  intrinsic_mode_ = true;

  intrinsic_mode_ = false;

TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }
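
  // The deopt info records how to rebuild the unoptimized frame(s) at this
  // point: materialization arguments first, then, per environment from
  // innermost to outermost, the return address, locals and outgoing
  // arguments, the PC marker and caller FP, and finally the caller PC and
  // the incoming arguments of the outermost frame.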

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  EmitMaterializations(deopt_env_, builder);

  builder->AddCallerFp(slot_ix++);

  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  builder->AddPcMarker(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
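
  // Walk the chain of outer environments; each corresponds to an inlined
  // caller frame that must also be rebuilt on deoptimization.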
  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    builder->AddPcMarker(current->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    previous = current;
    current = current->outer();
  }
  ASSERT(previous != nullptr);

  builder->AddCallerPc(slot_ix++);

  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
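
// Code for a deoptimization stub: optionally trap (flag-controlled) before
// calling into the shared Deoptimize stub.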
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  if (FLAG_trap_on_deoptimization) {
    __ int3();
  }
  __ Call(StubCode::Deoptimize());
#undef __

#define __ assembler()->
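
// Jumps to is_true or is_false depending on the Bool in bool_register, and
// falls through when the register holds null.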
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  const compiler::Immediate& raw_null =
      compiler::Immediate(static_cast<intptr_t>(Object::null()));
  compiler::Label fall_through;
  __ cmpl(bool_register, raw_null);
  __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition = EmitBoolTest(bool_register, labels, false);
  __ j(true_condition, is_true);
  __ jmp(is_false);
  __ Bind(&fall_through);
}
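
// Calls the SubtypeTestCache stub matching the number of inputs used by the
// given test kind. Inputs that the chosen stub does not consume are pushed as
// null, and the cache register doubles as the result register.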

SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
  const SubtypeTestCache& type_test_cache =
      SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New(num_inputs));
  const auto& stub_entry =
      StubCode::SubtypeTestCacheStubForUsedInputs(num_inputs);
  if (num_inputs >= 7) {
    __ PushObject(Object::null_object());
  }
  if (num_inputs >= 3) {
    __ PushObject(Object::null_object());
  }
  if (num_inputs >= 4) {
    __ PushObject(Object::null_object());
  }
235 "Code assumes cache and result register are the same");
242 is_not_instance_lbl);
243 return type_test_cache.ptr();
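
// Emits a type check for AssertAssignable. When the destination type is a
// compile-time constant the check can be specialized; otherwise the type is
// only known at runtime and the generic stubs are used.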

void FlowGraphCompiler::GenerateAssertAssignable(CompileType* receiver_type,
                                                 const InstructionSource& source,
                                                 intptr_t deopt_id,
                                                 Environment* env,
                                                 const String& dst_name,
                                                 LocationSummary* locs) {
  const auto& dst_type =
      locs->in(AssertAssignableInstr::kDstTypePos).IsConstant()
          ? AbstractType::Cast(
                locs->in(AssertAssignableInstr::kDstTypePos).constant())
          : Object::null_abstract_type();

  if (!dst_type.IsNull()) {
    ASSERT(dst_type.IsFinalized());
    if (dst_type.IsTopTypeForSubtyping()) return;
  }

  compiler::Label is_assignable, runtime_call;

  if (dst_type.IsNull()) {
    __ Comment("AssertAssignable for runtime type");
    GenerateNonLazyDeoptableStubCall(source,
                                     StubCode::TypeIsTopTypeForSubtyping(),
                                     UntaggedPcDescriptors::kOther, locs);
                                     UntaggedPcDescriptors::kOther, locs);
    test_cache = GenerateCallSubtypeTestStub(kTestTypeMaxArgs, &is_assignable,
                                             &runtime_call);
  } else {
    __ Comment("AssertAssignable for compile-time type");
    __ BranchIf(EQUAL, &is_assignable);
  }

  __ Bind(&runtime_call);
323 "Expected AssertAssignable to have 4 inputs");

  if (!dst_type.IsNull()) {
    __ PushObject(dst_type);
  }
                   UntaggedPcDescriptors::kOther, locs, deopt_id, env);

  __ Bind(&is_assignable);
}
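
// Frame entry for JIT code: bump the function's usage counter and, once it
// crosses the optimization threshold, jump to the optimizing entry point
// reachable through the THR register.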

void FlowGraphCompiler::EmitFrameEntry() {
  __ Comment("Invocation Count Check");
  __ incl(compiler::FieldAddress(function_reg,
                                 Function::usage_counter_offset()));
  __ cmpl(
      compiler::FieldAddress(function_reg, Function::usage_counter_offset()),
      compiler::Immediate(GetOptimizationThreshold()));
  compiler::Label dont_optimize;
  __ j(LESS, &dont_optimize);
  __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
  __ Bind(&dont_optimize);

  __ Comment("Enter frame");

const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}

  const intptr_t num_locals = parsed_function().num_stack_locals();
  intptr_t args_desc_slot = -1;
  if (parsed_function().has_arg_desc_var()) {
    args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
        parsed_function().arg_desc_var());
  }
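
  // Fill every local's stack slot with null (or with the arguments descriptor
  // for its reserved slot) so the GC never observes uninitialized values in
  // the frame.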
  __ Comment("Initialize spill slots");
  if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
    const compiler::Immediate& raw_null =
        compiler::Immediate(static_cast<intptr_t>(Object::null()));
    __ movl(EAX, raw_null);
  }
  for (intptr_t i = 0; i < num_locals; ++i) {
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
    Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
    __ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
  }

  const intptr_t slot_index =
      compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().suspend_state_var());
  __ LoadObject(EAX, Object::null_object());

  if (stub.InVMIsolateHeap()) {

          compiler::target::Code::entry_point_offset()));

void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  __ Call(stub, false, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  const auto& stub = StubCode::CallStaticFunction();
  __ Call(stub, true, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
  AddStaticCallTarget(target, entry_kind);
}

void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const auto& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(ECX, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);

  __ Comment("Edge counter");
  __ LoadObject(EAX, edge_counters_array_);
  __ IncrementSmiField(
      compiler::FieldAddress(EAX, Array::element_offset(edge_id)), 1);

void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub, const ICData& ic_data, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  __ movl(EBX, compiler::Address(
                   ESP, (ic_data.SizeWithTypeArgs() - 1) * kWordSize));

void FlowGraphCompiler::EmitInstanceCallJIT(
    const Code& stub, const ICData& ic_data, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  __ movl(EBX, compiler::Address(
                   ESP, (ic_data.SizeWithTypeArgs() - 1) * kWordSize));
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
                       pending_deoptimization_env_);

void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name, const Array& arguments_descriptor, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);

  __ Comment("MegamorphicCall");
  __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
  __ call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));

  ASSERT(!FLAG_precompiled_mode);

void FlowGraphCompiler::EmitInstanceCallAOT(
    const ICData& ic_data, intptr_t deopt_id, const InstructionSource& source,
    LocationSummary* locs, Code::EntryKind entry_kind,
    bool receiver_can_be_smi) {

void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function, const Array& arguments_descriptor,
    intptr_t size_with_type_args, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {

void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset, const Array& arguments_descriptor) {

Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg, const Object& obj, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));
  if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
    ASSERT(!needs_number_check);
    __ testl(reg, reg);
    return EQUAL;
  }
  if (needs_number_check) {
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
  } else {
    __ CompareObject(reg, obj);
  }
  return EQUAL;
}

Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }

  __ Comment("BoolTest");
  __ testl(value, compiler::Immediate(
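
// Slow-path support: spill every live FPU and general-purpose register before
// a call and restore them afterwards.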

  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);

  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
    if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
      __ movups(compiler::Address(ESP, offset), xmm_reg);

    if (locs->live_registers()->ContainsRegister(reg)) {

    if (locs->live_registers()->ContainsRegister(reg)) {

  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
    if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
      __ movups(xmm_reg, compiler::Address(ESP, offset));

void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movl(tmp.reg(), compiler::Immediate(0xf7));
    }
  }
}

Register FlowGraphCompiler::EmitTestCidRegister() {

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into EAX.
  __ movl(EAX,
          compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  __ LoadClassId(class_id_reg, EAX);
}
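
// Moves a value between arbitrary locations (registers, FPU registers and
// stack slots), using the given temporary-register allocator when a scratch
// register is needed for a memory-to-memory move.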

void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* tmp) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movl(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
    } else if (destination.IsFpuRegister()) {
    } else {
      ASSERT(destination.IsStackSlot());
      Register scratch = tmp->AllocateTemporary();
      tmp->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ movaps(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsDoubleStackSlot()) {
      } else if (destination.IsStackSlot()) {
      } else {
        ASSERT(destination.IsQuadStackSlot());
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
    } else if (destination.IsStackSlot()) {
    } else {
      ASSERT(destination.IsDoubleStackSlot());
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
    } else {
      ASSERT(destination.IsQuadStackSlot());
    }
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.Component(i), source.Component(i), tmp);
    }
  } else {
    ASSERT(source.IsConstant());
    source.constant_instruction()->EmitMoveToLocation(this, destination);
  }
}

void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
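  // Moves a primitive value between native locations (registers, FPU
  // registers and the stack), sign- or zero-extending narrow integers when
  // the destination is wider than the source.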
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();
  ASSERT(src_type.IsFloat() == dst_type.IsFloat());
  ASSERT(src_type.IsInt() == dst_type.IsInt());
  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 4);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        switch (src_type.AsPrimitive().representation()) {
            // Sign-extend narrow values.
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ sarl(dst_reg, compiler::Immediate(8));
            // Zero-extend narrow values.
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ shrl(dst_reg, compiler::Immediate(8));
        }
      }
    } else if (destination.IsFpuRegisters()) {
    } else {
      ASSERT(destination.IsStack());
      ASSERT(!sign_or_zero_extend);
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      switch (destination.container_type().SizeInBytes()) {
        case 4:
          __ movl(dst_addr, src_reg);
          return;
        case 2:
          __ movw(dst_addr, src_reg);
          return;
      }
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ movaps(dst.fpu_reg(), src.fpu_reg());
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      switch (dst_size) {
        case 8:
          __ movsd(dst_addr, src.fpu_reg());
          return;
        case 4:
          __ movss(dst_addr, src.fpu_reg());
          return;
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    const auto src_addr = NativeLocationToStackSlotAddress(src);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      if (!sign_or_zero_extend) {
        __ movl(dst_reg, src_addr);
      } else {
        switch (src_type.AsPrimitive().representation()) {
            __ movsxb(dst_reg, src_addr);
            __ movsxw(dst_reg, src_addr);
            __ movzxb(dst_reg, src_addr);
            __ movzxw(dst_reg, src_addr);
        }
      }
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (src_size) {
        case 8:
          __ movsd(dst.fpu_reg(), src_addr);
          return;
        case 4:
          __ movss(dst.fpu_reg(), src_addr);
          return;
      }
    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();
    }
  }
}

#define __ compiler_->assembler()->
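
// Swaps the contents of two locations: register pairs use xchgl, while FPU
// and stack operands go through FpuTMP and scratch registers.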

void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgl(destination.reg(), source.reg());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
  } else if (source.IsStackSlot() && destination.IsRegister()) {
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ movaps(FpuTMP, source.fpu_reg());
    __ movaps(source.fpu_reg(), destination.fpu_reg());
    __ movaps(destination.fpu_reg(), FpuTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    XmmRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    const compiler::Address& slot_address =
        source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
                               : LocationToStackSlotAddress(source);
    if (double_width) {
      __ movsd(FpuTMP, slot_address);
      __ movsd(slot_address, reg);
    } else {
      __ movups(FpuTMP, slot_address);
      __ movups(slot_address, reg);
    }
    __ movaps(reg, FpuTMP);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movsd(FpuTMP, source_slot_address);
    __ movsd(ensure_scratch.reg(), destination_slot_address);
    __ movsd(destination_slot_address, FpuTMP);
    __ movsd(source_slot_address, ensure_scratch.reg());
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movups(FpuTMP, source_slot_address);
    __ movups(ensure_scratch.reg(), destination_slot_address);
    __ movups(destination_slot_address, FpuTMP);
    __ movups(source_slot_address, ensure_scratch.reg());
  } else {
    UNREACHABLE();
  }
}

void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  ScratchRegisterScope ensure_scratch(this, kNoRegister);
  __ MoveMemoryToMemory(dst, src, ensure_scratch.reg());
}
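
// The Exchange helpers swap operands that cannot be exchanged directly;
// memory operands are staged through registers obtained from
// ScratchRegisterScope.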

void ParallelMoveEmitter::Exchange(Register reg,
                                   const compiler::Address& mem) {
  ScratchRegisterScope ensure_scratch(this, reg);
  __ movl(ensure_scratch.reg(), mem);
  __ movl(mem, reg);
  __ movl(reg, ensure_scratch.reg());
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  ScratchRegisterScope ensure_scratch1(this, kNoRegister);
  ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg());
  __ movl(ensure_scratch1.reg(), mem1);
  __ movl(ensure_scratch2.reg(), mem2);
  __ movl(mem2, ensure_scratch1.reg());
  __ movl(mem1, ensure_scratch2.reg());
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ pushl(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ popl(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ subl(ESP, compiler::Immediate(kFpuRegisterSize));
  __ movups(compiler::Address(ESP, 0), reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ movups(reg, compiler::Address(ESP, 0));
  __ addl(ESP, compiler::Immediate(kFpuRegisterSize));
}

#undef __

#endif  // defined(TARGET_ARCH_IA32)