#if defined(TARGET_ARCH_X64)

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  if (FLAG_precompiled_mode) {
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }

    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
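// Note: in precompiled (AOT) mode the write-barrier wrapper and array
// write-barrier stubs are reached through unrelocated PC-relative calls;
// AddPcRelativeCallStubTarget records each target so the relocation can be
// resolved when the code is finalized.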
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
    ASSERT(!block_info_[i]->jump_label()->HasNear());
  }
  return FLAG_enable_simd_inline;

  intrinsic_mode_ = true;

  intrinsic_mode_ = false;
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  EmitMaterializations(deopt_env_, builder);

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }
  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    previous = current;
    current = current->outer();
  }
  ASSERT(previous != nullptr);
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
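// In essence, CreateDeoptInfo walks the environment chain from the innermost
// (deoptimizing) frame outwards: for each frame it records PP, PC marker,
// caller-FP and return-address slots, then copies outgoing arguments and
// locals, and finally describes the outermost frame's caller slots and
// incoming arguments. The per-slot instructions are encoded into the deopt
// table entry returned to the caller.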
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  if (FLAG_trap_on_deoptimization) {
    __ int3();
  }
  __ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
#undef __

#define __ assembler->
void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ Call(compiler::FieldAddress(
      reg_with_type,
      compiler::target::AbstractType::type_test_stub_entry_point_offset()));
}
#undef __
#define __ assembler()->

// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ j(EQUAL, &fall_through);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition =
      EmitBoolTest(bool_register, labels, /*invert=*/false);
  __ j(true_condition, is_true);
  __ jmp(is_false);
  __ Bind(&fall_through);
}
void FlowGraphCompiler::EmitFrameEntry() {
  if (!flow_graph().graph_entry()->NeedsFrame()) {
    if (FLAG_precompiled_mode) {

      __ Comment("Invocation Count Check");
      __ movq(function_reg,
              compiler::FieldAddress(CODE_REG, Code::owner_offset()));

      __ incl(compiler::FieldAddress(function_reg,
                                     Function::usage_counter_offset()));
      __ cmpl(compiler::FieldAddress(function_reg,
                                     Function::usage_counter_offset()),
              compiler::Immediate(GetOptimizationThreshold()));
      compiler::Label dont_optimize;
      __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));

  __ Comment("Enter frame");

const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
    intptr_t args_desc_slot = -1;

    __ Comment("Initialize spill slots");
    if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
      __ LoadObject(RAX, Object::null_object());
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
      __ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
    }
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariable(
            parsed_function().suspend_state_var());
    __ LoadObject(RAX, Object::null_object());
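// The prologue null-initializes every stack-allocated local (and, when
// present, the synthetic suspend-state slot) so that the GC and exception
// handling never observe uninitialized frame slots before the first store.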
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    __ Call(stub, snapshot_behavior);
  }
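// Stub calls prefer an unrelocated PC-relative call when the target can be
// reached that way (bare-instructions AOT, same loading unit); otherwise the
// call goes through the object pool.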
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ jmp(compiler::FieldAddress(
        CODE_REG, compiler::target::Code::entry_point_offset()));
  }

  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ jmp(compiler::FieldAddress(
        CODE_REG, compiler::target::Code::entry_point_offset()));
  }
                                              const InstructionSource& source,
                                              LocationSummary* locs,
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);

                                         const InstructionSource& source,
                                         LocationSummary* locs,
  __ CallPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
                                               const InstructionSource& source,
                                               LocationSummary* locs,
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    const auto& stub_entry = StubCode::CallStaticFunction();
    __ CallWithEquivalence(stub_entry, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
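// Static Dart calls are emitted either as direct PC-relative calls (AOT,
// target in the same loading unit) or as patchable calls through the
// CallStaticFunction stub; in both cases the target function is recorded so
// the call can be bound or patched later.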
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(RBX, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);

  __ Comment("Edge counter");
  __ LoadObject(RAX, edge_counters_array_);
  __ IncrementCompressedSmiField(
      compiler::FieldAddress(RAX, Array::element_offset(edge_id)), 1);
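// Unoptimized code counts executions by incrementing a Smi element of
// edge_counters_array_; these counters feed the profile information used
// when the function is later optimized.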
                                                  const ICData& ic_data,
                                                  const InstructionSource& source,
                                                  LocationSummary* locs,
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));

                                            const ICData& ic_data,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
                                                 const Array& arguments_descriptor,
                                                 const InstructionSource& source,
                                                 LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);

  __ Comment("MegamorphicCall");
  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
                                             const InstructionSource& source,
                                             LocationSummary* locs,
                                             Code::EntryKind entry_kind,
                                             bool receiver_can_be_smi) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  const auto snapshot_behavior =
      FLAG_precompiled_mode
          ? compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint
          : compiler::ObjectPoolBuilderEntry::kSnapshotable;
  __ LoadUniqueObject(RCX, initial_stub, snapshot_behavior);
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
                       pending_deoptimization_env_);
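// AOT instance calls are "switchable": the call site starts out linked to the
// SwitchableCallMiss stub with an UnlinkedCall object describing the selector,
// and is later patched (via the object pool entries loaded above) to
// monomorphic or megamorphic dispatch as observed receiver types require.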
                                              const Array& arguments_descriptor,
                                              intptr_t size_with_type_args,
                                              const InstructionSource& source,
                                              LocationSummary* locs,
  if (!FLAG_precompiled_mode) {
                                               int32_t selector_offset,
                                               const Array& arguments_descriptor) {
  ASSERT(cid_reg != table_reg);
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  __ LoadDispatchTable(table_reg);
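// Dispatch-table calls (AOT) index a global table by receiver class id plus a
// per-selector offset and call through the resulting entry, avoiding any
// IC or megamorphic cache lookup.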
                                                    bool needs_number_check,
                                                    const InstructionSource& source,
  ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));

  if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
    ASSERT(!needs_number_check);
  }

  if (needs_number_check) {
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
  } else {
    __ CompareObject(reg, obj);
  }

                                                   bool needs_number_check,
                                                   const InstructionSource& source,
  if (needs_number_check) {
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
  } else {
    __ CompareObjectRegisters(left, right);
  }
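// Equality used for 'identical' must compare Mints and Doubles by value, so
// when needs_number_check is set the comparison is routed through the
// (Un)OptimizedIdenticalWithNumberCheck stubs; otherwise a direct object
// comparison suffices.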
  __ Comment("BoolTest");
  __ testq(value, compiler::Immediate(
                      compiler::target::ObjectAlignment::kBoolValueMask));

  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);

  __ PushRegisters(*locs->live_registers());

  __ PopRegisters(*locs->live_registers());
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movq(tmp.reg(), compiler::Immediate(0xf7));
    }
  }
}
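// Debug-only sanity measure: any temp register that is not in the live set is
// overwritten with the marker value 0xf7, so code that incorrectly relies on
// a dead temporary surviving a slow-path call fails loudly in debug builds.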
Register FlowGraphCompiler::EmitTestCidRegister() {

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into RAX.
  __ movq(RAX,
          compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  __ LoadClassId(class_id_reg, RAX);
}
                                 TemporaryRegisterAllocator* tmp) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movq(destination.reg(), source.reg());
      ASSERT(destination.IsStackSlot());
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
    } else if (destination.IsFpuRegister()) {
      __ movq(destination.fpu_reg(), TMP);
      ASSERT(destination.IsStackSlot());
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ movaps(destination.fpu_reg(), source.fpu_reg());
      if (destination.IsDoubleStackSlot()) {
        ASSERT(destination.IsQuadStackSlot());
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot());
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      ASSERT(destination.IsQuadStackSlot());
  } else {
    source.constant_instruction()->EmitMoveToLocation(this, destination);
  }
}
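// EmitMove dispatches purely on the (source, destination) location kinds:
// register, stack slot, FPU register, double/quad stack slot, or constant.
// Memory-to-memory transfers are staged through the TMP / FpuTMP scratch
// registers, and constants are emitted by their defining instruction.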
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();
  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 8);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        switch (src_type.AsPrimitive().representation()) {
          // Sign-extending moves.
          __ movsxb(dst_reg, src_reg);
          __ movsxw(dst_reg, src_reg);
          __ movsxd(dst_reg, src_reg);
          __ MoveRegister(dst_reg, src_reg);
          __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
          __ sarq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
          // Zero-extending moves.
          __ movzxb(dst_reg, src_reg);
          __ movzxw(dst_reg, src_reg);
          __ movl(dst_reg, src_reg);
          __ MoveRegister(dst_reg, src_reg);
          __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
          __ shrq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
        }
      }
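      // For widths without a dedicated movsx/movzx form the value is widened
      // by shifting left so its top bit reaches bit 63 and then shifting back:
      // arithmetically (sarq) for signed values, logically (shrq) for unsigned
      // ones. E.g. for a 2-byte payload the shift count is 64 - 2 * 8 = 48.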
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      ASSERT(src_size == dst_size);
      __ movq(dst.fpu_reg(), src_reg);
      __ movd(dst.fpu_reg(), src_reg);

    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      switch (destination.container_type().SizeInBytes()) {
        __ movq(dst_addr, src_reg);
        __ movl(dst_addr, src_reg);
        __ movw(dst_addr, src_reg);
      }
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
      ASSERT(src_size == dst_size);
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      __ movq(dst_reg, src.fpu_reg());
      __ movl(dst_reg, src.fpu_reg());

    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ movaps(dst.fpu_reg(), src.fpu_reg());

    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
      __ movsd(dst_addr, src.fpu_reg());
      __ movss(dst_addr, src.fpu_reg());
    }

  } else {
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      __ movsd(dst.fpu_reg(), src_addr);
      __ movss(dst.fpu_reg(), src_addr);
    } else {
      ASSERT(destination.IsStack());
    }
  }
}
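// EmitNativeMoveArchitecture is the FFI marshalling move: it dispatches on
// whether each side is a CPU register, an FPU register, or a stack location,
// asserts that payload sizes and signedness line up, and only widens values
// when moving into a larger integer register.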
void FlowGraphCompiler::EmitNativeLoad(Register dst,

  __ shlq(TMP, compiler::Immediate(16));
  __ shlq(TMP, compiler::Immediate(16));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(48));
  __ shlq(TMP, compiler::Immediate(32));
  __ shlq(TMP, compiler::Immediate(48));
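// EmitNativeLoad handles primitive widths with a direct mov form in a single
// instruction; odd widths (3, 5, 6 and 7 bytes) are assembled from smaller
// loads that are shifted into position via TMP (the shlq sequences above)
// and or-ed into dst.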
  compiler::Label skip_reloc;
  __ jmp(&skip_reloc);
  InsertBSSRelocation(relocation);
  const intptr_t reloc_end = __ CodeSize();
  __ Bind(&skip_reloc);

  const intptr_t kLeaqLength = 7;
  __ leaq(dst, compiler::Address::AddressRIPRelative(
                   -kLeaqLength - compiler::target::kWordSize));
  ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);

  // dst holds the address of the relocation word.
  __ movq(tmp, compiler::Address(dst, 0));

  // tmp holds the relocation: the distance from that word to the BSS start.
  __ addq(dst, tmp);

  // dst now points at the BSS section; load the routine stored there.
  __ movq(dst, compiler::Address(dst, 0));
}
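// LoadBSSEntry works without an object pool: a relocation word is embedded in
// the instruction stream (skipped over at runtime), a RIP-relative leaq
// recovers that word's address, the stored delta is added to reach the BSS
// section, and the final load fetches the routine pointer kept there.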
#define __ compiler_->assembler()->

void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgq(destination.reg(), source.reg());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
  } else if (source.IsStackSlot() && destination.IsRegister()) {
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ movaps(FpuTMP, source.fpu_reg());
    __ movaps(source.fpu_reg(), destination.fpu_reg());
    __ movaps(destination.fpu_reg(), FpuTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    XmmRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    compiler::Address slot_address =
        source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
                               : LocationToStackSlotAddress(source);
    __ movsd(slot_address, reg);
    __ movups(slot_address, reg);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);
    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movsd(FpuTMP, source_slot_address);
    __ movsd(ensure_scratch.reg(), destination_slot_address);
    __ movsd(destination_slot_address, FpuTMP);
    __ movsd(source_slot_address, ensure_scratch.reg());
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);
    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movups(FpuTMP, source_slot_address);
    __ movups(ensure_scratch.reg(), destination_slot_address);
    __ movups(destination_slot_address, FpuTMP);
    __ movups(source_slot_address, ensure_scratch.reg());
  }
}
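// Swaps never allocate: register pairs use xchgq, FPU pairs rotate through
// FpuTMP, and memory-to-memory swaps stage one operand in FpuTMP and the
// other in a ScratchFpuRegisterScope so no allocator-visible register is
// disturbed while the parallel move is resolved.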
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {

void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  __ Exchange(reg, mem);
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  __ Exchange(mem1, mem2);
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   intptr_t stack_offset) {

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   intptr_t stack_offset2) {

void ParallelMoveEmitter::SpillScratch(Register reg) {

void ParallelMoveEmitter::RestoreScratch(Register reg) {

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ movups(compiler::Address(RSP, 0), reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ movups(reg, compiler::Address(RSP, 0));
}

#endif  // defined(TARGET_ARCH_X64)