#if defined(TARGET_ARCH_X64)
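// X64 back end of the Dart VM flow graph compiler: frame entry and prologue
// emission, Dart and stub call sequences, deoptimization support, FFI native
// moves, and the parallel move emitter.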
namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  if (FLAG_precompiled_mode) {
    auto object_store = isolate_group()->object_store();

    const auto& stub =
        Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }

    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
    ASSERT(!block_info_[i]->jump_label()->HasNear());
  }
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return FLAG_enable_simd_inline;
}

void FlowGraphCompiler::EnterIntrinsicMode() {
  intrinsic_mode_ = true;
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  intrinsic_mode_ = false;
}
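// CompilerDeoptInfo::CreateDeoptInfo translates the chain of deoptimization
// environments (innermost frame outward) into a DeoptInfo table entry: for
// each frame it records PP, PC marker, caller FP and return address slots,
// then copies the frame's values (locals, outgoing and incoming arguments) so
// the deoptimizer can materialize the equivalent unoptimized frames.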
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  // Slots for the innermost frame.
  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack of the bottom-most frame.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // The values of outgoing arguments can be changed from the inlined call,
    // so they must be read from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // 'previous' is now the outermost environment.
  ASSERT(previous != nullptr);

  // Set slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
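// Deoptimization stubs share a single trampoline: the generated code simply
// calls the deoptimize entry stored in the current Thread. When
// --trap_on_deoptimization is set, a breakpoint instruction is emitted first
// so the deoptimization can be caught in a debugger.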
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ int3();
  }
  __ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
  set_pc_offset(assembler->CodeSize());
#undef __
}
#define __ assembler->

void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ Call(compiler::FieldAddress(
      reg_with_type,
      compiler::target::AbstractType::type_test_stub_entry_point_offset()));
}
#undef __
#define __ assembler()->

// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition =
      EmitBoolTest(bool_register, labels, /*invert=*/false);
  ASSERT(true_condition != kInvalidCondition);
  __ j(true_condition, is_true);
  __ jmp(is_false);
  __ Bind(&fall_through);
}
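// EmitFrameEntry: frameless graphs return early. Otherwise, for functions
// that may still be (re)optimized, the usage counter stored in the Function
// object is incremented and compared against the optimization threshold; when
// the threshold is reached the code jumps to the Thread's optimize entry
// before entering the Dart frame.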
void FlowGraphCompiler::EmitFrameEntry() {
  if (!flow_graph().graph_entry()->NeedsFrame()) {
    if (FLAG_precompiled_mode) {
      assembler()->set_constant_pool_allowed(true);
    }
    return;
  }
  // ...
  if (CanOptimizeFunction() && function().IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = RDI;
    __ movq(function_reg,
            compiler::FieldAddress(CODE_REG, Code::owner_offset()));
    // Reoptimization is triggered by counting in IC stubs, not at entry.
    if (!is_optimizing()) {
      __ incl(compiler::FieldAddress(function_reg,
                                     Function::usage_counter_offset()));
    }
    __ cmpl(compiler::FieldAddress(function_reg,
                                   Function::usage_counter_offset()),
            compiler::Immediate(GetOptimizationThreshold()));
    compiler::Label dont_optimize;
    __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
    __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
    __ Bind(&dont_optimize);
  }
  __ Comment("Enter frame");
  // ...
}
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
  if (!is_optimizing()) {
    // In unoptimized code, initialize (non-argument) stack-allocated slots.
    const int num_locals = parsed_function().num_stack_locals();

    intptr_t args_desc_slot = -1;
    if (parsed_function().has_arg_desc_var()) {
      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().arg_desc_var());
    }

    __ Comment("Initialize spill slots");
    if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
      __ LoadObject(RAX, Object::null_object());
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
      __ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
    }
  } else if (parsed_function().suspend_state_var() != nullptr &&
             !flow_graph().IsCompiledForOsr()) {
    // Initialize the synthetic :suspend_state variable early, since it may be
    // accessed by GC and exception handling before the function is set up.
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariable(
            parsed_function().suspend_state_var());
    __ LoadObject(RAX, Object::null_object());
    __ movq(compiler::Address(RBP, slot_index * kWordSize), RAX);
  }
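// The stub call helpers below choose between two dispatch forms: a
// pc-relative (tail) call when the target stub is guaranteed to be in range,
// or a call through CODE_REG using an object-pool entry otherwise.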
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    __ Call(stub, snapshot_behavior);
    AddStubCallTarget(stub);
  }
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ jmp(compiler::FieldAddress(
        CODE_REG, compiler::target::Code::entry_point_offset()));
    AddStubCallTarget(stub);
  }
  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ jmp(compiler::FieldAddress(
        CODE_REG, compiler::target::Code::entry_point_offset()));
    AddStubCallTarget(stub);
  }
void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source, const Code& stub,
    UntaggedPcDescriptors::Kind kind, LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  __ CallPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}
void FlowGraphCompiler::GenerateDartCall(
    intptr_t deopt_id, const InstructionSource& source, const Code& stub,
    UntaggedPcDescriptors::Kind kind, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  __ CallPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}
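// Static Dart calls either use an un-relocated pc-relative call (fixed up
// during relocation) or go through the CallStaticFunction stub with an
// equivalence-class object-pool entry, so that call sites targeting the same
// function can share a pool slot.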
void FlowGraphCompiler::GenerateStaticDartCall(
    intptr_t deopt_id, const InstructionSource& source,
    UntaggedPcDescriptors::Kind kind, LocationSummary* locs,
    const Function& target, Code::EntryKind entry_kind) {
  ASSERT(is_optimizing());
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    // Call sites to the same target can share an object pool entry. These
    // call sites are never patched for breakpoints: the function is
    // deoptimized and the unoptimized code with IC calls is patched instead.
    const auto& stub_entry = StubCode::CallStaticFunction();
    __ CallWithEquivalence(stub_entry, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    const ICData& ic_data, Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(RBX, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
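// EmitEdgeCounter bumps the Smi counter for the given edge id in
// edge_counters_array_; no overflow check is emitted, since functions are
// expected to be optimized long before a counter could wrap.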
  __ Comment("Edge counter");
  __ LoadObject(RAX, edge_counters_array_);
  __ IncrementCompressedSmiField(
      compiler::FieldAddress(RAX, compiler::target::Array::element_offset(edge_id)),
      1);
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub, const ICData& ic_data, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  // Pass the function explicitly: due to inlining the ICData's function may
  // not be the top-level function whose usage counter must be incremented.
  __ LoadObject(RDI, parsed_function().function());
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  __ LoadUniqueObject(RBX, ic_data);
  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall,
                   locs, entry_kind);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitInstanceCallJIT(
    const Code& stub, const ICData& ic_data, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  __ LoadUniqueObject(RBX, ic_data);
  __ LoadUniqueObject(CODE_REG, stub);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
  __ Comment("MegamorphicCall");
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(RSP, (args_desc.Count() - 1) * kWordSize));
  // Use the same code pattern as instance calls so it can be parsed by the
  // code patcher.
  __ LoadUniqueObject(IC_DATA_REG, cache);
  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
  // ...
}
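// AOT instance calls are emitted as switchable calls: the receiver is loaded,
// the initial target is the SwitchableCallMiss stub, and the UnlinkedCall
// data is passed in RBX, so the runtime can later rebind the call site to a
// monomorphic or megamorphic target. A receiver that can never be a Smi is
// recorded on the ICData.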
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Load receiver into RDX.
  __ movq(RDX, compiler::Address(
                   RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  if (FLAG_precompiled_mode) {
    // The AOT runtime will replace the slot in the object pool with the
    // entry point address - see app_snapshot.cc.
    const auto snapshot_behavior =
        compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
    __ LoadUniqueObject(RCX, initial_stub, snapshot_behavior);
  } else {
    const intptr_t entry_point_offset =
        entry_kind == Code::EntryKind::kNormal
            ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
            : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
    __ LoadUniqueObject(CODE_REG, initial_stub);
    __ movq(RCX, compiler::FieldAddress(CODE_REG, entry_point_offset));
  }
  __ LoadUniqueObject(RBX, data);
  __ call(RCX);

  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function, const Array& arguments_descriptor,
    intptr_t size_with_type_args, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(!function.IsClosureFunction());
  if (function.PrologueNeedsArgumentsDescriptor()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  } else if (!FLAG_precompiled_mode) {
    __ xorl(ARGS_DESC_REG, ARGS_DESC_REG);  // GC-safe Smi zero.
  }
  // ...
}
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  const Register cid_reg = DispatchTableNullErrorABI::kClassIdReg;
  const Register table_reg = RAX;
  ASSERT(cid_reg != table_reg);
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  const intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
                          compiler::target::kWordSize;
  __ LoadDispatchTable(table_reg);
  __ call(compiler::Address(table_reg, cid_reg, TIMES_8, offset));
}
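// The equality helpers below compare a register against a constant or two
// registers against each other. When identity semantics require a number
// check (Mint/Double), the operands are pushed and an
// identical-with-number-check stub leaves the comparison result in the flags;
// otherwise a plain object compare is emitted. Both return the condition to
// branch on.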
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg, const Object& obj, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));
  if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
    ASSERT(!needs_number_check);
    __ OBJ(test)(reg, reg);
    return EQUAL;
  }
  if (needs_number_check) {
    __ pushq(reg);
    __ PushObject(obj);
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ popq(reg);  // Discard constant.
    __ popq(reg);  // Restore 'reg'.
  } else {
    __ CompareObject(reg, obj);
  }
  return EQUAL;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    __ pushq(left);
    __ pushq(right);
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ popq(right);
    __ popq(left);
  } else {
    __ CompareObjectRegisters(left, right);
  }
  return EQUAL;
}
Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  __ testq(value, compiler::Immediate(
                      compiler::target::ObjectAlignment::kBoolValueMask));
  return invert ? NOT_EQUAL : EQUAL;
}
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif
  __ PushRegisters(*locs->live_registers());
}

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movq(tmp.reg(), compiler::Immediate(0xf7));
    }
  }
}
Register FlowGraphCompiler::EmitTestCidRegister() {
  return RDI;
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into RAX.
  __ movq(RAX,
          compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  __ testq(RAX, compiler::Immediate(kSmiTagMask));
  // Jump if receiver is (not) a Smi.
  __ j(if_smi ? ZERO : NOT_ZERO, label);
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != RAX);
  __ LoadClassId(class_id_reg, RAX);
}
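// EmitMove lowers a single Location-to-Location move. Register, stack and FPU
// combinations map onto movq/movsd/movups, with TMP or FpuTMP as an
// intermediary for memory-to-memory cases; constant sources are delegated to
// the constant instruction's EmitMoveToLocation.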
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* tmp) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movq(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      __ movq(LocationToStackSlotAddress(destination), source.reg());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ movq(destination.reg(), LocationToStackSlotAddress(source));
    } else if (destination.IsFpuRegister()) {
      // 32-bit float.
      __ movq(TMP, LocationToStackSlotAddress(source));
      __ movq(destination.fpu_reg(), TMP);
    } else {
      ASSERT(destination.IsStackSlot());
      __ MoveMemoryToMemory(LocationToStackSlotAddress(destination),
                            LocationToStackSlotAddress(source));
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      // The optimization manual recommends MOVAPS for register-to-register
      // moves.
      __ movaps(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsDoubleStackSlot()) {
        __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg());
      } else {
        ASSERT(destination.IsQuadStackSlot());
        __ movups(LocationToStackSlotAddress(destination), source.fpu_reg());
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source));
    } else {
      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot() /*32-bit float*/);
      __ movsd(FpuTMP, LocationToStackSlotAddress(source));
      __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source));
    } else {
      ASSERT(destination.IsQuadStackSlot());
      __ movups(FpuTMP, LocationToStackSlotAddress(source));
      __ movups(LocationToStackSlotAddress(destination), FpuTMP);
    }
  } else {
    ASSERT(source.IsConstant());
    source.constant_instruction()->EmitMoveToLocation(this, destination);
  }
}
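// EmitNativeMoveArchitecture handles moves between FFI native locations.
// Integer widening uses movsx/movzx (or a shift pair for unusual widths),
// transfers between general and XMM registers use movq/movd, and stack
// traffic derives its width from the container type. Payload type conversions
// are not performed here; they are done by IL convert instructions.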
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();
  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 8);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        switch (src_type.AsPrimitive().representation()) {
          // Sign extend the operand.
          case compiler::ffi::kInt8:
            __ movsxb(dst_reg, src_reg);
            break;
          case compiler::ffi::kInt16:
            __ movsxw(dst_reg, src_reg);
            break;
          case compiler::ffi::kInt32:
            __ movsxd(dst_reg, src_reg);
            break;
          // Zero extend the operand.
          case compiler::ffi::kUint8:
            __ movzxb(dst_reg, src_reg);
            break;
          case compiler::ffi::kUint16:
            __ movzxw(dst_reg, src_reg);
            break;
          case compiler::ffi::kUint32:
            __ movl(dst_reg, src_reg);  // Implicitly zero-extends.
            break;
          default:
            // Remaining widths are widened with a shift pair.
            __ MoveRegister(dst_reg, src_reg);
            __ shlq(dst_reg,
                    compiler::Immediate(64 - src_size * kBitsPerByte));
            if (src_type.IsSigned()) {
              __ sarq(dst_reg,
                      compiler::Immediate(64 - src_size * kBitsPerByte));
            } else {
              __ shrq(dst_reg,
                      compiler::Immediate(64 - src_size * kBitsPerByte));
            }
            break;
        }
      }
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      ASSERT(src_size == dst_size);
      switch (dst_size) {
        case 8:
          __ movq(dst.fpu_reg(), src_reg);
          break;
        case 4:
          __ movd(dst.fpu_reg(), src_reg);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      ASSERT(!sign_or_zero_extend);
      switch (destination.container_type().SizeInBytes()) {
        case 8:
          __ movq(dst_addr, src_reg);
          break;
        case 4:
          __ movl(dst_addr, src_reg);
          break;
        case 2:
          __ movw(dst_addr, src_reg);
          break;
        case 1:
          __ movb(dst_addr, ByteRegisterOf(src_reg));
          break;
        default:
          UNREACHABLE();
      }
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    // Conversions are not handled here; they use IL convert instructions.
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
      ASSERT(src_size == dst_size);
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      switch (dst_size) {
        case 8:
          __ movq(dst_reg, src.fpu_reg());
          break;
        case 4:
          __ movl(dst_reg, src.fpu_reg());
          break;
        default:
          UNREACHABLE();
      }
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      // The optimization manual recommends MOVAPS for register-to-register
      // moves.
      __ movaps(dst.fpu_reg(), src.fpu_reg());
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      switch (dst_size) {
        case 8:
          __ movsd(dst_addr, src.fpu_reg());
          break;
        case 4:
          __ movss(dst_addr, src.fpu_reg());
          break;
        default:
          UNREACHABLE();
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      const auto src_addr = NativeLocationToStackSlotAddress(src);
      switch (dst_size) {
        case 8:
          __ movsd(dst.fpu_reg(), src_addr);
          break;
        case 4:
          __ movss(dst.fpu_reg(), src_addr);
          break;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();  // Stack-to-stack moves are handled elsewhere.
    }
  }
}
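// EmitNativeLoad loads a native value of the given primitive type from
// base + offset. Power-of-two sizes need a single load; the remaining widths
// are assembled from two or three smaller loads whose upper parts are shifted
// with shlq and OR-ed into the destination via TMP.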
void FlowGraphCompiler::EmitNativeLoad(Register dst,
                                       Register base,
                                       intptr_t offset,
                                       compiler::ffi::PrimitiveType type) {
  switch (type) {
    // 1-, 2-, 4- and 8-byte types need only a single load.
    // ...
    // Non-power-of-two widths are assembled from smaller loads: the upper
    // part is loaded into TMP (sign- or zero-extended to match the type),
    // shifted into position, and OR-ed into dst.
    case compiler::ffi::kInt24:
    case compiler::ffi::kUint24:
      // ... (2-byte load into dst, 1-byte load into TMP)
      __ shlq(TMP, compiler::Immediate(16));
      __ orq(dst, TMP);
      break;
    case compiler::ffi::kInt40:
    case compiler::ffi::kUint40:
      // ... (4-byte load into dst, 1-byte load into TMP)
      __ shlq(TMP, compiler::Immediate(32));
      __ orq(dst, TMP);
      break;
    case compiler::ffi::kInt48:
    case compiler::ffi::kUint48:
      // ... (4-byte load into dst, 2-byte load into TMP)
      __ shlq(TMP, compiler::Immediate(32));
      __ orq(dst, TMP);
      break;
    case compiler::ffi::kInt56:
    case compiler::ffi::kUint56:
      // ... (4-byte load into dst, then 2-byte and 1-byte loads into TMP)
      __ shlq(TMP, compiler::Immediate(32));
      __ orq(dst, TMP);
      // ...
      __ shlq(TMP, compiler::Immediate(48));
      __ orq(dst, TMP);
      break;
    default:
      UNREACHABLE();
  }
}
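// LoadBSSEntry reads a runtime-provided routine out of the BSS section: a
// relocation word is embedded in the instruction stream (jumped over at run
// time), a RIP-relative leaq computes its address, and two dependent loads
// convert the stored delta into the BSS start and then into the routine's
// address.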
void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  __ jmp(&skip_reloc);
  InsertBSSRelocation(relocation);
  const intptr_t reloc_end = __ CodeSize();
  __ Bind(&skip_reloc);

  const intptr_t kLeaqLength = 7;
  __ leaq(dst, compiler::Address::AddressRIPRelative(
                   -kLeaqLength - compiler::target::kWordSize));
  ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);

  // dst holds the address of the relocation; tmp holds the relocation itself
  // (the delta to the BSS start).
  __ movq(tmp, compiler::Address(dst, 0));
  __ addq(dst, tmp);

  // dst now holds the start of the BSS section; load the routine.
  __ movq(dst, compiler::Address(dst, 0));
}
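// The ParallelMoveEmitter methods below resolve swaps and scratch spilling
// for the register allocator's parallel moves: register pairs use xchgq,
// register/stack pairs use the Exchange helpers, FPU values go through
// FpuTMP, and stack/stack FPU swaps additionally reserve a scratch XMM
// register.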
#undef __
#define __ compiler_->assembler()->

void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgq(destination.reg(), source.reg());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), LocationToStackSlotAddress(destination));
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), LocationToStackSlotAddress(source));
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(LocationToStackSlotAddress(destination),
             LocationToStackSlotAddress(source));
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ movaps(FpuTMP, source.fpu_reg());
    __ movaps(source.fpu_reg(), destination.fpu_reg());
    __ movaps(destination.fpu_reg(), FpuTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    XmmRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    compiler::Address slot_address =
        source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
                               : LocationToStackSlotAddress(source);

    if (double_width) {
      __ movsd(FpuTMP, slot_address);
      __ movsd(slot_address, reg);
    } else {
      __ movups(FpuTMP, slot_address);
      __ movups(slot_address, reg);
    }
    __ movaps(reg, FpuTMP);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movsd(FpuTMP, source_slot_address);
    __ movsd(ensure_scratch.reg(), destination_slot_address);
    __ movsd(destination_slot_address, FpuTMP);
    __ movsd(source_slot_address, ensure_scratch.reg());
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movups(FpuTMP, source_slot_address);
    __ movups(ensure_scratch.reg(), destination_slot_address);
    __ movups(destination_slot_address, FpuTMP);
    __ movups(source_slot_address, ensure_scratch.reg());
  } else {
    UNREACHABLE();
  }
}
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  __ MoveMemoryToMemory(dst, src);
}

void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  __ Exchange(reg, mem);
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  __ Exchange(mem1, mem2);
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ pushq(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ popq(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ AddImmediate(RSP, compiler::Immediate(-kFpuRegisterSize));
  __ movups(compiler::Address(RSP, 0), reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ movups(reg, compiler::Address(RSP, 0));
  __ AddImmediate(RSP, compiler::Immediate(kFpuRegisterSize));
}

#undef __

}  // namespace dart

#endif  // defined(TARGET_ARCH_X64)