#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
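// The loop above verifies that every per-block jump label collected in
// block_info_ has been bound (no branch is still linked to an unbound label),
// presumably a sanity check run when the compiler object is torn down.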
  intrinsic_mode_ = true;

  intrinsic_mode_ = false;
    const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }
  AllocateOutgoingArguments(deopt_env_);

  Environment* current = deopt_env_;

  EmitMaterializations(deopt_env_, builder);

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  slot_ix = builder->EmitMaterializationArguments(slot_ix);
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }
  Environment* previous = current;
  current = current->outer();

  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    current = current->outer();
  }
  ASSERT(previous != nullptr);

  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
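// The code above translates the optimized frame state into deoptimization
// instructions: it walks the Environment chain from the innermost frame
// outward, recording PP, PC-marker, caller FP/PC slots and a copy of each
// live value, so the deoptimizer can rebuild the equivalent unoptimized
// frames from this description.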
  compiler::Assembler* assembler = compiler->assembler();

#define __ assembler->

  if (FLAG_trap_on_deoptimization) {

  __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));

#define __ assembler->
    intptr_t sub_type_cache_index) {

      compiler::FieldAddress(

      sub_type_cache_index);

#define __ assembler()->
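// GenerateBoolToJump (below) falls through when bool_register holds null;
// otherwise it branches to is_true or is_false based on the boolean value
// the register contains.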
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ beq(bool_register, NULL_REG, &fall_through,
         compiler::Assembler::kNearJump);
  BranchLabels labels = {is_true, is_false, &fall_through};

  __ BranchIf(true_condition, is_true);
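// EmitFrameEntry: in JIT mode an optimizable function first bumps its usage
// counter and, once the counter reaches GetOptimizationThreshold(), jumps to
// the optimizing entry loaded from the Thread; only then is the regular Dart
// frame entered.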
void FlowGraphCompiler::EmitFrameEntry() {
    __ Comment("Invocation Count Check");

    __ LoadFieldFromOffset(usage_reg, function_reg,
                           Function::usage_counter_offset(),

    __ addi(usage_reg, usage_reg, 1);
    __ StoreFieldToOffset(usage_reg, function_reg,
                          Function::usage_counter_offset(),

    __ CompareImmediate(usage_reg, GetOptimizationThreshold());
    compiler::Label dont_optimize;

    __ lx(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");

  } else if (FLAG_precompiled_mode) {
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
  intptr_t args_desc_slot = -1;

    __ Comment("Initialize spill slots");
    const intptr_t fp_to_sp_delta =

    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =

      __ StoreToOffset(value_reg, SP,
                       (slot_index + fp_to_sp_delta) * kWordSize);

    const intptr_t slot_index =
    const intptr_t fp_to_sp_delta =
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);

  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);

    __ lx(TMP, compiler::FieldAddress(

  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {

    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);

  if (flow_graph().graph_entry()->NeedsFrame()) {

    __ lx(TMP, compiler::FieldAddress(
    const InstructionSource& source,
    LocationSummary* locs,

                       pending_deoptimization_env_);

    const InstructionSource& source,
    LocationSummary* locs,

  __ JumpAndLinkPatchable(stub, entry_kind);

                       pending_deoptimization_env_);

    const InstructionSource& source,
    LocationSummary* locs,

  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);

                         pending_deoptimization_env_);

    const auto& stub = StubCode::CallStaticFunction();
    __ JumpAndLinkWithEquivalence(stub, target, entry_kind);

                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  __ Comment("Edge counter");
  __ LoadObject(A0, edge_counters_array_);
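// EmitEdgeCounter loads the per-function edge counters array into A0 so the
// following code can increment the counter for this edge; these counts
// presumably feed the JIT's reoptimization decisions.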
    const ICData& ic_data,
    const InstructionSource& source,
    LocationSummary* locs,

  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
    const ICData& ic_data,
    const InstructionSource& source,
    LocationSummary* locs,

         entry_kind == Code::EntryKind::kUnchecked);

  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  const intptr_t entry_point_offset =

  __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));

                       pending_deoptimization_env_);
    const Array& arguments_descriptor,
    const InstructionSource& source,
    LocationSummary* locs) {

  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);

  __ Comment("MegamorphicCall");

  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ Call(compiler::FieldAddress(
    const InstructionSource& source,
    LocationSummary* locs,
    bool receiver_can_be_smi) {

  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);

  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  const auto snapshot_behavior =

  __ LoadUniqueObject(RA, initial_stub, snapshot_behavior);

                       locs, pending_deoptimization_env_);
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,

                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    const InstructionSource& source,
    LocationSummary* locs,

  if (!FLAG_precompiled_mode) {
    int32_t selector_offset,
    const Array& arguments_descriptor) {

  if (!arguments_descriptor.IsNull()) {
    bool needs_number_check,
    const InstructionSource& source,

  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ LoadObject(TMP, obj);
    __ PushRegisterPair(TMP, reg);

      __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());

      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());

    __ PopRegisterPair(ZR, reg);

    __ CompareImmediate(TMP, 0);

    __ CompareObject(reg, obj);
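// EmitEqualityRegConstCompare: when a number check is required, the register
// and the constant are pushed and one of the IdenticalWithNumberCheck stubs
// is called (the optimized or unoptimized variant, apparently depending on
// whether the enclosing code is being compiled optimized); otherwise a plain
// CompareObject suffices.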
    bool needs_number_check,
    const InstructionSource& source,

  if (needs_number_check) {
    __ PushRegisterPair(right, left);

      __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());

      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());

    __ PopRegisterPair(right, left);

    __ CompareImmediate(TMP, 0);

    __ CompareObjectRegisters(left, right);
  __ Comment("BoolTest");
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);

  __ PushRegisters(*locs->live_registers());

  __ PopRegisters(*locs->live_registers());
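// ClobberDeadTempRegisters (below) overwrites temp registers that are not in
// the live-register set with the marker value 0xf7, which makes accidental
// reuse of a dead temp easier to spot; it is likely compiled only in debug
// builds.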
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ li(tmp.reg(), 0xf7);
Register FlowGraphCompiler::EmitTestCidRegister() {

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

    __ BranchIfSmi(A0, label);

    __ BranchIfNotSmi(A0, label);

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  __ LoadClassId(class_id_reg, A0);
  if (loc.IsStackSlot() && (loc.base_reg() == FP)) {
    intptr_t fp_sp_dist =

  if (loc.IsDoubleStackSlot() && (loc.base_reg() == FP)) {
    intptr_t fp_sp_dist =
    TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mv(destination.reg(), source.reg());

      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else if (destination.IsFpuRegister()) {
      const intptr_t src_offset = source.ToStackSlotOffset();
      __ LoadDFromOffset(dst, source.base_reg(), src_offset);

      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadFromOffset(TMP, source.base_reg(), source_offset);
      __ StoreToOffset(TMP, destination.base_reg(), dest_offset);
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ fmvd(destination.fpu_reg(), source.fpu_reg());

      if (destination.IsStackSlot() ||
          destination.IsDoubleStackSlot()) {
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        __ StoreDToOffset(src, destination.base_reg(), dest_offset);

        ASSERT(destination.IsQuadStackSlot());
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);

      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
  } else if (source.IsQuadStackSlot()) {

  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {

    source.constant_instruction()->EmitMoveToLocation(this, destination, TMP,
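
// EmitNativeMoveArchitecture (below) moves a value between FFI native
// locations (CPU registers, FPU registers, or stack slots), sign- or
// zero-extending when the destination's primitive type is wider than the
// source's.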
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();

  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <=

      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);

        __ addiw(dst_reg, src_reg, 0);

        __ MoveRegister(dst_reg, src_reg);
      switch (src_type.AsPrimitive().representation()) {

          __ slli(dst_reg, src_reg, XLEN - 8);
          __ srai(dst_reg, dst_reg, XLEN - 8);

          __ slli(dst_reg, src_reg, XLEN - 16);
          __ srai(dst_reg, dst_reg, XLEN - 16);

          __ andi(dst_reg, src_reg, 0xFF);

          __ slli(dst_reg, src_reg, 16);

          __ srli(dst_reg, dst_reg, 16);

          __ srliw(dst_reg, dst_reg, 16);

          __ addiw(dst_reg, src_reg, 0);
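      // The case labels in the switch above select standard RISC-V extension
      // idioms: shifting left and then arithmetic-right by XLEN - N
      // sign-extends an N-bit value, andi and logical right shifts
      // zero-extend, and `addiw rd, rs, 0` sign-extends the low 32 bits on
      // RV64. For example, sign-extending an 8-bit value:
      //   slli dst, src, XLEN - 8   // move bit 7 into the sign position
      //   srai dst, dst, XLEN - 8   // shift back, replicating the sign bit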
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      ASSERT(src_size == dst_size);

          __ fmvwx(dst.fpu_reg(), src.reg_at(0));

          __ fmvdx(dst.fpu_reg(), src.reg_at(0));

      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);

          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();

    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(src_size == dst_size);

          __ fmvxw(dst.reg_at(0), src.fpu_reg());

          __ fmvxd(dst.reg_at(0), src.fpu_reg());

    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ fmvd(dst.fpu_reg(), src.fpu_reg());

      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();

        __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
                          dst.offset_in_bytes());

        __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
                          dst.offset_in_bytes());
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();

        __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
                           src.offset_in_bytes());

        __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
                           src.offset_in_bytes());

      ASSERT(destination.IsStack());
void FlowGraphCompiler::EmitNativeLoad(Register dst,

  __ PushRegister(tmp);

  __ slli(tmp, tmp, 16);
  __ slli(tmp, tmp, 16);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 48);
  __ slli(tmp, tmp, 32);
  __ slli(tmp, tmp, 48);

  __ PopRegister(tmp);
  compiler::Label skip_reloc;

  __ lx(dst, compiler::Address(tmp));

  __ add(tmp, tmp, dst);

  __ lx(dst, compiler::Address(tmp));
#define __ compiler_->assembler()->
void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ mv(source.reg(), destination.reg());
    __ mv(destination.reg(), TMP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {

  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {

  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    FRegister scratch = ensure_scratch.reg();
    __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {

void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {

  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(TMP, base_reg, stack_offset);
void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {

  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
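// Memory-to-memory exchange: both stack slots are loaded into scratch
// registers before either store, so the swap is correct even when the two
// slots share a base register. (The Register base_reg parameters completed
// in the two Exchange signatures above are inferred from how the bodies use
// base_reg, base_reg1, and base_reg2.)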
void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ PushRegister(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ PopRegister(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ subi(SP, SP, sizeof(double));
  __ fsd(reg, compiler::Address(SP, 0));
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ fld(reg, compiler::Address(SP, 0));
  __ addi(SP, SP, sizeof(double));
}