#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
FlowGraphCompiler::~FlowGraphCompiler() {
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}

void FlowGraphCompiler::EnterIntrinsicMode() {
  intrinsic_mode_ = true;
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  intrinsic_mode_ = false;
}

TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;
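
  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on deoptimization as a prefix to the deoptimization info.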
  EmitMaterializations(deopt_env_, builder);

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  slot_ix = builder->EmitMaterializationArguments(slot_ix);
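
  // For the innermost environment, set outgoing arguments and the locals.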
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);
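
    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.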
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);
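
    // The values of outgoing arguments can be changed from the inlined call so
    // we must read them from the previous environment.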
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }
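
    // Set the locals; note that outgoing arguments are not in the environment.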
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);
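
  // Set slots for the outermost environment: caller's PP, the PC marker,
  // caller's FP and caller's PC.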
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);
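
  // For the outermost environment, set the incoming arguments.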
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
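
// Calls do not need stubs; they share a deoptimization trampoline reached
// through the Thread. Only out-of-line deoptimization points get a stub.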
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  ASSERT(reason() != ICData::kDeoptAtCall);
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ ebreak();  // Assumed trap instruction; stops execution at the deopt.
  }

  ASSERT(deopt_env() != nullptr);
  __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
  set_pc_offset(assembler->CodeSize());
#undef __
}

#define __ assembler->

void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  // Load the type-testing stub's entry point from the type, load the
  // subtype-test cache from the object pool, then call the stub indirectly.
  __ LoadField(
      TTSInternalRegs::kScratchReg,
      compiler::FieldAddress(
          reg_with_type,
          compiler::target::AbstractType::type_test_stub_entry_point_offset()));
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ jalr(TTSInternalRegs::kScratchReg);
}

#undef __
#define __ assembler()->
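
// Fall through if bool_register contains null.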
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ beq(bool_register, NULL_REG, &fall_through,
         compiler::Assembler::kNearJump);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition = EmitBoolTest(bool_register, labels, false);
  ASSERT(true_condition != kInvalidCondition);
  __ BranchIf(true_condition, is_true);
  __ j(is_false);
  __ Bind(&fall_through);
}
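
// The frame entry bumps the function's usage counter in unoptimized code and
// tail-calls the optimizing entry through the Thread once the counter crosses
// the optimization threshold.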
void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    // Scratch registers; the argument registers are free at a Dart entry.
    const Register function_reg = A0;
    const Register usage_reg = A1;
    __ lx(function_reg, compiler::FieldAddress(CODE_REG, Code::owner_offset()));

    __ LoadFieldFromOffset(usage_reg, function_reg,
                           Function::usage_counter_offset(),
                           compiler::kFourBytes);
    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ addi(usage_reg, usage_reg, 1);
      __ StoreFieldToOffset(usage_reg, function_reg,
                            Function::usage_counter_offset(),
                            compiler::kFourBytes);
    }
    __ CompareImmediate(usage_reg, GetOptimizationThreshold());
    compiler::Label dont_optimize;
    __ BranchIf(LT, &dont_optimize, compiler::Assembler::kNearJump);
    __ lx(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
    __ jr(TMP);
    __ Bind(&dont_optimize);
  }

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");
    if (flow_graph().IsCompiledForOsr()) {
      const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
      ASSERT(extra_slots >= 0);
      __ EnterOsrFrame(extra_slots * kWordSize);
    } else {
      ASSERT(StackSize() >= 0);
      __ EnterDartFrame(StackSize() * kWordSize);
    }
  } else if (FLAG_precompiled_mode) {
    assembler()->set_constant_pool_allowed(true);
  }
}

const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
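
// In unoptimized code, the prologue initializes every non-argument stack slot
// so that the GC never observes uninitialized locals.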
void FlowGraphCompiler::EmitPrologue() {
  BeginCodeSourceRange(PrologueSource());

  EmitFrameEntry();

  if (!is_optimizing()) {
    const int num_locals = parsed_function().num_stack_locals();

    intptr_t args_desc_slot = -1;
    if (parsed_function().has_arg_desc_var()) {
      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().arg_desc_var());
    }

    __ Comment("Initialize spill slots");
    const intptr_t fp_to_sp_delta =
        num_locals + compiler::target::frame_layout.dart_fixed_frame_size;
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      const Register value_reg =
          slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
      // SP-relative stores can use compressed encodings.
      __ StoreToOffset(value_reg, SP,
                       (slot_index + fp_to_sp_delta) * kWordSize);
    }
  } else if (parsed_function().suspend_state_var() != nullptr &&
             !flow_graph().IsCompiledForOsr()) {
    // Initialize the synthetic :suspend_state variable early, as it may be
    // read by GC and exception handling before the InitSuspendableFunction
    // stub runs.
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariable(
            parsed_function().suspend_state_var());
    const intptr_t fp_to_sp_delta =
        StackSize() + compiler::target::frame_layout.dart_fixed_frame_size;
    __ StoreToOffset(NULL_REG, SP, (slot_index + fp_to_sp_delta) * kWordSize);
  }

  EndCodeSourceRange(PrologueSource());
}
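
// Calls to stubs use an unrelocated PC-relative call when the stub ends up in
// the same loading unit; otherwise the target is loaded through CODE_REG and
// the object pool.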
void FlowGraphCompiler::EmitCallToStub(
    const Code& stub,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    // Non-PC-relative path through the object pool (elided here).
    // ...
    AddStubCallTarget(stub);
  }
}

void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ lx(TMP, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
    __ jr(TMP);
    AddStubCallTarget(stub);
  }
}
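
// Tail calls must leave the current Dart frame first when one was entered.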
void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ lx(TMP, compiler::FieldAddress(
                   CODE_REG, compiler::target::Code::entry_point_offset()));
    __ jr(TMP);
  }
}

void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source,
    const Code& stub,
    UntaggedPcDescriptors::Kind kind,
    LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  // The exact JumpAndLinkPatchable overload is assumed here.
  __ JumpAndLinkPatchable(stub, Code::EntryKind::kNormal, snapshot_behavior);
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  __ JumpAndLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    // Call sites to the same target can share an object pool entry. These
    // call sites are never patched for breakpoints: the function is
    // deoptimized and the unoptimized code with IC calls for static calls is
    // patched instead.
    const auto& stub = StubCode::CallStaticFunction();
    __ JumpAndLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
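
// We do not check for overflow when incrementing the edge counter: the
// function should be optimized long before the counter can overflow, and even
// though the counters are not reset when we optimize or deoptimize, the number
// of deoptimizations is bounded.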
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  __ Comment("Edge counter");
  __ LoadObject(A0, edge_counters_array_);
  // Smi increments need no write barrier.
  __ LoadFieldFromOffset(TMP, A0, Array::element_offset(edge_id));
  __ addi(TMP, TMP, Smi::RawValue(1));
  __ StoreFieldToOffset(TMP, A0, Array::element_offset(edge_id));
}

void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub,
    const ICData& ic_data,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  // Load the receiver into A0 so the IC stub can probe on its class id.
  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadUniqueObject(IC_DATA_REG, ic_data);
  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall,
                   locs, entry_kind);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  // Load the receiver into A0 and the IC data and stub for the call.
  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadUniqueObject(IC_DATA_REG, ic_data);
  __ LoadUniqueObject(CODE_REG, stub);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? compiler::target::Code::entry_point_offset(
                Code::EntryKind::kMonomorphic)
          : compiler::target::Code::entry_point_offset(
                Code::EntryKind::kMonomorphicUnchecked);
  __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
  __ jalr(RA);
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load the receiver into A0.
  __ LoadFromOffset(A0, SP,
                    (args_desc.Count() - 1) * compiler::target::kWordSize);
  // Use the same code pattern as an instance call so the code patcher can
  // parse it.
  __ LoadUniqueObject(IC_DATA_REG, cache);
  __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
  __ Call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(args_desc.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Load the receiver from the stack into A0.
  __ LoadFromOffset(A0, SP,
                    (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  if (FLAG_precompiled_mode) {
    // The AOT runtime will replace the slot in the object pool with the
    // entry point address.
    const auto snapshot_behavior =
        compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
    __ LoadUniqueObject(RA, initial_stub, snapshot_behavior);
  } else {
    const intptr_t entry_point_offset =
        entry_kind == Code::EntryKind::kNormal
            ? compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphic)
            : compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphicUnchecked);
    __ LoadUniqueObject(CODE_REG, initial_stub);
    __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
  }
  __ LoadUniqueObject(IC_DATA_REG, data);
  __ jalr(RA);

  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}

void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(IC_DATA_REG, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}

void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(!function.IsClosureFunction());
  if (function.PrologueNeedsArgumentsDescriptor()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  } else {
    if (!FLAG_precompiled_mode) {
      __ LoadImmediate(ARGS_DESC_REG, 0);  // GC-safe smi zero.
    }
  }
  // Do not use the code from the function, but let the code be patched so
  // that we can record the outgoing edges to other code.
  GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
                         function, entry_kind);
  EmitDropArguments(size_with_type_args);
}

void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  const Register cid_reg = DispatchTableNullErrorABI::kClassIdReg;
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  const intptr_t offset = selector_offset - DispatchTable::kOriginElement;
  // Scale the class id into a dispatch-table index relative to
  // DISPATCH_TABLE_REG, then load and call the target entry point.
  __ AddShifted(TMP, DISPATCH_TABLE_REG, cid_reg,
                compiler::target::kWordSizeLog2);
  __ LoadFromOffset(TMP, TMP, offset << compiler::target::kWordSizeLog2);
  __ jalr(TMP);
}

Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    const InstructionSource& source,
    intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ LoadObject(TMP, obj);
    __ PushRegisterPair(TMP, reg);
    if (is_optimizing()) {
      __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ PopRegisterPair(ZR, reg);
    // No condition flags on RISC-V: materialize the result by comparing TMP
    // against zero.
    __ CompareImmediate(TMP, 0);
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}

Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left,
    Register right,
    bool needs_number_check,
    const InstructionSource& source,
    intptr_t deopt_id) {
  if (needs_number_check) {
    __ PushRegisterPair(right, left);
    if (is_optimizing()) {
      __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    __ PopRegisterPair(right, left);
    __ CompareImmediate(TMP, 0);
  } else {
    __ CompareObjectRegisters(left, right);
  }
  return EQ;
}

Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  // The true and false objects differ in a single bit, so testing that bit
  // (assumed here via TestImmediate) avoids comparing against both objects.
  __ TestImmediate(value, compiler::target::ObjectAlignment::kBoolValueMask);
  return invert ? NE : EQ;
}

void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif
  __ PushRegisters(*locs->live_registers());
}

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}

#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Fill dead temporaries with a recognizable pattern to flush out code that
  // wrongly relies on them surviving a call.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    // TODO: clobber non-live temporary FPU registers.
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ li(tmp.reg(), 0xf7);
    }
  }
}
#endif

Register FlowGraphCompiler::EmitTestCidRegister() {
  return A1;  // Assumed: a cid scratch register distinct from the receiver.
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load the receiver into A0.
  __ LoadFromOffset(
      A0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  if (if_smi) {
    __ BranchIfSmi(A0, label);
  } else {
    __ BranchIfNotSmi(A0, label);
  }
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != A0);
  __ LoadClassId(class_id_reg, A0);
}
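
// FP-relative stack locations are rebased onto SP where the distance is known,
// because SP-based loads and stores can use compressed (16-bit) encodings.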
Location FlowGraphCompiler::RebaseIfImprovesAddressing(Location loc) const {
  if (loc.IsStackSlot() && (loc.base_reg() == FP)) {
    const intptr_t fp_sp_dist =
        (compiler::target::frame_layout.first_local_from_fp + 1 - StackSize());
    __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
    return Location::StackSlot(loc.stack_index() - fp_sp_dist, SP);
  }
  if (loc.IsDoubleStackSlot() && (loc.base_reg() == FP)) {
    const intptr_t fp_sp_dist =
        (compiler::target::frame_layout.first_local_from_fp + 1 - StackSize());
    __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
    return Location::DoubleStackSlot(loc.stack_index() - fp_sp_dist, SP);
  }
  return loc;
}

void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mv(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else if (destination.IsFpuRegister()) {
      const intptr_t src_offset = source.ToStackSlotOffset();
      const FRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), src_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadFromOffset(TMP, source.base_reg(), source_offset);
      __ StoreToOffset(TMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ fmvd(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsStackSlot() /*32-bit float*/ ||
          destination.IsDoubleStackSlot()) {
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        const FRegister src = source.fpu_reg();
        __ StoreDToOffset(src, destination.base_reg(), dest_offset);
      } else {
        ASSERT(destination.IsQuadStackSlot());
        UNIMPLEMENTED();
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const FRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot() /*32-bit float*/);
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    UNIMPLEMENTED();
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.Component(i), source.Component(i), allocator);
    }
  } else {
    ASSERT(source.IsConstant());
    source.constant_instruction()->EmitMoveToLocation(this, destination, TMP,
                                                      source.pair_index());
  }
}
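
// Moves between native (FFI) locations: payload sizes may differ, in which
// case the value is sign- or zero-extended according to the payload type.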
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();

  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    ASSERT(src.num_regs() == 1);
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <=
             compiler::target::kWordSize);
      if (!sign_or_zero_extend) {
#if XLEN == 32
        __ MoveRegister(dst_reg, src_reg);
#else
        if (src_size <= 4) {
          // Values in registers are kept sign-extended to XLEN on RV64.
          __ addiw(dst_reg, src_reg, 0);
        } else {
          __ MoveRegister(dst_reg, src_reg);
        }
#endif
      } else {
        switch (src_type.AsPrimitive().representation()) {
          // Sign-extend by shifting the payload to the top of the register,
          // then shifting it back down arithmetically.
          case compiler::ffi::kInt8:
            __ slli(dst_reg, src_reg, XLEN - 8);
            __ srai(dst_reg, dst_reg, XLEN - 8);
            break;
          case compiler::ffi::kInt16:
            __ slli(dst_reg, src_reg, XLEN - 16);
            __ srai(dst_reg, dst_reg, XLEN - 16);
            break;
          // Zero-extend with a mask or a logical shift pair.
          case compiler::ffi::kUint8:
            __ andi(dst_reg, src_reg, 0xFF);
            break;
          case compiler::ffi::kUint16:
            __ slli(dst_reg, src_reg, 16);
#if XLEN == 32
            __ srli(dst_reg, dst_reg, 16);
#else
            __ srliw(dst_reg, dst_reg, 16);
#endif
            break;
#if XLEN >= 64
          case compiler::ffi::kInt32:
            __ addiw(dst_reg, src_reg, 0);  // Sign-extends the low 32 bits.
            break;
          case compiler::ffi::kUint32:
            __ slli(dst_reg, src_reg, 32);
            __ srli(dst_reg, dst_reg, 32);
            break;
#endif
          default:
            UNREACHABLE();
        }
      }
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      ASSERT(src_size == dst_size);
      switch (dst_size) {
        case 4:
          __ fmvwx(dst.fpu_reg(), src.reg_at(0));
          break;
#if XLEN >= 64
        case 8:
          __ fmvdx(dst.fpu_reg(), src.reg_at(0));
          break;
#endif
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      auto const op_size =
          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(src_size == dst_size);
      switch (src_size) {
        case 4:
          __ fmvxw(dst.reg_at(0), src.fpu_reg());
          break;
#if XLEN >= 64
        case 8:
          __ fmvxd(dst.reg_at(0), src.fpu_reg());
          break;
#endif
        default:
          UNREACHABLE();
      }
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ fmvd(dst.fpu_reg(), src.fpu_reg());
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
      switch (dst_size) {
        case 8:
          __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          break;
        case 4:
          __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          break;
        default:
          UNREACHABLE();
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (src_size) {
        case 8:
          __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
                             src.offset_in_bytes());
          break;
        case 4:
          __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
                             src.offset_in_bytes());
          break;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();  // Stack-to-stack moves are handled elsewhere.
    }
  }
}
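
// Loads whose size the ISA cannot handle in a single instruction are
// assembled from narrower loads, shifted into place and OR-ed together.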
void FlowGraphCompiler::EmitNativeLoad(Register dst,
                                       Register base,
                                       intptr_t offset,
                                       compiler::ffi::PrimitiveType type) {
  switch (type) {
    case compiler::ffi::kInt8:
      __ lb(dst, compiler::Address(base, offset));
      return;
    case compiler::ffi::kUint8:
      __ lbu(dst, compiler::Address(base, offset));
      return;
    case compiler::ffi::kInt16:
      __ lh(dst, compiler::Address(base, offset));
      return;
    case compiler::ffi::kUint16:
      __ lhu(dst, compiler::Address(base, offset));
      return;
#if XLEN >= 64
    case compiler::ffi::kInt32:
      __ lw(dst, compiler::Address(base, offset));
      return;
    case compiler::ffi::kUint32:
      __ lwu(dst, compiler::Address(base, offset));
      return;
    case compiler::ffi::kInt64:
    case compiler::ffi::kUint64:
      __ ld(dst, compiler::Address(base, offset));
      return;
#else
    case compiler::ffi::kInt32:
    case compiler::ffi::kUint32:
      __ lw(dst, compiler::Address(base, offset));
      return;
#endif
    default:
      break;  // Odd sizes are assembled from narrower loads below.
  }

  // Scratch register for the multi-part loads; the selection here is an
  // assumption, the original may allocate it differently.
  const Register tmp = (dst == TMP) ? TMP2 : TMP;
  __ PushRegister(tmp);
  switch (type) {
    case compiler::ffi::kInt24:
      __ lhu(dst, compiler::Address(base, offset));
      __ lb(tmp, compiler::Address(base, offset + 2));
      __ slli(tmp, tmp, 16);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kUint24:
      __ lhu(dst, compiler::Address(base, offset));
      __ lbu(tmp, compiler::Address(base, offset + 2));
      __ slli(tmp, tmp, 16);
      __ or_(dst, dst, tmp);
      break;
#if XLEN >= 64
    case compiler::ffi::kInt40:
      __ lwu(dst, compiler::Address(base, offset));
      __ lb(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kUint40:
      __ lwu(dst, compiler::Address(base, offset));
      __ lbu(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kInt48:
      __ lwu(dst, compiler::Address(base, offset));
      __ lh(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kUint48:
      __ lwu(dst, compiler::Address(base, offset));
      __ lhu(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kInt56:
      __ lwu(dst, compiler::Address(base, offset));
      __ lhu(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      __ lb(tmp, compiler::Address(base, offset + 6));
      __ slli(tmp, tmp, 48);
      __ or_(dst, dst, tmp);
      break;
    case compiler::ffi::kUint56:
      __ lwu(dst, compiler::Address(base, offset));
      __ lhu(tmp, compiler::Address(base, offset + 4));
      __ slli(tmp, tmp, 32);
      __ or_(dst, dst, tmp);
      __ lbu(tmp, compiler::Address(base, offset + 6));
      __ slli(tmp, tmp, 48);
      __ or_(dst, dst, tmp);
      break;
#endif
    default:
      UNREACHABLE();
  }
  __ PopRegister(tmp);
}

void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  __ j(&skip_reloc, compiler::Assembler::kNearJump);
  InsertBSSRelocation(relocation);
  __ Bind(&skip_reloc);

  // `tmp` is the address of the relocation word, one word behind the PC.
  __ auipc(tmp, 0);
  __ addi(tmp, tmp, -compiler::target::kWordSize);

  // The relocation word holds (BSS start - relocation address).
  __ lx(dst, compiler::Address(tmp));

  // tmp = relocation address + (BSS start - relocation address) = BSS start.
  __ add(tmp, tmp, dst);

  // Load the BSS entry: the routine's address.
  __ lx(dst, compiler::Address(tmp));
}

#undef __
#define __ compiler_->assembler()->

void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != TMP);
    ASSERT(destination.reg() != TMP);
    __ mv(TMP, source.reg());
    __ mv(source.reg(), destination.reg());
    __ mv(destination.reg(), TMP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    const FRegister src = source.fpu_reg();
    const FRegister dst = destination.fpu_reg();
    __ fmvd(FTMP, src);
    __ fmvd(src, dst);
    __ fmvd(dst, FTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    // Swap between an FPU register and a (double) stack slot; the original
    // body is elided here.
    // ...
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, FTMP);
    FRegister scratch = ensure_scratch.reg();
    __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    UNREACHABLE();
  } else {
    UNREACHABLE();
  }
}

void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// takes a base register and a stack offset.
void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  UNREACHABLE();
}

// Do not call or implement this function. Instead, use the form below that
// takes base registers and stack offsets.
void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  __ mv(TMP, reg);
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(TMP, base_reg, stack_offset);
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ PushRegister(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ PopRegister(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ subi(SP, SP, sizeof(double));
  __ fsd(reg, compiler::Address(SP, 0));
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ fld(reg, compiler::Address(SP, 0));
  __ addi(SP, SP, sizeof(double));
}

#undef __

#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)