#if defined(TARGET_ARCH_ARM64)
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
  if (FLAG_precompiled_mode) {
    auto object_store = isolate_group()->object_store();
    const auto& stub =
        Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }
    const auto& array_stub =
        Code::ZoneHandle(object_store->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
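// In precompiled (AOT) mode the assembler is handed callbacks that emit
// unrelocated PC-relative calls to the shared write-barrier stubs; the call
// targets are recorded so the branches can be fixed up once the final code
// layout is known.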
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return FLAG_enable_simd_inline;
}
void FlowGraphCompiler::EnterIntrinsicMode() {
  intrinsic_mode_ = true;
}
void FlowGraphCompiler::ExitIntrinsicMode() {
  intrinsic_mode_ = false;
}
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  builder->AddPp(current->function(), slot_ix++);
  builder->AddPcMarker(Function::null_function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as part of the
  // expression stack of the bottom-most frame.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // The values of outgoing arguments can be changed by the inlined call, so
    // they must be read from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);

  // Set slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
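// The deopt info built above is a frame translation: materializations come
// first, then, from the innermost frame outwards, each frame contributes its
// PP, a PC marker, the caller's FP, a return address and copies of its values,
// finishing with the caller's PP/FP/PC and the incoming arguments of the
// outermost frame.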
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  // With --trap_on_deoptimization, stop in the debugger before deoptimizing.
  if (FLAG_trap_on_deoptimization) {
    __ brk(0);
  }
  __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
#undef __

#define __ assembler->
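// GenerateIndirectTTSCall (below) calls indirectly through the type-testing
// stub whose entry point is cached in the AbstractType instance, passing the
// SubtypeTestCache via the object-pool slot at sub_type_cache_index.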
// static
void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  __ LoadField(
      TTSInternalRegs::kScratchReg,
      compiler::FieldAddress(
          reg_with_type,
          compiler::target::AbstractType::type_test_stub_entry_point_offset()));
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ blr(TTSInternalRegs::kScratchReg);
}
#undef __

#define __ assembler()->
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ b(&fall_through, EQ);
  BranchLabels labels = {is_true, is_false, &fall_through};
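// A null (sentinel) boolean simply falls through; otherwise the labels are
// used to branch on the value's bool bit (see EmitBoolTest below).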
void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = R6;
    __ ldr(function_reg,
           compiler::FieldAddress(CODE_REG, Code::owner_offset()));
    __ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
                           compiler::kFourBytes);
    // Reoptimization is triggered by counting in IC stubs, not here, so only
    // bump the counter in unoptimized code.
    if (!is_optimizing()) {
      __ add(R7, R7, compiler::Operand(1));
      __ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
                            compiler::kFourBytes);
    }
    __ CompareImmediate(R7, GetOptimizationThreshold());
    compiler::Label dont_optimize;
    __ b(&dont_optimize, LT);
    __ ldr(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
    __ br(TMP);
    __ Bind(&dont_optimize);
  }

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");
  } else if (FLAG_precompiled_mode) {
    assembler()->set_constant_pool_allowed(true);
  }
}
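// JIT prologue: the function's usage counter is loaded (and bumped in
// unoptimized code); once it reaches GetOptimizationThreshold() control jumps
// to the thread's optimize entry. A Dart frame is only entered when the graph
// entry actually needs one; in AOT mode the prologue merely re-enables the
// constant pool.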
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
  intptr_t args_desc_slot = -1;

  __ Comment("Initialize spill slots");
  for (intptr_t i = 0; i < num_locals; ++i) {
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
  }
  // Initialize the synthetic :suspend_state slot early when present.
  const intptr_t slot_index =
      compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().suspend_state_var());
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  }

  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ ldr(TMP, compiler::FieldAddress(
                    CODE_REG, compiler::target::Code::entry_point_offset()));
    __ br(TMP);
  }

  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ ldr(TMP, compiler::FieldAddress(
                    CODE_REG, compiler::target::Code::entry_point_offset()));
    __ br(TMP);
  }
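// Calls, jumps and tail calls to stubs prefer an unrelocated PC-relative call
// when the stub is reachable from this code (CanPcRelativeCall); otherwise
// control goes through the stub's entry point loaded from its Code object,
// leaving the Dart frame first when one was entered.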
void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source,
    const Code& stub,
    UntaggedPcDescriptors::Kind kind,
    LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  __ BranchLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}

void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    const auto& stub = StubCode::CallStaticFunction();
    __ BranchLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
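// Static Dart calls likewise use a PC-relative call when possible; otherwise
// they go through the CallStaticFunction stub and record the target in the
// static-calls table so the call site can later be patched to the resolved
// entry point.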
  __ Comment("Edge counter");
  __ LoadObject(R0, edge_counters_array_);
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub,
    const ICData& ic_data,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  // Load the receiver into R0: it sits SizeWithoutTypeArgs() - 1 slots deep on
  // the expression stack.
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  compiler::ObjectPoolBuilder& op = __ object_pool_builder();
  const intptr_t ic_data_index =
      op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
  const intptr_t stub_index =
      op.AddObject(stub, ObjectPool::Patchability::kPatchable);
  ASSERT((ic_data_index + 1) == stub_index);

  __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, ic_data_index);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
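// The ICData and the call stub occupy adjacent object-pool slots so that the
// pair can be loaded with a single ldp and the call sequence can be recognized
// (and patched) by the code patcher.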
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(), MegamorphicCacheTable::Lookup(thread(), name,
                                            arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load the receiver into R0.
  __ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);

  // Use the same code pattern as an instance call so the code patcher can
  // recognize it.
  compiler::ObjectPoolBuilder& op = __ object_pool_builder();
  const intptr_t data_index =
      op.AddObject(cache, ObjectPool::Patchability::kPatchable);
  const intptr_t stub_index = op.AddObject(
      StubCode::MegamorphicCall(), ObjectPool::Patchability::kPatchable);
  ASSERT((data_index + 1) == stub_index);

  __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
  CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
                             CODE_REG, Code::entry_point_offset(
                                           Code::EntryKind::kMonomorphic))));
  CLOBBERS_LR(__ blr(LR));
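// Megamorphic calls follow the same pool-pair pattern: the cache and the
// MegamorphicCall stub sit in consecutive slots, and the call goes through the
// stub's monomorphic entry point, clobbering LR.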
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  compiler::ObjectPoolBuilder& op = __ object_pool_builder();

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Clear the arguments descriptor register so the GC sees a valid value if it
  // gets pushed on the stack, and load the receiver into R0.
  __ LoadImmediate(R4, 0);
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  const auto snapshot_behavior =
      FLAG_precompiled_mode ? compiler::ObjectPoolBuilderEntry::
                                  kResetToSwitchableCallMissEntryPoint
                            : compiler::ObjectPoolBuilderEntry::kSnapshotable;
  const intptr_t data_index =
      op.AddObject(data, ObjectPool::Patchability::kPatchable);
  const intptr_t initial_stub_index = op.AddObject(
      initial_stub, ObjectPool::Patchability::kPatchable, snapshot_behavior);
  ASSERT((data_index + 1) == initial_stub_index);

  CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
  CLOBBERS_LR(__ blr(LR));

  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
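// AOT switchable calls zero R4 (the arguments-descriptor register) so the GC
// sees a valid value, load the receiver into R0, and load the UnlinkedCall
// data together with the SwitchableCallMiss stub entry from adjacent pool
// slots; the pool entry is later switched to the resolved target.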
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(R5, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  if (!FLAG_precompiled_mode) {
    __ LoadImmediate(R4, 0);  // GC safe smi zero because of stub.
  }
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg, const Object& obj, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ LoadObject(TMP, obj);
    __ PushPair(TMP, reg);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    __ PushPair(right, left);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    __ PopPair(right, left);
  } else {
    __ CompareObjectRegisters(left, right);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  if (labels.true_label == nullptr || labels.false_label == nullptr) {
    __ tsti(value, compiler::Immediate(
                       compiler::target::ObjectAlignment::kBoolValueMask));
    return invert ? NE : EQ;
  }
  const intptr_t bool_bit =
      compiler::target::ObjectAlignment::kBoolValueBitPosition;
  if (labels.fall_through == labels.false_label) {
    if (invert) {
      __ tbnz(labels.true_label, value, bool_bit);
    } else {
      __ tbz(labels.true_label, value, bool_bit);
    }
  } else {
    if (invert) {
      __ tbz(labels.false_label, value, bool_bit);
    } else {
      __ tbnz(labels.false_label, value, bool_bit);
    }
    if (labels.fall_through != labels.true_label) {
      __ b(labels.true_label);
    }
  }
  return kInvalidCondition;
}
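// The true and false objects are laid out so that they differ in the bit at
// kBoolValueBitPosition; once null has been excluded, a single tbz/tbnz is
// enough to branch on a boolean value.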
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
  __ PushRegisters(*locs->live_registers());
}
void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movz(tmp.reg(), compiler::Immediate(0xf7), 0);
    }
  }
}
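// Dead temporaries are overwritten with a recognizable pattern (0xf7) so that
// any use of a clobbered temp shows up quickly in debug builds.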
Register FlowGraphCompiler::EmitTestCidRegister() {
  return R2;
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load the receiver into R0 and the arguments descriptor into R4.
  __ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
  __ LoadObject(R4, arguments_descriptor);
}

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  if (if_smi) {
    __ BranchIfSmi(R0, label);
  } else {
    __ BranchIfNotSmi(R0, label);
  }
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != R0);
  __ LoadClassId(class_id_reg, R0);
}
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else if (destination.IsFpuRegister()) {
      const intptr_t src_offset = source.ToStackSlotOffset();
      const VRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), src_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      Register tmp = allocator->AllocateTemporary();
      __ LoadFromOffset(tmp, source.base_reg(), source_offset);
      __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
      allocator->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmov(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsStackSlot() ||
          destination.IsDoubleStackSlot()) {
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        const VRegister src = source.fpu_reg();
        __ StoreDToOffset(src, destination.base_reg(), dest_offset);
      } else {
        ASSERT(destination.IsQuadStackSlot());
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        __ StoreQToOffset(source.fpu_reg(), destination.base_reg(),
                          dest_offset);
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const VRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadQFromOffset(destination.fpu_reg(), source.base_reg(),
                         source_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
      __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
    }
  } else {
    ASSERT(source.IsConstant());
    if (destination.IsStackSlot()) {
      Register tmp = allocator->AllocateTemporary();
      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp);
      allocator->ReleaseTemporary();
    } else {
      source.constant_instruction()->EmitMoveToLocation(this, destination);
    }
  }
}
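// EmitMove covers every pairing the register allocator can produce: CPU
// registers, FPU registers, word/double/quad stack slots and constants, using
// TMP/VTMP or an allocator-provided temporary when no direct move exists.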
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const auto& dst_container_type = destination.container_type();
  ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
  ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  const intptr_t src_size = src_payload_type.SizeInBytes();
  const intptr_t dst_size = dst_payload_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 8);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        if (src_payload_type.IsSigned()) {
          __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        } else {
          __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        }
      }
    } else if (destination.IsFpuRegisters()) {
      UNIMPLEMENTED();
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      const auto op_size =
          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    ASSERT(src_payload_type.Equals(dst_payload_type));

    if (destination.IsRegisters()) {
      UNIMPLEMENTED();
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ vmov(dst.fpu_reg(), src.fpu_reg());
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsStack();
      if (dst_container_type.SizeInBytes() == 8) {
        __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
                          dst.offset_in_bytes());
      } else {
        __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
                          dst.offset_in_bytes());
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_payload_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_payload_type.Equals(dst_payload_type));
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      if (src_container_type.SizeInBytes() == 8) {
        __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
                           src.offset_in_bytes());
      } else {
        __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
                           src.offset_in_bytes());
      }
    } else {
      ASSERT(destination.IsStack());
    }
  }
}
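// FFI moves operate on native locations: equal-width moves are plain register
// or memory moves, while a wider destination is filled by sign- or
// zero-extending according to the source payload's signedness.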
void FlowGraphCompiler::EmitNativeLoad(Register dst,

void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  // tmp holds the address of the BSS relocation slot.
  __ ldr(dst, compiler::Address(tmp));
  // dst now holds the relocation itself (the offset to the BSS section);
  // adding it to tmp yields the BSS address.
  __ add(tmp, tmp, compiler::Operand(dst));
  // Load the BSS entry.
  __ ldr(dst, compiler::Address(tmp));
}

#define __ compiler_->assembler()->
void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ mov(TMP, source.reg());
    __ mov(source.reg(), destination.reg());
    __ mov(destination.reg(), TMP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    const VRegister dst = destination.fpu_reg();
    const VRegister src = source.fpu_reg();
    __ vmov(VTMP, src);
    __ vmov(src, dst);
    __ vmov(dst, VTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    const bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    const VRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    const Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();
    if (double_width) {
      __ LoadDFromOffset(VTMP, base_reg, slot_offset);
      __ StoreDToOffset(reg, base_reg, slot_offset);
      __ fmovdd(reg, VTMP);
    } else {
      __ LoadQFromOffset(VTMP, base_reg, slot_offset);
      __ StoreQToOffset(reg, base_reg, slot_offset);
      __ vmov(reg, VTMP);
    }
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();
    ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
    VRegister scratch = ensure_scratch.reg();
    __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();
    ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
    VRegister scratch = ensure_scratch.reg();
    __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
    __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
    __ StoreQToOffset(scratch, source.base_reg(), source_offset);
  } else {
    UNREACHABLE();
  }
}
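// Swaps go through TMP/VTMP; when both operands live in memory an additional
// FPU scratch register is reserved so the two loads and two stores cannot
// clobber each other.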
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), reg);
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ PushDouble(reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ PopDouble(reg);
}

#undef __

#endif  // defined(TARGET_ARCH_ARM64)