#if defined(TARGET_ARCH_ARM64)
DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
void FlowGraphCompiler::ArchSpecificInitialization() {
  if (FLAG_precompiled_mode) {
    const auto& stub =
        Code::ZoneHandle(object_store()->write_barrier_wrappers_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
        const intptr_t offset_into_target =
            Thread::WriteBarrierWrappersOffsetForRegister(reg);
        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
        AddPcRelativeCallStubTarget(stub);
      };
    }
    const auto& array_stub =
        Code::ZoneHandle(object_store()->array_write_barrier_stub());
    if (CanPcRelativeCall(stub)) {
      assembler_->generate_invoke_array_write_barrier_ = [&]() {
        assembler_->GenerateUnRelocatedPcRelativeCall();
        AddPcRelativeCallStubTarget(array_stub);
      };
    }
  }
}
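// Editor's note: in precompiled (AOT) mode the assembler is handed callbacks
// that emit un-relocated pc-relative calls directly to the shared write
// barrier stubs; AddPcRelativeCallStubTarget() records each call site so the
// branch can be fixed up when the snapshot is laid out.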
FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (intptr_t i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
  }
}
bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return FLAG_enable_simd_inline;
}
void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on the deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  // The real frame starts here.
  builder->MarkFrameStart();

  builder->AddPp(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack for the bottom-most frame. This guarantees that GC
  // will be able to find them during materialization.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    builder->AddPp(current->function(), slot_ix++);
    builder->AddPcMarker(previous->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // The values of outgoing arguments can be changed from the inlined call so
    // we must read them from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals; note that outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);

  // Add slots for the outermost environment.
  builder->AddCallerPp(slot_ix++);
  builder->AddPcMarker(previous->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
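// Editor's sketch: the translation is built innermost frame first. It starts
// with a prefix of materialization instructions, then emits per frame a group
// of bookkeeping slots (pp, pc marker, caller fp, return address) followed by
// copies describing the frame's values, and ends with the outermost caller's
// pp/fp/pc and the incoming arguments. slot_ix numbers the target frame slots
// the deoptimizer will fill in.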
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs; they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ brk(0);
  }
  ASSERT(deopt_env() != nullptr);
  __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
  set_pc_offset(assembler->CodeSize());
#undef __
}

#define __ assembler->
void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
                                                Register reg_with_type,
                                                intptr_t sub_type_cache_index) {
  // Load the type's TTS entry point and the SubtypeTestCache, then call.
  __ LoadField(TTSInternalRegs::kScratchReg,
               compiler::FieldAddress(reg_with_type,
                                      compiler::target::AbstractType::
                                          type_test_stub_entry_point_offset()));
  __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
                           sub_type_cache_index);
  __ blr(TTSInternalRegs::kScratchReg);
}
#undef __
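// Editor's note: the type-test call is dispatched through the entry point
// cached in the AbstractType object itself rather than through a Code object;
// the SubtypeTestCache is fetched first from the object pool slot at
// sub_type_cache_index into TypeTestABI::kSubtypeTestCacheReg.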
#define __ assembler()->
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  compiler::Label fall_through;
  __ CompareObject(bool_register, Object::null_object());
  __ b(&fall_through, EQ);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition =
      EmitBoolTest(bool_register, labels, /*invert=*/false);
  ASSERT(true_condition == kInvalidCondition);
  __ Bind(&fall_through);
}
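// Editor's note: a null bool_register falls through (neither target is
// taken); otherwise EmitBoolTest() branches on the bool bit directly, so the
// outcome is conveyed by control flow rather than by a condition code.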
void FlowGraphCompiler::EmitFrameEntry() {
  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = R6;
    __ ldr(function_reg,
           compiler::FieldAddress(CODE_REG, Code::owner_offset()));

    __ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
                           kFourBytes);
    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ add(R7, R7, compiler::Operand(1));
      __ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
                            kFourBytes);
    }
    __ CompareImmediate(R7, GetOptimizationThreshold());
    compiler::Label dont_optimize;
    __ b(&dont_optimize, LT);
    __ ldr(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
    __ br(TMP);
    __ Bind(&dont_optimize);
  }

  if (flow_graph().graph_entry()->NeedsFrame()) {
    __ Comment("Enter frame");
    if (flow_graph().IsCompiledForOsr()) {
      const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
      __ EnterOsrFrame(extra_slots * kWordSize);
    } else {
      __ EnterDartFrame(StackSize() * kWordSize);
    }
  } else if (FLAG_precompiled_mode) {
    assembler()->set_constant_pool_allowed(true);
  }
}
const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
void FlowGraphCompiler::EmitPrologue() {
  BeginCodeSourceRange(PrologueSource());
  EmitFrameEntry();
  // ... (optimized-code register allocation setup elided) ...

  // In unoptimized code, initialize (non-argument) stack-allocated slots.
  const intptr_t num_locals = parsed_function().num_stack_locals();
  intptr_t args_desc_slot = -1;
  if (parsed_function().has_arg_desc_var()) {
    args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
        parsed_function().arg_desc_var());
  }

  __ Comment("Initialize spill slots");
  for (intptr_t i = 0; i < num_locals; ++i) {
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
    const Register value_reg =
        slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
    __ StoreToOffset(value_reg, FP, slot_index * kWordSize);
  }

  // ... (for suspendable functions, the suspend_state_var() slot is located
  // via compiler::target::frame_layout.FrameSlotForVariable() and initialized
  // here as well; elided) ...

  EndCodeSourceRange(PrologueSource());
}
void FlowGraphCompiler::EmitCallToStub(
    const Code& stub,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallStubTarget(stub);
  } else {
    __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
    AddStubCallTarget(stub);
  }
}

void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
  ASSERT(!flow_graph().graph_entry()->NeedsFrame());
  if (CanPcRelativeCall(stub)) {
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    __ ldr(TMP, compiler::FieldAddress(
                    CODE_REG, compiler::target::Code::entry_point_offset()));
    __ br(TMP);
    AddStubCallTarget(stub);
  }
}
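// Editor's note: both helpers above prefer an un-relocated pc-relative
// call/tail-call when CanPcRelativeCall() guarantees the stub lands within
// branch range in the final snapshot; otherwise the stub's Code object is
// loaded from the object pool and invoked through its entry point.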
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
  ASSERT(!stub.IsNull());
  if (CanPcRelativeCall(stub)) {
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ GenerateUnRelocatedPcRelativeTailCall();
    AddPcRelativeTailCallStubTarget(stub);
  } else {
    __ LoadObject(CODE_REG, stub);
    if (flow_graph().graph_entry()->NeedsFrame()) {
      __ LeaveDartFrame();
    }
    __ ldr(TMP, compiler::FieldAddress(
                    CODE_REG, compiler::target::Code::entry_point_offset()));
    __ br(TMP);
    AddStubCallTarget(stub);
  }
}
void FlowGraphCompiler::GeneratePatchableCall(
    const InstructionSource& source,
    const Code& stub,
    UntaggedPcDescriptors::Kind kind,
    LocationSummary* locs,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
                       pending_deoptimization_env_);
}
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  __ BranchLinkPatchable(stub, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  if (CanPcRelativeCall(target)) {
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeCallTarget(target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
  } else {
    // Call sites to the same target can share an object pool entry. These
    // call sites are never patched for breakpoints: the function is
    // deoptimized and the unoptimized code with IC calls for static calls is
    // patched instead.
    const auto& stub = StubCode::CallStaticFunction();
    __ BranchLinkWithEquivalence(stub, target, entry_kind);
    EmitCallsiteMetadata(source, deopt_id, kind, locs,
                         pending_deoptimization_env_);
    AddStaticCallTarget(target, entry_kind);
  }
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  // We do not check for overflow when incrementing the edge counter: the
  // function should be optimized long before the counter can overflow.
  ASSERT(!edge_counters_array_.IsNull());
  __ Comment("Edge counter");
  __ LoadObject(R0, edge_counters_array_);
  // ... (Smi increment of the element at Array::element_offset(edge_id)
  // elided) ...
}
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub,
    const ICData& ic_data,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  // Each ICData propagated from unoptimized to optimized code records the
  // Dart function that owns the IC; pass it explicitly for the IC stub.
  __ LoadObject(R6, parsed_function().function());
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
  __ LoadUniqueObject(R5, ic_data);
  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
                   entry_kind);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  compiler::ObjectPoolBuilder& op = __ object_pool_builder();
  const intptr_t ic_data_index =
      op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
  const intptr_t stub_index =
      op.AddObject(stub, ObjectPool::Patchability::kPatchable);
  ASSERT((ic_data_index + 1) == stub_index);
  __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, ic_data_index);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
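// Editor's note: the ic_data and stub pool entries are allocated back to back
// (hence the ASSERT on adjacent indices) so that LoadDoubleWordFromPoolIndex
// can fetch both with a single ldp, and so the code patcher can recognize and
// rewrite the call pattern.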
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(CanCallDart());
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load receiver into R0.
  __ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);

  // Use the same code pattern as an instance call so the code patcher can
  // parse it.
  compiler::ObjectPoolBuilder& op = __ object_pool_builder();
  const intptr_t data_index =
      op.AddObject(cache, ObjectPool::Patchability::kPatchable);
  const intptr_t stub_index = op.AddObject(
      StubCode::MegamorphicCall(), ObjectPool::Patchability::kPatchable);
  ASSERT((data_index + 1) == stub_index);
  __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
  CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
                             CODE_REG, Code::entry_point_offset(
                                           Code::EntryKind::kMonomorphic))));
  CLOBBERS_LR(__ blr(LR));
  // ... (callsite metadata and argument dropping elided) ...
}
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  ASSERT(CanCallDart());
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(ic_data.NumArgsTested() == 1);
  const Code& initial_stub = StubCode::SwitchableCallMiss();
  const char* switchable_call_mode = "smiable";
  if (!receiver_can_be_smi) {
    switchable_call_mode = "non-smi";
    ic_data.set_receiver_cannot_be_smi(true);
  }
  const UnlinkedCall& data =
      UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());

  compiler::ObjectPoolBuilder& op = __ object_pool_builder();

  __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
  // Clear the argument descriptor to keep the GC happy when it gets pushed
  // onto the stack.
  __ LoadImmediate(R4, 0);
  __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);

  const auto snapshot_behavior =
      FLAG_precompiled_mode
          ? compiler::ObjectPoolBuilderEntry::
                kResetToSwitchableCallMissEntryPoint
          : compiler::ObjectPoolBuilderEntry::kSnapshotable;
  const intptr_t data_index =
      op.AddObject(data, ObjectPool::Patchability::kPatchable);
  const intptr_t initial_stub_index = op.AddObject(
      initial_stub, ObjectPool::Patchability::kPatchable, snapshot_behavior);
  ASSERT((data_index + 1) == initial_stub_index);

  if (FLAG_precompiled_mode) {
    // The AOT runtime will replace the slot in the object pool with the
    // entry point address.
    CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
  } else {
    __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
    const intptr_t entry_point_offset =
        entry_kind == Code::EntryKind::kNormal
            ? compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphic)
            : compiler::target::Code::entry_point_offset(
                  Code::EntryKind::kMonomorphicUnchecked);
    CLOBBERS_LR(
        __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset)));
  }
  CLOBBERS_LR(__ blr(LR));
  EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
                       locs, pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
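// Editor's note: a switchable AOT call site keeps an (UnlinkedCall data,
// target entry) pair in adjacent pool slots. The pair initially points at the
// SwitchableCallMiss stub (kResetToSwitchableCallMissEntryPoint restores that
// at snapshot time), and the runtime can later patch it toward monomorphic or
// megamorphic dispatch without rewriting the generated instructions.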
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(R5, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function, const Array& arguments_descriptor,
    intptr_t size_with_type_args, intptr_t deopt_id,
    const InstructionSource& source, LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(!function.IsClosureFunction());
  if (function.PrologueNeedsArgumentsDescriptor()) {
    __ LoadObject(R4, arguments_descriptor);
  } else {
    if (!FLAG_precompiled_mode) {
      __ LoadImmediate(R4, 0);  // GC-safe smi zero because of stub.
    }
  }
  GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
                         function, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  if (!arguments_descriptor.IsNull()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  }
  const intptr_t offset = selector_offset - DispatchTable::kOriginElement;
  // ... (loads the entry at cid + offset from DISPATCH_TABLE_REG and calls
  // it; elided) ...
}
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    const InstructionSource& source,
    intptr_t deopt_id) {
  if (needs_number_check) {
    ASSERT(!obj.IsMint() && !obj.IsDouble());
    __ LoadObject(TMP, obj);
    __ PushPair(TMP, reg);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    // The stub returns its result in the condition flags; restore reg and
    // discard the constant.
    __ PopPair(ZR, reg);
  } else {
    __ CompareObject(reg, obj);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left, Register right, bool needs_number_check,
    const InstructionSource& source, intptr_t deopt_id) {
  if (needs_number_check) {
    __ PushPair(right, left);
    if (is_optimizing()) {
      __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    // The stub returns its result in the condition flags.
    __ PopPair(right, left);
  } else {
    __ CompareObjectRegisters(left, right);
  }
  return EQ;
}
Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  if (labels.true_label == nullptr || labels.false_label == nullptr) {
    __ tsti(value, compiler::Immediate(
                       compiler::target::ObjectAlignment::kBoolValueMask));
    return invert ? NE : EQ;
  }
  const intptr_t bool_bit =
      compiler::target::ObjectAlignment::kBoolValueBitPosition;
  if (labels.fall_through == labels.false_label) {
    if (invert) {
      __ tbnz(labels.true_label, value, bool_bit);
    } else {
      __ tbz(labels.true_label, value, bool_bit);
    }
  } else {
    if (invert) {
      __ tbz(labels.false_label, value, bool_bit);
    } else {
      __ tbnz(labels.false_label, value, bool_bit);
    }
    if (labels.fall_through != labels.true_label) {
      __ b(labels.true_label);
    }
  }
  return kInvalidCondition;
}
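// Editor's note: true and false differ only in the bit at
// kBoolValueBitPosition, so a single tbz/tbnz on that bit selects the correct
// target; kInvalidCondition signals that the result was already consumed by
// the emitted branches.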
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif
  __ PushRegisters(*locs->live_registers());
}

void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  __ PopRegisters(*locs->live_registers());
}
#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movz(tmp.reg(), compiler::Immediate(0xf7), 0);
    }
  }
}
#endif
Register FlowGraphCompiler::EmitTestCidRegister() {
  return R2;
}
void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into R0.
  __ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
  __ LoadObject(R4, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  if (if_smi) {
    __ BranchIfSmi(R0, label);
  } else {
    __ BranchIfNotSmi(R0, label);
  }
}
void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != R0);
  __ LoadClassId(class_id_reg, R0);
}
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* allocator) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ mov(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
    } else if (destination.IsFpuRegister()) {
      const intptr_t src_offset = source.ToStackSlotOffset();
      VRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), src_offset);
    } else {
      ASSERT(destination.IsStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      Register tmp = allocator->AllocateTemporary();
      __ LoadFromOffset(tmp, source.base_reg(), source_offset);
      __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
      allocator->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      __ vmov(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsStackSlot() /*32-bit float*/ ||
          destination.IsDoubleStackSlot()) {
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        VRegister src = source.fpu_reg();
        __ StoreDToOffset(src, destination.base_reg(), dest_offset);
      } else {
        ASSERT(destination.IsQuadStackSlot());
        const intptr_t dest_offset = destination.ToStackSlotOffset();
        __ StoreQToOffset(source.fpu_reg(), destination.base_reg(),
                          dest_offset);
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      const VRegister dst = destination.fpu_reg();
      __ LoadDFromOffset(dst, source.base_reg(), source_offset);
    } else {
      ASSERT(destination.IsDoubleStackSlot() ||
             destination.IsStackSlot() /*32-bit float*/);
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
      __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      const intptr_t source_offset = source.ToStackSlotOffset();
      __ LoadQFromOffset(destination.fpu_reg(), source.base_reg(),
                         source_offset);
    } else {
      ASSERT(destination.IsQuadStackSlot());
      const intptr_t source_offset = source.ToStackSlotOffset();
      const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
      __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
    }
  } else {
    ASSERT(source.IsConstant());
    if (destination.IsStackSlot()) {
      Register tmp = allocator->AllocateTemporary();
      source.constant_instruction()->EmitMoveToLocation(this, destination, tmp);
      allocator->ReleaseTemporary();
    } else {
      source.constant_instruction()->EmitMoveToLocation(this, destination);
    }
  }
}
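// Editor's note: stack-to-stack integer moves borrow a scratch register from
// the TemporaryRegisterAllocator, FPU slot moves go through the fixed VTMP
// register, and constants are delegated to EmitMoveToLocation() on the
// defining ConstantInstr.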
static OperandSize BytesToOperandSize(intptr_t bytes) {
  switch (bytes) {
    case 8:
      return OperandSize::kEightBytes;
    case 4:
      return OperandSize::kFourBytes;
    case 2:
      return OperandSize::kTwoBytes;
    case 1:
      return OperandSize::kByte;
    default:
      UNIMPLEMENTED();
  }
}

void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const auto& dst_container_type = destination.container_type();
  ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
  ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  const intptr_t src_size = src_payload_type.SizeInBytes();
  const intptr_t dst_size = dst_payload_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    ASSERT(src.num_regs() == 1);
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 8);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        if (src_payload_type.IsSigned()) {
          __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        } else {
          __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
        }
      }
    } else if (destination.IsFpuRegisters()) {
      // FPU registers should only contain floats; use IL conversions instead.
      UNREACHABLE();
    } else {
      ASSERT(destination.IsStack());
      const auto& dst = destination.AsStack();
      ASSERT(!sign_or_zero_extend);
      auto const op_size =
          BytesToOperandSize(destination.container_type().SizeInBytes());
      __ StoreToOffset(src.reg_at(0), dst.base_register(),
                       dst.offset_in_bytes(), op_size);
    }
  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    // Conversions are not implemented here; use IL convert instructions.
    ASSERT(src_payload_type.Equals(dst_payload_type));

    if (destination.IsRegisters()) {
      // General-purpose registers should only contain ints.
      UNREACHABLE();
    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      __ vmov(dst.fpu_reg(), src.fpu_reg());
    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsStack();
      switch (dst_size) {
        case 8:
          __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        case 4:
          __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
                            dst.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    }
  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
                     src_payload_type.AsPrimitive().representation());
    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_payload_type.Equals(dst_payload_type));
      ASSERT(src_payload_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (src_size) {
        case 8:
          __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        case 4:
          __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
                             src.offset_in_bytes());
          return;
        default:
          UNREACHABLE();
      }
    } else {
      ASSERT(destination.IsStack());
      // Stack-to-stack native moves are not generated.
      UNREACHABLE();
    }
  }
}
void FlowGraphCompiler::EmitNativeLoad(Register dst,
                                       Register base,
                                       intptr_t offset,
                                       compiler::ffi::PrimitiveType type) {
  switch (type) {
    // ... (power-of-two sized loads use a single LoadFromOffset of the
    // matching operand size; elided) ...
    case compiler::ffi::kInt24:
      __ LoadFromOffset(dst, base, offset, kUnsignedTwoBytes);
      __ LoadFromOffset(TMP, base, offset + 2, kByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
      break;
    case compiler::ffi::kUint24:
      __ LoadFromOffset(dst, base, offset, kUnsignedTwoBytes);
      __ LoadFromOffset(TMP, base, offset + 2, kUnsignedByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
      break;
    case compiler::ffi::kInt40:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      break;
    case compiler::ffi::kUint40:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kUnsignedByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      break;
    case compiler::ffi::kInt48:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kTwoBytes);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      break;
    case compiler::ffi::kUint48:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kUnsignedTwoBytes);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      break;
    case compiler::ffi::kInt56:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kUnsignedTwoBytes);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      __ LoadFromOffset(TMP, base, offset + 6, kByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
      break;
    case compiler::ffi::kUint56:
      __ LoadFromOffset(dst, base, offset, kUnsignedFourBytes);
      __ LoadFromOffset(TMP, base, offset + 4, kUnsignedTwoBytes);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
      __ LoadFromOffset(TMP, base, offset + 6, kUnsignedByte);
      __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
      break;
    default:
      UNREACHABLE();
  }
}
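// Editor's worked example: a kUint24 load is assembled from an unsigned
// two-byte load plus an unsigned byte load of the trailing byte OR'ed in at
// bit 16; the 40/48/56-bit cases extend the same pattern with pieces OR'ed in
// at bits 32 and 48, using a signed load for the topmost piece when the
// payload is signed.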
void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
                                     Register dst,
                                     Register tmp) {
  compiler::Label skip_reloc;
  __ b(&skip_reloc);
  InsertBSSRelocation(relocation);
  __ Bind(&skip_reloc);

  __ adr(tmp, compiler::Immediate(-compiler::target::kWordSize));
  // tmp holds the address of the relocation word.
  __ ldr(dst, compiler::Address(tmp));
  // dst holds the relocation itself: the offset from the relocation word to
  // the start of the BSS section.
  __ add(tmp, tmp, compiler::Operand(dst));
  // tmp now points at the BSS section; load its first entry.
  __ ldr(dst, compiler::Address(tmp));
}
#undef __
#define __ compiler_->assembler()->
void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    ASSERT(source.reg() != TMP);
    ASSERT(destination.reg() != TMP);
    __ mov(TMP, source.reg());
    __ mov(source.reg(), destination.reg());
    __ mov(destination.reg(), TMP);
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), destination.base_reg(),
             destination.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(source.base_reg(), source.ToStackSlotOffset(),
             destination.base_reg(), destination.ToStackSlotOffset());
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    const VRegister dst = destination.fpu_reg();
    const VRegister src = source.fpu_reg();
    __ vmov(VTMP, src);
    __ vmov(src, dst);
    __ vmov(dst, VTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    VRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    Register base_reg =
        source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
    const intptr_t slot_offset = source.IsFpuRegister()
                                     ? destination.ToStackSlotOffset()
                                     : source.ToStackSlotOffset();

    if (double_width) {
      __ LoadDFromOffset(VTMP, base_reg, slot_offset);
      __ StoreDToOffset(reg, base_reg, slot_offset);
      __ fmovdd(reg, VTMP);
    } else {
      __ LoadQFromOffset(VTMP, base_reg, slot_offset);
      __ StoreQToOffset(reg, base_reg, slot_offset);
      __ vmov(reg, VTMP);
    }
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
    VRegister scratch = ensure_scratch.reg();
    __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
    __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
    __ StoreDToOffset(scratch, source.base_reg(), source_offset);
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const intptr_t source_offset = source.ToStackSlotOffset();
    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
    VRegister scratch = ensure_scratch.reg();
    __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
    __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset);
    __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
    __ StoreQToOffset(scratch, source.base_reg(), source_offset);
  } else {
    UNREACHABLE();
  }
}
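// Editor's note: EmitSwap never allocates. Register cycles are broken through
// the fixed scratch registers TMP and VTMP, while slot-to-slot swaps borrow
// extra scratch registers via the Scratch(Fpu)RegisterScope RAII helpers.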
void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  UNREACHABLE();
}
// Do not call or implement this function. Use the base-register/offset form
// below instead.
void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  UNREACHABLE();
}
void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  UNREACHABLE();
}
void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  ScratchRegisterScope tmp(this, reg);
  __ mov(tmp.reg(), reg);
  __ LoadFromOffset(reg, base_reg, stack_offset);
  __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
}
void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  ScratchRegisterScope tmp1(this, kNoRegister);
  ScratchRegisterScope tmp2(this, tmp1.reg());
  __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
  __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
  __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
}
void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ Push(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ Pop(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ PushDouble(reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ PopDouble(reg);
}

#undef __

#endif  // defined(TARGET_ARCH_ARM64)