            trace_inlining_intervals,
            "Inlining interval diagnostics");
DEFINE_FLAG(bool,
            enable_peephole,
            true,
            "Enable peephole optimization");
            "Enable inlining of SIMD related method calls.");
            min_optimization_counter_threshold,
            "The minimum invocation count for a function.");
            optimization_counter_scale,
            "The scale of invocation count, by size of the function.");
DEFINE_FLAG(bool,
            source_lines,
            false,
            "Emit source line as assembly comment.");
            "Do not emit PC relative calls.");
            "Align all loop headers to 32 byte boundary");
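// Fragment below: on ARM/ARM64 the compiler tracks the state of the link
// register (LR) while emitting code. ComputeInnerLRState scans the graph
// entry's successors for native entries, which appear to require a more
// conservative starting assumption than a plain Dart entry (the adjustment
// itself is elided in this excerpt).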
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
compiler::LRState ComputeInnerLRState(const FlowGraph& flow_graph) {
  bool has_native_entries = false;
  for (intptr_t i = 0; i < entry->SuccessorCount(); i++) {
    if (entry->SuccessorAt(i)->IsNativeEntry()) {
      has_native_entries = true;
  auto state = compiler::LRState::OnEntry();
  if (has_native_entries) {
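// CompilerDeoptInfo helpers: a deoptimization environment records where every
// live value can be found at a deopt point. AllocateOutgoingArguments patches
// invalid locations of MoveArgument values with the location their
// MoveArgument instruction produced, and EmitMaterializations registers
// objects that must be (re)materialized with the DeoptInfoBuilder.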
void CompilerDeoptInfo::AllocateOutgoingArguments(Environment* env) {
  if (env == nullptr) return;
  for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) {
    if (it.CurrentLocation().IsInvalid()) {
      if (auto move_arg = it.CurrentValue()->definition()->AsMoveArgument()) {
        it.SetCurrentLocation(move_arg->locs()->out(0));

void CompilerDeoptInfo::EmitMaterializations(Environment* env,
                                             DeoptInfoBuilder* builder) {
  for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
    if (it.CurrentLocation().IsInvalid()) {
      MaterializeObjectInstr* mat =
          it.CurrentValue()->definition()->AsMaterializeObject();
      builder->AddMaterialization(mat);
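// FlowGraphCompiler constructor (excerpt): caches the current thread and
// zone, takes the codegen block order from the flow graph, and sets up
// per-compilation state such as the deopt-id-to-ICData table used in JIT
// mode, the per-block BlockInfo list, and the edge-counter array used by
// unoptimized code.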
    : thread_(Thread::Current()),
      zone_(Thread::Current()->zone()),
      assembler_(assembler),
      parsed_function_(parsed_function),
      flow_graph_(*flow_graph),
      block_order_(*flow_graph->CodegenBlockOrder()),
      current_block_(nullptr),
      exception_handlers_list_(nullptr),
      pc_descriptors_list_(nullptr),
      compressed_stackmaps_builder_(nullptr),
      code_source_map_builder_(nullptr),
      catch_entry_moves_maps_builder_(nullptr),
      block_info_(block_order_.length()),
      static_calls_target_table_(),
      is_optimizing_(is_optimizing),
      speculative_policy_(speculative_policy),
      may_reoptimize_(false),
      intrinsic_mode_(false),
          Class::ZoneHandle(isolate_group()->object_store()->double_class())),
          Class::ZoneHandle(isolate_group()->object_store()->mint_class())),
      float32x4_class_(Class::ZoneHandle(
          isolate_group()->object_store()->float32x4_class())),
      float64x2_class_(Class::ZoneHandle(
          isolate_group()->object_store()->float64x2_class())),
          Class::ZoneHandle(isolate_group()->object_store()->int32x4_class())),
      pending_deoptimization_env_(nullptr),
      deopt_id_to_ic_data_(deopt_id_to_ic_data),
      edge_counters_array_(Array::ZoneHandle()) {
    deopt_id_to_ic_data_ = nullptr;
    deopt_id_to_ic_data_->EnsureLength(len, nullptr);
  const bool stack_traces_only = true;
  const bool stack_traces_only = false;
  ASSERT(inline_id_to_function[0]->ptr() ==
  code_source_map_builder_ = new (zone_)
      inline_id_to_token_pos, inline_id_to_function);

  compressed_stackmaps_builder_ =
  exception_handlers_list_ =
#if defined(DART_PRECOMPILER)
  for (int i = 0; i < block_order_.length(); ++i) {
    block_info_.Add(new (zone()) BlockInfo());
      if (auto* branch = current->AsBranch()) {
        current = branch->comparison();
      if (auto* instance_call = current->AsInstanceCall()) {
        const ICData* ic_data = instance_call->ic_data();
          may_reoptimize_ = true;
    const intptr_t num_counters = flow_graph_.preorder().length();
    const Array& edge_counters =
    for (intptr_t i = 0; i < num_counters; ++i) {
      edge_counters.SetAt(i, Object::smi_zero());
    edge_counters_array_ = edge_counters.ptr();
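// The flag checks below (apparently from ForceSlowPathForStackOverflow) make
// stack-overflow checks always take the runtime call whenever one of the
// debugging knobs is active: stacktrace/deoptimize-every-N, the
// reload-on-stack-overflow test hook, or a function-name filter.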
  if (FLAG_stacktrace_every > 0 || FLAG_deoptimize_every > 0 ||
      (isolate_group()->reload_every_n_stack_overflow_checks() > 0)) {
  if (FLAG_stacktrace_filter != nullptr &&
          FLAG_stacktrace_filter) != nullptr) {
          FLAG_deoptimize_filter) != nullptr) {
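// Block compaction: a block counts as "empty" when it is none of the special
// entry kinds and its only body instruction is a Goto; CompactBlocks records,
// per block, the next non-empty label so jumps to empty blocks can be
// redirected to their final targets.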
  return !block->IsGraphEntry() && !block->IsFunctionEntry() &&
         !block->IsCatchBlockEntry() && !block->IsOsrEntry() &&
         block->next()->IsGoto() &&

  BlockEntryInstr* target = block->next()->AsGoto()->successor();

void FlowGraphCompiler::CompactBlocks() {
  compiler::Label* nonempty_label = nullptr;
    BlockInfo* block_info = block_info_[block->postorder_number()];
    block_info->set_next_nonempty_label(nonempty_label);
  block_info->set_next_nonempty_label(nonempty_label);
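// AOT-only (DART_PRECOMPILER) catch-entry support: CatchEntryMoveFor builds a
// CatchEntryMove describing how a catch block's expected value is recovered,
// either from a constant placed in the object pool or from (pairs of) stack
// slots; RecordCatchEntryMoves appends the non-redundant moves for the
// current environment to the catch-entry-moves map builder.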
#if defined(DART_PRECOMPILER)
static intptr_t LocationToStackIndex(const Location& src) {
  return -compiler::target::frame_layout.VariableIndexForFrameSlot(

static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
                                        intptr_t dst_index) {
  if (src.IsConstant()) {
    if (src.constant().ptr() == Object::optimized_out().ptr()) {
      return CatchEntryMove();
    const intptr_t pool_index =
        assembler->object_pool_builder().FindObject(src.constant());
        pool_index, dst_index);
  if (src.IsPairLocation()) {
    const auto lo_loc = src.AsPairLocation()->At(0);
    const auto hi_loc = src.AsPairLocation()->At(1);
    ASSERT(lo_loc.IsStackSlot() && hi_loc.IsStackSlot());
        LocationToStackIndex(hi_loc)),
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:

#if defined(DART_PRECOMPILER)
    auto param = (*idefs)[i]->AsParameter();
    if (param == nullptr) continue;
    if (dst.IsRegister()) continue;
        env->ValueAt(i)->definition()->representation();
    const auto move = CatchEntryMoveFor(assembler(), src_type, src,
                                        LocationToStackIndex(dst));
    if (!move.IsRedundant()) {
      catch_entry_moves_maps_builder_->Append(move);
  catch_entry_moves_maps_builder_->EndMapping();

  if (env != nullptr) {
    const intptr_t dest_deopt_id = env->LazyDeoptToBeforeDeoptId()

    intptr_t yield_index) {
void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
    AllocateRegistersLocally(instr);

#define __ assembler()->

void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  Definition* defn = instr->AsDefinition();
  if (defn != nullptr && defn->HasTemp()) {
    if (value.IsRegister()) {
    } else if (value.IsFpuRegister()) {
      switch (instr->representation()) {
          stub = &StubCode::BoxDouble();
        case kUnboxedFloat32x4:
          stub = &StubCode::BoxFloat32x4();
        case kUnboxedFloat64x2:
          stub = &StubCode::BoxFloat64x2();
      instr->locs()->live_registers()->Clear();
      if (instr->representation() == kUnboxedDouble) {
          *stub, UntaggedPcDescriptors::kOther, instr->locs());
    } else if (value.IsConstant()) {
      __ PushObject(value.constant());
      __ PushValueAtOffset(value.base_reg(), value.ToStackSlotOffset());

void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
  if (!instr->token_pos().IsReal()) {
  const InstructionSource& source = instr->source();
  const intptr_t inlining_id = source.inlining_id < 0 ? 0 : source.inlining_id;
  ASSERT(instr->env() == nullptr ||
  if (script.GetTokenLocation(source.token_pos, &line_nr)) {

  if (auto def = instr->AsDefinition()) {

bool FlowGraphCompiler::IsPeephole(Instruction* instr) const {
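// Main code-generation loop (fragments below): the function body is emitted
// block by block, then deferred slow-path code, deopt-info stubs and the
// offset tables of indirect gotos are generated. Around each instruction the
// compiler emits a prologue (local register allocation in unoptimized code),
// the instruction's native code and an epilogue, with
// pending_deoptimization_env_ set so deopt points capture the right
// environment.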
#if !defined(TARGET_ARCH_IA32)

#if !defined(TARGET_ARCH_IA32)
  GenerateDeferredCode();

  for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) {
    indirect_gotos_[i]->ComputeOffsetTable(this);

#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
      Symbols::vm_align_loops(),

  if (FLAG_precompiled_mode) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  const auto inner_lr_state = ComputeInnerLRState(flow_graph());
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
  const bool should_align_loops =
      FLAG_align_all_loops || IsMarkedWithAlignLoops(function());
  for (intptr_t i = 0; i < block_order().length(); ++i) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    if (entry->IsFunctionEntry() || entry->IsNativeEntry()) {
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
      ASSERT(pending_deoptimization_env_ == nullptr);
      pending_deoptimization_env_ = entry->env();
      set_current_instruction(entry);
      entry->EmitNativeCode(this);
      set_current_instruction(nullptr);
      pending_deoptimization_env_ = nullptr;
      set_current_instruction(instr);
      if (FLAG_code_comments || FLAG_disassemble ||
          FLAG_disassemble_optimized) {
        if (FLAG_source_lines) {
          EmitSourceLine(instr);
        EmitInstructionPrologue(instr);
        ASSERT(pending_deoptimization_env_ == nullptr);
        pending_deoptimization_env_ = instr->env();
        pending_deoptimization_env_ = nullptr;
        if (IsPeephole(instr)) {
          ASSERT(top_of_stack_ == nullptr);
          top_of_stack_ = instr->AsDefinition();
          EmitInstructionEpilogue(instr);
        FrameStateUpdateWith(instr);
      set_current_instruction(nullptr);
      if (auto indirect_goto = instr->AsIndirectGoto()) {
        indirect_gotos_.Add(indirect_goto);

  parsed_function_.Bailout("FlowGraphCompiler", reason);

  if (is_optimizing_) {
  const intptr_t stack_depth =
  return StackSize() - stack_depth - num_stack_locals;

  return block_info_[block_index]->jump_label();

  return block_info_[block_index]->WasCompacted();

  return block_info_[current_index]->next_nonempty_label();

  slow_path_code_.Add(code);
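// Deferred code: slow paths registered during the main pass are emitted after
// the function body, followed by one deoptimization stub per recorded
// CompilerDeoptInfo.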
void FlowGraphCompiler::GenerateDeferredCode() {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  const auto lr_state = ComputeInnerLRState(flow_graph());
  for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    set_current_instruction(nullptr);
  const InstructionSource deopt_source(TokenPosition::kDeferredDeoptInfo,
  for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    deopt_infos_[i]->GenerateCode(this, i);

    intptr_t yield_index) {
  if (FLAG_precompiled_mode && (kind == UntaggedPcDescriptors::kDeopt)) return;
  pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, root_pos,
                                      try_index, yield_index);

#if defined(DART_PRECOMPILER)
  if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) return;
  const intptr_t name_index =

  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(

void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
  ASSERT(!stub_code.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, &stub_code, nullptr));

void FlowGraphCompiler::AddPcRelativeTailCallStubTarget(const Code& stub_code) {
  ASSERT(!stub_code.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, &stub_code, nullptr));

void FlowGraphCompiler::AddPcRelativeTTSCallTypeTarget(
    const AbstractType& dst_type) {
  ASSERT(!dst_type.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, nullptr, &dst_type));

void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(

  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(

  dispatch_table_call_targets_.Add(selector);

  ASSERT(!FLAG_precompiled_mode);
  if (env != nullptr) {
  deopt_infos_.Add(info);

  deopt_infos_.Add(info);
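// RecordSafepoint (fragment below): builds the stack-map bitmap for a call.
// The bits cover the spill area plus any outgoing MoveArguments; tagged slots
// are marked for the GC, while unboxed (untagged) slots get cleared bits.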
    intptr_t slow_path_argument_count) {
  const intptr_t spill_area_size =
    ASSERT(registers != nullptr);
    const intptr_t kFpuRegisterSpillFactor =
    bitmap.SetLength(spill_area_size);
    const intptr_t args_count = instr->ArgumentCount();
    for (intptr_t i = 0; i < args_count; i++) {
      const auto move_arg =
          instr->ArgumentValueAt(i)->instruction()->AsMoveArgument();
      const auto rep = move_arg->representation();
      if (move_arg->is_register_move()) {
      ASSERT(rep == kTagged || rep == kUnboxedInt64 || rep == kUnboxedDouble);
      static_assert(compiler::target::kIntSpillFactor ==
                        compiler::target::kDoubleSpillFactor,
                    "int and double are of the same size");
      const bool is_tagged = move_arg->representation() == kTagged;
      const intptr_t num_bits =
          is_tagged ? 1 : compiler::target::kIntSpillFactor;
      const intptr_t last_arg_bit =
          (spill_area_size - 1) - move_arg->sp_relative_index();
      bitmap.SetRange(last_arg_bit - (num_bits - 1), last_arg_bit, is_tagged);
    ASSERT(slow_path_argument_count == 0 || !using_shared_stub);
    intptr_t spill_area_bits = bitmap.Length();
    while (spill_area_bits > 0) {
      if (!bitmap.Get(spill_area_bits - 1)) {
    bitmap.SetLength(spill_area_bits);
      for (intptr_t j = 0; j < kFpuRegisterSpillFactor; ++j) {
    if (using_shared_stub) {
    for (intptr_t i = 0; i < slow_path_argument_count; ++i) {

    intptr_t num_slow_path_args) {
  const bool shared_stub_save_fpu_registers =
  ASSERT(!using_shared_stub || num_slow_path_args == 0);
  if (env == nullptr) {
      env->DeepCopy(zone(), env->Length() - env->LazyDeoptPruneCount());
  if (using_shared_stub) {
    const intptr_t kFpuRegisterSpillFactor =
        next_slot += kFpuRegisterSpillFactor;
        fpu_reg_slots[i] = (next_slot - 1);
        if (using_shared_stub && shared_stub_save_fpu_registers) {
          next_slot += kFpuRegisterSpillFactor;
        fpu_reg_slots[i] = -1;
      cpu_reg_slots[i] = next_slot++;
      if (using_shared_stub) next_slot++;
      cpu_reg_slots[i] = -1;
    Location loc = it.CurrentLocation();
        loc, value->definition(), cpu_reg_slots, fpu_reg_slots));
  return slow_path_env;
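// Deoptimization and finalization plumbing: the fragments below record a
// CompilerDeoptInfo per deopt point and, once assembly is done, attach the
// accumulated exception handlers, PC descriptors, deopt-info table,
// compressed stack maps, variable descriptors, catch-entry moves and
// static-call targets to the generated Code object.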
  return intrinsic_slow_path_label_;

  if (FLAG_precompiled_mode) {
    if (FLAG_trace_compiler) {
          "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
        deopt_id, Object::speculative_inlining_error());

  ASSERT(pending_deoptimization_env_ != nullptr);
  if (pending_deoptimization_env_->IsHoisted()) {
      deopt_id, reason, flags, pending_deoptimization_env_);
  deopt_infos_.Add(stub);

  ASSERT(exception_handlers_list_ != nullptr);
  code.set_exception_handlers(handlers);

  ASSERT(pc_descriptors_list_ != nullptr);
  if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
  code.set_pc_descriptors(descriptors);

  if (FLAG_precompiled_mode) {
    return Array::empty_array().ptr();
  const intptr_t incoming_arg_count =
  if (deopt_info_table_size == 0) {
    return Object::empty_array().ptr();
  const Array& array =
  for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
    info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
        deopt_infos_[i]->reason(), deopt_infos_[i]->flags());

  ASSERT(compressed_stackmaps_builder_ != nullptr);
  code.set_compressed_stackmaps(maps);

  if (code.is_optimized()) {
    code.set_var_descriptors(Object::empty_var_descriptors());
  info.set_index(compiler::target::frame_layout.FrameSlotForVariable(
  var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
  code.set_var_descriptors(var_descs);

#if defined(DART_PRECOMPILER)
  if (FLAG_precompiled_mode) {
    code.set_catch_entry_moves_maps(maps);
  code.set_num_variables(flow_graph().variable_count());

  const auto& calls = static_calls_target_table_;
  const auto& targets =
  for (intptr_t i = 0; i < calls.length(); i++) {
    auto entry = calls[i];
    kind_type_and_offset =
    auto view = entries[i];
    if (entry->function != nullptr) {
      target = entry->function;
    if (entry->code != nullptr) {
    if (entry->dst_type != nullptr) {
  code.set_static_calls_target_table(targets);

  const Array& inlined_id_array =
  code.set_inlined_id_to_function(inlined_id_array);

  code.set_code_source_map(map);

  code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens);

  if (TryIntrinsifyHelper()) {
    fully_intrinsified_ = true;

bool FlowGraphCompiler::TryIntrinsifyHelper() {

  ASSERT(FLAG_precompiled_mode ||

        return StubCode::OneArgOptimizedCheckInlineCacheWithExactnessCheck();
      return StubCode::OneArgCheckInlineCacheWithExactnessCheck();
      return optimized ? StubCode::OneArgOptimizedCheckInlineCache()
                       : StubCode::OneArgCheckInlineCache();
      return optimized ? StubCode::TwoArgsOptimizedCheckInlineCache()
                       : StubCode::TwoArgsCheckInlineCache();

    const ICData& ic_data_in,
    bool receiver_can_be_smi) {
  if (FLAG_precompiled_mode) {
                        receiver_can_be_smi);
        ic_data, deopt_id, source, locs, entry_kind);
        deopt_id, source, locs, entry_kind);

    const ICData& ic_data_in,
  if (call_ic_data.IsNull()) {
    const intptr_t kNumArgsChecked = 0;
                             kNumArgsChecked, rebind_rule)
    call_ic_data = call_ic_data.Original();
                      locs, call_ic_data, entry_kind);

  if (type.IsNumberType()) {
  } else if (type.IsIntType()) {
  } else if (type.IsDoubleType()) {
    args.Add(kDoubleCid);
  CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);

  args.Add(kTwoByteStringCid);
  CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);

                (kGrowableObjectArrayCid == kArrayCid + 2));
  ranges.Add({kArrayCid, kGrowableObjectArrayCid});

#if defined(INCLUDE_IL_PRINTER)

  return FLAG_reorder_basic_blocks &&

    if (!blocked_registers[regno]) {
      blocked_registers[regno] = true;
      return static_cast<Register>(regno);

    if (!blocked_registers[regno]) {
      blocked_registers[regno] = true;
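// AllocateRegistersLocally: the simple per-instruction register allocator
// used for unoptimized code. It initializes a LocationSummary in "not
// optimizing" mode, blocks registers explicitly required by inputs, temps and
// the output, then satisfies unallocated locations from the remaining free
// registers; operands otherwise travel on the expression stack, with a small
// peephole that keeps the top-of-stack definition in a register.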
void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
  instr->InitializeLocationSummary(zone(), false);
  LocationSummary* locs = instr->locs();

    blocked_fpu_registers[i] = false;

  for (intptr_t i = 0; i < locs->input_count(); i++) {
    if (loc.IsRegister()) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      ASSERT(!blocked_fpu_registers[fpu_reg]);
      blocked_fpu_registers[fpu_reg] = true;

  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    if (loc.IsRegister()) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      ASSERT(!blocked_fpu_registers[fpu_reg]);
      blocked_fpu_registers[fpu_reg] = true;

  if (top_of_stack_ != nullptr) {
    const intptr_t p = locs->input_count() - 1;
    if ((instr->RequiredInputRepresentation(p) == kTagged) &&
        (locs->in(p).IsUnallocated() || locs->in(p).IsConstant())) {
      if (peephole.IsRegister() && !blocked_registers[peephole.reg()]) {
        blocked_registers[peephole.reg()] = true;

  if (locs->out(0).IsRegister()) {
    blocked_registers[locs->out(0).reg()] = true;
  } else if (locs->out(0).IsFpuRegister()) {
    blocked_fpu_registers[locs->out(0).fpu_reg()] = true;

  ASSERT(!instr->IsMoveArgument());
  for (intptr_t i = locs->input_count() - 1; i >= 0; i--) {
    if (loc.IsRegister()) {
    } else if (loc.IsFpuRegister()) {
      fpu_reg = loc.fpu_reg();
    } else if (loc.IsUnallocated()) {
      switch (loc.policy()) {
          reg = fpu_unboxing_temp;
    if (top_of_stack_ != nullptr) {
      if (!loc.IsConstant()) {
      top_of_stack_ = nullptr;
    } else if (loc.IsConstant()) {
    if (!loc.IsConstant()) {
      switch (instr->RequiredInputRepresentation(i)) {
        case kUnboxedDouble:
          ASSERT(instr->SpeculativeModeOfInput(i) ==
        case kUnboxedFloat32x4:
        case kUnboxedFloat64x2:
          ASSERT(instr->SpeculativeModeOfInput(i) ==

  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    if (loc.IsUnallocated()) {
      switch (loc.policy()) {
          locs->set_temp(i, loc);
          locs->set_temp(i, loc);

  Location result_location = locs->out(0);
  if (result_location.IsUnallocated()) {
    switch (result_location.policy()) {
        result_location = locs->in(0);
  locs->set_out(0, result_location);

    const String& target_name,
    const Array& arguments_descriptor,
    intptr_t num_args_tested,
    const Function& binary_smi_target) {
  if ((deopt_id_to_ic_data_ != nullptr) &&
      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
    const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
  if (!binary_smi_target.IsNull()) {
    ASSERT(num_args_tested == 2);
        arguments_descriptor, deopt_id,
        num_args_tested, ICData::kInstance, &cids,
        binary_smi_target, receiver_type);
        arguments_descriptor, deopt_id, num_args_tested,
        ICData::kInstance, receiver_type);
  if (deopt_id_to_ic_data_ != nullptr) {
    (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
  ASSERT(!ic_data.is_static_call());

    const Array& arguments_descriptor,
    intptr_t num_args_tested,
  if ((deopt_id_to_ic_data_ != nullptr) &&
      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
    const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
      arguments_descriptor, deopt_id,
      num_args_tested, rebind_rule));
  if (deopt_id_to_ic_data_ != nullptr) {
    (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
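// GetOptimizationThreshold: the invocation count at which a function becomes
// eligible for (re)optimization. The base value scales with the number of
// basic blocks (FLAG_optimization_counter_scale) on top of
// FLAG_min_optimization_counter_threshold, is capped at the configured
// isolate-group threshold, and is never allowed to be zero.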
intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
    threshold = FLAG_reoptimization_counter_threshold;
    threshold = FLAG_regexp_optimization_counter_threshold;
    const auto configured_optimization_counter_threshold =
    ASSERT(basic_blocks > 0);
    threshold = FLAG_optimization_counter_scale * basic_blocks +
                FLAG_min_optimization_counter_threshold;
    if (threshold > configured_optimization_counter_threshold) {
      threshold = configured_optimization_counter_threshold;
  if (threshold == 0) threshold = 1;

    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:

    const Array& args_desc_array) {

    bool* class_is_abstract_return) {
  if (class_id < 0) return false;
  if (class_id >= class_table->NumCids()) return false;
  ClassPtr raw_class = class_table->At(class_id);
  if (raw_class == nullptr) return false;
  if (cls.IsNull()) return false;
  if (class_is_abstract_return != nullptr) {
  const bool allow_add = false;
      cls, name, args_desc, allow_add));
  if (target_function.IsNull()) return false;
  *fn_return = target_function.ptr();

    intptr_t total_ic_calls,
    bool receiver_can_be_smi) {
  if (FLAG_polymorphic_with_deopt) {
        AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
        deopt_id, source, locs, complete, total_ic_calls,
        call->entry_kind());
        deopt_id, source, locs, true, total_ic_calls,
        call->entry_kind());
    const ICData& unary_checks =
        call->entry_kind(), receiver_can_be_smi);

#define __ assembler()->

void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
  for (const auto& id : class_ids) {
    __ CompareImmediate(class_id_reg, id);
    __ BranchIf(EQUAL, is_equal_lbl);
  __ Jump(is_not_equal_lbl);
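// EmitTestAndCall: dispatch for a polymorphic call site. A Smi receiver case,
// if present in the target table, is tested first; afterwards the receiver's
// class id is loaded and compared against each cid range in turn, and rarely
// taken cases can be folded into a trailing megamorphic call instead.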
    intptr_t total_ic_calls,
  ASSERT(complete || (failed != nullptr));
  const Array& arguments_descriptor =
      arguments_descriptor);
  const int kNoCase = -1;
  int smi_case = kNoCase;
  int which_case_to_skip = kNoCase;
  int non_smi_length = length;
  for (int i = 0; i < length; i++) {
    const intptr_t start = targets[i].cid_start;
    if (start > kSmiCid) continue;
    const intptr_t end = targets[i].cid_end;
    if (end >= kSmiCid) {
      if (start == kSmiCid && end == kSmiCid) {
        which_case_to_skip = i;
  if (smi_case != kNoCase) {
    if (!(complete && non_smi_length == 0)) {
      EmitTestAndCallSmiBranch(non_smi_length == 0 ? failed : &after_smi_test,
                        UntaggedPcDescriptors::kOther, locs, function,
    if (match_found != nullptr) {
      __ Jump(match_found);
    __ Bind(&after_smi_test);
      EmitTestAndCallSmiBranch(failed, true);
  if (non_smi_length == 0) {
  bool add_megamorphic_call = false;
  EmitTestAndCallLoadCid(EmitTestCidRegister());
  for (intptr_t i = 0; i < length; i++) {
    if (i == which_case_to_skip) continue;
    const bool is_last_check = (i == last_check);
    if (!is_last_check && !complete && count < (total_ic_calls >> 5)) {
      add_megamorphic_call = true;
    if (!complete || !is_last_check) {
          is_last_check ? failed : &next_test,
          EmitTestCidRegister(), targets[i], bias,
                        UntaggedPcDescriptors::kOther, locs, function,
    if (!is_last_check || add_megamorphic_call) {
      __ Jump(match_found);
    __ Bind(&next_test);
  if (add_megamorphic_call) {
        source_index, locs);

    const Class& type_class,
  if (hi != nullptr) {
    __ CompareImmediate(class_id_reg, type_class.id());
    __ BranchIf(EQUAL, is_subtype);

    bool fall_through_if_inside) {
  if (fall_through_if_inside) {
  for (intptr_t i = 0; i < cid_ranges.length(); ++i) {
    const bool last_round = i == (cid_ranges.length() - 1);
    const bool jump_on_miss = last_round && fall_through_if_inside;
        bias, jump_on_miss);

    bool jump_on_miss) {
  const intptr_t cid_start = range.cid_start;
          .IsAbstractType()) ||

SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
  __ Comment("FunctionTypeTest");
  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeSixArgs,
                                     is_instance_lbl, is_not_instance_lbl);
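// Inline instanceof: GenerateInlineInstanceof dispatches on the shape of the
// tested type (function type, record type, instantiated with or without type
// arguments, or uninstantiated) and falls back to a SubtypeTestCache stub
// call for cases that cannot be decided with a few class-id comparisons.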
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InlineInstanceof");
  if (type.IsObjectType()) {
    __ Jump(is_not_instance_lbl);
  if (type.IsFunctionType()) {
    return GenerateFunctionTypeTest(source, type, is_instance_lbl,
                                    is_not_instance_lbl);
  if (type.IsRecordType()) {
  if (type.IsInstantiated()) {
    if (type_class.NumTypeArguments() > 0) {
      return GenerateInstantiatedTypeWithArgumentsTest(
          source, type, is_instance_lbl, is_not_instance_lbl);
    const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
        source, type, is_instance_lbl, is_not_instance_lbl);
    if (has_fall_through) {
      return GenerateSubtype1TestCacheLookup(
          source, type_class, is_instance_lbl, is_not_instance_lbl);
  return GenerateUninstantiatedTypeTest(source, type, is_instance_lbl,
                                        is_not_instance_lbl);

FlowGraphCompiler::TypeTestStubKind
FlowGraphCompiler::GetTypeTestStubKindForTypeParameter(
    const TypeParameter& type_param) {
  bound = bound.UnwrapFutureOr();
  return !bound.IsTopTypeForSubtyping() && !bound.IsObjectType() &&
                 !bound.IsDartFunctionType() && bound.IsType()
             ? TypeTestStubKind::kTestTypeFourArgs
             : TypeTestStubKind::kTestTypeSixArgs;

SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
    const InstructionSource& source,
    const Class& type_class,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  ASSERT(!type_class.IsObjectClass());
  __ Comment("Subtype1TestCacheLookup");
#if defined(TARGET_ARCH_IA32)
  __ PushRegister(kScratch2Reg);
  static_assert(kScratch1Reg != kScratch2Reg,
                "Scratch registers must be distinct");
  __ LoadClassById(kScratch1Reg, kScratch2Reg);
#if defined(TARGET_ARCH_IA32)
  __ PopRegister(kScratch2Reg);
  __ LoadCompressedFieldFromOffset(
      kScratch1Reg, kScratch1Reg,
      compiler::target::Class::super_type_offset());
  __ CompareObject(kScratch1Reg, Object::null_object());
  __ BranchIf(EQUAL, is_not_instance_lbl);
  __ LoadTypeClassId(kScratch1Reg, kScratch1Reg);
  __ CompareImmediate(kScratch1Reg, type_class.id());
  __ BranchIf(EQUAL, is_instance_lbl);
  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeOneArg,
                                     is_instance_lbl, is_not_instance_lbl);

FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeWithArgumentsTest");
  ASSERT(type_class.NumTypeArguments() > 0);
      smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
  const TypeArguments& type_arguments =
  const bool is_raw_type = type_arguments.IsNull() ||
                           type_arguments.IsRaw(0, type_arguments.Length());
    __ CompareImmediate(kScratchReg, type_class.id());
    __ BranchIf(EQUAL, is_instance_lbl);
  if (IsListClass(type_class)) {
    return GenerateSubtype1TestCacheLookup(source, type_class, is_instance_lbl,
                                           is_not_instance_lbl);
  if (type_arguments.Length() == 1) {
    const AbstractType& tp_argument =
    if (tp_argument.IsTopTypeForSubtyping()) {
      return GenerateSubtype1TestCacheLookup(
          source, type_class, is_instance_lbl, is_not_instance_lbl);
  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeTwoArgs,
                                     is_instance_lbl, is_not_instance_lbl);

bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeNoArgumentsTest");
  ASSERT(type_class.NumTypeArguments() == 0);
  const bool smi_is_ok =
      smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
  if (type.IsBoolType()) {
    __ CompareImmediate(kScratchReg, kBoolCid);
    __ BranchIf(EQUAL, is_instance_lbl);
    __ Jump(is_not_instance_lbl);
  if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
        is_not_instance_lbl);
  if (type.IsStringType()) {
  if (type.IsDartFunctionType()) {
    __ CompareImmediate(kScratchReg, kClosureCid);
    __ BranchIf(EQUAL, is_instance_lbl);
  if (type.IsDartRecordType()) {
    __ CompareImmediate(kScratchReg, kRecordCid);
    __ BranchIf(EQUAL, is_instance_lbl);

SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("UninstantiatedTypeTest");
  if (type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(type);
        type_param.IsClassTypeParameter()
    __ CompareObject(kTypeArgumentsReg, Object::null_object());
    __ BranchIf(EQUAL, is_instance_lbl);
    __ LoadCompressedFieldFromOffset(
        kScratchReg, kTypeArgumentsReg,
        compiler::target::TypeArguments::type_at_offset(type_param.index()));
    __ CompareObject(kScratchReg, Object::dynamic_type());
    __ BranchIf(EQUAL, is_instance_lbl);
    __ BranchIf(EQUAL, is_instance_lbl);
    __ CompareObject(kScratchReg, Object::void_type());
    __ BranchIf(EQUAL, is_instance_lbl);
    compiler::Label not_smi;
    __ BranchIf(EQUAL, is_instance_lbl);
    __ BranchIf(EQUAL, is_instance_lbl);
    const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param);
    return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
                                       is_not_instance_lbl);
  if (type.IsType()) {
    if (!type.IsFutureOrType()) {
    const TypeTestStubKind test_kind =
        type.IsInstantiated(kFunctions) ? TypeTestStubKind::kTestTypeThreeArgs
                                        : TypeTestStubKind::kTestTypeFourArgs;
    return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
                                       is_not_instance_lbl);

  if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) {
                   : &is_not_instance);
  GenerateInlineInstanceof(source, type, &is_instance, &is_not_instance);
      UntaggedPcDescriptors::kOther, locs, deopt_id,
  __ Bind(&is_not_instance);
  __ Bind(&is_instance);

#if !defined(TARGET_ARCH_IA32)
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
  const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
  const auto& stub_entry =
  __ Call(stub_entry);
      is_not_instance_lbl);
  return type_test_cache.ptr();
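// AssertAssignable (fragments below): a top type needs no check. Otherwise
// the destination type and name are placed in the object pool next to a
// SubtypeTestCache slot and the type-testing stub (TTS) is invoked, via a
// PC-relative call when CanPcRelativeCall(dst_type) allows it; the
// caller-side checks that follow appear to implement fast paths for type
// parameters, function/record types and types admitting a class-id range
// check.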
  const auto& dst_type =
          ? AbstractType::Cast(
          : Object::null_abstract_type();
  if (!dst_type.IsNull()) {
    ASSERT(dst_type.IsFinalized());
    if (dst_type.IsTopTypeForSubtyping()) return;
  if (dst_type.IsNull()) {
    __ Comment("AssertAssignable for runtime type");
    __ Comment("AssertAssignable for compile-time type");
    if (dst_type.IsTypeParameter()) {
  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
  ASSERT((sub_type_cache_index + 1) == dst_name_index);
  ASSERT(__ constant_pool_allowed());
  __ Comment("TTSCall");
      CanPcRelativeCall(dst_type)) {
        sub_type_cache_index);
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeTTSCallTypeTarget(dst_type);

  bool elide_info = false;
  auto output_dst_type = [&]() -> void {
      type_usage_info->UseTypeInAssertAssignable(dst_type);
  ASSERT(!FLAG_precompiled_mode);
    return output_dst_type();
  bool is_non_smi = false;
  } else if (!receiver_type->CanBeSmi()) {
  if (dst_type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(dst_type);
    __ CompareObject(kTypeArgumentsReg, Object::null_object());
    __ LoadCompressedFieldFromOffset(
        compiler::target::TypeArguments::type_at_offset(type_param.index()));
    return output_dst_type();
  if (dst_type.IsFunctionType() || dst_type.IsRecordType()) {
    return output_dst_type();
  if (auto const hi = thread()->hierarchy_info()) {
    if (hi->CanUseSubtypeRangeCheckFor(dst_type)) {
    } else if (IsListClass(type_class)) {

void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
  switch (instr->tag()) {
    case Instruction::kDropTemps:
          instr->AsDropTemps()->num_temps());
  Definition* defn = instr->AsDefinition();
  if ((defn != nullptr) && defn->HasTemp()) {
    FrameStatePush(defn);

void FlowGraphCompiler::FrameStatePush(Definition* defn) {
  if ((rep == kUnboxedDouble || rep == kUnboxedFloat32x4 ||
       rep == kUnboxedFloat64x2) &&
      defn->locs()->out(0).IsFpuRegister()) {
  ASSERT((rep == kTagged) || (rep == kUntagged) ||
  frame_state_.Add(rep);

void FlowGraphCompiler::FrameStatePop(intptr_t count) {
  frame_state_.TruncateTo(

bool FlowGraphCompiler::FrameStateIsSafeToCall() {
  for (intptr_t i = 0; i < frame_state_.length(); i++) {
    if (frame_state_[i] != kTagged) {

void FlowGraphCompiler::FrameStateClear() {
  frame_state_.TruncateTo(0);
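// Throwing slow paths: EmitNativeCode below either calls the runtime entry
// directly or, when a shared stub can be used, tail-calls one of the shared
// error stubs (null error, range error, write error, late-initialization
// error), each of which exists in with- and without-FPU-register-saving
// variants fetched from the object store.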
#define __ compiler->assembler()->

  __ Comment("slow path %s operation", name());
  const bool use_shared_stub =
  const bool live_fpu_registers =
  const intptr_t num_args =
  const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
  if (use_shared_stub) {
#if !defined(TARGET_ARCH_IA32)
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
      RESTORES_LR_FROM_FRAME({});
  __ CallRuntime(runtime_entry_, num_args);
  compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id,
  compiler->RecordSafepoint(locs, num_args);
  if (!FLAG_precompiled_mode ||
  } else if (compiler->is_optimizing()) {
    compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt,
  if (!use_shared_stub) {

      return "check null (nsm)";
      return "check null (arg)";
      return "check null (cast)";

      return kNullErrorRuntimeEntry;
      return kArgumentNullErrorRuntimeEntry;
      return kNullCastErrorRuntimeEntry;

    bool save_fpu_registers) {
  auto object_store = compiler->isolate_group()->object_store();
      return save_fpu_registers
                 ? object_store->null_error_stub_with_fpu_regs_stub()
                 : object_store->null_error_stub_without_fpu_regs_stub();
      return save_fpu_registers
                 ? object_store->null_arg_error_stub_with_fpu_regs_stub()
                 : object_store->null_arg_error_stub_without_fpu_regs_stub();
      return save_fpu_registers
                 ? object_store->null_cast_error_stub_with_fpu_regs_stub()
                 : object_store->null_cast_error_stub_without_fpu_regs_stub();

    bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
      compiler::target::Thread::unboxed_runtime_arg_offset());
      compiler::target::Thread::unboxed_runtime_arg_offset() + kInt64Size);

    bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->range_error_stub_with_fpu_regs_stub()
          : object_store->range_error_stub_without_fpu_regs_stub());

    bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->write_error_stub_with_fpu_regs_stub()
          : object_store->write_error_stub_without_fpu_regs_stub());

    bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->late_initialization_error_stub_with_fpu_regs_stub()
                ->late_initialization_error_stub_without_fpu_regs_stub());
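// FFI marshalling moves: EmitNativeMove and its helpers copy values between
// NativeLocations, rewriting the move through intermediate locations when
// payload and container sizes differ (sign/zero extension or truncation),
// when a 64-bit value must be split into two 32-bit halves on 32-bit targets,
// or when a constant or pair Location has to be staged before the final move.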
  if (destination.IsBoth()) {
    const auto& both = destination.AsBoth();
    const auto& both = source.AsBoth();
  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const intptr_t src_payload_size = src_payload_type.SizeInBytes();
  const intptr_t dst_payload_size = dst_payload_type.SizeInBytes();
  const intptr_t src_container_size = src_container_type.SizeInBytes();
  const intptr_t dst_container_size = dst_container_type.SizeInBytes();
  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
  if (source.Equals(destination) &&
      src_payload_type.Equals(dst_payload_type) &&
      src_container_type.Equals(dst_container_type)) {
#if defined(TARGET_ARCH_RISCV64)
    EmitNativeMoveArchitecture(destination, source);
  if (src_payload_type.IsInt() && dst_payload_type.IsInt() &&
      (src_payload_size != src_container_size ||
       dst_payload_size != dst_container_size)) {
    if (source.IsStack() && src_container_size > src_payload_size) {
          source.WithOtherNativeType(zone_, src_payload_type, src_payload_type),
    if (src_payload_size <= dst_payload_size &&
        src_container_size >= dst_container_size) {
              zone_, dst_container_type, dst_container_type),
          source.WithOtherNativeType(
              zone_, dst_container_type, dst_container_type),
    if (src_payload_size >= dst_payload_size &&
        dst_container_size > dst_payload_size) {
              dst_container_type),
          source.WithOtherNativeType(zone_, dst_payload_type, dst_payload_type),
  ASSERT(src_payload_size == src_container_size);
  ASSERT(dst_payload_size == dst_container_size);
  if (compiler::target::kWordSize == 4 && src_container_size == 8 &&
      dst_container_size == 8 && !source.IsFpuRegisters() &&
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
    const auto& intermediate =
        zone_, dst_payload_type, dst_container_type, scratch);
  const bool sign_or_zero_extend = dst_container_size > src_container_size;
  if (sign_or_zero_extend && destination.IsStack()) {
    const auto& intermediate =
        source.WithOtherNativeType(zone_, dst_payload_type, dst_container_type);
  EmitNativeMoveArchitecture(destination, source);

    for (intptr_t i : {0, 1}) {
          zone_, src_loc, src_type, i);
  if (src_type == kUnboxedInt64 &&
      dst.container_type().AsPrimitive().representation() ==
        src.WithOtherNativeType(zone_, dst.container_type(),
                                dst.container_type()),

    for (intptr_t i : {0, 1}) {
          zone_, dst_loc, dst_type, i);
  if (dst_type == kUnboxedInt64 &&
      src.container_type().AsPrimitive().representation() ==
    EmitNativeMove(dst.WithOtherNativeType(zone_, src.container_type(),
                                           src.container_type()),

  ASSERT(src.IsConstant() || src.IsPairLocation());
  const auto& dst_type = dst.payload_type();
  if (dst.IsExpressibleAsLocation() &&
      dst_type.IsExpressibleAsRepresentation() &&
      dst_type.AsRepresentationOverApprox(zone_) == src_type) {
    const Location dst_loc = dst.AsLocation();
  if (dst_type.IsInt()) {
    ASSERT(dst_type.IsFloat());
  if (src.IsPairLocation()) {
    for (intptr_t i : {0, 1}) {
      const auto& intermediate_native =
      EmitMove(intermediate, src.AsPairLocation()->At(i), temp);
      EmitNativeMove(dst.Split(zone_, 2, i), intermediate_native, temp);
    const auto& intermediate_native =

bool FlowGraphCompiler::CanPcRelativeCall(const Function& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&

bool FlowGraphCompiler::CanPcRelativeCall(const Code& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
         !target.InVMIsolateHeap() &&

bool FlowGraphCompiler::CanPcRelativeCall(const AbstractType& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
         !target.InVMIsolateHeap() &&
         LoadingUnit::kRootId);
static SubtypeTestCachePtr New(intptr_t num_inputs)
static constexpr intptr_t kMaxInputs
Instruction * instruction() const
virtual void ReleaseTemporary()=0
virtual Register AllocateTemporary()=0
LongJumpScope * long_jump_base() const
HierarchyInfo * hierarchy_info() const
static Thread * Current()
TypeUsageInfo * type_usage_info() const
CompilerState & compiler_state()
IsolateGroup * isolate_group() const
virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler *compiler)
virtual intptr_t GetNumberOfArgumentsForRuntimeCall()
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
virtual void AddMetadataForRuntimeCall(FlowGraphCompiler *compiler)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
virtual const char * name()=0
static const TokenPosition kMinSource
bool IsClassTypeParameter() const
static constexpr T Maximum(T x, T y)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
intptr_t CodeSize() const
ObjectPoolBuilder & object_pool_builder()
static bool EmittingComments()
intptr_t InsertAlignedRelocation(BSS::Relocation reloc)
void mark_should_be_aligned()
void Comment(const char *format, ...) PRINTF_ATTRIBUTE(2, 3)
void Jump(Label *label, JumpDistance distance=kFarJump)
void CompareImmediate(Register rn, int32_t value, Condition cond)
void set_constant_pool_allowed(bool b)
void Align(intptr_t alignment, intptr_t offset)
void BranchIf(Condition condition, Label *label, JumpDistance distance=kFarJump)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void Bind(Label *label) override
void PopRegister(Register r)
void Drop(intptr_t stack_elements)
void set_lr_state(compiler::LRState b)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void Breakpoint() override
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
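The assembler helpers listed above (CompareImmediate, BranchIf, Bind, AddImmediate) compose in the usual compare-then-branch pattern. A minimal sketch, assuming the conventional __ shorthand for the current assembler and ARM-style names (R0, EQ, AL); the control flow itself is made up for illustration:

  compiler::Label done;
  __ CompareImmediate(R0, 0, AL);   // set condition flags from R0
  __ BranchIf(EQ, &done);           // skip the decrement when R0 == 0
  __ AddImmediate(R0, -1);          // otherwise decrement in place
  __ Bind(&done);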
static bool Intrinsify(const ParsedFunction &parsed_function, FlowGraphCompiler *compiler)
intptr_t FindObject(const Object &obj, ObjectPoolBuilderEntry::Patchability patchable=ObjectPoolBuilderEntry::kNotPatchable, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
virtual bool IsFpuRegisters() const
virtual bool IsBoth() const
const NativeType & container_type() const
virtual bool IsStack() const
virtual NativeLocation & Split(Zone *zone, intptr_t num_parts, intptr_t index) const
static NativeLocation & FromLocation(Zone *zone, Location loc, Representation rep)
virtual NativeLocation & WithOtherNativeType(Zone *zone, const NativeType &new_payload_type, const NativeType &new_container_type) const =0
static NativeLocation & FromPairLocation(Zone *zone, Location loc, Representation rep, intptr_t index)
const BothNativeLocations & AsBoth() const
const NativeType & payload_type() const
virtual Representation AsRepresentation() const
virtual NativePrimitiveType & Split(Zone *zone, intptr_t part) const
virtual intptr_t SizeInBytes() const =0
static NativePrimitiveType & FromRepresentation(Zone *zone, Representation rep)
#define THR_Print(format,...)
#define DECLARE_FLAG(type, name)
#define DEFINE_FLAG(type, name, default_value, comment)
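For orientation, a sketch of how a DECLARE_FLAG/DEFINE_FLAG pair is typically wired up; the flag name my_trace_pass is hypothetical, and the expansion to a global FLAG_<name> variable is assumed from the VM's flag machinery:

  DECLARE_FLAG(bool, my_trace_pass);   // in a header: makes FLAG_my_trace_pass visible
  DEFINE_FLAG(bool, my_trace_pass, false, "Trace my hypothetical pass.");  // in one .cc file

  void MaybeTrace() {
    if (FLAG_my_trace_pass) {
      THR_Print("my pass: tracing enabled\n");
    }
  }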
Dart_NativeFunction function
word ToRawSmi(const dart::Object &a)
const FpuRegister kNoFpuRegister
static FpuRegister AllocateFreeFpuRegister(bool *blocked_registers)
const RegList kReservedCpuRegisters
static bool IsPopper(Instruction *instr)
constexpr intptr_t kInt64Size
static const Code & StubEntryFor(const ICData &ic_data, bool optimized)
static bool IsPusher(Instruction *instr)
const int kNumberOfFpuRegisters
Location LocationRemapForSlowPath(Location loc, Definition *def, intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
constexpr RegList kDartAvailableCpuRegs
constexpr int kRegisterAllocationBias
static Register AllocateFreeRegister(bool *blocked_registers)
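A plausible shape for AllocateFreeRegister above: scan the register file for the first entry that is not yet blocked and mark it as taken. This is a sketch only, assuming kNumberOfCpuRegisters, kNoRegister, and UNREACHABLE from the VM's headers; the real allocator may additionally honor kDartAvailableCpuRegs and kRegisterAllocationBias:

  static Register AllocateFreeRegisterSketch(bool* blocked_registers) {
    for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; ++reg) {
      if (!blocked_registers[reg]) {
        blocked_registers[reg] = true;   // reserve it so later calls skip it
        return static_cast<Register>(reg);
      }
    }
    UNREACHABLE();  // callers are expected to leave at least one register free
    return kNoRegister;
  }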
const intptr_t kPreferredLoopAlignment
const char *const function_name
static constexpr intptr_t kInvalidTryIndex
const int kFpuRegisterSize
const intptr_t count_without_type_args
const intptr_t type_args_len
const intptr_t size_with_type_args
const intptr_t size_without_type_args
ArrayPtr ToArgumentsDescriptor() const
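The count/size fields above differ only by the single stack slot occupied by the type-argument vector when type_args_len is non-zero. A hypothetical helper making that relationship explicit (the struct and method names are illustrative, not the VM's ArgumentsInfo API):

  struct ArgCountsSketch {
    intptr_t type_args_len;
    intptr_t count_without_type_args;
    intptr_t size_without_type_args;

    // The type-argument vector, when present, is passed as one extra argument.
    intptr_t count_with_type_args() const {
      return count_without_type_args + (type_args_len > 0 ? 1 : 0);
    }
    intptr_t size_with_type_args() const {
      return size_without_type_args + (type_args_len > 0 ? 1 : 0);
    }
  };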
static constexpr Register kResultReg
static constexpr FpuRegister kValueReg
bool IsIllegalRange() const
static constexpr Register kFieldReg
static constexpr bool IsUnboxedInteger(Representation rep)
static constexpr Register kSubtypeTestCacheReg
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kSubtypeTestCacheResultReg
static constexpr Register kScratchReg
static constexpr Register kInstanceOfResultReg
void set_kind(VarInfoKind kind)