DEFINE_FLAG(bool,
            trace_inlining_intervals,
            false,
            "Inlining interval diagnostics");
DEFINE_FLAG(bool, enable_peephole, true, "Enable peephole optimization");
DEFINE_FLAG(bool,
            enable_simd_inline,
            true,
            "Enable inlining of SIMD related method calls.");
DEFINE_FLAG(int,
            min_optimization_counter_threshold,
            5000,
            "The minimum invocation count for a function.");
DEFINE_FLAG(int,
            optimization_counter_scale,
            2000,
            "The scale of invocation count, by size of the function.");
DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
DEFINE_FLAG(bool, force_indirect_calls, false, "Do not emit PC relative calls.");
DEFINE_FLAG(bool,
            align_all_loops,
            false,
            "Align all loop headers to 32 byte boundary");
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
compiler::LRState ComputeInnerLRState(const FlowGraph& flow_graph) {
  bool has_native_entries = false;
  for (intptr_t i = 0; i < entry->SuccessorCount(); i++) {
    if (entry->SuccessorAt(i)->IsNativeEntry()) {
      has_native_entries = true;
    }
  }

  auto state = compiler::LRState::OnEntry();
  if (has_native_entries) {
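// CompilerDeoptInfo helpers: patch the deoptimization environment so that
// outgoing arguments refer to the locations assigned to their MoveArgument
// instructions, and register object materializations with the deopt-info
// builder.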
void CompilerDeoptInfo::AllocateOutgoingArguments(Environment* env) {
  if (env == nullptr) return;
  if (it.CurrentLocation().IsInvalid()) {
    if (auto move_arg = it.CurrentValue()->definition()->AsMoveArgument()) {
      it.SetCurrentLocation(move_arg->locs()->out(0));

void CompilerDeoptInfo::EmitMaterializations(Environment* env,
                                             DeoptInfoBuilder* builder) {
  for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
    if (it.CurrentLocation().IsInvalid()) {
      MaterializeObjectInstr* mat =
          it.CurrentValue()->definition()->AsMaterializeObject();
      builder->AddMaterialization(mat);
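// FlowGraphCompiler constructor: caches the current thread and zone, takes
// the assembler and flow graph, and allocates the per-compilation builders
// (PC descriptors, compressed stack maps, code source map, catch entry moves)
// plus per-block bookkeeping sized from the codegen block order.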
    : thread_(Thread::Current()),
      zone_(Thread::Current()->zone()),
      assembler_(assembler),
      parsed_function_(parsed_function),
      flow_graph_(*flow_graph),
      block_order_(*flow_graph->CodegenBlockOrder()),
      current_block_(nullptr),
      exception_handlers_list_(nullptr),
      pc_descriptors_list_(nullptr),
      compressed_stackmaps_builder_(nullptr),
      code_source_map_builder_(nullptr),
      catch_entry_moves_maps_builder_(nullptr),
      block_info_(block_order_.length()),
      static_calls_target_table_(),
      is_optimizing_(is_optimizing),
      speculative_policy_(speculative_policy),
      may_reoptimize_(false),
      intrinsic_mode_(false),
      double_class_(
          Class::ZoneHandle(isolate_group()->object_store()->double_class())),
      mint_class_(
          Class::ZoneHandle(isolate_group()->object_store()->mint_class())),
      float32x4_class_(Class::ZoneHandle(
          isolate_group()->object_store()->float32x4_class())),
      float64x2_class_(Class::ZoneHandle(
          isolate_group()->object_store()->float64x2_class())),
      int32x4_class_(
          Class::ZoneHandle(isolate_group()->object_store()->int32x4_class())),
      pending_deoptimization_env_(nullptr),
      deopt_id_to_ic_data_(deopt_id_to_ic_data),
      edge_counters_array_(Array::ZoneHandle()) {
  if (deopt_id_to_ic_data == nullptr) {
    deopt_id_to_ic_data_ = nullptr;
  } else {
    deopt_id_to_ic_data_->EnsureLength(len, nullptr);
  }
#if defined(PRODUCT)
  const bool stack_traces_only = true;
#else
  const bool stack_traces_only = false;
#endif
  ASSERT(inline_id_to_function[0]->ptr() ==
         parsed_function.function().ptr());
  code_source_map_builder_ = new (zone_)
      CodeSourceMapBuilder(zone_, stack_traces_only, caller_inline_id,
                           inline_id_to_token_pos, inline_id_to_function);
  compressed_stackmaps_builder_ =
  exception_handlers_list_ =
#if defined(DART_PRECOMPILER)

  for (int i = 0; i < block_order_.length(); ++i) {
    block_info_.Add(new (zone()) BlockInfo());
  }
      if (auto* branch = current->AsBranch()) {
        current = branch->comparison();
      }
      if (auto* instance_call = current->AsInstanceCall()) {
        const ICData* ic_data = instance_call->ic_data();
        may_reoptimize_ = true;

    const intptr_t num_counters = flow_graph_.preorder().length();
    const Array& edge_counters =
    for (intptr_t i = 0; i < num_counters; ++i) {
      edge_counters.SetAt(i, Object::smi_zero());
    }
    edge_counters_array_ = edge_counters.ptr();
  if (FLAG_stacktrace_every > 0 || FLAG_deoptimize_every > 0 ||
      (isolate_group()->reload_every_n_stack_overflow_checks() > 0)) {

  if (FLAG_stacktrace_filter != nullptr &&
      strstr(parsed_function().function().ToFullyQualifiedCString(),
             FLAG_stacktrace_filter) != nullptr) {

      strstr(parsed_function().function().ToFullyQualifiedCString(),
             FLAG_deoptimize_filter) != nullptr) {
  return !block->IsGraphEntry() && !block->IsFunctionEntry() &&
         !block->IsCatchBlockEntry() && !block->IsOsrEntry() &&
         block->next()->IsGoto() &&
         !block->next()->AsGoto()->HasNonRedundantParallelMove();

  BlockEntryInstr* target = block->next()->AsGoto()->successor();
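// Block compaction: blocks that only contain a goto (and no non-redundant
// parallel move) are skipped during emission, and every block records the
// next non-empty label so jumps can fall through where possible.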
void FlowGraphCompiler::CompactBlocks() {
  compiler::Label* nonempty_label = nullptr;

  BlockInfo* block_info = block_info_[block->postorder_number()];
  block_info->set_next_nonempty_label(nonempty_label);

  block_info->set_next_nonempty_label(nonempty_label);
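// AOT only: catch entry moves describe how live values are shuffled into the
// fixed locations expected by a catch block when an exception is thrown. The
// helpers below translate a source Location into a stack index and an encoded
// CatchEntryMove.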
#if defined(DART_PRECOMPILER)
static intptr_t LocationToStackIndex(const Location& src) {

static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
                                        Representation src_type,
                                        const Location& src,
                                        intptr_t dst_index) {
  if (src.IsConstant()) {
    if (src.constant().ptr() == Object::optimized_out().ptr()) {
      return CatchEntryMove();
    }
    const intptr_t pool_index =
        assembler->object_pool_builder().FindObject(src.constant());
        pool_index, dst_index);
  }
  if (src.IsPairLocation()) {
    const auto lo_loc = src.AsPairLocation()->At(0);
    const auto hi_loc = src.AsPairLocation()->At(1);
    ASSERT(lo_loc.IsStackSlot() && hi_loc.IsStackSlot());
            LocationToStackIndex(hi_loc)),

    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
#if defined(DART_PRECOMPILER)
    auto param = (*idefs)[i]->AsParameter();
    if (param == nullptr) continue;
    if (dst.IsRegister()) continue;
    const auto src_type =
        env->ValueAt(i)->definition()->representation();
    const auto move = CatchEntryMoveFor(assembler(), src_type, src,
                                        LocationToStackIndex(dst));
    if (!move.IsRedundant()) {
      catch_entry_moves_maps_builder_->Append(move);
    }
  catch_entry_moves_maps_builder_->EndMapping();

  if (env != nullptr) {
    const intptr_t dest_deopt_id = env->LazyDeoptToBeforeDeoptId()
                                                     intptr_t yield_index) {

void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
    AllocateRegistersLocally(instr);

#define __ assembler()->

void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
  Definition* defn = instr->AsDefinition();
  if (defn != nullptr && defn->HasTemp()) {
    if (value.IsRegister()) {
    } else if (value.IsFpuRegister()) {
      switch (instr->representation()) {
        case kUnboxedDouble:
          stub = &StubCode::BoxDouble();
        case kUnboxedFloat32x4:
          stub = &StubCode::BoxFloat32x4();
        case kUnboxedFloat64x2:
          stub = &StubCode::BoxFloat64x2();
      instr->locs()->live_registers()->Clear();
      if (instr->representation() == kUnboxedDouble) {
          *stub, UntaggedPcDescriptors::kOther, instr->locs());
    } else if (value.IsConstant()) {
      __ PushObject(value.constant());
      __ PushValueAtOffset(value.base_reg(), value.ToStackSlotOffset());
void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
  if (!instr->token_pos().IsReal()) {
  const InstructionSource& source = instr->source();
  const intptr_t inlining_id = source.inlining_id < 0 ? 0 : source.inlining_id;
  ASSERT(instr->env() == nullptr ||
  if (script.GetTokenLocation(source.token_pos, &line_nr)) {

  if (auto def = instr->AsDefinition()) {

bool FlowGraphCompiler::IsPeephole(Instruction* instr) const {
#if !defined(TARGET_ARCH_IA32)

#if !defined(TARGET_ARCH_IA32)
  GenerateDeferredCode();

  for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) {
    indirect_gotos_[i]->ComputeOffsetTable(this);
  }

#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
      Symbols::vm_align_loops(),

  if (FLAG_precompiled_mode) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  const auto inner_lr_state = ComputeInnerLRState(flow_graph());
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
  const bool should_align_loops =
      FLAG_align_all_loops || IsMarkedWithAlignLoops(function());
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    if (entry->IsFunctionEntry() || entry->IsNativeEntry()) {
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
    ASSERT(pending_deoptimization_env_ == nullptr);
    pending_deoptimization_env_ = entry->env();
    set_current_instruction(entry);
    set_current_instruction(nullptr);
    pending_deoptimization_env_ = nullptr;
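// Per-instruction emission: each instruction gets a prologue (local register
// allocation in unoptimized code), its native code, and an epilogue that
// boxes/pushes any produced temporary unless the peephole optimization defers
// that push to the next instruction.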
    set_current_instruction(instr);
    if (FLAG_code_comments || FLAG_disassemble ||
        FLAG_disassemble_optimized) {
      if (FLAG_source_lines) {
        EmitSourceLine(instr);
      }
    }
    EmitInstructionPrologue(instr);
    ASSERT(pending_deoptimization_env_ == nullptr);
    pending_deoptimization_env_ = instr->env();
    pending_deoptimization_env_ = nullptr;
    if (IsPeephole(instr)) {
      ASSERT(top_of_stack_ == nullptr);
      top_of_stack_ = instr->AsDefinition();
    } else {
      EmitInstructionEpilogue(instr);
    }
    FrameStateUpdateWith(instr);
    set_current_instruction(nullptr);

    if (auto indirect_goto = instr->AsIndirectGoto()) {
      indirect_gotos_.Add(indirect_goto);
    }

  parsed_function_.Bailout("FlowGraphCompiler", reason);
  if (is_optimizing_) {

  const intptr_t stack_depth =
  return StackSize() - stack_depth - num_stack_locals;

  return block_info_[block_index]->jump_label();

  return block_info_[block_index]->WasCompacted();

  return block_info_[current_index]->next_nonempty_label();

  slow_path_code_.Add(code);
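// Emits out-of-line code after the main pass: the registered slow paths and
// the deoptimization stubs collected in deopt_infos_.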
void FlowGraphCompiler::GenerateDeferredCode() {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  const auto lr_state = ComputeInnerLRState(flow_graph());
  for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    set_current_instruction(nullptr);
  }
  const InstructionSource deopt_source(TokenPosition::kDeferredDeoptInfo,
  for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
    deopt_infos_[i]->GenerateCode(this, i);
  }

                                      intptr_t yield_index) {
  if (FLAG_precompiled_mode && (kind == UntaggedPcDescriptors::kDeopt)) return;
  pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, root_pos,
                                      try_index, yield_index);
#if defined(DART_PRECOMPILER)
  if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) return;
  const intptr_t name_index =

  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(

void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
  ASSERT(!stub_code.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, &stub_code, nullptr));

void FlowGraphCompiler::AddPcRelativeTailCallStubTarget(const Code& stub_code) {
  ASSERT(!stub_code.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, &stub_code, nullptr));

void FlowGraphCompiler::AddPcRelativeTTSCallTypeTarget(
    const AbstractType& dst_type) {
  ASSERT(!dst_type.IsNull());
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
      nullptr, nullptr, &dst_type));

void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
  const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
  static_calls_target_table_.Add(new (zone()) StaticCallsStruct(

  dispatch_table_call_targets_.Add(selector);

  ASSERT(!FLAG_precompiled_mode);
  if (env != nullptr) {
  deopt_infos_.Add(info);

  deopt_infos_.Add(info);
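// RecordSafepoint builds a stack map for the current call: a bitmap over the
// spill area (and outgoing arguments) marking which slots hold tagged
// pointers that the GC must visit.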
                                      intptr_t slow_path_argument_count) {
  const intptr_t spill_area_size =
  ASSERT(registers != nullptr);
  const intptr_t kFpuRegisterSpillFactor =
  bitmap.SetLength(spill_area_size);

  const intptr_t args_count = instr->ArgumentCount();
  for (intptr_t i = 0; i < args_count; i++) {
    const auto move_arg =
        instr->ArgumentValueAt(i)->instruction()->AsMoveArgument();
    const auto rep = move_arg->representation();
    if (move_arg->is_register_move()) {
    ASSERT(rep == kTagged || rep == kUnboxedInt64 || rep == kUnboxedDouble);
                  "int and double are of the same size");
    const bool is_tagged = move_arg->representation() == kTagged;
    const intptr_t num_bits =
    const intptr_t last_arg_bit =
        (spill_area_size - 1) - move_arg->sp_relative_index();
    bitmap.SetRange(last_arg_bit - (num_bits - 1), last_arg_bit, is_tagged);
  }
  ASSERT(slow_path_argument_count == 0 || !using_shared_stub);

  intptr_t spill_area_bits = bitmap.Length();
  while (spill_area_bits > 0) {
    if (!bitmap.Get(spill_area_bits - 1)) {
  bitmap.SetLength(spill_area_bits);

  for (intptr_t j = 0; j < kFpuRegisterSpillFactor; ++j) {
  if (using_shared_stub) {
  for (intptr_t i = 0; i < slow_path_argument_count; ++i) {
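// SlowPathEnvironmentFor copies the instruction's environment and rewrites
// register locations to the stack slots where the slow path (or the shared
// stub) saves the live CPU/FPU registers.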
                                                 intptr_t num_slow_path_args) {
  const bool shared_stub_save_fpu_registers =
  ASSERT(!using_shared_stub || num_slow_path_args == 0);
  if (env == nullptr) {

      env->DeepCopy(zone(), env->Length() - env->LazyDeoptPruneCount());
  if (using_shared_stub) {
    const intptr_t kFpuRegisterSpillFactor =
      next_slot += kFpuRegisterSpillFactor;
      fpu_reg_slots[i] = (next_slot - 1);
      if (using_shared_stub && shared_stub_save_fpu_registers) {
        next_slot += kFpuRegisterSpillFactor;
      }
      fpu_reg_slots[i] = -1;

    cpu_reg_slots[i] = next_slot++;
    if (using_shared_stub) next_slot++;
    cpu_reg_slots[i] = -1;

    Location loc = it.CurrentLocation();
        loc, value->definition(), cpu_reg_slots, fpu_reg_slots));

  return slow_path_env;

  return intrinsic_slow_path_label_;
  if (FLAG_precompiled_mode) {
    if (FLAG_trace_compiler) {
          "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
        deopt_id, Object::speculative_inlining_error());

  ASSERT(pending_deoptimization_env_ != nullptr);
  if (pending_deoptimization_env_->IsHoisted()) {
      deopt_id, reason, flags, pending_deoptimization_env_);
  deopt_infos_.Add(stub);

  ASSERT(exception_handlers_list_ != nullptr);
  code.set_exception_handlers(handlers);

  ASSERT(pc_descriptors_list_ != nullptr);
  if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
  code.set_pc_descriptors(descriptors);

  if (FLAG_precompiled_mode) {
    return Array::empty_array().ptr();
  }
  const intptr_t incoming_arg_count =
  if (deopt_info_table_size == 0) {
    return Object::empty_array().ptr();
  }
  const Array& array =
  for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
    info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
        deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
  }

  ASSERT(compressed_stackmaps_builder_ != nullptr);
  code.set_compressed_stackmaps(maps);
  if (code.is_optimized()) {
    code.set_var_descriptors(Object::empty_var_descriptors());

    var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
    code.set_var_descriptors(var_descs);

#if defined(DART_PRECOMPILER)
  if (FLAG_precompiled_mode) {
    code.set_catch_entry_moves_maps(maps);

  const auto& calls = static_calls_target_table_;
  const auto& targets =
  for (intptr_t i = 0; i < calls.length(); i++) {
    auto entry = calls[i];
    kind_type_and_offset =
    auto view = entries[i];
    if (entry->function != nullptr) {
      target = entry->function;
    }
    if (entry->code != nullptr) {
    if (entry->dst_type != nullptr) {
  }
  code.set_static_calls_target_table(targets);

  const Array& inlined_id_array =
  code.set_inlined_id_to_function(inlined_id_array);

  code.set_code_source_map(map);

  code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens);

  if (TryIntrinsifyHelper()) {
    fully_intrinsified_ = true;
  }
bool FlowGraphCompiler::TryIntrinsifyHelper() {
  ASSERT(FLAG_precompiled_mode ||

      return StubCode::OneArgOptimizedCheckInlineCacheWithExactnessCheck();
      return StubCode::OneArgCheckInlineCacheWithExactnessCheck();
      return optimized ? StubCode::OneArgOptimizedCheckInlineCache()
                       : StubCode::OneArgCheckInlineCache();
      return optimized ? StubCode::TwoArgsOptimizedCheckInlineCache()
                       : StubCode::TwoArgsCheckInlineCache();
                                           const ICData& ic_data_in,
                                           bool receiver_can_be_smi) {
  if (FLAG_precompiled_mode) {
                         receiver_can_be_smi);
        ic_data, deopt_id, source, locs, entry_kind);
        deopt_id, source, locs, entry_kind);

                                         const ICData& ic_data_in,
  if (call_ic_data.IsNull()) {
    const intptr_t kNumArgsChecked = 0;
            kNumArgsChecked, rebind_rule)
    call_ic_data = call_ic_data.Original();
                           locs, call_ic_data, entry_kind);

  if (type.IsNumberType()) {
    args.Add(kDoubleCid);
  } else if (type.IsIntType()) {
  } else if (type.IsDoubleType()) {
    args.Add(kDoubleCid);
  }
  CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);

  args.Add(kOneByteStringCid);
  args.Add(kTwoByteStringCid);
  CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);

                (kGrowableObjectArrayCid == kArrayCid + 2));
  ranges.Add({kArrayCid, kGrowableObjectArrayCid});
#if defined(INCLUDE_IL_PRINTER)

  return FLAG_reorder_basic_blocks &&

    if (!blocked_registers[regno]) {
      blocked_registers[regno] = true;
      return static_cast<Register>(regno);
    }

    if (!blocked_registers[regno]) {
      blocked_registers[regno] = true;
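// Local register allocation for unoptimized code: first block every register
// named by a fixed input, temp, or output constraint, then hand out the
// remaining free registers to unallocated locations, popping inputs from the
// stack in reverse order.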
void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
  instr->InitializeLocationSummary(zone(), false);
  LocationSummary* locs = instr->locs();

    blocked_fpu_registers[i] = false;

  for (intptr_t i = 0; i < locs->input_count(); i++) {
    if (loc.IsRegister()) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      ASSERT(!blocked_fpu_registers[fpu_reg]);
      blocked_fpu_registers[fpu_reg] = true;
    }
  }

  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    if (loc.IsRegister()) {
      ASSERT(!blocked_registers[loc.reg()]);
      blocked_registers[loc.reg()] = true;
    } else if (loc.IsFpuRegister()) {
      ASSERT(!blocked_fpu_registers[fpu_reg]);
      blocked_fpu_registers[fpu_reg] = true;
    }
  }

  if (top_of_stack_ != nullptr) {
    const intptr_t p = locs->input_count() - 1;
    if ((instr->RequiredInputRepresentation(p) == kTagged) &&
        (locs->in(p).IsUnallocated() || locs->in(p).IsConstant())) {
      if (peephole.IsRegister() && !blocked_registers[peephole.reg()]) {
        blocked_registers[peephole.reg()] = true;
      }
    }
  }

  if (locs->out(0).IsRegister()) {
    blocked_registers[locs->out(0).reg()] = true;
  } else if (locs->out(0).IsFpuRegister()) {
    blocked_fpu_registers[locs->out(0).fpu_reg()] = true;
  }

  ASSERT(!instr->IsMoveArgument());
  for (intptr_t i = locs->input_count() - 1; i >= 0; i--) {
    if (loc.IsRegister()) {
    } else if (loc.IsFpuRegister()) {
      fpu_reg = loc.fpu_reg();
    } else if (loc.IsUnallocated()) {
      switch (loc.policy()) {
          reg = fpu_unboxing_temp;
    if (top_of_stack_ != nullptr) {
      if (!loc.IsConstant()) {
      top_of_stack_ = nullptr;
    } else if (loc.IsConstant()) {
    if (!loc.IsConstant()) {
      switch (instr->RequiredInputRepresentation(i)) {
        case kUnboxedDouble:
          ASSERT(instr->SpeculativeModeOfInput(i) ==
        case kUnboxedFloat32x4:
        case kUnboxedFloat64x2:
          ASSERT(instr->SpeculativeModeOfInput(i) ==

  for (intptr_t i = 0; i < locs->temp_count(); i++) {
    if (loc.IsUnallocated()) {
      switch (loc.policy()) {
          locs->set_temp(i, loc);
          locs->set_temp(i, loc);

  Location result_location = locs->out(0);
  if (result_location.IsUnallocated()) {
    switch (result_location.policy()) {
        result_location = locs->in(0);
  locs->set_out(0, result_location);
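// Returns (and caches per deopt id) the ICData used by an instance call,
// optionally seeded with a known binary Smi operation target.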
    const String& target_name,
    const Array& arguments_descriptor,
    intptr_t num_args_tested,
    const Function& binary_smi_target) {
  if ((deopt_id_to_ic_data_ != nullptr) &&
      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
    const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
  if (!binary_smi_target.IsNull()) {
    ASSERT(num_args_tested == 2);
        arguments_descriptor, deopt_id,
        num_args_tested, ICData::kInstance, &cids,
        binary_smi_target, receiver_type);
        arguments_descriptor, deopt_id, num_args_tested,
        ICData::kInstance, receiver_type);
  if (deopt_id_to_ic_data_ != nullptr) {
    (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
  }
  ASSERT(!ic_data.is_static_call());

    const Array& arguments_descriptor,
    intptr_t num_args_tested,
  if ((deopt_id_to_ic_data_ != nullptr) &&
      ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
    const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
      arguments_descriptor, deopt_id,
      num_args_tested, rebind_rule));
  if (deopt_id_to_ic_data_ != nullptr) {
    (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
  }
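// The optimization counter threshold scales with the size of the function
// (FLAG_optimization_counter_scale * basic_blocks +
// FLAG_min_optimization_counter_threshold) and is clamped to the isolate
// group's configured threshold; re-optimization and regexp functions use
// their own flags.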
intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
    threshold = FLAG_reoptimization_counter_threshold;
    threshold = FLAG_regexp_optimization_counter_threshold;
    const auto configured_optimization_counter_threshold =
    ASSERT(basic_blocks > 0);
    threshold = FLAG_optimization_counter_scale * basic_blocks +
                FLAG_min_optimization_counter_threshold;
    if (threshold > configured_optimization_counter_threshold) {
      threshold = configured_optimization_counter_threshold;
    }
  if (threshold == 0) threshold = 1;

    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
    const Array& args_desc_array) {

    bool* class_is_abstract_return) {
  if (class_id < 0) return false;
  if (class_id >= class_table->NumCids()) return false;

  ClassPtr raw_class = class_table->At(class_id);
  if (raw_class == nullptr) return false;
  if (cls.IsNull()) return false;
  if (class_is_abstract_return != nullptr) {
      cls, name, args_desc, false));
  if (target_function.IsNull()) return false;
  *fn_return = target_function.ptr();
                                                    intptr_t total_ic_calls,
                                                    bool receiver_can_be_smi) {
  if (!FLAG_precompiled_mode) {
    if (FLAG_polymorphic_with_deopt) {
          AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
                      deopt_id, source, locs, complete, total_ic_calls,
                      call->entry_kind());
                      deopt_id, source, locs, complete, total_ic_calls,
                      call->entry_kind());
                    deopt_id, source, locs, true, total_ic_calls,
                    call->entry_kind());
      const ICData& unary_checks =
                           call->entry_kind(), receiver_can_be_smi);

#define __ assembler()->

void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
  for (const auto& id : class_ids) {
    __ CompareImmediate(class_id_reg, id);
    __ BranchIf(EQUAL, is_equal_lbl);
  }
  __ Jump(is_not_equal_lbl);
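// EmitTestAndCall dispatches a polymorphic call: it tests the receiver's
// class id against the sorted cid ranges in `targets` (Smi handled first),
// calls the matching target directly, and otherwise falls back to a
// megamorphic call or to the `failed` label / deoptimization.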
                                      intptr_t total_ic_calls,
  ASSERT(complete || (failed != nullptr));
  const Array& arguments_descriptor =
                arguments_descriptor);

  const int kNoCase = -1;
  int smi_case = kNoCase;
  int which_case_to_skip = kNoCase;

  int non_smi_length = length;
    const intptr_t start = targets[i].cid_start;
    if (start > kSmiCid) continue;
    const intptr_t end = targets[i].cid_end;
    if (end >= kSmiCid) {
      if (start == kSmiCid && end == kSmiCid) {
        which_case_to_skip = i;

  if (smi_case != kNoCase) {
    if (!(complete && non_smi_length == 0)) {
      EmitTestAndCallSmiBranch(non_smi_length == 0 ? failed : &after_smi_test,
                       UntaggedPcDescriptors::kOther, locs, function,
    if (match_found != nullptr) {
      __ Jump(match_found);
    }
    __ Bind(&after_smi_test);
    EmitTestAndCallSmiBranch(failed, true);

  if (non_smi_length == 0) {

  bool add_megamorphic_call = false;
  EmitTestAndCallLoadCid(EmitTestCidRegister());
    if (i == which_case_to_skip) continue;
    const bool is_last_check = (i == last_check);
    if (!is_last_check && !complete && count < (total_ic_calls >> 5)) {
      add_megamorphic_call = true;
    if (!complete || !is_last_check) {
                                  is_last_check ? failed : &next_test,
                                  EmitTestCidRegister(), targets[i], bias,
                     UntaggedPcDescriptors::kOther, locs, function,
    if (!is_last_check || add_megamorphic_call) {
      __ Jump(match_found);
    }
  if (add_megamorphic_call) {
                                  source_index, locs);
                                                 const Class& type_class,
  if (hi != nullptr) {
  __ CompareImmediate(class_id_reg, type_class.id());
  __ BranchIf(EQUAL, is_subtype);

                                              bool fall_through_if_inside) {
  if (fall_through_if_inside) {
  for (intptr_t i = 0; i < cid_ranges.length(); ++i) {
    const bool last_round = i == (cid_ranges.length() - 1);
    const bool jump_on_miss = last_round && fall_through_if_inside;
                             bias, jump_on_miss);

                                                 bool jump_on_miss) {
  const intptr_t cid_start = range.cid_start;

           .IsAbstractType()) ||

SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
  __ Comment("FunctionTypeTest");
  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeSixArgs,
                                     is_instance_lbl, is_not_instance_lbl);
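// Inline instanceof: emits fast checks for well-known cases (Object, function
// and record types, instantiated types with and without type arguments) and
// falls back to a SubtypeTestCache stub call for everything else.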
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InlineInstanceof");

  if (type.IsObjectType()) {
    __ Jump(is_not_instance_lbl);
  }
  if (type.IsFunctionType()) {
    return GenerateFunctionTypeTest(source, type, is_instance_lbl,
                                    is_not_instance_lbl);
  }
  if (type.IsRecordType()) {
  }

  if (type.IsInstantiated()) {
    if (type_class.NumTypeArguments() > 0) {
      return GenerateInstantiatedTypeWithArgumentsTest(
          source, type, is_instance_lbl, is_not_instance_lbl);
    }
    const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
        source, type, is_instance_lbl, is_not_instance_lbl);
    if (has_fall_through) {
      return GenerateSubtype1TestCacheLookup(
          source, type_class, is_instance_lbl, is_not_instance_lbl);
    }
  }
  return GenerateUninstantiatedTypeTest(source, type, is_instance_lbl,
                                        is_not_instance_lbl);

FlowGraphCompiler::TypeTestStubKind
FlowGraphCompiler::GetTypeTestStubKindForTypeParameter(
    const TypeParameter& type_param) {
  bound = bound.UnwrapFutureOr();
  return !bound.IsTopTypeForSubtyping() && !bound.IsObjectType() &&
                 !bound.IsDartFunctionType() && bound.IsType()
             ? TypeTestStubKind::kTestTypeFourArgs
             : TypeTestStubKind::kTestTypeSixArgs;
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
    const InstructionSource& source,
    const Class& type_class,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  ASSERT(!type_class.IsObjectClass());
  __ Comment("Subtype1TestCacheLookup");
#if defined(TARGET_ARCH_IA32)
  __ PushRegister(kScratch2Reg);
  static_assert(kScratch1Reg != kScratch2Reg,
                "Scratch registers must be distinct");
  __ LoadClassById(kScratch1Reg, kScratch2Reg);
#if defined(TARGET_ARCH_IA32)
  __ PopRegister(kScratch2Reg);
  __ LoadCompressedFieldFromOffset(
  __ CompareObject(kScratch1Reg, Object::null_object());
  __ BranchIf(EQUAL, is_not_instance_lbl);
  __ LoadTypeClassId(kScratch1Reg, kScratch1Reg);
  __ CompareImmediate(kScratch1Reg, type_class.id());
  __ BranchIf(EQUAL, is_instance_lbl);

  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeOneArg,
                                     is_instance_lbl, is_not_instance_lbl);
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeWithArgumentsTest");
  ASSERT(type_class.NumTypeArguments() > 0);
              smi_is_ok ? is_instance_lbl : is_not_instance_lbl);

  const TypeArguments& type_arguments =
  const bool is_raw_type = type_arguments.IsNull() ||
                           type_arguments.IsRaw(0, type_arguments.Length());
    __ CompareImmediate(kScratchReg, type_class.id());
    __ BranchIf(EQUAL, is_instance_lbl);
    if (IsListClass(type_class)) {
    return GenerateSubtype1TestCacheLookup(source, type_class, is_instance_lbl,
                                           is_not_instance_lbl);
  if (type_arguments.Length() == 1) {
    const AbstractType& tp_argument =
    if (tp_argument.IsTopTypeForSubtyping()) {
      return GenerateSubtype1TestCacheLookup(
          source, type_class, is_instance_lbl, is_not_instance_lbl);
    }
  }
  return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeTwoArgs,
                                     is_instance_lbl, is_not_instance_lbl);
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("InstantiatedTypeNoArgumentsTest");
  ASSERT(type_class.NumTypeArguments() == 0);

  const bool smi_is_ok =
              smi_is_ok ? is_instance_lbl : is_not_instance_lbl);

  if (type.IsBoolType()) {
    __ CompareImmediate(kScratchReg, kBoolCid);
    __ BranchIf(EQUAL, is_instance_lbl);
    __ Jump(is_not_instance_lbl);
  }
  if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
                            is_not_instance_lbl);
  }
  if (type.IsStringType()) {
  }
  if (type.IsDartFunctionType()) {
    __ CompareImmediate(kScratchReg, kClosureCid);
    __ BranchIf(EQUAL, is_instance_lbl);
  }
  if (type.IsDartRecordType()) {
    __ CompareImmediate(kScratchReg, kRecordCid);
    __ BranchIf(EQUAL, is_instance_lbl);
  }
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
    const InstructionSource& source,
    const AbstractType& type,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  __ Comment("UninstantiatedTypeTest");

  if (type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(type);
        type_param.IsClassTypeParameter()
    __ CompareObject(kTypeArgumentsReg, Object::null_object());
    __ BranchIf(EQUAL, is_instance_lbl);
    __ LoadCompressedFieldFromOffset(
        kScratchReg, kTypeArgumentsReg,
    __ CompareObject(kScratchReg, Object::dynamic_type());
    __ BranchIf(EQUAL, is_instance_lbl);
    __ BranchIf(EQUAL, is_instance_lbl);
    __ CompareObject(kScratchReg, Object::void_type());
    __ BranchIf(EQUAL, is_instance_lbl);

    compiler::Label not_smi;
    __ BranchIf(EQUAL, is_instance_lbl);
    __ BranchIf(EQUAL, is_instance_lbl);

    const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param);
    return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
                                       is_not_instance_lbl);
  }
  if (type.IsType()) {
    if (!type.IsFutureOrType()) {
    const TypeTestStubKind test_kind =
        type.IsInstantiated(kFunctions) ? TypeTestStubKind::kTestTypeThreeArgs
                                        : TypeTestStubKind::kTestTypeFourArgs;
    return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
                                       is_not_instance_lbl);
  if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) {
                  unwrapped_type.IsNullable() ? &is_instance : &is_not_instance);

  GenerateInlineInstanceof(source, type, &is_instance, &is_not_instance);
                   UntaggedPcDescriptors::kOther, locs, deopt_id,
  __ Bind(&is_not_instance);

#if !defined(TARGET_ARCH_IA32)

SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
  const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
  const auto& stub_entry =
  __ Call(stub_entry);
                                is_not_instance_lbl);
  return type_test_cache.ptr();
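// AssertAssignable: top types and null-assignable cases need no code; the
// caller-side checks (Smi test, cid range checks, type-parameter
// instantiation) are emitted first, and the remaining cases call the type
// testing stub (TTS) with the SubtypeTestCache and destination name placed in
// the object pool.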
  const auto& dst_type =
          ? AbstractType::Cast(
          : Object::null_abstract_type();
  if (!dst_type.IsNull()) {
    ASSERT(dst_type.IsFinalized());
    if (dst_type.IsTopTypeForSubtyping()) return;

  if (dst_type.IsNull()) {
    __ Comment("AssertAssignable for runtime type");
  } else {
    __ Comment("AssertAssignable for compile-time type");
    if (dst_type.IsTypeParameter()) {

  const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
  const intptr_t dst_name_index = __ object_pool_builder().AddObject(
  ASSERT((sub_type_cache_index + 1) == dst_name_index);
  ASSERT(__ constant_pool_allowed());

  __ Comment("TTSCall");
      CanPcRelativeCall(dst_type)) {
                                 sub_type_cache_index);
    __ GenerateUnRelocatedPcRelativeCall();
    AddPcRelativeTTSCallTypeTarget(dst_type);

  bool elide_info = false;
  auto output_dst_type = [&]() -> void {
      type_usage_info->UseTypeInAssertAssignable(dst_type);
    ASSERT(!FLAG_precompiled_mode);

    return output_dst_type();

  bool is_non_smi = false;
  } else if (!receiver_type->CanBeSmi()) {

  if (dst_type.IsTypeParameter()) {
    const TypeParameter& type_param = TypeParameter::Cast(dst_type);
    __ CompareObject(kTypeArgumentsReg, Object::null_object());
    __ LoadCompressedFieldFromOffset(
    return output_dst_type();
  }

  if (dst_type.IsFunctionType() || dst_type.IsRecordType()) {
    return output_dst_type();
  }

  if (auto const hi = thread()->hierarchy_info()) {
    if (hi->CanUseSubtypeRangeCheckFor(dst_type)) {
    } else if (IsListClass(type_class)) {
void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
  switch (instr->tag()) {
    case Instruction::kDropTemps:
                    instr->AsDropTemps()->num_temps());

  Definition* defn = instr->AsDefinition();
  if ((defn != nullptr) && defn->HasTemp()) {
    FrameStatePush(defn);
  }

void FlowGraphCompiler::FrameStatePush(Definition* defn) {
  if ((rep == kUnboxedDouble || rep == kUnboxedFloat32x4 ||
       rep == kUnboxedFloat64x2) &&
      defn->locs()->out(0).IsFpuRegister()) {
  ASSERT((rep == kTagged) || (rep == kUntagged) ||
  frame_state_.Add(rep);

void FlowGraphCompiler::FrameStatePop(intptr_t count) {
  frame_state_.TruncateTo(

bool FlowGraphCompiler::FrameStateIsSafeToCall() {
  for (intptr_t i = 0; i < frame_state_.length(); i++) {
    if (frame_state_[i] != kTagged) {

void FlowGraphCompiler::FrameStateClear() {
  frame_state_.TruncateTo(0);
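// Slow path code for throwing errors (null check, range, write, and
// late-initialization errors). In AOT these can call shared stubs that come
// in variants with and without FPU register saving.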
#define __ compiler->assembler()->

  __ Comment("slow path %s operation", name());
  const bool use_shared_stub =
  const bool live_fpu_registers =
  const intptr_t num_args =
  const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
  if (use_shared_stub) {
#if !defined(TARGET_ARCH_IA32)
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
      RESTORES_LR_FROM_FRAME({});
  __ CallRuntime(runtime_entry_, num_args);
  compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id,
  compiler->RecordSafepoint(locs, num_args);
  if (!FLAG_precompiled_mode ||
  } else if (compiler->is_optimizing()) {
    compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt,
  if (!use_shared_stub) {

      return "check null (nsm)";
      return "check null (arg)";
      return "check null (cast)";

      return kNullErrorRuntimeEntry;
      return kArgumentNullErrorRuntimeEntry;
      return kNullCastErrorRuntimeEntry;

                                     bool save_fpu_registers) {
  auto object_store = compiler->isolate_group()->object_store();
      return save_fpu_registers
                 ? object_store->null_error_stub_with_fpu_regs_stub()
                 : object_store->null_error_stub_without_fpu_regs_stub();
      return save_fpu_registers
                 ? object_store->null_arg_error_stub_with_fpu_regs_stub()
                 : object_store->null_arg_error_stub_without_fpu_regs_stub();
      return save_fpu_registers
                 ? object_store->null_cast_error_stub_with_fpu_regs_stub()
                 : object_store->null_cast_error_stub_without_fpu_regs_stub();

                                     bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)

                                     bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->range_error_stub_with_fpu_regs_stub()
          : object_store->range_error_stub_without_fpu_regs_stub());

                                     bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->write_error_stub_with_fpu_regs_stub()
          : object_store->write_error_stub_without_fpu_regs_stub());

                                     bool save_fpu_registers) {
#if defined(TARGET_ARCH_IA32)
  auto object_store = compiler->isolate_group()->object_store();
          ? object_store->late_initialization_error_stub_with_fpu_regs_stub()
          : object_store
                ->late_initialization_error_stub_without_fpu_regs_stub());
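// FFI support: EmitNativeMove copies a value between native locations,
// handling payload/container size differences, sign/zero extension, and, when
// needed, routing the value through an intermediate location or temporary
// register.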
  if (destination.IsBoth()) {
    const auto& both = destination.AsBoth();

    const auto& both = source.AsBoth();

  const auto& src_payload_type = source.payload_type();
  const auto& dst_payload_type = destination.payload_type();
  const auto& src_container_type = source.container_type();
  const intptr_t src_payload_size = src_payload_type.SizeInBytes();
  const intptr_t dst_payload_size = dst_payload_type.SizeInBytes();
  const intptr_t src_container_size = src_container_type.SizeInBytes();
  const intptr_t dst_container_size = dst_container_type.SizeInBytes();

  ASSERT(src_payload_type.IsPrimitive());
  ASSERT(dst_payload_type.IsPrimitive());
  ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());

  if (source.Equals(destination) && src_payload_type.Equals(dst_payload_type) &&
      src_container_type.Equals(dst_container_type)) {
#if defined(TARGET_ARCH_RISCV64)
    EmitNativeMoveArchitecture(destination, source);

  if (src_payload_type.IsInt() && dst_payload_type.IsInt() &&
      (src_payload_size != src_container_size ||
       dst_payload_size != dst_container_size)) {
    if (source.IsStack() && src_container_size > src_payload_size) {
          source.WithOtherNativeType(zone_, src_payload_type, src_payload_type),
    if (src_payload_size <= dst_payload_size &&
        src_container_size >= dst_container_size) {
              zone_, dst_container_type, dst_container_type),
          source.WithOtherNativeType(
              zone_, dst_container_type, dst_container_type),
    if (src_payload_size >= dst_payload_size &&
        dst_container_size > dst_payload_size) {
                                          dst_container_type),
          source.WithOtherNativeType(zone_, dst_payload_type, dst_payload_type),

  ASSERT(src_payload_size == src_container_size);
  ASSERT(dst_payload_size == dst_container_size);

      dst_container_size == 8 && !source.IsFpuRegisters() &&
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
    const auto& intermediate =
        zone_, dst_payload_type, dst_container_type, scratch);

  const bool sign_or_zero_extend = dst_container_size > src_container_size;
  if (sign_or_zero_extend && destination.IsStack()) {
    const auto& intermediate =
        source.WithOtherNativeType(zone_, dst_payload_type, dst_container_type);

  EmitNativeMoveArchitecture(destination, source);

  for (intptr_t i : {0, 1}) {
        zone_, src_loc, src_type, i);
  if (src_type == kUnboxedInt64 &&
      dst.container_type().AsPrimitive().representation() ==
        src.WithOtherNativeType(zone_, dst.container_type(),
                                dst.container_type()),

  for (intptr_t i : {0, 1}) {
        zone_, dst_loc, dst_type, i);
  if (dst_type == kUnboxedInt64 &&
      src.container_type().AsPrimitive().representation() ==
                                    src.container_type()),

  const auto& dst_type = dst.payload_type();
  if (dst.IsExpressibleAsLocation() &&
      dst_type.IsExpressibleAsRepresentation() &&
      dst_type.AsRepresentationOverApprox(zone_) == src_type) {
  if (dst_type.IsInt()) {
  ASSERT(dst_type.IsFloat());

  if (src.IsPairLocation()) {
    for (intptr_t i : {0, 1}) {
      const auto& intermediate_native =
      EmitMove(intermediate, src.AsPairLocation()->At(i), temp);
  const auto& intermediate_native =
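// PC-relative calls are only emitted in precompiled mode, when they are not
// forced to be indirect, and (for code and type targets) when the target is
// not in the VM isolate heap and resides in the same loading unit as the
// caller.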
bool FlowGraphCompiler::CanPcRelativeCall(const Function& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&

bool FlowGraphCompiler::CanPcRelativeCall(const Code& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
         !target.InVMIsolateHeap() &&

bool FlowGraphCompiler::CanPcRelativeCall(const AbstractType& target) const {
  return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
         !target.InVMIsolateHeap() &&
                                     LoadingUnit::LoadingUnit::kRootId);
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
static void test_cache(skiatest::Reporter *reporter, SkResourceCache &cache, bool testPurge)
static bool ok(int result)
#define DEBUG_ASSERT(cond)
#define RELEASE_ASSERT(cond)
bool IsSubtypeOf(const AbstractType &other, Heap::Space space, FunctionTypeMapping *function_type_equivalence=nullptr) const
bool IsNonNullable() const
bool IsTopTypeForSubtyping() const
bool IsObjectType() const
virtual bool IsInstantiated(Genericity genericity=kAny, intptr_t num_free_fun_type_params=kAllFree) const
virtual ClassPtr type_class() const
static ArrayPtr New(intptr_t len, Heap::Space space=Heap::kNew)
void SetAt(intptr_t index, const Object &value) const
static constexpr intptr_t encode(CallKind value)
bool Contains(intptr_t i) const
intptr_t try_index() const
intptr_t postorder_number() const
bool HasNonRedundantParallelMove() const
intptr_t block_id() const
LoopInfo * loop_info() const
bool IsLoopHeader() const
intptr_t stack_depth() const
Instruction * last_instruction() const
GrowableArray< Definition * > * initial_definitions()
static const Bool & Get(bool value)
TargetEntryInstr * false_successor() const
TargetEntryInstr * true_successor() const
StringPtr target_name() const
intptr_t TypeArgsLen() const
ArrayPtr arguments_descriptor() const
TargetInfo * TargetAt(int i) const
intptr_t catch_try_index() const
const Array & catch_handler_types() const
bool needs_stacktrace() const
bool is_generated() const
static CatchEntryMove FromSlot(SourceKind kind, intptr_t src_slot, intptr_t dest_slot)
static intptr_t EncodePairSource(intptr_t src_lo_slot, intptr_t src_hi_slot)
void NewMapping(intptr_t pc_offset)
TypedDataPtr FinalizeCatchEntryMovesMap()
void Append(const CatchEntryMove &move)
void Add(CidRange *target)
ClassPtr At(intptr_t cid) const
static bool IsSubtypeOf(const Class &cls, const TypeArguments &type_arguments, Nullability nullability, const AbstractType &other, Heap::Space space, FunctionTypeMapping *function_type_equivalence=nullptr)
bool is_finalized() const
ArrayPtr current_functions() const
TokenPosition RootPosition(const InstructionSource &source)
void EndCodeSourceRange(int32_t pc_offset, const InstructionSource &source)
void WriteFunctionEntrySourcePosition(const InstructionSource &source)
void NoteNullCheck(int32_t pc_offset, const InstructionSource &source, intptr_t name_index)
ArrayPtr InliningIdToFunction()
const GrowableArray< const Function * > & inline_id_to_function() const
void BeginCodeSourceRange(int32_t pc_offset, const InstructionSource &source)
CodeSourceMapPtr Finalize()
void NoteDescriptor(UntaggedPcDescriptors::Kind kind, int32_t pc_offset, const InstructionSource &source)
@ kSCallTableFunctionTarget
@ kSCallTableCodeOrTypeTarget
@ kSCallTableKindAndOffset
static EntryCounter SlowPathCounterFor(Instruction::Tag tag)
compiler::Label * entry_label()
intptr_t deopt_id() const
static CompilerState & Current()
CompressedStackMapsPtr Finalize() const
void AddEntry(intptr_t pc_offset, BitmapBuilder *bitmap, intptr_t spill_slot_bit_count)
static constexpr intptr_t kNone
static intptr_t ToDeoptAfter(intptr_t deopt_id)
static SmiPtr EncodeReasonAndFlags(ICData::DeoptReasonId reason, uint32_t flags)
static intptr_t SizeFor(intptr_t length)
static void SetEntry(const Array &table, intptr_t index, const Smi &offset, const TypedData &info, const Smi &reason_and_flags)
void AddDescriptor(UntaggedPcDescriptors::Kind kind, intptr_t pc_offset, intptr_t deopt_id, TokenPosition token_pos, intptr_t try_index, intptr_t yield_index)
PcDescriptorsPtr FinalizePcDescriptors(uword entry_point)
intptr_t CountArgsPushed()
friend class ShallowIterator
void AddHandler(intptr_t try_index, intptr_t outer_try_index, intptr_t pc_offset, bool is_generated, const Array &handler_types, bool needs_stacktrace)
ExceptionHandlersPtr FinalizeExceptionHandlers(uword entry_point) const
void SetNeedsStackTrace(intptr_t try_index)
IsolateGroup * isolate_group() const
void EmitDropArguments(intptr_t count)
static bool GenerateCidRangesCheck(compiler::Assembler *assembler, Register class_id_reg, const CidRangeVector &cid_ranges, compiler::Label *inside_range_lbl, compiler::Label *outside_range_lbl=nullptr, bool fall_through_if_inside=false)
void AddStubCallTarget(const Code &code)
Instruction * current_instruction() const
void StatsBegin(Instruction *instr)
bool skip_body_compilation() const
void RecordSafepoint(LocationSummary *locs, intptr_t slow_path_argument_count=0)
void EmitOptimizedStaticCall(const Function &function, const Array &arguments_descriptor, intptr_t size_with_type_args, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, Code::EntryKind entry_kind=Code::EntryKind::kNormal)
void EmitCallToStub(const Code &stub, ObjectPool::SnapshotBehavior snapshot_behavior=compiler::ObjectPoolBuilderEntry::kSnapshotable)
bool CheckAssertAssignableTypeTestingABILocations(const LocationSummary &locs)
void FinalizeVarDescriptors(const Code &code)
void set_current_block(BlockEntryInstr *value)
BranchLabels CreateBranchLabels(BranchInstr *branch) const
const Function & function() const
void GenerateStaticDartCall(intptr_t deopt_id, const InstructionSource &source, UntaggedPcDescriptors::Kind kind, LocationSummary *locs, const Function &target, Code::EntryKind entry_kind=Code::EntryKind::kNormal)
bool is_optimizing() const
intptr_t CurrentTryIndex() const
const Class & float64x2_class() const
const Class & BoxClassFor(Representation rep)
void InsertBSSRelocation(BSS::Relocation reloc)
void AddExceptionHandler(CatchBlockEntryInstr *entry)
bool ForcedOptimization() const
CompilerDeoptInfo * AddDeoptIndexAtCall(intptr_t deopt_id, Environment *env)
const Class & double_class() const
void FinalizeCatchEntryMovesMap(const Code &code)
bool ForceSlowPathForStackOverflow() const
void GenerateNumberTypeCheck(Register kClassIdReg, const AbstractType &type, compiler::Label *is_instance_lbl, compiler::Label *is_not_instance_lbl)
const FlowGraph & flow_graph() const
compiler::Label * GetJumpLabel(BlockEntryInstr *block_entry) const
void RecordCatchEntryMoves(Environment *env)
intptr_t StackSize() const
const ParsedFunction & parsed_function() const
intptr_t ExtraStackSlotsOnOsrEntry() const
static bool LookupMethodFor(int class_id, const String &name, const ArgumentsDescriptor &args_desc, Function *fn_return, bool *class_is_abstract_return=nullptr)
bool WasCompacted(BlockEntryInstr *block_entry) const
void AddDescriptor(UntaggedPcDescriptors::Kind kind, intptr_t pc_offset, intptr_t deopt_id, const InstructionSource &source, intptr_t try_index, intptr_t yield_index=UntaggedPcDescriptors::kInvalidYieldIndex)
void EmitPolymorphicInstanceCall(const PolymorphicInstanceCallInstr *call, const CallTargets &targets, ArgumentsInfo args_info, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, bool complete, intptr_t total_call_count, bool receiver_can_be_smi=true)
void EnterIntrinsicMode()
BlockEntryInstr * current_block() const
static constexpr intptr_t kMaxNumberOfCidRangesToTest
compiler::Label * AddDeoptStub(intptr_t deopt_id, ICData::DeoptReasonId reason, uint32_t flags=0)
void EmitMoveToNative(const compiler::ffi::NativeLocation &dst, Location src_loc, Representation src_type, TemporaryRegisterAllocator *temp)
const Class & float32x4_class() const
bool CanFallThroughTo(BlockEntryInstr *block_entry) const
void EmitComment(Instruction *instr)
void EmitTestAndCall(const CallTargets &targets, const String &function_name, ArgumentsInfo args_info, compiler::Label *failed, compiler::Label *match_found, intptr_t deopt_id, const InstructionSource &source_index, LocationSummary *locs, bool complete, intptr_t total_ic_calls, Code::EntryKind entry_kind=Code::EntryKind::kNormal)
static const CallTargets * ResolveCallTargetsForReceiverCid(intptr_t cid, const String &selector, const Array &args_desc_array)
static int EmitTestAndCallCheckCid(compiler::Assembler *assembler, compiler::Label *label, Register class_id_reg, const CidRangeValue &range, int bias, bool jump_on_miss=true)
void SetNeedsStackTrace(intptr_t try_index)
CompilerDeoptInfo * AddSlowPathDeoptInfo(intptr_t deopt_id, Environment *env)
void EmitInstanceCallJIT(const Code &stub, const ICData &ic_data, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, Code::EntryKind entry_kind)
const Class & mint_class() const
const ICData * GetOrAddInstanceCallICData(intptr_t deopt_id, const String &target_name, const Array &arguments_descriptor, intptr_t num_args_tested, const AbstractType &receiver_type, const Function &binary_smi_target)
void EmitMoveFromNative(Location dst_loc, Representation dst_type, const compiler::ffi::NativeLocation &src, TemporaryRegisterAllocator *temp)
bool IsEmptyBlock(BlockEntryInstr *block) const
void AddSlowPathCode(SlowPathCode *slow_path)
static void GenerateIndirectTTSCall(compiler::Assembler *assembler, Register reg_with_type, intptr_t sub_type_cache_index)
void FinalizeStaticCallTargetsTable(const Code &code)
bool GenerateSubtypeRangeCheck(Register class_id_reg, const Class &type_class, compiler::Label *is_subtype_lbl)
void AddDispatchTableCallTarget(const compiler::TableSelector *selector)
void GenerateInstanceCall(intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, const ICData &ic_data, Code::EntryKind entry_kind, bool receiver_can_be_smi)
void EmitFunctionEntrySourcePositionDescriptorIfNeeded()
void FinalizeExceptionHandlers(const Code &code)
FlowGraphCompiler(compiler::Assembler *assembler, FlowGraph *flow_graph, const ParsedFunction &parsed_function, bool is_optimizing, SpeculativeInliningPolicy *speculative_policy, const GrowableArray< const Function * > &inline_id_to_function, const GrowableArray< TokenPosition > &inline_id_to_token_pos, const GrowableArray< intptr_t > &caller_inline_id, ZoneGrowableArray< const ICData * > *deopt_id_to_ic_data, CodeStatistics *stats=nullptr)
void GenerateListTypeCheck(Register kClassIdReg, compiler::Label *is_instance_lbl)
void GenerateTTSCall(const InstructionSource &source, intptr_t deopt_id, Environment *env, Register reg_with_type, const AbstractType &dst_type, const String &dst_name, LocationSummary *locs)
void FinalizeStackMaps(const Code &code)
const Class & int32x4_class() const
const GrowableArray< BlockEntryInstr * > & block_order() const
bool may_reoptimize() const
bool CanOSRFunction() const
void EmitMegamorphicInstanceCall(const ICData &icdata, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs)
void EmitOptimizedInstanceCall(const Code &stub, const ICData &ic_data, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, Code::EntryKind entry_kind=Code::EntryKind::kNormal)
void set_intrinsic_slow_path_label(compiler::Label *label)
void Bailout(const char *reason)
void ArchSpecificInitialization()
void GenerateNonLazyDeoptableStubCall(const InstructionSource &source, const Code &stub, UntaggedPcDescriptors::Kind kind, LocationSummary *locs, ObjectPool::SnapshotBehavior snapshot_behavior=compiler::ObjectPoolBuilderEntry::kSnapshotable)
void GenerateAssertAssignable(CompileType *receiver_type, const InstructionSource &source, intptr_t deopt_id, Environment *env, const String &dst_name, LocationSummary *locs)
bool CanOptimizeFunction() const
void FinalizeCodeSourceMap(const Code &code)
ArrayPtr CreateDeoptInfo(compiler::Assembler *assembler)
void GenerateStringTypeCheck(Register kClassIdReg, compiler::Label *is_instance_lbl, compiler::Label *is_not_instance_lbl)
void EmitInstanceCallAOT(const ICData &ic_data, intptr_t deopt_id, const InstructionSource &source, LocationSummary *locs, Code::EntryKind entry_kind=Code::EntryKind::kNormal, bool receiver_can_be_smi=true)
void SpecialStatsEnd(intptr_t tag)
const GrowableArray< BlockInfo * > & block_info() const
bool NeedsEdgeCounter(BlockEntryInstr *block)
void FinalizePcDescriptors(const Code &code)
void EmitYieldPositionMetadata(const InstructionSource &source, intptr_t yield_index)
void StatsEnd(Instruction *instr)
void GenerateStubCall(const InstructionSource &source, const Code &stub, UntaggedPcDescriptors::Kind kind, LocationSummary *locs, intptr_t deopt_id, Environment *env)
void EmitMove(Location dst, Location src, TemporaryRegisterAllocator *temp)
void EmitCallsiteMetadata(const InstructionSource &source, intptr_t deopt_id, UntaggedPcDescriptors::Kind kind, LocationSummary *locs, Environment *env)
void GenerateInstanceOf(const InstructionSource &source, intptr_t deopt_id, Environment *env, const AbstractType &type, LocationSummary *locs)
compiler::Assembler * assembler() const
void EmitMoveConst(const compiler::ffi::NativeLocation &dst, Location src, Representation src_type, TemporaryRegisterAllocator *temp)
const ICData * GetOrAddStaticCallICData(intptr_t deopt_id, const Function &target, const Array &arguments_descriptor, intptr_t num_args_tested, ICData::RebindRule rebind_rule)
bool intrinsic_mode() const
void AddCurrentDescriptor(UntaggedPcDescriptors::Kind kind, intptr_t deopt_id, const InstructionSource &source)
compiler::Label * NextNonEmptyLabel() const
void GenerateCallerChecksForAssertAssignable(CompileType *receiver_type, const AbstractType &dst_type, compiler::Label *done)
void EndCodeSourceRange(const InstructionSource &source)
void SpecialStatsBegin(intptr_t tag)
void EmitNativeMove(const compiler::ffi::NativeLocation &dst, const compiler::ffi::NativeLocation &src, TemporaryRegisterAllocator *temp)
compiler::Label * intrinsic_slow_path_label() const
void AddNullCheck(const InstructionSource &source, const String &name)
void GenerateStaticCall(intptr_t deopt_id, const InstructionSource &source, const Function &function, ArgumentsInfo args_info, LocationSummary *locs, const ICData &ic_data_in, ICData::RebindRule rebind_rule, Code::EntryKind entry_kind=Code::EntryKind::kNormal)
Environment * SlowPathEnvironmentFor(Instruction *inst, intptr_t num_slow_path_args)
void BeginCodeSourceRange(const InstructionSource &source)
GraphEntryInstr * graph_entry() const
bool IsCompiledForOsr() const
const GrowableArray< BlockEntryInstr * > & preorder() const
intptr_t num_stack_locals() const
BitVector * captured_parameters() const
const ParsedFunction & parsed_function() const
intptr_t variable_count() const
bool IsIrregexpFunction() const
bool MakesCopyOfParameters() const
const char * ToFullyQualifiedCString() const
bool HasBreakpoint() const
intptr_t num_fixed_parameters() const
static bool UseUnboxedRepresentation()
intptr_t spill_slot_count() const
CatchBlockEntryInstr * GetCatchEntry(intptr_t index)
OsrEntryInstr * osr_entry() const
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
bool is_static_call() const
intptr_t deopt_id() const
ICDataPtr AsUnaryClassChecks() const
intptr_t NumberOfUsedChecks() const
ICDataPtr Original() const
static ICDataPtr NewWithCheck(const Function &owner, const String &target_name, const Array &arguments_descriptor, intptr_t deopt_id, intptr_t num_args_tested, RebindRule rebind_rule, GrowableArray< intptr_t > *cids, const Function &target, const AbstractType &receiver_type=Object::null_abstract_type())
intptr_t NumArgsTested() const
static ICDataPtr NewForStaticCall(const Function &owner, const Function &target, const Array &arguments_descriptor, intptr_t deopt_id, intptr_t num_args_tested, RebindRule rebind_rule)
bool is_tracking_exactness() const
AbstractTypePtr receivers_static_type() const
static bool NullIsAssignableTo(const AbstractType &other)
Instruction * next() const
virtual intptr_t InputCount() const =0
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
virtual BlockEntryInstr * GetBlock()
virtual bool CanBecomeDeoptimizationTarget() const
Environment * env() const
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
bool HasUnmatchedInputRepresentations() const
virtual intptr_t ArgumentCount() const
virtual Representation representation() const
virtual Tag tag() const =0
InstructionSource source() const
intptr_t deopt_id() const
static bool IsSystemIsolateGroup(const IsolateGroup *group)
intptr_t optimization_counter_threshold() const
static IsolateGroup * Current()
ClassTable * class_table() const
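optimization_counter_threshold() above feeds the usual hot-function heuristic: unoptimized code bumps a per-function usage counter, and crossing the threshold makes the function a candidate for optimizing compilation. A hedged sketch of that policy with made-up names:

  #include <cstdint>

  struct FunctionCounters {
    int32_t usage_count = 0;  // bumped on each unoptimized entry
  };

  // Returns true once the function has become hot enough to optimize.
  bool ShouldOptimize(FunctionCounters* f, int32_t threshold) {
    return ++f->usage_count >= threshold;
  }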
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
static bool FindPragma(Thread *T, bool only_core, const Object &object, const String &pragma_name, bool multiple=false, Object *options=nullptr)
static intptr_t LoadingUnitOf(const Function &function)
static LocalVarDescriptorsPtr New(intptr_t num_variables)
void SetVar(intptr_t var_index, const String &name, UntaggedLocalVarDescriptors::VarInfo *info) const
Location out(intptr_t index) const
const BitmapBuilder & stack_bitmap()
intptr_t input_count() const
RegisterSet * live_registers()
bool always_calls() const
bool call_on_shared_slow_path() const
Location in(intptr_t index) const
static Location FpuRegisterLocation(FpuRegister reg)
const char * ToCString() const
static Location RegisterLocation(Register reg)
bool IsPairLocation() const
const Object & constant() const
DART_NORETURN void Jump(int value, const Error &error)
static CodePtr GetStub(FlowGraphCompiler *compiler, CheckNullInstr::ExceptionType exception_type, bool save_fpu_registers)
void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers) override
const char * name() override
CheckNullInstr::ExceptionType exception_type() const
static Object & ZoneHandle()
void Bailout(const char *origin, const char *reason) const
const Function & function() const
int num_stack_locals() const
void Verify(const Function &function) const
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
intptr_t FpuRegisterCount() const
bool ContainsFpuRegister(FpuRegister fpu_reg) const
bool HasUntaggedValues() const
bool ContainsRegister(Register reg) const
bool IsTagged(Register reg) const
static FunctionPtr ResolveDynamicForReceiverClass(const Class &receiver_class, const String &function_name, const ArgumentsDescriptor &args_desc, bool allow_add)
void GenerateCode(FlowGraphCompiler *compiler)
Instruction * instruction() const
compiler::Label * entry_label()
static SmiPtr New(intptr_t value)
bool AllowsSpeculativeInlining() const
static StaticTypeExactnessState NotTracking()
static const Code & SubtypeTestCacheStubForUsedInputs(intptr_t i)
static SubtypeTestCachePtr New(intptr_t num_inputs)
static constexpr intptr_t kMaxInputs
Instruction * instruction() const
virtual void ReleaseTemporary()=0
virtual Register AllocateTemporary()=0
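AllocateTemporary()/ReleaseTemporary() above form a strictly paired protocol, so wrapping them in an RAII guard keeps the pair balanced on every exit path. A sketch against a hypothetical allocator interface mirroring the two virtuals listed:

  using Register = int;

  class TempRegisterAllocator {
   public:
    virtual ~TempRegisterAllocator() = default;
    virtual Register AllocateTemporary() = 0;
    virtual void ReleaseTemporary() = 0;
  };

  // Scoped guard: the temporary is released when the scope ends, even on an
  // early return, so Allocate/Release always stay balanced.
  class ScopedTemp {
   public:
    explicit ScopedTemp(TempRegisterAllocator* alloc)
        : alloc_(alloc), reg_(alloc->AllocateTemporary()) {}
    ~ScopedTemp() { alloc_->ReleaseTemporary(); }
    Register reg() const { return reg_; }

   private:
    TempRegisterAllocator* alloc_;
    Register reg_;
  };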
LongJumpScope * long_jump_base() const
HierarchyInfo * hierarchy_info() const
static Thread * Current()
TypeUsageInfo * type_usage_info() const
CompilerState & compiler_state()
IsolateGroup * isolate_group() const
virtual void EmitCodeAtSlowPathEntry(FlowGraphCompiler *compiler)
virtual intptr_t GetNumberOfArgumentsForRuntimeCall()
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
virtual void AddMetadataForRuntimeCall(FlowGraphCompiler *compiler)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
virtual const char * name()=0
static const TokenPosition kMinSource
bool IsClassTypeParameter() const
static constexpr T Maximum(T x, T y)
virtual void PushArgumentsForRuntimeCall(FlowGraphCompiler *compiler)
virtual void EmitSharedStubCall(FlowGraphCompiler *compiler, bool save_fpu_registers)
intptr_t CodeSize() const
ObjectPoolBuilder & object_pool_builder()
static bool EmittingComments()
intptr_t InsertAlignedRelocation(BSS::Relocation reloc)
void mark_should_be_aligned()
void Comment(const char *format,...) PRINTF_ATTRIBUTE(2, 3)
void Jump(Label *label, JumpDistance distance=kFarJump)
void CompareImmediate(Register rn, int32_t value, Condition cond)
void set_constant_pool_allowed(bool b)
void Align(intptr_t alignment, intptr_t offset)
void BranchIf(Condition condition, Label *label, JumpDistance distance=kFarJump)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void Bind(Label *label) override
void PopRegister(Register r)
void Drop(intptr_t stack_elements)
void set_lr_state(compiler::LRState b)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void Breakpoint() override
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
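Label, Bind(), Jump(), and BranchIf() above follow the standard forward-reference scheme: a branch to a still-unbound label records a fixup, and Bind() patches all recorded fixups once the label's position is known. A compact self-contained model of that mechanism (a toy, not the VM assembler):

  #include <cstdint>
  #include <vector>

  class MiniAssembler {
   public:
    struct Label {
      intptr_t position = -1;        // stream offset once bound
      std::vector<intptr_t> fixups;  // slots waiting for that offset
    };

    void Jump(Label* label) {
      if (label->position >= 0) {
        buffer_.push_back(label->position);  // backward branch: target known
      } else {
        // Forward branch: remember the slot and patch it at Bind().
        label->fixups.push_back(static_cast<intptr_t>(buffer_.size()));
        buffer_.push_back(-1);
      }
    }

    void Bind(Label* label) {
      label->position = static_cast<intptr_t>(buffer_.size());
      for (intptr_t slot : label->fixups) buffer_[slot] = label->position;
      label->fixups.clear();
    }

   private:
    std::vector<intptr_t> buffer_;  // toy "instruction" stream of branch targets
  };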
static bool Intrinsify(const ParsedFunction &parsed_function, FlowGraphCompiler *compiler)
intptr_t FindObject(const Object &obj, ObjectPoolBuilderEntry::Patchability patchable=ObjectPoolBuilderEntry::kNotPatchable, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
virtual bool IsFpuRegisters() const
virtual bool IsBoth() const
const NativeType & container_type() const
virtual bool IsStack() const
virtual NativeLocation & Split(Zone *zone, intptr_t num_parts, intptr_t index) const
static NativeLocation & FromLocation(Zone *zone, Location loc, Representation rep)
virtual NativeLocation & WithOtherNativeType(Zone *zone, const NativeType &new_payload_type, const NativeType &new_container_type) const =0
static NativeLocation & FromPairLocation(Zone *zone, Location loc, Representation rep, intptr_t index)
const BothNativeLocations & AsBoth() const
const NativeType & payload_type() const
virtual Representation AsRepresentation() const
virtual NativePrimitiveType & Split(Zone *zone, intptr_t part) const
virtual intptr_t SizeInBytes() const =0
static NativePrimitiveType & FromRepresentation(Zone *zone, Representation rep)
static word super_type_offset()
static word value_offset()
static word value_offset()
static word unboxed_runtime_arg_offset()
static word type_at_offset(intptr_t i)
#define THR_Print(format,...)
constexpr intptr_t kDoubleSpillFactor
word ToRawSmi(const dart::Object &a)
static constexpr intptr_t kWordSize
constexpr intptr_t kIntSpillFactor
const FpuRegister kNoFpuRegister
static FpuRegister AllocateFreeFpuRegister(bool *blocked_registers)
const RegList kReservedCpuRegisters
static bool IsPopper(Instruction *instr)
constexpr intptr_t kInt64Size
static const Code & StubEntryFor(const ICData &ic_data, bool optimized)
static bool IsPusher(Instruction *instr)
const int kNumberOfFpuRegisters
Location LocationRemapForSlowPath(Location loc, Definition *def, intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
constexpr RegList kDartAvailableCpuRegs
constexpr int kRegisterAllocationBias
static Register AllocateFreeRegister(bool *blocked_registers)
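AllocateFreeRegister()/AllocateFreeFpuRegister() above hand out the first register that is allowed for Dart code and not yet blocked. A hedged sketch of that scan with stand-in constants (the real kDartAvailableCpuRegs and the allocation bias are target-specific):

  #include <cstdint>

  using Register = int;
  constexpr int kNumberOfCpuRegisters = 16;            // stand-in, target-specific
  constexpr uint32_t kAvailableCpuRegs = 0x0000FFF0u;  // stand-in allow mask
  constexpr Register kNoRegister = -1;

  // Return the first register that is in the allow mask and not blocked,
  // marking it blocked so later queries skip it.
  Register AllocateFreeRegister(bool* blocked_registers) {
    for (Register reg = 0; reg < kNumberOfCpuRegisters; reg++) {
      if (((kAvailableCpuRegs >> reg) & 1u) != 0 && !blocked_registers[reg]) {
        blocked_registers[reg] = true;
        return reg;
      }
    }
    return kNoRegister;
  }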
const intptr_t kPreferredLoopAlignment
const char *const function_name
static constexpr intptr_t kInvalidTryIndex
const int kFpuRegisterSize
DECLARE_FLAG(bool, show_invisible_frames)
const intptr_t count_without_type_args
const intptr_t type_args_len
const intptr_t size_with_type_args
const intptr_t size_without_type_args
ArrayPtr ToArgumentsDescriptor() const
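The counts above differ in whether the reified type-argument vector is included. Assuming (hedged) that a call passing type arguments adds exactly one extra slot for that vector, the relation is simple arithmetic:

  #include <cstdint>

  // Hedged sketch: with type_args_len > 0, the type-argument vector occupies
  // one additional argument slot; otherwise the with/without sizes coincide.
  struct ArgsShape {
    intptr_t type_args_len;
    intptr_t count_without_type_args;
    intptr_t size_without_type_args;

    intptr_t size_with_type_args() const {
      return size_without_type_args + (type_args_len > 0 ? 1 : 0);
    }
  };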
static constexpr Register kResultReg
static constexpr FpuRegister kValueReg
bool IsIllegalRange() const
intptr_t VariableIndexForFrameSlot(intptr_t frame_slot) const
static constexpr Register kFieldReg
static constexpr bool IsUnboxedInteger(Representation rep)
static constexpr Register kSubtypeTestCacheReg
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kSubtypeTestCacheResultReg
static constexpr Register kScratchReg
static constexpr Register kInstanceOfResultReg