Flutter Engine
The Flutter Engine
flow_graph_compiler.cc
Go to the documentation of this file.
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX.
7
8#include "platform/utils.h"
9#include "vm/bit_vector.h"
16#include "vm/compiler/cha.h"
19#include "vm/dart_entry.h"
20#include "vm/debugger.h"
22#include "vm/exceptions.h"
23#include "vm/flags.h"
24#include "vm/kernel_isolate.h"
25#include "vm/log.h"
26#include "vm/longjump.h"
27#include "vm/object_store.h"
28#include "vm/parser.h"
29#include "vm/pointer_tagging.h"
30#include "vm/raw_object.h"
31#include "vm/resolver.h"
32#include "vm/service_isolate.h"
33#include "vm/stack_frame.h"
34#include "vm/stub_code.h"
35#include "vm/symbols.h"
36#include "vm/timeline.h"
38
39namespace dart {
40
42 trace_inlining_intervals,
43 false,
44 "Inlining interval diagnostics");
45
46DEFINE_FLAG(bool, enable_peephole, true, "Enable peephole optimization");
47
49 enable_simd_inline,
50 true,
51 "Enable inlining of SIMD related method calls.");
53 min_optimization_counter_threshold,
54 5000,
55 "The minimum invocation count for a function.");
57 optimization_counter_scale,
58 2000,
59 "The scale of invocation count, by size of the function.");
60DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
62 force_indirect_calls,
63 false,
64 "Do not emit PC relative calls.");
65
66DECLARE_FLAG(charp, deoptimize_filter);
67DECLARE_FLAG(bool, intrinsify);
68DECLARE_FLAG(int, regexp_optimization_counter_threshold);
69DECLARE_FLAG(int, reoptimization_counter_threshold);
70DECLARE_FLAG(int, stacktrace_every);
71DECLARE_FLAG(charp, stacktrace_filter);
72DECLARE_FLAG(int, gc_every);
73DECLARE_FLAG(bool, trace_compiler);
74
76 align_all_loops,
77 false,
78 "Align all loop headers to 32 byte boundary");
79
80#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
81compiler::LRState ComputeInnerLRState(const FlowGraph& flow_graph) {
82 auto entry = flow_graph.graph_entry();
83 const bool frameless = !entry->NeedsFrame();
84
85 bool has_native_entries = false;
86 for (intptr_t i = 0; i < entry->SuccessorCount(); i++) {
87 if (entry->SuccessorAt(i)->IsNativeEntry()) {
88 has_native_entries = true;
89 break;
90 }
91 }
92
93 auto state = compiler::LRState::OnEntry();
94 if (has_native_entries) {
95 // We will setup three (3) frames on the stack when entering through
96 // native entry. Keep in sync with NativeEntry/NativeReturn.
97 state = state.EnterFrame().EnterFrame();
98 }
99
100 if (!frameless) {
101 state = state.EnterFrame();
102 }
103
104 return state;
105}
106#endif
107
108// Assign locations to outgoing arguments. Note that MoveArgument
109// can only occur in the innermost environment because we insert
110// them immediately before the call instruction and right before
111// register allocation.
112void CompilerDeoptInfo::AllocateOutgoingArguments(Environment* env) {
113 if (env == nullptr) return;
114 for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) {
115 if (it.CurrentLocation().IsInvalid()) {
116 if (auto move_arg = it.CurrentValue()->definition()->AsMoveArgument()) {
117 it.SetCurrentLocation(move_arg->locs()->out(0));
118 }
119 }
120 }
121}
122
123void CompilerDeoptInfo::EmitMaterializations(Environment* env,
124 DeoptInfoBuilder* builder) {
125 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
126 if (it.CurrentLocation().IsInvalid()) {
127 MaterializeObjectInstr* mat =
128 it.CurrentValue()->definition()->AsMaterializeObject();
129 ASSERT(mat != nullptr);
130 builder->AddMaterialization(mat);
131 }
132 }
133}
134
136 compiler::Assembler* assembler,
137 FlowGraph* flow_graph,
138 const ParsedFunction& parsed_function,
139 bool is_optimizing,
140 SpeculativeInliningPolicy* speculative_policy,
141 const GrowableArray<const Function*>& inline_id_to_function,
142 const GrowableArray<TokenPosition>& inline_id_to_token_pos,
143 const GrowableArray<intptr_t>& caller_inline_id,
144 ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
145 CodeStatistics* stats /* = nullptr */)
146 : thread_(Thread::Current()),
147 zone_(Thread::Current()->zone()),
148 assembler_(assembler),
149 parsed_function_(parsed_function),
150 flow_graph_(*flow_graph),
151 block_order_(*flow_graph->CodegenBlockOrder()),
152 current_block_(nullptr),
153 exception_handlers_list_(nullptr),
154 pc_descriptors_list_(nullptr),
155 compressed_stackmaps_builder_(nullptr),
156 code_source_map_builder_(nullptr),
157 catch_entry_moves_maps_builder_(nullptr),
158 block_info_(block_order_.length()),
159 deopt_infos_(),
160 static_calls_target_table_(),
161 indirect_gotos_(),
162 is_optimizing_(is_optimizing),
163 speculative_policy_(speculative_policy),
164 may_reoptimize_(false),
165 intrinsic_mode_(false),
166 stats_(stats),
167 double_class_(
168 Class::ZoneHandle(isolate_group()->object_store()->double_class())),
169 mint_class_(
170 Class::ZoneHandle(isolate_group()->object_store()->mint_class())),
171 float32x4_class_(Class::ZoneHandle(
172 isolate_group()->object_store()->float32x4_class())),
173 float64x2_class_(Class::ZoneHandle(
174 isolate_group()->object_store()->float64x2_class())),
175 int32x4_class_(
176 Class::ZoneHandle(isolate_group()->object_store()->int32x4_class())),
177 list_class_(Class::ZoneHandle(Library::Handle(Library::CoreLibrary())
178 .LookupClass(Symbols::List()))),
179 pending_deoptimization_env_(nullptr),
180 deopt_id_to_ic_data_(deopt_id_to_ic_data),
181 edge_counters_array_(Array::ZoneHandle()) {
184 if (is_optimizing) {
185 // No need to collect extra ICData objects created during compilation.
186 deopt_id_to_ic_data_ = nullptr;
187 } else {
188 const intptr_t len = thread()->compiler_state().deopt_id();
189 deopt_id_to_ic_data_->EnsureLength(len, nullptr);
190 }
191 ASSERT(assembler != nullptr);
192 ASSERT(!list_class_.IsNull());
193
194#if defined(PRODUCT)
195 const bool stack_traces_only = true;
196#else
197 const bool stack_traces_only = false;
198#endif
199 // Make sure that the function is at the position for inline_id 0.
200 ASSERT(inline_id_to_function.length() >= 1);
201 ASSERT(inline_id_to_function[0]->ptr() ==
203 code_source_map_builder_ = new (zone_)
204 CodeSourceMapBuilder(zone_, stack_traces_only, caller_inline_id,
205 inline_id_to_token_pos, inline_id_to_function);
206
208}
209
211 compressed_stackmaps_builder_ =
213 pc_descriptors_list_ = new (zone()) DescriptorList(
214 zone(), &code_source_map_builder_->inline_id_to_function());
215 exception_handlers_list_ =
217#if defined(DART_PRECOMPILER)
218 catch_entry_moves_maps_builder_ = new (zone()) CatchEntryMovesMapBuilder();
219#endif
220 block_info_.Clear();
221 // Initialize block info and search optimized (non-OSR) code for calls
222 // indicating a non-leaf routine and calls without IC data indicating
223 // possible reoptimization.
224
225 for (int i = 0; i < block_order_.length(); ++i) {
226 block_info_.Add(new (zone()) BlockInfo());
228 BlockEntryInstr* entry = block_order_[i];
229 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
230 Instruction* current = it.Current();
231 if (auto* branch = current->AsBranch()) {
232 current = branch->comparison();
233 }
234 if (auto* instance_call = current->AsInstanceCall()) {
235 const ICData* ic_data = instance_call->ic_data();
236 if ((ic_data == nullptr) || (ic_data->NumberOfUsedChecks() == 0)) {
237 may_reoptimize_ = true;
238 }
239 }
240 }
241 }
242 }
243
244 if (!is_optimizing() && FLAG_reorder_basic_blocks) {
245 // Initialize edge counter array.
246 const intptr_t num_counters = flow_graph_.preorder().length();
247 const Array& edge_counters =
248 Array::Handle(Array::New(num_counters, Heap::kOld));
249 for (intptr_t i = 0; i < num_counters; ++i) {
250 edge_counters.SetAt(i, Object::smi_zero());
251 }
252 edge_counters_array_ = edge_counters.ptr();
253 }
254}
255
258}
259
262}
263
265 return isolate_group()->use_osr() && CanOptimizeFunction() &&
266 !is_optimizing();
267}
268
270 const intptr_t offset = assembler()->InsertAlignedRelocation(reloc);
271 AddDescriptor(UntaggedPcDescriptors::kBSSRelocation, /*pc_offset=*/offset,
272 /*deopt_id=*/DeoptId::kNone, InstructionSource(),
273 /*try_index=*/-1);
274}
275
277#if !defined(PRODUCT)
278 if (FLAG_stacktrace_every > 0 || FLAG_deoptimize_every > 0 ||
279 FLAG_gc_every > 0 ||
280 (isolate_group()->reload_every_n_stack_overflow_checks() > 0)) {
282 return true;
283 }
284 }
285 if (FLAG_stacktrace_filter != nullptr &&
286 strstr(parsed_function().function().ToFullyQualifiedCString(),
287 FLAG_stacktrace_filter) != nullptr) {
288 return true;
289 }
290 if (is_optimizing() && FLAG_deoptimize_filter != nullptr &&
291 strstr(parsed_function().function().ToFullyQualifiedCString(),
292 FLAG_deoptimize_filter) != nullptr) {
293 return true;
294 }
295#endif // !defined(PRODUCT)
296 return false;
297}
298
300 // Entry-points cannot be merged because they must have assembly
301 // prologue emitted which should not be included in any block they jump to.
302 return !block->IsGraphEntry() && !block->IsFunctionEntry() &&
303 !block->IsCatchBlockEntry() && !block->IsOsrEntry() &&
304 !block->IsIndirectEntry() && !block->HasNonRedundantParallelMove() &&
305 block->next()->IsGoto() &&
306 !block->next()->AsGoto()->HasNonRedundantParallelMove();
307}
308
309void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
310 BlockInfo* block_info = block_info_[block->postorder_number()];
311
312 // Break out of cycles in the control flow graph.
313 if (block_info->is_marked()) {
314 return;
315 }
316 block_info->mark();
317
318 if (IsEmptyBlock(block)) {
319 // For empty blocks, record a corresponding nonempty target as their
320 // jump label.
321 BlockEntryInstr* target = block->next()->AsGoto()->successor();
322 CompactBlock(target);
323 block_info->set_jump_label(GetJumpLabel(target));
324 }
325}
326
327void FlowGraphCompiler::CompactBlocks() {
328 // This algorithm does not garbage collect blocks in place, but merely
329 // records forwarding label information. In this way it avoids having to
330 // change join and target entries.
331 compiler::Label* nonempty_label = nullptr;
332 for (intptr_t i = block_order().length() - 1; i >= 1; --i) {
333 BlockEntryInstr* block = block_order()[i];
334
335 // Unoptimized code must emit all possible deoptimization points.
336 if (is_optimizing()) {
337 CompactBlock(block);
338 }
339
340 // For nonempty blocks, record the next nonempty block in the block
341 // order. Since no code is emitted for empty blocks, control flow is
342 // eligible to fall through to the next nonempty one.
343 if (!WasCompacted(block)) {
344 BlockInfo* block_info = block_info_[block->postorder_number()];
345 block_info->set_next_nonempty_label(nonempty_label);
346 nonempty_label = GetJumpLabel(block);
347 }
348 }
349
350 ASSERT(block_order()[0]->IsGraphEntry());
351 BlockInfo* block_info = block_info_[block_order()[0]->postorder_number()];
352 block_info->set_next_nonempty_label(nonempty_label);
353}
354
355#if defined(DART_PRECOMPILER)
356static intptr_t LocationToStackIndex(const Location& src) {
357 ASSERT(src.HasStackIndex());
359 src.stack_index());
360}
361
362static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
363 Representation src_type,
364 const Location& src,
365 intptr_t dst_index) {
366 if (src.IsConstant()) {
367 // Skip dead locations.
368 if (src.constant().ptr() == Object::optimized_out().ptr()) {
369 return CatchEntryMove();
370 }
371 const intptr_t pool_index =
372 assembler->object_pool_builder().FindObject(src.constant());
374 pool_index, dst_index);
375 }
376
377 if (src.IsPairLocation()) {
378 const auto lo_loc = src.AsPairLocation()->At(0);
379 const auto hi_loc = src.AsPairLocation()->At(1);
380 ASSERT(lo_loc.IsStackSlot() && hi_loc.IsStackSlot());
383 CatchEntryMove::EncodePairSource(LocationToStackIndex(lo_loc),
384 LocationToStackIndex(hi_loc)),
385 dst_index);
386 }
387
389 switch (src_type) {
390 case kTagged:
392 break;
393 case kUnboxedInt64:
395 break;
396 case kUnboxedInt32:
398 break;
399 case kUnboxedUint32:
401 break;
402 case kUnboxedFloat:
404 break;
405 case kUnboxedDouble:
407 break;
408 case kUnboxedFloat32x4:
410 break;
411 case kUnboxedFloat64x2:
413 break;
414 case kUnboxedInt32x4:
416 break;
417 default:
418 UNREACHABLE();
419 break;
420 }
421
422 return CatchEntryMove::FromSlot(src_kind, LocationToStackIndex(src),
423 dst_index);
424}
425#endif
426
428#if defined(DART_PRECOMPILER)
429 const intptr_t try_index = CurrentTryIndex();
430 if (is_optimizing() && env != nullptr && (try_index != kInvalidTryIndex)) {
431 env = env->Outermost();
432 CatchBlockEntryInstr* catch_block =
433 flow_graph().graph_entry()->GetCatchEntry(try_index);
434 const GrowableArray<Definition*>* idefs =
435 catch_block->initial_definitions();
436 catch_entry_moves_maps_builder_->NewMapping(assembler()->CodeSize());
437
438 for (intptr_t i = 0; i < flow_graph().variable_count(); ++i) {
439 // Don't sync captured parameters. They are not in the environment.
440 if (flow_graph().captured_parameters()->Contains(i)) continue;
441 auto param = (*idefs)[i]->AsParameter();
442
443 // Don't sync values that have been replaced with constants.
444 if (param == nullptr) continue;
445 RELEASE_ASSERT(param->env_index() == i);
446 Location dst = param->location();
447
448 // Don't sync exception or stack trace variables.
449 if (dst.IsRegister()) continue;
450
451 Location src = env->LocationAt(i);
452 // Can only occur if AllocationSinking is enabled - and it is disabled
453 // in functions with try.
454 ASSERT(!src.IsInvalid());
455 const Representation src_type =
456 env->ValueAt(i)->definition()->representation();
457 const auto move = CatchEntryMoveFor(assembler(), src_type, src,
458 LocationToStackIndex(dst));
459 if (!move.IsRedundant()) {
460 catch_entry_moves_maps_builder_->Append(move);
461 }
462 }
463
464 catch_entry_moves_maps_builder_->EndMapping();
465 }
466#endif // defined(DART_PRECOMPILER)
467}
468
470 intptr_t deopt_id,
472 LocationSummary* locs,
473 Environment* env) {
474 AddCurrentDescriptor(kind, deopt_id, source);
475 RecordSafepoint(locs);
477 if ((deopt_id != DeoptId::kNone) && !FLAG_precompiled_mode) {
478 // Marks either the continuation point in unoptimized code or the
479 // deoptimization point in optimized code, after call.
480 if (env != nullptr) {
481 // Note that we may lazy-deopt to the same IR instruction in unoptimized
482 // code or to another IR instruction (e.g. if LICM hoisted an instruction
483 // it will lazy-deopt to a Goto).
484 // If we happen to deopt to the beginning of an instruction in unoptimized
485 // code, we'll use the before deopt-id, otherwise the after deopt-id.
486 const intptr_t dest_deopt_id = env->LazyDeoptToBeforeDeoptId()
487 ? deopt_id
488 : DeoptId::ToDeoptAfter(deopt_id);
489 AddDeoptIndexAtCall(dest_deopt_id, env);
490 } else {
491 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
492 // Add deoptimization continuation point after the call and before the
493 // arguments are removed.
494 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after,
495 source);
496 }
497 }
498}
499
502 intptr_t yield_index) {
503 AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
504 DeoptId::kNone, source, CurrentTryIndex(), yield_index);
505}
506
507void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
508 if (!is_optimizing()) {
509 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) {
510 // Instructions that can be deoptimization targets need to record kDeopt
511 // PcDescriptor corresponding to their deopt id. GotoInstr records its
512 // own so that it can control the placement.
513 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, instr->deopt_id(),
514 instr->source());
515 }
516 AllocateRegistersLocally(instr);
517 }
518}
519
520#define __ assembler()->
521
522void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
523 if (is_optimizing()) {
524 return;
525 }
526 Definition* defn = instr->AsDefinition();
527 if (defn != nullptr && defn->HasTemp()) {
528 Location value = defn->locs()->out(0);
529 if (value.IsRegister()) {
530 __ PushRegister(value.reg());
531 } else if (value.IsFpuRegister()) {
532 const Code* stub;
533 switch (instr->representation()) {
534 case kUnboxedDouble:
535 stub = &StubCode::BoxDouble();
536 break;
537 case kUnboxedFloat32x4:
538 stub = &StubCode::BoxFloat32x4();
539 break;
540 case kUnboxedFloat64x2:
541 stub = &StubCode::BoxFloat64x2();
542 break;
543 default:
544 UNREACHABLE();
545 break;
546 }
547
548 // In unoptimized code at instruction epilogue the only
549 // live register is an output register.
550 instr->locs()->live_registers()->Clear();
551 if (instr->representation() == kUnboxedDouble) {
552 __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
553 } else {
554 __ MoveUnboxedSimd128(BoxDoubleStubABI::kValueReg, value.fpu_reg());
555 }
557 InstructionSource(), // No token position.
558 *stub, UntaggedPcDescriptors::kOther, instr->locs());
559 __ PushRegister(BoxDoubleStubABI::kResultReg);
560 } else if (value.IsConstant()) {
561 __ PushObject(value.constant());
562 } else {
563 ASSERT(value.IsStackSlot());
564 __ PushValueAtOffset(value.base_reg(), value.ToStackSlotOffset());
565 }
566 }
567}
568
569#undef __
570
571void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
572 if (!instr->token_pos().IsReal()) {
573 return;
574 }
575 const InstructionSource& source = instr->source();
576 const intptr_t inlining_id = source.inlining_id < 0 ? 0 : source.inlining_id;
577 const Function& function =
578 *code_source_map_builder_->inline_id_to_function()[inlining_id];
579 ASSERT(instr->env() == nullptr ||
580 instr->env()->function().ptr() == function.ptr());
581 const auto& script = Script::Handle(zone(), function.script());
582 intptr_t line_nr;
583 if (script.GetTokenLocation(source.token_pos, &line_nr)) {
584 const String& line = String::Handle(zone(), script.GetLine(line_nr));
585 assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr,
586 function.ToFullyQualifiedCString(), line.ToCString());
587 }
588}
589
590static bool IsPusher(Instruction* instr) {
591 if (auto def = instr->AsDefinition()) {
592 return def->HasTemp() && (instr->representation() == kTagged);
593 }
594 return false;
595}
596
597static bool IsPopper(Instruction* instr) {
598 // TODO(ajcbik): even allow deopt targets by making environment aware?
599 if (!instr->CanBecomeDeoptimizationTarget()) {
600 return instr->ArgumentCount() == 0 && instr->InputCount() > 0;
601 }
602 return false;
603}
604
605bool FlowGraphCompiler::IsPeephole(Instruction* instr) const {
606 if (FLAG_enable_peephole && !is_optimizing()) {
607 return IsPusher(instr) && IsPopper(instr->next());
608 }
609 return false;
610}
611
613 // When unwinding async stacks we might produce frames which correspond
614 // to future listeners which are going to be called when the future completes.
615 // These listeners are not yet called and thus their frame pc_offset is set
616 // to 0 - which does not actually correspond to any call- or yield- site
617 // inside the code object. Nevertheless we would like to be able to
618 // produce proper position information for it when symbolizing the stack.
619 // To achieve that in AOT mode (where we don't actually have
620 // |Function::token_pos| available) we instead emit an artificial descriptor
621 // at the very beginning of the function.
622 if (FLAG_precompiled_mode && flow_graph().function().IsClosureFunction()) {
623 code_source_map_builder_->WriteFunctionEntrySourcePosition(
624 InstructionSource(flow_graph().function().token_pos()));
625 }
626}
627
629 InitCompiler();
630
631#if !defined(TARGET_ARCH_IA32)
632 // For JIT we have multiple entrypoints functionality which moved the frame
633 // setup into the [TargetEntryInstr] (which will set the constant pool
634 // allowed bit to true). Despite this we still have to set the
635 // constant pool allowed bit to true here as well, because we can generate
636 // code for [CatchEntryInstr]s, which need the pool.
638#endif
639
641 VisitBlocks();
642
643#if defined(DEBUG)
645#endif
646
647 if (!skip_body_compilation()) {
648#if !defined(TARGET_ARCH_IA32)
649 ASSERT(assembler()->constant_pool_allowed());
650#endif
651 GenerateDeferredCode();
652 }
653
654 for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) {
655 indirect_gotos_[i]->ComputeOffsetTable(this);
656 }
657}
658
659#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
660// Returns true if function is marked with vm:align-loops pragma.
661static bool IsMarkedWithAlignLoops(const Function& function) {
664 /*only_core=*/false, function,
665 Symbols::vm_align_loops(),
666 /*multiple=*/false, &options);
667}
668#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
669
671 CompactBlocks();
673 // The loop_info fields were cleared, recompute.
674 flow_graph().ComputeLoops();
675 }
676
677 // In precompiled mode, we require the function entry to come first (after the
678 // graph entry), since the polymorphic check is performed in the function
679 // entry (see Instructions::EntryPoint).
680 if (FLAG_precompiled_mode) {
681 ASSERT(block_order()[1] == flow_graph().graph_entry()->normal_entry());
682 }
683
684#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
685 const auto inner_lr_state = ComputeInnerLRState(flow_graph());
686#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
687
688#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
689 const bool should_align_loops =
690 FLAG_align_all_loops || IsMarkedWithAlignLoops(function());
691#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
692
693 for (intptr_t i = 0; i < block_order().length(); ++i) {
694 // Compile the block entry.
695 BlockEntryInstr* entry = block_order()[i];
696 assembler()->Comment("B%" Pd "", entry->block_id());
697 set_current_block(entry);
698
699 if (WasCompacted(entry)) {
700 continue;
701 }
702
703#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
704 // At the start of every non-entry block we expect return address either
705 // to be spilled into the frame or to be in the LR register.
706 if (entry->IsFunctionEntry() || entry->IsNativeEntry()) {
707 assembler()->set_lr_state(compiler::LRState::OnEntry());
708 } else {
709 assembler()->set_lr_state(inner_lr_state);
710 }
711#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
712
713#if defined(DEBUG)
714 if (!is_optimizing()) {
715 FrameStateClear();
716 }
717#endif
718
720 for (LoopInfo* l = entry->loop_info(); l != nullptr; l = l->outer()) {
721 assembler()->Comment(" Loop %" Pd "", l->id());
722 }
723 if (entry->IsLoopHeader()) {
724 assembler()->Comment(" Loop Header");
725 }
726 }
727
728#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
729 if (should_align_loops && entry->IsLoopHeader() &&
733 }
734#else
735 static_assert(kPreferredLoopAlignment == 1);
736#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
737
739 ASSERT(pending_deoptimization_env_ == nullptr);
740 pending_deoptimization_env_ = entry->env();
741 set_current_instruction(entry);
742 StatsBegin(entry);
743 entry->EmitNativeCode(this);
744 StatsEnd(entry);
745 set_current_instruction(nullptr);
746 pending_deoptimization_env_ = nullptr;
747 EndCodeSourceRange(entry->source());
748
749 if (skip_body_compilation()) {
750 ASSERT(entry == flow_graph().graph_entry()->normal_entry());
751 break;
752 }
753
754 // Compile all successors until an exit, branch, or a block entry.
755 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
756 Instruction* instr = it.Current();
757 set_current_instruction(instr);
758 StatsBegin(instr);
759 // Unoptimized code always stores boxed values on the expression stack.
760 // However, unboxed representation is allowed for instruction inputs and
761 // outputs of certain types (e.g. for doubles).
762 // Unboxed inputs/outputs are handled in the instruction prologue
763 // and epilogue, but flagged as a mismatch on the IL level.
766
767 if (FLAG_code_comments || FLAG_disassemble ||
768 FLAG_disassemble_optimized) {
769 if (FLAG_source_lines) {
770 EmitSourceLine(instr);
771 }
772 EmitComment(instr);
773 }
774
776 EmitInstructionPrologue(instr);
777 ASSERT(pending_deoptimization_env_ == nullptr);
778 pending_deoptimization_env_ = instr->env();
779 DEBUG_ONLY(current_instruction_ = instr);
780 instr->EmitNativeCode(this);
781 DEBUG_ONLY(current_instruction_ = nullptr);
782 pending_deoptimization_env_ = nullptr;
783 if (IsPeephole(instr)) {
784 ASSERT(top_of_stack_ == nullptr);
785 top_of_stack_ = instr->AsDefinition();
786 } else {
787 EmitInstructionEpilogue(instr);
788 }
789 EndCodeSourceRange(instr->source());
790
791#if defined(DEBUG)
792 if (!is_optimizing()) {
793 FrameStateUpdateWith(instr);
794 }
795#endif
796 StatsEnd(instr);
797 set_current_instruction(nullptr);
798
799 if (auto indirect_goto = instr->AsIndirectGoto()) {
800 indirect_gotos_.Add(indirect_goto);
801 }
802 }
803
804#if defined(DEBUG)
805 ASSERT(is_optimizing() || FrameStateIsSafeToCall());
806#endif
807 }
808
809 set_current_block(nullptr);
810}
811
812void FlowGraphCompiler::Bailout(const char* reason) {
813 parsed_function_.Bailout("FlowGraphCompiler", reason);
814}
815
817 if (is_optimizing_) {
818 return flow_graph_.graph_entry()->spill_slot_count();
819 } else {
820 return parsed_function_.num_stack_locals();
821 }
822}
823
825 ASSERT(flow_graph().IsCompiledForOsr());
826 const intptr_t stack_depth =
828 const intptr_t num_stack_locals = flow_graph().num_stack_locals();
829 return StackSize() - stack_depth - num_stack_locals;
830}
831
833 BlockEntryInstr* block_entry) const {
834 const intptr_t block_index = block_entry->postorder_number();
835 return block_info_[block_index]->jump_label();
836}
837
839 const intptr_t block_index = block_entry->postorder_number();
840 return block_info_[block_index]->WasCompacted();
841}
842
844 const intptr_t current_index = current_block()->postorder_number();
845 return block_info_[current_index]->next_nonempty_label();
846}
847
849 return NextNonEmptyLabel() == GetJumpLabel(block_entry);
850}
851
853 compiler::Label* true_label = GetJumpLabel(branch->true_successor());
854 compiler::Label* false_label = GetJumpLabel(branch->false_successor());
855 compiler::Label* fall_through = NextNonEmptyLabel();
856 BranchLabels result = {true_label, false_label, fall_through};
857 return result;
858}
859
861 slow_path_code_.Add(code);
862}
863
864void FlowGraphCompiler::GenerateDeferredCode() {
865#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
866 const auto lr_state = ComputeInnerLRState(flow_graph());
867#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
868
869 for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
870 SlowPathCode* const slow_path = slow_path_code_[i];
873 slow_path->instruction()->tag());
874#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
875 assembler()->set_lr_state(lr_state);
876#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
877 set_current_instruction(slow_path->instruction());
878 set_current_block(current_instruction_->GetBlock());
879 SpecialStatsBegin(stats_tag);
880 BeginCodeSourceRange(slow_path->instruction()->source());
881 DEBUG_ONLY(current_instruction_ = slow_path->instruction());
882 slow_path->GenerateCode(this);
883 DEBUG_ONLY(current_instruction_ = nullptr);
884 EndCodeSourceRange(slow_path->instruction()->source());
885 SpecialStatsEnd(stats_tag);
886 set_current_instruction(nullptr);
887 set_current_block(nullptr);
888 }
889 // All code generated by deferred deopt info is treated as in the root
890 // function.
891 const InstructionSource deopt_source(TokenPosition::kDeferredDeoptInfo,
892 /*inlining_id=*/0);
893 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
894 BeginCodeSourceRange(deopt_source);
895#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
896 assembler()->set_lr_state(lr_state);
897#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
898 deopt_infos_[i]->GenerateCode(this, i);
899 EndCodeSourceRange(deopt_source);
900 }
901}
902
904 exception_handlers_list_->AddHandler(
905 entry->catch_try_index(), entry->try_index(), assembler()->CodeSize(),
906 entry->is_generated(), entry->catch_handler_types(),
907 entry->needs_stacktrace());
908 if (is_optimizing()) {
909 RecordSafepoint(entry->locs());
910 }
911}
912
914 exception_handlers_list_->SetNeedsStackTrace(try_index);
915}
916
918 intptr_t pc_offset,
919 intptr_t deopt_id,
921 intptr_t try_index,
922 intptr_t yield_index) {
923 code_source_map_builder_->NoteDescriptor(kind, pc_offset, source);
924 // Don't emit deopt-descriptors in AOT mode.
925 if (FLAG_precompiled_mode && (kind == UntaggedPcDescriptors::kDeopt)) return;
926 // Use the token position of the original call in the root function if source
927 // has an inlining id.
928 const auto& root_pos = code_source_map_builder_->RootPosition(source);
929 pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, root_pos,
930 try_index, yield_index);
931}
932
933// Uses current pc position and try-index.
935 intptr_t deopt_id,
936 const InstructionSource& source) {
937 AddDescriptor(kind, assembler()->CodeSize(), deopt_id, source,
939}
940
942 const String& name) {
943#if defined(DART_PRECOMPILER)
944 // If we are generating an AOT snapshot and have DWARF stack traces enabled,
945 // the AOT runtime is unable to obtain the pool index at runtime. Therefore,
946 // there is no reason to put the name into the pool in the first place.
947 // TODO(dartbug.com/40605): Move this info to the pc descriptors.
948 if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) return;
949#endif
950 const intptr_t name_index =
952 code_source_map_builder_->NoteNullCheck(assembler()->CodeSize(), source,
953 name_index);
954}
955
956void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function,
957 Code::EntryKind entry_kind) {
958 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
959 const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
962 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
963 Code::kPcRelativeCall, entry_point, assembler()->CodeSize(), &function,
964 nullptr, nullptr));
965}
966
967void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
968 DEBUG_ASSERT(stub_code.IsNotTemporaryScopedHandle());
969 ASSERT(!stub_code.IsNull());
970 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
972 nullptr, &stub_code, nullptr));
973}
974
975void FlowGraphCompiler::AddPcRelativeTailCallStubTarget(const Code& stub_code) {
976 DEBUG_ASSERT(stub_code.IsNotTemporaryScopedHandle());
977 ASSERT(!stub_code.IsNull());
978 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
980 nullptr, &stub_code, nullptr));
981}
982
983void FlowGraphCompiler::AddPcRelativeTTSCallTypeTarget(
984 const AbstractType& dst_type) {
985 DEBUG_ASSERT(dst_type.IsNotTemporaryScopedHandle());
986 ASSERT(!dst_type.IsNull());
987 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
989 nullptr, nullptr, &dst_type));
990}
991
992void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
993 Code::EntryKind entry_kind) {
994 DEBUG_ASSERT(func.IsNotTemporaryScopedHandle());
995 const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
998 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
999 Code::kCallViaCode, entry_point, assembler()->CodeSize(), &func, nullptr,
1000 nullptr));
1001}
1002
1004 DEBUG_ASSERT(code.IsNotTemporaryScopedHandle());
1005 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
1006 Code::kCallViaCode, Code::kDefaultEntry, assembler()->CodeSize(), nullptr,
1007 &code, nullptr));
1008}
1009
1011 const compiler::TableSelector* selector) {
1012 dispatch_table_call_targets_.Add(selector);
1013}
1014
1016 Environment* env) {
1019 ASSERT(!FLAG_precompiled_mode);
1020 if (env != nullptr) {
1021 env = env->GetLazyDeoptEnv(zone());
1022 }
1024 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall,
1025 0, // No flags.
1026 env);
1027 info->set_pc_offset(assembler()->CodeSize());
1028 deopt_infos_.Add(info);
1029 return info;
1030}
1031
1033 Environment* env) {
1034 ASSERT(deopt_id != DeoptId::kNone);
1035 deopt_id = DeoptId::ToDeoptAfter(deopt_id);
1037 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptUnknown, 0, env);
1038 info->set_pc_offset(assembler()->CodeSize());
1039 deopt_infos_.Add(info);
1040 return info;
1041}
1042
1043// This function must be in sync with FlowGraphCompiler::SaveLiveRegisters
1044// and FlowGraphCompiler::SlowPathEnvironmentFor.
1045// See StackFrame::VisitObjectPointers for the details of how stack map is
1046// interpreted.
1048 intptr_t slow_path_argument_count) {
1049 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
1050 const intptr_t spill_area_size =
1051 is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
1052
1053 RegisterSet* registers = locs->live_registers();
1054 ASSERT(registers != nullptr);
1055 const intptr_t kFpuRegisterSpillFactor =
1057 const bool using_shared_stub = locs->call_on_shared_slow_path();
1058
1060
1061 // Expand the bitmap to cover the whole area reserved for spill slots.
1062 // (register allocator takes care of marking slots containing live tagged
1063 // values but it does not do the same for other slots so length might be
1064 // below spill_area_size at this point).
1065 RELEASE_ASSERT(bitmap.Length() <= spill_area_size);
1066 bitmap.SetLength(spill_area_size);
1067
1068 auto instr = current_instruction();
1069 const intptr_t args_count = instr->ArgumentCount();
1070 RELEASE_ASSERT(args_count == 0 || is_optimizing());
1071
1072 for (intptr_t i = 0; i < args_count; i++) {
1073 const auto move_arg =
1074 instr->ArgumentValueAt(i)->instruction()->AsMoveArgument();
1075 const auto rep = move_arg->representation();
1076 if (move_arg->is_register_move()) {
1077 continue;
1078 }
1079
1080 ASSERT(rep == kTagged || rep == kUnboxedInt64 || rep == kUnboxedDouble);
1081 static_assert(compiler::target::kIntSpillFactor ==
1083 "int and double are of the same size");
1084 const bool is_tagged = move_arg->representation() == kTagged;
1085 const intptr_t num_bits =
1086 is_tagged ? 1 : compiler::target::kIntSpillFactor;
1087
1088 // Note: bits are reversed so higher bit corresponds to lower word.
1089 const intptr_t last_arg_bit =
1090 (spill_area_size - 1) - move_arg->sp_relative_index();
1091 bitmap.SetRange(last_arg_bit - (num_bits - 1), last_arg_bit, is_tagged);
1092 }
1093 ASSERT(slow_path_argument_count == 0 || !using_shared_stub);
1094 RELEASE_ASSERT(bitmap.Length() == spill_area_size);
1095
1096 // Trim the fully tagged suffix. Stack walking assumes that everything
1097 // not included into the stack map is tagged.
1098 intptr_t spill_area_bits = bitmap.Length();
1099 while (spill_area_bits > 0) {
1100 if (!bitmap.Get(spill_area_bits - 1)) {
1101 break;
1102 }
1103 spill_area_bits--;
1104 }
1105 bitmap.SetLength(spill_area_bits);
1106
1107 // Mark the bits in the stack map in the same order we push registers in
1108 // slow path code (see FlowGraphCompiler::SaveLiveRegisters).
1109 //
1110 // Slow path code can have registers at the safepoint.
1111 if (!locs->always_calls() && !using_shared_stub) {
1112 RegisterSet* regs = locs->live_registers();
1113 if (regs->FpuRegisterCount() > 0) {
1114 // Denote FPU registers with 0 bits in the stackmap. Based on the
1115 // assumption that there are normally few live FPU registers, this
1116 // encoding is simpler and roughly as compact as storing a separate
1117 // count of FPU registers.
1118 //
1119 // FPU registers have the highest register number at the highest
1120 // address (i.e., first in the stackmap).
1121 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
1122 FpuRegister reg = static_cast<FpuRegister>(i);
1123 if (regs->ContainsFpuRegister(reg)) {
1124 for (intptr_t j = 0; j < kFpuRegisterSpillFactor; ++j) {
1125 bitmap.Set(bitmap.Length(), false);
1126 }
1127 }
1128 }
1129 }
1130
1131 // General purpose registers have the highest register number at the
1132 // highest address (i.e., first in the stackmap).
1133 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1134 Register reg = static_cast<Register>(i);
1135 if (locs->live_registers()->ContainsRegister(reg)) {
1136 bitmap.Set(bitmap.Length(), locs->live_registers()->IsTagged(reg));
1137 }
1138 }
1139 }
1140
1141 if (using_shared_stub) {
1142 // To simplify the code in the shared stub, we create an untagged hole
1143 // in the stack frame where the shared stub can leave the return address
1144 // before saving registers.
1145 bitmap.Set(bitmap.Length(), false);
1146 if (registers->FpuRegisterCount() > 0) {
1147 bitmap.SetRange(bitmap.Length(),
1148 bitmap.Length() +
1149 kNumberOfFpuRegisters * kFpuRegisterSpillFactor - 1,
1150 false);
1151 }
1152 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1153 if ((kReservedCpuRegisters & (1 << i)) != 0) continue;
1154 const Register reg = static_cast<Register>(i);
1155 bitmap.Set(bitmap.Length(),
1156 locs->live_registers()->ContainsRegister(reg) &&
1157 locs->live_registers()->IsTagged(reg));
1158 }
1159 }
1160
1161 // Arguments pushed after live registers in the slow path are tagged.
1162 for (intptr_t i = 0; i < slow_path_argument_count; ++i) {
1163 bitmap.Set(bitmap.Length(), true);
1164 }
1165
1166 compressed_stackmaps_builder_->AddEntry(assembler()->CodeSize(), &bitmap,
1167 spill_area_bits);
1168 }
1169}
1170
1171// This function must be kept in sync with:
1172//
1173// FlowGraphCompiler::RecordSafepoint
1174// FlowGraphCompiler::SaveLiveRegisters
1175// MaterializeObjectInstr::RemapRegisters
1176//
1179 LocationSummary* locs,
1180 intptr_t num_slow_path_args) {
1181 const bool using_shared_stub = locs->call_on_shared_slow_path();
1182 const bool shared_stub_save_fpu_registers =
1183 using_shared_stub && locs->live_registers()->FpuRegisterCount() > 0;
1184 // TODO(sjindel): Modify logic below to account for slow-path args with shared
1185 // stubs.
1186 ASSERT(!using_shared_stub || num_slow_path_args == 0);
1187 if (env == nullptr) {
1188 // In AOT, environments can be removed by EliminateEnvironments pass
1189 // (if not in a try block).
1190 ASSERT(!is_optimizing() || FLAG_precompiled_mode);
1191 return nullptr;
1192 }
1193
1194 Environment* slow_path_env =
1195 env->DeepCopy(zone(), env->Length() - env->LazyDeoptPruneCount());
1196 // 1. Iterate the registers in the order they will be spilled to compute
1197 // the slots they will be spilled to.
1198 intptr_t next_slot = StackSize() + slow_path_env->CountArgsPushed();
1199 if (using_shared_stub) {
1200 // The PC from the call to the shared stub is pushed here.
1201 next_slot++;
1202 }
1203 RegisterSet* regs = locs->live_registers();
1204 intptr_t fpu_reg_slots[kNumberOfFpuRegisters];
1205 intptr_t cpu_reg_slots[kNumberOfCpuRegisters];
1206 const intptr_t kFpuRegisterSpillFactor =
1208 // FPU registers are spilled first from highest to lowest register number.
1209 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
1210 FpuRegister reg = static_cast<FpuRegister>(i);
1211 if (regs->ContainsFpuRegister(reg)) {
1212 // We use the lowest address (thus highest index) to identify a
1213 // multi-word spill slot.
1214 next_slot += kFpuRegisterSpillFactor;
1215 fpu_reg_slots[i] = (next_slot - 1);
1216 } else {
1217 if (using_shared_stub && shared_stub_save_fpu_registers) {
1218 next_slot += kFpuRegisterSpillFactor;
1219 }
1220 fpu_reg_slots[i] = -1;
1221 }
1222 }
1223 // General purpose registers are spilled from highest to lowest register
1224 // number.
1225 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1226 if ((kReservedCpuRegisters & (1 << i)) != 0) continue;
1227 Register reg = static_cast<Register>(i);
1228 if (regs->ContainsRegister(reg)) {
1229 cpu_reg_slots[i] = next_slot++;
1230 } else {
1231 if (using_shared_stub) next_slot++;
1232 cpu_reg_slots[i] = -1;
1233 }
1234 }
1235
1236 // 2. Iterate the environment and replace register locations with the
1237 // corresponding spill slot locations.
1238 for (Environment::DeepIterator it(slow_path_env); !it.Done(); it.Advance()) {
1239 Location loc = it.CurrentLocation();
1240 Value* value = it.CurrentValue();
1241 it.SetCurrentLocation(LocationRemapForSlowPath(
1242 loc, value->definition(), cpu_reg_slots, fpu_reg_slots));
1243 }
1244
1245 return slow_path_env;
1246}
1247
1249 ICData::DeoptReasonId reason,
1250 uint32_t flags) {
1251 if (intrinsic_mode()) {
1252 return intrinsic_slow_path_label_;
1253 }
1254
1255 // No deoptimization allowed when 'FLAG_precompiled_mode' is set.
1256 if (FLAG_precompiled_mode) {
1257 if (FLAG_trace_compiler) {
1258 THR_Print(
1259 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
1260 parsed_function_.function().ToFullyQualifiedCString(), deopt_id);
1261 }
1262 ASSERT(speculative_policy_->AllowsSpeculativeInlining());
1263 ASSERT(deopt_id != 0); // longjmp must return non-zero value.
1265 deopt_id, Object::speculative_inlining_error());
1266 }
1267
1268 ASSERT(is_optimizing_);
1269 ASSERT(pending_deoptimization_env_ != nullptr);
1270 if (pending_deoptimization_env_->IsHoisted()) {
1272 }
1274 deopt_id, reason, flags, pending_deoptimization_env_);
1275 deopt_infos_.Add(stub);
1276 return stub->entry_label();
1277}
1278
1280 ASSERT(exception_handlers_list_ != nullptr);
1282 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart()));
1283 code.set_exception_handlers(handlers);
1284}
1285
1287 ASSERT(pc_descriptors_list_ != nullptr);
1288 const PcDescriptors& descriptors = PcDescriptors::Handle(
1289 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
1290 if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
1291 code.set_pc_descriptors(descriptors);
1292}
1293
1295 // No deopt information if we precompile (no deoptimization allowed).
1296 if (FLAG_precompiled_mode) {
1297 return Array::empty_array().ptr();
1298 }
1299 // For functions with optional arguments, all incoming arguments are copied
1300 // to spill slots. The deoptimization environment does not track them.
1302 const intptr_t incoming_arg_count =
1304 DeoptInfoBuilder builder(zone(), incoming_arg_count, assembler);
1305
1306 intptr_t deopt_info_table_size = DeoptTable::SizeFor(deopt_infos_.length());
1307 if (deopt_info_table_size == 0) {
1308 return Object::empty_array().ptr();
1309 } else {
1310 const Array& array =
1311 Array::Handle(Array::New(deopt_info_table_size, Heap::kOld));
1312 Smi& offset = Smi::Handle();
1314 Smi& reason_and_flags = Smi::Handle();
1315 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
1316 offset = Smi::New(deopt_infos_[i]->pc_offset());
1317 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
1318 reason_and_flags = DeoptTable::EncodeReasonAndFlags(
1319 deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
1320 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
1321 }
1322 return array.ptr();
1323 }
1324}
1325
1327 ASSERT(compressed_stackmaps_builder_ != nullptr);
1328 // Finalize the compressed stack maps and add it to the code object.
1329 const auto& maps =
1330 CompressedStackMaps::Handle(compressed_stackmaps_builder_->Finalize());
1331 code.set_compressed_stackmaps(maps);
1332}
1333
1335#if defined(PRODUCT)
1336// No debugger: no var descriptors.
1337#else
1338 if (code.is_optimized()) {
1339 // Optimized code does not need variable descriptors. They are
1340 // only stored in the unoptimized version.
1341 code.set_var_descriptors(Object::empty_var_descriptors());
1342 return;
1343 }
1345 if (flow_graph().IsIrregexpFunction()) {
1346 // Eager local var descriptors computation for Irregexp function as it is
1347 // complicated to factor out.
1348 // TODO(srdjan): Consider canonicalizing and reusing the local var
1349 // descriptor for IrregexpFunction.
1350 ASSERT(parsed_function().scope() == nullptr);
1351 var_descs = LocalVarDescriptors::New(1);
1354 info.scope_id = 0;
1355 info.begin_pos = TokenPosition::kMinSource;
1357 info.set_index(compiler::target::frame_layout.FrameSlotForVariable(
1358 parsed_function().current_context_var()));
1359 var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
1360 }
1361 code.set_var_descriptors(var_descs);
1362#endif
1363}
1364
1366#if defined(DART_PRECOMPILER)
1367 if (FLAG_precompiled_mode) {
1369 catch_entry_moves_maps_builder_->FinalizeCatchEntryMovesMap());
1370 code.set_catch_entry_moves_maps(maps);
1371 return;
1372 }
1373#endif
1374 code.set_num_variables(flow_graph().variable_count());
1375}
1376
1378 ASSERT(code.static_calls_target_table() == Array::null());
1379 const auto& calls = static_calls_target_table_;
1380 const intptr_t array_length = calls.length() * Code::kSCallTableEntryLength;
1381 const auto& targets =
1382 Array::Handle(zone(), Array::New(array_length, Heap::kOld));
1383
1384 StaticCallsTable entries(targets);
1385 auto& kind_type_and_offset = Smi::Handle(zone());
1386 for (intptr_t i = 0; i < calls.length(); i++) {
1387 auto entry = calls[i];
1388 kind_type_and_offset =
1389 Smi::New(Code::KindField::encode(entry->call_kind) |
1390 Code::EntryPointField::encode(entry->entry_point) |
1391 Code::OffsetField::encode(entry->offset));
1392 auto view = entries[i];
1393 view.Set<Code::kSCallTableKindAndOffset>(kind_type_and_offset);
1394 const Object* target = nullptr;
1395 if (entry->function != nullptr) {
1396 target = entry->function;
1397 view.Set<Code::kSCallTableFunctionTarget>(*entry->function);
1398 }
1399 if (entry->code != nullptr) {
1400 ASSERT(target == nullptr);
1401 target = entry->code;
1402 view.Set<Code::kSCallTableCodeOrTypeTarget>(*entry->code);
1403 }
1404 if (entry->dst_type != nullptr) {
1405 ASSERT(target == nullptr);
1406 view.Set<Code::kSCallTableCodeOrTypeTarget>(*entry->dst_type);
1407 }
1408 }
1409 code.set_static_calls_target_table(targets);
1410}
1411
1413 const Array& inlined_id_array =
1414 Array::Handle(zone(), code_source_map_builder_->InliningIdToFunction());
1415 code.set_inlined_id_to_function(inlined_id_array);
1416
1417 const CodeSourceMap& map =
1418 CodeSourceMap::Handle(code_source_map_builder_->Finalize());
1419 code.set_code_source_map(map);
1420
1421#if defined(DEBUG)
1422 // Force simulation through the last pc offset. This checks we can decode
1423 // the whole CodeSourceMap without hitting an unknown opcode, stack underflow,
1424 // etc.
1427 code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens);
1428#endif
1429}
1430
1431// Returns 'true' if regular code generation should be skipped.
1433 if (TryIntrinsifyHelper()) {
1434 fully_intrinsified_ = true;
1435 return true;
1436 }
1437 return false;
1438}
1439
1440bool FlowGraphCompiler::TryIntrinsifyHelper() {
1441 ASSERT(!flow_graph().IsCompiledForOsr());
1442
1445
1447
1449 bool complete = compiler::Intrinsifier::Intrinsify(parsed_function(), this);
1451
1453
1454 // "Deoptimization" from intrinsic continues here. All deoptimization
1455 // branches from intrinsic code redirect to here where the slow-path
1456 // (normal function body) starts.
1457 // This means that there must not be any side-effects in intrinsic code
1458 // before any deoptimization point.
1461 return complete;
1462}
1463
1465 const Code& stub,
1467 LocationSummary* locs,
1468 intptr_t deopt_id,
1469 Environment* env) {
1470 ASSERT(FLAG_precompiled_mode ||
1471 (deopt_id != DeoptId::kNone && (!is_optimizing() || env != nullptr)));
1472 EmitCallToStub(stub);
1473 EmitCallsiteMetadata(source, deopt_id, kind, locs, env);
1474}
1475
1478 const Code& stub,
1480 LocationSummary* locs,
1481 ObjectPool::SnapshotBehavior snapshot_behavior) {
1482 EmitCallToStub(stub, snapshot_behavior);
1483 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs, /*env=*/nullptr);
1484}
1485
1486static const Code& StubEntryFor(const ICData& ic_data, bool optimized) {
1487 switch (ic_data.NumArgsTested()) {
1488 case 1:
1489 if (ic_data.is_tracking_exactness()) {
1490 if (optimized) {
1491 return StubCode::OneArgOptimizedCheckInlineCacheWithExactnessCheck();
1492 } else {
1493 return StubCode::OneArgCheckInlineCacheWithExactnessCheck();
1494 }
1495 }
1496 return optimized ? StubCode::OneArgOptimizedCheckInlineCache()
1497 : StubCode::OneArgCheckInlineCache();
1498 case 2:
1499 ASSERT(!ic_data.is_tracking_exactness());
1500 return optimized ? StubCode::TwoArgsOptimizedCheckInlineCache()
1501 : StubCode::TwoArgsCheckInlineCache();
1502 default:
1503 ic_data.Print();
1504 UNIMPLEMENTED();
1505 return Code::Handle();
1506 }
1507}
1508
1511 LocationSummary* locs,
1512 const ICData& ic_data_in,
1513 Code::EntryKind entry_kind,
1514 bool receiver_can_be_smi) {
1515 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1516 if (FLAG_precompiled_mode) {
1517 ic_data = ic_data.AsUnaryClassChecks();
1518 EmitInstanceCallAOT(ic_data, deopt_id, source, locs, entry_kind,
1519 receiver_can_be_smi);
1520 return;
1521 }
1522 ASSERT(!ic_data.IsNull());
1523 if (is_optimizing() && (ic_data_in.NumberOfUsedChecks() == 0)) {
1524 // Emit IC call that will count and thus may need reoptimization at
1525 // function entry.
1526 ASSERT(may_reoptimize() || flow_graph().IsCompiledForOsr());
1527 EmitOptimizedInstanceCall(StubEntryFor(ic_data, /*optimized=*/true),
1528 ic_data, deopt_id, source, locs, entry_kind);
1529 return;
1530 }
1531
1532 if (is_optimizing()) {
1533 EmitMegamorphicInstanceCall(ic_data_in, deopt_id, source, locs);
1534 return;
1535 }
1536
1537 EmitInstanceCallJIT(StubEntryFor(ic_data, /*optimized=*/false), ic_data,
1538 deopt_id, source, locs, entry_kind);
1539}
1540
1543 const Function& function,
1544 ArgumentsInfo args_info,
1545 LocationSummary* locs,
1546 const ICData& ic_data_in,
1547 ICData::RebindRule rebind_rule,
1548 Code::EntryKind entry_kind) {
1549 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1550 const Array& arguments_descriptor = Array::ZoneHandle(
1551 zone(), ic_data.IsNull() ? args_info.ToArgumentsDescriptor()
1552 : ic_data.arguments_descriptor());
1553 ASSERT(ArgumentsDescriptor(arguments_descriptor).TypeArgsLen() ==
1554 args_info.type_args_len);
1555 ASSERT(ArgumentsDescriptor(arguments_descriptor).Count() ==
1556 args_info.count_without_type_args);
1557 ASSERT(ArgumentsDescriptor(arguments_descriptor).Size() ==
1558 args_info.size_without_type_args);
1559 // Force-optimized functions lack the deopt info which allows patching of
1560 // optimized static calls.
1561 if (is_optimizing() && (!ForcedOptimization() || FLAG_precompiled_mode)) {
1562 EmitOptimizedStaticCall(function, arguments_descriptor,
1563 args_info.size_with_type_args, deopt_id, source,
1564 locs, entry_kind);
1565 } else {
1566 ICData& call_ic_data = ICData::ZoneHandle(zone(), ic_data.ptr());
1567 if (call_ic_data.IsNull()) {
1568 const intptr_t kNumArgsChecked = 0;
1569 call_ic_data =
1570 GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
1571 kNumArgsChecked, rebind_rule)
1572 ->ptr();
1573 call_ic_data = call_ic_data.Original();
1574 }
1575 AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id, source);
1576 EmitUnoptimizedStaticCall(args_info.size_with_type_args, deopt_id, source,
1577 locs, call_ic_data, entry_kind);
1578 }
1579}
1580
1582 Register class_id_reg,
1583 const AbstractType& type,
1584 compiler::Label* is_instance_lbl,
1585 compiler::Label* is_not_instance_lbl) {
1586 assembler()->Comment("NumberTypeCheck");
1588 if (type.IsNumberType()) {
1589 args.Add(kDoubleCid);
1590 args.Add(kMintCid);
1591 } else if (type.IsIntType()) {
1592 args.Add(kMintCid);
1593 } else if (type.IsDoubleType()) {
1594 args.Add(kDoubleCid);
1595 }
1596 CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
1597}
1598
1600 Register class_id_reg,
1601 compiler::Label* is_instance_lbl,
1602 compiler::Label* is_not_instance_lbl) {
1603 assembler()->Comment("StringTypeCheck");
1605 args.Add(kOneByteStringCid);
1606 args.Add(kTwoByteStringCid);
1607 CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
1608}
1609
1611 Register class_id_reg,
1612 compiler::Label* is_instance_lbl) {
1613 assembler()->Comment("ListTypeCheck");
1614 COMPILE_ASSERT((kImmutableArrayCid == kArrayCid + 1) &&
1615 (kGrowableObjectArrayCid == kArrayCid + 2));
1616 CidRangeVector ranges;
1617 ranges.Add({kArrayCid, kGrowableObjectArrayCid});
1618 GenerateCidRangesCheck(assembler(), class_id_reg, ranges, is_instance_lbl);
1619}
1620
1622#if defined(INCLUDE_IL_PRINTER)
1623 char buffer[256];
1624 BufferFormatter f(buffer, sizeof(buffer));
1625 instr->PrintTo(&f);
1626 assembler()->Comment("%s", buffer);
1627#endif // defined(INCLUDE_IL_PRINTER)
1628}
1629
1631 // Only emit an edge counter if there is not goto at the end of the block,
1632 // except for the entry block.
1633 return FLAG_reorder_basic_blocks &&
1634 (!block->last_instruction()->IsGoto() || block->IsFunctionEntry());
1635}
1636
1637// Allocate a register that is not explicitly blocked.
1638static Register AllocateFreeRegister(bool* blocked_registers) {
1639 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1640 intptr_t regno = (i + kRegisterAllocationBias) % kNumberOfCpuRegisters;
1641 if (!blocked_registers[regno]) {
1642 blocked_registers[regno] = true;
1643 return static_cast<Register>(regno);
1644 }
1645 }
1646 UNREACHABLE();
1647 return kNoRegister;
1648}
1649
1650// Allocate a FPU register that is not explicitly blocked.
1651static FpuRegister AllocateFreeFpuRegister(bool* blocked_registers) {
1652 for (intptr_t regno = 0; regno < kNumberOfFpuRegisters; regno++) {
1653 if (!blocked_registers[regno]) {
1654 blocked_registers[regno] = true;
1655 return static_cast<FpuRegister>(regno);
1656 }
1657 }
1658 UNREACHABLE();
1659 return kNoFpuRegister;
1660}
1661
1662void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
1664 instr->InitializeLocationSummary(zone(), false); // Not optimizing.
1665
1666 LocationSummary* locs = instr->locs();
1667
1668 bool blocked_registers[kNumberOfCpuRegisters];
1669 bool blocked_fpu_registers[kNumberOfFpuRegisters];
1670
1671 // Block all registers globally reserved by the assembler, etc and mark
1672 // the rest as free.
1673 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1674 blocked_registers[i] = (kDartAvailableCpuRegs & (1 << i)) == 0;
1675 }
1676 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1677 blocked_fpu_registers[i] = false;
1678 }
1679
1680 // Mark all fixed input, temp and output registers as used.
1681 for (intptr_t i = 0; i < locs->input_count(); i++) {
1682 Location loc = locs->in(i);
1683 if (loc.IsRegister()) {
1684 // Check that a register is not specified twice in the summary.
1685 ASSERT(!blocked_registers[loc.reg()]);
1686 blocked_registers[loc.reg()] = true;
1687 } else if (loc.IsFpuRegister()) {
1688 // Check that a register is not specified twice in the summary.
1689 const FpuRegister fpu_reg = loc.fpu_reg();
1690 ASSERT((fpu_reg >= 0) && (fpu_reg < kNumberOfFpuRegisters));
1691 ASSERT(!blocked_fpu_registers[fpu_reg]);
1692 blocked_fpu_registers[fpu_reg] = true;
1693 }
1694 }
1695
1696 for (intptr_t i = 0; i < locs->temp_count(); i++) {
1697 Location loc = locs->temp(i);
1698 if (loc.IsRegister()) {
1699 // Check that a register is not specified twice in the summary.
1700 ASSERT(!blocked_registers[loc.reg()]);
1701 blocked_registers[loc.reg()] = true;
1702 } else if (loc.IsFpuRegister()) {
1703 // Check that a register is not specified twice in the summary.
1704 const FpuRegister fpu_reg = loc.fpu_reg();
1705 ASSERT((fpu_reg >= 0) && (fpu_reg < kNumberOfFpuRegisters));
1706 ASSERT(!blocked_fpu_registers[fpu_reg]);
1707 blocked_fpu_registers[fpu_reg] = true;
1708 }
1709 }
1710
1711 // Connect input with peephole output for some special cases. All other
1712 // cases are handled by simply allocating registers and generating code.
1713 if (top_of_stack_ != nullptr) {
1714 const intptr_t p = locs->input_count() - 1;
1715 Location peephole = top_of_stack_->locs()->out(0);
1716 if ((instr->RequiredInputRepresentation(p) == kTagged) &&
1717 (locs->in(p).IsUnallocated() || locs->in(p).IsConstant())) {
1718 // If input is unallocated, match with an output register, if set. Also,
1719 // if input is a direct constant, but the peephole output is a register,
1720 // use that register to avoid wasting the already generated code.
1721 if (peephole.IsRegister() && !blocked_registers[peephole.reg()]) {
1722 locs->set_in(p, Location::RegisterLocation(peephole.reg()));
1723 blocked_registers[peephole.reg()] = true;
1724 }
1725 }
1726 }
1727
1728 if (locs->out(0).IsRegister()) {
1729 // Fixed output registers are allowed to overlap with
1730 // temps and inputs.
1731 blocked_registers[locs->out(0).reg()] = true;
1732 } else if (locs->out(0).IsFpuRegister()) {
1733 // Fixed output registers are allowed to overlap with
1734 // temps and inputs.
1735 blocked_fpu_registers[locs->out(0).fpu_reg()] = true;
1736 }
1737
1738 // Allocate all unallocated input locations.
1739 ASSERT(!instr->IsMoveArgument());
1740 Register fpu_unboxing_temp = kNoRegister;
1741 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) {
1742 Location loc = locs->in(i);
1743 Register reg = kNoRegister;
1744 FpuRegister fpu_reg = kNoFpuRegister;
1745 if (loc.IsRegister()) {
1746 reg = loc.reg();
1747 } else if (loc.IsFpuRegister()) {
1748 fpu_reg = loc.fpu_reg();
1749 } else if (loc.IsUnallocated()) {
1750 switch (loc.policy()) {
1751 case Location::kRequiresRegister:
1752 case Location::kWritableRegister:
1753 case Location::kPrefersRegister:
1754 case Location::kAny:
1755 reg = AllocateFreeRegister(blocked_registers);
1756 locs->set_in(i, Location::RegisterLocation(reg));
1757 break;
1758 case Location::kRequiresFpuRegister:
1759 fpu_reg = AllocateFreeFpuRegister(blocked_fpu_registers);
1760 locs->set_in(i, Location::FpuRegisterLocation(fpu_reg));
1761 break;
1762 default:
1763 UNREACHABLE();
1764 }
1765 }
1766
1767 if (fpu_reg != kNoFpuRegister) {
1768 ASSERT(reg == kNoRegister);
1769 // Allocate temporary CPU register for unboxing, but only once.
1770 if (fpu_unboxing_temp == kNoRegister) {
1771 fpu_unboxing_temp = AllocateFreeRegister(blocked_registers);
1772 }
1773 reg = fpu_unboxing_temp;
1774 }
1775
1776 ASSERT(reg != kNoRegister || loc.IsConstant());
1777
1778 // Inputs are consumed from the simulated frame (or a peephole push/pop).
1779 // In case of a call argument we leave it until the call instruction.
1780 if (top_of_stack_ != nullptr) {
1781 if (!loc.IsConstant()) {
1782 // Moves top of stack location of the peephole into the required
1783 // input. None of the required moves needs a temp register allocator.
1784 EmitMove(Location::RegisterLocation(reg), top_of_stack_->locs()->out(0),
1785 nullptr);
1786 }
1787 top_of_stack_ = nullptr; // consumed!
1788 } else if (loc.IsConstant()) {
1789 assembler()->Drop(1);
1790 } else {
1791 assembler()->PopRegister(reg);
1792 }
1793 if (!loc.IsConstant()) {
1794 switch (instr->RequiredInputRepresentation(i)) {
1795 case kUnboxedDouble:
1796 ASSERT(fpu_reg != kNoFpuRegister);
1797 ASSERT(instr->SpeculativeModeOfInput(i) ==
1798 Instruction::kNotSpeculative);
1799 assembler()->LoadUnboxedDouble(
1800 fpu_reg, reg,
1801 compiler::target::Double::value_offset() - kHeapObjectTag);
1802 break;
1803 case kUnboxedFloat32x4:
1804 case kUnboxedFloat64x2:
1805 ASSERT(fpu_reg != kNoFpuRegister);
1806 ASSERT(instr->SpeculativeModeOfInput(i) ==
1807 Instruction::kNotSpeculative);
1808 assembler()->LoadUnboxedSimd128(
1809 fpu_reg, reg,
1810 compiler::target::Float32x4::value_offset() - kHeapObjectTag);
1811 break;
1812 default:
1813 // No automatic unboxing for other representations.
1814 ASSERT(fpu_reg == kNoFpuRegister);
1815 break;
1816 }
1817 }
1818 }
1819
1820 // Allocate all unallocated temp locations.
1821 for (intptr_t i = 0; i < locs->temp_count(); i++) {
1822 Location loc = locs->temp(i);
1823 if (loc.IsUnallocated()) {
1824 switch (loc.policy()) {
1825 case Location::kRequiresRegister:
1826 loc = Location::RegisterLocation(
1827 AllocateFreeRegister(blocked_registers));
1828 locs->set_temp(i, loc);
1829 break;
1830 case Location::kRequiresFpuRegister:
1831 loc = Location::FpuRegisterLocation(
1832 AllocateFreeFpuRegister(blocked_fpu_registers));
1833 locs->set_temp(i, loc);
1834 break;
1835 default:
1836 UNREACHABLE();
1837 }
1838 }
1839 }
1840
1841 Location result_location = locs->out(0);
1842 if (result_location.IsUnallocated()) {
1843 switch (result_location.policy()) {
1844 case Location::kAny:
1845 case Location::kPrefersRegister:
1846 case Location::kRequiresRegister:
1847 case Location::kWritableRegister:
1848 result_location =
1849 Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
1850 break;
1851 case Location::kSameAsFirstInput:
1852 result_location = locs->in(0);
1853 break;
1854 case Location::kRequiresFpuRegister:
1855 result_location = Location::FpuRegisterLocation(
1856 AllocateFreeFpuRegister(blocked_fpu_registers));
1857 break;
1859 // Only available in optimized mode.
1861 UNREACHABLE();
1862 }
1863 locs->set_out(0, result_location);
1864 }
1865}
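// Worked example (hypothetical instruction): an unoptimized binary op whose
// LocationSummary requests two kRequiresRegister inputs and a register output
// is handled as follows: registers outside kDartAvailableCpuRegs are blocked,
// the two inputs are popped from the simulated expression stack (right to
// left) into freshly allocated CPU registers, unallocated temps get free
// registers, and the output either gets a free register or reuses in(0) when
// its policy is kSameAsFirstInput.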
1866
1867const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
1868 intptr_t deopt_id,
1869 const String& target_name,
1870 const Array& arguments_descriptor,
1871 intptr_t num_args_tested,
1872 const AbstractType& receiver_type,
1873 const Function& binary_smi_target) {
1874 if ((deopt_id_to_ic_data_ != nullptr) &&
1875 ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
1876 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1877 ASSERT(res->deopt_id() == deopt_id);
1878 ASSERT(res->target_name() == target_name.ptr());
1879 ASSERT(res->NumArgsTested() == num_args_tested);
1880 ASSERT(res->TypeArgsLen() ==
1881 ArgumentsDescriptor(arguments_descriptor).TypeArgsLen());
1882 ASSERT(!res->is_static_call());
1883 ASSERT(res->receivers_static_type() == receiver_type.ptr());
1884 return res;
1885 }
1886
1887 auto& ic_data = ICData::ZoneHandle(zone());
1888 if (!binary_smi_target.IsNull()) {
1889 ASSERT(num_args_tested == 2);
1890 ASSERT(!binary_smi_target.IsNull());
1891 GrowableArray<intptr_t> cids(num_args_tested);
1892 cids.Add(kSmiCid);
1893 cids.Add(kSmiCid);
1894 ic_data = ICData::NewWithCheck(parsed_function().function(), target_name,
1895 arguments_descriptor, deopt_id,
1896 num_args_tested, ICData::kInstance, &cids,
1897 binary_smi_target, receiver_type);
1898 } else {
1899 ic_data = ICData::New(parsed_function().function(), target_name,
1900 arguments_descriptor, deopt_id, num_args_tested,
1901 ICData::kInstance, receiver_type);
1902 }
1903
1904 if (deopt_id_to_ic_data_ != nullptr) {
1905 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1906 }
1907 ASSERT(!ic_data.is_static_call());
1908 return &ic_data;
1909}
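// Note on the cache above: ICData objects are keyed by deopt_id, so repeated
// code generation for the same call site reuses a single ICData. When a
// binary_smi_target is supplied, the new ICData is pre-seeded with a
// (kSmiCid, kSmiCid) check, so the common Smi-Smi fast path starts out with a
// recorded target instead of an empty cache.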
1910
1911const ICData* FlowGraphCompiler::GetOrAddStaticCallICData(
1912 intptr_t deopt_id,
1913 const Function& target,
1914 const Array& arguments_descriptor,
1915 intptr_t num_args_tested,
1916 ICData::RebindRule rebind_rule) {
1917 if ((deopt_id_to_ic_data_ != nullptr) &&
1918 ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
1919 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1920 ASSERT(res->deopt_id() == deopt_id);
1921 ASSERT(res->target_name() == target.name());
1922 ASSERT(res->NumArgsTested() == num_args_tested);
1923 ASSERT(res->TypeArgsLen() ==
1924 ArgumentsDescriptor(arguments_descriptor).TypeArgsLen());
1925 ASSERT(res->is_static_call());
1926 return res;
1927 }
1928
1929 const auto& ic_data = ICData::ZoneHandle(
1930 zone(), ICData::NewForStaticCall(parsed_function().function(), target,
1931 arguments_descriptor, deopt_id,
1932 num_args_tested, rebind_rule));
1933 if (deopt_id_to_ic_data_ != nullptr) {
1934 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1935 }
1936 return &ic_data;
1937}
1938
1939intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
1940 intptr_t threshold;
1941 if (is_optimizing()) {
1942 threshold = FLAG_reoptimization_counter_threshold;
1943 } else if (parsed_function_.function().IsIrregexpFunction()) {
1944 threshold = FLAG_regexp_optimization_counter_threshold;
1945 } else {
1946 const auto configured_optimization_counter_threshold =
1947 IsolateGroup::Current()->optimization_counter_threshold();
1948
1949 const intptr_t basic_blocks = flow_graph().preorder().length();
1950 ASSERT(basic_blocks > 0);
1951 threshold = FLAG_optimization_counter_scale * basic_blocks +
1952 FLAG_min_optimization_counter_threshold;
1953 if (threshold > configured_optimization_counter_threshold) {
1954 threshold = configured_optimization_counter_threshold;
1955 }
1956 }
1957
1958 // Threshold = 0 doesn't make sense because we increment the counter before
1959 // testing against the threshold. Perhaps we could interpret it to mean
1960 // "generate optimized code immediately without unoptimized compilation
1961 // first", but this isn't supported in our pipeline because there would be no
1962 // code for the optimized code to deoptimize into.
1963 if (threshold == 0) threshold = 1;
1964
1965 // See Compiler::CanOptimizeFunction. In short, we have to allow the
1966 // unoptimized code to run at least once to prevent an infinite compilation
1967 // loop.
1968 if (threshold == 1 && parsed_function().function().HasBreakpoint()) {
1969 threshold = 2;
1970 }
1971
1972 return threshold;
1973}
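// Worked example (hypothetical function): with FLAG_optimization_counter_scale
// = 2000 and FLAG_min_optimization_counter_threshold = 5000 (their defaults in
// this file) and a 4-block unoptimized function, the threshold would be
// 2000 * 4 + 5000 = 13000 invocations, then capped at the configured
// optimization counter threshold if that is lower; irregexp functions and
// reoptimization use their dedicated flags instead.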
1974
1975const Class& FlowGraphCompiler::BoxClassFor(Representation rep) {
1976 switch (rep) {
1977 case kUnboxedFloat:
1978 case kUnboxedDouble:
1979 return double_class();
1980 case kUnboxedFloat32x4:
1981 return float32x4_class();
1982 case kUnboxedFloat64x2:
1983 return float64x2_class();
1984 case kUnboxedInt32x4:
1985 return int32x4_class();
1986 case kUnboxedInt64:
1987 return mint_class();
1988 default:
1989 UNREACHABLE();
1990 return Class::ZoneHandle();
1991 }
1992}
1993
1994void FlowGraphCompiler::BeginCodeSourceRange(const InstructionSource& source) {
1995 code_source_map_builder_->BeginCodeSourceRange(assembler()->CodeSize(),
1996 source);
1997}
1998
1999void FlowGraphCompiler::EndCodeSourceRange(const InstructionSource& source) {
2000 code_source_map_builder_->EndCodeSourceRange(assembler()->CodeSize(), source);
2001}
2002
2003const CallTargets* FlowGraphCompiler::ResolveCallTargetsForReceiverCid(
2004 intptr_t cid,
2005 const String& selector,
2006 const Array& args_desc_array) {
2007 Zone* zone = Thread::Current()->zone();
2008
2009 ArgumentsDescriptor args_desc(args_desc_array);
2010
2012 if (!LookupMethodFor(cid, selector, args_desc, &fn)) return nullptr;
2013
2014 CallTargets* targets = new (zone) CallTargets(zone);
2015 targets->Add(new (zone) TargetInfo(cid, cid, &fn, /* count = */ 1,
2016 StaticTypeExactnessState::NotTracking()));
2017
2018 return targets;
2019}
2020
2021bool FlowGraphCompiler::LookupMethodFor(int class_id,
2022 const String& name,
2023 const ArgumentsDescriptor& args_desc,
2024 Function* fn_return,
2025 bool* class_is_abstract_return) {
2026 auto thread = Thread::Current();
2027 auto zone = thread->zone();
2028 auto class_table = thread->isolate_group()->class_table();
2029 if (class_id < 0) return false;
2030 if (class_id >= class_table->NumCids()) return false;
2031
2032 ClassPtr raw_class = class_table->At(class_id);
2033 if (raw_class == nullptr) return false;
2034 Class& cls = Class::Handle(zone, raw_class);
2035 if (cls.IsNull()) return false;
2036 if (!cls.is_finalized()) return false;
2037 if (Array::Handle(cls.current_functions()).IsNull()) return false;
2038
2039 if (class_is_abstract_return != nullptr) {
2040 *class_is_abstract_return = cls.is_abstract();
2041 }
2042 Function& target_function =
2043 Function::Handle(zone, Resolver::ResolveDynamicForReceiverClass(
2044 cls, name, args_desc, /*allow_add=*/false));
2045 if (target_function.IsNull()) return false;
2046 *fn_return = target_function.ptr();
2047 return true;
2048}
2049
2050void FlowGraphCompiler::EmitPolymorphicInstanceCall(
2051 const PolymorphicInstanceCallInstr* call,
2052 const CallTargets& targets,
2053 ArgumentsInfo args_info,
2054 intptr_t deopt_id,
2055 const InstructionSource& source,
2056 LocationSummary* locs,
2057 bool complete,
2058 intptr_t total_ic_calls,
2059 bool receiver_can_be_smi) {
2060 ASSERT(call != nullptr);
2061 if (!FLAG_precompiled_mode) {
2062 if (FLAG_polymorphic_with_deopt) {
2063 compiler::Label* deopt =
2064 AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
2065 compiler::Label ok;
2066 EmitTestAndCall(targets, call->function_name(), args_info,
2067 deopt, // No cid match.
2068 &ok, // Found cid.
2069 deopt_id, source, locs, complete, total_ic_calls,
2070 call->entry_kind());
2071 assembler()->Bind(&ok);
2072 } else {
2073 compiler::Label megamorphic, ok;
2074 EmitTestAndCall(targets, call->function_name(), args_info,
2075 &megamorphic, // No cid match.
2076 &ok, // Found cid.
2077 deopt_id, source, locs, complete, total_ic_calls,
2078 call->entry_kind());
2079 assembler()->Jump(&ok);
2080 assembler()->Bind(&megamorphic);
2081 // Instead of deoptimizing, do a megamorphic call when no matching
2082 // cid found.
2083 EmitMegamorphicInstanceCall(*call->ic_data(), deopt_id, source, locs);
2084 assembler()->Bind(&ok);
2085 }
2086 } else {
2087 if (complete) {
2088 compiler::Label ok;
2089 EmitTestAndCall(targets, call->function_name(), args_info,
2090 nullptr, // No cid match.
2091 &ok, // Found cid.
2092 deopt_id, source, locs, true, total_ic_calls,
2093 call->entry_kind());
2094 assembler()->Bind(&ok);
2095 } else {
2096 const ICData& unary_checks =
2097 ICData::ZoneHandle(zone(), call->ic_data()->AsUnaryClassChecks());
2098 EmitInstanceCallAOT(unary_checks, deopt_id, source, locs,
2099 call->entry_kind(), receiver_can_be_smi);
2100 }
2101 }
2102}
2103
2104#define __ assembler()->
2105
2106void FlowGraphCompiler::EmitDropArguments(intptr_t count) {
2107 if (!is_optimizing()) {
2108 __ Drop(count);
2109 }
2110}
2111
2112void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
2113 const GrowableArray<intptr_t>& class_ids,
2114 compiler::Label* is_equal_lbl,
2115 compiler::Label* is_not_equal_lbl) {
2116 for (const auto& id : class_ids) {
2117 __ CompareImmediate(class_id_reg, id);
2118 __ BranchIf(EQUAL, is_equal_lbl);
2119 }
2120 __ Jump(is_not_equal_lbl);
2121}
2122
2123void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
2124 const String& function_name,
2125 ArgumentsInfo args_info,
2126 compiler::Label* failed,
2127 compiler::Label* match_found,
2128 intptr_t deopt_id,
2129 const InstructionSource& source_index,
2130 LocationSummary* locs,
2131 bool complete,
2132 intptr_t total_ic_calls,
2133 Code::EntryKind entry_kind) {
2135 ASSERT(complete || (failed != nullptr)); // Complete calls can't fail.
2136
2137 const Array& arguments_descriptor =
2138 Array::ZoneHandle(zone(), args_info.ToArgumentsDescriptor());
2139 EmitTestAndCallLoadReceiver(args_info.count_without_type_args,
2140 arguments_descriptor);
2141
2142 const int kNoCase = -1;
2143 int smi_case = kNoCase;
2144 int which_case_to_skip = kNoCase;
2145
2146 const int length = targets.length();
2147 ASSERT(length > 0);
2148 int non_smi_length = length;
2149
2150 // Find out if one of the classes in one of the cases is the Smi class. We
2151 // will be handling that specially.
2152 for (int i = 0; i < length; i++) {
2153 const intptr_t start = targets[i].cid_start;
2154 if (start > kSmiCid) continue;
2155 const intptr_t end = targets[i].cid_end;
2156 if (end >= kSmiCid) {
2157 smi_case = i;
2158 if (start == kSmiCid && end == kSmiCid) {
2159 // If this case has only the Smi class then we won't need to emit it at
2160 // all later.
2161 which_case_to_skip = i;
2162 non_smi_length--;
2163 }
2164 break;
2165 }
2166 }
2167
2168 if (smi_case != kNoCase) {
2169 compiler::Label after_smi_test;
2170 // If the call is complete and there are no other possible receiver
2171 // classes - then receiver can only be a smi value and we don't need
2172 // to check if it is a smi.
2173 if (!(complete && non_smi_length == 0)) {
2174 EmitTestAndCallSmiBranch(non_smi_length == 0 ? failed : &after_smi_test,
2175 /* jump_if_smi= */ false);
2176 }
2177
2178 // Do not use the code from the function, but let the code be patched so
2179 // that we can record the outgoing edges to other code.
2180 const Function& function = *targets.TargetAt(smi_case)->target;
2181 GenerateStaticDartCall(deopt_id, source_index,
2182 UntaggedPcDescriptors::kOther, locs, function,
2183 entry_kind);
2184 __ Drop(args_info.size_with_type_args);
2185 if (match_found != nullptr) {
2186 __ Jump(match_found);
2187 }
2188 __ Bind(&after_smi_test);
2189 } else {
2190 if (!complete) {
2191 // Smi is not a valid class.
2192 EmitTestAndCallSmiBranch(failed, /* jump_if_smi = */ true);
2193 }
2194 }
2195
2196 if (non_smi_length == 0) {
2197 // If non_smi_length is 0 then only a Smi check was needed; the Smi check
2198 // above will fail if there was only one check and receiver is not Smi.
2199 return;
2200 }
2201
2202 bool add_megamorphic_call = false;
2203 int bias = 0;
2204
2205 // Value is not Smi.
2206 EmitTestAndCallLoadCid(EmitTestCidRegister());
2207
2208 int last_check = which_case_to_skip == length - 1 ? length - 2 : length - 1;
2209
2210 for (intptr_t i = 0; i < length; i++) {
2211 if (i == which_case_to_skip) continue;
2212 const bool is_last_check = (i == last_check);
2213 const int count = targets.TargetAt(i)->count;
2214 if (!is_last_check && !complete && count < (total_ic_calls >> 5)) {
2215 // This case is hit too rarely to be worth writing class-id checks inline
2216 // for. Note that we can't do this for calls with only one target because
2217 // the type propagator may have made use of that and expects a deopt if
2218 // a new class is seen at this call site. See IsMonomorphic.
2219 add_megamorphic_call = true;
2220 break;
2221 }
2222 compiler::Label next_test;
2223 if (!complete || !is_last_check) {
2224 bias = EmitTestAndCallCheckCid(assembler(),
2225 is_last_check ? failed : &next_test,
2226 EmitTestCidRegister(), targets[i], bias,
2227 /*jump_on_miss =*/true);
2228 }
2229 // Do not use the code from the function, but let the code be patched so
2230 // that we can record the outgoing edges to other code.
2231 const Function& function = *targets.TargetAt(i)->target;
2232 GenerateStaticDartCall(deopt_id, source_index,
2233 UntaggedPcDescriptors::kOther, locs, function,
2234 entry_kind);
2235 __ Drop(args_info.size_with_type_args);
2236 if (!is_last_check || add_megamorphic_call) {
2237 __ Jump(match_found);
2238 }
2239 __ Bind(&next_test);
2240 }
2241 if (add_megamorphic_call) {
2242 EmitMegamorphicInstanceCall(function_name, arguments_descriptor, deopt_id,
2243 source_index, locs);
2244 }
2245}
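// Note on the dispatch sequence above: the Smi case (if any) is emitted before
// the class id is loaded, so only non-Smi receivers pay for the cid checks,
// and any target whose observed count falls below total_ic_calls / 32 (about
// 3% of calls) is not tested inline; those receivers fall through to a single
// trailing megamorphic call instead.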
2246
2247bool FlowGraphCompiler::GenerateSubtypeRangeCheck(Register class_id_reg,
2248 const Class& type_class,
2249 compiler::Label* is_subtype) {
2250 HierarchyInfo* hi = Thread::Current()->hierarchy_info();
2251 if (hi != nullptr) {
2252 const CidRangeVector& ranges =
2253 hi->SubtypeRangesForClass(type_class,
2254 /*include_abstract=*/false,
2255 /*exclude_null=*/false);
2256 if (ranges.length() <= kMaxNumberOfCidRangesToTest) {
2257 GenerateCidRangesCheck(assembler(), class_id_reg, ranges, is_subtype);
2258 return true;
2259 }
2260 }
2261
2262 // We don't have cid-ranges for subclasses, so we'll just test against the
2263 // class directly if it's non-abstract.
2264 if (!type_class.is_abstract()) {
2265 __ CompareImmediate(class_id_reg, type_class.id());
2266 __ BranchIf(EQUAL, is_subtype);
2267 }
2268 return false;
2269}
2270
2271bool FlowGraphCompiler::GenerateCidRangesCheck(
2272 compiler::Assembler* assembler,
2273 Register class_id_reg,
2274 const CidRangeVector& cid_ranges,
2275 compiler::Label* inside_range_lbl,
2276 compiler::Label* outside_range_lbl,
2277 bool fall_through_if_inside) {
2278 // If there are no valid class ranges, the check will fail. If we are
2279 // supposed to fall-through in the positive case, we'll explicitly jump to
2280 // the [outside_range_lbl].
2281 if (cid_ranges.is_empty()) {
2282 if (fall_through_if_inside) {
2283 assembler->Jump(outside_range_lbl);
2284 }
2285 return false;
2286 }
2287
2288 int bias = 0;
2289 for (intptr_t i = 0; i < cid_ranges.length(); ++i) {
2290 const CidRangeValue& range = cid_ranges[i];
2292 const bool last_round = i == (cid_ranges.length() - 1);
2293
2294 compiler::Label* jump_label = last_round && fall_through_if_inside
2295 ? outside_range_lbl
2296 : inside_range_lbl;
2297 const bool jump_on_miss = last_round && fall_through_if_inside;
2298
2299 bias = EmitTestAndCallCheckCid(assembler, jump_label, class_id_reg, range,
2300 bias, jump_on_miss);
2301 }
2302 return bias != 0;
2303}
2304
2305int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
2306 compiler::Label* label,
2307 Register class_id_reg,
2308 const CidRangeValue& range,
2309 int bias,
2310 bool jump_on_miss) {
2311 const intptr_t cid_start = range.cid_start;
2312 if (range.IsSingleCid()) {
2313 assembler->CompareImmediate(class_id_reg, cid_start - bias);
2314 assembler->BranchIf(jump_on_miss ? NOT_EQUAL : EQUAL, label);
2315 } else {
2316 assembler->AddImmediate(class_id_reg, bias - cid_start);
2317 bias = cid_start;
2318 assembler->CompareImmediate(class_id_reg, range.Extent());
2319 assembler->BranchIf(jump_on_miss ? UNSIGNED_GREATER : UNSIGNED_LESS_EQUAL,
2320 label);
2321 }
2322 return bias;
2323}
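// Worked example (hypothetical cid ranges): with an incoming bias of 0,
// checking the range [100, 104] emits
//   AddImmediate(class_id_reg, -100); CompareImmediate(class_id_reg, 4);
// and returns bias = 100; a following single-cid check for 200 can then be
// emitted as CompareImmediate(class_id_reg, 100) without reloading the id.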
2324
2325bool FlowGraphCompiler::CheckAssertAssignableTypeTestingABILocations(
2326 const LocationSummary& locs) {
2327 ASSERT(locs.in(AssertAssignableInstr::kInstancePos).IsRegister() &&
2328 locs.in(AssertAssignableInstr::kInstancePos).reg() ==
2329 TypeTestABI::kInstanceReg);
2330 ASSERT((locs.in(AssertAssignableInstr::kDstTypePos).IsConstant() &&
2331 locs.in(AssertAssignableInstr::kDstTypePos)
2332 .constant()
2333 .IsAbstractType()) ||
2334 (locs.in(AssertAssignableInstr::kDstTypePos).IsRegister() &&
2335 locs.in(AssertAssignableInstr::kDstTypePos).reg() ==
2336 TypeTestABI::kDstTypeReg));
2337 ASSERT(locs.in(AssertAssignableInstr::kInstantiatorTAVPos).IsRegister() &&
2338 locs.in(AssertAssignableInstr::kInstantiatorTAVPos).reg() ==
2339 TypeTestABI::kInstantiatorTypeArgumentsReg);
2340 ASSERT(locs.in(AssertAssignableInstr::kFunctionTAVPos).IsRegister() &&
2341 locs.in(AssertAssignableInstr::kFunctionTAVPos).reg() ==
2342 TypeTestABI::kFunctionTypeArgumentsReg);
2343 ASSERT(locs.out(0).IsRegister() &&
2344 locs.out(0).reg() == TypeTestABI::kInstanceReg);
2345 return true;
2346}
2347
2348// Generates function type check.
2349//
2350// See [GenerateInlineInstanceof] for calling convention.
2351SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
2352 const InstructionSource& source,
2353 const AbstractType& type,
2354 compiler::Label* is_instance_lbl,
2355 compiler::Label* is_not_instance_lbl) {
2356 __ Comment("FunctionTypeTest");
2357
2358 __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
2359 // Uninstantiated type class is known at compile time, but the type
2360 // arguments are determined at runtime by the instantiator(s).
2361 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeSixArgs,
2362 is_instance_lbl, is_not_instance_lbl);
2363}
2364
2365// Inputs (from TypeTestABI):
2366// - kInstanceReg : instance to test against.
2367// - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
2368// - kFunctionTypeArgumentsReg : function type arguments (if needed).
2369//
2370// Preserves all input registers.
2371//
2372// Clobbers kDstTypeReg, kSubtypeTestCacheReg and kSubtypeTestCacheResultReg at
2373// a minimum, may clobber additional registers depending on architecture. See
2374// GenerateSubtypeNTestCacheStub for architecture-specific registers that should
2375// be saved across a subtype test cache stub call.
2376//
2377// Note that this inlined code must be followed by the runtime_call code, as it
2378// may fall through to it. Otherwise, this inline code will jump to the label
2379// is_instance or to the label is_not_instance.
2380SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
2381 const InstructionSource& source,
2382 const AbstractType& type,
2383 compiler::Label* is_instance_lbl,
2384 compiler::Label* is_not_instance_lbl) {
2385 ASSERT(!type.IsTopTypeForInstanceOf());
2386 __ Comment("InlineInstanceof");
2387 if (type.IsObjectType()) { // Must be non-nullable.
2388 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2389 // All non-null objects are instances of non-nullable Object.
2390 __ BranchIf(NOT_EQUAL, is_instance_lbl);
2391 __ Jump(is_not_instance_lbl);
2392 return SubtypeTestCache::null(); // No need for an STC.
2393 }
2394 if (type.IsFunctionType()) {
2395 return GenerateFunctionTypeTest(source, type, is_instance_lbl,
2396 is_not_instance_lbl);
2397 }
2398 if (type.IsRecordType()) {
2399 // Subtype test cache stubs are not useful for record types and the results
2400 // of subtype checks are never recorded in the cache.
2401 // Fall through to runtime.
2402 return SubtypeTestCache::null();
2403 }
2404
2405 if (type.IsInstantiated()) {
2406 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
2407 // A class equality check is only applicable with a dst type (not a
2408 // function type) of a non-parameterized class or with a raw dst type of
2409 // a parameterized class.
2410 if (type_class.NumTypeArguments() > 0) {
2411 return GenerateInstantiatedTypeWithArgumentsTest(
2412 source, type, is_instance_lbl, is_not_instance_lbl);
2413 // Fall through to runtime call.
2414 }
2415 const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
2416 source, type, is_instance_lbl, is_not_instance_lbl);
2417 if (has_fall_through) {
2418 // If test non-conclusive so far, try the inlined type-test cache.
2419 // 'type' is known at compile time.
2420 return GenerateSubtype1TestCacheLookup(
2421 source, type_class, is_instance_lbl, is_not_instance_lbl);
2422 } else {
2423 return SubtypeTestCache::null();
2424 }
2425 }
2426 return GenerateUninstantiatedTypeTest(source, type, is_instance_lbl,
2427 is_not_instance_lbl);
2428}
2429
2430FlowGraphCompiler::TypeTestStubKind
2431FlowGraphCompiler::GetTypeTestStubKindForTypeParameter(
2432 const TypeParameter& type_param) {
2433 // If it's guaranteed, by type-parameter bound, that the type parameter will
2434 // never have a value of a function type, then we can safely do a 4-type
2435 // test instead of a 6-type test.
2436 AbstractType& bound = AbstractType::Handle(zone(), type_param.bound());
2437 bound = bound.UnwrapFutureOr();
2438 return !bound.IsTopTypeForSubtyping() && !bound.IsObjectType() &&
2439 !bound.IsDartFunctionType() && bound.IsType()
2440 ? TypeTestStubKind::kTestTypeFourArgs
2441 : TypeTestStubKind::kTestTypeSixArgs;
2442}
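// Example: for a type parameter declared as <T extends num>, the unwrapped
// bound (num) is a Type that is neither a top type, Object, nor Function, so
// the cheaper kTestTypeFourArgs stub suffices; an effectively unbounded
// parameter (bound Object? or dynamic) falls back to kTestTypeSixArgs.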
2443
2444// Generates quick and subtype cache tests when only the instance need be
2445// checked. Jumps to 'is_instance' or 'is_not_instance' respectively, if any
2446// generated check is conclusive, otherwise falls through if further checking is
2447// required.
2448//
2449// See [GenerateInlineInstanceof] for calling convention.
2450SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
2451 const InstructionSource& source,
2452 const Class& type_class,
2453 compiler::Label* is_instance_lbl,
2454 compiler::Label* is_not_instance_lbl) {
2455 // If the type being tested is non-nullable Object, we are in NNBD strong
2456 // mode, since top types do not reach here. In this case, testing the
2457 // superclass of a null instance yields a wrong result (as the Null class
2458 // extends Object).
2459 ASSERT(!type_class.IsObjectClass());
2460 __ Comment("Subtype1TestCacheLookup");
2461#if defined(DEBUG)
2462 compiler::Label ok;
2463 __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &ok);
2464 __ Breakpoint();
2465 __ Bind(&ok);
2466#endif
2467 // We don't use TypeTestABI::kScratchReg for the first scratch register as
2468 // it is not defined on IA32. Instead, we use the subtype test cache
2469 // register, as it is clobbered by the subtype test cache stub call anyway.
2470 const Register kScratch1Reg = TypeTestABI::kSubtypeTestCacheReg;
2471#if defined(TARGET_ARCH_IA32)
2472 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2473 // Instead, we pick another TypeTestABI register and push/pop it around
2474 // the uses of the second scratch register.
2475 const Register kScratch2Reg = TypeTestABI::kDstTypeReg;
2476 __ PushRegister(kScratch2Reg);
2477#else
2478 // We can use TypeTestABI::kScratchReg for the second scratch register, as
2479 // IA32 is handled separately.
2480 const Register kScratch2Reg = TypeTestABI::kScratchReg;
2481#endif
2482 static_assert(kScratch1Reg != kScratch2Reg,
2483 "Scratch registers must be distinct");
2484 // Check immediate superclass equality.
2485 __ LoadClassId(kScratch2Reg, TypeTestABI::kInstanceReg);
2486 __ LoadClassById(kScratch1Reg, kScratch2Reg);
2487#if defined(TARGET_ARCH_IA32)
2488 // kScratch2 is no longer used, so restore it.
2489 __ PopRegister(kScratch2Reg);
2490#endif
2491 __ LoadCompressedFieldFromOffset(
2492 kScratch1Reg, kScratch1Reg, compiler::target::Class::super_type_offset());
2493 // Check for a null super type. Instances whose class has a null super type
2494 // can only be an instance of top types or of non-nullable Object, but this
2495 // method is not called for those types, so the object cannot be an instance.
2496 __ CompareObject(kScratch1Reg, Object::null_object());
2497 __ BranchIf(EQUAL, is_not_instance_lbl);
2498 __ LoadTypeClassId(kScratch1Reg, kScratch1Reg);
2499 __ CompareImmediate(kScratch1Reg, type_class.id());
2500 __ BranchIf(EQUAL, is_instance_lbl);
2501
2502 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeOneArg,
2503 is_instance_lbl, is_not_instance_lbl);
2504}
2505
2506// Generates quick and subtype cache tests for an instantiated generic type.
2507// Jumps to 'is_instance' or 'is_not_instance' respectively, if any generated
2508// check is conclusive, otherwise falls through if further checking is required.
2509//
2510// See [GenerateInlineInstanceof] for calling convention.
2511SubtypeTestCachePtr
2512FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
2513 const InstructionSource& source,
2514 const AbstractType& type,
2515 compiler::Label* is_instance_lbl,
2516 compiler::Label* is_not_instance_lbl) {
2517 __ Comment("InstantiatedTypeWithArgumentsTest");
2518 ASSERT(type.IsInstantiated());
2519 ASSERT(!type.IsFunctionType());
2520 ASSERT(!type.IsRecordType());
2521 ASSERT(type.IsType());
2522 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
2523 ASSERT(type_class.NumTypeArguments() > 0);
2524 const Type& smi_type = Type::Handle(zone(), Type::SmiType());
2525 const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
2526 __ BranchIfSmi(TypeTestABI::kInstanceReg,
2527 smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
2528
2529 const TypeArguments& type_arguments =
2530 TypeArguments::ZoneHandle(zone(), Type::Cast(type).arguments());
2531 const bool is_raw_type = type_arguments.IsNull() ||
2532 type_arguments.IsRaw(0, type_arguments.Length());
2533 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2534 // Instead, we use the subtype test cache register, as it is clobbered by the
2535 // subtype test cache stub call anyway.
2536 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2537 if (is_raw_type) {
2538 // dynamic type argument, check only classes.
2539 __ LoadClassId(kScratchReg, TypeTestABI::kInstanceReg);
2540 __ CompareImmediate(kScratchReg, type_class.id());
2541 __ BranchIf(EQUAL, is_instance_lbl);
2542 // List is a very common case.
2543 if (IsListClass(type_class)) {
2544 GenerateListTypeCheck(kScratchReg, is_instance_lbl);
2545 }
2546 return GenerateSubtype1TestCacheLookup(source, type_class, is_instance_lbl,
2547 is_not_instance_lbl);
2548 }
2549 // If one type argument only, check if type argument is a top type.
2550 if (type_arguments.Length() == 1) {
2551 const AbstractType& tp_argument =
2552 AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
2553 if (tp_argument.IsTopTypeForSubtyping()) {
2554 // Instance class test only necessary.
2555 return GenerateSubtype1TestCacheLookup(
2556 source, type_class, is_instance_lbl, is_not_instance_lbl);
2557 }
2558 }
2559
2560 // Regular subtype test cache involving instance's type arguments.
2561 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeTwoArgs,
2562 is_instance_lbl, is_not_instance_lbl);
2563}
2564
2565// Generates quick and subtype cache tests for an instantiated non-generic type.
2566// Jumps to 'is_instance' or 'is_not_instance' respectively, if any generated
2567// check is conclusive. Returns whether the code will fall through for further
2568// type checking because the checks are not exhaustive.
2569//
2570// See [GenerateInlineInstanceof] for calling convention.
2571//
2572// Uses kScratchReg, so this implementation cannot be shared with IA32.
2573bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
2574 const InstructionSource& source,
2575 const AbstractType& type,
2576 compiler::Label* is_instance_lbl,
2577 compiler::Label* is_not_instance_lbl) {
2578 __ Comment("InstantiatedTypeNoArgumentsTest");
2579 ASSERT(type.IsInstantiated());
2580 ASSERT(!type.IsFunctionType());
2581 ASSERT(!type.IsRecordType());
2582 const Class& type_class = Class::Handle(zone(), type.type_class());
2583 ASSERT(type_class.NumTypeArguments() == 0);
2584
2585 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2586 // Instead, we use the subtype test cache register, as it is clobbered by the
2587 // subtype test cache stub call anyway.
2588 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2589
2590 const Class& smi_class = Class::Handle(zone(), Smi::Class());
2591 const bool smi_is_ok =
2592 Class::IsSubtypeOf(smi_class, Object::null_type_arguments(),
2593 Nullability::kNonNullable, type, Heap::kOld);
2594 __ BranchIfSmi(TypeTestABI::kInstanceReg,
2595 smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
2596 __ LoadClassId(kScratchReg, TypeTestABI::kInstanceReg);
2597 // Bool interface can be implemented only by core class Bool.
2598 if (type.IsBoolType()) {
2599 __ CompareImmediate(kScratchReg, kBoolCid);
2600 __ BranchIf(EQUAL, is_instance_lbl);
2601 __ Jump(is_not_instance_lbl);
2602 return false;
2603 }
2604 // Custom checking for numbers (Smi, Mint and Double).
2605 // Note that instance is not Smi (checked above).
2606 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
2607 GenerateNumberTypeCheck(kScratchReg, type, is_instance_lbl,
2608 is_not_instance_lbl);
2609 return false;
2610 }
2611 if (type.IsStringType()) {
2612 GenerateStringTypeCheck(kScratchReg, is_instance_lbl, is_not_instance_lbl);
2613 return false;
2614 }
2615 if (type.IsDartFunctionType()) {
2616 // Check if instance is a closure.
2617 __ CompareImmediate(kScratchReg, kClosureCid);
2618 __ BranchIf(EQUAL, is_instance_lbl);
2619 return true;
2620 }
2621 if (type.IsDartRecordType()) {
2622 // Check if instance is a record.
2623 __ CompareImmediate(kScratchReg, kRecordCid);
2624 __ BranchIf(EQUAL, is_instance_lbl);
2625 return true;
2626 }
2627
2628 // Fast case for cid-range based checks.
2629 // Warning: This code destroys the contents of [kScratchReg], so this should
2630 // be the last check in this method. It returns whether the checks were
2631 // exhaustive, so we negate it to indicate whether we'll fall through.
2632 return !GenerateSubtypeRangeCheck(kScratchReg, type_class, is_instance_lbl);
2633}
2634
2635// Generates inlined check if 'type' is a type parameter or type itself.
2636//
2637// See [GenerateInlineInstanceof] for calling convention.
2638SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
2639 const InstructionSource& source,
2640 const AbstractType& type,
2641 compiler::Label* is_instance_lbl,
2642 compiler::Label* is_not_instance_lbl) {
2643 __ Comment("UninstantiatedTypeTest");
2644 ASSERT(!type.IsInstantiated());
2645 ASSERT(!type.IsFunctionType());
2646 ASSERT(!type.IsRecordType());
2647 // Skip check if destination is a dynamic type.
2648 if (type.IsTypeParameter()) {
2649 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2650 // Instead, we use the subtype test cache register, as it is clobbered by
2651 // the subtype test cache stub call anyway.
2652 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2653
2654 const TypeParameter& type_param = TypeParameter::Cast(type);
2655
2656 const Register kTypeArgumentsReg =
2657 type_param.IsClassTypeParameter()
2658 ? TypeTestABI::kInstantiatorTypeArgumentsReg
2659 : TypeTestABI::kFunctionTypeArgumentsReg;
2660 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
2661 __ CompareObject(kTypeArgumentsReg, Object::null_object());
2662 __ BranchIf(EQUAL, is_instance_lbl);
2663 __ LoadCompressedFieldFromOffset(
2664 kScratchReg, kTypeArgumentsReg,
2665 compiler::target::TypeArguments::type_at_offset(type_param.index()));
2666 // kScratchReg: Concrete type of type.
2667 // Check if type argument is dynamic, Object?, or void.
2668 __ CompareObject(kScratchReg, Object::dynamic_type());
2669 __ BranchIf(EQUAL, is_instance_lbl);
2670 __ CompareObject(
2671 kScratchReg,
2672 Type::ZoneHandle(
2673 zone(), isolate_group()->object_store()->nullable_object_type()));
2674 __ BranchIf(EQUAL, is_instance_lbl);
2675 __ CompareObject(kScratchReg, Object::void_type());
2676 __ BranchIf(EQUAL, is_instance_lbl);
2677
2678 // For Smi check quickly against int and num interfaces.
2679 compiler::Label not_smi;
2680 __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &not_smi,
2681 compiler::Assembler::kNearJump);
2682 __ CompareObject(kScratchReg, Type::ZoneHandle(zone(), Type::IntType()));
2683 __ BranchIf(EQUAL, is_instance_lbl);
2684 __ CompareObject(kScratchReg, Type::ZoneHandle(zone(), Type::Number()));
2685 __ BranchIf(EQUAL, is_instance_lbl);
2686 // Smi can be handled by type test cache.
2687 __ Bind(&not_smi);
2688
2689 const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param);
2690 return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
2691 is_not_instance_lbl);
2692 }
2693 if (type.IsType()) {
2694 // The only uninstantiated type to which a Smi is assignable is FutureOr<T>,
2695 // as T might be a top type or int or num when instantiated
2696 if (!type.IsFutureOrType()) {
2697 __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
2698 }
2699 const TypeTestStubKind test_kind =
2700 type.IsInstantiated(kFunctions) ? TypeTestStubKind::kTestTypeThreeArgs
2701 : TypeTestStubKind::kTestTypeFourArgs;
2702 // Uninstantiated type class is known at compile time, but the type
2703 // arguments are determined at runtime by the instantiator(s).
2704 return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
2705 is_not_instance_lbl);
2706 }
2707 return SubtypeTestCache::null();
2708}
2709
2710// If instanceof type test cannot be performed successfully at compile time and
2711// therefore eliminated, optimize it by adding inlined tests for:
2712// - Null -> see comment below.
2713// - Smi -> compile time subtype check (only if dst class is not parameterized).
2714// - Class equality (only if class is not parameterized).
2715// Inputs (from TypeTestABI):
2716// - kInstanceReg: object.
2717// - kInstantiatorTypeArgumentsReg: instantiator type arguments or raw_null.
2718// - kFunctionTypeArgumentsReg: function type arguments or raw_null.
2719// Returns:
2720// - true or false in kInstanceOfResultReg.
2721void FlowGraphCompiler::GenerateInstanceOf(const InstructionSource& source,
2722 intptr_t deopt_id,
2723 Environment* env,
2724 const AbstractType& type,
2725 LocationSummary* locs) {
2726 ASSERT(type.IsFinalized());
2727 ASSERT(!type.IsTopTypeForInstanceOf()); // Already checked.
2728
2729 compiler::Label is_instance, is_not_instance;
2730 // 'null' is an instance of Null, Object*, Never*, void, and dynamic.
2731 // In addition, 'null' is an instance of any nullable type.
2732 // It is also an instance of FutureOr<T> if it is an instance of T.
2733 const AbstractType& unwrapped_type =
2734 AbstractType::Handle(type.UnwrapFutureOr());
2735 if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) {
2736 // Only nullable type parameter remains nullable after instantiation.
2737 // See NullIsInstanceOf().
2738 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2739 __ BranchIf(EQUAL,
2740 unwrapped_type.IsNullable() ? &is_instance : &is_not_instance);
2741 }
2742
2743 // Generate inline instanceof test.
2744 SubtypeTestCache& test_cache = SubtypeTestCache::ZoneHandle(zone());
2745 // kInstanceReg, kInstantiatorTypeArgumentsReg, and kFunctionTypeArgumentsReg
2746 // are preserved across the call.
2747 test_cache =
2748 GenerateInlineInstanceof(source, type, &is_instance, &is_not_instance);
2749
2750 // test_cache is null if there is no fall-through.
2751 compiler::Label done;
2752 if (!test_cache.IsNull()) {
2753 // Generate Runtime call.
2754 __ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
2755 __ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
2756 GenerateStubCall(source, StubCode::InstanceOf(),
2757 /*kind=*/UntaggedPcDescriptors::kOther, locs, deopt_id,
2758 env);
2759 __ Jump(&done);
2760 }
2761 __ Bind(&is_not_instance);
2762 __ LoadObject(TypeTestABI::kInstanceOfResultReg, Bool::Get(false));
2763 __ Jump(&done);
2764
2765 __ Bind(&is_instance);
2766 __ LoadObject(TypeTestABI::kInstanceOfResultReg, Bool::Get(true));
2767 __ Bind(&done);
2768}
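// Control-flow summary for the code above: a null instance is classified
// purely from the nullability of the unwrapped type, the inline tests settle
// the common Smi and class-id cases, and only an inconclusive inline test
// (non-null subtype test cache) reaches the InstanceOf stub; both outcomes
// then leave true or false in TypeTestABI::kInstanceOfResultReg.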
2769
2770#if !defined(TARGET_ARCH_IA32)
2771// Expected inputs (from TypeTestABI):
2772// - kInstanceReg: instance (preserved).
2773// - kInstantiatorTypeArgumentsReg: instantiator type arguments
2774// (for test_kind >= kTestTypeThreeArg).
2775// - kFunctionTypeArgumentsReg: function type arguments
2776// (for test_kind >= kTestTypeFourArg).
2777//
2778// See the arch-specific GenerateSubtypeNTestCacheStub method to see which
2779// registers may need saving across this call.
2780SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
2781 TypeTestStubKind test_kind,
2782 compiler::Label* is_instance_lbl,
2783 compiler::Label* is_not_instance_lbl) {
2784 const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
2785 const SubtypeTestCache& type_test_cache =
2786 SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New(num_inputs));
2787 const auto& stub_entry =
2789 __ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, type_test_cache);
2790 __ Call(stub_entry);
2791 GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
2792 is_not_instance_lbl);
2793 return type_test_cache.ptr();
2794}
2795
2796// Generates an assignable check for a given object. Emits no code if the
2797// destination type is known at compile time and is a top type. See
2798// GenerateCallerChecksForAssertAssignable for other optimized cases.
2799//
2800// Inputs (preserved for successful checks):
2801// - TypeTestABI::kInstanceReg: object.
2802// - TypeTestABI::kDstTypeReg: destination type (if non-constant).
2803// - TypeTestABI::kInstantiatorTypeArgumentsReg: instantiator type arguments.
2804// - TypeTestABI::kFunctionTypeArgumentsReg: function type arguments.
2805//
2806// Throws:
2807// - TypeError (on unsuccessful assignable checks)
2808//
2809// Performance notes: positive checks must be quick, negative checks can be slow
2810// as they throw an exception.
2811void FlowGraphCompiler::GenerateAssertAssignable(
2812 CompileType* receiver_type,
2813 const InstructionSource& source,
2814 intptr_t deopt_id,
2815 Environment* env,
2816 const String& dst_name,
2817 LocationSummary* locs) {
2818 ASSERT(!source.token_pos.IsClassifying());
2819 ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs));
2820
2821 // Non-null if we have a constant destination type.
2822 const auto& dst_type =
2823 locs->in(AssertAssignableInstr::kDstTypePos).IsConstant()
2824 ? AbstractType::Cast(
2825 locs->in(AssertAssignableInstr::kDstTypePos).constant())
2826 : Object::null_abstract_type();
2827
2828 if (!dst_type.IsNull()) {
2829 ASSERT(dst_type.IsFinalized());
2830 if (dst_type.IsTopTypeForSubtyping()) return; // No code needed.
2831 }
2832
2833 compiler::Label done;
2834 Register type_reg = TypeTestABI::kDstTypeReg;
2835 // Generate caller-side checks to perform prior to calling the TTS.
2836 if (dst_type.IsNull()) {
2837 __ Comment("AssertAssignable for runtime type");
2838 // kDstTypeReg should already contain the destination type.
2839 } else {
2840 __ Comment("AssertAssignable for compile-time type");
2841 GenerateCallerChecksForAssertAssignable(receiver_type, dst_type, &done);
2842 if (dst_type.IsTypeParameter()) {
2843 // The resolved type parameter is in the scratch register.
2844 type_reg = TypeTestABI::kScratchReg;
2845 }
2846 }
2847
2848 GenerateTTSCall(source, deopt_id, env, type_reg, dst_type, dst_name, locs);
2849 __ Bind(&done);
2850}
2851
2852// Generates a call to the type testing stub for the type in [reg_with_type].
2853// Provide a non-null [dst_type] and [dst_name] if they are known at compile
2854// time.
2855void FlowGraphCompiler::GenerateTTSCall(const InstructionSource& source,
2856 intptr_t deopt_id,
2857 Environment* env,
2858 Register reg_with_type,
2859 const AbstractType& dst_type,
2860 const String& dst_name,
2861 LocationSummary* locs) {
2862 ASSERT(!dst_name.IsNull());
2863 // We use 2 consecutive entries in the pool for the subtype cache and the
2864 // destination name. The second entry, namely [dst_name] seems to be unused,
2865 // but it will be used by the code throwing a TypeError if the type test fails
2866 // (see runtime/vm/runtime_entry.cc:TypeCheck). It will use pattern matching
2867 // on the call site to find out at which pool index the destination name is
2868 // located.
2869 const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
2870 Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
2871 const intptr_t dst_name_index = __ object_pool_builder().AddObject(
2872 dst_name, compiler::ObjectPoolBuilderEntry::kPatchable);
2873 ASSERT((sub_type_cache_index + 1) == dst_name_index);
2874 ASSERT(__ constant_pool_allowed());
2875
2876 __ Comment("TTSCall");
2877 // If the dst_type is known at compile time and instantiated, we know the
2878 // target TTS stub and so can use a PC-relative call when available.
2879 if (!dst_type.IsNull() && dst_type.IsInstantiated() &&
2880 CanPcRelativeCall(dst_type)) {
2881 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
2882 sub_type_cache_index);
2883 __ GenerateUnRelocatedPcRelativeCall();
2884 AddPcRelativeTTSCallTypeTarget(dst_type);
2885 } else {
2886 GenerateIndirectTTSCall(assembler(), reg_with_type, sub_type_cache_index);
2887 }
2888
2889 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
2890 env);
2891}
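// Illustrative pool layout produced by the two AddObject calls above
// (indices are hypothetical):
//   pool[i]     = null      // patchable SubtypeTestCache slot
//   pool[i + 1] = dst_name  // located by TypeCheck via pattern matching
// Keeping the entries adjacent is what lets the runtime recover dst_name from
// the call site when it needs to throw a TypeError.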
2892
2893// Optimize assignable type check by adding inlined tests for:
2894// - non-null object -> return object (only if in null safe mode and type is
2895// non-nullable Object).
2896// - Smi -> compile time subtype check (only if dst class is not parameterized).
2897// - Class equality (only if class is not parameterized).
2898//
2899// Inputs (preserved):
2900// - TypeTestABI::kInstanceReg: object.
2901// - TypeTestABI::kInstantiatorTypeArgumentsReg: instantiator type arguments.
2902// - TypeTestABI::kFunctionTypeArgumentsReg: function type arguments.
2903//
2904// Assumes:
2905// - Destination type is not a top type.
2906// - Object to check is not null, unless in null safe mode and destination type
2907// is not a nullable type.
2908//
2909// Outputs:
2910// - TypeTestABI::kDstTypeReg: destination type
2911// Additional output if dst_type is a TypeParameter:
2912// - TypeTestABI::kScratchReg: type on which to call TTS stub.
2913//
2914// Performance notes: positive checks must be quick, negative checks can be slow
2915// as they throw an exception.
2916void FlowGraphCompiler::GenerateCallerChecksForAssertAssignable(
2917 CompileType* receiver_type,
2918 const AbstractType& dst_type,
2919 compiler::Label* done) {
2920 // Top types should be handled by the caller and cannot reach here.
2921 ASSERT(!dst_type.IsTopTypeForSubtyping());
2922
2923 // Set this to avoid marking the type testing stub for optimization.
2924 bool elide_info = false;
2925 // Call before any return points to set the destination type register and
2926 // mark the destination type TTS as needing optimization, unless it is
2927 // unlikely to be called.
2928 auto output_dst_type = [&]() -> void {
2929 // If we haven't handled the positive case of the type check on the call
2930 // site and we'll be using the TTS of the destination type, we want an
2931 // optimized type testing stub and thus record it in the [TypeUsageInfo].
2932 if (!elide_info) {
2933 if (auto const type_usage_info = thread()->type_usage_info()) {
2934 type_usage_info->UseTypeInAssertAssignable(dst_type);
2935 } else {
2936 ASSERT(!FLAG_precompiled_mode);
2937 }
2938 }
2939 __ LoadObject(TypeTestABI::kDstTypeReg, dst_type);
2940 };
2941
2942 // We can handle certain types and checks very efficiently on the call site,
2943 // meaning those need not be checked within the stubs (which may involve
2944 // a runtime call).
2945
2946 if (dst_type.IsObjectType()) {
2947 // Special case: non-nullable Object.
2948 ASSERT(dst_type.IsNonNullable());
2949 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2950 __ BranchIf(NOT_EQUAL, done);
2951 // Fall back to type testing stub in caller to throw the exception.
2952 return output_dst_type();
2953 }
2954
2955 // If the int type is assignable to [dst_type] we special case it on the
2956 // caller side!
2957 const Type& int_type = Type::Handle(zone(), Type::IntType());
2958 bool is_non_smi = false;
2959 if (int_type.IsSubtypeOf(dst_type, Heap::kOld)) {
2960 __ BranchIfSmi(TypeTestABI::kInstanceReg, done);
2961 is_non_smi = true;
2962 } else if (!receiver_type->CanBeSmi()) {
2963 is_non_smi = true;
2964 }
2965
2966 if (dst_type.IsTypeParameter()) {
2967 // Special case: Instantiate the type parameter on the caller side, invoking
2968 // the TTS of the corresponding type parameter in the caller.
2969 const TypeParameter& type_param = TypeParameter::Cast(dst_type);
2970 if (!type_param.IsNonNullable()) {
2971 // If the type parameter is nullable when running in strong mode, we need
2972 // to handle null before calling the TTS because the type parameter may be
2973 // instantiated with a non-nullable type, where the TTS rejects null.
2974 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2975 __ BranchIf(EQUAL, done);
2976 }
2977 const Register kTypeArgumentsReg =
2978 type_param.IsClassTypeParameter()
2979 ? TypeTestABI::kInstantiatorTypeArgumentsReg
2980 : TypeTestABI::kFunctionTypeArgumentsReg;
2981
2982 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
2983 // If so, then the value is guaranteed assignable as dynamic is a top type.
2984 __ CompareObject(kTypeArgumentsReg, Object::null_object());
2985 __ BranchIf(EQUAL, done);
2986 // Put the instantiated type parameter into the scratch register, so its
2987 // TTS can be called by the caller.
2988 __ LoadCompressedFieldFromOffset(
2989 TypeTestABI::kScratchReg, kTypeArgumentsReg,
2990 compiler::target::TypeArguments::type_at_offset(type_param.index()));
2991 return output_dst_type();
2992 }
2993
2994 if (dst_type.IsFunctionType() || dst_type.IsRecordType()) {
2995 return output_dst_type();
2996 }
2997
2998 if (auto const hi = thread()->hierarchy_info()) {
2999 const Class& type_class = Class::Handle(zone(), dst_type.type_class());
3000
3001 if (hi->CanUseSubtypeRangeCheckFor(dst_type)) {
3002 const CidRangeVector& ranges = hi->SubtypeRangesForClass(
3003 type_class,
3004 /*include_abstract=*/false,
3005 /*exclude_null=*/!Instance::NullIsAssignableTo(dst_type));
3006 if (ranges.length() <= kMaxNumberOfCidRangesToTest) {
3007 if (is_non_smi) {
3008 __ LoadClassId(TypeTestABI::kScratchReg, TypeTestABI::kInstanceReg);
3009 } else {
3010 __ LoadClassIdMayBeSmi(TypeTestABI::kScratchReg,
3011 TypeTestABI::kInstanceReg);
3012 }
3013 GenerateCidRangesCheck(assembler(), TypeTestABI::kScratchReg, ranges,
3014 done);
3015 elide_info = true;
3016 } else if (IsListClass(type_class)) {
3017 __ LoadClassIdMayBeSmi(TypeTestABI::kScratchReg,
3018 TypeTestABI::kInstanceReg);
3019 GenerateListTypeCheck(TypeTestABI::kScratchReg, done);
3020 }
3021 }
3022 }
3023 output_dst_type();
3024}
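// Worked example (hypothetical check against a non-generic type assignable
// from int): a Smi instance branches straight to `done`; otherwise, when a
// HierarchyInfo is available and the destination class has few enough cid
// ranges, the class id is compared against those ranges inline, and only the
// remaining cases fall back to the destination type's TTS with the type loaded
// in TypeTestABI::kDstTypeReg.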
3025#endif // !defined(TARGET_ARCH_IA32)
3026
3027#undef __
3028
3029#if defined(DEBUG)
3030void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
3031 ASSERT(!is_optimizing());
3032
3033 switch (instr->tag()) {
3034 case Instruction::kDropTemps:
3035 FrameStatePop(instr->locs()->input_count() +
3036 instr->AsDropTemps()->num_temps());
3037 break;
3038
3039 default:
3040 FrameStatePop(instr->locs()->input_count());
3041 break;
3042 }
3043
3044 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall());
3045
3046 FrameStatePop(instr->ArgumentCount());
3047 Definition* defn = instr->AsDefinition();
3048 if ((defn != nullptr) && defn->HasTemp()) {
3049 FrameStatePush(defn);
3050 }
3051}
3052
3053void FlowGraphCompiler::FrameStatePush(Definition* defn) {
3054 Representation rep = defn->representation();
3055 ASSERT(!is_optimizing());
3056 if ((rep == kUnboxedDouble || rep == kUnboxedFloat32x4 ||
3057 rep == kUnboxedFloat64x2) &&
3058 defn->locs()->out(0).IsFpuRegister()) {
3059 // Output value is boxed in the instruction epilogue.
3060 rep = kTagged;
3061 }
3062 ASSERT((rep == kTagged) || (rep == kUntagged) ||
3063 (rep == kUnboxedInt64));
3064 frame_state_.Add(rep);
3065}
3066
3067void FlowGraphCompiler::FrameStatePop(intptr_t count) {
3068 ASSERT(!is_optimizing());
3069 frame_state_.TruncateTo(
3070 Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));
3071}
3072
3073bool FlowGraphCompiler::FrameStateIsSafeToCall() {
3074 ASSERT(!is_optimizing());
3075 for (intptr_t i = 0; i < frame_state_.length(); i++) {
3076 if (frame_state_[i] != kTagged) {
3077 return false;
3078 }
3079 }
3080 return true;
3081}
3082
3083void FlowGraphCompiler::FrameStateClear() {
3084 ASSERT(!is_optimizing());
3085 frame_state_.TruncateTo(0);
3086}
3087#endif // defined(DEBUG)
3088
3089#define __ compiler->assembler()->
3090
3093 __ Comment("slow path %s operation", name());
3094 }
3095 const bool use_shared_stub =
3096 instruction()->UseSharedSlowPathStub(compiler->is_optimizing());
3097 ASSERT(use_shared_stub == instruction()->locs()->call_on_shared_slow_path());
3098 const bool live_fpu_registers =
3099 instruction()->locs()->live_registers()->FpuRegisterCount() > 0;
3100 const intptr_t num_args =
3101 use_shared_stub ? 0 : GetNumberOfArgumentsForRuntimeCall();
3102 __ Bind(entry_label());
3104 LocationSummary* locs = instruction()->locs();
3105 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3106 if (use_shared_stub) {
3107 if (!has_frame) {
3108#if !defined(TARGET_ARCH_IA32)
3109 ASSERT(__ constant_pool_allowed());
3110 __ set_constant_pool_allowed(false);
3111#endif
3112 __ EnterDartFrame(0);
3113 }
3114 EmitSharedStubCall(compiler, live_fpu_registers);
3115#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
3116 if (!has_frame) {
3117 // Undo EnterDartFrame for the code generated after this slow path.
3118 RESTORES_LR_FROM_FRAME({});
3119 }
3120#endif
3121 } else {
3122 ASSERT(has_frame);
3123 // Save registers as they are needed for lazy deopt / exception handling.
3124 compiler->SaveLiveRegisters(locs);
3125 PushArgumentsForRuntimeCall(compiler);
3126 __ CallRuntime(runtime_entry_, num_args);
3127 }
3128 const intptr_t deopt_id = instruction()->deopt_id();
3129 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id,
3130 instruction()->source());
3132 compiler->RecordSafepoint(locs, num_args);
3133 if (!FLAG_precompiled_mode ||
3134 (compiler->CurrentTryIndex() != kInvalidTryIndex)) {
3135 Environment* env =
3136 compiler->SlowPathEnvironmentFor(instruction(), num_args);
3137 // TODO(47044): Should be able to say `FLAG_precompiled_mode` instead.
3138 if (CompilerState::Current().is_aot()) {
3139 compiler->RecordCatchEntryMoves(env);
3140 } else if (compiler->is_optimizing()) {
3141 ASSERT(env != nullptr);
3142 compiler->AddSlowPathDeoptInfo(deopt_id, env);
3143 } else {
3144 ASSERT(env == nullptr);
3145 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
3146 // Add deoptimization continuation point.
3147 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt,
3148 deopt_id_after, instruction()->source());
3149 }
3150 }
3151 if (!use_shared_stub) {
3152 __ Breakpoint();
3153 }
3154}
3155
3156const char* NullErrorSlowPath::name() {
3157 switch (exception_type()) {
3158 case CheckNullInstr::kNoSuchMethod:
3159 return "check null (nsm)";
3160 case CheckNullInstr::kArgumentError:
3161 return "check null (arg)";
3162 case CheckNullInstr::kCastError:
3163 return "check null (cast)";
3164 }
3165 UNREACHABLE();
3166}
3167
3168const RuntimeEntry& NullErrorSlowPath::GetRuntimeEntry(
3169 CheckNullInstr::ExceptionType exception_type) {
3170 switch (exception_type) {
3171 case CheckNullInstr::kNoSuchMethod:
3172 return kNullErrorRuntimeEntry;
3173 case CheckNullInstr::kArgumentError:
3174 return kArgumentNullErrorRuntimeEntry;
3175 case CheckNullInstr::kCastError:
3176 return kNullCastErrorRuntimeEntry;
3177 }
3178 UNREACHABLE();
3179}
3180
3181CodePtr NullErrorSlowPath::GetStub(FlowGraphCompiler* compiler,
3182 CheckNullInstr::ExceptionType exception_type,
3183 bool save_fpu_registers) {
3184 auto object_store = compiler->isolate_group()->object_store();
3185 switch (exception_type) {
3186 case CheckNullInstr::kNoSuchMethod:
3187 return save_fpu_registers
3188 ? object_store->null_error_stub_with_fpu_regs_stub()
3189 : object_store->null_error_stub_without_fpu_regs_stub();
3190 case CheckNullInstr::kArgumentError:
3191 return save_fpu_registers
3192 ? object_store->null_arg_error_stub_with_fpu_regs_stub()
3193 : object_store->null_arg_error_stub_without_fpu_regs_stub();
3194 case CheckNullInstr::kCastError:
3195 return save_fpu_registers
3196 ? object_store->null_cast_error_stub_with_fpu_regs_stub()
3197 : object_store->null_cast_error_stub_without_fpu_regs_stub();
3198 }
3199 UNREACHABLE();
3200}
3201
3203 bool save_fpu_registers) {
3204#if defined(TARGET_ARCH_IA32)
3205 UNREACHABLE();
3206#else
3207 const auto& stub =
3208 Code::ZoneHandle(compiler->zone(),
3209 GetStub(compiler, exception_type(), save_fpu_registers));
3210 compiler->EmitCallToStub(stub);
3211#endif
3212}
3213
3214void RangeErrorSlowPath::PushArgumentsForRuntimeCall(
3215 FlowGraphCompiler* compiler) {
3216 LocationSummary* locs = instruction()->locs();
3218 // Can't pass unboxed int64 value directly to runtime call, as all
3219 // arguments are expected to be tagged (boxed).
3220 // The unboxed int64 argument is passed through a dedicated slot in Thread.
3221 // TODO(dartbug.com/33549): Clean this up when unboxed values
3222 // could be passed as arguments.
3223 __ StoreToOffset(locs->in(CheckBoundBaseInstr::kLengthPos).reg(), THR,
3225 __ StoreToOffset(
3228 } else {
3229 __ PushRegisterPair(locs->in(CheckBoundBaseInstr::kIndexPos).reg(),
3230 locs->in(CheckBoundBaseInstr::kLengthPos).reg());
3231 }
3232}
3233
3234void RangeErrorSlowPath::EmitSharedStubCall(FlowGraphCompiler* compiler,
3235 bool save_fpu_registers) {
3236#if defined(TARGET_ARCH_IA32)
3237 UNREACHABLE();
3238#else
3239 auto object_store = compiler->isolate_group()->object_store();
3240 const auto& stub = Code::ZoneHandle(
3241 compiler->zone(),
3242 save_fpu_registers
3243 ? object_store->range_error_stub_with_fpu_regs_stub()
3244 : object_store->range_error_stub_without_fpu_regs_stub());
3245 compiler->EmitCallToStub(stub);
3246#endif
3247}
3248
3249void WriteErrorSlowPath::PushArgumentsForRuntimeCall(
3250 FlowGraphCompiler* compiler) {
3251 LocationSummary* locs = instruction()->locs();
3252 __ PushRegister(locs->in(CheckWritableInstr::kReceiver).reg());
3253 __ PushImmediate(
3254 compiler::target::ToRawSmi(instruction()->AsCheckWritable()->kind()));
3255}
3256
3257void WriteErrorSlowPath::EmitSharedStubCall(FlowGraphCompiler* compiler,
3258 bool save_fpu_registers) {
3259#if defined(TARGET_ARCH_IA32)
3260 UNREACHABLE();
3261#else
3262 auto object_store = compiler->isolate_group()->object_store();
3263 const auto& stub = Code::ZoneHandle(
3264 compiler->zone(),
3265 save_fpu_registers
3266 ? object_store->write_error_stub_with_fpu_regs_stub()
3267 : object_store->write_error_stub_without_fpu_regs_stub());
3268 compiler->EmitCallToStub(stub);
3269#endif
3270}
3271
3272void LateInitializationErrorSlowPath::PushArgumentsForRuntimeCall(
3273 FlowGraphCompiler* compiler) {
3274 __ PushObject(Field::ZoneHandle(OriginalField()));
3275}
3276
3277void LateInitializationErrorSlowPath::EmitSharedStubCall(
3278 FlowGraphCompiler* compiler,
3279 bool save_fpu_registers) {
3280#if defined(TARGET_ARCH_IA32)
3281 UNREACHABLE();
3282#else
3283 ASSERT(instruction()->locs()->temp(0).reg() ==
3284 LateInitializationErrorABI::kFieldReg);
3285 __ LoadObject(LateInitializationErrorABI::kFieldReg,
3286 Field::ZoneHandle(OriginalField()));
3287 auto object_store = compiler->isolate_group()->object_store();
3288 const auto& stub = Code::ZoneHandle(
3289 compiler->zone(),
3290 save_fpu_registers
3291 ? object_store->late_initialization_error_stub_with_fpu_regs_stub()
3292 : object_store
3293 ->late_initialization_error_stub_without_fpu_regs_stub());
3294 compiler->EmitCallToStub(stub);
3295#endif
3296}
3297
3298void FlowGraphCompiler::EmitNativeMove(
3299 const compiler::ffi::NativeLocation& destination,
3300 const compiler::ffi::NativeLocation& source,
3301 TemporaryRegisterAllocator* temp) {
3302 if (destination.IsBoth()) {
3303 // Copy to both.
3304 const auto& both = destination.AsBoth();
3305 EmitNativeMove(both.location(0), source, temp);
3306 EmitNativeMove(both.location(1), source, temp);
3307 return;
3308 }
3309 if (source.IsBoth()) {
3310 // Copy from one of both.
3311 const auto& both = source.AsBoth();
3312 EmitNativeMove(destination, both.location(0), temp);
3313 return;
3314 }
3315
3316 const auto& src_payload_type = source.payload_type();
3317 const auto& dst_payload_type = destination.payload_type();
3318 const auto& src_container_type = source.container_type();
3319 const auto& dst_container_type = destination.container_type();
3320 const intptr_t src_payload_size = src_payload_type.SizeInBytes();
3321 const intptr_t dst_payload_size = dst_payload_type.SizeInBytes();
3322 const intptr_t src_container_size = src_container_type.SizeInBytes();
3323 const intptr_t dst_container_size = dst_container_type.SizeInBytes();
3324
3325 // This function does not know how to do larger mem copy moves yet.
3326 ASSERT(src_payload_type.IsPrimitive());
3327 ASSERT(dst_payload_type.IsPrimitive());
3328
3329 // This function does not deal with sign conversions yet.
3330 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
3331
3332 // If the location, payload, and container are equal, we're done.
3333 if (source.Equals(destination) && src_payload_type.Equals(dst_payload_type) &&
3334 src_container_type.Equals(dst_container_type)) {
3335#if defined(TARGET_ARCH_RISCV64)
3336 // Except we might still need to adjust for the difference between C's
3337 // representation of uint32 (sign-extended to 64 bits) and Dart's
3338 // (zero-extended).
3339 EmitNativeMoveArchitecture(destination, source);
3340#endif
3341 return;
3342 }
3343
3344 // Solve discrepancies between container size and payload size.
3345 if (src_payload_type.IsInt() && dst_payload_type.IsInt() &&
3346 (src_payload_size != src_container_size ||
3347 dst_payload_size != dst_container_size)) {
3348 if (source.IsStack() && src_container_size > src_payload_size) {
3349 // Shrink loads since all loads are extending.
3350 return EmitNativeMove(
3351 destination,
3352 source.WithOtherNativeType(zone_, src_payload_type, src_payload_type),
3353 temp);
3354 }
3355 if (src_payload_size <= dst_payload_size &&
3356 src_container_size >= dst_container_size) {
3357 // The upper bits of the source are already properly sign or zero
3358 // extended, so just copy the required amount of bits.
3359 return EmitNativeMove(destination.WithOtherNativeType(
3360 zone_, dst_container_type, dst_container_type),
3361 source.WithOtherNativeType(
3362 zone_, dst_container_type, dst_container_type),
3363 temp);
3364 }
3365 if (src_payload_size >= dst_payload_size &&
3366 dst_container_size > dst_payload_size) {
3367 // The upper bits of the source are not properly sign or zero extended
3368 // to be copied to the target, so regard the source as smaller.
3369 return EmitNativeMove(
3370 destination.WithOtherNativeType(zone_, dst_container_type,
3371 dst_container_type),
3372 source.WithOtherNativeType(zone_, dst_payload_type, dst_payload_type),
3373 temp);
3374 }
3375 UNREACHABLE();
3376 }
3377 ASSERT(src_payload_size == src_container_size);
3378 ASSERT(dst_payload_size == dst_container_size);
3379
3380 // Split moves that are larger than kWordSize, these require separate
3381 // instructions on all architectures.
3382 if (compiler::target::kWordSize == 4 && src_container_size == 8 &&
3383 dst_container_size == 8 && !source.IsFpuRegisters() &&
3384 !destination.IsFpuRegisters()) {
3385 // TODO(40209): If this is stack to stack, we could use FpuTMP.
3386 // Test the impact on code size and speed.
3387 EmitNativeMove(destination.Split(zone_, 2, 0), source.Split(zone_, 2, 0),
3388 temp);
3389 EmitNativeMove(destination.Split(zone_, 2, 1), source.Split(zone_, 2, 1),
3390 temp);
3391 return;
3392 }
3393
3394 // Split moves from stack to stack, none of the architectures provides
3395 // memory to memory move instructions.
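  // The copy is routed through a scratch register: load the source slot into
  // the temporary (typed as the destination expects) and store it back out to
  // the destination slot.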
3396 if (source.IsStack() && destination.IsStack()) {
3397 Register scratch = temp->AllocateTemporary();
3398 ASSERT(scratch != kNoRegister);
3399#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
3400 ASSERT(scratch != TMP); // TMP is an argument register.
3401 ASSERT(scratch != TMP2); // TMP2 is an argument register.
3402#endif
3403    const auto& intermediate =
3404        *new (zone_) compiler::ffi::NativeRegistersLocation(
3405            zone_, dst_payload_type, dst_container_type, scratch);
3406 EmitNativeMove(intermediate, source, temp);
3407 EmitNativeMove(destination, intermediate, temp);
3408 temp->ReleaseTemporary();
3409 return;
3410 }
3411
3412 const bool sign_or_zero_extend = dst_container_size > src_container_size;
3413
3414 // No architecture supports sign extending with memory as destination.
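  // Example: widening an int32 to an int64 stack slot first sign-extends
  // within the source registers (retyped to the destination type) and only
  // then stores the widened value to the stack.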
3415 if (sign_or_zero_extend && destination.IsStack()) {
3416 ASSERT(source.IsRegisters());
3417 const auto& intermediate =
3418 source.WithOtherNativeType(zone_, dst_payload_type, dst_container_type);
3419 EmitNativeMove(intermediate, source, temp);
3420 EmitNativeMove(destination, intermediate, temp);
3421 return;
3422 }
3423
3424 // Do the simple architecture specific moves.
3425 EmitNativeMoveArchitecture(destination, source);
3426}
3427
3428void FlowGraphCompiler::EmitMoveToNative(
3429    const compiler::ffi::NativeLocation& dst,
3430    Location src_loc,
3431    Representation src_type,
3432    TemporaryRegisterAllocator* temp) {
3433 if (src_loc.IsPairLocation()) {
3434 for (intptr_t i : {0, 1}) {
3435      const auto& src_split = compiler::ffi::NativeLocation::FromPairLocation(
3436          zone_, src_loc, src_type, i);
3437 EmitNativeMove(dst.Split(zone_, 2, i), src_split, temp);
3438 }
3439 } else {
3440 const auto& src =
3441 compiler::ffi::NativeLocation::FromLocation(zone_, src_loc, src_type);
3442 // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
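    // Dart has no kUnboxedUint64, so a native unsigned 64-bit value arrives
    // here as kUnboxedInt64. Retyping the source to the destination's
    // container type keeps EmitNativeMove's signedness assertion satisfied.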
3443 if (src_type == kUnboxedInt64 &&
3444 dst.container_type().AsPrimitive().representation() ==
3445            compiler::ffi::kUint64) {
3446      EmitNativeMove(dst,
3447                     src.WithOtherNativeType(zone_, dst.container_type(),
3448 dst.container_type()),
3449 temp);
3450 } else {
3451 EmitNativeMove(dst, src, temp);
3452 }
3453 }
3454}
3455
3456void FlowGraphCompiler::EmitMoveFromNative(
3457    Location dst_loc,
3458    Representation dst_type,
3459    const compiler::ffi::NativeLocation& src,
3460    TemporaryRegisterAllocator* temp) {
3461 if (dst_loc.IsPairLocation()) {
3462 for (intptr_t i : {0, 1}) {
3463 const auto& dest_split = compiler::ffi::NativeLocation::FromPairLocation(
3464 zone_, dst_loc, dst_type, i);
3465 EmitNativeMove(dest_split, src.Split(zone_, 2, i), temp);
3466 }
3467 } else {
3468 const auto& dst =
3469 compiler::ffi::NativeLocation::FromLocation(zone_, dst_loc, dst_type);
3470 // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
3471 if (dst_type == kUnboxedInt64 &&
3472 src.container_type().AsPrimitive().representation() ==
3473            compiler::ffi::kUint64) {
3474      EmitNativeMove(dst.WithOtherNativeType(zone_, src.container_type(),
3475 src.container_type()),
3476 src, temp);
3477 } else {
3478 EmitNativeMove(dst, src, temp);
3479 }
3480 }
3481}
3482
3483void FlowGraphCompiler::EmitMoveConst(const compiler::ffi::NativeLocation& dst,
3484                                      Location src,
3485                                      Representation src_type,
3486                                      TemporaryRegisterAllocator* temp) {
3487 ASSERT(src.IsConstant() || src.IsPairLocation());
3488 const auto& dst_type = dst.payload_type();
3489 Register scratch = kNoRegister;
3490 if (dst.IsExpressibleAsLocation() &&
3491 dst_type.IsExpressibleAsRepresentation() &&
3492 dst_type.AsRepresentationOverApprox(zone_) == src_type) {
3493 // We can directly emit the const in the right place and representation.
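    // E.g. an integer constant headed for a location whose representation
    // already matches src_type needs no conversion and can be emitted
    // directly by EmitMove below.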
3494 const Location dst_loc = dst.AsLocation();
3495 assembler()->Comment("dst.IsExpressibleAsLocation() %s",
3496 dst_loc.ToCString());
3497 EmitMove(dst_loc, src, temp);
3498 } else {
3499 // We need an intermediate location.
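    // The constant is first materialized into a register (TMP where the
    // architecture reserves one, otherwise a freshly allocated temporary;
    // FpuTMP for floating-point destinations) and then forwarded to the
    // native destination via EmitNativeMove.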
3500 Location intermediate;
3501 if (dst_type.IsInt()) {
3502 if (TMP == kNoRegister) {
3503        scratch = temp->AllocateTemporary();
3504        intermediate = Location::RegisterLocation(scratch);
3505 } else {
3506 intermediate = Location::RegisterLocation(TMP);
3507 }
3508 } else {
3509 ASSERT(dst_type.IsFloat());
3510 intermediate = Location::FpuRegisterLocation(FpuTMP);
3511 }
3512 assembler()->Comment("constant using intermediate: %s",
3513 intermediate.ToCString());
3514
3515 if (src.IsPairLocation()) {
3516 for (intptr_t i : {0, 1}) {
3517 const Representation src_type_split =
3519 .Split(zone_, i)
3521 const auto& intermediate_native =
3522            compiler::ffi::NativeLocation::FromLocation(zone_, intermediate,
3523                                                        src_type_split);
3524 EmitMove(intermediate, src.AsPairLocation()->At(i), temp);
3525 EmitNativeMove(dst.Split(zone_, 2, i), intermediate_native, temp);
3526 }
3527 } else {
3528 const auto& intermediate_native =
3529          compiler::ffi::NativeLocation::FromLocation(zone_, intermediate,
3530                                                      src_type);
3531 EmitMove(intermediate, src, temp);
3532 EmitNativeMove(dst, intermediate_native, temp);
3533 }
3534
3535 if (scratch != kNoRegister) {
3536 temp->ReleaseTemporary();
3537 }
3538 }
3539 return;
3540}
3541
3542bool FlowGraphCompiler::CanPcRelativeCall(const Function& target) const {
3543 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3544         (LoadingUnit::LoadingUnitOf(function()) ==
3545          LoadingUnit::LoadingUnitOf(target));
3546}
3547
3548bool FlowGraphCompiler::CanPcRelativeCall(const Code& target) const {
3549 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3550 !target.InVMIsolateHeap() &&
3551         (LoadingUnit::LoadingUnitOf(function()) ==
3552          LoadingUnit::LoadingUnitOf(target));
3553}
3554
3555bool FlowGraphCompiler::CanPcRelativeCall(const AbstractType& target) const {
3556 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3557 !target.InVMIsolateHeap() &&
3559          LoadingUnit::kRootId);
3560}
3561
3562#undef __
3563
3564} // namespace dart