Flutter Engine
The Flutter Engine
flow_graph_compiler.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/globals.h" // Needed here to get TARGET_ARCH_XXX.
7
8#include "platform/utils.h"
9#include "vm/bit_vector.h"
16#include "vm/compiler/cha.h"
19#include "vm/dart_entry.h"
20#include "vm/debugger.h"
22#include "vm/exceptions.h"
23#include "vm/flags.h"
24#include "vm/kernel_isolate.h"
25#include "vm/log.h"
26#include "vm/longjump.h"
27#include "vm/object_store.h"
28#include "vm/parser.h"
29#include "vm/pointer_tagging.h"
30#include "vm/raw_object.h"
31#include "vm/resolver.h"
32#include "vm/service_isolate.h"
33#include "vm/stack_frame.h"
34#include "vm/stub_code.h"
35#include "vm/symbols.h"
36#include "vm/timeline.h"
38
39namespace dart {
40
41DEFINE_FLAG(bool,
42            trace_inlining_intervals,
43            false,
44            "Inlining interval diagnostics");
45
46DEFINE_FLAG(bool, enable_peephole, true, "Enable peephole optimization");
47
48DEFINE_FLAG(bool,
49            enable_simd_inline,
50            true,
51            "Enable inlining of SIMD related method calls.");
52DEFINE_FLAG(int,
53            min_optimization_counter_threshold,
54            5000,
55            "The minimum invocation count for a function.");
56DEFINE_FLAG(int,
57            optimization_counter_scale,
58            2000,
59            "The scale of invocation count, by size of the function.");
60DEFINE_FLAG(bool, source_lines, false, "Emit source line as assembly comment.");
61DEFINE_FLAG(bool,
62            force_indirect_calls,
63            false,
64            "Do not emit PC relative calls.");
65
66DECLARE_FLAG(charp, deoptimize_filter);
67DECLARE_FLAG(bool, intrinsify);
68DECLARE_FLAG(int, regexp_optimization_counter_threshold);
69DECLARE_FLAG(int, reoptimization_counter_threshold);
70DECLARE_FLAG(int, stacktrace_every);
71DECLARE_FLAG(charp, stacktrace_filter);
72DECLARE_FLAG(int, gc_every);
73DECLARE_FLAG(bool, trace_compiler);
74
75DEFINE_FLAG(bool,
76            align_all_loops,
77            false,
78            "Align all loop headers to 32 byte boundary");
79
80#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
81compiler::LRState ComputeInnerLRState(const FlowGraph& flow_graph) {
82 auto entry = flow_graph.graph_entry();
83 const bool frameless = !entry->NeedsFrame();
84
85 bool has_native_entries = false;
86 for (intptr_t i = 0; i < entry->SuccessorCount(); i++) {
87 if (entry->SuccessorAt(i)->IsNativeEntry()) {
88 has_native_entries = true;
89 break;
90 }
91 }
92
93 auto state = compiler::LRState::OnEntry();
94 if (has_native_entries) {
95    // We will set up three (3) frames on the stack when entering through
96 // native entry. Keep in sync with NativeEntry/NativeReturn.
97 state = state.EnterFrame().EnterFrame();
98 }
99
100 if (!frameless) {
101 state = state.EnterFrame();
102 }
103
104 return state;
105}
106#endif
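
// A minimal standalone sketch of the frame-depth bookkeeping performed by
// ComputeInnerLRState above. The real compiler::LRState also tracks whether
// the return address is still in LR or already spilled into the frame; this
// toy helper (hypothetical, not VM API) only counts the frames that have been
// entered by the time control reaches an inner block.
static int SketchInnerFrameDepth(bool has_native_entries, bool frameless) {
  int frames = 0;
  if (has_native_entries) {
    frames += 2;  // Native entries set up two extra frames.
  }
  if (!frameless) {
    frames += 1;  // The regular Dart frame.
  }
  return frames;
}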
107
108// Assign locations to outgoing arguments. Note that MoveArgument
109// can only occur in the innermost environment because we insert
110// them immediately before the call instruction and right before
111// register allocation.
112void CompilerDeoptInfo::AllocateOutgoingArguments(Environment* env) {
113 if (env == nullptr) return;
114 for (Environment::ShallowIterator it(env); !it.Done(); it.Advance()) {
115 if (it.CurrentLocation().IsInvalid()) {
116 if (auto move_arg = it.CurrentValue()->definition()->AsMoveArgument()) {
117 it.SetCurrentLocation(move_arg->locs()->out(0));
118 }
119 }
120 }
121}
122
123void CompilerDeoptInfo::EmitMaterializations(Environment* env,
124 DeoptInfoBuilder* builder) {
125 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
126 if (it.CurrentLocation().IsInvalid()) {
127 MaterializeObjectInstr* mat =
128 it.CurrentValue()->definition()->AsMaterializeObject();
129 ASSERT(mat != nullptr);
130 builder->AddMaterialization(mat);
131 }
132 }
133}
134
135FlowGraphCompiler::FlowGraphCompiler(
136    compiler::Assembler* assembler,
137 FlowGraph* flow_graph,
138 const ParsedFunction& parsed_function,
139 bool is_optimizing,
140 SpeculativeInliningPolicy* speculative_policy,
141 const GrowableArray<const Function*>& inline_id_to_function,
142 const GrowableArray<TokenPosition>& inline_id_to_token_pos,
143 const GrowableArray<intptr_t>& caller_inline_id,
144 ZoneGrowableArray<const ICData*>* deopt_id_to_ic_data,
145 CodeStatistics* stats /* = nullptr */)
146 : thread_(Thread::Current()),
147 zone_(Thread::Current()->zone()),
148 assembler_(assembler),
149 parsed_function_(parsed_function),
150 flow_graph_(*flow_graph),
151 block_order_(*flow_graph->CodegenBlockOrder()),
152 current_block_(nullptr),
153 exception_handlers_list_(nullptr),
154 pc_descriptors_list_(nullptr),
155 compressed_stackmaps_builder_(nullptr),
156 code_source_map_builder_(nullptr),
157 catch_entry_moves_maps_builder_(nullptr),
158 block_info_(block_order_.length()),
159 deopt_infos_(),
160 static_calls_target_table_(),
161 indirect_gotos_(),
162 is_optimizing_(is_optimizing),
163 speculative_policy_(speculative_policy),
164 may_reoptimize_(false),
165 intrinsic_mode_(false),
166 stats_(stats),
167 double_class_(
168 Class::ZoneHandle(isolate_group()->object_store()->double_class())),
169 mint_class_(
170 Class::ZoneHandle(isolate_group()->object_store()->mint_class())),
171 float32x4_class_(Class::ZoneHandle(
172 isolate_group()->object_store()->float32x4_class())),
173 float64x2_class_(Class::ZoneHandle(
174 isolate_group()->object_store()->float64x2_class())),
175 int32x4_class_(
176 Class::ZoneHandle(isolate_group()->object_store()->int32x4_class())),
177 list_class_(Class::ZoneHandle(Library::Handle(Library::CoreLibrary())
178 .LookupClass(Symbols::List()))),
179 pending_deoptimization_env_(nullptr),
180 deopt_id_to_ic_data_(deopt_id_to_ic_data),
181 edge_counters_array_(Array::ZoneHandle()) {
184 if (is_optimizing) {
185 // No need to collect extra ICData objects created during compilation.
186 deopt_id_to_ic_data_ = nullptr;
187 } else {
188 const intptr_t len = thread()->compiler_state().deopt_id();
189 deopt_id_to_ic_data_->EnsureLength(len, nullptr);
190 }
191 ASSERT(assembler != nullptr);
192 ASSERT(!list_class_.IsNull());
193
194#if defined(PRODUCT)
195 const bool stack_traces_only = true;
196#else
197 const bool stack_traces_only = false;
198#endif
199 // Make sure that the function is at the position for inline_id 0.
200 ASSERT(inline_id_to_function.length() >= 1);
201  ASSERT(inline_id_to_function[0]->ptr() ==
202         parsed_function.function().ptr());
203  code_source_map_builder_ = new (zone_)
204 CodeSourceMapBuilder(zone_, stack_traces_only, caller_inline_id,
205 inline_id_to_token_pos, inline_id_to_function);
206
207  ArchSpecificInitialization();
208}
209
210void FlowGraphCompiler::InitCompiler() {
211  compressed_stackmaps_builder_ =
212      new (zone()) CompressedStackMapsBuilder(zone());
213  pc_descriptors_list_ = new (zone()) DescriptorList(
214      zone(), &code_source_map_builder_->inline_id_to_function());
215 exception_handlers_list_ =
217#if defined(DART_PRECOMPILER)
218 catch_entry_moves_maps_builder_ = new (zone()) CatchEntryMovesMapBuilder();
219#endif
220 block_info_.Clear();
221 // Initialize block info and search optimized (non-OSR) code for calls
222 // indicating a non-leaf routine and calls without IC data indicating
223 // possible reoptimization.
224
225 for (int i = 0; i < block_order_.length(); ++i) {
226    block_info_.Add(new (zone()) BlockInfo());
227    if (is_optimizing() && !flow_graph().IsCompiledForOsr()) {
228      BlockEntryInstr* entry = block_order_[i];
229 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
230 Instruction* current = it.Current();
231 if (auto* branch = current->AsBranch()) {
232 current = branch->comparison();
233 }
234 if (auto* instance_call = current->AsInstanceCall()) {
235 const ICData* ic_data = instance_call->ic_data();
236 if ((ic_data == nullptr) || (ic_data->NumberOfUsedChecks() == 0)) {
237 may_reoptimize_ = true;
238 }
239 }
240 }
241 }
242 }
243
244 if (!is_optimizing() && FLAG_reorder_basic_blocks) {
245 // Initialize edge counter array.
246 const intptr_t num_counters = flow_graph_.preorder().length();
247 const Array& edge_counters =
248 Array::Handle(Array::New(num_counters, Heap::kOld));
249 for (intptr_t i = 0; i < num_counters; ++i) {
250 edge_counters.SetAt(i, Object::smi_zero());
251 }
252 edge_counters_array_ = edge_counters.ptr();
253 }
254}
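
// A minimal sketch of the edge-counter setup above: unoptimized code with
// FLAG_reorder_basic_blocks keeps one counter per preorder block so later
// compilations can use observed edge frequencies. Standalone toy that uses
// plain integers instead of the VM's Smi-filled Array (hypothetical names).
#include <cstdint>
#include <vector>

struct SketchEdgeCounters {
  std::vector<int64_t> counts;  // One zero-initialized slot per preorder block.

  explicit SketchEdgeCounters(size_t num_blocks) : counts(num_blocks, 0) {}

  // Conceptually bumped every time control enters block `preorder_id`.
  void RecordEdge(size_t preorder_id) { ++counts[preorder_id]; }
};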
255
259
263
265 return isolate_group()->use_osr() && CanOptimizeFunction() &&
266 !is_optimizing();
267}
268
270 const intptr_t offset = assembler()->InsertAlignedRelocation(reloc);
271 AddDescriptor(UntaggedPcDescriptors::kBSSRelocation, /*pc_offset=*/offset,
272 /*deopt_id=*/DeoptId::kNone, InstructionSource(),
273 /*try_index=*/-1);
274}
275
277#if !defined(PRODUCT)
278 if (FLAG_stacktrace_every > 0 || FLAG_deoptimize_every > 0 ||
279 FLAG_gc_every > 0 ||
280 (isolate_group()->reload_every_n_stack_overflow_checks() > 0)) {
282 return true;
283 }
284 }
285 if (FLAG_stacktrace_filter != nullptr &&
286 strstr(parsed_function().function().ToFullyQualifiedCString(),
287 FLAG_stacktrace_filter) != nullptr) {
288 return true;
289 }
290 if (is_optimizing() && FLAG_deoptimize_filter != nullptr &&
291 strstr(parsed_function().function().ToFullyQualifiedCString(),
292 FLAG_deoptimize_filter) != nullptr) {
293 return true;
294 }
295#endif // !defined(PRODUCT)
296 return false;
297}
298
300 // Entry-points cannot be merged because they must have assembly
301 // prologue emitted which should not be included in any block they jump to.
302 return !block->IsGraphEntry() && !block->IsFunctionEntry() &&
303 !block->IsCatchBlockEntry() && !block->IsOsrEntry() &&
304 !block->IsIndirectEntry() && !block->HasNonRedundantParallelMove() &&
305 block->next()->IsGoto() &&
306 !block->next()->AsGoto()->HasNonRedundantParallelMove();
307}
308
309void FlowGraphCompiler::CompactBlock(BlockEntryInstr* block) {
310 BlockInfo* block_info = block_info_[block->postorder_number()];
311
312 // Break out of cycles in the control flow graph.
313 if (block_info->is_marked()) {
314 return;
315 }
316 block_info->mark();
317
318 if (IsEmptyBlock(block)) {
319 // For empty blocks, record a corresponding nonempty target as their
320 // jump label.
321 BlockEntryInstr* target = block->next()->AsGoto()->successor();
322 CompactBlock(target);
323 block_info->set_jump_label(GetJumpLabel(target));
324 }
325}
326
327void FlowGraphCompiler::CompactBlocks() {
328 // This algorithm does not garbage collect blocks in place, but merely
329 // records forwarding label information. In this way it avoids having to
330 // change join and target entries.
331 compiler::Label* nonempty_label = nullptr;
332 for (intptr_t i = block_order().length() - 1; i >= 1; --i) {
333 BlockEntryInstr* block = block_order()[i];
334
335 // Unoptimized code must emit all possible deoptimization points.
336 if (is_optimizing()) {
337 CompactBlock(block);
338 }
339
340 // For nonempty blocks, record the next nonempty block in the block
341 // order. Since no code is emitted for empty blocks, control flow is
342 // eligible to fall through to the next nonempty one.
343 if (!WasCompacted(block)) {
344 BlockInfo* block_info = block_info_[block->postorder_number()];
345 block_info->set_next_nonempty_label(nonempty_label);
346 nonempty_label = GetJumpLabel(block);
347 }
348 }
349
350 ASSERT(block_order()[0]->IsGraphEntry());
351 BlockInfo* block_info = block_info_[block_order()[0]->postorder_number()];
352 block_info->set_next_nonempty_label(nonempty_label);
353}
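
// A minimal standalone sketch of the compaction scheme above: empty blocks are
// not deleted, they just forward their jump label to the next nonempty target,
// with a mark bit breaking cycles. Simplified hypothetical types, not the VM's
// BlockInfo/BlockEntryInstr.
#include <vector>

struct SketchBlock {
  bool is_empty = false;  // Block is only an unconditional goto.
  int goto_target = -1;   // Index of that goto's successor, if any.
  bool marked = false;    // Cycle breaker, mirrors BlockInfo::mark().
  int jump_label = -1;    // Resolved target; -1 means "use own label".
};

// Returns the block whose label a jump to `id` should actually use.
static int ResolveJumpTarget(std::vector<SketchBlock>& blocks, int id) {
  SketchBlock& b = blocks[id];
  if (b.marked) {
    return (b.jump_label >= 0) ? b.jump_label : id;
  }
  b.marked = true;
  if (b.is_empty && b.goto_target >= 0) {
    b.jump_label = ResolveJumpTarget(blocks, b.goto_target);
    return b.jump_label;
  }
  return id;
}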
354
355#if defined(DART_PRECOMPILER)
356static intptr_t LocationToStackIndex(const Location& src) {
357 ASSERT(src.HasStackIndex());
358 return -compiler::target::frame_layout.VariableIndexForFrameSlot(
359 src.stack_index());
360}
361
362static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
363 Representation src_type,
364 const Location& src,
365 intptr_t dst_index) {
366 if (src.IsConstant()) {
367 // Skip dead locations.
368 if (src.constant().ptr() == Object::optimized_out().ptr()) {
369 return CatchEntryMove();
370 }
371 const intptr_t pool_index =
372 assembler->object_pool_builder().FindObject(src.constant());
374 pool_index, dst_index);
375 }
376
377 if (src.IsPairLocation()) {
378 const auto lo_loc = src.AsPairLocation()->At(0);
379 const auto hi_loc = src.AsPairLocation()->At(1);
380 ASSERT(lo_loc.IsStackSlot() && hi_loc.IsStackSlot());
383 CatchEntryMove::EncodePairSource(LocationToStackIndex(lo_loc),
384 LocationToStackIndex(hi_loc)),
385 dst_index);
386 }
387
389 switch (src_type) {
390 case kTagged:
392 break;
393 case kUnboxedInt64:
395 break;
396 case kUnboxedInt32:
398 break;
399 case kUnboxedUint32:
401 break;
402 case kUnboxedFloat:
404 break;
405 case kUnboxedDouble:
407 break;
408 case kUnboxedFloat32x4:
410 break;
411 case kUnboxedFloat64x2:
413 break;
414 case kUnboxedInt32x4:
416 break;
417 default:
418 UNREACHABLE();
419 break;
420 }
421
422 return CatchEntryMove::FromSlot(src_kind, LocationToStackIndex(src),
423 dst_index);
424}
425#endif
426
428#if defined(DART_PRECOMPILER)
429 const intptr_t try_index = CurrentTryIndex();
430 if (is_optimizing() && env != nullptr && (try_index != kInvalidTryIndex)) {
431 env = env->Outermost();
432 CatchBlockEntryInstr* catch_block =
433 flow_graph().graph_entry()->GetCatchEntry(try_index);
434 const GrowableArray<Definition*>* idefs =
435 catch_block->initial_definitions();
436 catch_entry_moves_maps_builder_->NewMapping(assembler()->CodeSize());
437
438 for (intptr_t i = 0; i < flow_graph().variable_count(); ++i) {
439 // Don't sync captured parameters. They are not in the environment.
440 if (flow_graph().captured_parameters()->Contains(i)) continue;
441 auto param = (*idefs)[i]->AsParameter();
442
443 // Don't sync values that have been replaced with constants.
444 if (param == nullptr) continue;
445 RELEASE_ASSERT(param->env_index() == i);
446 Location dst = param->location();
447
448 // Don't sync exception or stack trace variables.
449 if (dst.IsRegister()) continue;
450
451 Location src = env->LocationAt(i);
452 // Can only occur if AllocationSinking is enabled - and it is disabled
453 // in functions with try.
454 ASSERT(!src.IsInvalid());
455 const Representation src_type =
456 env->ValueAt(i)->definition()->representation();
457 const auto move = CatchEntryMoveFor(assembler(), src_type, src,
458 LocationToStackIndex(dst));
459 if (!move.IsRedundant()) {
460 catch_entry_moves_maps_builder_->Append(move);
461 }
462 }
463
464 catch_entry_moves_maps_builder_->EndMapping();
465 }
466#endif // defined(DART_PRECOMPILER)
467}
468
470 intptr_t deopt_id,
472 LocationSummary* locs,
473 Environment* env) {
474 AddCurrentDescriptor(kind, deopt_id, source);
475 RecordSafepoint(locs);
477 if ((deopt_id != DeoptId::kNone) && !FLAG_precompiled_mode) {
478 // Marks either the continuation point in unoptimized code or the
479 // deoptimization point in optimized code, after call.
480 if (env != nullptr) {
481 // Note that we may lazy-deopt to the same IR instruction in unoptimized
482 // code or to another IR instruction (e.g. if LICM hoisted an instruction
483 // it will lazy-deopt to a Goto).
484 // If we happen to deopt to the beginning of an instruction in unoptimized
485 // code, we'll use the before deopt-id, otherwise the after deopt-id.
486 const intptr_t dest_deopt_id = env->LazyDeoptToBeforeDeoptId()
487 ? deopt_id
488 : DeoptId::ToDeoptAfter(deopt_id);
489 AddDeoptIndexAtCall(dest_deopt_id, env);
490 } else {
491 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
492 // Add deoptimization continuation point after the call and before the
493 // arguments are removed.
494 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after,
495 source);
496 }
497 }
498}
499
502 intptr_t yield_index) {
503 AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
504 DeoptId::kNone, source, CurrentTryIndex(), yield_index);
505}
506
507void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
508 if (!is_optimizing()) {
509 if (instr->CanBecomeDeoptimizationTarget() && !instr->IsGoto()) {
510 // Instructions that can be deoptimization targets need to record kDeopt
511 // PcDescriptor corresponding to their deopt id. GotoInstr records its
512 // own so that it can control the placement.
513 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, instr->deopt_id(),
514 instr->source());
515 }
516 AllocateRegistersLocally(instr);
517 }
518}
519
520#define __ assembler()->
521
522void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
523 if (is_optimizing()) {
524 return;
525 }
526 Definition* defn = instr->AsDefinition();
527 if (defn != nullptr && defn->HasTemp()) {
528 Location value = defn->locs()->out(0);
529 if (value.IsRegister()) {
530 __ PushRegister(value.reg());
531 } else if (value.IsFpuRegister()) {
532 const Code* stub;
533 switch (instr->representation()) {
534 case kUnboxedDouble:
535 stub = &StubCode::BoxDouble();
536 break;
537 case kUnboxedFloat32x4:
538 stub = &StubCode::BoxFloat32x4();
539 break;
540 case kUnboxedFloat64x2:
541 stub = &StubCode::BoxFloat64x2();
542 break;
543 default:
544 UNREACHABLE();
545 break;
546 }
547
548 // In unoptimized code at instruction epilogue the only
549 // live register is an output register.
550 instr->locs()->live_registers()->Clear();
551 if (instr->representation() == kUnboxedDouble) {
552 __ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
553 } else {
554 __ MoveUnboxedSimd128(BoxDoubleStubABI::kValueReg, value.fpu_reg());
555 }
557 InstructionSource(), // No token position.
558 *stub, UntaggedPcDescriptors::kOther, instr->locs());
559 __ PushRegister(BoxDoubleStubABI::kResultReg);
560 } else if (value.IsConstant()) {
561 __ PushObject(value.constant());
562 } else {
563 ASSERT(value.IsStackSlot());
564 __ PushValueAtOffset(value.base_reg(), value.ToStackSlotOffset());
565 }
566 }
567}
568
569#undef __
570
571void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
572 if (!instr->token_pos().IsReal()) {
573 return;
574 }
575 const InstructionSource& source = instr->source();
576 const intptr_t inlining_id = source.inlining_id < 0 ? 0 : source.inlining_id;
577 const Function& function =
578 *code_source_map_builder_->inline_id_to_function()[inlining_id];
579 ASSERT(instr->env() == nullptr ||
580 instr->env()->function().ptr() == function.ptr());
581 const auto& script = Script::Handle(zone(), function.script());
582 intptr_t line_nr;
583 if (script.GetTokenLocation(source.token_pos, &line_nr)) {
584 const String& line = String::Handle(zone(), script.GetLine(line_nr));
585 assembler()->Comment("Line %" Pd " in '%s':\n %s", line_nr,
586 function.ToFullyQualifiedCString(), line.ToCString());
587 }
588}
589
590static bool IsPusher(Instruction* instr) {
591 if (auto def = instr->AsDefinition()) {
592 return def->HasTemp() && (instr->representation() == kTagged);
593 }
594 return false;
595}
596
597static bool IsPopper(Instruction* instr) {
598 // TODO(ajcbik): even allow deopt targets by making environment aware?
599 if (!instr->CanBecomeDeoptimizationTarget()) {
600 return instr->ArgumentCount() == 0 && instr->InputCount() > 0;
601 }
602 return false;
603}
604
605bool FlowGraphCompiler::IsPeephole(Instruction* instr) const {
606 if (FLAG_enable_peephole && !is_optimizing()) {
607 return IsPusher(instr) && IsPopper(instr->next());
608 }
609 return false;
610}
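
// A minimal sketch of the push/pop peephole above. The VM detects the
// pusher/popper pair before emission and simply keeps the value in a register;
// the same effect is shown here as a separate cleanup pass over a toy
// instruction list (hypothetical opcode/operand encoding, not VM code).
#include <utility>
#include <vector>

enum class SketchOp { kPushReg, kPopReg, kOther };

// Removes adjacent push/pop pairs that move through the same register.
static void EliminatePushPopPairs(std::vector<std::pair<SketchOp, int>>* code) {
  std::vector<std::pair<SketchOp, int>> out;
  for (const auto& insn : *code) {
    if (insn.first == SketchOp::kPopReg && !out.empty() &&
        out.back().first == SketchOp::kPushReg &&
        out.back().second == insn.second) {
      out.pop_back();  // Value already lives in the register the pop wants.
      continue;
    }
    out.push_back(insn);
  }
  code->swap(out);
}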
611
613 // When unwinding async stacks we might produce frames which correspond
614 // to future listeners which are going to be called when the future completes.
615 // These listeners are not yet called and thus their frame pc_offset is set
616 // to 0 - which does not actually correspond to any call- or yield- site
617 // inside the code object. Nevertheless we would like to be able to
618 // produce proper position information for it when symbolizing the stack.
619 // To achieve that in AOT mode (where we don't actually have
620 // |Function::token_pos| available) we instead emit an artificial descriptor
621 // at the very beginning of the function.
622 if (FLAG_precompiled_mode && flow_graph().function().IsClosureFunction()) {
623 code_source_map_builder_->WriteFunctionEntrySourcePosition(
624 InstructionSource(flow_graph().function().token_pos()));
625 }
626}
627
629 InitCompiler();
630
631#if !defined(TARGET_ARCH_IA32)
632 // For JIT we have multiple entrypoints functionality which moved the frame
633 // setup into the [TargetEntryInstr] (which will set the constant pool
634 // allowed bit to true). Despite this we still have to set the
635 // constant pool allowed bit to true here as well, because we can generate
636 // code for [CatchEntryInstr]s, which need the pool.
638#endif
639
641 VisitBlocks();
642
643#if defined(DEBUG)
645#endif
646
647 if (!skip_body_compilation()) {
648#if !defined(TARGET_ARCH_IA32)
649 ASSERT(assembler()->constant_pool_allowed());
650#endif
651 GenerateDeferredCode();
652 }
653
654 for (intptr_t i = 0; i < indirect_gotos_.length(); ++i) {
655 indirect_gotos_[i]->ComputeOffsetTable(this);
656 }
657}
658
659#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
660// Returns true if function is marked with vm:align-loops pragma.
661static bool IsMarkedWithAlignLoops(const Function& function) {
664 /*only_core=*/false, function,
665 Symbols::vm_align_loops(),
666 /*multiple=*/false, &options);
667}
668#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
669
671 CompactBlocks();
673 // The loop_info fields were cleared, recompute.
674 flow_graph().ComputeLoops();
675 }
676
677 // In precompiled mode, we require the function entry to come first (after the
678 // graph entry), since the polymorphic check is performed in the function
679 // entry (see Instructions::EntryPoint).
680 if (FLAG_precompiled_mode) {
681 ASSERT(block_order()[1] == flow_graph().graph_entry()->normal_entry());
682 }
683
684#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
685 const auto inner_lr_state = ComputeInnerLRState(flow_graph());
686#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
687
688#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
689 const bool should_align_loops =
690 FLAG_align_all_loops || IsMarkedWithAlignLoops(function());
691#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
692
693 for (intptr_t i = 0; i < block_order().length(); ++i) {
694 // Compile the block entry.
695 BlockEntryInstr* entry = block_order()[i];
696 assembler()->Comment("B%" Pd "", entry->block_id());
697 set_current_block(entry);
698
699 if (WasCompacted(entry)) {
700 continue;
701 }
702
703#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
704 // At the start of every non-entry block we expect return address either
705 // to be spilled into the frame or to be in the LR register.
706 if (entry->IsFunctionEntry() || entry->IsNativeEntry()) {
707 assembler()->set_lr_state(compiler::LRState::OnEntry());
708 } else {
709 assembler()->set_lr_state(inner_lr_state);
710 }
711#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
712
713#if defined(DEBUG)
714 if (!is_optimizing()) {
715 FrameStateClear();
716 }
717#endif
718
720 for (LoopInfo* l = entry->loop_info(); l != nullptr; l = l->outer()) {
721 assembler()->Comment(" Loop %" Pd "", l->id());
722 }
723 if (entry->IsLoopHeader()) {
724 assembler()->Comment(" Loop Header");
725 }
726 }
727
728#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
729 if (should_align_loops && entry->IsLoopHeader() &&
733 }
734#else
735 static_assert(kPreferredLoopAlignment == 1);
736#endif // defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_ARM64)
737
738 BeginCodeSourceRange(entry->source());
739 ASSERT(pending_deoptimization_env_ == nullptr);
740 pending_deoptimization_env_ = entry->env();
741 set_current_instruction(entry);
742 StatsBegin(entry);
743 entry->EmitNativeCode(this);
744 StatsEnd(entry);
745 set_current_instruction(nullptr);
746 pending_deoptimization_env_ = nullptr;
747 EndCodeSourceRange(entry->source());
748
749 if (skip_body_compilation()) {
750 ASSERT(entry == flow_graph().graph_entry()->normal_entry());
751 break;
752 }
753
754 // Compile all successors until an exit, branch, or a block entry.
755 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
756 Instruction* instr = it.Current();
757 set_current_instruction(instr);
758 StatsBegin(instr);
759 // Unoptimized code always stores boxed values on the expression stack.
760 // However, unboxed representation is allowed for instruction inputs and
761 // outputs of certain types (e.g. for doubles).
762 // Unboxed inputs/outputs are handled in the instruction prologue
763 // and epilogue, but flagged as a mismatch on the IL level.
766
767 if (FLAG_code_comments || FLAG_disassemble ||
768 FLAG_disassemble_optimized) {
769 if (FLAG_source_lines) {
770 EmitSourceLine(instr);
771 }
772 EmitComment(instr);
773 }
774
776 EmitInstructionPrologue(instr);
777 ASSERT(pending_deoptimization_env_ == nullptr);
778 pending_deoptimization_env_ = instr->env();
779 DEBUG_ONLY(current_instruction_ = instr);
780 instr->EmitNativeCode(this);
781 DEBUG_ONLY(current_instruction_ = nullptr);
782 pending_deoptimization_env_ = nullptr;
783 if (IsPeephole(instr)) {
784 ASSERT(top_of_stack_ == nullptr);
785 top_of_stack_ = instr->AsDefinition();
786 } else {
787 EmitInstructionEpilogue(instr);
788 }
789 EndCodeSourceRange(instr->source());
790
791#if defined(DEBUG)
792 if (!is_optimizing()) {
793 FrameStateUpdateWith(instr);
794 }
795#endif
796 StatsEnd(instr);
797 set_current_instruction(nullptr);
798
799 if (auto indirect_goto = instr->AsIndirectGoto()) {
800 indirect_gotos_.Add(indirect_goto);
801 }
802 }
803
804#if defined(DEBUG)
805 ASSERT(is_optimizing() || FrameStateIsSafeToCall());
806#endif
807 }
808
809 set_current_block(nullptr);
810}
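
// A minimal sketch of the emission loop above: walk blocks in codegen order,
// emit nothing for compacted (empty) blocks, and for every remaining
// instruction run prologue, native code, then epilogue while the pending
// deoptimization environment is installed. Hypothetical interfaces, not the
// VM's Instruction/BlockEntryInstr.
#include <vector>

struct SketchInstr {
  virtual ~SketchInstr() {}
  virtual void EmitNativeCode() = 0;
};

struct SketchBlockCode {
  bool was_compacted = false;
  std::vector<SketchInstr*> instructions;
};

static void SketchVisitBlocks(const std::vector<SketchBlockCode>& blocks) {
  for (const SketchBlockCode& block : blocks) {
    if (block.was_compacted) continue;  // Empty block: label forwards, no code.
    for (SketchInstr* instr : block.instructions) {
      // Prologue: bind deopt descriptors / allocate registers (elided).
      instr->EmitNativeCode();
      // Epilogue: push a boxed result onto the expression stack (elided).
    }
  }
}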
811
812void FlowGraphCompiler::Bailout(const char* reason) {
813 parsed_function_.Bailout("FlowGraphCompiler", reason);
814}
815
817 if (is_optimizing_) {
818 return flow_graph_.graph_entry()->spill_slot_count();
819 } else {
820 return parsed_function_.num_stack_locals();
821 }
822}
823
825 ASSERT(flow_graph().IsCompiledForOsr());
826 const intptr_t stack_depth =
828 const intptr_t num_stack_locals = flow_graph().num_stack_locals();
829 return StackSize() - stack_depth - num_stack_locals;
830}
831
833 BlockEntryInstr* block_entry) const {
834 const intptr_t block_index = block_entry->postorder_number();
835 return block_info_[block_index]->jump_label();
836}
837
839 const intptr_t block_index = block_entry->postorder_number();
840 return block_info_[block_index]->WasCompacted();
841}
842
844 const intptr_t current_index = current_block()->postorder_number();
845 return block_info_[current_index]->next_nonempty_label();
846}
847
849 return NextNonEmptyLabel() == GetJumpLabel(block_entry);
850}
851
853 compiler::Label* true_label = GetJumpLabel(branch->true_successor());
854 compiler::Label* false_label = GetJumpLabel(branch->false_successor());
855 compiler::Label* fall_through = NextNonEmptyLabel();
856 BranchLabels result = {true_label, false_label, fall_through};
857 return result;
858}
859
861 slow_path_code_.Add(code);
862}
863
864void FlowGraphCompiler::GenerateDeferredCode() {
865#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
866 const auto lr_state = ComputeInnerLRState(flow_graph());
867#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
868
869 for (intptr_t i = 0; i < slow_path_code_.length(); i++) {
870 SlowPathCode* const slow_path = slow_path_code_[i];
873 slow_path->instruction()->tag());
874#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
875 assembler()->set_lr_state(lr_state);
876#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
877 set_current_instruction(slow_path->instruction());
878 set_current_block(current_instruction_->GetBlock());
879 SpecialStatsBegin(stats_tag);
880 BeginCodeSourceRange(slow_path->instruction()->source());
881 DEBUG_ONLY(current_instruction_ = slow_path->instruction());
882 slow_path->GenerateCode(this);
883 DEBUG_ONLY(current_instruction_ = nullptr);
884 EndCodeSourceRange(slow_path->instruction()->source());
885 SpecialStatsEnd(stats_tag);
886 set_current_instruction(nullptr);
887 set_current_block(nullptr);
888 }
889 // All code generated by deferred deopt info is treated as in the root
890 // function.
891 const InstructionSource deopt_source(TokenPosition::kDeferredDeoptInfo,
892 /*inlining_id=*/0);
893 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
894 BeginCodeSourceRange(deopt_source);
895#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
896 assembler()->set_lr_state(lr_state);
897#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
898 deopt_infos_[i]->GenerateCode(this, i);
899 EndCodeSourceRange(deopt_source);
900 }
901}
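
// A minimal sketch of the deferred-code pattern above: slow paths registered
// while the main body is emitted are generated afterwards, keeping the hot
// path fall-through friendly. Standalone toy using std::function instead of
// the VM's SlowPathCode objects (hypothetical names).
#include <functional>
#include <utility>
#include <vector>

class SketchDeferredEmitter {
 public:
  // Called during the main pass; `emit` produces the out-of-line code.
  void AddSlowPath(std::function<void()> emit) {
    slow_paths_.push_back(std::move(emit));
  }

  // Called once after all blocks have been emitted.
  void EmitDeferredCode() {
    for (auto& emit : slow_paths_) emit();
  }

 private:
  std::vector<std::function<void()>> slow_paths_;
};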
902
904 exception_handlers_list_->AddHandler(
905 entry->catch_try_index(), entry->try_index(), assembler()->CodeSize(),
906 entry->is_generated(), entry->catch_handler_types(),
907 entry->needs_stacktrace());
908 if (is_optimizing()) {
909 RecordSafepoint(entry->locs());
910 }
911}
912
914 exception_handlers_list_->SetNeedsStackTrace(try_index);
915}
916
918 intptr_t pc_offset,
919 intptr_t deopt_id,
921 intptr_t try_index,
922 intptr_t yield_index) {
923 code_source_map_builder_->NoteDescriptor(kind, pc_offset, source);
924 // Don't emit deopt-descriptors in AOT mode.
925 if (FLAG_precompiled_mode && (kind == UntaggedPcDescriptors::kDeopt)) return;
926 // Use the token position of the original call in the root function if source
927 // has an inlining id.
928 const auto& root_pos = code_source_map_builder_->RootPosition(source);
929 pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, root_pos,
930 try_index, yield_index);
931}
932
933// Uses current pc position and try-index.
935 intptr_t deopt_id,
936 const InstructionSource& source) {
937 AddDescriptor(kind, assembler()->CodeSize(), deopt_id, source,
939}
940
942 const String& name) {
943#if defined(DART_PRECOMPILER)
944 // If we are generating an AOT snapshot and have DWARF stack traces enabled,
945 // the AOT runtime is unable to obtain the pool index at runtime. Therefore,
946 // there is no reason to put the name into the pool in the first place.
947 // TODO(dartbug.com/40605): Move this info to the pc descriptors.
948 if (FLAG_precompiled_mode && FLAG_dwarf_stack_traces_mode) return;
949#endif
950 const intptr_t name_index =
952 code_source_map_builder_->NoteNullCheck(assembler()->CodeSize(), source,
953 name_index);
954}
955
956void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function,
957 Code::EntryKind entry_kind) {
958 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
959 const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
962 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
963 Code::kPcRelativeCall, entry_point, assembler()->CodeSize(), &function,
964 nullptr, nullptr));
965}
966
967void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
968 DEBUG_ASSERT(stub_code.IsNotTemporaryScopedHandle());
969 ASSERT(!stub_code.IsNull());
970 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
972 nullptr, &stub_code, nullptr));
973}
974
975void FlowGraphCompiler::AddPcRelativeTailCallStubTarget(const Code& stub_code) {
976 DEBUG_ASSERT(stub_code.IsNotTemporaryScopedHandle());
977 ASSERT(!stub_code.IsNull());
978 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
980 nullptr, &stub_code, nullptr));
981}
982
983void FlowGraphCompiler::AddPcRelativeTTSCallTypeTarget(
984 const AbstractType& dst_type) {
985 DEBUG_ASSERT(dst_type.IsNotTemporaryScopedHandle());
986 ASSERT(!dst_type.IsNull());
987 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
989 nullptr, nullptr, &dst_type));
990}
991
992void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
993 Code::EntryKind entry_kind) {
994 DEBUG_ASSERT(func.IsNotTemporaryScopedHandle());
995 const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
998 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
999 Code::kCallViaCode, entry_point, assembler()->CodeSize(), &func, nullptr,
1000 nullptr));
1001}
1002
1004 DEBUG_ASSERT(code.IsNotTemporaryScopedHandle());
1005 static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
1006 Code::kCallViaCode, Code::kDefaultEntry, assembler()->CodeSize(), nullptr,
1007 &code, nullptr));
1008}
1009
1011 const compiler::TableSelector* selector) {
1012 dispatch_table_call_targets_.Add(selector);
1013}
1014
1016 Environment* env) {
1019 ASSERT(!FLAG_precompiled_mode);
1020 if (env != nullptr) {
1021 env = env->GetLazyDeoptEnv(zone());
1022 }
1024 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptAtCall,
1025 0, // No flags.
1026 env);
1028 deopt_infos_.Add(info);
1029 return info;
1030}
1031
1033 Environment* env) {
1034 ASSERT(deopt_id != DeoptId::kNone);
1035 deopt_id = DeoptId::ToDeoptAfter(deopt_id);
1037 new (zone()) CompilerDeoptInfo(deopt_id, ICData::kDeoptUnknown, 0, env);
1039 deopt_infos_.Add(info);
1040 return info;
1041}
1042
1043// This function must be in sync with FlowGraphCompiler::SaveLiveRegisters
1044// and FlowGraphCompiler::SlowPathEnvironmentFor.
1045// See StackFrame::VisitObjectPointers for the details of how stack map is
1046// interpreted.
1048 intptr_t slow_path_argument_count) {
1049 if (is_optimizing() || locs->live_registers()->HasUntaggedValues()) {
1050 const intptr_t spill_area_size =
1051 is_optimizing() ? flow_graph_.graph_entry()->spill_slot_count() : 0;
1052
1053 RegisterSet* registers = locs->live_registers();
1054 ASSERT(registers != nullptr);
1055 const intptr_t kFpuRegisterSpillFactor =
1056 kFpuRegisterSize / compiler::target::kWordSize;
1057 const bool using_shared_stub = locs->call_on_shared_slow_path();
1058
1060
1061 // Expand the bitmap to cover the whole area reserved for spill slots.
1062 // (register allocator takes care of marking slots containing live tagged
1063 // values but it does not do the same for other slots so length might be
1064 // below spill_area_size at this point).
1065 RELEASE_ASSERT(bitmap.Length() <= spill_area_size);
1066 bitmap.SetLength(spill_area_size);
1067
1068 auto instr = current_instruction();
1069 const intptr_t args_count = instr->ArgumentCount();
1070 RELEASE_ASSERT(args_count == 0 || is_optimizing());
1071
1072 for (intptr_t i = 0; i < args_count; i++) {
1073 const auto move_arg =
1074 instr->ArgumentValueAt(i)->instruction()->AsMoveArgument();
1075 const auto rep = move_arg->representation();
1076 if (move_arg->is_register_move()) {
1077 continue;
1078 }
1079
1080 ASSERT(rep == kTagged || rep == kUnboxedInt64 || rep == kUnboxedDouble);
1081 static_assert(compiler::target::kIntSpillFactor ==
1082 compiler::target::kDoubleSpillFactor,
1083 "int and double are of the same size");
1084 const bool is_tagged = move_arg->representation() == kTagged;
1085 const intptr_t num_bits =
1086 is_tagged ? 1 : compiler::target::kIntSpillFactor;
1087
1088 // Note: bits are reversed so higher bit corresponds to lower word.
1089 const intptr_t last_arg_bit =
1090 (spill_area_size - 1) - move_arg->sp_relative_index();
1091 bitmap.SetRange(last_arg_bit - (num_bits - 1), last_arg_bit, is_tagged);
1092 }
1093 ASSERT(slow_path_argument_count == 0 || !using_shared_stub);
1094 RELEASE_ASSERT(bitmap.Length() == spill_area_size);
1095
1096 // Trim the fully tagged suffix. Stack walking assumes that everything
1097 // not included into the stack map is tagged.
1098 intptr_t spill_area_bits = bitmap.Length();
1099 while (spill_area_bits > 0) {
1100 if (!bitmap.Get(spill_area_bits - 1)) {
1101 break;
1102 }
1103 spill_area_bits--;
1104 }
1105 bitmap.SetLength(spill_area_bits);
1106
1107 // Mark the bits in the stack map in the same order we push registers in
1108 // slow path code (see FlowGraphCompiler::SaveLiveRegisters).
1109 //
1110 // Slow path code can have registers at the safepoint.
1111 if (!locs->always_calls() && !using_shared_stub) {
1112 RegisterSet* regs = locs->live_registers();
1113 if (regs->FpuRegisterCount() > 0) {
1114 // Denote FPU registers with 0 bits in the stackmap. Based on the
1115 // assumption that there are normally few live FPU registers, this
1116 // encoding is simpler and roughly as compact as storing a separate
1117 // count of FPU registers.
1118 //
1119 // FPU registers have the highest register number at the highest
1120 // address (i.e., first in the stackmap).
1121 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
1122 FpuRegister reg = static_cast<FpuRegister>(i);
1123 if (regs->ContainsFpuRegister(reg)) {
1124 for (intptr_t j = 0; j < kFpuRegisterSpillFactor; ++j) {
1125 bitmap.Set(bitmap.Length(), false);
1126 }
1127 }
1128 }
1129 }
1130
1131 // General purpose registers have the highest register number at the
1132 // highest address (i.e., first in the stackmap).
1133 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1134 Register reg = static_cast<Register>(i);
1135 if (locs->live_registers()->ContainsRegister(reg)) {
1136 bitmap.Set(bitmap.Length(), locs->live_registers()->IsTagged(reg));
1137 }
1138 }
1139 }
1140
1141 if (using_shared_stub) {
1142 // To simplify the code in the shared stub, we create an untagged hole
1143 // in the stack frame where the shared stub can leave the return address
1144 // before saving registers.
1145 bitmap.Set(bitmap.Length(), false);
1146 if (registers->FpuRegisterCount() > 0) {
1147 bitmap.SetRange(bitmap.Length(),
1148 bitmap.Length() +
1149 kNumberOfFpuRegisters * kFpuRegisterSpillFactor - 1,
1150 false);
1151 }
1152 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1153 if ((kReservedCpuRegisters & (1 << i)) != 0) continue;
1154 const Register reg = static_cast<Register>(i);
1155 bitmap.Set(bitmap.Length(),
1156 locs->live_registers()->ContainsRegister(reg) &&
1157 locs->live_registers()->IsTagged(reg));
1158 }
1159 }
1160
1161 // Arguments pushed after live registers in the slow path are tagged.
1162 for (intptr_t i = 0; i < slow_path_argument_count; ++i) {
1163 bitmap.Set(bitmap.Length(), true);
1164 }
1165
1166 compressed_stackmaps_builder_->AddEntry(assembler()->CodeSize(), &bitmap,
1167 spill_area_bits);
1168 }
1169}
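
// A minimal sketch of the stack map encoding recorded above: one bit per spill
// slot (1 = tagged pointer the GC must visit, 0 = raw data), with the fully
// tagged suffix trimmed because the stack walker treats everything beyond the
// recorded length as tagged. Standalone toy, not the VM's BitmapBuilder.
#include <vector>

static std::vector<bool> SketchBuildStackMap(
    const std::vector<bool>& slot_is_tagged) {
  std::vector<bool> bitmap = slot_is_tagged;
  // Trim the fully tagged suffix; it is implied rather than stored.
  while (!bitmap.empty() && bitmap.back()) {
    bitmap.pop_back();
  }
  return bitmap;
}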
1170
1171// This function must be kept in sync with:
1172//
1173// FlowGraphCompiler::RecordSafepoint
1174// FlowGraphCompiler::SaveLiveRegisters
1175// MaterializeObjectInstr::RemapRegisters
1176//
1179 LocationSummary* locs,
1180 intptr_t num_slow_path_args) {
1181 const bool using_shared_stub = locs->call_on_shared_slow_path();
1182 const bool shared_stub_save_fpu_registers =
1183 using_shared_stub && locs->live_registers()->FpuRegisterCount() > 0;
1184 // TODO(sjindel): Modify logic below to account for slow-path args with shared
1185 // stubs.
1186 ASSERT(!using_shared_stub || num_slow_path_args == 0);
1187 if (env == nullptr) {
1188 // In AOT, environments can be removed by EliminateEnvironments pass
1189 // (if not in a try block).
1190 ASSERT(!is_optimizing() || FLAG_precompiled_mode);
1191 return nullptr;
1192 }
1193
1194 Environment* slow_path_env =
1195 env->DeepCopy(zone(), env->Length() - env->LazyDeoptPruneCount());
1196 // 1. Iterate the registers in the order they will be spilled to compute
1197 // the slots they will be spilled to.
1198 intptr_t next_slot = StackSize() + slow_path_env->CountArgsPushed();
1199 if (using_shared_stub) {
1200 // The PC from the call to the shared stub is pushed here.
1201 next_slot++;
1202 }
1203 RegisterSet* regs = locs->live_registers();
1204 intptr_t fpu_reg_slots[kNumberOfFpuRegisters];
1205 intptr_t cpu_reg_slots[kNumberOfCpuRegisters];
1206 const intptr_t kFpuRegisterSpillFactor =
1207 kFpuRegisterSize / compiler::target::kWordSize;
1208 // FPU registers are spilled first from highest to lowest register number.
1209 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
1210 FpuRegister reg = static_cast<FpuRegister>(i);
1211 if (regs->ContainsFpuRegister(reg)) {
1212 // We use the lowest address (thus highest index) to identify a
1213 // multi-word spill slot.
1214 next_slot += kFpuRegisterSpillFactor;
1215 fpu_reg_slots[i] = (next_slot - 1);
1216 } else {
1217 if (using_shared_stub && shared_stub_save_fpu_registers) {
1218 next_slot += kFpuRegisterSpillFactor;
1219 }
1220 fpu_reg_slots[i] = -1;
1221 }
1222 }
1223 // General purpose registers are spilled from highest to lowest register
1224 // number.
1225 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
1226 if ((kReservedCpuRegisters & (1 << i)) != 0) continue;
1227 Register reg = static_cast<Register>(i);
1228 if (regs->ContainsRegister(reg)) {
1229 cpu_reg_slots[i] = next_slot++;
1230 } else {
1231 if (using_shared_stub) next_slot++;
1232 cpu_reg_slots[i] = -1;
1233 }
1234 }
1235
1236 // 2. Iterate the environment and replace register locations with the
1237 // corresponding spill slot locations.
1238 for (Environment::DeepIterator it(slow_path_env); !it.Done(); it.Advance()) {
1239 Location loc = it.CurrentLocation();
1240 Value* value = it.CurrentValue();
1241 it.SetCurrentLocation(LocationRemapForSlowPath(
1242 loc, value->definition(), cpu_reg_slots, fpu_reg_slots));
1243 }
1244
1245 return slow_path_env;
1246}
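
// A minimal sketch of the register-to-spill-slot mapping built above:
// registers get slots in the exact order SaveLiveRegisters pushes them (FPU
// registers first, then CPU registers, both from the highest register number
// down), starting just past the frame's regular spill area. Shared-stub
// handling is omitted. Hypothetical names, not the VM's Location types.
#include <vector>

struct SketchSlotAssignment {
  std::vector<int> fpu_slot;  // -1 when the register is not live.
  std::vector<int> cpu_slot;
};

static SketchSlotAssignment SketchAssignSlowPathSlots(
    const std::vector<bool>& fpu_live,  // Indexed by FPU register number.
    const std::vector<bool>& cpu_live,  // Indexed by CPU register number.
    int first_free_slot,                // Frame size plus pushed arguments.
    int fpu_spill_factor) {             // Words per FPU register.
  SketchSlotAssignment out;
  out.fpu_slot.assign(fpu_live.size(), -1);
  out.cpu_slot.assign(cpu_live.size(), -1);
  int next_slot = first_free_slot;
  for (int i = static_cast<int>(fpu_live.size()) - 1; i >= 0; --i) {
    if (fpu_live[i]) {
      next_slot += fpu_spill_factor;
      out.fpu_slot[i] = next_slot - 1;  // Lowest address names the slot.
    }
  }
  for (int i = static_cast<int>(cpu_live.size()) - 1; i >= 0; --i) {
    if (cpu_live[i]) {
      out.cpu_slot[i] = next_slot++;
    }
  }
  return out;
}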
1247
1249 ICData::DeoptReasonId reason,
1250 uint32_t flags) {
1251 if (intrinsic_mode()) {
1252 return intrinsic_slow_path_label_;
1253 }
1254
1255 // No deoptimization allowed when 'FLAG_precompiled_mode' is set.
1256 if (FLAG_precompiled_mode) {
1257 if (FLAG_trace_compiler) {
1258 THR_Print(
1259 "Retrying compilation %s, suppressing inlining of deopt_id:%" Pd "\n",
1260 parsed_function_.function().ToFullyQualifiedCString(), deopt_id);
1261 }
1262 ASSERT(speculative_policy_->AllowsSpeculativeInlining());
1263 ASSERT(deopt_id != 0); // longjmp must return non-zero value.
1265 deopt_id, Object::speculative_inlining_error());
1266 }
1267
1268 ASSERT(is_optimizing_);
1269 ASSERT(pending_deoptimization_env_ != nullptr);
1270 if (pending_deoptimization_env_->IsHoisted()) {
1272 }
1274 deopt_id, reason, flags, pending_deoptimization_env_);
1275 deopt_infos_.Add(stub);
1276 return stub->entry_label();
1277}
1278
1280 ASSERT(exception_handlers_list_ != nullptr);
1282 exception_handlers_list_->FinalizeExceptionHandlers(code.PayloadStart()));
1283 code.set_exception_handlers(handlers);
1284}
1285
1287 ASSERT(pc_descriptors_list_ != nullptr);
1288 const PcDescriptors& descriptors = PcDescriptors::Handle(
1289 pc_descriptors_list_->FinalizePcDescriptors(code.PayloadStart()));
1290 if (!is_optimizing_) descriptors.Verify(parsed_function_.function());
1291 code.set_pc_descriptors(descriptors);
1292}
1293
1295 // No deopt information if we precompile (no deoptimization allowed).
1296 if (FLAG_precompiled_mode) {
1297 return Array::empty_array().ptr();
1298 }
1299 // For functions with optional arguments, all incoming arguments are copied
1300 // to spill slots. The deoptimization environment does not track them.
1302 const intptr_t incoming_arg_count =
1304 DeoptInfoBuilder builder(zone(), incoming_arg_count, assembler);
1305
1306 intptr_t deopt_info_table_size = DeoptTable::SizeFor(deopt_infos_.length());
1307 if (deopt_info_table_size == 0) {
1308 return Object::empty_array().ptr();
1309 } else {
1310 const Array& array =
1311 Array::Handle(Array::New(deopt_info_table_size, Heap::kOld));
1312 Smi& offset = Smi::Handle();
1314 Smi& reason_and_flags = Smi::Handle();
1315 for (intptr_t i = 0; i < deopt_infos_.length(); i++) {
1316 offset = Smi::New(deopt_infos_[i]->pc_offset());
1317 info = deopt_infos_[i]->CreateDeoptInfo(this, &builder, array);
1318 reason_and_flags = DeoptTable::EncodeReasonAndFlags(
1319 deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
1320 DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
1321 }
1322 return array.ptr();
1323 }
1324}
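
// A minimal sketch of the deopt table shape produced above: one row per deopt
// point, holding the PC offset of the deopt stub, the encoded unwinding
// instructions, and a packed reason/flags word. Standalone toy structs instead
// of the VM's Array/Smi handles (hypothetical field types).
#include <cstdint>
#include <vector>

struct SketchDeoptEntry {
  uint32_t pc_offset;         // Offset of the deopt jump within the code.
  const void* deopt_info;     // Encoded deoptimization instructions.
  uint32_t reason_and_flags;  // Packed ICData::DeoptReasonId plus flags.
};

using SketchDeoptTable = std::vector<SketchDeoptEntry>;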
1325
1327 ASSERT(compressed_stackmaps_builder_ != nullptr);
1328 // Finalize the compressed stack maps and add it to the code object.
1329 const auto& maps =
1330 CompressedStackMaps::Handle(compressed_stackmaps_builder_->Finalize());
1331 code.set_compressed_stackmaps(maps);
1332}
1333
1335#if defined(PRODUCT)
1336// No debugger: no var descriptors.
1337#else
1338 if (code.is_optimized()) {
1339 // Optimized code does not need variable descriptors. They are
1340 // only stored in the unoptimized version.
1341 code.set_var_descriptors(Object::empty_var_descriptors());
1342 return;
1343 }
1345 if (flow_graph().IsIrregexpFunction()) {
1346 // Eager local var descriptors computation for Irregexp function as it is
1347 // complicated to factor out.
1348 // TODO(srdjan): Consider canonicalizing and reusing the local var
1349 // descriptor for IrregexpFunction.
1350 ASSERT(parsed_function().scope() == nullptr);
1351 var_descs = LocalVarDescriptors::New(1);
1354 info.scope_id = 0;
1355 info.begin_pos = TokenPosition::kMinSource;
1357 info.set_index(compiler::target::frame_layout.FrameSlotForVariable(
1358 parsed_function().current_context_var()));
1359 var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
1360 }
1361 code.set_var_descriptors(var_descs);
1362#endif
1363}
1364
1366#if defined(DART_PRECOMPILER)
1367 if (FLAG_precompiled_mode) {
1369 catch_entry_moves_maps_builder_->FinalizeCatchEntryMovesMap());
1370 code.set_catch_entry_moves_maps(maps);
1371 return;
1372 }
1373#endif
1374 code.set_num_variables(flow_graph().variable_count());
1375}
1376
1378 ASSERT(code.static_calls_target_table() == Array::null());
1379 const auto& calls = static_calls_target_table_;
1380 const intptr_t array_length = calls.length() * Code::kSCallTableEntryLength;
1381 const auto& targets =
1382 Array::Handle(zone(), Array::New(array_length, Heap::kOld));
1383
1384 StaticCallsTable entries(targets);
1385 auto& kind_type_and_offset = Smi::Handle(zone());
1386 for (intptr_t i = 0; i < calls.length(); i++) {
1387 auto entry = calls[i];
1388 kind_type_and_offset =
1389 Smi::New(Code::KindField::encode(entry->call_kind) |
1390 Code::EntryPointField::encode(entry->entry_point) |
1391 Code::OffsetField::encode(entry->offset));
1392 auto view = entries[i];
1393 view.Set<Code::kSCallTableKindAndOffset>(kind_type_and_offset);
1394 const Object* target = nullptr;
1395 if (entry->function != nullptr) {
1396 target = entry->function;
1397 view.Set<Code::kSCallTableFunctionTarget>(*entry->function);
1398 }
1399 if (entry->code != nullptr) {
1400 ASSERT(target == nullptr);
1401 target = entry->code;
1402 view.Set<Code::kSCallTableCodeOrTypeTarget>(*entry->code);
1403 }
1404 if (entry->dst_type != nullptr) {
1405 ASSERT(target == nullptr);
1406 view.Set<Code::kSCallTableCodeOrTypeTarget>(*entry->dst_type);
1407 }
1408 }
1409 code.set_static_calls_target_table(targets);
1410}
1411
1413 const Array& inlined_id_array =
1414 Array::Handle(zone(), code_source_map_builder_->InliningIdToFunction());
1415 code.set_inlined_id_to_function(inlined_id_array);
1416
1417 const CodeSourceMap& map =
1418 CodeSourceMap::Handle(code_source_map_builder_->Finalize());
1419 code.set_code_source_map(map);
1420
1421#if defined(DEBUG)
1422 // Force simulation through the last pc offset. This checks we can decode
1423 // the whole CodeSourceMap without hitting an unknown opcode, stack underflow,
1424 // etc.
1427 code.GetInlinedFunctionsAtInstruction(code.Size() - 1, &fs, &tokens);
1428#endif
1429}
1430
1431// Returns 'true' if regular code generation should be skipped.
1433 if (TryIntrinsifyHelper()) {
1434 fully_intrinsified_ = true;
1435 return true;
1436 }
1437 return false;
1438}
1439
1440bool FlowGraphCompiler::TryIntrinsifyHelper() {
1441 ASSERT(!flow_graph().IsCompiledForOsr());
1442
1443 compiler::Label exit;
1445
1447
1449 bool complete = compiler::Intrinsifier::Intrinsify(parsed_function(), this);
1451
1453
1454 // "Deoptimization" from intrinsic continues here. All deoptimization
1455 // branches from intrinsic code redirect to here where the slow-path
1456 // (normal function body) starts.
1457 // This means that there must not be any side-effects in intrinsic code
1458 // before any deoptimization point.
1461 return complete;
1462}
1463
1465 const Code& stub,
1467 LocationSummary* locs,
1468 intptr_t deopt_id,
1469 Environment* env) {
1470 ASSERT(FLAG_precompiled_mode ||
1471 (deopt_id != DeoptId::kNone && (!is_optimizing() || env != nullptr)));
1472 EmitCallToStub(stub);
1473 EmitCallsiteMetadata(source, deopt_id, kind, locs, env);
1474}
1475
1478 const Code& stub,
1480 LocationSummary* locs,
1481 ObjectPool::SnapshotBehavior snapshot_behavior) {
1482 EmitCallToStub(stub, snapshot_behavior);
1483 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs, /*env=*/nullptr);
1484}
1485
1486static const Code& StubEntryFor(const ICData& ic_data, bool optimized) {
1487 switch (ic_data.NumArgsTested()) {
1488 case 1:
1489 if (ic_data.is_tracking_exactness()) {
1490 if (optimized) {
1491 return StubCode::OneArgOptimizedCheckInlineCacheWithExactnessCheck();
1492 } else {
1493 return StubCode::OneArgCheckInlineCacheWithExactnessCheck();
1494 }
1495 }
1496 return optimized ? StubCode::OneArgOptimizedCheckInlineCache()
1497 : StubCode::OneArgCheckInlineCache();
1498 case 2:
1499 ASSERT(!ic_data.is_tracking_exactness());
1500 return optimized ? StubCode::TwoArgsOptimizedCheckInlineCache()
1501 : StubCode::TwoArgsCheckInlineCache();
1502 default:
1503 ic_data.Print();
1504 UNIMPLEMENTED();
1505 return Code::Handle();
1506 }
1507}
1508
1511 LocationSummary* locs,
1512 const ICData& ic_data_in,
1513 Code::EntryKind entry_kind,
1514 bool receiver_can_be_smi) {
1515 ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1516 if (FLAG_precompiled_mode) {
1517 ic_data = ic_data.AsUnaryClassChecks();
1518 EmitInstanceCallAOT(ic_data, deopt_id, source, locs, entry_kind,
1519 receiver_can_be_smi);
1520 return;
1521 }
1522 ASSERT(!ic_data.IsNull());
1523 if (is_optimizing() && (ic_data_in.NumberOfUsedChecks() == 0)) {
1524 // Emit IC call that will count and thus may need reoptimization at
1525 // function entry.
1526 ASSERT(may_reoptimize() || flow_graph().IsCompiledForOsr());
1527 EmitOptimizedInstanceCall(StubEntryFor(ic_data, /*optimized=*/true),
1528 ic_data, deopt_id, source, locs, entry_kind);
1529 return;
1530 }
1531
1532 if (is_optimizing()) {
1533 EmitMegamorphicInstanceCall(ic_data_in, deopt_id, source, locs);
1534 return;
1535 }
1536
1537 EmitInstanceCallJIT(StubEntryFor(ic_data, /*optimized=*/false), ic_data,
1538 deopt_id, source, locs, entry_kind);
1539}
1540
1543 const Function& function,
1544 ArgumentsInfo args_info,
1545 LocationSummary* locs,
1546 const ICData& ic_data_in,
1547 ICData::RebindRule rebind_rule,
1548 Code::EntryKind entry_kind) {
1549 const ICData& ic_data = ICData::ZoneHandle(ic_data_in.Original());
1550 const Array& arguments_descriptor = Array::ZoneHandle(
1551 zone(), ic_data.IsNull() ? args_info.ToArgumentsDescriptor()
1552 : ic_data.arguments_descriptor());
1553 ASSERT(ArgumentsDescriptor(arguments_descriptor).TypeArgsLen() ==
1554 args_info.type_args_len);
1555 ASSERT(ArgumentsDescriptor(arguments_descriptor).Count() ==
1556 args_info.count_without_type_args);
1557 ASSERT(ArgumentsDescriptor(arguments_descriptor).Size() ==
1558 args_info.size_without_type_args);
1559 // Force-optimized functions lack the deopt info which allows patching of
1560 // optimized static calls.
1561 if (is_optimizing() && (!ForcedOptimization() || FLAG_precompiled_mode)) {
1562 EmitOptimizedStaticCall(function, arguments_descriptor,
1563 args_info.size_with_type_args, deopt_id, source,
1564 locs, entry_kind);
1565 } else {
1566 ICData& call_ic_data = ICData::ZoneHandle(zone(), ic_data.ptr());
1567 if (call_ic_data.IsNull()) {
1568 const intptr_t kNumArgsChecked = 0;
1569 call_ic_data =
1570 GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
1571 kNumArgsChecked, rebind_rule)
1572 ->ptr();
1573 call_ic_data = call_ic_data.Original();
1574 }
1575 AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id, source);
1576 EmitUnoptimizedStaticCall(args_info.size_with_type_args, deopt_id, source,
1577 locs, call_ic_data, entry_kind);
1578 }
1579}
1580
1582 Register class_id_reg,
1583 const AbstractType& type,
1584 compiler::Label* is_instance_lbl,
1585 compiler::Label* is_not_instance_lbl) {
1586 assembler()->Comment("NumberTypeCheck");
1588 if (type.IsNumberType()) {
1589 args.Add(kDoubleCid);
1590 args.Add(kMintCid);
1591 } else if (type.IsIntType()) {
1592 args.Add(kMintCid);
1593 } else if (type.IsDoubleType()) {
1594 args.Add(kDoubleCid);
1595 }
1596 CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
1597}
1598
1600 Register class_id_reg,
1601 compiler::Label* is_instance_lbl,
1602 compiler::Label* is_not_instance_lbl) {
1603 assembler()->Comment("StringTypeCheck");
1605 args.Add(kOneByteStringCid);
1606 args.Add(kTwoByteStringCid);
1607 CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
1608}
1609
1611 Register class_id_reg,
1612 compiler::Label* is_instance_lbl) {
1613 assembler()->Comment("ListTypeCheck");
1614 COMPILE_ASSERT((kImmutableArrayCid == kArrayCid + 1) &&
1615 (kGrowableObjectArrayCid == kArrayCid + 2));
1616 CidRangeVector ranges;
1617 ranges.Add({kArrayCid, kGrowableObjectArrayCid});
1618 GenerateCidRangesCheck(assembler(), class_id_reg, ranges, is_instance_lbl);
1619}
1620
1622#if defined(INCLUDE_IL_PRINTER)
1623 char buffer[256];
1624 BufferFormatter f(buffer, sizeof(buffer));
1625 instr->PrintTo(&f);
1626 assembler()->Comment("%s", buffer);
1627#endif // defined(INCLUDE_IL_PRINTER)
1628}
1629
1631  // Only emit an edge counter if there is no goto at the end of the block,
1632 // except for the entry block.
1633 return FLAG_reorder_basic_blocks &&
1634 (!block->last_instruction()->IsGoto() || block->IsFunctionEntry());
1635}
1636
1637// Allocate a register that is not explicitly blocked.
1638static Register AllocateFreeRegister(bool* blocked_registers) {
1639 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1640 intptr_t regno = (i + kRegisterAllocationBias) % kNumberOfCpuRegisters;
1641 if (!blocked_registers[regno]) {
1642 blocked_registers[regno] = true;
1643 return static_cast<Register>(regno);
1644 }
1645 }
1646 UNREACHABLE();
1647 return kNoRegister;
1648}
1649
1650// Allocate a FPU register that is not explicitly blocked.
1651static FpuRegister AllocateFreeFpuRegister(bool* blocked_registers) {
1652 for (intptr_t regno = 0; regno < kNumberOfFpuRegisters; regno++) {
1653 if (!blocked_registers[regno]) {
1654 blocked_registers[regno] = true;
1655 return static_cast<FpuRegister>(regno);
1656 }
1657 }
1658 UNREACHABLE();
1659 return kNoFpuRegister;
1660}
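
// A minimal sketch of the "blocked registers" scheme used below: registers
// that are fixed in the LocationSummary (or reserved by the VM) are marked
// blocked first, and every unallocated location then takes the next free
// register, scanning with a bias. Standalone toy, not the VM's Location API.
#include <vector>

static int SketchTakeFreeRegister(std::vector<bool>* blocked, int bias) {
  const int n = static_cast<int>(blocked->size());
  for (int i = 0; i < n; ++i) {
    const int regno = (i + bias) % n;  // Mirrors kRegisterAllocationBias.
    if (!(*blocked)[regno]) {
      (*blocked)[regno] = true;
      return regno;
    }
  }
  return -1;  // No free register; the real allocator treats this as fatal.
}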
1661
1662void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
1664 instr->InitializeLocationSummary(zone(), false); // Not optimizing.
1665
1666 LocationSummary* locs = instr->locs();
1667
1668 bool blocked_registers[kNumberOfCpuRegisters];
1669 bool blocked_fpu_registers[kNumberOfFpuRegisters];
1670
1671 // Block all registers globally reserved by the assembler, etc and mark
1672 // the rest as free.
1673 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1674 blocked_registers[i] = (kDartAvailableCpuRegs & (1 << i)) == 0;
1675 }
1676 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1677 blocked_fpu_registers[i] = false;
1678 }
1679
1680 // Mark all fixed input, temp and output registers as used.
1681 for (intptr_t i = 0; i < locs->input_count(); i++) {
1682 Location loc = locs->in(i);
1683 if (loc.IsRegister()) {
1684 // Check that a register is not specified twice in the summary.
1685 ASSERT(!blocked_registers[loc.reg()]);
1686 blocked_registers[loc.reg()] = true;
1687 } else if (loc.IsFpuRegister()) {
1688 // Check that a register is not specified twice in the summary.
1689 const FpuRegister fpu_reg = loc.fpu_reg();
1690 ASSERT((fpu_reg >= 0) && (fpu_reg < kNumberOfFpuRegisters));
1691 ASSERT(!blocked_fpu_registers[fpu_reg]);
1692 blocked_fpu_registers[fpu_reg] = true;
1693 }
1694 }
1695
1696 for (intptr_t i = 0; i < locs->temp_count(); i++) {
1697 Location loc = locs->temp(i);
1698 if (loc.IsRegister()) {
1699 // Check that a register is not specified twice in the summary.
1700 ASSERT(!blocked_registers[loc.reg()]);
1701 blocked_registers[loc.reg()] = true;
1702 } else if (loc.IsFpuRegister()) {
1703 // Check that a register is not specified twice in the summary.
1704 const FpuRegister fpu_reg = loc.fpu_reg();
1705 ASSERT((fpu_reg >= 0) && (fpu_reg < kNumberOfFpuRegisters));
1706 ASSERT(!blocked_fpu_registers[fpu_reg]);
1707 blocked_fpu_registers[fpu_reg] = true;
1708 }
1709 }
1710
1711 // Connect input with peephole output for some special cases. All other
1712 // cases are handled by simply allocating registers and generating code.
1713 if (top_of_stack_ != nullptr) {
1714 const intptr_t p = locs->input_count() - 1;
1715 Location peephole = top_of_stack_->locs()->out(0);
1716 if ((instr->RequiredInputRepresentation(p) == kTagged) &&
1717 (locs->in(p).IsUnallocated() || locs->in(p).IsConstant())) {
1718 // If input is unallocated, match with an output register, if set. Also,
1719 // if input is a direct constant, but the peephole output is a register,
1720 // use that register to avoid wasting the already generated code.
1721 if (peephole.IsRegister() && !blocked_registers[peephole.reg()]) {
1722 locs->set_in(p, Location::RegisterLocation(peephole.reg()));
1723 blocked_registers[peephole.reg()] = true;
1724 }
1725 }
1726 }
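// Rough sketch of the peephole case above: if the previous instruction left
// its tagged result in a register (say R0) instead of pushing it, and this
// instruction's last input is unallocated or a constant, that input is pinned
// to R0 so the already emitted code is reused and the value does not have to
// travel through the stack.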
1727
1728 if (locs->out(0).IsRegister()) {
1729 // Fixed output registers are allowed to overlap with
1730 // temps and inputs.
1731 blocked_registers[locs->out(0).reg()] = true;
1732 } else if (locs->out(0).IsFpuRegister()) {
1733 // Fixed output registers are allowed to overlap with
1734 // temps and inputs.
1735 blocked_fpu_registers[locs->out(0).fpu_reg()] = true;
1736 }
1737
1738 // Allocate all unallocated input locations.
1739 ASSERT(!instr->IsMoveArgument());
1740 Register fpu_unboxing_temp = kNoRegister;
1741 for (intptr_t i = locs->input_count() - 1; i >= 0; i--) {
1742 Location loc = locs->in(i);
1743 Register reg = kNoRegister;
1744 FpuRegister fpu_reg = kNoFpuRegister;
1745 if (loc.IsRegister()) {
1746 reg = loc.reg();
1747 } else if (loc.IsFpuRegister()) {
1748 fpu_reg = loc.fpu_reg();
1749 } else if (loc.IsUnallocated()) {
1750 switch (loc.policy()) {
1754 case Location::kAny:
1755 reg = AllocateFreeRegister(blocked_registers);
1756 locs->set_in(i, Location::RegisterLocation(reg));
1757 break;
1758 case Location::kRequiresFpuRegister:
1759 fpu_reg = AllocateFreeFpuRegister(blocked_fpu_registers);
1760 locs->set_in(i, Location::FpuRegisterLocation(fpu_reg));
1761 break;
1762 default:
1763 UNREACHABLE();
1764 }
1765 }
1766
1767 if (fpu_reg != kNoFpuRegister) {
1768 ASSERT(reg == kNoRegister);
1769 // Allocate temporary CPU register for unboxing, but only once.
1770 if (fpu_unboxing_temp == kNoRegister) {
1771 fpu_unboxing_temp = AllocateFreeRegister(blocked_registers);
1772 }
1773 reg = fpu_unboxing_temp;
1774 }
1775
1776 ASSERT(reg != kNoRegister || loc.IsConstant());
1777
1778 // Inputs are consumed from the simulated frame (or a peephole push/pop).
1779 // In case of a call argument we leave it until the call instruction.
1780 if (top_of_stack_ != nullptr) {
1781 if (!loc.IsConstant()) {
1782 // Move the peephole's top-of-stack location into the required input.
1783 // None of these moves needs a temporary register allocator (hence nullptr).
1784 EmitMove(Location::RegisterLocation(reg), top_of_stack_->locs()->out(0),
1785 nullptr);
1786 }
1787 top_of_stack_ = nullptr; // consumed!
1788 } else if (loc.IsConstant()) {
1789 assembler()->Drop(1);
1790 } else {
1791 assembler()->PopRegister(reg);
1792 }
1793 if (!loc.IsConstant()) {
1794 switch (instr->RequiredInputRepresentation(i)) {
1795 case kUnboxedDouble:
1796 ASSERT(fpu_reg != kNoFpuRegister);
1797 ASSERT(instr->SpeculativeModeOfInput(i) ==
1800 fpu_reg, reg,
1801 compiler::target::Double::value_offset() - kHeapObjectTag);
1802 break;
1803 case kUnboxedFloat32x4:
1804 case kUnboxedFloat64x2:
1805 ASSERT(fpu_reg != kNoFpuRegister);
1806 ASSERT(instr->SpeculativeModeOfInput(i) ==
1809 fpu_reg, reg,
1810 compiler::target::Float32x4::value_offset() - kHeapObjectTag);
1811 break;
1812 default:
1813 // No automatic unboxing for other representations.
1814 ASSERT(fpu_reg == kNoFpuRegister);
1815 break;
1816 }
1817 }
1818 }
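// Note on the loop above: inputs live on the simulated expression stack in
// push order, so they are visited from the last input down to the first and
// popped (or, for constants, dropped) in reverse, restoring each value into
// its assigned register before the instruction itself is emitted.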
1819
1820 // Allocate all unallocated temp locations.
1821 for (intptr_t i = 0; i < locs->temp_count(); i++) {
1822 Location loc = locs->temp(i);
1823 if (loc.IsUnallocated()) {
1824 switch (loc.policy()) {
1825 case Location::kRequiresRegister:
1826 loc = Location::RegisterLocation(
1827 AllocateFreeRegister(blocked_registers));
1828 locs->set_temp(i, loc);
1829 break;
1830 case Location::kRequiresFpuRegister:
1831 loc = Location::FpuRegisterLocation(
1832 AllocateFreeFpuRegister(blocked_fpu_registers));
1833 locs->set_temp(i, loc);
1834 break;
1835 default:
1836 UNREACHABLE();
1837 }
1838 }
1839 }
1840
1841 Location result_location = locs->out(0);
1842 if (result_location.IsUnallocated()) {
1843 switch (result_location.policy()) {
1844 case Location::kAny:
1848 result_location =
1849 Location::RegisterLocation(AllocateFreeRegister(blocked_registers));
1850 break;
1851 case Location::kSameAsFirstInput:
1852 result_location = locs->in(0);
1853 break;
1854 case Location::kRequiresFpuRegister:
1855 result_location = Location::FpuRegisterLocation(
1856 AllocateFreeFpuRegister(blocked_fpu_registers));
1857 break;
1859 // Only available in optimized mode.
1861 UNREACHABLE();
1862 }
1863 locs->set_out(0, result_location);
1864 }
1865}
1866
1868 intptr_t deopt_id,
1869 const String& target_name,
1870 const Array& arguments_descriptor,
1871 intptr_t num_args_tested,
1872 const AbstractType& receiver_type,
1873 const Function& binary_smi_target) {
1874 if ((deopt_id_to_ic_data_ != nullptr) &&
1875 ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
1876 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1877 ASSERT(res->deopt_id() == deopt_id);
1878 ASSERT(res->target_name() == target_name.ptr());
1879 ASSERT(res->NumArgsTested() == num_args_tested);
1880 ASSERT(res->TypeArgsLen() ==
1881 ArgumentsDescriptor(arguments_descriptor).TypeArgsLen());
1882 ASSERT(!res->is_static_call());
1883 ASSERT(res->receivers_static_type() == receiver_type.ptr());
1884 return res;
1885 }
1886
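// No usable ICData was found for this deopt id, so build a fresh one. When a
// binary Smi target is supplied, the ICData is pre-populated with a
// (Smi, Smi) check so the common Smi/Smi case of a binary operation starts
// out with a known target.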
1887 auto& ic_data = ICData::ZoneHandle(zone());
1888 if (!binary_smi_target.IsNull()) {
1889 ASSERT(num_args_tested == 2);
1890 ASSERT(!binary_smi_target.IsNull());
1891 GrowableArray<intptr_t> cids(num_args_tested);
1892 cids.Add(kSmiCid);
1893 cids.Add(kSmiCid);
1894 ic_data = ICData::NewWithCheck(parsed_function().function(), target_name,
1895 arguments_descriptor, deopt_id,
1896 num_args_tested, ICData::kInstance, &cids,
1897 binary_smi_target, receiver_type);
1898 } else {
1899 ic_data = ICData::New(parsed_function().function(), target_name,
1900 arguments_descriptor, deopt_id, num_args_tested,
1901 ICData::kInstance, receiver_type);
1902 }
1903
1904 if (deopt_id_to_ic_data_ != nullptr) {
1905 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1906 }
1907 ASSERT(!ic_data.is_static_call());
1908 return &ic_data;
1909}
1910
1912 intptr_t deopt_id,
1913 const Function& target,
1914 const Array& arguments_descriptor,
1915 intptr_t num_args_tested,
1916 ICData::RebindRule rebind_rule) {
1917 if ((deopt_id_to_ic_data_ != nullptr) &&
1918 ((*deopt_id_to_ic_data_)[deopt_id] != nullptr)) {
1919 const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
1920 ASSERT(res->deopt_id() == deopt_id);
1921 ASSERT(res->target_name() == target.name());
1922 ASSERT(res->NumArgsTested() == num_args_tested);
1923 ASSERT(res->TypeArgsLen() ==
1924 ArgumentsDescriptor(arguments_descriptor).TypeArgsLen());
1925 ASSERT(res->is_static_call());
1926 return res;
1927 }
1928
1929 const auto& ic_data = ICData::ZoneHandle(
1931 arguments_descriptor, deopt_id,
1932 num_args_tested, rebind_rule));
1933 if (deopt_id_to_ic_data_ != nullptr) {
1934 (*deopt_id_to_ic_data_)[deopt_id] = &ic_data;
1935 }
1936 return &ic_data;
1937}
1938
1939intptr_t FlowGraphCompiler::GetOptimizationThreshold() const {
1940 intptr_t threshold;
1941 if (is_optimizing()) {
1942 threshold = FLAG_reoptimization_counter_threshold;
1943 } else if (parsed_function_.function().IsIrregexpFunction()) {
1944 threshold = FLAG_regexp_optimization_counter_threshold;
1945 } else {
1946 const auto configured_optimization_counter_threshold =
1948
1949 const intptr_t basic_blocks = flow_graph().preorder().length();
1950 ASSERT(basic_blocks > 0);
1951 threshold = FLAG_optimization_counter_scale * basic_blocks +
1952 FLAG_min_optimization_counter_threshold;
1953 if (threshold > configured_optimization_counter_threshold) {
1954 threshold = configured_optimization_counter_threshold;
1955 }
1956 }
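// Illustration of the scaling above (numbers are made up, not the flag
// defaults): with a scale of 100, a minimum of 1000 and 7 basic blocks the
// computed threshold is 100 * 7 + 1000 = 1700, and it is then clamped to the
// configured optimization counter threshold if that value is smaller.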
1957
1958 // Threshold = 0 doesn't make sense because we increment the counter before
1959 // testing against the threshold. Perhaps we could interpret it to mean
1960 // "generate optimized code immediately without unoptimized compilation
1961 // first", but this isn't supported in our pipeline because there would be no
1962 // code for the optimized code to deoptimize into.
1963 if (threshold == 0) threshold = 1;
1964
1965 // See Compiler::CanOptimizeFunction. In short, we have to allow the
1966 // unoptimized code to run at least once to prevent an infinite compilation
1967 // loop.
1968 if (threshold == 1 && parsed_function().function().HasBreakpoint()) {
1969 threshold = 2;
1970 }
1971
1972 return threshold;
1973}
1974
1976 switch (rep) {
1977 case kUnboxedFloat:
1978 case kUnboxedDouble:
1979 return double_class();
1980 case kUnboxedFloat32x4:
1981 return float32x4_class();
1982 case kUnboxedFloat64x2:
1983 return float64x2_class();
1984 case kUnboxedInt32x4:
1985 return int32x4_class();
1986 case kUnboxedInt64:
1987 return mint_class();
1988 default:
1989 UNREACHABLE();
1990 return Class::ZoneHandle();
1991 }
1992}
1993
1995 code_source_map_builder_->BeginCodeSourceRange(assembler()->CodeSize(),
1996 source);
1997}
1998
2000 code_source_map_builder_->EndCodeSourceRange(assembler()->CodeSize(), source);
2001}
2002
2004 intptr_t cid,
2005 const String& selector,
2006 const Array& args_desc_array) {
2007 Zone* zone = Thread::Current()->zone();
2008
2009 ArgumentsDescriptor args_desc(args_desc_array);
2010
2012 if (!LookupMethodFor(cid, selector, args_desc, &fn)) return nullptr;
2013
2014 CallTargets* targets = new (zone) CallTargets(zone);
2015 targets->Add(new (zone) TargetInfo(cid, cid, &fn, /* count = */ 1,
2017
2018 return targets;
2019}
2020
2022 const String& name,
2023 const ArgumentsDescriptor& args_desc,
2024 Function* fn_return,
2025 bool* class_is_abstract_return) {
2026 auto thread = Thread::Current();
2027 auto zone = thread->zone();
2028 auto class_table = thread->isolate_group()->class_table();
2029 if (class_id < 0) return false;
2030 if (class_id >= class_table->NumCids()) return false;
2031
2032 ClassPtr raw_class = class_table->At(class_id);
2033 if (raw_class == nullptr) return false;
2034 Class& cls = Class::Handle(zone, raw_class);
2035 if (cls.IsNull()) return false;
2036 if (!cls.is_finalized()) return false;
2037 if (Array::Handle(cls.current_functions()).IsNull()) return false;
2038
2039 if (class_is_abstract_return != nullptr) {
2040 *class_is_abstract_return = cls.is_abstract();
2041 }
2042 const bool allow_add = false;
2043 Function& target_function =
2045 cls, name, args_desc, allow_add));
2046 if (target_function.IsNull()) return false;
2047 *fn_return = target_function.ptr();
2048 return true;
2049}
2050
2052 const PolymorphicInstanceCallInstr* call,
2053 const CallTargets& targets,
2054 ArgumentsInfo args_info,
2055 intptr_t deopt_id,
2057 LocationSummary* locs,
2058 bool complete,
2059 intptr_t total_ic_calls,
2060 bool receiver_can_be_smi) {
2061 ASSERT(call != nullptr);
2062 if (FLAG_polymorphic_with_deopt) {
2063 compiler::Label* deopt =
2064 AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
2066 EmitTestAndCall(targets, call->function_name(), args_info,
2067 deopt, // No cid match.
2068 &ok, // Found cid.
2069 deopt_id, source, locs, complete, total_ic_calls,
2070 call->entry_kind());
2071 assembler()->Bind(&ok);
2072 } else {
2073 if (complete) {
2075 EmitTestAndCall(targets, call->function_name(), args_info,
2076 nullptr, // No cid match.
2077 &ok, // Found cid.
2078 deopt_id, source, locs, true, total_ic_calls,
2079 call->entry_kind());
2080 assembler()->Bind(&ok);
2081 } else {
2082 const ICData& unary_checks =
2083 ICData::ZoneHandle(zone(), call->ic_data()->AsUnaryClassChecks());
2084 EmitInstanceCallAOT(unary_checks, deopt_id, source, locs,
2085 call->entry_kind(), receiver_can_be_smi);
2086 }
2087 }
2088}
2089
2090#define __ assembler()->
2091
2093 if (!is_optimizing()) {
2094 __ Drop(count);
2095 }
2096}
2097
2098void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
2099 const GrowableArray<intptr_t>& class_ids,
2100 compiler::Label* is_equal_lbl,
2101 compiler::Label* is_not_equal_lbl) {
2102 for (const auto& id : class_ids) {
2103 __ CompareImmediate(class_id_reg, id);
2104 __ BranchIf(EQUAL, is_equal_lbl);
2105 }
2106 __ Jump(is_not_equal_lbl);
2107}
2108
2110 const String& function_name,
2111 ArgumentsInfo args_info,
2112 compiler::Label* failed,
2113 compiler::Label* match_found,
2114 intptr_t deopt_id,
2115 const InstructionSource& source_index,
2116 LocationSummary* locs,
2117 bool complete,
2118 intptr_t total_ic_calls,
2119 Code::EntryKind entry_kind) {
2121 ASSERT(complete || (failed != nullptr)); // Complete calls can't fail.
2122
2123 const Array& arguments_descriptor =
2125 EmitTestAndCallLoadReceiver(args_info.count_without_type_args,
2126 arguments_descriptor);
2127
2128 const int kNoCase = -1;
2129 int smi_case = kNoCase;
2130 int which_case_to_skip = kNoCase;
2131
2132 const int length = targets.length();
2133 ASSERT(length > 0);
2134 int non_smi_length = length;
2135
2136 // Find out if one of the classes in one of the cases is the Smi class. We
2137 // will be handling that specially.
2138 for (int i = 0; i < length; i++) {
2139 const intptr_t start = targets[i].cid_start;
2140 if (start > kSmiCid) continue;
2141 const intptr_t end = targets[i].cid_end;
2142 if (end >= kSmiCid) {
2143 smi_case = i;
2144 if (start == kSmiCid && end == kSmiCid) {
2145 // If this case has only the Smi class then we won't need to emit it at
2146 // all later.
2147 which_case_to_skip = i;
2148 non_smi_length--;
2149 }
2150 break;
2151 }
2152 }
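// Reading of the loop above: the first case whose cid range reaches kSmiCid
// is remembered as smi_case; if that range covers exactly [kSmiCid, kSmiCid]
// the whole case can be folded into the dedicated Smi test below and skipped
// during the non-Smi cid dispatch.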
2153
2154 if (smi_case != kNoCase) {
2155 compiler::Label after_smi_test;
2156 // If the call is complete and there are no other possible receiver
2157 // classes, then the receiver can only be a Smi value and we don't need
2158 // to check whether it is a Smi.
2159 if (!(complete && non_smi_length == 0)) {
2160 EmitTestAndCallSmiBranch(non_smi_length == 0 ? failed : &after_smi_test,
2161 /* jump_if_smi= */ false);
2162 }
2163
2164 // Do not use the code from the function, but let the code be patched so
2165 // that we can record the outgoing edges to other code.
2166 const Function& function = *targets.TargetAt(smi_case)->target;
2167 GenerateStaticDartCall(deopt_id, source_index,
2168 UntaggedPcDescriptors::kOther, locs, function,
2169 entry_kind);
2171 if (match_found != nullptr) {
2172 __ Jump(match_found);
2173 }
2174 __ Bind(&after_smi_test);
2175 } else {
2176 if (!complete) {
2177 // Smi is not a valid class.
2178 EmitTestAndCallSmiBranch(failed, /* jump_if_smi = */ true);
2179 }
2180 }
2181
2182 if (non_smi_length == 0) {
2183 // If non_smi_length is 0 then only a Smi check was needed; the Smi check
2184 // above will fail if there was only one check and receiver is not Smi.
2185 return;
2186 }
2187
2188 bool add_megamorphic_call = false;
2189 int bias = 0;
2190
2191 // Value is not Smi.
2192 EmitTestAndCallLoadCid(EmitTestCidRegister());
2193
2194 int last_check = which_case_to_skip == length - 1 ? length - 2 : length - 1;
2195
2196 for (intptr_t i = 0; i < length; i++) {
2197 if (i == which_case_to_skip) continue;
2198 const bool is_last_check = (i == last_check);
2199 const int count = targets.TargetAt(i)->count;
2200 if (!is_last_check && !complete && count < (total_ic_calls >> 5)) {
2201 // This case is hit too rarely to be worth writing class-id checks inline
2202 // for. Note that we can't do this for calls with only one target because
2203 // the type propagator may have made use of that and expects a deopt if
2204 // a new class is seen at this call site. See IsMonomorphic.
2205 add_megamorphic_call = true;
2206 break;
2207 }
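// For scale: total_ic_calls >> 5 is 1/32 (roughly 3%) of the observed calls,
// so a non-final case seen less often than that is handled by a single
// megamorphic call instead of an inline class-id check.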
2208 compiler::Label next_test;
2209 if (!complete || !is_last_check) {
2211 is_last_check ? failed : &next_test,
2212 EmitTestCidRegister(), targets[i], bias,
2213 /*jump_on_miss =*/true);
2214 }
2215 // Do not use the code from the function, but let the code be patched so
2216 // that we can record the outgoing edges to other code.
2217 const Function& function = *targets.TargetAt(i)->target;
2218 GenerateStaticDartCall(deopt_id, source_index,
2219 UntaggedPcDescriptors::kOther, locs, function,
2220 entry_kind);
2222 if (!is_last_check || add_megamorphic_call) {
2223 __ Jump(match_found);
2224 }
2225 __ Bind(&next_test);
2226 }
2227 if (add_megamorphic_call) {
2228 EmitMegamorphicInstanceCall(function_name, arguments_descriptor, deopt_id,
2229 source_index, locs);
2230 }
2231}
2232
2234 const Class& type_class,
2235 compiler::Label* is_subtype) {
2237 if (hi != nullptr) {
2238 const CidRangeVector& ranges =
2239 hi->SubtypeRangesForClass(type_class,
2240 /*include_abstract=*/false,
2241 /*exclude_null=*/false);
2242 if (ranges.length() <= kMaxNumberOfCidRangesToTest) {
2243 GenerateCidRangesCheck(assembler(), class_id_reg, ranges, is_subtype);
2244 return true;
2245 }
2246 }
2247
2248 // We don't have cid-ranges for subclasses, so we'll just test against the
2249 // class directly if it's non-abstract.
2250 if (!type_class.is_abstract()) {
2251 __ CompareImmediate(class_id_reg, type_class.id());
2252 __ BranchIf(EQUAL, is_subtype);
2253 }
2254 return false;
2255}
2256
2258 compiler::Assembler* assembler,
2259 Register class_id_reg,
2260 const CidRangeVector& cid_ranges,
2261 compiler::Label* inside_range_lbl,
2262 compiler::Label* outside_range_lbl,
2263 bool fall_through_if_inside) {
2264 // If there are no valid class ranges, the check will fail. If we are
2265 // supposed to fall-through in the positive case, we'll explicitly jump to
2266 // the [outside_range_lbl].
2267 if (cid_ranges.is_empty()) {
2268 if (fall_through_if_inside) {
2269 assembler->Jump(outside_range_lbl);
2270 }
2271 return false;
2272 }
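// The loop below chains the range checks so that only the final one may need
// to branch on a miss: earlier ranges jump to [inside_range_lbl] on a hit and
// simply fall through to the next range check otherwise.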
2273
2274 int bias = 0;
2275 for (intptr_t i = 0; i < cid_ranges.length(); ++i) {
2276 const CidRangeValue& range = cid_ranges[i];
2278 const bool last_round = i == (cid_ranges.length() - 1);
2279
2280 compiler::Label* jump_label = last_round && fall_through_if_inside
2281 ? outside_range_lbl
2282 : inside_range_lbl;
2283 const bool jump_on_miss = last_round && fall_through_if_inside;
2284
2285 bias = EmitTestAndCallCheckCid(assembler, jump_label, class_id_reg, range,
2286 bias, jump_on_miss);
2287 }
2288 return bias != 0;
2289}
2290
2292 compiler::Label* label,
2293 Register class_id_reg,
2294 const CidRangeValue& range,
2295 int bias,
2296 bool jump_on_miss) {
2297 const intptr_t cid_start = range.cid_start;
2298 if (range.IsSingleCid()) {
2299 assembler->CompareImmediate(class_id_reg, cid_start - bias);
2300 assembler->BranchIf(jump_on_miss ? NOT_EQUAL : EQUAL, label);
2301 } else {
2302 assembler->AddImmediate(class_id_reg, bias - cid_start);
2303 bias = cid_start;
2304 assembler->CompareImmediate(class_id_reg, range.Extent());
2306 label);
2307 }
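// Worked example (cids are illustrative): with ranges [10..15] then [40..40]
// and an initial bias of 0, the first check adds -10 to class_id_reg and
// compares against Extent() == 5, returning a bias of 10; the second check
// can then compare the already adjusted register against 40 - 10 = 30 without
// reloading the class id.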
2308 return bias;
2309}
2310
2333
2334// Generates function type check.
2335//
2336// See [GenerateInlineInstanceof] for calling convention.
2337SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
2339 const AbstractType& type,
2340 compiler::Label* is_instance_lbl,
2341 compiler::Label* is_not_instance_lbl) {
2342 __ Comment("FunctionTypeTest");
2343
2344 __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
2345 // Uninstantiated type class is known at compile time, but the type
2346 // arguments are determined at runtime by the instantiator(s).
2347 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeSixArgs,
2348 is_instance_lbl, is_not_instance_lbl);
2349}
2350
2351// Inputs (from TypeTestABI):
2352// - kInstanceReg : instance to test against.
2353// - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
2354// - kFunctionTypeArgumentsReg : function type arguments (if needed).
2355//
2356// Preserves all input registers.
2357//
2358// Clobbers kDstTypeReg, kSubtypeTestCacheReg and kSubtypeTestCacheResultReg at
2359// a minimum, may clobber additional registers depending on architecture. See
2360// GenerateSubtypeNTestCacheStub for architecture-specific registers that should
2361// be saved across a subtype test cache stub call.
2362//
2363// Note that this inlined code must be followed by the runtime_call code, as it
2364// may fall through to it. Otherwise, this inline code will jump to the label
2365// is_instance or to the label is_not_instance.
2366SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
2367 const InstructionSource& source,
2368 const AbstractType& type,
2369 compiler::Label* is_instance_lbl,
2370 compiler::Label* is_not_instance_lbl) {
2371 ASSERT(!type.IsTopTypeForInstanceOf());
2372 __ Comment("InlineInstanceof");
2373 if (type.IsObjectType()) { // Must be non-nullable.
2374 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2375 // All non-null objects are instances of non-nullable Object.
2376 __ BranchIf(NOT_EQUAL, is_instance_lbl);
2377 __ Jump(is_not_instance_lbl);
2378 return SubtypeTestCache::null(); // No need for an STC.
2379 }
2380 if (type.IsFunctionType()) {
2381 return GenerateFunctionTypeTest(source, type, is_instance_lbl,
2382 is_not_instance_lbl);
2383 }
2384 if (type.IsRecordType()) {
2385 // Subtype test cache stubs are not useful for record types and the results
2386 // of subtype checks are never recorded in the cache.
2387 // Fall through to runtime.
2389 }
2390
2391 if (type.IsInstantiated()) {
2392 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
2393 // A class equality check is only applicable with a dst type (not a
2394 // function type) of a non-parameterized class or with a raw dst type of
2395 // a parameterized class.
2396 if (type_class.NumTypeArguments() > 0) {
2397 return GenerateInstantiatedTypeWithArgumentsTest(
2398 source, type, is_instance_lbl, is_not_instance_lbl);
2399 // Fall through to runtime call.
2400 }
2401 const bool has_fall_through = GenerateInstantiatedTypeNoArgumentsTest(
2402 source, type, is_instance_lbl, is_not_instance_lbl);
2403 if (has_fall_through) {
2404 // If test non-conclusive so far, try the inlined type-test cache.
2405 // 'type' is known at compile time.
2406 return GenerateSubtype1TestCacheLookup(
2407 source, type_class, is_instance_lbl, is_not_instance_lbl);
2408 } else {
2409 return SubtypeTestCache::null();
2410 }
2411 }
2412 return GenerateUninstantiatedTypeTest(source, type, is_instance_lbl,
2413 is_not_instance_lbl);
2414}
2415
2416FlowGraphCompiler::TypeTestStubKind
2417FlowGraphCompiler::GetTypeTestStubKindForTypeParameter(
2418 const TypeParameter& type_param) {
2419 // If it's guaranteed, by type-parameter bound, that the type parameter will
2420 // never have a value of a function type, then we can safely do a 4-type
2421 // test instead of a 6-type test.
2422 AbstractType& bound = AbstractType::Handle(zone(), type_param.bound());
2423 bound = bound.UnwrapFutureOr();
2424 return !bound.IsTopTypeForSubtyping() && !bound.IsObjectType() &&
2425 !bound.IsDartFunctionType() && bound.IsType()
2426 ? TypeTestStubKind::kTestTypeFourArgs
2427 : TypeTestStubKind::kTestTypeSixArgs;
2428}
2429
2430// Generates quick and subtype cache tests when only the instance need be
2431 // checked. Jumps to 'is_instance' or 'is_not_instance', respectively, if any
2432 // generated check is conclusive; otherwise falls through when further checking
2433 // is required.
2434//
2435// See [GenerateInlineInstanceof] for calling convention.
2436SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
2437 const InstructionSource& source,
2438 const Class& type_class,
2439 compiler::Label* is_instance_lbl,
2440 compiler::Label* is_not_instance_lbl) {
2441 // If the type being tested is non-nullable Object, we are in NNBD strong
2442 // mode, since top types do not reach here. In this case, testing the
2443 // superclass of a null instance yields a wrong result (as the Null class
2444 // extends Object).
2445 ASSERT(!type_class.IsObjectClass());
2446 __ Comment("Subtype1TestCacheLookup");
2447#if defined(DEBUG)
2448 compiler::Label ok;
2449 __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &ok);
2450 __ Breakpoint();
2451 __ Bind(&ok);
2452#endif
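// Quick sketch of the fast path below: load the class of the instance, fetch
// its super type, and if that super type's class is exactly [type_class] the
// instance is trivially a subtype; everything else falls back to the
// one-argument subtype test cache stub.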
2453 // We don't use TypeTestABI::kScratchReg for the first scratch register as
2454 // it is not defined on IA32. Instead, we use the subtype test cache
2455 // register, as it is clobbered by the subtype test cache stub call anyway.
2456 const Register kScratch1Reg = TypeTestABI::kSubtypeTestCacheReg;
2457#if defined(TARGET_ARCH_IA32)
2458 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2459 // Instead, we pick another TypeTestABI register and push/pop it around
2460 // the uses of the second scratch register.
2461 const Register kScratch2Reg = TypeTestABI::kDstTypeReg;
2462 __ PushRegister(kScratch2Reg);
2463#else
2464 // We can use TypeTestABI::kScratchReg for the second scratch register, as
2465 // IA32 is handled separately.
2466 const Register kScratch2Reg = TypeTestABI::kScratchReg;
2467#endif
2468 static_assert(kScratch1Reg != kScratch2Reg,
2469 "Scratch registers must be distinct");
2470 // Check immediate superclass equality.
2471 __ LoadClassId(kScratch2Reg, TypeTestABI::kInstanceReg);
2472 __ LoadClassById(kScratch1Reg, kScratch2Reg);
2473#if defined(TARGET_ARCH_IA32)
2474 // kScratch2 is no longer used, so restore it.
2475 __ PopRegister(kScratch2Reg);
2476#endif
2477 __ LoadCompressedFieldFromOffset(
2478 kScratch1Reg, kScratch1Reg, compiler::target::Class::super_type_offset());
2479 // Check for a null super type. Instances whose class has a null super type
2480 // can only be an instance of top types or of non-nullable Object, but this
2481 // method is not called for those types, so the object cannot be an instance.
2482 __ CompareObject(kScratch1Reg, Object::null_object());
2483 __ BranchIf(EQUAL, is_not_instance_lbl);
2484 __ LoadTypeClassId(kScratch1Reg, kScratch1Reg);
2485 __ CompareImmediate(kScratch1Reg, type_class.id());
2486 __ BranchIf(EQUAL, is_instance_lbl);
2487
2488 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeOneArg,
2489 is_instance_lbl, is_not_instance_lbl);
2490}
2491
2492// Generates quick and subtype cache tests for an instantiated generic type.
2493 // Jumps to 'is_instance' or 'is_not_instance', respectively, if any generated
2494 // check is conclusive; otherwise falls through when further checking is required.
2495//
2496// See [GenerateInlineInstanceof] for calling convention.
2497SubtypeTestCachePtr
2498FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
2499 const InstructionSource& source,
2500 const AbstractType& type,
2501 compiler::Label* is_instance_lbl,
2502 compiler::Label* is_not_instance_lbl) {
2503 __ Comment("InstantiatedTypeWithArgumentsTest");
2504 ASSERT(type.IsInstantiated());
2505 ASSERT(!type.IsFunctionType());
2506 ASSERT(!type.IsRecordType());
2507 ASSERT(type.IsType());
2508 const Class& type_class = Class::ZoneHandle(zone(), type.type_class());
2509 ASSERT(type_class.NumTypeArguments() > 0);
2510 const Type& smi_type = Type::Handle(zone(), Type::SmiType());
2511 const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
2512 __ BranchIfSmi(TypeTestABI::kInstanceReg,
2513 smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
2514
2515 const TypeArguments& type_arguments =
2516 TypeArguments::ZoneHandle(zone(), Type::Cast(type).arguments());
2517 const bool is_raw_type = type_arguments.IsNull() ||
2518 type_arguments.IsRaw(0, type_arguments.Length());
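// Illustrative example: when testing against List<dynamic> the type argument
// vector is raw, so only class ids need to be examined (with the one-argument
// cache as fallback); when testing against List<int> the cid alone is not
// conclusive and the two-argument subtype test cache, which also consults the
// instance's type arguments, is used instead.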
2519 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2520 // Instead, we use the subtype test cache register, as it is clobbered by the
2521 // subtype test cache stub call anyway.
2522 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2523 if (is_raw_type) {
2524 // dynamic type argument, check only classes.
2525 __ LoadClassId(kScratchReg, TypeTestABI::kInstanceReg);
2526 __ CompareImmediate(kScratchReg, type_class.id());
2527 __ BranchIf(EQUAL, is_instance_lbl);
2528 // List is a very common case.
2529 if (IsListClass(type_class)) {
2530 GenerateListTypeCheck(kScratchReg, is_instance_lbl);
2531 }
2532 return GenerateSubtype1TestCacheLookup(source, type_class, is_instance_lbl,
2533 is_not_instance_lbl);
2534 }
2535 // If there is only one type argument, check whether it is a top type.
2536 if (type_arguments.Length() == 1) {
2537 const AbstractType& tp_argument =
2538 AbstractType::ZoneHandle(zone(), type_arguments.TypeAt(0));
2539 if (tp_argument.IsTopTypeForSubtyping()) {
2540 // Instance class test only necessary.
2541 return GenerateSubtype1TestCacheLookup(
2542 source, type_class, is_instance_lbl, is_not_instance_lbl);
2543 }
2544 }
2545
2546 // Regular subtype test cache involving instance's type arguments.
2547 return GenerateCallSubtypeTestStub(TypeTestStubKind::kTestTypeTwoArgs,
2548 is_instance_lbl, is_not_instance_lbl);
2549}
2550
2551// Generates quick and subtype cache tests for an instantiated non-generic type.
2552// Jumps to 'is_instance' or 'is_not_instance' respectively, if any generated
2553// check is conclusive. Returns whether the code will fall through for further
2554// type checking because the checks are not exhaustive.
2555//
2556// See [GenerateInlineInstanceof] for calling convention.
2557//
2558// Uses kScratchReg, so this implementation cannot be shared with IA32.
2559bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
2560 const InstructionSource& source,
2561 const AbstractType& type,
2562 compiler::Label* is_instance_lbl,
2563 compiler::Label* is_not_instance_lbl) {
2564 __ Comment("InstantiatedTypeNoArgumentsTest");
2565 ASSERT(type.IsInstantiated());
2566 ASSERT(!type.IsFunctionType());
2567 ASSERT(!type.IsRecordType());
2568 const Class& type_class = Class::Handle(zone(), type.type_class());
2569 ASSERT(type_class.NumTypeArguments() == 0);
2570
2571 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2572 // Instead, we use the subtype test cache register, as it is clobbered by the
2573 // subtype test cache stub call anyway.
2574 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2575
2576 const Class& smi_class = Class::Handle(zone(), Smi::Class());
2577 const bool smi_is_ok =
2578 Class::IsSubtypeOf(smi_class, Object::null_type_arguments(),
2580 __ BranchIfSmi(TypeTestABI::kInstanceReg,
2581 smi_is_ok ? is_instance_lbl : is_not_instance_lbl);
2582 __ LoadClassId(kScratchReg, TypeTestABI::kInstanceReg);
2583 // Bool interface can be implemented only by core class Bool.
2584 if (type.IsBoolType()) {
2585 __ CompareImmediate(kScratchReg, kBoolCid);
2586 __ BranchIf(EQUAL, is_instance_lbl);
2587 __ Jump(is_not_instance_lbl);
2588 return false;
2589 }
2590 // Custom checking for numbers (Smi, Mint and Double).
2591 // Note that instance is not Smi (checked above).
2592 if (type.IsNumberType() || type.IsIntType() || type.IsDoubleType()) {
2593 GenerateNumberTypeCheck(kScratchReg, type, is_instance_lbl,
2594 is_not_instance_lbl);
2595 return false;
2596 }
2597 if (type.IsStringType()) {
2598 GenerateStringTypeCheck(kScratchReg, is_instance_lbl, is_not_instance_lbl);
2599 return false;
2600 }
2601 if (type.IsDartFunctionType()) {
2602 // Check if instance is a closure.
2603 __ CompareImmediate(kScratchReg, kClosureCid);
2604 __ BranchIf(EQUAL, is_instance_lbl);
2605 return true;
2606 }
2607 if (type.IsDartRecordType()) {
2608 // Check if instance is a record.
2609 __ CompareImmediate(kScratchReg, kRecordCid);
2610 __ BranchIf(EQUAL, is_instance_lbl);
2611 return true;
2612 }
2613
2614 // Fast case for cid-range based checks.
2615 // Warning: This code destroys the contents of [kScratchReg], so this should
2616 // be the last check in this method. It returns whether the checks were
2617 // exhaustive, so we negate it to indicate whether we'll fall through.
2618 return !GenerateSubtypeRangeCheck(kScratchReg, type_class, is_instance_lbl);
2619}
2620
2621// Generates inlined check if 'type' is a type parameter or type itself.
2622//
2623// See [GenerateInlineInstanceof] for calling convention.
2624SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
2625 const InstructionSource& source,
2626 const AbstractType& type,
2627 compiler::Label* is_instance_lbl,
2628 compiler::Label* is_not_instance_lbl) {
2629 __ Comment("UninstantiatedTypeTest");
2630 ASSERT(!type.IsInstantiated());
2631 ASSERT(!type.IsFunctionType());
2632 ASSERT(!type.IsRecordType());
2633 // Skip check if destination is a dynamic type.
2634 if (type.IsTypeParameter()) {
2635 // We don't use TypeTestABI::kScratchReg as it is not defined on IA32.
2636 // Instead, we use the subtype test cache register, as it is clobbered by
2637 // the subtype test cache stub call anyway.
2638 const Register kScratchReg = TypeTestABI::kSubtypeTestCacheReg;
2639
2640 const TypeParameter& type_param = TypeParameter::Cast(type);
2641
2642 const Register kTypeArgumentsReg =
2643 type_param.IsClassTypeParameter()
2646 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
2647 __ CompareObject(kTypeArgumentsReg, Object::null_object());
2648 __ BranchIf(EQUAL, is_instance_lbl);
2649 __ LoadCompressedFieldFromOffset(
2650 kScratchReg, kTypeArgumentsReg,
2651 compiler::target::TypeArguments::type_at_offset(type_param.index()));
2652 // kScratchReg: Concrete type of type.
2653 // Check if type argument is dynamic, Object?, or void.
2654 __ CompareObject(kScratchReg, Object::dynamic_type());
2655 __ BranchIf(EQUAL, is_instance_lbl);
2656 __ CompareObject(
2657 kScratchReg,
2659 zone(), isolate_group()->object_store()->nullable_object_type()));
2660 __ BranchIf(EQUAL, is_instance_lbl);
2661 __ CompareObject(kScratchReg, Object::void_type());
2662 __ BranchIf(EQUAL, is_instance_lbl);
2663
2664 // For Smi, check quickly against the int and num interfaces.
2665 compiler::Label not_smi;
2666 __ BranchIfNotSmi(TypeTestABI::kInstanceReg, &not_smi,
2668 __ CompareObject(kScratchReg, Type::ZoneHandle(zone(), Type::IntType()));
2669 __ BranchIf(EQUAL, is_instance_lbl);
2670 __ CompareObject(kScratchReg, Type::ZoneHandle(zone(), Type::Number()));
2671 __ BranchIf(EQUAL, is_instance_lbl);
2672 // Smi can be handled by type test cache.
2673 __ Bind(&not_smi);
2674
2675 const auto test_kind = GetTypeTestStubKindForTypeParameter(type_param);
2676 return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
2677 is_not_instance_lbl);
2678 }
2679 if (type.IsType()) {
2680 // The only uninstantiated type to which a Smi is assignable is FutureOr<T>,
2681 // as T might be a top type or int or num when instantiated.
2682 if (!type.IsFutureOrType()) {
2683 __ BranchIfSmi(TypeTestABI::kInstanceReg, is_not_instance_lbl);
2684 }
2685 const TypeTestStubKind test_kind =
2686 type.IsInstantiated(kFunctions) ? TypeTestStubKind::kTestTypeThreeArgs
2687 : TypeTestStubKind::kTestTypeFourArgs;
2688 // Uninstantiated type class is known at compile time, but the type
2689 // arguments are determined at runtime by the instantiator(s).
2690 return GenerateCallSubtypeTestStub(test_kind, is_instance_lbl,
2691 is_not_instance_lbl);
2692 }
2693 return SubtypeTestCache::null();
2694}
2695
2696// If instanceof type test cannot be performed successfully at compile time and
2697// therefore eliminated, optimize it by adding inlined tests for:
2698// - Null -> see comment below.
2699// - Smi -> compile time subtype check (only if dst class is not parameterized).
2700// - Class equality (only if class is not parameterized).
2701// Inputs (from TypeTestABI):
2702// - kInstanceReg: object.
2703// - kInstantiatorTypeArgumentsReg: instantiator type arguments or raw_null.
2704// - kFunctionTypeArgumentsReg: function type arguments or raw_null.
2705// Returns:
2706// - true or false in kInstanceOfResultReg.
2708 intptr_t deopt_id,
2710 const AbstractType& type,
2711 LocationSummary* locs) {
2712 ASSERT(type.IsFinalized());
2713 ASSERT(!type.IsTopTypeForInstanceOf()); // Already checked.
2714
2715 compiler::Label is_instance, is_not_instance;
2716 // 'null' is an instance of Null, Object*, Never*, void, and dynamic.
2717 // In addition, 'null' is an instance of any nullable type.
2718 // It is also an instance of FutureOr<T> if it is an instance of T.
2719 const AbstractType& unwrapped_type =
2720 AbstractType::Handle(type.UnwrapFutureOr());
2721 if (!unwrapped_type.IsTypeParameter() || unwrapped_type.IsNullable()) {
2722 // Only nullable type parameter remains nullable after instantiation.
2723 // See NullIsInstanceOf().
2724 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2725 __ BranchIf(EQUAL,
2726 (unwrapped_type.IsNullable() ||
2727 (unwrapped_type.IsLegacy() && unwrapped_type.IsNeverType()))
2728 ? &is_instance
2729 : &is_not_instance);
2730 }
2731
2732 // Generate inline instanceof test.
2734 // kInstanceReg, kInstantiatorTypeArgumentsReg, and kFunctionTypeArgumentsReg
2735 // are preserved across the call.
2736 test_cache =
2737 GenerateInlineInstanceof(source, type, &is_instance, &is_not_instance);
2738
2739 // test_cache is null if there is no fall-through.
2741 if (!test_cache.IsNull()) {
2742 // Generate Runtime call.
2743 __ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
2745 GenerateStubCall(source, StubCode::InstanceOf(),
2746 /*kind=*/UntaggedPcDescriptors::kOther, locs, deopt_id,
2747 env);
2749 }
2750 __ Bind(&is_not_instance);
2753
2754 __ Bind(&is_instance);
2756 __ Bind(&done);
2757}
2758
2759#if !defined(TARGET_ARCH_IA32)
2760// Expected inputs (from TypeTestABI):
2761// - kInstanceReg: instance (preserved).
2762// - kInstantiatorTypeArgumentsReg: instantiator type arguments
2763// (for test_kind >= kTestTypeThreeArg).
2764// - kFunctionTypeArgumentsReg: function type arguments
2765// (for test_kind >= kTestTypeFourArg).
2766//
2767// See the arch-specific GenerateSubtypeNTestCacheStub method to see which
2768// registers may need saving across this call.
2769SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
2770 TypeTestStubKind test_kind,
2771 compiler::Label* is_instance_lbl,
2772 compiler::Label* is_not_instance_lbl) {
2773 const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
2774 const SubtypeTestCache& type_test_cache =
2776 const auto& stub_entry =
2778 __ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, type_test_cache);
2779 __ Call(stub_entry);
2780 GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
2781 is_not_instance_lbl);
2782 return type_test_cache.ptr();
2783}
2784
2785// Generates an assignable check for a given object. Emits no code if the
2786// destination type is known at compile time and is a top type. See
2787// GenerateCallerChecksForAssertAssignable for other optimized cases.
2788//
2789// Inputs (preserved for successful checks):
2790// - TypeTestABI::kInstanceReg: object.
2791// - TypeTestABI::kDstTypeReg: destination type (if non-constant).
2792// - TypeTestABI::kInstantiatorTypeArgumentsReg: instantiator type arguments.
2793// - TypeTestABI::kFunctionTypeArgumentsReg: function type arguments.
2794//
2795// Throws:
2796// - TypeError (on unsuccessful assignable checks)
2797//
2798// Performance notes: positive checks must be quick, negative checks can be slow
2799// as they throw an exception.
2801 CompileType* receiver_type,
2803 intptr_t deopt_id,
2805 const String& dst_name,
2806 LocationSummary* locs) {
2807 ASSERT(!source.token_pos.IsClassifying());
2809
2810 // Non-null if we have a constant destination type.
2811 const auto& dst_type =
2813 ? AbstractType::Cast(
2815 : Object::null_abstract_type();
2816
2817 if (!dst_type.IsNull()) {
2818 ASSERT(dst_type.IsFinalized());
2819 if (dst_type.IsTopTypeForSubtyping()) return; // No code needed.
2820 }
2821
2824 // Generate caller-side checks to perform prior to calling the TTS.
2825 if (dst_type.IsNull()) {
2826 __ Comment("AssertAssignable for runtime type");
2827 // kDstTypeReg should already contain the destination type.
2828 } else {
2829 __ Comment("AssertAssignable for compile-time type");
2830 GenerateCallerChecksForAssertAssignable(receiver_type, dst_type, &done);
2831 if (dst_type.IsTypeParameter()) {
2832 // The resolved type parameter is in the scratch register.
2833 type_reg = TypeTestABI::kScratchReg;
2834 }
2835 }
2836
2837 GenerateTTSCall(source, deopt_id, env, type_reg, dst_type, dst_name, locs);
2838 __ Bind(&done);
2839}
2840
2841// Generates a call to the type testing stub for the type in [reg_with_type].
2842// Provide a non-null [dst_type] and [dst_name] if they are known at compile
2843// time.
2845 intptr_t deopt_id,
2847 Register reg_with_type,
2848 const AbstractType& dst_type,
2849 const String& dst_name,
2850 LocationSummary* locs) {
2851 ASSERT(!dst_name.IsNull());
2852 // We use 2 consecutive entries in the pool for the subtype cache and the
2853 // destination name. The second entry, namely [dst_name], seems to be unused,
2854 // but it will be used by the code that throws a TypeError if the type test fails
2855 // (see runtime/vm/runtime_entry.cc:TypeCheck). It will use pattern matching
2856 // on the call site to find out at which pool index the destination name is
2857 // located.
2858 const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
2859 Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
2860 const intptr_t dst_name_index = __ object_pool_builder().AddObject(
2862 ASSERT((sub_type_cache_index + 1) == dst_name_index);
2863 ASSERT(__ constant_pool_allowed());
2864
2865 __ Comment("TTSCall");
2866 // If the dst_type is known at compile time and instantiated, we know the
2867 // target TTS stub and so can use a PC-relative call when available.
2868 if (!dst_type.IsNull() && dst_type.IsInstantiated() &&
2869 CanPcRelativeCall(dst_type)) {
2870 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
2871 sub_type_cache_index);
2872 __ GenerateUnRelocatedPcRelativeCall();
2873 AddPcRelativeTTSCallTypeTarget(dst_type);
2874 } else {
2875 GenerateIndirectTTSCall(assembler(), reg_with_type, sub_type_cache_index);
2876 }
2877
2878 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
2879 env);
2880}
2881
2882// Optimize assignable type check by adding inlined tests for:
2883// - non-null object -> return object (only if in null safe mode and type is
2884// non-nullable Object).
2885// - Smi -> compile time subtype check (only if dst class is not parameterized).
2886// - Class equality (only if class is not parameterized).
2887//
2888// Inputs (preserved):
2889// - TypeTestABI::kInstanceReg: object.
2890// - TypeTestABI::kInstantiatorTypeArgumentsReg: instantiator type arguments.
2891// - TypeTestABI::kFunctionTypeArgumentsReg: function type arguments.
2892//
2893// Assumes:
2894// - Destination type is not a top type.
2895// - Object to check is not null, unless in null safe mode and destination type
2896// is not a nullable type.
2897//
2898// Outputs:
2899// - TypeTestABI::kDstTypeReg: destination type
2900// Additional output if dst_type is a TypeParameter:
2901// - TypeTestABI::kScratchReg: type on which to call TTS stub.
2902//
2903// Performance notes: positive checks must be quick, negative checks can be slow
2904// as they throw an exception.
2906 CompileType* receiver_type,
2907 const AbstractType& dst_type,
2909 // Top types should be handled by the caller and cannot reach here.
2910 ASSERT(!dst_type.IsTopTypeForSubtyping());
2911
2912 // Set this to avoid marking the type testing stub for optimization.
2913 bool elide_info = false;
2914 // Call before any return points to set the destination type register and
2915 // mark the destination type TTS as needing optimization, unless it is
2916 // unlikely to be called.
2917 auto output_dst_type = [&]() -> void {
2918 // If we haven't handled the positive case of the type check on the call
2919 // site and we'll be using the TTS of the destination type, we want an
2920 // optimized type testing stub and thus record it in the [TypeUsageInfo].
2921 if (!elide_info) {
2922 if (auto const type_usage_info = thread()->type_usage_info()) {
2923 type_usage_info->UseTypeInAssertAssignable(dst_type);
2924 } else {
2925 ASSERT(!FLAG_precompiled_mode);
2926 }
2927 }
2928 __ LoadObject(TypeTestABI::kDstTypeReg, dst_type);
2929 };
2930
2931 // We can handle certain types and checks very efficiently on the call site,
2932 // meaning those need not be checked within the stubs (which may involve
2933 // a runtime call).
2934
2935 if (dst_type.IsObjectType()) {
2936 // Special case: non-nullable Object.
2937 ASSERT(dst_type.IsNonNullable());
2938 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2939 __ BranchIf(NOT_EQUAL, done);
2940 // Fall back to type testing stub in caller to throw the exception.
2941 return output_dst_type();
2942 }
2943
2944 // If the int type is assignable to [dst_type] we special case it on the
2945 // caller side!
2946 const Type& int_type = Type::Handle(zone(), Type::IntType());
2947 bool is_non_smi = false;
2948 if (int_type.IsSubtypeOf(dst_type, Heap::kOld)) {
2949 __ BranchIfSmi(TypeTestABI::kInstanceReg, done);
2950 is_non_smi = true;
2951 } else if (!receiver_type->CanBeSmi()) {
2952 is_non_smi = true;
2953 }
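// Illustrative example: for an assignment to a variable of type num, int is
// a subtype of num, so any Smi value is accepted right here and the type
// testing stub only ever sees non-Smi instances.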
2954
2955 if (dst_type.IsTypeParameter()) {
2956 // Special case: Instantiate the type parameter on the caller side, invoking
2957 // the TTS of the corresponding type parameter in the caller.
2958 const TypeParameter& type_param = TypeParameter::Cast(dst_type);
2959 if (!type_param.IsNonNullable()) {
2960 // If the type parameter is nullable when running in strong mode, we need
2961 // to handle null before calling the TTS because the type parameter may be
2962 // instantiated with a non-nullable type, where the TTS rejects null.
2963 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
2964 __ BranchIf(EQUAL, done);
2965 }
2966 const Register kTypeArgumentsReg =
2967 type_param.IsClassTypeParameter()
2970
2971 // Check if type arguments are null, i.e. equivalent to vector of dynamic.
2972 // If so, then the value is guaranteed assignable as dynamic is a top type.
2973 __ CompareObject(kTypeArgumentsReg, Object::null_object());
2974 __ BranchIf(EQUAL, done);
2975 // Put the instantiated type parameter into the scratch register, so its
2976 // TTS can be called by the caller.
2977 __ LoadCompressedFieldFromOffset(
2978 TypeTestABI::kScratchReg, kTypeArgumentsReg,
2979 compiler::target::TypeArguments::type_at_offset(type_param.index()));
2980 return output_dst_type();
2981 }
2982
2983 if (dst_type.IsFunctionType() || dst_type.IsRecordType()) {
2984 return output_dst_type();
2985 }
2986
2987 if (auto const hi = thread()->hierarchy_info()) {
2988 const Class& type_class = Class::Handle(zone(), dst_type.type_class());
2989
2990 if (hi->CanUseSubtypeRangeCheckFor(dst_type)) {
2991 const CidRangeVector& ranges = hi->SubtypeRangesForClass(
2992 type_class,
2993 /*include_abstract=*/false,
2994 /*exclude_null=*/!Instance::NullIsAssignableTo(dst_type));
2995 if (ranges.length() <= kMaxNumberOfCidRangesToTest) {
2996 if (is_non_smi) {
2998 } else {
2999 __ LoadClassIdMayBeSmi(TypeTestABI::kScratchReg,
3001 }
3003 done);
3004 elide_info = true;
3005 } else if (IsListClass(type_class)) {
3006 __ LoadClassIdMayBeSmi(TypeTestABI::kScratchReg,
3009 }
3010 }
3011 }
3012 output_dst_type();
3013}
3014#endif // !defined(TARGET_ARCH_IA32)
3015
3016#undef __
3017
3018#if defined(DEBUG)
3019void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
3021
3022 switch (instr->tag()) {
3023 case Instruction::kDropTemps:
3024 FrameStatePop(instr->locs()->input_count() +
3025 instr->AsDropTemps()->num_temps());
3026 break;
3027
3028 default:
3029 FrameStatePop(instr->locs()->input_count());
3030 break;
3031 }
3032
3033 ASSERT(!instr->locs()->can_call() || FrameStateIsSafeToCall());
3034
3035 FrameStatePop(instr->ArgumentCount());
3036 Definition* defn = instr->AsDefinition();
3037 if ((defn != nullptr) && defn->HasTemp()) {
3038 FrameStatePush(defn);
3039 }
3040}
3041
3042void FlowGraphCompiler::FrameStatePush(Definition* defn) {
3043 Representation rep = defn->representation();
3045 if ((rep == kUnboxedDouble || rep == kUnboxedFloat32x4 ||
3046 rep == kUnboxedFloat64x2) &&
3047 defn->locs()->out(0).IsFpuRegister()) {
3048 // Output value is boxed in the instruction epilogue.
3049 rep = kTagged;
3050 }
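// That is, the epilogue boxes the FPU result before it is pushed, so the
// simulated frame tracks the value as a tagged object rather than an unboxed
// one.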
3051 ASSERT((rep == kTagged) || (rep == kUntagged) ||
3053 frame_state_.Add(rep);
3054}
3055
3056void FlowGraphCompiler::FrameStatePop(intptr_t count) {
3058 frame_state_.TruncateTo(
3059 Utils::Maximum(static_cast<intptr_t>(0), frame_state_.length() - count));
3060}
3061
3062bool FlowGraphCompiler::FrameStateIsSafeToCall() {
3064 for (intptr_t i = 0; i < frame_state_.length(); i++) {
3065 if (frame_state_[i] != kTagged) {
3066 return false;
3067 }
3068 }
3069 return true;
3070}
3071
3072void FlowGraphCompiler::FrameStateClear() {
3074 frame_state_.TruncateTo(0);
3075}
3076#endif // defined(DEBUG)
3077
3078#define __ compiler->assembler()->
3079
3082 __ Comment("slow path %s operation", name());
3083 }
3084 const bool use_shared_stub =
3085 instruction()->UseSharedSlowPathStub(compiler->is_optimizing());
3086 ASSERT(use_shared_stub == instruction()->locs()->call_on_shared_slow_path());
3087 const bool live_fpu_registers =
3089 const intptr_t num_args =
3090 use_shared_stub ? 0 : GetNumberOfArgumentsForRuntimeCall();
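// When a shared slow-path stub is used no arguments are materialized here
// (num_args is 0); the stub variant selected below already encodes whether
// live FPU registers have to be saved and restored.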
3091 __ Bind(entry_label());
3093 LocationSummary* locs = instruction()->locs();
3094 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3095 if (use_shared_stub) {
3096 if (!has_frame) {
3097#if !defined(TARGET_ARCH_IA32)
3098 ASSERT(__ constant_pool_allowed());
3099 __ set_constant_pool_allowed(false);
3100#endif
3101 __ EnterDartFrame(0);
3102 }
3103 EmitSharedStubCall(compiler, live_fpu_registers);
3104#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
3105 if (!has_frame) {
3106 // Undo EnterDartFrame for the code generated after this slow path.
3107 RESTORES_LR_FROM_FRAME({});
3108 }
3109#endif
3110 } else {
3111 ASSERT(has_frame);
3112 // Save registers as they are needed for lazy deopt / exception handling.
3113 compiler->SaveLiveRegisters(locs);
3115 __ CallRuntime(runtime_entry_, num_args);
3116 }
3117 const intptr_t deopt_id = instruction()->deopt_id();
3118 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id,
3119 instruction()->source());
3121 compiler->RecordSafepoint(locs, num_args);
3122 if (!FLAG_precompiled_mode ||
3123 (compiler->CurrentTryIndex() != kInvalidTryIndex)) {
3124 Environment* env =
3125 compiler->SlowPathEnvironmentFor(instruction(), num_args);
3126 // TODO(47044): Should be able to say `FLAG_precompiled_mode` instead.
3127 if (CompilerState::Current().is_aot()) {
3128 compiler->RecordCatchEntryMoves(env);
3129 } else if (compiler->is_optimizing()) {
3130 ASSERT(env != nullptr);
3131 compiler->AddSlowPathDeoptInfo(deopt_id, env);
3132 } else {
3133 ASSERT(env == nullptr);
3134 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
3135 // Add deoptimization continuation point.
3136 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt,
3137 deopt_id_after, instruction()->source());
3138 }
3139 }
3140 if (!use_shared_stub) {
3141 __ Breakpoint();
3142 }
3143}
3144
3146 switch (exception_type()) {
3148 return "check null (nsm)";
3150 return "check null (arg)";
3152 return "check null (cast)";
3153 }
3154 UNREACHABLE();
3155}
3156
3157const RuntimeEntry& NullErrorSlowPath::GetRuntimeEntry(
3158 CheckNullInstr::ExceptionType exception_type) {
3159 switch (exception_type) {
3161 return kNullErrorRuntimeEntry;
3163 return kArgumentNullErrorRuntimeEntry;
3165 return kNullCastErrorRuntimeEntry;
3166 }
3167 UNREACHABLE();
3168}
3169
3171 CheckNullInstr::ExceptionType exception_type,
3172 bool save_fpu_registers) {
3173 auto object_store = compiler->isolate_group()->object_store();
3174 switch (exception_type) {
3176 return save_fpu_registers
3177 ? object_store->null_error_stub_with_fpu_regs_stub()
3178 : object_store->null_error_stub_without_fpu_regs_stub();
3180 return save_fpu_registers
3181 ? object_store->null_arg_error_stub_with_fpu_regs_stub()
3182 : object_store->null_arg_error_stub_without_fpu_regs_stub();
3184 return save_fpu_registers
3185 ? object_store->null_cast_error_stub_with_fpu_regs_stub()
3186 : object_store->null_cast_error_stub_without_fpu_regs_stub();
3187 }
3188 UNREACHABLE();
3189}
3190
3192 bool save_fpu_registers) {
3193#if defined(TARGET_ARCH_IA32)
3194 UNREACHABLE();
3195#else
3196 const auto& stub =
3197 Code::ZoneHandle(compiler->zone(),
3198 GetStub(compiler, exception_type(), save_fpu_registers));
3199 compiler->EmitCallToStub(stub);
3200#endif
3201}
3202
3205 LocationSummary* locs = instruction()->locs();
3207 // Can't pass unboxed int64 value directly to runtime call, as all
3208 // arguments are expected to be tagged (boxed).
3209 // The unboxed int64 argument is passed through a dedicated slot in Thread.
3210 // TODO(dartbug.com/33549): Clean this up when unboxed values
3211 // could be passed as arguments.
3212 __ StoreToOffset(locs->in(CheckBoundBaseInstr::kLengthPos).reg(), THR,
3213 compiler::target::Thread::unboxed_runtime_arg_offset());
3214 __ StoreToOffset(
3216 compiler::target::Thread::unboxed_runtime_arg_offset() + kInt64Size);
3217 } else {
3218 __ PushRegisterPair(locs->in(CheckBoundBaseInstr::kIndexPos).reg(),
3220 }
3221}
3222
3224 bool save_fpu_registers) {
3225#if defined(TARGET_ARCH_IA32)
3226 UNREACHABLE();
3227#else
3228 auto object_store = compiler->isolate_group()->object_store();
3229 const auto& stub = Code::ZoneHandle(
3230 compiler->zone(),
3231 save_fpu_registers
3232 ? object_store->range_error_stub_with_fpu_regs_stub()
3233 : object_store->range_error_stub_without_fpu_regs_stub());
3234 compiler->EmitCallToStub(stub);
3235#endif
3236}
3237
3240 LocationSummary* locs = instruction()->locs();
3241 __ PushRegister(locs->in(CheckWritableInstr::kReceiver).reg());
3242 __ PushImmediate(
3243 compiler::target::ToRawSmi(instruction()->AsCheckWritable()->kind()));
3244}
3245
3247 bool save_fpu_registers) {
3248#if defined(TARGET_ARCH_IA32)
3249 UNREACHABLE();
3250#else
3251 auto object_store = compiler->isolate_group()->object_store();
3252 const auto& stub = Code::ZoneHandle(
3253 compiler->zone(),
3254 save_fpu_registers
3255 ? object_store->write_error_stub_with_fpu_regs_stub()
3256 : object_store->write_error_stub_without_fpu_regs_stub());
3257 compiler->EmitCallToStub(stub);
3258#endif
3259}
3260
3265
3268 bool save_fpu_registers) {
3269#if defined(TARGET_ARCH_IA32)
3270 UNREACHABLE();
3271#else
3272 ASSERT(instruction()->locs()->temp(0).reg() ==
3275 Field::ZoneHandle(OriginalField()));
3276 auto object_store = compiler->isolate_group()->object_store();
3277 const auto& stub = Code::ZoneHandle(
3278 compiler->zone(),
3279 save_fpu_registers
3280 ? object_store->late_initialization_error_stub_with_fpu_regs_stub()
3281 : object_store
3282 ->late_initialization_error_stub_without_fpu_regs_stub());
3283 compiler->EmitCallToStub(stub);
3284#endif
3285}
3286
3288 const compiler::ffi::NativeLocation& destination,
3291 if (destination.IsBoth()) {
3292 // Copy to both.
3293 const auto& both = destination.AsBoth();
3294 EmitNativeMove(both.location(0), source, temp);
3295 EmitNativeMove(both.location(1), source, temp);
3296 return;
3297 }
3298 if (source.IsBoth()) {
3299 // Copy from one of both.
3300 const auto& both = source.AsBoth();
3301 EmitNativeMove(destination, both.location(0), temp);
3302 return;
3303 }
3304
3305 const auto& src_payload_type = source.payload_type();
3306 const auto& dst_payload_type = destination.payload_type();
3307 const auto& src_container_type = source.container_type();
3308 const auto& dst_container_type = destination.container_type();
3309 const intptr_t src_payload_size = src_payload_type.SizeInBytes();
3310 const intptr_t dst_payload_size = dst_payload_type.SizeInBytes();
3311 const intptr_t src_container_size = src_container_type.SizeInBytes();
3312 const intptr_t dst_container_size = dst_container_type.SizeInBytes();
3313
3314 // This function does not know how to do larger mem copy moves yet.
3315 ASSERT(src_payload_type.IsPrimitive());
3316 ASSERT(dst_payload_type.IsPrimitive());
3317
3318 // This function does not deal with sign conversions yet.
3319 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
3320
3321 // If the location, payload, and container are equal, we're done.
3322 if (source.Equals(destination) && src_payload_type.Equals(dst_payload_type) &&
3323 src_container_type.Equals(dst_container_type)) {
3324#if defined(TARGET_ARCH_RISCV64)
3325 // Except we might still need to adjust for the difference between C's
3326 // representation of uint32 (sign-extended to 64 bits) and Dart's
3327 // (zero-extended).
3328 EmitNativeMoveArchitecture(destination, source);
3329#endif
3330 return;
3331 }
3332
3333 // Solve discrepancies between container size and payload size.
3334 if (src_payload_type.IsInt() && dst_payload_type.IsInt() &&
3335 (src_payload_size != src_container_size ||
3336 dst_payload_size != dst_container_size)) {
3337 if (source.IsStack() && src_container_size > src_payload_size) {
3338 // Shrink loads since all loads are extending.
3339 return EmitNativeMove(
3340 destination,
3341 source.WithOtherNativeType(zone_, src_payload_type, src_payload_type),
3342 temp);
3343 }
3344 if (src_payload_size <= dst_payload_size &&
3345 src_container_size >= dst_container_size) {
3346 // The upper bits of the source are already properly sign or zero
3347 // extended, so just copy the required amount of bits.
3348 return EmitNativeMove(destination.WithOtherNativeType(
3349 zone_, dst_container_type, dst_container_type),
3350 source.WithOtherNativeType(
3351 zone_, dst_container_type, dst_container_type),
3352 temp);
3353 }
3354 if (src_payload_size >= dst_payload_size &&
3355 dst_container_size > dst_payload_size) {
3356 // The upper bits of the source are not properly sign or zero extended
3357 // to be copied to the target, so regard the source as smaller.
3358 return EmitNativeMove(
3359 destination.WithOtherNativeType(zone_, dst_container_type,
3360 dst_container_type),
3361 source.WithOtherNativeType(zone_, dst_payload_type, dst_payload_type),
3362 temp);
3363 }
3364 UNREACHABLE();
3365 }
3366 ASSERT(src_payload_size == src_container_size);
3367 ASSERT(dst_payload_size == dst_container_size);
3368
3369 // Split moves that are larger than kWordSize; these require separate
3370 // instructions on all architectures.
3371 if (compiler::target::kWordSize == 4 && src_container_size == 8 &&
3372 dst_container_size == 8 && !source.IsFpuRegisters() &&
3373 !destination.IsFpuRegisters()) {
3374 // TODO(40209): If this is stack to stack, we could use FpuTMP.
3375 // Test the impact on code size and speed.
3376 EmitNativeMove(destination.Split(zone_, 2, 0), source.Split(zone_, 2, 0),
3377 temp);
3378 EmitNativeMove(destination.Split(zone_, 2, 1), source.Split(zone_, 2, 1),
3379 temp);
3380 return;
3381 }
3382
3383 // Split moves from stack to stack; none of the architectures provides
3384 // memory to memory move instructions.
3385 if (source.IsStack() && destination.IsStack()) {
3386 Register scratch = temp->AllocateTemporary();
3387 ASSERT(scratch != kNoRegister);
3388#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
3389 ASSERT(scratch != TMP); // TMP is an argument register.
3390 ASSERT(scratch != TMP2); // TMP2 is an argument register.
3391#endif
3392 const auto& intermediate =
3393 *new (zone_) compiler::ffi::NativeRegistersLocation(
3394 zone_, dst_payload_type, dst_container_type, scratch);
3395 EmitNativeMove(intermediate, source, temp);
3396 EmitNativeMove(destination, intermediate, temp);
3397 temp->ReleaseTemporary();
3398 return;
3399 }
3400
3401 const bool sign_or_zero_extend = dst_container_size > src_container_size;
3402
3403 // No architecture supports sign extending with memory as destination.
3404 if (sign_or_zero_extend && destination.IsStack()) {
3405 ASSERT(source.IsRegisters());
3406 const auto& intermediate =
3407 source.WithOtherNativeType(zone_, dst_payload_type, dst_container_type);
3408 EmitNativeMove(intermediate, source, temp);
3409 EmitNativeMove(destination, intermediate, temp);
3410 return;
3411 }
3412
3413 // Do the simple architecture specific moves.
3414 EmitNativeMoveArchitecture(destination, source);
3415}
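EmitNativeMove above first reconciles integer payload and container sizes before doing the actual move: extending stack loads are shrunk to the payload width, an already-extended source is copied at the destination's container width, and otherwise the source is treated as only payload-wide. A standalone sketch of that classification, with invented names and plain byte counts standing in for the NativeType queries:

#include <cstdio>

// Invented labels for the three size-reconciliation strategies used above.
enum class SizeFix {
  kNone,            // payload and container already agree on both sides
  kShrinkLoad,      // stack source: load only the payload bytes
  kCopyContainer,   // source already extended: copy destination container width
  kTreatAsPayload,  // source upper bits unusable: copy payload width only
  kUnsupported,
};

// Follows the branch order of EmitNativeMove's discrepancy handling; sizes
// are in bytes, as SizeInBytes() returns them in the real code.
SizeFix ClassifyMove(bool source_on_stack,
                     int src_payload, int src_container,
                     int dst_payload, int dst_container) {
  if (src_payload == src_container && dst_payload == dst_container) {
    return SizeFix::kNone;
  }
  if (source_on_stack && src_container > src_payload) {
    return SizeFix::kShrinkLoad;
  }
  if (src_payload <= dst_payload && src_container >= dst_container) {
    return SizeFix::kCopyContainer;
  }
  if (src_payload >= dst_payload && dst_container > dst_payload) {
    return SizeFix::kTreatAsPayload;
  }
  return SizeFix::kUnsupported;
}

int main() {
  // An int8 payload held in a 4-byte stack slot: the load is shrunk to the
  // 1-byte payload before the move is retried.
  std::printf("%d\n", static_cast<int>(ClassifyMove(true, 1, 4, 1, 4)));
  return 0;
}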
3416
3417void FlowGraphCompiler::EmitMoveToNative(
3418 const compiler::ffi::NativeLocation& dst,
3419 Location src_loc,
3420 Representation src_type,
3421 TemporaryRegisterAllocator* temp) {
3422 if (src_loc.IsPairLocation()) {
3423 for (intptr_t i : {0, 1}) {
3424 const auto& src_split = compiler::ffi::NativeLocation::FromPairLocation(
3425 zone_, src_loc, src_type, i);
3426 EmitNativeMove(dst.Split(zone_, 2, i), src_split, temp);
3427 }
3428 } else {
3429 const auto& src =
3430 compiler::ffi::NativeLocation::FromLocation(zone_, src_loc, src_type);
3431 // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
3432 if (src_type == kUnboxedInt64 &&
3433 dst.container_type().AsPrimitive().representation() ==
3434 compiler::ffi::kUint64) {
3435 EmitNativeMove(dst,
3436 src.WithOtherNativeType(zone_, dst.container_type(),
3437 dst.container_type()),
3438 temp);
3439 } else {
3440 EmitNativeMove(dst, src, temp);
3441 }
3442 }
3443}
3444
3445void FlowGraphCompiler::EmitMoveFromNative(
3446 Location dst_loc,
3447 Representation dst_type,
3448 const compiler::ffi::NativeLocation& src,
3449 TemporaryRegisterAllocator* temp) {
3450 if (dst_loc.IsPairLocation()) {
3451 for (intptr_t i : {0, 1}) {
3452 const auto& dest_split = compiler::ffi::NativeLocation::FromPairLocation(
3453 zone_, dst_loc, dst_type, i);
3454 EmitNativeMove(dest_split, src.Split(zone_, 2, i), temp);
3455 }
3456 } else {
3457 const auto& dst =
3458 compiler::ffi::NativeLocation::FromLocation(zone_, dst_loc, dst_type);
3459 // Deal with sign mismatch caused by lack of kUnboxedUint64 representation.
3460 if (dst_type == kUnboxedInt64 &&
3461 src.container_type().AsPrimitive().representation() ==
3462 compiler::ffi::kUint64) {
3463 EmitNativeMove(dst.WithOtherNativeType(zone_, src.container_type(),
3464 src.container_type()),
3465 src, temp);
3466 } else {
3467 EmitNativeMove(dst, src, temp);
3468 }
3469 }
3470}
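EmitMoveToNative and EmitMoveFromNative treat a 64-bit value on a 32-bit target as a PairLocation of two word-sized halves and move each half against the matching Split of the native location. A minimal stand-alone illustration of the half-by-half round trip (plain C++, not VM types):

#include <cstdint>
#include <cstdio>

// A 64-bit value modeled as the two 32-bit halves a PairLocation would hold.
struct Pair32 {
  uint32_t lo;  // pair index 0
  uint32_t hi;  // pair index 1
};

Pair32 SplitToPair(uint64_t value) {
  return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 32)};
}

uint64_t JoinFromPair(const Pair32& pair) {
  return (static_cast<uint64_t>(pair.hi) << 32) | pair.lo;
}

int main() {
  const uint64_t original = 0x1122334455667788ull;
  // "To native": each half is moved into its split of the destination.
  const Pair32 halves = SplitToPair(original);
  // "From native": the two splits are moved back into the pair's halves.
  std::printf("lo=%08x hi=%08x round_trip_ok=%d\n", halves.lo, halves.hi,
              static_cast<int>(JoinFromPair(halves) == original));
  return 0;
}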
3471
3472void FlowGraphCompiler::EmitMoveConst(const compiler::ffi::NativeLocation& dst,
3473 Location src,
3474 Representation src_type,
3475 TemporaryRegisterAllocator* temp) {
3476 ASSERT(src.IsConstant() || src.IsPairLocation());
3477 const auto& dst_type = dst.payload_type();
3478 Register scratch = kNoRegister;
3479 if (dst.IsExpressibleAsLocation() &&
3480 dst_type.IsExpressibleAsRepresentation() &&
3481 dst_type.AsRepresentationOverApprox(zone_) == src_type) {
3482 // We can directly emit the const in the right place and representation.
3483 const Location dst_loc = dst.AsLocation();
3484 assembler()->Comment("dst.IsExpressibleAsLocation() %s",
3485 dst_loc.ToCString());
3486 EmitMove(dst_loc, src, temp);
3487 } else {
3488 // We need an intermediate location.
3489 Location intermediate;
3490 if (dst_type.IsInt()) {
3491 if (TMP == kNoRegister) {
3492 scratch = temp->AllocateTemporary();
3493 intermediate = Location::RegisterLocation(scratch);
3494 } else {
3495 intermediate = Location::RegisterLocation(TMP);
3496 }
3497 } else {
3498 ASSERT(dst_type.IsFloat());
3499 intermediate = Location::FpuRegisterLocation(FpuTMP);
3500 }
3501 assembler()->Comment("constant using intermediate: %s",
3502 intermediate.ToCString());
3503
3504 if (src.IsPairLocation()) {
3505 for (intptr_t i : {0, 1}) {
3506 const Representation src_type_split =
3508 .Split(zone_, i)
3509 .AsRepresentation();
3510 const auto& intermediate_native =
3511 compiler::ffi::NativeLocation::FromLocation(zone_, intermediate,
3512 src_type_split);
3513 EmitMove(intermediate, src.AsPairLocation()->At(i), temp);
3514 EmitNativeMove(dst.Split(zone_, 2, i), intermediate_native, temp);
3515 }
3516 } else {
3517 const auto& intermediate_native =
3518 compiler::ffi::NativeLocation::FromLocation(zone_, intermediate,
3519 src_type);
3520 EmitMove(intermediate, src, temp);
3521 EmitNativeMove(dst, intermediate_native, temp);
3522 }
3523
3524 if (scratch != kNoRegister) {
3525 temp->ReleaseTemporary();
3526 }
3527 }
3528 return;
3529}
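When the destination cannot be expressed directly, EmitMoveConst stages the constant through a temporary: TMP (or a scratch register obtained from the allocator when the architecture reserves no TMP) for integer payloads, and FpuTMP for floating-point ones, followed by a native move out of that intermediate. A schematic of the selection step, using booleans in place of the real NativeType and register queries:

#include <cstdio>

// Booleans stand in for dst.payload_type().IsInt() and for whether the
// architecture reserves a TMP register (IA32 does not).
const char* ChooseIntermediate(bool dst_is_int, bool has_dedicated_tmp) {
  if (dst_is_int) {
    // Integer constants are materialized into TMP when one is reserved,
    // otherwise into a scratch register handed out by the temp allocator.
    return has_dedicated_tmp ? "TMP" : "allocated scratch register";
  }
  // Floating-point constants always go through the FPU temporary.
  return "FpuTMP";
}

int main() {
  std::printf("int constant, arch with TMP:    %s\n",
              ChooseIntermediate(true, true));
  std::printf("int constant, arch without TMP: %s\n",
              ChooseIntermediate(true, false));
  std::printf("double constant:                %s\n",
              ChooseIntermediate(false, true));
  return 0;
}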
3530
3531bool FlowGraphCompiler::CanPcRelativeCall(const Function& target) const {
3532 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3533 (LoadingUnit::LoadingUnitOf(function()) ==
3534 LoadingUnit::LoadingUnitOf(target));
3535}
3536
3537bool FlowGraphCompiler::CanPcRelativeCall(const Code& target) const {
3538 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3539 !target.InVMIsolateHeap() &&
3540 (LoadingUnit::LoadingUnitOf(function()) ==
3541 LoadingUnit::LoadingUnitOf(target));
3542}
3543
3544bool FlowGraphCompiler::CanPcRelativeCall(const AbstractType& target) const {
3545 return FLAG_precompiled_mode && !FLAG_force_indirect_calls &&
3546 !target.InVMIsolateHeap() &&
3547 (LoadingUnit::LoadingUnitOf(function()) ==
3548 LoadingUnit::LoadingUnit::kRootId);
3549}
3550
3551#undef __
3552
3553} // namespace dart
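The three CanPcRelativeCall overloads above all reduce to the same predicate: AOT (precompiled) mode, PC-relative calls not forced off by FLAG_force_indirect_calls, for Code and AbstractType targets the target not living in the VM isolate heap, and caller and target ending up in the same loading unit (the root unit in the AbstractType case). A condensed sketch with plain booleans standing in for the VM queries:

#include <cstdio>

// Plain booleans stand in for the flag and heap/loading-unit queries made by
// the CanPcRelativeCall overloads.
bool CanUsePcRelativeCall(bool precompiled_mode,
                          bool force_indirect_calls,
                          bool target_in_vm_isolate_heap,
                          bool same_loading_unit) {
  // PC-relative calls are only emitted for AOT snapshots, can be disabled via
  // flag, must not target the shared VM isolate heap, and require caller and
  // callee to be emitted into the same loading unit.
  return precompiled_mode && !force_indirect_calls &&
         !target_in_vm_isolate_heap && same_loading_unit;
}

int main() {
  // A target in a deferred loading unit falls back to an indirect call
  // through the object pool.
  std::printf("deferred target:  %d\n",
              CanUsePcRelativeCall(true, false, false, false));
  std::printf("same-unit target: %d\n",
              CanUsePcRelativeCall(true, false, false, true));
  return 0;
}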