flow_graph.cc
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
7#include <array>
8
9#include "vm/bit_vector.h"
16#include "vm/compiler/cha.h"
21#include "vm/growable_array.h"
22#include "vm/object_store.h"
23#include "vm/resolver.h"
24
25namespace dart {
26
27#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32)
28// Smi->Int32 widening pass is disabled due to dartbug.com/32619.
29DEFINE_FLAG(bool, use_smi_widening, false, "Enable Smi->Int32 widening pass.");
30DEFINE_FLAG(bool, trace_smi_widening, false, "Trace Smi->Int32 widening pass.");
31#endif
32DEFINE_FLAG(bool, prune_dead_locals, true, "optimize dead locals away");
33
34// Quick access to the current zone.
35#define Z (zone())
36
37static bool ShouldReorderBlocks(const Function& function,
38 FlowGraph::CompilationMode mode) {
39 return (mode == FlowGraph::CompilationMode::kOptimized) &&
40 FLAG_reorder_basic_blocks && !function.IsFfiCallbackTrampoline();
41}
42
43static bool IsMarkedWithNoBoundsChecks(const Function& function) {
44 Object& options = Object::Handle();
45 return Library::FindPragma(dart::Thread::Current(),
46 /*only_core=*/false, function,
47 Symbols::vm_unsafe_no_bounds_checks(),
48 /*multiple=*/false, &options);
49}
50
51FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
52 GraphEntryInstr* graph_entry,
53 intptr_t max_block_id,
54 PrologueInfo prologue_info,
55 CompilationMode compilation_mode)
56 : thread_(Thread::Current()),
57 parent_(),
58 current_ssa_temp_index_(0),
59 max_block_id_(max_block_id),
60 parsed_function_(parsed_function),
61 num_direct_parameters_(parsed_function.function().MakesCopyOfParameters()
62 ? 0
63 : parsed_function.function().NumParameters()),
64 direct_parameter_locations_(
65 parsed_function.function().num_fixed_parameters()),
66 graph_entry_(graph_entry),
67 preorder_(),
68 postorder_(),
69 reverse_postorder_(),
70 optimized_block_order_(),
71 constant_null_(nullptr),
72 constant_dead_(nullptr),
73 licm_allowed_(true),
74 should_reorder_blocks_(
75 ShouldReorderBlocks(parsed_function.function(), compilation_mode)),
76 prologue_info_(prologue_info),
77 loop_hierarchy_(nullptr),
78 loop_invariant_loads_(nullptr),
79 captured_parameters_(new(zone()) BitVector(zone(), variable_count())),
80 inlining_id_(-1),
81 should_print_(false),
82 should_remove_all_bounds_checks_(
83 CompilerState::Current().is_aot() &&
84 IsMarkedWithNoBoundsChecks(parsed_function.function())) {
86 &compiler_pass_filters_);
88 zone(), function(),
89 /*should_assign_stack_locations=*/
91 &direct_parameter_locations_);
93}
94
96 if ((replacement->ssa_temp_index() == -1) && (defn->ssa_temp_index() != -1)) {
97 AllocateSSAIndex(replacement);
98 }
99}
100
101intptr_t FlowGraph::ComputeArgumentsSizeInWords(const Function& function,
102 intptr_t argument_count) {
105
106 const intptr_t fixed_parameters_size_in_bytes =
108
109 // Currently we box all optional parameters.
110 return fixed_parameters_size_in_bytes +
112}
113
114Representation FlowGraph::ParameterRepresentationAt(const Function& function,
115 intptr_t index) {
116 if (function.IsNull()) {
117 return kTagged;
118 }
119 ASSERT(index < function.NumParameters());
120 if (function.is_unboxed_integer_parameter_at(index)) {
121 return kUnboxedInt64;
122 } else if (function.is_unboxed_double_parameter_at(index)) {
123 return kUnboxedDouble;
124 } else {
125 ASSERT(!function.is_unboxed_parameter_at(index));
126 return kTagged;
127 }
128}
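// Note: the representation chosen here has to agree with what callers produce
// for each argument. For example, a parameter the frontend marked as an
// unboxed integer arrives as a raw 64-bit value (kUnboxedInt64) rather than a
// tagged Smi/Mint, so caller and callee must consult the same Function
// metadata when deciding how an argument is passed.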
129
130Representation FlowGraph::ReturnRepresentationOf(const Function& function) {
131 if (function.IsNull()) {
132 return kTagged;
133 }
134 if (function.has_unboxed_integer_return()) {
135 return kUnboxedInt64;
136 } else if (function.has_unboxed_double_return()) {
137 return kUnboxedDouble;
138 } else if (function.has_unboxed_record_return()) {
139 return kPairOfTagged;
140 } else {
141 ASSERT(!function.has_unboxed_return());
142 return kTagged;
143 }
144}
145
146void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
147 Instruction* current,
148 Instruction* replacement) {
149 Definition* current_defn = current->AsDefinition();
150 if ((replacement != nullptr) && (current_defn != nullptr)) {
151 Definition* replacement_defn = replacement->AsDefinition();
152 ASSERT(replacement_defn != nullptr);
153 current_defn->ReplaceUsesWith(replacement_defn);
154 EnsureSSATempIndex(current_defn, replacement_defn);
155
156 if (FLAG_trace_optimization && should_print()) {
157 THR_Print("Replacing v%" Pd " with v%" Pd "\n",
158 current_defn->ssa_temp_index(),
159 replacement_defn->ssa_temp_index());
160 }
161 } else if (FLAG_trace_optimization && should_print()) {
162 if (current_defn == nullptr) {
163 THR_Print("Removing %s\n", current->DebugName());
164 } else {
165 ASSERT(!current_defn->HasUses());
166 THR_Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index());
167 }
168 }
169 if (current->ArgumentCount() != 0) {
170 ASSERT(!current->HasMoveArguments());
171 }
172 iterator->RemoveCurrentFromGraph();
173}
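// A rough usage sketch from an optimization pass (the surrounding loop and the
// `flow_graph` pointer are assumed context here, not part of this file):
//
//   for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
//     Instruction* current = it.Current();
//     Instruction* replacement = current->Canonicalize(flow_graph);
//     if (replacement != current) {
//       flow_graph->ReplaceCurrentInstruction(&it, current, replacement);
//     }
//   }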
174
176 return should_reorder_blocks() ? &optimized_block_order_
177 : &reverse_postorder_;
178}
179
181 return should_reorder_blocks() ? &optimized_block_order_
182 : &reverse_postorder_;
183}
184
185ConstantInstr* FlowGraph::GetExistingConstant(
186 const Object& object,
187 Representation representation) const {
188 return constant_instr_pool_.LookupValue(
189 ConstantAndRepresentation{object, representation});
190}
191
192ConstantInstr* FlowGraph::GetConstant(const Object& object,
193 Representation representation) {
194 ConstantInstr* constant = GetExistingConstant(object, representation);
195 if (constant == nullptr) {
196 // Otherwise, allocate and add it to the pool.
197 const Object& zone_object = Object::ZoneHandle(zone(), object.ptr());
198 if (representation == kTagged) {
199 constant = new (zone()) ConstantInstr(zone_object);
200 } else {
201 constant = new (zone()) UnboxedConstantInstr(zone_object, representation);
202 }
203 AllocateSSAIndex(constant);
204 AddToGraphInitialDefinitions(constant);
205 constant_instr_pool_.Insert(constant);
206 }
207 return constant;
208}
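// Constants are pooled per (value, representation) pair, so asking twice for
// the same constant yields the same ConstantInstr. A minimal sketch (assuming
// a `flow_graph` pointer is available):
//
//   ConstantInstr* a = flow_graph->GetConstant(Smi::Handle(Smi::New(1)), kTagged);
//   ConstantInstr* b = flow_graph->GetConstant(Smi::Handle(Smi::New(1)), kTagged);
//   ASSERT(a == b);  // The second request hits constant_instr_pool_.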
209
210bool FlowGraph::IsConstantRepresentable(const Object& value,
211 Representation target_rep,
212 bool tagged_value_must_be_smi) {
213 switch (target_rep) {
214 case kTagged:
215 return !tagged_value_must_be_smi || value.IsSmi();
216
217 case kUnboxedInt32:
218 if (value.IsInteger()) {
219 return Utils::IsInt(32, Integer::Cast(value).AsInt64Value());
220 }
221 return false;
222
223 case kUnboxedUint32:
224 if (value.IsInteger()) {
225 return Utils::IsUint(32, Integer::Cast(value).AsInt64Value());
226 }
227 return false;
228
229 case kUnboxedInt64:
230 return value.IsInteger();
231
232 case kUnboxedFloat:
233 case kUnboxedDouble:
234 return value.IsInteger() || value.IsDouble();
235
236 default:
237 return false;
238 }
239}
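// For example: the Smi 3 is representable as kUnboxedInt32, kUnboxedUint32,
// kUnboxedInt64 or kUnboxedDouble; the integer -1 is not representable as
// kUnboxedUint32; and a non-number instance is representable only as kTagged,
// and then only when tagged_value_must_be_smi is false.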
240
241Definition* FlowGraph::TryCreateConstantReplacementFor(Definition* op,
242 const Object& value) {
243 // Check that representation of the constant matches expected representation.
244 const auto representation = op->representation();
245 if (!IsConstantRepresentable(
246 value, representation,
247 /*tagged_value_must_be_smi=*/op->Type()->IsNullableSmi())) {
248 return op;
249 }
250
251 if (((representation == kUnboxedFloat) ||
252 (representation == kUnboxedDouble)) &&
253 value.IsInteger()) {
254 // Convert the boxed constant from int to float/double.
256 Integer::Cast(value).AsDoubleValue())),
257 representation);
258 }
259
260 return GetConstant(value, representation);
261}
262
263void FlowGraph::AddToGraphInitialDefinitions(Definition* defn) {
264 defn->set_previous(graph_entry_);
265 graph_entry_->initial_definitions()->Add(defn);
266}
267
268void FlowGraph::AddToInitialDefinitions(BlockEntryWithInitialDefs* entry,
269 Definition* defn) {
270 ASSERT(defn->previous() == nullptr);
271 defn->set_previous(entry);
272 if (auto par = defn->AsParameter()) {
273 par->set_block(entry); // set cached block
274 }
275 entry->initial_definitions()->Add(defn);
276}
277
279 Instruction* instr,
281 UseKind use_kind) {
282 if (use_kind == kValue) {
283 ASSERT(instr->IsDefinition());
284 AllocateSSAIndex(instr->AsDefinition());
285 }
286 instr->InsertAfter(prev);
287 ASSERT(instr->env() == nullptr);
288 if (env != nullptr) {
289 env->DeepCopyTo(zone(), instr);
290 }
291}
292
294 Instruction* instr,
296 UseKind use_kind) {
297 InsertAfter(prev, instr, env, use_kind);
298 if (instr->env() != nullptr) {
300 }
301}
302
304 Instruction* instr,
306 UseKind use_kind) {
307 if (use_kind == kValue) {
308 ASSERT(instr->IsDefinition());
309 AllocateSSAIndex(instr->AsDefinition());
310 }
311 ASSERT(instr->env() == nullptr);
312 if (env != nullptr) {
313 env->DeepCopyTo(zone(), instr);
314 }
315 return prev->AppendInstruction(instr);
316}
318 Instruction* instr,
320 UseKind use_kind) {
321 auto result = AppendTo(prev, instr, env, use_kind);
322 if (instr->env() != nullptr) {
324 }
325 return result;
326}
327
328// A wrapper around block entries including an index of the next successor to
329// be read.
331 public:
333 : block_(block),
334 next_successor_ix_(block->last_instruction()->SuccessorCount() - 1) {}
335
336 bool HasNextSuccessor() const { return next_successor_ix_ >= 0; }
339 return block_->last_instruction()->SuccessorAt(next_successor_ix_--);
340 }
341
342 BlockEntryInstr* block() const { return block_; }
343
344 private:
345 BlockEntryInstr* block_;
346 intptr_t next_successor_ix_;
347
348 DISALLOW_ALLOCATION();
349};
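// DiscoverBlocks() below walks the graph depth-first using an explicit stack
// of BlockTraversalState entries rather than recursion, so deeply nested
// control flow cannot overflow the C stack. A block receives its preorder
// number when it is first discovered and its postorder number once all of its
// successors have been processed; reverse_postorder_ is simply postorder_
// read backwards.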
350
353
355
356 // Initialize state.
357 preorder_.Clear();
358 postorder_.Clear();
359 reverse_postorder_.Clear();
360 parent_.Clear();
361
363 graph_entry_->DiscoverBlock(nullptr, &preorder_, &parent_);
364 block_stack.Add(BlockTraversalState(graph_entry_));
365 while (!block_stack.is_empty()) {
366 BlockTraversalState& state = block_stack.Last();
367 BlockEntryInstr* block = state.block();
368 if (state.HasNextSuccessor()) {
369 // Process successors one-by-one.
370 BlockEntryInstr* succ = state.NextSuccessor();
371 if (succ->DiscoverBlock(block, &preorder_, &parent_)) {
372 block_stack.Add(BlockTraversalState(succ));
373 }
374 } else {
375 // All successors have been processed, pop the current block entry node
376 // and add it to the postorder list.
377 block_stack.RemoveLast();
378 block->set_postorder_number(postorder_.length());
379 postorder_.Add(block);
380 }
381 }
382
383 ASSERT(postorder_.length() == preorder_.length());
384
385 // Create an array of blocks in reverse postorder.
386 intptr_t block_count = postorder_.length();
387 for (intptr_t i = 0; i < block_count; ++i) {
388 reverse_postorder_.Add(postorder_[block_count - i - 1]);
389 }
390
392}
393
394void FlowGraph::MergeBlocks() {
395 bool changed = false;
396 BitVector* merged = new (zone()) BitVector(zone(), postorder().length());
397 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
398 block_it.Advance()) {
399 BlockEntryInstr* block = block_it.Current();
400 if (block->IsGraphEntry()) continue;
401 if (merged->Contains(block->postorder_number())) continue;
402
403 Instruction* last = block->last_instruction();
404 BlockEntryInstr* last_merged_block = nullptr;
405 while (auto goto_instr = last->AsGoto()) {
406 JoinEntryInstr* successor = goto_instr->successor();
407 if (successor->PredecessorCount() > 1) break;
408 if (block->try_index() != successor->try_index()) break;
409
410 // Replace all phis with their arguments prior to removing successor.
411 for (PhiIterator it(successor); !it.Done(); it.Advance()) {
412 PhiInstr* phi = it.Current();
413 Value* input = phi->InputAt(0);
414 phi->ReplaceUsesWith(input->definition());
415 input->RemoveFromUseList();
416 }
417
418 // Remove environment uses and unlink goto and block entry.
419 successor->UnuseAllInputs();
420 last->previous()->LinkTo(successor->next());
421 last->UnuseAllInputs();
422
423 last = successor->last_instruction();
424 merged->Add(successor->postorder_number());
425 last_merged_block = successor;
426 changed = true;
427 if (FLAG_trace_optimization && should_print()) {
428 THR_Print("Merged blocks B%" Pd " and B%" Pd "\n", block->block_id(),
429 successor->block_id());
430 }
431 }
432 // The new block inherits the block id of the last successor so that the
433 // order of phi inputs at its successors stays consistent with block ids.
434 if (last_merged_block != nullptr) {
435 block->set_block_id(last_merged_block->block_id());
436 }
437 }
438 // Recompute block order after changes were made.
439 if (changed) DiscoverBlocks();
440}
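// For instance, a straight-line chain B1 -> B2 -> B3 in which B2 and B3 each
// have exactly one predecessor collapses into a single block: the bodies of
// B2 and B3 are spliced onto the end of B1, any single-input phis at the
// intermediate joins are replaced by their lone argument, and B1 takes over
// B3's block id.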
441
442void FlowGraph::ComputeIsReceiverRecursive(
443 PhiInstr* phi,
444 GrowableArray<PhiInstr*>* unmark) const {
445 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) return;
446 phi->set_is_receiver(PhiInstr::kReceiver);
447 for (intptr_t i = 0; i < phi->InputCount(); ++i) {
448 Definition* def = phi->InputAt(i)->definition();
449 if (def->IsParameter() && (def->AsParameter()->env_index() == 0)) continue;
450 if (!def->IsPhi()) {
451 phi->set_is_receiver(PhiInstr::kNotReceiver);
452 break;
453 }
454 ComputeIsReceiverRecursive(def->AsPhi(), unmark);
455 if (def->AsPhi()->is_receiver() == PhiInstr::kNotReceiver) {
456 phi->set_is_receiver(PhiInstr::kNotReceiver);
457 break;
458 }
459 }
460
461 if (phi->is_receiver() == PhiInstr::kNotReceiver) {
462 unmark->Add(phi);
463 }
464}
465
466void FlowGraph::ComputeIsReceiver(PhiInstr* phi) const {
467 GrowableArray<PhiInstr*> unmark;
468 ComputeIsReceiverRecursive(phi, &unmark);
469
470 // Now drain unmark.
471 while (!unmark.is_empty()) {
472 PhiInstr* phi = unmark.RemoveLast();
473 for (Value::Iterator it(phi->input_use_list()); !it.Done(); it.Advance()) {
474 PhiInstr* use = it.Current()->instruction()->AsPhi();
475 if ((use != nullptr) && (use->is_receiver() == PhiInstr::kReceiver)) {
476 use->set_is_receiver(PhiInstr::kNotReceiver);
477 unmark.Add(use);
478 }
479 }
480 }
481}
482
483bool FlowGraph::IsReceiver(Definition* def) const {
484 def = def->OriginalDefinition(); // Could be redefined.
485 if (def->IsParameter()) return (def->AsParameter()->env_index() == 0);
486 if (!def->IsPhi() || graph_entry()->HasSingleEntryPoint()) {
487 return false;
488 }
489 PhiInstr* phi = def->AsPhi();
490 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) {
491 return (phi->is_receiver() == PhiInstr::kReceiver);
492 }
493 // Not known if this phi is the receiver yet. Compute it now.
494 ComputeIsReceiver(phi);
495 return (phi->is_receiver() == PhiInstr::kReceiver);
496}
497
498FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
499 InstanceCallInstr* call,
500 UntaggedFunction::Kind kind) const {
501 if (!FLAG_use_cha_deopt && !isolate_group()->all_classes_finalized()) {
502 // Even if the class or function is private, lazy class finalization
503 // may later add overriding methods.
504 return ToCheck::kCheckCid;
505 }
506
507 // Best effort to get the receiver class.
508 Value* receiver = call->Receiver();
509 Class& receiver_class = Class::Handle(zone());
510 bool receiver_maybe_null = false;
511 if (function().IsDynamicFunction() && IsReceiver(receiver->definition())) {
512 // Call receiver is callee receiver: calling "this.g()" in f().
513 receiver_class = function().Owner();
514 } else {
515 // Get the receiver's compile type. Note that
516 // we allow nullable types, which may result in just generating
517 // a null check rather than the more elaborate class check.
518 CompileType* type = receiver->Type();
519 const intptr_t receiver_cid = type->ToNullableCid();
520 if (receiver_cid != kDynamicCid) {
521 receiver_class = isolate_group()->class_table()->At(receiver_cid);
522 receiver_maybe_null = type->is_nullable();
523 } else {
524 const AbstractType* atype = type->ToAbstractType();
525 if (atype->IsInstantiated() && atype->HasTypeClass() &&
526 !atype->IsDynamicType()) {
527 if (type->is_nullable()) {
528 receiver_maybe_null = true;
529 }
530 receiver_class = atype->type_class();
531 if (receiver_class.is_implemented()) {
532 receiver_class = Class::null();
533 }
534 }
535 }
536 }
537
538 // Useful receiver class information?
539 if (receiver_class.IsNull()) {
540 return ToCheck::kCheckCid;
541 } else if (call->HasICData()) {
542 // If the static class type does not match information found in ICData
543 // (which may be "guessed"), then bail, since subsequent code generation
544 // (AOT and JIT) for inlining uses the latter.
545 // TODO(ajcbik): improve this by using the static class.
546 const intptr_t cid = receiver_class.id();
547 const ICData* data = call->ic_data();
548 bool match = false;
549 Class& cls = Class::Handle(zone());
550 Function& fun = Function::Handle(zone());
551 for (intptr_t i = 0, len = data->NumberOfChecks(); i < len; i++) {
552 if (!data->IsUsedAt(i)) {
553 continue; // do not consider
554 }
555 fun = data->GetTargetAt(i);
556 cls = fun.Owner();
557 if (data->GetReceiverClassIdAt(i) == cid || cls.id() == cid) {
558 match = true;
559 break;
560 }
561 }
562 if (!match) {
563 return ToCheck::kCheckCid;
564 }
565 }
566
567 const String& method_name =
568 (kind == UntaggedFunction::kMethodExtractor)
569 ? String::Handle(zone(), Field::NameFromGetter(call->function_name()))
570 : call->function_name();
571
572 // If the receiver can have the null value, exclude any method
573 // that is actually valid on a null receiver.
574 if (receiver_maybe_null) {
575 const Class& null_class =
576 Class::Handle(zone(), isolate_group()->object_store()->null_class());
577 Function& target = Function::Handle(zone());
578 if (null_class.EnsureIsFinalized(thread()) == Error::null()) {
579 target = Resolver::ResolveDynamicAnyArgs(zone(), null_class, method_name);
580 }
581 if (!target.IsNull()) {
582 return ToCheck::kCheckCid;
583 }
584 }
585
586 // Use CHA to determine if the method is not overridden by any subclass
587 // of the receiver class. Any methods that are valid when the receiver
588 // has a null value are excluded above (to avoid throwing an exception
589 // on something valid, like null.hashCode).
590 intptr_t subclass_count = 0;
591 CHA& cha = thread()->compiler_state().cha();
592 if (!cha.HasOverride(receiver_class, method_name, &subclass_count)) {
593 if (FLAG_trace_cha) {
594 THR_Print(
595 " **(CHA) Instance call needs no class check since there "
596 "are no overrides of method '%s' on '%s'\n",
597 method_name.ToCString(), receiver_class.ToCString());
598 }
599 if (FLAG_use_cha_deopt) {
600 cha.AddToGuardedClassesForSubclassCount(receiver_class, subclass_count);
601 }
602 return receiver_maybe_null ? ToCheck::kCheckNull : ToCheck::kNoCheck;
603 }
604 return ToCheck::kCheckCid;
605}
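// Summary of the result: kNoCheck means CHA found no override of the method
// below the receiver's class and the receiver cannot be null; kCheckNull means
// a null check alone suffices; kCheckCid means a class-id check is still
// required (unknown receiver class, an ICData mismatch, a method that also
// resolves on null, or an existing override).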
606
607Instruction* FlowGraph::CreateCheckClass(Definition* to_check,
608 const Cids& cids,
609 intptr_t deopt_id,
610 const InstructionSource& source) {
611 if (cids.IsMonomorphic() && cids.MonomorphicReceiverCid() == kSmiCid) {
612 return new (zone())
613 CheckSmiInstr(new (zone()) Value(to_check), deopt_id, source);
614 }
615 return new (zone())
616 CheckClassInstr(new (zone()) Value(to_check), deopt_id, cids, source);
617}
618
619Definition* FlowGraph::CreateCheckBound(Definition* length,
620 Definition* index,
621 intptr_t deopt_id) {
622 Value* val1 = new (zone()) Value(length);
623 Value* val2 = new (zone()) Value(index);
624 if (CompilerState::Current().is_aot()) {
625 return new (zone()) GenericCheckBoundInstr(val1, val2, deopt_id);
626 }
627 return new (zone()) CheckArrayBoundInstr(val1, val2, deopt_id);
628}
629
630void FlowGraph::AddExactnessGuard(InstanceCallInstr* call,
631 intptr_t receiver_cid) {
632 const Class& cls =
633 Class::Handle(zone(), isolate_group()->class_table()->At(receiver_cid));
634
635 Definition* load_type_args = new (zone()) LoadFieldInstr(
636 call->Receiver()->CopyWithType(),
637 Slot::GetTypeArgumentsSlotFor(thread(), cls), call->source());
638 InsertBefore(call, load_type_args, call->env(), FlowGraph::kValue);
639
640 const AbstractType& type =
641 AbstractType::Handle(zone(), call->ic_data()->receivers_static_type());
642 ASSERT(!type.IsNull());
643 const TypeArguments& args = TypeArguments::ZoneHandle(
644 zone(), Type::Cast(type).GetInstanceTypeArguments(thread()));
645 Instruction* guard = new (zone()) CheckConditionInstr(
646 new StrictCompareInstr(call->source(), Token::kEQ_STRICT,
647 new (zone()) Value(load_type_args),
648 new (zone()) Value(GetConstant(args)),
649 /*needs_number_check=*/false, call->deopt_id()),
650 call->deopt_id());
651 InsertBefore(call, guard, call->env(), FlowGraph::kEffect);
652}
653
654// Verify that a redefinition dominates all uses of the redefined value.
655bool FlowGraph::VerifyRedefinitions() {
656 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
657 block_it.Advance()) {
658 for (ForwardInstructionIterator instr_it(block_it.Current());
659 !instr_it.Done(); instr_it.Advance()) {
660 RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition();
661 if (redefinition != nullptr) {
662 Definition* original = redefinition->value()->definition();
663 for (Value::Iterator it(original->input_use_list()); !it.Done();
664 it.Advance()) {
665 Value* original_use = it.Current();
666 if (original_use->instruction() == redefinition) {
667 continue;
668 }
669 if (original_use->instruction()->IsDominatedBy(redefinition)) {
670 FlowGraphPrinter::PrintGraph("VerifyRedefinitions", this);
671 THR_Print("%s\n", redefinition->ToCString());
672 THR_Print("use=%s\n", original_use->instruction()->ToCString());
673 return false;
674 }
675 }
676 }
677 }
678 }
679 return true;
680}
681
683 intptr_t variable_count,
684 const GrowableArray<BlockEntryInstr*>& postorder)
685 : zone_(Thread::Current()->zone()),
686 variable_count_(variable_count),
687 postorder_(postorder),
688 live_out_(postorder.length()),
689 kill_(postorder.length()),
690 live_in_(postorder.length()) {}
691
693 BitVector* live_out = live_out_[block.postorder_number()];
694 bool changed = false;
695 Instruction* last = block.last_instruction();
696 ASSERT(last != nullptr);
697 for (intptr_t i = 0; i < last->SuccessorCount(); i++) {
698 BlockEntryInstr* succ = last->SuccessorAt(i);
699 ASSERT(succ != nullptr);
700 if (live_out->AddAll(live_in_[succ->postorder_number()])) {
701 changed = true;
702 }
703 }
704 return changed;
705}
706
708 BitVector* live_out = live_out_[block.postorder_number()];
709 BitVector* kill = kill_[block.postorder_number()];
710 BitVector* live_in = live_in_[block.postorder_number()];
711 return live_in->KillAndAdd(kill, live_out);
712}
713
715 const intptr_t block_count = postorder_.length();
716 bool changed;
717 do {
718 changed = false;
719
720 for (intptr_t i = 0; i < block_count; i++) {
721 const BlockEntryInstr& block = *postorder_[i];
722
723 // Live-in set depends only on kill set which does not
724 // change in this loop and live-out set. If live-out
725 // set does not change there is no need to recompute
726 // live-in set.
727 if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
728 changed = true;
729 }
730 }
731 } while (changed);
732}
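// This is the usual backward liveness fixed point, roughly:
//
//   live_out(B) = union of live_in(S) over every successor S of B
//   live_in(B)  = gen(B) | (live_out(B) & ~kill(B))
//
// where gen(B) (variables read before any write in B) is seeded into live_in_
// by ComputeInitialSets(). Iteration stops once no live_out set grows further.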
733
735 const intptr_t block_count = postorder_.length();
736 for (intptr_t i = 0; i < block_count; i++) {
738 kill_.Add(new (zone()) BitVector(zone(), variable_count_));
739 live_in_.Add(new (zone()) BitVector(zone(), variable_count_));
740 }
741
744}
745
746static void PrintBitVector(const char* tag, BitVector* v) {
747 THR_Print("%s:", tag);
748 for (BitVector::Iterator it(v); !it.Done(); it.Advance()) {
749 THR_Print(" %" Pd "", it.Current());
750 }
751 THR_Print("\n");
752}
753
755 const intptr_t block_count = postorder_.length();
756 for (intptr_t i = 0; i < block_count; i++) {
757 BlockEntryInstr* block = postorder_[i];
758 THR_Print("block @%" Pd " -> ", block->block_id());
759
760 Instruction* last = block->last_instruction();
761 for (intptr_t j = 0; j < last->SuccessorCount(); j++) {
762 BlockEntryInstr* succ = last->SuccessorAt(j);
763 THR_Print(" @%" Pd "", succ->block_id());
764 }
765 THR_Print("\n");
766
767 PrintBitVector(" live out", live_out_[i]);
768 PrintBitVector(" kill", kill_[i]);
769 PrintBitVector(" live in", live_in_[i]);
770 }
771}
772
773// Computes liveness information for local variables.
775 public:
777 : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()),
778 flow_graph_(flow_graph),
779 assigned_vars_() {}
780
781 // For every block (in preorder) compute and return the set of variables
782 // that have new assigned values flowing out of that block.
784 // We can't directly return kill_ because it uses postorder numbering while
785 // SSA construction uses preorder numbering internally.
786 // We have to permute postorder into preorder.
787 assigned_vars_.Clear();
788
789 const intptr_t block_count = flow_graph_->preorder().length();
790 for (intptr_t i = 0; i < block_count; i++) {
791 BlockEntryInstr* block = flow_graph_->preorder()[i];
792 // All locals are assigned inside a try{} block.
793 // This is a safe approximation and workaround to force insertion of
794 // phis for stores that appear non-live because of the way catch-blocks
795 // are connected to the graph: They normally are dominated by the
796 // try-entry, but are direct successors of the graph entry in our flow
797 // graph.
798 // TODO(fschneider): Improve this approximation by better modeling the
799 // actual data flow to reduce the number of redundant phis.
800 BitVector* kill = GetKillSet(block);
801 if (block->InsideTryBlock()) {
802 kill->SetAll();
803 } else {
804 kill->Intersect(GetLiveOutSet(block));
805 }
806 assigned_vars_.Add(kill);
807 }
808
809 return assigned_vars_;
810 }
811
812 // Returns true if the value set by the given store reaches any load from the
813 // same local variable.
815 if (store->local().Equals(*flow_graph_->CurrentContextVar())) {
816 return true;
817 }
818
819 if (store->is_dead()) {
820 return false;
821 }
822 if (store->is_last()) {
823 const intptr_t index = flow_graph_->EnvIndex(&store->local());
824 return GetLiveOutSet(block)->Contains(index);
825 }
826
827 return true;
828 }
829
830 // Returns true if the given load is the last for the local and the value
831 // of the local will not flow into another one.
833 if (load->local().Equals(*flow_graph_->CurrentContextVar())) {
834 return false;
835 }
836 const intptr_t index = flow_graph_->EnvIndex(&load->local());
837 return load->is_last() && !GetLiveOutSet(block)->Contains(index);
838 }
839
840 private:
841 virtual void ComputeInitialSets();
842
843 const FlowGraph* flow_graph_;
844 GrowableArray<BitVector*> assigned_vars_;
845};
846
848 const intptr_t block_count = postorder_.length();
849
850 BitVector* last_loads = new (zone()) BitVector(zone(), variable_count_);
851 for (intptr_t i = 0; i < block_count; i++) {
852 BlockEntryInstr* block = postorder_[i];
853
854 BitVector* kill = kill_[i];
855 BitVector* live_in = live_in_[i];
856 last_loads->Clear();
857
858 // There is an implicit use (load-local) of every local variable at each
859 // call inside a try{} block and every call has an implicit control-flow
860 // to the catch entry. As an approximation we mark all locals as live
861 // inside try{}.
862 // TODO(fschneider): Improve this approximation, since not all local
863 // variable stores actually reach a call.
864 if (block->InsideTryBlock()) {
865 live_in->SetAll();
866 continue;
867 }
868
869 // Iterate backwards starting at the last instruction.
870 for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) {
871 Instruction* current = it.Current();
872
873 LoadLocalInstr* load = current->AsLoadLocal();
874 if (load != nullptr) {
875 const intptr_t index = flow_graph_->EnvIndex(&load->local());
876 if (index >= live_in->length()) continue; // Skip tmp_locals.
877 live_in->Add(index);
878 if (!last_loads->Contains(index)) {
879 last_loads->Add(index);
880 load->mark_last();
881 }
882 continue;
883 }
884
885 StoreLocalInstr* store = current->AsStoreLocal();
886 if (store != nullptr) {
887 const intptr_t index = flow_graph_->EnvIndex(&store->local());
888 if (index >= live_in->length()) continue; // Skip tmp_locals.
889 if (kill->Contains(index)) {
890 if (!live_in->Contains(index)) {
891 store->mark_dead();
892 }
893 } else {
894 if (!live_in->Contains(index)) {
895 store->mark_last();
896 }
897 kill->Add(index);
898 }
899 live_in->Remove(index);
900 continue;
901 }
902 }
903
904 // For blocks with parameter or special parameter instructions we add them
905 // to the kill set.
906 const bool is_function_entry = block->IsFunctionEntry();
907 const bool is_osr_entry = block->IsOsrEntry();
908 const bool is_catch_block_entry = block->IsCatchBlockEntry();
909 if (is_function_entry || is_osr_entry || is_catch_block_entry) {
910 const intptr_t parameter_count =
911 (is_osr_entry || is_catch_block_entry)
912 ? flow_graph_->variable_count()
913 : flow_graph_->num_direct_parameters();
914 for (intptr_t i = 0; i < parameter_count; ++i) {
915 live_in->Remove(i);
916 kill->Add(i);
917 }
918 }
919 if (is_function_entry) {
920 if (flow_graph_->parsed_function().has_arg_desc_var()) {
921 const auto index = flow_graph_->ArgumentDescriptorEnvIndex();
922 live_in->Remove(index);
923 kill->Add(index);
924 }
925 }
926 }
927}
928
929void FlowGraph::ComputeSSA(
930 ZoneGrowableArray<Definition*>* inlining_parameters) {
931 GrowableArray<BitVector*> dominance_frontier;
933
934#ifdef DEBUG
935 if (inlining_parameters != nullptr) {
936 for (intptr_t i = 0, n = inlining_parameters->length(); i < n; ++i) {
937 Definition* defn = (*inlining_parameters)[i];
938 if (defn->IsConstant()) {
939 ASSERT(defn->previous() == graph_entry_);
940 ASSERT(defn->HasSSATemp());
942 } else {
943 ASSERT(defn->previous() == nullptr);
944 ASSERT(!defn->HasSSATemp());
945 }
946 }
947 } else {
949 }
950#endif
951
952 ComputeDominators(&dominance_frontier);
953
954 VariableLivenessAnalysis variable_liveness(this);
955 variable_liveness.Analyze();
956
957 GrowableArray<PhiInstr*> live_phis;
958
959 InsertPhis(preorder_, variable_liveness.ComputeAssignedVars(),
960 dominance_frontier, &live_phis);
961
962 // Rename uses to reference inserted phis where appropriate.
963 // Collect phis that reach a non-environment use.
964 Rename(&live_phis, &variable_liveness, inlining_parameters);
965
966 // Propagate alive mark transitively from alive phis and then remove
967 // non-live ones.
968 RemoveDeadPhis(&live_phis);
969}
970
971// Compute immediate dominators and the dominance frontier for each basic
972// block. As a side effect of the algorithm, sets the immediate dominator
973// of each basic block.
974//
975// dominance_frontier: an output parameter encoding the dominance frontier.
976// The array maps the preorder block number of a block to the set of
977// (preorder block numbers of) blocks in the dominance frontier.
978void FlowGraph::ComputeDominators(
979 GrowableArray<BitVector*>* dominance_frontier) {
980 // Use the SEMI-NCA algorithm to compute dominators. This is a two-pass
981 // version of the Lengauer-Tarjan algorithm (LT is normally three passes)
982 // that eliminates a pass by using nearest-common ancestor (NCA) to
983 // compute immediate dominators from semidominators. It also removes a
984 // level of indirection in the link-eval forest data structure.
985 //
986 // The algorithm is described in Georgiadis, Tarjan, and Werneck's
987 // "Finding Dominators in Practice".
988 // https://renatowerneck.files.wordpress.com/2016/06/gtw06-dominators.pdf
989
990 // All arrays are maps between preorder basic-block numbers.
991 intptr_t size = parent_.length();
992 GrowableArray<intptr_t> idom(size); // Immediate dominator.
993 GrowableArray<intptr_t> semi(size); // Semidominator.
994 GrowableArray<intptr_t> label(size); // Label for link-eval forest.
995
996 // 1. First pass: compute semidominators as in Lengauer-Tarjan.
997 // Semidominators are computed from a depth-first spanning tree and are an
998 // approximation of immediate dominators.
999
1000 // Use a link-eval data structure with path compression. Implement path
1001 // compression in place by mutating the parent array. Each block has a
1002 // label, which is the minimum block number on the compressed path.
1003
1004 // Initialize idom, semi, and label used by SEMI-NCA. Initialize the
1005 // dominance frontier output array.
1006 for (intptr_t i = 0; i < size; ++i) {
1007 idom.Add(parent_[i]);
1008 semi.Add(i);
1009 label.Add(i);
1010 dominance_frontier->Add(new (zone()) BitVector(zone(), size));
1011 }
1012
1013 // Loop over the blocks in reverse preorder (not including the graph
1014 // entry). Clear the dominated blocks in the graph entry in case
1015 // ComputeDominators is used to recompute them.
1016 preorder_[0]->ClearDominatedBlocks();
1017 for (intptr_t block_index = size - 1; block_index >= 1; --block_index) {
1018 // Loop over the predecessors.
1019 BlockEntryInstr* block = preorder_[block_index];
1020 // Clear the immediately dominated blocks in case ComputeDominators is
1021 // used to recompute them.
1022 block->ClearDominatedBlocks();
1023 for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) {
1024 BlockEntryInstr* pred = block->PredecessorAt(i);
1025 ASSERT(pred != nullptr);
1026
1027 // Look for the semidominator by ascending the semidominator path
1028 // starting from pred.
1029 intptr_t pred_index = pred->preorder_number();
1030 intptr_t best = pred_index;
1031 if (pred_index > block_index) {
1032 CompressPath(block_index, pred_index, &parent_, &label);
1033 best = label[pred_index];
1034 }
1035
1036 // Update the semidominator if we've found a better one.
1037 semi[block_index] = Utils::Minimum(semi[block_index], semi[best]);
1038 }
1039
1040 // Now use label for the semidominator.
1041 label[block_index] = semi[block_index];
1042 }
1043
1044 // 2. Compute the immediate dominators as the nearest common ancestor of
1045 // spanning tree parent and semidominator, for all blocks except the entry.
1046 for (intptr_t block_index = 1; block_index < size; ++block_index) {
1047 intptr_t dom_index = idom[block_index];
1048 while (dom_index > semi[block_index]) {
1049 dom_index = idom[dom_index];
1050 }
1051 idom[block_index] = dom_index;
1052 preorder_[dom_index]->AddDominatedBlock(preorder_[block_index]);
1053 }
1054
1055 // 3. Now compute the dominance frontier for all blocks. This is the
1056 // algorithm from "A Simple, Fast Dominance Algorithm" (Figure 5), which is
1057 // attributed to a paper by Ferrante et al. There is no bookkeeping
1058 // required to avoid adding a block twice to the same block's dominance
1059 // frontier because we use a set to represent the dominance frontier.
1060 for (intptr_t block_index = 0; block_index < size; ++block_index) {
1061 BlockEntryInstr* block = preorder_[block_index];
1062 intptr_t count = block->PredecessorCount();
1063 if (count <= 1) continue;
1064 for (intptr_t i = 0; i < count; ++i) {
1065 BlockEntryInstr* runner = block->PredecessorAt(i);
1066 while (runner != block->dominator()) {
1067 (*dominance_frontier)[runner->preorder_number()]->Add(block_index);
1068 runner = runner->dominator();
1069 }
1070 }
1071 }
1072}
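// Worked example: in a diamond CFG with A -> {B, C} and B, C -> D, block A
// immediately dominates B, C and D, while the dominance frontier of both B
// and C is {D}. InsertPhis() below therefore places a phi in D for exactly
// those variables that are assigned in B or in C.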
1073
1074void FlowGraph::CompressPath(intptr_t start_index,
1075 intptr_t current_index,
1076 GrowableArray<intptr_t>* parent,
1077 GrowableArray<intptr_t>* label) {
1078 intptr_t next_index = (*parent)[current_index];
1079 if (next_index > start_index) {
1080 CompressPath(start_index, next_index, parent, label);
1081 (*label)[current_index] =
1082 Utils::Minimum((*label)[current_index], (*label)[next_index]);
1083 (*parent)[current_index] = (*parent)[next_index];
1084 }
1085}
1086
1087void FlowGraph::InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder,
1088 const GrowableArray<BitVector*>& assigned_vars,
1089 const GrowableArray<BitVector*>& dom_frontier,
1090 GrowableArray<PhiInstr*>* live_phis) {
1091 const intptr_t block_count = preorder.length();
1092 // Map preorder block number to the highest variable index that has a phi
1093 // in that block. Use it to avoid inserting multiple phis for the same
1094 // variable.
1095 GrowableArray<intptr_t> has_already(block_count);
1096 // Map preorder block number to the highest variable index for which the
1097 // block went on the worklist. Use it to avoid adding the same block to
1098 // the worklist more than once for the same variable.
1099 GrowableArray<intptr_t> work(block_count);
1100
1101 // Initialize has_already and work.
1102 for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
1103 has_already.Add(-1);
1104 work.Add(-1);
1105 }
1106
1107 // Insert phis for each variable in turn.
1108 GrowableArray<BlockEntryInstr*> worklist;
1109 for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) {
1110 const bool always_live =
1111 !FLAG_prune_dead_locals || IsImmortalVariable(var_index);
1112 // Add to the worklist each block containing an assignment.
1113 for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
1114 if (assigned_vars[block_index]->Contains(var_index)) {
1115 work[block_index] = var_index;
1116 worklist.Add(preorder[block_index]);
1117 }
1118 }
1119
1120 while (!worklist.is_empty()) {
1121 BlockEntryInstr* current = worklist.RemoveLast();
1122 // Ensure a phi for each block in the dominance frontier of current.
1123 for (BitVector::Iterator it(dom_frontier[current->preorder_number()]);
1124 !it.Done(); it.Advance()) {
1125 int index = it.Current();
1126 if (has_already[index] < var_index) {
1127 JoinEntryInstr* join = preorder[index]->AsJoinEntry();
1128 ASSERT(join != nullptr);
1129 PhiInstr* phi = join->InsertPhi(
1130 var_index, variable_count() + join->stack_depth());
1131 if (always_live) {
1132 phi->mark_alive();
1133 live_phis->Add(phi);
1134 }
1135 has_already[index] = var_index;
1136 if (work[index] < var_index) {
1137 work[index] = var_index;
1138 worklist.Add(join);
1139 }
1140 }
1141 }
1142 }
1143 }
1144}
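// Note on pruning: phis are created eagerly along dominance frontiers, but
// only phis for variables that may never be pruned (FLAG_prune_dead_locals
// disabled, or immortal variables such as the current context) are marked
// alive here. The remaining phis start out dead and are only revived during
// renaming when a LoadLocal actually reads them; RemoveDeadPhis() drops the
// rest.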
1145
1146void FlowGraph::CreateCommonConstants() {
1147 constant_null_ = GetConstant(Object::ZoneHandle());
1148 constant_dead_ = GetConstant(Object::optimized_out());
1149}
1150
1151void FlowGraph::AddSyntheticPhis(BlockEntryInstr* block) {
1153 if (auto join = block->AsJoinEntry()) {
1154 const intptr_t local_phi_count = variable_count() + join->stack_depth();
1155 // Never insert more phis than there were OSR variables.
1156 const intptr_t osr_phi_count =
1157 Utils::Minimum(local_phi_count, osr_variable_count());
1158 for (intptr_t i = variable_count(); i < osr_phi_count; ++i) {
1159 if (join->phis() == nullptr || (*join->phis())[i] == nullptr) {
1160 join->InsertPhi(i, local_phi_count)->mark_alive();
1161 }
1162 }
1163 }
1164}
1165
1166void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
1167 VariableLivenessAnalysis* variable_liveness,
1168 ZoneGrowableArray<Definition*>* inlining_parameters) {
1169 GraphEntryInstr* entry = graph_entry();
1170
1171 // Add global constants to the initial definitions.
1172 CreateCommonConstants();
1173
1174 // Initial renaming environment.
1175 GrowableArray<Definition*> env(variable_count());
1176 env.FillWith(constant_dead(), 0, num_direct_parameters());
1178
1179 if (entry->catch_entries().is_empty()) {
1180 ASSERT(entry->unchecked_entry() != nullptr ? entry->SuccessorCount() == 2
1181 : entry->SuccessorCount() == 1);
1182 }
1183
1184 // For OSR on a non-empty stack, insert synthetic phis on every joining entry.
1185 // These phis are synthetic since they are not driven by live variable
1186 // analysis, but merely serve the purpose of merging stack slots from
1187 // parameters and other predecessors at the block in which OSR occurred.
1188 // The original definition could flow into a join via multiple predecessors
1189 // but with the same definition, not requiring a phi. However, with an OSR
1190 // entry in a different block, phis are required to merge the OSR variable
1191 // and original definition where there was no phi. Therefore, we need
1192 // synthetic phis in all (reachable) blocks, not just in the first join.
1193 if (IsCompiledForOsr()) {
1194 for (intptr_t i = 0, n = preorder().length(); i < n; ++i) {
1195 AddSyntheticPhis(preorder()[i]);
1196 }
1197 }
1198
1199 RenameRecursive(entry, &env, live_phis, variable_liveness,
1200 inlining_parameters);
1201
1202#if defined(DEBUG)
1203 ValidatePhis();
1204#endif // defined(DEBUG)
1205}
1206
1207void FlowGraph::ComputeLocationsOfFixedParameters(
1208 Zone* zone,
1209 const Function& function,
1210 bool should_assign_stack_locations /* = false */,
1211 compiler::ParameterInfoArray* parameter_info /* = nullptr */) {
1214 [&](intptr_t i) {
1215 const intptr_t index = (function.IsFactory() ? (i - 1) : i);
1216 return index >= 0 ? ParameterRepresentationAt(function, index)
1217 : kTagged;
1218 },
1219 should_assign_stack_locations, parameter_info);
1220}
1221
1222void FlowGraph::PopulateEnvironmentFromFunctionEntry(
1223 FunctionEntryInstr* function_entry,
1225 GrowableArray<PhiInstr*>* live_phis,
1226 VariableLivenessAnalysis* variable_liveness,
1227 ZoneGrowableArray<Definition*>* inlining_parameters) {
1229
1230 // Check if inlining_parameters include a type argument vector parameter.
1231 const intptr_t inlined_type_args_param =
1232 ((inlining_parameters != nullptr) && function().IsGeneric()) ? 1 : 0;
1233
1234 ASSERT(variable_count() == env->length());
1235 ASSERT(function().num_fixed_parameters() <= env->length());
1236
1237 const bool copies_parameters = function().MakesCopyOfParameters();
1238 for (intptr_t i = 0; i < function().num_fixed_parameters(); i++) {
1239 const auto& [location, representation] = direct_parameter_locations_[i];
1240 if (location.IsInvalid()) {
1241 ASSERT(function().MakesCopyOfParameters());
1242 continue;
1243 }
1244
1245 const intptr_t env_index =
1246 copies_parameters ? EnvIndex(parsed_function_.RawParameterVariable(i))
1247 : i;
1248
1249 auto param = new (zone())
1250 ParameterInstr(function_entry,
1251 /*env_index=*/env_index,
1252 /*param_index=*/i, location, representation);
1253
1254 AllocateSSAIndex(param);
1255 AddToInitialDefinitions(function_entry, param);
1256 (*env)[env_index] = param;
1257 }
1258
1259 // Override the entries in the renaming environment which are special (i.e.
1260 // inlining arguments, type parameter, args descriptor, context, ...)
1261 {
1262 // Replace parameter slots with inlining definitions coming in.
1263 if (inlining_parameters != nullptr) {
1264 for (intptr_t i = 0; i < function().NumParameters(); ++i) {
1265 Definition* defn = (*inlining_parameters)[inlined_type_args_param + i];
1266 if (defn->IsConstant()) {
1267 ASSERT(defn->previous() == graph_entry_);
1268 ASSERT(defn->HasSSATemp());
1269 } else {
1270 ASSERT(defn->previous() == nullptr);
1271 AllocateSSAIndex(defn);
1272 AddToInitialDefinitions(function_entry, defn);
1273 }
1274 intptr_t index = EnvIndex(parsed_function_.RawParameterVariable(i));
1275 (*env)[index] = defn;
1276 }
1277 }
1278
1279 // Replace the type arguments slot with a special parameter.
1280 const bool reify_generic_argument = function().IsGeneric();
1281 if (reify_generic_argument) {
1282 ASSERT(parsed_function().function_type_arguments() != nullptr);
1283 Definition* defn;
1284 if (inlining_parameters == nullptr) {
1285 // Note: If we are not inlining, then the prologue builder will
1286 // take care of checking that we got the correct reified type
1287 // arguments. This includes checking the argument descriptor in order
1288 // to even find out if the parameter was passed or not.
1289 defn = constant_dead();
1290 } else {
1291 defn = (*inlining_parameters)[0];
1292 }
1293 if (defn->IsConstant()) {
1294 ASSERT(defn->previous() == graph_entry_);
1295 ASSERT(defn->HasSSATemp());
1296 } else {
1297 ASSERT(defn->previous() == nullptr);
1298 AllocateSSAIndex(defn);
1299 AddToInitialDefinitions(function_entry, defn);
1300 }
1301 (*env)[RawTypeArgumentEnvIndex()] = defn;
1302 }
1303
1304 // Replace the argument descriptor slot with a special parameter.
1305 if (parsed_function().has_arg_desc_var()) {
1306 auto defn = new (Z)
1307 ParameterInstr(function_entry, ArgumentDescriptorEnvIndex(),
1310 AllocateSSAIndex(defn);
1311 AddToInitialDefinitions(function_entry, defn);
1312 (*env)[ArgumentDescriptorEnvIndex()] = defn;
1313 }
1314 }
1315}
1316
1317static Location EnvIndexToStackLocation(intptr_t num_direct_parameters,
1318 intptr_t env_index) {
1319 return Location::StackSlot(
1320 compiler::target::frame_layout.FrameSlotForVariableIndex(
1321 num_direct_parameters - env_index),
1322 FPREG);
1323}
1324
1325void FlowGraph::PopulateEnvironmentFromOsrEntry(
1326 OsrEntryInstr* osr_entry,
1327 GrowableArray<Definition*>* env) {
1329 // During OSR, all variables and possibly a non-empty stack are
1330 // passed as parameters. The latter mimics the incoming expression
1331 // stack that was set up prior to triggering OSR.
1332 const intptr_t parameter_count = osr_variable_count();
1333 ASSERT(parameter_count == env->length());
1334 for (intptr_t i = 0; i < parameter_count; i++) {
1335 const intptr_t param_index = (i < num_direct_parameters())
1336 ? i
1337 : ParameterInstr::kNotFunctionParameter;
1338 ParameterInstr* param = new (zone()) ParameterInstr(
1339 osr_entry, /*env_index=*/i, param_index,
1340 EnvIndexToStackLocation(num_direct_parameters(), i), kTagged);
1341 AllocateSSAIndex(param);
1342 AddToInitialDefinitions(osr_entry, param);
1343 (*env)[i] = param;
1344 }
1345}
1346
1347void FlowGraph::PopulateEnvironmentFromCatchEntry(
1348 CatchBlockEntryInstr* catch_entry,
1349 GrowableArray<Definition*>* env) {
1350 const intptr_t raw_exception_var_envindex =
1351 catch_entry->raw_exception_var() != nullptr
1352 ? EnvIndex(catch_entry->raw_exception_var())
1353 : -1;
1354 const intptr_t raw_stacktrace_var_envindex =
1355 catch_entry->raw_stacktrace_var() != nullptr
1356 ? EnvIndex(catch_entry->raw_stacktrace_var())
1357 : -1;
1358
1359 // Add real definitions for all locals and parameters.
1360 ASSERT(variable_count() == env->length());
1361 intptr_t additional_slots = 0;
1362 for (intptr_t i = 0, n = variable_count(); i < n; ++i) {
1363 // Local variables will arrive on the stack while the exception and
1364 // stack trace will be passed in fixed registers.
1365 Location loc;
1366 if (raw_exception_var_envindex == i) {
1367 loc = LocationExceptionLocation();
1368 } else if (raw_stacktrace_var_envindex == i) {
1369 loc = LocationStackTraceLocation();
1370 } else {
1371 if (i < num_direct_parameters()) {
1372 const auto [param_loc, param_rep] = GetDirectParameterInfoAt(i);
1373 if (param_rep == kTagged && param_loc.IsStackSlot()) {
1374 loc = param_loc;
1375 } else {
1376 // We cannot reuse the parameter location for synchronization purposes
1377 // because it is either a register location or an untagged
1378 // location. This means we need to allocate an additional slot
1379 // for synchronization above the slots reserved for other variables.
1380 loc = EnvIndexToStackLocation(num_direct_parameters(),
1381 n + additional_slots);
1382 additional_slots++;
1383 }
1384 } else {
1385 loc = EnvIndexToStackLocation(num_direct_parameters(), i);
1386 }
1387 }
1388 auto param = new (Z) ParameterInstr(
1389 catch_entry, /*env_index=*/i,
1390 /*param_index=*/ParameterInstr::kNotFunctionParameter, loc, kTagged);
1391
1392 AllocateSSAIndex(param); // New SSA temp.
1393 (*env)[i] = param;
1394 AddToInitialDefinitions(catch_entry, param);
1395 }
1396
1397 graph_entry_->set_fixed_slot_count(Utils::Maximum(
1398 graph_entry_->fixed_slot_count(),
1399 variable_count() - num_direct_parameters() + additional_slots));
1400}
1401
1402void FlowGraph::AttachEnvironment(Instruction* instr,
1403 GrowableArray<Definition*>* env) {
1404 auto deopt_env = Environment::From(zone(), *env, num_direct_parameters_,
1405 instr->NumberOfInputsConsumedBeforeCall(),
1406 parsed_function_);
1407 instr->SetEnvironment(deopt_env);
1408 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) {
1409 Value* use = it.CurrentValue();
1410 use->definition()->AddEnvUse(use);
1411 }
1412}
1413
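// RenameRecursive() below is the classic SSA renaming walk over the dominator
// tree: `env` models the abstract machine state (all locals followed by the
// expression stack), LoadLocal/StoreLocal/MakeTemp/DropTemps/Constant are
// folded into pure environment updates and removed from the graph, and when a
// block ends in a goto to a join, the outgoing environment supplies that
// predecessor's phi inputs.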
1414void FlowGraph::RenameRecursive(
1415 BlockEntryInstr* block_entry,
1416 GrowableArray<Definition*>* env,
1417 GrowableArray<PhiInstr*>* live_phis,
1418 VariableLivenessAnalysis* variable_liveness,
1419 ZoneGrowableArray<Definition*>* inlining_parameters) {
1420 // 1. Process phis first.
1421 if (auto join = block_entry->AsJoinEntry()) {
1422 if (join->phis() != nullptr) {
1423 const intptr_t local_phi_count = variable_count() + join->stack_depth();
1424 ASSERT(join->phis()->length() == local_phi_count);
1425 for (intptr_t i = 0; i < local_phi_count; ++i) {
1426 PhiInstr* phi = (*join->phis())[i];
1427 if (phi != nullptr) {
1428 (*env)[i] = phi;
1429 AllocateSSAIndex(phi); // New SSA temp.
1430 if (block_entry->InsideTryBlock() && !phi->is_alive()) {
1431 // This is a safe approximation. Inside try{} all locals are
1432 // used at every call implicitly, so we mark all phis as live
1433 // from the start.
1434 // TODO(fschneider): Improve this approximation to eliminate
1435 // more redundant phis.
1436 phi->mark_alive();
1437 live_phis->Add(phi);
1438 }
1439 }
1440 }
1441 }
1442 } else if (auto osr_entry = block_entry->AsOsrEntry()) {
1443 PopulateEnvironmentFromOsrEntry(osr_entry, env);
1444 } else if (auto function_entry = block_entry->AsFunctionEntry()) {
1446 PopulateEnvironmentFromFunctionEntry(
1447 function_entry, env, live_phis, variable_liveness, inlining_parameters);
1448 } else if (auto catch_entry = block_entry->AsCatchBlockEntry()) {
1449 PopulateEnvironmentFromCatchEntry(catch_entry, env);
1450 }
1451
1452 if (!block_entry->IsGraphEntry() &&
1453 !block_entry->IsBlockEntryWithInitialDefs()) {
1454 // Prune non-live variables at block entry by replacing their environment
1455 // slots with null.
1456 BitVector* live_in = variable_liveness->GetLiveInSet(block_entry);
1457 for (intptr_t i = 0; i < variable_count(); i++) {
1458 // TODO(fschneider): Make sure that live_in always contains the
1459 // CurrentContext variable to avoid the special case here.
1460 if (FLAG_prune_dead_locals && !live_in->Contains(i) &&
1461 !IsImmortalVariable(i)) {
1462 (*env)[i] = constant_dead();
1463 }
1464 }
1465 }
1466
1467 // Attach environment to the block entry.
1468 AttachEnvironment(block_entry, env);
1469
1470 // 2. Process normal instructions.
1471 for (ForwardInstructionIterator it(block_entry); !it.Done(); it.Advance()) {
1472 Instruction* current = it.Current();
1473
1474 // Attach current environment to the instructions that need it.
1475 if (current->NeedsEnvironment()) {
1476 AttachEnvironment(current, env);
1477 }
1478
1479 // 2a. Handle uses:
1480 // Update the expression stack renaming environment for each use by
1481 // removing the renamed value. For each use of a LoadLocal, StoreLocal,
1482 // MakeTemp, DropTemps or Constant (or any expression under OSR),
1483 // replace it with the renamed value.
1484 for (intptr_t i = current->InputCount() - 1; i >= 0; --i) {
1485 Value* v = current->InputAt(i);
1486 // Update expression stack.
1487 ASSERT(env->length() > variable_count());
1488 Definition* reaching_defn = env->RemoveLast();
1489 Definition* input_defn = v->definition();
1490 if (input_defn != reaching_defn) {
1491 // Inspect the replacing definition before making the change.
1492 if (IsCompiledForOsr()) {
1493 // Under OSR, constants can reside on the expression stack. Just
1494 // generate the constant rather than going through a synthetic phi.
1495 if (input_defn->IsConstant() && reaching_defn->IsPhi()) {
1496 ASSERT(env->length() < osr_variable_count());
1497 auto constant = GetConstant(input_defn->AsConstant()->value());
1498 current->ReplaceInEnvironment(reaching_defn, constant);
1499 reaching_defn = constant;
1500 }
1501 } else {
1502 // Note: constants can only be replaced with other constants.
1503 ASSERT(input_defn->IsLoadLocal() || input_defn->IsStoreLocal() ||
1504 input_defn->IsDropTemps() || input_defn->IsMakeTemp() ||
1505 (input_defn->IsConstant() && reaching_defn->IsConstant()));
1506 }
1507 // Assert we are not referencing nulls in the initial environment.
1508 ASSERT(reaching_defn->ssa_temp_index() != -1);
1509 // Replace the definition.
1510 v->set_definition(reaching_defn);
1511 input_defn = reaching_defn;
1512 }
1513 input_defn->AddInputUse(v);
1514 }
1515
1516 // 2b. Handle LoadLocal/StoreLocal/MakeTemp/DropTemps/Constant specially.
1517 // Other definitions are just pushed to the environment directly.
1518 Definition* result = nullptr;
1519 switch (current->tag()) {
1520 case Instruction::kLoadLocal: {
1521 LoadLocalInstr* load = current->Cast<LoadLocalInstr>();
1522
1523 // The graph construction ensures we do not have an unused LoadLocal
1524 // computation.
1525 ASSERT(load->HasTemp());
1526 const intptr_t index = EnvIndex(&load->local());
1527 result = (*env)[index];
1528
1529 PhiInstr* phi = result->AsPhi();
1530 if ((phi != nullptr) && !phi->is_alive()) {
1531 phi->mark_alive();
1532 live_phis->Add(phi);
1533 }
1534
1535 if (FLAG_prune_dead_locals &&
1536 variable_liveness->IsLastLoad(block_entry, load)) {
1537 (*env)[index] = constant_dead();
1538 }
1539
1540 // Record captured parameters so that they can be skipped when
1541 // emitting sync code inside optimized try-blocks.
1542 if (load->local().is_captured_parameter()) {
1543 captured_parameters_->Add(index);
1544 }
1545
1546 if (phi != nullptr) {
1547 // Assign type to Phi if it doesn't have a type yet.
1548 // For a Phi to appear in the local variable it either was placed
1549 // there as incoming value by renaming or it was stored there by
1550 // StoreLocal which took this Phi from another local via LoadLocal,
1551 // to which this reasoning applies recursively.
1552 //
1553 // This means that we are guaranteed to process LoadLocal for a
1554 // matching variable first, unless there was an OSR with a non-empty
1555 // expression stack. In the latter case, Phi inserted by
1556 // FlowGraph::AddSyntheticPhis for expression temp will not have an
1557 // assigned type and may be accessed by StoreLocal and subsequent
1558 // LoadLocal.
1559 //
1560 if (!phi->HasType()) {
1561 // Check if phi corresponds to the same slot.
1562 auto* phis = phi->block()->phis();
1563 if ((index < phis->length()) && (*phis)[index] == phi) {
1564 phi->UpdateType(*load->local().inferred_type());
1565 } else {
1566 ASSERT(IsCompiledForOsr() && (phi->block()->stack_depth() > 0));
1567 }
1568 }
1569 }
1570 break;
1571 }
1572
1573 case Instruction::kStoreLocal: {
1574 StoreLocalInstr* store = current->Cast<StoreLocalInstr>();
1575 const intptr_t index = EnvIndex(&store->local());
1576 result = store->value()->definition();
1577
1578 if (!FLAG_prune_dead_locals ||
1579 variable_liveness->IsStoreAlive(block_entry, store)) {
1580 (*env)[index] = result;
1581 } else {
1582 (*env)[index] = constant_dead();
1583 }
1584 break;
1585 }
1586
1587 case Instruction::kDropTemps: {
1588 // Drop temps from the environment.
1589 DropTempsInstr* drop = current->Cast<DropTempsInstr>();
1590 for (intptr_t j = 0; j < drop->num_temps(); j++) {
1591 env->RemoveLast();
1592 }
1593 if (drop->value() != nullptr) {
1594 result = drop->value()->definition();
1595 }
1596 ASSERT((drop->value() != nullptr) || !drop->HasTemp());
1597 break;
1598 }
1599
1600 case Instruction::kConstant: {
1601 ConstantInstr* constant = current->Cast<ConstantInstr>();
1602 if (constant->HasTemp()) {
1603 result = GetConstant(constant->value());
1604 }
1605 break;
1606 }
1607
1608 case Instruction::kMakeTemp: {
1609 // Simply push a #null value to the expression stack.
1610 result = constant_null_;
1611 break;
1612 }
1613
1614 case Instruction::kMoveArgument:
1615 UNREACHABLE();
1616 break;
1617
1618 case Instruction::kCheckStackOverflow:
1619 // Assert environment integrity at checkpoints.
1620 ASSERT((variable_count() +
1621 current->AsCheckStackOverflow()->stack_depth()) ==
1622 env->length());
1623 continue;
1624
1625 default:
1626 // Other definitions directly go into the environment.
1627 if (Definition* definition = current->AsDefinition()) {
1628 if (definition->HasTemp()) {
1629 // Assign fresh SSA temporary and update expression stack.
1630 AllocateSSAIndex(definition);
1631 env->Add(definition);
1632 }
1633 }
1634 continue;
1635 }
1636
1637 // Update expression stack and remove current instruction from the graph.
1638 Definition* definition = current->Cast<Definition>();
1639 if (definition->HasTemp()) {
1640 ASSERT(result != nullptr);
1641 env->Add(result);
1642 }
1643 it.RemoveCurrentFromGraph();
1644 }
1645
1646 // 3. Process dominated blocks.
1647 const bool set_stack = (block_entry == graph_entry()) && IsCompiledForOsr();
1648 for (intptr_t i = 0; i < block_entry->dominated_blocks().length(); ++i) {
1649 BlockEntryInstr* block = block_entry->dominated_blocks()[i];
1650 GrowableArray<Definition*> new_env(env->length());
1651 new_env.AddArray(*env);
1652 // During OSR, when traversing from the graph entry directly to any block
1653 // (which may be a non-entry), we must adjust the environment to mimic
1654 // a non-empty incoming expression stack to ensure temporaries refer to
1655 // the right stack items.
1656 const intptr_t stack_depth = block->stack_depth();
1657 ASSERT(stack_depth >= 0);
1658 if (set_stack) {
1659 ASSERT(variable_count() == new_env.length());
1660 new_env.FillWith(constant_dead(), variable_count(), stack_depth);
1661 } else if (!block->last_instruction()->IsTailCall()) {
1662 // Assert environment integrity otherwise.
1663 ASSERT((variable_count() + stack_depth) == new_env.length());
1664 }
1665 RenameRecursive(block, &new_env, live_phis, variable_liveness,
1666 inlining_parameters);
1667 }
1668
1669 // 4. Process successor block. We have edge-split form, so that only blocks
1670 // with one successor can have a join block as successor.
1671 if ((block_entry->last_instruction()->SuccessorCount() == 1) &&
1672 block_entry->last_instruction()->SuccessorAt(0)->IsJoinEntry()) {
1673 JoinEntryInstr* successor =
1674 block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry();
1675 intptr_t pred_index = successor->IndexOfPredecessor(block_entry);
1676 ASSERT(pred_index >= 0);
1677 if (successor->phis() != nullptr) {
1678 for (intptr_t i = 0; i < successor->phis()->length(); ++i) {
1679 PhiInstr* phi = (*successor->phis())[i];
1680 if (phi != nullptr) {
1681 // Rename input operand.
1682 Definition* input = (*env)[i];
1683 ASSERT(input != nullptr);
1684 ASSERT(!input->IsMoveArgument());
1685 Value* use = new (zone()) Value(input);
1686 phi->SetInputAt(pred_index, use);
1687 }
1688 }
1689 }
1690 }
1691}
1692
1693#if defined(DEBUG)
1694void FlowGraph::ValidatePhis() {
1695 if (!FLAG_prune_dead_locals) {
1696 // We can only check if dead locals are pruned.
1697 return;
1698 }
1699
1700 for (intptr_t i = 0, n = preorder().length(); i < n; ++i) {
1701 BlockEntryInstr* block_entry = preorder()[i];
1702 Instruction* last_instruction = block_entry->last_instruction();
1703
1704 if ((last_instruction->SuccessorCount() == 1) &&
1705 last_instruction->SuccessorAt(0)->IsJoinEntry()) {
1706 JoinEntryInstr* successor =
1707 last_instruction->SuccessorAt(0)->AsJoinEntry();
1708 if (successor->phis() != nullptr) {
1709 for (intptr_t j = 0; j < successor->phis()->length(); ++j) {
1710 PhiInstr* phi = (*successor->phis())[j];
1711 if (phi == nullptr && !IsImmortalVariable(j)) {
1712           // We have no phi node for this variable.
1713 // Double check we do not have a different value in our env.
1714 // If we do, we would have needed a phi-node in the successor.
1715 ASSERT(last_instruction->env() != nullptr);
1716 Definition* current_definition =
1717 last_instruction->env()->ValueAt(j)->definition();
1718 ASSERT(successor->env() != nullptr);
1719 Definition* successor_definition =
1720 successor->env()->ValueAt(j)->definition();
1721 if (!current_definition->IsConstant() &&
1722 !successor_definition->IsConstant()) {
1723 ASSERT(current_definition == successor_definition);
1724 }
1725 }
1726 }
1727 }
1728 }
1729 }
1730}
1731#endif // defined(DEBUG)
1732
1733void FlowGraph::RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis) {
1734   // Augment live_phis with those phis that have an implicit use at
1735   // potentially throwing instructions if there is a try-catch in this graph.
1736 if (!graph_entry()->catch_entries().is_empty()) {
1737 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
1738 JoinEntryInstr* join = it.Current()->AsJoinEntry();
1739 if (join == nullptr) continue;
1740 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
1741 PhiInstr* phi = phi_it.Current();
1742 if (phi == nullptr || phi->is_alive() ||
1743 (phi->input_use_list() != nullptr) ||
1744 (phi->env_use_list() == nullptr)) {
1745 continue;
1746 }
1747 for (Value::Iterator it(phi->env_use_list()); !it.Done();
1748 it.Advance()) {
1749 Value* use = it.Current();
1750 if (use->instruction()->MayThrow() &&
1751 use->instruction()->GetBlock()->InsideTryBlock()) {
1752 live_phis->Add(phi);
1753 phi->mark_alive();
1754 break;
1755 }
1756 }
1757 }
1758 }
1759 }
1760
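  // Propagate liveness transitively: any phi feeding an input of a live phi
  // is marked alive as well and queued for further processing.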
1761 while (!live_phis->is_empty()) {
1762 PhiInstr* phi = live_phis->RemoveLast();
1763 for (intptr_t i = 0; i < phi->InputCount(); i++) {
1764 Value* val = phi->InputAt(i);
1765 PhiInstr* used_phi = val->definition()->AsPhi();
1766 if ((used_phi != nullptr) && !used_phi->is_alive()) {
1767 used_phi->mark_alive();
1768 live_phis->Add(used_phi);
1769 }
1770 }
1771 }
1772
1773 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
1774 JoinEntryInstr* join = it.Current()->AsJoinEntry();
1775 if (join != nullptr) join->RemoveDeadPhis(constant_dead());
1776 }
1777}
1778
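// Inserts a Redefinition of |original| after |prev|, constrained to
// |compile_type|, unless an equivalent redefinition is already in place;
// returns the new redefinition, or nullptr if none was needed.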
1779 RedefinitionInstr* FlowGraph::EnsureRedefinition(Instruction* prev,
1780                                                  Definition* original,
1781 CompileType compile_type) {
1782 RedefinitionInstr* first = prev->next()->AsRedefinition();
1783 if (first != nullptr && (first->constrained_type() != nullptr)) {
1784 if ((first->value()->definition() == original) &&
1785 first->constrained_type()->IsEqualTo(&compile_type)) {
1786 // Already redefined. Do nothing.
1787 return nullptr;
1788 }
1789 }
1790 RedefinitionInstr* redef = new RedefinitionInstr(new Value(original));
1791
1792 // Don't set the constrained type when the type is None(), which denotes an
1793 // unreachable value (e.g. using value null after some form of null check).
1794 if (!compile_type.IsNone()) {
1795 redef->set_constrained_type(new CompileType(compile_type));
1796 }
1797
1798 InsertAfter(prev, redef, nullptr, FlowGraph::kValue);
1799 RenameDominatedUses(original, redef, redef);
1800
1801 if (redef->input_use_list() == nullptr) {
1802 // There are no dominated uses, so the newly added Redefinition is useless.
1803 // Remove Redefinition to avoid interfering with
1804 // BranchSimplifier::Simplify which needs empty blocks.
1805 redef->RemoveFromGraph();
1806 return nullptr;
1807 }
1808
1809 return redef;
1810}
1811
1812void FlowGraph::RemoveRedefinitions(bool keep_checks) {
1813 // Remove redefinition and check instructions that were inserted
1814 // to make a control dependence explicit with a data dependence,
1815 // for example, to inhibit hoisting.
1816 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
1817 block_it.Advance()) {
1819 for (ForwardInstructionIterator instr_it(block_it.Current());
1820 !instr_it.Done(); instr_it.Advance()) {
1821 Instruction* instruction = instr_it.Current();
1822 if (auto redef = instruction->AsRedefinition()) {
1823 redef->ReplaceUsesWith(redef->value()->definition());
1824 instr_it.RemoveCurrentFromGraph();
1825 } else if (keep_checks) {
1826 continue;
1827 } else if (auto def = instruction->AsDefinition()) {
1828 Value* value = def->RedefinedValue();
1829 if (value != nullptr) {
1830 def->ReplaceUsesWith(value->definition());
1831 def->ClearSSATempIndex();
1832 }
1833 }
1834 }
1835 }
1836}
1837
1838BitVector* FlowGraph::FindLoopBlocks(BlockEntryInstr* m,
1839 BlockEntryInstr* n) const {
1840   GrowableArray<BlockEntryInstr*> stack;
1841   BitVector* loop_blocks = new (zone()) BitVector(zone(), preorder_.length());
1842
1843 loop_blocks->Add(n->preorder_number());
1844 if (n != m) {
1845 loop_blocks->Add(m->preorder_number());
1846 stack.Add(m);
1847 }
1848
1849 while (!stack.is_empty()) {
1850 BlockEntryInstr* p = stack.RemoveLast();
1851 for (intptr_t i = 0; i < p->PredecessorCount(); ++i) {
1852 BlockEntryInstr* q = p->PredecessorAt(i);
1853 if (!loop_blocks->Contains(q->preorder_number())) {
1854 loop_blocks->Add(q->preorder_number());
1855 stack.Add(q);
1856 }
1857 }
1858 }
1859 return loop_blocks;
1860}
1861
1862LoopHierarchy* FlowGraph::ComputeLoops() const {
1863 // Iterate over all entry blocks in the flow graph to attach
1864 // loop information to each loop header.
1865 ZoneGrowableArray<BlockEntryInstr*>* loop_headers =
1866 new (zone()) ZoneGrowableArray<BlockEntryInstr*>();
1867 for (BlockIterator it = postorder_iterator(); !it.Done(); it.Advance()) {
1868 BlockEntryInstr* block = it.Current();
1869 // Reset loop information on every entry block (since this method
1870 // may recompute loop information on a modified flow graph).
1871 block->set_loop_info(nullptr);
1872 // Iterate over predecessors to find back edges.
1873 for (intptr_t i = 0; i < block->PredecessorCount(); ++i) {
1874 BlockEntryInstr* pred = block->PredecessorAt(i);
1875 if (block->Dominates(pred)) {
1876 // Identify the block as a loop header and add the blocks in the
1877 // loop to the loop information. Loops that share the same loop
1878 // header are treated as one loop by merging these blocks.
1879 BitVector* loop_blocks = FindLoopBlocks(pred, block);
1880 if (block->loop_info() == nullptr) {
1881 intptr_t id = loop_headers->length();
1882 block->set_loop_info(new (zone()) LoopInfo(id, block, loop_blocks));
1883 loop_headers->Add(block);
1884 } else {
1885 ASSERT(block->loop_info()->header() == block);
1886 block->loop_info()->AddBlocks(loop_blocks);
1887 }
1888 block->loop_info()->AddBackEdge(pred);
1889 }
1890 }
1891 }
1892
1893 // Build the loop hierarchy and link every entry block to
1894 // the closest enveloping loop in loop hierarchy.
1895 return new (zone()) LoopHierarchy(loop_headers, preorder_, should_print());
1896}
1897
1898 intptr_t FlowGraph::InstructionCount() const {
1899   intptr_t size = 0;
1900 // Iterate each block, skipping the graph entry.
1901 for (intptr_t i = 1; i < preorder_.length(); ++i) {
1902 BlockEntryInstr* block = preorder_[i];
1903
1904     // Skip any blocks from the prologue so that they do not count towards the
1905     // inlining instruction budget.
1906 const intptr_t block_id = block->block_id();
1907 if (prologue_info_.Contains(block_id)) {
1908 continue;
1909 }
1910
1911 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
1912 ++size;
1913 }
1914 }
1915 return size;
1916}
1917
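// Inserts a representation conversion for the given use when the
// representation required by the using instruction differs from the
// representation produced by the definition.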
1918void FlowGraph::ConvertUse(Value* use, Representation from_rep) {
1919 const Representation to_rep =
1921 if (from_rep == to_rep || to_rep == kNoRepresentation) {
1922 return;
1923 }
1924 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/false);
1925}
1926
1928 return (rep == kUnboxedInt32) || (rep == kUnboxedUint32) ||
1929 (rep == kUnboxedInt64);
1930}
1931
1935
1939
1943
1944void FlowGraph::InsertConversion(Representation from,
1945 Representation to,
1946 Value* use,
1947 bool is_environment_use) {
1948 ASSERT(from != to);
1949 Instruction* insert_before;
1950 PhiInstr* phi = use->instruction()->AsPhi();
1951 if (phi != nullptr) {
1952 ASSERT(phi->is_alive());
1953 // For phis conversions have to be inserted in the predecessor.
1954 auto predecessor = phi->block()->PredecessorAt(use->use_index());
1955 insert_before = predecessor->last_instruction();
1956 ASSERT(insert_before->GetBlock() == predecessor);
1957 } else {
1958 insert_before = use->instruction();
1959 }
1960 const Instruction::SpeculativeMode speculative_mode =
1961 use->instruction()->SpeculativeModeOfInput(use->use_index());
1962 Instruction* deopt_target = nullptr;
1963 if (speculative_mode == Instruction::kGuardInputs || to == kUnboxedInt32) {
1964 deopt_target = insert_before;
1965 }
1966
1967 Definition* converted = nullptr;
1968 if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) {
1969 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != nullptr)
1970 ? deopt_target->DeoptimizationTarget()
1971                                       : DeoptId::kNone;
1972     converted =
1973 new (Z) IntConverterInstr(from, to, use->CopyWithType(), deopt_id);
1974 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) {
1975 converted = new Int32ToDoubleInstr(use->CopyWithType());
1976 } else if ((from == kUnboxedInt64) && (to == kUnboxedDouble) &&
1978 const intptr_t deopt_id = (deopt_target != nullptr)
1979 ? deopt_target->DeoptimizationTarget()
1982 converted = new Int64ToDoubleInstr(use->CopyWithType(), deopt_id);
1983 } else if ((from == kTagged) && Boxing::Supports(to)) {
1984 const intptr_t deopt_id = (deopt_target != nullptr)
1985 ? deopt_target->DeoptimizationTarget()
1986                                      : DeoptId::kNone;
1987     converted =
1988 UnboxInstr::Create(to, use->CopyWithType(), deopt_id, speculative_mode);
1989 } else if ((to == kTagged) && Boxing::Supports(from)) {
1990 converted = BoxInstr::Create(from, use->CopyWithType());
1991 } else if ((to == kPairOfTagged) && (from == kTagged)) {
1992     // Insert conversion to an unboxed record, which can only be used
1993     // in a Return instruction.
1994 ASSERT(use->instruction()->IsDartReturn());
1995 Definition* x = new (Z)
1996 LoadFieldInstr(use->CopyWithType(),
1998 thread(), compiler::target::Record::field_offset(0)),
1999 InstructionSource());
2000 InsertBefore(insert_before, x, nullptr, FlowGraph::kValue);
2001 Definition* y = new (Z)
2002 LoadFieldInstr(use->CopyWithType(),
2004 thread(), compiler::target::Record::field_offset(1)),
2005 InstructionSource());
2006 InsertBefore(insert_before, y, nullptr, FlowGraph::kValue);
2007 converted = new (Z) MakePairInstr(new (Z) Value(x), new (Z) Value(y));
2008 } else if ((to == kTagged) && (from == kPairOfTagged)) {
2009 // Handled in FlowGraph::InsertRecordBoxing.
2010 UNREACHABLE();
2011 } else {
2012     // We have failed to find a suitable conversion instruction. If either
2013     // representation is not boxable, then fail immediately.
2014 if (!Boxing::Supports(from) || !Boxing::Supports(to)) {
2016 FATAL("Illegal conversion %s->%s for the use of %s at %s\n",
2019 use->definition()->ToCString(), use->instruction()->ToCString());
2020 } else {
2021 FATAL("Illegal conversion %s->%s for a use of v%" Pd "\n",
2024 use->definition()->ssa_temp_index());
2025 }
2026 }
2027 // Insert two "dummy" conversion instructions with the correct
2028 // "from" and "to" representation. The inserted instructions will
2029 // trigger a deoptimization if executed. See #12417 for a discussion.
2030 // If the use is not speculative, then this code should be unreachable.
2031 // Insert Stop for a graceful error and aid unreachable code elimination.
2032 if (speculative_mode == Instruction::kNotSpeculative) {
2033 StopInstr* stop = new (Z) StopInstr("Incompatible conversion.");
2034 InsertBefore(insert_before, stop, nullptr, FlowGraph::kEffect);
2035 }
2036 const intptr_t deopt_id = (deopt_target != nullptr)
2037 ? deopt_target->DeoptimizationTarget()
2038                                      : DeoptId::kNone;
2039     Definition* boxed = BoxInstr::Create(from, use->CopyWithType());
2040 use->BindTo(boxed);
2041 InsertBefore(insert_before, boxed, nullptr, FlowGraph::kValue);
2042 converted = UnboxInstr::Create(to, new (Z) Value(boxed), deopt_id,
2043 speculative_mode);
2044 }
2045 ASSERT(converted != nullptr);
2046 InsertBefore(insert_before, converted,
2047 (deopt_target != nullptr) ? deopt_target->env() : nullptr,
2048 FlowGraph::kValue);
2049 if (is_environment_use) {
2050 use->BindToEnvironment(converted);
2051 } else {
2052 use->BindTo(converted);
2053 }
2054}
2055
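// Returns true if the unboxed record produced by |def| has a use that
// requires a boxed (tagged) record.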
2056 static bool NeedsRecordBoxing(Definition* def) {
2057   if (def->env_use_list() != nullptr) return true;
2058   for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2059     Value* use = it.Current();
2060     if (use->instruction()->RequiredInputRepresentation(use->use_index()) !=
2061         kPairOfTagged) {
2062 return true;
2063 }
2064 }
2065 return false;
2066}
2067
2068void FlowGraph::InsertRecordBoxing(Definition* def) {
2069   // Insert conversion from an unboxed record, which can only be returned
2070 // by a Dart call with a known interface/direct target.
2071 const Function* target = nullptr;
2072 if (auto* call = def->AsStaticCall()) {
2073 target = &(call->function());
2074 } else if (auto* call = def->AsInstanceCallBase()) {
2075 target = &(call->interface_target());
2076 } else if (auto* call = def->AsDispatchTableCall()) {
2077 target = &(call->interface_target());
2078 } else {
2079 UNREACHABLE();
2080 }
2081 ASSERT(target != nullptr && !target->IsNull());
2082
2083 kernel::UnboxingInfoMetadata* unboxing_metadata =
2084 kernel::UnboxingInfoMetadataOf(*target, Z);
2085 ASSERT(unboxing_metadata != nullptr);
2086 const RecordShape shape = unboxing_metadata->return_info.record_shape;
2087 ASSERT(shape.num_fields() == 2);
2088
2089 auto* x = new (Z)
2090 ExtractNthOutputInstr(new (Z) Value(def), 0, kTagged, kDynamicCid);
2091 auto* y = new (Z)
2092 ExtractNthOutputInstr(new (Z) Value(def), 1, kTagged, kDynamicCid);
2093 auto* alloc = new (Z)
2094 AllocateSmallRecordInstr(InstructionSource(), shape, new (Z) Value(x),
2095 new (Z) Value(y), nullptr, def->deopt_id());
2096 def->ReplaceUsesWith(alloc);
2097 // Uses of 'def' in 'x' and 'y' should not be replaced as 'x' and 'y'
2098 // are not added to the flow graph yet.
2099 ASSERT(x->value()->definition() == def);
2100 ASSERT(y->value()->definition() == def);
2101 Instruction* insert_before = def->next();
2102 ASSERT(insert_before != nullptr);
2103 InsertBefore(insert_before, x, nullptr, FlowGraph::kValue);
2104 InsertBefore(insert_before, y, nullptr, FlowGraph::kValue);
2105 InsertBefore(insert_before, alloc, def->env(), FlowGraph::kValue);
2106}
2107
2108void FlowGraph::InsertConversionsFor(Definition* def) {
2109 const Representation from_rep = def->representation();
2110
2111 // Insert boxing of a record once after the definition (if needed)
2112 // in order to avoid multiple allocations.
2113 if (from_rep == kPairOfTagged) {
2114 if (NeedsRecordBoxing(def)) {
2115 InsertRecordBoxing(def);
2116 }
2117 return;
2118 }
2119
2120 for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2121 ConvertUse(it.Current(), from_rep);
2122 }
2123}
2124
2125namespace {
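// Decides, for each phi, whether it is profitable to keep the phi unboxed
// and, if so, which unboxed representation to use, based on the phi's type
// and on the representations of its inputs and uses.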
2126class PhiUnboxingHeuristic : public ValueObject {
2127 public:
2128 explicit PhiUnboxingHeuristic(FlowGraph* flow_graph)
2129 : worklist_(flow_graph, 10) {}
2130
2131 void Process(PhiInstr* phi) {
2132 auto new_representation = phi->representation();
2133 switch (phi->Type()->ToCid()) {
2134 case kDoubleCid:
2135 if (CanUnboxDouble()) {
2136 new_representation = DetermineIfAnyIncomingUnboxedFloats(phi)
2137 ? kUnboxedFloat
2138 : kUnboxedDouble;
2139#if defined(DEBUG)
2140 if (new_representation == kUnboxedFloat) {
2141 for (auto input : phi->inputs()) {
2142 ASSERT(input->representation() != kUnboxedDouble);
2143 }
2144 }
2145#endif
2146 }
2147 break;
2148 case kFloat32x4Cid:
2149 if (ShouldInlineSimd()) {
2150 new_representation = kUnboxedFloat32x4;
2151 }
2152 break;
2153 case kInt32x4Cid:
2154 if (ShouldInlineSimd()) {
2155 new_representation = kUnboxedInt32x4;
2156 }
2157 break;
2158 case kFloat64x2Cid:
2159 if (ShouldInlineSimd()) {
2160 new_representation = kUnboxedFloat64x2;
2161 }
2162 break;
2163 }
2164
2165 if (new_representation == kTagged && phi->Type()->IsInt()) {
2166 // Check to see if all the (non-self) inputs are unboxed integers. If so,
2167 // mark the phi as an unboxed integer that can hold the possible values
2168 // that flow into the phi.
2169 for (auto input : phi->inputs()) {
2170 if (input == phi) continue;
2171
2172 if (!IsUnboxedInteger(input->representation())) {
2173 new_representation = kTagged; // Reset to a boxed phi.
2174 break;
2175 }
2176
2177 if (new_representation == kTagged) {
2178 new_representation = input->representation();
2179 } else if (new_representation != input->representation()) {
2180 // Don't allow mixing of untagged and unboxed values.
2181 ASSERT(IsUnboxedInteger(input->representation()));
2182 // It's unclear which representation to use yet, so use
2183 // kNoRepresentation as a "unknown but unboxed int" marker for now.
2184 new_representation = kNoRepresentation;
2185 }
2186 }
2187
2188 if (new_representation == kNoRepresentation) {
2189 // If all the inputs are unboxed integers but with different
2190 // representations, then pick a representation based on the range
2191 // of values that flow into the phi node.
2192 new_representation =
2194 ? kUnboxedInt32
2195 : kUnboxedInt64;
2196 }
2197
2198       // Decide if it is worthwhile to unbox a boxed integer phi.
2199 if (new_representation == kTagged && !phi->Type()->can_be_sentinel()) {
2200#if defined(TARGET_ARCH_IS_64_BIT)
2201 // In AOT mode on 64-bit platforms always unbox integer typed phis
2202 // (similar to how we treat doubles and other boxed numeric types).
2203 // In JIT mode only unbox phis which are not fully known to be Smi.
2204 if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
2205 new_representation = kUnboxedInt64;
2206 }
2207#else
2208         // If we are on a 32-bit platform, check if there are unboxed values
2209         // flowing into the phi and whether the phi value itself flows into an
2210         // unboxed operation; if so, prefer to keep it unboxed.
2211 // We use this heuristic instead of eagerly unboxing all the phis
2212 // because we are concerned about the code size and register pressure.
2213 const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
2214 const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
2215
2216 if (has_unboxed_incoming_value && flows_into_unboxed_use) {
2217 new_representation =
2219 ? kUnboxedInt32
2220 : kUnboxedInt64;
2221 }
2222#endif
2223 }
2224 }
2225
2226 // If any non-self input of the phi node is untagged, then the phi node
2227 // should only have untagged inputs and thus is marked as untagged.
2228 //
2229 // Note: we can't assert that all inputs are untagged at this point because
2230 // one of the inputs might be a different unvisited phi node. If this
2231 // assumption is broken, then fail later in the SelectRepresentations pass.
2232 for (auto input : phi->inputs()) {
2233 if (input != phi && input->representation() == kUntagged) {
2234 new_representation = kUntagged;
2235 break;
2236 }
2237 }
2238
2239 phi->set_representation(new_representation);
2240 }
2241
2242 private:
2243   // Returns [true] if there are unboxed float values flowing into
2244 // the |phi|.
2245 // This function looks through phis.
2246 bool DetermineIfAnyIncomingUnboxedFloats(PhiInstr* phi) {
2247 worklist_.Clear();
2248 worklist_.Add(phi);
2249 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2250 const auto defn = worklist_.definitions()[i];
2251 for (auto input : defn->inputs()) {
2252 if (input->representation() == kUnboxedFloat) {
2253 return true;
2254 }
2255 if (input->IsPhi()) {
2256 worklist_.Add(input);
2257 }
2258 }
2259 }
2260 return false;
2261 }
2262
2263 // Returns |true| iff there is an unboxed definition among all potential
2264 // definitions that can flow into the |phi|.
2265 // This function looks through phis.
2266 bool HasUnboxedIncomingValue(PhiInstr* phi) {
2267 worklist_.Clear();
2268 worklist_.Add(phi);
2269 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2270 const auto defn = worklist_.definitions()[i];
2271 for (auto input : defn->inputs()) {
2272 if (IsUnboxedInteger(input->representation()) || input->IsBox()) {
2273 return true;
2274 } else if (input->IsPhi()) {
2275 worklist_.Add(input);
2276 }
2277 }
2278 }
2279 return false;
2280 }
2281
2282 // Returns |true| iff |phi| potentially flows into an unboxed use.
2283 // This function looks through phis.
2284 bool FlowsIntoUnboxedUse(PhiInstr* phi) {
2285 worklist_.Clear();
2286 worklist_.Add(phi);
2287 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2288 const auto defn = worklist_.definitions()[i];
2289 for (auto use : defn->input_uses()) {
2290 if (IsUnboxedInteger(use->instruction()->RequiredInputRepresentation(
2291 use->use_index())) ||
2292 use->instruction()->IsUnbox()) {
2293 return true;
2294 } else if (auto phi_use = use->instruction()->AsPhi()) {
2295 worklist_.Add(phi_use);
2296 }
2297 }
2298 }
2299 return false;
2300 }
2301
2302 const bool is_aot_ = CompilerState::Current().is_aot();
2303 DefinitionWorklist worklist_;
2304};
2305} // namespace
2306
2307 void FlowGraph::SelectRepresentations() {
2308   // First we decide for each phi if it is beneficial to unbox it. If so, we
2309   // change its `phi->representation()`.
2310 PhiUnboxingHeuristic phi_unboxing_heuristic(this);
2311 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2312 block_it.Advance()) {
2313 JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry();
2314 if (join_entry != nullptr) {
2315 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
2316 PhiInstr* phi = it.Current();
2317 phi_unboxing_heuristic.Process(phi);
2318 }
2319 }
2320 }
2321
2322 // Process all initial definitions and insert conversions when needed (depends
2323 // on phi unboxing decision above).
2324 for (intptr_t i = 0; i < graph_entry()->initial_definitions()->length();
2325 i++) {
2326 InsertConversionsFor((*graph_entry()->initial_definitions())[i]);
2327 }
2328 for (intptr_t i = 0; i < graph_entry()->SuccessorCount(); ++i) {
2329 auto successor = graph_entry()->SuccessorAt(i);
2330 if (auto entry = successor->AsBlockEntryWithInitialDefs()) {
2331 auto& initial_definitions = *entry->initial_definitions();
2332 for (intptr_t j = 0; j < initial_definitions.length(); j++) {
2333 InsertConversionsFor(initial_definitions[j]);
2334 }
2335 }
2336 }
2337
2338 // Process all normal definitions and insert conversions when needed (depends
2339 // on phi unboxing decision above).
2340 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2341 block_it.Advance()) {
2342 BlockEntryInstr* entry = block_it.Current();
2343 if (JoinEntryInstr* join_entry = entry->AsJoinEntry()) {
2344 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
2345 PhiInstr* phi = it.Current();
2346 ASSERT(phi != nullptr);
2347 ASSERT(phi->is_alive());
2348 InsertConversionsFor(phi);
2349 }
2350 }
2351 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
2352 Definition* def = it.Current()->AsDefinition();
2353 if (def != nullptr) {
2354 InsertConversionsFor(def);
2355 }
2356 }
2357 }
2358}
2359
2360#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_IA32)
2361 // Smi widening pass is only meaningful on platforms where Smi
2362 // is smaller than 32 bits. For now it is only supported on ARM and ia32.
2363static bool CanBeWidened(BinarySmiOpInstr* smi_op) {
2364 return BinaryInt32OpInstr::IsSupported(smi_op->op_kind(), smi_op->left(),
2365 smi_op->right());
2366}
2367
2368static bool BenefitsFromWidening(BinarySmiOpInstr* smi_op) {
2369 // TODO(vegorov): when shifts with non-constants shift count are supported
2370 // add them here as we save untagging for the count.
2371 switch (smi_op->op_kind()) {
2372 case Token::kMUL:
2373 case Token::kSHR:
2374 case Token::kUSHR:
2375 // For kMUL we save untagging of the argument.
2376 // For kSHR/kUSHR we save tagging of the result.
2377 return true;
2378
2379 default:
2380 return false;
2381 }
2382}
2383
2384// Maps an entry block to its closest enveloping loop id, or -1 if none.
2385static intptr_t LoopId(BlockEntryInstr* block) {
2386 LoopInfo* loop = block->loop_info();
2387 if (loop != nullptr) {
2388 return loop->id();
2389 }
2390 return -1;
2391}
2392
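// Replaces tagged Smi operations with 32-bit integer operations when a
// simple gain estimate suggests that widening saves more tagging and
// untagging work than it introduces.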
2393 void FlowGraph::WidenSmiToInt32() {
2394   if (!FLAG_use_smi_widening) {
2395 return;
2396 }
2397
2398 GrowableArray<BinarySmiOpInstr*> candidates;
2399
2400 // Step 1. Collect all instructions that potentially benefit from widening of
2401 // their operands (or their result) into int32 range.
2402 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2403 block_it.Advance()) {
2404 for (ForwardInstructionIterator instr_it(block_it.Current());
2405 !instr_it.Done(); instr_it.Advance()) {
2406 BinarySmiOpInstr* smi_op = instr_it.Current()->AsBinarySmiOp();
2407 if ((smi_op != nullptr) && smi_op->HasSSATemp() &&
2408 BenefitsFromWidening(smi_op) && CanBeWidened(smi_op)) {
2409 candidates.Add(smi_op);
2410 }
2411 }
2412 }
2413
2414 if (candidates.is_empty()) {
2415 return;
2416 }
2417
2418 // Step 2. For each block in the graph compute which loop it belongs to.
2419   // We will use this information later during computation of the widening's
2420   // gain: we are going to assume that only conversions occurring inside the
2421   // same loop should be counted against the gain; all other conversions
2422   // can be hoisted and thus cost nothing compared to the loop cost itself.
2424
2425   // Step 3. For each candidate, transitively collect all other BinarySmiOpInstr
2426   // and PhiInstr that it depends on or that depend on it, and count the number
2427   // of untagging operations we would save assuming that this whole graph of
2428   // values uses the kUnboxedInt32 representation instead of kTagged.
2429 // Convert those graphs that have positive gain to kUnboxedInt32.
2430
2431 // BitVector containing SSA indexes of all processed definitions. Used to skip
2432   // those candidates that belong to the dependency graph of another candidate.
2433 BitVector* processed = new (Z) BitVector(Z, current_ssa_temp_index());
2434
2435 // Worklist used to collect dependency graph.
2436 DefinitionWorklist worklist(this, candidates.length());
2437 for (intptr_t i = 0; i < candidates.length(); i++) {
2438 BinarySmiOpInstr* op = candidates[i];
2439 if (op->WasEliminated() || processed->Contains(op->ssa_temp_index())) {
2440 continue;
2441 }
2442
2443 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2444 THR_Print("analysing candidate: %s\n", op->ToCString());
2445 }
2446 worklist.Clear();
2447 worklist.Add(op);
2448
2449 // Collect dependency graph. Note: more items are added to worklist
2450 // inside this loop.
2451 intptr_t gain = 0;
2452 for (intptr_t j = 0; j < worklist.definitions().length(); j++) {
2453 Definition* defn = worklist.definitions()[j];
2454
2455 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2456 THR_Print("> %s\n", defn->ToCString());
2457 }
2458
2459 if (defn->IsBinarySmiOp() &&
2460 BenefitsFromWidening(defn->AsBinarySmiOp())) {
2461 gain++;
2462 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2463 THR_Print("^ [%" Pd "] (o) %s\n", gain, defn->ToCString());
2464 }
2465 }
2466
2467 const intptr_t defn_loop = LoopId(defn->GetBlock());
2468
2469 // Process all inputs.
2470 for (intptr_t k = 0; k < defn->InputCount(); k++) {
2471 Definition* input = defn->InputAt(k)->definition();
2472 if (input->IsBinarySmiOp() && CanBeWidened(input->AsBinarySmiOp())) {
2473 worklist.Add(input);
2474 } else if (input->IsPhi() && (input->Type()->ToCid() == kSmiCid)) {
2475 worklist.Add(input);
2476 } else if (input->IsBinaryInt64Op()) {
2477 // Mint operation produces untagged result. We avoid tagging.
2478 gain++;
2479 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2480 THR_Print("^ [%" Pd "] (i) %s\n", gain, input->ToCString());
2481 }
2482 } else if (defn_loop == LoopId(input->GetBlock()) &&
2483 (input->Type()->ToCid() == kSmiCid)) {
2484 // Input comes from the same loop, is known to be smi and requires
2485 // untagging.
2486 // TODO(vegorov) this heuristic assumes that values that are not
2487 // known to be smi have to be checked and this check can be
2488 // coalesced with untagging. Start coalescing them.
2489 gain--;
2490 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2491 THR_Print("v [%" Pd "] (i) %s\n", gain, input->ToCString());
2492 }
2493 }
2494 }
2495
2496 // Process all uses.
2497 for (Value* use = defn->input_use_list(); use != nullptr;
2498 use = use->next_use()) {
2499 Instruction* instr = use->instruction();
2500 Definition* use_defn = instr->AsDefinition();
2501 if (use_defn == nullptr) {
2502 // We assume that tagging before returning or pushing argument costs
2503 // very little compared to the cost of the return/call itself.
2504 ASSERT(!instr->IsMoveArgument());
2505 if (!instr->IsReturnBase() &&
2506 (use->use_index() >= instr->ArgumentCount())) {
2507 gain--;
2508 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2509 THR_Print("v [%" Pd "] (u) %s\n", gain,
2510 use->instruction()->ToCString());
2511 }
2512 }
2513 continue;
2514 } else if (use_defn->IsBinarySmiOp() &&
2515 CanBeWidened(use_defn->AsBinarySmiOp())) {
2516 worklist.Add(use_defn);
2517 } else if (use_defn->IsPhi() &&
2518 use_defn->AsPhi()->Type()->ToCid() == kSmiCid) {
2519 worklist.Add(use_defn);
2520 } else if (use_defn->IsBinaryInt64Op()) {
2521 // BinaryInt64Op requires untagging of its inputs.
2522 // Converting kUnboxedInt32 to kUnboxedInt64 is essentially zero cost
2523 // sign extension operation.
2524 gain++;
2525 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2526 THR_Print("^ [%" Pd "] (u) %s\n", gain,
2527 use->instruction()->ToCString());
2528 }
2529 } else if (defn_loop == LoopId(instr->GetBlock())) {
2530 gain--;
2531 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2532 THR_Print("v [%" Pd "] (u) %s\n", gain,
2533 use->instruction()->ToCString());
2534 }
2535 }
2536 }
2537 }
2538
2539 processed->AddAll(worklist.contains_vector());
2540
2541 if (FLAG_support_il_printer && FLAG_trace_smi_widening) {
2542 THR_Print("~ %s gain %" Pd "\n", op->ToCString(), gain);
2543 }
2544
2545 if (gain > 0) {
2546 // We have positive gain from widening. Convert all BinarySmiOpInstr into
2547 // BinaryInt32OpInstr and set representation of all phis to kUnboxedInt32.
2548 for (intptr_t j = 0; j < worklist.definitions().length(); j++) {
2549 Definition* defn = worklist.definitions()[j];
2550 ASSERT(defn->IsPhi() || defn->IsBinarySmiOp());
2551
2552         // Since we widen the integer representation we have to clear out type
2553 // propagation information (e.g. it might no longer be a _Smi).
2554 for (Value::Iterator it(defn->input_use_list()); !it.Done();
2555 it.Advance()) {
2556 it.Current()->SetReachingType(nullptr);
2557 }
2558
2559 if (defn->IsBinarySmiOp()) {
2560 BinarySmiOpInstr* smi_op = defn->AsBinarySmiOp();
2561 BinaryInt32OpInstr* int32_op = new (Z) BinaryInt32OpInstr(
2562 smi_op->op_kind(), smi_op->left()->CopyWithType(),
2563 smi_op->right()->CopyWithType(), smi_op->DeoptimizationTarget());
2564
2565 smi_op->ReplaceWith(int32_op, nullptr);
2566 } else if (defn->IsPhi()) {
2567 defn->AsPhi()->set_representation(kUnboxedInt32);
2568 ASSERT(defn->Type()->IsInt());
2569 }
2570 }
2571 }
2572 }
2573}
2574#else
2575 void FlowGraph::WidenSmiToInt32() {
2576   // TODO(vegorov) ideally on 64-bit platforms we would like to narrow smi
2577   // operations to 32-bit where it saves tagging and untagging and allows
2578   // us to use shorter (and faster) instructions. But we currently don't
2579   // save enough range information in the ICData to drive this decision.
2580}
2581#endif
2582
2583 void FlowGraph::EliminateEnvironments() {
2584   // After this pass we can no longer perform LICM and hoist instructions
2585 // that can deoptimize.
2586
2587 disallow_licm();
2588 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2589 block_it.Advance()) {
2590 BlockEntryInstr* block = block_it.Current();
2591 if (!block->IsCatchBlockEntry()) {
2592 block->RemoveEnvironment();
2593 }
2594 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2595 Instruction* current = it.Current();
2596       // This check is inconsistent with the flow graph checker: the checker
2597       // does not allow an instruction to lack an environment when its block is
2598       // not inside a try-catch.
2599       // See FlowGraphChecker::VisitInstruction.
2600 if (!current->ComputeCanDeoptimize() &&
2601 !current->ComputeCanDeoptimizeAfterCall() &&
2602 (!current->MayThrow() || !current->GetBlock()->InsideTryBlock())) {
2603 // Instructions that can throw need an environment for optimized
2604 // try-catch.
2605 // TODO(srdjan): --source-lines needs deopt environments to get at
2606 // the code for this instruction, however, leaving the environment
2607 // changes code.
2608 current->RemoveEnvironment();
2609 }
2610 }
2611 }
2612}
2613
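// Loads the untagged payload address out of |array| through the given slot
// and rebinds the use to the newly loaded address.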
2614void FlowGraph::ExtractUntaggedPayload(Instruction* instr,
2615 Value* array,
2616 const Slot& slot,
2617 InnerPointerAccess access) {
2618 auto* const untag_payload = new (Z)
2619 LoadFieldInstr(array->CopyWithType(Z), slot, access, instr->source());
2620 InsertBefore(instr, untag_payload, instr->env(), FlowGraph::kValue);
2621 array->BindTo(untag_payload);
2622 ASSERT_EQUAL(array->definition()->representation(), kUntagged);
2623}
2624
2625 bool FlowGraph::ExtractExternalUntaggedPayload(Instruction* instr,
2626                                                Value* array,
2627 classid_t cid) {
2628 ASSERT(array->instruction() == instr);
2629 // Nothing to do if already untagged.
2630 if (array->definition()->representation() != kTagged) return false;
2631 // If we've determined at compile time that this is an object that has an
2632 // external payload, use the cid of the compile type instead.
2633 if (IsExternalPayloadClassId(array->Type()->ToCid())) {
2634 cid = array->Type()->ToCid();
2635 } else if (!IsExternalPayloadClassId(cid)) {
2636 // Can't extract the payload address if it points to GC-managed memory.
2637 return false;
2638 }
2639
2640 const Slot* slot = nullptr;
2641 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
2642 slot = &Slot::PointerBase_data();
2643 } else {
2644 UNREACHABLE();
2645 }
2646
2647 ExtractUntaggedPayload(instr, array, *slot,
2649 return true;
2650}
2651
2652void FlowGraph::ExtractNonInternalTypedDataPayload(Instruction* instr,
2653 Value* array,
2654 classid_t cid) {
2655 ASSERT(array->instruction() == instr);
2656 // Skip if the array payload has already been extracted.
2657 if (array->definition()->representation() == kUntagged) return;
2658 if (!IsTypedDataBaseClassId(cid)) return;
2659 auto const type_cid = array->Type()->ToCid();
2660 // For external PointerBase objects, the payload should have already been
2661 // extracted during canonicalization.
2663 // Extract payload for typed data view instructions even if array is
2664   // an internal typed data object (this could happen in unreachable code),
2665 // as code generation handles direct accesses only for internal typed data.
2666 //
2667 // For internal typed data instructions (which are also used for
2668 // non-internal typed data arrays), don't extract payload if the array is
2669 // an internal typed data object.
2670 if (IsTypedDataViewClassId(cid) || !IsTypedDataClassId(type_cid)) {
2671 ExtractUntaggedPayload(instr, array, Slot::PointerBase_data(),
2673 }
2674}
2675
2676 void FlowGraph::ExtractNonInternalTypedDataPayloads() {
2677   for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2678 block_it.Advance()) {
2679 BlockEntryInstr* block = block_it.Current();
2680 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2681 Instruction* current = it.Current();
2682 if (auto* const load_indexed = current->AsLoadIndexed()) {
2683 ExtractNonInternalTypedDataPayload(load_indexed, load_indexed->array(),
2684 load_indexed->class_id());
2685 } else if (auto* const store_indexed = current->AsStoreIndexed()) {
2686 ExtractNonInternalTypedDataPayload(
2687 store_indexed, store_indexed->array(), store_indexed->class_id());
2688 } else if (auto* const memory_copy = current->AsMemoryCopy()) {
2689 ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->src(),
2690 memory_copy->src_cid());
2691 ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->dest(),
2692 memory_copy->dest_cid());
2693 }
2694 }
2695 }
2696}
2697
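// Runs a single canonicalization pass over all phis and instructions,
// replacing each one with its canonical form; returns true if anything in
// the graph changed.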
2698 bool FlowGraph::Canonicalize() {
2699   bool changed = false;
2700
2701 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2702 block_it.Advance()) {
2703 BlockEntryInstr* const block = block_it.Current();
2704 if (auto join = block->AsJoinEntry()) {
2705 for (PhiIterator it(join); !it.Done(); it.Advance()) {
2706 PhiInstr* current = it.Current();
2707 if (current->HasUnmatchedInputRepresentations() &&
2709 // Can't canonicalize this instruction until all conversions for its
2710 // speculative inputs are inserted.
2711 continue;
2712 }
2713
2714 Definition* replacement = current->Canonicalize(this);
2715 ASSERT(replacement != nullptr);
2717 !replacement->HasUnmatchedInputRepresentations());
2718 if (replacement != current) {
2719 current->ReplaceUsesWith(replacement);
2720 it.RemoveCurrentFromGraph();
2721 changed = true;
2722 }
2723 }
2724 }
2725 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2726 Instruction* current = it.Current();
2727 if (current->HasUnmatchedInputRepresentations() &&
2729 // Can't canonicalize this instruction until all conversions for its
2730 // speculative inputs are inserted.
2731 continue;
2732 }
2733
2734 Instruction* replacement = current->Canonicalize(this);
2735
2736 if (replacement != current) {
2737 // For non-definitions Canonicalize should return either nullptr or
2738 // this.
2739 if (replacement != nullptr) {
2740 ASSERT(current->IsDefinition());
2743 if ((replacement->representation() != current->representation()) &&
2744 current->AsDefinition()->HasUses()) {
2745 // Can't canonicalize this instruction as unmatched
2746 // representations are not allowed at this point, but
2747 // replacement has a different representation.
2748 continue;
2749 }
2750 }
2751 }
2752 ReplaceCurrentInstruction(&it, current, replacement);
2753 changed = true;
2754 }
2755 }
2756 }
2757 return changed;
2758}
2759
2760 void FlowGraph::PopulateWithICData(const Function& function) {
2761   Zone* zone = Thread::Current()->zone();
2762
2763 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2764 block_it.Advance()) {
2765 ForwardInstructionIterator it(block_it.Current());
2766 for (; !it.Done(); it.Advance()) {
2767 Instruction* instr = it.Current();
2768 if (instr->IsInstanceCall()) {
2769 InstanceCallInstr* call = instr->AsInstanceCall();
2770 if (!call->HasICData()) {
2771 const Array& arguments_descriptor =
2772 Array::Handle(zone, call->GetArgumentsDescriptor());
2773 const ICData& ic_data = ICData::ZoneHandle(
2774 zone,
2775 ICData::New(function, call->function_name(), arguments_descriptor,
2776 call->deopt_id(), call->checked_argument_count(),
2777 ICData::kInstance));
2778 call->set_ic_data(&ic_data);
2779 }
2780 } else if (instr->IsStaticCall()) {
2781 StaticCallInstr* call = instr->AsStaticCall();
2782 if (!call->HasICData()) {
2783 const Array& arguments_descriptor =
2784 Array::Handle(zone, call->GetArgumentsDescriptor());
2785 const Function& target = call->function();
2786 int num_args_checked =
2788 const ICData& ic_data = ICData::ZoneHandle(
2790 function, target, arguments_descriptor,
2791 call->deopt_id(), num_args_checked, ICData::kStatic));
2792 call->set_ic_data(&ic_data);
2793 }
2794 }
2795 }
2796 }
2797}
2798
2799// Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the
2800// shift can be a truncating Smi shift-left and result is always Smi.
2801// Merging occurs only per basic-block.
2802 void FlowGraph::TryOptimizePatterns() {
2803   if (!FLAG_truncating_left_shift) return;
2804   GrowableArray<BinarySmiOpInstr*> div_mod_merge;
2805   GrowableArray<InvokeMathCFunctionInstr*> sin_cos_merge;
2806   for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2807 block_it.Advance()) {
2808 // Merging only per basic-block.
2809 div_mod_merge.Clear();
2810 sin_cos_merge.Clear();
2811 ForwardInstructionIterator it(block_it.Current());
2812 for (; !it.Done(); it.Advance()) {
2813 if (it.Current()->IsBinarySmiOp()) {
2814 BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp();
2815 if (binop->op_kind() == Token::kBIT_AND) {
2816 OptimizeLeftShiftBitAndSmiOp(&it, binop, binop->left()->definition(),
2817 binop->right()->definition());
2818 } else if ((binop->op_kind() == Token::kTRUNCDIV) ||
2819 (binop->op_kind() == Token::kMOD)) {
2820 if (binop->HasUses()) {
2821 div_mod_merge.Add(binop);
2822 }
2823 }
2824 } else if (it.Current()->IsBinaryInt64Op()) {
2825 BinaryInt64OpInstr* mintop = it.Current()->AsBinaryInt64Op();
2826 if (mintop->op_kind() == Token::kBIT_AND) {
2827 OptimizeLeftShiftBitAndSmiOp(&it, mintop,
2828 mintop->left()->definition(),
2829 mintop->right()->definition());
2830 }
2831 } else if (it.Current()->IsInvokeMathCFunction()) {
2832 InvokeMathCFunctionInstr* math_unary =
2833 it.Current()->AsInvokeMathCFunction();
2834 if ((math_unary->recognized_kind() == MethodRecognizer::kMathSin) ||
2835 (math_unary->recognized_kind() == MethodRecognizer::kMathCos)) {
2836 if (math_unary->HasUses()) {
2837 sin_cos_merge.Add(math_unary);
2838 }
2839 }
2840 }
2841 }
2842 TryMergeTruncDivMod(&div_mod_merge);
2843 }
2844}
2845
2846// Returns true if use is dominated by the given instruction.
2847 // Note: uses that occur at the instruction itself are not dominated by it.
2848 static bool IsDominatedUse(Instruction* dom, Value* use) {
2849   BlockEntryInstr* dom_block = dom->GetBlock();
2850
2851 Instruction* instr = use->instruction();
2852
2853 PhiInstr* phi = instr->AsPhi();
2854 if (phi != nullptr) {
2855 return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index()));
2856 }
2857
2858 BlockEntryInstr* use_block = instr->GetBlock();
2859 if (use_block == dom_block) {
2860 // Fast path for the case of block entry.
2861 if (dom_block == dom) return true;
2862
2863 for (Instruction* curr = dom->next(); curr != nullptr;
2864 curr = curr->next()) {
2865 if (curr == instr) return true;
2866 }
2867
2868 return false;
2869 }
2870
2871 return dom_block->Dominates(use_block);
2872}
2873
2874 void FlowGraph::RenameDominatedUses(Definition* def,
2875                                     Instruction* dom,
2876                                     Definition* other) {
2877 for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2878 Value* use = it.Current();
2879 if (IsDominatedUse(dom, use)) {
2880 use->BindTo(other);
2881 }
2882 }
2883}
2884
2886 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2887 block_it.Advance()) {
2888 for (ForwardInstructionIterator instr_it(block_it.Current());
2889 !instr_it.Done(); instr_it.Advance()) {
2890 Definition* definition = instr_it.Current()->AsDefinition();
2891 // CheckArrayBound instructions have their own mechanism for ensuring
2892 // proper dependencies, so we don't rewrite those here.
2893 if (definition != nullptr && !definition->IsCheckArrayBound()) {
2894 Value* redefined = definition->RedefinedValue();
2895 if (redefined != nullptr) {
2896 if (!definition->HasSSATemp()) {
2897 AllocateSSAIndex(definition);
2898 }
2899 Definition* original = redefined->definition();
2900 RenameDominatedUses(original, definition, definition);
2901 }
2902 }
2903 }
2904 }
2905}
2906
2907 static bool IsPositiveOrZeroSmiConst(Definition* d) {
2908   ConstantInstr* const_instr = d->AsConstant();
2909 if ((const_instr != nullptr) && (const_instr->value().IsSmi())) {
2910 return Smi::Cast(const_instr->value()).Value() >= 0;
2911 }
2912 return false;
2913}
2914
2915 static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) {
2916   BinarySmiOpInstr* instr = d->AsBinarySmiOp();
2917 if ((instr != nullptr) && (instr->op_kind() == Token::kSHL)) {
2918 return instr;
2919 }
2920 return nullptr;
2921}
2922
2923void FlowGraph::OptimizeLeftShiftBitAndSmiOp(
2924 ForwardInstructionIterator* current_iterator,
2925 Definition* bit_and_instr,
2926 Definition* left_instr,
2927 Definition* right_instr) {
2928 ASSERT(bit_and_instr != nullptr);
2929 ASSERT((left_instr != nullptr) && (right_instr != nullptr));
2930
2931 // Check for pattern, smi_shift_left must be single-use.
2932 bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr);
2933 if (!is_positive_or_zero) {
2934 is_positive_or_zero = IsPositiveOrZeroSmiConst(right_instr);
2935 }
2936 if (!is_positive_or_zero) return;
2937
2938 BinarySmiOpInstr* smi_shift_left = nullptr;
2939 if (bit_and_instr->InputAt(0)->IsSingleUse()) {
2940 smi_shift_left = AsSmiShiftLeftInstruction(left_instr);
2941 }
2942 if ((smi_shift_left == nullptr) &&
2943 (bit_and_instr->InputAt(1)->IsSingleUse())) {
2944 smi_shift_left = AsSmiShiftLeftInstruction(right_instr);
2945 }
2946 if (smi_shift_left == nullptr) return;
2947
2948 // Pattern recognized.
2949 smi_shift_left->mark_truncating();
2950 ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryInt64Op());
2951 if (bit_and_instr->IsBinaryInt64Op()) {
2952 // Replace Mint op with Smi op.
2953 BinarySmiOpInstr* smi_op = new (Z) BinarySmiOpInstr(
2954 Token::kBIT_AND, new (Z) Value(left_instr), new (Z) Value(right_instr),
2955 DeoptId::kNone); // BIT_AND cannot deoptimize.
2956 bit_and_instr->ReplaceWith(smi_op, current_iterator);
2957 }
2958}
2959
2960// Dart:
2961// var x = d % 10;
2962// var y = d ~/ 10;
2963// var z = x + y;
2964//
2965// IL:
2966// v4 <- %(v2, v3)
2967// v5 <- ~/(v2, v3)
2968// v6 <- +(v4, v5)
2969//
2970// IL optimized:
2971// v4 <- DIVMOD(v2, v3);
2972// v5 <- LoadIndexed(v4, 0); // ~/ result
2973// v6 <- LoadIndexed(v4, 1); // % result
2974// v7 <- +(v5, v6)
2975 // Because of the environment it is important that the merged instruction
2976 // replaces the first original instruction encountered.
2977void FlowGraph::TryMergeTruncDivMod(
2978 GrowableArray<BinarySmiOpInstr*>* merge_candidates) {
2979 if (merge_candidates->length() < 2) {
2980 // Need at least a TRUNCDIV and a MOD.
2981 return;
2982 }
2983 for (intptr_t i = 0; i < merge_candidates->length(); i++) {
2984 BinarySmiOpInstr* curr_instr = (*merge_candidates)[i];
2985 if (curr_instr == nullptr) {
2986 // Instruction was merged already.
2987 continue;
2988 }
2989 ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) ||
2990 (curr_instr->op_kind() == Token::kMOD));
2991     // Check if there is a kMOD/kTRUNCDIV binop with the same inputs.
2992 const Token::Kind other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV)
2993 ? Token::kMOD
2994 : Token::kTRUNCDIV;
2995 Definition* left_def = curr_instr->left()->definition();
2996 Definition* right_def = curr_instr->right()->definition();
2997 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) {
2998 BinarySmiOpInstr* other_binop = (*merge_candidates)[k];
2999 // 'other_binop' can be nullptr if it was already merged.
3000 if ((other_binop != nullptr) && (other_binop->op_kind() == other_kind) &&
3001 (other_binop->left()->definition() == left_def) &&
3002 (other_binop->right()->definition() == right_def)) {
3003 (*merge_candidates)[k] = nullptr; // Clear it.
3004 ASSERT(curr_instr->HasUses());
3005 AppendExtractNthOutputForMerged(
3006 curr_instr, TruncDivModInstr::OutputIndexOf(curr_instr->op_kind()),
3007 kTagged, kSmiCid);
3008 ASSERT(other_binop->HasUses());
3009 AppendExtractNthOutputForMerged(
3010 other_binop,
3011 TruncDivModInstr::OutputIndexOf(other_binop->op_kind()), kTagged,
3012 kSmiCid);
3013
3014 // Replace with TruncDivMod.
3015 TruncDivModInstr* div_mod = new (Z) TruncDivModInstr(
3016 curr_instr->left()->CopyWithType(),
3017 curr_instr->right()->CopyWithType(), curr_instr->deopt_id());
3018 curr_instr->ReplaceWith(div_mod, nullptr);
3019 other_binop->ReplaceUsesWith(div_mod);
3020 other_binop->RemoveFromGraph();
3021 // Only one merge possible. Because canonicalization happens later,
3022 // more candidates are possible.
3023 // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod.
3024 break;
3025 }
3026 }
3027 }
3028}
3029
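// Appends an ExtractNthOutput right after the merged instruction and
// redirects all existing uses of the original result to that extractor.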
3030void FlowGraph::AppendExtractNthOutputForMerged(Definition* instr,
3031 intptr_t index,
3032 Representation rep,
3033 intptr_t cid) {
3034 ExtractNthOutputInstr* extract =
3035 new (Z) ExtractNthOutputInstr(new (Z) Value(instr), index, rep, cid);
3036 instr->ReplaceUsesWith(extract);
3037 InsertAfter(instr, extract, nullptr, FlowGraph::kValue);
3038}
3039
3040//
3041// Static helpers for the flow graph utilities.
3042//
3043
3044 static TargetEntryInstr* NewTarget(FlowGraph* graph, Instruction* inherit) {
3045   TargetEntryInstr* target = new (graph->zone())
3046       TargetEntryInstr(graph->allocate_block_id(),
3047                        inherit->GetBlock()->try_index(), DeoptId::kNone);
3048 target->InheritDeoptTarget(graph->zone(), inherit);
3049 return target;
3050}
3051
3052static JoinEntryInstr* NewJoin(FlowGraph* graph, Instruction* inherit) {
3053 JoinEntryInstr* join = new (graph->zone())
3054       JoinEntryInstr(graph->allocate_block_id(),
3055                      inherit->GetBlock()->try_index(), DeoptId::kNone);
3056 join->InheritDeoptTarget(graph->zone(), inherit);
3057 return join;
3058}
3059 
3060 static GotoInstr* NewGoto(FlowGraph* graph,
3061                           JoinEntryInstr* target,
3062 Instruction* inherit) {
3063 GotoInstr* got = new (graph->zone()) GotoInstr(target, DeoptId::kNone);
3064 got->InheritDeoptTarget(graph->zone(), inherit);
3065 return got;
3066}
3067
3068 static BranchInstr* NewBranch(FlowGraph* graph,
3069                               ComparisonInstr* cmp,
3070 Instruction* inherit) {
3071 BranchInstr* bra = new (graph->zone()) BranchInstr(cmp, DeoptId::kNone);
3072 bra->InheritDeoptTarget(graph->zone(), inherit);
3073 return bra;
3074}
3075
3076//
3077// Flow graph utilities.
3078//
3079
3080// Constructs new diamond decision at the given instruction.
3081//
3082// ENTRY
3083// instruction
3084// if (compare)
3085// / \
3086// B_TRUE B_FALSE
3087// \ /
3088// JOIN
3089//
3090 JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction,
3091                                       Instruction* inherit,
3092                                       ComparisonInstr* compare,
3093                                       TargetEntryInstr** b_true,
3094 TargetEntryInstr** b_false) {
3095 BlockEntryInstr* entry = instruction->GetBlock();
3096
3097 TargetEntryInstr* bt = NewTarget(this, inherit);
3098 TargetEntryInstr* bf = NewTarget(this, inherit);
3099 JoinEntryInstr* join = NewJoin(this, inherit);
3100 GotoInstr* gotot = NewGoto(this, join, inherit);
3101 GotoInstr* gotof = NewGoto(this, join, inherit);
3102 BranchInstr* bra = NewBranch(this, compare, inherit);
3103
3104 instruction->AppendInstruction(bra);
3105 entry->set_last_instruction(bra);
3106
3107 *bra->true_successor_address() = bt;
3108 *bra->false_successor_address() = bf;
3109
3110 bt->AppendInstruction(gotot);
3111 bt->set_last_instruction(gotot);
3112
3113 bf->AppendInstruction(gotof);
3114 bf->set_last_instruction(gotof);
3115
3116 // Update dominance relation incrementally.
3117 for (intptr_t i = 0, n = entry->dominated_blocks().length(); i < n; ++i) {
3118 join->AddDominatedBlock(entry->dominated_blocks()[i]);
3119 }
3120 entry->ClearDominatedBlocks();
3121 entry->AddDominatedBlock(bt);
3122 entry->AddDominatedBlock(bf);
3123 entry->AddDominatedBlock(join);
3124
3125 // TODO(ajcbik): update pred/succ/ordering incrementally too.
3126
3127 // Return new blocks.
3128 *b_true = bt;
3129 *b_false = bf;
3130 return join;
3131}
3132
3133 JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction,
3134                                       Instruction* inherit,
3135 const LogicalAnd& condition,
3136 TargetEntryInstr** b_true,
3137 TargetEntryInstr** b_false) {
3138 // First diamond for first comparison.
3139 TargetEntryInstr* bt = nullptr;
3140 TargetEntryInstr* bf = nullptr;
3141 JoinEntryInstr* mid_point =
3142 NewDiamond(instruction, inherit, condition.oper1, &bt, &bf);
3143
3144 // Short-circuit second comparison and connect through phi.
3145 condition.oper2->InsertAfter(bt);
3146 AllocateSSAIndex(condition.oper2);
3147 condition.oper2->InheritDeoptTarget(zone(), inherit); // must inherit
3148 PhiInstr* phi =
3149 AddPhi(mid_point, condition.oper2, GetConstant(Bool::False()));
3150 StrictCompareInstr* circuit = new (zone()) StrictCompareInstr(
3151 inherit->source(), Token::kEQ_STRICT, new (zone()) Value(phi),
3152 new (zone()) Value(GetConstant(Bool::True())), false,
3153 DeoptId::kNone); // don't inherit
3154
3155 // Return new blocks through the second diamond.
3156 return NewDiamond(mid_point, inherit, circuit, b_true, b_false);
3157}
3158
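// Adds a new two-input phi to the given join block, wiring d1 and d2 as its
// inputs and registering the corresponding input uses.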
3159 PhiInstr* FlowGraph::AddPhi(JoinEntryInstr* join,
3160                             Definition* d1,
3161 Definition* d2) {
3162 PhiInstr* phi = new (zone()) PhiInstr(join, 2);
3163 Value* v1 = new (zone()) Value(d1);
3164 Value* v2 = new (zone()) Value(d2);
3165
3166 AllocateSSAIndex(phi);
3167
3168 phi->mark_alive();
3169 phi->SetInputAt(0, v1);
3170 phi->SetInputAt(1, v2);
3171 d1->AddInputUse(v1);
3172 d2->AddInputUse(v2);
3173 join->InsertPhi(phi);
3174
3175 return phi;
3176}
3177
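// Rewrites each call's pushed arguments into explicit MoveArgument
// instructions: argument locations are computed from the callee's calling
// convention, and non-register moves are inserted right before the call.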
3178 void FlowGraph::InsertMoveArguments() {
3179   compiler::ParameterInfoArray argument_locations;
3180
3181 intptr_t max_argument_slot_count = 0;
3182 auto& target = Function::Handle();
3183
3184 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
3185 block_it.Advance()) {
3187 for (ForwardInstructionIterator instr_it(block_it.Current());
3188 !instr_it.Done(); instr_it.Advance()) {
3189 Instruction* instruction = instr_it.Current();
3190 const intptr_t arg_count = instruction->ArgumentCount();
3191 if (arg_count == 0) {
3192 continue;
3193 }
3194
3196 if (auto static_call = instruction->AsStaticCall()) {
3197 target = static_call->function().ptr();
3198 } else if (auto instance_call = instruction->AsInstanceCallBase()) {
3199 target = instance_call->interface_target().ptr();
3200 } else if (auto dispatch_call = instruction->AsDispatchTableCall()) {
3201 target = dispatch_call->interface_target().ptr();
3202 } else if (auto cachable_call = instruction->AsCachableIdempotentCall()) {
3203 target = cachable_call->function().ptr();
3204 }
3205
3206 MoveArgumentsArray* arguments =
3207 new (Z) MoveArgumentsArray(zone(), arg_count);
3208 arguments->EnsureLength(arg_count, nullptr);
3209
3210 const intptr_t stack_arguments_size_in_words =
3212 zone(), target, arg_count,
3213 [&](intptr_t i) {
3214 return instruction->RequiredInputRepresentation(i);
3215 },
3216 /*should_assign_stack_locations=*/true, &argument_locations);
3217
3218 for (intptr_t i = 0; i < arg_count; ++i) {
3219 const auto& [location, rep] = argument_locations[i];
3220 Value* arg = instruction->ArgumentValueAt(i);
3221 (*arguments)[i] = new (Z) MoveArgumentInstr(
3222 arg->CopyWithType(Z), rep, location.ToCallerSpRelative());
3223 }
3225 stack_arguments_size_in_words);
3226
3227 for (auto move_arg : *arguments) {
3228 // Insert all MoveArgument instructions immediately before call.
3229 // MoveArgumentInstr::EmitNativeCode may generate more efficient
3230 // code for subsequent MoveArgument instructions (ARM, ARM64).
3231 if (!move_arg->is_register_move()) {
3232 InsertBefore(instruction, move_arg, /*env=*/nullptr, kEffect);
3233 }
3234 }
3235 instruction->ReplaceInputsWithMoveArguments(arguments);
3236 if (instruction->env() != nullptr) {
3237 instruction->RepairArgumentUsesInEnvironment();
3238 }
3239 }
3240 }
3242}
3243
3244void FlowGraph::Print(const char* phase) {
3245 FlowGraphPrinter::PrintGraph(phase, this);
3246}
3247
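// Renumbers SSA temp indices and block ids into a dense range after
// optimizations have removed definitions and blocks. Detached
// MaterializeObject definitions that are referenced only from environments
// are collected and renumbered as well.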
3248 class SSACompactor : public ValueObject {
3249  public:
3250 SSACompactor(intptr_t num_blocks,
3251 intptr_t num_ssa_vars,
3252 ZoneGrowableArray<Definition*>* detached_defs)
3253 : block_num_(num_blocks),
3254 ssa_num_(num_ssa_vars),
3255 detached_defs_(detached_defs) {
3256 block_num_.EnsureLength(num_blocks, -1);
3257 ssa_num_.EnsureLength(num_ssa_vars, -1);
3258 }
3259
3260   void RenumberGraph(FlowGraph* graph) {
3261     for (auto block : graph->reverse_postorder()) {
3262 block_num_[block->block_id()] = 1;
3263 CollectDetachedMaterializations(block->env());
3264
3265 if (auto* block_with_idefs = block->AsBlockEntryWithInitialDefs()) {
3266 for (Definition* def : *block_with_idefs->initial_definitions()) {
3267 RenumberDefinition(def);
3268 CollectDetachedMaterializations(def->env());
3269 }
3270 }
3271 if (auto* join = block->AsJoinEntry()) {
3272 for (PhiIterator it(join); !it.Done(); it.Advance()) {
3273 RenumberDefinition(it.Current());
3274 }
3275 }
3276 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
3277 Instruction* instr = it.Current();
3278 if (Definition* def = instr->AsDefinition()) {
3279 RenumberDefinition(def);
3280 }
3281 CollectDetachedMaterializations(instr->env());
3282 }
3283 }
3284 for (auto* def : (*detached_defs_)) {
3285 RenumberDefinition(def);
3286 }
3287 graph->set_current_ssa_temp_index(current_ssa_index_);
3288
3289     // Preserve the order between block ids, as predecessors are sorted
3290     // by block ids.
3291 intptr_t current_block_index = 0;
3292 for (intptr_t i = 0, n = block_num_.length(); i < n; ++i) {
3293 if (block_num_[i] >= 0) {
3294 block_num_[i] = current_block_index++;
3295 }
3296 }
3297 for (auto block : graph->reverse_postorder()) {
3298 block->set_block_id(block_num_[block->block_id()]);
3299 }
3300 graph->set_max_block_id(current_block_index - 1);
3301 }
3302
3303 private:
3304 void RenumberDefinition(Definition* def) {
3305 if (def->HasSSATemp()) {
3306 const intptr_t old_index = def->ssa_temp_index();
3307 intptr_t new_index = ssa_num_[old_index];
3308 if (new_index < 0) {
3309 ssa_num_[old_index] = new_index = current_ssa_index_++;
3310 }
3311 def->set_ssa_temp_index(new_index);
3312 }
3313 }
3314
3315 bool IsDetachedDefinition(Definition* def) {
3316 return def->IsMaterializeObject() && (def->next() == nullptr);
3317 }
3318
3319 void AddDetachedDefinition(Definition* def) {
3320 for (intptr_t i = 0, n = detached_defs_->length(); i < n; ++i) {
3321 if ((*detached_defs_)[i] == def) {
3322 return;
3323 }
3324 }
3325 detached_defs_->Add(def);
3326 // Follow inputs as detached definitions can reference other
3327 // detached definitions.
3328 for (intptr_t i = 0, n = def->InputCount(); i < n; ++i) {
3329 Definition* input = def->InputAt(i)->definition();
3330 if (IsDetachedDefinition(input)) {
3331 AddDetachedDefinition(input);
3332 }
3333 }
3334 ASSERT(def->env() == nullptr);
3335 }
3336
3337 void CollectDetachedMaterializations(Environment* env) {
3338 if (env == nullptr) {
3339 return;
3340 }
3341 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
3342 Definition* def = it.CurrentValue()->definition();
3343 if (IsDetachedDefinition(def)) {
3344 AddDetachedDefinition(def);
3345 }
3346 }
3347 }
3348
3349 GrowableArray<intptr_t> block_num_;
3350 GrowableArray<intptr_t> ssa_num_;
3351 intptr_t current_ssa_index_ = 0;
3352 ZoneGrowableArray<Definition*>* detached_defs_;
3353};
3354
3355void FlowGraph::CompactSSA(ZoneGrowableArray<Definition*>* detached_defs) {
3356 if (detached_defs == nullptr) {
3357 detached_defs = new (Z) ZoneGrowableArray<Definition*>(Z, 0);
3358 }
3359 SSACompactor compactor(max_block_id() + 1, current_ssa_temp_index(),
3360 detached_defs);
3361 compactor.RenumberGraph(this);
3362}
3363
3364} // namespace dart
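The SSACompactor above renumbers the surviving SSA temp indices and basic block ids into a dense range after dead definitions and blocks have been removed; block ids keep their relative order so that predecessor lists remain sorted. Below is a minimal standalone sketch (illustrative only, not part of flow_graph.cc; all names are made up) of the same two-pass dense renumbering that RenumberGraph performs for block ids:

// Illustrative sketch only -- not from flow_graph.cc. Pass 1 marks which
// sparse ids survive; pass 2 assigns dense ids in increasing order, so the
// relative order between surviving ids is preserved.
#include <cstdio>
#include <vector>

int main() {
  const std::vector<int> surviving_ids = {0, 3, 7, 8, 12};  // sparse ids
  const int max_old_id = 12;

  std::vector<int> renumber(max_old_id + 1, -1);  // -1 == removed/unused
  for (int id : surviving_ids) {
    renumber[id] = 1;  // pass 1: mark as live
  }

  int next_dense_id = 0;
  for (int id = 0; id <= max_old_id; ++id) {
    if (renumber[id] >= 0) {
      renumber[id] = next_dense_id++;  // pass 2: assign dense id in order
    }
  }

  for (int id : surviving_ids) {
    std::printf("old id %2d -> new id %d\n", id, renumber[id]);
  }
  return 0;
}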