flow_graph.cc
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/compiler/backend/flow_graph.h"
6
7#include <array>
8
9#include "vm/bit_vector.h"
16#include "vm/compiler/cha.h"
21#include "vm/growable_array.h"
22#include "vm/object_store.h"
23#include "vm/resolver.h"
24
25namespace dart {
26
27DEFINE_FLAG(bool, prune_dead_locals, true, "optimize dead locals away");
28
29// Quick access to the current zone.
30#define Z (zone())
31
32static bool ShouldReorderBlocks(const Function& function,
33 FlowGraph::CompilationMode mode) {
34 return (mode == FlowGraph::CompilationMode::kOptimized) &&
35 FLAG_reorder_basic_blocks && !function.IsFfiCallbackTrampoline();
36}
37
38static bool IsMarkedWithNoBoundsChecks(const Function& function) {
39 Object& options = Object::Handle();
40 return Library::FindPragma(Thread::Current(),
41 /*only_core=*/false, function,
42 Symbols::vm_unsafe_no_bounds_checks(),
43 /*multiple=*/false, &options);
44}
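// The pragma checked above is attached to a function in Dart source, e.g.
// (illustrative only):
//
//   @pragma('vm:unsafe:no-bounds-checks')
//   int sumAll(List<int> values) { ... }
//
// Together with the AOT check in the constructor below, such a function has
// its array bounds checks removed (see should_remove_all_bounds_checks_).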
45
46FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
47 GraphEntryInstr* graph_entry,
48 intptr_t max_block_id,
49 PrologueInfo prologue_info,
50 CompilationMode compilation_mode)
51 : thread_(Thread::Current()),
52 parent_(),
53 current_ssa_temp_index_(0),
54 max_block_id_(max_block_id),
55 parsed_function_(parsed_function),
56 num_direct_parameters_(parsed_function.function().MakesCopyOfParameters()
57 ? 0
58 : parsed_function.function().NumParameters()),
59 direct_parameter_locations_(
60 parsed_function.function().num_fixed_parameters()),
61 graph_entry_(graph_entry),
62 preorder_(),
63 postorder_(),
64 reverse_postorder_(),
65 optimized_block_order_(),
66 constant_null_(nullptr),
67 constant_dead_(nullptr),
68 licm_allowed_(true),
69 should_reorder_blocks_(
70 ShouldReorderBlocks(parsed_function.function(), compilation_mode)),
71 prologue_info_(prologue_info),
72 loop_hierarchy_(nullptr),
73 loop_invariant_loads_(nullptr),
74 captured_parameters_(new(zone()) BitVector(zone(), variable_count())),
75 inlining_id_(-1),
76 should_print_(false),
77 should_remove_all_bounds_checks_(
78 CompilerState::Current().is_aot() &&
79 IsMarkedWithNoBoundsChecks(parsed_function.function())) {
81 &compiler_pass_filters_);
83 zone(), function(),
84 /*should_assign_stack_locations=*/
86 &direct_parameter_locations_);
88}
89
90void FlowGraph::EnsureSSATempIndex(Definition* defn, Definition* replacement) {
91 if ((replacement->ssa_temp_index() == -1) && (defn->ssa_temp_index() != -1)) {
92 AllocateSSAIndex(replacement);
93 }
94}
95
97 intptr_t argument_count) {
100
101 const intptr_t fixed_parameters_size_in_bytes =
103
104 // Currently we box all optional parameters.
105 return fixed_parameters_size_in_bytes +
107}
108
109Representation FlowGraph::ParameterRepresentationAt(const Function& function,
110 intptr_t index) {
111 if (function.IsNull()) {
112 return kTagged;
113 }
114 ASSERT(index < function.NumParameters());
115 if (function.is_unboxed_integer_parameter_at(index)) {
116 return kUnboxedInt64;
117 } else if (function.is_unboxed_double_parameter_at(index)) {
118 return kUnboxedDouble;
119 } else {
121 return kTagged;
122 }
123}
124
125Representation FlowGraph::ReturnRepresentationOf(const Function& function) {
126 if (function.IsNull()) {
127 return kTagged;
128 }
129 if (function.has_unboxed_integer_return()) {
130 return kUnboxedInt64;
131 } else if (function.has_unboxed_double_return()) {
132 return kUnboxedDouble;
133 } else if (function.has_unboxed_record_return()) {
134 return kPairOfTagged;
135 } else {
137 return kTagged;
138 }
139}
140
141void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
142 Instruction* current,
143 Instruction* replacement) {
144 Definition* current_defn = current->AsDefinition();
145 if ((replacement != nullptr) && (current_defn != nullptr)) {
146 Definition* replacement_defn = replacement->AsDefinition();
147 ASSERT(replacement_defn != nullptr);
148 current_defn->ReplaceUsesWith(replacement_defn);
149 EnsureSSATempIndex(current_defn, replacement_defn);
150
151 if (FLAG_trace_optimization && should_print()) {
152 THR_Print("Replacing v%" Pd " with v%" Pd "\n",
153 current_defn->ssa_temp_index(),
154 replacement_defn->ssa_temp_index());
155 }
156 } else if (FLAG_trace_optimization && should_print()) {
157 if (current_defn == nullptr) {
158 THR_Print("Removing %s\n", current->DebugName());
159 } else {
160 ASSERT(!current_defn->HasUses());
161 THR_Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index());
162 }
163 }
164 if (current->ArgumentCount() != 0) {
165 ASSERT(!current->HasMoveArguments());
166 }
167 iterator->RemoveCurrentFromGraph();
168}
169
170GrowableArray<BlockEntryInstr*>* FlowGraph::CodegenBlockOrder() {
171 return should_reorder_blocks() ? &optimized_block_order_
172 : &reverse_postorder_;
173}
174
175const GrowableArray<BlockEntryInstr*>* FlowGraph::CodegenBlockOrder() const {
176 return should_reorder_blocks() ? &optimized_block_order_
177 : &reverse_postorder_;
178}
179
180ConstantInstr* FlowGraph::GetExistingConstant(
181 const Object& object,
182 Representation representation) const {
183 return constant_instr_pool_.LookupValue(
184 ConstantAndRepresentation{object, representation});
185}
186
187ConstantInstr* FlowGraph::GetConstant(const Object& object,
188 Representation representation) {
189 ConstantInstr* constant = GetExistingConstant(object, representation);
190 if (constant == nullptr) {
191 // Otherwise, allocate and add it to the pool.
192 const Object& zone_object = Object::ZoneHandle(zone(), object.ptr());
193 if (representation == kTagged) {
194 constant = new (zone()) ConstantInstr(zone_object);
195 } else {
196 constant = new (zone()) UnboxedConstantInstr(zone_object, representation);
197 }
198 AllocateSSAIndex(constant);
199 AddToGraphInitialDefinitions(constant);
200 constant_instr_pool_.Insert(constant);
201 }
202 return constant;
203}
204
205bool FlowGraph::IsConstantRepresentable(const Object& value,
206 Representation target_rep,
207 bool tagged_value_must_be_smi) {
208 switch (target_rep) {
209 case kTagged:
210 return !tagged_value_must_be_smi || value.IsSmi();
211
212 case kUnboxedInt32:
213 if (value.IsInteger()) {
214 return Utils::IsInt(32, Integer::Cast(value).AsInt64Value());
215 }
216 return false;
217
218 case kUnboxedUint32:
219 if (value.IsInteger()) {
220 return Utils::IsUint(32, Integer::Cast(value).AsInt64Value());
221 }
222 return false;
223
224 case kUnboxedInt64:
225 return value.IsInteger();
226
227 case kUnboxedFloat:
228 case kUnboxedDouble:
229 return value.IsInteger() || value.IsDouble();
230
231 default:
232 return false;
233 }
234}
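// For example, a Smi constant is representable as kTagged or as the unboxed
// integer representations above (subject to the 32-bit range checks), and an
// integer constant may also be used for kUnboxedFloat/kUnboxedDouble after
// conversion (see TryCreateConstantReplacementFor below), while other objects
// are only representable as kTagged, and then only when a Smi is not
// specifically required.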
235
236Definition* FlowGraph::TryCreateConstantReplacementFor(Definition* op,
237 const Object& value) {
238 // Check that representation of the constant matches expected representation.
239 const auto representation = op->representation();
240 if (!IsConstantRepresentable(
241 value, representation,
242 /*tagged_value_must_be_smi=*/op->Type()->IsNullableSmi())) {
243 return op;
244 }
245
246 if (((representation == kUnboxedFloat) ||
247 (representation == kUnboxedDouble)) &&
248 value.IsInteger()) {
249 // Convert the boxed constant from int to float/double.
251 Integer::Cast(value).AsDoubleValue())),
252 representation);
253 }
254
255 return GetConstant(value, representation);
256}
257
258void FlowGraph::AddToGraphInitialDefinitions(Definition* defn) {
259 defn->set_previous(graph_entry_);
260 graph_entry_->initial_definitions()->Add(defn);
261}
262
263void FlowGraph::AddToInitialDefinitions(BlockEntryWithInitialDefs* entry,
264 Definition* defn) {
265 ASSERT(defn->previous() == nullptr);
266 defn->set_previous(entry);
267 if (auto par = defn->AsParameter()) {
268 par->set_block(entry); // set cached block
269 }
270 entry->initial_definitions()->Add(defn);
271}
272
273void FlowGraph::InsertAfter(Instruction* prev,
274 Instruction* instr,
275 Environment* env,
276 UseKind use_kind) {
277 if (use_kind == kValue) {
278 ASSERT(instr->IsDefinition());
279 AllocateSSAIndex(instr->AsDefinition());
280 }
281 instr->InsertAfter(prev);
282 ASSERT(instr->env() == nullptr);
283 if (env != nullptr) {
284 env->DeepCopyTo(zone(), instr);
285 }
286}
287
289 Instruction* instr,
291 UseKind use_kind) {
292 InsertAfter(prev, instr, env, use_kind);
293 if (instr->env() != nullptr) {
295 }
296}
297
298Instruction* FlowGraph::AppendTo(Instruction* prev,
299 Instruction* instr,
300 Environment* env,
301 UseKind use_kind) {
302 if (use_kind == kValue) {
303 ASSERT(instr->IsDefinition());
304 AllocateSSAIndex(instr->AsDefinition());
305 }
306 ASSERT(instr->env() == nullptr);
307 if (env != nullptr) {
308 env->DeepCopyTo(zone(), instr);
309 }
310 return prev->AppendInstruction(instr);
311}
313 Instruction* instr,
315 UseKind use_kind) {
316 auto result = AppendTo(prev, instr, env, use_kind);
317 if (instr->env() != nullptr) {
319 }
320 return result;
321}
322
323// A wrapper around block entries including an index of the next successor to
324// be read.
325class BlockTraversalState {
326 public:
327 explicit BlockTraversalState(BlockEntryInstr* block)
328 : block_(block),
329 next_successor_ix_(block->last_instruction()->SuccessorCount() - 1) {}
330
331 bool HasNextSuccessor() const { return next_successor_ix_ >= 0; }
332 BlockEntryInstr* NextSuccessor() {
333 ASSERT(HasNextSuccessor());
334 return block_->last_instruction()->SuccessorAt(next_successor_ix_--);
335 }
336
337 BlockEntryInstr* block() const { return block_; }
338
339 private:
340 BlockEntryInstr* block_;
341 intptr_t next_successor_ix_;
342
343 DISALLOW_ALLOCATION();
344};
345
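// Recomputes the block orders (preorder_, postorder_, reverse_postorder_) and
// the preorder parent links by performing an iterative depth-first traversal
// from the graph entry, using BlockTraversalState above to avoid deep
// recursion. Postorder numbers are assigned as blocks are popped off the
// explicit traversal stack.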
346void FlowGraph::DiscoverBlocks() {
348
350
351 // Initialize state.
352 preorder_.Clear();
353 postorder_.Clear();
354 reverse_postorder_.Clear();
355 parent_.Clear();
356
357 GrowableArray<BlockTraversalState> block_stack;
358 graph_entry_->DiscoverBlock(nullptr, &preorder_, &parent_);
359 block_stack.Add(BlockTraversalState(graph_entry_));
360 while (!block_stack.is_empty()) {
361 BlockTraversalState& state = block_stack.Last();
362 BlockEntryInstr* block = state.block();
363 if (state.HasNextSuccessor()) {
364 // Process successors one-by-one.
365 BlockEntryInstr* succ = state.NextSuccessor();
366 if (succ->DiscoverBlock(block, &preorder_, &parent_)) {
367 block_stack.Add(BlockTraversalState(succ));
368 }
369 } else {
370 // All successors have been processed, pop the current block entry node
371 // and add it to the postorder list.
372 block_stack.RemoveLast();
373 block->set_postorder_number(postorder_.length());
374 postorder_.Add(block);
375 }
376 }
377
378 ASSERT(postorder_.length() == preorder_.length());
379
380 // Create an array of blocks in reverse postorder.
381 intptr_t block_count = postorder_.length();
382 for (intptr_t i = 0; i < block_count; ++i) {
383 reverse_postorder_.Add(postorder_[block_count - i - 1]);
384 }
385
386 ResetLoopHierarchy();
387}
388
389void FlowGraph::MergeBlocks() {
390 bool changed = false;
391 BitVector* merged = new (zone()) BitVector(zone(), postorder().length());
392 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
393 block_it.Advance()) {
394 BlockEntryInstr* block = block_it.Current();
395 if (block->IsGraphEntry()) continue;
396 if (merged->Contains(block->postorder_number())) continue;
397
398 Instruction* last = block->last_instruction();
399 BlockEntryInstr* last_merged_block = nullptr;
400 while (auto goto_instr = last->AsGoto()) {
401 JoinEntryInstr* successor = goto_instr->successor();
402 if (successor->PredecessorCount() > 1) break;
403 if (block->try_index() != successor->try_index()) break;
404
405 // Replace all phis with their arguments prior to removing successor.
406 for (PhiIterator it(successor); !it.Done(); it.Advance()) {
407 PhiInstr* phi = it.Current();
408 Value* input = phi->InputAt(0);
409 phi->ReplaceUsesWith(input->definition());
410 input->RemoveFromUseList();
411 }
412
413 // Remove environment uses and unlink goto and block entry.
414 successor->UnuseAllInputs();
415 last->previous()->LinkTo(successor->next());
416 last->UnuseAllInputs();
417
418 last = successor->last_instruction();
419 merged->Add(successor->postorder_number());
420 last_merged_block = successor;
421 changed = true;
422 if (FLAG_trace_optimization && should_print()) {
423 THR_Print("Merged blocks B%" Pd " and B%" Pd "\n", block->block_id(),
424 successor->block_id());
425 }
426 }
427 // The new block inherits the block id of the last successor to maintain
428 // the order of phi inputs at its successors consistent with block ids.
429 if (last_merged_block != nullptr) {
430 block->set_block_id(last_merged_block->block_id());
431 }
432 }
433 // Recompute block order after changes were made.
434 if (changed) DiscoverBlocks();
435}
436
437void FlowGraph::ComputeIsReceiverRecursive(
438 PhiInstr* phi,
439 GrowableArray<PhiInstr*>* unmark) const {
440 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) return;
441 phi->set_is_receiver(PhiInstr::kReceiver);
442 for (intptr_t i = 0; i < phi->InputCount(); ++i) {
443 Definition* def = phi->InputAt(i)->definition();
444 if (def->IsParameter() && (def->AsParameter()->env_index() == 0)) continue;
445 if (!def->IsPhi()) {
446 phi->set_is_receiver(PhiInstr::kNotReceiver);
447 break;
448 }
449 ComputeIsReceiverRecursive(def->AsPhi(), unmark);
450 if (def->AsPhi()->is_receiver() == PhiInstr::kNotReceiver) {
451 phi->set_is_receiver(PhiInstr::kNotReceiver);
452 break;
453 }
454 }
455
456 if (phi->is_receiver() == PhiInstr::kNotReceiver) {
457 unmark->Add(phi);
458 }
459}
460
461void FlowGraph::ComputeIsReceiver(PhiInstr* phi) const {
462 GrowableArray<PhiInstr*> unmark;
463 ComputeIsReceiverRecursive(phi, &unmark);
464
465 // Now drain unmark.
466 while (!unmark.is_empty()) {
467 PhiInstr* phi = unmark.RemoveLast();
468 for (Value::Iterator it(phi->input_use_list()); !it.Done(); it.Advance()) {
469 PhiInstr* use = it.Current()->instruction()->AsPhi();
470 if ((use != nullptr) && (use->is_receiver() == PhiInstr::kReceiver)) {
471 use->set_is_receiver(PhiInstr::kNotReceiver);
472 unmark.Add(use);
473 }
474 }
475 }
476}
477
478bool FlowGraph::IsReceiver(Definition* def) const {
479 def = def->OriginalDefinition(); // Could be redefined.
480 if (def->IsParameter()) return (def->AsParameter()->env_index() == 0);
481 if (!def->IsPhi() || graph_entry()->HasSingleEntryPoint()) {
482 return false;
483 }
484 PhiInstr* phi = def->AsPhi();
485 if (phi->is_receiver() != PhiInstr::kUnknownReceiver) {
486 return (phi->is_receiver() == PhiInstr::kReceiver);
487 }
488 // Not known if this phi is the receiver yet. Compute it now.
489 ComputeIsReceiver(phi);
490 return (phi->is_receiver() == PhiInstr::kReceiver);
491}
492
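// Decides which receiver check an instance call needs:
//   ToCheck::kNoCheck   - CHA proves there is no override and the receiver
//                         cannot be null, so no check is required;
//   ToCheck::kCheckNull - only nullability is in question, so a null check
//                         suffices;
//   ToCheck::kCheckCid  - otherwise fall back to a class id check.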
493FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
494 InstanceCallInstr* call,
495 UntaggedFunction::Kind kind) const {
496 if (!FLAG_use_cha_deopt && !isolate_group()->all_classes_finalized()) {
497 // Even if class or function are private, lazy class finalization
498 // may later add overriding methods.
499 return ToCheck::kCheckCid;
500 }
501
502 // Best effort to get the receiver class.
503 Value* receiver = call->Receiver();
504 Class& receiver_class = Class::Handle(zone());
505 bool receiver_maybe_null = false;
506 if (function().IsDynamicFunction() && IsReceiver(receiver->definition())) {
507 // Call receiver is callee receiver: calling "this.g()" in f().
508 receiver_class = function().Owner();
509 } else {
510 // Get the receiver's compile type. Note that
511 // we allow nullable types, which may result in just generating
512 // a null check rather than the more elaborate class check
513 CompileType* type = receiver->Type();
514 const intptr_t receiver_cid = type->ToNullableCid();
515 if (receiver_cid != kDynamicCid) {
516 receiver_class = isolate_group()->class_table()->At(receiver_cid);
517 receiver_maybe_null = type->is_nullable();
518 } else {
519 const AbstractType* atype = type->ToAbstractType();
520 if (atype->IsInstantiated() && atype->HasTypeClass() &&
521 !atype->IsDynamicType()) {
522 if (type->is_nullable()) {
523 receiver_maybe_null = true;
524 }
525 receiver_class = atype->type_class();
526 if (receiver_class.is_implemented()) {
527 receiver_class = Class::null();
528 }
529 }
530 }
531 }
532
533 // Useful receiver class information?
534 if (receiver_class.IsNull() ||
535 receiver_class.has_dynamically_extendable_subtypes()) {
536 return ToCheck::kCheckCid;
537 } else if (call->HasICData()) {
538 // If the static class type does not match information found in ICData
539 // (which may be "guessed"), then bail, since subsequent code generation
540 // (AOT and JIT) for inlining uses the latter.
541 // TODO(ajcbik): improve this by using the static class.
542 const intptr_t cid = receiver_class.id();
543 const ICData* data = call->ic_data();
544 bool match = false;
545 Class& cls = Class::Handle(zone());
546 Function& fun = Function::Handle(zone());
547 for (intptr_t i = 0, len = data->NumberOfChecks(); i < len; i++) {
548 if (!data->IsUsedAt(i)) {
549 continue; // do not consider
550 }
551 fun = data->GetTargetAt(i);
552 cls = fun.Owner();
553 if (data->GetReceiverClassIdAt(i) == cid || cls.id() == cid) {
554 match = true;
555 break;
556 }
557 }
558 if (!match) {
559 return ToCheck::kCheckCid;
560 }
561 }
562
563 const String& method_name =
564 (kind == UntaggedFunction::kMethodExtractor)
565 ? String::Handle(zone(), Field::NameFromGetter(call->function_name()))
566 : call->function_name();
567
568 // If the receiver can have the null value, exclude any method
569 // that is actually valid on a null receiver.
570 if (receiver_maybe_null) {
571 const Class& null_class =
572 Class::Handle(zone(), isolate_group()->object_store()->null_class());
573 Function& target = Function::Handle(zone());
574 if (null_class.EnsureIsFinalized(thread()) == Error::null()) {
575 target = Resolver::ResolveDynamicAnyArgs(zone(), null_class, method_name,
576 /*allow_add=*/true);
577 }
578 if (!target.IsNull()) {
579 return ToCheck::kCheckCid;
580 }
581 }
582
583 // Use CHA to determine if the method is not overridden by any subclass
584 // of the receiver class. Any methods that are valid when the receiver
585 // has a null value are excluded above (to avoid throwing an exception
586 // on something valid, like null.hashCode).
587 intptr_t subclass_count = 0;
588 CHA& cha = thread()->compiler_state().cha();
589 if (!cha.HasOverride(receiver_class, method_name, &subclass_count)) {
590 if (FLAG_trace_cha) {
591 THR_Print(
592 " **(CHA) Instance call needs no class check since there "
593 "are no overrides of method '%s' on '%s'\n",
594 method_name.ToCString(), receiver_class.ToCString());
595 }
596 if (FLAG_use_cha_deopt) {
597 cha.AddToGuardedClassesForSubclassCount(receiver_class, subclass_count);
598 }
599 return receiver_maybe_null ? ToCheck::kCheckNull : ToCheck::kNoCheck;
600 }
601 return ToCheck::kCheckCid;
602}
603
604Instruction* FlowGraph::CreateCheckClass(Definition* to_check,
605 const Cids& cids,
606 intptr_t deopt_id,
607 const InstructionSource& source) {
608 if (cids.IsMonomorphic() && cids.MonomorphicReceiverCid() == kSmiCid) {
609 return new (zone())
610 CheckSmiInstr(new (zone()) Value(to_check), deopt_id, source);
611 }
612 return new (zone())
613 CheckClassInstr(new (zone()) Value(to_check), deopt_id, cids, source);
614}
615
616Definition* FlowGraph::CreateCheckBound(Definition* length,
617 Definition* index,
618 intptr_t deopt_id) {
619 Value* val1 = new (zone()) Value(length);
620 Value* val2 = new (zone()) Value(index);
621 if (CompilerState::Current().is_aot()) {
622 return new (zone()) GenericCheckBoundInstr(val1, val2, deopt_id);
623 }
624 return new (zone()) CheckArrayBoundInstr(val1, val2, deopt_id);
625}
626
627void FlowGraph::AddExactnessGuard(InstanceCallInstr* call,
628 intptr_t receiver_cid) {
629 const Class& cls =
630 Class::Handle(zone(), isolate_group()->class_table()->At(receiver_cid));
631
632 Definition* load_type_args = new (zone()) LoadFieldInstr(
633 call->Receiver()->CopyWithType(),
634 Slot::GetTypeArgumentsSlotFor(thread(), cls), call->source());
635 InsertBefore(call, load_type_args, call->env(), FlowGraph::kValue);
636
637 const AbstractType& type =
638 AbstractType::Handle(zone(), call->ic_data()->receivers_static_type());
639 ASSERT(!type.IsNull());
641 zone(), Type::Cast(type).GetInstanceTypeArguments(thread()));
642 Instruction* guard = new (zone()) CheckConditionInstr(
643 new StrictCompareInstr(call->source(), Token::kEQ_STRICT,
644 new (zone()) Value(load_type_args),
645 new (zone()) Value(GetConstant(args)),
646 /*needs_number_check=*/false, call->deopt_id()),
647 call->deopt_id());
648 InsertBefore(call, guard, call->env(), FlowGraph::kEffect);
649}
650
651// Verify that a redefinition dominates all uses of the redefined value.
652bool FlowGraph::VerifyRedefinitions() {
653 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
654 block_it.Advance()) {
655 for (ForwardInstructionIterator instr_it(block_it.Current());
656 !instr_it.Done(); instr_it.Advance()) {
657 RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition();
658 if (redefinition != nullptr) {
659 Definition* original = redefinition->value()->definition();
660 for (Value::Iterator it(original->input_use_list()); !it.Done();
661 it.Advance()) {
662 Value* original_use = it.Current();
663 if (original_use->instruction() == redefinition) {
664 continue;
665 }
666 if (original_use->instruction()->IsDominatedBy(redefinition)) {
667 FlowGraphPrinter::PrintGraph("VerifyRedefinitions", this);
668 THR_Print("%s\n", redefinition->ToCString());
669 THR_Print("use=%s\n", original_use->instruction()->ToCString());
670 return false;
671 }
672 }
673 }
674 }
675 }
676 return true;
677}
678
679LivenessAnalysis::LivenessAnalysis(
680 intptr_t variable_count,
681 const GrowableArray<BlockEntryInstr*>& postorder)
682 : zone_(Thread::Current()->zone()),
683 variable_count_(variable_count),
684 postorder_(postorder),
685 live_out_(postorder.length()),
686 kill_(postorder.length()),
687 live_in_(postorder.length()) {}
688
689bool LivenessAnalysis::UpdateLiveOut(const BlockEntryInstr& block) {
690 BitVector* live_out = live_out_[block.postorder_number()];
691 bool changed = false;
692 Instruction* last = block.last_instruction();
693 ASSERT(last != nullptr);
694 for (intptr_t i = 0; i < last->SuccessorCount(); i++) {
695 BlockEntryInstr* succ = last->SuccessorAt(i);
696 ASSERT(succ != nullptr);
697 if (live_out->AddAll(live_in_[succ->postorder_number()])) {
698 changed = true;
699 }
700 }
701 return changed;
702}
703
704bool LivenessAnalysis::UpdateLiveIn(const BlockEntryInstr& block) {
705 BitVector* live_out = live_out_[block.postorder_number()];
706 BitVector* kill = kill_[block.postorder_number()];
707 BitVector* live_in = live_in_[block.postorder_number()];
708 return live_in->KillAndAdd(kill, live_out);
709}
710
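// Iterates the standard backward liveness equations to a fixed point:
//
//   live_out(B) = union of live_in(S) over all successors S of B
//   live_in(B)  = gen(B) + (live_out(B) - kill(B))
//
// where kill(B) and the initial contents of live_in(B) (the "gen" set) are
// provided by the subclass via ComputeInitialSets().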
711void LivenessAnalysis::ComputeLiveInAndLiveOutSets() {
712 const intptr_t block_count = postorder_.length();
713 bool changed;
714 do {
715 changed = false;
716
717 for (intptr_t i = 0; i < block_count; i++) {
718 const BlockEntryInstr& block = *postorder_[i];
719
720 // The live-in set depends only on the kill set (which does not
721 // change in this loop) and on the live-out set. If the live-out
722 // set does not change, there is no need to recompute the
723 // live-in set.
724 if (UpdateLiveOut(block) && UpdateLiveIn(block)) {
725 changed = true;
726 }
727 }
728 } while (changed);
729}
730
731void LivenessAnalysis::Analyze() {
732 const intptr_t block_count = postorder_.length();
733 for (intptr_t i = 0; i < block_count; i++) {
734 live_out_.Add(new (zone()) BitVector(zone(), variable_count_));
735 kill_.Add(new (zone()) BitVector(zone(), variable_count_));
736 live_in_.Add(new (zone()) BitVector(zone(), variable_count_));
737 }
738
739 ComputeInitialSets();
740 ComputeLiveInAndLiveOutSets();
741}
742
743static void PrintBitVector(const char* tag, BitVector* v) {
744 THR_Print("%s:", tag);
745 for (BitVector::Iterator it(v); !it.Done(); it.Advance()) {
746 THR_Print(" %" Pd "", it.Current());
747 }
748 THR_Print("\n");
749}
750
751void LivenessAnalysis::Dump() {
752 const intptr_t block_count = postorder_.length();
753 for (intptr_t i = 0; i < block_count; i++) {
754 BlockEntryInstr* block = postorder_[i];
755 THR_Print("block @%" Pd " -> ", block->block_id());
756
757 Instruction* last = block->last_instruction();
758 for (intptr_t j = 0; j < last->SuccessorCount(); j++) {
759 BlockEntryInstr* succ = last->SuccessorAt(j);
760 THR_Print(" @%" Pd "", succ->block_id());
761 }
762 THR_Print("\n");
763
764 PrintBitVector(" live out", live_out_[i]);
765 PrintBitVector(" kill", kill_[i]);
766 PrintBitVector(" live in", live_in_[i]);
767 }
768}
769
770// Computes liveness information for local variables.
772 public:
773 explicit VariableLivenessAnalysis(FlowGraph* flow_graph)
774 : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()),
775 flow_graph_(flow_graph),
776 assigned_vars_() {}
777
778 // For every block (in preorder) compute and return set of variables that
779 // have new assigned values flowing out of that block.
780 const GrowableArray<BitVector*>& ComputeAssignedVars() {
781 // We can't directly return kill_ because it uses postorder numbering while
782 // SSA construction uses preorder numbering internally.
783 // We have to permute postorder into preorder.
784 assigned_vars_.Clear();
785
786 const intptr_t block_count = flow_graph_->preorder().length();
787 for (intptr_t i = 0; i < block_count; i++) {
788 BlockEntryInstr* block = flow_graph_->preorder()[i];
789 // All locals are assigned inside a try{} block.
790 // This is a safe approximation and workaround to force insertion of
791 // phis for stores that appear non-live because of the way catch-blocks
792 // are connected to the graph: They normally are dominated by the
793 // try-entry, but are direct successors of the graph entry in our flow
794 // graph.
795 // TODO(fschneider): Improve this approximation by better modeling the
796 // actual data flow to reduce the number of redundant phis.
797 BitVector* kill = GetKillSet(block);
798 if (block->InsideTryBlock()) {
799 kill->SetAll();
800 } else {
801 kill->Intersect(GetLiveOutSet(block));
802 }
803 assigned_vars_.Add(kill);
804 }
805
806 return assigned_vars_;
807 }
808
809 // Returns true if the value set by the given store reaches any load from the
810 // same local variable.
811 bool IsStoreAlive(BlockEntryInstr* block, StoreLocalInstr* store) {
812 if (store->local().Equals(*flow_graph_->CurrentContextVar())) {
813 return true;
814 }
815
816 if (store->is_dead()) {
817 return false;
818 }
819 if (store->is_last()) {
820 const intptr_t index = flow_graph_->EnvIndex(&store->local());
821 return GetLiveOutSet(block)->Contains(index);
822 }
823
824 return true;
825 }
826
827 // Returns true if the given load is the last for the local and the value
828 // of the local will not flow into another one.
829 bool IsLastLoad(BlockEntryInstr* block, LoadLocalInstr* load) {
830 if (load->local().Equals(*flow_graph_->CurrentContextVar())) {
831 return false;
832 }
833 const intptr_t index = flow_graph_->EnvIndex(&load->local());
834 return load->is_last() && !GetLiveOutSet(block)->Contains(index);
835 }
836
837 private:
838 virtual void ComputeInitialSets();
839
840 const FlowGraph* flow_graph_;
841 GrowableArray<BitVector*> assigned_vars_;
842};
843
844void VariableLivenessAnalysis::ComputeInitialSets() {
845 const intptr_t block_count = postorder_.length();
846
847 BitVector* last_loads = new (zone()) BitVector(zone(), variable_count_);
848 for (intptr_t i = 0; i < block_count; i++) {
849 BlockEntryInstr* block = postorder_[i];
850
851 BitVector* kill = kill_[i];
852 BitVector* live_in = live_in_[i];
853 last_loads->Clear();
854
855 // There is an implicit use (load-local) of every local variable at each
856 // call inside a try{} block and every call has an implicit control-flow
857 // to the catch entry. As an approximation we mark all locals as live
858 // inside try{}.
859 // TODO(fschneider): Improve this approximation, since not all local
860 // variable stores actually reach a call.
861 if (block->InsideTryBlock()) {
862 live_in->SetAll();
863 continue;
864 }
865
866 // Iterate backwards starting at the last instruction.
867 for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) {
868 Instruction* current = it.Current();
869
870 LoadLocalInstr* load = current->AsLoadLocal();
871 if (load != nullptr) {
872 const intptr_t index = flow_graph_->EnvIndex(&load->local());
873 if (index >= live_in->length()) continue; // Skip tmp_locals.
874 live_in->Add(index);
875 if (!last_loads->Contains(index)) {
876 last_loads->Add(index);
877 load->mark_last();
878 }
879 continue;
880 }
881
882 StoreLocalInstr* store = current->AsStoreLocal();
883 if (store != nullptr) {
884 const intptr_t index = flow_graph_->EnvIndex(&store->local());
885 if (index >= live_in->length()) continue; // Skip tmp_locals.
886 if (kill->Contains(index)) {
887 if (!live_in->Contains(index)) {
888 store->mark_dead();
889 }
890 } else {
891 if (!live_in->Contains(index)) {
892 store->mark_last();
893 }
894 kill->Add(index);
895 }
896 live_in->Remove(index);
897 continue;
898 }
899 }
900
901 // For blocks with parameter or special parameter instructions we add them
902 // to the kill set.
903 const bool is_function_entry = block->IsFunctionEntry();
904 const bool is_osr_entry = block->IsOsrEntry();
905 const bool is_catch_block_entry = block->IsCatchBlockEntry();
906 if (is_function_entry || is_osr_entry || is_catch_block_entry) {
907 const intptr_t parameter_count =
908 (is_osr_entry || is_catch_block_entry)
909 ? flow_graph_->variable_count()
910 : flow_graph_->num_direct_parameters();
911 for (intptr_t i = 0; i < parameter_count; ++i) {
912 live_in->Remove(i);
913 kill->Add(i);
914 }
915 }
916 if (is_function_entry) {
917 if (flow_graph_->parsed_function().has_arg_desc_var()) {
918 const auto index = flow_graph_->ArgumentDescriptorEnvIndex();
919 live_in->Remove(index);
920 kill->Add(index);
921 }
922 }
923 }
924}
925
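// SSA construction proceeds in the classic phases, all driven from here:
// compute dominators and dominance frontiers, run variable liveness analysis,
// insert phis at iterated dominance frontiers of assignments, rename all uses
// to reference the inserted definitions, and finally remove phis that never
// became live.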
926void FlowGraph::ComputeSSA(
927 ZoneGrowableArray<Definition*>* inlining_parameters) {
928 GrowableArray<BitVector*> dominance_frontier;
930
931#ifdef DEBUG
932 if (inlining_parameters != nullptr) {
933 for (intptr_t i = 0, n = inlining_parameters->length(); i < n; ++i) {
934 Definition* defn = (*inlining_parameters)[i];
935 if (defn->IsConstant()) {
936 ASSERT(defn->previous() == graph_entry_);
937 ASSERT(defn->HasSSATemp());
939 } else {
940 ASSERT(defn->previous() == nullptr);
941 ASSERT(!defn->HasSSATemp());
942 }
943 }
944 } else {
946 }
947#endif
948
949 ComputeDominators(&dominance_frontier);
950
951 VariableLivenessAnalysis variable_liveness(this);
952 variable_liveness.Analyze();
953
954 GrowableArray<PhiInstr*> live_phis;
955
956 InsertPhis(preorder_, variable_liveness.ComputeAssignedVars(),
957 dominance_frontier, &live_phis);
958
959 // Rename uses to reference inserted phis where appropriate.
960 // Collect phis that reach a non-environment use.
961 Rename(&live_phis, &variable_liveness, inlining_parameters);
962
963 // Propagate alive mark transitively from alive phis and then remove
964 // non-live ones.
965 RemoveDeadPhis(&live_phis);
966}
967
968// Compute immediate dominators and the dominance frontier for each basic
969// block. As a side effect of the algorithm, sets the immediate dominator
970// of each basic block.
971//
972// dominance_frontier: an output parameter encoding the dominance frontier.
973// The array maps the preorder block number of a block to the set of
974// (preorder block numbers of) blocks in the dominance frontier.
975void FlowGraph::ComputeDominators(
976 GrowableArray<BitVector*>* dominance_frontier) {
977 // Use the SEMI-NCA algorithm to compute dominators. This is a two-pass
978 // version of the Lengauer-Tarjan algorithm (LT is normally three passes)
979 // that eliminates a pass by using nearest-common ancestor (NCA) to
980 // compute immediate dominators from semidominators. It also removes a
981 // level of indirection in the link-eval forest data structure.
982 //
983 // The algorithm is described in Georgiadis, Tarjan, and Werneck's
984 // "Finding Dominators in Practice".
985 // https://renatowerneck.files.wordpress.com/2016/06/gtw06-dominators.pdf
986
987 // All arrays are maps between preorder basic-block numbers.
988 intptr_t size = parent_.length();
989 GrowableArray<intptr_t> idom(size); // Immediate dominator.
990 GrowableArray<intptr_t> semi(size); // Semidominator.
991 GrowableArray<intptr_t> label(size); // Label for link-eval forest.
992
993 // 1. First pass: compute semidominators as in Lengauer-Tarjan.
994 // Semidominators are computed from a depth-first spanning tree and are an
995 // approximation of immediate dominators.
996
997 // Use a link-eval data structure with path compression. Implement path
998 // compression in place by mutating the parent array. Each block has a
999 // label, which is the minimum block number on the compressed path.
1000
1001 // Initialize idom, semi, and label used by SEMI-NCA. Initialize the
1002 // dominance frontier output array.
1003 for (intptr_t i = 0; i < size; ++i) {
1004 idom.Add(parent_[i]);
1005 semi.Add(i);
1006 label.Add(i);
1007 dominance_frontier->Add(new (zone()) BitVector(zone(), size));
1008 }
1009
1010 // Loop over the blocks in reverse preorder (not including the graph
1011 // entry). Clear the dominated blocks in the graph entry in case
1012 // ComputeDominators is used to recompute them.
1013 preorder_[0]->ClearDominatedBlocks();
1014 for (intptr_t block_index = size - 1; block_index >= 1; --block_index) {
1015 // Loop over the predecessors.
1016 BlockEntryInstr* block = preorder_[block_index];
1017 // Clear the immediately dominated blocks in case ComputeDominators is
1018 // used to recompute them.
1019 block->ClearDominatedBlocks();
1020 for (intptr_t i = 0, count = block->PredecessorCount(); i < count; ++i) {
1021 BlockEntryInstr* pred = block->PredecessorAt(i);
1022 ASSERT(pred != nullptr);
1023
1024 // Look for the semidominator by ascending the semidominator path
1025 // starting from pred.
1026 intptr_t pred_index = pred->preorder_number();
1027 intptr_t best = pred_index;
1028 if (pred_index > block_index) {
1029 CompressPath(block_index, pred_index, &parent_, &label);
1030 best = label[pred_index];
1031 }
1032
1033 // Update the semidominator if we've found a better one.
1034 semi[block_index] = Utils::Minimum(semi[block_index], semi[best]);
1035 }
1036
1037 // Now use label for the semidominator.
1038 label[block_index] = semi[block_index];
1039 }
1040
1041 // 2. Compute the immediate dominators as the nearest common ancestor of
1042 // spanning tree parent and semidominator, for all blocks except the entry.
1043 for (intptr_t block_index = 1; block_index < size; ++block_index) {
1044 intptr_t dom_index = idom[block_index];
1045 while (dom_index > semi[block_index]) {
1046 dom_index = idom[dom_index];
1047 }
1048 idom[block_index] = dom_index;
1049 preorder_[dom_index]->AddDominatedBlock(preorder_[block_index]);
1050 }
1051
1052 // 3. Now compute the dominance frontier for all blocks. This is the
1053 // algorithm in "A Simple, Fast Dominance Algorithm" (Figure 5), which is
1054 // attributed to a paper by Ferrante et al. There is no bookkeeping
1055 // required to avoid adding a block twice to the same block's dominance
1056 // frontier because we use a set to represent the dominance frontier.
1057 for (intptr_t block_index = 0; block_index < size; ++block_index) {
1058 BlockEntryInstr* block = preorder_[block_index];
1059 intptr_t count = block->PredecessorCount();
1060 if (count <= 1) continue;
1061 for (intptr_t i = 0; i < count; ++i) {
1062 BlockEntryInstr* runner = block->PredecessorAt(i);
1063 while (runner != block->dominator()) {
1064 (*dominance_frontier)[runner->preorder_number()]->Add(block_index);
1065 runner = runner->dominator();
1066 }
1067 }
1068 }
1069}
1070
1071void FlowGraph::CompressPath(intptr_t start_index,
1072 intptr_t current_index,
1073 GrowableArray<intptr_t>* parent,
1074 GrowableArray<intptr_t>* label) {
1075 intptr_t next_index = (*parent)[current_index];
1076 if (next_index > start_index) {
1077 CompressPath(start_index, next_index, parent, label);
1078 (*label)[current_index] =
1079 Utils::Minimum((*label)[current_index], (*label)[next_index]);
1080 (*parent)[current_index] = (*parent)[next_index];
1081 }
1082}
1083
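// Phi insertion follows the standard iterated-dominance-frontier algorithm
// (Cytron et al.): for each variable, every block containing an assignment is
// put on a worklist, a phi is placed in each block of its dominance frontier,
// and blocks that received a phi are added back to the worklist since the phi
// itself acts as a new assignment.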
1084void FlowGraph::InsertPhis(const GrowableArray<BlockEntryInstr*>& preorder,
1085 const GrowableArray<BitVector*>& assigned_vars,
1086 const GrowableArray<BitVector*>& dom_frontier,
1087 GrowableArray<PhiInstr*>* live_phis) {
1088 const intptr_t block_count = preorder.length();
1089 // Map preorder block number to the highest variable index that has a phi
1090 // in that block. Use it to avoid inserting multiple phis for the same
1091 // variable.
1092 GrowableArray<intptr_t> has_already(block_count);
1093 // Map preorder block number to the highest variable index for which the
1094 // block went on the worklist. Use it to avoid adding the same block to
1095 // the worklist more than once for the same variable.
1096 GrowableArray<intptr_t> work(block_count);
1097
1098 // Initialize has_already and work.
1099 for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
1100 has_already.Add(-1);
1101 work.Add(-1);
1102 }
1103
1104 // Insert phis for each variable in turn.
1105 GrowableArray<BlockEntryInstr*> worklist;
1106 for (intptr_t var_index = 0; var_index < variable_count(); ++var_index) {
1107 const bool always_live =
1108 !FLAG_prune_dead_locals || IsImmortalVariable(var_index);
1109 // Add to the worklist each block containing an assignment.
1110 for (intptr_t block_index = 0; block_index < block_count; ++block_index) {
1111 if (assigned_vars[block_index]->Contains(var_index)) {
1112 work[block_index] = var_index;
1113 worklist.Add(preorder[block_index]);
1114 }
1115 }
1116
1117 while (!worklist.is_empty()) {
1118 BlockEntryInstr* current = worklist.RemoveLast();
1119 // Ensure a phi for each block in the dominance frontier of current.
1120 for (BitVector::Iterator it(dom_frontier[current->preorder_number()]);
1121 !it.Done(); it.Advance()) {
1122 int index = it.Current();
1123 if (has_already[index] < var_index) {
1124 JoinEntryInstr* join = preorder[index]->AsJoinEntry();
1125 ASSERT(join != nullptr);
1126 PhiInstr* phi = join->InsertPhi(
1127 var_index, variable_count() + join->stack_depth());
1128 if (always_live) {
1129 phi->mark_alive();
1130 live_phis->Add(phi);
1131 }
1132 has_already[index] = var_index;
1133 if (work[index] < var_index) {
1134 work[index] = var_index;
1135 worklist.Add(join);
1136 }
1137 }
1138 }
1139 }
1140 }
1141}
1142
1144 constant_null_ = GetConstant(Object::ZoneHandle());
1145 constant_dead_ = GetConstant(Object::optimized_out());
1146}
1147
1148void FlowGraph::AddSyntheticPhis(BlockEntryInstr* block) {
1150 if (auto join = block->AsJoinEntry()) {
1151 const intptr_t local_phi_count = variable_count() + join->stack_depth();
1152 // Never insert more phis than there were OSR variables.
1153 const intptr_t osr_phi_count =
1154 Utils::Minimum(local_phi_count, osr_variable_count());
1155 for (intptr_t i = variable_count(); i < osr_phi_count; ++i) {
1156 if (join->phis() == nullptr || (*join->phis())[i] == nullptr) {
1157 join->InsertPhi(i, local_phi_count)->mark_alive();
1158 }
1159 }
1160 }
1161}
1162
1163void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
1164 VariableLivenessAnalysis* variable_liveness,
1165 ZoneGrowableArray<Definition*>* inlining_parameters) {
1166 GraphEntryInstr* entry = graph_entry();
1167
1168 // Add global constants to the initial definitions.
1170
1171 // Initial renaming environment.
1172 GrowableArray<Definition*> env(variable_count());
1173 env.FillWith(constant_dead(), 0, num_direct_parameters());
1175
1176 if (entry->catch_entries().is_empty()) {
1177 ASSERT(entry->unchecked_entry() != nullptr ? entry->SuccessorCount() == 2
1178 : entry->SuccessorCount() == 1);
1179 }
1180
1181 // For OSR on a non-empty stack, insert synthetic phis on every joining entry.
1182 // These phis are synthetic since they are not driven by live variable
1183 // analysis, but merely serve the purpose of merging stack slots from
1184 // parameters and other predecessors at the block in which OSR occurred.
1185 // The original definition could flow into a join via multiple predecessors
1186 // but with the same definition, not requiring a phi. However, with an OSR
1187 // entry in a different block, phis are required to merge the OSR variable
1188 // and original definition where there was no phi. Therefore, we need
1189 // synthetic phis in all (reachable) blocks, not just in the first join.
1190 if (IsCompiledForOsr()) {
1191 for (intptr_t i = 0, n = preorder().length(); i < n; ++i) {
1192 AddSyntheticPhis(preorder()[i]);
1193 }
1194 }
1195
1196 RenameRecursive(entry, &env, live_phis, variable_liveness,
1197 inlining_parameters);
1198
1199#if defined(DEBUG)
1200 ValidatePhis();
1201#endif // defined(DEBUG)
1202}
1203
1205 Zone* zone,
1206 const Function& function,
1207 bool should_assign_stack_locations /* = false */,
1208 compiler::ParameterInfoArray* parameter_info /* = nullptr */) {
1211 [&](intptr_t i) {
1212 const intptr_t index = (function.IsFactory() ? (i - 1) : i);
1213 return index >= 0 ? ParameterRepresentationAt(function, index)
1214 : kTagged;
1215 },
1216 should_assign_stack_locations, parameter_info);
1217}
1218
1219void FlowGraph::PopulateEnvironmentFromFunctionEntry(
1220 FunctionEntryInstr* function_entry,
1222 GrowableArray<PhiInstr*>* live_phis,
1223 VariableLivenessAnalysis* variable_liveness,
1224 ZoneGrowableArray<Definition*>* inlining_parameters) {
1226
1227 // Check if inlining_parameters include a type argument vector parameter.
1228 const intptr_t inlined_type_args_param =
1229 ((inlining_parameters != nullptr) && function().IsGeneric()) ? 1 : 0;
1230
1231 ASSERT(variable_count() == env->length());
1232 ASSERT(function().num_fixed_parameters() <= env->length());
1233
1234 const bool copies_parameters = function().MakesCopyOfParameters();
1235 for (intptr_t i = 0; i < function().num_fixed_parameters(); i++) {
1236 const auto& [location, representation] = direct_parameter_locations_[i];
1237 if (location.IsInvalid()) {
1238 ASSERT(function().MakesCopyOfParameters());
1239 continue;
1240 }
1241
1242 const intptr_t env_index =
1243 copies_parameters ? EnvIndex(parsed_function_.RawParameterVariable(i))
1244 : i;
1245
1246 auto param = new (zone())
1247 ParameterInstr(function_entry,
1248 /*env_index=*/env_index,
1249 /*param_index=*/i, location, representation);
1250
1251 AllocateSSAIndex(param);
1252 AddToInitialDefinitions(function_entry, param);
1253 (*env)[env_index] = param;
1254 }
1255
1256 // Override the entries in the renaming environment which are special (i.e.
1257 // inlining arguments, type parameter, args descriptor, context, ...)
1258 {
1259 // Replace parameter slots with inlining definitions coming in.
1260 if (inlining_parameters != nullptr) {
1261 for (intptr_t i = 0; i < function().NumParameters(); ++i) {
1262 Definition* defn = (*inlining_parameters)[inlined_type_args_param + i];
1263 if (defn->IsConstant()) {
1264 ASSERT(defn->previous() == graph_entry_);
1265 ASSERT(defn->HasSSATemp());
1266 } else {
1267 ASSERT(defn->previous() == nullptr);
1268 AllocateSSAIndex(defn);
1269 AddToInitialDefinitions(function_entry, defn);
1270 }
1271 intptr_t index = EnvIndex(parsed_function_.RawParameterVariable(i));
1272 (*env)[index] = defn;
1273 }
1274 }
1275
1276 // Replace the type arguments slot with a special parameter.
1277 const bool reify_generic_argument = function().IsGeneric();
1278 if (reify_generic_argument) {
1279 ASSERT(parsed_function().function_type_arguments() != nullptr);
1280 Definition* defn;
1281 if (inlining_parameters == nullptr) {
1282 // Note: If we are not inlining, then the prologue builder will
1283 // take care of checking that we got the correct reified type
1284 // arguments. This includes checking the argument descriptor in order
1285 // to even find out if the parameter was passed or not.
1286 defn = constant_dead();
1287 } else {
1288 defn = (*inlining_parameters)[0];
1289 }
1290 if (defn->IsConstant()) {
1291 ASSERT(defn->previous() == graph_entry_);
1292 ASSERT(defn->HasSSATemp());
1293 } else {
1294 ASSERT(defn->previous() == nullptr);
1295 AllocateSSAIndex(defn);
1296 AddToInitialDefinitions(function_entry, defn);
1297 }
1298 (*env)[RawTypeArgumentEnvIndex()] = defn;
1299 }
1300
1301 // Replace the argument descriptor slot with a special parameter.
1302 if (parsed_function().has_arg_desc_var()) {
1303 auto defn = new (Z)
1304 ParameterInstr(function_entry, ArgumentDescriptorEnvIndex(),
1307 AllocateSSAIndex(defn);
1308 AddToInitialDefinitions(function_entry, defn);
1309 (*env)[ArgumentDescriptorEnvIndex()] = defn;
1310 }
1311 }
1312}
1313
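// Maps a renaming-environment index onto an FP-relative stack slot: direct
// parameters (env_index < num_direct_parameters) land in the caller-provided
// parameter slots above the frame pointer, while the remaining indices map to
// local variable slots below it, following the target's frame layout.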
1314static Location EnvIndexToStackLocation(intptr_t num_direct_parameters,
1315 intptr_t env_index) {
1316 return Location::StackSlot(
1317 compiler::target::frame_layout.FrameSlotForVariableIndex(
1318 num_direct_parameters - env_index),
1319 FPREG);
1320}
1321
1322void FlowGraph::PopulateEnvironmentFromOsrEntry(
1323 OsrEntryInstr* osr_entry,
1324 GrowableArray<Definition*>* env) {
1326 // During OSR, all variables and possibly a non-empty stack are
1327 // passed as parameters. The latter mimics the incoming expression
1328 // stack that was set up prior to triggering OSR.
1329 const intptr_t parameter_count = osr_variable_count();
1330 ASSERT(parameter_count == env->length());
1331 for (intptr_t i = 0; i < parameter_count; i++) {
1332 const intptr_t param_index = (i < num_direct_parameters())
1333 ? i
1334 : ParameterInstr::kNotFunctionParameter;
1335 ParameterInstr* param = new (zone()) ParameterInstr(
1336 osr_entry, /*env_index=*/i, param_index,
1337 EnvIndexToStackLocation(num_direct_parameters(), i), kTagged);
1338 AllocateSSAIndex(param);
1339 AddToInitialDefinitions(osr_entry, param);
1340 (*env)[i] = param;
1341 }
1342}
1343
1344void FlowGraph::PopulateEnvironmentFromCatchEntry(
1345 CatchBlockEntryInstr* catch_entry,
1346 GrowableArray<Definition*>* env) {
1347 const intptr_t raw_exception_var_envindex =
1348 catch_entry->raw_exception_var() != nullptr
1349 ? EnvIndex(catch_entry->raw_exception_var())
1350 : -1;
1351 const intptr_t raw_stacktrace_var_envindex =
1352 catch_entry->raw_stacktrace_var() != nullptr
1353 ? EnvIndex(catch_entry->raw_stacktrace_var())
1354 : -1;
1355
1356 // Add real definitions for all locals and parameters.
1357 ASSERT(variable_count() == env->length());
1358 intptr_t additional_slots = 0;
1359 for (intptr_t i = 0, n = variable_count(); i < n; ++i) {
1360 // Local variables will arrive on the stack while exception and
1361 // stack trace will be passed in fixed registers.
1362 Location loc;
1363 if (raw_exception_var_envindex == i) {
1364 loc = LocationExceptionLocation();
1365 } else if (raw_stacktrace_var_envindex == i) {
1366 loc = LocationStackTraceLocation();
1367 } else {
1368 if (i < num_direct_parameters()) {
1369 const auto [param_loc, param_rep] = GetDirectParameterInfoAt(i);
1370 if (param_rep == kTagged && param_loc.IsStackSlot()) {
1371 loc = param_loc;
1372 } else {
1373 // We cannot reuse the parameter location for synchronization purposes
1374 // because it is either a register location or it is untagged
1375 // location. This means we need to allocate additional slot
1376 // for synchronization above slots reserved for other variables.
1377 loc = EnvIndexToStackLocation(num_direct_parameters(),
1378 n + additional_slots);
1379 additional_slots++;
1380 }
1381 } else {
1382 loc = EnvIndexToStackLocation(num_direct_parameters(), i);
1383 }
1384 }
1385 auto param = new (Z) ParameterInstr(
1386 catch_entry, /*env_index=*/i,
1387 /*param_index=*/ParameterInstr::kNotFunctionParameter, loc, kTagged);
1388
1389 AllocateSSAIndex(param); // New SSA temp.
1390 (*env)[i] = param;
1391 AddToInitialDefinitions(catch_entry, param);
1392 }
1393
1394 graph_entry_->set_fixed_slot_count(Utils::Maximum(
1395 graph_entry_->fixed_slot_count(),
1396 variable_count() - num_direct_parameters() + additional_slots));
1397}
1398
1399void FlowGraph::AttachEnvironment(Instruction* instr,
1400 GrowableArray<Definition*>* env) {
1401 auto deopt_env = Environment::From(zone(), *env, num_direct_parameters_,
1402 instr->NumberOfInputsConsumedBeforeCall(),
1403 parsed_function_);
1404 instr->SetEnvironment(deopt_env);
1405 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) {
1406 Value* use = it.CurrentValue();
1407 use->definition()->AddEnvUse(use);
1408 }
1409}
1410
1411void FlowGraph::RenameRecursive(
1412 BlockEntryInstr* block_entry,
1413 GrowableArray<Definition*>* env,
1414 GrowableArray<PhiInstr*>* live_phis,
1415 VariableLivenessAnalysis* variable_liveness,
1416 ZoneGrowableArray<Definition*>* inlining_parameters) {
1417 // 1. Process phis first.
1418 if (auto join = block_entry->AsJoinEntry()) {
1419 if (join->phis() != nullptr) {
1420 const intptr_t local_phi_count = variable_count() + join->stack_depth();
1421 ASSERT(join->phis()->length() == local_phi_count);
1422 for (intptr_t i = 0; i < local_phi_count; ++i) {
1423 PhiInstr* phi = (*join->phis())[i];
1424 if (phi != nullptr) {
1425 (*env)[i] = phi;
1426 AllocateSSAIndex(phi); // New SSA temp.
1427 if (block_entry->InsideTryBlock() && !phi->is_alive()) {
1428 // This is a safe approximation. Inside try{} all locals are
1429 // used at every call implicitly, so we mark all phis as live
1430 // from the start.
1431 // TODO(fschneider): Improve this approximation to eliminate
1432 // more redundant phis.
1433 phi->mark_alive();
1434 live_phis->Add(phi);
1435 }
1436 }
1437 }
1438 }
1439 } else if (auto osr_entry = block_entry->AsOsrEntry()) {
1440 PopulateEnvironmentFromOsrEntry(osr_entry, env);
1441 } else if (auto function_entry = block_entry->AsFunctionEntry()) {
1443 PopulateEnvironmentFromFunctionEntry(
1444 function_entry, env, live_phis, variable_liveness, inlining_parameters);
1445 } else if (auto catch_entry = block_entry->AsCatchBlockEntry()) {
1446 PopulateEnvironmentFromCatchEntry(catch_entry, env);
1447 }
1448
1449 if (!block_entry->IsGraphEntry() &&
1450 !block_entry->IsBlockEntryWithInitialDefs()) {
1451 // Prune non-live variables at block entry by replacing their environment
1452 // slots with null.
1453 BitVector* live_in = variable_liveness->GetLiveInSet(block_entry);
1454 for (intptr_t i = 0; i < variable_count(); i++) {
1455 // TODO(fschneider): Make sure that live_in always contains the
1456 // CurrentContext variable to avoid the special case here.
1457 if (FLAG_prune_dead_locals && !live_in->Contains(i) &&
1458 !IsImmortalVariable(i)) {
1459 (*env)[i] = constant_dead();
1460 }
1461 }
1462 }
1463
1464 // Attach environment to the block entry.
1465 AttachEnvironment(block_entry, env);
1466
1467 // 2. Process normal instructions.
1468 for (ForwardInstructionIterator it(block_entry); !it.Done(); it.Advance()) {
1469 Instruction* current = it.Current();
1470
1471 // Attach current environment to the instructions that need it.
1472 if (current->NeedsEnvironment()) {
1473 AttachEnvironment(current, env);
1474 }
1475
1476 // 2a. Handle uses:
1477 // Update the expression stack renaming environment for each use by
1478 // removing the renamed value. For each use of a LoadLocal, StoreLocal,
1479 // MakeTemp, DropTemps or Constant (or any expression under OSR),
1480 // replace it with the renamed value.
1481 for (intptr_t i = current->InputCount() - 1; i >= 0; --i) {
1482 Value* v = current->InputAt(i);
1483 // Update expression stack.
1484 ASSERT(env->length() > variable_count());
1485 Definition* reaching_defn = env->RemoveLast();
1486 Definition* input_defn = v->definition();
1487 if (input_defn != reaching_defn) {
1488 // Inspect the replacing definition before making the change.
1489 if (IsCompiledForOsr()) {
1490 // Under OSR, constants can reside on the expression stack. Just
1491 // generate the constant rather than going through a synthetic phi.
1492 if (input_defn->IsConstant() && reaching_defn->IsPhi()) {
1493 ASSERT(env->length() < osr_variable_count());
1494 auto constant = GetConstant(input_defn->AsConstant()->value());
1495 current->ReplaceInEnvironment(reaching_defn, constant);
1496 reaching_defn = constant;
1497 }
1498 } else {
1499 // Note: constants can only be replaced with other constants.
1500 ASSERT(input_defn->IsLoadLocal() || input_defn->IsStoreLocal() ||
1501 input_defn->IsDropTemps() || input_defn->IsMakeTemp() ||
1502 (input_defn->IsConstant() && reaching_defn->IsConstant()));
1503 }
1504 // Assert we are not referencing nulls in the initial environment.
1505 ASSERT(reaching_defn->ssa_temp_index() != -1);
1506 // Replace the definition.
1507 v->set_definition(reaching_defn);
1508 input_defn = reaching_defn;
1509 }
1510 input_defn->AddInputUse(v);
1511 }
1512
1513 // 2b. Handle LoadLocal/StoreLocal/MakeTemp/DropTemps/Constant specially.
1514 // Other definitions are just pushed to the environment directly.
1515 Definition* result = nullptr;
1516 switch (current->tag()) {
1517 case Instruction::kLoadLocal: {
1518 LoadLocalInstr* load = current->Cast<LoadLocalInstr>();
1519
1520 // The graph construction ensures we do not have an unused LoadLocal
1521 // computation.
1522 ASSERT(load->HasTemp());
1523 const intptr_t index = EnvIndex(&load->local());
1524 result = (*env)[index];
1525
1526 PhiInstr* phi = result->AsPhi();
1527 if ((phi != nullptr) && !phi->is_alive()) {
1528 phi->mark_alive();
1529 live_phis->Add(phi);
1530 }
1531
1532 if (FLAG_prune_dead_locals &&
1533 variable_liveness->IsLastLoad(block_entry, load)) {
1534 (*env)[index] = constant_dead();
1535 }
1536
1537 // Record captured parameters so that they can be skipped when
1538 // emitting sync code inside optimized try-blocks.
1539 if (load->local().is_captured_parameter()) {
1540 captured_parameters_->Add(index);
1541 }
1542
1543 if (phi != nullptr) {
1544 // Assign type to Phi if it doesn't have a type yet.
1545 // For a Phi to appear in the local variable it either was placed
1546 // there as incoming value by renaming or it was stored there by
1547 // StoreLocal which took this Phi from another local via LoadLocal,
1548 // to which this reasoning applies recursively.
1549 //
1550 // This means that we are guaranteed to process LoadLocal for a
1551 // matching variable first, unless there was an OSR with a non-empty
1552 // expression stack. In the latter case, Phi inserted by
1553 // FlowGraph::AddSyntheticPhis for expression temp will not have an
1554 // assigned type and may be accessed by StoreLocal and subsequent
1555 // LoadLocal.
1556 //
1557 if (!phi->HasType()) {
1558 // Check if phi corresponds to the same slot.
1559 auto* phis = phi->block()->phis();
1560 if ((index < phis->length()) && (*phis)[index] == phi) {
1561 phi->UpdateType(*load->local().inferred_type());
1562 } else {
1563 ASSERT(IsCompiledForOsr() && (phi->block()->stack_depth() > 0));
1564 }
1565 }
1566 }
1567 break;
1568 }
1569
1570 case Instruction::kStoreLocal: {
1571 StoreLocalInstr* store = current->Cast<StoreLocalInstr>();
1572 const intptr_t index = EnvIndex(&store->local());
1573 result = store->value()->definition();
1574
1575 if (!FLAG_prune_dead_locals ||
1576 variable_liveness->IsStoreAlive(block_entry, store)) {
1577 (*env)[index] = result;
1578 } else {
1579 (*env)[index] = constant_dead();
1580 }
1581 break;
1582 }
1583
1584 case Instruction::kDropTemps: {
1585 // Drop temps from the environment.
1586 DropTempsInstr* drop = current->Cast<DropTempsInstr>();
1587 for (intptr_t j = 0; j < drop->num_temps(); j++) {
1588 env->RemoveLast();
1589 }
1590 if (drop->value() != nullptr) {
1591 result = drop->value()->definition();
1592 }
1593 ASSERT((drop->value() != nullptr) || !drop->HasTemp());
1594 break;
1595 }
1596
1597 case Instruction::kConstant:
1598 case Instruction::kUnboxedConstant: {
1599 ConstantInstr* constant = current->Cast<ConstantInstr>();
1600 if (constant->HasTemp()) {
1601 result = GetConstant(constant->value(), constant->representation());
1602 }
1603 break;
1604 }
1605
1606 case Instruction::kMakeTemp: {
1607 // Simply push a #null value to the expression stack.
1608 result = constant_null_;
1609 break;
1610 }
1611
1612 case Instruction::kMoveArgument:
1613 UNREACHABLE();
1614 break;
1615
1616 case Instruction::kCheckStackOverflow:
1617 // Assert environment integrity at checkpoints.
1618 ASSERT((variable_count() +
1619 current->AsCheckStackOverflow()->stack_depth()) ==
1620 env->length());
1621 continue;
1622
1623 default:
1624 // Other definitions directly go into the environment.
1625 if (Definition* definition = current->AsDefinition()) {
1626 if (definition->HasTemp()) {
1627 // Assign fresh SSA temporary and update expression stack.
1628 AllocateSSAIndex(definition);
1629 env->Add(definition);
1630 }
1631 }
1632 continue;
1633 }
1634
1635 // Update expression stack and remove current instruction from the graph.
1636 Definition* definition = current->Cast<Definition>();
1637 if (definition->HasTemp()) {
1638 ASSERT(result != nullptr);
1639 env->Add(result);
1640 }
1641 it.RemoveCurrentFromGraph();
1642 }
1643
1644 // 3. Process dominated blocks.
1645 const bool set_stack = (block_entry == graph_entry()) && IsCompiledForOsr();
1646 for (intptr_t i = 0; i < block_entry->dominated_blocks().length(); ++i) {
1647 BlockEntryInstr* block = block_entry->dominated_blocks()[i];
1648 GrowableArray<Definition*> new_env(env->length());
1649 new_env.AddArray(*env);
1650 // During OSR, when traversing from the graph entry directly any block
1651 // (which may be a non-entry), we must adjust the environment to mimic
1652 // a non-empty incoming expression stack to ensure temporaries refer to
1653 // the right stack items.
1654 const intptr_t stack_depth = block->stack_depth();
1655 ASSERT(stack_depth >= 0);
1656 if (set_stack) {
1657 ASSERT(variable_count() == new_env.length());
1658 new_env.FillWith(constant_dead(), variable_count(), stack_depth);
1659 } else if (!block->last_instruction()->IsTailCall()) {
1660 // Assert environment integrity otherwise.
1661 ASSERT((variable_count() + stack_depth) == new_env.length());
1662 }
1663 RenameRecursive(block, &new_env, live_phis, variable_liveness,
1664 inlining_parameters);
1665 }
1666
1667 // 4. Process successor block. We have edge-split form, so that only blocks
1668 // with one successor can have a join block as successor.
1669 if ((block_entry->last_instruction()->SuccessorCount() == 1) &&
1670 block_entry->last_instruction()->SuccessorAt(0)->IsJoinEntry()) {
1671 JoinEntryInstr* successor =
1672 block_entry->last_instruction()->SuccessorAt(0)->AsJoinEntry();
1673 intptr_t pred_index = successor->IndexOfPredecessor(block_entry);
1674 ASSERT(pred_index >= 0);
1675 if (successor->phis() != nullptr) {
1676 for (intptr_t i = 0; i < successor->phis()->length(); ++i) {
1677 PhiInstr* phi = (*successor->phis())[i];
1678 if (phi != nullptr) {
1679 // Rename input operand.
1680 Definition* input = (*env)[i];
1681 ASSERT(input != nullptr);
1682 ASSERT(!input->IsMoveArgument());
1683 Value* use = new (zone()) Value(input);
1684 phi->SetInputAt(pred_index, use);
1685 }
1686 }
1687 }
1688 }
1689}
1690
1691#if defined(DEBUG)
1692void FlowGraph::ValidatePhis() {
1693 if (!FLAG_prune_dead_locals) {
1694 // We can only check if dead locals are pruned.
1695 return;
1696 }
1697
1698 for (intptr_t i = 0, n = preorder().length(); i < n; ++i) {
1699 BlockEntryInstr* block_entry = preorder()[i];
1700 Instruction* last_instruction = block_entry->last_instruction();
1701
1702 if ((last_instruction->SuccessorCount() == 1) &&
1703 last_instruction->SuccessorAt(0)->IsJoinEntry()) {
1704 JoinEntryInstr* successor =
1705 last_instruction->SuccessorAt(0)->AsJoinEntry();
1706 if (successor->phis() != nullptr) {
1707 for (intptr_t j = 0; j < successor->phis()->length(); ++j) {
1708 PhiInstr* phi = (*successor->phis())[j];
1709 if (phi == nullptr && !IsImmortalVariable(j)) {
1710 // We have no phi node for this variable.
1711 // Double check we do not have a different value in our env.
1712 // If we do, we would have needed a phi-node in the successor.
1713 ASSERT(last_instruction->env() != nullptr);
1714 Definition* current_definition =
1715 last_instruction->env()->ValueAt(j)->definition();
1716 ASSERT(successor->env() != nullptr);
1717 Definition* successor_definition =
1718 successor->env()->ValueAt(j)->definition();
1719 if (!current_definition->IsConstant() &&
1720 !successor_definition->IsConstant()) {
1721 ASSERT(current_definition == successor_definition);
1722 }
1723 }
1724 }
1725 }
1726 }
1727 }
1728}
1729#endif // defined(DEBUG)
1730
1731void FlowGraph::RemoveDeadPhis(GrowableArray<PhiInstr*>* live_phis) {
1732 // Augment live_phis with phis that have implicit real uses at
1733 // potentially throwing instructions, if there is a try-catch in this graph.
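// Such a phi has no regular uses, but it can still be observed through the
// environment of an instruction that may throw inside a try block, so it
// must be treated as live.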
1734 if (!graph_entry()->catch_entries().is_empty()) {
1735 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
1736 JoinEntryInstr* join = it.Current()->AsJoinEntry();
1737 if (join == nullptr) continue;
1738 for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
1739 PhiInstr* phi = phi_it.Current();
1740 if (phi == nullptr || phi->is_alive() ||
1741 (phi->input_use_list() != nullptr) ||
1742 (phi->env_use_list() == nullptr)) {
1743 continue;
1744 }
1745 for (Value::Iterator it(phi->env_use_list()); !it.Done();
1746 it.Advance()) {
1747 Value* use = it.Current();
1748 if (use->instruction()->MayThrow() &&
1749 use->instruction()->GetBlock()->InsideTryBlock()) {
1750 live_phis->Add(phi);
1751 phi->mark_alive();
1752 break;
1753 }
1754 }
1755 }
1756 }
1757 }
1758
1759 while (!live_phis->is_empty()) {
1760 PhiInstr* phi = live_phis->RemoveLast();
1761 for (intptr_t i = 0; i < phi->InputCount(); i++) {
1762 Value* val = phi->InputAt(i);
1763 PhiInstr* used_phi = val->definition()->AsPhi();
1764 if ((used_phi != nullptr) && !used_phi->is_alive()) {
1765 used_phi->mark_alive();
1766 live_phis->Add(used_phi);
1767 }
1768 }
1769 }
1770
1771 for (BlockIterator it(postorder_iterator()); !it.Done(); it.Advance()) {
1772 JoinEntryInstr* join = it.Current()->AsJoinEntry();
1773 if (join != nullptr) join->RemoveDeadPhis(constant_dead());
1774 }
1775}
1776
1777 RedefinitionInstr* FlowGraph::EnsureRedefinition(Instruction* prev,
1778 Definition* original,
1779 CompileType compile_type) {
1780 RedefinitionInstr* first = prev->next()->AsRedefinition();
1781 if (first != nullptr && (first->constrained_type() != nullptr)) {
1782 if ((first->value()->definition() == original) &&
1783 first->constrained_type()->IsEqualTo(&compile_type)) {
1784 // Already redefined. Do nothing.
1785 return nullptr;
1786 }
1787 }
1788 RedefinitionInstr* redef = new RedefinitionInstr(new Value(original));
1789
1790 // Don't set the constrained type when the type is None(), which denotes an
1791 // unreachable value (e.g. using value null after some form of null check).
1792 if (!compile_type.IsNone()) {
1793 redef->set_constrained_type(new CompileType(compile_type));
1794 }
1795
1796 InsertAfter(prev, redef, nullptr, FlowGraph::kValue);
1797 RenameDominatedUses(original, redef, redef);
1798
1799 if (redef->input_use_list() == nullptr) {
1800 // There are no dominated uses, so the newly added Redefinition is useless.
1801 // Remove Redefinition to avoid interfering with
1802 // BranchSimplifier::Simplify which needs empty blocks.
1803 redef->RemoveFromGraph();
1804 return nullptr;
1805 }
1806
1807 return redef;
1808}
1809
1810void FlowGraph::RemoveRedefinitions(bool keep_checks) {
1811 // Remove redefinition and check instructions that were inserted
1812 // to make a control dependence explicit with a data dependence,
1813 // for example, to inhibit hoisting.
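// When |keep_checks| is true, only Redefinition instructions are removed;
// check instructions that redefine their value are left in place.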
1814 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
1815 block_it.Advance()) {
1817 for (ForwardInstructionIterator instr_it(block_it.Current());
1818 !instr_it.Done(); instr_it.Advance()) {
1819 Instruction* instruction = instr_it.Current();
1820 if (auto redef = instruction->AsRedefinition()) {
1821 redef->ReplaceUsesWith(redef->value()->definition());
1822 instr_it.RemoveCurrentFromGraph();
1823 } else if (keep_checks) {
1824 continue;
1825 } else if (auto def = instruction->AsDefinition()) {
1826 Value* value = def->RedefinedValue();
1827 if (value != nullptr) {
1828 def->ReplaceUsesWith(value->definition());
1829 def->ClearSSATempIndex();
1830 }
1831 }
1832 }
1833 }
1834}
1835
1836BitVector* FlowGraph::FindLoopBlocks(BlockEntryInstr* m,
1837 BlockEntryInstr* n) const {
1838 GrowableArray<BlockEntryInstr*> stack;
1839 BitVector* loop_blocks = new (zone()) BitVector(zone(), preorder_.length());
1840
1841 loop_blocks->Add(n->preorder_number());
1842 if (n != m) {
1843 loop_blocks->Add(m->preorder_number());
1844 stack.Add(m);
1845 }
1846
1847 while (!stack.is_empty()) {
1848 BlockEntryInstr* p = stack.RemoveLast();
1849 for (intptr_t i = 0; i < p->PredecessorCount(); ++i) {
1850 BlockEntryInstr* q = p->PredecessorAt(i);
1851 if (!loop_blocks->Contains(q->preorder_number())) {
1852 loop_blocks->Add(q->preorder_number());
1853 stack.Add(q);
1854 }
1855 }
1856 }
1857 return loop_blocks;
1858}
1859
1860LoopHierarchy* FlowGraph::ComputeLoops() const {
1861 // Iterate over all entry blocks in the flow graph to attach
1862 // loop information to each loop header.
1863 ZoneGrowableArray<BlockEntryInstr*>* loop_headers =
1864 new (zone()) ZoneGrowableArray<BlockEntryInstr*>();
1865 for (BlockIterator it = postorder_iterator(); !it.Done(); it.Advance()) {
1866 BlockEntryInstr* block = it.Current();
1867 // Reset loop information on every entry block (since this method
1868 // may recompute loop information on a modified flow graph).
1869 block->set_loop_info(nullptr);
1870 // Iterate over predecessors to find back edges.
1871 for (intptr_t i = 0; i < block->PredecessorCount(); ++i) {
1872 BlockEntryInstr* pred = block->PredecessorAt(i);
1873 if (block->Dominates(pred)) {
1874 // Identify the block as a loop header and add the blocks in the
1875 // loop to the loop information. Loops that share the same loop
1876 // header are treated as one loop by merging these blocks.
1877 BitVector* loop_blocks = FindLoopBlocks(pred, block);
1878 if (block->loop_info() == nullptr) {
1879 intptr_t id = loop_headers->length();
1880 block->set_loop_info(new (zone()) LoopInfo(id, block, loop_blocks));
1881 loop_headers->Add(block);
1882 } else {
1883 ASSERT(block->loop_info()->header() == block);
1884 block->loop_info()->AddBlocks(loop_blocks);
1885 }
1886 block->loop_info()->AddBackEdge(pred);
1887 }
1888 }
1889 }
1890
1891 // Build the loop hierarchy and link every entry block to
1892 // the closest enveloping loop in loop hierarchy.
1893 return new (zone()) LoopHierarchy(loop_headers, preorder_, should_print());
1894}
1895
1896 intptr_t FlowGraph::InstructionCount() const {
1897 intptr_t size = 0;
1898 // Iterate each block, skipping the graph entry.
1899 for (intptr_t i = 1; i < preorder_.length(); ++i) {
1900 BlockEntryInstr* block = preorder_[i];
1901
1902 // Skip any blocks from the prologue so they do not count towards the
1903 // inlining instruction budget.
1904 const intptr_t block_id = block->block_id();
1905 if (prologue_info_.Contains(block_id)) {
1906 continue;
1907 }
1908
1909 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
1910 ++size;
1911 }
1912 }
1913 return size;
1914}
1915
1916void FlowGraph::ConvertUse(Value* use, Representation from_rep) {
1917 const Representation to_rep =
1918 use->instruction()->RequiredInputRepresentation(use->use_index());
1919 if (from_rep == to_rep || to_rep == kNoRepresentation) {
1920 return;
1921 }
1922 InsertConversion(from_rep, to_rep, use, /*is_environment_use=*/false);
1923}
1924
1925 static bool IsUnboxedInteger(Representation rep) {
1926 return (rep == kUnboxedInt32) || (rep == kUnboxedUint32) ||
1927 (rep == kUnboxedInt64);
1928}
1929
1930static bool ShouldInlineSimd() {
1932}
1933
1936}
1937
1938void FlowGraph::InsertConversion(Representation from,
1939 Representation to,
1940 Value* use,
1941 bool is_environment_use) {
1942 ASSERT(from != to);
1943 Instruction* insert_before;
1944 PhiInstr* phi = use->instruction()->AsPhi();
1945 if (phi != nullptr) {
1946 ASSERT(phi->is_alive());
1947 // For phis conversions have to be inserted in the predecessor.
1948 auto predecessor = phi->block()->PredecessorAt(use->use_index());
1949 insert_before = predecessor->last_instruction();
1950 ASSERT(insert_before->GetBlock() == predecessor);
1951 } else {
1952 insert_before = use->instruction();
1953 }
1954 const Instruction::SpeculativeMode speculative_mode =
1955 use->instruction()->SpeculativeModeOfInput(use->use_index());
1956 Instruction* deopt_target = nullptr;
1957 if (speculative_mode == Instruction::kGuardInputs || to == kUnboxedInt32) {
1958 deopt_target = insert_before;
1959 }
1960
1961 Definition* converted = nullptr;
1962 if (IsUnboxedInteger(from) && IsUnboxedInteger(to)) {
1963 const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != nullptr)
1964 ? deopt_target->DeoptimizationTarget()
1965 : DeoptId::kNone;
1966 converted =
1967 new (Z) IntConverterInstr(from, to, use->CopyWithType(), deopt_id);
1968 } else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) {
1969 converted = new Int32ToDoubleInstr(use->CopyWithType());
1970 } else if ((from == kUnboxedInt64) && (to == kUnboxedDouble) &&
1971 CanConvertInt64ToDouble()) {
1972 const intptr_t deopt_id = (deopt_target != nullptr)
1973 ? deopt_target->DeoptimizationTarget()
1974 : DeoptId::kNone;
1975 converted = new Int64ToDoubleInstr(use->CopyWithType(), deopt_id);
1976 } else if ((from == kTagged) && Boxing::Supports(to)) {
1977 const intptr_t deopt_id = (deopt_target != nullptr)
1978 ? deopt_target->DeoptimizationTarget()
1979 : DeoptId::kNone;
1980 converted =
1981 UnboxInstr::Create(to, use->CopyWithType(), deopt_id, speculative_mode);
1982 } else if ((to == kTagged) && Boxing::Supports(from)) {
1983 converted = BoxInstr::Create(from, use->CopyWithType());
1984 } else if ((to == kPairOfTagged) && (from == kTagged)) {
1985 // Insert a conversion to an unboxed record, which can only be used
1986 // in a Return instruction.
1987 ASSERT(use->instruction()->IsDartReturn());
1988 Definition* x = new (Z)
1989 LoadFieldInstr(use->CopyWithType(),
1992 InstructionSource());
1993 InsertBefore(insert_before, x, nullptr, FlowGraph::kValue);
1994 Definition* y = new (Z)
1995 LoadFieldInstr(use->CopyWithType(),
1998 InstructionSource());
1999 InsertBefore(insert_before, y, nullptr, FlowGraph::kValue);
2000 converted = new (Z) MakePairInstr(new (Z) Value(x), new (Z) Value(y));
2001 } else if ((to == kTagged) && (from == kPairOfTagged)) {
2002 // Handled in FlowGraph::InsertRecordBoxing.
2003 UNREACHABLE();
2004 } else {
2005 // We have failed to find a suitable conversion instruction. If either
2006 // representation is not boxable, then fail immediately.
2007 if (!Boxing::Supports(from) || !Boxing::Supports(to)) {
2009 FATAL("Illegal conversion %s->%s for the use of %s at %s\n",
2012 use->definition()->ToCString(), use->instruction()->ToCString());
2013 } else {
2014 FATAL("Illegal conversion %s->%s for a use of v%" Pd "\n",
2017 use->definition()->ssa_temp_index());
2018 }
2019 }
2020 // Insert two "dummy" conversion instructions with the correct
2021 // "from" and "to" representation. The inserted instructions will
2022 // trigger a deoptimization if executed. See #12417 for a discussion.
2023 // If the use is not speculative, then this code should be unreachable.
2024 // Insert Stop for a graceful error and aid unreachable code elimination.
2025 if (speculative_mode == Instruction::kNotSpeculative) {
2026 StopInstr* stop = new (Z) StopInstr("Incompatible conversion.");
2027 InsertBefore(insert_before, stop, nullptr, FlowGraph::kEffect);
2028 }
2029 const intptr_t deopt_id = (deopt_target != nullptr)
2030 ? deopt_target->DeoptimizationTarget()
2031 : DeoptId::kNone;
2032 Definition* boxed = BoxInstr::Create(from, use->CopyWithType());
2033 use->BindTo(boxed);
2034 InsertBefore(insert_before, boxed, nullptr, FlowGraph::kValue);
2035 converted = UnboxInstr::Create(to, new (Z) Value(boxed), deopt_id,
2036 speculative_mode);
2037 }
2038 ASSERT(converted != nullptr);
2039 InsertBefore(insert_before, converted,
2040 (deopt_target != nullptr) ? deopt_target->env() : nullptr,
2041 FlowGraph::kValue);
2042 if (is_environment_use) {
2043 use->BindToEnvironment(converted);
2044 } else {
2045 use->BindTo(converted);
2046 }
2047}
2048
2049 static bool NeedsRecordBoxing(Definition* def) {
2050 if (def->env_use_list() != nullptr) return true;
2051 for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2052 Value* use = it.Current();
2053 if (use->instruction()->RequiredInputRepresentation(use->use_index()) !=
2054 kPairOfTagged) {
2055 return true;
2056 }
2057 }
2058 return false;
2059}
2060
2061void FlowGraph::InsertRecordBoxing(Definition* def) {
2062 // Insert a conversion from an unboxed record, which can only be returned
2063 // by a Dart call with a known interface/direct target.
2064 const Function* target = nullptr;
2065 if (auto* call = def->AsStaticCall()) {
2066 target = &(call->function());
2067 } else if (auto* call = def->AsInstanceCallBase()) {
2068 target = &(call->interface_target());
2069 } else if (auto* call = def->AsDispatchTableCall()) {
2070 target = &(call->interface_target());
2071 } else {
2072 UNREACHABLE();
2073 }
2074 ASSERT(target != nullptr && !target->IsNull());
2075
2076 kernel::UnboxingInfoMetadata* unboxing_metadata =
2078 ASSERT(unboxing_metadata != nullptr);
2079 const RecordShape shape = unboxing_metadata->return_info.record_shape;
2080 ASSERT(shape.num_fields() == 2);
2081
2082 auto* x = new (Z)
2083 ExtractNthOutputInstr(new (Z) Value(def), 0, kTagged, kDynamicCid);
2084 auto* y = new (Z)
2085 ExtractNthOutputInstr(new (Z) Value(def), 1, kTagged, kDynamicCid);
2086 auto* alloc = new (Z)
2087 AllocateSmallRecordInstr(InstructionSource(), shape, new (Z) Value(x),
2088 new (Z) Value(y), nullptr, def->deopt_id());
2089 def->ReplaceUsesWith(alloc);
2090 // Uses of 'def' in 'x' and 'y' should not be replaced as 'x' and 'y'
2091 // are not added to the flow graph yet.
2092 ASSERT(x->value()->definition() == def);
2093 ASSERT(y->value()->definition() == def);
2094 Instruction* insert_before = def->next();
2095 ASSERT(insert_before != nullptr);
2096 InsertBefore(insert_before, x, nullptr, FlowGraph::kValue);
2097 InsertBefore(insert_before, y, nullptr, FlowGraph::kValue);
2098 InsertBefore(insert_before, alloc, def->env(), FlowGraph::kValue);
2099}
2100
2101void FlowGraph::InsertConversionsFor(Definition* def) {
2102 const Representation from_rep = def->representation();
2103
2104 // Insert boxing of a record once after the definition (if needed)
2105 // in order to avoid multiple allocations.
2106 if (from_rep == kPairOfTagged) {
2107 if (NeedsRecordBoxing(def)) {
2108 InsertRecordBoxing(def);
2109 }
2110 return;
2111 }
2112
2113 for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2114 ConvertUse(it.Current(), from_rep);
2115 }
2116}
2117
2118namespace {
2119class PhiUnboxingHeuristic : public ValueObject {
2120 public:
2121 explicit PhiUnboxingHeuristic(FlowGraph* flow_graph)
2122 : worklist_(flow_graph, 10) {}
2123
2124 void Process(PhiInstr* phi) {
2125 auto new_representation = phi->representation();
2126 switch (phi->Type()->ToCid()) {
2127 case kDoubleCid:
2128 new_representation = DetermineIfAnyIncomingUnboxedFloats(phi)
2129 ? kUnboxedFloat
2130 : kUnboxedDouble;
2131#if defined(DEBUG)
2132 if (new_representation == kUnboxedFloat) {
2133 for (auto input : phi->inputs()) {
2134 ASSERT(input->representation() != kUnboxedDouble);
2135 }
2136 }
2137#endif
2138 break;
2139 case kFloat32x4Cid:
2140 if (ShouldInlineSimd()) {
2141 new_representation = kUnboxedFloat32x4;
2142 }
2143 break;
2144 case kInt32x4Cid:
2145 if (ShouldInlineSimd()) {
2146 new_representation = kUnboxedInt32x4;
2147 }
2148 break;
2149 case kFloat64x2Cid:
2150 if (ShouldInlineSimd()) {
2151 new_representation = kUnboxedFloat64x2;
2152 }
2153 break;
2154 }
2155
2156 if (new_representation == kTagged && phi->Type()->IsInt()) {
2157 // Check to see if all the (non-self) inputs are unboxed integers. If so,
2158 // mark the phi as an unboxed integer that can hold the possible values
2159 // that flow into the phi.
2160 for (auto input : phi->inputs()) {
2161 if (input == phi) continue;
2162
2163 if (!IsUnboxedInteger(input->representation())) {
2164 new_representation = kTagged; // Reset to a boxed phi.
2165 break;
2166 }
2167
2168 if (new_representation == kTagged) {
2169 new_representation = input->representation();
2170 } else if (new_representation != input->representation()) {
2171 // Don't allow mixing of untagged and unboxed values.
2172 ASSERT(IsUnboxedInteger(input->representation()));
2173 // It's unclear which representation to use yet, so use
2174 // kNoRepresentation as an "unknown but unboxed int" marker for now.
2175 new_representation = kNoRepresentation;
2176 }
2177 }
2178
2179 if (new_representation == kNoRepresentation) {
2180 // If all the inputs are unboxed integers but with different
2181 // representations, then pick a representation based on the range
2182 // of values that flow into the phi node.
2183 new_representation =
2185 ? kUnboxedInt32
2186 : kUnboxedInt64;
2187 }
2188
2189 // Decide if it is worthwhile to unbox a boxed integer phi.
2190 if (new_representation == kTagged && !phi->Type()->can_be_sentinel()) {
2191#if defined(TARGET_ARCH_IS_64_BIT)
2192 // In AOT mode on 64-bit platforms always unbox integer typed phis
2193 // (similar to how we treat doubles and other boxed numeric types).
2194 // In JIT mode only unbox phis which are not fully known to be Smi.
2195 if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
2196 new_representation = kUnboxedInt64;
2197 }
2198#else
2199 // On a 32-bit platform, if there are unboxed values flowing into the
2200 // phi and the phi value itself flows into an unboxed operation,
2201 // prefer to keep it unboxed.
2202 // We use this heuristic instead of eagerly unboxing all the phis
2203 // because we are concerned about the code size and register pressure.
2204 const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
2205 const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
2206
2207 if (has_unboxed_incoming_value && flows_into_unboxed_use) {
2208 new_representation =
2210 ? kUnboxedInt32
2211 : kUnboxedInt64;
2212 }
2213#endif
2214 }
2215 }
2216
2217 // If any non-self input of the phi node is untagged, then the phi node
2218 // should only have untagged inputs and thus is marked as untagged.
2219 //
2220 // Note: we can't assert that all inputs are untagged at this point because
2221 // one of the inputs might be a different unvisited phi node. If this
2222 // assumption is broken, then fail later in the SelectRepresentations pass.
2223 for (auto input : phi->inputs()) {
2224 if (input != phi && input->representation() == kUntagged) {
2225 new_representation = kUntagged;
2226 break;
2227 }
2228 }
2229
2230 phi->set_representation(new_representation);
2231 }
2232
2233 private:
2234 // Returns [true] if any value with the kUnboxedFloat representation
2235 // flows into the |phi|.
2236 // This function looks through phis.
2237 bool DetermineIfAnyIncomingUnboxedFloats(PhiInstr* phi) {
2238 worklist_.Clear();
2239 worklist_.Add(phi);
2240 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2241 const auto defn = worklist_.definitions()[i];
2242 for (auto input : defn->inputs()) {
2243 if (input->representation() == kUnboxedFloat) {
2244 return true;
2245 }
2246 if (input->IsPhi()) {
2247 worklist_.Add(input);
2248 }
2249 }
2250 }
2251 return false;
2252 }
2253
2254 // Returns |true| iff there is an unboxed definition among all potential
2255 // definitions that can flow into the |phi|.
2256 // This function looks through phis.
2257 bool HasUnboxedIncomingValue(PhiInstr* phi) {
2258 worklist_.Clear();
2259 worklist_.Add(phi);
2260 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2261 const auto defn = worklist_.definitions()[i];
2262 for (auto input : defn->inputs()) {
2263 if (IsUnboxedInteger(input->representation()) || input->IsBox()) {
2264 return true;
2265 } else if (input->IsPhi()) {
2266 worklist_.Add(input);
2267 }
2268 }
2269 }
2270 return false;
2271 }
2272
2273 // Returns |true| iff |phi| potentially flows into an unboxed use.
2274 // This function looks through phis.
2275 bool FlowsIntoUnboxedUse(PhiInstr* phi) {
2276 worklist_.Clear();
2277 worklist_.Add(phi);
2278 for (intptr_t i = 0; i < worklist_.definitions().length(); i++) {
2279 const auto defn = worklist_.definitions()[i];
2280 for (auto use : defn->input_uses()) {
2281 if (IsUnboxedInteger(use->instruction()->RequiredInputRepresentation(
2282 use->use_index())) ||
2283 use->instruction()->IsUnbox()) {
2284 return true;
2285 } else if (auto phi_use = use->instruction()->AsPhi()) {
2286 worklist_.Add(phi_use);
2287 }
2288 }
2289 }
2290 return false;
2291 }
2292
2293 const bool is_aot_ = CompilerState::Current().is_aot();
2294 DefinitionWorklist worklist_;
2295};
2296} // namespace
2297
2298 void FlowGraph::SelectRepresentations() {
2299 // First we decide for each phi if it is beneficial to unbox it. If so, we
2300 // change its `phi->representation()` accordingly.
2301 PhiUnboxingHeuristic phi_unboxing_heuristic(this);
2302 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2303 block_it.Advance()) {
2304 JoinEntryInstr* join_entry = block_it.Current()->AsJoinEntry();
2305 if (join_entry != nullptr) {
2306 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
2307 PhiInstr* phi = it.Current();
2308 phi_unboxing_heuristic.Process(phi);
2309 }
2310 }
2311 }
2312
2313 // Process all initial definitions and insert conversions when needed (depends
2314 // on phi unboxing decision above).
2315 for (intptr_t i = 0; i < graph_entry()->initial_definitions()->length();
2316 i++) {
2317 InsertConversionsFor((*graph_entry()->initial_definitions())[i]);
2318 }
2319 for (intptr_t i = 0; i < graph_entry()->SuccessorCount(); ++i) {
2320 auto successor = graph_entry()->SuccessorAt(i);
2321 if (auto entry = successor->AsBlockEntryWithInitialDefs()) {
2322 auto& initial_definitions = *entry->initial_definitions();
2323 for (intptr_t j = 0; j < initial_definitions.length(); j++) {
2324 InsertConversionsFor(initial_definitions[j]);
2325 }
2326 }
2327 }
2328
2329 // Process all normal definitions and insert conversions when needed (depends
2330 // on phi unboxing decision above).
2331 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2332 block_it.Advance()) {
2333 BlockEntryInstr* entry = block_it.Current();
2334 if (JoinEntryInstr* join_entry = entry->AsJoinEntry()) {
2335 for (PhiIterator it(join_entry); !it.Done(); it.Advance()) {
2336 PhiInstr* phi = it.Current();
2337 ASSERT(phi != nullptr);
2338 ASSERT(phi->is_alive());
2339 InsertConversionsFor(phi);
2340 }
2341 }
2342 for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
2343 Definition* def = it.Current()->AsDefinition();
2344 if (def != nullptr) {
2345 InsertConversionsFor(def);
2346 }
2347 }
2348 }
2349}
2350
2351 void FlowGraph::EliminateEnvironments() {
2352 // After this pass we can no longer perform LICM and hoist instructions
2353 // that can deoptimize.
2354
2355 disallow_licm();
2356 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2357 block_it.Advance()) {
2358 BlockEntryInstr* block = block_it.Current();
2359 if (!block->IsCatchBlockEntry()) {
2360 block->RemoveEnvironment();
2361 }
2362 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2363 Instruction* current = it.Current();
2364 // This check is inconsistent with the flow graph checker: the checker
2365 // does not allow an instruction to lack an environment unless its block
2366 // is inside a try-catch.
2367 // See FlowGraphChecker::VisitInstruction.
2368 if (!current->ComputeCanDeoptimize() &&
2369 !current->ComputeCanDeoptimizeAfterCall() &&
2370 (!current->MayThrow() || !current->GetBlock()->InsideTryBlock())) {
2371 // Instructions that can throw need an environment for optimized
2372 // try-catch.
2373 // TODO(srdjan): --source-lines needs deopt environments to get at
2374 // the code for this instruction, however, leaving the environment
2375 // changes code.
2376 current->RemoveEnvironment();
2377 }
2378 }
2379 }
2380}
2381
2382void FlowGraph::ExtractUntaggedPayload(Instruction* instr,
2383 Value* array,
2384 const Slot& slot,
2385 InnerPointerAccess access) {
2386 auto* const untag_payload = new (Z)
2387 LoadFieldInstr(array->CopyWithType(Z), slot, access, instr->source());
2388 InsertBefore(instr, untag_payload, instr->env(), FlowGraph::kValue);
2389 array->BindTo(untag_payload);
2390 ASSERT_EQUAL(array->definition()->representation(), kUntagged);
2391}
2392
2393 bool FlowGraph::ExtractExternalUntaggedPayload(Instruction* instr,
2394 Value* array,
2395 classid_t cid) {
2396 ASSERT(array->instruction() == instr);
2397 // Nothing to do if already untagged.
2398 if (array->definition()->representation() != kTagged) return false;
2399 // If we've determined at compile time that this is an object that has an
2400 // external payload, use the cid of the compile type instead.
2401 if (IsExternalPayloadClassId(array->Type()->ToCid())) {
2402 cid = array->Type()->ToCid();
2403 } else if (!IsExternalPayloadClassId(cid)) {
2404 // Can't extract the payload address if it points to GC-managed memory.
2405 return false;
2406 }
2407
2408 const Slot* slot = nullptr;
2409 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
2410 slot = &Slot::PointerBase_data();
2411 } else {
2412 UNREACHABLE();
2413 }
2414
2415 ExtractUntaggedPayload(instr, array, *slot,
2417 return true;
2418}
2419
2420void FlowGraph::ExtractNonInternalTypedDataPayload(Instruction* instr,
2421 Value* array,
2422 classid_t cid) {
2423 ASSERT(array->instruction() == instr);
2424 // Skip if the array payload has already been extracted.
2425 if (array->definition()->representation() == kUntagged) return;
2426 if (!IsTypedDataBaseClassId(cid)) return;
2427 auto const type_cid = array->Type()->ToCid();
2428 // For external PointerBase objects, the payload should have already been
2429 // extracted during canonicalization.
2431 // Extract the payload for typed data view instructions even if the array
2432 // is an internal typed data object (which can happen in unreachable code),
2433 // as code generation handles direct accesses only for internal typed data.
2434 //
2435 // For internal typed data instructions (which are also used for
2436 // non-internal typed data arrays), don't extract payload if the array is
2437 // an internal typed data object.
2438 if (IsTypedDataViewClassId(cid) || !IsTypedDataClassId(type_cid)) {
2439 ExtractUntaggedPayload(instr, array, Slot::PointerBase_data(),
2441 }
2442}
2443
2444 void FlowGraph::ExtractNonInternalTypedDataPayloads() {
2445 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2446 block_it.Advance()) {
2447 BlockEntryInstr* block = block_it.Current();
2448 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2449 Instruction* current = it.Current();
2450 if (auto* const load_indexed = current->AsLoadIndexed()) {
2451 ExtractNonInternalTypedDataPayload(load_indexed, load_indexed->array(),
2452 load_indexed->class_id());
2453 } else if (auto* const store_indexed = current->AsStoreIndexed()) {
2454 ExtractNonInternalTypedDataPayload(
2455 store_indexed, store_indexed->array(), store_indexed->class_id());
2456 } else if (auto* const memory_copy = current->AsMemoryCopy()) {
2457 ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->src(),
2458 memory_copy->src_cid());
2459 ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->dest(),
2460 memory_copy->dest_cid());
2461 }
2462 }
2463 }
2464}
2465
2466 bool FlowGraph::Canonicalize() {
2467 bool changed = false;
2468
2469 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2470 block_it.Advance()) {
2471 BlockEntryInstr* const block = block_it.Current();
2472 if (auto join = block->AsJoinEntry()) {
2473 for (PhiIterator it(join); !it.Done(); it.Advance()) {
2474 PhiInstr* current = it.Current();
2475 if (current->HasUnmatchedInputRepresentations() &&
2477 // Can't canonicalize this instruction until all conversions for its
2478 // speculative inputs are inserted.
2479 continue;
2480 }
2481
2482 Definition* replacement = current->Canonicalize(this);
2483 ASSERT(replacement != nullptr);
2485 !replacement->HasUnmatchedInputRepresentations());
2486 if (replacement != current) {
2487 current->ReplaceUsesWith(replacement);
2488 it.RemoveCurrentFromGraph();
2489 changed = true;
2490 }
2491 }
2492 }
2493 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
2494 Instruction* current = it.Current();
2495 if (current->HasUnmatchedInputRepresentations() &&
2497 // Can't canonicalize this instruction until all conversions for its
2498 // speculative inputs are inserted.
2499 continue;
2500 }
2501
2502 Instruction* replacement = current->Canonicalize(this);
2503
2504 if (replacement != current) {
2505 // For non-definitions Canonicalize should return either nullptr or
2506 // this.
2507 if (replacement != nullptr) {
2508 ASSERT(current->IsDefinition());
2511 if ((replacement->representation() != current->representation()) &&
2512 current->AsDefinition()->HasUses()) {
2513 // Can't canonicalize this instruction as unmatched
2514 // representations are not allowed at this point, but
2515 // replacement has a different representation.
2516 continue;
2517 }
2518 }
2519 }
2520 ReplaceCurrentInstruction(&it, current, replacement);
2521 changed = true;
2522 }
2523 }
2524 }
2525 return changed;
2526}
2527
2528 void FlowGraph::PopulateWithICData(const Function& function) {
2529 Zone* zone = Thread::Current()->zone();
2530
2531 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2532 block_it.Advance()) {
2533 ForwardInstructionIterator it(block_it.Current());
2534 for (; !it.Done(); it.Advance()) {
2535 Instruction* instr = it.Current();
2536 if (instr->IsInstanceCall()) {
2537 InstanceCallInstr* call = instr->AsInstanceCall();
2538 if (!call->HasICData()) {
2539 const Array& arguments_descriptor =
2540 Array::Handle(zone, call->GetArgumentsDescriptor());
2541 const ICData& ic_data = ICData::ZoneHandle(
2542 zone,
2543 ICData::New(function, call->function_name(), arguments_descriptor,
2544 call->deopt_id(), call->checked_argument_count(),
2545 ICData::kInstance));
2546 call->set_ic_data(&ic_data);
2547 }
2548 } else if (instr->IsStaticCall()) {
2549 StaticCallInstr* call = instr->AsStaticCall();
2550 if (!call->HasICData()) {
2551 const Array& arguments_descriptor =
2552 Array::Handle(zone, call->GetArgumentsDescriptor());
2553 const Function& target = call->function();
2554 int num_args_checked =
2555 MethodRecognizer::NumArgsCheckedForStaticCall(target);
2556 const ICData& ic_data = ICData::ZoneHandle(
2557 zone, ICData::NewForStaticCall(
2558 function, target, arguments_descriptor,
2559 call->deopt_id(), num_args_checked, ICData::kStatic));
2560 call->set_ic_data(&ic_data);
2561 }
2562 }
2563 }
2564 }
2565}
2566
2567// Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the
2568 // shift can be a truncating Smi shift-left and the result is always a Smi.
2569 // Merging occurs only within a basic block.
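//
// A sketch of the intended rewrite, in the IL notation used below for
// TryMergeTruncDivMod:
//
//   v4 <- <<(v2, v3)
//   v5 <- &(v4, c)        // c is a non-negative Smi constant
//
// becomes a truncating Smi shift-left feeding the same bit-and:
//
//   v4 <- <<[truncating](v2, v3)
//   v5 <- &(v4, c)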
2570 void FlowGraph::TryOptimizePatterns() {
2571 if (!FLAG_truncating_left_shift) return;
2572 GrowableArray<BinarySmiOpInstr*> div_mod_merge;
2573 GrowableArray<InvokeMathCFunctionInstr*> sin_cos_merge;
2574 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2575 block_it.Advance()) {
2576 // Merging only per basic-block.
2577 div_mod_merge.Clear();
2578 sin_cos_merge.Clear();
2579 ForwardInstructionIterator it(block_it.Current());
2580 for (; !it.Done(); it.Advance()) {
2581 if (it.Current()->IsBinarySmiOp()) {
2582 BinarySmiOpInstr* binop = it.Current()->AsBinarySmiOp();
2583 if (binop->op_kind() == Token::kBIT_AND) {
2584 OptimizeLeftShiftBitAndSmiOp(&it, binop, binop->left()->definition(),
2585 binop->right()->definition());
2586 } else if ((binop->op_kind() == Token::kTRUNCDIV) ||
2587 (binop->op_kind() == Token::kMOD)) {
2588 if (binop->HasUses()) {
2589 div_mod_merge.Add(binop);
2590 }
2591 }
2592 } else if (it.Current()->IsBinaryInt64Op()) {
2593 BinaryInt64OpInstr* mintop = it.Current()->AsBinaryInt64Op();
2594 if (mintop->op_kind() == Token::kBIT_AND) {
2595 OptimizeLeftShiftBitAndSmiOp(&it, mintop,
2596 mintop->left()->definition(),
2597 mintop->right()->definition());
2598 }
2599 } else if (it.Current()->IsInvokeMathCFunction()) {
2600 InvokeMathCFunctionInstr* math_unary =
2601 it.Current()->AsInvokeMathCFunction();
2602 if ((math_unary->recognized_kind() == MethodRecognizer::kMathSin) ||
2603 (math_unary->recognized_kind() == MethodRecognizer::kMathCos)) {
2604 if (math_unary->HasUses()) {
2605 sin_cos_merge.Add(math_unary);
2606 }
2607 }
2608 }
2609 }
2610 TryMergeTruncDivMod(&div_mod_merge);
2611 }
2612}
2613
2614// Returns true if use is dominated by the given instruction.
2615 // Note: uses that occur at the instruction itself are not dominated by it.
2616 static bool IsDominatedUse(Instruction* dom, Value* use) {
2617 BlockEntryInstr* dom_block = dom->GetBlock();
2618
2619 Instruction* instr = use->instruction();
2620
2621 PhiInstr* phi = instr->AsPhi();
2622 if (phi != nullptr) {
2623 return dom_block->Dominates(phi->block()->PredecessorAt(use->use_index()));
2624 }
2625
2626 BlockEntryInstr* use_block = instr->GetBlock();
2627 if (use_block == dom_block) {
2628 // Fast path for the case of block entry.
2629 if (dom_block == dom) return true;
2630
2631 for (Instruction* curr = dom->next(); curr != nullptr;
2632 curr = curr->next()) {
2633 if (curr == instr) return true;
2634 }
2635
2636 return false;
2637 }
2638
2639 return dom_block->Dominates(use_block);
2640}
2641
2642 void FlowGraph::RenameDominatedUses(Definition* def,
2643 Instruction* dom,
2644 Definition* other) {
2645 for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
2646 Value* use = it.Current();
2647 if (IsDominatedUse(dom, use)) {
2648 use->BindTo(other);
2649 }
2650 }
2651}
2652
2652 
2653 void FlowGraph::RenameUsesDominatedByRedefinitions() {
2654 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2655 block_it.Advance()) {
2656 for (ForwardInstructionIterator instr_it(block_it.Current());
2657 !instr_it.Done(); instr_it.Advance()) {
2658 Definition* definition = instr_it.Current()->AsDefinition();
2659 // CheckArrayBound instructions have their own mechanism for ensuring
2660 // proper dependencies, so we don't rewrite those here.
2661 if (definition != nullptr && !definition->IsCheckArrayBound()) {
2662 Value* redefined = definition->RedefinedValue();
2663 if (redefined != nullptr) {
2664 if (!definition->HasSSATemp()) {
2665 AllocateSSAIndex(definition);
2666 }
2667 Definition* original = redefined->definition();
2668 RenameDominatedUses(original, definition, definition);
2669 }
2670 }
2671 }
2672 }
2673}
2674
2675 static bool IsPositiveOrZeroSmiConst(Definition* d) {
2676 ConstantInstr* const_instr = d->AsConstant();
2677 if ((const_instr != nullptr) && (const_instr->value().IsSmi())) {
2678 return Smi::Cast(const_instr->value()).Value() >= 0;
2679 }
2680 return false;
2681}
2682
2683 static BinarySmiOpInstr* AsSmiShiftLeftInstruction(Definition* d) {
2684 BinarySmiOpInstr* instr = d->AsBinarySmiOp();
2685 if ((instr != nullptr) && (instr->op_kind() == Token::kSHL)) {
2686 return instr;
2687 }
2688 return nullptr;
2689}
2690
2691void FlowGraph::OptimizeLeftShiftBitAndSmiOp(
2692 ForwardInstructionIterator* current_iterator,
2693 Definition* bit_and_instr,
2694 Definition* left_instr,
2695 Definition* right_instr) {
2696 ASSERT(bit_and_instr != nullptr);
2697 ASSERT((left_instr != nullptr) && (right_instr != nullptr));
2698
2699 // Check for the pattern; smi_shift_left must be single-use.
2700 bool is_positive_or_zero = IsPositiveOrZeroSmiConst(left_instr);
2701 if (!is_positive_or_zero) {
2702 is_positive_or_zero = IsPositiveOrZeroSmiConst(right_instr);
2703 }
2704 if (!is_positive_or_zero) return;
2705
2706 BinarySmiOpInstr* smi_shift_left = nullptr;
2707 if (bit_and_instr->InputAt(0)->IsSingleUse()) {
2708 smi_shift_left = AsSmiShiftLeftInstruction(left_instr);
2709 }
2710 if ((smi_shift_left == nullptr) &&
2711 (bit_and_instr->InputAt(1)->IsSingleUse())) {
2712 smi_shift_left = AsSmiShiftLeftInstruction(right_instr);
2713 }
2714 if (smi_shift_left == nullptr) return;
2715
2716 // Pattern recognized.
2717 smi_shift_left->mark_truncating();
2718 ASSERT(bit_and_instr->IsBinarySmiOp() || bit_and_instr->IsBinaryInt64Op());
2719 if (bit_and_instr->IsBinaryInt64Op()) {
2720 // Replace Mint op with Smi op.
2721 BinarySmiOpInstr* smi_op = new (Z) BinarySmiOpInstr(
2722 Token::kBIT_AND, new (Z) Value(left_instr), new (Z) Value(right_instr),
2723 DeoptId::kNone); // BIT_AND cannot deoptimize.
2724 bit_and_instr->ReplaceWith(smi_op, current_iterator);
2725 }
2726}
2727
2728// Dart:
2729// var x = d % 10;
2730// var y = d ~/ 10;
2731// var z = x + y;
2732//
2733// IL:
2734// v4 <- %(v2, v3)
2735// v5 <- ~/(v2, v3)
2736// v6 <- +(v4, v5)
2737//
2738// IL optimized:
2739// v4 <- DIVMOD(v2, v3);
2740// v5 <- LoadIndexed(v4, 0); // ~/ result
2741// v6 <- LoadIndexed(v4, 1); // % result
2742// v7 <- +(v5, v6)
2743 // Because of the environment it is important that the merged instruction
2744 // replaces the first original instruction encountered.
2745void FlowGraph::TryMergeTruncDivMod(
2746 GrowableArray<BinarySmiOpInstr*>* merge_candidates) {
2747 if (merge_candidates->length() < 2) {
2748 // Need at least a TRUNCDIV and a MOD.
2749 return;
2750 }
2751 for (intptr_t i = 0; i < merge_candidates->length(); i++) {
2752 BinarySmiOpInstr* curr_instr = (*merge_candidates)[i];
2753 if (curr_instr == nullptr) {
2754 // Instruction was merged already.
2755 continue;
2756 }
2757 ASSERT((curr_instr->op_kind() == Token::kTRUNCDIV) ||
2758 (curr_instr->op_kind() == Token::kMOD));
2759 // Check if there is a kMOD/kTRUNCDIV binop with the same inputs.
2760 const Token::Kind other_kind = (curr_instr->op_kind() == Token::kTRUNCDIV)
2761 ? Token::kMOD
2762 : Token::kTRUNCDIV;
2763 Definition* left_def = curr_instr->left()->definition();
2764 Definition* right_def = curr_instr->right()->definition();
2765 for (intptr_t k = i + 1; k < merge_candidates->length(); k++) {
2766 BinarySmiOpInstr* other_binop = (*merge_candidates)[k];
2767 // 'other_binop' can be nullptr if it was already merged.
2768 if ((other_binop != nullptr) && (other_binop->op_kind() == other_kind) &&
2769 (other_binop->left()->definition() == left_def) &&
2770 (other_binop->right()->definition() == right_def)) {
2771 (*merge_candidates)[k] = nullptr; // Clear it.
2772 ASSERT(curr_instr->HasUses());
2773 AppendExtractNthOutputForMerged(
2774 curr_instr, TruncDivModInstr::OutputIndexOf(curr_instr->op_kind()),
2775 kTagged, kSmiCid);
2776 ASSERT(other_binop->HasUses());
2777 AppendExtractNthOutputForMerged(
2778 other_binop,
2779 TruncDivModInstr::OutputIndexOf(other_binop->op_kind()), kTagged,
2780 kSmiCid);
2781
2782 // Replace with TruncDivMod.
2783 TruncDivModInstr* div_mod = new (Z) TruncDivModInstr(
2784 curr_instr->left()->CopyWithType(),
2785 curr_instr->right()->CopyWithType(), curr_instr->deopt_id());
2786 curr_instr->ReplaceWith(div_mod, nullptr);
2787 other_binop->ReplaceUsesWith(div_mod);
2788 other_binop->RemoveFromGraph();
2789 // Only one merge is possible here. Because canonicalization happens later,
2790 // more candidates are possible.
2791 // TODO(srdjan): Allow merging of trunc-div/mod into truncDivMod.
2792 break;
2793 }
2794 }
2795 }
2796}
2797
2798void FlowGraph::AppendExtractNthOutputForMerged(Definition* instr,
2799 intptr_t index,
2800 Representation rep,
2801 intptr_t cid) {
2802 ExtractNthOutputInstr* extract =
2803 new (Z) ExtractNthOutputInstr(new (Z) Value(instr), index, rep, cid);
2804 instr->ReplaceUsesWith(extract);
2805 InsertAfter(instr, extract, nullptr, FlowGraph::kValue);
2806}
2807
2808//
2809// Static helpers for the flow graph utilities.
2810//
2811
2812 static TargetEntryInstr* NewTarget(FlowGraph* graph, Instruction* inherit) {
2813 TargetEntryInstr* target = new (graph->zone())
2814 TargetEntryInstr(graph->allocate_block_id(),
2815 inherit->GetBlock()->try_index(), DeoptId::kNone);
2816 target->InheritDeoptTarget(graph->zone(), inherit);
2817 return target;
2818}
2819
2820static JoinEntryInstr* NewJoin(FlowGraph* graph, Instruction* inherit) {
2821 JoinEntryInstr* join = new (graph->zone())
2822 JoinEntryInstr(graph->allocate_block_id(),
2823 inherit->GetBlock()->try_index(), DeoptId::kNone);
2824 join->InheritDeoptTarget(graph->zone(), inherit);
2825 return join;
2826}
2827
2828 static GotoInstr* NewGoto(FlowGraph* graph,
2829 JoinEntryInstr* target,
2830 Instruction* inherit) {
2831 GotoInstr* got = new (graph->zone()) GotoInstr(target, DeoptId::kNone);
2832 got->InheritDeoptTarget(graph->zone(), inherit);
2833 return got;
2834}
2835
2836 static BranchInstr* NewBranch(FlowGraph* graph,
2837 ComparisonInstr* cmp,
2838 Instruction* inherit) {
2839 BranchInstr* bra = new (graph->zone()) BranchInstr(cmp, DeoptId::kNone);
2840 bra->InheritDeoptTarget(graph->zone(), inherit);
2841 return bra;
2842}
2843
2844//
2845// Flow graph utilities.
2846//
2847
2848// Constructs new diamond decision at the given instruction.
2849//
2850// ENTRY
2851// instruction
2852// if (compare)
2853// / \
2854// B_TRUE B_FALSE
2855// \ /
2856// JOIN
2857//
2858 JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction,
2859 Instruction* inherit,
2860 ComparisonInstr* compare,
2861 TargetEntryInstr** b_true,
2862 TargetEntryInstr** b_false) {
2863 BlockEntryInstr* entry = instruction->GetBlock();
2864
2865 TargetEntryInstr* bt = NewTarget(this, inherit);
2866 TargetEntryInstr* bf = NewTarget(this, inherit);
2867 JoinEntryInstr* join = NewJoin(this, inherit);
2868 GotoInstr* gotot = NewGoto(this, join, inherit);
2869 GotoInstr* gotof = NewGoto(this, join, inherit);
2870 BranchInstr* bra = NewBranch(this, compare, inherit);
2871
2872 instruction->AppendInstruction(bra);
2873 entry->set_last_instruction(bra);
2874
2875 *bra->true_successor_address() = bt;
2876 *bra->false_successor_address() = bf;
2877
2878 bt->AppendInstruction(gotot);
2879 bt->set_last_instruction(gotot);
2880
2881 bf->AppendInstruction(gotof);
2882 bf->set_last_instruction(gotof);
2883
2884 // Update dominance relation incrementally.
2885 for (intptr_t i = 0, n = entry->dominated_blocks().length(); i < n; ++i) {
2886 join->AddDominatedBlock(entry->dominated_blocks()[i]);
2887 }
2888 entry->ClearDominatedBlocks();
2889 entry->AddDominatedBlock(bt);
2890 entry->AddDominatedBlock(bf);
2891 entry->AddDominatedBlock(join);
2892
2893 // TODO(ajcbik): update pred/succ/ordering incrementally too.
2894
2895 // Return new blocks.
2896 *b_true = bt;
2897 *b_false = bf;
2898 return join;
2899}
2900
2901 JoinEntryInstr* FlowGraph::NewDiamond(Instruction* instruction,
2902 Instruction* inherit,
2903 const LogicalAnd& condition,
2904 TargetEntryInstr** b_true,
2905 TargetEntryInstr** b_false) {
2906 // First diamond for first comparison.
2907 TargetEntryInstr* bt = nullptr;
2908 TargetEntryInstr* bf = nullptr;
2909 JoinEntryInstr* mid_point =
2910 NewDiamond(instruction, inherit, condition.oper1, &bt, &bf);
2911
2912 // Short-circuit second comparison and connect through phi.
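// oper2 is evaluated only along the true edge of the first diamond; along
// the false edge the phi receives Bool::False(), so the phi carries the
// value of `oper1 && oper2`.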
2913 condition.oper2->InsertAfter(bt);
2914 AllocateSSAIndex(condition.oper2);
2915 condition.oper2->InheritDeoptTarget(zone(), inherit); // must inherit
2916 PhiInstr* phi =
2917 AddPhi(mid_point, condition.oper2, GetConstant(Bool::False()));
2918 StrictCompareInstr* circuit = new (zone()) StrictCompareInstr(
2919 inherit->source(), Token::kEQ_STRICT, new (zone()) Value(phi),
2920 new (zone()) Value(GetConstant(Bool::True())), false,
2921 DeoptId::kNone); // don't inherit
2922
2923 // Return new blocks through the second diamond.
2924 return NewDiamond(mid_point, inherit, circuit, b_true, b_false);
2925}
2926
2927 PhiInstr* FlowGraph::AddPhi(JoinEntryInstr* join,
2928 Definition* d1,
2929 Definition* d2) {
2930 PhiInstr* phi = new (zone()) PhiInstr(join, 2);
2931 Value* v1 = new (zone()) Value(d1);
2932 Value* v2 = new (zone()) Value(d2);
2933
2934 AllocateSSAIndex(phi);
2935
2936 phi->mark_alive();
2937 phi->SetInputAt(0, v1);
2938 phi->SetInputAt(1, v2);
2939 d1->AddInputUse(v1);
2940 d2->AddInputUse(v2);
2941 join->InsertPhi(phi);
2942
2943 return phi;
2944}
2945
2946 void FlowGraph::InsertMoveArguments() {
2947 compiler::ParameterInfoArray argument_locations;
2948
2949 intptr_t max_argument_slot_count = 0;
2950 auto& target = Function::Handle();
2951
2952 for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
2953 block_it.Advance()) {
2955 for (ForwardInstructionIterator instr_it(block_it.Current());
2956 !instr_it.Done(); instr_it.Advance()) {
2957 Instruction* instruction = instr_it.Current();
2958 const intptr_t arg_count = instruction->ArgumentCount();
2959 if (arg_count == 0) {
2960 continue;
2961 }
2962
2964 if (auto static_call = instruction->AsStaticCall()) {
2965 target = static_call->function().ptr();
2966 } else if (auto instance_call = instruction->AsInstanceCallBase()) {
2967 target = instance_call->interface_target().ptr();
2968 } else if (auto dispatch_call = instruction->AsDispatchTableCall()) {
2969 target = dispatch_call->interface_target().ptr();
2970 } else if (auto cachable_call = instruction->AsCachableIdempotentCall()) {
2971 target = cachable_call->function().ptr();
2972 }
2973
2974 MoveArgumentsArray* arguments =
2975 new (Z) MoveArgumentsArray(zone(), arg_count);
2976 arguments->EnsureLength(arg_count, nullptr);
2977
2978 const intptr_t stack_arguments_size_in_words =
2980 zone(), target, arg_count,
2981 [&](intptr_t i) {
2982 return instruction->RequiredInputRepresentation(i);
2983 },
2984 /*should_assign_stack_locations=*/true, &argument_locations);
2985
2986 for (intptr_t i = 0; i < arg_count; ++i) {
2987 const auto& [location, rep] = argument_locations[i];
2988 Value* arg = instruction->ArgumentValueAt(i);
2989 (*arguments)[i] = new (Z) MoveArgumentInstr(
2990 arg->CopyWithType(Z), rep, location.ToCallerSpRelative());
2991 }
2992 max_argument_slot_count = Utils::Maximum(max_argument_slot_count,
2993 stack_arguments_size_in_words);
2994
2995 for (auto move_arg : *arguments) {
2996 // Insert all MoveArgument instructions immediately before call.
2997 // MoveArgumentInstr::EmitNativeCode may generate more efficient
2998 // code for subsequent MoveArgument instructions (ARM, ARM64).
2999 if (!move_arg->is_register_move()) {
3000 InsertBefore(instruction, move_arg, /*env=*/nullptr, kEffect);
3001 }
3002 }
3003 instruction->ReplaceInputsWithMoveArguments(arguments);
3004 if (instruction->env() != nullptr) {
3005 instruction->RepairArgumentUsesInEnvironment();
3006 }
3007 }
3008 }
3010}
3011
3012void FlowGraph::Print(const char* phase) {
3013 FlowGraphPrinter::PrintGraph(phase, this);
3014}
3015
3016 class SSACompactor : public ValueObject {
3017 public:
3018 SSACompactor(intptr_t num_blocks,
3019 intptr_t num_ssa_vars,
3020 ZoneGrowableArray<Definition*>* detached_defs)
3021 : block_num_(num_blocks),
3022 ssa_num_(num_ssa_vars),
3023 detached_defs_(detached_defs) {
3024 block_num_.EnsureLength(num_blocks, -1);
3025 ssa_num_.EnsureLength(num_ssa_vars, -1);
3026 }
3027
3028 void RenumberGraph(FlowGraph* graph) {
3029 for (auto block : graph->reverse_postorder()) {
3030 block_num_[block->block_id()] = 1;
3031 CollectDetachedMaterializations(block->env());
3032
3033 if (auto* block_with_idefs = block->AsBlockEntryWithInitialDefs()) {
3034 for (Definition* def : *block_with_idefs->initial_definitions()) {
3035 RenumberDefinition(def);
3036 CollectDetachedMaterializations(def->env());
3037 }
3038 }
3039 if (auto* join = block->AsJoinEntry()) {
3040 for (PhiIterator it(join); !it.Done(); it.Advance()) {
3041 RenumberDefinition(it.Current());
3042 }
3043 }
3044 for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
3045 Instruction* instr = it.Current();
3046 if (Definition* def = instr->AsDefinition()) {
3047 RenumberDefinition(def);
3048 }
3049 CollectDetachedMaterializations(instr->env());
3050 }
3051 }
3052 for (auto* def : (*detached_defs_)) {
3053 RenumberDefinition(def);
3054 }
3055 graph->set_current_ssa_temp_index(current_ssa_index_);
3056
3057 // Preserve the relative order between block ids, as predecessors are
3058 // sorted by block ids.
3059 intptr_t current_block_index = 0;
3060 for (intptr_t i = 0, n = block_num_.length(); i < n; ++i) {
3061 if (block_num_[i] >= 0) {
3062 block_num_[i] = current_block_index++;
3063 }
3064 }
3065 for (auto block : graph->reverse_postorder()) {
3066 block->set_block_id(block_num_[block->block_id()]);
3067 }
3068 graph->set_max_block_id(current_block_index - 1);
3069 }
3070
3071 private:
3072 void RenumberDefinition(Definition* def) {
3073 if (def->HasSSATemp()) {
3074 const intptr_t old_index = def->ssa_temp_index();
3075 intptr_t new_index = ssa_num_[old_index];
3076 if (new_index < 0) {
3077 ssa_num_[old_index] = new_index = current_ssa_index_++;
3078 }
3079 def->set_ssa_temp_index(new_index);
3080 }
3081 }
3082
3083 bool IsDetachedDefinition(Definition* def) {
3084 return def->IsMaterializeObject() && (def->next() == nullptr);
3085 }
3086
3087 void AddDetachedDefinition(Definition* def) {
3088 for (intptr_t i = 0, n = detached_defs_->length(); i < n; ++i) {
3089 if ((*detached_defs_)[i] == def) {
3090 return;
3091 }
3092 }
3093 detached_defs_->Add(def);
3094 // Follow inputs as detached definitions can reference other
3095 // detached definitions.
3096 for (intptr_t i = 0, n = def->InputCount(); i < n; ++i) {
3097 Definition* input = def->InputAt(i)->definition();
3098 if (IsDetachedDefinition(input)) {
3099 AddDetachedDefinition(input);
3100 }
3101 }
3102 ASSERT(def->env() == nullptr);
3103 }
3104
3105 void CollectDetachedMaterializations(Environment* env) {
3106 if (env == nullptr) {
3107 return;
3108 }
3109 for (Environment::DeepIterator it(env); !it.Done(); it.Advance()) {
3110 Definition* def = it.CurrentValue()->definition();
3111 if (IsDetachedDefinition(def)) {
3112 AddDetachedDefinition(def);
3113 }
3114 }
3115 }
3116
3117 GrowableArray<intptr_t> block_num_;
3118 GrowableArray<intptr_t> ssa_num_;
3119 intptr_t current_ssa_index_ = 0;
3120 ZoneGrowableArray<Definition*>* detached_defs_;
3121};
3122
3123 void FlowGraph::CompactSSA(ZoneGrowableArray<Definition*>* detached_defs) {
3124 if (detached_defs == nullptr) {
3125 detached_defs = new (Z) ZoneGrowableArray<Definition*>(Z, 0);
3126 }
3127 SSACompactor compactor(max_block_id() + 1, current_ssa_temp_index(),
3128 detached_defs);
3129 compactor.RenumberGraph(this);
3130}
3131
3132} // namespace dart
LocalVariable * CurrentContextVar() const
Definition: flow_graph.h:173
intptr_t RawTypeArgumentEnvIndex() const
Definition: flow_graph.h:181
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.h:312
static void RenameDominatedUses(Definition *def, Instruction *dom, Definition *other)
Definition: flow_graph.cc:2642
void SelectRepresentations()
Definition: flow_graph.cc:2298
intptr_t variable_count() const
Definition: flow_graph.h:143
void AddToInitialDefinitions(BlockEntryWithInitialDefs *entry, Definition *defn)
Definition: flow_graph.cc:263
void ExtractNonInternalTypedDataPayloads()
Definition: flow_graph.cc:2444
void MergeBlocks()
Definition: flow_graph.cc:389
GrowableArray< BlockEntryInstr * > * CodegenBlockOrder()
Definition: flow_graph.cc:170
intptr_t allocate_block_id()
Definition: flow_graph.h:266
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.cc:273
bool should_reorder_blocks() const
Definition: flow_graph.h:508
bool IsImmortalVariable(intptr_t env_index) const
Definition: flow_graph.h:196
void disallow_licm()
Definition: flow_graph.h:408
intptr_t EnvIndex(const LocalVariable *variable) const
Definition: flow_graph.h:189
Instruction * Current() const
Definition: il.h:1853
bool is_unboxed_integer_parameter_at(intptr_t index) const
Definition: object.h:3783
bool has_unboxed_return() const
Definition: object.h:3803
bool has_unboxed_double_return() const
Definition: object.h:3819
bool has_unboxed_integer_return() const
Definition: object.h:3811
bool MakesCopyOfParameters() const
Definition: object.h:3514
bool has_unboxed_record_return() const
Definition: object.h:3827
bool is_unboxed_parameter_at(intptr_t index) const
Definition: object.h:3773
bool is_unboxed_double_parameter_at(intptr_t index) const
Definition: object.h:3793
bool IsGeneric() const
Definition: object.cc:8844
ClassPtr Owner() const
Definition: object.cc:10841
intptr_t num_fixed_parameters() const
Definition: object.cc:8856
intptr_t NumParameters() const
Definition: object.cc:8877
intptr_t fixed_slot_count() const
Definition: il.h:1996
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:1994
void set_fixed_slot_count(intptr_t count)
Definition: il.h:1997
virtual intptr_t SuccessorCount() const
Definition: il.cc:1988
static ICDataPtr NewForStaticCall(const Function &owner, const Function &target, const Array &arguments_descriptor, intptr_t deopt_id, intptr_t num_args_tested, RebindRule rebind_rule)
Definition: object.cc:17401
Instruction * next() const
Definition: il.h:1093
void set_previous(Instruction *instr)
Definition: il.h:1088
void LinkTo(Instruction *next)
Definition: il.h:1108
virtual bool MayThrow() const =0
void InheritDeoptTarget(Zone *zone, Instruction *other)
Definition: il.cc:1569
virtual bool ComputeCanDeoptimizeAfterCall() const
Definition: il.h:1070
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:1981
virtual BlockEntryInstr * GetBlock()
Definition: il.cc:1352
virtual bool ComputeCanDeoptimize() const =0
Environment * env() const
Definition: il.h:1215
@ kGuardInputs
Definition: il.h:972
@ kNotSpeculative
Definition: il.h:975
void RemoveEnvironment()
Definition: il.cc:1282
bool HasUnmatchedInputRepresentations() const
Definition: il.cc:1609
const char * ToCString() const
Definition: il_printer.cc:1683
Instruction * AppendInstruction(Instruction *tail)
Definition: il.cc:1341
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:1241
virtual void ReplaceInputsWithMoveArguments(MoveArgumentsArray *move_arguments)
Definition: il.h:1055
void UnuseAllInputs()
Definition: il.cc:1534
virtual intptr_t ArgumentCount() const
Definition: il.h:1041
bool IsDominatedBy(Instruction *dom)
Definition: il.cc:1581
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2629
virtual Representation representation() const
Definition: il.h:1260
void RepairArgumentUsesInEnvironment() const
Definition: il.cc:1543
void SetInputAt(intptr_t i, Value *value)
Definition: il.h:1014
InstructionSource source() const
Definition: il.h:1008
Value * ArgumentValueAt(intptr_t index) const
Definition: il.h:3435
void InsertAfter(Instruction *prev)
Definition: il.cc:1325
virtual intptr_t SuccessorCount() const
Definition: il.cc:1977
Instruction * RemoveFromGraph(bool return_previous=true)
Definition: il.cc:1301
bool HasMoveArguments() const
Definition: il.h:1059
SpeculativeMode SpeculativeModeOfInputs() const
Definition: il.h:1245
virtual const char * DebugName() const =0
Instruction * previous() const
Definition: il.h:1087
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10261
ClassTable * class_table() const
Definition: isolate.h:496
virtual BlockEntryInstr * PredecessorAt(intptr_t index) const
Definition: il.h:2066
virtual intptr_t PredecessorCount() const
Definition: il.h:2065
static bool FindPragma(Thread *T, bool only_core, const Object &object, const String &pragma_name, bool multiple=false, Object *options=nullptr)
Definition: object.cc:4151
const intptr_t variable_count_
Definition: flow_graph.h:803
virtual void ComputeInitialSets()=0
GrowableArray< BitVector * > kill_
Definition: flow_graph.h:815
void ComputeLiveInAndLiveOutSets()
Definition: flow_graph.cc:711
bool UpdateLiveOut(const BlockEntryInstr &instr)
Definition: flow_graph.cc:689
BitVector * GetKillSet(BlockEntryInstr *block) const
Definition: flow_graph.h:769
LivenessAnalysis(intptr_t variable_count, const GrowableArray< BlockEntryInstr * > &postorder)
Definition: flow_graph.cc:679
GrowableArray< BitVector * > live_out_
Definition: flow_graph.h:811
Zone * zone() const
Definition: flow_graph.h:799
BitVector * GetLiveOutSet(BlockEntryInstr *block) const
Definition: flow_graph.h:773
GrowableArray< BitVector * > live_in_
Definition: flow_graph.h:819
const GrowableArray< BlockEntryInstr * > & postorder_
Definition: flow_graph.h:805
bool UpdateLiveIn(const BlockEntryInstr &instr)
Definition: flow_graph.cc:704
static Location StackSlot(intptr_t stack_index, Register base)
Definition: locations.h:447
static Location RegisterLocation(Register reg)
Definition: locations.h:398
static intptr_t NumArgsCheckedForStaticCall(const Function &function)
static ObjectPtr null()
Definition: object.h:433
virtual const char * ToCString() const
Definition: object.h:366
bool IsNull() const
Definition: object.h:363
static Object & Handle()
Definition: object.h:407
static Object & ZoneHandle()
Definition: object.h:419
static constexpr intptr_t kNotFunctionParameter
Definition: il.h:2922
const Function & function() const
Definition: parser.h:73
bool has_arg_desc_var() const
Definition: parser.h:130
LocalVariable * RawParameterVariable(intptr_t i) const
Definition: parser.h:235
JoinEntryInstr * block() const
Definition: il.h:2817
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6693
bool is_alive() const
Definition: il.h:2827
void set_is_receiver(ReceiverType is_receiver)
Definition: il.h:2890
@ kReceiver
Definition: il.h:2884
@ kNotReceiver
Definition: il.h:2884
@ kUnknownReceiver
Definition: il.h:2884
void mark_alive()
Definition: il.h:2828
ReceiverType is_receiver() const
Definition: il.h:2886
bool Done() const
Definition: il.h:2121
static bool Fits(Range *range, RangeBoundary::RangeSize size)
Value * value() const
Definition: il.h:4109
void set_constrained_type(CompileType *type)
Definition: il.h:4116
CompileType * constrained_type() const
Definition: il.h:4117
static FunctionPtr ResolveDynamicAnyArgs(Zone *zone, const Class &receiver_class, const String &function_name, bool allow_add)
Definition: resolver.cc:185
SSACompactor(intptr_t num_blocks, intptr_t num_ssa_vars, ZoneGrowableArray< Definition * > *detached_defs)
Definition: flow_graph.cc:3018
void RenumberGraph(FlowGraph *graph)
Definition: flow_graph.cc:3028
static const Slot & GetRecordFieldSlot(Thread *thread, intptr_t offset_in_bytes)
Definition: slot.cc:324
static const Slot & GetTypeArgumentsSlotFor(Thread *thread, const Class &cls)
Definition: slot.cc:276
static const char * ToCString(Thread *thread, StringPtr ptr)
Definition: object.cc:24126
Zone * zone() const
Definition: thread_state.h:37
static Thread * Current()
Definition: thread.h:362
CompilerState & compiler_state()
Definition: thread.h:588
void CheckForSafepoint()
Definition: thread.h:1104
static intptr_t OutputIndexOf(Token::Kind token)
Definition: il.cc:7330
static UnboxInstr * Create(Representation to, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.cc:4043
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static T Minimum(T x, T y)
Definition: utils.h:36
static bool IsUint(intptr_t N, T value)
Definition: utils.h:328
bool Done() const
Definition: il.h:83
Definition: il.h:75
intptr_t use_index() const
Definition: il.h:124
Instruction * instruction() const
Definition: il.h:121
Value * CopyWithType(Zone *zone)
Definition: il.h:138
void RemoveFromUseList()
Definition: il.cc:1457
Definition * definition() const
Definition: il.h:103
void BindTo(Definition *definition)
Definition: il.h:2718
CompileType * Type()
const GrowableArray< BitVector * > & ComputeAssignedVars()
Definition: flow_graph.cc:780
bool IsStoreAlive(BlockEntryInstr *block, StoreLocalInstr *store)
Definition: flow_graph.cc:811
virtual void ComputeInitialSets()
Definition: flow_graph.cc:844
VariableLivenessAnalysis(FlowGraph *flow_graph)
Definition: flow_graph.cc:773
bool IsLastLoad(BlockEntryInstr *block, LoadLocalInstr *load)
Definition: flow_graph.cc:829
intptr_t InputCount() const
Definition: il.h:2794
Value * InputAt(intptr_t i) const
Definition: il.h:2795
static word field_offset(intptr_t index)
#define COMPILER_TIMINGS_TIMER_SCOPE(thread, timer_id)
#define THR_Print(format,...)
Definition: log.h:20
#define ASSERT(E)
#define FATAL(error)
constexpr bool FLAG_support_il_printer
Definition: flag_list.h:48
#define Z
Definition: flow_graph.cc:30
Dart_NativeFunction function
Definition: fuchsia.cc:51
int argument_count
Definition: fuchsia.cc:52
size_t length
double y
double x
bool Contains(const Container &container, const Value &value)
FrameLayout frame_layout
Definition: stack_frame.cc:76
intptr_t ComputeCallingConvention(Zone *zone, const Function &target, intptr_t argc, std::function< Representation(intptr_t)> argument_rep, bool should_assign_stack_locations, ParameterInfoArray *parameter_info)
static UnboxingInfoMetadata * UnboxingInfoMetadataOf(Zone *zone, const KernelProgramInfo &kernel_program_info, const TypedDataView &kernel_data, intptr_t kernel_data_program_offset, intptr_t kernel_offset)
Definition: kernel.cc:778
Definition: dart_vm.cc:33
static JoinEntryInstr * NewJoin(FlowGraph *graph, Instruction *inherit)
Definition: flow_graph.cc:2820
static bool CanConvertInt64ToDouble()
Definition: flow_graph.cc:1934
bool IsTypedDataViewClassId(intptr_t index)
Definition: class_id.h:439
bool IsTypedDataClassId(intptr_t index)
Definition: class_id.h:433
static GotoInstr * NewGoto(FlowGraph *graph, JoinEntryInstr *target, Instruction *inherit)
Definition: flow_graph.cc:2828
static bool ShouldInlineSimd()
Definition: flow_graph.cc:1930
bool IsTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:429
static Location EnvIndexToStackLocation(intptr_t num_direct_parameters, intptr_t env_index)
Definition: flow_graph.cc:1314
static BranchInstr * NewBranch(FlowGraph *graph, ComparisonInstr *cmp, Instruction *inherit)
Definition: flow_graph.cc:2836
InnerPointerAccess
Definition: il.h:6295
static bool IsPositiveOrZeroSmiConst(Definition *d)
Definition: flow_graph.cc:2675
Location LocationExceptionLocation()
Definition: locations.cc:484
static bool ShouldReorderBlocks(const Function &function, FlowGraph::CompilationMode mode)
Definition: flow_graph.cc:32
int32_t classid_t
Definition: globals.h:524
@ kDynamicCid
Definition: class_id.h:253
Representation
Definition: locations.h:66
const Register ARGS_DESC_REG
static bool NeedsRecordBoxing(Definition *def)
Definition: flow_graph.cc:2049
static bool IsDominatedUse(Instruction *dom, Value *use)
Definition: flow_graph.cc:2616
static void PrintBitVector(const char *tag, BitVector *v)
Definition: flow_graph.cc:743
bool IsExternalPayloadClassId(classid_t cid)
Definition: class_id.h:472
static TargetEntryInstr * NewTarget(FlowGraph *graph, Instruction *inherit)
Definition: flow_graph.cc:2812
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
const Register FPREG
const intptr_t cid
ZoneGrowableArray< MoveArgumentInstr * > MoveArgumentsArray
Definition: il.h:902
Location LocationStackTraceLocation()
Definition: locations.cc:488
static int8_t data[kExtLength]
static BinarySmiOpInstr * AsSmiShiftLeftInstruction(Definition *d)
Definition: flow_graph.cc:2683
static bool IsMarkedWithNoBoundsChecks(const Function &function)
Definition: flow_graph.cc:38
bool IsExternalTypedDataClassId(intptr_t index)
Definition: class_id.h:447
static bool IsUnboxedInteger(Representation rep)
Definition: flow_graph.cc:1925
#define Pd
Definition: globals.h:408
static bool Supports(Representation rep)
Definition: il.cc:459
ComparisonInstr * oper1
Definition: flow_graph.h:521
ComparisonInstr * oper2
Definition: flow_graph.h:522
bool Contains(intptr_t block_id) const
Definition: flow_graph.h:108
static const char * ToCString(Representation rep)
Definition: locations.cc:129