linearscan.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/compiler/backend/linearscan.h"
6
7#include "vm/bit_vector.h"
14#include "vm/log.h"
15#include "vm/parser.h"
16#include "vm/stack_frame.h"
17
18namespace dart {
19
20#if !defined(PRODUCT)
21#define INCLUDE_LINEAR_SCAN_TRACING_CODE
22#endif
23
24#if defined(INCLUDE_LINEAR_SCAN_TRACING_CODE)
25#define TRACE_ALLOC(statement) \
26 do { \
27 if (FLAG_trace_ssa_allocator && CompilerState::ShouldTrace()) statement; \
28 } while (0)
29#else
30#define TRACE_ALLOC(statement)
31#endif
32
33static constexpr intptr_t kNoVirtualRegister = -1;
34static constexpr intptr_t kTempVirtualRegister = -2;
35static constexpr intptr_t kIllegalPosition = -1;
36static constexpr intptr_t kMaxPosition = 0x7FFFFFFF;
37
38static intptr_t MinPosition(intptr_t a, intptr_t b) {
39 return (a < b) ? a : b;
40}
41
42static bool IsInstructionStartPosition(intptr_t pos) {
43 return (pos & 1) == 0;
44}
45
46static bool IsInstructionEndPosition(intptr_t pos) {
47 return (pos & 1) == 1;
48}
49
50static intptr_t ToInstructionStart(intptr_t pos) {
51 return (pos & ~1);
52}
53
54static intptr_t ToInstructionEnd(intptr_t pos) {
55 return (pos | 1);
56}
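// Worked example (hypothetical positions): every instruction is numbered
// with a pair of lifetime positions, an even "start" and the following odd
// "end". For an instruction numbered 10:
//
//   IsInstructionStartPosition(10)   // true
//   IsInstructionEndPosition(11)     // true
//   ToInstructionStart(11)           // == 10
//   ToInstructionEnd(10)             // == 11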
57
58// Additional information on loops during register allocation.
59struct ExtraLoopInfo : public ZoneAllocated {
60 ExtraLoopInfo(intptr_t s, intptr_t e)
61 : start(s), end(e), backedge_interference(nullptr) {}
62 intptr_t start;
63 intptr_t end;
64 BitVector* backedge_interference;
65};
66
67// Returns extra loop information.
68static ExtraLoopInfo* ComputeExtraLoopInfo(Zone* zone, LoopInfo* loop_info) {
69 intptr_t start = loop_info->header()->start_pos();
70 intptr_t end = start;
71 for (auto back_edge : loop_info->back_edges()) {
72 intptr_t end_pos = back_edge->end_pos();
73 if (end_pos > end) {
74 end = end_pos;
75 }
76 }
77 return new (zone) ExtraLoopInfo(start, end);
78}
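// Example (hypothetical positions): for a loop whose header starts at
// position 20 and whose back edges end at positions 36 and 58, the resulting
// ExtraLoopInfo has start == 20 and end == 58 (the largest back edge end).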
79
80static const GrowableArray<BlockEntryInstr*>& BlockOrderForAllocation(
81 const FlowGraph& flow_graph) {
82 // Currently CodegenBlockOrder is not topologically sorted in JIT and can't
83 // be used for register allocation.
84 return CompilerState::Current().is_aot() ? *flow_graph.CodegenBlockOrder()
85 : flow_graph.reverse_postorder();
86}
87
88FlowGraphAllocator::FlowGraphAllocator(const FlowGraph& flow_graph,
89 bool intrinsic_mode)
90 : flow_graph_(flow_graph),
91 reaching_defs_(flow_graph),
92 value_representations_(flow_graph.max_vreg()),
93 block_order_(BlockOrderForAllocation(flow_graph)),
94 postorder_(flow_graph.postorder()),
95 instructions_(),
96 block_entries_(),
97 extra_loop_info_(),
98 liveness_(flow_graph),
99 vreg_count_(flow_graph.max_vreg()),
100 live_ranges_(flow_graph.max_vreg()),
101 unallocated_cpu_(),
102 unallocated_fpu_(),
103 cpu_regs_(),
104 fpu_regs_(),
105 blocked_cpu_registers_(),
106 blocked_fpu_registers_(),
107 spilled_(),
108 safepoints_(),
109 register_kind_(),
110 number_of_registers_(0),
111 registers_(),
112 blocked_registers_(),
113 unallocated_(),
114 spill_slots_(),
115 quad_spill_slots_(),
116 untagged_spill_slots_(),
117 cpu_spill_slot_count_(0),
118 intrinsic_mode_(intrinsic_mode) {
119 for (intptr_t i = 0; i < vreg_count_; i++) {
120 live_ranges_.Add(nullptr);
121 }
122 for (intptr_t i = 0; i < vreg_count_; i++) {
123 value_representations_.Add(kNoRepresentation);
124 }
125
126 // All registers are marked as "not blocked" (array initialized to false).
127 // Mark the unavailable ones as "blocked" (true).
128 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
129 if ((kDartAvailableCpuRegs & (1 << i)) == 0) {
130 blocked_cpu_registers_[i] = true;
131 }
132 }
133
134 // FpuTMP is used as scratch by optimized code and parallel move resolver.
135 blocked_fpu_registers_[FpuTMP] = true;
136
137 // Block additional registers that need to be preserved when generating intrinsics.
138 // TODO(fschneider): Handle saving and restoring these registers when
139 // generating intrinsic code.
140 if (intrinsic_mode) {
141 blocked_cpu_registers_[ARGS_DESC_REG] = true;
142
143#if !defined(TARGET_ARCH_IA32)
144 // Need to preserve CODE_REG to be able to store the PC marker
145 // and load the pool pointer.
146 blocked_cpu_registers_[CODE_REG] = true;
147#endif
148 }
149}
150
151static void DeepLiveness(MaterializeObjectInstr* mat, BitVector* live_in) {
152 if (mat->was_visited_for_liveness()) {
153 return;
154 }
155 mat->mark_visited_for_liveness();
156
157 for (intptr_t i = 0; i < mat->InputCount(); i++) {
158 if (!mat->InputAt(i)->BindsToConstant()) {
159 Definition* defn = mat->InputAt(i)->definition();
160 MaterializeObjectInstr* inner_mat = defn->AsMaterializeObject();
161 if (inner_mat != nullptr) {
162 DeepLiveness(inner_mat, live_in);
163 } else {
164 intptr_t idx = defn->vreg(0);
165 live_in->Add(idx);
166 }
167 }
168 }
169}
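// Illustrative sketch (hypothetical vregs): if materialization m1 has inputs
// {a constant, v5, m2} and the nested materialization m2 has input {v7},
// DeepLiveness(m1, live_in) adds v5 and v7 to live_in; the constant input
// contributes nothing and m2 itself has no vreg to add.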
170
171void SSALivenessAnalysis::ComputeInitialSets() {
172 const intptr_t block_count = postorder_.length();
173 for (intptr_t i = 0; i < block_count; i++) {
174 BlockEntryInstr* block = postorder_[i];
175
176 BitVector* kill = kill_[i];
177 BitVector* live_in = live_in_[i];
178
179 // Iterate backwards starting at the last instruction.
180 for (BackwardInstructionIterator it(block); !it.Done(); it.Advance()) {
181 Instruction* current = it.Current();
182
183 // Initialize location summary for instruction.
184 current->InitializeLocationSummary(zone(), true); // opt
185
186 LocationSummary* locs = current->locs();
187#if defined(DEBUG)
188 locs->DiscoverWritableInputs();
189#endif
190
191 // Handle definitions.
192 Definition* current_def = current->AsDefinition();
193 if ((current_def != nullptr) && current_def->HasSSATemp()) {
194 kill->Add(current_def->vreg(0));
195 live_in->Remove(current_def->vreg(0));
196 if (current_def->HasPairRepresentation()) {
197 kill->Add(current_def->vreg(1));
198 live_in->Remove(current_def->vreg(1));
199 }
200 }
201
202 // Handle uses.
203 ASSERT(locs->input_count() == current->InputCount());
204 for (intptr_t j = 0; j < current->InputCount(); j++) {
205 Value* input = current->InputAt(j);
206
207 ASSERT(!locs->in(j).IsConstant() || input->BindsToConstant());
208 if (locs->in(j).IsConstant()) continue;
209
210 live_in->Add(input->definition()->vreg(0));
211 if (input->definition()->HasPairRepresentation()) {
212 live_in->Add(input->definition()->vreg(1));
213 }
214 }
215
216 // Process detached MoveArguments interpreting them as
217 // fixed register inputs.
218 if (current->ArgumentCount() != 0) {
219 auto move_arguments = current->GetMoveArguments();
220 for (auto move : *move_arguments) {
221 if (move->is_register_move()) {
222 auto input = move->value();
223
224 live_in->Add(input->definition()->vreg(0));
225 if (input->definition()->HasPairRepresentation()) {
226 live_in->Add(input->definition()->vreg(1));
227 }
228 }
229 }
230 }
231
232 // Add non-argument uses from the deoptimization environment (pushed
233 // arguments are not allocated by the register allocator).
234 if (current->env() != nullptr) {
235 for (Environment::DeepIterator env_it(current->env()); !env_it.Done();
236 env_it.Advance()) {
237 Definition* defn = env_it.CurrentValue()->definition();
238 if (defn->IsMaterializeObject()) {
239 // MaterializeObject instruction is not in the graph.
240 // Treat its inputs as part of the environment.
241 DeepLiveness(defn->AsMaterializeObject(), live_in);
242 } else if (!defn->IsMoveArgument() && !defn->IsConstant()) {
243 live_in->Add(defn->vreg(0));
244 if (defn->HasPairRepresentation()) {
245 live_in->Add(defn->vreg(1));
246 }
247 }
248 }
249 }
250 }
251
252 // Handle phis.
253 if (block->IsJoinEntry()) {
254 JoinEntryInstr* join = block->AsJoinEntry();
255 for (PhiIterator it(join); !it.Done(); it.Advance()) {
256 PhiInstr* phi = it.Current();
257 ASSERT(phi != nullptr);
258 kill->Add(phi->vreg(0));
259 live_in->Remove(phi->vreg(0));
260 if (phi->HasPairRepresentation()) {
261 kill->Add(phi->vreg(1));
262 live_in->Remove(phi->vreg(1));
263 }
264
265 // If a phi input is not defined by the corresponding predecessor it
266 // must be marked live-in for that predecessor.
267 for (intptr_t k = 0; k < phi->InputCount(); k++) {
268 Value* val = phi->InputAt(k);
269 if (val->BindsToConstant()) continue;
270
271 BlockEntryInstr* pred = block->PredecessorAt(k);
272 const intptr_t use = val->definition()->vreg(0);
273 if (!kill_[pred->postorder_number()]->Contains(use)) {
274 live_in_[pred->postorder_number()]->Add(use);
275 }
276 if (phi->HasPairRepresentation()) {
277 const intptr_t second_use = val->definition()->vreg(1);
278 if (!kill_[pred->postorder_number()]->Contains(second_use)) {
279 live_in_[pred->postorder_number()]->Add(second_use);
280 }
281 }
282 }
283 }
284 } else if (auto entry = block->AsBlockEntryWithInitialDefs()) {
285 // Initialize location summary for instruction if needed.
286 if (entry->IsCatchBlockEntry()) {
287 entry->InitializeLocationSummary(zone(), true); // opt
288 }
289
290 // Process initial definitions, i.e. parameters and special parameters.
291 for (intptr_t i = 0; i < entry->initial_definitions()->length(); i++) {
292 Definition* def = (*entry->initial_definitions())[i];
293 const intptr_t vreg = def->vreg(0);
294 kill_[entry->postorder_number()]->Add(vreg);
295 live_in_[entry->postorder_number()]->Remove(vreg);
296 if (def->HasPairRepresentation()) {
297 kill_[entry->postorder_number()]->Add(def->vreg(1));
298 live_in_[entry->postorder_number()]->Remove(def->vreg(1));
299 }
300 }
301 }
302 }
303}
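// The walk above only seeds the per-block sets: kill_ holds values defined in
// the block and live_in_ holds its upward-exposed uses. Conceptually, the full
// solution is then obtained by iterating the standard backward equations to a
// fixed point:
//
//   live_out(B) = union of live_in(S) over all successors S of B
//   live_in(B)  = upward_exposed_uses(B) + (live_out(B) - kill(B))
//
// which is what the shared liveness-analysis driver does with these sets.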
304
305UsePosition* LiveRange::AddUse(intptr_t pos, Location* location_slot) {
306 ASSERT(location_slot != nullptr);
307 ASSERT((first_use_interval_->start_ <= pos) &&
308 (pos <= first_use_interval_->end_));
309 if (uses_ != nullptr) {
310 if ((uses_->pos() == pos) && (uses_->location_slot() == location_slot)) {
311 return uses_;
312 } else if (uses_->pos() < pos) {
313 // If an instruction at position P is using the same value both as
314 // a fixed register input and a non-fixed input (in this order) we will
315 // add uses both at position P-1 and *then* P which will make
316 // uses_ unsorted unless we account for it here.
317 UsePosition* insert_after = uses_;
318 while ((insert_after->next() != nullptr) &&
319 (insert_after->next()->pos() < pos)) {
320 insert_after = insert_after->next();
321 }
322
323 UsePosition* insert_before = insert_after->next();
324 while (insert_before != nullptr && (insert_before->pos() == pos)) {
325 if (insert_before->location_slot() == location_slot) {
326 return insert_before;
327 }
328 insert_before = insert_before->next();
329 }
330
331 insert_after->set_next(
332 new UsePosition(pos, insert_after->next(), location_slot));
333 return insert_after->next();
334 }
335 }
336 uses_ = new UsePosition(pos, uses_, location_slot);
337 return uses_;
338}
339
340void LiveRange::AddSafepoint(intptr_t pos, LocationSummary* locs) {
341 if (spill_slot().IsConstant() &&
342 (locs->always_calls() && !locs->callee_safe_call())) {
343 // Constants have pseudo spill slot assigned to them from
344 // the very beginning. This means that we don't need to associate
345 // "always_calls" safepoints with these ranges, because they will never
346 // be spilled. We still need to associate slow-path safepoints because
347 // a value might be allocated to a register across a slow-path call.
348 return;
349 }
350
351 ASSERT(IsInstructionStartPosition(pos));
352 SafepointPosition* safepoint =
353 new SafepointPosition(ToInstructionEnd(pos), locs);
354
355 if (first_safepoint_ == nullptr) {
356 ASSERT(last_safepoint_ == nullptr);
357 first_safepoint_ = last_safepoint_ = safepoint;
358 } else {
359 ASSERT(last_safepoint_ != nullptr);
360 // We assume that safepoints list is sorted by position and that
361 // safepoints are added in this order.
362 ASSERT(last_safepoint_->pos() < pos);
363 last_safepoint_->set_next(safepoint);
364 last_safepoint_ = safepoint;
365 }
366}
367
368void LiveRange::AddHintedUse(intptr_t pos,
369 Location* location_slot,
370 Location* hint) {
371 ASSERT(hint != nullptr);
372 AddUse(pos, location_slot)->set_hint(hint);
373}
374
375void LiveRange::AddUseInterval(intptr_t start, intptr_t end) {
376 ASSERT(start < end);
377
378 // Live ranges are being built by visiting instructions in post-order.
379 // This implies that use intervals will be prepended in a monotonically
380 // decreasing order.
381 if (first_use_interval() != nullptr) {
382 // If the first use interval and the use interval we are adding
383 // touch then we can just extend the first interval to cover their
384 // union.
385 if (start > first_use_interval()->start()) {
386 // The only case when we can add intervals with start greater than
387 // start of an already created interval is BlockLocation.
388 ASSERT(vreg() == kNoVirtualRegister);
389 ASSERT(end <= first_use_interval()->end());
390 return;
391 } else if (start == first_use_interval()->start()) {
392 // Grow first interval if necessary.
393 if (end <= first_use_interval()->end()) return;
394 first_use_interval_->end_ = end;
395 return;
396 } else if (end == first_use_interval()->start()) {
397 first_use_interval()->start_ = start;
398 return;
399 }
400
401 ASSERT(end < first_use_interval()->start());
402 }
403
404 first_use_interval_ = new UseInterval(start, end, first_use_interval_);
405 if (last_use_interval_ == nullptr) {
406 ASSERT(first_use_interval_->next() == nullptr);
407 last_use_interval_ = first_use_interval_;
408 }
409}
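// Worked example (hypothetical positions): because blocks are walked
// backwards, intervals arrive with decreasing start positions, e.g.
//
//   range->AddUseInterval(10, 20);  // creates the first interval [10, 20)
//   range->AddUseInterval(4, 10);   // end == existing start, merged to [4, 20)
//   range->AddUseInterval(0, 2);    // disjoint, prepended as [0, 2), [4, 20)
//
// so the interval list stays sorted by start position without extra work.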
410
411void LiveRange::DefineAt(intptr_t pos) {
412 // Live ranges are being built by visiting instructions in post-order.
413 // This implies that use intervals will be prepended in a monotonically
414 // decreasing order.
415 // When we encounter a use of a value inside a block we optimistically
416 // expand the first use interval to cover the block from the start
417 // to the last use in the block and then we shrink it if we encounter
418 // definition of the value inside the same block.
419 if (first_use_interval_ == nullptr) {
420 // Definition without a use.
421 first_use_interval_ = new UseInterval(pos, pos + 1, nullptr);
422 last_use_interval_ = first_use_interval_;
423 } else {
424 // Shrink the first use interval. It was optimistically expanded to
425 // cover the block from the start to the last use in the block.
426 ASSERT(first_use_interval_->start_ <= pos);
427 first_use_interval_->start_ = pos;
428 }
429}
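// Worked example (hypothetical positions): a value that is live out of a
// block spanning [8, 30) first gets the optimistic interval [8, 30); if its
// definition is later discovered at position 14 inside the block,
// DefineAt(14) shrinks that interval to [14, 30). A definition that is never
// used just produces the minimal interval [pos, pos + 1).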
430
431LiveRange* FlowGraphAllocator::GetLiveRange(intptr_t vreg) {
432 if (live_ranges_[vreg] == nullptr) {
433 Representation rep = value_representations_[vreg];
434 ASSERT(rep != kNoRepresentation);
435 live_ranges_[vreg] = new LiveRange(vreg, rep);
436 }
437 return live_ranges_[vreg];
438}
439
440LiveRange* FlowGraphAllocator::MakeLiveRangeForTemporary() {
441 // Representation does not matter for temps.
442 Representation ignored = kNoRepresentation;
443 LiveRange* range = new LiveRange(kTempVirtualRegister, ignored);
444#if defined(DEBUG)
445 temporaries_.Add(range);
446#endif
447 return range;
448}
449
450void FlowGraphAllocator::BlockRegisterLocation(Location loc,
451 intptr_t from,
452 intptr_t to,
453 bool* blocked_registers,
454 LiveRange** blocking_ranges) {
455 if (blocked_registers[loc.register_code()]) {
456 return;
457 }
458
459 if (blocking_ranges[loc.register_code()] == nullptr) {
460 Representation ignored = kNoRepresentation;
461 LiveRange* range = new LiveRange(kNoVirtualRegister, ignored);
462 blocking_ranges[loc.register_code()] = range;
463 range->set_assigned_location(loc);
464#if defined(DEBUG)
465 temporaries_.Add(range);
466#endif
467 }
468
469 blocking_ranges[loc.register_code()]->AddUseInterval(from, to);
470}
471
472// Block location from the start of the instruction to its end.
473void FlowGraphAllocator::BlockLocation(Location loc,
474 intptr_t from,
475 intptr_t to) {
476 if (loc.IsRegister()) {
477 BlockRegisterLocation(loc, from, to, blocked_cpu_registers_, cpu_regs_);
478 } else if (loc.IsFpuRegister()) {
479 BlockRegisterLocation(loc, from, to, blocked_fpu_registers_, fpu_regs_);
480 } else {
481 UNREACHABLE();
482 }
483}
484
485void FlowGraphAllocator::BlockCpuRegisters(intptr_t registers,
486 intptr_t from,
487 intptr_t to) {
488 for (intptr_t r = 0; r < kNumberOfCpuRegisters; r++) {
489 if ((registers & (1 << r)) != 0) {
490 BlockLocation(Location::RegisterLocation(static_cast<Register>(r)), from,
491 to);
492 }
493 }
494}
495
496void FlowGraphAllocator::BlockFpuRegisters(intptr_t fpu_registers,
497 intptr_t from,
498 intptr_t to) {
499 for (intptr_t r = 0; r < kNumberOfFpuRegisters; r++) {
500 if ((fpu_registers & (1 << r)) != 0) {
501 BlockLocation(Location::FpuRegisterLocation(static_cast<FpuRegister>(r)),
502 from, to);
503 }
504 }
505}
506
507void LiveRange::Print() {
508 if (first_use_interval() == nullptr) {
509 return;
510 }
511
512 THR_Print(" live range v%" Pd " [%" Pd ", %" Pd ") in ", vreg(), Start(),
513 End());
514 assigned_location().Print();
515 if (assigned_location().IsConstant()) {
516 THR_Print(" %s", assigned_location().constant().ToCString());
517 }
518 if (!spill_slot_.IsInvalid() && !spill_slot_.IsConstant()) {
519 THR_Print(" assigned spill slot: %s", spill_slot_.ToCString());
520 }
521 THR_Print("\n");
522
523 SafepointPosition* safepoint = first_safepoint();
524 while (safepoint != nullptr) {
525 THR_Print(" Safepoint [%" Pd "]: ", safepoint->pos());
526 safepoint->locs()->stack_bitmap().Print();
527 THR_Print("\n");
528 safepoint = safepoint->next();
529 }
530
531 UsePosition* use_pos = uses_;
532 for (UseInterval* interval = first_use_interval_; interval != nullptr;
533 interval = interval->next()) {
534 THR_Print(" use interval [%" Pd ", %" Pd ")\n", interval->start(),
535 interval->end());
536 while ((use_pos != nullptr) && (use_pos->pos() <= interval->end())) {
537 THR_Print(" use at %" Pd "", use_pos->pos());
538 if (use_pos->location_slot() != nullptr) {
539 THR_Print(" as ");
540 use_pos->location_slot()->Print();
541 }
542 THR_Print("\n");
543 use_pos = use_pos->next();
544 }
545 }
546
547 if (next_sibling() != nullptr) {
548 next_sibling()->Print();
549 }
550}
551
552void FlowGraphAllocator::PrintLiveRanges() {
553#if defined(DEBUG)
554 for (intptr_t i = 0; i < temporaries_.length(); i++) {
555 temporaries_[i]->Print();
556 }
557#endif
558
559 for (intptr_t i = 0; i < live_ranges_.length(); i++) {
560 if (live_ranges_[i] != nullptr) {
561 live_ranges_[i]->Print();
562 }
563 }
564}
565
566// Returns true if all uses of the given range inside the
567// given loop boundary have Any allocation policy.
568static bool HasOnlyUnconstrainedUsesInLoop(LiveRange* range,
569 intptr_t boundary) {
570 UsePosition* use = range->first_use();
571 while ((use != nullptr) && (use->pos() < boundary)) {
572 if (!use->location_slot()->Equals(Location::Any())) {
573 return false;
574 }
575 use = use->next();
576 }
577 return true;
578}
579
580// Returns true if all uses of the given range have Any allocation policy.
581static bool HasOnlyUnconstrainedUses(LiveRange* range) {
582 UsePosition* use = range->first_use();
583 while (use != nullptr) {
584 if (!use->location_slot()->Equals(Location::Any())) {
585 return false;
586 }
587 use = use->next();
588 }
589 return true;
590}
591
592void FlowGraphAllocator::BuildLiveRanges() {
593 const intptr_t block_count = block_order_.length();
594 ASSERT(block_order_[0]->IsGraphEntry());
595 BitVector* current_interference_set = nullptr;
596 Zone* zone = flow_graph_.zone();
597 for (intptr_t x = block_count - 1; x > 0; --x) {
598 BlockEntryInstr* block = block_order_[x];
599
600 ASSERT(BlockEntryAt(block->start_pos()) == block);
601
602 // For every SSA value that is live out of this block, create an interval
603 // that covers the whole block. It will be shortened if we encounter a
604 // definition of this value in this block.
605 for (BitVector::Iterator it(
606 liveness_.GetLiveOutSetAt(block->postorder_number()));
607 !it.Done(); it.Advance()) {
608 LiveRange* range = GetLiveRange(it.Current());
609 range->AddUseInterval(block->start_pos(), block->end_pos());
610 }
611
612 LoopInfo* loop_info = block->loop_info();
613 if ((loop_info != nullptr) && (loop_info->IsBackEdge(block))) {
614 BitVector* backedge_interference =
615 extra_loop_info_[loop_info->id()]->backedge_interference;
616 if (backedge_interference != nullptr) {
617 // Restore the interference set for a subsequent backedge of the loop
618 // (an inner loop's header may have reset it in the meanwhile).
619 current_interference_set = backedge_interference;
620 } else {
621 // All values flowing into the loop header are live at the
622 // back edge and can interfere with phi moves.
623 current_interference_set =
624 new (zone) BitVector(zone, flow_graph_.max_vreg());
625 current_interference_set->AddAll(
626 liveness_.GetLiveInSet(loop_info->header()));
627 extra_loop_info_[loop_info->id()]->backedge_interference =
628 current_interference_set;
629 }
630 }
631
632 // Connect outgoing phi-moves that were created in NumberInstructions
633 // and find last instruction that contributes to liveness.
634 Instruction* current =
635 ConnectOutgoingPhiMoves(block, current_interference_set);
636
637 // Now process all instructions in reverse order.
638 while (current != block) {
639 // Skip parallel moves that we insert while processing instructions.
640 if (!current->IsParallelMove()) {
641 ProcessOneInstruction(block, current, current_interference_set);
642 }
643 current = current->previous();
644 }
645
646 // Check if any values live into the loop can be spilled for free.
647 if (block->IsLoopHeader()) {
648 ASSERT(loop_info != nullptr);
649 current_interference_set = nullptr;
650 for (BitVector::Iterator it(
651 liveness_.GetLiveInSetAt(block->postorder_number()));
652 !it.Done(); it.Advance()) {
653 LiveRange* range = GetLiveRange(it.Current());
654 intptr_t loop_end = extra_loop_info_[loop_info->id()]->end;
655 if (HasOnlyUnconstrainedUsesInLoop(range, loop_end)) {
656 range->MarkHasOnlyUnconstrainedUsesInLoop(loop_info->id());
657 }
658 }
659 }
660
661 if (auto join_entry = block->AsJoinEntry()) {
662 ConnectIncomingPhiMoves(join_entry);
663 } else if (auto catch_entry = block->AsCatchBlockEntry()) {
664 // Catch entries are briefly safepoints after catch entry moves execute
665 // and before execution jumps to the handler.
666 safepoints_.Add(catch_entry);
667
668 // Process initial definitions.
669 ProcessEnvironmentUses(catch_entry, catch_entry); // For lazy deopt
670 for (intptr_t i = 0; i < catch_entry->initial_definitions()->length();
671 i++) {
672 Definition* defn = (*catch_entry->initial_definitions())[i];
673 LiveRange* range = GetLiveRange(defn->vreg(0));
674 range->DefineAt(catch_entry->start_pos()); // Defined at block entry.
675 ProcessInitialDefinition(defn, range, catch_entry, i);
676 }
677 } else if (auto entry = block->AsBlockEntryWithInitialDefs()) {
678 ASSERT(block->IsFunctionEntry() || block->IsOsrEntry());
679 auto& initial_definitions = *entry->initial_definitions();
680 for (intptr_t i = 0; i < initial_definitions.length(); i++) {
681 Definition* defn = initial_definitions[i];
682 if (defn->HasPairRepresentation()) {
683 // The lower bits are pushed after the higher bits
684 LiveRange* range = GetLiveRange(defn->vreg(1));
685 range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
686 range->DefineAt(entry->start_pos());
687 ProcessInitialDefinition(defn, range, entry, i,
688 /*second_location_for_definition=*/true);
689 }
690 LiveRange* range = GetLiveRange(defn->vreg(0));
691 range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
692 range->DefineAt(entry->start_pos());
693 ProcessInitialDefinition(defn, range, entry, i);
694 }
695 }
696 }
697
698 // Process incoming parameters and constants. Do this after all other
699 // instructions so that safepoints for all calls have already been found.
700 GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
701 for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); i++) {
702 Definition* defn = (*graph_entry->initial_definitions())[i];
703 if (defn->HasPairRepresentation()) {
704 // The lower bits are pushed after the higher bits
705 LiveRange* range = GetLiveRange(defn->vreg(1));
706 range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
707 range->DefineAt(graph_entry->start_pos());
708 ProcessInitialDefinition(defn, range, graph_entry, i,
709 /*second_location_for_definition=*/true);
710 }
711 LiveRange* range = GetLiveRange(defn->vreg(0));
712 range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
713 range->DefineAt(graph_entry->start_pos());
714 ProcessInitialDefinition(defn, range, graph_entry, i);
715 }
716}
717
718void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
719 intptr_t pos,
720 Location::Kind kind) {
721 if (range->End() > pos) {
722 LiveRange* tail = range->SplitAt(pos);
723 CompleteRange(tail, kind);
724 }
725}
726
727bool FlowGraphAllocator::IsSuspendStateParameter(Definition* defn) {
728 if (auto param = defn->AsParameter()) {
729 if ((param->GetBlock()->IsOsrEntry() ||
730 param->GetBlock()->IsCatchBlockEntry()) &&
731 flow_graph_.SuspendStateVar() != nullptr &&
732 param->env_index() == flow_graph_.SuspendStateEnvIndex()) {
733 return true;
734 }
735 }
736 return false;
737}
738
739void FlowGraphAllocator::AllocateSpillSlotForInitialDefinition(
740 intptr_t slot_index,
741 intptr_t range_end) {
742 if (slot_index < spill_slots_.length()) {
743 // Multiple initial definitions could exist for the same spill slot
744 // as function could have both OsrEntry and CatchBlockEntry.
745 spill_slots_[slot_index] =
746 Utils::Maximum(spill_slots_[slot_index], range_end);
747 ASSERT(!quad_spill_slots_[slot_index]);
748 ASSERT(!untagged_spill_slots_[slot_index]);
749 } else {
750 while (spill_slots_.length() < slot_index) {
751 spill_slots_.Add(kMaxPosition);
752 quad_spill_slots_.Add(false);
753 untagged_spill_slots_.Add(false);
754 }
755 spill_slots_.Add(range_end);
756 quad_spill_slots_.Add(false);
757 untagged_spill_slots_.Add(false);
758 }
759}
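// Worked example (hypothetical indices): if spill_slots_ currently has one
// entry and this is called with slot_index == 3, indices 1 and 2 are padded
// with kMaxPosition (so they are never considered free) and index 3 records
// range_end; a second call for slot_index == 3 only keeps the larger end
// position.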
760
761void FlowGraphAllocator::CompleteRange(Definition* defn, LiveRange* range) {
762 AssignSafepoints(defn, range);
763
764 if (range->has_uses_which_require_stack()) {
765 // Reserve a spill slot on the stack if it is not yet reserved.
766 if (range->spill_slot().IsInvalid() ||
767 !range->spill_slot().HasStackIndex()) {
768 range->set_spill_slot(Location()); // Clear current spill slot if any.
769 AllocateSpillSlotFor(range);
770 TRACE_ALLOC(
771 THR_Print("Allocated spill slot for %s which has use(s) which "
772 "require a stack slot.\n",
773 defn->ToCString()));
774 if (range->representation() == kTagged) {
775 MarkAsObjectAtSafepoints(range);
776 }
777 }
778
779 // Eagerly allocate all uses which require stack.
780 UsePosition* prev = nullptr;
781 for (UsePosition* use = range->first_use(); use != nullptr;
782 use = use->next()) {
783 if (use->location_slot()->Equals(Location::RequiresStack())) {
784 // Allocate this use and remove it from the list.
785 ConvertUseTo(use, range->spill_slot());
786
787 // Unlink the current use.
788 if (prev == nullptr) {
789 range->set_first_use(use->next());
790 } else {
791 prev->set_next(use->next());
792 }
793 } else {
794 prev = use;
795 }
796 }
797 }
798}
799
800void FlowGraphAllocator::ProcessInitialDefinition(
801 Definition* defn,
802 LiveRange* range,
803 BlockEntryInstr* block,
804 intptr_t initial_definition_index,
805 bool second_location_for_definition) {
806 // Save the range end because it may change below.
807 const intptr_t range_end = range->End();
808
809 if (auto param = defn->AsParameter()) {
810 auto location = param->location();
811 RELEASE_ASSERT(!location.IsInvalid());
812 if (location.IsPairLocation()) {
813 location =
814 location.AsPairLocation()->At(second_location_for_definition ? 1 : 0);
815 }
816 range->set_assigned_location(location);
817 if (location.IsMachineRegister()) {
818 CompleteRange(defn, range);
819 if (range->End() > (GetLifetimePosition(block) + 1)) {
820 SplitInitialDefinitionAt(range, GetLifetimePosition(block) + 1,
821 location.kind());
822 }
823 ConvertAllUses(range);
824 BlockLocation(location, GetLifetimePosition(block),
825 GetLifetimePosition(block) + 1);
826 return;
827 } else {
828 range->set_spill_slot(location);
829 }
830 } else {
831 ConstantInstr* constant = defn->AsConstant();
832 ASSERT(constant != nullptr);
833 const intptr_t pair_index = second_location_for_definition ? 1 : 0;
834 range->set_assigned_location(Location::Constant(constant, pair_index));
835 range->set_spill_slot(Location::Constant(constant, pair_index));
836 }
837 CompleteRange(defn, range);
838 range->finger()->Initialize(range);
839 UsePosition* use =
840 range->finger()->FirstRegisterBeneficialUse(block->start_pos());
841 if (use != nullptr) {
842 LiveRange* tail = SplitBetween(range, block->start_pos(), use->pos());
843 CompleteRange(tail, defn->RegisterKindForResult());
844 }
845 ConvertAllUses(range);
846 Location spill_slot = range->spill_slot();
847 if (spill_slot.IsStackSlot() && spill_slot.base_reg() == FPREG &&
848 spill_slot.stack_index() <=
849 compiler::target::frame_layout.param_end_from_fp &&
850 !IsSuspendStateParameter(defn) && !defn->IsConstant()) {
851 // On entry to the function, range is stored on the stack above the FP in
852 // the same space which is used for spill slots. Update spill slot state to
853 // reflect that and prevent register allocator from reusing this space as a
854 // spill slot.
855 // Do not allocate spill slot for OSR parameter corresponding to
856 // a synthetic :suspend_state variable as it is already allocated
857 // in AllocateSpillSlotForSuspendState.
858 ASSERT(defn->IsParameter());
859 ASSERT(defn->AsParameter()->env_index() == initial_definition_index);
860 const intptr_t spill_slot_index =
861 -compiler::target::frame_layout.VariableIndexForFrameSlot(
862 spill_slot.stack_index());
863 AllocateSpillSlotForInitialDefinition(spill_slot_index, range_end);
864 // Note, all incoming parameters are assumed to be tagged.
865 MarkAsObjectAtSafepoints(range);
866 } else if (defn->IsConstant() && block->IsCatchBlockEntry() &&
867 (initial_definition_index >=
868 flow_graph_.num_direct_parameters())) {
869 // Constants at catch block entries consume spill slots.
870 AllocateSpillSlotForInitialDefinition(
871 initial_definition_index - flow_graph_.num_direct_parameters(),
872 range_end);
873 }
874}
875
876static Location::Kind RegisterKindFromPolicy(Location loc) {
877 if (loc.policy() == Location::kRequiresFpuRegister) {
878 return Location::kFpuRegister;
879 } else {
880 return Location::kRegister;
881 }
882}
883
884//
885// When describing shape of live ranges in comments below we are going to use
886// the following notation:
887//
888// B block entry
889// g g' start and end of goto instruction
890// i i' start and end of any other instruction
891// j j' start and end of any other instruction
892
893// - body of a use interval
894// [ start of a use interval
895// ) end of a use interval
896// * use
897//
898// For example diagram
899//
900// i i'
901// value --*--)
902//
903// can be read as: use interval for value starts somewhere before instruction
904// and extends until currently processed instruction, there is a use of value
905// at the start of the instruction.
906//
907
908Instruction* FlowGraphAllocator::ConnectOutgoingPhiMoves(
909 BlockEntryInstr* block,
910 BitVector* interfere_at_backedge) {
911 Instruction* last = block->last_instruction();
912
913 GotoInstr* goto_instr = last->AsGoto();
914 if (goto_instr == nullptr) return last;
915
916 // If we have a parallel move here then the successor block must be a
917 // join with phis. The phi inputs contribute uses to each predecessor
918 // block (and the phi outputs contribute definitions in the successor
919 // block).
920 if (!goto_instr->HasParallelMove()) return goto_instr->previous();
921 ParallelMoveInstr* parallel_move = goto_instr->parallel_move();
922
923 // All uses are recorded at the position of parallel move preceding goto.
924 const intptr_t pos = GetLifetimePosition(goto_instr);
925
926 JoinEntryInstr* join = goto_instr->successor();
927 ASSERT(join != nullptr);
928
929 // Search for the index of the current block in the predecessors of
930 // the join.
931 const intptr_t pred_index = join->IndexOfPredecessor(block);
932
933 // Record the corresponding phi input use for each phi.
934 intptr_t move_index = 0;
935 for (PhiIterator it(join); !it.Done(); it.Advance()) {
936 PhiInstr* phi = it.Current();
937 Value* val = phi->InputAt(pred_index);
938 MoveOperands* move = parallel_move->MoveOperandsAt(move_index++);
939
940 ConstantInstr* constant = val->definition()->AsConstant();
941 if (constant != nullptr) {
942 move->set_src(Location::Constant(constant, /*pair_index*/ 0));
943 if (val->definition()->HasPairRepresentation()) {
944 move = parallel_move->MoveOperandsAt(move_index++);
945 move->set_src(Location::Constant(constant, /*pair_index*/ 1));
946 }
947 continue;
948 }
949
950 // Expected shape of live ranges:
951 //
952 // g g'
953 // value --*
954 //
955 intptr_t vreg = val->definition()->vreg(0);
956 LiveRange* range = GetLiveRange(vreg);
957 if (interfere_at_backedge != nullptr) interfere_at_backedge->Add(vreg);
958
959 range->AddUseInterval(block->start_pos(), pos);
960 range->AddHintedUse(pos, move->src_slot(),
961 GetLiveRange(phi->vreg(0))->assigned_location_slot());
962 move->set_src(Location::PrefersRegister());
963
964 if (val->definition()->HasPairRepresentation()) {
965 move = parallel_move->MoveOperandsAt(move_index++);
966 vreg = val->definition()->vreg(1);
967 range = GetLiveRange(vreg);
968 if (interfere_at_backedge != nullptr) {
969 interfere_at_backedge->Add(vreg);
970 }
971 range->AddUseInterval(block->start_pos(), pos);
972 range->AddHintedUse(pos, move->src_slot(),
973 GetLiveRange(phi->vreg(1))->assigned_location_slot());
974 move->set_src(Location::PrefersRegister());
975 }
976 }
977
978 // Begin backward iteration with the instruction before the parallel
979 // move.
980 return goto_instr->previous();
981}
982
983void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) {
984 // For join blocks we need to add destinations of phi resolution moves
985 // to phi's live range so that register allocator will fill them with moves.
986
987 // All uses are recorded at the start position in the block.
988 const intptr_t pos = join->start_pos();
989 const bool is_loop_header = join->IsLoopHeader();
990
991 intptr_t move_idx = 0;
992 for (PhiIterator it(join); !it.Done(); it.Advance()) {
993 PhiInstr* phi = it.Current();
994 ASSERT(phi != nullptr);
995 const intptr_t vreg = phi->vreg(0);
996 ASSERT(vreg >= 0);
997 const bool is_pair_phi = phi->HasPairRepresentation();
998
999 // Expected shape of live range:
1000 //
1001 // B
1002 // phi [--------
1003 //
1004 LiveRange* range = GetLiveRange(vreg);
1005 range->DefineAt(pos); // Shorten live range.
1006 if (is_loop_header) range->mark_loop_phi();
1007
1008 if (is_pair_phi) {
1009 LiveRange* second_range = GetLiveRange(phi->vreg(1));
1010 second_range->DefineAt(pos); // Shorten live range.
1011 if (is_loop_header) second_range->mark_loop_phi();
1012 }
1013
1014 for (intptr_t pred_idx = 0; pred_idx < phi->InputCount(); pred_idx++) {
1015 BlockEntryInstr* pred = join->PredecessorAt(pred_idx);
1016 GotoInstr* goto_instr = pred->last_instruction()->AsGoto();
1017 ASSERT((goto_instr != nullptr) && (goto_instr->HasParallelMove()));
1018 MoveOperands* move =
1019 goto_instr->parallel_move()->MoveOperandsAt(move_idx);
1020 move->set_dest(Location::PrefersRegister());
1021 range->AddUse(pos, move->dest_slot());
1022 if (is_pair_phi) {
1023 LiveRange* second_range = GetLiveRange(phi->vreg(1));
1024 MoveOperands* second_move =
1025 goto_instr->parallel_move()->MoveOperandsAt(move_idx + 1);
1026 second_move->set_dest(Location::PrefersRegister());
1027 second_range->AddUse(pos, second_move->dest_slot());
1028 }
1029 }
1030
1031 // All phi resolution moves are connected. Phi's live range is
1032 // complete.
1033 AssignSafepoints(phi, range);
1034 CompleteRange(range, phi->RegisterKindForResult());
1035 if (is_pair_phi) {
1036 LiveRange* second_range = GetLiveRange(phi->vreg(1));
1037 AssignSafepoints(phi, second_range);
1038 CompleteRange(second_range, phi->RegisterKindForResult());
1039 }
1040
1041 move_idx += is_pair_phi ? 2 : 1;
1042 }
1043}
1044
1045void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
1046 Instruction* current) {
1047 ASSERT(current->env() != nullptr);
1048 Environment* env = current->env();
1049 while (env != nullptr) {
1050 // Any value mentioned in the deoptimization environment should survive
1051 // until the end of instruction but it does not need to be in the register.
1052 // Expected shape of live range:
1053 //
1054 // i i'
1055 // value -----*
1056 //
1057
1058 if (env->Length() == 0) {
1059 env = env->outer();
1060 continue;
1061 }
1062
1063 const intptr_t block_start_pos = block->start_pos();
1064 const intptr_t use_pos = GetLifetimePosition(current) + 1;
1065
1066 Location* locations = flow_graph_.zone()->Alloc<Location>(env->Length());
1067
1068 for (intptr_t i = 0; i < env->Length(); ++i) {
1069 Value* value = env->ValueAt(i);
1070 Definition* def = value->definition();
1071
1072 if (def->HasPairRepresentation()) {
1073 locations[i] = Location::Pair(Location::Any(), Location::Any());
1074 } else {
1075 locations[i] = Location::Any();
1076 }
1077
1078 if (env->outer() == nullptr && flow_graph_.SuspendStateVar() != nullptr &&
1079 i == flow_graph_.SuspendStateEnvIndex()) {
1080 // Make sure synthetic :suspend_state variable gets a correct
1081 // location on the stack frame. It is used by deoptimization.
1082 const intptr_t slot_index =
1083 compiler::target::frame_layout.FrameSlotForVariable(
1084 flow_graph_.parsed_function().suspend_state_var());
1085 locations[i] = Location::StackSlot(slot_index, FPREG);
1086 if (!def->IsConstant()) {
1087 // Update live intervals for Parameter/Phi definitions
1088 // corresponding to :suspend_state in OSR and try/catch cases as
1089 // they are still used when resolving control flow.
1090 ASSERT(def->IsParameter() || def->IsPhi());
1091 ASSERT(!def->HasPairRepresentation());
1092 LiveRange* range = GetLiveRange(def->vreg(0));
1093 range->AddUseInterval(block_start_pos, use_pos);
1094 }
1095 continue;
1096 }
1097
1098 if (def->IsMoveArgument()) {
1099 // Frame size is unknown until after allocation.
1100 locations[i] = Location::NoLocation();
1101 continue;
1102 }
1103
1104 if (auto constant = def->AsConstant()) {
1105 locations[i] = Location::Constant(constant);
1106 continue;
1107 }
1108
1109 if (auto mat = def->AsMaterializeObject()) {
1110 // MaterializeObject itself produces no value. But its uses
1111 // are treated as part of the environment: allocated locations
1112 // will be used when building deoptimization data.
1113 locations[i] = Location::NoLocation();
1114 ProcessMaterializationUses(block, block_start_pos, use_pos, mat);
1115 continue;
1116 }
1117
1118 if (def->HasPairRepresentation()) {
1119 PairLocation* location_pair = locations[i].AsPairLocation();
1120 {
1121 // First live range.
1122 LiveRange* range = GetLiveRange(def->vreg(0));
1123 range->AddUseInterval(block_start_pos, use_pos);
1124 range->AddUse(use_pos, location_pair->SlotAt(0));
1125 }
1126 {
1127 // Second live range.
1128 LiveRange* range = GetLiveRange(def->vreg(1));
1129 range->AddUseInterval(block_start_pos, use_pos);
1130 range->AddUse(use_pos, location_pair->SlotAt(1));
1131 }
1132 } else {
1133 LiveRange* range = GetLiveRange(def->vreg(0));
1134 range->AddUseInterval(block_start_pos, use_pos);
1135 range->AddUse(use_pos, &locations[i]);
1136 }
1137 }
1138
1139 env->set_locations(locations);
1140 env = env->outer();
1141 }
1142}
1143
1144void FlowGraphAllocator::ProcessMaterializationUses(
1145 BlockEntryInstr* block,
1146 const intptr_t block_start_pos,
1147 const intptr_t use_pos,
1148 MaterializeObjectInstr* mat) {
1149 // Materialization can occur several times in the same environment.
1150 // Check if we already processed this one.
1151 if (mat->locations() != nullptr) {
1152 return; // Already processed.
1153 }
1154
1155 // Initialize location for every input of the MaterializeObject instruction.
1156 Location* locations = flow_graph_.zone()->Alloc<Location>(mat->InputCount());
1157 mat->set_locations(locations);
1158
1159 for (intptr_t i = 0; i < mat->InputCount(); ++i) {
1160 Definition* def = mat->InputAt(i)->definition();
1161
1162 ConstantInstr* constant = def->AsConstant();
1163 if (constant != nullptr) {
1164 locations[i] = Location::Constant(constant);
1165 continue;
1166 }
1167
1168 if (def->HasPairRepresentation()) {
1169 locations[i] = Location::Pair(Location::Any(), Location::Any());
1170 PairLocation* location_pair = locations[i].AsPairLocation();
1171 {
1172 // First live range.
1173 LiveRange* range = GetLiveRange(def->vreg(0));
1174 range->AddUseInterval(block_start_pos, use_pos);
1175 range->AddUse(use_pos, location_pair->SlotAt(0));
1176 }
1177 {
1178 // Second live range.
1179 LiveRange* range = GetLiveRange(def->vreg(1));
1180 range->AddUseInterval(block_start_pos, use_pos);
1181 range->AddUse(use_pos, location_pair->SlotAt(1));
1182 }
1183 } else if (def->IsMaterializeObject()) {
1184 locations[i] = Location::NoLocation();
1185 ProcessMaterializationUses(block, block_start_pos, use_pos,
1186 def->AsMaterializeObject());
1187 } else {
1188 locations[i] = Location::Any();
1189 LiveRange* range = GetLiveRange(def->vreg(0));
1190 range->AddUseInterval(block_start_pos, use_pos);
1191 range->AddUse(use_pos, &locations[i]);
1192 }
1193 }
1194}
1195
1196void FlowGraphAllocator::ProcessOneInput(BlockEntryInstr* block,
1197 intptr_t pos,
1198 Location* in_ref,
1199 Value* input,
1200 intptr_t vreg,
1201 RegisterSet* live_registers) {
1202 ASSERT(in_ref != nullptr);
1203 ASSERT(!in_ref->IsPairLocation());
1204 ASSERT(input != nullptr);
1205 ASSERT(block != nullptr);
1206 LiveRange* range = GetLiveRange(vreg);
1207 if (in_ref->IsMachineRegister()) {
1208 // Input is expected in a fixed register. Expected shape of
1209 // live ranges:
1210 //
1211 // j' i i'
1212 // value --*
1213 // register [-----)
1214 //
1215 if (live_registers != nullptr) {
1216 live_registers->Add(*in_ref, range->representation());
1217 }
1218 MoveOperands* move = AddMoveAt(pos - 1, *in_ref, Location::Any());
1219 ASSERT(!in_ref->IsRegister() ||
1220 ((1 << in_ref->reg()) & kDartAvailableCpuRegs) != 0);
1221 BlockLocation(*in_ref, pos - 1, pos + 1);
1222 range->AddUseInterval(block->start_pos(), pos - 1);
1223 range->AddHintedUse(pos - 1, move->src_slot(), in_ref);
1224 } else if (in_ref->IsUnallocated()) {
1225 if (in_ref->policy() == Location::kWritableRegister) {
1226 // Writable unallocated input. Expected shape of
1227 // live ranges:
1228 //
1229 // i i'
1230 // value --*
1231 // temp [--)
1232 MoveOperands* move = AddMoveAt(pos, Location::RequiresRegister(),
1233 Location::Any());
1234
1235 // Add uses to the live range of the input.
1236 range->AddUseInterval(block->start_pos(), pos);
1237 range->AddUse(pos, move->src_slot());
1238
1239 // Create live range for the temporary.
1240 LiveRange* temp = MakeLiveRangeForTemporary();
1241 temp->AddUseInterval(pos, pos + 1);
1242 temp->AddHintedUse(pos, in_ref, move->src_slot());
1243 temp->AddUse(pos, move->dest_slot());
1244 *in_ref = Location::RequiresRegister();
1245 CompleteRange(temp, RegisterKindFromPolicy(*in_ref));
1246 } else {
1247 if (in_ref->policy() == Location::kRequiresStack) {
1248 range->mark_has_uses_which_require_stack();
1249 }
1250
1251 // Normal unallocated input. Expected shape of
1252 // live ranges:
1253 //
1254 // i i'
1255 // value -----*
1256 //
1257 range->AddUseInterval(block->start_pos(), pos + 1);
1258 range->AddUse(pos + 1, in_ref);
1259 }
1260 } else {
1261 ASSERT(in_ref->IsConstant());
1262 }
1263}
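// Example (hypothetical position): for an instruction at start position i
// with an input fixed to register R, the code above emits a parallel move
// "R <- Any" at i - 1, blocks R for [i - 1, i + 1), and ends the input's use
// interval at i - 1 with a use hinted towards R. The allocator then only has
// to satisfy an unconstrained use; the inserted move satisfies the fixed
// register constraint itself.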
1264
1265void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
1266 intptr_t pos,
1267 Location* out,
1268 Definition* def,
1269 intptr_t vreg,
1270 bool output_same_as_first_input,
1271 Location* in_ref,
1272 Definition* input,
1273 intptr_t input_vreg,
1274 BitVector* interference_set) {
1275 ASSERT(out != nullptr);
1276 ASSERT(!out->IsPairLocation());
1277 ASSERT(def != nullptr);
1278 ASSERT(block != nullptr);
1279
1280 LiveRange* range =
1281 vreg >= 0 ? GetLiveRange(vreg) : MakeLiveRangeForTemporary();
1282
1283 // Process output and finalize its liverange.
1284 if (out->IsMachineRegister()) {
1285 // Fixed output location. Expected shape of live range:
1286 //
1287 // i i' j j'
1288 // register [--)
1289 // output [-------
1290 //
1291 ASSERT(!out->IsRegister() ||
1292 ((1 << out->reg()) & kDartAvailableCpuRegs) != 0);
1293 BlockLocation(*out, pos, pos + 1);
1294
1295 if (range->vreg() == kTempVirtualRegister) return;
1296
1297 // We need to emit move connecting fixed register with another location
1298 // that will be allocated for this output's live range.
1299 // Special case: fixed output followed by a fixed input last use.
1300 UsePosition* use = range->first_use();
1301
1302 // If the value has no uses we don't need to allocate it.
1303 if (use == nullptr) return;
1304
1305 // Connect fixed output to all inputs that immediately follow to avoid
1306 // allocating an intermediary register.
1307 for (; use != nullptr; use = use->next()) {
1308 if (use->pos() == (pos + 1)) {
1309 // Allocate and then drop this use.
1310 ASSERT(use->location_slot()->IsUnallocated());
1311 *(use->location_slot()) = *out;
1312 range->set_first_use(use->next());
1313 } else {
1314 ASSERT(use->pos() > (pos + 1)); // sorted
1315 break;
1316 }
1317 }
1318
1319 // Shorten live range to the point of definition, this might make the range
1320 // empty (if the only use immediately follows). If range is not empty add
1321 // move from a fixed register to an unallocated location.
1322 range->DefineAt(pos + 1);
1323 if (range->Start() == range->End()) return;
1324
1325 MoveOperands* move = AddMoveAt(pos + 1, Location::Any(), *out);
1326 range->AddHintedUse(pos + 1, move->dest_slot(), out);
1327 } else if (output_same_as_first_input) {
1328 ASSERT(in_ref != nullptr);
1329 ASSERT(input != nullptr);
1330 // Output register will contain a value of the first input at instruction's
1331 // start. Expected shape of live ranges:
1332 //
1333 // i i'
1334 // input #0 --*
1335 // output [----
1336 //
1337 ASSERT(in_ref->Equals(Location::RequiresRegister()) ||
1338 in_ref->Equals(Location::RequiresFpuRegister()));
1339 *out = *in_ref;
1340 // Create move that will copy value between input and output.
1341 MoveOperands* move =
1342 AddMoveAt(pos, Location::RequiresRegister(), Location::Any());
1343
1344 // Add uses to the live range of the input.
1345 LiveRange* input_range = GetLiveRange(input_vreg);
1346 input_range->AddUseInterval(block->start_pos(), pos);
1347 input_range->AddUse(pos, move->src_slot());
1348
1349 // Shorten output live range to the point of definition and add both input
1350 // and output uses slots to be filled by allocator.
1351 range->DefineAt(pos);
1352 range->AddHintedUse(pos, out, move->src_slot());
1353 range->AddUse(pos, move->dest_slot());
1354 range->AddUse(pos, in_ref);
1355
1356 if ((interference_set != nullptr) && (range->vreg() >= 0) &&
1357 interference_set->Contains(range->vreg())) {
1358 interference_set->Add(input->vreg(0));
1359 }
1360 } else {
1361 // Normal unallocated location that requires a register. Expected shape of
1362 // live range:
1363 //
1364 // i i'
1365 // output [-------
1366 //
1369
1370 // Shorten live range to the point of definition and add use to be filled by
1371 // allocator.
1372 range->DefineAt(pos);
1373 range->AddUse(pos, out);
1374 }
1375
1376 AssignSafepoints(def, range);
1377 CompleteRange(range, def->RegisterKindForResult());
1378}
1379
1380// Create and update live ranges corresponding to instruction's inputs,
1381// temporaries and output.
1382void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
1383 Instruction* current,
1384 BitVector* interference_set) {
1385 LocationSummary* locs = current->locs();
1386
1387 Definition* def = current->AsDefinition();
1388 if ((def != nullptr) && (def->AsConstant() != nullptr)) {
1389 ASSERT(!def->HasPairRepresentation());
1390 LiveRange* range =
1391 (def->vreg(0) != -1) ? GetLiveRange(def->vreg(0)) : nullptr;
1392
1393 // Drop definitions of constants that have no uses.
1394 if ((range == nullptr) || (range->first_use() == nullptr)) {
1395 locs->set_out(0, Location::NoLocation());
1396 return;
1397 }
1398
1399 // If this constant has only unconstrained uses convert them all
1400 // to use the constant directly and drop this definition.
1401 // TODO(vegorov): improve allocation when we have enough registers to keep
1402 // constants used in the loop in them.
1403 if (HasOnlyUnconstrainedUses(range)) {
1404 ConstantInstr* constant_instr = def->AsConstant();
1405 range->set_assigned_location(Location::Constant(constant_instr));
1406 range->set_spill_slot(Location::Constant(constant_instr));
1407 range->finger()->Initialize(range);
1408 ConvertAllUses(range);
1409
1410 locs->set_out(0, Location::NoLocation());
1411 return;
1412 }
1413 }
1414
1415 const intptr_t pos = GetLifetimePosition(current);
1416 ASSERT(IsInstructionStartPosition(pos));
1417
1418 ASSERT(locs->input_count() == current->InputCount());
1419
1420 // Normalize same-as-first-input output if input is specified as
1421 // fixed register.
1422 if (locs->out(0).IsUnallocated() &&
1423 (locs->out(0).policy() == Location::kSameAsFirstInput)) {
1424 if (locs->in(0).IsPairLocation()) {
1425 // Pair input, pair output.
1426 PairLocation* in_pair = locs->in(0).AsPairLocation();
1427 ASSERT(in_pair->At(0).IsMachineRegister() ==
1428 in_pair->At(1).IsMachineRegister());
1429 if (in_pair->At(0).IsMachineRegister() &&
1430 in_pair->At(1).IsMachineRegister()) {
1431 locs->set_out(0, Location::Pair(in_pair->At(0), in_pair->At(1)));
1432 }
1433 } else if (locs->in(0).IsMachineRegister()) {
1434 // Single input, single output.
1435 locs->set_out(0, locs->in(0));
1436 }
1437 }
1438
1439 const bool output_same_as_first_input =
1440 locs->out(0).IsUnallocated() &&
1441 (locs->out(0).policy() == Location::kSameAsFirstInput);
1442
1443 // Output is same as first input which is a pair.
1444 if (output_same_as_first_input && locs->in(0).IsPairLocation()) {
1445 // Make out into a PairLocation.
1446 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
1447 Location::RequiresRegister()));
1448 }
1449 // Add uses from the deoptimization environment.
1450 if (current->env() != nullptr) ProcessEnvironmentUses(block, current);
1451
1452 // Process inputs.
1453 // Skip the first input if output is specified with kSameAsFirstInput policy,
1454 // they will be processed together at the very end.
1455 {
1456 for (intptr_t j = output_same_as_first_input ? 1 : 0;
1457 j < locs->input_count(); j++) {
1458 // Determine if we are dealing with a value pair, and if so, whether
1459 // the location is the first register or second register.
1460 Value* input = current->InputAt(j);
1461 Location* in_ref = locs->in_slot(j);
1462 RegisterSet* live_registers = nullptr;
1463 if (locs->HasCallOnSlowPath()) {
1464 live_registers = locs->live_registers();
1465 }
1466 if (in_ref->IsPairLocation()) {
1467 ASSERT(input->definition()->HasPairRepresentation());
1468 PairLocation* pair = in_ref->AsPairLocation();
1469 // Each element of the pair is assigned its own virtual register number
1470 // and is allocated its own LiveRange.
1471 ProcessOneInput(block, pos, pair->SlotAt(0), input,
1472 input->definition()->vreg(0), live_registers);
1473 ProcessOneInput(block, pos, pair->SlotAt(1), input,
1474 input->definition()->vreg(1), live_registers);
1475 } else {
1476 ProcessOneInput(block, pos, in_ref, input, input->definition()->vreg(0),
1477 live_registers);
1478 }
1479 }
1480 }
1481
1482 // Process MoveArguments interpreting them as fixed register inputs.
1483 if (current->ArgumentCount() != 0) {
1484 auto move_arguments = current->GetMoveArguments();
1485 for (auto move : *move_arguments) {
1486 if (move->is_register_move()) {
1487 auto input = move->value();
1488 if (move->location().IsPairLocation()) {
1489 auto pair = move->location().AsPairLocation();
1490 RELEASE_ASSERT(pair->At(0).IsMachineRegister() &&
1491 pair->At(1).IsMachineRegister());
1492 ProcessOneInput(block, pos, pair->SlotAt(0), input,
1493 input->definition()->vreg(0),
1494 /*live_registers=*/nullptr);
1495 ProcessOneInput(block, pos, pair->SlotAt(1), input,
1496 input->definition()->vreg(1),
1497 /*live_registers=*/nullptr);
1498 } else {
1499 RELEASE_ASSERT(move->location().IsMachineRegister());
1500 ProcessOneInput(block, pos, move->location_slot(), input,
1501 input->definition()->vreg(0),
1502 /*live_registers=*/nullptr);
1503 }
1504 }
1505 }
1506 }
1507
1508 // Process temps.
1509 for (intptr_t j = 0; j < locs->temp_count(); j++) {
1510 // Expected shape of live range:
1511 //
1512 // i i'
1513 // [--)
1514 //
1515
1516 Location temp = locs->temp(j);
1517 // We do not support pair locations for temporaries.
1518 ASSERT(!temp.IsPairLocation());
1519 if (temp.IsMachineRegister()) {
1520 ASSERT(!temp.IsRegister() ||
1521 ((1 << temp.reg()) & kDartAvailableCpuRegs) != 0);
1522 BlockLocation(temp, pos, pos + 1);
1523 } else if (temp.IsUnallocated()) {
1524 LiveRange* range = MakeLiveRangeForTemporary();
1525 range->AddUseInterval(pos, pos + 1);
1526 range->AddUse(pos, locs->temp_slot(j));
1527 CompleteRange(range, RegisterKindFromPolicy(temp));
1528 } else {
1529 UNREACHABLE();
1530 }
1531 }
1532
1533 // Block all volatile (i.e. not native ABI callee-save) registers.
1534 if (locs->native_leaf_call()) {
1535 BlockCpuRegisters(kDartVolatileCpuRegs, pos, pos + 1);
1536 BlockFpuRegisters(kAbiVolatileFpuRegs, pos, pos + 1);
1537#if defined(TARGET_ARCH_ARM)
1538 // We do not yet have a way to say that we only want FPU registers that
1539 // overlap S registers.
1540 // Block all Q/D FPU registers above the 8/16 that have S registers in
1541 // VFPv3-D32.
1542 // This way we avoid ending up trying to do single-word operations on
1543 // registers that don't support it.
1544 BlockFpuRegisters(kFpuRegistersWithoutSOverlap, pos, pos + 1);
1545#endif
1546 }
1547
1548 // Block all allocatable registers for calls.
1549 if (locs->always_calls() && !locs->callee_safe_call()) {
1550 // Expected shape of live range:
1551 //
1552 // i i'
1553 // [--)
1554 //
1555 // The stack bitmap describes the position i.
1556 BlockCpuRegisters(kAllCpuRegistersList, pos, pos + 1);
1557
1558 BlockFpuRegisters(kAllFpuRegistersList, pos, pos + 1);
1559
1560#if defined(DEBUG)
1561 // Verify that temps, inputs and output were specified as fixed
1562 // locations. Every register is blocked now so attempt to
1563 // allocate will go on the stack.
1564 for (intptr_t j = 0; j < locs->temp_count(); j++) {
1565 ASSERT(!locs->temp(j).IsPairLocation());
1566 ASSERT(!locs->temp(j).IsUnallocated());
1567 }
1568
1569 for (intptr_t j = 0; j < locs->input_count(); j++) {
1570 if (locs->in(j).IsPairLocation()) {
1571 PairLocation* pair = locs->in_slot(j)->AsPairLocation();
1572 ASSERT(!pair->At(0).IsUnallocated() ||
1573 pair->At(0).policy() == Location::kAny ||
1574 pair->At(0).policy() == Location::kRequiresStack);
1575 ASSERT(!pair->At(1).IsUnallocated() ||
1576 pair->At(1).policy() == Location::kAny ||
1577 pair->At(1).policy() == Location::kRequiresStack);
1578 } else {
1579 ASSERT(!locs->in(j).IsUnallocated() ||
1580 locs->in(j).policy() == Location::kAny ||
1581 locs->in(j).policy() == Location::kRequiresStack);
1582 }
1583 }
1584
1585 if (locs->out(0).IsPairLocation()) {
1586 PairLocation* pair = locs->out_slot(0)->AsPairLocation();
1587 ASSERT(!pair->At(0).IsUnallocated());
1588 ASSERT(!pair->At(1).IsUnallocated());
1589 } else {
1590 ASSERT(!locs->out(0).IsUnallocated());
1591 }
1592#endif
1593 }
1594
1595 if (locs->can_call() && !locs->native_leaf_call()) {
1596 safepoints_.Add(current);
1597 }
1598
1599 if (def == nullptr) {
1600 ASSERT(locs->out(0).IsInvalid());
1601 return;
1602 }
1603
1604 if (locs->out(0).IsInvalid()) {
1605 ASSERT(def->vreg(0) < 0);
1606 return;
1607 }
1608
1609 ASSERT(locs->output_count() == 1);
1610 Location* out = locs->out_slot(0);
1611 if (out->IsPairLocation()) {
1612 ASSERT(def->HasPairRepresentation());
1613 PairLocation* pair = out->AsPairLocation();
1614 if (output_same_as_first_input) {
1615 ASSERT(locs->in_slot(0)->IsPairLocation());
1616 PairLocation* in_pair = locs->in_slot(0)->AsPairLocation();
1617 Definition* input = current->InputAt(0)->definition();
1618 ASSERT(input->HasPairRepresentation());
1619 // Each element of the pair is assigned its own virtual register number
1620 // and is allocated its own LiveRange.
1621 ProcessOneOutput(block, pos, // BlockEntry, seq.
1622 pair->SlotAt(0), def, // (output) Location, Definition.
1623 def->vreg(0), // (output) virtual register.
1624 true, // output mapped to first input.
1625 in_pair->SlotAt(0), input, // (input) Location, Def.
1626 input->vreg(0), // (input) virtual register.
1627 interference_set);
1628 ProcessOneOutput(block, pos, pair->SlotAt(1), def, def->vreg(1), true,
1629 in_pair->SlotAt(1), input, input->vreg(1),
1630 interference_set);
1631 } else {
1632 // Each element of the pair is assigned its own virtual register number
1633 // and is allocated its own LiveRange.
1634 ProcessOneOutput(block, pos, pair->SlotAt(0), def, def->vreg(0),
1635 false, // output is not mapped to first input.
1636 nullptr, nullptr, -1, // First input not needed.
1637 interference_set);
1638 ProcessOneOutput(block, pos, pair->SlotAt(1), def, def->vreg(1), false,
1639 nullptr, nullptr, -1, interference_set);
1640 }
1641 } else {
1642 if (output_same_as_first_input) {
1643 Location* in_ref = locs->in_slot(0);
1644 Definition* input = current->InputAt(0)->definition();
1645 ASSERT(!in_ref->IsPairLocation());
1646 ProcessOneOutput(block, pos, // BlockEntry, Instruction, seq.
1647 out, def, // (output) Location, Definition.
1648 def->vreg(0), // (output) virtual register.
1649 true, // output mapped to first input.
1650 in_ref, input, // (input) Location, Def.
1651 input->vreg(0), // (input) virtual register.
1652 interference_set);
1653 } else {
1654 ProcessOneOutput(block, pos, out, def, def->vreg(0),
1655 false, // output is not mapped to first input.
1656 nullptr, nullptr, -1, // First input not needed.
1657 interference_set);
1658 }
1659 }
1660}
1661
1662static ParallelMoveInstr* CreateParallelMoveBefore(Instruction* instr,
 1663 intptr_t pos) {
 1664 ASSERT(pos > 0);
 1665 Instruction* prev = instr->previous();
 1666 ParallelMoveInstr* move = prev->AsParallelMove();
 1667 if ((move == nullptr) ||
 1668 (FlowGraphAllocator::GetLifetimePosition(move) != pos)) {
 1669 move = new ParallelMoveInstr();
 1670 prev->LinkTo(move);
 1671 move->LinkTo(instr);
 1672 FlowGraphAllocator::SetLifetimePosition(move, pos);
 1673 }
1674 return move;
1675}
1676
1677static ParallelMoveInstr* CreateParallelMoveAfter(Instruction* instr,
 1678 intptr_t pos) {
 1679 Instruction* next = instr->next();
 1680 if (next->IsParallelMove() &&
 1681 (FlowGraphAllocator::GetLifetimePosition(next) == pos)) {
 1682 return next->AsParallelMove();
 1683 }
 1684 return CreateParallelMoveBefore(next, pos + 1);
1685}
1686
1687// Linearize the control flow graph. The chosen order will be used by the
1688// linear-scan register allocator. Number most instructions with a pair of
1689// numbers representing lifetime positions. Introduce explicit parallel
1690// move instructions in the predecessors of join nodes. The moves are used
1691// for phi resolution.
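// (As a rough illustration of the numbering below: the first block entry gets
// positions 0/1, the first numbered instruction in it gets 2/3, the next 4/5,
// and so on. Even positions are instruction starts and odd positions are
// instruction ends, matching IsInstructionStartPosition and
// IsInstructionEndPosition above.)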
1692void FlowGraphAllocator::NumberInstructions() {
1693 intptr_t pos = 0;
1694
1695 for (auto block : block_order_) {
1696 instructions_.Add(block);
1697 block_entries_.Add(block);
1698 block->set_start_pos(pos);
1699 SetLifetimePosition(block, pos);
1700 pos += 2;
1701
1702 for (auto instr : block->instructions()) {
1703 // Do not assign numbers to parallel move instructions.
1704 if (instr->IsParallelMove()) continue;
1705
1706 instructions_.Add(instr);
1707 block_entries_.Add(block);
1708 SetLifetimePosition(instr, pos);
1709 pos += 2;
1710 }
1711 block->set_end_pos(pos);
1712 }
1713
1714 // Create parallel moves in join predecessors. This must be done after
1715 // all instructions are numbered.
1716 for (auto block : block_order_) {
1717 // For join entry predecessors create phi resolution moves if
1718 // necessary. They will be populated by the register allocator.
1719 JoinEntryInstr* join = block->AsJoinEntry();
1720 if (join != nullptr) {
1721 intptr_t move_count = 0;
1722 for (PhiIterator it(join); !it.Done(); it.Advance()) {
1723 move_count += it.Current()->HasPairRepresentation() ? 2 : 1;
1724 }
1725 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
 1726 // Insert the move between the last two instructions of the
 1727 // predecessor block (all such blocks have at least two instructions:
 1728 // the block entry and goto instructions).
1729 Instruction* last = block->PredecessorAt(i)->last_instruction();
1730 ASSERT(last->IsGoto());
1731
1732 ParallelMoveInstr* move = last->AsGoto()->GetParallelMove();
1733
1734 // Populate the ParallelMove with empty moves.
1735 for (intptr_t j = 0; j < move_count; j++) {
1736 move->AddMove(Location::NoLocation(), Location::NoLocation());
1737 }
1738 }
1739 }
1740 }
1741
1742 // Prepare some extra information for each loop.
1743 Zone* zone = flow_graph_.zone();
1744 const LoopHierarchy& loop_hierarchy = flow_graph_.loop_hierarchy();
1745 const intptr_t num_loops = loop_hierarchy.num_loops();
1746 for (intptr_t i = 0; i < num_loops; i++) {
1747 extra_loop_info_.Add(
1748 ComputeExtraLoopInfo(zone, loop_hierarchy.headers()[i]->loop_info()));
1749 }
1750}
1751
1752Instruction* FlowGraphAllocator::InstructionAt(intptr_t pos) const {
1753 return instructions_[pos / 2];
1754}
1755
1756BlockEntryInstr* FlowGraphAllocator::BlockEntryAt(intptr_t pos) const {
1757 return block_entries_[pos / 2];
1758}
1759
1760bool FlowGraphAllocator::IsBlockEntry(intptr_t pos) const {
1761 return IsInstructionStartPosition(pos) && InstructionAt(pos)->IsBlockEntry();
1762}
1763
1764void AllocationFinger::Initialize(LiveRange* range) {
 1765 first_pending_use_interval_ = range->first_use_interval();
1766 first_register_use_ = range->first_use();
1767 first_register_beneficial_use_ = range->first_use();
1768 first_hinted_use_ = range->first_use();
1769}
1770
1771bool AllocationFinger::Advance(const intptr_t start) {
1772 UseInterval* a = first_pending_use_interval_;
1773 while (a != nullptr && a->end() <= start)
1774 a = a->next();
1775 first_pending_use_interval_ = a;
1776 return (first_pending_use_interval_ == nullptr);
1777}
1778
1779Location AllocationFinger::FirstHint() {
 1780 UsePosition* use = first_hinted_use_;
1781
1782 while (use != nullptr) {
1783 if (use->HasHint()) return use->hint();
1784 use = use->next();
1785 }
1786
1787 return Location::NoLocation();
1788}
1789
1790static UsePosition* FirstUseAfter(UsePosition* use, intptr_t after) {
1791 while ((use != nullptr) && (use->pos() < after)) {
1792 use = use->next();
1793 }
1794 return use;
1795}
1796
1797UsePosition* AllocationFinger::FirstRegisterUse(intptr_t after) {
 1798 for (UsePosition* use = FirstUseAfter(first_register_use_, after);
 1799 use != nullptr; use = use->next()) {
 1800 Location* loc = use->location_slot();
 1801 if (loc->IsUnallocated() &&
 1802 ((loc->policy() == Location::kRequiresRegister) ||
 1803 (loc->policy() == Location::kWritableRegister))) {
 1804 first_register_use_ = use;
1805 return use;
1806 }
1807 }
1808 return nullptr;
1809}
1810
1811UsePosition* AllocationFinger::FirstRegisterBeneficialUse(intptr_t after) {
 1812 for (UsePosition* use = FirstUseAfter(first_register_beneficial_use_, after);
1813 use != nullptr; use = use->next()) {
1814 Location* loc = use->location_slot();
1815 if (loc->IsUnallocated() && loc->IsRegisterBeneficial()) {
1816 first_register_beneficial_use_ = use;
1817 return use;
1818 }
1819 }
1820 return nullptr;
1821}
1822
1823UsePosition* AllocationFinger::FirstInterferingUse(intptr_t after) {
 1824 if (IsInstructionEndPosition(after)) {
 1825 // If |after| is a position at the end of an instruction, disregard
 1826 // any use occurring at it.
 1827 after += 1;
1828 }
1829 return FirstRegisterUse(after);
1830}
1831
1832void AllocationFinger::UpdateAfterSplit(intptr_t first_use_after_split_pos) {
1833 if ((first_register_use_ != nullptr) &&
1834 (first_register_use_->pos() >= first_use_after_split_pos)) {
1835 first_register_use_ = nullptr;
1836 }
1837
1838 if ((first_register_beneficial_use_ != nullptr) &&
1839 (first_register_beneficial_use_->pos() >= first_use_after_split_pos)) {
1840 first_register_beneficial_use_ = nullptr;
1841 }
1842}
1843
1844intptr_t UseInterval::Intersect(const UseInterval* other) const {
 1845 if (this->start() <= other->start()) {
1846 if (other->start() < this->end()) return other->start();
1847 } else if (this->start() < other->end()) {
1848 return this->start();
1849 }
1850 return kIllegalPosition;
1851}
1852
1853static intptr_t FirstIntersection(UseInterval* a, UseInterval* u) {
 1854 while (a != nullptr && u != nullptr) {
1855 const intptr_t pos = a->Intersect(u);
1856 if (pos != kIllegalPosition) return pos;
1857
1858 if (a->start() < u->start()) {
1859 a = a->next();
1860 } else {
1861 u = u->next();
1862 }
1863 }
1864
1865 return kMaxPosition;
1866}
1867
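// Splits a singly-linked list of positions (UsePosition or SafepointPosition)
// at |split_pos|: positions before the split stay on the original list, the
// rest are detached and returned as the head of a new list. When
// |split_at_start| is true, a position exactly at |split_pos| goes to the
// returned part; otherwise it stays with the original list.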
1868template <typename PositionType>
1869PositionType* SplitListOfPositions(PositionType** head,
1870 intptr_t split_pos,
1871 bool split_at_start) {
1872 PositionType* last_before_split = nullptr;
1873 PositionType* pos = *head;
1874 if (split_at_start) {
1875 while ((pos != nullptr) && (pos->pos() < split_pos)) {
1876 last_before_split = pos;
1877 pos = pos->next();
1878 }
1879 } else {
1880 while ((pos != nullptr) && (pos->pos() <= split_pos)) {
1881 last_before_split = pos;
1882 pos = pos->next();
1883 }
1884 }
1885
1886 if (last_before_split == nullptr) {
1887 *head = nullptr;
1888 } else {
1889 last_before_split->set_next(nullptr);
1890 }
1891
1892 return pos;
1893}
1894
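// Splits this live range at |split_pos| and returns the newly created sibling
// covering [split_pos, End()); this range keeps [Start(), split_pos) together
// with the uses and safepoints that precede the split position.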
1895LiveRange* LiveRange::SplitAt(intptr_t split_pos) {
1896 if (Start() == split_pos) return this;
1897
1898 UseInterval* interval = finger_.first_pending_use_interval();
1899 if (interval == nullptr) {
1900 finger_.Initialize(this);
1901 interval = finger_.first_pending_use_interval();
1902 }
1903
1904 ASSERT(split_pos < End());
1905
 1906 // Corner case. Split position can be inside a lifetime hole or at its
1907 // end. We need to start over to find the previous interval.
1908 if (split_pos <= interval->start()) interval = first_use_interval_;
1909
1910 UseInterval* last_before_split = nullptr;
1911 while (interval->end() <= split_pos) {
1912 last_before_split = interval;
1913 interval = interval->next();
1914 }
1915
1916 const bool split_at_start = (interval->start() == split_pos);
1917
1918 UseInterval* first_after_split = interval;
1919 if (!split_at_start && interval->Contains(split_pos)) {
1920 first_after_split =
1921 new UseInterval(split_pos, interval->end(), interval->next());
1922 interval->end_ = split_pos;
1923 interval->next_ = first_after_split;
1924 last_before_split = interval;
1925 }
1926
1927 ASSERT(last_before_split != nullptr);
1928 ASSERT(last_before_split->next() == first_after_split);
1929 ASSERT(last_before_split->end() <= split_pos);
1930 ASSERT(split_pos <= first_after_split->start());
1931
1932 UsePosition* first_use_after_split =
1933 SplitListOfPositions(&uses_, split_pos, split_at_start);
1934
1935 SafepointPosition* first_safepoint_after_split =
1936 SplitListOfPositions(&first_safepoint_, split_pos, split_at_start);
1937
1938 UseInterval* last_use_interval = (last_before_split == last_use_interval_)
1939 ? first_after_split
1940 : last_use_interval_;
1941 next_sibling_ = new LiveRange(vreg(), representation(), first_use_after_split,
1942 first_after_split, last_use_interval,
1943 first_safepoint_after_split, next_sibling_);
1944
1945 TRACE_ALLOC(THR_Print(" split sibling [%" Pd ", %" Pd ")\n",
1946 next_sibling_->Start(), next_sibling_->End()));
1947
1948 last_use_interval_ = last_before_split;
1949 last_use_interval_->next_ = nullptr;
1950
1951 if (first_use_after_split != nullptr) {
1952 finger_.UpdateAfterSplit(first_use_after_split->pos());
1953 }
1954
1955 return next_sibling_;
1956}
1957
1958LiveRange* FlowGraphAllocator::SplitBetween(LiveRange* range,
1959 intptr_t from,
1960 intptr_t to) {
1961 TRACE_ALLOC(THR_Print("split v%" Pd " [%" Pd ", %" Pd ") between [%" Pd
1962 ", %" Pd ")\n",
1963 range->vreg(), range->Start(), range->End(), from, to));
1964
1965 intptr_t split_pos = kIllegalPosition;
1966
1967 BlockEntryInstr* split_block_entry = BlockEntryAt(to);
1968 ASSERT(split_block_entry == InstructionAt(to)->GetBlock());
1969
1970 if (from < GetLifetimePosition(split_block_entry)) {
1971 // Interval [from, to) spans multiple blocks.
1972
1973 // If the last block is inside a loop, prefer splitting at the outermost
1974 // loop's header that follows the definition. Note that, as illustrated
1975 // below, if the potential split S linearly appears inside a loop, even
1976 // though it technically does not belong to the natural loop, we still
1977 // prefer splitting at the header H. Splitting in the "middle" of the loop
1978 // would disconnect the prefix of the loop from any block X that follows,
1979 // increasing the chance of "disconnected" allocations.
1980 //
1981 // +--------------------+
1982 // v |
1983 // |loop| |loop|
1984 // . . . . . . . . . . . . . . . . . . . . .
1985 // def------------use -----------
1986 // ^ ^ ^
1987 // H S X
1988 LoopInfo* loop_info = split_block_entry->loop_info();
1989 if (loop_info == nullptr) {
1990 const LoopHierarchy& loop_hierarchy = flow_graph_.loop_hierarchy();
1991 const intptr_t num_loops = loop_hierarchy.num_loops();
1992 for (intptr_t i = 0; i < num_loops; i++) {
1993 if (extra_loop_info_[i]->start < to && to < extra_loop_info_[i]->end) {
1994 // Split loop found!
1995 loop_info = loop_hierarchy.headers()[i]->loop_info();
1996 break;
1997 }
1998 }
1999 }
2000 while ((loop_info != nullptr) &&
2001 (from < GetLifetimePosition(loop_info->header()))) {
2002 split_block_entry = loop_info->header();
2003 loop_info = loop_info->outer();
2004 TRACE_ALLOC(THR_Print(" move back to loop header B%" Pd " at %" Pd "\n",
2005 split_block_entry->block_id(),
2006 GetLifetimePosition(split_block_entry)));
2007 }
2008
2009 // Split at block's start.
2010 split_pos = GetLifetimePosition(split_block_entry);
2011 } else {
2012 // Interval [from, to) is contained inside a single block.
2013
2014 // Split at position corresponding to the end of the previous
2015 // instruction.
2016 split_pos = ToInstructionStart(to) - 1;
2017 }
2018
2019 ASSERT(split_pos != kIllegalPosition);
2020 ASSERT(from < split_pos);
2021
2022 return range->SplitAt(split_pos);
2023}
2024
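// Note: the range is split at |from|; whatever part of the tail overlaps
// [from, to) is spilled, and anything extending past the overlap is split off
// again and returned to the unallocated worklist.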
2025void FlowGraphAllocator::SpillBetween(LiveRange* range,
2026 intptr_t from,
2027 intptr_t to) {
2028 ASSERT(from < to);
2029 TRACE_ALLOC(THR_Print("spill v%" Pd " [%" Pd ", %" Pd ") "
2030 "between [%" Pd ", %" Pd ")\n",
2031 range->vreg(), range->Start(), range->End(), from, to));
2032 LiveRange* tail = range->SplitAt(from);
2033
2034 if (tail->Start() < to) {
2035 // There is an intersection of tail and [from, to).
2036 LiveRange* tail_tail = SplitBetween(tail, tail->Start(), to);
2037 Spill(tail);
2038 AddToUnallocated(tail_tail);
2039 } else {
2040 // No intersection between tail and [from, to).
2041 AddToUnallocated(tail);
2042 }
2043}
2044
2045void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) {
2046 TRACE_ALLOC(THR_Print("spill v%" Pd " [%" Pd ", %" Pd ") after %" Pd "\n",
2047 range->vreg(), range->Start(), range->End(), from));
2048
2049 // When spilling the value inside the loop check if this spill can
2050 // be moved outside.
2051 LoopInfo* loop_info = BlockEntryAt(from)->loop_info();
2052 if (loop_info != nullptr) {
2053 if ((range->Start() <= loop_info->header()->start_pos()) &&
2054 RangeHasOnlyUnconstrainedUsesInLoop(range, loop_info->id())) {
2055 ASSERT(loop_info->header()->start_pos() <= from);
2056 from = loop_info->header()->start_pos();
 2057 TRACE_ALLOC(
 2058 THR_Print(" moved spill position to loop header %" Pd "\n", from));
2059 }
2060 }
2061
2062 LiveRange* tail = range->SplitAt(from);
2063 Spill(tail);
2064}
2065
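// Spill slots are reused across live ranges: spill_slots_[i] records the
// position at which the value currently occupying slot i dies, so a slot
// whose recorded end does not extend past the new range's start can be
// recycled (subject to the quad/untagged constraints checked below).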
2066void FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) {
2067 ASSERT(range->spill_slot().IsInvalid());
2068
2069 // Compute range start and end.
2070 LiveRange* last_sibling = range;
2071 while (last_sibling->next_sibling() != nullptr) {
2072 last_sibling = last_sibling->next_sibling();
2073 }
2074
2075 const intptr_t start = range->Start();
2076 const intptr_t end = last_sibling->End();
2077
 2078 // During FPU register allocation spill slot indices are computed in terms of
 2079 // double (64-bit) stack slots. We treat a quad (128-bit) stack slot as a
 2080 // consecutive pair of two double spill slots.
 2081 // Special care is taken to never allocate the same index to both
 2082 // double and quad spill slots, as that would complicate disambiguation during
 2083 // parallel move resolution.
2084 const bool need_quad = (register_kind_ == Location::kFpuRegister) &&
2085 ((range->representation() == kUnboxedFloat32x4) ||
2086 (range->representation() == kUnboxedInt32x4) ||
2087 (range->representation() == kUnboxedFloat64x2));
2088 const bool need_untagged = (register_kind_ == Location::kRegister) &&
2089 ((range->representation() == kUntagged));
2090
 2091 // Search for a free spill slot among the already allocated ones: the value in
 2092 // it should be dead and its type should match (e.g. it should not be part of a
 2093 // quad slot if we are allocating a normal double slot).
 2094 // For CPU registers we need to take the slots reserved for try-catch into
 2095 // account.
2096 intptr_t idx = register_kind_ == Location::kRegister
2097 ? flow_graph_.graph_entry()->fixed_slot_count()
2098 : 0;
2099 for (; idx < spill_slots_.length(); idx++) {
2100 if ((need_quad == quad_spill_slots_[idx]) &&
2101 (need_untagged == untagged_spill_slots_[idx]) &&
2102 (spill_slots_[idx] <= start)) {
2103 break;
2104 }
2105 }
2106
2107 while (idx > spill_slots_.length()) {
2108 spill_slots_.Add(kMaxPosition);
2109 quad_spill_slots_.Add(false);
2110 untagged_spill_slots_.Add(false);
2111 }
2112
2113 if (idx == spill_slots_.length()) {
2114 idx = spill_slots_.length();
2115
2116 // No free spill slot found. Allocate a new one.
2117 spill_slots_.Add(0);
2118 quad_spill_slots_.Add(need_quad);
2119 untagged_spill_slots_.Add(need_untagged);
2120 if (need_quad) { // Allocate two double stack slots if we need quad slot.
2121 spill_slots_.Add(0);
2122 quad_spill_slots_.Add(need_quad);
2123 untagged_spill_slots_.Add(need_untagged);
2124 }
2125 }
2126
2127 // Set spill slot expiration boundary to the live range's end.
2128 spill_slots_[idx] = end;
2129 if (need_quad) {
2130 ASSERT(quad_spill_slots_[idx] && quad_spill_slots_[idx + 1]);
 2131 idx++; // Use the higher index; it corresponds to the lower stack address.
2132 spill_slots_[idx] = end;
2133 } else {
2134 ASSERT(!quad_spill_slots_[idx]);
2135 }
2136
2137 // Assign spill slot to the range.
2138 if (RepresentationUtils::IsUnboxedInteger(range->representation()) ||
2139 range->representation() == kTagged ||
2140 range->representation() == kPairOfTagged ||
2141 range->representation() == kUntagged) {
 2142 const intptr_t slot_index =
 2143 compiler::target::frame_layout.FrameSlotForVariableIndex(-idx);
 2144 range->set_spill_slot(Location::StackSlot(slot_index, FPREG));
 2145 } else {
 2146 // We use the index of the slot with the lowest address as an index for the
 2147 // FPU register spill slot. In terms of indices this relation is inverted, so
 2148 // we have to take the highest index.
 2149 const intptr_t slot_idx =
 2150 compiler::target::frame_layout.FrameSlotForVariableIndex(
 2151 -(cpu_spill_slot_count_ + idx * kDoubleSpillFactor +
 2152 (kDoubleSpillFactor - 1)));
2153
2154 Location location;
2155 if ((range->representation() == kUnboxedFloat32x4) ||
2156 (range->representation() == kUnboxedInt32x4) ||
2157 (range->representation() == kUnboxedFloat64x2)) {
2158 ASSERT(need_quad);
2159 location = Location::QuadStackSlot(slot_idx, FPREG);
2160 } else {
2161 ASSERT(range->representation() == kUnboxedFloat ||
2162 range->representation() == kUnboxedDouble);
2163 location = Location::DoubleStackSlot(slot_idx, FPREG);
2164 }
2165 range->set_spill_slot(location);
2166 }
2167
2168 spilled_.Add(range);
2169}
2170
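// Sets the stack bit for the range's spill slot in the stack map of every
// safepoint covered by the range and its siblings, so that the GC treats the
// slot as holding a tagged object while the value lives there.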
2171void FlowGraphAllocator::MarkAsObjectAtSafepoints(LiveRange* range) {
2172 Location spill_slot = range->spill_slot();
2173 intptr_t stack_index = spill_slot.stack_index();
 2174 if (spill_slot.base_reg() == FPREG) {
 2175 stack_index = -compiler::target::frame_layout.VariableIndexForFrameSlot(
 2176 spill_slot.stack_index());
2177 }
2178 ASSERT(stack_index >= 0);
2179 while (range != nullptr) {
2180 for (SafepointPosition* safepoint = range->first_safepoint();
2181 safepoint != nullptr; safepoint = safepoint->next()) {
2182 // Mark the stack slot as having an object.
2183 safepoint->locs()->SetStackBit(stack_index);
2184 }
2185 range = range->next_sibling();
2186 }
2187}
2188
2189void FlowGraphAllocator::Spill(LiveRange* range) {
2190 LiveRange* parent = GetLiveRange(range->vreg());
2191 if (parent->spill_slot().IsInvalid()) {
2192 AllocateSpillSlotFor(parent);
2193 if (range->representation() == kTagged) {
2194 MarkAsObjectAtSafepoints(parent);
2195 }
2196 }
2197 range->set_assigned_location(parent->spill_slot());
2198 ConvertAllUses(range);
2199}
2200
2201void FlowGraphAllocator::AllocateSpillSlotForSuspendState() {
2202 if (flow_graph_.parsed_function().suspend_state_var() == nullptr) {
2203 return;
2204 }
2205
2206 spill_slots_.Add(kMaxPosition);
2207 quad_spill_slots_.Add(false);
2208 untagged_spill_slots_.Add(false);
2209
2210#if defined(DEBUG)
 2211 const intptr_t stack_index =
 2212 -compiler::target::frame_layout.VariableIndexForFrameSlot(
 2213 compiler::target::frame_layout.FrameSlotForVariable(
2214 flow_graph_.parsed_function().suspend_state_var()));
2215 ASSERT(stack_index == spill_slots_.length() - 1);
2216#endif
2217}
2218
2219void FlowGraphAllocator::UpdateStackmapsForSuspendState() {
2220 if (flow_graph_.parsed_function().suspend_state_var() == nullptr) {
2221 return;
2222 }
2223
 2224 const intptr_t stack_index =
 2225 -compiler::target::frame_layout.VariableIndexForFrameSlot(
 2226 compiler::target::frame_layout.FrameSlotForVariable(
2227 flow_graph_.parsed_function().suspend_state_var()));
2228 ASSERT(stack_index >= 0);
2229
2230 for (intptr_t i = 0, n = safepoints_.length(); i < n; ++i) {
2231 Instruction* safepoint_instr = safepoints_[i];
2232 safepoint_instr->locs()->SetStackBit(stack_index);
2233 }
2234}
2235
2236intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated(
2237 intptr_t reg,
2238 LiveRange* unallocated) {
2239 intptr_t intersection = kMaxPosition;
2240 for (intptr_t i = 0; i < registers_[reg]->length(); i++) {
2241 LiveRange* allocated = (*registers_[reg])[i];
2242 if (allocated == nullptr) continue;
2243
2244 UseInterval* allocated_head =
2245 allocated->finger()->first_pending_use_interval();
2246 if (allocated_head->start() >= intersection) continue;
2247
2248 const intptr_t pos = FirstIntersection(
2249 unallocated->finger()->first_pending_use_interval(), allocated_head);
2250 if (pos < intersection) intersection = pos;
2251 }
2252 return intersection;
2253}
2254
2255void ReachingDefs::AddPhi(PhiInstr* phi) {
2256 if (phi->reaching_defs() == nullptr) {
2257 Zone* zone = flow_graph_.zone();
2258 phi->set_reaching_defs(new (zone) BitVector(zone, flow_graph_.max_vreg()));
2259
 2260 // Compute the initial reaching defs set.
2261 bool depends_on_phi = false;
2262 for (intptr_t i = 0; i < phi->InputCount(); i++) {
2263 Definition* input = phi->InputAt(i)->definition();
2264 if (input->IsPhi()) {
2265 depends_on_phi = true;
2266 }
2267 phi->reaching_defs()->Add(input->vreg(0));
2268 if (phi->HasPairRepresentation()) {
2269 phi->reaching_defs()->Add(input->vreg(1));
2270 }
2271 }
2272
2273 // If this phi depends on another phi then we need fix point iteration.
2274 if (depends_on_phi) phis_.Add(phi);
2275 }
2276}
2277
2278void ReachingDefs::Compute() {
2279 // Transitively collect all phis that are used by the given phi.
2280 for (intptr_t i = 0; i < phis_.length(); i++) {
2281 PhiInstr* phi = phis_[i];
2282
2283 // Add all phis that affect this phi to the list.
2284 for (intptr_t i = 0; i < phi->InputCount(); i++) {
2285 PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi();
2286 if (input_phi != nullptr) {
2287 AddPhi(input_phi);
2288 }
2289 }
2290 }
2291
2292 // Propagate values until fix point is reached.
2293 bool changed;
2294 do {
2295 changed = false;
2296 for (intptr_t i = 0; i < phis_.length(); i++) {
2297 PhiInstr* phi = phis_[i];
2298 for (intptr_t i = 0; i < phi->InputCount(); i++) {
2299 PhiInstr* input_phi = phi->InputAt(i)->definition()->AsPhi();
2300 if (input_phi != nullptr) {
2301 if (phi->reaching_defs()->AddAll(input_phi->reaching_defs())) {
2302 changed = true;
2303 }
2304 }
2305 }
2306 }
2307 } while (changed);
2308
2309 phis_.Clear();
2310}
2311
2312BitVector* ReachingDefs::Get(PhiInstr* phi) {
 2313 if (phi->reaching_defs() == nullptr) {
2314 ASSERT(phis_.is_empty());
2315 AddPhi(phi);
2316 Compute();
2317 }
2318 return phi->reaching_defs();
2319}
2320
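// Roughly: prefer the hinted register when it is not blocked, otherwise take
// a register with no ranges allocated to it, and finally fall back to the
// register that stays free the longest. If the chosen register is not free
// for the whole range, the range is split at |free_until| and the tail is
// re-queued. Returns false when no register is free past the range's start.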
2321bool FlowGraphAllocator::AllocateFreeRegister(LiveRange* unallocated) {
2322 intptr_t candidate = kNoRegister;
2323 intptr_t free_until = 0;
2324
2325 // If hint is available try hint first.
2326 // TODO(vegorov): ensure that phis are hinted on the back edge.
2327 Location hint = unallocated->finger()->FirstHint();
2328
 2329 // Handle special case for incoming register values (see
 2330 // ProcessInitialDefinition): we implement them differently from fixed outputs,
 2331 // which use a prefilled ParallelMove, but that means there is no hinted
 2332 // use created and as a result we would produce worse code by assigning a
 2333 // random free register.
2334 if (!hint.IsMachineRegister() && unallocated->vreg() >= 0) {
2335 auto* parent_range = GetLiveRange(unallocated->vreg());
2336 if (parent_range->End() == unallocated->Start() &&
2337 !IsBlockEntry(unallocated->Start()) &&
2338 parent_range->assigned_location().IsMachineRegister()) {
2339 hint = parent_range->assigned_location();
2340 }
2341 }
2342
2343 if (hint.IsMachineRegister()) {
2344 if (!blocked_registers_[hint.register_code()]) {
2345 free_until =
2346 FirstIntersectionWithAllocated(hint.register_code(), unallocated);
2347 candidate = hint.register_code();
2348 }
2349
2350 TRACE_ALLOC(THR_Print("found hint %s for v%" Pd ": free until %" Pd "\n",
2351 hint.Name(), unallocated->vreg(), free_until));
2352 } else {
2353 for (intptr_t i = 0; i < NumberOfRegisters(); ++i) {
2354 intptr_t reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2355 if (!blocked_registers_[reg] && (registers_[reg]->length() == 0)) {
2356 candidate = reg;
2357 free_until = kMaxPosition;
2358 break;
2359 }
2360 }
2361 }
2362
2363 ASSERT(0 <= kMaxPosition);
2364 if (free_until != kMaxPosition) {
2365 for (intptr_t i = 0; i < NumberOfRegisters(); ++i) {
2366 intptr_t reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2367 if (blocked_registers_[reg] || (reg == candidate)) continue;
2368 const intptr_t intersection =
2369 FirstIntersectionWithAllocated(reg, unallocated);
2370 if (intersection > free_until) {
2371 candidate = reg;
2372 free_until = intersection;
2373 if (free_until == kMaxPosition) break;
2374 }
2375 }
2376 }
2377
2378 // All registers are blocked by active ranges.
2379 if (free_until <= unallocated->Start()) return false;
2380
2381 // We have a very good candidate (either hinted to us or completely free).
 2382 // If we are in a loop, try to reduce the number of moves on the back edge by
2383 // searching for a candidate that does not interfere with phis on the back
2384 // edge.
2385 LoopInfo* loop_info = BlockEntryAt(unallocated->Start())->loop_info();
2386 if ((unallocated->vreg() >= 0) && (loop_info != nullptr) &&
2387 (free_until >= extra_loop_info_[loop_info->id()]->end) &&
2388 extra_loop_info_[loop_info->id()]->backedge_interference->Contains(
2389 unallocated->vreg())) {
2390 GrowableArray<bool> used_on_backedge(number_of_registers_);
2391 for (intptr_t i = 0; i < number_of_registers_; i++) {
2392 used_on_backedge.Add(false);
2393 }
2394
2395 for (PhiIterator it(loop_info->header()->AsJoinEntry()); !it.Done();
2396 it.Advance()) {
2397 PhiInstr* phi = it.Current();
2398 ASSERT(phi->is_alive());
2399 const intptr_t phi_vreg = phi->vreg(0);
2400 LiveRange* range = GetLiveRange(phi_vreg);
2401 if (range->assigned_location().kind() == register_kind_) {
2402 const intptr_t reg = range->assigned_location().register_code();
2403 if (!reaching_defs_.Get(phi)->Contains(unallocated->vreg())) {
2404 used_on_backedge[reg] = true;
2405 }
2406 }
2407 if (phi->HasPairRepresentation()) {
2408 const intptr_t second_phi_vreg = phi->vreg(1);
2409 LiveRange* second_range = GetLiveRange(second_phi_vreg);
2410 if (second_range->assigned_location().kind() == register_kind_) {
2411 const intptr_t reg =
2412 second_range->assigned_location().register_code();
2413 if (!reaching_defs_.Get(phi)->Contains(unallocated->vreg())) {
2414 used_on_backedge[reg] = true;
2415 }
2416 }
2417 }
2418 }
2419
2420 if (used_on_backedge[candidate]) {
2421 TRACE_ALLOC(THR_Print("considering %s for v%" Pd
2422 ": has interference on the back edge"
2423 " {loop [%" Pd ", %" Pd ")}\n",
2424 MakeRegisterLocation(candidate).Name(),
2425 unallocated->vreg(),
2426 extra_loop_info_[loop_info->id()]->start,
2427 extra_loop_info_[loop_info->id()]->end));
2428 for (intptr_t i = 0; i < NumberOfRegisters(); ++i) {
2429 intptr_t reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2430 if (blocked_registers_[reg] || (reg == candidate) ||
2431 used_on_backedge[reg]) {
2432 continue;
2433 }
2434
2435 const intptr_t intersection =
2436 FirstIntersectionWithAllocated(reg, unallocated);
2437 if (intersection >= free_until) {
2438 candidate = reg;
 2439 free_until = intersection;
 2440 TRACE_ALLOC(THR_Print(
 2441 "found %s for v%" Pd " with no interference on the back edge\n",
2442 MakeRegisterLocation(candidate).Name(), candidate));
2443 break;
2444 }
2445 }
2446 }
2447 }
2448
2449 if (free_until != kMaxPosition) {
2450 // There was an intersection. Split unallocated.
2451 TRACE_ALLOC(THR_Print(" splitting at %" Pd "\n", free_until));
2452 LiveRange* tail = unallocated->SplitAt(free_until);
2453 AddToUnallocated(tail);
2454
2455 // If unallocated represents a constant value and does not have
2456 // any uses then avoid using a register for it.
2457 if (unallocated->first_use() == nullptr) {
2458 if (unallocated->vreg() >= 0) {
2459 LiveRange* parent = GetLiveRange(unallocated->vreg());
2460 if (parent->spill_slot().IsConstant()) {
2461 Spill(unallocated);
2462 return true;
2463 }
2464 }
2465 }
2466 }
2467
2468 TRACE_ALLOC(THR_Print(" assigning free register "));
2469 TRACE_ALLOC(MakeRegisterLocation(candidate).Print());
2470 TRACE_ALLOC(THR_Print(" to v%" Pd "\n", unallocated->vreg()));
2471
2472 registers_[candidate]->Add(unallocated);
2473 unallocated->set_assigned_location(MakeRegisterLocation(candidate));
2474 return true;
2475}
2476
2477bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range,
2478 intptr_t loop_id) {
2479 if (range->vreg() >= 0) {
2480 LiveRange* parent = GetLiveRange(range->vreg());
2481 return parent->HasOnlyUnconstrainedUsesInLoop(loop_id);
2482 }
2483 return false;
2484}
2485
2486bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(LoopInfo* loop_info,
2487 intptr_t reg) {
2488 const intptr_t loop_start = extra_loop_info_[loop_info->id()]->start;
2489 const intptr_t loop_end = extra_loop_info_[loop_info->id()]->end;
2490
2491 for (intptr_t i = 0; i < registers_[reg]->length(); i++) {
2492 LiveRange* allocated = (*registers_[reg])[i];
2493 UseInterval* interval = allocated->finger()->first_pending_use_interval();
2494 if (interval->Contains(loop_start)) {
2495 if (!RangeHasOnlyUnconstrainedUsesInLoop(allocated, loop_info->id())) {
2496 return false;
2497 }
2498 } else if (interval->start() < loop_end) {
2499 return false;
2500 }
2501 }
2502
2503 return true;
2504}
2505
2506bool FlowGraphAllocator::HasCheapEvictionCandidate(LiveRange* phi_range) {
2507 ASSERT(phi_range->is_loop_phi());
2508
2509 BlockEntryInstr* header = BlockEntryAt(phi_range->Start());
2510
2511 ASSERT(header->IsLoopHeader());
2512 ASSERT(phi_range->Start() == header->start_pos());
2513
2514 for (intptr_t reg = 0; reg < NumberOfRegisters(); ++reg) {
2515 if (blocked_registers_[reg]) continue;
2516 if (IsCheapToEvictRegisterInLoop(header->loop_info(), reg)) {
2517 return true;
2518 }
2519 }
2520
2521 return false;
2522}
2523
2524void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) {
 2525 // If a loop phi has no register uses we might still want to allocate it
 2526 // to a register to reduce the number of memory moves on the back edge.
 2527 // This is possible if there is a register blocked by a range that can be
 2528 // cheaply evicted, i.e. it has no register-beneficial uses inside the
 2529 // loop.
2530 UsePosition* register_use =
2531 unallocated->finger()->FirstRegisterUse(unallocated->Start());
2532 if ((register_use == nullptr) &&
2533 !(unallocated->is_loop_phi() && HasCheapEvictionCandidate(unallocated))) {
2534 Spill(unallocated);
2535 return;
2536 }
2537
2538 intptr_t candidate = kNoRegister;
2539 intptr_t free_until = 0;
2540 intptr_t blocked_at = kMaxPosition;
2541
2542 for (int i = 0; i < NumberOfRegisters(); ++i) {
2543 int reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2544 if (blocked_registers_[reg]) continue;
2545 if (UpdateFreeUntil(reg, unallocated, &free_until, &blocked_at)) {
2546 candidate = reg;
2547 }
2548 }
2549
2550 const intptr_t register_use_pos =
2551 (register_use != nullptr) ? register_use->pos() : unallocated->Start();
2552 if (free_until < register_use_pos) {
 2553 // Can't acquire a free register. Spill until we really need one.
2554 ASSERT(unallocated->Start() < ToInstructionStart(register_use_pos));
2555 SpillBetween(unallocated, unallocated->Start(), register_use->pos());
2556 return;
2557 }
2558
2559 ASSERT(candidate != kNoRegister);
2560
2561 TRACE_ALLOC(THR_Print("assigning blocked register "));
2562 TRACE_ALLOC(MakeRegisterLocation(candidate).Print());
2563 TRACE_ALLOC(THR_Print(" to live range v%" Pd " until %" Pd "\n",
2564 unallocated->vreg(), blocked_at));
2565
2566 if (blocked_at < unallocated->End()) {
 2567 // Register is blocked before the end of the live range. Split the range
 2568 // no later than the blocked_at position.
2569 LiveRange* tail =
2570 SplitBetween(unallocated, unallocated->Start(), blocked_at + 1);
2571 AddToUnallocated(tail);
2572 }
2573
2574 AssignNonFreeRegister(unallocated, candidate);
2575}
2576
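// Note: estimates how long |reg| would stay usable for |unallocated| given
// the ranges currently assigned to it. Returns true (updating
// |cur_free_until| and |cur_blocked_at|) only when |reg| stays free strictly
// longer than the best candidate seen so far; returns false as soon as an
// unevictable interference is found or |reg| turns out to be no better.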
2577bool FlowGraphAllocator::UpdateFreeUntil(intptr_t reg,
2578 LiveRange* unallocated,
2579 intptr_t* cur_free_until,
2580 intptr_t* cur_blocked_at) {
2581 intptr_t free_until = kMaxPosition;
2582 intptr_t blocked_at = kMaxPosition;
2583 const intptr_t start = unallocated->Start();
2584
2585 for (intptr_t i = 0; i < registers_[reg]->length(); i++) {
2586 LiveRange* allocated = (*registers_[reg])[i];
2587
2588 UseInterval* first_pending_use_interval =
2589 allocated->finger()->first_pending_use_interval();
2590 if (first_pending_use_interval->Contains(start)) {
2591 // This is an active interval.
2592 if (allocated->vreg() < 0) {
 2593 // This register is blocked by an interval that
 2594 // can't be spilled.
2595 return false;
2596 }
2597
2598 UsePosition* use = allocated->finger()->FirstInterferingUse(start);
2599 if ((use != nullptr) && ((ToInstructionStart(use->pos()) - start) <= 1)) {
 2600 // This register is blocked by an interval that is used
 2601 // as a register in the current instruction and can't
 2602 // be spilled.
2603 return false;
2604 }
2605
2606 const intptr_t use_pos = (use != nullptr) ? use->pos() : allocated->End();
2607
2608 if (use_pos < free_until) free_until = use_pos;
2609 } else {
 2610 // This is an inactive interval.
2611 const intptr_t intersection = FirstIntersection(
2612 first_pending_use_interval, unallocated->first_use_interval());
2613 if (intersection != kMaxPosition) {
2614 if (intersection < free_until) free_until = intersection;
2615 if (allocated->vreg() == kNoVirtualRegister) blocked_at = intersection;
2616 }
2617 }
2618
2619 if (free_until <= *cur_free_until) {
2620 return false;
2621 }
2622 }
2623
2624 ASSERT(free_until > *cur_free_until);
2625 *cur_free_until = free_until;
2626 *cur_blocked_at = blocked_at;
2627 return true;
2628}
2629
2630void FlowGraphAllocator::RemoveEvicted(intptr_t reg, intptr_t first_evicted) {
2631 intptr_t to = first_evicted;
2632 intptr_t from = first_evicted + 1;
2633 while (from < registers_[reg]->length()) {
2634 LiveRange* allocated = (*registers_[reg])[from++];
2635 if (allocated != nullptr) (*registers_[reg])[to++] = allocated;
2636 }
2637 registers_[reg]->TruncateTo(to);
2638}
2639
2640void FlowGraphAllocator::AssignNonFreeRegister(LiveRange* unallocated,
2641 intptr_t reg) {
2642 intptr_t first_evicted = -1;
2643 for (intptr_t i = registers_[reg]->length() - 1; i >= 0; i--) {
2644 LiveRange* allocated = (*registers_[reg])[i];
2645 if (allocated->vreg() < 0) continue; // Can't be evicted.
2646 if (EvictIntersection(allocated, unallocated)) {
 2647 // If allocated was not spilled, convert all pending uses.
2648 if (allocated->assigned_location().IsMachineRegister()) {
2649 ASSERT(allocated->End() <= unallocated->Start());
2650 ConvertAllUses(allocated);
2651 }
2652 (*registers_[reg])[i] = nullptr;
2653 first_evicted = i;
2654 }
2655 }
2656
2657 // Remove evicted ranges from the array.
2658 if (first_evicted != -1) RemoveEvicted(reg, first_evicted);
2659
2660 registers_[reg]->Add(unallocated);
2661 unallocated->set_assigned_location(MakeRegisterLocation(reg));
2662}
2663
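// Evicts |allocated| from its register when it intersects |unallocated|: the
// conflicting part is spilled, either after the spill position (when there
// are no further register uses) or between the spill position and the point
// where a register is needed again. Returns false if the ranges do not
// intersect at all.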
2664bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated,
2665 LiveRange* unallocated) {
2666 UseInterval* first_unallocated =
2667 unallocated->finger()->first_pending_use_interval();
2668 const intptr_t intersection = FirstIntersection(
2669 allocated->finger()->first_pending_use_interval(), first_unallocated);
2670 if (intersection == kMaxPosition) return false;
2671
2672 const intptr_t spill_position = first_unallocated->start();
2673 UsePosition* use = allocated->finger()->FirstInterferingUse(spill_position);
2674 if (use == nullptr) {
2675 // No register uses after this point.
2676 SpillAfter(allocated, spill_position);
2677 } else {
2678 const intptr_t restore_position =
2679 (spill_position < intersection) ? MinPosition(intersection, use->pos())
2680 : use->pos();
2681
2682 SpillBetween(allocated, spill_position, restore_position);
2683 }
2684
2685 return true;
2686}
2687
2688MoveOperands* FlowGraphAllocator::AddMoveAt(intptr_t pos,
2689 Location to,
2690 Location from) {
2691 ASSERT(!IsBlockEntry(pos));
2692
 2693 // The GraphEntry (B0) no longer has any parameter instructions in it,
 2694 // so we should not attempt to add parallel moves to it.
2695 ASSERT(pos >= kNormalEntryPos);
2696
2697 ParallelMoveInstr* parallel_move = nullptr;
2698 Instruction* instr = InstructionAt(pos);
2699 if (auto entry = instr->AsFunctionEntry()) {
2700 // Parallel moves added to the FunctionEntry will be added after the block
2701 // entry.
2702 parallel_move = CreateParallelMoveAfter(entry, pos);
2703 } else if (IsInstructionStartPosition(pos)) {
2704 parallel_move = CreateParallelMoveBefore(instr, pos);
2705 } else {
2706 parallel_move = CreateParallelMoveAfter(instr, pos);
2707 }
2708
2709 return parallel_move->AddMove(to, from);
2710}
2711
2712void FlowGraphAllocator::ConvertUseTo(UsePosition* use, Location loc) {
2713 ASSERT(!loc.IsPairLocation());
2714 ASSERT(use->location_slot() != nullptr);
2715 Location* slot = use->location_slot();
2716 TRACE_ALLOC(THR_Print(" use at %" Pd " converted to ", use->pos()));
2717 TRACE_ALLOC(loc.Print());
2718 if (loc.IsConstant()) {
2719 TRACE_ALLOC(THR_Print(" %s", loc.constant().ToCString()));
2720 }
2721 TRACE_ALLOC(THR_Print("\n"));
2722 *slot = loc;
2723}
2724
2725void FlowGraphAllocator::ConvertAllUses(LiveRange* range) {
2726 if (range->vreg() == kNoVirtualRegister) return;
2727
2728 const Location loc = range->assigned_location();
2729 ASSERT(!loc.IsInvalid());
2730
2731 TRACE_ALLOC(THR_Print("range [%" Pd ", %" Pd ") "
2732 "for v%" Pd " has been allocated to ",
2733 range->Start(), range->End(), range->vreg()));
2734 TRACE_ALLOC(loc.Print());
2735 if (loc.IsConstant()) {
2736 TRACE_ALLOC(THR_Print(" %s", loc.constant().ToCString()));
2737 }
2738 TRACE_ALLOC(THR_Print(":\n"));
2739
2740 for (UsePosition* use = range->first_use(); use != nullptr;
2741 use = use->next()) {
2742 ConvertUseTo(use, loc);
2743 }
2744
2745 // Add live registers at all safepoints for instructions with slow-path
2746 // code.
2747 if (loc.IsMachineRegister()) {
2748 for (SafepointPosition* safepoint = range->first_safepoint();
2749 safepoint != nullptr; safepoint = safepoint->next()) {
2750 if (!safepoint->locs()->always_calls()) {
2751 ASSERT(safepoint->locs()->can_call());
2752 safepoint->locs()->live_registers()->Add(loc, range->representation());
2753 }
2754 }
2755 }
2756}
2757
2758void FlowGraphAllocator::AdvanceActiveIntervals(const intptr_t start) {
2759 for (intptr_t i = 0; i < NumberOfRegisters(); ++i) {
2760 intptr_t reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2761 if (registers_[reg]->is_empty()) continue;
2762
2763 intptr_t first_evicted = -1;
2764 for (intptr_t i = registers_[reg]->length() - 1; i >= 0; i--) {
2765 LiveRange* range = (*registers_[reg])[i];
2766 if (range->finger()->Advance(start)) {
2767 ConvertAllUses(range);
2768 (*registers_[reg])[i] = nullptr;
2769 first_evicted = i;
2770 }
2771 }
2772
2773 if (first_evicted != -1) RemoveEvicted(reg, first_evicted);
2774 }
2775}
2776
2777bool LiveRange::Contains(intptr_t pos) const {
2778 if (!CanCover(pos)) return false;
2779
2780 for (UseInterval* interval = first_use_interval_; interval != nullptr;
2781 interval = interval->next()) {
2782 if (interval->Contains(pos)) {
2783 return true;
2784 }
2785 }
2786
2787 return false;
2788}
2789
2790bool FlowGraphAllocator::IsLiveAfterCatchEntry(
2791 CatchBlockEntryInstr* catch_entry,
2792 ParameterInstr* param) {
2793 ASSERT(param->GetBlock() == catch_entry);
2794 auto* raw_exception_var = catch_entry->raw_exception_var();
2795 if (raw_exception_var != nullptr &&
2796 param->env_index() == flow_graph_.EnvIndex(raw_exception_var)) {
2797 return true;
2798 }
2799 auto* raw_stacktrace_var = catch_entry->raw_stacktrace_var();
2800 if (raw_stacktrace_var != nullptr &&
2801 param->env_index() == flow_graph_.EnvIndex(raw_stacktrace_var)) {
2802 return true;
2803 }
2804 return false;
2805}
2806
2807void FlowGraphAllocator::AssignSafepoints(Definition* defn, LiveRange* range) {
2808 for (intptr_t i = safepoints_.length() - 1; i >= 0; i--) {
2809 Instruction* safepoint_instr = safepoints_[i];
2810 if (safepoint_instr == defn) {
 2811 // The value is not live until after the definition is fully executed,
 2812 // so don't assign the safepoint inside the definition itself to the
 2813 // definition's live range.
2814 continue;
2815 }
2816 // Exception and stack trace parameters of CatchBlockEntry are live
2817 // only after catch block entry. Their spill slots should not be scanned
2818 // if GC occurs during a safepoint with a catch block entry PC
2819 // (before control is transferred to the catch entry).
2820 if (auto* catch_entry = safepoint_instr->AsCatchBlockEntry()) {
2821 if (auto* param = defn->AsParameter()) {
2822 if ((param->GetBlock() == catch_entry) &&
2823 IsLiveAfterCatchEntry(catch_entry, param)) {
2824 continue;
2825 }
2826 }
2827 }
2828 const intptr_t pos = GetLifetimePosition(safepoint_instr);
2829 if (range->End() <= pos) break;
2830
2831 if (range->Contains(pos)) {
2832 range->AddSafepoint(pos, safepoint_instr->locs());
2833 }
2834 }
2835}
2836
2837static bool ShouldBeAllocatedBefore(LiveRange* a, LiveRange* b) {
 2838 // TODO(vegorov): consider first hint position when ordering live ranges.
2839 return a->Start() <= b->Start();
2840}
2841
2842static void AddToSortedListOfRanges(GrowableArray<LiveRange*>* list,
 2843 LiveRange* range) {
2844 range->finger()->Initialize(range);
2845
2846 if (list->is_empty()) {
2847 list->Add(range);
2848 return;
2849 }
2850
2851 for (intptr_t i = list->length() - 1; i >= 0; i--) {
2852 if (ShouldBeAllocatedBefore(range, (*list)[i])) {
2853 list->InsertAt(i + 1, range);
2854 return;
2855 }
2856 }
2857 list->InsertAt(0, range);
2858}
2859
2860void FlowGraphAllocator::AddToUnallocated(LiveRange* range) {
2861 AddToSortedListOfRanges(&unallocated_, range);
2862}
2863
2864void FlowGraphAllocator::CompleteRange(LiveRange* range, Location::Kind kind) {
 2865 switch (kind) {
 2866 case Location::kRegister:
 2867 AddToSortedListOfRanges(&unallocated_cpu_, range);
 2868 break;
 2869
 2870 case Location::kFpuRegister:
 2871 AddToSortedListOfRanges(&unallocated_fpu_, range);
 2872 break;
2873
2874 default:
2875 UNREACHABLE();
2876 }
2877}
2878
2879#if defined(DEBUG)
2880bool FlowGraphAllocator::UnallocatedIsSorted() {
2881 for (intptr_t i = unallocated_.length() - 1; i >= 1; i--) {
2882 LiveRange* a = unallocated_[i];
2883 LiveRange* b = unallocated_[i - 1];
2884 if (!ShouldBeAllocatedBefore(a, b)) {
2885 a->Print();
2886 b->Print();
2887 UNREACHABLE();
2888 return false;
2889 }
2890 }
2891 return true;
2892}
2893#endif
2894
2895void FlowGraphAllocator::PrepareForAllocation(
2896 Location::Kind register_kind,
2897 intptr_t number_of_registers,
2898 const GrowableArray<LiveRange*>& unallocated,
2899 LiveRange** blocking_ranges,
2900 bool* blocked_registers) {
2901 register_kind_ = register_kind;
2902 number_of_registers_ = number_of_registers;
2903
2904 blocked_registers_.Clear();
2905 registers_.Clear();
2906 for (intptr_t i = 0; i < number_of_registers_; i++) {
2907 blocked_registers_.Add(false);
2908 registers_.Add(new ZoneGrowableArray<LiveRange*>);
2909 }
2910 ASSERT(unallocated_.is_empty());
2911 unallocated_.AddArray(unallocated);
2912
2913 for (intptr_t i = 0; i < NumberOfRegisters(); ++i) {
2914 intptr_t reg = (i + kRegisterAllocationBias) % NumberOfRegisters();
2915 blocked_registers_[reg] = blocked_registers[reg];
2916 ASSERT(registers_[reg]->is_empty());
2917
2918 LiveRange* range = blocking_ranges[reg];
2919 if (range != nullptr) {
2920 range->finger()->Initialize(range);
2921 registers_[reg]->Add(range);
2922 }
2923 }
2924}
2925
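// Main loop of the linear scan: |unallocated_| is kept sorted so that
// RemoveLast() always yields the pending range with the smallest start
// position. For each range the active intervals are advanced first, then a
// free register is tried, falling back to eviction or spilling via
// AllocateAnyRegister.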
2926void FlowGraphAllocator::AllocateUnallocatedRanges() {
2927#if defined(DEBUG)
2928 ASSERT(UnallocatedIsSorted());
2929#endif
2930
2931 while (!unallocated_.is_empty()) {
2932 LiveRange* range = unallocated_.RemoveLast();
2933 const intptr_t start = range->Start();
2934 TRACE_ALLOC(THR_Print("Processing live range for v%" Pd " "
2935 "starting at %" Pd "\n",
2936 range->vreg(), start));
2937
2938 // TODO(vegorov): eagerly spill liveranges without register uses.
2939 AdvanceActiveIntervals(start);
2940
2941 if (!AllocateFreeRegister(range)) {
2942 if (intrinsic_mode_) {
2943 // No spilling when compiling intrinsics.
2944 // TODO(fschneider): Handle spilling in intrinsics. For now, the
2945 // IR has to be built so that there are enough free registers.
2946 UNREACHABLE();
2947 }
2948 AllocateAnyRegister(range);
2949 }
2950 }
2951
2952 // All allocation decisions were done.
2953 ASSERT(unallocated_.is_empty());
2954
2955 // Finish allocation.
2956 AdvanceActiveIntervals(kMaxPosition);
2957 TRACE_ALLOC(THR_Print("Allocation completed\n"));
2958}
2959
2960bool FlowGraphAllocator::TargetLocationIsSpillSlot(LiveRange* range,
2961 Location target) {
2962 return GetLiveRange(range->vreg())->spill_slot().Equals(target);
2963}
2964
2965static LiveRange* FindCover(LiveRange* parent, intptr_t pos) {
2966 for (LiveRange* range = parent; range != nullptr;
2967 range = range->next_sibling()) {
2968 if (range->CanCover(pos)) {
2969 return range;
2970 }
2971 }
2972 UNREACHABLE();
2973 return nullptr;
2974}
2975
2976static bool AreLocationsAllTheSame(const GrowableArray<Location>& locs) {
 2977 for (intptr_t j = 1; j < locs.length(); j++) {
2978 if (!locs[j].Equals(locs[0])) {
2979 return false;
2980 }
2981 }
2982 return true;
2983}
2984
2985// Emit move on the edge from |pred| to |succ|.
2986static void EmitMoveOnEdge(BlockEntryInstr* succ,
 2987 BlockEntryInstr* pred,
2988 const MoveOperands& move) {
2989 Instruction* last = pred->last_instruction();
2990 if (last->IsGoto() && !pred->IsGraphEntry()) {
2991 ASSERT(last->SuccessorCount() == 1);
2992 last->AsGoto()->GetParallelMove()->AddMove(move.dest(), move.src());
2993 } else {
2994 succ->GetParallelMove()->AddMove(move.dest(), move.src());
2995 }
2996}
2997
2998void FlowGraphAllocator::ResolveControlFlow() {
2999 // Resolve linear control flow between touching split siblings
3000 // inside basic blocks.
3001 for (intptr_t vreg = 0; vreg < live_ranges_.length(); vreg++) {
3002 LiveRange* range = live_ranges_[vreg];
3003 if (range == nullptr) continue;
3004
3005 while (range->next_sibling() != nullptr) {
3006 LiveRange* sibling = range->next_sibling();
3007 TRACE_ALLOC(THR_Print("connecting [%" Pd ", %" Pd ") [", range->Start(),
3008 range->End()));
3009 TRACE_ALLOC(range->assigned_location().Print());
3010 TRACE_ALLOC(THR_Print("] to [%" Pd ", %" Pd ") [", sibling->Start(),
3011 sibling->End()));
3012 TRACE_ALLOC(sibling->assigned_location().Print());
3013 TRACE_ALLOC(THR_Print("]\n"));
3014 if ((range->End() == sibling->Start()) &&
3015 !TargetLocationIsSpillSlot(range, sibling->assigned_location()) &&
3016 !range->assigned_location().Equals(sibling->assigned_location()) &&
3017 !IsBlockEntry(range->End())) {
3018 AddMoveAt(sibling->Start(), sibling->assigned_location(),
3019 range->assigned_location());
3020 }
3021 range = sibling;
3022 }
3023 }
3024
 3025 // Resolve non-linear control flow across branches.
 3026 // At joins we attempt to sink duplicated moves from predecessors into the
 3027 // join itself as long as their source is not blocked by other moves.
 3028 // Moves which are candidates for sinking are collected in the |pending|
 3029 // array and we later compute which of them we can emit (|can_emit|)
 3030 // at the join itself.
3031 GrowableArray<Location> src_locs(2);
3032 GrowableArray<MoveOperands> pending(10);
3033 BitVector* can_emit = new BitVector(flow_graph_.zone(), 10);
3034 for (intptr_t i = 1; i < block_order_.length(); i++) {
3035 BlockEntryInstr* block = block_order_[i];
3036 BitVector* live = liveness_.GetLiveInSet(block);
3037 for (BitVector::Iterator it(live); !it.Done(); it.Advance()) {
3038 LiveRange* range = GetLiveRange(it.Current());
3039 if (range->next_sibling() == nullptr) {
3040 // Nothing to connect. The whole range was allocated to the same
3041 // location.
 3042 TRACE_ALLOC(
 3043 THR_Print("range v%" Pd " has no siblings\n", range->vreg()));
3044 continue;
3045 }
3046
3047 LiveRange* dst_cover = FindCover(range, block->start_pos());
3048 Location dst = dst_cover->assigned_location();
3049
3050 TRACE_ALLOC(THR_Print("range v%" Pd
3051 " is allocated to %s on entry to B%" Pd
3052 " covered by [%" Pd ", %" Pd ")\n",
3053 range->vreg(), dst.ToCString(), block->block_id(),
3054 dst_cover->Start(), dst_cover->End()));
3055
3056 if (TargetLocationIsSpillSlot(range, dst)) {
 3057 // Values are eagerly spilled. The spill slot already contains the
 3058 // appropriate value.
 3059 TRACE_ALLOC(
 3060 THR_Print(" [no resolution necessary - range is spilled]\n"));
3061 continue;
3062 }
3063
3064 src_locs.Clear();
3065 for (intptr_t j = 0; j < block->PredecessorCount(); j++) {
3066 BlockEntryInstr* pred = block->PredecessorAt(j);
3067 LiveRange* src_cover = FindCover(range, pred->end_pos() - 1);
3068 Location src = src_cover->assigned_location();
3069 src_locs.Add(src);
3070
3071 TRACE_ALLOC(THR_Print("| incoming value in %s on exit from B%" Pd
3072 " covered by [%" Pd ", %" Pd ")\n",
3073 src.ToCString(), pred->block_id(),
3074 src_cover->Start(), src_cover->End()));
3075 }
3076
 3077 // Check if all source locations are the same for the range. Then
 3078 // we can try to emit a single move at the destination if we can
 3079 // guarantee that the source location is available on all incoming edges
 3080 // (i.e. it is not destroyed by some other move).
3081 if ((src_locs.length() > 1) && AreLocationsAllTheSame(src_locs)) {
3082 if (!dst.Equals(src_locs[0])) {
 3083 // We have a non-redundant move which can potentially be performed
 3084 // at the start of the block; however, we will only be able to check
 3085 // whether the source location is alive on all incoming edges
 3086 // once we finish processing all live-in values.
3087 pending.Add(MoveOperands(dst, src_locs[0]));
3088 }
3089
3090 // Next incoming value.
3091 continue;
3092 }
3093
3094 for (intptr_t j = 0; j < block->PredecessorCount(); j++) {
3095 if (dst.Equals(src_locs[j])) {
3096 // Redundant move.
3097 continue;
3098 }
3099
3100 EmitMoveOnEdge(block, block->PredecessorAt(j), {dst, src_locs[j]});
3101 }
3102 }
3103
 3104 // For each pending move we need to check if it can be emitted into the
 3105 // destination block (the prerequisite for that is that the predecessors
 3106 // do not destroy the value in their Goto moves).
3107 if (pending.length() > 0) {
3108 if (can_emit->length() < pending.length()) {
3109 can_emit = new BitVector(flow_graph_.zone(), pending.length());
3110 }
3111 can_emit->SetAll();
3112
 3113 // Set to |true| when we discover more blocked pending moves and
 3114 // need to make another pass through pending moves to propagate that.
3115 bool changed = false;
3116
 3117 // Process all pending moves and check if any move in the predecessor
 3118 // blocks overwrites their source, which would block them.
3119 for (intptr_t j = 0; j < pending.length(); j++) {
3120 Location src = pending[j].src();
3121 for (intptr_t p = 0; p < block->PredecessorCount(); p++) {
3122 BlockEntryInstr* pred = block->PredecessorAt(p);
3123 for (auto move :
3124 pred->last_instruction()->AsGoto()->GetParallelMove()->moves()) {
3125 if (!move->IsRedundant() && move->dest().Equals(src)) {
3126 can_emit->Remove(j);
3127 changed = true;
3128 break;
3129 }
3130 }
3131 }
3132 }
3133
3134 // Check if newly discovered blocked moves block any other pending moves.
3135 while (changed) {
3136 changed = false;
3137 for (intptr_t j = 0; j < pending.length(); j++) {
3138 if (can_emit->Contains(j)) {
3139 for (intptr_t k = 0; k < pending.length(); k++) {
3140 if (!can_emit->Contains(k) &&
3141 pending[k].dest().Equals(pending[j].src())) {
3142 can_emit->Remove(j);
3143 changed = true;
3144 break;
3145 }
3146 }
3147 }
3148 }
3149 }
3150
3151 // Emit pending moves either in the successor block or in predecessors
3152 // (if they are blocked).
3153 for (intptr_t j = 0; j < pending.length(); j++) {
3154 const auto& move = pending[j];
3155 if (can_emit->Contains(j)) {
3156 block->GetParallelMove()->AddMove(move.dest(), move.src());
3157 } else {
3158 for (intptr_t p = 0; p < block->PredecessorCount(); p++) {
3159 EmitMoveOnEdge(block, block->PredecessorAt(p), move);
3160 }
3161 }
3162 }
3163
3164 pending.Clear();
3165 }
3166 }
3167
3168 // Eagerly spill values.
3169 // TODO(vegorov): if value is spilled on the cold path (e.g. by the call)
3170 // this will cause spilling to occur on the fast path (at the definition).
3171 for (intptr_t i = 0; i < spilled_.length(); i++) {
3172 LiveRange* range = spilled_[i];
3173 if (!range->assigned_location().Equals(range->spill_slot())) {
3174 if (range->Start() == 0) {
3175 // We need to handle spilling of constants in a special way. Simply
3176 // place spilling move in the FunctionEntry successors of the graph
3177 // entry.
3178 RELEASE_ASSERT(range->assigned_location().IsConstant());
3179 for (auto block : flow_graph_.graph_entry()->successors()) {
3180 if (block->IsFunctionEntry()) {
3181 AddMoveAt(block->start_pos() + 1, range->spill_slot(),
3182 range->assigned_location());
3183 }
3184 }
3185 } else {
3186 AddMoveAt(range->Start() + 1, range->spill_slot(),
3187 range->assigned_location());
3188 }
3189 }
3190 }
3191}
3192
3193static Representation RepresentationForRange(Representation definition_rep) {
 3194 if (definition_rep == kUnboxedInt64) {
3195 // kUnboxedInt64 is split into two ranges, each of which are kUntagged.
3196 return kUntagged;
3197 } else if (definition_rep == kUnboxedUint32) {
3198 // kUnboxedUint32 is untagged.
3199 return kUntagged;
3200 }
3201 return definition_rep;
3202}
3203
3204void FlowGraphAllocator::CollectRepresentations() {
3205 // Constants.
3206 GraphEntryInstr* graph_entry = flow_graph_.graph_entry();
3207 auto initial_definitions = graph_entry->initial_definitions();
3208 for (intptr_t i = 0; i < initial_definitions->length(); ++i) {
3209 Definition* def = (*initial_definitions)[i];
3210 value_representations_[def->vreg(0)] =
3211 RepresentationForRange(def->representation());
3212 if (def->HasPairRepresentation()) {
3213 value_representations_[def->vreg(1)] =
3214 RepresentationForRange(def->representation());
3215 }
3216 }
3217
3218 for (auto block : block_order_) {
3219 if (auto entry = block->AsBlockEntryWithInitialDefs()) {
3220 initial_definitions = entry->initial_definitions();
3221 for (intptr_t i = 0; i < initial_definitions->length(); ++i) {
3222 Definition* def = (*initial_definitions)[i];
3223 value_representations_[def->vreg(0)] =
3224 RepresentationForRange(def->representation());
3225 if (def->HasPairRepresentation()) {
3226 value_representations_[def->vreg(1)] =
3227 RepresentationForRange(def->representation());
3228 }
3229 }
3230 } else if (auto join = block->AsJoinEntry()) {
3231 for (PhiIterator it(join); !it.Done(); it.Advance()) {
3232 PhiInstr* phi = it.Current();
3233 ASSERT(phi != nullptr && phi->vreg(0) >= 0);
3234 value_representations_[phi->vreg(0)] =
3235 RepresentationForRange(phi->representation());
3236 if (phi->HasPairRepresentation()) {
3237 value_representations_[phi->vreg(1)] =
3238 RepresentationForRange(phi->representation());
3239 }
3240 }
3241 }
3242
3243 // Normal instructions.
3244 for (auto instr : block->instructions()) {
3245 Definition* def = instr->AsDefinition();
3246 if ((def != nullptr) && (def->vreg(0) >= 0)) {
3247 const intptr_t vreg = def->vreg(0);
3248 value_representations_[vreg] =
3249 RepresentationForRange(def->representation());
3250 if (def->HasPairRepresentation()) {
3251 value_representations_[def->vreg(1)] =
3252 RepresentationForRange(def->representation());
3253 }
3254 }
3255 }
3256 }
3257}
3258
3259void FlowGraphAllocator::RemoveFrameIfNotNeeded() {
3260 // Intrinsic functions are naturally frameless.
3261 if (intrinsic_mode_) {
3262 flow_graph_.graph_entry()->MarkFrameless();
3263 return;
3264 }
3265
 3266 // For now only AOT compiled code in bare instructions mode supports
 3267 // frameless functions. Outside of bare instructions mode we need to preserve
 3268 // the caller's PP, so all functions need a frame if they have their own pool,
 3269 // which is hard to determine at this stage.
3270 if (!FLAG_precompiled_mode) {
3271 return;
3272 }
3273
3274 // Copying of parameters needs special changes to become frameless.
3275 // Specifically we need to rebase IL instructions which directly access frame
3276 // ({Load,Store}IndexedUnsafeInstr) to use SP rather than FP.
3277 // For now just always give such functions a frame.
3278 if (flow_graph_.parsed_function().function().MakesCopyOfParameters()) {
3279 return;
3280 }
3281
3282 // If we have spills we need to create a frame.
3283 if (flow_graph_.graph_entry()->spill_slot_count() > 0) {
3284 return;
3285 }
3286
3287#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM)
3288 bool has_write_barrier_call = false;
3289#endif
3290 intptr_t num_calls_on_shared_slow_path = 0;
3291 for (auto block : block_order_) {
3292 for (auto instruction : block->instructions()) {
3293 if (instruction->HasLocs() && instruction->locs()->can_call()) {
3294 if (!instruction->locs()->call_on_shared_slow_path()) {
3295 // Function contains a call and thus needs a frame.
3296 return;
3297 }
3298 // For calls on shared slow paths the frame can be created on
3299 // a slow path around the call. Only allow one call on a shared
3300 // slow path to avoid extra code size.
3301 ++num_calls_on_shared_slow_path;
3302 if (num_calls_on_shared_slow_path > 1) {
3303 return;
3304 }
3305 }
3306
3307      // Some instructions contain write barriers inside, which can call
3308      // a helper stub. This however is a leaf call: on ia32 and x64 we can
3309      // ignore it entirely, because the return address is always on the
3310      // stack. On ARM/ARM64 the return address is in LR and needs to be
3311      // explicitly preserved in the frame. The write barrier instruction
3312      // sequence has explicit support for emitting an LR spill/restore
3313      // if necessary, but for code size purposes it does not make sense
3314      // to make a function frameless if it contains more than one
3315      // write barrier invocation.
3316#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM)
3317 if (auto store_field = instruction->AsStoreField()) {
3318 if (store_field->ShouldEmitStoreBarrier()) {
3319 if (has_write_barrier_call) {
3320            // We already have at least one write barrier call.
3321            // For code size it is better to keep a frame (a single LR
3322            // spill/restore) than to emit an inline one at every barrier.
3323 return;
3324 }
3325 has_write_barrier_call = true;
3326 }
3327 }
3328
3329 if (auto store_indexed = instruction->AsStoreIndexed()) {
3330 if (store_indexed->ShouldEmitStoreBarrier()) {
3331 if (has_write_barrier_call) {
3332            // We already have at least one write barrier call.
3333            // For code size it is better to keep a frame (a single LR
3334            // spill/restore) than to emit an inline one at every barrier.
3335 return;
3336 }
3337 has_write_barrier_call = true;
3338 }
3339 }
3340#endif
3341 }
3342 }
3343
3344  // Good to go. No call requires us to set up a frame.
3345 auto entry = flow_graph_.graph_entry();
3346
3347 entry->MarkFrameless();
3348
3349 // Fix location of parameters to use SP as their base register instead of FP.
3350 auto fix_location_for = [&](BlockEntryInstr* block, ParameterInstr* param,
3351 intptr_t vreg, intptr_t pair_index) {
3352 auto location = param->location();
3353 if (location.IsPairLocation()) {
3354 ASSERT(param->HasPairRepresentation());
3355 location = location.AsPairLocation()->At(pair_index);
3356 }
3357 if (!location.HasStackIndex() || location.base_reg() != FPREG) {
3358 return;
3359 }
3360
3361 const auto fp_relative = location;
3362 const auto sp_relative = fp_relative.ToEntrySpRelative();
3363
3364 for (LiveRange* range = GetLiveRange(vreg); range != nullptr;
3365 range = range->next_sibling()) {
3366 if (range->assigned_location().Equals(fp_relative)) {
3367 range->set_assigned_location(sp_relative);
3368 range->set_spill_slot(sp_relative);
3369 for (UsePosition* use = range->first_use(); use != nullptr;
3370 use = use->next()) {
3371 ASSERT(use->location_slot()->Equals(fp_relative));
3372 *use->location_slot() = sp_relative;
3373 }
3374 }
3375 }
3376 };
3377
3378 for (auto block : entry->successors()) {
3379 if (FunctionEntryInstr* entry = block->AsFunctionEntry()) {
3380 for (auto defn : *entry->initial_definitions()) {
3381 if (auto param = defn->AsParameter()) {
3382 const auto vreg = param->vreg(0);
3383 fix_location_for(block, param, vreg, 0);
3384 if (param->HasPairRepresentation()) {
3385 fix_location_for(block, param, param->vreg(1),
3386 /*pair_index=*/1);
3387 }
3388 }
3389 }
3390 }
3391 }
3392}
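
The fix_location_for closure above retargets a parameter's FP-relative stack location, and every use recorded against it, to the equivalent entry-SP-relative location once the frame has been dropped. Below is a minimal sketch of that in-place rewriting; Loc, kSavedSlots and the ToEntrySpRelative stand-in are illustrative assumptions only and do not reflect the actual target frame layout.

#include <cassert>

enum Base { kFP, kSP };

struct Loc {
  Base base;
  int index;
  bool operator==(const Loc& other) const {
    return base == other.base && index == other.index;
  }
};

// Assumed relation for this sketch only: the FP the function would have set
// up sits kSavedSlots slots above the SP seen on entry.
constexpr int kSavedSlots = 2;

static Loc ToEntrySpRelative(Loc fp_relative) {
  assert(fp_relative.base == kFP);
  return Loc{kSP, fp_relative.index + kSavedSlots};
}

int main() {
  Loc assigned{kFP, 3};                  // parameter slot picked by the allocator
  Loc use0 = assigned, use1 = assigned;  // what use->location_slot() would hold
  Loc* use_slots[] = {&use0, &use1};

  const Loc fp_relative = assigned;
  const Loc sp_relative = ToEntrySpRelative(fp_relative);

  assigned = sp_relative;                // set_assigned_location / set_spill_slot
  for (Loc* use : use_slots) {
    assert(*use == fp_relative);         // every use still points at the FP slot
    *use = sp_relative;                  // rewrite it in place
  }

  assert((assigned == Loc{kSP, 5}));
  assert(use0 == assigned && use1 == assigned);
  return 0;
}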
3393
3394// Locations assigned by this pass are used when constructing [DeoptInfo] so
3395// there is no need to worry about assigning out locations for detached
3396// [MoveArgument] instructions - because we don't support a register-based
3397// calling convention in JIT.
3398void FlowGraphAllocator::AllocateOutgoingArguments() {
3399 const intptr_t total_spill_slot_count =
3400 flow_graph_.graph_entry()->spill_slot_count();
3401
3402 for (auto block : block_order_) {
3403 for (auto instr : block->instructions()) {
3404 if (auto move_arg = instr->AsMoveArgument()) {
3405 // Register calling conventions are not used in JIT.
3406 ASSERT(!move_arg->is_register_move());
3407
3408 const intptr_t spill_index =
3409 (total_spill_slot_count - 1) - move_arg->sp_relative_index();
3410        const intptr_t slot_index =
3411            compiler::target::frame_layout.FrameSlotForVariableIndex(
3412                -spill_index);
3413
3414 move_arg->locs()->set_out(
3415 0, (move_arg->representation() == kUnboxedDouble)
3416 ? Location::DoubleStackSlot(slot_index, FPREG)
3417 : Location::StackSlot(slot_index, FPREG));
3418 }
3419 }
3420 }
3421}
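
The index arithmetic above flips a MoveArgument's SP-relative index (counted up from SP) into a spill-area index counted from the other end, which is then turned into an FP-relative frame slot. A small worked sketch follows; FrameSlotForVariableIndex and kFirstLocalFromFp here are hypothetical stand-ins, since the real FrameLayout mapping is target specific.

#include <cstdio>

// Hypothetical: pretend variable index v lives at FP-relative slot
// kFirstLocalFromFp + v (illustrative constant only).
constexpr int kFirstLocalFromFp = -2;
static int FrameSlotForVariableIndex(int index) { return kFirstLocalFromFp + index; }

int main() {
  const int total_spill_slot_count = 6;  // regular spills + outgoing-argument slots
  for (int sp_relative_index = 0; sp_relative_index < 3; ++sp_relative_index) {
    const int spill_index = (total_spill_slot_count - 1) - sp_relative_index;
    const int slot_index = FrameSlotForVariableIndex(-spill_index);
    std::printf("arg %d -> spill %d -> FP-relative slot %d\n", sp_relative_index,
                spill_index, slot_index);
  }
  return 0;
}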
3422
3423void FlowGraphAllocator::ScheduleParallelMoves() {
3424 ParallelMoveResolver resolver;
3425
3426 for (auto block : block_order_) {
3427 if (block->HasParallelMove()) {
3428 resolver.Resolve(block->parallel_move());
3429 }
3430 for (auto instruction : block->instructions()) {
3431 if (auto move = instruction->AsParallelMove()) {
3432 resolver.Resolve(move);
3433 }
3434 }
3435 if (auto goto_instr = block->last_instruction()->AsGoto()) {
3436 if (goto_instr->HasParallelMove()) {
3437 resolver.Resolve(goto_instr->parallel_move());
3438 }
3439 }
3440 }
3441}
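
ScheduleParallelMoves() hands every collected ParallelMove to the resolver, which must order the semantically simultaneous moves so no source is clobbered before it is read, breaking cycles such as a register swap. The sketch below shows the textbook form of that resolution using a scratch register; it is illustrative only and is not claimed to match the VM's ParallelMoveResolver.

#include <cstdio>
#include <vector>

struct Move { int src, dst; };

static void Resolve(std::vector<Move> pending) {
  const int kTemp = 99;  // hypothetical scratch register
  while (!pending.empty()) {
    bool progress = false;
    for (size_t i = 0; i < pending.size(); ++i) {
      const Move m = pending[i];
      bool dst_is_live_source = false;
      for (size_t j = 0; j < pending.size(); ++j) {
        if (j != i && pending[j].src == m.dst) dst_is_live_source = true;
      }
      if (!dst_is_live_source) {  // safe: no pending move still reads m.dst
        std::printf("mov r%d -> r%d\n", m.src, m.dst);
        pending.erase(pending.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {  // only cycles remain: break one via the scratch register
      const int moved_src = pending.front().src;
      std::printf("mov r%d -> r%d (temp)\n", moved_src, kTemp);
      for (Move& m : pending) {
        if (m.src == moved_src) m.src = kTemp;  // readers now use the temp copy
      }
    }
  }
}

int main() {
  Resolve({{0, 1}, {1, 0}, {2, 3}});  // swap r0/r1 plus an independent move
  return 0;
}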
3442
3443void FlowGraphAllocator::AllocateRegisters() {
3444  CollectRepresentations();
3445
3446 liveness_.Analyze();
3447
3448 NumberInstructions();
3449
3450  // Reserve a spill slot for the :suspend_state synthetic variable before
3451 // reserving spill slots for parameter variables.
3452 AllocateSpillSlotForSuspendState();
3453
3454 BuildLiveRanges();
3455
3456 // Update stackmaps after all safepoints are collected.
3457 UpdateStackmapsForSuspendState();
3458
3459 if (FLAG_print_ssa_liveranges && CompilerState::ShouldTrace()) {
3460 const Function& function = flow_graph_.function();
3461 THR_Print("-- [before ssa allocator] ranges [%s] ---------\n",
3462 function.ToFullyQualifiedCString());
3463 PrintLiveRanges();
3464 THR_Print("----------------------------------------------\n");
3465
3466 THR_Print("-- [before ssa allocator] ir [%s] -------------\n",
3467 function.ToFullyQualifiedCString());
3468    if (FLAG_support_il_printer) {
3469#ifndef PRODUCT
3470 FlowGraphPrinter printer(flow_graph_, true);
3471 printer.PrintBlocks();
3472#endif
3473 }
3474 THR_Print("----------------------------------------------\n");
3475 }
3476
3477 PrepareForAllocation(Location::kRegister, kNumberOfCpuRegisters,
3478 unallocated_cpu_, cpu_regs_, blocked_cpu_registers_);
3479 AllocateUnallocatedRanges();
3480 // GraphEntryInstr::fixed_slot_count() stack slots are reserved for catch
3481 // entries. When allocating a spill slot, AllocateSpillSlotFor() accounts for
3482 // these reserved slots and allocates spill slots on top of them.
3483 // However, if there are no spill slots allocated, we still need to reserve
3484 // slots for catch entries in the spill area.
3485 cpu_spill_slot_count_ = Utils::Maximum(
3486 spill_slots_.length(), flow_graph_.graph_entry()->fixed_slot_count());
3487 spill_slots_.Clear();
3488 quad_spill_slots_.Clear();
3489 untagged_spill_slots_.Clear();
3490
3491 PrepareForAllocation(Location::kFpuRegister, kNumberOfFpuRegisters,
3492 unallocated_fpu_, fpu_regs_, blocked_fpu_registers_);
3493 AllocateUnallocatedRanges();
3494
3495 GraphEntryInstr* entry = block_order_[0]->AsGraphEntry();
3496 ASSERT(entry != nullptr);
3497 intptr_t double_spill_slot_count = spill_slots_.length() * kDoubleSpillFactor;
3498 entry->set_spill_slot_count(cpu_spill_slot_count_ + double_spill_slot_count +
3499 flow_graph_.max_argument_slot_count());
3500
3501 RemoveFrameIfNotNeeded();
3502
3503 AllocateOutgoingArguments();
3504
3505 ResolveControlFlow();
3506
3507 ScheduleParallelMoves();
3508
3509 if (FLAG_print_ssa_liveranges && CompilerState::ShouldTrace()) {
3510 const Function& function = flow_graph_.function();
3511
3512 THR_Print("-- [after ssa allocator] ranges [%s] ---------\n",
3513 function.ToFullyQualifiedCString());
3514 PrintLiveRanges();
3515 THR_Print("----------------------------------------------\n");
3516
3517 THR_Print("-- [after ssa allocator] ir [%s] -------------\n",
3518 function.ToFullyQualifiedCString());
3519    if (FLAG_support_il_printer) {
3520#ifndef PRODUCT
3521 FlowGraphPrinter printer(flow_graph_, true);
3522 printer.PrintBlocks();
3523#endif
3524 }
3525 THR_Print("----------------------------------------------\n");
3526 }
3527}
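
The frame-size accounting at the end of AllocateRegisters() combines the CPU spill area (at least fixed_slot_count slots reserved for catch entries), the FPU spill area scaled by kDoubleSpillFactor, and the outgoing-argument area. A worked sketch with made-up counts, assuming a kDoubleSpillFactor of 2 (i.e. a double occupies two word-sized slots):

#include <algorithm>
#include <cstdio>

int main() {
  const int kDoubleSpillFactor = 2;       // assumed: two words per double spill
  const int cpu_spills = 3;               // spill_slots_.length() after the CPU pass
  const int fixed_slot_count = 1;         // reserved for catch entries
  const int fpu_spills = 2;               // spill_slots_.length() after the FPU pass
  const int max_argument_slot_count = 4;  // outgoing MoveArgument slots

  const int cpu_spill_slot_count = std::max(cpu_spills, fixed_slot_count);
  const int double_spill_slot_count = fpu_spills * kDoubleSpillFactor;
  const int spill_slot_count = cpu_spill_slot_count + double_spill_slot_count +
                               max_argument_slot_count;
  std::printf("spill_slot_count = %d\n", spill_slot_count);  // 3 + 4 + 4 = 11
  return 0;
}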
3528
3529} // namespace dart