21#define INCLUDE_LINEAR_SCAN_TRACING_CODE
24#if defined(INCLUDE_LINEAR_SCAN_TRACING_CODE)
25#define TRACE_ALLOC(statement) \
27 if (FLAG_trace_ssa_allocator && CompilerState::ShouldTrace()) statement; \
30#define TRACE_ALLOC(statement)
39 return (
a <
b) ?
a :
b;
43 return (
pos & 1) == 0;
47 return (
pos & 1) == 1;
71 for (
auto back_edge : loop_info->
back_edges()) {
72 intptr_t end_pos = back_edge->end_pos();
90 : flow_graph_(flow_graph),
91 reaching_defs_(flow_graph),
92 value_representations_(flow_graph.max_vreg()),
94 postorder_(flow_graph.postorder()),
98 liveness_(flow_graph),
99 vreg_count_(flow_graph.max_vreg()),
100 live_ranges_(flow_graph.max_vreg()),
105 blocked_cpu_registers_(),
106 blocked_fpu_registers_(),
110 number_of_registers_(0),
112 blocked_registers_(),
116 untagged_spill_slots_(),
117 cpu_spill_slot_count_(0),
118 intrinsic_mode_(intrinsic_mode) {
119 for (intptr_t
i = 0;
i < vreg_count_;
i++) {
120 live_ranges_.Add(
nullptr);
122 for (intptr_t
i = 0;
i < vreg_count_;
i++) {
123 value_representations_.
Add(kNoRepresentation);
130 blocked_cpu_registers_[
i] =
true;
135 blocked_fpu_registers_[
FpuTMP] =
true;
140 if (intrinsic_mode) {
143#if !defined(TARGET_ARCH_IA32)
146 blocked_cpu_registers_[
CODE_REG] =
true;
161 if (inner_mat !=
nullptr) {
164 intptr_t idx = defn->
vreg(0);
188 locs->DiscoverWritableInputs();
192 Definition* current_def = current->AsDefinition();
193 if ((current_def !=
nullptr) && current_def->
HasSSATemp()) {
194 kill->
Add(current_def->
vreg(0));
197 kill->
Add(current_def->
vreg(1));
204 for (intptr_t j = 0; j < current->
InputCount(); j++) {
220 for (
auto move : *move_arguments) {
221 if (move->is_register_move()) {
222 auto input = move->value();
224 live_in->
Add(input->definition()->vreg(0));
225 if (input->definition()->HasPairRepresentation()) {
226 live_in->
Add(input->definition()->vreg(1));
234 if (current->
env() !=
nullptr) {
237 Definition* defn = env_it.CurrentValue()->definition();
238 if (defn->IsMaterializeObject()) {
242 }
else if (!defn->IsMoveArgument() && !defn->IsConstant()) {
253 if (block->IsJoinEntry()) {
267 for (intptr_t k = 0; k < phi->
InputCount(); k++) {
284 }
else if (
auto entry = block->AsBlockEntryWithInitialDefs()) {
286 if (entry->IsCatchBlockEntry()) {
287 entry->InitializeLocationSummary(
zone(),
true);
291 for (intptr_t
i = 0;
i < entry->initial_definitions()->
length();
i++) {
292 Definition* def = (*entry->initial_definitions())[
i];
293 const intptr_t vreg = def->
vreg(0);
294 kill_[entry->postorder_number()]->Add(vreg);
295 live_in_[entry->postorder_number()]->Remove(vreg);
297 kill_[entry->postorder_number()]->Add(def->
vreg(1));
298 live_in_[entry->postorder_number()]->Remove(def->
vreg(1));
306 ASSERT(location_slot !=
nullptr);
307 ASSERT((first_use_interval_->start_ <=
pos) &&
308 (pos <= first_use_interval_->end_));
309 if (uses_ !=
nullptr) {
312 }
else if (uses_->
pos() <
pos) {
318 while ((insert_after->
next() !=
nullptr) &&
320 insert_after = insert_after->
next();
324 while (insert_before !=
nullptr && (insert_before->
pos() ==
pos)) {
326 return insert_before;
328 insert_before = insert_before->
next();
333 return insert_after->
next();
355 if (first_safepoint_ ==
nullptr) {
356 ASSERT(last_safepoint_ ==
nullptr);
357 first_safepoint_ = last_safepoint_ = safepoint;
359 ASSERT(last_safepoint_ !=
nullptr);
363 last_safepoint_->
set_next(safepoint);
364 last_safepoint_ = safepoint;
394 first_use_interval_->end_ =
end;
405 if (last_use_interval_ ==
nullptr) {
406 ASSERT(first_use_interval_->
next() ==
nullptr);
407 last_use_interval_ = first_use_interval_;
419 if (first_use_interval_ ==
nullptr) {
422 last_use_interval_ = first_use_interval_;
426 ASSERT(first_use_interval_->start_ <=
pos);
427 first_use_interval_->start_ =
pos;
432 if (live_ranges_[vreg] ==
nullptr) {
434 ASSERT(rep != kNoRepresentation);
435 live_ranges_[vreg] =
new LiveRange(vreg, rep);
437 return live_ranges_[vreg];
440LiveRange* FlowGraphAllocator::MakeLiveRangeForTemporary() {
445 temporaries_.Add(range);
450void FlowGraphAllocator::BlockRegisterLocation(
Location loc,
453 bool* blocked_registers,
454 LiveRange** blocking_ranges) {
455 if (blocked_registers[loc.register_code()]) {
459 if (blocking_ranges[loc.register_code()] ==
nullptr) {
462 blocking_ranges[loc.register_code()] = range;
463 range->set_assigned_location(loc);
465 temporaries_.Add(range);
469 blocking_ranges[loc.register_code()]->AddUseInterval(from, to);
473void FlowGraphAllocator::BlockLocation(
Location loc,
476 if (loc.IsRegister()) {
477 BlockRegisterLocation(loc, from, to, blocked_cpu_registers_, cpu_regs_);
478 }
else if (loc.IsFpuRegister()) {
479 BlockRegisterLocation(loc, from, to, blocked_fpu_registers_, fpu_regs_);
485void FlowGraphAllocator::BlockCpuRegisters(intptr_t registers,
489 if ((registers & (1 << r)) != 0) {
496void FlowGraphAllocator::BlockFpuRegisters(intptr_t fpu_registers,
500 if ((fpu_registers & (1 << r)) != 0) {
524 while (safepoint !=
nullptr) {
528 safepoint = safepoint->
next();
532 for (
UseInterval* interval = first_use_interval_; interval !=
nullptr;
533 interval = interval->
next()) {
534 THR_Print(
" use interval [%" Pd ", %" Pd ")\n", interval->start(),
536 while ((use_pos !=
nullptr) && (use_pos->
pos() <= interval->end())) {
543 use_pos = use_pos->
next();
552void FlowGraphAllocator::PrintLiveRanges() {
554 for (intptr_t
i = 0;
i < temporaries_.length();
i++) {
555 temporaries_[
i]->Print();
559 for (intptr_t
i = 0;
i < live_ranges_.length();
i++) {
560 if (live_ranges_[
i] !=
nullptr) {
561 live_ranges_[
i]->Print();
571 while ((use !=
nullptr) && (use->
pos() < boundary)) {
583 while (use !=
nullptr) {
592void FlowGraphAllocator::BuildLiveRanges() {
593 const intptr_t
block_count = block_order_.length();
594 ASSERT(block_order_[0]->IsGraphEntry());
595 BitVector* current_interference_set =
nullptr;
596 Zone* zone = flow_graph_.
zone();
598 BlockEntryInstr* block = block_order_[
x];
600 ASSERT(BlockEntryAt(block->start_pos()) == block);
605 for (BitVector::Iterator it(
607 !it.Done(); it.Advance()) {
609 range->AddUseInterval(block->start_pos(), block->end_pos());
612 LoopInfo* loop_info = block->loop_info();
613 if ((loop_info !=
nullptr) && (loop_info->IsBackEdge(block))) {
614 BitVector* backedge_interference =
615 extra_loop_info_[loop_info->id()]->backedge_interference;
616 if (backedge_interference !=
nullptr) {
619 current_interference_set = backedge_interference;
623 current_interference_set =
624 new (zone) BitVector(zone, flow_graph_.
max_vreg());
625 current_interference_set->AddAll(
627 extra_loop_info_[loop_info->id()]->backedge_interference =
628 current_interference_set;
634 Instruction* current =
635 ConnectOutgoingPhiMoves(block, current_interference_set);
638 while (current != block) {
640 if (!current->IsParallelMove()) {
641 ProcessOneInstruction(block, current, current_interference_set);
643 current = current->previous();
647 if (block->IsLoopHeader()) {
648 ASSERT(loop_info !=
nullptr);
649 current_interference_set =
nullptr;
650 for (BitVector::Iterator it(
652 !it.Done(); it.Advance()) {
654 intptr_t loop_end = extra_loop_info_[loop_info->id()]->end;
656 range->MarkHasOnlyUnconstrainedUsesInLoop(loop_info->id());
661 if (
auto join_entry = block->AsJoinEntry()) {
662 ConnectIncomingPhiMoves(join_entry);
663 }
else if (
auto catch_entry = block->AsCatchBlockEntry()) {
666 safepoints_.Add(catch_entry);
669 ProcessEnvironmentUses(catch_entry, catch_entry);
670 for (intptr_t
i = 0;
i < catch_entry->initial_definitions()->
length();
672 Definition* defn = (*catch_entry->initial_definitions())[
i];
674 range->DefineAt(catch_entry->start_pos());
675 ProcessInitialDefinition(defn, range, catch_entry,
i);
677 }
else if (
auto entry = block->AsBlockEntryWithInitialDefs()) {
678 ASSERT(block->IsFunctionEntry() || block->IsOsrEntry());
679 auto& initial_definitions = *entry->initial_definitions();
680 for (intptr_t
i = 0;
i < initial_definitions.length();
i++) {
681 Definition* defn = initial_definitions[
i];
682 if (defn->HasPairRepresentation()) {
685 range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
686 range->DefineAt(entry->start_pos());
687 ProcessInitialDefinition(defn, range, entry,
i,
691 range->AddUseInterval(entry->start_pos(), entry->start_pos() + 2);
692 range->DefineAt(entry->start_pos());
693 ProcessInitialDefinition(defn, range, entry,
i);
700 GraphEntryInstr* graph_entry = flow_graph_.
graph_entry();
701 for (intptr_t
i = 0;
i < graph_entry->initial_definitions()->
length();
i++) {
702 Definition* defn = (*graph_entry->initial_definitions())[
i];
703 if (defn->HasPairRepresentation()) {
706 range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
707 range->DefineAt(graph_entry->start_pos());
708 ProcessInitialDefinition(defn, range, graph_entry,
i,
712 range->AddUseInterval(graph_entry->start_pos(), graph_entry->end_pos());
713 range->DefineAt(graph_entry->start_pos());
714 ProcessInitialDefinition(defn, range, graph_entry,
i);
718void FlowGraphAllocator::SplitInitialDefinitionAt(LiveRange* range,
721 if (range->End() >
pos) {
722 LiveRange*
tail = range->SplitAt(
pos);
723 CompleteRange(
tail, kind);
727bool FlowGraphAllocator::IsSuspendStateParameter(Definition* defn) {
728 if (
auto param = defn->AsParameter()) {
729 if ((param->GetBlock()->IsOsrEntry() ||
730 param->GetBlock()->IsCatchBlockEntry()) &&
739void FlowGraphAllocator::AllocateSpillSlotForInitialDefinition(
741 intptr_t range_end) {
742 if (slot_index < spill_slots_.
length()) {
745 spill_slots_[slot_index] =
747 ASSERT(!quad_spill_slots_[slot_index]);
748 ASSERT(!untagged_spill_slots_[slot_index]);
750 while (spill_slots_.
length() < slot_index) {
752 quad_spill_slots_.
Add(
false);
753 untagged_spill_slots_.
Add(
false);
755 spill_slots_.
Add(range_end);
756 quad_spill_slots_.
Add(
false);
757 untagged_spill_slots_.
Add(
false);
761void FlowGraphAllocator::CompleteRange(Definition* defn, LiveRange* range) {
762 AssignSafepoints(defn, range);
764 if (range->has_uses_which_require_stack()) {
766 if (range->spill_slot().IsInvalid() ||
767 !range->spill_slot().HasStackIndex()) {
769 AllocateSpillSlotFor(range);
771 THR_Print(
"Allocated spill slot for %s which has use(s) which "
772 "require a stack slot.\n",
774 if (range->representation() == kTagged) {
775 MarkAsObjectAtSafepoints(range);
780 UsePosition*
prev =
nullptr;
781 for (UsePosition* use = range->first_use(); use !=
nullptr;
785 ConvertUseTo(use, range->spill_slot());
788 if (
prev ==
nullptr) {
789 range->set_first_use(use->next());
791 prev->set_next(use->next());
800void FlowGraphAllocator::ProcessInitialDefinition(
803 BlockEntryInstr* block,
804 intptr_t initial_definition_index,
805 bool second_location_for_definition) {
807 const intptr_t range_end = range->End();
809 if (
auto param = defn->AsParameter()) {
810 auto location = param->location();
812 if (location.IsPairLocation()) {
814 location.AsPairLocation()->At(second_location_for_definition ? 1 : 0);
816 range->set_assigned_location(location);
817 if (location.IsMachineRegister()) {
818 CompleteRange(defn, range);
823 ConvertAllUses(range);
828 range->set_spill_slot(location);
831 ConstantInstr* constant = defn->AsConstant();
832 ASSERT(constant !=
nullptr);
833 const intptr_t pair_index = second_location_for_definition ? 1 : 0;
837 CompleteRange(defn, range);
838 range->finger()->Initialize(range);
840 range->finger()->FirstRegisterBeneficialUse(block->start_pos());
841 if (use !=
nullptr) {
842 LiveRange*
tail = SplitBetween(range, block->start_pos(), use->pos());
843 CompleteRange(
tail, defn->RegisterKindForResult());
845 ConvertAllUses(range);
846 Location spill_slot = range->spill_slot();
847 if (spill_slot.IsStackSlot() && spill_slot.base_reg() ==
FPREG &&
848 spill_slot.stack_index() <=
850 !IsSuspendStateParameter(defn) && !defn->IsConstant()) {
858 ASSERT(defn->IsParameter());
859 ASSERT(defn->AsParameter()->env_index() == initial_definition_index);
860 const intptr_t spill_slot_index =
862 spill_slot.stack_index());
863 AllocateSpillSlotForInitialDefinition(spill_slot_index, range_end);
865 MarkAsObjectAtSafepoints(range);
866 }
else if (defn->IsConstant() && block->IsCatchBlockEntry() &&
867 (initial_definition_index >=
870 AllocateSpillSlotForInitialDefinition(
908Instruction* FlowGraphAllocator::ConnectOutgoingPhiMoves(
909 BlockEntryInstr* block,
910 BitVector* interfere_at_backedge) {
911 Instruction* last = block->last_instruction();
913 GotoInstr* goto_instr = last->AsGoto();
914 if (goto_instr ==
nullptr)
return last;
920 if (!goto_instr->HasParallelMove())
return goto_instr->previous();
921 ParallelMoveInstr* parallel_move = goto_instr->parallel_move();
926 JoinEntryInstr*
join = goto_instr->successor();
931 const intptr_t pred_index =
join->IndexOfPredecessor(block);
934 intptr_t move_index = 0;
935 for (PhiIterator it(
join); !it.Done(); it.Advance()) {
936 PhiInstr* phi = it.Current();
937 Value* val = phi->InputAt(pred_index);
938 MoveOperands* move = parallel_move->MoveOperandsAt(move_index++);
940 ConstantInstr* constant = val->definition()->AsConstant();
941 if (constant !=
nullptr) {
943 if (val->definition()->HasPairRepresentation()) {
944 move = parallel_move->MoveOperandsAt(move_index++);
955 intptr_t vreg = val->definition()->vreg(0);
957 if (interfere_at_backedge !=
nullptr) interfere_at_backedge->Add(vreg);
959 range->AddUseInterval(block->start_pos(),
pos);
960 range->AddHintedUse(
pos, move->src_slot(),
964 if (val->definition()->HasPairRepresentation()) {
965 move = parallel_move->MoveOperandsAt(move_index++);
966 vreg = val->definition()->vreg(1);
968 if (interfere_at_backedge !=
nullptr) {
969 interfere_at_backedge->Add(vreg);
971 range->AddUseInterval(block->start_pos(),
pos);
972 range->AddHintedUse(
pos, move->src_slot(),
980 return goto_instr->previous();
983void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr*
join) {
988 const intptr_t
pos =
join->start_pos();
989 const bool is_loop_header =
join->IsLoopHeader();
991 intptr_t move_idx = 0;
992 for (PhiIterator it(
join); !it.Done(); it.Advance()) {
993 PhiInstr* phi = it.Current();
995 const intptr_t vreg = phi->vreg(0);
997 const bool is_pair_phi = phi->HasPairRepresentation();
1005 range->DefineAt(
pos);
1006 if (is_loop_header) range->mark_loop_phi();
1010 second_range->DefineAt(
pos);
1011 if (is_loop_header) second_range->mark_loop_phi();
1014 for (intptr_t pred_idx = 0; pred_idx < phi->InputCount(); pred_idx++) {
1015 BlockEntryInstr* pred =
join->PredecessorAt(pred_idx);
1016 GotoInstr* goto_instr = pred->last_instruction()->AsGoto();
1017 ASSERT((goto_instr !=
nullptr) && (goto_instr->HasParallelMove()));
1018 MoveOperands* move =
1019 goto_instr->parallel_move()->MoveOperandsAt(move_idx);
1021 range->AddUse(
pos, move->dest_slot());
1024 MoveOperands* second_move =
1025 goto_instr->parallel_move()->MoveOperandsAt(move_idx + 1);
1027 second_range->AddUse(
pos, second_move->dest_slot());
1033 AssignSafepoints(phi, range);
1034 CompleteRange(range, phi->RegisterKindForResult());
1037 AssignSafepoints(phi, second_range);
1038 CompleteRange(second_range, phi->RegisterKindForResult());
1041 move_idx += is_pair_phi ? 2 : 1;
1045void FlowGraphAllocator::ProcessEnvironmentUses(BlockEntryInstr* block,
1046 Instruction* current) {
1047 ASSERT(current->env() !=
nullptr);
1048 Environment*
env = current->env();
1049 while (
env !=
nullptr) {
1058 if (
env->Length() == 0) {
1063 const intptr_t block_start_pos = block->start_pos();
1068 for (intptr_t
i = 0;
i <
env->Length(); ++
i) {
1070 Definition* def =
value->definition();
1072 if (def->HasPairRepresentation()) {
1082 const intptr_t slot_index =
1086 if (!def->IsConstant()) {
1090 ASSERT(def->IsParameter() || def->IsPhi());
1091 ASSERT(!def->HasPairRepresentation());
1093 range->AddUseInterval(block_start_pos, use_pos);
1098 if (def->IsMoveArgument()) {
1104 if (
auto constant = def->AsConstant()) {
1109 if (
auto mat = def->AsMaterializeObject()) {
1114 ProcessMaterializationUses(block, block_start_pos, use_pos, mat);
1118 if (def->HasPairRepresentation()) {
1119 PairLocation* location_pair = locations[
i].AsPairLocation();
1123 range->AddUseInterval(block_start_pos, use_pos);
1124 range->AddUse(use_pos, location_pair->SlotAt(0));
1129 range->AddUseInterval(block_start_pos, use_pos);
1130 range->AddUse(use_pos, location_pair->SlotAt(1));
1134 range->AddUseInterval(block_start_pos, use_pos);
1135 range->AddUse(use_pos, &locations[
i]);
1139 env->set_locations(locations);
1144void FlowGraphAllocator::ProcessMaterializationUses(
1145 BlockEntryInstr* block,
1146 const intptr_t block_start_pos,
1147 const intptr_t use_pos,
1148 MaterializeObjectInstr* mat) {
1151 if (mat->locations() !=
nullptr) {
1157 mat->set_locations(locations);
1159 for (intptr_t
i = 0;
i < mat->InputCount(); ++
i) {
1160 Definition* def = mat->InputAt(
i)->definition();
1162 ConstantInstr* constant = def->AsConstant();
1163 if (constant !=
nullptr) {
1168 if (def->HasPairRepresentation()) {
1170 PairLocation* location_pair = locations[
i].AsPairLocation();
1174 range->AddUseInterval(block_start_pos, use_pos);
1175 range->AddUse(use_pos, location_pair->SlotAt(0));
1180 range->AddUseInterval(block_start_pos, use_pos);
1181 range->AddUse(use_pos, location_pair->SlotAt(1));
1183 }
else if (def->IsMaterializeObject()) {
1185 ProcessMaterializationUses(block, block_start_pos, use_pos,
1186 def->AsMaterializeObject());
1190 range->AddUseInterval(block_start_pos, use_pos);
1191 range->AddUse(use_pos, &locations[
i]);
1196void FlowGraphAllocator::ProcessOneInput(BlockEntryInstr* block,
1201 RegisterSet* live_registers) {
1202 ASSERT(in_ref !=
nullptr);
1203 ASSERT(!in_ref->IsPairLocation());
1204 ASSERT(input !=
nullptr);
1205 ASSERT(block !=
nullptr);
1207 if (in_ref->IsMachineRegister()) {
1215 if (live_registers !=
nullptr) {
1216 live_registers->Add(*in_ref, range->representation());
1219 ASSERT(!in_ref->IsRegister() ||
1221 BlockLocation(*in_ref,
pos - 1,
pos + 1);
1222 range->AddUseInterval(block->start_pos(),
pos - 1);
1223 range->AddHintedUse(
pos - 1, move->src_slot(), in_ref);
1224 }
else if (in_ref->IsUnallocated()) {
1236 range->AddUseInterval(block->start_pos(),
pos);
1237 range->AddUse(
pos, move->src_slot());
1240 LiveRange* temp = MakeLiveRangeForTemporary();
1241 temp->AddUseInterval(
pos,
pos + 1);
1242 temp->AddHintedUse(
pos, in_ref, move->src_slot());
1243 temp->AddUse(
pos, move->dest_slot());
1248 range->mark_has_uses_which_require_stack();
1257 range->AddUseInterval(block->start_pos(),
pos + 1);
1258 range->AddUse(
pos + 1, in_ref);
1261 ASSERT(in_ref->IsConstant());
1265void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
1270 bool output_same_as_first_input,
1273 intptr_t input_vreg,
1274 BitVector* interference_set) {
1278 ASSERT(block !=
nullptr);
1281 vreg >= 0 ?
GetLiveRange(vreg) : MakeLiveRangeForTemporary();
1284 if (
out->IsMachineRegister()) {
1300 UsePosition* use = range->first_use();
1303 if (use ==
nullptr)
return;
1307 for (; use !=
nullptr; use = use->next()) {
1308 if (use->pos() == (
pos + 1)) {
1310 ASSERT(use->location_slot()->IsUnallocated());
1311 *(use->location_slot()) = *
out;
1312 range->set_first_use(use->next());
1322 range->DefineAt(
pos + 1);
1323 if (range->Start() == range->End())
return;
1326 range->AddHintedUse(
pos + 1, move->dest_slot(),
out);
1327 }
else if (output_same_as_first_input) {
1328 ASSERT(in_ref !=
nullptr);
1329 ASSERT(input !=
nullptr);
1341 MoveOperands* move =
1346 input_range->AddUseInterval(block->start_pos(),
pos);
1347 input_range->AddUse(
pos, move->src_slot());
1351 range->DefineAt(
pos);
1352 range->AddHintedUse(
pos,
out, move->src_slot());
1353 range->AddUse(
pos, move->dest_slot());
1354 range->AddUse(
pos, in_ref);
1356 if ((interference_set !=
nullptr) && (range->vreg() >= 0) &&
1357 interference_set->Contains(range->vreg())) {
1358 interference_set->Add(input->vreg(0));
1372 range->DefineAt(
pos);
1376 AssignSafepoints(def, range);
1377 CompleteRange(range, def->RegisterKindForResult());
1382void FlowGraphAllocator::ProcessOneInstruction(BlockEntryInstr* block,
1383 Instruction* current,
1384 BitVector* interference_set) {
1385 LocationSummary* locs = current->locs();
1387 Definition* def = current->AsDefinition();
1388 if ((def !=
nullptr) && (def->AsConstant() !=
nullptr)) {
1389 ASSERT(!def->HasPairRepresentation());
1391 (def->vreg(0) != -1) ?
GetLiveRange(def->vreg(0)) :
nullptr;
1394 if ((range ==
nullptr) || (range->first_use() ==
nullptr)) {
1404 ConstantInstr* constant_instr = def->AsConstant();
1407 range->finger()->Initialize(range);
1408 ConvertAllUses(range);
1418 ASSERT(locs->input_count() == current->InputCount());
1422 if (locs->out(0).IsUnallocated() &&
1424 if (locs->in(0).IsPairLocation()) {
1426 PairLocation* in_pair = locs->in(0).AsPairLocation();
1427 ASSERT(in_pair->At(0).IsMachineRegister() ==
1428 in_pair->At(1).IsMachineRegister());
1429 if (in_pair->At(0).IsMachineRegister() &&
1430 in_pair->At(1).IsMachineRegister()) {
1431 locs->set_out(0,
Location::Pair(in_pair->At(0), in_pair->At(1)));
1433 }
else if (locs->in(0).IsMachineRegister()) {
1435 locs->set_out(0, locs->in(0));
1439 const bool output_same_as_first_input =
1440 locs->out(0).IsUnallocated() &&
1444 if (output_same_as_first_input && locs->in(0).IsPairLocation()) {
1450 if (current->env() !=
nullptr) ProcessEnvironmentUses(block, current);
1456 for (intptr_t j = output_same_as_first_input ? 1 : 0;
1457 j < locs->input_count(); j++) {
1460 Value* input = current->InputAt(j);
1461 Location* in_ref = locs->in_slot(j);
1462 RegisterSet* live_registers =
nullptr;
1463 if (locs->HasCallOnSlowPath()) {
1464 live_registers = locs->live_registers();
1466 if (in_ref->IsPairLocation()) {
1467 ASSERT(input->definition()->HasPairRepresentation());
1468 PairLocation* pair = in_ref->AsPairLocation();
1471 ProcessOneInput(block,
pos, pair->SlotAt(0), input,
1472 input->definition()->vreg(0), live_registers);
1473 ProcessOneInput(block,
pos, pair->SlotAt(1), input,
1474 input->definition()->vreg(1), live_registers);
1476 ProcessOneInput(block,
pos, in_ref, input, input->definition()->vreg(0),
1483 if (current->ArgumentCount() != 0) {
1484 auto move_arguments = current->GetMoveArguments();
1485 for (
auto move : *move_arguments) {
1486 if (move->is_register_move()) {
1487 auto input = move->value();
1488 if (move->location().IsPairLocation()) {
1489 auto pair = move->location().AsPairLocation();
1491 pair->At(1).IsMachineRegister());
1492 ProcessOneInput(block,
pos, pair->SlotAt(0), input,
1493 input->definition()->vreg(0),
1495 ProcessOneInput(block,
pos, pair->SlotAt(1), input,
1496 input->definition()->vreg(1),
1500 ProcessOneInput(block,
pos, move->location_slot(), input,
1501 input->definition()->vreg(0),
1509 for (intptr_t j = 0; j < locs->temp_count(); j++) {
1518 ASSERT(!temp.IsPairLocation());
1519 if (temp.IsMachineRegister()) {
1520 ASSERT(!temp.IsRegister() ||
1522 BlockLocation(temp,
pos,
pos + 1);
1523 }
else if (temp.IsUnallocated()) {
1524 LiveRange* range = MakeLiveRangeForTemporary();
1525 range->AddUseInterval(
pos,
pos + 1);
1526 range->AddUse(
pos, locs->temp_slot(j));
1534 if (locs->native_leaf_call()) {
1537#if defined(TARGET_ARCH_ARM)
1549 if (locs->always_calls() && !locs->callee_safe_call()) {
1564 for (intptr_t j = 0; j < locs->temp_count(); j++) {
1565 ASSERT(!locs->temp(j).IsPairLocation());
1566 ASSERT(!locs->temp(j).IsUnallocated());
1569 for (intptr_t j = 0; j < locs->input_count(); j++) {
1570 if (locs->in(j).IsPairLocation()) {
1571 PairLocation* pair = locs->in_slot(j)->AsPairLocation();
1572 ASSERT(!pair->At(0).IsUnallocated() ||
1575 ASSERT(!pair->At(1).IsUnallocated() ||
1579 ASSERT(!locs->in(j).IsUnallocated() ||
1585 if (locs->out(0).IsPairLocation()) {
1586 PairLocation* pair = locs->out_slot(0)->AsPairLocation();
1587 ASSERT(!pair->At(0).IsUnallocated());
1588 ASSERT(!pair->At(1).IsUnallocated());
1590 ASSERT(!locs->out(0).IsUnallocated());
1595 if (locs->can_call() && !locs->native_leaf_call()) {
1596 safepoints_.Add(current);
1599 if (def ==
nullptr) {
1600 ASSERT(locs->out(0).IsInvalid());
1604 if (locs->out(0).IsInvalid()) {
1605 ASSERT(def->vreg(0) < 0);
1609 ASSERT(locs->output_count() == 1);
1611 if (
out->IsPairLocation()) {
1612 ASSERT(def->HasPairRepresentation());
1613 PairLocation* pair =
out->AsPairLocation();
1614 if (output_same_as_first_input) {
1615 ASSERT(locs->in_slot(0)->IsPairLocation());
1616 PairLocation* in_pair = locs->in_slot(0)->AsPairLocation();
1617 Definition* input = current->InputAt(0)->definition();
1618 ASSERT(input->HasPairRepresentation());
1621 ProcessOneOutput(block,
pos,
1622 pair->SlotAt(0), def,
1625 in_pair->SlotAt(0), input,
1628 ProcessOneOutput(block,
pos, pair->SlotAt(1), def, def->vreg(1),
true,
1629 in_pair->SlotAt(1), input, input->vreg(1),
1634 ProcessOneOutput(block,
pos, pair->SlotAt(0), def, def->vreg(0),
1636 nullptr,
nullptr, -1,
1638 ProcessOneOutput(block,
pos, pair->SlotAt(1), def, def->vreg(1),
false,
1639 nullptr,
nullptr, -1, interference_set);
1642 if (output_same_as_first_input) {
1643 Location* in_ref = locs->in_slot(0);
1644 Definition* input = current->InputAt(0)->definition();
1645 ASSERT(!in_ref->IsPairLocation());
1646 ProcessOneOutput(block,
pos,
1654 ProcessOneOutput(block,
pos,
out, def, def->vreg(0),
1656 nullptr,
nullptr, -1,
1667 if ((move ==
nullptr) ||
1680 if (
next->IsParallelMove() &&
1682 return next->AsParallelMove();
1692void FlowGraphAllocator::NumberInstructions() {
1695 for (
auto block : block_order_) {
1696 instructions_.Add(block);
1697 block_entries_.Add(block);
1698 block->set_start_pos(
pos);
1702 for (
auto instr : block->instructions()) {
1704 if (instr->IsParallelMove())
continue;
1706 instructions_.Add(instr);
1707 block_entries_.Add(block);
1711 block->set_end_pos(
pos);
1716 for (
auto block : block_order_) {
1719 JoinEntryInstr*
join = block->AsJoinEntry();
1720 if (
join !=
nullptr) {
1721 intptr_t move_count = 0;
1722 for (PhiIterator it(
join); !it.Done(); it.Advance()) {
1723 move_count += it.Current()->HasPairRepresentation() ? 2 : 1;
1725 for (intptr_t
i = 0;
i < block->PredecessorCount();
i++) {
1729 Instruction* last = block->PredecessorAt(
i)->last_instruction();
1732 ParallelMoveInstr* move = last->AsGoto()->GetParallelMove();
1735 for (intptr_t j = 0; j < move_count; j++) {
1743 Zone* zone = flow_graph_.
zone();
1744 const LoopHierarchy& loop_hierarchy = flow_graph_.
loop_hierarchy();
1745 const intptr_t num_loops = loop_hierarchy.
num_loops();
1746 for (intptr_t
i = 0;
i < num_loops;
i++) {
1747 extra_loop_info_.Add(
1752Instruction* FlowGraphAllocator::InstructionAt(intptr_t
pos)
const {
1753 return instructions_[
pos / 2];
1756BlockEntryInstr* FlowGraphAllocator::BlockEntryAt(intptr_t
pos)
const {
1757 return block_entries_[
pos / 2];
1760bool FlowGraphAllocator::IsBlockEntry(intptr_t
pos)
const {
1766 first_register_use_ = range->
first_use();
1767 first_register_beneficial_use_ = range->
first_use();
1773 while (
a !=
nullptr &&
a->end() <=
start)
1775 first_pending_use_interval_ =
a;
1776 return (first_pending_use_interval_ ==
nullptr);
1782 while (use !=
nullptr) {
1791 while ((use !=
nullptr) && (use->
pos() < after)) {
1799 use !=
nullptr; use = use->
next()) {
1800 Location* loc = use->location_slot();
1804 first_register_use_ = use;
1813 use !=
nullptr; use = use->
next()) {
1814 Location* loc = use->location_slot();
1816 first_register_beneficial_use_ = use;
1833 if ((first_register_use_ !=
nullptr) &&
1834 (first_register_use_->
pos() >= first_use_after_split_pos)) {
1835 first_register_use_ =
nullptr;
1838 if ((first_register_beneficial_use_ !=
nullptr) &&
1839 (first_register_beneficial_use_->
pos() >= first_use_after_split_pos)) {
1840 first_register_beneficial_use_ =
nullptr;
1846 if (other->
start() < this->end())
return other->
start();
1847 }
else if (this->
start() < other->
end()) {
1848 return this->
start();
1854 while (
a !=
nullptr && u !=
nullptr) {
1855 const intptr_t
pos =
a->Intersect(u);
1858 if (
a->start() < u->
start()) {
1868template <
typename PositionType>
1871 bool split_at_start) {
1872 PositionType* last_before_split =
nullptr;
1874 if (split_at_start) {
1875 while ((
pos !=
nullptr) && (
pos->pos() < split_pos)) {
1876 last_before_split =
pos;
1880 while ((
pos !=
nullptr) && (
pos->pos() <= split_pos)) {
1881 last_before_split =
pos;
1886 if (last_before_split ==
nullptr) {
1889 last_before_split->set_next(
nullptr);
1896 if (
Start() == split_pos)
return this;
1899 if (interval ==
nullptr) {
1908 if (split_pos <= interval->
start()) interval = first_use_interval_;
1911 while (interval->
end() <= split_pos) {
1912 last_before_split = interval;
1913 interval = interval->
next();
1916 const bool split_at_start = (interval->
start() == split_pos);
1919 if (!split_at_start && interval->
Contains(split_pos)) {
1922 interval->end_ = split_pos;
1923 interval->next_ = first_after_split;
1924 last_before_split = interval;
1927 ASSERT(last_before_split !=
nullptr);
1928 ASSERT(last_before_split->
next() == first_after_split);
1929 ASSERT(last_before_split->
end() <= split_pos);
1940 : last_use_interval_;
1943 first_safepoint_after_split, next_sibling_);
1946 next_sibling_->
Start(), next_sibling_->
End()));
1948 last_use_interval_ = last_before_split;
1949 last_use_interval_->next_ =
nullptr;
1951 if (first_use_after_split !=
nullptr) {
1955 return next_sibling_;
1963 range->
vreg(), range->
Start(), range->
End(), from, to));
1968 ASSERT(split_block_entry == InstructionAt(to)->GetBlock());
1989 if (loop_info ==
nullptr) {
1991 const intptr_t num_loops = loop_hierarchy.
num_loops();
1992 for (intptr_t
i = 0;
i < num_loops;
i++) {
1993 if (extra_loop_info_[
i]->start < to && to < extra_loop_info_[
i]->end) {
1995 loop_info = loop_hierarchy.
headers()[
i]->loop_info();
2000 while ((loop_info !=
nullptr) &&
2002 split_block_entry = loop_info->
header();
2003 loop_info = loop_info->
outer();
2020 ASSERT(from < split_pos);
2022 return range->
SplitAt(split_pos);
2025void FlowGraphAllocator::SpillBetween(LiveRange* range,
2030 "between [%" Pd ", %" Pd ")\n",
2031 range->vreg(), range->Start(), range->End(), from, to));
2032 LiveRange*
tail = range->SplitAt(from);
2034 if (
tail->Start() < to) {
2036 LiveRange* tail_tail = SplitBetween(
tail,
tail->Start(), to);
2038 AddToUnallocated(tail_tail);
2041 AddToUnallocated(
tail);
2045void FlowGraphAllocator::SpillAfter(LiveRange* range, intptr_t from) {
2047 range->vreg(), range->Start(), range->End(), from));
2051 LoopInfo* loop_info = BlockEntryAt(from)->
loop_info();
2052 if (loop_info !=
nullptr) {
2053 if ((range->Start() <= loop_info->header()->start_pos()) &&
2054 RangeHasOnlyUnconstrainedUsesInLoop(range, loop_info->id())) {
2055 ASSERT(loop_info->header()->start_pos() <= from);
2056 from = loop_info->header()->start_pos();
2058 THR_Print(
" moved spill position to loop header %" Pd "\n", from));
2062 LiveRange*
tail = range->SplitAt(from);
2066void FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) {
2067 ASSERT(range->spill_slot().IsInvalid());
2070 LiveRange* last_sibling = range;
2071 while (last_sibling->next_sibling() !=
nullptr) {
2072 last_sibling = last_sibling->next_sibling();
2075 const intptr_t
start = range->Start();
2076 const intptr_t
end = last_sibling->End();
2085 ((range->representation() == kUnboxedFloat32x4) ||
2086 (range->representation() == kUnboxedInt32x4) ||
2087 (range->representation() == kUnboxedFloat64x2));
2089 ((range->representation() == kUntagged));
2099 for (; idx < spill_slots_.
length(); idx++) {
2100 if ((need_quad == quad_spill_slots_[idx]) &&
2101 (need_untagged == untagged_spill_slots_[idx]) &&
2102 (spill_slots_[idx] <= start)) {
2107 while (idx > spill_slots_.
length()) {
2109 quad_spill_slots_.
Add(
false);
2110 untagged_spill_slots_.
Add(
false);
2113 if (idx == spill_slots_.
length()) {
2114 idx = spill_slots_.
length();
2117 spill_slots_.
Add(0);
2118 quad_spill_slots_.
Add(need_quad);
2119 untagged_spill_slots_.
Add(need_untagged);
2121 spill_slots_.
Add(0);
2122 quad_spill_slots_.
Add(need_quad);
2123 untagged_spill_slots_.
Add(need_untagged);
2128 spill_slots_[idx] =
end;
2130 ASSERT(quad_spill_slots_[idx] && quad_spill_slots_[idx + 1]);
2132 spill_slots_[idx] =
end;
2134 ASSERT(!quad_spill_slots_[idx]);
2139 range->representation() == kTagged ||
2140 range->representation() == kPairOfTagged ||
2141 range->representation() == kUntagged) {
2142 const intptr_t slot_index =
2149 const intptr_t slot_idx =
2155 if ((range->representation() == kUnboxedFloat32x4) ||
2156 (range->representation() == kUnboxedInt32x4) ||
2157 (range->representation() == kUnboxedFloat64x2)) {
2161 ASSERT(range->representation() == kUnboxedFloat ||
2162 range->representation() == kUnboxedDouble);
2165 range->set_spill_slot(location);
2168 spilled_.Add(range);
2171void FlowGraphAllocator::MarkAsObjectAtSafepoints(LiveRange* range) {
2172 Location spill_slot = range->spill_slot();
2173 intptr_t stack_index = spill_slot.stack_index();
2174 if (spill_slot.base_reg() ==
FPREG) {
2176 spill_slot.stack_index());
2178 ASSERT(stack_index >= 0);
2179 while (range !=
nullptr) {
2180 for (SafepointPosition* safepoint = range->first_safepoint();
2181 safepoint !=
nullptr; safepoint = safepoint->next()) {
2183 safepoint->locs()->SetStackBit(stack_index);
2185 range = range->next_sibling();
2189void FlowGraphAllocator::Spill(LiveRange* range) {
2191 if (parent->spill_slot().IsInvalid()) {
2192 AllocateSpillSlotFor(parent);
2193 if (range->representation() == kTagged) {
2194 MarkAsObjectAtSafepoints(parent);
2197 range->set_assigned_location(parent->spill_slot());
2198 ConvertAllUses(range);
2201void FlowGraphAllocator::AllocateSpillSlotForSuspendState() {
2207 quad_spill_slots_.
Add(
false);
2208 untagged_spill_slots_.
Add(
false);
2211 const intptr_t stack_index =
2219void FlowGraphAllocator::UpdateStackmapsForSuspendState() {
2224 const intptr_t stack_index =
2228 ASSERT(stack_index >= 0);
2230 for (intptr_t
i = 0, n = safepoints_.length();
i < n; ++
i) {
2231 Instruction* safepoint_instr = safepoints_[
i];
2232 safepoint_instr->locs()->SetStackBit(stack_index);
2236intptr_t FlowGraphAllocator::FirstIntersectionWithAllocated(
2238 LiveRange* unallocated) {
2240 for (intptr_t
i = 0;
i < registers_[reg]->length();
i++) {
2241 LiveRange* allocated = (*registers_[reg])[
i];
2242 if (allocated ==
nullptr)
continue;
2244 UseInterval* allocated_head =
2245 allocated->finger()->first_pending_use_interval();
2246 if (allocated_head->start() >= intersection)
continue;
2249 unallocated->finger()->first_pending_use_interval(), allocated_head);
2250 if (
pos < intersection) intersection =
pos;
2252 return intersection;
2255void ReachingDefs::AddPhi(PhiInstr* phi) {
2256 if (phi->reaching_defs() ==
nullptr) {
2257 Zone* zone = flow_graph_.
zone();
2258 phi->set_reaching_defs(
new (zone) BitVector(zone, flow_graph_.
max_vreg()));
2261 bool depends_on_phi =
false;
2262 for (intptr_t
i = 0;
i < phi->InputCount();
i++) {
2263 Definition* input = phi->InputAt(
i)->definition();
2264 if (input->IsPhi()) {
2265 depends_on_phi =
true;
2267 phi->reaching_defs()->Add(input->vreg(0));
2268 if (phi->HasPairRepresentation()) {
2269 phi->reaching_defs()->Add(input->vreg(1));
2274 if (depends_on_phi) phis_.Add(phi);
2278void ReachingDefs::Compute() {
2280 for (intptr_t
i = 0;
i < phis_.length();
i++) {
2281 PhiInstr* phi = phis_[
i];
2284 for (intptr_t
i = 0;
i < phi->InputCount();
i++) {
2285 PhiInstr* input_phi = phi->InputAt(
i)->definition()->AsPhi();
2286 if (input_phi !=
nullptr) {
2296 for (intptr_t
i = 0;
i < phis_.length();
i++) {
2297 PhiInstr* phi = phis_[
i];
2298 for (intptr_t
i = 0;
i < phi->InputCount();
i++) {
2299 PhiInstr* input_phi = phi->InputAt(
i)->definition()->AsPhi();
2300 if (input_phi !=
nullptr) {
2301 if (phi->reaching_defs()->AddAll(input_phi->reaching_defs())) {
2314 ASSERT(phis_.is_empty());
2321bool FlowGraphAllocator::AllocateFreeRegister(
LiveRange* unallocated) {
2323 intptr_t free_until = 0;
2336 if (parent_range->End() == unallocated->
Start() &&
2337 !IsBlockEntry(unallocated->
Start()) &&
2338 parent_range->assigned_location().IsMachineRegister()) {
2339 hint = parent_range->assigned_location();
2346 FirstIntersectionWithAllocated(hint.
register_code(), unallocated);
2351 hint.
Name(), unallocated->
vreg(), free_until));
2353 for (intptr_t
i = 0;
i < NumberOfRegisters(); ++
i) {
2355 if (!blocked_registers_[reg] && (registers_[reg]->
length() == 0)) {
2365 for (intptr_t
i = 0;
i < NumberOfRegisters(); ++
i) {
2367 if (blocked_registers_[reg] || (reg == candidate))
continue;
2368 const intptr_t intersection =
2369 FirstIntersectionWithAllocated(reg, unallocated);
2370 if (intersection > free_until) {
2372 free_until = intersection;
2379 if (free_until <= unallocated->Start())
return false;
2385 LoopInfo* loop_info = BlockEntryAt(unallocated->
Start())->
loop_info();
2386 if ((unallocated->
vreg() >= 0) && (loop_info !=
nullptr) &&
2387 (free_until >= extra_loop_info_[loop_info->id()]->end) &&
2388 extra_loop_info_[loop_info->id()]->backedge_interference->Contains(
2389 unallocated->
vreg())) {
2390 GrowableArray<bool> used_on_backedge(number_of_registers_);
2391 for (intptr_t
i = 0;
i < number_of_registers_;
i++) {
2392 used_on_backedge.Add(
false);
2395 for (PhiIterator it(loop_info->header()->AsJoinEntry()); !it.Done();
2397 PhiInstr* phi = it.Current();
2399 const intptr_t phi_vreg = phi->vreg(0);
2401 if (range->assigned_location().kind() == register_kind_) {
2402 const intptr_t reg = range->assigned_location().register_code();
2404 used_on_backedge[reg] =
true;
2407 if (phi->HasPairRepresentation()) {
2408 const intptr_t second_phi_vreg = phi->vreg(1);
2409 LiveRange* second_range =
GetLiveRange(second_phi_vreg);
2410 if (second_range->assigned_location().kind() == register_kind_) {
2411 const intptr_t reg =
2412 second_range->assigned_location().register_code();
2414 used_on_backedge[reg] =
true;
2420 if (used_on_backedge[candidate]) {
2422 ": has interference on the back edge"
2423 " {loop [%" Pd ", %" Pd ")}\n",
2424 MakeRegisterLocation(candidate).
Name(),
2425 unallocated->
vreg(),
2426 extra_loop_info_[loop_info->id()]->start,
2427 extra_loop_info_[loop_info->id()]->end));
2428 for (intptr_t
i = 0;
i < NumberOfRegisters(); ++
i) {
2430 if (blocked_registers_[reg] || (reg == candidate) ||
2431 used_on_backedge[reg]) {
2435 const intptr_t intersection =
2436 FirstIntersectionWithAllocated(reg, unallocated);
2437 if (intersection >= free_until) {
2439 free_until = intersection;
2441 "found %s for v%" Pd " with no interference on the back edge\n",
2442 MakeRegisterLocation(candidate).
Name(), candidate));
2452 LiveRange*
tail = unallocated->
SplitAt(free_until);
2453 AddToUnallocated(
tail);
2457 if (unallocated->
first_use() ==
nullptr) {
2458 if (unallocated->
vreg() >= 0) {
2460 if (parent->spill_slot().IsConstant()) {
2469 TRACE_ALLOC(MakeRegisterLocation(candidate).Print());
2472 registers_[candidate]->Add(unallocated);
2477bool FlowGraphAllocator::RangeHasOnlyUnconstrainedUsesInLoop(LiveRange* range,
2479 if (range->vreg() >= 0) {
2481 return parent->HasOnlyUnconstrainedUsesInLoop(loop_id);
2486bool FlowGraphAllocator::IsCheapToEvictRegisterInLoop(LoopInfo* loop_info,
2488 const intptr_t loop_start = extra_loop_info_[loop_info->id()]->start;
2489 const intptr_t loop_end = extra_loop_info_[loop_info->id()]->end;
2491 for (intptr_t
i = 0;
i < registers_[reg]->length();
i++) {
2492 LiveRange* allocated = (*registers_[reg])[
i];
2493 UseInterval* interval = allocated->finger()->first_pending_use_interval();
2494 if (interval->Contains(loop_start)) {
2495 if (!RangeHasOnlyUnconstrainedUsesInLoop(allocated, loop_info->id())) {
2498 }
else if (interval->start() < loop_end) {
2506bool FlowGraphAllocator::HasCheapEvictionCandidate(LiveRange* phi_range) {
2507 ASSERT(phi_range->is_loop_phi());
2509 BlockEntryInstr*
header = BlockEntryAt(phi_range->Start());
2514 for (intptr_t reg = 0; reg < NumberOfRegisters(); ++reg) {
2515 if (blocked_registers_[reg])
continue;
2516 if (IsCheapToEvictRegisterInLoop(
header->loop_info(), reg)) {
2524void FlowGraphAllocator::AllocateAnyRegister(LiveRange* unallocated) {
2530 UsePosition* register_use =
2531 unallocated->finger()->FirstRegisterUse(unallocated->Start());
2532 if ((register_use ==
nullptr) &&
2533 !(unallocated->is_loop_phi() && HasCheapEvictionCandidate(unallocated))) {
2539 intptr_t free_until = 0;
2542 for (
int i = 0;
i < NumberOfRegisters(); ++
i) {
2544 if (blocked_registers_[reg])
continue;
2545 if (UpdateFreeUntil(reg, unallocated, &free_until, &blocked_at)) {
2550 const intptr_t register_use_pos =
2551 (register_use !=
nullptr) ? register_use->pos() : unallocated->Start();
2552 if (free_until < register_use_pos) {
2555 SpillBetween(unallocated, unallocated->Start(), register_use->pos());
2562 TRACE_ALLOC(MakeRegisterLocation(candidate).Print());
2564 unallocated->vreg(), blocked_at));
2566 if (blocked_at < unallocated->End()) {
2570 SplitBetween(unallocated, unallocated->Start(), blocked_at + 1);
2571 AddToUnallocated(
tail);
2574 AssignNonFreeRegister(unallocated, candidate);
2577bool FlowGraphAllocator::UpdateFreeUntil(intptr_t reg,
2578 LiveRange* unallocated,
2579 intptr_t* cur_free_until,
2580 intptr_t* cur_blocked_at) {
2583 const intptr_t
start = unallocated->Start();
2585 for (intptr_t
i = 0;
i < registers_[reg]->length();
i++) {
2586 LiveRange* allocated = (*registers_[reg])[
i];
2588 UseInterval* first_pending_use_interval =
2589 allocated->finger()->first_pending_use_interval();
2590 if (first_pending_use_interval->Contains(start)) {
2592 if (allocated->vreg() < 0) {
2598 UsePosition* use = allocated->finger()->FirstInterferingUse(start);
2606 const intptr_t use_pos = (use !=
nullptr) ? use->pos() : allocated->End();
2608 if (use_pos < free_until) free_until = use_pos;
2612 first_pending_use_interval, unallocated->first_use_interval());
2614 if (intersection < free_until) free_until = intersection;
2619 if (free_until <= *cur_free_until) {
2624 ASSERT(free_until > *cur_free_until);
2625 *cur_free_until = free_until;
2626 *cur_blocked_at = blocked_at;
2630void FlowGraphAllocator::RemoveEvicted(intptr_t reg, intptr_t first_evicted) {
2631 intptr_t to = first_evicted;
2632 intptr_t from = first_evicted + 1;
2633 while (from < registers_[reg]->
length()) {
2634 LiveRange* allocated = (*registers_[reg])[from++];
2635 if (allocated !=
nullptr) (*registers_[reg])[to++] = allocated;
2637 registers_[reg]->TruncateTo(to);
2640void FlowGraphAllocator::AssignNonFreeRegister(LiveRange* unallocated,
2642 intptr_t first_evicted = -1;
2643 for (intptr_t
i = registers_[reg]->
length() - 1;
i >= 0;
i--) {
2644 LiveRange* allocated = (*registers_[reg])[
i];
2645 if (allocated->vreg() < 0)
continue;
2646 if (EvictIntersection(allocated, unallocated)) {
2648 if (allocated->assigned_location().IsMachineRegister()) {
2649 ASSERT(allocated->End() <= unallocated->Start());
2650 ConvertAllUses(allocated);
2652 (*registers_[reg])[
i] =
nullptr;
2658 if (first_evicted != -1) RemoveEvicted(reg, first_evicted);
2660 registers_[reg]->Add(unallocated);
2661 unallocated->set_assigned_location(MakeRegisterLocation(reg));
2664bool FlowGraphAllocator::EvictIntersection(LiveRange* allocated,
2665 LiveRange* unallocated) {
2666 UseInterval* first_unallocated =
2667 unallocated->finger()->first_pending_use_interval();
2669 allocated->finger()->first_pending_use_interval(), first_unallocated);
2672 const intptr_t spill_position = first_unallocated->start();
2673 UsePosition* use = allocated->finger()->FirstInterferingUse(spill_position);
2674 if (use ==
nullptr) {
2676 SpillAfter(allocated, spill_position);
2678 const intptr_t restore_position =
2679 (spill_position < intersection) ?
MinPosition(intersection, use->pos())
2682 SpillBetween(allocated, spill_position, restore_position);
2688MoveOperands* FlowGraphAllocator::AddMoveAt(intptr_t
pos,
2697 ParallelMoveInstr* parallel_move =
nullptr;
2698 Instruction* instr = InstructionAt(
pos);
2699 if (
auto entry = instr->AsFunctionEntry()) {
2709 return parallel_move->AddMove(to, from);
2712void FlowGraphAllocator::ConvertUseTo(UsePosition* use,
Location loc) {
2713 ASSERT(!loc.IsPairLocation());
2714 ASSERT(use->location_slot() !=
nullptr);
2715 Location* slot = use->location_slot();
2718 if (loc.IsConstant()) {
2725void FlowGraphAllocator::ConvertAllUses(LiveRange* range) {
2728 const Location loc = range->assigned_location();
2729 ASSERT(!loc.IsInvalid());
2732 "for v%" Pd " has been allocated to ",
2733 range->Start(), range->End(), range->vreg()));
2735 if (loc.IsConstant()) {
2740 for (UsePosition* use = range->first_use(); use !=
nullptr;
2741 use = use->next()) {
2742 ConvertUseTo(use, loc);
2747 if (loc.IsMachineRegister()) {
2748 for (SafepointPosition* safepoint = range->first_safepoint();
2749 safepoint !=
nullptr; safepoint = safepoint->next()) {
2750 if (!safepoint->locs()->always_calls()) {
2751 ASSERT(safepoint->locs()->can_call());
2752 safepoint->locs()->live_registers()->Add(loc, range->representation());
2758void FlowGraphAllocator::AdvanceActiveIntervals(
const intptr_t start) {
2759 for (intptr_t
i = 0;
i < NumberOfRegisters(); ++
i) {
2761 if (registers_[reg]->
is_empty())
continue;
2763 intptr_t first_evicted = -1;
2764 for (intptr_t
i = registers_[reg]->
length() - 1;
i >= 0;
i--) {
2765 LiveRange* range = (*registers_[reg])[
i];
2766 if (range->finger()->Advance(start)) {
2767 ConvertAllUses(range);
2768 (*registers_[reg])[
i] =
nullptr;
2773 if (first_evicted != -1) RemoveEvicted(reg, first_evicted);
2780 for (
UseInterval* interval = first_use_interval_; interval !=
nullptr;
2781 interval = interval->
next()) {
2782 if (interval->Contains(
pos)) {
2790bool FlowGraphAllocator::IsLiveAfterCatchEntry(
2795 if (raw_exception_var !=
nullptr &&
2796 param->env_index() == flow_graph_.
EnvIndex(raw_exception_var)) {
2800 if (raw_stacktrace_var !=
nullptr &&
2801 param->env_index() == flow_graph_.
EnvIndex(raw_stacktrace_var)) {
2807void FlowGraphAllocator::AssignSafepoints(Definition* defn, LiveRange* range) {
2808 for (intptr_t
i = safepoints_.length() - 1;
i >= 0;
i--) {
2809 Instruction* safepoint_instr = safepoints_[
i];
2810 if (safepoint_instr == defn) {
2820 if (
auto* catch_entry = safepoint_instr->AsCatchBlockEntry()) {
2821 if (
auto* param = defn->AsParameter()) {
2822 if ((param->
GetBlock() == catch_entry) &&
2823 IsLiveAfterCatchEntry(catch_entry, param)) {
2829 if (range->End() <=
pos)
break;
2831 if (range->Contains(
pos)) {
2832 range->AddSafepoint(
pos, safepoint_instr->locs());
2839 return a->Start() <=
b->Start();
2851 for (intptr_t
i = list->
length() - 1;
i >= 0;
i--) {
2860void FlowGraphAllocator::AddToUnallocated(LiveRange* range) {
2864void FlowGraphAllocator::CompleteRange(LiveRange* range,
Location::Kind kind) {
2880bool FlowGraphAllocator::UnallocatedIsSorted() {
2881 for (intptr_t
i = unallocated_.length() - 1;
i >= 1;
i--) {
2882 LiveRange*
a = unallocated_[
i];
2883 LiveRange*
b = unallocated_[
i - 1];
2895void FlowGraphAllocator::PrepareForAllocation(
2897 intptr_t number_of_registers,
2898 const GrowableArray<LiveRange*>& unallocated,
2899 LiveRange** blocking_ranges,
2900 bool* blocked_registers) {
2901 register_kind_ = register_kind;
2902 number_of_registers_ = number_of_registers;
2904 blocked_registers_.
Clear();
2906 for (intptr_t
i = 0;
i < number_of_registers_;
i++) {
2907 blocked_registers_.
Add(
false);
2908 registers_.Add(
new ZoneGrowableArray<LiveRange*>);
2910 ASSERT(unallocated_.is_empty());
2911 unallocated_.AddArray(unallocated);
2913 for (intptr_t
i = 0;
i < NumberOfRegisters(); ++
i) {
2915 blocked_registers_[reg] = blocked_registers[reg];
2918 LiveRange* range = blocking_ranges[reg];
2919 if (range !=
nullptr) {
2920 range->finger()->Initialize(range);
2921 registers_[reg]->Add(range);
2926void FlowGraphAllocator::AllocateUnallocatedRanges() {
2928 ASSERT(UnallocatedIsSorted());
2931 while (!unallocated_.is_empty()) {
2932 LiveRange* range = unallocated_.RemoveLast();
2933 const intptr_t
start = range->Start();
2935 "starting at %" Pd "\n",
2936 range->vreg(), start));
2939 AdvanceActiveIntervals(start);
2941 if (!AllocateFreeRegister(range)) {
2942 if (intrinsic_mode_) {
2948 AllocateAnyRegister(range);
2953 ASSERT(unallocated_.is_empty());
2960bool FlowGraphAllocator::TargetLocationIsSpillSlot(LiveRange* range,
2966 for (
LiveRange* range = parent; range !=
nullptr;
2968 if (range->CanCover(
pos)) {
2977 for (intptr_t j = 1; j < locs.
length(); j++) {
2978 if (!locs[j].
Equals(locs[0])) {
2990 if (last->IsGoto() && !pred->IsGraphEntry()) {
2992 last->AsGoto()->GetParallelMove()->AddMove(move.
dest(), move.
src());
2998void FlowGraphAllocator::ResolveControlFlow() {
3001 for (intptr_t vreg = 0; vreg < live_ranges_.length(); vreg++) {
3002 LiveRange* range = live_ranges_[vreg];
3003 if (range ==
nullptr)
continue;
3005 while (range->next_sibling() !=
nullptr) {
3006 LiveRange* sibling = range->next_sibling();
3012 TRACE_ALLOC(sibling->assigned_location().Print());
3014 if ((range->End() == sibling->Start()) &&
3015 !TargetLocationIsSpillSlot(range, sibling->assigned_location()) &&
3016 !range->assigned_location().Equals(sibling->assigned_location()) &&
3017 !IsBlockEntry(range->End())) {
3018 AddMoveAt(sibling->Start(), sibling->assigned_location(),
3019 range->assigned_location());
3031 GrowableArray<Location> src_locs(2);
3032 GrowableArray<MoveOperands> pending(10);
3033 BitVector* can_emit =
new BitVector(flow_graph_.
zone(), 10);
3034 for (intptr_t
i = 1;
i < block_order_.length();
i++) {
3035 BlockEntryInstr* block = block_order_[
i];
3037 for (BitVector::Iterator it(live); !it.Done(); it.Advance()) {
3039 if (range->next_sibling() ==
nullptr) {
3043 THR_Print(
"range v%" Pd " has no siblings\n", range->vreg()));
3047 LiveRange* dst_cover =
FindCover(range, block->start_pos());
3051 " is allocated to %s on entry to B%" Pd
3052 " covered by [%" Pd ", %" Pd ")\n",
3053 range->vreg(),
dst.ToCString(), block->block_id(),
3054 dst_cover->Start(), dst_cover->End()));
3056 if (TargetLocationIsSpillSlot(range,
dst)) {
3060 THR_Print(
" [no resolution necessary - range is spilled]\n"));
3065 for (intptr_t j = 0; j < block->PredecessorCount(); j++) {
3066 BlockEntryInstr* pred = block->PredecessorAt(j);
3067 LiveRange* src_cover =
FindCover(range, pred->end_pos() - 1);
3072 " covered by [%" Pd ", %" Pd ")\n",
3073 src.ToCString(), pred->block_id(),
3074 src_cover->Start(), src_cover->End()));
3082 if (!
dst.Equals(src_locs[0])) {
3087 pending.Add(MoveOperands(
dst, src_locs[0]));
3094 for (intptr_t j = 0; j < block->PredecessorCount(); j++) {
3095 if (
dst.Equals(src_locs[j])) {
3100 EmitMoveOnEdge(block, block->PredecessorAt(j), {dst, src_locs[j]});
3107 if (pending.length() > 0) {
3108 if (can_emit->length() < pending.length()) {
3109 can_emit =
new BitVector(flow_graph_.
zone(), pending.length());
3115 bool changed =
false;
3119 for (intptr_t j = 0; j < pending.length(); j++) {
3121 for (intptr_t
p = 0;
p < block->PredecessorCount();
p++) {
3122 BlockEntryInstr* pred = block->PredecessorAt(
p);
3124 pred->last_instruction()->AsGoto()->GetParallelMove()->moves()) {
3125 if (!move->IsRedundant() && move->dest().Equals(
src)) {
3126 can_emit->Remove(j);
3137 for (intptr_t j = 0; j < pending.length(); j++) {
3138 if (can_emit->Contains(j)) {
3139 for (intptr_t k = 0; k < pending.length(); k++) {
3140 if (!can_emit->Contains(k) &&
3141 pending[k].dest().Equals(pending[j].src())) {
3142 can_emit->Remove(j);
3153 for (intptr_t j = 0; j < pending.length(); j++) {
3154 const auto& move = pending[j];
3155 if (can_emit->Contains(j)) {
3156 block->GetParallelMove()->AddMove(move.dest(), move.src());
3158 for (intptr_t
p = 0;
p < block->PredecessorCount();
p++) {
3171 for (intptr_t
i = 0;
i < spilled_.length();
i++) {
3172 LiveRange* range = spilled_[
i];
3173 if (!range->assigned_location().Equals(range->spill_slot())) {
3174 if (range->Start() == 0) {
3180 if (block->IsFunctionEntry()) {
3181 AddMoveAt(block->start_pos() + 1, range->spill_slot(),
3182 range->assigned_location());
3186 AddMoveAt(range->Start() + 1, range->spill_slot(),
3187 range->assigned_location());
3194 if (definition_rep == kUnboxedInt64) {
3197 }
else if (definition_rep == kUnboxedUint32) {
3201 return definition_rep;
3204void FlowGraphAllocator::CollectRepresentations() {
3206 GraphEntryInstr* graph_entry = flow_graph_.
graph_entry();
3208 for (intptr_t
i = 0;
i < initial_definitions->length(); ++
i) {
3209 Definition* def = (*initial_definitions)[
i];
3210 value_representations_[def->vreg(0)] =
3212 if (def->HasPairRepresentation()) {
3213 value_representations_[def->vreg(1)] =
3218 for (
auto block : block_order_) {
3219 if (
auto entry = block->AsBlockEntryWithInitialDefs()) {
3220 initial_definitions = entry->initial_definitions();
3221 for (intptr_t
i = 0;
i < initial_definitions->length(); ++
i) {
3222 Definition* def = (*initial_definitions)[
i];
3223 value_representations_[def->vreg(0)] =
3225 if (def->HasPairRepresentation()) {
3226 value_representations_[def->vreg(1)] =
3230 }
else if (
auto join = block->AsJoinEntry()) {
3231 for (PhiIterator it(
join); !it.Done(); it.Advance()) {
3232 PhiInstr* phi = it.Current();
3233 ASSERT(phi !=
nullptr && phi->vreg(0) >= 0);
3234 value_representations_[phi->vreg(0)] =
3236 if (phi->HasPairRepresentation()) {
3237 value_representations_[phi->vreg(1)] =
3244 for (
auto instr : block->instructions()) {
3245 Definition* def = instr->AsDefinition();
3246 if ((def !=
nullptr) && (def->vreg(0) >= 0)) {
3247 const intptr_t vreg = def->vreg(0);
3248 value_representations_[vreg] =
3250 if (def->HasPairRepresentation()) {
3251 value_representations_[def->vreg(1)] =
3259void FlowGraphAllocator::RemoveFrameIfNotNeeded() {
3261 if (intrinsic_mode_) {
3270 if (!FLAG_precompiled_mode) {
3287#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM)
3288 bool has_write_barrier_call =
false;
3290 intptr_t num_calls_on_shared_slow_path = 0;
3291 for (
auto block : block_order_) {
3292 for (
auto instruction : block->instructions()) {
3293 if (instruction->HasLocs() && instruction->locs()->can_call()) {
3294 if (!instruction->locs()->call_on_shared_slow_path()) {
3301 ++num_calls_on_shared_slow_path;
3302 if (num_calls_on_shared_slow_path > 1) {
3316#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_ARM)
3317 if (
auto store_field = instruction->AsStoreField()) {
3318 if (store_field->ShouldEmitStoreBarrier()) {
3319 if (has_write_barrier_call) {
3325 has_write_barrier_call =
true;
3329 if (
auto store_indexed = instruction->AsStoreIndexed()) {
3330 if (store_indexed->ShouldEmitStoreBarrier()) {
3331 if (has_write_barrier_call) {
3337 has_write_barrier_call =
true;
3350 auto fix_location_for = [&](BlockEntryInstr* block, ParameterInstr* param,
3351 intptr_t vreg, intptr_t pair_index) {
3352 auto location = param->location();
3353 if (location.IsPairLocation()) {
3354 ASSERT(param->HasPairRepresentation());
3355 location = location.AsPairLocation()->At(pair_index);
3357 if (!location.HasStackIndex() || location.base_reg() !=
FPREG) {
3361 const auto fp_relative = location;
3362 const auto sp_relative = fp_relative.ToEntrySpRelative();
3364 for (LiveRange* range =
GetLiveRange(vreg); range !=
nullptr;
3366 if (range->assigned_location().Equals(fp_relative)) {
3367 range->set_assigned_location(sp_relative);
3368 range->set_spill_slot(sp_relative);
3369 for (UsePosition* use = range->first_use(); use !=
nullptr;
3370 use = use->next()) {
3371 ASSERT(use->location_slot()->Equals(fp_relative));
3372 *use->location_slot() = sp_relative;
3378 for (
auto block : entry->successors()) {
3379 if (FunctionEntryInstr* entry = block->AsFunctionEntry()) {
3380 for (
auto defn : *entry->initial_definitions()) {
3381 if (
auto param = defn->AsParameter()) {
3382 const auto vreg = param->vreg(0);
3383 fix_location_for(block, param, vreg, 0);
3384 if (param->HasPairRepresentation()) {
3385 fix_location_for(block, param, param->vreg(1),
3398void FlowGraphAllocator::AllocateOutgoingArguments() {
3399 const intptr_t total_spill_slot_count =
3402 for (
auto block : block_order_) {
3403 for (
auto instr : block->instructions()) {
3404 if (
auto move_arg = instr->AsMoveArgument()) {
3406 ASSERT(!move_arg->is_register_move());
3408 const intptr_t spill_index =
3409 (total_spill_slot_count - 1) - move_arg->sp_relative_index();
3410 const intptr_t slot_index =
3414 move_arg->locs()->set_out(
3415 0, (move_arg->representation() == kUnboxedDouble)
void FlowGraphAllocator::ScheduleParallelMoves() {
  ParallelMoveResolver resolver;

  for (auto block : block_order_) {
    if (block->HasParallelMove()) {
      resolver.Resolve(block->parallel_move());
    }
    for (auto instruction : block->instructions()) {
      if (auto move = instruction->AsParallelMove()) {
        resolver.Resolve(move);
      }
    }
    if (auto goto_instr = block->last_instruction()->AsGoto()) {
      if (goto_instr->HasParallelMove()) {
        resolver.Resolve(goto_instr->parallel_move());
      }
    }
  }
}
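// The resolver above serializes each ParallelMoveInstr: moves whose
// destination is not a pending source can be emitted immediately, and pure
// cycles (e.g. r0 <-> r1) are broken through a scratch location. The
// standalone toy below models locations as plain ints to show just that
// scheduling idea; it is a sketch of the general algorithm, not the VM's
// ParallelMoveResolver.
#include <cstddef>
#include <vector>

struct ToyMove {
  int src;
  int dst;
};

// Serializes a parallel move set (distinct destinations assumed) into a
// sequence that never reads a slot after it has been overwritten.
std::vector<ToyMove> SerializeParallelMoves(std::vector<ToyMove> pending,
                                            int scratch) {
  std::vector<ToyMove> out;
  while (!pending.empty()) {
    bool progress = false;
    for (size_t i = 0; i < pending.size(); ++i) {
      bool blocked = false;  // is pending[i].dst still read by another move?
      for (size_t j = 0; j < pending.size(); ++j) {
        if (j != i && pending[j].src == pending[i].dst) {
          blocked = true;
          break;
        }
      }
      if (!blocked) {
        out.push_back(pending[i]);
        pending.erase(pending.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Every remaining destination is still someone's source: a cycle.
      // Park one source in the scratch slot, retarget its readers, and
      // finish the parked move from scratch afterwards.
      ToyMove m = pending.back();
      pending.pop_back();
      out.push_back({m.src, scratch});
      for (auto& other : pending) {
        if (other.src == m.src) other.src = scratch;
      }
      pending.push_back({scratch, m.dst});
    }
  }
  return out;
}

// E.g. {{0,1},{1,0}} with scratch 9 serializes to 1->9, 0->1, 9->0.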
void FlowGraphAllocator::AllocateRegisters() {
  CollectRepresentations();

  liveness_.Analyze();

  NumberInstructions();

  // Reserve the spill slot for the :suspend_state synthetic variable before
  // reserving spill slots for parameter variables.
  AllocateSpillSlotForSuspendState();

  BuildLiveRanges();

  // Safepoints were collected while building live ranges; patch their
  // stackmaps now.
  UpdateStackmapsForSuspendState();

  if (FLAG_print_ssa_liveranges && CompilerState::ShouldTrace()) {
    const Function& function = flow_graph_.function();
    THR_Print("-- [before ssa allocator] ranges [%s] ---------\n",
              function.ToFullyQualifiedCString());
    PrintLiveRanges();
    THR_Print("----------------------------------------------\n");

    THR_Print("-- [before ssa allocator] ir [%s] -------------\n",
              function.ToFullyQualifiedCString());
    // ...
    THR_Print("----------------------------------------------\n");
  }

  PrepareForAllocation(Location::kRegister, kNumberOfCpuRegisters,
                       unallocated_cpu_, cpu_regs_, blocked_cpu_registers_);
  AllocateUnallocatedRanges();

  cpu_spill_slot_count_ = spill_slots_.length();
  spill_slots_.Clear();
  quad_spill_slots_.Clear();
  untagged_spill_slots_.Clear();

  PrepareForAllocation(Location::kFpuRegister, kNumberOfFpuRegisters,
                       unallocated_fpu_, fpu_regs_, blocked_fpu_registers_);
  AllocateUnallocatedRanges();

  GraphEntryInstr* entry = block_order_[0]->AsGraphEntry();
  ASSERT(entry != nullptr);
  intptr_t double_spill_slot_count =
      spill_slots_.length() * kDoubleSpillFactor;
  entry->set_spill_slot_count(cpu_spill_slot_count_ + double_spill_slot_count +
                              flow_graph_.max_argument_slot_count());

  RemoveFrameIfNotNeeded();

  AllocateOutgoingArguments();

  ResolveControlFlow();

  ScheduleParallelMoves();

  if (FLAG_print_ssa_liveranges && CompilerState::ShouldTrace()) {
    const Function& function = flow_graph_.function();
    THR_Print("-- [after ssa allocator] ranges [%s] ---------\n",
              function.ToFullyQualifiedCString());
    PrintLiveRanges();
    THR_Print("----------------------------------------------\n");

    THR_Print("-- [after ssa allocator] ir [%s] -------------\n",
              function.ToFullyQualifiedCString());
    // ...
    THR_Print("----------------------------------------------\n");
  }
}