// Flag: enables the dead store elimination pass (on by default).
20DEFINE_FLAG(
bool, dead_store_elimination,
true,
"Eliminate dead stores");
// Flag: enables redundant load elimination / load CSE (on by default).
21DEFINE_FLAG(
bool, load_cse,
true,
"Use redundant load elimination.");
23 optimize_lazy_initializer_calls,
25 "Eliminate redundant lazy initializer calls.");
27 trace_load_optimization,
29 "Print live sets for load optimization pass.");
57 return value->definition()->OriginalDefinition();
// Compares two instructions for equality while looking through
// redefinitions of their inputs: for most instructions two inputs are
// considered equal when they share the same OriginalDefinition().
// LoadField inputs are instead compared directly with Value::Equals.
// Instruction-specific attributes must also match (AttributesEqual).
60 static bool EqualsIgnoringRedefinitions(
const Instruction&
a,
61 const Instruction&
b) {
// Different opcodes can never be equal.
62 const auto tag =
a.tag();
63 if (tag !=
b.tag())
return false;
64 const auto input_count =
a.InputCount();
65 if (input_count !=
b.InputCount())
return false;
// Non-LoadField: inputs match when their original definitions match.
70 if (tag != Instruction::kLoadField) {
71 for (intptr_t i = 0; i < input_count; ++i) {
72 if (OriginalDefinition(
a.InputAt(i)) !=
73 OriginalDefinition(
b.InputAt(i))) {
// LoadField: compare inputs via Value::Equals instead.
// NOTE(review): the reason LoadField is special-cased is not visible
// in this view -- confirm against the full source.
78 for (intptr_t i = 0; i < input_count; ++i) {
79 if (!
a.InputAt(i)->Equals(*
b.InputAt(i)))
return false;
// Finally compare instruction-specific attributes.
82 return a.AttributesEqual(
b);
// Hash-map trait: the map stores bare instructions, so key, value and
// pair are all the same Instruction* (the map behaves as a set of
// instructions keyed by structural equality).
87 typedef Instruction*
Value;
88 typedef Instruction*
Key;
89 typedef Instruction*
Pair;
// The stored pair itself acts as both the key and the value.
91 static Key KeyOf(
Pair kv) {
return kv; }
92 static Value ValueOf(
Pair kv) {
return kv; }
96 for (intptr_t i = 0; i <
key->InputCount(); ++i) {
98 result, OriginalDefinition(
key->InputAt(i))->ssa_temp_index());
// Keys match when the instructions are equal modulo redefinitions of
// their inputs (see EqualsIgnoringRedefinitions).
103 static inline bool IsKeyEqual(
Pair kv,
Key key) {
104 return EqualsIgnoringRedefinitions(*kv, *
key);
108 DirectChainedHashMap<Trait> map_;
214 flags_(other.flags_),
215 instance_(other.instance_),
223 switch (instr->
tag()) {
224 case Instruction::kLoadField: {
234 case Instruction::kStoreField: {
237 store->RequiredInputRepresentation(StoreFieldInstr::kValuePos));
245 case Instruction::kLoadStaticField:
252 case Instruction::kStoreStaticField:
261 case Instruction::kLoadIndexed: {
280 case Instruction::kStoreIndexed: {
402 ElementSizeMultiplier(
element_size()) / ElementSizeMultiplier(to));
// Unique id assigned to this object (backed by id_).
407 intptr_t
id()
const {
return id_; }
449 if (def ==
nullptr) {
463 const char* field_name =
510 return (flags_ == other.flags_) && (instance_ == other.instance_) &&
518 return (defn !=
nullptr) && (defn->IsAllocation() ||
519 (defn->IsStaticCall() &&
520 defn->AsStaticCall()->IsRecognizedFactory()));
524 if (defn !=
nullptr) {
525 if (
auto* alloc = defn->AsAllocateObject()) {
526 auto const cid = alloc->cls().id();
538 bool SameField(
const Place& other)
const {
565 set_element_size(size);
572 const intptr_t scaled_index = index_value *
scale;
583 const bool can_be_view = instance_->
representation() == kUntagged ||
591 set_element_size(size);
630 FATAL(
"Unhandled value size for representation %s",
// Size in bytes of one element for the given ElementSize tag, computed
// as 2^(size - k1Byte): k1Byte -> 1 byte, each following tag doubles.
636 static constexpr intptr_t ElementSizeMultiplier(
ElementSize size) {
637 return 1 << (
static_cast<intptr_t
>(
size) -
static_cast<intptr_t
>(
k1Byte));
641 return offset & ~(ElementSizeMultiplier(size) - 1);
644 static intptr_t ByteOffsetToSmallerElement(
ElementSize size,
646 intptr_t base_offset) {
647 return base_offset +
index * ElementSizeMultiplier(size);
650 class KindBits :
public BitField<uword, Kind, 0, 3> {};
651 class RepresentationBits
652 :
public BitField<uword, Representation, KindBits::kNextBit, 11> {};
656 class ElementSizeBits :
public BitField<uword,
658 RepresentationBits::kNextBit,
659 kNumElementSizeBits> {};
662 Definition* instance_;
701 moves_.EnsureLength(block_num + 1,
nullptr);
703 if (moves_[block_num] ==
nullptr) {
707 moves_[block_num]->Add(
Move(from, to));
// Source index of this move (presumably a place id -- confirm).
714 intptr_t
from()
const {
return from_; }
// Destination index of this move (presumably a place id -- confirm).
715 intptr_t
to()
const {
return to_; }
726 return (block_num < moves_.length()) ? moves_[block_num] :
nullptr;
744 print_traces_(print_traces),
745 places_map_(places_map),
750 typed_data_access_sizes_(),
755 zone_, kAnyInstanceAnyIndexAlias));
756 for (intptr_t i = 0; i < places_.length(); i++) {
757 AddRepresentative(places_[i]);
763 const Place*
result = aliases_map_.LookupValue(&alias);
768 return (alias < killed_.length()) ? killed_[alias] :
nullptr;
779 return places_map_->LookupValue(place);
788 THR_Print(
"%s", places_[it.Current()]->ToCString());
796 for (intptr_t i = 0; i < identity_rollback_.length(); ++i) {
809 ComputeAliasing(alloc);
821 kAnyConstantIndexedAlias = 1,
825 kUnknownInstanceConstantIndexedAlias = 2,
829 kAnyAllocationIndexedAlias = 3,
832 kAnyInstanceAnyIndexAlias = 4
836 void AddRepresentative(Place* place) {
837 if (!place->IsImmutableField()) {
838 const Place* alias = CanonicalizeAlias(place->ToAlias());
839 EnsureSet(&representatives_, alias->id())->
Add(place->id());
845 EnsureSet(&representatives_, kAnyConstantIndexedAlias)
849 if (alias->instance() ==
nullptr) {
850 EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)
858 typed_data_access_sizes_.
Add(alias->element_size());
862 EnsureSet(&representatives_, kAnyAllocationIndexedAlias)
866 if (!IsIndependentFromEffects(place)) {
867 aliased_by_effects_->
Add(place->id());
// Builds the kill set for every alias: each alias at least kills its
// own representative places, then ComputeKillSet() adds cross-alias
// effects. Optionally prints the resulting sets when tracing.
872 void ComputeKillSets() {
873 for (intptr_t i = 0; i < aliases_.length(); ++i) {
874 const Place* alias = aliases_[i];
// An alias kills all places it directly represents.
876 AddAllRepresentatives(alias->id(), alias->id());
877 ComputeKillSet(alias);
// Trace output, guarded so it only fires when requested.
880 if (FLAG_trace_load_optimization && print_traces_) {
882 for (intptr_t i = 0; i < aliases_.length(); ++i) {
883 const Place* alias = aliases_[i];
887 if (kill !=
nullptr) {
895 void InsertAlias(
const Place* alias) {
896 aliases_map_.Insert(alias);
// Returns the canonical Place for |alias|, creating and registering a
// copy on first encounter. Freshly created aliases receive ids that
// follow the fixed special alias ids (kAnyInstanceAnyIndexAlias + n).
900 const Place* CanonicalizeAlias(
const Place& alias) {
901 const Place* canonical = aliases_map_.LookupValue(&alias);
902 if (canonical ==
nullptr) {
904 kAnyInstanceAnyIndexAlias + aliases_.length());
905 InsertAlias(canonical);
907 ASSERT(aliases_map_.LookupValue(&alias) == canonical);
911 BitVector* GetRepresentativesSet(intptr_t alias) {
912 return (alias < representatives_.length()) ? representatives_[alias]
916 BitVector* EnsureSet(GrowableArray<BitVector*>* sets, intptr_t alias) {
917 while (sets->length() <= alias) {
921 BitVector*
set = (*sets)[alias];
922 if (set ==
nullptr) {
928 void AddAllRepresentatives(
const Place* to, intptr_t from) {
929 AddAllRepresentatives(to->id(), from);
932 void AddAllRepresentatives(intptr_t to, intptr_t from) {
933 BitVector* from_set = GetRepresentativesSet(from);
934 if (from_set !=
nullptr) {
935 EnsureSet(&killed_, to)->
AddAll(from_set);
939 void CrossAlias(
const Place* to,
const Place& from) {
944 CrossAlias(to, from_id);
947 void CrossAlias(
const Place* to, intptr_t from) {
948 AddAllRepresentatives(to->id(), from);
949 AddAllRepresentatives(from, to->id());
961 void ComputeKillSet(
const Place* alias) {
962 switch (alias->kind()) {
964 if (alias->instance() ==
nullptr) {
966 AddAllRepresentatives(alias, kAnyConstantIndexedAlias);
967 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
971 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
972 AddAllRepresentatives(alias, kUnknownInstanceConstantIndexedAlias);
978 const bool has_aliased_instance =
979 (alias->instance() !=
nullptr) &&
CanBeAliased(alias->instance());
989 for (intptr_t i =
static_cast<intptr_t
>(alias->element_size()) + 1;
992 if (!typed_data_access_sizes_.
Contains(alias->element_size())) {
998 CrossAlias(alias, alias->ToLargerElement(
1002 if (has_aliased_instance) {
1009 const Place no_instance_alias = alias->CopyWithoutInstance();
1014 if (!typed_data_access_sizes_.
Contains(alias->element_size())) {
1019 if (other_size > alias->element_size()) {
1023 no_instance_alias.ToLargerElement(other_size));
1027 const auto num_smaller_elements =
1028 1 << (alias->element_size() - other_size);
1029 for (intptr_t j = 0; j < num_smaller_elements; j++) {
1031 no_instance_alias.ToSmallerElement(other_size, j));
1038 if (alias->instance() ==
nullptr) {
1040 AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
1041 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
1045 CrossAlias(alias, alias->CopyWithoutIndex());
1047 CrossAlias(alias, alias->CopyWithoutInstance());
1048 CrossAlias(alias, kAnyInstanceAnyIndexAlias);
1060 CrossAlias(alias, alias->CopyWithoutInstance());
1072 bool IsIndependentFromEffects(Place* place) {
1073 if (place->IsImmutableField()) {
1079 (place->instance() !=
nullptr) && !
CanBeAliased(place->instance());
1083 bool HasLoadsFromPlace(Definition* defn,
const Place* place) {
1086 for (
Value* use = defn->input_use_list(); use !=
nullptr;
1087 use = use->next_use()) {
1088 Instruction* instr = use->instruction();
1089 if (UseIsARedefinition(use) &&
1090 HasLoadsFromPlace(instr->Cast<Definition>(), place)) {
1093 bool is_load =
false, is_store;
1094 Place load_place(instr, &is_load, &is_store);
1096 if (is_load && load_place.Equals(*place)) {
// A use is "a redefinition" when its instruction is a definition that
// merely redefines this value, i.e. its RedefinedValue() is this use.
1106 static bool UseIsARedefinition(
Value* use) {
1107 Instruction* instr = use->instruction();
1108 return instr->IsDefinition() &&
1109 (instr->Cast<Definition>()->RedefinedValue() == use);
1114 bool AnyUseCreatesAlias(Definition* defn) {
1115 for (
Value* use = defn->input_use_list(); use !=
nullptr;
1116 use = use->next_use()) {
1117 Instruction* instr = use->instruction();
1118 if (instr->HasUnknownSideEffects() ||
1119 (instr->IsLoadField() &&
1120 instr->AsLoadField()->MayCreateUntaggedAlias()) ||
1121 (instr->IsStoreIndexed() &&
1123 instr->IsStoreStaticField() || instr->IsPhi()) {
1125 }
else if (UseIsARedefinition(use) &&
1126 AnyUseCreatesAlias(instr->Cast<Definition>())) {
1128 }
else if (instr->IsStoreField()) {
1129 StoreFieldInstr*
store = instr->AsStoreField();
1131 if (
store->slot().kind() == Slot::Kind::kTypedDataView_typed_data) {
1139 if (use->use_index() != StoreFieldInstr::kInstancePos) {
1140 ASSERT(use->use_index() == StoreFieldInstr::kValuePos);
1144 store->instance()->definition()->OriginalDefinition();
1146 !
instance->Identity().IsAliased()) {
1147 bool is_load, is_store;
1148 Place store_place(instr, &is_load, &is_store);
1150 if (!HasLoadsFromPlace(
instance, &store_place)) {
1154 if (
instance->Identity().IsUnknown()) {
1163 }
else if (
auto*
const alloc = instr->AsAllocation()) {
1166 if (alloc->Identity().IsAliased()) {
1169 Place input_place(alloc, use->use_index());
1170 if (HasLoadsFromPlace(alloc, &input_place)) {
1173 if (alloc->Identity().IsUnknown()) {
1175 aliasing_worklist_.Add(alloc);
// Marks the original definition of |d| as aliased: records it on
// identity_rollback_ so the identity change can be rolled back later,
// and queues it on the worklist so aliasing propagates to its uses.
1182 void MarkDefinitionAsAliased(Definition*
d) {
1183 auto*
const defn =
d->OriginalDefinition();
1184 if (defn->Identity().IsNotAliased()) {
1186 identity_rollback_.Add(defn);
1189 aliasing_worklist_.Add(defn);
// Propagates escaping through stores: when |defn| is an aliased
// object, values stored into it escape as well. Covers values passed
// as initializing slot inputs of the allocation itself, uses reached
// through redefinitions, and values stored via StoreField where
// |defn| is the instance.
1194 void MarkStoredValuesEscaping(Definition* defn) {
// Initializing inputs of the allocation itself.
1196 if (
auto*
const alloc = defn->AsAllocation()) {
1197 for (intptr_t i = 0; i < alloc->InputCount(); i++) {
1198 if (
auto*
const slot = alloc->SlotForInput(i)) {
1199 MarkDefinitionAsAliased(alloc->InputAt(i)->definition());
// Walk uses: follow redefinitions, and mark values stored into defn.
1204 for (
Value* use = defn->input_use_list(); use !=
nullptr;
1205 use = use->next_use()) {
1206 auto instr = use->instruction();
1207 if (UseIsARedefinition(use)) {
1208 MarkStoredValuesEscaping(instr->AsDefinition());
1211 if ((use->use_index() == StoreFieldInstr::kInstancePos) &&
1212 instr->IsStoreField()) {
1213 MarkDefinitionAsAliased(instr->AsStoreField()->value()->definition());
// Worklist fixpoint deciding whether |alloc| (and definitions it flows
// into) can be aliased; marks stored values as escaping for anything
// found to be aliased.
1219 void ComputeAliasing(Definition* alloc) {
1221 ASSERT(alloc->Identity().IsUnknown());
1222 ASSERT(aliasing_worklist_.is_empty());
1225 aliasing_worklist_.Add(alloc);
1227 while (!aliasing_worklist_.is_empty()) {
1228 Definition* defn = aliasing_worklist_.RemoveLast();
// A definition becomes aliased if any of its uses creates an alias.
1233 if (!defn->Identity().IsAliased() && AnyUseCreatesAlias(defn)) {
1235 identity_rollback_.Add(defn);
// Anything stored into an aliased object escapes as well.
1240 if (defn->Identity().IsAliased()) {
1241 MarkStoredValuesEscaping(defn);
// Whether tracing output should be printed (combined with trace flags).
1248 const bool print_traces_;
// Canonical set of places, shared with the caller (not owned).
1250 PointerSet<Place>* places_map_;
// All tracked places, indexed by place id.
1252 const ZoneGrowableArray<Place*>& places_;
// Phi-induced place moves (see PhiPlaceMoves).
1254 const PhiPlaceMoves* phi_moves_;
// All canonical aliases and the map used to canonicalize them.
1258 GrowableArray<const Place*> aliases_;
1259 PointerSet<const Place> aliases_map_;
// Element sizes seen in typed-data accesses.
1261 SmallSet<Place::ElementSize> typed_data_access_sizes_;
// Per alias id: set of place ids directly represented by that alias.
1267 GrowableArray<BitVector*> representatives_;
// Per alias id: set of place ids killed through that alias.
1270 GrowableArray<BitVector*> killed_;
// Places that are not independent from side effects.
1274 BitVector* aliased_by_effects_;
// Worklist used by ComputeAliasing().
1277 GrowableArray<Definition*> aliasing_worklist_;
// Definitions whose identity was changed during aliasing analysis,
// recorded so the change can be rolled back later -- the rollback
// code is not visible in this view; confirm against the full source.
1284 GrowableArray<Definition*> identity_rollback_;
1288 if (instr->IsStoreIndexed()) {
1289 return instr->AsStoreIndexed()->value()->definition();
1293 if (store_instance_field !=
nullptr) {
1298 if (store_static_field !=
nullptr) {
1317 bool print_traces) {
1322 for (intptr_t i = 0; i < places->
length(); i++) {
1323 Place* place = (*places)[i];
1329 if (FLAG_trace_optimization && print_traces) {
1333 Place input_place(*place);
1334 for (intptr_t j = 0; j < phi->
InputCount(); j++) {
1342 if (FLAG_trace_optimization && print_traces) {
1379 bool has_loads =
false;
1380 bool has_stores =
false;
1386 instr_it.Advance()) {
1388 Place place(instr, &has_loads, &has_stores);
1399 if (FLAG_trace_optimization && graph->
should_print()) {
1426 if (instr->IsDefinition() &&
1427 instr->AsDefinition()->MayCreateUnsafeUntaggedPointer()) {
1430 if (
auto*
load = instr->AsLoadField()) {
1431 if (
load->slot().is_weak()) {
1435 return instr->IsLoadField() || instr->IsLoadIndexed() ||
1436 instr->IsLoadStaticField();
1440 intptr_t loop_header_index,
1444 (*sets)[loop_header_index]->Contains(
GetPlaceId(instr));
1454 if (FLAG_trace_optimization && flow_graph_->
should_print()) {
1455 THR_Print(
"Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n",
1461 if (it !=
nullptr) {
1476void LICM::TrySpecializeSmiPhi(PhiInstr* phi,
1478 BlockEntryInstr* pre_header) {
1479 if (phi->Type()->ToCid() == kSmiCid) {
1487 for (intptr_t i = 0; i < phi->InputCount(); ++i) {
1488 Value* input = phi->InputAt(i);
1489 if (input->Type()->ToCid() != kSmiCid) {
1490 if ((non_smi_input != kNotFound) ||
1501 if ((non_smi_input == kNotFound) ||
1502 (phi->block()->PredecessorAt(non_smi_input) != pre_header)) {
1506 CheckSmiInstr*
check =
nullptr;
1507 for (
Value* use = phi->input_use_list();
1508 (use !=
nullptr) && (
check ==
nullptr); use = use->next_use()) {
1509 check = use->instruction()->AsCheckSmi();
1512 if (
check ==
nullptr) {
1517 Hoist(
nullptr, pre_header,
check);
1520 check->value()->BindTo(phi->InputAt(non_smi_input)->definition());
1521 check->value()->SetReachingType(phi->InputAt(non_smi_input)->Type());
1527 if (flow_graph()->
function().ProhibitsInstructionHoisting() ||
1538 for (intptr_t i = 0; i < loop_headers.
length(); ++i) {
1542 if (pre_header ==
nullptr)
continue;
1551 if (flow_graph()->
function().ProhibitsInstructionHoisting()) {
1566 for (intptr_t i = 0; i < loop_headers.
length(); ++i) {
1571 if (pre_header ==
nullptr) {
1577 bool seen_visible_effect =
false;
1582 loop_it.Advance()) {
1589 seen_visible_effect =
true;
1599 if (
load->AllowsCSE()) {
1600 seen_visible_effect =
true;
1608 bool is_loop_invariant =
false;
1612 is_loop_invariant =
true;
1613 for (intptr_t i = 0; i < current->
InputCount(); ++i) {
1616 is_loop_invariant =
false;
1625 if (is_loop_invariant) {
1626 Hoist(&it, pre_header, current);
1628 seen_visible_effect =
true;
1640 !block_it.Done(); block_it.Advance()) {
1644 instr_it.Advance()) {
1646 if (def !=
nullptr && def->IsAllocation() && def->
env() ==
nullptr &&
1649 if (use !=
nullptr && !use->IsPhi() && IsOneTimeUse(use, def)) {
1650 instr_it.RemoveCurrentFromGraph();
1669 Instruction* use = it.
Current()->instruction();
1675 auto is_dominated_by_another_use = [&](Instruction* instr) {
1676 while (instr != def) {
1677 if (uses.
HasKey(instr)) {
1681 if (
auto block = instr->AsBlockEntry()) {
1682 instr = block->dominator()->last_instruction();
1684 instr = instr->previous();
1691 Instruction* dominant_use =
nullptr;
1693 while (
auto use = use_it.Next()) {
1694 bool dominated_by_another_use =
false;
1696 if (
auto phi = (*use)->AsPhi()) {
1699 ASSERT(phi->InputCount() == phi->block()->PredecessorCount());
1700 dominated_by_another_use =
true;
1701 for (intptr_t i = 0; i < phi->InputCount(); i++) {
1702 if (phi->InputAt(i)->definition() == def) {
1703 if (!is_dominated_by_another_use(
1704 phi->block()->PredecessorAt(i)->last_instruction())) {
1705 dominated_by_another_use =
false;
1714 dominated_by_another_use =
1715 is_dominated_by_another_use((*use)->previous());
1718 if (!dominated_by_another_use) {
1719 if (dominant_use !=
nullptr) {
1724 dominant_use = *use;
1728 return dominant_use;
1731bool DelayAllocations::IsOneTimeUse(Instruction* use, Definition* def) {
1735 BlockEntryInstr* use_block = use->GetBlock();
1736 BlockEntryInstr* def_block = def->GetBlock();
1737 if (use_block == def_block)
return true;
1739 DirectChainedHashMap<IdentitySetKeyValueTrait<BlockEntryInstr*>> seen;
1740 GrowableArray<BlockEntryInstr*>
worklist;
1744 BlockEntryInstr* block =
worklist.RemoveLast();
1745 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
1746 BlockEntryInstr* pred = block->PredecessorAt(i);
1747 if (pred == use_block)
return false;
1748 if (pred == def_block)
continue;
1749 if (seen.HasKey(pred))
continue;
1761 aliased_set_(aliased_set),
1762 in_(graph_->preorder().
length()),
1763 out_(graph_->preorder().
length()),
1764 gen_(graph_->preorder().
length()),
1765 kill_(graph_->preorder().
length()),
1766 exposed_values_(graph_->preorder().
length()),
1767 out_values_(graph_->preorder().
length()),
1770 congruency_worklist_(6),
1771 in_worklist_(nullptr),
1773 const intptr_t num_blocks = graph_->
preorder().length();
1774 for (intptr_t i = 0; i < num_blocks; i++) {
1780 exposed_values_.Add(
nullptr);
1781 out_values_.Add(
nullptr);
1800 if ((aliased_set !=
nullptr) && !aliased_set->
IsEmpty()) {
1808 return load_optimizer.Optimize();
1817 OptimizeLazyInitialization();
1819 ComputeInitialSets();
1823 MarkLoopInvariantLoads();
// Returns true if |instr| is a LoadField or LoadStaticField that may
// call a lazy initializer (its calls_initializer() bit is set).
1830 bool CallsInitializer(Instruction* instr) {
1831 if (
auto* load_field = instr->AsLoadField()) {
1832 return load_field->calls_initializer();
1833 }
else if (
auto* load_static = instr->AsLoadStaticField()) {
1834 return load_static->calls_initializer();
// Clears the calls_initializer() bit on a LoadField / LoadStaticField,
// turning the instruction into a plain load once the initializer call
// has been proven redundant.
1839 void ClearCallsInitializer(Instruction* instr) {
1840 if (
auto* load_field = instr->AsLoadField()) {
1841 load_field->set_calls_initializer(
false);
1842 }
else if (
auto* load_static = instr->AsLoadStaticField()) {
1843 load_static->set_calls_initializer(
false);
1849 bool CanForwardLoadTo(Definition*
load, Definition* replacement) {
1852 return !(CallsInitializer(
load) && replacement->Type()->can_be_sentinel());
// Returns true if |instr| stores the sentinel constant into a field or
// a static field. Callers treat such a store as not initializing the
// place for the lazy-initializer optimization.
1857 bool IsSentinelStore(Instruction* instr) {
1859 if (
auto* store_field = instr->AsStoreField()) {
1860 value = store_field->value();
1861 }
else if (
auto* store_static = instr->AsStoreStaticField()) {
1862 value = store_static->value();
// Store counts as a sentinel store only if the stored value is a
// constant identical to Object::sentinel().
1864 return value !=
nullptr &&
value->BindsToConstant() &&
1865 (
value->BoundConstant().ptr() == Object::sentinel().ptr());
1871 void OptimizeLazyInitialization() {
1872 if (!FLAG_optimize_lazy_initializer_calls) {
1879 bool has_lazy_initializer_calls =
false;
1881 !block_it.Done(); block_it.Advance()) {
1882 BlockEntryInstr* block = block_it.Current();
1883 BitVector*
gen = gen_[block->preorder_number()];
1885 for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
1886 instr_it.Advance()) {
1887 Instruction* instr = instr_it.Current();
1889 bool is_load =
false, is_store =
false;
1890 Place place(instr, &is_load, &is_store);
1892 if (is_store && !IsSentinelStore(instr)) {
1894 }
else if (is_load) {
1896 if (CallsInitializer(instr)) {
1897 if (
gen->Contains(place_id)) {
1898 ClearCallsInitializer(instr);
1900 has_lazy_initializer_calls =
true;
1910 if (phi_moves !=
nullptr) {
1911 for (intptr_t i = 0, n = phi_moves->length(); i < n; ++i) {
1912 const intptr_t from = (*phi_moves)[i].from();
1913 const intptr_t to = (*phi_moves)[i].to();
1914 if ((from != to) &&
gen->Contains(from)) {
1921 if (has_lazy_initializer_calls) {
1924 BitVector* temp =
new (
Z) BitVector(
Z, aliased_set_->
max_place_id());
1925 bool changed =
true;
1930 !block_it.
Done(); block_it.Advance()) {
1931 BlockEntryInstr* block = block_it.Current();
1932 BitVector* block_in = in_[block->preorder_number()];
1933 BitVector*
gen = gen_[block->preorder_number()];
1937 if (block->IsGraphEntry()) {
1941 ASSERT(block->PredecessorCount() > 0);
1942 for (intptr_t i = 0, pred_count = block->PredecessorCount();
1943 i < pred_count; ++i) {
1944 BlockEntryInstr* pred = block->PredecessorAt(i);
1945 BitVector* pred_out = gen_[pred->preorder_number()];
1946 temp->Intersect(pred_out);
1950 if (!temp->Equals(*block_in)) {
1951 ASSERT(block_in->SubsetOf(*temp));
1952 block_in->AddAll(temp);
1963 !block_it.Done(); block_it.Advance()) {
1964 BlockEntryInstr* block = block_it.Current();
1965 BitVector* block_in = in_[block->preorder_number()];
1967 for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
1968 instr_it.Advance()) {
1969 Instruction* instr = instr_it.Current();
1970 if (CallsInitializer(instr) &&
1972 ClearCallsInitializer(instr);
1979 for (intptr_t i = 0, n = graph_->
preorder().length(); i < n; ++i) {
1988 bool CanForwardStore(StoreIndexedInstr* array_store) {
1989 if (array_store ==
nullptr)
return true;
1991 array_store->class_id());
1995 static bool AlreadyPinnedByRedefinition(Definition* replacement,
1996 Definition* redefinition) {
1997 Definition* defn = replacement;
1998 if (
auto load_field = replacement->AsLoadField()) {
1999 defn = load_field->instance()->definition();
2000 }
else if (
auto load_indexed = replacement->AsLoadIndexed()) {
2001 defn = load_indexed->array()->definition();
2005 while ((unwrapped = defn->RedefinedValue()) !=
nullptr) {
2006 if (defn == redefinition) {
2009 defn = unwrapped->definition();
2015 Definition* ReplaceLoad(Definition*
load, Definition* replacement) {
2020 Definition* redef =
nullptr;
2021 if (
auto load_field =
load->AsLoadField()) {
2022 auto instance = load_field->instance()->definition();
2023 if (
instance->RedefinedValue() !=
nullptr) {
2024 if ((load_field->slot().kind() ==
2025 Slot::Kind::kGrowableObjectArray_data ||
2026 (load_field->slot().IsDartField() &&
2028 .IsInstantiated()))) {
2032 }
else if (
auto load_indexed =
load->AsLoadIndexed()) {
2033 if (load_indexed->class_id() == kArrayCid ||
2034 load_indexed->class_id() == kImmutableArrayCid) {
2035 auto instance = load_indexed->array()->definition();
2036 if (
instance->RedefinedValue() !=
nullptr) {
2041 if (redef !=
nullptr && !AlreadyPinnedByRedefinition(replacement, redef)) {
2045 auto replacement_redefinition =
2046 new (
zone()) RedefinitionInstr(
new (
zone())
Value(replacement));
2047 if (redef->IsDominatedBy(replacement)) {
2048 graph_->
InsertAfter(redef, replacement_redefinition,
nullptr,
2054 replacement = replacement_redefinition;
2057 load->ReplaceUsesWith(replacement);
2067 void ComputeInitialSets() {
2069 !block_it.Done(); block_it.Advance()) {
2070 BlockEntryInstr* block = block_it.Current();
2071 const intptr_t preorder_number = block->preorder_number();
2073 BitVector* kill = kill_[preorder_number];
2074 BitVector*
gen = gen_[preorder_number];
2076 ZoneGrowableArray<Definition*>* exposed_values =
nullptr;
2077 ZoneGrowableArray<Definition*>* out_values =
nullptr;
2079 for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
2080 instr_it.Advance()) {
2081 Instruction* instr = instr_it.Current();
2083 bool is_load =
false, is_store =
false;
2084 Place place(instr, &is_load, &is_store);
2086 BitVector* killed =
nullptr;
2088 const intptr_t alias_id =
2092 }
else if (!place.IsImmutableField()) {
2116 Place* canonical_place =
nullptr;
2117 if (CanForwardStore(instr->AsStoreIndexed())) {
2119 if (canonical_place !=
nullptr) {
2122 const intptr_t place_id = canonical_place->
id();
2123 if (
gen->Contains(place_id)) {
2124 ASSERT((out_values !=
nullptr) &&
2125 ((*out_values)[place_id] !=
nullptr));
2127 if (FLAG_trace_optimization && graph_->
should_print()) {
2128 THR_Print(
"Removing redundant store to place %" Pd
2129 " in block B%" Pd "\n",
2132 instr_it.RemoveCurrentFromGraph();
2140 if (killed !=
nullptr) {
2141 kill->AddAll(killed);
2145 gen->RemoveAll(killed);
2147 if (canonical_place !=
nullptr) {
2150 gen->Add(canonical_place->id());
2151 if (out_values ==
nullptr) out_values = CreateBlockOutValues();
2155 ASSERT(!instr->IsDefinition() ||
2158 }
else if (is_load) {
2162 if ((canonical !=
nullptr) &&
2163 (canonical->id() !=
GetPlaceId(instr->AsDefinition()))) {
2164 SetPlaceId(instr->AsDefinition(), canonical->id());
2169 if (instr->HasUnknownSideEffects()) {
2177 Definition* defn = instr->AsDefinition();
2178 if (defn ==
nullptr) {
2182 if (
auto*
const alloc = instr->AsAllocation()) {
2183 if (!alloc->ObjectIsInitialized()) {
2188 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
2189 use = use->next_use()) {
2190 if (use->use_index() != 0) {
2195 intptr_t place_id = -1;
2196 Definition* forward_def =
nullptr;
2197 const Slot* slot =
nullptr;
2198 if (
auto*
const load = use->instruction()->AsLoadField()) {
2200 slot = &
load->slot();
2201 }
else if (
auto*
const store = use->instruction()->AsStoreField()) {
2202 ASSERT(!alloc->IsArrayAllocation());
2204 slot = &
store->slot();
2205 }
else if (use->instruction()->IsLoadIndexed() ||
2206 use->instruction()->IsStoreIndexed()) {
2207 if (!alloc->IsArrayAllocation()) {
2212 if (alloc->IsAllocateTypedData()) {
2221 if (aliased_set_->
places()[place_id]->kind() !=
2233 if (slot !=
nullptr) {
2234 ASSERT(forward_def ==
nullptr);
2242 if (aliased_set_->
CanBeAliased(alloc) && slot->IsDartField() &&
2243 slot->is_immutable()) {
2247 const intptr_t
pos = alloc->InputForSlot(*slot);
2249 forward_def = alloc->InputAt(
pos)->definition();
2250 }
else if (!slot->is_tagged()) {
2262 ASSERT(forward_def !=
nullptr);
2264 if (out_values ==
nullptr) out_values = CreateBlockOutValues();
2265 (*out_values)[place_id] = forward_def;
2275 if (
gen->Contains(place_id)) {
2277 ASSERT((out_values !=
nullptr) &&
2278 ((*out_values)[place_id] !=
nullptr));
2280 Definition* replacement = (*out_values)[place_id];
2281 if (CanForwardLoadTo(defn, replacement)) {
2283 if (FLAG_trace_optimization && graph_->
should_print()) {
2285 defn->ssa_temp_index(), replacement->ssa_temp_index());
2288 ReplaceLoad(defn, replacement);
2289 instr_it.RemoveCurrentFromGraph();
2293 }
else if (!kill->Contains(place_id)) {
2297 if (exposed_values ==
nullptr) {
2298 const intptr_t kMaxExposedValuesInitialSize = 5;
2299 exposed_values =
new (
Z) ZoneGrowableArray<Definition*>(
2304 exposed_values->Add(defn);
2309 if (out_values ==
nullptr) out_values = CreateBlockOutValues();
2310 (*out_values)[place_id] = defn;
2313 exposed_values_[preorder_number] = exposed_values;
2314 out_values_[preorder_number] = out_values;
2320 BitVector* forwarded_loads) {
2321 forwarded_loads->Clear();
2323 for (intptr_t i = 0; i < phi_moves->length(); i++) {
2324 const intptr_t from = (*phi_moves)[i].from();
2325 const intptr_t to = (*phi_moves)[i].to();
2326 if (from == to)
continue;
2328 if (
out->Contains(from)) {
2329 forwarded_loads->Add(to);
2333 for (intptr_t i = 0; i < phi_moves->length(); i++) {
2334 const intptr_t from = (*phi_moves)[i].from();
2335 const intptr_t to = (*phi_moves)[i].to();
2336 if (from == to)
continue;
2341 out->AddAll(forwarded_loads);
2346 void ComputeOutSets() {
2347 BitVector* temp =
new (
Z) BitVector(
Z, aliased_set_->
max_place_id());
2348 BitVector* forwarded_loads =
2350 BitVector* temp_out =
new (
Z) BitVector(
Z, aliased_set_->
max_place_id());
2352 bool changed =
true;
2357 !block_it.
Done(); block_it.Advance()) {
2358 BlockEntryInstr* block = block_it.Current();
2360 const intptr_t preorder_number = block->preorder_number();
2362 BitVector* block_in = in_[preorder_number];
2363 BitVector* block_out = out_[preorder_number];
2364 BitVector* block_kill = kill_[preorder_number];
2365 BitVector* block_gen = gen_[preorder_number];
2369 if (block->IsGraphEntry()) {
2373 ASSERT(block->PredecessorCount() > 0);
2374 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
2375 BlockEntryInstr* pred = block->PredecessorAt(i);
2376 BitVector* pred_out = out_[pred->preorder_number()];
2377 if (pred_out ==
nullptr)
continue;
2380 if (phi_moves !=
nullptr) {
2383 temp_out->CopyFrom(pred_out);
2384 PerformPhiMoves(phi_moves, temp_out, forwarded_loads);
2385 pred_out = temp_out;
2387 temp->Intersect(pred_out);
2391 if (!temp->Equals(*block_in) || (block_out ==
nullptr)) {
2393 block_in->CopyFrom(temp);
2395 temp->RemoveAll(block_kill);
2396 temp->AddAll(block_gen);
2398 if ((block_out ==
nullptr) || !block_out->Equals(*temp)) {
2399 if (block_out ==
nullptr) {
2400 block_out = out_[preorder_number] =
2403 block_out->CopyFrom(temp);
2419 void ComputeOutValues() {
2420 GrowableArray<PhiInstr*> pending_phis(5);
2421 ZoneGrowableArray<Definition*>* temp_forwarded_values =
nullptr;
2424 !block_it.Done(); block_it.Advance()) {
2425 BlockEntryInstr* block = block_it.Current();
2427 const bool can_merge_eagerly = CanMergeEagerly(block);
2429 const intptr_t preorder_number = block->preorder_number();
2431 ZoneGrowableArray<Definition*>* block_out_values =
2432 out_values_[preorder_number];
2436 for (BitVector::Iterator it(out_[preorder_number]); !it.
Done();
2438 const intptr_t place_id = it.
Current();
2440 if (block_out_values ==
nullptr) {
2441 out_values_[preorder_number] = block_out_values =
2442 CreateBlockOutValues();
2445 if ((*block_out_values)[place_id] ==
nullptr) {
2446 ASSERT(block->PredecessorCount() > 0);
2447 Definition* in_value = can_merge_eagerly
2448 ? MergeIncomingValues(block, place_id)
2450 if ((in_value ==
nullptr) &&
2451 (in_[preorder_number]->
Contains(place_id))) {
2452 PhiInstr* phi =
new (
Z)
2453 PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
2455 pending_phis.Add(phi);
2458 (*block_out_values)[place_id] = in_value;
2466 if ((phi_moves !=
nullptr) && (block_out_values !=
nullptr)) {
2467 if (temp_forwarded_values ==
nullptr) {
2468 temp_forwarded_values = CreateBlockOutValues();
2471 for (intptr_t i = 0; i < phi_moves->length(); i++) {
2472 const intptr_t from = (*phi_moves)[i].from();
2473 const intptr_t to = (*phi_moves)[i].to();
2474 if (from == to)
continue;
2476 (*temp_forwarded_values)[to] = (*block_out_values)[from];
2479 for (intptr_t i = 0; i < phi_moves->length(); i++) {
2480 const intptr_t from = (*phi_moves)[i].from();
2481 const intptr_t to = (*phi_moves)[i].to();
2482 if (from == to)
continue;
2484 (*block_out_values)[to] = (*temp_forwarded_values)[to];
2488 if (FLAG_trace_load_optimization && graph_->
should_print()) {
2491 aliased_set_->
PrintSet(in_[preorder_number]);
2495 aliased_set_->
PrintSet(kill_[preorder_number]);
2499 aliased_set_->
PrintSet(out_[preorder_number]);
2506 for (intptr_t i = 0; i < pending_phis.length(); i++) {
2507 FillPhiInputs(pending_phis[i]);
// Whether incoming values can be merged at |block| immediately. A
// predecessor with a smaller postorder number comes later in the
// reverse-postorder walk and so has not been processed yet (a back
// edge); merging must then be deferred. NOTE(review): the elided
// lines presumably return false/true accordingly -- confirm against
// the full source.
2511 bool CanMergeEagerly(BlockEntryInstr* block) {
2512 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
2513 BlockEntryInstr* pred = block->PredecessorAt(i);
2514 if (pred->postorder_number() < block->postorder_number()) {
2521 void MarkLoopInvariantLoads() {
2522 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers =
2525 ZoneGrowableArray<BitVector*>* invariant_loads =
2526 new (
Z) ZoneGrowableArray<BitVector*>(loop_headers.length());
2528 for (intptr_t i = 0; i < loop_headers.length(); i++) {
2529 BlockEntryInstr*
header = loop_headers[i];
2530 BlockEntryInstr* pre_header =
header->ImmediateDominator();
2531 if (pre_header ==
nullptr) {
2532 invariant_loads->Add(
nullptr);
2536 BitVector* loop_gen =
new (
Z) BitVector(
Z, aliased_set_->
max_place_id());
2537 for (BitVector::Iterator loop_it(
header->loop_info()->blocks());
2538 !loop_it.Done(); loop_it.Advance()) {
2539 const intptr_t preorder_number = loop_it.Current();
2540 loop_gen->AddAll(gen_[preorder_number]);
2543 for (BitVector::Iterator loop_it(
header->loop_info()->blocks());
2544 !loop_it.Done(); loop_it.Advance()) {
2545 const intptr_t preorder_number = loop_it.Current();
2546 loop_gen->RemoveAll(kill_[preorder_number]);
2549 if (FLAG_trace_optimization && graph_->
should_print()) {
2550 for (BitVector::Iterator it(loop_gen); !it.
Done(); it.
Advance()) {
2551 THR_Print(
"place %s is loop invariant for B%" Pd "\n",
2557 invariant_loads->Add(loop_gen);
2566 Definition* MergeIncomingValues(BlockEntryInstr* block, intptr_t place_id) {
2568 static Definition*
const kDifferentValuesMarker =
2569 reinterpret_cast<Definition*
>(-1);
2570 Definition* incoming =
nullptr;
2571 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
2572 BlockEntryInstr* pred = block->PredecessorAt(i);
2573 ZoneGrowableArray<Definition*>* pred_out_values =
2574 out_values_[pred->preorder_number()];
2575 if ((pred_out_values ==
nullptr) ||
2576 ((*pred_out_values)[place_id] ==
nullptr)) {
2578 }
else if (incoming ==
nullptr) {
2579 incoming = (*pred_out_values)[place_id];
2580 }
else if (incoming != (*pred_out_values)[place_id]) {
2581 incoming = kDifferentValuesMarker;
2585 if (incoming != kDifferentValuesMarker) {
2586 ASSERT(incoming !=
nullptr);
2592 new (
Z) PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
2598 void FillPhiInputs(PhiInstr* phi) {
2599 BlockEntryInstr* block = phi->GetBlock();
2602 for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
2603 BlockEntryInstr* pred = block->PredecessorAt(i);
2604 ZoneGrowableArray<Definition*>* pred_out_values =
2605 out_values_[pred->preorder_number()];
2606 ASSERT((*pred_out_values)[place_id] !=
nullptr);
2613 Definition* replacement = (*pred_out_values)[place_id]->Replacement();
2615 phi->SetInputAt(i, input);
2616 replacement->AddInputUse(input);
2618 if (replacement->representation() == kUntagged) {
2619 phi->set_representation(kUntagged);
2628 THR_Print(
"created pending phi %s for %s at B%" Pd "\n", phi->ToCString(),
2629 aliased_set_->
places()[place_id]->ToCString(),
2636 void ForwardLoads() {
2638 !block_it.Done(); block_it.Advance()) {
2639 BlockEntryInstr* block = block_it.Current();
2641 ZoneGrowableArray<Definition*>* loads =
2642 exposed_values_[block->preorder_number()];
2643 if (loads ==
nullptr)
continue;
2645 BitVector* in = in_[block->preorder_number()];
2647 for (intptr_t i = 0; i < loads->length(); i++) {
2648 Definition*
load = (*loads)[i];
2651 Definition* replacement = MergeIncomingValues(block,
GetPlaceId(
load));
2652 ASSERT(replacement !=
nullptr);
2659 replacement = replacement->Replacement();
2661 if ((
load != replacement) && CanForwardLoadTo(
load, replacement)) {
2664 if (FLAG_trace_optimization && graph_->
should_print()) {
2666 load->ssa_temp_index(), replacement->ssa_temp_index());
2669 replacement = ReplaceLoad(
load, replacement);
2670 load->RemoveFromGraph();
2671 load->SetReplacement(replacement);
2684 bool EliminateRedundantPhi(PhiInstr* phi) {
2685 Definition*
value =
nullptr;
2688 if (in_worklist_ ==
nullptr) {
2691 in_worklist_->
Clear();
2695 in_worklist_->
Add(phi->ssa_temp_index());
2697 for (intptr_t i = 0; i < worklist_.length(); i++) {
2698 PhiInstr* phi = worklist_[i];
2700 for (intptr_t i = 0; i < phi->InputCount(); i++) {
2701 Definition* input = phi->InputAt(i)->definition();
2702 if (input == phi)
continue;
2704 PhiInstr* phi_input = input->AsPhi();
2705 if ((phi_input !=
nullptr) && !phi_input->is_alive()) {
2706 if (!in_worklist_->
Contains(phi_input->ssa_temp_index())) {
2707 worklist_.Add(phi_input);
2708 in_worklist_->
Add(phi_input->ssa_temp_index());
2713 if (value ==
nullptr) {
2715 }
else if (value != input) {
2723 ASSERT(value !=
nullptr);
2724 for (intptr_t i = 0; i < worklist_.length(); i++) {
2725 worklist_[i]->ReplaceUsesWith(value);
2733 bool CanBeCongruent(Definition*
a, Definition*
b) {
2734 return (
a->tag() ==
b->tag()) &&
2735 ((
a->IsPhi() && (
a->GetBlock() ==
b->GetBlock())) ||
2736 (
a->AllowsCSE() &&
a->AttributesEqual(*
b)));
2744 bool AddPairToCongruencyWorklist(Definition*
a, Definition*
b) {
2745 if (!CanBeCongruent(
a,
b)) {
2751 if (in_worklist_->
Contains(
a->ssa_temp_index())) {
2752 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
2753 if (
a == congruency_worklist_[i]) {
2754 return (
b == congruency_worklist_[i + 1]);
2758 }
else if (in_worklist_->
Contains(
b->ssa_temp_index())) {
2759 return AddPairToCongruencyWorklist(
b,
a);
2762 congruency_worklist_.Add(
a);
2763 congruency_worklist_.Add(
b);
2764 in_worklist_->
Add(
a->ssa_temp_index());
2768 bool AreInputsCongruent(Definition*
a, Definition*
b) {
2770 ASSERT(
a->InputCount() ==
b->InputCount());
2771 for (intptr_t j = 0; j <
a->InputCount(); j++) {
2772 Definition* inputA =
a->InputAt(j)->definition();
2773 Definition* inputB =
b->InputAt(j)->definition();
2775 if (inputA != inputB) {
2776 if (!AddPairToCongruencyWorklist(inputA, inputB)) {
2785 static bool Dominates(Instruction*
dom, Instruction* other) {
2786 BlockEntryInstr* dom_block =
dom->GetBlock();
2787 BlockEntryInstr* other_block = other->GetBlock();
2789 if (dom_block == other_block) {
2790 for (Instruction* current =
dom->next(); current !=
nullptr;
2791 current = current->
next()) {
2792 if (current == other) {
2799 return dom_block->Dominates(other_block);
2804 bool ReplacePhiWith(PhiInstr* phi, PhiInstr* replacement) {
2805 ASSERT(phi->InputCount() == replacement->InputCount());
2806 ASSERT(phi->block() == replacement->block());
2808 congruency_worklist_.Clear();
2809 if (in_worklist_ ==
nullptr) {
2812 in_worklist_->
Clear();
2817 if (!AddPairToCongruencyWorklist(phi, replacement)) {
2822 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
2823 if (!AreInputsCongruent(congruency_worklist_[i],
2824 congruency_worklist_[i + 1])) {
2832 for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
2833 Definition*
a = congruency_worklist_[i];
2834 Definition*
b = congruency_worklist_[i + 1];
2842 if (Dominates(
a,
b)) {
2852 THR_Print(
"Replacing %s with congruent %s\n",
a->ToCString(),
2856 a->ReplaceUsesWith(
b);
2861 PhiInstr* phi_a =
a->AsPhi();
2862 if (phi_a->is_alive()) {
2864 phi_a->block()->RemovePhi(phi_a);
2865 phi_a->UnuseAllInputs();
2868 a->RemoveFromGraph();
2878 bool EmitPhi(PhiInstr* phi) {
2879 for (PhiIterator it(phi->block()); !it.
Done(); it.
Advance()) {
2880 if (ReplacePhiWith(phi, it.
Current())) {
2886 phi->block()->InsertPhi(phi);
2895 for (intptr_t i = 0; i < phis_.length(); i++) {
2896 PhiInstr* phi = phis_[i];
2897 if (!phi->HasUses() || EliminateRedundantPhi(phi)) {
2898 phi->UnuseAllInputs();
2905 for (intptr_t i = 0; i < phis_.length(); i++) {
2906 PhiInstr* phi = phis_[i];
2907 if ((phi !=
nullptr) && (!phi->HasUses() || !EmitPhi(phi))) {
2908 phi->UnuseAllInputs();
2913 ZoneGrowableArray<Definition*>* CreateBlockOutValues() {
2914 ZoneGrowableArray<Definition*>*
out =
2915 new (
Z) ZoneGrowableArray<Definition*>(aliased_set_->
max_place_id());
2916 for (intptr_t i = 0; i < aliased_set_->
max_place_id(); i++) {
2923 PointerSet<Place>* map_;
2927 AliasedSet* aliased_set_;
2931 GrowableArray<BitVector*> in_;
2932 GrowableArray<BitVector*> out_;
2933 GrowableArray<BitVector*> gen_;
2934 GrowableArray<BitVector*> kill_;
2937 GrowableArray<ZoneGrowableArray<Definition*>*> exposed_values_;
2941 GrowableArray<ZoneGrowableArray<Definition*>*> out_values_;
2946 GrowableArray<PhiInstr*> phis_;
2949 GrowableArray<PhiInstr*> worklist_;
2950 GrowableArray<Definition*> congruency_worklist_;
2951 BitVector* in_worklist_;
2960 bool run_load_optimization ) {
2961 bool changed =
false;
2962 if (FLAG_load_cse && run_load_optimization) {
2967 changed = OptimizeRecursive(graph, graph->
graph_entry(), &map) || changed;
2972bool DominatorBasedCSE::OptimizeRecursive(
FlowGraph* graph,
2975 bool changed =
false;
2981 if (replacement !=
nullptr) {
2989 map->Insert(current);
2995 if (num_children != 0) {
2998 for (intptr_t i = 0; i < num_children; ++i) {
3000 if (i < num_children - 1) {
3002 CSEInstructionSet child_map(*map);
3003 changed = OptimizeRecursive(graph, child, &child_map) || changed;
3006 changed = OptimizeRecursive(graph, child, map) || changed;
3020 aliased_set_(aliased_set),
3021 exposed_stores_(graph_->postorder().
length()) {
3022 const intptr_t num_blocks = graph_->
postorder().length();
3023 for (intptr_t i = 0; i < num_blocks; i++) {
3024 exposed_stores_.Add(
nullptr);
3039 if ((aliased_set !=
nullptr) && !aliased_set->
IsEmpty()) {
3041 store_optimizer.Optimize();
3048 if (FLAG_trace_load_optimization && graph_->
should_print()) {
3051 EliminateDeadStores();
3054 bool CanEliminateStore(Instruction* instr) {
3062 switch (instr->tag()) {
3063 case Instruction::kStoreField: {
3064 StoreFieldInstr* store_instance = instr->AsStoreField();
3066 return !store_instance->is_initialization();
3068 case Instruction::kStoreIndexed:
3069 case Instruction::kStoreStaticField:
3086 const auto& places = aliased_set_->
places();
3091 for (intptr_t i = 0; i < places.length(); i++) {
3092 Place* place = places[i];
3096 !
instance->Identity().IsAliased()) {
3100 all_aliased_places->
Add(i);
3105 !block_it.
Done(); block_it.Advance()) {
3117 instr_it.Advance()) {
3120 bool is_load =
false;
3121 bool is_store =
false;
3122 Place place(instr, &is_load, &is_store);
3132 CanEliminateStore(instr)) {
3133 if (FLAG_trace_optimization && graph_->
should_print()) {
3134 THR_Print(
"Removing dead store to place %" Pd " in block B%" Pd
3138 instr_it.RemoveCurrentFromGraph();
3143 if (exposed_stores ==
nullptr) {
3144 const intptr_t kMaxExposedStoresInitialSize = 5;
3149 exposed_stores->
Add(instr);
3157 if (instr->IsThrow() || instr->IsReThrow() || instr->IsReturnBase()) {
3174 live_in->
AddAll(all_aliased_places);
3192 instr->
MayThrow() || instr->IsReturnBase()) {
3209 exposed_stores_[postorder_number] = exposed_stores;
3211 if (FLAG_trace_load_optimization && graph_->
should_print()) {
3217 void EliminateDeadStores() {
3220 !block_it.Done(); block_it.Advance()) {
3227 exposed_stores_[postorder_number];
3228 if (exposed_stores ==
nullptr)
continue;
3231 for (intptr_t i = 0; i < exposed_stores->
length(); ++i) {
3233 bool is_load =
false;
3234 bool is_store =
false;
3235 Place place(instr, &is_load, &is_store);
3236 ASSERT(!is_load && is_store);
3237 if (place.IsImmutableField()) {
3244 CanEliminateStore(instr)) {
3245 if (FLAG_trace_optimization && graph_->
should_print()) {
3246 THR_Print(
"Removing dead store to place %" Pd " block B%" Pd "\n",
3256 PointerSet<Place>* map_;
3260 AliasedSet* aliased_set_;
3263 GrowableArray<ZoneGrowableArray<Instruction*>*> exposed_stores_;
3269 if (FLAG_dead_store_elimination && FLAG_load_cse) {
3280 const intptr_t kMaxAllocationSinkingNumElements = 32;
3285 return (
length >= 0) && (
length <= kMaxAllocationSinkingNumElements);
3291 return instr->IsAllocation() &&
3292 (!instr->IsArrayAllocation() ||
3318bool AllocationSinking::IsSafeUse(
Value* use, SafeUseCheck check_type) {
3321 if (use->instruction()->IsMaterializeObject()) {
3325 if (
auto*
const alloc = use->instruction()->AsAllocation()) {
3327 ((check_type == kOptimisticCheck) ||
3328 alloc->Identity().IsAllocationSinkingCandidate());
3331 if (
auto*
store = use->instruction()->AsStoreField()) {
3332 if (use ==
store->value()) {
3335 ((check_type == kOptimisticCheck) ||
3336 instance->Identity().IsAllocationSinkingCandidate());
3341 if (
auto*
store = use->instruction()->AsStoreIndexed()) {
3342 if (use ==
store->index()) {
3345 if (use ==
store->array()) {
3346 if (!
store->index()->BindsToSmiConstant()) {
3349 const intptr_t index =
store->index()->BoundSmiConstant();
3350 if (index < 0 || index >= use->definition()
3351 ->AsArrayAllocation()
3352 ->GetConstantNumElements()) {
3355 if (
auto* alloc_typed_data = use->definition()->AsAllocateTypedData()) {
3356 if (
store->class_id() != alloc_typed_data->class_id() ||
3357 !
store->aligned() ||
3358 store->index_scale() != compiler::target::Instance::ElementSizeFor(
3359 alloc_typed_data->class_id())) {
3364 if (use ==
store->value()) {
3367 ((check_type == kOptimisticCheck) ||
3368 instance->Identity().IsAllocationSinkingCandidate());
3379bool AllocationSinking::IsAllocationSinkingCandidate(Definition* alloc,
3380 SafeUseCheck check_type) {
3381 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
3382 use = use->next_use()) {
3383 if (!IsSafeUse(use, check_type)) {
3386 THR_Print(
"use of %s at %s is unsafe for allocation sinking\n",
3387 alloc->ToCString(), use->instruction()->ToCString());
3399 if (
auto*
const alloc = use->
instruction()->AsAllocation()) {
3403 return store->instance()->definition();
3406 return store->array()->definition();
3414 if (
auto load = instr->AsLoadField()) {
3415 return load->instance()->definition();
3417 if (
auto load = instr->AsLoadIndexed()) {
3418 return load->array()->definition();
3425void AllocationSinking::EliminateAllocation(Definition* alloc) {
3426 ASSERT(IsAllocationSinkingCandidate(alloc, kStrictCheck));
3428 if (FLAG_trace_optimization && flow_graph_->
should_print()) {
3429 THR_Print(
"removing allocation from the graph: v%" Pd "\n",
3430 alloc->ssa_temp_index());
3439 GrowableArray<Instruction*> stores_to_remove;
3440 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
3441 use = use->next_use()) {
3442 Instruction*
const instr = use->instruction();
3448 ASSERT(!instr->IsAllocation());
3449 stores_to_remove.Add(instr);
3458 for (
auto*
const store : stores_to_remove) {
3460 if (
store->previous() !=
nullptr) {
3461 store->RemoveFromGraph();
3468 for (
Value* use = alloc->env_use_list(); use !=
nullptr;
3469 use = use->next_use()) {
3470 ASSERT(use->instruction()->IsMaterializeObject());
3474 alloc->RemoveFromGraph();
3480void AllocationSinking::CollectCandidates() {
3483 !block_it.Done(); block_it.Advance()) {
3484 BlockEntryInstr* block = block_it.Current();
3485 for (ForwardInstructionIterator it(block); !it.
Done(); it.
Advance()) {
3486 Instruction* current = it.
Current();
3491 Definition* alloc = current->
Cast<Definition>();
3492 if (IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) {
3494 candidates_.Add(alloc);
3503 for (intptr_t i = 0; i < candidates_.length(); i++) {
3504 Definition* alloc = candidates_[i];
3505 if (alloc->Identity().IsAllocationSinkingCandidate()) {
3506 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {
3516 for (intptr_t i = 0; i < candidates_.length(); i++) {
3517 Definition* alloc = candidates_[i];
3518 if (alloc->Identity().IsAllocationSinkingCandidate()) {
3519 if (FLAG_trace_optimization && flow_graph_->
should_print()) {
3520 THR_Print(
"discovered allocation sinking candidate: v%" Pd "\n",
3521 alloc->ssa_temp_index());
3525 candidates_[j] = alloc;
3530 candidates_.TruncateTo(j);
3536void AllocationSinking::NormalizeMaterializations() {
3537 for (intptr_t i = 0; i < candidates_.length(); i++) {
3538 Definition* alloc = candidates_[i];
3541 for (
Value* use = alloc->input_use_list(); use !=
nullptr; use = next_use) {
3542 next_use = use->next_use();
3543 if (use->instruction()->IsMaterializeObject()) {
3555void AllocationSinking::RemoveUnusedMaterializations() {
3557 for (intptr_t i = 0; i < materializations_.length(); i++) {
3558 MaterializeObjectInstr* mat = materializations_[i];
3559 if ((mat->input_use_list() ==
nullptr) &&
3560 (mat->env_use_list() ==
nullptr)) {
3565 for (intptr_t i = 0; i < mat->InputCount(); i++) {
3566 Definition*
load = mat->InputAt(i)->definition();
3569 load->RemoveFromGraph();
3572 mat->RemoveFromGraph();
3575 materializations_[j] = mat;
3580 materializations_.TruncateTo(j);
3588void AllocationSinking::DiscoverFailedCandidates() {
3593 for (intptr_t i = 0; i < candidates_.length(); i++) {
3594 Definition* alloc = candidates_[i];
3595 if (alloc->Identity().IsAllocationSinkingCandidate()) {
3596 if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {
3606 for (intptr_t i = 0; i < candidates_.length(); i++) {
3607 Definition* alloc = candidates_[i];
3608 if (!alloc->Identity().IsAllocationSinkingCandidate()) {
3609 if (FLAG_trace_optimization && flow_graph_->
should_print()) {
3610 THR_Print(
"allocation v%" Pd " can't be eliminated\n",
3611 alloc->ssa_temp_index());
3615 for (
Value* use = alloc->env_use_list(); use !=
nullptr;
3616 use = use->next_use()) {
3617 ASSERT(use->instruction()->IsMaterializeObject());
3625 alloc->set_env_use_list(
nullptr);
3626 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
3627 use = use->next_use()) {
3628 if (use->instruction()->IsLoadField() ||
3629 use->instruction()->IsLoadIndexed()) {
3630 Definition*
load = use->instruction()->AsDefinition();
3632 load->RemoveFromGraph();
3634 ASSERT(use->instruction()->IsMaterializeObject() ||
3635 use->instruction()->IsPhi() ||
3636 use->instruction()->IsStoreField() ||
3637 use->instruction()->IsStoreIndexed());
3642 candidates_[j] = alloc;
3648 if (j != candidates_.length()) {
3650 for (intptr_t i = 0; i < materializations_.length(); i++) {
3651 MaterializeObjectInstr* mat = materializations_[i];
3652 if (!mat->allocation()->Identity().IsAllocationSinkingCandidate()) {
3655 mat->ReplaceUsesWith(mat->allocation());
3656 mat->RemoveFromGraph();
3659 materializations_[k] = mat;
3664 materializations_.TruncateTo(k);
3667 candidates_.TruncateTo(j);
3677 CollectCandidates();
3701 for (intptr_t i = 0; i < candidates_.length(); i++) {
3702 InsertMaterializations(candidates_[i]);
3715 NormalizeMaterializations();
3717 RemoveUnusedMaterializations();
3721 DiscoverFailedCandidates();
3727 for (intptr_t i = 0; i < candidates_.length(); i++) {
3728 EliminateAllocation(candidates_[i]);
3735 for (intptr_t i = 0; i < materializations_.length(); i++) {
3737 for (intptr_t j = 0; j < mat->
InputCount(); j++) {
3739 if (defn->IsBox()) {
3750 mat->previous()->LinkTo(mat->next());
3751 mat->set_next(
nullptr);
3752 mat->set_previous(
nullptr);
3767 while (mat->
next()->IsMaterializeObject()) {
3768 mat = mat->
next()->AsMaterializeObject();
3776 while (exit->previous()->IsMaterializeObject()) {
3777 exit = exit->previous();
3787 if (exit->IsMaterializeObject()) {
3792 mat !=
nullptr; mat = mat->previous()->AsMaterializeObject()) {
3793 if (mat->allocation() == alloc) {
3812 if (alloc->IsAllocateObject() &&
3822void AllocationSinking::CreateMaterializationAt(
3825 const ZoneGrowableArray<const Slot*>& slots) {
3834 for (
auto slot : slots) {
3835 Definition*
load =
nullptr;
3836 if (slot->IsArrayElement()) {
3837 intptr_t array_cid, index;
3838 if (alloc->IsCreateArray()) {
3839 array_cid = kArrayCid;
3841 compiler::target::Array::index_at_offset(slot->offset_in_bytes());
3842 }
else if (
auto alloc_typed_data = alloc->AsAllocateTypedData()) {
3843 array_cid = alloc_typed_data->class_id();
3844 index = slot->offset_in_bytes() /
3845 compiler::target::Instance::ElementSizeFor(array_cid);
3849 load =
new (
Z) LoadIndexedInstr(
3854 compiler::target::Instance::ElementSizeFor(array_cid),
3860 LoadFieldInstr(
new (
Z)
Value(alloc), *slot, access, alloc->source());
3866 const Class* cls =
nullptr;
3867 intptr_t length_or_shape = -1;
3868 if (
auto instr = alloc->AsAllocateObject()) {
3869 cls = &(instr->cls());
3870 }
else if (alloc->IsAllocateClosure()) {
3873 }
else if (
auto instr = alloc->AsAllocateContext()) {
3875 length_or_shape = instr->num_context_variables();
3876 }
else if (
auto instr = alloc->AsAllocateUninitializedContext()) {
3878 length_or_shape = instr->num_context_variables();
3879 }
else if (
auto instr = alloc->AsCreateArray()) {
3882 length_or_shape = instr->GetConstantNumElements();
3883 }
else if (
auto instr = alloc->AsAllocateTypedData()) {
3886 length_or_shape = instr->GetConstantNumElements();
3887 }
else if (
auto instr = alloc->AsAllocateRecord()) {
3890 length_or_shape = instr->shape().AsInt();
3891 }
else if (
auto instr = alloc->AsAllocateSmallRecord()) {
3894 length_or_shape = instr->shape().AsInt();
3898 MaterializeObjectInstr* mat =
new (
Z) MaterializeObjectInstr(
3899 alloc->AsAllocation(), *cls, length_or_shape, slots, std::move(values));
3907 exit->ReplaceInEnvironment(alloc, mat);
3913 val->set_instruction(mat);
3914 alloc->AddEnvUse(val);
3917 materializations_.Add(mat);
3922template <
typename T>
3925 for (intptr_t i = 0; i < list->
length(); i++) {
3926 if ((*list)[i] ==
value) {
3938void AllocationSinking::ExitsCollector::Collect(Definition* alloc) {
3939 for (
Value* use = alloc->env_use_list(); use !=
nullptr;
3940 use = use->next_use()) {
3941 if (use->instruction()->IsMaterializeObject()) {
3943 use->instruction()->AsMaterializeObject()));
3953 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
3954 use = use->next_use()) {
3956 if ((obj !=
nullptr) && (obj != alloc)) {
3962void AllocationSinking::ExitsCollector::CollectTransitively(Definition* alloc) {
3963 exits_.TruncateTo(0);
3964 worklist_.TruncateTo(0);
3966 worklist_.Add(alloc);
3973 for (intptr_t i = 0; i < worklist_.length(); i++) {
3974 Collect(worklist_[i]);
3979 if (!slot.
IsIdentical(Slot::PointerBase_data()))
return false;
3981 if (!alloc->IsAllocateObject())
return false;
3982 auto const cid = alloc->AsAllocateObject()->cls().id();
3986void AllocationSinking::InsertMaterializations(Definition* alloc) {
3988 auto slots =
new (
Z) ZoneGrowableArray<const Slot*>(5);
3990 for (
Value* use = alloc->input_use_list(); use !=
nullptr;
3991 use = use->next_use()) {
3994 ASSERT(!use->instruction()->AsAllocation());
3995 if (
auto store = use->instruction()->AsStoreField()) {
4001 }
else if (
auto store = use->instruction()->AsStoreIndexed()) {
4002 const intptr_t index =
store->index()->BoundSmiConstant();
4004 if (alloc->IsCreateArray()) {
4005 offset = compiler::target::Array::element_offset(index);
4006 }
else if (alloc->IsAllocateTypedData()) {
4017 if (
auto*
const allocation = alloc->AsAllocation()) {
4018 for (intptr_t
pos = 0;
pos < allocation->InputCount();
pos++) {
4019 if (
auto*
const slot = allocation->SlotForInput(
pos)) {
4023 if (!slot->IsImmutableLengthSlot()) {
4031 exits_collector_.CollectTransitively(alloc);
4034 for (intptr_t i = 0; i < exits_collector_.exits().
length(); i++) {
4035 CreateMaterializationAt(exits_collector_.exits()[i], alloc, *slots);
4055 : flow_graph_(flow_graph),
4058 worklist_(flow_graph, 10) {}
4075 void OptimizeDeadParameters() {
4078 NumberCatchEntryParameters();
4079 ComputeIncomingValues();
4080 CollectAliveParametersOrPhis();
4081 PropagateLivenessToInputs();
4082 EliminateDeadParameters();
4085 static intptr_t GetParameterId(
const Instruction* instr) {
4086 return instr->GetPassSpecificId(CompilerPass::kTryCatchOptimization);
4089 static void SetParameterId(Instruction* instr, intptr_t
id) {
4090 instr->SetPassSpecificId(CompilerPass::kTryCatchOptimization,
id);
4093 static bool HasParameterId(Instruction* instr) {
4094 return instr->HasPassSpecificId(CompilerPass::kTryCatchOptimization);
4099 void NumberCatchEntryParameters() {
4100 for (
auto catch_entry : flow_graph_->graph_entry()->catch_entries()) {
4101 const GrowableArray<Definition*>& idefs =
4102 *catch_entry->initial_definitions();
4103 for (
auto idef : idefs) {
4104 if (idef->IsParameter()) {
4105 SetParameterId(idef, parameter_info_.
length());
4106 parameter_info_.
Add(
new ParameterInfo(idef->AsParameter()));
4110 catch_by_index_.EnsureLength(catch_entry->catch_try_index() + 1,
nullptr);
4111 catch_by_index_[catch_entry->catch_try_index()] = catch_entry;
4118 void ComputeIncomingValues() {
4119 for (
auto block : flow_graph_->reverse_postorder()) {
4124 ASSERT(block->try_index() < catch_by_index_.length());
4125 auto catch_entry = catch_by_index_[block->try_index()];
4126 const auto& idefs = *catch_entry->initial_definitions();
4128 for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
4129 instr_it.Advance()) {
4130 Instruction* current = instr_it.Current();
4131 if (!current->MayThrow())
continue;
4133 Environment*
env = current->env()->Outermost();
4136 for (intptr_t env_idx = 0; env_idx < idefs.length(); ++env_idx) {
4137 if (ParameterInstr* param = idefs[env_idx]->AsParameter()) {
4138 Definition* defn =
env->ValueAt(env_idx)->definition();
4143 for (
auto other_defn :
4144 parameter_info_[GetParameterId(param)]->incoming) {
4145 if (other_defn == defn) {
4151 parameter_info_[GetParameterId(param)]->incoming.
Add(defn);
4164 void CollectAliveParametersOrPhis() {
4165 for (
auto block : flow_graph_->reverse_postorder()) {
4166 if (JoinEntryInstr* join = block->AsJoinEntry()) {
4167 if (
join->phis() ==
nullptr)
continue;
4169 for (
auto phi : *
join->phis()) {
4171 if (HasActualUse(phi)) {
4178 for (
auto info : parameter_info_) {
4179 if (HasActualUse(
info->instr)) {
4180 MarkLive(
info->instr);
4187 void PropagateLivenessToInputs() {
4188 while (!worklist_.
IsEmpty()) {
4190 if (ParameterInstr* param = defn->AsParameter()) {
4191 auto s = parameter_info_[GetParameterId(param)];
4192 for (
auto input :
s->incoming) {
4195 }
else if (PhiInstr* phi = defn->AsPhi()) {
4196 for (intptr_t i = 0; i < phi->InputCount(); i++) {
4197 MarkLive(phi->InputAt(i)->definition());
4205 void MarkLive(Definition* defn) {
4206 if (PhiInstr* phi = defn->AsPhi()) {
4207 if (!phi->is_alive()) {
4211 }
else if (ParameterInstr* param = defn->AsParameter()) {
4212 if (HasParameterId(param)) {
4213 auto input_s = parameter_info_[GetParameterId(param)];
4214 if (!input_s->alive) {
4215 input_s->alive =
true;
4216 worklist_.
Add(param);
4219 }
else if (UnboxInstr* unbox = defn->AsUnbox()) {
4220 MarkLive(unbox->value()->definition());
4226 void EliminateDeadParameters() {
4227 for (
auto info : parameter_info_) {
4233 for (
auto block : flow_graph_->reverse_postorder()) {
4234 if (block->try_index() == -1)
continue;
4236 auto catch_entry = catch_by_index_[block->try_index()];
4237 const auto& idefs = *catch_entry->initial_definitions();
4239 for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
4240 instr_it.Advance()) {
4241 Instruction* current = instr_it.Current();
4242 if (!current->MayThrow())
continue;
4244 Environment*
env = current->env()->Outermost();
4247 for (intptr_t env_idx = 0; env_idx < idefs.length(); ++env_idx) {
4248 if (ParameterInstr* param = idefs[env_idx]->AsParameter()) {
4249 if (!parameter_info_[GetParameterId(param)]->alive) {
4250 env->ValueAt(env_idx)->BindToEnvironment(
4263 static bool HasActualUse(Definition* defn) {
4264 for (
Value* use = defn->input_use_list(); use !=
nullptr;
4265 use = use->next_use()) {
4266 Instruction* use_instruction = use->instruction();
4267 if (UnboxInstr* unbox = use_instruction->AsUnbox()) {
4268 if (HasActualUse(unbox)) {
4271 }
else if (!use_instruction->IsPhi()) {
4278 struct ParameterInfo :
public ZoneAllocated {
4279 explicit ParameterInfo(ParameterInstr* instr) : instr(instr) {}
4281 ParameterInstr* instr;
4283 GrowableArray<Definition*> incoming;
4286 FlowGraph*
const flow_graph_;
4293 GrowableArray<ParameterInfo*> parameter_info_;
4296 GrowableArray<CatchBlockEntryInstr*> catch_by_index_;
4300 DefinitionWorklist worklist_;
4315 OptimizeDeadParameters();
4325 for (
auto catch_entry : catch_entries) {
4339 cdefs[flow_graph_->
EnvIndex(catch_entry->raw_exception_var())] =
nullptr;
4340 cdefs[flow_graph_->
EnvIndex(catch_entry->raw_stacktrace_var())] =
nullptr;
4343 !block_it.Done(); block_it.Advance()) {
4345 if (block->
try_index() == catch_entry->catch_try_index()) {
4347 instr_it.Advance()) {
4352 for (intptr_t env_idx = 0; env_idx < cdefs.
length(); ++env_idx) {
4353 if (cdefs[env_idx] !=
nullptr && !cdefs[env_idx]->
IsConstant() &&
4354 env->ValueAt(env_idx)->BindsToConstant()) {
4357 cdefs[env_idx] =
env->ValueAt(env_idx)->definition();
4359 if (cdefs[env_idx] !=
env->ValueAt(env_idx)->definition()) {
4361 cdefs[env_idx] =
nullptr;
4368 for (intptr_t j = 0; j < idefs->
length(); ++j) {
4369 if (cdefs[j] !=
nullptr && cdefs[j]->
IsConstant()) {
4389 if (!it.
Current()->instruction()->IsPhi())
return true;
4399 if (join !=
nullptr) {
4416 for (intptr_t i = 0; i < phi->
InputCount(); i++) {
4419 if ((used_phi !=
nullptr) && !used_phi->
is_alive()) {
4421 live_phis.
Add(used_phi);
4431 for (
auto block : flow_graph->
postorder()) {
4433 if (join->phis_ ==
nullptr)
continue;
4436 intptr_t to_index = 0;
4437 for (intptr_t i = 0; i < join->phis_->length(); ++i) {
4439 if (phi !=
nullptr) {
4443 (*join->phis_)[i] =
nullptr;
4444 if (FLAG_trace_optimization && flow_graph->
should_print()) {
4448 (*join->phis_)[to_index++] = phi;
4452 if (to_index == 0) {
4453 join->phis_ =
nullptr;
4455 join->phis_->TruncateTo(to_index);
4467 !block_it.Done(); block_it.Advance()) {
4471 ASSERT(!current->IsMoveArgument());
4475 worklist.Add(current);
4476 if (
Definition* def = current->AsDefinition()) {
4477 if (def->HasSSATemp()) {
4478 live.
Add(def->ssa_temp_index());
4486 while (!worklist.is_empty()) {
4488 for (intptr_t i = 0, n = current->
InputCount(); i < n; ++i) {
4492 worklist.Add(input);
4496 for (intptr_t i = 0, n = current->
ArgumentCount(); i < n; ++i) {
4500 worklist.Add(input);
4504 if (current->
env() !=
nullptr) {
4507 Definition* input = it.CurrentValue()->definition();
4508 ASSERT(!input->IsMoveArgument());
4510 worklist.Add(input);
4519 !block_it.Done(); block_it.Advance()) {
4534 ASSERT(!current->IsMoveArgument());
4536 if (
Definition* def = current->AsDefinition()) {
4537 if (def->HasSSATemp() && live.
Contains(def->ssa_temp_index())) {
4551 Symbols::vm_unsafe_no_interrupts(),
4557 if (should_remove_all) {
4560 if (it.
Current()->IsCheckStackOverflow()) {
4574 if (first_stack_overflow_instr ==
nullptr) {
4575 first_stack_overflow_instr = instr;
4581 if (current->IsBranch()) {
4582 current = current->AsBranch()->comparison();
4591 if (first_stack_overflow_instr !=
nullptr) {
4592 first_stack_overflow_instr->RemoveFromGraph();
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
#define check(reporter, ref, unref, make, kill)
static sk_sp< GrTextureProxy > wrapped(skiatest::Reporter *reporter, GrRecordingContext *rContext, GrProxyProvider *proxyProvider, SkBackingFit fit)
#define RELEASE_ASSERT(cond)
static AliasIdentity NotAliased()
static AliasIdentity Aliased()
bool IsNotAliased() const
static AliasIdentity Unknown()
static AliasIdentity AllocationSinkingCandidate()
void RollbackAliasedIdentities()
void PrintSet(BitVector *set)
bool CanBeAliased(Definition *alloc)
intptr_t max_place_id() const
AliasedSet(Zone *zone, PointerSet< Place > *places_map, ZoneGrowableArray< Place * > *places, PhiPlaceMoves *phi_moves, bool print_traces)
BitVector * aliased_by_effects() const
intptr_t LookupAliasId(const Place &alias)
BitVector * GetKilledSet(intptr_t alias)
const PhiPlaceMoves * phi_moves() const
Place * LookupCanonical(Place *place) const
const ZoneGrowableArray< Place * > & places() const
virtual const Slot * SlotForInput(intptr_t pos)
MaterializeObjectInstr * MaterializationFor(Definition *alloc, Instruction *exit)
void DetachMaterializations()
intptr_t GetConstantNumElements() const
bool HasConstantNumElements() const
KeyValueTrait::Value LookupValue(typename KeyValueTrait::Key key) const
void Insert(typename KeyValueTrait::Pair kv)
bool HasKey(typename KeyValueTrait::Key key) const
Iterator GetIterator() const
bool Contains(const T &other, bool isEqual(const T &, const T &)=nullptr) const
void AddArray(const BaseGrowableArray< T, B, Allocator > &src)
static constexpr Kind decode(uword value)
static constexpr uword update(Representation value, uword original)
static constexpr uword encode(Kind value)
bool Contains(intptr_t i) const
bool AddAll(const BitVector *from)
void CopyFrom(const BitVector *other)
intptr_t try_index() const
intptr_t postorder_number() const
intptr_t preorder_number() const
intptr_t block_id() const
BlockEntryInstr * ImmediateDominator() const
const GrowableArray< BlockEntryInstr * > & dominated_blocks()
bool Dominates(BlockEntryInstr *other) const
virtual BlockEntryInstr * PredecessorAt(intptr_t index) const =0
Instruction * last_instruction() const
CSEInstructionSet(const CSEInstructionSet &other)
void Insert(Instruction *instr)
Instruction * Lookup(Instruction *other) const
static void EliminateStackOverflow(FlowGraph *graph)
ClassPtr At(intptr_t cid) const
static CompileType FromCid(intptr_t cid)
static CompilerState & Current()
const Object & value() const
static void EliminateDeadPhis(FlowGraph *graph)
static void RemoveDeadAndRedundantPhisFromTheGraph(FlowGraph *graph)
static void EliminateDeadCode(FlowGraph *graph)
static void Optimize(FlowGraph *graph)
void Add(Definition *defn)
Definition * RemoveLast()
Value * env_use_list() const
virtual AliasIdentity Identity() const
Value * input_use_list() const
void ReplaceUsesWith(Definition *other)
virtual Definition * AsDefinition()
Definition * OriginalDefinition()
intptr_t ssa_temp_index() const
static void Optimize(FlowGraph *graph)
static constexpr intptr_t kNone
static bool Optimize(FlowGraph *graph, bool run_load_optimization=true)
void MarkAsLazyDeoptToBeforeDeoptId()
Environment * Outermost()
FieldPtr Original() const
const GrowableArray< BlockEntryInstr * > & reverse_postorder() const
GraphEntryInstr * graph_entry() const
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
IsolateGroup * isolate_group() const
bool should_print() const
void EnsureSSATempIndex(Definition *defn, Definition *replacement)
intptr_t current_ssa_temp_index() const
void ReplaceCurrentInstruction(ForwardInstructionIterator *iterator, Instruction *current, Instruction *replacement)
const GrowableArray< BlockEntryInstr * > & preorder() const
void set_loop_invariant_loads(ZoneGrowableArray< BitVector * > *loop_invariant_loads)
BlockIterator postorder_iterator() const
const GrowableArray< BlockEntryInstr * > & postorder() const
ConstantInstr * constant_null() const
const LoopHierarchy & GetLoopHierarchy()
const Function & function() const
bool is_huge_method() const
bool is_licm_allowed() const
ZoneGrowableArray< BitVector * > * loop_invariant_loads() const
void AllocateSSAIndex(Definition *def)
BlockIterator reverse_postorder_iterator() const
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
intptr_t EnvIndex(const LocalVariable *variable) const
Instruction * Current() const
void RemoveCurrentFromGraph()
const GrowableArray< CatchBlockEntryInstr * > & catch_entries() const
void InsertBefore(Instruction *next)
Instruction * next() const
virtual intptr_t InputCount() const =0
intptr_t GetDeoptId() const
virtual bool MayThrow() const =0
virtual bool HasUnknownSideEffects() const =0
virtual Value * InputAt(intptr_t i) const =0
virtual BlockEntryInstr * GetBlock()
virtual void CopyDeoptIdFrom(const Instruction &instr)
Environment * env() const
virtual bool CanEliminate(const BlockEntryInstr *block) const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
bool HasPassSpecificId(CompilerPass::Id pass) const
virtual bool MayHaveVisibleEffect() const
virtual intptr_t ArgumentCount() const
Definition * ArgumentAt(intptr_t index) const
virtual Representation representation() const
bool CanDeoptimize() const
intptr_t GetPassSpecificId(CompilerPass::Id pass) const
virtual bool AllowsCSE() const
virtual Tag tag() const =0
Instruction * RemoveFromGraph(bool return_previous=true)
bool HasMoveArguments() const
void SetPassSpecificId(CompilerPass::Id pass, intptr_t id)
virtual const char * DebugName() const =0
Instruction * previous() const
ObjectStore * object_store() const
ClassTable * class_table() const
void OptimisticallySpecializeSmiPhis()
LICM(FlowGraph *flow_graph)
static bool FindPragma(Thread *T, bool only_core, const Object &object, const String &pragma_name, bool multiple=false, Object *options=nullptr)
GrowableArray< BitVector * > kill_
GrowableArray< BitVector * > live_out_
GrowableArray< BitVector * > live_in_
const Slot & slot() const
virtual Representation representation() const
intptr_t class_id() const
intptr_t index_scale() const
static bool OptimizeGraph(FlowGraph *graph)
LoadOptimizer(FlowGraph *graph, AliasedSet *aliased_set)
const ZoneGrowableArray< BlockEntryInstr * > & headers() const
void ComputeInduction() const
bool IsAlwaysTaken(BlockEntryInstr *block) const
BitVector * blocks() const
static ClassPtr context_class()
virtual const char * ToCString() const
static Object & ZoneHandle()
virtual BlockEntryInstr * GetBlock()
Move(intptr_t from, intptr_t to)
void CreateOutgoingMove(Zone *zone, BlockEntryInstr *block, intptr_t from, intptr_t to)
const ZoneGrowableArray< Move > * MovesList
MovesList GetOutgoingMoves(BlockEntryInstr *block) const
static Place * CreateAnyInstanceAnyIndexAlias(Zone *zone, intptr_t id)
static Place * Wrap(Zone *zone, const Place &place, intptr_t id)
static bool IsTypedDataViewAllocation(Definition *defn)
intptr_t index_constant() const
bool Equals(const Place &other) const
static const char * DefinitionName(Definition *def)
Place CopyWithoutIndex() const
Place ToSmallerElement(ElementSize to, intptr_t index) const
ElementSize element_size() const
const Field & static_field() const
bool IsImmutableField() const
Place(const Place &other)
Representation representation() const
Place(AllocationInstr *alloc, intptr_t input_pos)
Definition * instance() const
Place(Instruction *instr, bool *is_load, bool *is_store)
const char * ToCString() const
void set_instance(Definition *def)
const Field * static_field_
static bool IsAllocation(Definition *defn)
bool DependsOnInstance() const
const Slot * instance_field_
Place CopyWithoutInstance() const
Place ToLargerElement(ElementSize to) const
Definition * index() const
const Slot & instance_field() const
static const Slot & GetArrayElementSlot(Thread *thread, intptr_t offset_in_bytes)
bool IsIdentical(const Slot &other) const
bool is_immutable() const
Representation representation() const
bool may_contain_inner_pointer() const
bool Contains(T value) const
static SmiPtr New(intptr_t value)
intptr_t class_id() const
intptr_t index_scale() const
virtual void ComputeInitialSets()
StoreOptimizer(FlowGraph *graph, AliasedSet *aliased_set, PointerSet< Place > *map)
static void OptimizeGraph(FlowGraph *graph)
static Thread * Current()
TryCatchAnalyzer(FlowGraph *flow_graph, bool is_aot)
static constexpr uintptr_t RoundUpToPowerOfTwo(uintptr_t x)
static constexpr int ShiftForPowerOfTwo(T x)
static T Minimum(T x, T y)
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Instruction * instruction() const
Definition * definition() const
void BindTo(Definition *definition)
intptr_t InputCount() const
Value * InputAt(intptr_t i) const
ZonePlace(const Place &place)
char * PrintToString(const char *format,...) PRINTF_ATTRIBUTE(2
#define THR_Print(format,...)
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE auto & d
FlutterSemanticsFlag flags
constexpr bool FLAG_support_il_printer
#define DEFINE_FLAG(type, name, default_value, comment)
Dart_NativeFunction function
bool Contains(const Container &container, const Value &value)
bool IsTypedDataViewClassId(intptr_t index)
bool IsTypedDataClassId(intptr_t index)
void OptimizeCatchEntryStates(FlowGraph *flow_graph, bool is_aot)
static Definition * GetStoredValue(Instruction *instr)
static bool IsMarkedWithNoInterrupts(const Function &function)
bool IsTypedDataBaseClassId(intptr_t index)
void AddInstruction(GrowableArray< T * > *list, T *value)
static bool IsSupportedAllocation(Instruction *instr)
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
static bool IsLoopInvariantLoad(ZoneGrowableArray< BitVector * > *sets, intptr_t loop_header_index, Instruction *instr)
static Instruction * ExitForMaterialization(MaterializeObjectInstr *mat)
bool IsUnmodifiableTypedDataViewClassId(intptr_t index)
GrowableArray< Value * > InputsArray
static bool IsValidLengthForAllocationSinking(ArrayAllocationInstr *array_alloc)
static AliasedSet * NumberPlaces(FlowGraph *graph, PointerSet< Place > *map, CSEMode mode)
static DART_FORCE_INLINE intptr_t GetPlaceId(const Instruction *instr)
static Definition * StoreDestination(Value *use)
static InnerPointerAccess AccessForSlotInAllocatedObject(Definition *alloc, const Slot &slot)
bool IsExternalPayloadClassId(classid_t cid)
static bool IsDataFieldOfTypedDataView(Definition *alloc, const Slot &slot)
constexpr intptr_t kBitsPerInt32
static PhiPlaceMoves * ComputePhiMoves(PointerSet< Place > *map, ZoneGrowableArray< Place * > *places, bool print_traces)
uint32_t FinalizeHash(uint32_t hash, intptr_t hashbits=kBitsPerInt32)
static DART_FORCE_INLINE void SetPlaceId(Instruction *instr, intptr_t id)
static bool IsLoadEliminationCandidate(Instruction *instr)
constexpr int32_t kMaxInt32
static bool IsConstant(Definition *def, int64_t *val)
static bool HasRealUse(Definition *def)
static bool IsPhiDependentPlace(Place *place)
static constexpr intptr_t kInvalidTryIndex
static void AddSlot(ZoneGrowableArray< const Slot * > *slots, const Slot &slot)
static Instruction * FirstMaterializationAt(Instruction *exit)
static DART_FORCE_INLINE bool HasPlaceId(const Instruction *instr)
static Definition * LoadSource(Definition *instr)
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not set
SINT Vec< 2 *N, T > join(const Vec< N, T > &lo, const Vec< N, T > &hi)
static const char header[]
static constexpr size_t ValueSize(Representation rep)
static constexpr bool IsUnboxedInteger(Representation rep)
static constexpr bool IsUnboxed(Representation rep)
static const char * ToCString(Representation rep)
static Representation RepresentationOfArrayElement(classid_t cid)
GrSamplerState::WrapMode Wrap