DEFINE_FLAG(bool, dead_store_elimination, true, "Eliminate dead stores");
DEFINE_FLAG(bool, load_cse, true, "Use redundant load elimination.");
DEFINE_FLAG(bool,
            optimize_lazy_initializer_calls,
            true,
            "Eliminate redundant lazy initializer calls.");
DEFINE_FLAG(bool,
            trace_load_optimization,
            false,
            "Print live sets for load optimization pass.");
  return value->definition()->OriginalDefinition();

static bool EqualsIgnoringRedefinitions(const Instruction& a,
                                        const Instruction& b) {
  const auto tag = a.tag();
  if (tag != b.tag()) return false;
  const auto input_count = a.InputCount();
  if (input_count != b.InputCount()) return false;
  if (tag != Instruction::kLoadField) {
    for (intptr_t i = 0; i < input_count; ++i) {
      if (OriginalDefinition(a.InputAt(i)) !=
          OriginalDefinition(b.InputAt(i))) {
    for (intptr_t i = 0; i < input_count; ++i) {
      if (!a.InputAt(i)->Equals(*b.InputAt(i))) return false;
  return a.AttributesEqual(b);
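// Note: the trait below keys a CSE hash map directly on Instruction*, using
// EqualsIgnoringRedefinitions (and OriginalDefinition for hashing) so that
// instructions which differ only through Redefinition wrappers on their
// inputs are still recognized as the same computation.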
  typedef Instruction* Value;
  typedef Instruction* Key;
  typedef Instruction* Pair;

  static Key KeyOf(Pair kv) { return kv; }
  static Value ValueOf(Pair kv) { return kv; }

    for (intptr_t i = 0; i < key->InputCount(); ++i) {
          result, OriginalDefinition(key->InputAt(i))->ssa_temp_index());

  static inline bool IsKeyEqual(Pair kv, Key key) {
    return EqualsIgnoringRedefinitions(*kv, *key);

  DirectChainedHashMap<Trait> map_;
        flags_(other.flags_),
        instance_(other.instance_),

    switch (instr->tag()) {
      case Instruction::kLoadField: {
      case Instruction::kStoreField: {
            store->RequiredInputRepresentation(StoreFieldInstr::kValuePos));
        instance_ = store->instance()->definition()->OriginalDefinition();
      case Instruction::kLoadStaticField:
      case Instruction::kStoreStaticField:
      case Instruction::kLoadIndexed: {
      case Instruction::kStoreIndexed: {

    return Place(RepresentationBits::update(kNoRepresentation, flags_),
    return Place(ElementSizeBits::update(to, flags_), instance_,
                     ElementSizeMultiplier(element_size()) /
                     ElementSizeMultiplier(to));
    return Place(ElementSizeBits::update(to, flags_), instance_,

  intptr_t id() const { return id_; }

    if (def == nullptr) {
    const char* field_name =
    return (flags_ == other.flags_) && (instance_ == other.instance_) &&
    return (defn != nullptr) && (defn->IsAllocation() ||
                                 (defn->IsStaticCall() &&
                                  defn->AsStaticCall()->IsRecognizedFactory()));
    if (defn != nullptr) {
      if (auto* alloc = defn->AsAllocateObject()) {
        auto const cid = alloc->cls().id();

  bool SameField(const Place& other) const {
    flags_ = RepresentationBits::update(rep, flags_);
  void set_kind(Kind kind) { flags_ = KindBits::update(kind, flags_); }
    flags_ = ElementSizeBits::update(scale, flags_);
    set_element_size(size);
    const intptr_t scaled_index = index_value * scale;
    const bool can_be_view = instance_->representation() == kUntagged ||
    set_element_size(size);
        FATAL("Unhandled value size for representation %s",
    return 1 << (static_cast<intptr_t>(size) - static_cast<intptr_t>(k1Byte));
    return offset & ~(ElementSizeMultiplier(size) - 1);
                                     intptr_t base_offset) {
    return base_offset + index * ElementSizeMultiplier(size);

  class KindBits : public BitField<uword, Kind, 0, 3> {};
      : public BitField<uword, Representation, KindBits::kNextBit, 11> {};
  class ElementSizeBits : public BitField<uword,
                                          RepresentationBits::kNextBit,
                                          kNumElementSizeBits> {};

  Definition* instance_;
    moves_.EnsureLength(block_num + 1, nullptr);
    if (moves_[block_num] == nullptr) {
    moves_[block_num]->Add(Move(from, to));

    intptr_t from() const { return from_; }
    intptr_t to() const { return to_; }

    return (block_num < moves_.length()) ? moves_[block_num] : nullptr;
        print_traces_(print_traces),
        places_map_(places_map),
        typed_data_access_sizes_(),
            zone_, kAnyInstanceAnyIndexAlias));
    for (intptr_t i = 0; i < places_.length(); i++) {
      AddRepresentative(places_[i]);

    const Place* result = aliases_map_.LookupValue(&alias);
    return (alias < killed_.length()) ? killed_[alias] : nullptr;
    return places_map_->LookupValue(place);
      THR_Print("%s", places_[it.Current()]->ToCString());
    for (intptr_t i = 0; i < identity_rollback_.length(); ++i) {
    ComputeAliasing(alloc);

    kAnyConstantIndexedAlias = 1,
    kUnknownInstanceConstantIndexedAlias = 2,
    kAnyAllocationIndexedAlias = 3,
    kAnyInstanceAnyIndexAlias = 4
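  // The identifiers above reserve small alias ids for "wildcard" categories
  // (any constant index, constant index into an unknown instance, any index
  // into any allocation, and the fully unconstrained alias). Concrete aliases
  // discovered later get ids after these reserved values, which is why
  // CanonicalizeAlias assigns kAnyInstanceAnyIndexAlias + aliases_.length()
  // to a newly inserted alias.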
  void AddRepresentative(Place* place) {
    if (!place->IsImmutableField()) {
      const Place* alias = CanonicalizeAlias(place->ToAlias());
      EnsureSet(&representatives_, alias->id())->Add(place->id());
        EnsureSet(&representatives_, kAnyConstantIndexedAlias)
        if (alias->instance() == nullptr) {
          EnsureSet(&representatives_, kUnknownInstanceConstantIndexedAlias)
          typed_data_access_sizes_.Add(alias->element_size());
        EnsureSet(&representatives_, kAnyAllocationIndexedAlias)

    if (!IsIndependentFromEffects(place)) {
      aliased_by_effects_->Add(place->id());

  void ComputeKillSets() {
    for (intptr_t i = 0; i < aliases_.length(); ++i) {
      const Place* alias = aliases_[i];
      AddAllRepresentatives(alias->id(), alias->id());
      ComputeKillSet(alias);

    if (FLAG_trace_load_optimization && print_traces_) {
      for (intptr_t i = 0; i < aliases_.length(); ++i) {
        const Place* alias = aliases_[i];
        if (kill != nullptr) {
  void InsertAlias(const Place* alias) {
    aliases_map_.Insert(alias);

  const Place* CanonicalizeAlias(const Place& alias) {
    const Place* canonical = aliases_map_.LookupValue(&alias);
    if (canonical == nullptr) {
          kAnyInstanceAnyIndexAlias + aliases_.length());
      InsertAlias(canonical);
    ASSERT(aliases_map_.LookupValue(&alias) == canonical);

  BitVector* GetRepresentativesSet(intptr_t alias) {
    return (alias < representatives_.length()) ? representatives_[alias]

  BitVector* EnsureSet(GrowableArray<BitVector*>* sets, intptr_t alias) {
    while (sets->length() <= alias) {
    BitVector* set = (*sets)[alias];
    if (set == nullptr) {

  void AddAllRepresentatives(const Place* to, intptr_t from) {
    AddAllRepresentatives(to->id(), from);

  void AddAllRepresentatives(intptr_t to, intptr_t from) {
    BitVector* from_set = GetRepresentativesSet(from);
    if (from_set != nullptr) {
      EnsureSet(&killed_, to)->AddAll(from_set);

  void CrossAlias(const Place* to, const Place& from) {
      CrossAlias(to, from_id);

  void CrossAlias(const Place* to, intptr_t from) {
    AddAllRepresentatives(to->id(), from);
    AddAllRepresentatives(from, to->id());
  void ComputeKillSet(const Place* alias) {
    switch (alias->kind()) {
        if (alias->instance() == nullptr) {
          AddAllRepresentatives(alias, kAnyConstantIndexedAlias);
          AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
          CrossAlias(alias, kAnyInstanceAnyIndexAlias);
          AddAllRepresentatives(alias, kUnknownInstanceConstantIndexedAlias);

        const bool has_aliased_instance =
            (alias->instance() != nullptr) && CanBeAliased(alias->instance());

        for (intptr_t i = static_cast<intptr_t>(alias->element_size()) + 1;
          if (!typed_data_access_sizes_.Contains(alias->element_size())) {
          CrossAlias(alias, alias->ToLargerElement(
        if (has_aliased_instance) {
          const Place no_instance_alias = alias->CopyWithoutInstance();
            if (!typed_data_access_sizes_.Contains(alias->element_size())) {
            if (other_size > alias->element_size()) {
                         no_instance_alias.ToLargerElement(other_size));
              const auto num_smaller_elements =
                  1 << (alias->element_size() - other_size);
              for (intptr_t j = 0; j < num_smaller_elements; j++) {
                             no_instance_alias.ToSmallerElement(other_size, j));
        if (alias->instance() == nullptr) {
          AddAllRepresentatives(alias, kAnyAllocationIndexedAlias);
          CrossAlias(alias, kAnyInstanceAnyIndexAlias);
          CrossAlias(alias, alias->CopyWithoutIndex());
          CrossAlias(alias, alias->CopyWithoutInstance());
          CrossAlias(alias, kAnyInstanceAnyIndexAlias);
        CrossAlias(alias, alias->CopyWithoutInstance());
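  // Element-size cross-aliasing above is what makes typed-data accesses of
  // different widths conflict: for example, a 4-byte store at index i also
  // covers the 1-byte elements at indices 4*i .. 4*i+3, which is expressed by
  // crossing an alias with its ToLargerElement / ToSmallerElement variants
  // (num_smaller_elements is the 1 << (size difference) fan-out).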
  bool IsIndependentFromEffects(Place* place) {
    if (place->IsImmutableField()) {
           (place->instance() != nullptr) && !CanBeAliased(place->instance());

  bool HasLoadsFromPlace(Definition* defn, const Place* place) {
    for (Value* use = defn->input_use_list(); use != nullptr;
         use = use->next_use()) {
      Instruction* instr = use->instruction();
      if (UseIsARedefinition(use) &&
          HasLoadsFromPlace(instr->Cast<Definition>(), place)) {
      bool is_load = false, is_store;
      Place load_place(instr, &is_load, &is_store);
      if (is_load && load_place.Equals(*place)) {

  static bool UseIsARedefinition(Value* use) {
    Instruction* instr = use->instruction();
    return instr->IsDefinition() &&
           (instr->Cast<Definition>()->RedefinedValue() == use);
  bool AnyUseCreatesAlias(Definition* defn) {
    for (Value* use = defn->input_use_list(); use != nullptr;
         use = use->next_use()) {
      Instruction* instr = use->instruction();
      if (instr->HasUnknownSideEffects() ||
          (instr->IsLoadField() &&
           instr->AsLoadField()->MayCreateUntaggedAlias()) ||
          (instr->IsStoreIndexed() &&
          instr->IsStoreStaticField() || instr->IsPhi()) {
      } else if (UseIsARedefinition(use) &&
                 AnyUseCreatesAlias(instr->Cast<Definition>())) {
      } else if (instr->IsStoreField()) {
        StoreFieldInstr* store = instr->AsStoreField();
        if (store->slot().kind() == Slot::Kind::kTypedDataView_typed_data) {
        if (use->use_index() != StoreFieldInstr::kInstancePos) {
          ASSERT(use->use_index() == StoreFieldInstr::kValuePos);
            store->instance()->definition()->OriginalDefinition();
            !instance->Identity().IsAliased()) {
          bool is_load, is_store;
          Place store_place(instr, &is_load, &is_store);
          if (!HasLoadsFromPlace(instance, &store_place)) {
          if (instance->Identity().IsUnknown()) {
      } else if (auto* const alloc = instr->AsAllocation()) {
        if (alloc->Identity().IsAliased()) {
        Place input_place(alloc, use->use_index());
        if (HasLoadsFromPlace(alloc, &input_place)) {
        if (alloc->Identity().IsUnknown()) {
          aliasing_worklist_.Add(alloc);
  void MarkDefinitionAsAliased(Definition* d) {
    auto* const defn = d->OriginalDefinition();
    if (defn->Identity().IsNotAliased()) {
      identity_rollback_.Add(defn);
      aliasing_worklist_.Add(defn);

  void MarkStoredValuesEscaping(Definition* defn) {
    if (auto* const alloc = defn->AsAllocation()) {
      for (intptr_t i = 0; i < alloc->InputCount(); i++) {
        if (auto* const slot = alloc->SlotForInput(i)) {
          MarkDefinitionAsAliased(alloc->InputAt(i)->definition());

    for (Value* use = defn->input_use_list(); use != nullptr;
         use = use->next_use()) {
      auto instr = use->instruction();
      if (UseIsARedefinition(use)) {
        MarkStoredValuesEscaping(instr->AsDefinition());
      if ((use->use_index() == StoreFieldInstr::kInstancePos) &&
          instr->IsStoreField()) {
        MarkDefinitionAsAliased(instr->AsStoreField()->value()->definition());
  void ComputeAliasing(Definition* alloc) {
    ASSERT(alloc->Identity().IsUnknown());
    ASSERT(aliasing_worklist_.is_empty());

    aliasing_worklist_.Add(alloc);
    while (!aliasing_worklist_.is_empty()) {
      Definition* defn = aliasing_worklist_.RemoveLast();
      if (!defn->Identity().IsAliased() && AnyUseCreatesAlias(defn)) {
        identity_rollback_.Add(defn);
      if (defn->Identity().IsAliased()) {
        MarkStoredValuesEscaping(defn);
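  // ComputeAliasing is a standard worklist fixed point: an allocation starts
  // with Unknown identity and each definition pulled from the worklist is
  // re-examined; once some use is found to create an alias, the identity is
  // downgraded (recorded in identity_rollback_ so it can be undone later) and
  // the values stored into the now-aliased object are marked as escaping.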
  const bool print_traces_;
  PointerSet<Place>* places_map_;
  const ZoneGrowableArray<Place*>& places_;
  const PhiPlaceMoves* phi_moves_;
  GrowableArray<const Place*> aliases_;
  PointerSet<const Place> aliases_map_;
  SmallSet<Place::ElementSize> typed_data_access_sizes_;
  GrowableArray<BitVector*> representatives_;
  GrowableArray<BitVector*> killed_;
  BitVector* aliased_by_effects_;
  GrowableArray<Definition*> aliasing_worklist_;
  GrowableArray<Definition*> identity_rollback_;
  if (instr->IsStoreIndexed()) {
    return instr->AsStoreIndexed()->value()->definition();
  if (store_instance_field != nullptr) {
  if (store_static_field != nullptr) {

                                         bool print_traces) {
  for (intptr_t i = 0; i < places->length(); i++) {
    Place* place = (*places)[i];
      if (FLAG_trace_optimization && print_traces) {
      Place input_place(*place);
      for (intptr_t j = 0; j < phi->InputCount(); j++) {
        if (FLAG_trace_optimization && print_traces) {

  bool has_loads = false;
  bool has_stores = false;
         instr_it.Advance()) {
      Place place(instr, &has_loads, &has_stores);
  if (FLAG_trace_optimization && graph->should_print()) {

  if (instr->IsDefinition() &&
      instr->AsDefinition()->MayCreateUnsafeUntaggedPointer()) {
  if (auto* load = instr->AsLoadField()) {
    if (load->slot().is_weak()) {
  return instr->IsLoadField() || instr->IsLoadIndexed() ||
         instr->IsLoadStaticField();

                                  intptr_t loop_header_index,
         (*sets)[loop_header_index]->Contains(GetPlaceId(instr));

  if (FLAG_trace_optimization && flow_graph_->should_print()) {
    THR_Print("Hoisting instruction %s:%" Pd " from B%" Pd " to B%" Pd "\n",
  if (it != nullptr) {
void LICM::TrySpecializeSmiPhi(PhiInstr* phi,
                               BlockEntryInstr* pre_header) {
  if (phi->Type()->ToCid() == kSmiCid) {
  for (intptr_t i = 0; i < phi->InputCount(); ++i) {
    Value* input = phi->InputAt(i);
    if (input->Type()->ToCid() != kSmiCid) {
      if ((non_smi_input != kNotFound) ||
  if ((non_smi_input == kNotFound) ||
      (phi->block()->PredecessorAt(non_smi_input) != pre_header)) {

  CheckSmiInstr* check = nullptr;
  for (Value* use = phi->input_use_list();
       (use != nullptr) && (check == nullptr); use = use->next_use()) {
    check = use->instruction()->AsCheckSmi();
  if (check == nullptr) {

  Hoist(nullptr, pre_header, check);
  check->value()->BindTo(phi->InputAt(non_smi_input)->definition());
  check->value()->SetReachingType(phi->InputAt(non_smi_input)->Type());
  if (flow_graph()->function().ProhibitsInstructionHoisting() ||
  for (intptr_t i = 0; i < loop_headers.length(); ++i) {
    if (pre_header == nullptr) continue;

  if (flow_graph()->function().ProhibitsInstructionHoisting()) {
  for (intptr_t i = 0; i < loop_headers.length(); ++i) {
    if (pre_header == nullptr) {
    bool seen_visible_effect = false;
         loop_it.Advance()) {
          seen_visible_effect = true;
          if (load->AllowsCSE()) {
            seen_visible_effect = true;
        bool is_loop_invariant = false;
            is_loop_invariant = true;
            is_loop_invariant = false;
        if (is_loop_invariant) {
          Hoist(&it, pre_header, current);
          seen_visible_effect = true;
       !block_it.Done(); block_it.Advance()) {
         instr_it.Advance()) {
      if (def != nullptr && def->IsAllocation() && def->env() == nullptr &&
        if (use != nullptr && !use->IsPhi() && IsOneTimeUse(use, def)) {
          instr_it.RemoveCurrentFromGraph();
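// Roughly: DelayAllocations looks for allocations that have no environment
// uses and whose non-phi uses reduce to a single dominant, at-most-once use,
// and takes them out of their current position so they can sit next to that
// use; paths that never reach the use then avoid the allocation. Illustrative
// Dart shape (not from this file):
//
//   var box = Box();        // allocation
//   if (rare) sink(box);    // only interesting use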
    Instruction* use = it.Current()->instruction();

  auto is_dominated_by_another_use = [&](Instruction* instr) {
    while (instr != def) {
      if (uses.HasKey(instr)) {
      if (auto block = instr->AsBlockEntry()) {
        instr = block->dominator()->last_instruction();
        instr = instr->previous();

  Instruction* dominant_use = nullptr;
  while (auto use = use_it.Next()) {
    bool dominated_by_another_use = false;
    if (auto phi = (*use)->AsPhi()) {
      ASSERT(phi->InputCount() == phi->block()->PredecessorCount());
      dominated_by_another_use = true;
      for (intptr_t i = 0; i < phi->InputCount(); i++) {
        if (phi->InputAt(i)->definition() == def) {
          if (!is_dominated_by_another_use(
                  phi->block()->PredecessorAt(i)->last_instruction())) {
            dominated_by_another_use = false;
      dominated_by_another_use =
          is_dominated_by_another_use((*use)->previous());
    if (!dominated_by_another_use) {
      if (dominant_use != nullptr) {
      dominant_use = *use;
  return dominant_use;
bool DelayAllocations::IsOneTimeUse(Instruction* use, Definition* def) {
  BlockEntryInstr* use_block = use->GetBlock();
  BlockEntryInstr* def_block = def->GetBlock();
  if (use_block == def_block) return true;

  DirectChainedHashMap<IdentitySetKeyValueTrait<BlockEntryInstr*>> seen;
  GrowableArray<BlockEntryInstr*> worklist;
    BlockEntryInstr* block = worklist.RemoveLast();
    for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
      BlockEntryInstr* pred = block->PredecessorAt(i);
      if (pred == use_block) return false;
      if (pred == def_block) continue;
      if (seen.HasKey(pred)) continue;
        aliased_set_(aliased_set),
        in_(graph_->preorder().length()),
        out_(graph_->preorder().length()),
        gen_(graph_->preorder().length()),
        kill_(graph_->preorder().length()),
        exposed_values_(graph_->preorder().length()),
        out_values_(graph_->preorder().length()),
        congruency_worklist_(6),
        in_worklist_(nullptr),
    const intptr_t num_blocks = graph_->preorder().length();
    for (intptr_t i = 0; i < num_blocks; i++) {
      exposed_values_.Add(nullptr);
      out_values_.Add(nullptr);

    if ((aliased_set != nullptr) && !aliased_set->IsEmpty()) {
      return load_optimizer.Optimize();
    OptimizeLazyInitialization();
    ComputeInitialSets();
    MarkLoopInvariantLoads();
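    // The load-forwarding pipeline runs in phases: redundant lazy-initializer
    // calls are pruned first, then per-block GEN/KILL sets are computed, loads
    // that are invariant in a loop are marked, and the later phases (out-set
    // computation, out-value merging and ForwardLoads) build on those sets to
    // replace redundant loads with previously loaded or stored values.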
  bool CallsInitializer(Instruction* instr) {
    if (auto* load_field = instr->AsLoadField()) {
      return load_field->calls_initializer();
    } else if (auto* load_static = instr->AsLoadStaticField()) {
      return load_static->calls_initializer();

  void ClearCallsInitializer(Instruction* instr) {
    if (auto* load_field = instr->AsLoadField()) {
      load_field->set_calls_initializer(false);
    } else if (auto* load_static = instr->AsLoadStaticField()) {
      load_static->set_calls_initializer(false);

  bool CanForwardLoadTo(Definition* load, Definition* replacement) {
    return !(CallsInitializer(load) && replacement->Type()->can_be_sentinel());

  bool IsSentinelStore(Instruction* instr) {
    if (auto* store_field = instr->AsStoreField()) {
      value = store_field->value();
    } else if (auto* store_static = instr->AsStoreStaticField()) {
      value = store_static->value();
    return value != nullptr && value->BindsToConstant() &&
           (value->BoundConstant().ptr() == Object::sentinel().ptr());
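  // The sentinel checks above support lazy initialization: a late field holds
  // Object::sentinel() until its initializer has run, so a load that
  // calls_initializer only needs the call while the sentinel may still be
  // observed. In Dart terms (illustrative only):
  //
  //   late int x = expensive();
  //   use(x);   // may need to run the initializer
  //   use(x);   // on this path x is already initialized; the call is dropped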
  void OptimizeLazyInitialization() {
    if (!FLAG_optimize_lazy_initializer_calls) {

    bool has_lazy_initializer_calls = false;
         !block_it.Done(); block_it.Advance()) {
      BlockEntryInstr* block = block_it.Current();
      BitVector* gen = gen_[block->preorder_number()];

      for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
           instr_it.Advance()) {
        Instruction* instr = instr_it.Current();

        bool is_load = false, is_store = false;
        Place place(instr, &is_load, &is_store);

        if (is_store && !IsSentinelStore(instr)) {
        } else if (is_load) {
          if (CallsInitializer(instr)) {
            if (gen->Contains(place_id)) {
              ClearCallsInitializer(instr);
              has_lazy_initializer_calls = true;

      if (phi_moves != nullptr) {
        for (intptr_t i = 0, n = phi_moves->length(); i < n; ++i) {
          const intptr_t from = (*phi_moves)[i].from();
          const intptr_t to = (*phi_moves)[i].to();
          if ((from != to) && gen->Contains(from)) {

    if (has_lazy_initializer_calls) {
      BitVector* temp = new (Z) BitVector(Z, aliased_set_->max_place_id());
      bool changed = true;
           !block_it.Done(); block_it.Advance()) {
        BlockEntryInstr* block = block_it.Current();
        BitVector* block_in = in_[block->preorder_number()];
        BitVector* gen = gen_[block->preorder_number()];

        if (block->IsGraphEntry()) {
          ASSERT(block->PredecessorCount() > 0);
          for (intptr_t i = 0, pred_count = block->PredecessorCount();
               i < pred_count; ++i) {
            BlockEntryInstr* pred = block->PredecessorAt(i);
            BitVector* pred_out = gen_[pred->preorder_number()];
            temp->Intersect(pred_out);

        if (!temp->Equals(*block_in)) {
          ASSERT(block_in->SubsetOf(*temp));
          block_in->AddAll(temp);

           !block_it.Done(); block_it.Advance()) {
        BlockEntryInstr* block = block_it.Current();
        BitVector* block_in = in_[block->preorder_number()];
        for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
             instr_it.Advance()) {
          Instruction* instr = instr_it.Current();
          if (CallsInitializer(instr) &&
            ClearCallsInitializer(instr);

      for (intptr_t i = 0, n = graph_->preorder().length(); i < n; ++i) {
  bool CanForwardStore(StoreIndexedInstr* array_store) {
    if (array_store == nullptr) return true;
               array_store->class_id());

  static bool AlreadyPinnedByRedefinition(Definition* replacement,
                                          Definition* redefinition) {
    Definition* defn = replacement;
    if (auto load_field = replacement->AsLoadField()) {
      defn = load_field->instance()->definition();
    } else if (auto load_indexed = replacement->AsLoadIndexed()) {
      defn = load_indexed->array()->definition();
    while ((unwrapped = defn->RedefinedValue()) != nullptr) {
      if (defn == redefinition) {
      defn = unwrapped->definition();

  Definition* ReplaceLoad(Definition* load, Definition* replacement) {
    Definition* redef = nullptr;
    if (auto load_field = load->AsLoadField()) {
      auto instance = load_field->instance()->definition();
      if (instance->RedefinedValue() != nullptr) {
        if ((load_field->slot().kind() ==
                 Slot::Kind::kGrowableObjectArray_data ||
             (load_field->slot().IsDartField() &&
                  .IsInstantiated()))) {
    } else if (auto load_indexed = load->AsLoadIndexed()) {
      if (load_indexed->class_id() == kArrayCid ||
          load_indexed->class_id() == kImmutableArrayCid) {
        auto instance = load_indexed->array()->definition();
        if (instance->RedefinedValue() != nullptr) {
    if (redef != nullptr && !AlreadyPinnedByRedefinition(replacement, redef)) {
      auto replacement_redefinition =
          new (zone()) RedefinitionInstr(new (zone()) Value(replacement));
      if (redef->IsDominatedBy(replacement)) {
        graph_->InsertAfter(redef, replacement_redefinition, nullptr,
      replacement = replacement_redefinition;

    load->ReplaceUsesWith(replacement);
  void ComputeInitialSets() {
         !block_it.Done(); block_it.Advance()) {
      BlockEntryInstr* block = block_it.Current();
      const intptr_t preorder_number = block->preorder_number();

      BitVector* kill = kill_[preorder_number];
      BitVector* gen = gen_[preorder_number];

      ZoneGrowableArray<Definition*>* exposed_values = nullptr;
      ZoneGrowableArray<Definition*>* out_values = nullptr;

      for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
           instr_it.Advance()) {
        Instruction* instr = instr_it.Current();

        bool is_load = false, is_store = false;
        Place place(instr, &is_load, &is_store);

        BitVector* killed = nullptr;
          const intptr_t alias_id =
        } else if (!place.IsImmutableField()) {

          Place* canonical_place = nullptr;
          if (CanForwardStore(instr->AsStoreIndexed())) {
            if (canonical_place != nullptr) {
              const intptr_t place_id = canonical_place->id();
              if (gen->Contains(place_id)) {
                ASSERT((out_values != nullptr) &&
                       ((*out_values)[place_id] != nullptr));
                if (FLAG_trace_optimization && graph_->should_print()) {
                  THR_Print("Removing redundant store to place %" Pd
                            " in block B%" Pd "\n",
                instr_it.RemoveCurrentFromGraph();

          if (killed != nullptr) {
            kill->AddAll(killed);
            gen->RemoveAll(killed);
          if (canonical_place != nullptr) {
            gen->Add(canonical_place->id());
            if (out_values == nullptr) out_values = CreateBlockOutValues();

          ASSERT(!instr->IsDefinition() ||
        } else if (is_load) {
          if ((canonical != nullptr) &&
              (canonical->id() != GetPlaceId(instr->AsDefinition()))) {
            SetPlaceId(instr->AsDefinition(), canonical->id());

        if (instr->HasUnknownSideEffects()) {

        Definition* defn = instr->AsDefinition();
        if (defn == nullptr) {

        if (auto* const alloc = instr->AsAllocation()) {
          if (!alloc->ObjectIsInitialized()) {
          for (Value* use = alloc->input_use_list(); use != nullptr;
               use = use->next_use()) {
            if (use->use_index() != 0) {

            intptr_t place_id = -1;
            Definition* forward_def = nullptr;
            const Slot* slot = nullptr;
            if (auto* const load = use->instruction()->AsLoadField()) {
              slot = &load->slot();
            } else if (auto* const store = use->instruction()->AsStoreField()) {
              ASSERT(!alloc->IsArrayAllocation());
              slot = &store->slot();
            } else if (use->instruction()->IsLoadIndexed() ||
                       use->instruction()->IsStoreIndexed()) {
              if (!alloc->IsArrayAllocation()) {
              if (alloc->IsAllocateTypedData()) {
              if (aliased_set_->places()[place_id]->kind() !=

            if (slot != nullptr) {
              ASSERT(forward_def == nullptr);
              if (aliased_set_->CanBeAliased(alloc) && slot->IsDartField() &&
                  slot->is_immutable()) {
              const intptr_t pos = alloc->InputForSlot(*slot);
                forward_def = alloc->InputAt(pos)->definition();
              } else if (!slot->is_tagged()) {

            ASSERT(forward_def != nullptr);
            if (out_values == nullptr) out_values = CreateBlockOutValues();
            (*out_values)[place_id] = forward_def;

          if (gen->Contains(place_id)) {
            ASSERT((out_values != nullptr) &&
                   ((*out_values)[place_id] != nullptr));
            Definition* replacement = (*out_values)[place_id];
            if (CanForwardLoadTo(defn, replacement)) {
              if (FLAG_trace_optimization && graph_->should_print()) {
                          defn->ssa_temp_index(), replacement->ssa_temp_index());
              ReplaceLoad(defn, replacement);
              instr_it.RemoveCurrentFromGraph();
          } else if (!kill->Contains(place_id)) {
            if (exposed_values == nullptr) {
              const intptr_t kMaxExposedValuesInitialSize = 5;
              exposed_values = new (Z) ZoneGrowableArray<Definition*>(
            exposed_values->Add(defn);

          if (out_values == nullptr) out_values = CreateBlockOutValues();
          (*out_values)[place_id] = defn;

      exposed_values_[preorder_number] = exposed_values;
      out_values_[preorder_number] = out_values;
                       BitVector* forwarded_loads) {
    forwarded_loads->Clear();

    for (intptr_t i = 0; i < phi_moves->length(); i++) {
      const intptr_t from = (*phi_moves)[i].from();
      const intptr_t to = (*phi_moves)[i].to();
      if (from == to) continue;

      if (out->Contains(from)) {
        forwarded_loads->Add(to);

    for (intptr_t i = 0; i < phi_moves->length(); i++) {
      const intptr_t from = (*phi_moves)[i].from();
      const intptr_t to = (*phi_moves)[i].to();
      if (from == to) continue;

    out->AddAll(forwarded_loads);
  void ComputeOutSets() {
    BitVector* temp = new (Z) BitVector(Z, aliased_set_->max_place_id());
    BitVector* forwarded_loads =
    BitVector* temp_out = new (Z) BitVector(Z, aliased_set_->max_place_id());

    bool changed = true;
           !block_it.Done(); block_it.Advance()) {
        BlockEntryInstr* block = block_it.Current();

        const intptr_t preorder_number = block->preorder_number();

        BitVector* block_in = in_[preorder_number];
        BitVector* block_out = out_[preorder_number];
        BitVector* block_kill = kill_[preorder_number];
        BitVector* block_gen = gen_[preorder_number];

        if (block->IsGraphEntry()) {
          ASSERT(block->PredecessorCount() > 0);
          for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
            BlockEntryInstr* pred = block->PredecessorAt(i);
            BitVector* pred_out = out_[pred->preorder_number()];
            if (pred_out == nullptr) continue;
            if (phi_moves != nullptr) {
              temp_out->CopyFrom(pred_out);
              PerformPhiMoves(phi_moves, temp_out, forwarded_loads);
              pred_out = temp_out;
            temp->Intersect(pred_out);

        if (!temp->Equals(*block_in) || (block_out == nullptr)) {
          block_in->CopyFrom(temp);
          temp->RemoveAll(block_kill);
          temp->AddAll(block_gen);

          if ((block_out == nullptr) || !block_out->Equals(*temp)) {
            if (block_out == nullptr) {
              block_out = out_[preorder_number] =
            block_out->CopyFrom(temp);
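  // ComputeOutSets iterates the usual forward dataflow equations to a fixed
  // point: IN(B) is the intersection of OUT(P) over predecessors P (with
  // phi-moves applied to the predecessor's set first), and
  // OUT(B) = GEN(B) | (IN(B) & ~KILL(B)), which is what the RemoveAll/AddAll
  // sequence on `temp` computes above.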
  void ComputeOutValues() {
    GrowableArray<PhiInstr*> pending_phis(5);
    ZoneGrowableArray<Definition*>* temp_forwarded_values = nullptr;

         !block_it.Done(); block_it.Advance()) {
      BlockEntryInstr* block = block_it.Current();

      const bool can_merge_eagerly = CanMergeEagerly(block);

      const intptr_t preorder_number = block->preorder_number();

      ZoneGrowableArray<Definition*>* block_out_values =
          out_values_[preorder_number];

      for (BitVector::Iterator it(out_[preorder_number]); !it.Done();
        const intptr_t place_id = it.Current();

        if (block_out_values == nullptr) {
          out_values_[preorder_number] = block_out_values =
              CreateBlockOutValues();

        if ((*block_out_values)[place_id] == nullptr) {
          ASSERT(block->PredecessorCount() > 0);
          Definition* in_value = can_merge_eagerly
                                     ? MergeIncomingValues(block, place_id)
          if ((in_value == nullptr) &&
              (in_[preorder_number]->Contains(place_id))) {
            PhiInstr* phi = new (Z)
                PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
            pending_phis.Add(phi);
          (*block_out_values)[place_id] = in_value;

      if ((phi_moves != nullptr) && (block_out_values != nullptr)) {
        if (temp_forwarded_values == nullptr) {
          temp_forwarded_values = CreateBlockOutValues();

        for (intptr_t i = 0; i < phi_moves->length(); i++) {
          const intptr_t from = (*phi_moves)[i].from();
          const intptr_t to = (*phi_moves)[i].to();
          if (from == to) continue;

          (*temp_forwarded_values)[to] = (*block_out_values)[from];

        for (intptr_t i = 0; i < phi_moves->length(); i++) {
          const intptr_t from = (*phi_moves)[i].from();
          const intptr_t to = (*phi_moves)[i].to();
          if (from == to) continue;

          (*block_out_values)[to] = (*temp_forwarded_values)[to];

      if (FLAG_trace_load_optimization && graph_->should_print()) {
        aliased_set_->PrintSet(in_[preorder_number]);
        aliased_set_->PrintSet(kill_[preorder_number]);
        aliased_set_->PrintSet(out_[preorder_number]);

    for (intptr_t i = 0; i < pending_phis.length(); i++) {
      FillPhiInputs(pending_phis[i]);
  bool CanMergeEagerly(BlockEntryInstr* block) {
    for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
      BlockEntryInstr* pred = block->PredecessorAt(i);
      if (pred->postorder_number() < block->postorder_number()) {
  void MarkLoopInvariantLoads() {
    const ZoneGrowableArray<BlockEntryInstr*>& loop_headers =

    ZoneGrowableArray<BitVector*>* invariant_loads =
        new (Z) ZoneGrowableArray<BitVector*>(loop_headers.length());

    for (intptr_t i = 0; i < loop_headers.length(); i++) {
      BlockEntryInstr* header = loop_headers[i];
      BlockEntryInstr* pre_header = header->ImmediateDominator();
      if (pre_header == nullptr) {
        invariant_loads->Add(nullptr);

      BitVector* loop_gen = new (Z) BitVector(Z, aliased_set_->max_place_id());
      for (BitVector::Iterator loop_it(header->loop_info()->blocks());
           !loop_it.Done(); loop_it.Advance()) {
        const intptr_t preorder_number = loop_it.Current();
        loop_gen->AddAll(gen_[preorder_number]);

      for (BitVector::Iterator loop_it(header->loop_info()->blocks());
           !loop_it.Done(); loop_it.Advance()) {
        const intptr_t preorder_number = loop_it.Current();
        loop_gen->RemoveAll(kill_[preorder_number]);

      if (FLAG_trace_optimization && graph_->should_print()) {
        for (BitVector::Iterator it(loop_gen); !it.Done(); it.Advance()) {
          THR_Print("place %s is loop invariant for B%" Pd "\n",

      invariant_loads->Add(loop_gen);
  Definition* MergeIncomingValues(BlockEntryInstr* block, intptr_t place_id) {
    static Definition* const kDifferentValuesMarker =
        reinterpret_cast<Definition*>(-1);
    Definition* incoming = nullptr;
    for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
      BlockEntryInstr* pred = block->PredecessorAt(i);
      ZoneGrowableArray<Definition*>* pred_out_values =
          out_values_[pred->preorder_number()];
      if ((pred_out_values == nullptr) ||
          ((*pred_out_values)[place_id] == nullptr)) {
      } else if (incoming == nullptr) {
        incoming = (*pred_out_values)[place_id];
      } else if (incoming != (*pred_out_values)[place_id]) {
        incoming = kDifferentValuesMarker;

    if (incoming != kDifferentValuesMarker) {
      ASSERT(incoming != nullptr);

        new (Z) PhiInstr(block->AsJoinEntry(), block->PredecessorCount());
  void FillPhiInputs(PhiInstr* phi) {
    BlockEntryInstr* block = phi->GetBlock();

    for (intptr_t i = 0; i < block->PredecessorCount(); i++) {
      BlockEntryInstr* pred = block->PredecessorAt(i);
      ZoneGrowableArray<Definition*>* pred_out_values =
          out_values_[pred->preorder_number()];
      ASSERT((*pred_out_values)[place_id] != nullptr);

      Definition* replacement = (*pred_out_values)[place_id]->Replacement();
      phi->SetInputAt(i, input);
      replacement->AddInputUse(input);
      if (replacement->representation() == kUntagged) {
        phi->set_representation(kUntagged);

      THR_Print("created pending phi %s for %s at B%" Pd "\n", phi->ToCString(),
                aliased_set_->places()[place_id]->ToCString(),
  void ForwardLoads() {
         !block_it.Done(); block_it.Advance()) {
      BlockEntryInstr* block = block_it.Current();

      ZoneGrowableArray<Definition*>* loads =
          exposed_values_[block->preorder_number()];
      if (loads == nullptr) continue;

      BitVector* in = in_[block->preorder_number()];

      for (intptr_t i = 0; i < loads->length(); i++) {
        Definition* load = (*loads)[i];

        Definition* replacement = MergeIncomingValues(block, GetPlaceId(load));
        ASSERT(replacement != nullptr);

        replacement = replacement->Replacement();

        if ((load != replacement) && CanForwardLoadTo(load, replacement)) {
          if (FLAG_trace_optimization && graph_->should_print()) {
                      load->ssa_temp_index(), replacement->ssa_temp_index());

          replacement = ReplaceLoad(load, replacement);
          load->RemoveFromGraph();
          load->SetReplacement(replacement);
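  // ForwardLoads is the step that actually rewrites the graph: every
  // upwards-exposed load whose place is available on entry to its block is
  // replaced by the merged incoming value. At the Dart level this is what
  // collapses, e.g.
  //
  //   final a = obj.field;
  //   final b = obj.field;   // no intervening store: reuses the first load
  //
  // into a single LoadField whose value is used twice (illustrative example).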
  bool EliminateRedundantPhi(PhiInstr* phi) {
    Definition* value = nullptr;

    if (in_worklist_ == nullptr) {
      in_worklist_->Clear();

    in_worklist_->Add(phi->ssa_temp_index());
    for (intptr_t i = 0; i < worklist_.length(); i++) {
      PhiInstr* phi = worklist_[i];
      for (intptr_t i = 0; i < phi->InputCount(); i++) {
        Definition* input = phi->InputAt(i)->definition();
        if (input == phi) continue;

        PhiInstr* phi_input = input->AsPhi();
        if ((phi_input != nullptr) && !phi_input->is_alive()) {
          if (!in_worklist_->Contains(phi_input->ssa_temp_index())) {
            worklist_.Add(phi_input);
            in_worklist_->Add(phi_input->ssa_temp_index());

        if (value == nullptr) {
        } else if (value != input) {

    for (intptr_t i = 0; i < worklist_.length(); i++) {
      worklist_[i]->ReplaceUsesWith(value);
  bool CanBeCongruent(Definition* a, Definition* b) {
    return (a->tag() == b->tag()) &&
           ((a->IsPhi() && (a->GetBlock() == b->GetBlock())) ||
            (a->AllowsCSE() && a->AttributesEqual(*b)));

  bool AddPairToCongruencyWorklist(Definition* a, Definition* b) {
    if (!CanBeCongruent(a, b)) {

    if (in_worklist_->Contains(a->ssa_temp_index())) {
      for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
        if (a == congruency_worklist_[i]) {
          return (b == congruency_worklist_[i + 1]);
    } else if (in_worklist_->Contains(b->ssa_temp_index())) {
      return AddPairToCongruencyWorklist(b, a);

    congruency_worklist_.Add(a);
    congruency_worklist_.Add(b);
    in_worklist_->Add(a->ssa_temp_index());
  bool AreInputsCongruent(Definition* a, Definition* b) {
    ASSERT(a->InputCount() == b->InputCount());
    for (intptr_t j = 0; j < a->InputCount(); j++) {
      Definition* inputA = a->InputAt(j)->definition();
      Definition* inputB = b->InputAt(j)->definition();

      if (inputA != inputB) {
        if (!AddPairToCongruencyWorklist(inputA, inputB)) {

  static bool Dominates(Instruction* dom, Instruction* other) {
    BlockEntryInstr* dom_block = dom->GetBlock();
    BlockEntryInstr* other_block = other->GetBlock();

    if (dom_block == other_block) {
      for (Instruction* current = dom->next(); current != nullptr;
           current = current->next()) {
        if (current == other) {

    return dom_block->Dominates(other_block);
  bool ReplacePhiWith(PhiInstr* phi, PhiInstr* replacement) {
    ASSERT(phi->InputCount() == replacement->InputCount());
    ASSERT(phi->block() == replacement->block());

    congruency_worklist_.Clear();
    if (in_worklist_ == nullptr) {
      in_worklist_->Clear();

    if (!AddPairToCongruencyWorklist(phi, replacement)) {

    for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
      if (!AreInputsCongruent(congruency_worklist_[i],
                              congruency_worklist_[i + 1])) {

    for (intptr_t i = 0; i < congruency_worklist_.length(); i += 2) {
      Definition* a = congruency_worklist_[i];
      Definition* b = congruency_worklist_[i + 1];
      if (Dominates(a, b)) {
        THR_Print("Replacing %s with congruent %s\n", a->ToCString(),
      a->ReplaceUsesWith(b);

        PhiInstr* phi_a = a->AsPhi();
        if (phi_a->is_alive()) {
          phi_a->block()->RemovePhi(phi_a);
          phi_a->UnuseAllInputs();
        a->RemoveFromGraph();
  bool EmitPhi(PhiInstr* phi) {
    for (PhiIterator it(phi->block()); !it.Done(); it.Advance()) {
      if (ReplacePhiWith(phi, it.Current())) {

    phi->block()->InsertPhi(phi);

    for (intptr_t i = 0; i < phis_.length(); i++) {
      PhiInstr* phi = phis_[i];
      if (!phi->HasUses() || EliminateRedundantPhi(phi)) {
        phi->UnuseAllInputs();

    for (intptr_t i = 0; i < phis_.length(); i++) {
      PhiInstr* phi = phis_[i];
      if ((phi != nullptr) && (!phi->HasUses() || !EmitPhi(phi))) {
        phi->UnuseAllInputs();
  ZoneGrowableArray<Definition*>* CreateBlockOutValues() {
    ZoneGrowableArray<Definition*>* out =
        new (Z) ZoneGrowableArray<Definition*>(aliased_set_->max_place_id());

  PointerSet<Place>* map_;
  AliasedSet* aliased_set_;

  GrowableArray<BitVector*> in_;
  GrowableArray<BitVector*> out_;
  GrowableArray<BitVector*> gen_;
  GrowableArray<BitVector*> kill_;

  GrowableArray<ZoneGrowableArray<Definition*>*> exposed_values_;
  GrowableArray<ZoneGrowableArray<Definition*>*> out_values_;

  GrowableArray<PhiInstr*> phis_;
  GrowableArray<PhiInstr*> worklist_;
  GrowableArray<Definition*> congruency_worklist_;
  BitVector* in_worklist_;
                               bool run_load_optimization) {
  bool changed = false;
  if (FLAG_load_cse && run_load_optimization) {

  changed = OptimizeRecursive(graph, graph->graph_entry(), &map) || changed;

bool DominatorBasedCSE::OptimizeRecursive(FlowGraph* graph,
  bool changed = false;
    if (replacement != nullptr) {
      map->Insert(current);

  if (num_children != 0) {
    for (intptr_t i = 0; i < num_children; ++i) {
      if (i < num_children - 1) {
        CSEInstructionSet child_map(*map);
        changed = OptimizeRecursive(graph, child, &child_map) || changed;
        changed = OptimizeRecursive(graph, child, map) || changed;
        aliased_set_(aliased_set),
        exposed_stores_(graph_->postorder().length()) {
    const intptr_t num_blocks = graph_->postorder().length();
    for (intptr_t i = 0; i < num_blocks; i++) {
      exposed_stores_.Add(nullptr);

    if ((aliased_set != nullptr) && !aliased_set->IsEmpty()) {
      store_optimizer.Optimize();

    if (FLAG_trace_load_optimization && graph_->should_print()) {
    EliminateDeadStores();

  bool CanEliminateStore(Instruction* instr) {
    switch (instr->tag()) {
      case Instruction::kStoreField: {
        StoreFieldInstr* store_instance = instr->AsStoreField();
        return !store_instance->is_initialization();
      case Instruction::kStoreIndexed:
      case Instruction::kStoreStaticField:
    BitVector* all_aliased_places = nullptr;
      all_aliased_places =
    const auto& places = aliased_set_->places();
    for (intptr_t i = 0; i < places.length(); i++) {
      Place* place = places[i];
        if ((all_aliased_places != nullptr) &&
             instance->Identity().IsAliased())) {
          all_aliased_places->Add(i);
        if (all_aliased_places != nullptr) {
          all_aliased_places->Add(i);
        if (index != nullptr) {

         !block_it.Done(); block_it.Advance()) {
           instr_it.Advance()) {
        bool is_load = false;
        bool is_store = false;
        Place place(instr, &is_load, &is_store);
              CanEliminateStore(instr)) {
            if (FLAG_trace_optimization && graph_->should_print()) {
              THR_Print("Removing dead store to place %" Pd " in block B%" Pd
            instr_it.RemoveCurrentFromGraph();
          if (exposed_stores == nullptr) {
            const intptr_t kMaxExposedStoresInitialSize = 5;
          exposed_stores->Add(instr);

        if (instr->IsThrow() || instr->IsReThrow() || instr->IsReturnBase()) {
          live_in->AddAll(all_aliased_places);
            instr->MayThrow() || instr->IsReturnBase()) {

      exposed_stores_[postorder_number] = exposed_stores;

    if (FLAG_trace_load_optimization && graph_->should_print()) {
  void EliminateDeadStores() {
         !block_it.Done(); block_it.Advance()) {
          exposed_stores_[postorder_number];
      if (exposed_stores == nullptr) continue;

      for (intptr_t i = 0; i < exposed_stores->length(); ++i) {
        bool is_load = false;
        bool is_store = false;
        Place place(instr, &is_load, &is_store);
        ASSERT(!is_load && is_store);
        if (place.IsImmutableField()) {
            CanEliminateStore(instr)) {
          if (FLAG_trace_optimization && graph_->should_print()) {
            THR_Print("Removing dead store to place %" Pd " block B%" Pd "\n",

  PointerSet<Place>* map_;
  AliasedSet* aliased_set_;
  GrowableArray<ZoneGrowableArray<Instruction*>*> exposed_stores_;
  if (FLAG_dead_store_elimination && FLAG_load_cse) {

  const intptr_t kMaxAllocationSinkingNumElements = 32;
  return (length >= 0) && (length <= kMaxAllocationSinkingNumElements);

  return instr->IsAllocation() &&
         (!instr->IsArrayAllocation() ||
bool AllocationSinking::IsSafeUse(Value* use, SafeUseCheck check_type) {
  if (use->instruction()->IsMaterializeObject()) {

  if (auto* const alloc = use->instruction()->AsAllocation()) {
           ((check_type == kOptimisticCheck) ||
            alloc->Identity().IsAllocationSinkingCandidate());

  if (auto* store = use->instruction()->AsStoreField()) {
    if (use == store->value()) {
             ((check_type == kOptimisticCheck) ||
              instance->Identity().IsAllocationSinkingCandidate());

  if (auto* store = use->instruction()->AsStoreIndexed()) {
    if (use == store->index()) {
    if (use == store->array()) {
      if (!store->index()->BindsToSmiConstant()) {
      const intptr_t index = store->index()->BoundSmiConstant();
      if (index < 0 || index >= use->definition()
                                    ->AsArrayAllocation()
                                    ->GetConstantNumElements()) {
      if (auto* alloc_typed_data = use->definition()->AsAllocateTypedData()) {
        if (store->class_id() != alloc_typed_data->class_id() ||
            !store->aligned() ||
                alloc_typed_data->class_id())) {
    if (use == store->value()) {
             ((check_type == kOptimisticCheck) ||
              instance->Identity().IsAllocationSinkingCandidate());
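// A use is considered "safe" for allocation sinking when it cannot let the
// object escape: being referenced from a MaterializeObject, being stored into
// an object that is itself a sinking candidate, or being the array operand of
// a StoreIndexed whose index is a constant Smi within the allocation's bounds
// (and, for typed data, whose class id matches and whose access is aligned).
// Anything else disqualifies the candidate.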
bool AllocationSinking::IsAllocationSinkingCandidate(Definition* alloc,
                                                     SafeUseCheck check_type) {
  for (Value* use = alloc->input_use_list(); use != nullptr;
       use = use->next_use()) {
    if (!IsSafeUse(use, check_type)) {
        THR_Print("use of %s at %s is unsafe for allocation sinking\n",
                  alloc->ToCString(), use->instruction()->ToCString());

  if (auto* const alloc = use->instruction()->AsAllocation()) {
    return store->instance()->definition();
    return store->array()->definition();

  if (auto load = instr->AsLoadField()) {
    return load->instance()->definition();
  if (auto load = instr->AsLoadIndexed()) {
    return load->array()->definition();
void AllocationSinking::EliminateAllocation(Definition* alloc) {
  ASSERT(IsAllocationSinkingCandidate(alloc, kStrictCheck));

  if (FLAG_trace_optimization && flow_graph_->should_print()) {
    THR_Print("removing allocation from the graph: v%" Pd "\n",
              alloc->ssa_temp_index());

  GrowableArray<Instruction*> stores_to_remove;
  for (Value* use = alloc->input_use_list(); use != nullptr;
       use = use->next_use()) {
    Instruction* const instr = use->instruction();
    ASSERT(!instr->IsAllocation());
    stores_to_remove.Add(instr);

  for (auto* const store : stores_to_remove) {
    if (store->previous() != nullptr) {
      store->RemoveFromGraph();

  for (Value* use = alloc->env_use_list(); use != nullptr;
       use = use->next_use()) {
    ASSERT(use->instruction()->IsMaterializeObject());

  alloc->RemoveFromGraph();
void AllocationSinking::CollectCandidates() {
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Instruction* current = it.Current();
        Definition* alloc = current->Cast<Definition>();
        if (IsAllocationSinkingCandidate(alloc, kOptimisticCheck)) {
          candidates_.Add(alloc);

  for (intptr_t i = 0; i < candidates_.length(); i++) {
    Definition* alloc = candidates_[i];
    if (alloc->Identity().IsAllocationSinkingCandidate()) {
      if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {

  for (intptr_t i = 0; i < candidates_.length(); i++) {
    Definition* alloc = candidates_[i];
    if (alloc->Identity().IsAllocationSinkingCandidate()) {
      if (FLAG_trace_optimization && flow_graph_->should_print()) {
        THR_Print("discovered allocation sinking candidate: v%" Pd "\n",
                  alloc->ssa_temp_index());

      candidates_[j] = alloc;

  candidates_.TruncateTo(j);
void AllocationSinking::NormalizeMaterializations() {
  for (intptr_t i = 0; i < candidates_.length(); i++) {
    Definition* alloc = candidates_[i];

    for (Value* use = alloc->input_use_list(); use != nullptr; use = next_use) {
      next_use = use->next_use();
      if (use->instruction()->IsMaterializeObject()) {

void AllocationSinking::RemoveUnusedMaterializations() {
  for (intptr_t i = 0; i < materializations_.length(); i++) {
    MaterializeObjectInstr* mat = materializations_[i];
    if ((mat->input_use_list() == nullptr) &&
        (mat->env_use_list() == nullptr)) {
      for (intptr_t i = 0; i < mat->InputCount(); i++) {
        Definition* load = mat->InputAt(i)->definition();
          load->RemoveFromGraph();
      mat->RemoveFromGraph();
      materializations_[j] = mat;

  materializations_.TruncateTo(j);
void AllocationSinking::DiscoverFailedCandidates() {
  for (intptr_t i = 0; i < candidates_.length(); i++) {
    Definition* alloc = candidates_[i];
    if (alloc->Identity().IsAllocationSinkingCandidate()) {
      if (!IsAllocationSinkingCandidate(alloc, kStrictCheck)) {

  for (intptr_t i = 0; i < candidates_.length(); i++) {
    Definition* alloc = candidates_[i];
    if (!alloc->Identity().IsAllocationSinkingCandidate()) {
      if (FLAG_trace_optimization && flow_graph_->should_print()) {
        THR_Print("allocation v%" Pd " can't be eliminated\n",
                  alloc->ssa_temp_index());

      for (Value* use = alloc->env_use_list(); use != nullptr;
           use = use->next_use()) {
        ASSERT(use->instruction()->IsMaterializeObject());

      alloc->set_env_use_list(nullptr);
      for (Value* use = alloc->input_use_list(); use != nullptr;
           use = use->next_use()) {
        if (use->instruction()->IsLoadField() ||
            use->instruction()->IsLoadIndexed()) {
          Definition* load = use->instruction()->AsDefinition();
          load->RemoveFromGraph();
          ASSERT(use->instruction()->IsMaterializeObject() ||
                 use->instruction()->IsPhi() ||
                 use->instruction()->IsStoreField() ||
                 use->instruction()->IsStoreIndexed());

      candidates_[j] = alloc;

  if (j != candidates_.length()) {
    for (intptr_t i = 0; i < materializations_.length(); i++) {
      MaterializeObjectInstr* mat = materializations_[i];
      if (!mat->allocation()->Identity().IsAllocationSinkingCandidate()) {
        mat->ReplaceUsesWith(mat->allocation());
        mat->RemoveFromGraph();
        materializations_[k] = mat;
    materializations_.TruncateTo(k);

  candidates_.TruncateTo(j);
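// Sinking is optimistic: candidates are first accepted under kOptimisticCheck
// and re-validated with kStrictCheck after materializations have been
// inserted. DiscoverFailedCandidates demotes anything that no longer
// qualifies, detaches its loads and materializations, and compacts both the
// candidates_ and materializations_ arrays in place.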
  CollectCandidates();

  for (intptr_t i = 0; i < candidates_.length(); i++) {
    InsertMaterializations(candidates_[i]);

  NormalizeMaterializations();
  RemoveUnusedMaterializations();
  DiscoverFailedCandidates();

  for (intptr_t i = 0; i < candidates_.length(); i++) {
    EliminateAllocation(candidates_[i]);

  for (intptr_t i = 0; i < materializations_.length(); i++) {
    for (intptr_t j = 0; j < mat->InputCount(); j++) {
      if (defn->IsBox()) {

  mat->previous()->LinkTo(mat->next());
  mat->set_next(nullptr);
  mat->set_previous(nullptr);

  while (mat->next()->IsMaterializeObject()) {
    mat = mat->next()->AsMaterializeObject();

  while (exit->previous()->IsMaterializeObject()) {

  if (exit->IsMaterializeObject()) {
       mat != nullptr; mat = mat->previous()->AsMaterializeObject()) {
    if (mat->allocation() == alloc) {

  if (alloc->IsAllocateObject() &&
void AllocationSinking::CreateMaterializationAt(
    const ZoneGrowableArray<const Slot*>& slots) {
  for (auto slot : slots) {
    Definition* load = nullptr;
    if (slot->IsArrayElement()) {
      intptr_t array_cid, index;
      if (alloc->IsCreateArray()) {
        array_cid = kArrayCid;
      } else if (auto alloc_typed_data = alloc->AsAllocateTypedData()) {
        array_cid = alloc_typed_data->class_id();
        index = slot->offset_in_bytes() /
      load = new (Z) LoadIndexedInstr(
          LoadFieldInstr(new (Z) Value(alloc), *slot, access, alloc->source());

  const Class* cls = nullptr;
  intptr_t length_or_shape = -1;
  if (auto instr = alloc->AsAllocateObject()) {
    cls = &(instr->cls());
  } else if (alloc->IsAllocateClosure()) {
  } else if (auto instr = alloc->AsAllocateContext()) {
    length_or_shape = instr->num_context_variables();
  } else if (auto instr = alloc->AsAllocateUninitializedContext()) {
    length_or_shape = instr->num_context_variables();
  } else if (auto instr = alloc->AsCreateArray()) {
    length_or_shape = instr->GetConstantNumElements();
  } else if (auto instr = alloc->AsAllocateTypedData()) {
    length_or_shape = instr->GetConstantNumElements();
  } else if (auto instr = alloc->AsAllocateRecord()) {
    length_or_shape = instr->shape().AsInt();
  } else if (auto instr = alloc->AsAllocateSmallRecord()) {
    length_or_shape = instr->shape().AsInt();

  MaterializeObjectInstr* mat = new (Z) MaterializeObjectInstr(
      alloc->AsAllocation(), *cls, length_or_shape, slots, std::move(values));

  exit->ReplaceInEnvironment(alloc, mat);
    val->set_instruction(mat);
    alloc->AddEnvUse(val);

  materializations_.Add(mat);
template <typename T>
  for (intptr_t i = 0; i < list->length(); i++) {
    if ((*list)[i] == value) {
void AllocationSinking::ExitsCollector::Collect(Definition* alloc) {
  for (Value* use = alloc->env_use_list(); use != nullptr;
       use = use->next_use()) {
    if (use->instruction()->IsMaterializeObject()) {
                               use->instruction()->AsMaterializeObject()));

  for (Value* use = alloc->input_use_list(); use != nullptr;
       use = use->next_use()) {
    if ((obj != nullptr) && (obj != alloc)) {

void AllocationSinking::ExitsCollector::CollectTransitively(Definition* alloc) {
  exits_.TruncateTo(0);
  worklist_.TruncateTo(0);

  worklist_.Add(alloc);
  for (intptr_t i = 0; i < worklist_.length(); i++) {
    Collect(worklist_[i]);

  if (!slot.IsIdentical(Slot::PointerBase_data())) return false;
  if (!alloc->IsAllocateObject()) return false;
  auto const cid = alloc->AsAllocateObject()->cls().id();
void AllocationSinking::InsertMaterializations(Definition* alloc) {
  auto slots = new (Z) ZoneGrowableArray<const Slot*>(5);

  for (Value* use = alloc->input_use_list(); use != nullptr;
       use = use->next_use()) {
    ASSERT(!use->instruction()->AsAllocation());
    if (auto store = use->instruction()->AsStoreField()) {
    } else if (auto store = use->instruction()->AsStoreIndexed()) {
      const intptr_t index = store->index()->BoundSmiConstant();
      if (alloc->IsCreateArray()) {
      } else if (alloc->IsAllocateTypedData()) {

  if (auto* const allocation = alloc->AsAllocation()) {
    for (intptr_t pos = 0; pos < allocation->InputCount(); pos++) {
      if (auto* const slot = allocation->SlotForInput(pos)) {
        if (!slot->IsImmutableLengthSlot()) {

  exits_collector_.CollectTransitively(alloc);

  for (intptr_t i = 0; i < exits_collector_.exits().length(); i++) {
    CreateMaterializationAt(exits_collector_.exits()[i], alloc, *slots);
    : flow_graph_(flow_graph),
      worklist_(flow_graph, 10) {}

  void OptimizeDeadParameters() {
    NumberCatchEntryParameters();
    ComputeIncomingValues();
    CollectAliveParametersOrPhis();
    PropagateLivenessToInputs();
    EliminateDeadParameters();

  static intptr_t GetParameterId(const Instruction* instr) {
    return instr->GetPassSpecificId(CompilerPass::kTryCatchOptimization);

  static void SetParameterId(Instruction* instr, intptr_t id) {
    instr->SetPassSpecificId(CompilerPass::kTryCatchOptimization, id);

  static bool HasParameterId(Instruction* instr) {
    return instr->HasPassSpecificId(CompilerPass::kTryCatchOptimization);

  void NumberCatchEntryParameters() {
      const GrowableArray<Definition*>& idefs =
          *catch_entry->initial_definitions();
      for (auto idef : idefs) {
        if (idef->IsParameter()) {
          SetParameterId(idef, parameter_info_.length());
          parameter_info_.Add(new ParameterInfo(idef->AsParameter()));

      catch_by_index_.EnsureLength(catch_entry->catch_try_index() + 1, nullptr);
      catch_by_index_[catch_entry->catch_try_index()] = catch_entry;
  void ComputeIncomingValues() {
      ASSERT(block->try_index() < catch_by_index_.length());
      auto catch_entry = catch_by_index_[block->try_index()];
      const auto& idefs = *catch_entry->initial_definitions();

      for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
           instr_it.Advance()) {
        Instruction* current = instr_it.Current();
        if (!current->MayThrow()) continue;

        Environment* env = current->env()->Outermost();

        for (intptr_t env_idx = 0; env_idx < idefs.length(); ++env_idx) {
          if (ParameterInstr* param = idefs[env_idx]->AsParameter()) {
            Definition* defn = env->ValueAt(env_idx)->definition();
            for (auto other_defn :
                 parameter_info_[GetParameterId(param)]->incoming) {
              if (other_defn == defn) {
            parameter_info_[GetParameterId(param)]->incoming.Add(defn);
  void CollectAliveParametersOrPhis() {
      if (JoinEntryInstr* join = block->AsJoinEntry()) {
        if (join->phis() == nullptr) continue;

        for (auto phi : *join->phis()) {
          if (HasActualUse(phi)) {

    for (auto info : parameter_info_) {
      if (HasActualUse(info->instr)) {
        MarkLive(info->instr);

  void PropagateLivenessToInputs() {
    while (!worklist_.IsEmpty()) {
      if (ParameterInstr* param = defn->AsParameter()) {
        auto s = parameter_info_[GetParameterId(param)];
        for (auto input : s->incoming) {
      } else if (PhiInstr* phi = defn->AsPhi()) {
        for (intptr_t i = 0; i < phi->InputCount(); i++) {
          MarkLive(phi->InputAt(i)->definition());
  void MarkLive(Definition* defn) {
    if (PhiInstr* phi = defn->AsPhi()) {
      if (!phi->is_alive()) {
    } else if (ParameterInstr* param = defn->AsParameter()) {
      if (HasParameterId(param)) {
        auto input_s = parameter_info_[GetParameterId(param)];
        if (!input_s->alive) {
          input_s->alive = true;
          worklist_.Add(param);
    } else if (UnboxInstr* unbox = defn->AsUnbox()) {
      MarkLive(unbox->value()->definition());
  // Replace dead parameters with the null constant and clear the
  // corresponding environment slots at throwing instructions inside the
  // matching try blocks.
  void EliminateDeadParameters() {
    for (auto info : parameter_info_) {
      if (!info->alive) {
        info->instr->ReplaceUsesWith(flow_graph_->constant_null());
      }
    }

    for (auto block : flow_graph_->reverse_postorder()) {
      if (block->try_index() == -1) continue;

      auto catch_entry = catch_by_index_[block->try_index()];
      const auto& idefs = *catch_entry->initial_definitions();

      for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
           instr_it.Advance()) {
        Instruction* current = instr_it.Current();
        if (!current->MayThrow()) continue;

        Environment* env = current->env()->Outermost();

        for (intptr_t env_idx = 0; env_idx < idefs.length(); ++env_idx) {
          if (ParameterInstr* param = idefs[env_idx]->AsParameter()) {
            if (!parameter_info_[GetParameterId(param)]->alive) {
              env->ValueAt(env_idx)->BindToEnvironment(
                  flow_graph_->constant_null());
            }
          }
        }
      }
    }
  }
  // Returns true if the definition has a use that is not a Phi. An Unbox use
  // only counts if the Unbox itself has an actual use.
  static bool HasActualUse(Definition* defn) {
    for (Value* use = defn->input_use_list(); use != nullptr;
         use = use->next_use()) {
      Instruction* use_instruction = use->instruction();
      if (UnboxInstr* unbox = use_instruction->AsUnbox()) {
        if (HasActualUse(unbox)) {
          return true;
        }
      } else if (!use_instruction->IsPhi()) {
        return true;
      }
    }
    return false;
  }
  struct ParameterInfo : public ZoneAllocated {
    explicit ParameterInfo(ParameterInstr* instr) : instr(instr) {}

    ParameterInstr* instr;
    bool alive = false;
    GrowableArray<Definition*> incoming;
  };

  FlowGraph* const flow_graph_;
  const bool is_aot_;

  // Per-parameter information for all catch entries, indexed by the id
  // assigned in NumberCatchEntryParameters.
  GrowableArray<ParameterInfo*> parameter_info_;

  // Mapping from catch_try_index to the corresponding catch entry.
  GrowableArray<CatchBlockEntryInstr*> catch_by_index_;

  // Worklist of parameters and phis with live uses.
  DefinitionWorklist worklist_;
};
// Drives the analysis: eliminate dead catch-entry parameters, then try to
// replace catch-entry parameters whose incoming value is the same constant
// at every potentially throwing instruction of the try block.
void TryCatchAnalyzer::Optimize() {
  OptimizeDeadParameters();

  const auto& catch_entries = flow_graph_->graph_entry()->catch_entries();
  for (auto catch_entry : catch_entries) {
    // cdefs[i] describes what is known about environment slot i:
    //   ParameterInstr  - nothing observed yet
    //   ConstantInstr   - the slot binds to this constant everywhere
    //   nullptr         - the slot is not a constant
    auto* idefs = catch_entry->initial_definitions();
    GrowableArray<Definition*> cdefs(idefs->length());
    cdefs.AddArray(*idefs);

    // The exception and stacktrace slots are never constant.
    cdefs[flow_graph_->EnvIndex(catch_entry->raw_exception_var())] = nullptr;
    cdefs[flow_graph_->EnvIndex(catch_entry->raw_stacktrace_var())] = nullptr;

    for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
         !block_it.Done(); block_it.Advance()) {
      BlockEntryInstr* block = block_it.Current();
      if (block->try_index() == catch_entry->catch_try_index()) {
        for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
             instr_it.Advance()) {
          Instruction* current = instr_it.Current();
          if (!current->MayThrow()) continue;

          Environment* env = current->env()->Outermost();
          for (intptr_t env_idx = 0; env_idx < cdefs.length(); ++env_idx) {
            if (cdefs[env_idx] != nullptr && !cdefs[env_idx]->IsConstant() &&
                env->ValueAt(env_idx)->BindsToConstant()) {
              // First constant observed for this slot.
              cdefs[env_idx] = env->ValueAt(env_idx)->definition();
            }
            if (cdefs[env_idx] != env->ValueAt(env_idx)->definition()) {
              // A different value reaches this slot - it is not a constant.
              cdefs[env_idx] = nullptr;
            }
          }
        }
      }
    }

    for (intptr_t j = 0; j < idefs->length(); ++j) {
      if (cdefs[j] != nullptr && cdefs[j]->IsConstant()) {
        // ... replace the catch-entry Parameter (*idefs)[j] with the
        // constant cdefs[j] ...
      }
    }
  }
}
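// Illustrative sketch only (not VM code): the cdefs[] bookkeeping above is a
// small per-slot lattice - "unknown" until a value is seen, then a specific
// constant, then "not constant" as soon as a conflicting value reaches the
// slot. SlotState and Meet below are hypothetical names used just for this
// sketch.
#include <cstdio>
#include <initializer_list>
#include <optional>

struct SlotState {
  bool known_non_constant = false;
  std::optional<int> constant;  // Engaged once a constant has been observed.
};

// Fold one observed incoming value into the slot state.
static void Meet(SlotState* state, int observed) {
  if (state->known_non_constant) return;
  if (!state->constant.has_value()) {
    state->constant = observed;         // First observation.
  } else if (*state->constant != observed) {
    state->known_non_constant = true;   // Conflicting observations.
    state->constant.reset();
  }
}

int main() {
  // Observations of two environment slots at three throwing instructions.
  SlotState slot_a, slot_b;
  for (int v : {7, 7, 7}) Meet(&slot_a, v);  // stays the constant 7
  for (int v : {7, 8, 7}) Meet(&slot_b, v);  // degrades to non-constant
  std::printf("slot_a constant? %s\n", slot_a.constant ? "yes" : "no");
  std::printf("slot_b constant? %s\n", slot_b.constant ? "yes" : "no");
  return 0;
}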
// Returns true if the definition has a use coming from a non-phi
// instruction. Environment uses count as real uses.
static bool HasRealUse(Definition* def) {
  if (def->env_use_list() != nullptr) return true;

  for (Value::Iterator it(def->input_use_list()); !it.Done(); it.Advance()) {
    if (!it.Current()->instruction()->IsPhi()) return true;
  }
  return false;
}

void DeadCodeElimination::EliminateDeadPhis(FlowGraph* flow_graph) {
  GrowableArray<PhiInstr*> live_phis;

  // Seed the worklist with phis that have real uses ...
  for (BlockIterator b = flow_graph->postorder_iterator(); !b.Done();
       b.Advance()) {
    JoinEntryInstr* join = b.Current()->AsJoinEntry();
    if (join != nullptr) {
      // ... phis with real uses are marked alive and added to live_phis,
      // all other phis are marked dead ...
    }
  }

  // ... then transitively mark phis feeding live phis as alive.
  while (!live_phis.is_empty()) {
    PhiInstr* phi = live_phis.RemoveLast();
    for (intptr_t i = 0; i < phi->InputCount(); i++) {
      PhiInstr* used_phi = phi->InputAt(i)->definition()->AsPhi();
      if ((used_phi != nullptr) && !used_phi->is_alive()) {
        used_phi->mark_alive();
        live_phis.Add(used_phi);
      }
    }
  }

  RemoveDeadAndRedundantPhisFromTheGraph(flow_graph);
}

void DeadCodeElimination::RemoveDeadAndRedundantPhisFromTheGraph(
    FlowGraph* flow_graph) {
  for (auto block : flow_graph->postorder()) {
    if (JoinEntryInstr* join = block->AsJoinEntry()) {
      if (join->phis_ == nullptr) continue;

      // Remove dead phis and compact the phis_ array of the block.
      intptr_t to_index = 0;
      for (intptr_t i = 0; i < join->phis_->length(); ++i) {
        PhiInstr* phi = (*join->phis_)[i];
        if (phi != nullptr) {
          if (!phi->is_alive()) {
            phi->ReplaceUsesWith(flow_graph->constant_null());
            phi->UnuseAllInputs();
            (*join->phis_)[i] = nullptr;
            if (FLAG_trace_optimization && flow_graph->should_print()) {
              // ... trace removal of the dead phi ...
            }
          } else {
            (*join->phis_)[to_index++] = phi;
          }
        }
      }
      if (to_index == 0) {
        join->phis_ = nullptr;
      } else {
        join->phis_->TruncateTo(to_index);
      }
    }
  }
}
void DeadCodeElimination::EliminateDeadCode(FlowGraph* flow_graph) {
  GrowableArray<Instruction*> worklist;
  BitVector live(flow_graph->zone(), flow_graph->current_ssa_temp_index());

  // Mark instructions that cannot be eliminated (e.g. those with visible
  // effects) as live roots and remember their SSA temps.
  for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Instruction* current = it.Current();
      ASSERT(!current->IsMoveArgument());
      if (!current->CanEliminate(block)) {
        worklist.Add(current);
        if (Definition* def = current->AsDefinition()) {
          if (def->HasSSATemp()) {
            live.Add(def->ssa_temp_index());
          }
        }
      }
    }
  }

  // Propagate liveness backwards through inputs and environment operands.
  while (!worklist.is_empty()) {
    Instruction* current = worklist.RemoveLast();
    for (intptr_t i = 0, n = current->InputCount(); i < n; ++i) {
      // ... mark the definition of each input live and enqueue it ...
    }
    if (current->env() != nullptr) {
      for (Environment::DeepIterator it(current->env()); !it.Done();
           it.Advance()) {
        Definition* input = it.CurrentValue()->definition();
        ASSERT(!input->IsMoveArgument());
        // ... mark environment operands live and enqueue them ...
      }
    }
  }

  // Sweep: remove every definition whose SSA temp was not marked live.
  for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Instruction* current = it.Current();
      ASSERT(!current->IsMoveArgument());
      if (Definition* def = current->AsDefinition()) {
        if (def->HasSSATemp() && live.Contains(def->ssa_temp_index())) {
          continue;
        }
      }
      // ... otherwise, if it can be eliminated, remove `current` from the
      // graph ...
    }
  }
}
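// Illustrative sketch only (not VM code): the same mark-and-sweep structure
// on a toy instruction list - effectful instructions are the roots, liveness
// flows backwards through operands, and everything unmarked is swept. The
// ToyInstr layout below is hypothetical.
#include <cstdio>
#include <vector>

struct ToyInstr {
  bool has_effect;            // Roots of liveness (calls, stores, returns...).
  std::vector<int> operands;  // Indices of instructions this one consumes.
};

int main() {
  std::vector<ToyInstr> code = {
      /*0*/ {false, {}},   // constant, only used by the dead instruction 2
      /*1*/ {false, {}},   // constant, used by the effectful instruction 3
      /*2*/ {false, {0}},  // pure computation with no live consumer
      /*3*/ {true, {1}},   // e.g. a store - always a root
  };

  // Mark phase: seed with roots, then walk operands with a worklist.
  std::vector<bool> live(code.size(), false);
  std::vector<int> worklist;
  for (size_t i = 0; i < code.size(); ++i) {
    if (code[i].has_effect) {
      live[i] = true;
      worklist.push_back(static_cast<int>(i));
    }
  }
  while (!worklist.empty()) {
    int n = worklist.back();
    worklist.pop_back();
    for (int op : code[n].operands) {
      if (!live[op]) {
        live[op] = true;
        worklist.push_back(op);
      }
    }
  }

  // Sweep phase: anything unmarked can be removed.
  for (size_t i = 0; i < code.size(); ++i) {
    std::printf("instr %zu: %s\n", i, live[i] ? "kept" : "removed");
  }
  return 0;
}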
// Functions carrying the pragma behind Symbols::vm_unsafe_no_interrupts()
// have every CheckStackOverflow instruction removed. Otherwise only the first
// check outside of a loop is a candidate, and only if nothing between it and
// the entry has unknown side effects.
void CheckStackOverflowElimination::EliminateStackOverflow(FlowGraph* graph) {
  const bool should_remove_all = IsMarkedWithNoInterrupts(graph->function());
  CheckStackOverflowInstr* first_stack_overflow_instr = nullptr;

  // ... for every instruction `current` in every block (iterated via `it`):
      if (should_remove_all) {
        if (it.Current()->IsCheckStackOverflow()) {
          it.RemoveCurrentFromGraph();
        }
        continue;
      }

      if (CheckStackOverflowInstr* instr = current->AsCheckStackOverflow()) {
        if (first_stack_overflow_instr == nullptr) {
          first_stack_overflow_instr = instr;
        }
        continue;
      }

      if (current->IsBranch()) {
        current = current->AsBranch()->comparison();
      }
      // ... give up if `current` may have unknown side effects ...

  if (first_stack_overflow_instr != nullptr) {
    first_stack_overflow_instr->RemoveFromGraph();
  }
}