56 "Propagate IC data from unoptimized to optimized IC calls.");
60 "Generate special IC stubs for two args Smi operations");
69 bool include_abstract)
70 : array_handles_(zone),
73 include_abstract_(include_abstract) {}
81 cids_->
Add(klass.
id());
88 if (!array->IsNull()) {
89 for (intptr_t
i = 0;
i < array->Length(); ++
i) {
90 *subclass_or_implementor ^= (*array).At(
i);
95 if (!array->IsNull()) {
96 for (intptr_t
i = 0;
i < array->Length(); ++
i) {
97 *subclass_or_implementor ^= (*array).At(
i);
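        // In the VM handle API, `^=` is a checked assignment: it casts the
        // raw object taken from the array to the handle's static type, so
        // each subclass or implementor is extracted as a Class before the
        // scan recurses into it.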
  const bool include_abstract_;
    bool include_abstract,
  const intptr_t cid_count = table->NumCids();
  std::unique_ptr<CidRangeVector[]>* cid_ranges = nullptr;
  if (include_abstract) {
    cid_ranges = exclude_null ? &cid_subtype_ranges_abstract_nonnullable_
                              : &cid_subtype_ranges_abstract_nullable_;
    cid_ranges = exclude_null ? &cid_subtype_ranges_nonnullable_
                              : &cid_subtype_ranges_nullable_;
  if (*cid_ranges == nullptr) {
  if (ranges.length() == 0) {
    BuildRangesFor(table, &ranges, klass, include_abstract, exclude_null);
      bool include_abstract,
      supertype_(AbstractType::Handle(zone(), cls.RareType())),
      include_abstract_(include_abstract),
      exclude_null_(exclude_null),
      to_check_(Class::Handle(zone())),
    if (cid == kTypeArgumentsCid) return true;
    if (!exclude_null_ && cid == kNullCid) return true;
    to_check_ = table_->At(cid);
    if (!include_abstract_ && to_check_.is_abstract()) return true;
    to_check_ = table_->At(cid);
  Zone* zone() const { return thread_->zone(); }
  Thread* const thread_;
  ClassTable* const table_;
  const AbstractType& supertype_;
  const bool include_abstract_;
  const bool exclude_null_;
  AbstractType& subtype_;
void HierarchyInfo::BuildRangesUsingClassTableFor(ClassTable* table,
                                                  bool include_abstract,
  CidCheckerForRanges checker(thread(), table, klass, include_abstract,
  const intptr_t cid_count = table->NumCids();
  for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) {
    if (checker.MayInclude(cid)) continue;
    if (checker.MustInclude(cid)) {
    } else if (start != -1) {
void HierarchyInfo::BuildRangesFor(ClassTable* table,
                                   const Class& dst_klass,
                                   bool include_abstract,
  if (dst_klass.InVMIsolateHeap() || dst_klass.id() == kInstanceCid) {
    BuildRangesUsingClassTableFor(table, ranges, dst_klass, include_abstract,
  GrowableArray<intptr_t> cids;
  SubtypeFinder finder(zone, &cids, include_abstract);
  SafepointReadRwLocker ml(thread(),
  finder.ScanImplementorClasses(dst_klass);
  if (cids.is_empty()) return;
  intptr_t* cids_array = cids.data();
  qsort(cids_array, cids.length(), sizeof(intptr_t),
        [](const void* a, const void* b) {
          return static_cast<int>(*static_cast<const intptr_t*>(a) -
                                  *static_cast<const intptr_t*>(b));
  CidCheckerForRanges checker(thread(), table, dst_klass, include_abstract,
  intptr_t left_cid = -1;
  intptr_t right_cid = -1;
  intptr_t previous_cid = -1;
  for (intptr_t i = 0; i < cids.length(); ++i) {
    const intptr_t current_cid = cids[i];
    if (current_cid == previous_cid) continue;
    if (left_cid != -1) {
      ASSERT(previous_cid != -1);
      for (intptr_t j = previous_cid + 1; j < current_cid; ++j) {
        if (!checker.MayInclude(j)) {
          ranges->Add({left_cid, right_cid});
          left_cid = right_cid = -1;
    previous_cid = current_cid;
    if (checker.MayInclude(current_cid)) continue;
    if (checker.MustInclude(current_cid)) {
      if (left_cid == -1) {
        left_cid = current_cid;
      right_cid = current_cid;
    } else if (left_cid != -1) {
      ranges->Add({left_cid, right_cid});
      left_cid = right_cid = -1;
  if (left_cid != -1) {
    ranges->Add(CidRange{left_cid, right_cid});
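  // The loop above is a single pass over the sorted cid list: [left_cid,
  // right_cid] tracks the range being grown, gaps consisting only of
  // MayInclude cids are absorbed, and any cid that may not be included
  // flushes the current range into `ranges`.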
  if (!type.IsInstantiated() || !type.IsType()) {
  if (type.IsFutureOrType()) {
  if (!type.IsType() || type.IsDartFunctionType()) {
  if (type.IsFutureOrType()) {
  for (intptr_t i = 0; i < num_type_parameters; ++i) {
  if (!type.IsRecordType()) {
                                             intptr_t* lower_limit,
                                             intptr_t* upper_limit) {
  if (type.IsNullable()) {
  const Class& type_class =
  if (ranges.length() == 1) {
#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M)                               \
  M(Float32x4, Float32x4)                                                      \
  M(Float64x2, Float64x2)                                                      \
#define BOXING_IN_SET_CASE(unboxed, boxed)                                     \
  case kUnboxed##unboxed:                                                      \
#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed)                               \
  case kUnboxed##unboxed:                                                      \
    return compiler::target::boxed::value_offset();
#define BOXING_CID_CASE(unboxed, boxed)                                        \
  case kUnboxed##unboxed:                                                      \
    return k##boxed##Cid;
#undef BOXING_CID_CASE
#undef BOXING_VALUE_OFFSET_CASE
#undef BOXING_IN_SET_CASE
#undef FOR_EACH_NON_INT_BOXED_REPRESENTATION
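// The X-macro table above drives three switches at once: membership of a
// representation in the boxed set, the offset of the unboxed payload within
// its box, and the class id of the box class wrapping each unboxed value.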
  if (constant_value_ == nullptr) {
  return *constant_value_;
  if (def->IsConstraint() || def->IsBox() || def->IsUnbox() ||
      def->IsIntConverter() || def->IsFloatToDouble() ||
      def->IsDoubleToFloat()) {
  if (orig == def) return def;
  if (def != nullptr) {
    return load->IsImmutableLengthLoad();
                                 bool is_static_call) {
  if (tag() != other.tag()) return false;
  ASSERT((*a)->IsSingleCid());
  ASSERT((*b)->IsSingleCid());
  return (*a)->cid_start - (*b)->cid_start;
  if (target_info_b->count != target_info_a->count) {
    return (target_info_b->count - target_info_a->count);
  return (*a)->cid_start - (*b)->cid_start;
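// The two comparators above order call targets: hotter targets (higher call
// count) sort first, and ties fall back to ascending cid_start so adjacent
// ranges can later be merged.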
                            int argument_number) {
  for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) {
    ASSERT((argument_number == 0) || (argument_number == 1));
    const intptr_t cid = argument_number == 0
                             ? binary_feedback.feedback_[i].first
                             : binary_feedback.feedback_[i].second;
  if (cids->length() != 0) {
void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) {
  const intptr_t num_args_tested = ic_data.NumArgsTested();
  for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
    if (ic_data.GetCountAt(i) == 0) {
    if (num_args_tested == 0) {
    } else if (num_args_tested == 1) {
      ic_data.GetOneClassCheckAt(i, &id, &dummy);
      ASSERT(num_args_tested == 2);
      GrowableArray<intptr_t> arg_ids;
      ic_data.GetCheckAt(i, &arg_ids, &dummy);
    intptr_t count = ic_data.GetCountAt(i);
                                  ic_data.GetExactnessAt(i)));
  if (ic_data.is_megamorphic()) {
    ASSERT(num_args_tested == 1);
    const Array& descriptor =
    SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex());
    for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
        Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
    const intptr_t filled_entry_count = cache.filled_entry_count();
    ASSERT(filled_entry_count > 0);
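    // Once a call site has gone megamorphic the IC data alone is incomplete,
    // so the block above also harvests the cid/target pairs out of the
    // megamorphic cache, reading its entries under the type_feedback_mutex
    // for a consistent snapshot.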
  if (length() != 1) return false;
#define KIND_CASE(name)                                                        \
#define KIND_CASE(name)                                                        \
  if (strcmp(str, #name) == 0) {                                               \
    *out = Kind::k##name;                                                      \
      is_bit_test_(IsCompactCidRange(cids)),
      token_pos_(source.token_pos) {
  const intptr_t number_of_checks = cids.length();
  ASSERT(number_of_checks > 0);
  ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() ||
         cids[0].cid_start != kSmiCid);
  auto const other_check = other.AsCheckClass();
  ASSERT(other_check != nullptr);
  if (!cids().IsMonomorphic()) {
  if (!cids().IsMonomorphic()) {
  const intptr_t number_of_checks = cids.length();
  if (number_of_checks <= 2) return false;
  const uintptr_t one = 1;
  intptr_t min = cids_.ComputeLowestCid();
  for (intptr_t i = 0; i < cids_.length(); ++i) {
    uintptr_t range = one + cids_[i].Extent();
    run = (one << range) - 1;
    mask |= run << (cids_[i].cid_start - min);
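  // The mask built above turns a class check over compact cid ranges into a
  // single bit test: each range [cid_start, cid_start + Extent()] contributes
  // a run of ones shifted to its offset from the lowest cid.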
                     intptr_t num_context_variables,
      num_context_variables_(num_context_variables) {
  if (!HasUses()) return nullptr;
    auto store = use->instruction()->AsStoreField();
    if ((store == nullptr) ||
        (use->use_index() != StoreFieldInstr::kInstancePos)) {
  if (!HasUses()) return nullptr;
  const intptr_t kNumTemps = 0;
  auto object_store = compiler->isolate_group()->object_store();
      stub = object_store->allocate_closure_ta_generic_stub();
      stub = object_store->allocate_closure_ta_stub();
      stub = object_store->allocate_closure_generic_stub();
      stub = object_store->allocate_closure_stub();
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                                                intptr_t index) const {
  return slot_.has_untagged_instance() ? kUntagged : kTagged;
  if (is_initialization_ && !slot().has_untagged_instance() &&
      (!slot().IsContextSlot() ||
       !instance()->definition()->IsAllocateUninitializedContext()) &&
      value()->BindsToConstantNull()) {
  if (slot().kind() == Slot::Kind::kPointerBase_data &&
  return field().ptr() == other.AsGuardFieldClass()->field().ptr();
  return field().ptr() == other.AsGuardFieldLength()->field().ptr();
  return field().ptr() == other.AsGuardFieldType()->field().ptr();
  const auto& constant_instantiator_type_args =
          ? TypeArguments::null_type_arguments()
          : TypeArguments::Cast(
  const auto& constant_function_type_args =
          ? TypeArguments::null_type_arguments()
      Z, AbstractType::Cast(sub_type()->BoundConstant()).ptr());
      Z, AbstractType::Cast(super_type()->BoundConstant()).ptr());
          &constant_sub_type, &constant_super_type,
          constant_instantiator_type_args, constant_function_type_args)) {
  auto const other_op = other.AsStrictCompare();
  ASSERT(other_op != nullptr);
  return handle_surrogates_ ? kCaseInsensitiveCompareUTF16RuntimeEntry
                            : kCaseInsensitiveCompareUCS2RuntimeEntry;
  auto const other_op = other.AsMathMinMax();
  ASSERT(other_op != nullptr);
  return (op_kind() == other_op->op_kind()) &&
  auto const other_op = other.AsBinaryIntegerOp();
  return (op_kind() == other_op->op_kind()) &&
  auto const other_load = other.AsLoadField();
  ASSERT(other_load != nullptr);
  return &this->slot_ == &other_load->slot_;
  return field().ptr() == other.AsLoadStaticField()->field().ptr();
  auto const other_constant = other.AsConstant();
  ASSERT(other_constant != nullptr);
  return (value().ptr() == other_constant->value().ptr() &&
      representation_(representation),
      constant_address_(0) {
  if (representation_ == kUnboxedDouble) {
  if (auto constant = definition()->OriginalDefinition()->AsConstant()) {
    *constant_defn = constant;
  return (constant != nullptr) && constant->value().IsNull();
  ASSERT(constant != nullptr);
  return constant->value();
      parsed_function_(parsed_function),
      indirect_entries_(),
      spill_slot_count_(0),
      fixed_slot_count_(0) {}
  if (defn != nullptr && defn->value().IsNull()) return defn;
  for (intptr_t i = 0; i < catch_entries_.length(); ++i) {
    if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i];
#define DEFINE_ACCEPT(ShortName, Attrs)                                        \
  void ShortName##Instr::Accept(InstructionVisitor* visitor) {                 \
    visitor->Visit##ShortName(this);                                           \
  intptr_t use_index = 0;
    Value* use = it.CurrentValue();
    it.CurrentValue()->RemoveFromUseList();
    Value* use = it.CurrentValue();
  ASSERT(next_instr != nullptr);
  ASSERT(!next_instr->IsBlockEntry());
  prev_instr->LinkTo(next_instr);
  return return_previous ? prev_instr : next_instr;
  ASSERT(previous_ == nullptr);
  ASSERT(next_ == nullptr);
  next_ = prev->next_;
  next_->previous_ = this;
  previous_->next_ = this;
  for (intptr_t i = tail->InputCount() - 1; i >= 0; --i) {
  while ((result != nullptr) && !result->IsBlockEntry()) {
  return (result != nullptr) ? result->AsBlockEntry() : nullptr;
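  // An instruction finds its enclosing block by walking previous() links
  // until a block entry is reached; block entries terminate the chain, so
  // the walk above is bounded by the block's length.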
  for (intptr_t i = 0; i < block_order_->length(); ++i) {
  if (value->Type()->IsNull() ||
      (value->Type()->ToNullableCid() == kSmiCid) ||
      (value->Type()->ToNullableCid() == kBoolCid)) {
  if (value->BindsToConstant()) {
    if (FLAG_precompiled_mode) {
    value = value->definition()->RedefinedValue();
  } while (value != nullptr);
  intptr_t pred_id = predecessor->block_id();
  value->set_previous_use(nullptr);
  if (next != nullptr) next->set_previous_use(nullptr);
  if (next != nullptr) next->set_previous_use(nullptr);
    if (it.Current()->instruction() != target) return false;
  ASSERT(other != nullptr);
  Value* current = nullptr;
  if (next != nullptr) {
    while (next != nullptr) {
  if (next != nullptr) next->set_previous_use(current);
  if (next != nullptr) {
    while (next != nullptr) {
  if (next != nullptr) next->set_previous_use(current);
    it.CurrentValue()->RemoveFromUseList();
  ASSERT(move_arguments != nullptr);
  ASSERT((arg_count + after_args_input_count) <= env()->Length());
  const intptr_t env_base =
      env()->Length() - arg_count - after_args_input_count;
  for (intptr_t i = 0; i < arg_count; ++i) {
  call->env()->DeepCopyAfterTo(
      flow_graph->zone(), this, call->ArgumentCount(),
  if (block == dom_block) {
       curr = curr->next()) {
    if (curr == this) return true;
  if (input_representation != kNoRepresentation &&
#define INSTR_ATTRS(type, attrs) InstrAttrs::attrs,
  for (intptr_t i = replacement->InputCount() - 1; i >= 0; --i) {
  if ((iterator != nullptr) && (this == iterator->Current())) {
  replacement->LinkTo(this);
  for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) {
  ASSERT(new_comparison->env() == nullptr);
  comparison_ = new_comparison;
  return (i >= 0) && (i < preorder->length()) && ((*preorder)[i] == block);
  ASSERT(predecessor != nullptr);
    intptr_t parent_number =
    parent->Add(parent_number);
  preorder->Add(this);
    last = it.Current();
  if (last->IsGoto()) last->AsGoto()->set_block(this);
  const intptr_t osr_id = graph_entry->osr_id();
    instr = it.Current();
  ASSERT(instr->IsCheckStackOverflow());
      graph_entry, normal_entry->block_id(), normal_entry->try_index(),
  auto goto_join = new GotoInstr(AsJoinEntry(),
  ASSERT(parent != nullptr);
  goto_join->CopyDeoptIdFrom(*parent);
  osr_entry->LinkTo(goto_join);
  ASSERT(other != nullptr);
  while (current != nullptr && current != this) {
  return current == this;
  return loop_info_ != nullptr && loop_info_->header() == this;
  return loop_info_ == nullptr ? 0 : loop_info_->NestingDepth();
    target->predecessor_ = new_block;
  intptr_t old_index = join->IndexOfPredecessor(this);
  intptr_t pred_count = join->PredecessorCount();
  ASSERT(old_index < pred_count);
  intptr_t new_id = new_block->block_id();
  intptr_t new_index = old_index;
    for (; new_index < pred_count - 1; ++new_index) {
      if (join->predecessors_[new_index + 1]->block_id() > new_id) break;
      join->predecessors_[new_index] = join->predecessors_[new_index + 1];
    for (; new_index > 0; --new_index) {
      if (join->predecessors_[new_index - 1]->block_id() < new_id) break;
      join->predecessors_[new_index] = join->predecessors_[new_index - 1];
  join->predecessors_[new_index] = new_block;
  if ((join->phis() == nullptr) || (old_index == new_index)) return;
  intptr_t step = (old_index < new_index) ? 1 : -1;
  for (intptr_t use_idx = old_index; use_idx != new_index;
  if (join != nullptr) {
      it.Current()->UnuseAllInputs();
    it.Current()->UnuseAllInputs();
  if (phis_ == nullptr) {
    for (intptr_t i = 0; i < var_count; i++) {
      phis_->Add(nullptr);
  ASSERT((*phis_)[var_index] == nullptr);
  if (phis_ == nullptr) {
  ASSERT(phis_ != nullptr);
  for (intptr_t index = 0; index < phis_->length(); ++index) {
    if (phi == (*phis_)[index]) {
      (*phis_)[index] = phis_->Last();
      phis_->RemoveLast();
  if (phis_ == nullptr) return;
  intptr_t to_index = 0;
  for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) {
    PhiInstr* phi = (*phis_)[from_index];
    if (phi != nullptr) {
      (*phis_)[to_index++] = phi;
  if (to_index == 0) {
    phis_->TruncateTo(to_index);
         (osr_entry() == nullptr ? 0 : 1) + catch_entries_.length();
  if (index == 0) return normal_entry_;
  return catch_entries_[index];
  if (index == 0) return true_successor_;
  if (index == 1) return false_successor_;
  const intptr_t rep_bitsize =
  if (value()->Type()->ToCid() == kSmiCid &&
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
    case Token::kTRUNCDIV:
  if (right()->BindsToConstant()) {
    if (!constant.IsInteger()) return false;
    return Integer::Cast(constant).AsInt64Value() != 0;
  if (!right()->BindsToConstant()) return false;
  if (!constant.IsSmi()) return false;
  const intptr_t int_value = Smi::Cast(constant).Value();
    case kUnboxedUint32:
  return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
  if (left_value == 1) {
    if (right->definition()->representation() != kUnboxedDouble) {
    return right->definition();
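  // Strength reduction for double multiplication: `1.0 * x` folds to `x`,
  // but only when the other operand is already in the unboxed double
  // representation, so no conversion is silently dropped.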
  if (!HasUses()) return nullptr;
  if (value()->definition()->IsFloatToDouble()) {
    return value()->definition()->AsFloatToDouble()->value()->definition();
  if (value()->BindsToConstant()) {
    double narrowed_val =
        static_cast<float>(Double::Cast(value()->BoundConstant()).value());
  if (!HasUses()) return nullptr;
  if (value()->BindsToConstant()) {
  if (!HasUses()) return nullptr;
  if ((op_kind() == Token::kMUL) &&
      (left()->definition() == right()->definition())) {
  return HasUses() ? this : nullptr;
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
    case kUnboxedUint32:
  if (op == nullptr) {
  Range* right_range = nullptr;
    case Token::kTRUNCDIV:
        right_range = new Range();
        const_def->InferRange(nullptr, right_range);
    case kUnboxedUint32:
  if (op == nullptr) {
  auto* const replacement =
  if (replacement != this) {
  auto* const replacement =
  if (replacement != this) {
  if (left()->BindsToConstant() && right()->BindsToConstant()) {
  if (left()->BindsToConstant() && !right()->BindsToConstant() &&
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
  } else if (rhs == 0) {
  if (shift != nullptr) {
    if (auto shift_with_range = shift->AsShiftIntegerOp()) {
      shift_with_range->set_shift_range(
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
  if (bit_not != nullptr) {
    case Token::kTRUNCDIV:
  } else if (rhs == -1) {
  if (negation != nullptr) {
      if ((rhs == -1) || (rhs == 1)) {
                                         Object::smi_zero());
                                         Object::smi_zero());
      } else if (rhs < 0) {
                                         Object::smi_zero());
                                         Object::smi_zero());
      } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) {
                                         Object::smi_zero());
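      // Constant-shift canonicalization: a negative shift count is invalid,
      // and a count at or beyond the result width folds the result to zero
      // only for truncating operations; non-truncating shifts must keep the
      // instruction so the invalid count can still be rejected at runtime.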
    case kImmutableArrayCid:
    case kTypeArgumentsCid:
  auto kind = function.recognized_kind();
    case MethodRecognizer::kTypedData_ByteDataView_factory:
    case MethodRecognizer::kTypedData_Int8ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
    case MethodRecognizer::kTypedData_Int16ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
    case MethodRecognizer::kTypedData_Int32ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
    case MethodRecognizer::kTypedData_Int64ArrayView_factory:
    case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
    case MethodRecognizer::kTypedData_Float32ArrayView_factory:
    case MethodRecognizer::kTypedData_Float64ArrayView_factory:
    case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
  auto kind = function.recognized_kind();
    case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory:
    case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory:
  return HasUses() ? this : nullptr;
  switch (field.kind()) {
    case Slot::Kind::kArgumentsDescriptor_type_args_len:
    case Slot::Kind::kArgumentsDescriptor_count:
    case Slot::Kind::kArgumentsDescriptor_positional_count:
    case Slot::Kind::kArgumentsDescriptor_size:
    case Slot::Kind::kTypeArguments_length:
    case Slot::Kind::kRecord_shape:
  if (slot().IsIdentical(Slot::PointerBase_data())) {
  if (slot().IsIdentical(Slot::PointerBase_data())) {
    if (call->is_known_list_constructor() &&
      return call->ArgumentAt(1);
    } else if (call->function().recognized_kind() ==
               MethodRecognizer::kByteDataFactory) {
      return call->ArgumentAt(1);
      return call->ArgumentAt(3);
  } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
    const Slot& slot = load_array->slot();
  switch (slot().kind()) {
    case Slot::Kind::kArray_length:
        return create_array->num_elements()->definition();
    case Slot::Kind::kTypedDataBase_length:
              orig_instance->AsAllocateTypedData()) {
        return alloc_typed_data->num_elements()->definition();
    case Slot::Kind::kTypedDataView_typed_data:
          return call->ArgumentAt(1);
    case Slot::Kind::kTypedDataView_offset_in_bytes:
          return call->ArgumentAt(2);
        } else if (call->function().recognized_kind() ==
                   MethodRecognizer::kByteDataFactory) {
          return flow_graph->GetConstant(Object::smi_zero());
    case Slot::Kind::kRecord_shape:
      if (auto* alloc_rec = orig_instance->AsAllocateRecord()) {
      } else if (auto* alloc_rec = orig_instance->AsAllocateSmallRecord()) {
        if (type->IsRecordType()) {
              Smi::Handle(RecordType::Cast(*type).shape().AsSmi()));
      if (call->is_known_list_constructor()) {
        return call->ArgumentAt(0);
      switch (call->function().recognized_kind()) {
        case MethodRecognizer::kByteDataFactory:
        case MethodRecognizer::kLinkedHashBase_getData:
        case MethodRecognizer::kImmutableLinkedHashBase_getData:
              orig_instance->AsCreateArray()) {
        return create_array->type_arguments()->definition();
      } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
        const Slot& slot = load_array->slot();
                    .GetInstanceTypeArguments(flow_graph->thread())));
    case Slot::Kind::kLinkedHashBase_data:
    case Slot::Kind::kPointerBase_data:
  if (instance()->BindsToConstant()) {
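  // Load forwarding for instance fields: scan every use of the allocation
  // for a StoreField into this slot. Exactly one initializing store lets the
  // load be replaced by the stored value; a second store cancels the
  // forwarding by resetting initializing_store to nullptr.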
  for (auto use : instance()->definition()->input_uses()) {
    if (auto store = use->instruction()->AsStoreField()) {
      if ((use->use_index() == StoreFieldInstr::kInstancePos) &&
        if (initializing_store == nullptr) {
          initializing_store = store;
          initializing_store = nullptr;
  if (initializing_store != nullptr &&
  if (FLAG_eliminate_type_checks) {
    if (value()->Type()->ToCid() == kBoolCid) {
  if (!dst_type()->BindsToConstant()) return this;
  const auto& abs_type = AbstractType::Cast(dst_type()->BoundConstant());
  if (abs_type.IsTopTypeForSubtyping() ||
      (FLAG_eliminate_type_checks &&
       value()->Type()->IsAssignableTo(abs_type))) {
  if (abs_type.IsInstantiated()) {
            ? &TypeArguments::null_type_arguments()
            : &TypeArguments::Cast(val);
    function_type_args =
            ? &TypeArguments::null_type_arguments()
  if (instantiator_type_args == nullptr) {
      if (load_type_args->slot().IsTypeArguments()) {
          if (load_field->slot().IsDartField() &&
                  .static_type_exactness_state()
                  .IsHasExactSuperClass()) {
                         Z, load_field->slot().field().type()))
                    .GetInstanceTypeArguments(thread));
  if ((instantiator_type_args != nullptr) && (function_type_args != nullptr)) {
        Z, abs_type.InstantiateFrom(*instantiator_type_args,
    if (new_dst_type.IsNull()) {
        (FLAG_eliminate_type_checks &&
         value()->Type()->IsAssignableTo(new_dst_type))) {
  return HasUses() ? this : nullptr;
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 0;
  ASSERT(!coverage_array_.IsNull());
  return coverage_array_.At(coverage_index_) != Smi::New(0) ? nullptr : this;
  if ((unbox_defn != nullptr) &&
  if (value()->BindsToConstant()) {
      case kUnboxedFloat64x2:
      case kUnboxedFloat32x4:
      case kUnboxedInt32x4:
  return HasUses() ? this : nullptr;
  if (replacement != this) {
  if (auto unbox = value()->definition()->AsUnboxInt64()) {
    return unbox->value()->definition();
  } else if (auto unbox = value()->definition()->AsUnboxedConstant()) {
  if (auto conv = value()->definition()->AsIntConverter()) {
    if (conv->from() == kUntagged) {
    switch (conv->from()) {
        replacement = new BoxInt32Instr(conv->value()->CopyWithType());
      case kUnboxedUint32:
  if (box_defn != nullptr) {
  if (val.IsInteger()) {
    return flow_graph->GetConstant(double_val, kUnboxedDouble);
  } else if (val.IsDouble()) {
    return flow_graph->GetConstant(val, kUnboxedDouble);
  if (val.IsInteger()) {
    double narrowed_val =
        static_cast<float>(Integer::Cast(val).AsDoubleValue());
  } else if (val.IsDouble()) {
    double narrowed_val = static_cast<float>(Double::Cast(val).value());
  if (value()->BindsToConstant()) {
    if (obj.IsInteger()) {
      const int64_t intval = Integer::Cast(obj).AsInt64Value();
  if (!HasUses()) return nullptr;
  if (auto constant = value()->definition()->AsConstant()) {
    if (from() != kUntagged && to() != kUntagged &&
        constant->representation() == from() && constant->value().IsInteger()) {
      const int64_t value = Integer::Cast(constant->value()).AsInt64Value();
      box ^= box.Canonicalize(flow_graph->thread());
  if ((first_converter != nullptr) &&
    if (intermediate_rep == kUntagged) {
    } else if (!Range::Fits(src_defn->range(), intermediate_rep)) {
    if (first_converter->from() == to()) {
    if ((first_converter->from() == kUntagged) || (to() == kUntagged)) {
  if (comp->IsRelationalOp()) {
  if (type->IsNone()) {
      unwrapped_type.IsObjectType() || unwrapped_type.IsTypeParameter() ||
  if (compare->needs_number_check()) {
      compare->set_needs_number_check(false);
      compare->set_needs_number_check(false);
  Value* other = nullptr;
  if (!compare->IsComparisonWithConstant(&other, &constant_defn)) {
  const bool can_merge = is_branch || (other->Type()->ToCid() == kBoolCid);
  if (!constant.IsBool() || !can_merge) {
  const bool constant_value = Bool::Cast(constant).value();
  if ((kind == Token::kEQ_STRICT) == constant_value) {
  if (auto comp = other_defn->AsComparison()) {
  if (!right->BindsToConstant() || !right->BoundConstant().IsSmi()) {
  const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
  auto mask_op = left->definition()->AsBinaryIntegerOp();
  if ((mask_op == nullptr) || (mask_op->op_kind() != Token::kBIT_AND) ||
      !mask_op->HasOnlyUse(left)) {
  bool negated = false;
      comparison()->AsStrictCompare(), &negated, true);
  if (FLAG_trace_optimization && flow_graph->should_print()) {
       comparison()->operation_cid() == kMintCid)) {
  bool negate = false;
  if (bit_and != nullptr) {
    if (FLAG_trace_optimization && flow_graph->should_print()) {
  if (!HasUses()) return nullptr;
  bool negated = false;
  if (negated && replacement->IsComparison()) {
    ASSERT(replacement != this);
    replacement->AsComparison()->NegateComparison();
  ASSERT(operation_cid() == kMintCid);
      (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
      left()->CopyWithType(), right()->CopyWithType(),
      (left()->Type()->IsNullableSmi() || right()->Type()->IsNullableSmi()) &&
      (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
      left()->CopyWithType(), right()->CopyWithType(),
  if (!HasUses()) return nullptr;
  if (value()->BindsToConstant()) {
    if (constant_value.IsSmi() &&
        cids_.Contains(Smi::Cast(constant_value).Value())) {
  ASSERT((kind == Token::kIS) || (kind == Token::kISNOT));
  set_operation_cid(kObjectCid);
  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
  for (intptr_t i = 0; i < data.length(); i += 2) {
      return (data[i + 1] == true_result)
  return (data[data.length() - 1] == true_result)
      value_representation_(value_representation) {
  ASSERT(value_representation == kTagged ||
  set_operation_cid(kObjectCid);
  if (value()->BindsToSmiConstant()) {
    bool in_range = lower_ <= val && val <= upper_;
    ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
        Bool::Get(in_range == (kind() == Token::kIS)));
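  // TestRange folds to a constant when its input is a known Smi: the value
  // is compared against the inclusive [lower_, upper_] bounds, with the
  // result inverted for Token::kISNOT.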
  if (range != nullptr) {
  } else if (lower > upper_ || upper < lower_) {
  if (field().guarded_cid() == cid) {
  if (!field().needs_length_check()) {
  if (call == nullptr) {
  if (call->is_known_list_constructor() &&
  } else if (call->function().recognized_kind() ==
             MethodRecognizer::kByteDataFactory) {
      Smi::Cast(length->value()).Value() == expected_length) {
  return (value()->Type()->ToCid() == kSmiCid) ? nullptr : this;
  if ((left()->Type()->ToCid() == kDoubleCid) ||
      (right()->Type()->ToCid() == kDoubleCid)) {
  auto const other_check = other.AsCheckNull();
  ASSERT(other_check != nullptr);
    case kUnboxedUint16:
#if defined(HAS_SMI_63_BITS)
    case kUnboxedUint32:
#if !defined(HAS_SMI_63_BITS)
    case kUnboxedUint32:
    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      speculative_mode == SpeculativeMode::kNotSpeculative
    case kUnboxedUint32:
    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
bool UnboxInstr::CanConvertSmi() const {
    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
  result->feedback_.Add({arg_ids[0], arg_ids[1]});
                                           intptr_t receiver_cid,
                                           intptr_t argument_cid) {
  result->feedback_.Add({receiver_cid, argument_cid});
                                     intptr_t receiver_cid,
  const intptr_t count = 1;
  targets->CreateHelper(zone, ic_data);
  targets->MergeIntoRanges();
  targets.CreateHelper(zone, ic_data);
  for (int idx = 0; idx < length; idx++) {
    int lower_limit_cid = (idx == 0) ? -1 : targets[idx - 1].cid_end;
    auto target_info = targets.TargetAt(idx);
    if (target.is_polymorphic_target()) continue;
    for (int i = target_info->cid_start - 1; i > lower_limit_cid; i--) {
      bool class_is_abstract = false;
                                     &class_is_abstract) &&
        if (!class_is_abstract) {
          target_info->cid_start = i;
  for (int idx = 0; idx < length; idx++) {
    int upper_limit_cid =
        (idx == length - 1) ? max_cid : targets[idx + 1].cid_start;
    auto target_info = targets.TargetAt(idx);
    if (target.is_polymorphic_target()) continue;
    intptr_t cid_end_including_abstract = target_info->cid_end;
    for (int i = target_info->cid_end + 1; i < upper_limit_cid; i++) {
      bool class_is_abstract = false;
                                     &class_is_abstract) &&
        cid_end_including_abstract = i;
        if (!class_is_abstract) {
          target_info->cid_end = i;
    if ((cid_end_including_abstract > target_info->cid_end) &&
        ((cid_end_including_abstract + 1) == targets[idx + 1].cid_start) &&
      target_info->cid_end = cid_end_including_abstract;
  targets.MergeIntoRanges();
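  // The two passes above widen each target's cid range downward and upward
  // over neighboring classes that dispatch to the same method. Runs of
  // abstract classes are also absorbed when they bridge the gap to the next
  // range, since no instance of an abstract class can ever be observed.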
void CallTargets::MergeIntoRanges() {
      !target.is_polymorphic_target()) {
  for (intptr_t i = 0; i < length(); i++) {
#define __ compiler->assembler()->
                                                     bool optimizing) const {
                                                  bool optimizing) const {
                             InstructionSource());
                                                 bool optimizing) const {
  if (compiler->NeedsEdgeCounter(this)) {
                             InstructionSource());
const {
4334#if defined(TARGET_ARCH_X64)
4337 if (
__ CodeSize() % 2 == 1) {
4341 if (
tag() == Instruction::kFunctionEntry) {
4345 if (
this ==
compiler->flow_graph().graph_entry()->unchecked_entry()) {
4346 __ BindUncheckedEntryPoint();
4357 if (!FLAG_precompiled_mode) {
4358 __ MonomorphicCheckedEntryJIT();
4360 __ MonomorphicCheckedEntryAOT();
4368#if defined(TARGET_USES_OBJECT_POOL)
4369 __ set_constant_pool_allowed(
false);
4377#if defined(TARGET_USES_OBJECT_POOL)
4378 ASSERT(
__ constant_pool_allowed());
4382 if (
compiler->NeedsEdgeCounter(
this)) {
4390 InstructionSource());
4401 bool optimizing)
const {
4405void NativeEntryInstr::SaveArguments(FlowGraphCompiler*
compiler)
const {
4406 __ Comment(
"SaveArguments");
4410 if (return_loc.IsPointerToMemory()) {
4413 for (intptr_t
i = marshaller_.num_args();
i-- > 0;) {
4417 __ Comment(
"SaveArgumentsEnd");
4427 const intptr_t num_regs = reg_loc.
num_regs();
4430 for (intptr_t
i = num_regs - 1;
i >= 0;
i--) {
4431 __ PushRegister(reg_loc.reg_at(
i));
4442 if (pointer_loc.IsRegisters()) {
4444 ASSERT(regs_loc.num_regs() == 1);
4445 __ PushRegister(regs_loc.reg_at(0));
4447 ASSERT(pointer_loc.IsStack());
4454 for (intptr_t
i = num;
i-- > 0;) {
4459 const auto& both = nloc.
AsBoth();
                                                 bool optimizing) const {
#if defined(TARGET_USES_OBJECT_POOL)
  __ set_constant_pool_allowed(false);
#if defined(TARGET_USES_OBJECT_POOL)
  ASSERT(__ constant_pool_allowed());
  intptr_t offset = label->Position();
                                            bool optimizing) const {
  const intptr_t kNumInputs = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  const intptr_t field_table_offset =
  __ LoadMemoryValue(result, THR, static_cast<int32_t>(field_table_offset));
  __ LoadMemoryValue(result, result, static_cast<int32_t>(field_offset));
    ThrowErrorSlowPathCode* slow_path =
        new LateInitializationErrorSlowPath(this);
    compiler->AddSlowPathCode(slow_path);
    __ CompareObject(result, Object::sentinel());
    __ BranchIf(EQUAL, slow_path->entry_label());
  auto object_store = compiler->isolate_group()->object_store();
  compiler::Label no_call, call_initializer;
  __ CompareObject(result, Object::sentinel());
  if (!field().is_late()) {
    __ BranchIf(EQUAL, &call_initializer);
    __ CompareObject(result, Object::transition_sentinel());
  __ Bind(&call_initializer);
  if (field().needs_load_guard()) {
    stub = object_store->init_static_field_stub();
  } else if (field().is_late()) {
    original_field.EnsureInitializerFunction();
            ? object_store->init_shared_late_final_static_field_stub()
            : object_store->init_shared_late_static_field_stub())
            ? object_store->init_late_final_static_field_stub()
            : object_store->init_late_static_field_stub());
    stub = object_store->init_static_field_stub();
                             UntaggedPcDescriptors::kOther, locs(),
  const intptr_t kNumInputs = 1;
  const intptr_t kNumInputs = 1;
  LocationSummary* locs = nullptr;
    const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
    locs = new (zone) LocationSummary(
        zone, kNumInputs, kNumTemps,
    if (using_shared_stub) {
    const intptr_t kNumTemps = 0;
  const intptr_t kNumTemps = 0;
  if (rep == kTagged || rep == kUntagged) {
  ASSERT(OffsetInBytes() >= 0);
  ASSERT(OffsetInBytes() != 0 || slot().has_untagged_instance());
    __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
    EmitNativeCodeForInitializerCall(compiler);
  } else if (rep == kTagged || rep == kUntagged) {
    __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
      __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
      const Register result_lo = result_pair->At(0).reg();
      const Register result_hi = result_pair->At(1).reg();
      __ LoadFieldFromOffset(result_lo, instance_reg, OffsetInBytes());
      __ LoadFieldFromOffset(result_hi, instance_reg,
      __ LoadUnboxedDouble(result, instance_reg,
      __ LoadUnboxedSimd128(result, instance_reg,
void LoadFieldInstr::EmitNativeCodeForInitializerCall(
    ThrowErrorSlowPathCode* slow_path =
        new LateInitializationErrorSlowPath(this);
    compiler->AddSlowPathCode(slow_path);
    __ CompareObject(result_reg, Object::sentinel());
    __ BranchIf(EQUAL, slow_path->entry_label());
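    // Late-field protocol: an uninitialized field holds Object::sentinel(),
    // so the fast path is a single compare-and-branch; only the sentinel
    // case transfers to the slow path, which either runs the initializer or
    // raises a LateInitializationError.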
  compiler::Label no_call;
  auto object_store = compiler->isolate_group()->object_store();
  if (field.needs_load_guard()) {
    stub = object_store->init_instance_field_stub();
  } else if (field.is_late()) {
    if (!field.has_nontrivial_initializer()) {
      stub = object_store->init_instance_field_stub();
      original_field.EnsureInitializerFunction();
      if (field.is_final()) {
        stub = object_store->init_late_final_instance_field_stub();
        stub = object_store->init_late_instance_field_stub();
                             UntaggedPcDescriptors::kOther, locs(),
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& throw_stub =
                             UntaggedPcDescriptors::kOther, locs(),
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& re_throw_stub =
                             UntaggedPcDescriptors::kOther, locs(),
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
                                           bool optimizing) const {
                                        bool optimizing) const {
                                             bool optimizing) const {
  LocationSummary* summary = new (zone)
      LocationSummary(zone, 1, 0, LocationSummary::ContainsCall::kNoCall);
                                                bool optimizing) const {
  constexpr intptr_t delta =
  compiler::ffi::FrameRebase rebase(compiler->zone(),
  const auto& location =
      marshaller_.NativeLocationOfNativeParameter(def_index_);
      rebase.Rebase(location.IsPointerToMemory()
                        ? location.AsPointerToMemory().pointer_location()
  NoTemporaryAllocator no_temp;
  compiler->EmitMoveFromNative(out_loc, out_rep, src, &no_temp);
  for (intptr_t i = 0; i < moves_.length(); i++) {
                                              bool optimizing) const {
  ParallelMoveEmitter(compiler, this).EmitNativeCode();
                                         bool optimizing) const {
                                            bool optimizing) const {
                                    intptr_t* fpu_reg_slots) {
  if (registers_remapped_) {
  registers_remapped_ = true;
                                                 bool optimizing) const {
  null_->InitializeLocationSummary(zone, optimizing);
  return null_->locs();
                                              bool optimizing) const {
        : LocationSummary::Make(zone, 0, Location::NoLocation(),
                                LocationSummary::kNoCall);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  __ ExtendAndSmiTagValue(
                                     bool needs_number_check,
      needs_number_check_(needs_number_check) {
  ASSERT((kind == Token::kEQ_STRICT) || (kind == Token::kNE_STRICT));
  ASSERT(!left.IsConstant() || !right.IsConstant());
  if (left.IsConstant()) {
      return true_condition;
    true_condition = EmitComparisonCodeRegConstant(
        compiler, labels, right.reg(), left.constant());
  } else if (right.IsConstant()) {
      return true_condition;
    true_condition = EmitComparisonCodeRegConstant(compiler, labels,
                                                   left.reg(),
    true_condition = compiler->EmitEqualityRegRegCompare(
                                                intptr_t input_index,
  if (input_type->ToCid() == kBoolCid && obj.GetClassId() == kBoolCid) {
    bool invert = (kind() != Token::kEQ_STRICT) ^ !Bool::Cast(obj).value();
    *true_condition_out =
  const intptr_t kNumInputs = 1;
  if (input_can_be_smi_ && this->object()->Type()->CanBeSmi()) {
    __ LoadTaggedClassIdMayBeSmi(result, object);
    __ LoadClassIdMayBeSmi(result, object);
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) ||                   \
    defined(TARGET_ARCH_ARM)
  const bool needs_temp = (lower() != 0);
  const bool needs_temp = false;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = needs_temp ? 1 : 0;
  LocationSummary* locs = new (zone)
                                                 BranchLabels labels) {
  intptr_t lower = lower_;
  intptr_t upper = upper_;
  if (value_representation_ == kTagged) {
    __ CompareImmediate(in, upper);
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) ||                   \
    defined(TARGET_ARCH_ARM)
    __ AddImmediate(temp, in, -lower);
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
                                              bool optimizing) const {
  if (!FLAG_two_args_smi_icd) {
      return StubCode::SmiAddInlineCache().ptr();
      return StubCode::SmiLessInlineCache().ptr();
      return StubCode::SmiEqualInlineCache().ptr();
                                                intptr_t idx) const {
    const Array& arguments_descriptor =
  if (!specialized_binary_smi_ic_stub.IsNull()) {
  const ICData* call_ic_data = nullptr;
  if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
    const Array& arguments_descriptor =
    if (receivers_static_type_ != nullptr) {
      receivers_static_type = receivers_static_type_->ptr();
    call_ic_data = compiler->GetOrAddInstanceCallICData(
  if (ic_data()->NumberOfUsedChecks() > 0) {
    const ICData& unary_ic_data =
  bool use_specialized_smi_ic_stub = false;
  if (!specialized_binary_smi_ic_stub.IsNull() &&
      call_ic_data->NumberOfChecksIs(1)) {
    GrowableArray<intptr_t> class_ids(2);
    call_ic_data->GetCheckAt(0, &class_ids, &target);
    if (class_ids[0] == kSmiCid && class_ids[1] == kSmiCid &&
        target.ptr() == binary_smi_op_target.ptr()) {
      use_specialized_smi_ic_stub = true;
  if (use_specialized_smi_ic_stub) {
    compiler->EmitInstanceCallJIT(specialized_binary_smi_ic_stub,
                                        args_desc, allow_add);
  if (targets_ == nullptr) {
    ASSERT(targets_->is_empty());
                                                      intptr_t idx) const {
  for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
    args.Add(call->ArgumentValueAt(i)->CopyWithType());
      call->type_args_len(), call->argument_names());
  return dispatch_table_call;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  if (selector()->requires_args_descriptor) {
    arguments_descriptor = args_info.ToArgumentsDescriptor();
                             UntaggedPcDescriptors::kOther, locs(), env());
  if (receiver->Type()->is_nullable()) {
                                                    intptr_t idx) const {
  if (targets_ == nullptr) {
    ASSERT(targets_->is_empty());
  if (length() == 0) return false;
  const intptr_t len = targets_.length();
  for (intptr_t i = 0; i < len; i++) {
    target = targets_.TargetAt(i)->target->ptr();
    if (!target.IsDispatcherOrImplicitAccessor()) {
                                                       bool optimizing) const {
  compiler->EmitPolymorphicInstanceCall(
  bool is_string = true;
  bool is_type = true;
  for (intptr_t i = 0; i < num_checks; i++) {
  } else if (is_type) {
  if (new_target == nullptr) {
  if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
    if (!type.IsNull()) {
  return targets_.HasSingleRecognizedTarget();
  } else if (function().has_pragma()) {
    const intptr_t recognized_cid =
  if (auto konst = defn->AsConstant()) {
    const Object& obj = konst->value();
    if (obj.IsString()) {
      return String::Cast(obj);
    } else if (obj.IsSmi()) {
    } else if (obj.IsBool()) {
    } else if (obj.IsNull()) {
      return Symbols::null();
  return String::null_string();
  auto arg0 = call->ArgumentValueAt(0)->definition();
  auto create_array = arg0->AsCreateArray();
  if (create_array == nullptr) {
    ASSERT(arg0->IsPhi() || arg0->IsParameter());
  Value* num_elements = create_array->num_elements();
    pieces.Add(Object::null_string());
    auto current = it.Current()->instruction();
    if (current == call) {
    auto store = current->AsStoreIndexed();
    if (store == nullptr || !store->index()->BindsToConstant() ||
        !store->index()->BoundConstant().IsSmi()) {
    intptr_t store_index = Smi::Cast(store->index()->BoundConstant()).Value();
    pieces.SetAt(store_index, piece);
  const String& concatenated =
  auto arg0 = call->ArgumentValueAt(0)->definition();
  if (function().ptr() == compiler_state.StringBaseInterpolate().ptr()) {
             compiler_state.StringBaseInterpolateSingle().ptr()) {
  if (argument->BindsToConstant()) {
  if (argument1->BindsToConstant() && argument2->BindsToConstant()) {
    if (Evaluate(flow_graph, argument1->BoundConstant(),
                 argument2->BoundConstant(), &result)) {
  if (!compiler_state.is_aot()) {
  if (kind == MethodRecognizer::kObjectRuntimeType) {
    case MethodRecognizer::kSmi_bitLength: {
      if (argument.IsInteger()) {
        if (!value.IsNull()) {
    case MethodRecognizer::kStringBaseLength:
    case MethodRecognizer::kStringBaseIsEmpty: {
      if (argument.IsString()) {
        const auto& str = String::Cast(argument);
        if (kind == MethodRecognizer::kStringBaseLength) {
    case MethodRecognizer::kOneByteString_equality:
    case MethodRecognizer::kTwoByteString_equality: {
      if (argument1.IsString() && argument2.IsString()) {
5889 const ICData* call_ic_data =
nullptr;
5890 if (!FLAG_propagate_ic_data || !
compiler->is_optimizing() ||
5892 const Array& arguments_descriptor =
5894 const int num_args_checked =
5896 call_ic_data =
compiler->GetOrAddStaticCallICData(
5905 locs(), *call_ic_data, rebind_rule_,
5908 TypeUsageInfo* type_usage_info =
compiler->thread()->type_usage_info();
5909 if (type_usage_info !=
nullptr) {
5921 intptr_t type_args_len,
5922 const Array& argument_names,
5928 std::move(arguments),
5930 representation_(representation),
5941#if defined(TARGET_ARCH_IA32)
5943 FATAL(
"Not supported on IA32.");
5948 intptr_t idx)
const {
5971 bool optimizing)
const {
5976#if defined(TARGET_ARCH_IA32)
5979 compiler::Label drop_args,
done;
5980 const intptr_t cacheable_pool_index =
__ object_pool_builder().AddImmediate(
5987 const bool need_to_drop_args = !
compiler->is_optimizing();
5990 "CachableIdempotentCall pool load and check. pool_index = "
5992 cacheable_pool_index);
5993 __ LoadWordFromPoolIndex(
dst, cacheable_pool_index);
5994 __ CompareImmediate(
dst, 0);
5996 __ Comment(
"CachableIdempotentCall pool load and check - end");
6002 locs(), null_ic_data, ICData::kNoRebind,
6005 __ Comment(
"CachableIdempotentCall pool store");
6006 if (!
function().HasUnboxedReturnValue()) {
6009 __ StoreWordToPoolIndex(
dst, cacheable_pool_index);
6010 if (need_to_drop_args) {
6013 __ Drop(args_info.size_with_type_args);
6016 __ Comment(
"CachableIdempotentCall pool store - end");
6022 case kParameterCheck:
6024 case kInsertedByFrontend:
6043 const intptr_t kNumInputs = 5;
6044 const intptr_t kNumTemps = 0;
6045 LocationSummary* summary =
new (zone)
6070 const intptr_t kNumInputs = 2;
6071 const intptr_t kNumTemps = 0;
6072 LocationSummary*
locs =
new (zone)
6085 if (
type().IsTypeParameter()) {
6086 const auto& type_parameter = TypeParameter::Cast(
type());
6087 const bool is_function_parameter = type_parameter.IsFunctionTypeParameter();
6089 switch (type_parameter.nullability()) {
6091 stub = is_function_parameter
6092 ? StubCode::InstantiateTypeNonNullableFunctionTypeParameter()
6094 : StubCode::InstantiateTypeNonNullableClassTypeParameter()
6099 is_function_parameter
6100 ? StubCode::InstantiateTypeNullableFunctionTypeParameter().ptr()
6101 : StubCode::InstantiateTypeNullableClassTypeParameter().ptr();
6106 compiler->GenerateStubCall(
source(), stub, UntaggedPcDescriptors::kOther,
6113 const intptr_t kNumInputs = 3;
6114 const intptr_t kNumTemps = 0;
6115 LocationSummary*
locs =
new (zone)
6134 const auto& type_args =
6137 : Object::null_type_arguments();
6138 const intptr_t
len = type_args.Length();
6139 const bool can_function_type_args_be_null =
6142 compiler::Label type_arguments_instantiated;
6143 if (type_args.IsNull()) {
6147 }
else if (type_args.IsRawWhenInstantiatedFromRaw(
len) &&
6148 can_function_type_args_be_null) {
6152 compiler::Label non_null_type_args;
6154 Object::null_object());
6163 __ BranchIf(
EQUAL, &type_arguments_instantiated,
6165 __ Bind(&non_null_type_args);
6170 __ Bind(&type_arguments_instantiated);
6183 compiler::Label* deopt =
6190 ASSERT(!cids_.IsMonomorphic() || !cids_.HasClassId(kSmiCid));
6193 compiler::Label is_ok;
6195 __ BranchIfSmi(
value, cids_.HasClassId(kSmiCid) ? &is_ok : deopt);
6200 intptr_t
min = cids_.ComputeLowestCid();
6201 intptr_t
max = cids_.ComputeHighestCid();
6204 const intptr_t num_checks = cids_.length();
6205 const bool use_near_jump = num_checks < 5;
6207 for (intptr_t
i = 0;
i < num_checks;
i++) {
6208 intptr_t cid_start = cids_[
i].cid_start;
6209 intptr_t cid_end = cids_[
i].cid_end;
6210 if (cid_start == kSmiCid && cid_end == kSmiCid) {
6213 if (cid_start == kSmiCid) cid_start++;
6214 if (cid_end == kSmiCid) cid_end--;
6215 const bool is_last =
6216 (
i == num_checks - 1) ||
6217 (
i == num_checks - 2 && cids_[
i + 1].cid_start == kSmiCid &&
6218 cids_[
i + 1].cid_end == kSmiCid);
6219 bias = EmitCheckCid(
compiler, bias, cid_start, cid_end, is_last, &is_ok,
6220 deopt, use_near_jump);
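  // Smis carry no class-id word, so they are filtered first with
  // BranchIfSmi; the remaining cid ranges are then checked one by one, with
  // `bias` carrying the amount already subtracted from the class-id register
  // between consecutive range checks.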
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  RangeErrorSlowPath* slow_path = new RangeErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  if (index_cid != kSmiCid) {
    __ BranchIfNotSmi(index, slow_path->entry_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
  ASSERT(!kAllocateMintRuntimeEntry.can_lazy_deopt() &&
         !kAllocateDoubleRuntimeEntry.can_lazy_deopt() &&
         !kAllocateFloat32x4RuntimeEntry.can_lazy_deopt() &&
         !kAllocateFloat64x2RuntimeEntry.can_lazy_deopt());
  compiler->GenerateNonLazyDeoptableStubCall(
      stub, UntaggedPcDescriptors::kOther, locs);
  compiler->RestoreLiveRegisters(locs);
    __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
    compiler->AddSlowPathCode(slow_path);
    if (FLAG_inline_alloc && !FLAG_use_slow_path) {
      __ TryAllocate(cls, slow_path->entry_label(),
      __ Jump(slow_path->entry_label());
    __ Bind(slow_path->exit_label());
  __ Comment("DoubleToIntegerSlowPath");
  auto slow_path_env =
                                UntaggedPcDescriptors::kOther, locs,
  const intptr_t box_cid = BoxCid();
  ASSERT(box_cid != kSmiCid);
  if ((value()->Type()->ToNullableCid() == box_cid) &&
    __ CompareObject(box, Object::null_object());
    __ BranchIfSmi(box, CanConvertSmi() ? &is_smi : deopt);
    __ CompareClassId(box, box_cid, temp);
  compiler::Label done;
  if (BoxCid() == kSmiCid) {
    return EmitSmiConversion(compiler);
    case kUnboxedDouble:
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      EmitLoadInt32FromBoxOrSmi(compiler);
    case kUnboxedInt64: {
      if (value()->Type()->ToCid() == kSmiCid) {
      EmitLoadInt64FromBoxOrSmi(compiler);
  const intptr_t box_cid = BoxCid();
  if (box_cid == kSmiCid || (CanConvertSmi() && (value_cid == kSmiCid))) {
    EmitLoadInt32FromBoxOrSmi(compiler);
    EmitLoadInt64FromBoxOrSmi(compiler);
  EmitLoadFromBoxWithDeopt(compiler);
                         intptr_t fixed_parameter_count,
                         intptr_t lazy_deopt_pruning_count,
                  parsed_function.function(), nullptr);
  for (intptr_t i = 0; i < definitions.length(); ++i) {
    env->values_.Add(new (zone) Value(definitions[i]));
      (outer_ == nullptr) ? nullptr : outer_->DeepCopy(zone));
    copy->MarkAsHoisted();
  if (locations_ != nullptr) {
    copy->set_locations(new_locations);
    copy->values_.Add(values_[i]->CopyWithType(zone));
    if (locations_ != nullptr) {
    it.CurrentValue()->RemoveFromUseList();
    it.CurrentValue()->RemoveFromUseList();
  copy->SetLazyDeoptPruneCount(0);
  for (intptr_t i = 0; i < argc; i++) {
    copy->values_.Add(new (zone) Value(dead));
                                        intptr_t outer_deopt_id) const {
  outer->SetDeoptId(outer_deopt_id);
  outer->SetLazyDeoptPruneCount(0);
  intptr_t use_index = instr->env()->Length();
    value->set_instruction(instr);
    value->set_use_index(use_index++);
                           value_representation_);
  auto const other_instr = other.AsTestCids();
  auto const other_instr = other.AsTestRange();
  return lower_ == other_instr->lower_ && upper_ == other_instr->upper_ &&
         value_representation_ == other_instr->value_representation_;
  return is_smi_result &&
         !comparison->AsStrictCompare()->needs_number_check();
  return is_smi_result;
    if (def != first) return false;
  bool look_for_redefinition = false;
    if ((def != first) && (def != this)) {
      if ((origin != first_origin) && (origin != this)) return nullptr;
      look_for_redefinition = true;
  if (look_for_redefinition) {
    for (intptr_t i = 1, n = InputCount(); redef != first_origin && i < n;) {
        if ((def == redef) || (def == this)) {
      } while (value != nullptr);
    ASSERT(redef != first_origin);
  auto zone = flow_graph->zone();
      if (use->instruction() == block()) {
        use->set_definition(replacement);
  return (replacement != nullptr) ? replacement : this;
  (*phis_)[index_] = phis_->Last();
  phis_->RemoveLast();
  if ((InputAt(0)->definition()->OriginalDefinition() ==
       InputAt(1)->definition()->OriginalDefinition()) &&
      strict_compare->kind() == Token::kEQ_STRICT) {
  compiler::Label if_true;
  compiler::Label* if_false =
  BranchLabels labels = {&if_true, if_false, &if_true};
    case kGrowableObjectArrayCid:
    case kOneByteStringCid:
    case kTwoByteStringCid:
    case kImmutableArrayCid:
  if (kind_ == Kind::kDeeplyImmutableAttachNativeFinalizer) {
  ASSERT(kind_ == Kind::kWriteUnmodifiableTypedData);
    case kUnboxedFloat32x4:
    case kUnboxedInt32x4:
    case kUnboxedFloat64x2:
6827 intptr_t index_scale,
6834 index_unboxed_(index_unboxed),
6835 index_scale_(index_scale),
6836 class_id_(class_id),
6838 token_pos_(
source.token_pos),
6839 result_type_(result_type) {
6852 if (
auto box =
index()->definition()->AsBoxInt64()) {
6855 auto Z = flow_graph->
zone();
6877 intptr_t index_scale,
6884 emit_store_barrier_(emit_store_barrier),
6885 index_unboxed_(index_unboxed),
6886 index_scale_(index_scale),
6887 class_id_(class_id),
6889 token_pos_(
source.token_pos),
6890 speculative_mode_(speculative_mode) {
6904 if (
auto box =
index()->definition()->AsBoxInt64()) {
6907 auto Z = flow_graph->
zone();
6926 intptr_t idx)
const {
6928 if (idx == 0)
return kNoRepresentation;
6930 if (index_unboxed_) {
6931#if defined(TARGET_ARCH_IS_64_BIT)
6932 return kUnboxedInt64;
6936 return kNoRepresentation;
#if defined(TARGET_ARCH_ARM64)
  // ...
#endif

// MemoryCopyInstr::Canonicalize (fragment): when the length and both start
// indices are constant Smis, shrink them while doubling the element size so
// the copy uses fewer, wider moves.
  if (!length()->BindsToSmiConstant()) {
    // ...
  } else if (length()->BoundSmiConstant() == 0) {
    // Nothing to copy.
    // ...
  }
  if (!src_start()->BindsToSmiConstant() ||
      // ...
  intptr_t new_element_size = element_size_;
  while (((new_length | new_src_start | new_dest_start) & 1) == 0 &&
         // ...
    new_length >>= 1;
    new_src_start >>= 1;
    new_dest_start >>= 1;
    new_element_size <<= 1;
  }
  if (new_element_size == element_size_) {
    // ...
  }
  // Replace the inputs with constants rescaled to the new element size.
  auto* const Z = flow_graph->zone();
  auto* const length_instr = /* ... */;
  auto* const src_start_instr = /* ... */;
  auto* const dest_start_instr = /* ... */;
  // ...
  element_size_ = new_element_size;
  unboxed_inputs_ = false;
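// --- Illustrative sketch (not VM code) --------------------------------------
// The loop above is a strength reduction: while the length and both start
// indices are even, halve them and double the element size, so the copy moves
// fewer, wider elements. Standalone model (hypothetical names):

#include <cstdint>

namespace sketch_memcopy {

struct CopyShape {
  int64_t length, src_start, dest_start, element_size;
};

CopyShape WidenElementSize(CopyShape s, int64_t max_element_size) {
  while (((s.length | s.src_start | s.dest_start) & 1) == 0 &&
         s.element_size < max_element_size) {
    s.length >>= 1;
    s.src_start >>= 1;
    s.dest_start >>= 1;
    s.element_size <<= 1;
  }
  return s;
}

}  // namespace sketch_memcopy

// For example, {length=8, src_start=4, dest_start=0, element_size=1} becomes
// {2, 1, 0, 4}: two 4-byte moves instead of eight 1-byte moves.
// -----------------------------------------------------------------------------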
// MemoryCopyInstr::EmitNativeCode (fragment).
  const bool constant_length = length_loc.IsConstant();
  // ...
  const intptr_t num_elements =
      constant_length ? Integer::Cast(length_loc.constant()).AsInt64Value()
                      : -1;
  // ...
  ASSERT(!constant_length || num_elements > 0);
#if defined(TARGET_ARCH_IA32)
  // ...
#endif
  // ...
  EmitComputeStartPointer(compiler, src_cid_, src_reg, src_payload_reg,
                          src_rep, src_start_loc);
  EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_payload_reg,
                          dest_rep, dest_start_loc);
  // ...
  compiler::Label copy_forwards, done;
  if (!constant_length) {
#if defined(TARGET_ARCH_IA32)
    // Free up ESI for the copy loop by spilling it.
    __ PushRegister(ESI);
#endif
    // ...
    // Overlapping ranges: decide between a forward and a backward copy.
    __ CompareRegisters(dest_payload_reg, src_payload_reg);
    // ...
  }
  __ Comment("Copying backwards");
  if (constant_length) {
    EmitUnrolledCopy(/* ... */, num_elements, /*reversed=*/true);
  } else {
    EmitLoopCopy(/* ... */, &done, &copy_forwards);
  }
  __ Jump(&done, jump_distance);
  __ Comment("Copying forwards");
  if (constant_length) {
    // ...
  }
  // ...
#if defined(TARGET_ARCH_IA32)
  if (!constant_length) {
    __ PopRegister(ESI);
  }
#endif
#if !defined(TARGET_ARCH_ARM)
// MemoryCopyInstr::EmitUnrolledCopy (fragment): copy a compile-time-known
// number of elements as a sequence of fixed-size moves.
void MemoryCopyInstr::EmitUnrolledCopy(FlowGraphCompiler* compiler,
                                       Register dest_reg,
                                       Register src_reg,
                                       intptr_t num_elements,
                                       bool reversed) {
  ASSERT(element_size_ <= 16);
  const intptr_t num_bytes = num_elements * element_size_;
#if defined(TARGET_ARCH_ARM64)
  // ...
  const intptr_t mov_size = element_size_;
#else
  const intptr_t mov_size =
      // ...
#endif
  const intptr_t mov_repeat = num_bytes / mov_size;
  ASSERT(num_bytes % mov_size == 0);
#if defined(TARGET_ARCH_IA32)
  // ...
#endif
  for (intptr_t i = 0; i < mov_repeat; i++) {
    const intptr_t offset = (reversed ? (mov_repeat - (i + 1)) : i) * mov_size;
    // ...
      __ LoadFromOffset(temp_reg, src_reg, offset, /* ... */);
      __ StoreToOffset(temp_reg, dest_reg, offset, /* ... */);
    // ...
      __ LoadFromOffset(temp_reg, src_reg, offset, /* ... */);
      __ StoreToOffset(temp_reg, dest_reg, offset, /* ... */);
    // ...
  }
#if defined(TARGET_ARCH_IS_64_BIT)
  // ...
#endif
#if defined(TARGET_ARCH_ARM64)
  // ...
#endif
#if defined(TARGET_ARCH_X64)
  // Under MSAN, mark the freshly written destination bytes as initialized.
  // ...
  __ PushRegisters(kVolatileRegisterSet);
  __ MsanUnpoison(dest_reg, num_bytes);
  __ PopRegisters(kVolatileRegisterSet);
#endif
}
#endif  // !defined(TARGET_ARCH_ARM)
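// --- Illustrative sketch (not VM code) --------------------------------------
// EmitUnrolledCopy's addressing scheme, modeled standalone: a known byte
// count is copied as mov_repeat fixed-size chunks; a backwards copy (for
// overlapping ranges) visits the chunks in reverse index order, exactly as
// the `reversed` offset computation above does. Hypothetical names:

#include <cstdint>
#include <cstring>

namespace sketch_unrolled {

void UnrolledCopy(uint8_t* dest, const uint8_t* src, int64_t num_bytes,
                  int64_t mov_size, bool reversed) {
  const int64_t mov_repeat = num_bytes / mov_size;
  for (int64_t i = 0; i < mov_repeat; i++) {
    const int64_t offset = (reversed ? (mov_repeat - (i + 1)) : i) * mov_size;
    std::memcpy(dest + offset, src + offset, mov_size);  // one machine move
  }
}

}  // namespace sketch_unrolled
// -----------------------------------------------------------------------------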
// InvokeMathCFunctionInstr constructor (fragment).
      recognized_kind_(recognized_kind),
      token_pos_(source.token_pos) {
  // ...
}

// InvokeMathCFunctionInstr::ArgumentCountFor (fragment).
    case MethodRecognizer::kDoubleTruncateToDouble:
    case MethodRecognizer::kDoubleFloorToDouble:
    case MethodRecognizer::kDoubleCeilToDouble:
    case MethodRecognizer::kDoubleRoundToDouble:
    case MethodRecognizer::kMathAtan:
    case MethodRecognizer::kMathTan:
    case MethodRecognizer::kMathAcos:
    case MethodRecognizer::kMathAsin:
    case MethodRecognizer::kMathSin:
    case MethodRecognizer::kMathCos:
    case MethodRecognizer::kMathExp:
    case MethodRecognizer::kMathLog:
      return 1;
    case MethodRecognizer::kDoubleMod:
    case MethodRecognizer::kDoubleRem:
    case MethodRecognizer::kMathDoublePow:
    case MethodRecognizer::kMathAtan2:
      return 2;
  // ...

// InvokeMathCFunctionInstr::TargetFunction: map each recognized method to
// its C runtime entry.
  switch (recognized_kind_) {
    case MethodRecognizer::kDoubleTruncateToDouble:
      return kLibcTruncRuntimeEntry;
    case MethodRecognizer::kDoubleRoundToDouble:
      return kLibcRoundRuntimeEntry;
    case MethodRecognizer::kDoubleFloorToDouble:
      return kLibcFloorRuntimeEntry;
    case MethodRecognizer::kDoubleCeilToDouble:
      return kLibcCeilRuntimeEntry;
    case MethodRecognizer::kMathDoublePow:
      return kLibcPowRuntimeEntry;
    case MethodRecognizer::kDoubleMod:
      return kDartModuloRuntimeEntry;
    case MethodRecognizer::kDoubleRem:
      return kLibcFmodRuntimeEntry;
    case MethodRecognizer::kMathTan:
      return kLibcTanRuntimeEntry;
    case MethodRecognizer::kMathAsin:
      return kLibcAsinRuntimeEntry;
    case MethodRecognizer::kMathSin:
      return kLibcSinRuntimeEntry;
    case MethodRecognizer::kMathCos:
      return kLibcCosRuntimeEntry;
    case MethodRecognizer::kMathAcos:
      return kLibcAcosRuntimeEntry;
    case MethodRecognizer::kMathAtan:
      return kLibcAtanRuntimeEntry;
    case MethodRecognizer::kMathAtan2:
      return kLibcAtan2RuntimeEntry;
    case MethodRecognizer::kMathExp:
      return kLibcExpRuntimeEntry;
    case MethodRecognizer::kMathLog:
      return kLibcLogRuntimeEntry;
    // ...
  }
  return kLibcPowRuntimeEntry;

// InvokeMathCFunctionInstr::Canonicalize (fragment): rewrite trunc/floor/ceil
// to the corresponding unary double op when supported.
  switch (recognized_kind_) {
    case MethodRecognizer::kDoubleTruncateToDouble:
      op_kind = Token::kTRUNCATE;
      break;
    case MethodRecognizer::kDoubleFloorToDouble:
      op_kind = Token::kFLOOR;
      break;
    case MethodRecognizer::kDoubleCeilToDouble:
      op_kind = Token::kCEILING;
      break;
    // ...
  }
bool DoubleToIntegerInstr::SupportsFloorAndCeil() {
#if defined(TARGET_ARCH_X64)
  // ...
#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||           \
    defined(TARGET_ARCH_RISCV64)
  // ...
#endif
}

// DoubleToIntegerInstr::Canonicalize (fragment): fuse floorToDouble or
// ceilToDouble feeding toInt() into a single floor/ceil-to-int instruction.
  if (SupportsFloorAndCeil() &&
      // ...
    if (auto* arg = value()->definition()->AsInvokeMathCFunction()) {
      switch (arg->recognized_kind()) {
        case MethodRecognizer::kDoubleFloorToDouble:
          // ...
          recognized_kind_ = MethodRecognizer::kDoubleFloorToInt;
          break;
        case MethodRecognizer::kDoubleCeilToDouble:
          // ...
          recognized_kind_ = MethodRecognizer::kDoubleCeilToInt;
          break;
        // ...
      }
    }
  }
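// --- Illustrative sketch (not VM code) --------------------------------------
// The canonicalization above fuses `x.floorToDouble().toInt()` into a single
// floor-to-int operation when the target has a direct instruction for it,
// skipping the intermediate double. Semantically the fused ops compute the
// following (ignoring out-of-range and NaN inputs, which the runtime entry
// handles separately):

#include <cmath>
#include <cstdint>

namespace sketch_double_to_int {

int64_t DoubleFloorToInt(double x) {
  return static_cast<int64_t>(std::floor(x));
}
int64_t DoubleCeilToInt(double x) {
  return static_cast<int64_t>(std::ceil(x));
}

}  // namespace sketch_double_to_int
// -----------------------------------------------------------------------------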
  // ...
    case Token::kTRUNCDIV:
  // ...

// (MakeLocationSummary fragment.)
                                              bool optimizing) const {
  // ...
}

// NativeCallInstr::SetupNative (fragment): resolve the native entry point
// and record it on the instruction.
  const int num_params =
      // ...
  bool auto_setup_scope = true;
  NativeFunction native_function = NativeEntry::ResolveNative(
      library, native_name(), num_params, &auto_setup_scope);
  if (native_function == nullptr) {
    // ...
        "native function '%s' (%" Pd " arguments) cannot be found",
    // ...
  }
  set_native_c_function(native_function);
#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) &&                \
    !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
  // ...
#endif

// FfiCallInstr::RequiredInputRepresentation (fragment): handles are passed
// as tagged pointers; other arguments use the marshaller's representation.
  if (marshaller_.IsHandleCType(marshaller_.ArgumentIndex(idx))) {
    // ...
  }
  return marshaller_.RepInFfiCall(idx);
  // ...
  return kNoRepresentation;

LocationSummary* FfiCallInstr::MakeLocationSummaryInternal(
    // ...
  auto contains_call =
      // ...
  LocationSummary* summary = new (zone) LocationSummary(
      // ...
  // Claim every temp register requested via the `temps` bitmask.
    if ((temps & (1 << reg)) != 0) {
      summary->set_temp(reg_i, /* ... */);
    }
  // ...
#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
  // ...
      marshaller_.contains_varargs()
  // ...
#endif
#define R(r) (1 << r)
  // ...
#undef R
  for (intptr_t i = 0, n = marshaller_.NumArgumentDefinitions(); i < n; ++i) {
    summary->set_in(i, marshaller_.LocInFfiCall(i));
  }
  if (marshaller_.ReturnsCompound()) {
    // ...
  }
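// --- Illustrative sketch (not VM code) --------------------------------------
// The temp-selection loop above walks a register bitmask and claims each set
// bit as a fixed temp. The bit-walk itself, standalone (hypothetical names):

#include <cstdint>
#include <vector>

namespace sketch_reglist {

std::vector<int> RegistersInMask(uint32_t temps) {
  std::vector<int> regs;
  for (int reg = 0; reg < 32; ++reg) {
    if ((temps & (1u << reg)) != 0) {
      regs.push_back(reg);  // the VM would do summary->set_temp(reg_i++, ...)
    }
  }
  return regs;
}

}  // namespace sketch_reglist
// -----------------------------------------------------------------------------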
// FfiCallInstr::EmitParamMoves (fragment): move Dart-level argument
// definitions into their native calling-convention locations.
  __ Comment("EmitParamMoves");
  // ...
  const auto& return_location =
      // ...
  if (return_location.IsPointerToMemory()) {
    __ Comment("return_location.IsPointerToMemory");
    const auto& pointer_location =
        return_location.AsPointerToMemory().pointer_location();
    const auto& pointer_register =
        pointer_location.IsRegisters()
            ? pointer_location.AsRegisters().reg_at(0)
            : /* ... */;
    __ MoveRegister(pointer_register, SPREG);
    __ AddImmediate(pointer_register, marshaller_.PassByPointerStackOffset(
        // ...
    if (pointer_location.IsStack()) {
      const auto& pointer_stack = pointer_location.AsStack();
      __ StoreMemoryValue(pointer_register, pointer_stack.base_register(),
                          pointer_stack.offset_in_bytes());
    }
  }
  // ...
  intptr_t def_index = 0;
  for (intptr_t arg_index = 0; arg_index < marshaller_.num_args();
       // ...
    const intptr_t num_defs = marshaller_.NumDefinitions(arg_index);
    const auto& arg_target = marshaller_.Location(arg_index);
    __ Comment("arg_index %" Pd " arg_target %s", arg_index,
               arg_target.ToCString());
    // ...
    for (intptr_t i = 0; i < num_defs; i++) {
      if ((arg_target.IsPointerToMemory() ||
           marshaller_.IsCompoundPointer(arg_index)) &&
          // ...
      __ Comment("  def_index %" Pd, def_index);
      // ...
      const auto& def_target =
          arg_target.payload_type().IsPrimitive() ? arg_target
          : arg_target.IsMultiple() ? *arg_target.AsMultiple().locations()[i]
          : arg_target.IsPointerToMemory()
              ? arg_target.AsPointerToMemory().pointer_location()
              : arg_target.Split(compiler->zone(), /* ... */);
      // ...
        __ Comment("origin.IsConstant()");
        ASSERT(!marshaller_.IsHandleCType(arg_index));
        ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
        ASSERT(!marshaller_.IsCompoundPointer(arg_index));
        compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
      // ...
        __ Comment("origin.IsPairLocation() and constant");
        ASSERT(!marshaller_.IsHandleCType(arg_index));
        ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
        ASSERT(!marshaller_.IsCompoundPointer(arg_index));
        compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
      } else if (marshaller_.IsHandleCType(arg_index)) {
        __ Comment("marshaller_.IsHandleCType(arg_index)");
        // ...
        ASSERT(origin_rep == kTagged);
        // ...
        if (def_target.IsRegisters()) {
          __ AddImmediate(def_target.AsLocation().reg(), origin.base_reg(),
                          // ...
        } else {
          ASSERT(def_target.IsStack());
          const auto& target_stack = def_target.AsStack();
          // ...
          __ StoreToOffset(temp0, target_stack.base_register(),
                           target_stack.offset_in_bytes());
        }
      } else {
        __ Comment("def_target %s <- origin %s %s",
                   // ...
        if (def_target.IsStack()) {
          const auto& def_target_stack = def_target.AsStack();
          ASSERT(def_target_stack.offset_in_bytes() +
                     def_target.payload_type().SizeInBytes() <=
                 marshaller_.RequiredStackSpaceInBytes());
        }
        // ...
        if (marshaller_.IsTypedDataPointer(arg_index) ||
            marshaller_.IsCompoundPointer(arg_index)) {
          // Unwrap the TypedData object into its untagged payload address.
          __ Comment("Load typed data base address");
          // ...
          __ LoadFromSlot(origin.reg(), origin.reg(), Slot::PointerBase_data());
          if (marshaller_.IsCompoundPointer(arg_index)) {
            __ Comment("Load offset in bytes");
            const intptr_t offset_in_bytes_def_index = def_index + 1;
            const Location offset_in_bytes_loc =
                rebase.Rebase(locs()->in(offset_in_bytes_def_index));
            // ...
              offset_in_bytes_reg = offset_in_bytes_loc.reg();
            // ...
              offset_in_bytes_reg = temp1;
            // ...
                offset_in_bytes_loc, &no_temp);
            // ...
            __ AddRegisters(origin.reg(), offset_in_bytes_reg);
          }
        }
        compiler->EmitMoveToNative(def_target, origin, origin_rep, &temp_alloc);
      }
      // ...
    }
    if (arg_target.IsPointerToMemory()) {
      // The struct contents were copied to the stack; now pass a pointer to
      // that copy.
      __ Comment("arg_target.IsPointerToMemory");
      // ...
      const auto& pointer_loc =
          arg_target.AsPointerToMemory().pointer_location();
      // ...
          compiler->zone(), pointer_loc.payload_type(),
          pointer_loc.container_type(), temp0);
      compiler->EmitNativeMove(dst, pointer_loc, &temp_alloc);
      __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
      // ...
      __ Comment("IsPointerToMemory add offset");
      const intptr_t offset_in_bytes_def_index =
          // ...
      const Location offset_in_bytes_loc =
          rebase.Rebase(locs()->in(offset_in_bytes_def_index));
      // ...
        offset_in_bytes_reg = offset_in_bytes_loc.reg();
      // ...
        offset_in_bytes_reg = temp1;
      // ...
          offset_in_bytes_loc, &no_temp);
      // ...
      __ AddRegisters(temp0, offset_in_bytes_reg);
      // ...
      __ Comment("IsPointerToMemory copy chunks");
      const intptr_t sp_offset =
          marshaller_.PassByPointerStackOffset(arg_index);
      __ UnrolledMemCopy(SPREG, sp_offset, temp0, 0,
                         arg_target.payload_type().SizeInBytes(), temp1);
      // ...
      __ MoveRegister(temp0, SPREG);
      __ AddImmediate(temp0, sp_offset);
      // ...
          compiler->zone(), pointer_loc.payload_type(),
          pointer_loc.container_type(), temp0);
      __ Comment("pointer_loc %s <- src %s", pointer_loc.ToCString(),
                 // ...
      compiler->EmitNativeMove(pointer_loc, src, &temp_alloc);
    }
  }
  __ Comment("EmitParamMovesEnd");
// FfiCallInstr::EmitReturnMoves (fragment): move the native return value back
// into Dart-visible locations.
  const auto& returnLocation =
      // ...
  if (returnLocation.payload_type().IsVoid()) {
    return;
  }
  // ...
  __ Comment("EmitReturnMoves");
  // ...
  if (returnLocation.IsRegisters() || returnLocation.IsFpuRegisters()) {
    const auto& src = returnLocation;
    // ...
    compiler->EmitMoveFromNative(dst_loc, dst_type, src, &no_temp);
  } else if (marshaller_.ReturnsCompound()) {
    ASSERT(returnLocation.payload_type().IsCompound());
    // ...
    // Get the data pointer of the TypedData that receives the compound value.
    __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
    // ...
    if (returnLocation.IsPointerToMemory()) {
      // ...
      const intptr_t sp_offset =
          // ...
      __ UnrolledMemCopy(temp0, 0, SPREG, sp_offset,
                         marshaller_.CompoundReturnSizeInBytes(), temp1);
    } else {
      ASSERT(returnLocation.IsMultiple());
      // Copy each register/FPU part of the compound return piece by piece.
      const auto& multiple =
          // ...
      int offset_in_bytes = 0;
      for (int i = 0; i < multiple.locations().length(); i++) {
        const auto& src = *multiple.locations().At(i);
        // ...
            src.payload_type(), src.container_type(), temp0, offset_in_bytes);
        // ...
        offset_in_bytes += src.payload_type().SizeInBytes();
      }
    }
  }
  // ...
  __ Comment("EmitReturnMovesEnd");
// StoreFieldInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 2;
#if defined(TARGET_ARCH_IA32)
  // ...
#endif
  const intptr_t kNumTemps = 0;
  // ...
  if (rep == kUntagged) {
    // ...
    summary->set_in(kValuePos, /* ... */);
  }
  // ...
#if defined(TARGET_ARCH_IA32)
  // ...
#elif defined(TARGET_ARCH_X64)
  // ...
#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||           \
    defined(TARGET_ARCH_RISCV64)
  // ...
#endif
  if (auto constant = value()->definition()->AsConstant()) {
    const auto& value = constant->value();
    // ...
    summary->set_in(kValuePos, value_loc);
  }
  // ...
  if (kNumTemps == 1) {
    // ...
  }

// StoreFieldInstr::EmitNativeCode (fragment): pick the store that matches the
// slot's representation and barrier requirements.
  if (rep == kUntagged) {
    __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
                            /* ... */);
  // ...
    __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
                            /* ... */);
  // ...
    // A 64-bit value stored as a register pair on 32-bit targets.
    const Register value_lo = value_pair->At(0).reg();
    const Register value_hi = value_pair->At(1).reg();
    // ...
    __ StoreFieldToOffset(value_hi, instance_reg, /* ... */);
  // ...
    __ StoreUnboxedDouble(value, instance_reg, /* ... */);
  // ...
    __ StoreUnboxedSimd128(value, instance_reg, /* ... */);
  // ...
    // Tagged store that may need a generational write barrier.
    __ StoreToSlot(locs()->in(kValuePos).reg(), instance_reg, slot(),
                   /* ... */);
  // ...
    __ StoreObjectIntoObjectOffsetNoBarrier(instance_reg, OffsetInBytes(),
                                            /* ... */);
  // ...
    __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
                            /* ... */);
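// --- Illustrative sketch (not VM code) --------------------------------------
// The store selection above hinges on the generational write barrier: a
// tagged store needs the barrier unless the stored value can be proven to
// never require one (e.g. it is a Smi, or a constant known to live outside
// the young generation). A simplified decision, under those assumptions:

namespace sketch_store_field {

bool NeedsWriteBarrier(bool value_is_heap_object, bool value_can_be_young) {
  // Smis and provably-old values skip the barrier; everything else must tell
  // the GC about the old->young pointer the store may create.
  return value_is_heap_object && value_can_be_young;
}

}  // namespace sketch_store_field
// -----------------------------------------------------------------------------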
// CalculateElementAddressInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  auto* const summary = new (zone)
      // ...

// CalculateElementAddressInstr::Canonicalize (fragment): with constant index
// and offset, fold the displacement using wrap-around arithmetic.
  const int64_t offset_in_bytes = Utils::AddWithWrapAround<int64_t>(
      Utils::MulWithWrapAround<int64_t>(index()->BoundSmiConstant(),
                                        index_scale()),
      offset()->BoundSmiConstant());
  // ...

// CalculateElementAddressInstr::EmitNativeCode (fragment):
// result = base + index * scale + offset.
  ASSERT(opt && !IsNoop());
  // ...
  if (index_loc.IsConstant()) {
    const int64_t index = Smi::Cast(index_loc.constant()).Value();
    // ...
    if (offset_loc.IsConstant()) {
      const int64_t disp =
          scaled_index + Smi::Cast(offset_loc.constant()).Value();
      // ...
    } else {
      __ AddScaled(result_reg, base_reg, offset_loc.reg(), TIMES_1,
                   /* ... */);
    }
  } else {
    Register index_reg = index_loc.reg();
    // ...
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
    // ...
    __ MoveAndSmiTagRegister(result_reg, index_reg);
    index_reg = result_reg;
#endif
    // ...
    if (offset_loc.IsConstant()) {
      const intptr_t disp = Smi::Cast(offset_loc.constant()).Value();
      // ...
      __ AddScaled(result_reg, base_reg, index_reg, scale, disp);
    } else {
      __ AddScaled(result_reg, base_reg, index_reg, scale, 0);
      __ AddRegisters(result_reg, offset_loc.reg());
    }
  }
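// --- Illustrative sketch (not VM code) --------------------------------------
// The emitted code above computes result = base + index * scale + offset,
// folding constant parts into an immediate displacement where possible. Its
// arithmetic, standalone (hypothetical names):

#include <cstdint>

namespace sketch_element_address {

uintptr_t ElementAddress(uintptr_t base, int64_t index, int64_t scale,
                         int64_t offset) {
  return base + static_cast<uintptr_t>(index * scale + offset);
}

}  // namespace sketch_element_address
// -----------------------------------------------------------------------------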
const Code& DartReturnInstr::GetReturnStub(FlowGraphCompiler* compiler) const {
  // ...
          ->return_async_not_future_stub());
  // ...
        compiler->isolate_group()->object_store()->return_async_stub());
  } else if (function.IsAsyncGenerator()) {
    // ...
        compiler->isolate_group()->object_store()->return_async_star_stub());
  }
  // ...
}
void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) {
  // ...
  if (dst1.payload_type().IsVoid()) {
    return;
  }
  // ...
  if (dst1.IsMultiple()) {
    __ Comment("Load TypedDataBase data pointer and apply offset.");
    // ...
    __ LoadFromSlot(typed_data_reg, typed_data_reg, Slot::PointerBase_data());
    // ...
    __ AddRegisters(typed_data_reg, offset_reg);
    // ...
    __ Comment("Copy loop");
    const auto& multiple = dst1.AsMultiple();
    int offset_in_bytes = 0;
    for (intptr_t i = 0; i < multiple.locations().length(); i++) {
      const auto& dst = *multiple.locations().At(i);
      // The destination registers must not clobber the data pointer.
      ASSERT(/* ... */
             dst.AsRegisters().reg_at(0) != typed_data_reg);
      const auto& src = compiler::ffi::NativeStackLocation(
          dst.payload_type(), dst.container_type(), typed_data_reg,
          // ...
      NoTemporaryAllocator no_temp;
      // ...
      offset_in_bytes += dst.payload_type().SizeInBytes();
    }
  } else {
    // ...
    const auto& dst = dst1.IsPointerToMemory()
                          ? dst1.AsPointerToMemory().pointer_return_location()
                          : /* ... */;
    // ...
    NoTemporaryAllocator no_temp;
    compiler->EmitMoveToNative(dst, src_loc, src_type, &no_temp);
  }
}
// NativeReturnInstr::MakeLocationSummary (fragment).
  const intptr_t input_count = marshaller_.NumReturnDefinitions();
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      // ...
  if (native_loc.IsMultiple()) {
    // ...
  } else {
    const auto& native_return_loc =
        native_loc.IsPointerToMemory()
            ? native_loc.AsPointerToMemory().pointer_return_location()
            : /* ... */;
    locs->set_in(0, native_return_loc.AsLocation());
  }
// RecordCoverageInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 2;
  LocationSummary* locs = new (zone)
      // ...

// RecordCoverageInstr::EmitNativeCode (fragment): mark this coverage slot by
// storing into the coverage array.
  __ LoadObject(array_temp, coverage_array_);
  // ...
  __ StoreFieldToOffset(
      value_temp, array_temp,
      // ...
  // ...
  if (marshaller_.ReturnsCompound()) {
    // ...
  }
  // ...

// LeafRuntimeCallInstr::MakeLocationSummaryInternal (fragment).
    if ((temps & (1 << reg)) != 0) {
      // ...
    }
  // ...
  const auto& argument_locations =
      // ...
  for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
    const auto& argument_location = *argument_locations.At(i);
    if (argument_location.IsRegisters()) {
      const auto& reg_location = argument_location.AsRegisters();
      ASSERT(reg_location.num_regs() == 1);
      summary->set_in(i, reg_location.AsLocation());
    } else if (argument_location.IsFpuRegisters()) {
      // ...
    } else if (argument_location.IsStack()) {
      // ...
    }
  }
  const auto& return_location = native_calling_convention_.return_location();
  ASSERT(return_location.IsRegisters());
  summary->set_out(0, return_location.AsLocation());
LeafRuntimeCallInstr::LeafRuntimeCallInstr(
    // ...
      return_representation_(return_representation),
      argument_representations_(argument_representations),
      native_calling_convention_(native_calling_convention) {
  // The target address is passed as the final input.
  const intptr_t num_inputs = argument_representations.length() + 1;
  // ...
}

// LeafRuntimeCallInstr::Make (fragment): derive the native calling convention
// from the requested representations.
  const auto& native_function_type =
      // ...
          zone, return_representation, argument_representations);
  const auto& native_calling_convention =
      // ...
          zone, native_function_type);
  // ...
      native_calling_convention, std::move(inputs));
// LeafRuntimeCallInstr::EmitParamMoves (fragment).
  __ Comment("EmitParamMoves");
  const auto& argument_locations =
      // ...
  for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
    const auto& argument_location = *argument_locations.At(i);
    if (argument_location.IsRegisters()) {
      const auto& reg_location = argument_location.AsRegisters();
      ASSERT(reg_location.num_regs() == 1);
      // ...
      compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
                                 &temp_alloc);
    } else if (argument_location.IsFpuRegisters()) {
      // ...
    } else if (argument_location.IsStack()) {
      // ...
          argument_location.ToCString());
      compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
                                 &temp_alloc);
    }
  }
  __ Comment("EmitParamMovesEnd");
// SimdOpInstr::KindForOperator(MethodRecognizer::Kind) (fragment): map
// recognized SIMD arithmetic methods to SimdOp kinds.
    case MethodRecognizer::kFloat32x4Mul:
      return SimdOpInstr::kFloat32x4Mul;
    case MethodRecognizer::kFloat32x4Div:
      return SimdOpInstr::kFloat32x4Div;
    case MethodRecognizer::kFloat32x4Add:
      return SimdOpInstr::kFloat32x4Add;
    case MethodRecognizer::kFloat32x4Sub:
      return SimdOpInstr::kFloat32x4Sub;
    case MethodRecognizer::kFloat64x2Mul:
      return SimdOpInstr::kFloat64x2Mul;
    case MethodRecognizer::kFloat64x2Div:
      return SimdOpInstr::kFloat64x2Div;
    case MethodRecognizer::kFloat64x2Add:
      return SimdOpInstr::kFloat64x2Add;
    case MethodRecognizer::kFloat64x2Sub:
      return SimdOpInstr::kFloat64x2Sub;
  // ...

// SimdOpInstr::CreateFromCall (fragment).
    case MethodRecognizer::kFloat32x4Mul:
    case MethodRecognizer::kFloat32x4Div:
    case MethodRecognizer::kFloat32x4Add:
    case MethodRecognizer::kFloat32x4Sub:
    case MethodRecognizer::kFloat64x2Mul:
    case MethodRecognizer::kFloat64x2Div:
    case MethodRecognizer::kFloat64x2Add:
    case MethodRecognizer::kFloat64x2Sub:
      // ...
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
    // On x86, "greater than" comparisons are emitted as the mirrored
    // "less than" operations with swapped operands.
    case MethodRecognizer::kFloat32x4GreaterThan:
      // ...
    case MethodRecognizer::kFloat32x4GreaterThanOrEqual:
      // ...
      op = new (zone)
          SimdOpInstr(SimdOpInstr::kFloat32x4LessThanOrEqual, call->deopt_id());
      // ...
#endif
  // ...
  if (receiver != nullptr) {
    // ...
  }
  for (intptr_t i = (receiver != nullptr ? 1 : 0); i < op->InputCount(); i++) {
    // ...
  }
  if (op->HasMask()) {
    // ...
  }
  // ...

// SimdOpInstr::KindForOperator(cid, Token::Kind) (fragment): per-cid
// operator tables.
      case Token::kADD:
        return kFloat32x4Add;
      case Token::kSUB:
        return kFloat32x4Sub;
      case Token::kMUL:
        return kFloat32x4Mul;
      case Token::kDIV:
        return kFloat32x4Div;
      // ...
      case Token::kADD:
        return kFloat64x2Add;
      case Token::kSUB:
        return kFloat64x2Sub;
      case Token::kMUL:
        return kFloat64x2Mul;
      case Token::kDIV:
        return kFloat64x2Div;
      // ...
      case Token::kBIT_AND:
        return kInt32x4BitAnd;
      case Token::kBIT_OR:
        return kInt32x4BitOr;
      case Token::kBIT_XOR:
        return kInt32x4BitXor;
// SimdOpInstr::KindForMethod (fragment): generated from the SIMD op table.
#define CASE_METHOD(Arity, Mask, Name, ...)                                    \
  case MethodRecognizer::k##Name:                                              \
    // ...
#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
  // ...
#undef CASE_BINARY_OP
  // ...

// SimdRepresentation: narrow integer lanes are widened to 32-bit.
  return rep == kUnboxedInt8 ? kUnboxedInt32 : rep;
  // ...

// Per-op metadata table: arity, mask flag, result and input representations.
#define REP(T) (SimdRepresentation(kUnboxed##T))
// ...
#define ENCODE_INPUTS_0()
#define ENCODE_INPUTS_1(In0) REP(In0)
#define ENCODE_INPUTS_2(In0, In1) REP(In0), REP(In1)
#define ENCODE_INPUTS_3(In0, In1, In2) REP(In0), REP(In1), REP(In2)
#define ENCODE_INPUTS_4(In0, In1, In2, In3)                                    \
  REP(In0), REP(In1), REP(In2), REP(In3)
// ...
#define HAS_MASK true
// ...
#define CASE(Arity, Mask, Name, Args, Result)                                  \
  {Arity, HAS_##Mask, REP(Result), {PP_APPLY(ENCODE_INPUTS_##Arity, Args)}},
// ...
#undef ENCODE_INFORMATION
// ...
#undef ENCODE_INPUTS_0
#undef ENCODE_INPUTS_1
#undef ENCODE_INPUTS_2
#undef ENCODE_INPUTS_3
#undef ENCODE_INPUTS_4
bool SimdOpInstr::HasMask() const {
  // ...
}

// SimdOpInstr::Canonicalize (fragment): fold FromDoubles/FromInts
// constructors whose inputs are all constants.
  if ((kind() == SimdOpInstr::kFloat64x2FromDoubles) &&
      InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant()) {
    // ...
    if (x.IsDouble() && y.IsDouble()) {
      // ...
    }
  }
  if ((kind() == SimdOpInstr::kFloat32x4FromDoubles) &&
      InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
      InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
    // ...
    if (x.IsDouble() && y.IsDouble() && z.IsDouble() && w.IsDouble()) {
      // ...
    }
  }
  if ((kind() == SimdOpInstr::kInt32x4FromInts) &&
      InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
      InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
    // ...
    if (x.IsInteger() && y.IsInteger() && z.IsInteger() && w.IsInteger()) {
      // ...
          Integer::Cast(x).AsInt64Value(), Integer::Cast(y).AsInt64Value(),
          Integer::Cast(z).AsInt64Value(), Integer::Cast(w).AsInt64Value(),
      // ...
    }
  }
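// --- Illustrative sketch (not VM code) --------------------------------------
// The folds above evaluate SIMD constructors at compile time when every lane
// is a constant. What the Float32x4FromDoubles fold computes, standalone:

#include <array>

namespace sketch_simd_fold {

std::array<float, 4> FoldFloat32x4FromDoubles(double x, double y, double z,
                                              double w) {
  // Each double lane is narrowed to a float, as the boxed result would be.
  return {static_cast<float>(x), static_cast<float>(y),
          static_cast<float>(z), static_cast<float>(w)};
}

}  // namespace sketch_simd_fold
// -----------------------------------------------------------------------------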
// Call1ArgStubInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  // ...

// Call1ArgStubInstr::EmitNativeCode: select the stub by kind.
  ObjectStore* object_store = compiler->isolate_group()->object_store();
  // ...
      stub = object_store->clone_suspend_state_stub();
  // ...
      stub = object_store->init_async_stub();
  // ...
      stub = object_store->init_async_star_stub();
  // ...
      stub = object_store->init_sync_star_stub();
  // ...
      stub = object_store->ffi_async_callback_send_stub();
  // ...
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             // ...

// SuspendInstr::MakeLocationSummary (fragment).
  const intptr_t kNumTemps = 0;
  // ...

// SuspendInstr::EmitNativeCode: select the suspend stub by kind.
  ObjectStore* object_store = compiler->isolate_group()->object_store();
  // ...
      stub = object_store->await_stub();
  // ...
      stub = object_store->await_with_type_check_stub();
  // ...
      stub = object_store->yield_async_star_stub();
  // ...
      stub = object_store->suspend_sync_star_at_start_stub();
  // ...
      stub = object_store->suspend_sync_star_at_yield_stub();
  // ...
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             // ...
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
  // ...
      UntaggedPcDescriptors::kOther, locs(), env());
  // ...
#endif
// AllocateRecordInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      // ...

// AllocateRecordInstr::EmitNativeCode (fragment).
  // ...
      compiler->isolate_group()->object_store()->allocate_record_stub());
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             // ...

// AllocateSmallRecordInstr::MakeLocationSummary (fragment).
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      // ...

// AllocateSmallRecordInstr::EmitNativeCode: pick the stub matching the
// record's field count and whether it has named fields.
  auto object_store = compiler->isolate_group()->object_store();
  // ...
  if (shape().HasNamedFields()) {
    // ...
      stub = object_store->allocate_record2_named_stub();
    // ...
      stub = object_store->allocate_record3_named_stub();
    // ...
  } else {
    // ...
      stub = object_store->allocate_record2_stub();
    // ...
      stub = object_store->allocate_record3_stub();
    // ...
  }
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             // ...

// TestIntInstr::MakeLocationSummary (fragment).
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      // ...

int64_t TestIntInstr::ComputeImmediateMask() {
  int64_t mask = Integer::Cast(locs()->in(1).constant()).AsInt64Value();
  switch (representation_) {
    // ...
  }
}
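// --- Illustrative sketch (not VM code) --------------------------------------
// ComputeImmediateMask adjusts the constant operand to the representation the
// test runs on: an int64 test uses the value as-is, while a test on tagged
// Smis must shift the mask into raw-Smi form (value << tag size, assuming the
// usual one-bit Smi tag layout). Standalone model (hypothetical names):

#include <cstdint>

namespace sketch_test_int {

int64_t ImmediateMask(int64_t mask, bool tagged, int smi_tag_size = 1) {
  return tagged ? (mask << smi_tag_size) : mask;
}

}  // namespace sketch_test_int
// -----------------------------------------------------------------------------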
static int step(int x, SkScalar min, SkScalar max)
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static void is_empty(skiatest::Reporter *reporter, const SkPath &p)
static float next(float f)
static float prev(float f)
static bool is_integer(SkScalar x)
static void copy(void *dst, const uint8_t *src, int width, int bpp, int deltaSrc, int offset, const SkPMColor ctable[])
#define DEBUG_ASSERT(cond)
#define ASSERT_EQUAL(expected, actual)
#define RELEASE_ASSERT(cond)
bool IsSubtypeOf(const AbstractType &other, Heap::Space space, FunctionTypeMapping *function_type_equivalence=nullptr) const
bool IsTopTypeForSubtyping() const
virtual AbstractTypePtr Canonicalize(Thread *thread) const
bool IsObjectType() const
static bool InstantiateAndTestSubtype(AbstractType *subtype, AbstractType *supertype, const TypeArguments &instantiator_type_args, const TypeArguments &function_type_args)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual intptr_t InputCount() const
bool has_instantiator_type_args() const
@ kInstantiatorTypeArgsPos
virtual Definition * Canonicalize(FlowGraph *flow_graph)
RecordShape shape() const
intptr_t num_fields() const
RecordShape shape() const
virtual intptr_t InputCount() const
classid_t class_id() const
AllocateUninitializedContextInstr(const InstructionSource &source, intptr_t num_context_variables, intptr_t deopt_id)
static bool ParseKind(const char *str, Kind *out)
const String & dst_name() const
virtual intptr_t statistics_tag() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Value * RedefinedValue() const
Value * function_type_arguments() const
static const char * KindToCString(Kind kind)
Value * instantiator_type_arguments() const
@ FOR_EACH_ASSERT_ASSIGNABLE_KIND
virtual Value * RedefinedValue() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Value * function_type_arguments() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Value * super_type() const
Value * instantiator_type_arguments() const
void RemoveCurrentFromGraph()
const T & At(intptr_t index) const
void SetAt(intptr_t index, const T &t)
Token::Kind op_kind() const
virtual PRINT_OPERANDS_TO_SUPPORT Definition * Canonicalize(FlowGraph *flow_graph)
virtual Representation representation() const
virtual intptr_t DeoptimizationTarget() const
static const BinaryFeedback * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, intptr_t argument_cid)
BinaryFeedback(Zone *zone)
static const BinaryFeedback * Create(Zone *zone, const ICData &ic_data)
virtual bool ComputeCanDeoptimize() const
static bool IsSupported(Token::Kind op_kind, Value *left, Value *right)
void set_can_overflow(bool overflow)
bool can_overflow() const
bool RightIsNonZero() const
Token::Kind op_kind() const
virtual bool AttributesEqual(const Instruction &other) const
bool RightIsPowerOfTwoConstant() const
static BinaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *left, Value *right, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
bool is_truncating() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual bool ComputeCanDeoptimize() const
Range * right_range() const
bool Contains(intptr_t i) const
BlockEntryInstr * dominator() const
intptr_t NestingDepth() const
void set_preorder_number(intptr_t number)
bool FindOsrEntryAndRelink(GraphEntryInstr *graph_entry, Instruction *parent, BitVector *block_marks)
virtual void ClearPredecessors()=0
ParallelMoveInstr * parallel_move() const
intptr_t preorder_number() const
bool HasParallelMove() const
intptr_t block_id() const
BlockEntryInstr * ImmediateDominator() const
virtual void AddPredecessor(BlockEntryInstr *predecessor)=0
bool Dominates(BlockEntryInstr *other) const
void ReplaceAsPredecessorWith(BlockEntryInstr *new_block)
bool IsLoopHeader() const
void ClearAllInstructions()
void set_last_instruction(Instruction *instr)
intptr_t stack_depth() const
bool DiscoverBlock(BlockEntryInstr *predecessor, GrowableArray< BlockEntryInstr * > *preorder, GrowableArray< intptr_t > *parent)
Instruction * last_instruction() const
GrowableArray< Definition * > * initial_definitions()
static const Bool & False()
static const Bool & Get(bool value)
static const Bool & True()
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static bool IsBootstrapResolver(Dart_NativeEntryResolver resolver)
static void Allocate(FlowGraphCompiler *compiler, Instruction *instruction, const Class &cls, Register result, Register temp)
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
BoxAllocationSlowPath(Instruction *instruction, const Class &cls, Register result)
static BoxInstr * Create(Representation from, Value *value)
Definition * Canonicalize(FlowGraph *flow_graph)
Representation from_representation() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual bool ValueFitsSmi() const
Definition * Canonicalize(FlowGraph *flow_graph)
virtual intptr_t SuccessorCount() const
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
void SetComparison(ComparisonInstr *comp)
ComparisonInstr * comparison() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Representation RequiredInputRepresentation(intptr_t idx) const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Representation representation() const
CachableIdempotentCallInstr(const InstructionSource &source, Representation representation, const Function &function, intptr_t type_args_len, const Array &argument_names, InputsArray &&arguments, intptr_t deopt_id)
virtual intptr_t ArgumentsSize() const
const Function & function() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
intptr_t index_scale() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
StringPtr target_name() const
ArrayPtr arguments_descriptor() const
static const CallTargets * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, const Function &target)
const Function & MostPopularTarget() const
static const CallTargets * Create(Zone *zone, const ICData &ic_data)
TargetInfo * TargetAt(int i) const
bool HasSingleTarget() const
static const CallTargets * CreateAndExpand(Zone *zone, const ICData &ic_data)
intptr_t AggregateCallCount() const
const Function & FirstTarget() const
StaticTypeExactnessState MonomorphicExactness() const
bool HasSingleRecognizedTarget() const
static constexpr Register kSecondReturnReg
static constexpr RegList kVolatileXmmRegisters
static constexpr intptr_t kVolatileCpuRegisters
static constexpr Register kFirstNonArgumentRegister
static constexpr Register kFfiAnyNonAbiRegister
static constexpr Register kReturnReg
static constexpr Register kSecondNonArgumentRegister
const RuntimeEntry & TargetFunction() const
static intptr_t LengthOffsetFor(intptr_t class_id)
static bool IsFixedLengthArrayType(intptr_t class_id)
virtual Value * RedefinedValue() const
bool IsRedundant(bool use_loops=false)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
bool IsDeoptIfNull() const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr, TemplateInstruction, FIELD_LIST) private void EmitBitTest(FlowGraphCompiler *compiler, intptr_t min, intptr_t max, intptr_t mask, compiler::Label *deopt)
void EmitNullCheck(FlowGraphCompiler *compiler, compiler::Label *deopt)
CheckClassInstr(Value *value, intptr_t deopt_id, const Cids &cids, const InstructionSource &source)
const Cids & cids() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual bool AttributesEqual(const Instruction &other) const
bool IsDeoptIfNotNull() const
intptr_t ComputeCidMask() const
static bool IsCompactCidRange(const Cids &cids)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Value * InputAt(intptr_t i) const
ComparisonInstr * comparison() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
const String & function_name() const
static void AddMetadataForRuntimeCall(CheckNullInstr *check_null, FlowGraphCompiler *compiler)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Value * RedefinedValue() const
virtual bool AttributesEqual(const Instruction &other) const
ExceptionType exception_type() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Value * RedefinedValue() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
bool MustInclude(intptr_t cid)
CidCheckerForRanges(Thread *thread, ClassTable *table, const Class &cls, bool include_abstract, bool exclude_null)
bool MayInclude(intptr_t cid)
void Sort(int compare(CidRange *const *a, CidRange *const *b))
bool HasClassId(intptr_t cid) const
static Cids * CreateMonomorphic(Zone *zone, intptr_t cid)
intptr_t MonomorphicReceiverCid() const
static Cids * CreateForArgument(Zone *zone, const BinaryFeedback &binary_feedback, int argument_number)
void SetLength(intptr_t len)
intptr_t ComputeLowestCid() const
intptr_t ComputeHighestCid() const
GrowableArray< CidRange * > cid_ranges_
void Add(CidRange *target)
bool Equals(const Cids &other) const
bool IsMonomorphic() const
ClassPtr At(intptr_t cid) const
bool HasValidClassAt(intptr_t cid) const
bool has_dynamically_extendable_subtypes() const
const char * ScrubbedNameCString() const
LibraryPtr library() const
GrowableObjectArrayPtr direct_subclasses() const
ClassPtr SuperClass(ClassTable *class_table=nullptr) const
GrowableObjectArrayPtr direct_implementors() const
intptr_t NumTypeParameters(Thread *thread) const
static bool IsOptimized(CodePtr code)
@ kTagAssertAssignableFromSource
@ kTagAssertAssignableInsertedByFrontend
@ kTagAssertAssignableParameterCheck
virtual void NegateComparison()
intptr_t operation_cid() const
virtual bool AttributesEqual(const Instruction &other) const
virtual Condition EmitComparisonCode(FlowGraphCompiler *compiler, BranchLabels labels)=0
static CompileType FromCid(intptr_t cid)
const AbstractType * ToAbstractType()
static CompilerState & Current()
static bool IsBackgroundCompilation()
static constexpr intptr_t kNoOSRDeoptId
virtual bool AttributesEqual(const Instruction &other) const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
const Object & value() const
ConstantInstr(const Object &value)
static ObjectPtr Unknown()
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
static bool IsArrayLength(Definition *def)
Value * env_use_list() const
void ReplaceWith(Definition *other, ForwardInstructionIterator *iterator)
Value * input_use_list() const
Object & constant_value()
void set_range(const Range &)
virtual Value * RedefinedValue() const
void AddEnvUse(Value *value)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition * OriginalDefinitionIgnoreBoxingAndConstraints()
void ReplaceUsesWith(Definition *other)
bool HasOnlyInputUse(Value *use) const
void AddInputUse(Value *value)
Definition * OriginalDefinition()
void set_ssa_temp_index(intptr_t index)
void set_input_use_list(Value *head)
bool HasOnlyUse(Value *use) const
void ReplaceWithResult(Instruction *replacement, Definition *replacement_for_uses, ForwardInstructionIterator *iterator)
virtual bool CanReplaceWithConstant() const
ValueListIterable input_uses() const
intptr_t ssa_temp_index() const
void set_env_use_list(Value *head)
static constexpr intptr_t kNone
static intptr_t ToDeoptAfter(intptr_t deopt_id)
virtual Representation representation() const
virtual intptr_t ArgumentsSize() const
const Function & interface_target() const
static DispatchTableCallInstr * FromCall(Zone *zone, const InstanceCallBaseInstr *call, Value *cid, const Function &interface_target, const compiler::TableSelector *selector)
virtual Representation RequiredInputRepresentation(intptr_t idx) const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
const compiler::TableSelector * selector() const
DispatchTableCallInstr(const InstructionSource &source, const Function &interface_target, const compiler::TableSelector *selector, InputsArray &&arguments, intptr_t type_args_len, const Array &argument_names)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
MethodRecognizer::Kind recognized_kind() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
static DoublePtr NewCanonical(double d)
intptr_t num_temps() const
virtual intptr_t InputCount() const
void DeepCopyToOuter(Zone *zone, Instruction *instr, intptr_t outer_deopt_id) const
void PushValue(Value *value)
intptr_t fixed_parameter_count() const
intptr_t LazyDeoptPruneCount() const
bool LazyDeoptToBeforeDeoptId() const
Value * ValueAt(intptr_t ix) const
void DeepCopyAfterTo(Zone *zone, Instruction *instr, intptr_t argc, Definition *dead, Definition *result) const
void DeepCopyTo(Zone *zone, Instruction *instr) const
Environment * DeepCopy(Zone *zone) const
Environment * outer() const
static Environment * From(Zone *zone, const GrowableArray< Definition * > &definitions, intptr_t fixed_parameter_count, intptr_t lazy_deopt_pruning_count, const ParsedFunction &parsed_function)
Environment(FlowGraphDeserializer *d)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
EqualityCompareInstr(const InstructionSource &source, Token::Kind kind, Value *left, Value *right, intptr_t cid, intptr_t deopt_id, bool null_aware=false, SpeculativeMode speculative_mode=kGuardInputs)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
bool is_null_aware() const
void set_null_aware(bool value)
static int64_t TruncateTo(int64_t v, Representation r)
static IntegerPtr BitLengthEvaluate(const Object &value, Representation representation, Thread *thread)
static IntegerPtr BinaryIntegerEvaluate(const Object &left, const Object &right, Token::Kind token_kind, bool is_truncating, Representation representation, Thread *thread)
static bool ToIntegerConstant(Value *value, int64_t *result)
static intptr_t GetResultCidOfListFactory(Zone *zone, const Function &function, intptr_t argument_count)
intptr_t CompoundReturnTypedDataIndex() const
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
virtual Representation RequiredInputRepresentation(intptr_t idx) const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr, VariadicDefinition, FIELD_LIST) private void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
virtual Representation representation() const
intptr_t TargetAddressIndex() const
StaticTypeExactnessState static_type_exactness_state() const
intptr_t guarded_cid() const
intptr_t guarded_list_length() const
AbstractTypePtr type() const
static Float32x4Ptr New(float value0, float value1, float value2, float value3, Heap::Space space=Heap::kNew)
static Float64x2Ptr New(double value0, double value1, Heap::Space space=Heap::kNew)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static bool LookupMethodFor(int class_id, const String &name, const ArgumentsDescriptor &args_desc, Function *fn_return, bool *class_is_abstract_return=nullptr)
static const CallTargets * ResolveCallTargetsForReceiverCid(intptr_t cid, const String &selector, const Array &args_desc_array)
ForwardInstructionIterator * current_iterator_
virtual void VisitBlocks()
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
bool should_print() const
bool IsCompiledForOsr() const
ConstantInstr * constant_dead() const
static Representation ReturnRepresentationOf(const Function &function)
bool should_remove_all_bounds_checks() const
static intptr_t ComputeArgumentsSizeInWords(const Function &function, intptr_t arguments_count)
static Representation ParameterRepresentationAt(const Function &function, intptr_t index)
ConstantInstr * constant_null() const
const Function & function() const
bool is_licm_allowed() const
bool unmatched_representations_allowed() const
Definition * TryCreateConstantReplacementFor(Definition *op, const Object &value)
bool ExtractExternalUntaggedPayload(Instruction *instr, Value *array, classid_t cid)
void CopyDeoptTarget(Instruction *to, Instruction *from)
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
Instruction * Current() const
void RemoveCurrentFromGraph()
MethodRecognizer::Kind recognized_kind() const
AbstractTypePtr result_type() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
virtual Representation representation() const
JoinEntryInstr * successor() const
virtual intptr_t SuccessorCount() const
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
void RelinkToOsrEntry(Zone *zone, intptr_t max_block_id)
bool IsCompiledForOsr() const
FunctionEntryInstr * normal_entry() const
FunctionEntryInstr * unchecked_entry() const
void set_unchecked_entry(FunctionEntryInstr *target)
void set_normal_entry(FunctionEntryInstr *entry)
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
CatchBlockEntryInstr * GetCatchEntry(intptr_t index)
ConstantInstr * constant_null()
void set_osr_entry(OsrEntryInstr *entry)
virtual intptr_t SuccessorCount() const
GraphEntryInstr(const ParsedFunction &parsed_function, intptr_t osr_id)
OsrEntryInstr * osr_entry() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual bool AttributesEqual(const Instruction &other) const
const Field & field() const
virtual bool AttributesEqual(const Instruction &other) const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual bool AttributesEqual(const Instruction &other) const
bool InstanceOfHasClassRange(const AbstractType &type, intptr_t *lower_limit, intptr_t *upper_limit)
bool CanUseGenericSubtypeRangeCheckFor(const AbstractType &type)
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
bool CanUseRecordSubtypeRangeCheckFor(const AbstractType &type)
bool CanUseSubtypeRangeCheckFor(const AbstractType &type)
intptr_t NumArgsTested() const
void GetClassIdsAt(intptr_t index, GrowableArray< intptr_t > *class_ids) const
intptr_t GetCountAt(intptr_t index) const
intptr_t NumberOfChecks() const
static bool Supports(ComparisonInstr *comparison, Value *v1, Value *v2)
ComparisonInstr * comparison() const
void ComputeOffsetTable(FlowGraphCompiler *compiler)
virtual intptr_t SuccessorCount() const
virtual TargetEntryInstr * SuccessorAt(intptr_t index) const
void set_ic_data(const ICData *value)
FunctionPtr ResolveForReceiverClass(const Class &cls, bool allow_add=true)
bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone *zone) const
Code::EntryKind entry_kind() const
const ICData * ic_data() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
void set_receiver_is_not_smi(bool value)
const Function & interface_target() const
Token::Kind token_kind() const
virtual intptr_t ArgumentsSize() const
bool receiver_is_not_smi() const
void UpdateReceiverSminess(Zone *zone)
virtual Representation representation() const
const String & function_name() const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(InstanceCallInstr, InstanceCallBaseInstr, FIELD_LIST) private const class BinaryFeedback * binary_
const class BinaryFeedback & BinaryFeedback()
PRINT_OPERANDS_TO_SUPPORT bool MatchesCoreName(const String &name)
const CallTargets & Targets()
void EnsureICData(FlowGraph *graph)
intptr_t checked_argument_count() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Value * type_arguments() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Value * function_type_arguments() const
const Code & GetStub() const
const AbstractType & type() const
virtual bool AttributesEqual(const Instruction &other) const
virtual void Accept(InstructionVisitor *visitor)=0
Instruction * next() const
virtual intptr_t InputCount() const =0
intptr_t GetDeoptId() const
void set_previous(Instruction *instr)
void SetEnvironment(Environment *deopt_env)
void InheritDeoptTargetAfter(FlowGraph *flow_graph, Definition *call, Definition *result)
void LinkTo(Instruction *next)
void InheritDeoptTarget(Zone *zone, Instruction *other)
virtual Value * InputAt(intptr_t i) const =0
void Goto(JoinEntryInstr *entry)
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
virtual BlockEntryInstr * GetBlock()
virtual void CopyDeoptIdFrom(const Instruction &instr)
Environment * env() const
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
friend class StrictCompareInstr
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
bool HasUnmatchedInputRepresentations() const
const char * ToCString() const
virtual uword Hash() const
Instruction * AppendInstruction(Instruction *tail)
void InitializeLocationSummary(Zone *zone, bool optimizing)
void CheckField(const Field &field) const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
virtual bool MayHaveVisibleEffect() const
virtual intptr_t ArgumentCount() const
void set_next(Instruction *instr)
static const intptr_t kInstructionAttrs[kNumInstructions]
bool IsDominatedBy(Instruction *dom)
bool Equals(const Instruction &other) const
static const ICData * GetICData(const ZoneGrowableArray< const ICData * > &ic_data_array, intptr_t deopt_id, bool is_static_call)
Definition * ArgumentAt(intptr_t index) const
void Unsupported(FlowGraphCompiler *compiler)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
virtual Representation representation() const
bool CanDeoptimize() const
void RepairArgumentUsesInEnvironment() const
void ReplaceInEnvironment(Definition *current, Definition *replacement)
Location::Kind RegisterKindForResult() const
virtual Tag tag() const =0
void SetInputAt(intptr_t i, Value *value)
InstructionSource source() const
Value * ArgumentValueAt(intptr_t index) const
virtual bool has_inlining_id() const
intptr_t deopt_id() const
void InsertAfter(Instruction *prev)
virtual intptr_t SuccessorCount() const
Instruction * RemoveFromGraph(bool return_previous=true)
SpeculativeMode SpeculativeModeOfInputs() const
virtual MoveArgumentsArray * GetMoveArguments() const
virtual bool CanTriggerGC() const
Instruction * previous() const
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
static Int32x4Ptr New(int32_t value0, int32_t value1, int32_t value2, int32_t value3, Heap::Space space=Heap::kNew)
IntConverterInstr(Representation from, Representation to, Value *value, intptr_t deopt_id)
virtual Representation representation() const
bool is_truncating() const
virtual bool ComputeCanDeoptimize() const
Representation to() const
Definition * Canonicalize(FlowGraph *flow_graph)
Representation from() const
static IntegerPtr New(const String &str, Heap::Space space=Heap::kNew)
static IntegerPtr NewCanonical(const String &str)
InvokeMathCFunctionInstr(InputsArray &&inputs, intptr_t deopt_id, MethodRecognizer::Kind recognized_kind, const InstructionSource &source)
const RuntimeEntry & TargetFunction() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind_)
intptr_t optimization_counter_threshold() const
ObjectStore * object_store() const
static IsolateGroup * Current()
ClassTable * class_table() const
void RemoveDeadPhis(Definition *replacement)
PhiInstr * InsertPhi(intptr_t var_index, intptr_t var_count)
virtual void AddPredecessor(BlockEntryInstr *predecessor)
intptr_t IndexOfPredecessor(BlockEntryInstr *pred) const
GrowableArray< BlockEntryInstr * > predecessors_
void RemovePhi(PhiInstr *phi)
virtual intptr_t PredecessorCount() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
intptr_t TargetAddressIndex() const
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
virtual bool MayCreateUnsafeUntaggedPointer() const
static LeafRuntimeCallInstr * Make(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations, InputsArray &&inputs)
Dart_NativeEntryResolver native_entry_resolver() const
static bool IsPrivateCoreLibName(const String &name, const String &member)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual Representation representation() const
bool IsImmutableLengthLoad() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
void set_loads_inner_pointer(InnerPointerAccess value)
const Slot & slot() const
virtual bool MayCreateUnsafeUntaggedPointer() const
bool IsImmutableLoad() const
static bool IsUnmodifiableTypedDataViewFactory(const Function &function)
InnerPointerAccess loads_inner_pointer() const
bool Evaluate(const Object &instance_value, Object *result)
static bool IsFixedLengthArrayCid(intptr_t cid)
virtual bool AttributesEqual(const Instruction &other) const
virtual Representation representation() const
static bool TryEvaluateLoad(const Object &instance, const Field &field, Object *result)
static bool IsTypedDataViewFactory(const Function &function)
bool MayCreateUntaggedAlias() const
intptr_t class_id() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static Representation ReturnRepresentation(intptr_t array_cid)
LoadIndexedInstr(Value *array, Value *index, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, CompileType *result_type=nullptr)
intptr_t index_scale() const
virtual bool AllowsCSE() const
const Field & field() const
virtual bool AttributesEqual(const Instruction &other) const
virtual Representation representation() const
Location temp(intptr_t index) const
Location out(intptr_t index) const
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
void set_temp(intptr_t index, Location loc)
intptr_t temp_count() const
RegisterSet * live_registers()
void set_out(intptr_t index, Location loc)
Location in(intptr_t index) const
void set_in(intptr_t index, Location loc)
static Location NoLocation()
static Location SameAsFirstInput()
static Location Pair(Location first, Location second)
intptr_t ToStackSlotOffset() const
const char * ToCString() const
intptr_t stack_index() const
Register base_reg() const
static Location RegisterLocation(Register reg)
PairLocation * AsPairLocation() const
static Location RequiresRegister()
bool IsPairLocation() const
static Location RequiresFpuRegister()
FpuRegister fpu_reg() const
const Object & constant() const
static Location Constant(const ConstantInstr *obj, int pair_index=0)
BlockEntryInstr * header() const
intptr_t NestingDepth() const
void RemapRegisters(intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
const Location & LocationAt(intptr_t i)
intptr_t result_cid() const
MethodRecognizer::Kind op_kind() const
virtual bool AttributesEqual(const Instruction &other) const
static MegamorphicCachePtr Lookup(Thread *thread, const String &name, const Array &descriptor)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr, TemplateInstruction, FIELD_LIST) private void EmitUnrolledCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, intptr_t num_elements, bool reversed)
Value * src_start() const
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
static intptr_t ResultCidFromPragma(const Object &function_or_field)
static intptr_t NumArgsCheckedForStaticCall(const Function &function)
static const char * KindToCString(Kind kind)
static intptr_t ParameterCountForResolution(const Function &function)
void set_is_bootstrap_native(bool value)
const String & native_name() const
virtual TokenPosition token_pos() const
void set_is_auto_scope(bool value)
bool is_bootstrap_native() const
const Function & function() const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(NativeEntryInstr, FunctionEntryInstr, FIELD_LIST) private void SaveArgument(FlowGraphCompiler *compiler, const compiler::ffi::NativeLocation &loc) const
static NativeFunction ResolveNative(const Library &library, const String &function_name, int number_of_arguments, bool *auto_setup_scope)
virtual Representation representation() const
virtual PRINT_OPERANDS_TO_SUPPORT Representation RequiredInputRepresentation(intptr_t idx) const
intptr_t GetClassId() const
bool Contains(uword addr) const
bool InVMIsolateHeap() const
virtual const char * ToCString() const
static Object & ZoneHandle()
Location At(intptr_t i) const
const Function & function() const
JoinEntryInstr * block() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition * GetReplacementForRedundantPhi() const
PhiInstr * Current() const
void RemoveCurrentFromGraph()
virtual intptr_t CallCount() const
bool IsSureToCallSingleRecognizedTarget() const
static TypePtr ComputeRuntimeType(const CallTargets &targets)
virtual Definition * Canonicalize(FlowGraph *graph)
bool HasOnlyDispatcherOrImplicitAccessorTargets() const
const CallTargets & targets() const
intptr_t total_call_count()
static RangeBoundary FromConstant(int64_t val)
static bool IsSingleton(Range *range)
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool Fits(Range *range, RangeBoundary::RangeSize size)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
bool IsWithin(int64_t min_int, int64_t max_int) const
static bool IsUnknown(const Range *other)
bool Fits(RangeBoundary::RangeSize size) const
bool Overlaps(int64_t min_int, int64_t max_int) const
intptr_t catch_try_index() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
AbstractTypePtr FieldTypeAt(intptr_t index) const
intptr_t NumFields() const
intptr_t num_fields() const
ObjectPtr FieldAt(intptr_t field_index) const
virtual Value * RedefinedValue() const
CompileType * constrained_type() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
void Remove(Location loc)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
RelationalOpInstr(const InstructionSource &source, Token::Kind kind, Value *left, Value *right, intptr_t cid, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
static void MessageF(Kind kind, const Script &script, TokenPosition token_pos, bool report_after_token, const char *format,...) PRINTF_ATTRIBUTE(5
static constexpr bool AtLocation
static FunctionPtr ResolveDynamicAnyArgs(Zone *zone, const Class &receiver_class, const String &function_name, bool allow_add)
static FunctionPtr ResolveDynamicForReceiverClass(const Class &receiver_class, const String &function_name, const ArgumentsDescriptor &args_desc, bool allow_add)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr, BinaryIntegerOpInstr, FIELD_LIST) protected bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Range * shift_range() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
virtual intptr_t InputCount() const
virtual Value * InputAt(intptr_t i) const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static SimdOpInstr * CreateFromCall(Zone *zone, MethodRecognizer::Kind kind, Definition *receiver, Instruction *call, intptr_t mask=0)
static SimdOpInstr * CreateFromFactoryCall(Zone *zone, MethodRecognizer::Kind kind, Instruction *call)
static Kind KindForMethod(MethodRecognizer::Kind method_kind)
static Kind KindForOperator(MethodRecognizer::Kind kind)
virtual Representation representation() const
const Field & field() const
Representation representation() const
intptr_t offset_in_bytes() const
bool is_compressed() const
compiler::Label * entry_label()
compiler::Label * exit_label()
static SmiPtr New(intptr_t value)
static intptr_t RawValue(intptr_t value)
static bool IsValid(int64_t value)
const ICData * ic_data() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StaticCallInstr, TemplateDartCall, FIELD_LIST) private const class BinaryFeedback * binary_
bool Evaluate(FlowGraph *flow_graph, const Object &argument, Object *result)
virtual intptr_t ArgumentsSize() const
static StaticCallInstr * FromCall(Zone *zone, const C *call, const Function &target, intptr_t call_count)
const Function & function() const
const class BinaryFeedback & BinaryFeedback()
void SetResultType(Zone *zone, CompileType new_type)
void set_is_known_list_constructor(bool value)
Code::EntryKind entry_kind() const
virtual Representation representation() const
bool InitResultType(Zone *zone)
const CallTargets & Targets()
virtual Definition * Canonicalize(FlowGraph *flow_graph)
bool IsTriviallyExact() const
bool NeedsFieldGuard() const
static StaticTypeExactnessState NotTracking()
InnerPointerAccess stores_inner_pointer() const
virtual Representation RequiredInputRepresentation(intptr_t index) const
void set_stores_inner_pointer(InnerPointerAccess value)
bool ShouldEmitStoreBarrier() const
bool is_initialization() const
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreFieldInstr, TemplateInstruction, FIELD_LIST)
intptr_t OffsetInBytes() const
compiler::Assembler::CanBeSmi CanValueBeSmi() const
const Slot & slot() const
virtual Representation RequiredInputRepresentation(intptr_t idx) const
StoreIndexedInstr(Value *array, Value *index, Value *value, StoreBarrierType emit_store_barrier, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, SpeculativeMode speculative_mode=kGuardInputs)
intptr_t class_id() const
static Representation ValueRepresentation(intptr_t array_cid)
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
intptr_t index_scale() const
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StrictCompareInstr, TemplateComparison, FIELD_LIST)
bool TryEmitBoolTest(FlowGraphCompiler *compiler, BranchLabels labels, intptr_t input_index, const Object &obj, Condition *condition_out)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
bool AttributesEqual(const Instruction &other) const
bool needs_number_check() const
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
bool Equals(const String &str) const
static StringPtr New(const char *cstr, Heap::Space space=Heap::kNew)
static CodePtr GetAllocationStubForClass(const Class &cls)
static CodePtr GetAllocationStubForTypedData(classid_t class_id)
SubtypeFinder(Zone *zone, GrowableArray< intptr_t > *cids, bool include_abstract)
void ScanImplementorClasses(const Class &klass)
bool has_type_args() const
intptr_t resume_deopt_id() const
kSuspendSyncStarAtYield
kSuspendSyncStarAtStart
Value * type_args() const
PRINT_OPERANDS_TO_SUPPORT
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static const String & True()
static const String & False()
static StringPtr FromConcatAll(Thread *thread, const GrowableHandlePtrArray< const String > &strs)
static StringPtr New(Thread *thread, const char *cstr)
static bool double_truncate_round_supported()
virtual Value * InputAt(intptr_t i) const
intptr_t type_args_len() const
const Array & argument_names() const
intptr_t ArgumentCount() const
intptr_t FirstArgIndex() const
intptr_t ArgumentCountWithoutTypeArgs() const
ArrayPtr GetArgumentsDescriptor() const
virtual bool MayThrow() const
bool calls_initializer() const
bool throw_exception_on_initialization() const
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Instruction * instruction() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
virtual bool AttributesEqual(const Instruction &other) const
TestCidsInstr(const InstructionSource &source, Token::Kind kind, Value *value, const ZoneGrowableArray< intptr_t > &cid_results, intptr_t deopt_id)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
const ZoneGrowableArray< intptr_t > & cid_results() const
TestIntInstr(const InstructionSource &source, Token::Kind kind, Representation representation, Value *left, Value *right)
static bool IsSupported(Representation representation)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
TestRangeInstr(const InstructionSource &source, Value *value, uword lower, uword upper, Representation value_representation)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
virtual bool AttributesEqual(const Instruction &other) const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
IsolateGroup * isolate_group() const
static Thread * Current()
IsolateGroup * isolate_group() const
static Token::Kind NegateComparison(Token::Kind op)
static intptr_t OutputIndexOf(Token::Kind token)
TruncDivModInstr(Value *lhs, Value *rhs, intptr_t deopt_id)
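Token::NegateComparison plausibly maps each comparison token to the token that is true exactly when the original is false. A sketch follows; CmpKind is a stand-in enum, not the VM's Token::Kind.

enum class CmpKind { kEQ, kNE, kLT, kGT, kLTE, kGTE };

CmpKind NegateComparisonSketch(CmpKind op) {
  switch (op) {
    case CmpKind::kEQ:  return CmpKind::kNE;
    case CmpKind::kNE:  return CmpKind::kEQ;
    case CmpKind::kLT:  return CmpKind::kGTE;
    case CmpKind::kGT:  return CmpKind::kLTE;
    case CmpKind::kLTE: return CmpKind::kGT;
    case CmpKind::kGTE: return CmpKind::kLT;
  }
  return op;  // unreachable for the kinds above
}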
AbstractTypePtr TypeAt(intptr_t index) const
static TypePtr StringType()
static TypePtr NullableNumber()
static TypePtr DartTypeType()
intptr_t ElementSizeInBytes() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
static UnaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode, Range *range)
Token::Kind op_kind() const
Definition * Canonicalize(FlowGraph *flow_graph)
virtual Representation representation() const
void set_speculative_mode(SpeculativeMode value)
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const
static UnboxInstr * Create(Representation to, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
bool is_truncating() const
virtual bool ComputeCanDeoptimize() const
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition * Canonicalize(FlowGraph *flow_graph)
DECLARE_ATTRIBUTES_NAMED(("value", "representation"),(&value(), representation())) private uword constant_address_
UnboxedConstantInstr(const Object &value, Representation representation)
bool IsScanFlagsUnboxed() const
static bool IsInt(intptr_t N, T value)
static constexpr int CountOneBitsWord(uword x)
static constexpr T Maximum(T x, T y)
static constexpr int ShiftForPowerOfTwo(T x)
static T Minimum(T x, T y)
static constexpr bool IsPowerOfTwo(T x)
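The Utils helpers above name classic bit tricks. Sketches of the standard formulations follow, assuming uword is an unsigned machine word; these are illustrations, not the VM's exact code.

#include <cstdint>

using uword_sketch = uintptr_t;

// A nonzero x is a power of two iff it has exactly one set bit.
constexpr bool IsPowerOfTwoSketch(uword_sketch x) {
  return x != 0 && (x & (x - 1)) == 0;
}

// For a power of two, the shift is the index of its single set bit.
constexpr int ShiftForPowerOfTwoSketch(uword_sketch x) {
  int shift = 0;
  while (x > 1) {
    x >>= 1;
    ++shift;
  }
  return shift;
}

// IsInt(N, v): does v fit in an N-bit signed integer?
constexpr bool IsIntSketch(int n, int64_t value) {
  const int64_t limit = INT64_C(1) << (n - 1);
  return -limit <= value && value < limit;
}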
void BindToEnvironment(Definition *definition)
void set_use_index(intptr_t index)
bool BindsToConstantNull() const
bool BindsToConstant() const
void set_previous_use(Value *previous)
bool CanBe(const Object &value)
static void AddToList(Value *value, Value **list)
bool Equals(const Value &other) const
intptr_t BoundSmiConstant() const
bool BindsToSmiConstant() const
Instruction * instruction() const
void set_next_use(Value *next)
Value * previous_use() const
const Object & BoundConstant() const
void set_definition(Definition *definition)
Value * CopyWithType(Zone *zone)
Definition * definition() const
void BindTo(Definition *definition)
Value(Definition *definition)
void RefineReachingType(CompileType *type)
void set_instruction(Instruction *instruction)
intptr_t InputCount() const
Value * InputAt(intptr_t i) const
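The Value accessors above (previous_use, next_use, definition, AddToList, BindTo) imply an intrusive doubly linked use list: each use links itself into its definition's chain so def-use edges can be walked and edited in O(1). A hedged sketch under stand-in names:

struct DefSketch;

struct UseSketch {
  DefSketch* definition = nullptr;
  UseSketch* previous_use = nullptr;
  UseSketch* next_use = nullptr;
};

struct DefSketch {
  UseSketch* input_use_list = nullptr;  // head of the use chain
};

// Mirrors Value::AddToList: push a use onto the front of a chain.
void AddToListSketch(UseSketch* use, UseSketch** list) {
  UseSketch* next = *list;
  *list = use;
  use->next_use = next;
  use->previous_use = nullptr;
  if (next != nullptr) next->previous_use = use;
}

// Mirrors Value::BindTo: point the use at a definition and register it.
void BindToSketch(UseSketch* use, DefSketch* def) {
  use->definition = def;
  AddToListSketch(use, &def->input_use_list);
}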
ElementType * Alloc(intptr_t length)
static bool EmittingComments()
const NativeLocation & Rebase(const NativeLocation &loc) const
const NativeLocations & locations() const
static const NativeCallingConvention & FromSignature(Zone *zone, const NativeFunctionType &signature)
const NativeLocation & return_location() const
const NativeLocations & argument_locations() const
intptr_t StackTopInBytes() const
static const NativeFunctionType * FromRepresentations(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations)
virtual bool IsMultiple() const
virtual bool IsFpuRegisters() const
virtual bool IsPointerToMemory() const
virtual bool IsBoth() const
const MultipleNativeLocations & AsMultiple() const
virtual bool IsStack() const
const NativeRegistersLocation & AsRegisters() const
const PointerToMemoryLocation & AsPointerToMemory() const
NativeLocation & WidenTo4Bytes(Zone *zone) const
virtual bool IsRegisters() const
const BothNativeLocations & AsBoth() const
const NativeType & payload_type() const
intptr_t num_regs() const
const NativeLocation & pointer_location() const
static word length_offset()
static word element_offset(intptr_t index)
static word OffsetOf(const dart::Field &field)
static word length_offset()
static word InstanceSize()
static word value_offset()
static intptr_t field_index_at_offset(intptr_t offset_in_bytes)
static word length_offset()
static word shared_field_table_values_offset()
static word length_offset()
#define THR_Print(format,...)
Dart_NativeFunction(* Dart_NativeEntryResolver)(Dart_Handle name, int num_of_arguments, bool *auto_setup_scope)
G_BEGIN_DECLS
G_MODULE_EXPORT FlValue * args
Dart_NativeFunction function
#define HANDLESCOPE(thread)
static float max(float r, float g, float b)
static float min(float r, float g, float b)
#define DEFINE_ACCEPT(ShortName, Attrs)
#define INSTR_ATTRS(type, attrs)
#define BOXING_IN_SET_CASE(unboxed, boxed)
#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M)
#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
#define BOXING_CID_CASE(unboxed, boxed)
#define CASE(Arity, Mask, Name, Args, Result)
#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed)
#define CASE_METHOD(Arity, Mask, Name,...)
#define FOR_EACH_INSTRUCTION(M)
#define SIMD_OP_LIST(M, BINARY_OP)
SK_API sk_sp< SkDocument > Make(SkWStream *dst, const SkSerialProcs *=nullptr, std::function< void(const SkPicture *)> onEndPage=nullptr)
bool Contains(const Container &container, const Value &value)
const intptr_t kResultIndex
word ToRawSmi(const dart::Object &a)
static constexpr intptr_t kWordSize
static constexpr word kBitsPerWord
constexpr intptr_t kSmiBits
constexpr OperandSize kWordBytes
static constexpr int kExitLinkSlotFromEntryFp
static AbstractTypePtr InstantiateType(const AbstractType &type, const AbstractType &instantiator)
bool IsTypedDataViewClassId(intptr_t index)
Location LocationRegisterOrConstant(Value *value)
static int64_t RepresentationMask(Representation r)
static Definition * CanonicalizeStrictCompare(StrictCompareInstr *compare, bool *negated, bool is_branch)
uword FindDoubleConstant(double value)
IntegerPtr DoubleToInteger(Zone *zone, double val)
static Condition InvertCondition(Condition c)
static Definition * CanonicalizeCommutativeDoubleArithmetic(Token::Kind op, Value *left, Value *right)
static bool BindsToGivenConstant(Value *v, intptr_t expected)
bool IsTypedDataBaseClassId(intptr_t index)
static constexpr const char * kNone
static const Representation kUnboxedBool
static constexpr Representation kUnboxedUword
static bool MayBeNumber(CompileType *type)
static const SimdOpInfo simd_op_information[]
static bool MayBeBoxableNumber(intptr_t cid)
constexpr int32_t kMinInt32
const Register kWriteBarrierValueReg
DART_EXPORT bool IsNull(Dart_Handle object)
bool IsTypeClassId(intptr_t index)
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
constexpr intptr_t kIntptrMin
static constexpr int kSavedCallerFpSlotFromFp
bool IsUnmodifiableTypedDataViewClassId(intptr_t index)
constexpr intptr_t kBitsPerByte
MallocGrowableArray< CidRangeValue > CidRangeVector
static int OrderByFrequencyThenId(CidRange *const *a, CidRange *const *b)
void RegisterTypeArgumentsUse(const Function &function, TypeUsageInfo *type_usage_info, const Class &klass, Definition *type_arguments)
static int OrderById(CidRange *const *a, CidRange *const *b)
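An OrderById-style comparator for CidRange pointers plausibly sorts ranges by starting class id, returning the negative/zero/positive result qsort-style comparators require. A sketch under a stand-in struct with the fields the name implies:

#include <cstdint>

struct CidRangeSketch {
  intptr_t cid_start;
  intptr_t cid_end;
};

static int OrderByIdSketch(CidRangeSketch* const* a, CidRangeSketch* const* b) {
  if ((*a)->cid_start < (*b)->cid_start) return -1;
  if ((*a)->cid_start > (*b)->cid_start) return 1;
  return 0;
}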
static bool RecognizeTestPattern(Value *left, Value *right, bool *negate)
static bool IsMarked(BlockEntryInstr *block, GrowableArray< BlockEntryInstr * > *preorder)
Location LocationRemapForSlowPath(Location loc, Definition *def, intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
static AlignmentType StrengthenAlignment(intptr_t cid, AlignmentType alignment)
static constexpr int kCallerSpSlotFromFp
static Definition * CanonicalizeStringInterpolate(StaticCallInstr *call, FlowGraph *flow_graph)
bool IsExternalPayloadClassId(classid_t cid)
constexpr intptr_t kInt32Size
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
constexpr intptr_t kBitsPerInt32
static Definition * CanonicalizeStringInterpolateSingle(StaticCallInstr *call, FlowGraph *flow_graph)
static bool IsFpCompare(ComparisonInstr *comp)
DEFINE_BACKEND(LoadThread,(Register out))
uint32_t FinalizeHash(uint32_t hash, intptr_t hashbits=kBitsPerInt32)
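CombineHashes and FinalizeHash pair a mixing step with a finalization step. A Jenkins-style sketch consistent with these signatures follows; the exact constants in the VM may differ.

#include <cstdint>

uint32_t CombineHashesSketch(uint32_t hash, uint32_t other_hash) {
  hash += other_hash;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t FinalizeHashSketch(uint32_t hash, int hashbits = 32) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hashbits < 32) hash &= (1u << hashbits) - 1;  // truncate to hashbits
  return hash == 0 ? 1 : hash;                      // keep hashes nonzero
}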
static constexpr Representation kUnboxedAddress
static bool IsSingleUseUnboxOrConstant(Value *use)
static intptr_t RepresentationBits(Representation r)
constexpr int32_t kMaxInt32
static const String & EvaluateToString(Zone *zone, Definition *defn)
static bool IsConstant(Definition *def, int64_t *val)
static const intptr_t kMaxElementSizeForEfficientCopy
bool IsIntegerClassId(intptr_t index)
constexpr bool FLAG_target_memory_sanitizer
static constexpr Representation SimdRepresentation(Representation rep)
const char *const function_name
static int8_t data[kExtLength]
void(* NativeFunction)(NativeArguments *arguments)
static intptr_t Usage(Thread *thread, const Function &function)
static ScaleFactor ToScaleFactor(intptr_t index_scale, bool index_unboxed)
static bool IsCommutative(Token::Kind op)
static constexpr intptr_t kInvalidTryIndex
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
ArrayOfTuplesView< MegamorphicCache::EntryType, std::tuple< Smi, Object > > MegamorphicCacheEntries
constexpr intptr_t kBitsPerInt64
bool IsExternalTypedDataClassId(intptr_t index)
static FunctionPtr FindBinarySmiOp(Zone *zone, const String &name)
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
constexpr intptr_t kIntptrMax
bool IsStringClassId(intptr_t index)
static bool AllInputsAreRedefinitions(PhiInstr *phi)
static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind)
DECLARE_FLAG(bool, show_invisible_frames)
DEF_SWITCHES_START (Flutter shell command-line switch definitions)
static bool Bind(PassBindingsCacheMTL &pass, ShaderStage stage, size_t bind_index, const BufferView &view)
static DecodeResult decode(std::string path)
int compare(const void *untyped_lhs, const void *untyped_rhs)
static SkString join(const CommandLineFlags::StringArray &)
static constexpr Register kResultReg
static constexpr Register kFunctionReg
static constexpr Register kContextReg
static constexpr Register kResultReg
static constexpr Register kInstantiatorTypeArgsReg
static constexpr Register kShapeReg
static constexpr Register kResultReg
static constexpr Register kResultReg
static constexpr Register kShapeReg
static constexpr Register kValue2Reg
static constexpr Register kValue0Reg
static constexpr Register kValue1Reg
static constexpr Register kLengthReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Register kSubTypeReg
static constexpr Register kSuperTypeReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kDstNameReg
static bool RequiresAllocation(Representation rep)
static bool Supports(Representation rep)
static constexpr Representation NativeRepresentation(Representation rep)
static intptr_t BoxCid(Representation rep)
static intptr_t ValueOffset(Representation rep)
bool IsIllegalRange() const
static constexpr Register kSourceReg
static constexpr Register kClassIdReg
static constexpr Register kResultReg
static constexpr Register kRecognizedKindReg
static constexpr FpuRegister kInputReg
static constexpr Register kArgsReg
static constexpr Register kFieldReg
static constexpr Register kResultReg
static constexpr Register kInstanceReg
static constexpr Register kResultReg
static constexpr Register kFieldReg
static constexpr Register kTypeArgsReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kTypeReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kResultTypeReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kUninstantiatedTypeArgumentsReg
static constexpr Register kResultTypeArgumentsReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kFieldReg
static constexpr Register kLengthReg
static constexpr Register kIndexReg
static constexpr Register kStackTraceReg
static constexpr Register kExceptionReg
static constexpr size_t ValueSize(Representation rep)
static constexpr bool IsUnboxedInteger(Representation rep)
static bool IsRepresentable(Representation rep, int64_t value)
static int64_t MaxValue(Representation rep)
static compiler::OperandSize OperandSize(Representation rep)
static int64_t MinValue(Representation rep)
static constexpr bool IsUnboxed(Representation rep)
static const char * ToCString(Representation rep)
static Representation RepresentationOfArrayElement(classid_t cid)
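Given MinValue and MaxValue per representation, IsRepresentable is naturally a bounds check. A sketch over a stand-in representation enum, not the VM's full set:

#include <cstdint>

enum class RepSketch { kUnboxedInt32, kUnboxedUint32, kUnboxedInt64 };

int64_t MinValueSketch(RepSketch rep) {
  switch (rep) {
    case RepSketch::kUnboxedInt32:  return INT32_MIN;
    case RepSketch::kUnboxedUint32: return 0;
    case RepSketch::kUnboxedInt64:  return INT64_MIN;
  }
  return 0;
}

int64_t MaxValueSketch(RepSketch rep) {
  switch (rep) {
    case RepSketch::kUnboxedInt32:  return INT32_MAX;
    case RepSketch::kUnboxedUint32: return UINT32_MAX;
    case RepSketch::kUnboxedInt64:  return INT64_MAX;
  }
  return 0;
}

// A value is representable iff it lies within the representation's bounds.
bool IsRepresentableSketch(RepSketch rep, int64_t value) {
  return MinValueSketch(rep) <= value && value <= MaxValueSketch(rep);
}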
static constexpr intptr_t kResumePcDistance
static constexpr Register kArgumentReg
static constexpr Register kTypeArgsReg
StaticTypeExactnessState exactness
static constexpr Register kExceptionReg