il.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
7#include "platform/assert.h"
8#include "platform/globals.h"
9#include "vm/bit_vector.h"
10#include "vm/bootstrap.h"
11#include "vm/code_entry_kind.h"
34#include "vm/constants.h"
35#include "vm/cpu.h"
36#include "vm/dart_entry.h"
37#include "vm/object.h"
38#include "vm/object_store.h"
39#include "vm/os.h"
41#include "vm/resolver.h"
42#include "vm/runtime_entry.h"
43#include "vm/scopes.h"
44#include "vm/stack_frame.h"
45#include "vm/stub_code.h"
46#include "vm/symbols.h"
48
50
51namespace dart {
52
53DEFINE_FLAG(bool,
54 propagate_ic_data,
55 true,
56 "Propagate IC data from unoptimized to optimized IC calls.");
57DEFINE_FLAG(bool,
58 two_args_smi_icd,
59 true,
60 "Generate special IC stubs for two args Smi operations");
61
62DECLARE_FLAG(bool, inline_alloc);
63DECLARE_FLAG(bool, use_slow_path);
64
65class SubtypeFinder {
66 public:
67 SubtypeFinder(Zone* zone,
68 GrowableArray<intptr_t>* cids,
69 bool include_abstract)
70 : array_handles_(zone),
71 class_handles_(zone),
72 cids_(cids),
73 include_abstract_(include_abstract) {}
74
75 void ScanImplementorClasses(const Class& klass) {
76 // An implementor of [klass] is
77 // * the [klass] itself.
78 // * all implementors of the direct subclasses of [klass].
79 // * all implementors of the direct implementors of [klass].
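 // For illustration (hypothetical Dart classes, not taken from this file):
 // given `abstract class A {}`, `class B extends A {}` and
 // `class C implements A {}`, scanning A collects the cids of B and C, and
 // of A itself only when include_abstract_ is set.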
80 if (include_abstract_ || !klass.is_abstract()) {
81 cids_->Add(klass.id());
82 }
83
84 ScopedHandle<GrowableObjectArray> array(&array_handles_);
85 ScopedHandle<Class> subclass_or_implementor(&class_handles_);
86
87 *array = klass.direct_subclasses();
88 if (!array->IsNull()) {
89 for (intptr_t i = 0; i < array->Length(); ++i) {
90 *subclass_or_implementor ^= (*array).At(i);
91 ScanImplementorClasses(*subclass_or_implementor);
92 }
93 }
94 *array = klass.direct_implementors();
95 if (!array->IsNull()) {
96 for (intptr_t i = 0; i < array->Length(); ++i) {
97 *subclass_or_implementor ^= (*array).At(i);
98 ScanImplementorClasses(*subclass_or_implementor);
99 }
100 }
101 }
102
103 private:
104 ReusableHandleStack<GrowableObjectArray> array_handles_;
105 ReusableHandleStack<Class> class_handles_;
106 GrowableArray<intptr_t>* cids_;
107 const bool include_abstract_;
108};
109
110const CidRangeVector& HierarchyInfo::SubtypeRangesForClass(
111 const Class& klass,
112 bool include_abstract,
113 bool exclude_null) {
114 ClassTable* table = thread()->isolate_group()->class_table();
115 const intptr_t cid_count = table->NumCids();
116 std::unique_ptr<CidRangeVector[]>* cid_ranges = nullptr;
117 if (include_abstract) {
118 cid_ranges = exclude_null ? &cid_subtype_ranges_abstract_nonnullable_
119 : &cid_subtype_ranges_abstract_nullable_;
120 } else {
121 cid_ranges = exclude_null ? &cid_subtype_ranges_nonnullable_
122 : &cid_subtype_ranges_nullable_;
123 }
124 if (*cid_ranges == nullptr) {
125 cid_ranges->reset(new CidRangeVector[cid_count]);
126 }
127 CidRangeVector& ranges = (*cid_ranges)[klass.id()];
128 if (ranges.length() == 0) {
129 BuildRangesFor(table, &ranges, klass, include_abstract, exclude_null);
130 }
131 return ranges;
132}
133
134class CidCheckerForRanges : public ValueObject {
135 public:
136 CidCheckerForRanges(Thread* thread,
137 ClassTable* table,
138 const Class& cls,
139 bool include_abstract,
140 bool exclude_null)
141 : thread_(thread),
142 table_(table),
143 supertype_(AbstractType::Handle(zone(), cls.RareType())),
144 include_abstract_(include_abstract),
145 exclude_null_(exclude_null),
146 to_check_(Class::Handle(zone())),
147 subtype_(AbstractType::Handle(zone())) {}
148
149 bool MayInclude(intptr_t cid) {
150 if (!table_->HasValidClassAt(cid)) return true;
151 if (cid == kTypeArgumentsCid) return true;
152 if (cid == kVoidCid) return true;
153 if (cid == kDynamicCid) return true;
154 if (cid == kNeverCid) return true;
155 if (!exclude_null_ && cid == kNullCid) return true;
156 to_check_ = table_->At(cid);
157 ASSERT(!to_check_.IsNull());
158 if (!include_abstract_ && to_check_.is_abstract()) return true;
159 return to_check_.IsTopLevel();
160 }
161
162 bool MustInclude(intptr_t cid) {
163 ASSERT(!MayInclude(cid));
164 if (cid == kNullCid) return false;
165 to_check_ = table_->At(cid);
166 subtype_ = to_check_.RareType();
167 // Create local zone because deep hierarchies may allocate lots of handles.
168 StackZone stack_zone(thread_);
169 HANDLESCOPE(thread_);
170 return subtype_.IsSubtypeOf(supertype_, Heap::kNew);
171 }
172
173 private:
174 Zone* zone() const { return thread_->zone(); }
175
176 Thread* const thread_;
177 ClassTable* const table_;
178 const AbstractType& supertype_;
179 const bool include_abstract_;
180 const bool exclude_null_;
181 Class& to_check_;
182 AbstractType& subtype_;
183};
184
185// Build the ranges either for:
186// "<obj> as <Type>", or
187// "<obj> is <Type>"
188void HierarchyInfo::BuildRangesUsingClassTableFor(ClassTable* table,
189 CidRangeVector* ranges,
190 const Class& klass,
191 bool include_abstract,
192 bool exclude_null) {
193 CidCheckerForRanges checker(thread(), table, klass, include_abstract,
194 exclude_null);
195 // Iterate over all cids to find the ones to be included in the ranges.
196 const intptr_t cid_count = table->NumCids();
197 intptr_t start = -1;
198 intptr_t end = -1;
199 for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) {
200 // Some cases are "don't care", i.e., they may or may not be included,
201 // whichever yields the fewest ranges for efficiency.
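 // Illustration (hypothetical cid values): if cids 10..12 and 14 must be
 // included and cid 13 is a "don't care", treating 13 as included lets the
 // loop emit the single range [10, 14] instead of [10, 12] and [14, 14].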
202 if (checker.MayInclude(cid)) continue;
203 if (checker.MustInclude(cid)) {
204 // On success, open a new or continue any open range.
205 if (start == -1) start = cid;
206 end = cid;
207 } else if (start != -1) {
208 // On failure, close any open range from start to end
209 // (the latter is the most recent successful "do-care" cid).
210 ranges->Add({start, end});
211 start = end = -1;
212 }
213 }
214
215 // Construct the last range if there is an open one.
216 if (start != -1) {
217 ranges->Add({start, end});
218 }
219}
220
221void HierarchyInfo::BuildRangesFor(ClassTable* table,
222 CidRangeVector* ranges,
223 const Class& dst_klass,
224 bool include_abstract,
225 bool exclude_null) {
226 // Use the class table in cases where the direct subclasses and implementors
227 // are not filled out.
228 if (dst_klass.InVMIsolateHeap() || dst_klass.id() == kInstanceCid) {
229 BuildRangesUsingClassTableFor(table, ranges, dst_klass, include_abstract,
230 exclude_null);
231 return;
232 }
233
234 Zone* zone = thread()->zone();
235 GrowableArray<intptr_t> cids;
236 SubtypeFinder finder(zone, &cids, include_abstract);
237 {
238 SafepointReadRwLocker ml(thread(),
239 thread()->isolate_group()->program_lock());
240 finder.ScanImplementorClasses(dst_klass);
241 }
242 if (cids.is_empty()) return;
243
244 // Sort all collected cids.
245 intptr_t* cids_array = cids.data();
246
247 qsort(cids_array, cids.length(), sizeof(intptr_t),
248 [](const void* a, const void* b) {
249 return static_cast<int>(*static_cast<const intptr_t*>(a) -
250 *static_cast<const intptr_t*>(b));
251 });
252
253 // Build ranges of all the cids.
254 CidCheckerForRanges checker(thread(), table, dst_klass, include_abstract,
255 exclude_null);
256 intptr_t left_cid = -1;
257 intptr_t right_cid = -1;
258 intptr_t previous_cid = -1;
259 for (intptr_t i = 0; i < cids.length(); ++i) {
260 const intptr_t current_cid = cids[i];
261 if (current_cid == previous_cid) continue; // Skip duplicates.
262
263 // We sorted, after all!
264 RELEASE_ASSERT(previous_cid < current_cid);
265
266 if (left_cid != -1) {
267 ASSERT(previous_cid != -1);
268 // Check the cids between the previous cid from cids and this one.
269 for (intptr_t j = previous_cid + 1; j < current_cid; ++j) {
270 // Stop if we find a do-care class before reaching the current cid.
271 if (!checker.MayInclude(j)) {
272 ranges->Add({left_cid, right_cid});
273 left_cid = right_cid = -1;
274 break;
275 }
276 }
277 }
278 previous_cid = current_cid;
279
280 if (checker.MayInclude(current_cid)) continue;
281 if (checker.MustInclude(current_cid)) {
282 if (left_cid == -1) {
283 // Open a new range starting at this cid.
284 left_cid = current_cid;
285 }
286 right_cid = current_cid;
287 } else if (left_cid != -1) {
288 // Close the existing range.
289 ranges->Add({left_cid, right_cid});
290 left_cid = right_cid = -1;
291 }
292 }
293
294 // If there is an open cid-range which we haven't finished yet, we'll
295 // complete it.
296 if (left_cid != -1) {
297 ranges->Add(CidRange{left_cid, right_cid});
298 }
299}
300
301bool HierarchyInfo::CanUseSubtypeRangeCheckFor(const AbstractType& type) {
302 ASSERT(type.IsFinalized());
303
304 if (!type.IsInstantiated() || !type.IsType()) {
305 return false;
306 }
307
308 // The FutureOr<T> type cannot be handled by checking whether the instance is
309 // a subtype of FutureOr and then checking whether the type argument `T`
310 // matches.
311 //
312 // Instead we would need to perform multiple checks:
313 //
314 // instance is Null || instance is T || instance is Future<T>
315 //
316 if (type.IsFutureOrType()) {
317 return false;
318 }
319
320 Zone* zone = thread()->zone();
321 const Class& type_class = Class::Handle(zone, type.type_class());
322 if (type_class.has_dynamically_extendable_subtypes()) {
323 return false;
324 }
325
326 // We can use class id range checks only if we don't have to test type
327 // arguments.
328 //
329 // This is e.g. true for "String" but also for "List<dynamic>". (A type for
330 // which the type arguments vector is instantiated to bounds is known as a
331 // rare type.)
332 if (type_class.IsGeneric()) {
333 const Type& rare_type = Type::Handle(zone, type_class.RareType());
334 if (!rare_type.IsSubtypeOf(type, Heap::kNew)) {
335 ASSERT(Type::Cast(type).arguments() != TypeArguments::null());
336 return false;
337 }
338 }
339
340 return true;
341}
342
343bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor(
344 const AbstractType& type) {
345 ASSERT(type.IsFinalized());
346
347 if (!type.IsType() || type.IsDartFunctionType()) {
348 return false;
349 }
350
351 // The FutureOr<T> type cannot be handled by checking whether the instance is
352 // a subtype of FutureOr and then checking whether the type argument `T`
353 // matches.
354 //
355 // Instead we would need to perform multiple checks:
356 //
357 // instance is Null || instance is T || instance is Future<T>
358 //
359 if (type.IsFutureOrType()) {
360 return false;
361 }
362
363 // NOTE: We do allow non-instantiated types here (in comparison to
364 // [CanUseSubtypeRangeCheckFor]), since we handle type parameters in the
365 // type expression in some cases (see below).
366
367 Zone* zone = thread()->zone();
368 const Class& type_class = Class::Handle(zone, type.type_class());
369 const intptr_t num_type_parameters = type_class.NumTypeParameters();
370 if (type_class.has_dynamically_extendable_subtypes()) {
371 return false;
372 }
373
374 // This function should only be called for generic classes.
375 ASSERT(type_class.NumTypeParameters() > 0 &&
376 Type::Cast(type).arguments() != TypeArguments::null());
377
378 const TypeArguments& ta =
379 TypeArguments::Handle(zone, Type::Cast(type).arguments());
380 ASSERT(ta.Length() == num_type_parameters);
381
382 // Ensure that each type argument can either be handled via
383 // [CidRange]-based checks or is a type parameter.
384 AbstractType& type_arg = AbstractType::Handle(zone);
385 for (intptr_t i = 0; i < num_type_parameters; ++i) {
386 type_arg = ta.TypeAt(i);
387 if (!CanUseSubtypeRangeCheckFor(type_arg) && !type_arg.IsTypeParameter()) {
388 return false;
389 }
390 }
391
392 return true;
393}
394
396 ASSERT(type.IsFinalized());
397 if (!type.IsRecordType()) {
398 return false;
399 }
400 const RecordType& rec = RecordType::Cast(type);
401 Zone* zone = thread()->zone();
402 auto& field_type = AbstractType::Handle(zone);
403 for (intptr_t i = 0, n = rec.NumFields(); i < n; ++i) {
404 field_type = rec.FieldTypeAt(i);
405 if (!CanUseSubtypeRangeCheckFor(field_type)) {
406 return false;
407 }
408 }
409 return true;
410}
411
412bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type,
413 intptr_t* lower_limit,
414 intptr_t* upper_limit) {
415 ASSERT(CompilerState::Current().is_aot());
416 if (type.IsNullable()) {
417 // 'is' test for nullable types should accept null cid in addition to the
418 // class range. In most cases it is not possible to extend class range to
419 // include kNullCid.
420 return false;
421 }
422 if (CanUseSubtypeRangeCheckFor(type)) {
423 const Class& type_class =
424 Class::Handle(thread()->zone(), type.type_class());
425 const CidRangeVector& ranges =
426 SubtypeRangesForClass(type_class,
427 /*include_abstract=*/false,
428 /*exclude_null=*/true);
429 if (ranges.length() == 1) {
430 const CidRangeValue& range = ranges[0];
431 ASSERT(!range.IsIllegalRange());
432 *lower_limit = range.cid_start;
433 *upper_limit = range.cid_end;
434 return true;
435 }
436 }
437 return false;
438}
439
440// The set of supported non-integer unboxed representations.
441// Format: (unboxed representations suffix, boxed class type)
442#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M) \
443 M(Double, Double) \
444 M(Float, Double) \
445 M(Float32x4, Float32x4) \
446 M(Float64x2, Float64x2) \
447 M(Int32x4, Int32x4)
448
449#define BOXING_IN_SET_CASE(unboxed, boxed) \
450 case kUnboxed##unboxed: \
451 return true;
452#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed) \
453 case kUnboxed##unboxed: \
454 return compiler::target::boxed::value_offset();
455#define BOXING_CID_CASE(unboxed, boxed) \
456 case kUnboxed##unboxed: \
457 return k##boxed##Cid;
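// A sketch of how these X-macros are used (expansion shown for one entry):
// BOXING_VALUE_OFFSET_CASE(Double, Double) expands to
//   case kUnboxedDouble:
//     return compiler::target::Double::value_offset();
// so each switch below gets one case per non-integer unboxed representation.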
458
459bool Boxing::Supports(Representation rep) {
460 if (RepresentationUtils::IsUnboxedInteger(rep)) {
461 return true;
462 }
463 switch (rep) {
464 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_IN_SET_CASE)
465 default:
466 return false;
467 }
468}
469
470bool Boxing::RequiresAllocation(Representation rep) {
471 if (RepresentationUtils::IsUnboxedInteger(rep)) {
472 return (kBitsPerByte * RepresentationUtils::ValueSize(rep)) >
473 compiler::target::kSmiBits;
474 }
475 return true;
476}
477
478intptr_t Boxing::ValueOffset(Representation rep) {
480 if (RepresentationUtils::IsUnboxedInteger(rep) &&
481 RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
482 return compiler::target::Mint::value_offset();
483 }
484 switch (rep) {
485 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_VALUE_OFFSET_CASE)
486 default:
487 UNREACHABLE();
488 return 0;
489 }
490}
491
492// Note that not all boxes require allocation (e.g., Smis).
493intptr_t Boxing::BoxCid(Representation rep) {
494 if (RepresentationUtils::IsUnboxedInteger(rep)) {
495 if (!Boxing::RequiresAllocation(rep)) {
496 return kSmiCid;
497 } else if (RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
498 return kMintCid;
499 }
500 }
501 switch (rep) {
502 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_CID_CASE)
503 default:
504 UNREACHABLE();
505 return kIllegalCid;
506 }
507}
508
509#undef BOXING_CID_CASE
510#undef BOXING_VALUE_OFFSET_CASE
511#undef BOXING_IN_SET_CASE
512#undef FOR_EACH_NON_INT_BOXED_REPRESENTATION
513
514#if defined(DEBUG)
515void Instruction::CheckField(const Field& field) const {
516 DEBUG_ASSERT(field.IsNotTemporaryScopedHandle());
518}
519#endif // DEBUG
520
521// A value in the constant propagation lattice.
522// - non-constant sentinel
523// - a constant (any non-sentinel value)
524// - unknown sentinel
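// A rough sketch of the lattice order (standard constant propagation, not
// code from this file): "unknown" can be lowered to a single concrete
// constant, and two different constants meet at the non-constant sentinel;
// e.g. a phi joining the constants 1 and 2 becomes non-constant.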
526 if (constant_value_ == nullptr) {
528 }
529 return *constant_value_;
530}
531
533 Definition* defn = this;
534 Value* unwrapped;
535 while ((unwrapped = defn->RedefinedValue()) != nullptr) {
536 defn = unwrapped->definition();
537 }
538 return defn;
539}
540
542 return nullptr;
543}
544
546 return value();
547}
548
550 return value();
551}
552
554 return value();
555}
556
558 return index();
559}
560
562 return value();
563}
564
566 return value();
567}
568
570 Definition* def = this;
571 while (true) {
572 Definition* orig;
573 if (def->IsConstraint() || def->IsBox() || def->IsUnbox() ||
574 def->IsIntConverter() || def->IsFloatToDouble() ||
575 def->IsDoubleToFloat()) {
576 orig = def->InputAt(0)->definition();
577 } else {
578 orig = def->OriginalDefinition();
579 }
580 if (orig == def) return def;
581 def = orig;
582 }
583}
584
586 if (def != nullptr) {
588 ->AsLoadField()) {
589 return load->IsImmutableLengthLoad();
590 }
591 }
592 return false;
593}
594
596 const ZoneGrowableArray<const ICData*>& ic_data_array,
597 intptr_t deopt_id,
598 bool is_static_call) {
599 // The deopt_id can be outside the range of the IC data array for
600 // computations added in the optimizing compiler.
602 if (deopt_id >= ic_data_array.length()) {
603 return nullptr;
604 }
605 const ICData* result = ic_data_array[deopt_id];
606 ASSERT(result == nullptr || is_static_call == result->is_static_call());
607 return result;
608}
609
611 uword result = tag();
612 for (intptr_t i = 0; i < InputCount(); ++i) {
613 Value* value = InputAt(i);
614 result = CombineHashes(result, value->definition()->ssa_temp_index());
615 }
616 return FinalizeHash(result, kBitsPerInt32 - 1);
617}
618
619bool Instruction::Equals(const Instruction& other) const {
620 if (tag() != other.tag()) return false;
621 if (InputCount() != other.InputCount()) return false;
622 for (intptr_t i = 0; i < InputCount(); ++i) {
623 if (!InputAt(i)->Equals(*other.InputAt(i))) return false;
624 }
625 return AttributesEqual(other);
626}
627
629 compiler->Bailout(ToCString());
630 UNREACHABLE();
631}
632
633bool Value::Equals(const Value& other) const {
634 return definition() == other.definition();
635}
636
637static int OrderById(CidRange* const* a, CidRange* const* b) {
638 // Negative if 'a' should sort before 'b'.
639 ASSERT((*a)->IsSingleCid());
640 ASSERT((*b)->IsSingleCid());
641 return (*a)->cid_start - (*b)->cid_start;
642}
643
644static int OrderByFrequencyThenId(CidRange* const* a, CidRange* const* b) {
645 const TargetInfo* target_info_a = static_cast<const TargetInfo*>(*a);
646 const TargetInfo* target_info_b = static_cast<const TargetInfo*>(*b);
647 // Negative if 'a' should sort before 'b'.
648 if (target_info_b->count != target_info_a->count) {
649 return (target_info_b->count - target_info_a->count);
650 } else {
651 return (*a)->cid_start - (*b)->cid_start;
652 }
653}
654
655bool Cids::Equals(const Cids& other) const {
656 if (length() != other.length()) return false;
657 for (int i = 0; i < length(); i++) {
658 if (cid_ranges_[i]->cid_start != other.cid_ranges_[i]->cid_start ||
659 cid_ranges_[i]->cid_end != other.cid_ranges_[i]->cid_end) {
660 return false;
661 }
662 }
663 return true;
664}
665
666intptr_t Cids::ComputeLowestCid() const {
667 intptr_t min = kIntptrMax;
668 for (intptr_t i = 0; i < cid_ranges_.length(); ++i) {
669 min = Utils::Minimum(min, cid_ranges_[i]->cid_start);
670 }
671 return min;
672}
673
674intptr_t Cids::ComputeHighestCid() const {
675 intptr_t max = -1;
676 for (intptr_t i = 0; i < cid_ranges_.length(); ++i) {
677 max = Utils::Maximum(max, cid_ranges_[i]->cid_end);
678 }
679 return max;
680}
681
682bool Cids::HasClassId(intptr_t cid) const {
683 for (int i = 0; i < length(); i++) {
684 if (cid_ranges_[i]->Contains(cid)) {
685 return true;
686 }
687 }
688 return false;
689}
690
692 Cids* cids = new (zone) Cids(zone);
693 cids->Add(new (zone) CidRange(cid, cid));
694 return cids;
695}
696
698 const BinaryFeedback& binary_feedback,
699 int argument_number) {
700 Cids* cids = new (zone) Cids(zone);
701 for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) {
702 ASSERT((argument_number == 0) || (argument_number == 1));
703 const intptr_t cid = argument_number == 0
704 ? binary_feedback.feedback_[i].first
705 : binary_feedback.feedback_[i].second;
706 cids->Add(new (zone) CidRange(cid, cid));
707 }
708
709 if (cids->length() != 0) {
710 cids->Sort(OrderById);
711
712 // Merge adjacent class id ranges.
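 // Illustration (hypothetical cids): [3, 5] followed by [6, 9] merges into
 // [3, 9], while [3, 5] followed by [8, 9] stays as two separate ranges.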
713 int dest = 0;
714 for (int src = 1; src < cids->length(); src++) {
715 if (cids->cid_ranges_[dest]->cid_end + 1 >=
716 cids->cid_ranges_[src]->cid_start) {
717 cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end;
718 } else {
719 dest++;
720 if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src];
721 }
722 }
723 cids->SetLength(dest + 1);
724 }
725
726 return cids;
727}
728
729static intptr_t Usage(Thread* thread, const Function& function) {
730 intptr_t count = function.usage_counter();
731 if (count < 0) {
732 if (function.HasCode()) {
733 // 'function' is queued for optimized compilation
735 } else {
736 count = 0;
737 }
738 } else if (Code::IsOptimized(function.CurrentCode())) {
739 // 'function' was optimized and stopped counting
741 }
742 return count;
743}
744
745void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) {
746 Function& dummy = Function::Handle(zone);
747
748 const intptr_t num_args_tested = ic_data.NumArgsTested();
749
750 for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
751 if (ic_data.GetCountAt(i) == 0) {
752 continue;
753 }
754
755 intptr_t id = kDynamicCid;
756 if (num_args_tested == 0) {
757 } else if (num_args_tested == 1) {
758 ic_data.GetOneClassCheckAt(i, &id, &dummy);
759 } else {
760 ASSERT(num_args_tested == 2);
761 GrowableArray<intptr_t> arg_ids;
762 ic_data.GetCheckAt(i, &arg_ids, &dummy);
763 id = arg_ids[0];
764 }
765 Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i));
766 intptr_t count = ic_data.GetCountAt(i);
767 cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count,
768 ic_data.GetExactnessAt(i)));
769 }
770
771 if (ic_data.is_megamorphic()) {
772 ASSERT(num_args_tested == 1); // Only 1-arg ICData will turn megamorphic.
773 const String& name = String::Handle(zone, ic_data.target_name());
774 const Array& descriptor =
775 Array::Handle(zone, ic_data.arguments_descriptor());
776 Thread* thread = Thread::Current();
777
778 const auto& cache = MegamorphicCache::Handle(
779 zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
780 {
781 SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex());
782 MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets()));
783 for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
784 const intptr_t id =
785 Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
786 if (id == kIllegalCid) {
787 continue;
788 }
789 Function& function = Function::ZoneHandle(zone);
791 const intptr_t filled_entry_count = cache.filled_entry_count();
792 ASSERT(filled_entry_count > 0);
793 cid_ranges_.Add(new (zone) TargetInfo(
794 id, id, &function, Usage(thread, function) / filled_entry_count,
796 }
797 }
798 }
799}
800
802 if (length() != 1) return false;
803 return cid_ranges_[0]->IsSingleCid();
804}
805
808 return cid_ranges_[0]->cid_start;
809}
810
813 return TargetAt(0)->exactness;
814}
815
817 switch (kind) {
818#define KIND_CASE(name) \
819 case k##name: \
820 return #name;
822#undef KIND_CASE
823 default:
824 UNREACHABLE();
825 return nullptr;
826 }
827}
828
830#define KIND_CASE(name) \
831 if (strcmp(str, #name) == 0) { \
832 *out = Kind::k##name; \
833 return true; \
834 }
836#undef KIND_CASE
837 return false;
838}
839
841 intptr_t deopt_id,
842 const Cids& cids,
844 : TemplateInstruction(source, deopt_id),
845 cids_(cids),
846 is_bit_test_(IsCompactCidRange(cids)),
847 token_pos_(source.token_pos) {
848 // Expected useful check data.
849 const intptr_t number_of_checks = cids.length();
850 ASSERT(number_of_checks > 0);
851 SetInputAt(0, value);
852 // Otherwise use CheckSmiInstr.
853 ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() ||
854 cids[0].cid_start != kSmiCid);
855}
856
858 auto const other_check = other.AsCheckClass();
859 ASSERT(other_check != nullptr);
860 return cids().Equals(other_check->cids());
861}
862
864 if (!cids().IsMonomorphic()) {
865 return false;
866 }
867 CompileType* in_type = value()->Type();
868 const intptr_t cid = cids().MonomorphicReceiverCid();
869 // Performance check: use CheckSmiInstr instead.
870 ASSERT(cid != kSmiCid);
871 return in_type->is_nullable() && (in_type->ToNullableCid() == cid);
872}
873
874// Null object is a singleton of null-class (except for some sentinel,
875// transitional temporaries). Instead of checking against the null class,
876// we can check against the null instance.
878 if (!cids().IsMonomorphic()) {
879 return false;
880 }
881 const intptr_t cid = cids().MonomorphicReceiverCid();
882 return cid == kNullCid;
883}
884
886 const intptr_t number_of_checks = cids.length();
887 // If there are only two checks, the extra register pressure needed for the
888 // dense-cid-range code is not justified.
889 if (number_of_checks <= 2) return false;
890
891 // TODO(fschneider): Support smis in dense cid checks.
892 if (cids.HasClassId(kSmiCid)) return false;
893
894 intptr_t min = cids.ComputeLowestCid();
895 intptr_t max = cids.ComputeHighestCid();
897}
898
900 return is_bit_test_;
901}
902
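// Worked example (hypothetical cid values): for the ranges {[10, 12], [15, 15]}
// the lowest cid is 10, so the mask below is 0b100111: bit i is set exactly
// when cid (10 + i) passes the check.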
903intptr_t CheckClassInstr::ComputeCidMask() const {
904 ASSERT(IsBitTest());
905 const uintptr_t one = 1;
906 intptr_t min = cids_.ComputeLowestCid();
907 intptr_t mask = 0;
908 for (intptr_t i = 0; i < cids_.length(); ++i) {
909 uintptr_t run;
910 uintptr_t range = one + cids_[i].Extent();
911 if (range >= static_cast<uintptr_t>(compiler::target::kBitsPerWord)) {
912 run = -1;
913 } else {
914 run = (one << range) - 1;
915 }
916 mask |= run << (cids_[i].cid_start - min);
917 }
918 return mask;
919}
920
922 return slot().representation();
923}
924
927 intptr_t num_context_variables,
928 intptr_t deopt_id)
929 : TemplateAllocation(source, deopt_id),
930 num_context_variables_(num_context_variables) {
931 // This instruction is not used in AOT for code size reasons.
932 ASSERT(!CompilerState::Current().is_aot());
933}
934
936 if (!HasUses()) return nullptr;
937 // Remove AllocateContext if it is only used as an object in StoreField
938 // instructions.
939 if (env_use_list() != nullptr) return this;
940 for (auto use : input_uses()) {
941 auto store = use->instruction()->AsStoreField();
942 if ((store == nullptr) ||
943 (use->use_index() != StoreFieldInstr::kInstancePos)) {
944 return this;
945 }
946 }
947 // Cleanup all StoreField uses.
948 while (input_use_list() != nullptr) {
950 }
951 return nullptr;
952}
953
955 if (!HasUses()) return nullptr;
956 return this;
957}
958
960 bool opt) const {
961 const intptr_t kNumInputs = InputCount();
962 const intptr_t kNumTemps = 0;
963 LocationSummary* locs = new (zone)
964 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
973 }
975 return locs;
976}
977
978void AllocateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
979 auto object_store = compiler->isolate_group()->object_store();
980 Code& stub = Code::ZoneHandle(compiler->zone());
982 if (is_generic()) {
983 stub = object_store->allocate_closure_ta_generic_stub();
984 } else {
985 stub = object_store->allocate_closure_ta_stub();
986 }
987 } else {
988 if (is_generic()) {
989 stub = object_store->allocate_closure_generic_stub();
990 } else {
991 stub = object_store->allocate_closure_stub();
992 }
993 }
994 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
995 locs(), deopt_id(), env());
996}
997
998LocationSummary* AllocateTypedDataInstr::MakeLocationSummary(Zone* zone,
999 bool opt) const {
1000 const intptr_t kNumInputs = 1;
1001 const intptr_t kNumTemps = 0;
1002 LocationSummary* locs = new (zone)
1003 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
1006 locs->set_out(
1008 return locs;
1009}
1010
1011void AllocateTypedDataInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1012 const Code& stub = Code::ZoneHandle(
1014 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
1015 locs(), deopt_id(), env());
1016}
1017
1019 intptr_t index) const {
1020 if (index == 0) {
1021 return slot_.has_untagged_instance() ? kUntagged : kTagged;
1022 }
1023 ASSERT_EQUAL(index, 1);
1024 return slot().representation();
1025}
1026
1028 // Dart objects are allocated null-initialized, which means we can
1029 // eliminate all initializing stores which store the null value.
1030 // Context objects can be allocated uninitialized as a performance
1031 // optimization in JIT mode; however, in AOT mode we always allocate them
1032 // null-initialized.
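 // Illustration (hypothetical Dart code): in
 //   class C { Object? foo; C() : foo = null; }
 // the store to `foo` writes the value the freshly allocated object already
 // contains, so canonicalization below removes it by returning nullptr.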
1033 if (is_initialization_ && !slot().has_untagged_instance() &&
1034 slot().representation() == kTagged &&
1035 (!slot().IsContextSlot() ||
1036 !instance()->definition()->IsAllocateUninitializedContext()) &&
1037 value()->BindsToConstantNull()) {
1038 return nullptr;
1039 }
1040
1041 if (slot().kind() == Slot::Kind::kPointerBase_data &&
1043 const intptr_t cid = instance()->Type()->ToNullableCid();
1044 // Pointers and ExternalTypedData objects never contain inner pointers.
1045 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
1047 }
1048 }
1049 return this;
1050}
1051
1053 return field().ptr() == other.AsGuardFieldClass()->field().ptr();
1054}
1055
1057 return field().ptr() == other.AsGuardFieldLength()->field().ptr();
1058}
1059
1061 return field().ptr() == other.AsGuardFieldType()->field().ptr();
1062}
1063
1065 // If all inputs needed to check instantiation are constant, instantiate the
1066 // sub and super type and remove the instruction if the subtype test succeeds.
1067 if (super_type()->BindsToConstant() && sub_type()->BindsToConstant() &&
1068 instantiator_type_arguments()->BindsToConstant() &&
1069 function_type_arguments()->BindsToConstant()) {
1070 auto Z = Thread::Current()->zone();
1071 const auto& constant_instantiator_type_args =
1073 ? TypeArguments::null_type_arguments()
1074 : TypeArguments::Cast(
1075 instantiator_type_arguments()->BoundConstant());
1076 const auto& constant_function_type_args =
1078 ? TypeArguments::null_type_arguments()
1079 : TypeArguments::Cast(function_type_arguments()->BoundConstant());
1080 auto& constant_sub_type = AbstractType::Handle(
1081 Z, AbstractType::Cast(sub_type()->BoundConstant()).ptr());
1082 auto& constant_super_type = AbstractType::Handle(
1083 Z, AbstractType::Cast(super_type()->BoundConstant()).ptr());
1084
1086 &constant_sub_type, &constant_super_type,
1087 constant_instantiator_type_args, constant_function_type_args)) {
1088 return nullptr;
1089 }
1090 }
1091 return this;
1092}
1093
1095 auto const other_op = other.AsStrictCompare();
1096 ASSERT(other_op != nullptr);
1097 return ComparisonInstr::AttributesEqual(other) &&
1098 (needs_number_check() == other_op->needs_number_check());
1099}
1100
1102 return handle_surrogates_ ? kCaseInsensitiveCompareUTF16RuntimeEntry
1103 : kCaseInsensitiveCompareUCS2RuntimeEntry;
1104}
1105
1107 auto const other_op = other.AsMathMinMax();
1108 ASSERT(other_op != nullptr);
1109 return (op_kind() == other_op->op_kind()) &&
1110 (result_cid() == other_op->result_cid());
1111}
1112
1114 ASSERT(other.tag() == tag());
1115 auto const other_op = other.AsBinaryIntegerOp();
1116 return (op_kind() == other_op->op_kind()) &&
1117 (can_overflow() == other_op->can_overflow()) &&
1118 (is_truncating() == other_op->is_truncating());
1119}
1120
1122 auto const other_load = other.AsLoadField();
1123 ASSERT(other_load != nullptr);
1124 return &this->slot_ == &other_load->slot_;
1125}
1126
1128 ASSERT(AllowsCSE());
1129 return field().ptr() == other.AsLoadStaticField()->field().ptr();
1130}
1131
1134 : TemplateDefinition(source), value_(value), token_pos_(source.token_pos) {
1135 // Check that the value is not an incorrect Integer representation.
1136 ASSERT(!value.IsMint() || !Smi::IsValid(Mint::Cast(value).AsInt64Value()));
1137 // Check that clones of fields are not stored as constants.
1138 ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
1139 // Check that all non-Smi objects are heap allocated and in old space.
1140 ASSERT(value.IsSmi() || value.IsOld());
1141#if defined(DEBUG)
1142 // Generally, instances in the flow graph should be canonical. Smis, null
1143 // values, and sentinel values are canonical by construction and so we skip
1144 // them here.
1145 if (!value.IsNull() && !value.IsSmi() && value.IsInstance() &&
1146 !value.IsCanonical() && (value.ptr() != Object::sentinel().ptr())) {
1147 // Arrays in ConstantInstrs are usually immutable and canonicalized, but
1148 // the Arrays created as backing for ArgumentsDescriptors may not be
1149 // canonicalized for space reasons when inlined in the IL. However, they
1150 // are still immutable.
1151 //
1152 // IRRegExp compilation uses non-canonical TypedData values as "constants".
1153 // Specifically, the bit tables used for certain character classes are
1154 // represented as TypedData, and so those values are also neither immutable
1155 // (as there are no immutable TypedData values) nor canonical.
1156 //
1157 // LibraryPrefixes are also never canonicalized since their equality is
1158 // their identity.
1159 ASSERT(value.IsArray() || value.IsTypedData() || value.IsLibraryPrefix());
1160 }
1161#endif
1162}
1163
1165 auto const other_constant = other.AsConstant();
1166 ASSERT(other_constant != nullptr);
1167 return (value().ptr() == other_constant->value().ptr() &&
1168 representation() == other_constant->representation());
1169}
1170
1172 Representation representation)
1174 representation_(representation),
1175 constant_address_(0) {
1176 if (representation_ == kUnboxedDouble) {
1177 ASSERT(value.IsDouble());
1179 }
1180}
1181
1182// Returns true if the value represents a constant.
1184 return definition()->OriginalDefinition()->IsConstant();
1185}
1186
1187bool Value::BindsToConstant(ConstantInstr** constant_defn) const {
1188 if (auto constant = definition()->OriginalDefinition()->AsConstant()) {
1189 *constant_defn = constant;
1190 return true;
1191 }
1192 return false;
1193}
1194
1195// Returns true if the value represents constant null.
1197 ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
1198 return (constant != nullptr) && constant->value().IsNull();
1199}
1200
1203 ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
1204 ASSERT(constant != nullptr);
1205 return constant->value();
1206}
1207
1209 return BindsToConstant() && BoundConstant().IsSmi();
1210}
1211
1212intptr_t Value::BoundSmiConstant() const {
1214 return Smi::Cast(BoundConstant()).Value();
1215}
1216
1218 intptr_t osr_id)
1219 : GraphEntryInstr(parsed_function,
1220 osr_id,
1221 CompilerState::Current().GetNextDeoptId()) {}
1222
1224 intptr_t osr_id,
1225 intptr_t deopt_id)
1228 deopt_id,
1229 /*stack_depth*/ 0),
1230 parsed_function_(parsed_function),
1231 catch_entries_(),
1232 indirect_entries_(),
1233 osr_id_(osr_id),
1234 entry_count_(0),
1235 spill_slot_count_(0),
1236 fixed_slot_count_(0) {}
1237
1240 for (intptr_t i = 0; i < initial_definitions()->length(); ++i) {
1241 ConstantInstr* defn = (*initial_definitions())[i]->AsConstant();
1242 if (defn != nullptr && defn->value().IsNull()) return defn;
1243 }
1244 UNREACHABLE();
1245 return nullptr;
1246}
1247
1249 // TODO(fschneider): Sort the catch entries by catch_try_index to avoid
1250 // searching.
1251 for (intptr_t i = 0; i < catch_entries_.length(); ++i) {
1252 if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i];
1253 }
1254 return nullptr;
1255}
1256
1258 return osr_id_ != Compiler::kNoOSRDeoptId;
1259}
1260
1261// ==== Support for visiting flow graphs.
1262
1263#define DEFINE_ACCEPT(ShortName, Attrs) \
1264 void ShortName##Instr::Accept(InstructionVisitor* visitor) { \
1265 visitor->Visit##ShortName(this); \
1266 }
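// For example, instantiating this macro as DEFINE_ACCEPT(Goto, ...) yields
//   void GotoInstr::Accept(InstructionVisitor* visitor) {
//     visitor->VisitGoto(this);
//   }
// which gives each instruction class its visitor double-dispatch hook.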
1267
1269
1270#undef DEFINE_ACCEPT
1271
1273 intptr_t use_index = 0;
1274 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) {
1275 Value* use = it.CurrentValue();
1276 use->set_instruction(this);
1277 use->set_use_index(use_index++);
1278 }
1279 env_ = deopt_env;
1280}
1281
1283 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1284 it.CurrentValue()->RemoveFromUseList();
1285 }
1286 env_ = nullptr;
1287}
1288
1290 Definition* replacement) {
1291 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1292 Value* use = it.CurrentValue();
1293 if (use->definition() == current) {
1294 use->RemoveFromUseList();
1295 use->set_definition(replacement);
1296 replacement->AddEnvUse(use);
1297 }
1298 }
1299}
1300
1302 ASSERT(!IsBlockEntry());
1303 ASSERT(!IsBranch());
1304 ASSERT(!IsThrow());
1305 ASSERT(!IsReturnBase());
1306 ASSERT(!IsReThrow());
1307 ASSERT(!IsGoto());
1308 ASSERT(previous() != nullptr);
1309 // We cannot assert that the instruction, if it is a definition, has no
1310 // uses. This function is used to remove instructions from the graph and
1311 // reinsert them elsewhere (e.g., hoisting).
1312 Instruction* prev_instr = previous();
1313 Instruction* next_instr = next();
1314 ASSERT(next_instr != nullptr);
1315 ASSERT(!next_instr->IsBlockEntry());
1316 prev_instr->LinkTo(next_instr);
1318 // Reset the successor and previous instruction to indicate that the
1319 // instruction is removed from the graph.
1320 set_previous(nullptr);
1321 set_next(nullptr);
1322 return return_previous ? prev_instr : next_instr;
1323}
1324
1326 ASSERT(previous_ == nullptr);
1327 ASSERT(next_ == nullptr);
1328 previous_ = prev;
1329 next_ = prev->next_;
1330 next_->previous_ = this;
1331 previous_->next_ = this;
1332
1333 // Update def-use chains whenever instructions are added to the graph
1334 // after initial graph construction.
1335 for (intptr_t i = InputCount() - 1; i >= 0; --i) {
1336 Value* input = InputAt(i);
1337 input->definition()->AddInputUse(input);
1338 }
1339}
1340
1342 LinkTo(tail);
1343 // Update def-use chains whenever instructions are added to the graph
1344 // after initial graph construction.
1345 for (intptr_t i = tail->InputCount() - 1; i >= 0; --i) {
1346 Value* input = tail->InputAt(i);
1347 input->definition()->AddInputUse(input);
1348 }
1349 return tail;
1350}
1351
1353 // TODO(fschneider): Implement a faster way to get the block of an
1354 // instruction.
1356 while ((result != nullptr) && !result->IsBlockEntry()) {
1357 result = result->previous();
1358 }
1359 // InlineExitCollector::RemoveUnreachableExits may call
1360 // Instruction::GetBlock on instructions which are not properly linked
1361 // to the flow graph (as collected exits may belong to unreachable
1362 // fragments), so this code should gracefully handle the absence of
1363 // BlockEntry.
1364 return (result != nullptr) ? result->AsBlockEntry() : nullptr;
1365}
1366
1368 current_ = current_->RemoveFromGraph(true); // Set current_ to previous.
1369}
1370
1372 current_ = current_->RemoveFromGraph(false); // Set current_ to next.
1373}
1374
1375// Default implementation of visiting basic blocks. Can be overridden.
1377 ASSERT(current_iterator_ == nullptr);
1378 for (intptr_t i = 0; i < block_order_->length(); ++i) {
1379 BlockEntryInstr* entry = (*block_order_)[i];
1380 entry->Accept(this);
1382 current_iterator_ = &it;
1383 for (; !it.Done(); it.Advance()) {
1384 it.Current()->Accept(this);
1385 }
1386 current_iterator_ = nullptr;
1387 }
1388}
1389
1391 Value* value = this;
1392 do {
1393 if (value->Type()->IsNull() ||
1394 (value->Type()->ToNullableCid() == kSmiCid) ||
1395 (value->Type()->ToNullableCid() == kBoolCid)) {
1396 return false;
1397 }
1398
1399 // Strictly speaking, the incremental barrier can only be skipped for
1400 // immediate objects (Smis) or permanent objects (vm-isolate heap or
1401 // image pages). For AOT, we choose to skip the barrier for any constant on
1402 // the assumptions that it will remain reachable through the object pool and
1403 // that it is on a page created by snapshot loading that is marked so as to
1404 // never be evacuated.
1405 if (value->BindsToConstant()) {
1406 if (FLAG_precompiled_mode) {
1407 return false;
1408 } else {
1409 const Object& constant = value->BoundConstant();
1410 return constant.ptr()->IsHeapObject() && !constant.InVMIsolateHeap();
1411 }
1412 }
1413
1414 // Follow the chain of redefinitions as redefined value could have a more
1415 // accurate type (for example, AssertAssignable of Smi to a generic T).
1416 value = value->definition()->RedefinedValue();
1417 } while (value != nullptr);
1418
1419 return true;
1420}
1421
1423 // Require the predecessors to be sorted by block_id to make managing
1424 // their corresponding phi inputs simpler.
1425 intptr_t pred_id = predecessor->block_id();
1426 intptr_t index = 0;
1427 while ((index < predecessors_.length()) &&
1428 (predecessors_[index]->block_id() < pred_id)) {
1429 ++index;
1430 }
1431#if defined(DEBUG)
1432 for (intptr_t i = index; i < predecessors_.length(); ++i) {
1433 ASSERT(predecessors_[i]->block_id() != pred_id);
1434 }
1435#endif
1436 predecessors_.InsertAt(index, predecessor);
1437}
1438
1440 for (intptr_t i = 0; i < predecessors_.length(); ++i) {
1441 if (predecessors_[i] == pred) return i;
1442 }
1443 return -1;
1444}
1445
1447 ASSERT(value->next_use() == nullptr);
1448 ASSERT(value->previous_use() == nullptr);
1449 Value* next = *list;
1450 ASSERT(value != next);
1451 *list = value;
1452 value->set_next_use(next);
1453 value->set_previous_use(nullptr);
1454 if (next != nullptr) next->set_previous_use(value);
1455}
1456
1458 Definition* def = definition();
1459 Value* next = next_use();
1460 if (this == def->input_use_list()) {
1462 if (next != nullptr) next->set_previous_use(nullptr);
1463 } else if (this == def->env_use_list()) {
1464 def->set_env_use_list(next);
1465 if (next != nullptr) next->set_previous_use(nullptr);
1466 } else if (Value* prev = previous_use()) {
1467 prev->set_next_use(next);
1468 if (next != nullptr) next->set_previous_use(prev);
1469 }
1470
1471 set_previous_use(nullptr);
1472 set_next_use(nullptr);
1473}
1474
1475// True if the definition has a single input use and is used only in
1476// environments at the same instruction as that input use.
1478 if (!HasOnlyInputUse(use)) {
1479 return false;
1480 }
1481
1482 Instruction* target = use->instruction();
1483 for (Value::Iterator it(env_use_list()); !it.Done(); it.Advance()) {
1484 if (it.Current()->instruction() != target) return false;
1485 }
1486 return true;
1487}
1488
1490 return (input_use_list() == use) && (use->next_use() == nullptr);
1491}
1492
1494 ASSERT(other != nullptr);
1495 ASSERT(this != other);
1496
1497 Value* current = nullptr;
1499 if (next != nullptr) {
1500 // Change all the definitions.
1501 while (next != nullptr) {
1502 current = next;
1503 current->set_definition(other);
1504 current->RefineReachingType(other->Type());
1505 next = current->next_use();
1506 }
1507
1508 // Concatenate the lists.
1509 next = other->input_use_list();
1510 current->set_next_use(next);
1511 if (next != nullptr) next->set_previous_use(current);
1513 set_input_use_list(nullptr);
1514 }
1515
1516 // Repeat for environment uses.
1517 current = nullptr;
1518 next = env_use_list();
1519 if (next != nullptr) {
1520 while (next != nullptr) {
1521 current = next;
1522 current->set_definition(other);
1523 current->RefineReachingType(other->Type());
1524 next = current->next_use();
1525 }
1526 next = other->env_use_list();
1527 current->set_next_use(next);
1528 if (next != nullptr) next->set_previous_use(current);
1530 set_env_use_list(nullptr);
1531 }
1532}
1533
1535 for (intptr_t i = InputCount() - 1; i >= 0; --i) {
1537 }
1538 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1539 it.CurrentValue()->RemoveFromUseList();
1540 }
1541}
1542
1544 // Some calls (e.g. closure calls) have more inputs than actual arguments.
1545 // Those extra inputs will be consumed from the stack before the call.
1546 const intptr_t after_args_input_count = env()->LazyDeoptPruneCount();
1547 MoveArgumentsArray* move_arguments = GetMoveArguments();
1548 ASSERT(move_arguments != nullptr);
1549 const intptr_t arg_count = ArgumentCount();
1550 ASSERT((arg_count + after_args_input_count) <= env()->Length());
1551 const intptr_t env_base =
1552 env()->Length() - arg_count - after_args_input_count;
1553 for (intptr_t i = 0; i < arg_count; ++i) {
1554 env()->ValueAt(env_base + i)->BindToEnvironment(move_arguments->At(i));
1555 }
1556}
1557
1560 Definition* result) {
1561 ASSERT(call->env() != nullptr);
1562 deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_);
1563 call->env()->DeepCopyAfterTo(
1564 flow_graph->zone(), this, call->ArgumentCount(),
1565 flow_graph->constant_dead(),
1566 result != nullptr ? result : flow_graph->constant_dead());
1567}
1568
1570 ASSERT(other->env() != nullptr);
1571 CopyDeoptIdFrom(*other);
1572 other->env()->DeepCopyTo(zone, this);
1573}
1574
1576 ASSERT(const_cast<Instruction*>(this)->GetBlock() == block);
1577 return !MayHaveVisibleEffect() && !CanDeoptimize() &&
1578 this != block->last_instruction();
1579}
1580
1582 BlockEntryInstr* block = GetBlock();
1583 BlockEntryInstr* dom_block = dom->GetBlock();
1584
1585 if (dom->IsPhi()) {
1586 dom = dom_block;
1587 }
1588
1589 if (block == dom_block) {
1590 if ((block == dom) || (this == block->last_instruction())) {
1591 return true;
1592 }
1593
1594 if (IsPhi()) {
1595 return false;
1596 }
1597
1598 for (Instruction* curr = dom->next(); curr != nullptr;
1599 curr = curr->next()) {
1600 if (curr == this) return true;
1601 }
1602
1603 return false;
1604 }
1605
1606 return dom_block->Dominates(block);
1607}
1608
1610 for (intptr_t i = 0; i < InputCount(); i++) {
1611 Definition* input = InputAt(i)->definition();
1612 const Representation input_representation = RequiredInputRepresentation(i);
1613 if (input_representation != kNoRepresentation &&
1614 input_representation != input->representation()) {
1615 return true;
1616 }
1617 }
1618
1619 return false;
1620}
1621
1622const intptr_t Instruction::kInstructionAttrs[Instruction::kNumInstructions] = {
1623#define INSTR_ATTRS(type, attrs) InstrAttrs::attrs,
1625#undef INSTR_ATTRS
1626};
1627
1629 return (kInstructionAttrs[tag()] & InstrAttrs::kNoGC) == 0;
1630}
1631
1633 Definition* replacement_for_uses,
1634 ForwardInstructionIterator* iterator) {
1635 // Record replacement's input uses.
1636 for (intptr_t i = replacement->InputCount() - 1; i >= 0; --i) {
1637 Value* input = replacement->InputAt(i);
1638 input->definition()->AddInputUse(input);
1639 }
1640 // Take replacement's environment from this definition.
1641 ASSERT(replacement->env() == nullptr);
1642 replacement->SetEnvironment(env());
1643 ClearEnv();
1644 // Replace all uses of this definition with replacement_for_uses.
1645 ReplaceUsesWith(replacement_for_uses);
1646
1647 // Finally replace this one with the replacement instruction in the graph.
1648 previous()->LinkTo(replacement);
1649 if ((iterator != nullptr) && (this == iterator->Current())) {
1650 // Remove through the iterator.
1651 replacement->LinkTo(this);
1652 iterator->RemoveCurrentFromGraph();
1653 } else {
1654 replacement->LinkTo(next());
1655 // Remove this definition's input uses.
1657 }
1658 set_previous(nullptr);
1659 set_next(nullptr);
1660}
1661
1663 ForwardInstructionIterator* iterator) {
1664 // Reuse this instruction's SSA name for other.
1665 ASSERT(!other->HasSSATemp());
1666 if (HasSSATemp()) {
1668 }
1669 ReplaceWithResult(other, other, iterator);
1670}
1671
1673 for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) {
1674 Value* input = new_comparison->InputAt(i);
1675 input->definition()->AddInputUse(input);
1676 input->set_instruction(this);
1677 }
1678 // There should be no need to copy or unuse an environment.
1679 ASSERT(comparison()->env() == nullptr);
1680 ASSERT(new_comparison->env() == nullptr);
1681 // Remove the current comparison's input uses.
1683 ASSERT(!new_comparison->HasUses());
1684 comparison_ = new_comparison;
1685}
1686
1687// ==== Postorder graph traversal.
1688static bool IsMarked(BlockEntryInstr* block,
1690 // Detect that a block has been visited as part of the current
1691 // DiscoverBlocks (we can call DiscoverBlocks multiple times). The block
1692 // will be 'marked' by (1) having a preorder number in the range of the
1693 // preorder array and (2) being in the preorder array at that index.
1694 intptr_t i = block->preorder_number();
1695 return (i >= 0) && (i < preorder->length()) && ((*preorder)[i] == block);
1696}
1697
1698// Base class implementation used for JoinEntry and TargetEntry.
1701 GrowableArray<intptr_t>* parent) {
1702 // If this block has a predecessor (i.e., is not the graph entry) we can
1703 // assume the preorder array is non-empty.
1704 ASSERT((predecessor == nullptr) || !preorder->is_empty());
1705 // Blocks with a single predecessor cannot have been reached before.
1706 ASSERT(IsJoinEntry() || !IsMarked(this, preorder));
1707
1708 // 1. If the block has already been reached, add current_block as a
1709 // basic-block predecessor and we are done.
1710 if (IsMarked(this, preorder)) {
1711 ASSERT(predecessor != nullptr);
1712 AddPredecessor(predecessor);
1713 return false;
1714 }
1715
1716 // 2. Otherwise, clear the predecessors which might have been computed on
1717 // some earlier call to DiscoverBlocks and record this predecessor.
1719 if (predecessor != nullptr) AddPredecessor(predecessor);
1720
1721 // 3. The predecessor is the spanning-tree parent. The graph entry has no
1722 // parent, indicated by -1.
1723 intptr_t parent_number =
1724 (predecessor == nullptr) ? -1 : predecessor->preorder_number();
1725 parent->Add(parent_number);
1726
1727 // 4. Assign the preorder number and add the block entry to the list.
1728 set_preorder_number(preorder->length());
1729 preorder->Add(this);
1730
1731 // The preorder and parent arrays are indexed by
1732 // preorder block number, so they should stay in lockstep.
1733 ASSERT(preorder->length() == parent->length());
1734
1735 // 5. Iterate straight-line successors to record assigned variables and
1736 // find the last instruction in the block. The graph entry block consists
1737 // of only the entry instruction, so that is the last instruction in the
1738 // block.
1739 Instruction* last = this;
1740 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1741 last = it.Current();
1742 }
1744 if (last->IsGoto()) last->AsGoto()->set_block(this);
1745
1746 return true;
1747}
1748
1749void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) {
1750 ASSERT(osr_id_ != Compiler::kNoOSRDeoptId);
1751 BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1);
1752 bool found = FindOsrEntryAndRelink(this, /*parent=*/nullptr, block_marks);
1753 ASSERT(found);
1754}
1755
1757 Instruction* parent,
1758 BitVector* block_marks) {
1759 const intptr_t osr_id = graph_entry->osr_id();
1760
1761 // Search for the instruction with the OSR id. Use a depth first search
1762 // because basic blocks have not been discovered yet. Prune unreachable
1763 // blocks by replacing the normal entry with a jump to the block
1764 // containing the OSR entry point.
1765
1766 // Do not visit blocks more than once.
1767 if (block_marks->Contains(block_id())) return false;
1768 block_marks->Add(block_id());
1769
1770 // Search this block for the OSR id.
1771 Instruction* instr = this;
1772 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1773 instr = it.Current();
1774 if (instr->GetDeoptId() == osr_id) {
1775 // Sanity check that we found a stack check instruction.
1776 ASSERT(instr->IsCheckStackOverflow());
1777 // Loop stack checks are always in join blocks so that they can
1778 // be the target of a goto.
1779 ASSERT(IsJoinEntry());
1780 // The instruction should be the first instruction in the block so
1781 // we can simply jump to the beginning of the block.
1782 ASSERT(instr->previous() == this);
1783
1784 ASSERT(stack_depth() == instr->AsCheckStackOverflow()->stack_depth());
1785 auto normal_entry = graph_entry->normal_entry();
1786 auto osr_entry = new OsrEntryInstr(
1787 graph_entry, normal_entry->block_id(), normal_entry->try_index(),
1788 normal_entry->deopt_id(), stack_depth());
1789
1790 auto goto_join = new GotoInstr(AsJoinEntry(),
1791 CompilerState::Current().GetNextDeoptId());
1792 ASSERT(parent != nullptr);
1793 goto_join->CopyDeoptIdFrom(*parent);
1794 osr_entry->LinkTo(goto_join);
1795
1796 // Remove normal function entries & add osr entry.
1797 graph_entry->set_normal_entry(nullptr);
1798 graph_entry->set_unchecked_entry(nullptr);
1799 graph_entry->set_osr_entry(osr_entry);
1800
1801 return true;
1802 }
1803 }
1804
1805 // Recursively search the successors.
1806 for (intptr_t i = instr->SuccessorCount() - 1; i >= 0; --i) {
1807 if (instr->SuccessorAt(i)->FindOsrEntryAndRelink(graph_entry, instr,
1808 block_marks)) {
1809 return true;
1810 }
1811 }
1812 return false;
1813}
1814
1816 // TODO(fschneider): Make this faster by e.g. storing dominators for each
1817 // block while computing the dominator tree.
1818 ASSERT(other != nullptr);
1819 BlockEntryInstr* current = other;
1820 while (current != nullptr && current != this) {
1821 current = current->dominator();
1822 }
1823 return current == this;
1824}
1825
1828 if ((last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this)) {
1829 return dominator();
1830 }
1831 return nullptr;
1832}
1833
1835 return loop_info_ != nullptr && loop_info_->header() == this;
1836}
1837
1839 return loop_info_ == nullptr ? 0 : loop_info_->NestingDepth();
1840}
1841
1842// Helper to mutate the graph during inlining. This block should be
1843// replaced with new_block as a predecessor of all of this block's
1844// successors. For each successor, the predecessors will be reordered
1845// to preserve block-order sorting of the predecessors as well as the
1846// phis if the successor is a join.
1848 // Set the last instruction of the new block to that of the old block.
1849 Instruction* last = last_instruction();
1850 new_block->set_last_instruction(last);
1851 // For each successor, update the predecessors.
1852 for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) {
1853 // If the successor is a target, update its predecessor.
1854 TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry();
1855 if (target != nullptr) {
1856 target->predecessor_ = new_block;
1857 continue;
1858 }
1859 // If the successor is a join, update each predecessor and the phis.
1860 JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry();
1861 ASSERT(join != nullptr);
1862 // Find the old predecessor index.
1863 intptr_t old_index = join->IndexOfPredecessor(this);
1864 intptr_t pred_count = join->PredecessorCount();
1865 ASSERT(old_index >= 0);
1866 ASSERT(old_index < pred_count);
1867 // Find the new predecessor index while reordering the predecessors.
1868 intptr_t new_id = new_block->block_id();
1869 intptr_t new_index = old_index;
1870 if (block_id() < new_id) {
1871 // Search upwards, bubbling down intermediate predecessors.
1872 for (; new_index < pred_count - 1; ++new_index) {
1873 if (join->predecessors_[new_index + 1]->block_id() > new_id) break;
1874 join->predecessors_[new_index] = join->predecessors_[new_index + 1];
1875 }
1876 } else {
1877 // Search downwards, bubbling up intermediate predecessors.
1878 for (; new_index > 0; --new_index) {
1879 if (join->predecessors_[new_index - 1]->block_id() < new_id) break;
1880 join->predecessors_[new_index] = join->predecessors_[new_index - 1];
1881 }
1882 }
1883 join->predecessors_[new_index] = new_block;
1884 // If the new and old predecessor index match there is nothing to update.
1885 if ((join->phis() == nullptr) || (old_index == new_index)) return;
1886 // Otherwise, reorder the predecessor uses in each phi.
1887 for (PhiIterator it(join); !it.Done(); it.Advance()) {
1888 PhiInstr* phi = it.Current();
1889 ASSERT(phi != nullptr);
1890 ASSERT(pred_count == phi->InputCount());
1891 // Save the predecessor use.
1892 Value* pred_use = phi->InputAt(old_index);
1893 // Move uses between old and new.
1894 intptr_t step = (old_index < new_index) ? 1 : -1;
1895 for (intptr_t use_idx = old_index; use_idx != new_index;
1896 use_idx += step) {
1897 phi->SetInputAt(use_idx, phi->InputAt(use_idx + step));
1898 }
1899 // Write the predecessor use.
1900 phi->SetInputAt(new_index, pred_use);
1901 }
1902 }
1903}
1904
1906 JoinEntryInstr* join = this->AsJoinEntry();
1907 if (join != nullptr) {
1908 for (PhiIterator it(join); !it.Done(); it.Advance()) {
1909 it.Current()->UnuseAllInputs();
1910 }
1911 }
1913 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1914 it.Current()->UnuseAllInputs();
1915 }
1916}
1917
1918PhiInstr* JoinEntryInstr::InsertPhi(intptr_t var_index, intptr_t var_count) {
1919 // Lazily initialize the array of phis.
1920 // Currently, phis are stored in a sparse array that holds the phi
1921 // for variable with index i at position i.
1922 // TODO(fschneider): Store phis in a more compact way.
1923 if (phis_ == nullptr) {
1924 phis_ = new ZoneGrowableArray<PhiInstr*>(var_count);
1925 for (intptr_t i = 0; i < var_count; i++) {
1926 phis_->Add(nullptr);
1927 }
1928 }
1929 ASSERT((*phis_)[var_index] == nullptr);
1930 return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount());
1931}
1932
1934 // Lazily initialize the array of phis.
1935 if (phis_ == nullptr) {
1936 phis_ = new ZoneGrowableArray<PhiInstr*>(1);
1937 }
1938 phis_->Add(phi);
1939}
1940
1942 ASSERT(phis_ != nullptr);
1943 for (intptr_t index = 0; index < phis_->length(); ++index) {
1944 if (phi == (*phis_)[index]) {
1945 (*phis_)[index] = phis_->Last();
1946 phis_->RemoveLast();
1947 return;
1948 }
1949 }
1950}
1951
1953 if (phis_ == nullptr) return;
1954
1955 intptr_t to_index = 0;
1956 for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) {
1957 PhiInstr* phi = (*phis_)[from_index];
1958 if (phi != nullptr) {
1959 if (phi->is_alive()) {
1960 (*phis_)[to_index++] = phi;
1961 for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) {
1962 Value* input = phi->InputAt(i);
1963 input->definition()->AddInputUse(input);
1964 }
1965 } else {
1966 phi->ReplaceUsesWith(replacement);
1967 }
1968 }
1969 }
1970 if (to_index == 0) {
1971 phis_ = nullptr;
1972 } else {
1973 phis_->TruncateTo(to_index);
1974 }
1975}
1976
1978 return 0;
1979}
1980
1982 // Called only if index is in range. Only control-transfer instructions
1983 // can have non-zero successor counts and they override this function.
1984 UNREACHABLE();
1985 return nullptr;
1986}
1987
1989 return (normal_entry() == nullptr ? 0 : 1) +
1990 (unchecked_entry() == nullptr ? 0 : 1) +
1991 (osr_entry() == nullptr ? 0 : 1) + catch_entries_.length();
1992}
1993
1995 if (normal_entry() != nullptr) {
1996 if (index == 0) return normal_entry_;
1997 index--;
1998 }
1999 if (unchecked_entry() != nullptr) {
2000 if (index == 0) return unchecked_entry();
2001 index--;
2002 }
2003 if (osr_entry() != nullptr) {
2004 if (index == 0) return osr_entry();
2005 index--;
2006 }
2007 return catch_entries_[index];
2008}
2009
2011 return 2;
2012}
2013
2015 if (index == 0) return true_successor_;
2016 if (index == 1) return false_successor_;
2017 UNREACHABLE();
2018 return nullptr;
2019}
2020
2022 return 1;
2023}
2024
2026 ASSERT(index == 0);
2027 return successor();
2028}
2029
2031 LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId()));
2032}
2033
2035 return (to() == kUnboxedInt32) && !is_truncating() &&
2036 !RangeUtils::Fits(value()->definition()->range(),
2038}
2039
2042 return false;
2043 }
2044 if (!value()->Type()->IsInt()) {
2045 return true;
2046 }
2047 if (representation() == kUnboxedInt64 || is_truncating()) {
2048 return false;
2049 }
2050 const intptr_t rep_bitsize =
2052 if (value()->Type()->ToCid() == kSmiCid &&
2053 compiler::target::kSmiBits <= rep_bitsize) {
2054 return false;
2055 }
2056 return !RangeUtils::IsWithin(value()->definition()->range(),
2059}
2060
2062 switch (op_kind()) {
2063 case Token::kBIT_AND:
2064 case Token::kBIT_OR:
2065 case Token::kBIT_XOR:
2066 return false;
2067
2068 case Token::kSHR:
2069 return false;
2070
2071 case Token::kUSHR:
2072 case Token::kSHL:
2073 // Currently only shifts by an in-range constant are supported; see
2074 // BinaryInt32OpInstr::IsSupported.
2075 return can_overflow();
2076
2077 case Token::kMOD: {
2078 UNREACHABLE();
2079 }
2080
2081 default:
2082 return can_overflow();
2083 }
2084}
2085
2087 switch (op_kind()) {
2088 case Token::kBIT_AND:
2089 case Token::kBIT_OR:
2090 case Token::kBIT_XOR:
2091 return false;
2092
2093 case Token::kSHR:
2095
2096 case Token::kUSHR:
2097 case Token::kSHL:
2099
2100 case Token::kMOD:
2102
2103 case Token::kTRUNCDIV:
2106
2107 default:
2108 return can_overflow();
2109 }
2110}
2111
2113 return RangeUtils::IsWithin(shift_range(), 0, max);
2114}
2115
2117 if (right()->BindsToConstant()) {
2118 const auto& constant = right()->BoundConstant();
2119 if (!constant.IsInteger()) return false;
2120 return Integer::Cast(constant).AsInt64Value() != 0;
2121 }
2122 return !RangeUtils::CanBeZero(right()->definition()->range());
2123}
2124
2126 if (!right()->BindsToConstant()) return false;
2127 const Object& constant = right()->BoundConstant();
2128 if (!constant.IsSmi()) return false;
2129 const intptr_t int_value = Smi::Cast(constant).Value();
2130 ASSERT(int_value != kIntptrMin);
2131 return Utils::IsPowerOfTwo(Utils::Abs(int_value));
2132}
2133
2135 switch (r) {
2136 case kTagged:
2137 return compiler::target::kSmiBits + 1;
2138 case kUnboxedInt32:
2139 case kUnboxedUint32:
2140 return 32;
2141 case kUnboxedInt64:
2142 return 64;
2143 default:
2144 UNREACHABLE();
2145 return 0;
2146 }
2147}
2148
2150 return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
2151 (64 - RepresentationBits(r)));
2152}
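// Example (illustrative): RepresentationMask(kUnboxedUint32) is 0xFFFFFFFF,
// RepresentationMask(kUnboxedInt64) is -1 (all 64 bits set), and for
// kTagged the mask covers kSmiBits + 1 bits.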
2153
2155 Value* left,
2156 Value* right) {
2157 int64_t left_value;
2158 if (!Evaluator::ToIntegerConstant(left, &left_value)) {
2159 return nullptr;
2160 }
2161
2162 // Can't apply the 0.0 * x -> 0.0 equivalence to double operations because
2163 // 0.0 * NaN is NaN, not 0.0.
2164 // Can't apply 0.0 + x -> x to doubles because 0.0 + (-0.0) is 0.0, not -0.0.
2165 switch (op) {
2166 case Token::kMUL:
2167 if (left_value == 1) {
2168 if (right->definition()->representation() != kUnboxedDouble) {
2169 // Can't apply the equivalence yet because representation selection
2170 // has not run yet. We need it to guarantee that the right value is
2171 // correctly coerced to double. The second canonicalization pass
2172 // will apply this equivalence.
2173 return nullptr;
2174 } else {
2175 return right->definition();
2176 }
2177 }
2178 break;
2179 default:
2180 break;
2181 }
2182
2183 return nullptr;
2184}
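// Example (illustrative): for a double multiplication 1.0 * x the helper
// above returns x, but only once representation selection has made the
// right operand an unboxed double; before that the fold is deferred to the
// second canonicalization pass.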
2185
2187 if (!HasUses()) return nullptr;
2188 if (value()->definition()->IsFloatToDouble()) {
2189 // F2D(D2F(v)) == v.
2190 return value()->definition()->AsFloatToDouble()->value()->definition();
2191 }
2192 if (value()->BindsToConstant()) {
2193 double narrowed_val =
2194 static_cast<float>(Double::Cast(value()->BoundConstant()).value());
2195 return flow_graph->GetConstant(
2196 Double::ZoneHandle(Double::NewCanonical(narrowed_val)), kUnboxedFloat);
2197 }
2198 return this;
2199}
2200
2202 if (!HasUses()) return nullptr;
2203 if (value()->BindsToConstant()) {
2204 return flow_graph->GetConstant(value()->BoundConstant(), kUnboxedDouble);
2205 }
2206 return this;
2207}
2208
2210 if (!HasUses()) return nullptr;
2211
2212 Definition* result = nullptr;
2213
2215 if (result != nullptr) {
2216 return result;
2217 }
2218
2220 if (result != nullptr) {
2221 return result;
2222 }
2223
2224 if ((op_kind() == Token::kMUL) &&
2225 (left()->definition() == right()->definition())) {
2227 Token::kSQUARE, new Value(left()->definition()), DeoptimizationTarget(),
2228 speculative_mode_, representation());
2229 flow_graph->InsertBefore(this, square, env(), FlowGraph::kValue);
2230 return square;
2231 }
2232
2233 return this;
2234}
2235
2237 return HasUses() ? this : nullptr;
2238}
2239
2241 switch (op) {
2242 case Token::kMUL:
2244 case Token::kADD:
2246 case Token::kBIT_AND:
2248 case Token::kBIT_OR:
2250 case Token::kBIT_XOR:
2251 return true;
2252 default:
2253 return false;
2254 }
2255}
2256
2258 Token::Kind op_kind,
2259 Value* value,
2260 intptr_t deopt_id,
2261 SpeculativeMode speculative_mode,
2262 Range* range) {
2263 UnaryIntegerOpInstr* op = nullptr;
2264 switch (representation) {
2265 case kTagged:
2267 break;
2268 case kUnboxedInt32:
2269 return nullptr;
2270 case kUnboxedUint32:
2272 break;
2273 case kUnboxedInt64:
2274 op = new UnaryInt64OpInstr(op_kind, value, deopt_id, speculative_mode);
2275 break;
2276 default:
2277 UNREACHABLE();
2278 return nullptr;
2279 }
2280
2281 if (op == nullptr) {
2282 return op;
2283 }
2284
2285 if (!Range::IsUnknown(range)) {
2286 op->set_range(*range);
2287 }
2288
2290 return op;
2291}
2292
2294 Representation representation,
2295 Token::Kind op_kind,
2296 Value* left,
2297 Value* right,
2298 intptr_t deopt_id,
2299 SpeculativeMode speculative_mode) {
2300 BinaryIntegerOpInstr* op = nullptr;
2301 Range* right_range = nullptr;
2302 switch (op_kind) {
2303 case Token::kMOD:
2304 case Token::kTRUNCDIV:
2305 if (representation != kTagged) break;
2307 case Token::kSHL:
2308 case Token::kSHR:
2309 case Token::kUSHR:
2310 if (auto const const_def = right->definition()->AsConstant()) {
2311 right_range = new Range();
2312 const_def->InferRange(nullptr, right_range);
2313 }
2314 break;
2315 default:
2316 break;
2317 }
2318 switch (representation) {
2319 case kTagged:
2320 op = new BinarySmiOpInstr(op_kind, left, right, deopt_id, right_range);
2321 break;
2322 case kUnboxedInt32:
2324 return nullptr;
2325 }
2327 break;
2328 case kUnboxedUint32:
2329 if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
2330 (op_kind == Token::kUSHR)) {
2331 if (speculative_mode == kNotSpeculative) {
2333 right_range);
2334 } else {
2336 right_range);
2337 }
2338 } else {
2340 }
2341 break;
2342 case kUnboxedInt64:
2343 if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
2344 (op_kind == Token::kUSHR)) {
2345 if (speculative_mode == kNotSpeculative) {
2347 right_range);
2348 } else {
2350 right_range);
2351 }
2352 } else {
2354 speculative_mode);
2355 }
2356 break;
2357 default:
2358 UNREACHABLE();
2359 return nullptr;
2360 }
2361
2363 return op;
2364}
2365
2367 Representation representation,
2368 Token::Kind op_kind,
2369 Value* left,
2370 Value* right,
2371 intptr_t deopt_id,
2372 bool can_overflow,
2373 bool is_truncating,
2374 Range* range,
2375 SpeculativeMode speculative_mode) {
2377 representation, op_kind, left, right, deopt_id, speculative_mode);
2378 if (op == nullptr) {
2379 return nullptr;
2380 }
2381 if (!Range::IsUnknown(range)) {
2382 op->set_range(*range);
2383 }
2384
2386 if (is_truncating) {
2387 op->mark_truncating();
2388 }
2389
2390 return op;
2391}
2392
2394 // If range analysis has already determined a single possible value for
2395 // this operation, then replace it if possible.
2397 const auto& value =
2399 auto* const replacement =
2400 flow_graph->TryCreateConstantReplacementFor(this, value);
2401 if (replacement != this) {
2402 return replacement;
2403 }
2404 }
2405
2406 return this;
2407}
2408
2410 // If range analysis has already determined a single possible value for
2411 // this operation, then replace it if possible.
2413 const auto& value =
2415 auto* const replacement =
2416 flow_graph->TryCreateConstantReplacementFor(this, value);
2417 if (replacement != this) {
2418 return replacement;
2419 }
2420 }
2421
2422 // If both operands are constants, evaluate this expression. This might
2423 // occur due to load forwarding after the constant propagation pass
2424 // has already been run.
2425
2426 if (left()->BindsToConstant() && right()->BindsToConstant()) {
2428 left()->BoundConstant(), right()->BoundConstant(), op_kind(),
2430
2431 if (!result.IsNull()) {
2432 return flow_graph->TryCreateConstantReplacementFor(this, result);
2433 }
2434 }
2435
2436 if (left()->BindsToConstant() && !right()->BindsToConstant() &&
2438 Value* l = left();
2439 Value* r = right();
2440 SetInputAt(0, r);
2441 SetInputAt(1, l);
2442 }
2443
2444 int64_t rhs;
2445 if (!Evaluator::ToIntegerConstant(right(), &rhs)) {
2446 return this;
2447 }
2448
2449 if (is_truncating()) {
2450 switch (op_kind()) {
2451 case Token::kMUL:
2452 case Token::kSUB:
2453 case Token::kADD:
2454 case Token::kBIT_AND:
2455 case Token::kBIT_OR:
2456 case Token::kBIT_XOR:
2458 break;
2459 default:
2460 break;
2461 }
2462 }
2463
2464 if (IsBinaryUint32Op() && HasUnmatchedInputRepresentations()) {
2465 // Canonicalization may eliminate the instruction and lose truncation,
2466 // so it is illegal to canonicalize a truncating uint32 instruction
2467 // until all conversions for its inputs are inserted.
2468 return this;
2469 }
2470
2471 switch (op_kind()) {
2472 case Token::kMUL:
2473 if (rhs == 1) {
2474 return left()->definition();
2475 } else if (rhs == 0) {
2476 return right()->definition();
2477 } else if ((rhs > 0) && Utils::IsPowerOfTwo(rhs)) {
2478 const int64_t shift_amount = Utils::ShiftForPowerOfTwo(rhs);
2479 const Representation shift_amount_rep =
2480 (SpeculativeModeOfInputs() == kNotSpeculative) ? kUnboxedInt64
2481 : kTagged;
2482 ConstantInstr* constant_shift_amount = flow_graph->GetConstant(
2483 Smi::Handle(Smi::New(shift_amount)), shift_amount_rep);
2485 representation(), Token::kSHL, left()->CopyWithType(),
2486 new Value(constant_shift_amount), GetDeoptId(), can_overflow(),
2488 if (shift != nullptr) {
2489 // Assign a range to the shift factor, just in case range
2490 // analysis no longer runs after this rewriting.
2491 if (auto shift_with_range = shift->AsShiftIntegerOp()) {
2492 shift_with_range->set_shift_range(
2493 new Range(RangeBoundary::FromConstant(shift_amount),
2494 RangeBoundary::FromConstant(shift_amount)));
2495 }
2496 if (!MayThrow()) {
2497 ASSERT(!shift->MayThrow());
2498 }
2499 if (!CanDeoptimize()) {
2500 ASSERT(!shift->CanDeoptimize());
2501 }
2502 flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue);
2503 return shift;
2504 }
2505 }
2506
2507 break;
2508 case Token::kADD:
2509 if (rhs == 0) {
2510 return left()->definition();
2511 }
2512 break;
2513 case Token::kBIT_AND:
2514 if (rhs == 0) {
2515 return right()->definition();
2516 } else if (rhs == RepresentationMask(representation())) {
2517 return left()->definition();
2518 }
2519 break;
2520 case Token::kBIT_OR:
2521 if (rhs == 0) {
2522 return left()->definition();
2523 } else if (rhs == RepresentationMask(representation())) {
2524 return right()->definition();
2525 }
2526 break;
2527 case Token::kBIT_XOR:
2528 if (rhs == 0) {
2529 return left()->definition();
2530 } else if (rhs == RepresentationMask(representation())) {
2532 representation(), Token::kBIT_NOT, left()->CopyWithType(),
2534 if (bit_not != nullptr) {
2535 flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue);
2536 return bit_not;
2537 }
2538 }
2539 break;
2540
2541 case Token::kSUB:
2542 if (rhs == 0) {
2543 return left()->definition();
2544 }
2545 break;
2546
2547 case Token::kTRUNCDIV:
2548 if (rhs == 1) {
2549 return left()->definition();
2550 } else if (rhs == -1) {
2552 representation(), Token::kNEGATE, left()->CopyWithType(),
2554 if (negation != nullptr) {
2555 flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue);
2556 return negation;
2557 }
2558 }
2559 break;
2560
2561 case Token::kMOD:
2562 if ((rhs == -1) || (rhs == 1)) {
2563 return flow_graph->TryCreateConstantReplacementFor(this,
2564 Object::smi_zero());
2565 }
2566 break;
2567
2568 case Token::kUSHR:
2569 if (rhs >= kBitsPerInt64) {
2570 return flow_graph->TryCreateConstantReplacementFor(this,
2571 Object::smi_zero());
2572 }
2574 case Token::kSHR:
2575 if (rhs == 0) {
2576 return left()->definition();
2577 } else if (rhs < 0) {
2578 // The instruction will always throw on a negative rhs operand.
2579 if (!CanDeoptimize()) {
2580 // For non-speculative operations (no deopt), let
2581 // the code generator deal with the throw on the slow path.
2582 break;
2583 }
2585 DeoptimizeInstr* deopt =
2586 new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
2587 flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
2588 // Replace with zero since it always throws.
2589 return flow_graph->TryCreateConstantReplacementFor(this,
2590 Object::smi_zero());
2591 }
2592 break;
2593
2594 case Token::kSHL: {
2595 const intptr_t result_bits = RepresentationBits(representation());
2596 if (rhs == 0) {
2597 return left()->definition();
2598 } else if ((rhs >= kBitsPerInt64) ||
2599 ((rhs >= result_bits) && is_truncating())) {
2600 return flow_graph->TryCreateConstantReplacementFor(this,
2601 Object::smi_zero());
2602 } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) {
2603 // The instruction will always throw on a negative rhs operand or
2604 // deoptimize on a large rhs operand.
2605 if (!CanDeoptimize()) {
2606 // For non-speculative operations (no deopt), let
2607 // the code generator deal with the throw on the slow path.
2608 break;
2609 }
2611 DeoptimizeInstr* deopt =
2612 new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
2613 flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
2614 // Replace with zero since it overshifted or always throws.
2615 return flow_graph->TryCreateConstantReplacementFor(this,
2616 Object::smi_zero());
2617 }
2618 break;
2619 }
2620
2621 default:
2622 break;
2623 }
2624
2625 return this;
2626}
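// Illustrative examples of the folds above (not exhaustive): x * 8 becomes
// x << 3 with the shift range pinned to [3, 3]; x * 0 and x & 0 fold to 0;
// x + 0, x - 0, x | 0, x ^ 0, x >> 0 and x << 0 fold to x; xor with the
// representation's all-ones mask becomes BIT_NOT(x); x % 1 and x % -1 fold
// to 0; x ~/ -1 becomes a negation.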
2627
2628// Optimizations that eliminate or simplify individual instructions.
2630 return this;
2631}
2632
2634 return this;
2635}
2636
2638 // Must not remove Redefinitions without uses until LICM: even though a
2639 // Redefinition might not have any uses itself, it can still dominate
2640 // uses of the value it redefines and must serve as a barrier for those
2641 // uses. RenameUsesDominatedByRedefinitions would normalize the graph and
2642 // route those uses through this redefinition.
2643 if (!HasUses() && !flow_graph->is_licm_allowed()) {
2644 return nullptr;
2645 }
2646 if (constrained_type() != nullptr &&
2647 constrained_type()->IsEqualTo(value()->Type())) {
2648 return value()->definition();
2649 }
2650 return this;
2651}
2652
2654 switch (kind_) {
2655 case kOsrAndPreemption:
2656 return this;
2657 case kOsrOnly:
2658 // Don't need OSR entries in the optimized code.
2659 return nullptr;
2660 }
2661
2662 // Switch above exhausts all possibilities but some compilers can't figure
2663 // it out.
2664 UNREACHABLE();
2665 return this;
2666}
2667
2670 return true;
2671 }
2672
2673 switch (cid) {
2674 case kArrayCid:
2675 case kImmutableArrayCid:
2676 case kTypeArgumentsCid:
2677 return true;
2678 default:
2679 return false;
2680 }
2681}
2682
2684 auto kind = function.recognized_kind();
2685 switch (kind) {
2686 case MethodRecognizer::kTypedData_ByteDataView_factory:
2687 case MethodRecognizer::kTypedData_Int8ArrayView_factory:
2688 case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
2689 case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
2690 case MethodRecognizer::kTypedData_Int16ArrayView_factory:
2691 case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
2692 case MethodRecognizer::kTypedData_Int32ArrayView_factory:
2693 case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
2694 case MethodRecognizer::kTypedData_Int64ArrayView_factory:
2695 case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
2696 case MethodRecognizer::kTypedData_Float32ArrayView_factory:
2697 case MethodRecognizer::kTypedData_Float64ArrayView_factory:
2698 case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
2699 case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
2700 case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
2701 return true;
2702 default:
2703 return false;
2704 }
2705}
2706
2708 const Function& function) {
2709 auto kind = function.recognized_kind();
2710 switch (kind) {
2711 case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory:
2712 case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory:
2713 case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory:
2714 case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory:
2715 case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory:
2716 case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory:
2717 case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory:
2718 case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory:
2719 case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory:
2720 case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory:
2721 case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory:
2722 case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory:
2723 case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory:
2724 case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory:
2725 case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory:
2726 return true;
2727 default:
2728 return false;
2729 }
2730}
2731
2733 return HasUses() ? this : nullptr;
2734}
2735
2737 const Slot& field,
2738 Object* result) {
2739 switch (field.kind()) {
2741 return TryEvaluateLoad(instance, field.field(), result);
2742
2743 case Slot::Kind::kArgumentsDescriptor_type_args_len:
2744 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2745 ArgumentsDescriptor desc(Array::Cast(instance));
2746 *result = Smi::New(desc.TypeArgsLen());
2747 return true;
2748 }
2749 return false;
2750
2751 case Slot::Kind::kArgumentsDescriptor_count:
2752 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2753 ArgumentsDescriptor desc(Array::Cast(instance));
2754 *result = Smi::New(desc.Count());
2755 return true;
2756 }
2757 return false;
2758
2759 case Slot::Kind::kArgumentsDescriptor_positional_count:
2760 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2761 ArgumentsDescriptor desc(Array::Cast(instance));
2762 *result = Smi::New(desc.PositionalCount());
2763 return true;
2764 }
2765 return false;
2766
2767 case Slot::Kind::kArgumentsDescriptor_size:
2768 // If a constant arguments descriptor appears, then either it is from
2769 // an invocation dispatcher (which always has tagged arguments and so
2770 // [host]Size() == [target]Size() == Count()) or the constant should
2771 // have the correct Size() in terms of the target architecture if any
2772 // spill slots are involved.
2773 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2774 ArgumentsDescriptor desc(Array::Cast(instance));
2775 *result = Smi::New(desc.Size());
2776 return true;
2777 }
2778 return false;
2779
2780 case Slot::Kind::kTypeArguments_length:
2781 if (instance.IsTypeArguments()) {
2782 *result = Smi::New(TypeArguments::Cast(instance).Length());
2783 return true;
2784 }
2785 return false;
2786
2787 case Slot::Kind::kRecord_shape:
2788 if (instance.IsRecord()) {
2789 *result = Record::Cast(instance).shape().AsSmi();
2790 return true;
2791 }
2792 return false;
2793
2795 if (instance.IsRecord()) {
2797 field.offset_in_bytes());
2798 const Record& record = Record::Cast(instance);
2799 if (index < record.num_fields()) {
2800 *result = record.FieldAt(index);
2801 }
2802 return true;
2803 }
2804 return false;
2805
2806 default:
2807 break;
2808 }
2809 return false;
2810}
2811
2813 const Field& field,
2814 Object* result) {
2815 if (!field.is_final() || !instance.IsInstance()) {
2816 return false;
2817 }
2818
2819 // Check that the instance really has the field which we
2820 // are trying to load from.
2821 Class& cls = Class::Handle(instance.clazz());
2822 while (cls.ptr() != Class::null() && cls.ptr() != field.Owner()) {
2823 cls = cls.SuperClass();
2824 }
2825 if (cls.ptr() != field.Owner()) {
2826 // Failed to find the field in class or its superclasses.
2827 return false;
2828 }
2829
2830 // Object has the field: execute the load.
2831 *result = Instance::Cast(instance).GetField(field);
2832 return true;
2833}
2834
2837 // If the load is guaranteed to never retrieve a GC-moveable address,
2838 // then the returned address can't alias the (GC-moveable) instance.
2839 return false;
2840 }
2841 if (slot().IsIdentical(Slot::PointerBase_data())) {
2842 // If we know statically that the instance is a typed data view, then the
2843 // data field doesn't alias the instance (but some other typed data object).
2844 const intptr_t cid = instance()->Type()->ToNullableCid();
2845 if (IsUnmodifiableTypedDataViewClassId(cid)) return false;
2846 if (IsTypedDataViewClassId(cid)) return false;
2847 }
2848 return true;
2849}
2850
2853 // The load is guaranteed to never retrieve a GC-moveable address.
2854 return false;
2855 }
2856 if (slot().IsIdentical(Slot::PointerBase_data())) {
2857 // If we know statically that the instance is an external array, then
2858 // the load retrieves a pointer to external memory.
2859 return !IsExternalPayloadClassId(instance()->Type()->ToNullableCid());
2860 }
2861 return true;
2862}
2863
2865 return TryEvaluateLoad(instance, slot(), result);
2866}
2867
2869 if (!HasUses() && !calls_initializer()) return nullptr;
2870
2871 Definition* orig_instance = instance()->definition()->OriginalDefinition();
2872 if (IsImmutableLengthLoad()) {
2874 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2875 // For fixed-length arrays, if the array is the result of a known
2876 // constructor call, we can replace the length load with the length
2877 // argument passed to the constructor.
2878 if (call->is_known_list_constructor() &&
2879 IsFixedLengthArrayCid(call->Type()->ToCid())) {
2880 return call->ArgumentAt(1);
2881 } else if (call->function().recognized_kind() ==
2882 MethodRecognizer::kByteDataFactory) {
2883 // Similarly, we check for the ByteData constructor and forward its
2884 // explicit length argument appropriately.
2885 return call->ArgumentAt(1);
2886 } else if (IsTypedDataViewFactory(call->function())) {
2887 // Typed data view factories all take three arguments (after
2888 // the implicit type arguments parameter):
2889 //
2890 // 1) _TypedList buffer -- the underlying data for the view
2891 // 2) int offsetInBytes -- the offset into the buffer to start viewing
2892 // 3) int length -- the number of elements in the view
2893 //
2894 // Here, we forward the third.
2895 return call->ArgumentAt(3);
2896 }
2897 } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
2898 // For arrays with guarded lengths, replace the length load
2899 // with a constant.
2900 const Slot& slot = load_array->slot();
2901 if (slot.IsDartField()) {
2902 if (slot.field().guarded_list_length() >= 0) {
2903 return flow_graph->GetConstant(
2905 }
2906 }
2907 }
2908 }
2909
2910 switch (slot().kind()) {
2911 case Slot::Kind::kArray_length:
2912 if (CreateArrayInstr* create_array = orig_instance->AsCreateArray()) {
2913 return create_array->num_elements()->definition();
2914 }
2915 break;
2916 case Slot::Kind::kTypedDataBase_length:
2917 if (AllocateTypedDataInstr* alloc_typed_data =
2918 orig_instance->AsAllocateTypedData()) {
2919 return alloc_typed_data->num_elements()->definition();
2920 }
2921 break;
2922 case Slot::Kind::kTypedDataView_typed_data:
2923 // This case covers the first explicit argument to typed data view
2924 // factories, the data (buffer).
2926 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2927 if (IsTypedDataViewFactory(call->function()) ||
2929 return call->ArgumentAt(1);
2930 }
2931 }
2932 break;
2933 case Slot::Kind::kTypedDataView_offset_in_bytes:
2934 // This case covers the second explicit argument to typed data view
2935 // factories, the offset into the buffer.
2937 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2938 if (IsTypedDataViewFactory(call->function())) {
2939 return call->ArgumentAt(2);
2940 } else if (call->function().recognized_kind() ==
2941 MethodRecognizer::kByteDataFactory) {
2942 // A _ByteDataView returned from the ByteData constructor always
2943 // has an offset of 0.
2944 return flow_graph->GetConstant(Object::smi_zero());
2945 }
2946 }
2947 break;
2948 case Slot::Kind::kRecord_shape:
2950 if (auto* alloc_rec = orig_instance->AsAllocateRecord()) {
2951 return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
2952 } else if (auto* alloc_rec = orig_instance->AsAllocateSmallRecord()) {
2953 return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
2954 } else {
2955 const AbstractType* type = instance()->Type()->ToAbstractType();
2956 if (type->IsRecordType()) {
2957 return flow_graph->GetConstant(
2958 Smi::Handle(RecordType::Cast(*type).shape().AsSmi()));
2959 }
2960 }
2961 break;
2964 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2965 if (call->is_known_list_constructor()) {
2966 return call->ArgumentAt(0);
2967 } else if (IsTypedDataViewFactory(call->function()) ||
2969 return flow_graph->constant_null();
2970 }
2971 switch (call->function().recognized_kind()) {
2972 case MethodRecognizer::kByteDataFactory:
2973 case MethodRecognizer::kLinkedHashBase_getData:
2974 case MethodRecognizer::kImmutableLinkedHashBase_getData:
2975 return flow_graph->constant_null();
2976 default:
2977 break;
2978 }
2979 } else if (CreateArrayInstr* create_array =
2980 orig_instance->AsCreateArray()) {
2981 return create_array->type_arguments()->definition();
2982 } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
2983 const Slot& slot = load_array->slot();
2984 switch (slot.kind()) {
2986 // For trivially exact fields we know that type arguments match
2987 // static type arguments exactly.
2988 const Field& field = slot.field();
2990 return flow_graph->GetConstant(TypeArguments::Handle(
2991 Type::Cast(AbstractType::Handle(field.type()))
2992 .GetInstanceTypeArguments(flow_graph->thread())));
2993 }
2994 break;
2995 }
2996
2997 case Slot::Kind::kLinkedHashBase_data:
2998 return flow_graph->constant_null();
2999
3000 default:
3001 break;
3002 }
3003 }
3004 break;
3005 case Slot::Kind::kPointerBase_data:
3008 const intptr_t cid = instance()->Type()->ToNullableCid();
3009 // Pointers and ExternalTypedData objects never contain inner pointers.
3010 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
3012 }
3013 }
3014 break;
3015 default:
3016 break;
3017 }
3018
3019 // Try folding away loads from constant objects.
3020 if (instance()->BindsToConstant()) {
3022 if (Evaluate(instance()->BoundConstant(), &result)) {
3023 if (result.IsSmi() || result.IsOld()) {
3024 return flow_graph->GetConstant(result);
3025 }
3026 }
3027 }
3028
3029 if (instance()->definition()->IsAllocateObject() && IsImmutableLoad()) {
3030 StoreFieldInstr* initializing_store = nullptr;
3031 for (auto use : instance()->definition()->input_uses()) {
3032 if (auto store = use->instruction()->AsStoreField()) {
3033 if ((use->use_index() == StoreFieldInstr::kInstancePos) &&
3034 store->slot().IsIdentical(slot())) {
3035 if (initializing_store == nullptr) {
3036 initializing_store = store;
3037 } else {
3038 initializing_store = nullptr;
3039 break;
3040 }
3041 }
3042 }
3043 }
3044
3045 // If we find an initializing store then it *must* by construction
3046 // dominate the load.
3047 if (initializing_store != nullptr &&
3048 initializing_store->is_initialization()) {
3049 ASSERT(IsDominatedBy(initializing_store));
3050 return initializing_store->value()->definition();
3051 }
3052 }
3053
3054 return this;
3055}
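// Illustrative example: a load of Array_length from the result of
// CreateArray(n) is replaced by n itself, and the length of a typed data
// view produced by a recognized view factory is forwarded from the
// factory's explicit length argument (ArgumentAt(3)).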
3056
3058 if (FLAG_eliminate_type_checks) {
3059 if (value()->Type()->ToCid() == kBoolCid) {
3060 return value()->definition();
3061 }
3062
3063 // In strong mode, the type is already verified either by static analysis
3064 // or runtime checks, so AssertBoolean just ensures that the value is not null.
3065 if (!value()->Type()->is_nullable()) {
3066 return value()->definition();
3067 }
3068 }
3069
3070 return this;
3071}
3072
3074 // We need dst_type() to be a constant AbstractType to perform any
3075 // canonicalization.
3076 if (!dst_type()->BindsToConstant()) return this;
3077 const auto& abs_type = AbstractType::Cast(dst_type()->BoundConstant());
3078
3079 if (abs_type.IsTopTypeForSubtyping() ||
3080 (FLAG_eliminate_type_checks &&
3081 value()->Type()->IsAssignableTo(abs_type))) {
3082 return value()->definition();
3083 }
3084 if (abs_type.IsInstantiated()) {
3085 return this;
3086 }
3087
3088 // For uninstantiated target types: If the instantiator and function
3089 // type arguments are constant, instantiate the target type here.
3090 // Note: these constant type arguments might not necessarily correspond
3091 // to the correct instantiator because AssertAssignable might
3092 // be located in the unreachable part of the graph (e.g.
3093 // it might be dominated by CheckClass that always fails).
3094 // This means that the code below must guard against such a possibility.
3095 Thread* thread = Thread::Current();
3096 Zone* Z = thread->zone();
3097
3098 const TypeArguments* instantiator_type_args = nullptr;
3099 const TypeArguments* function_type_args = nullptr;
3100
3101 if (instantiator_type_arguments()->BindsToConstant()) {
3103 instantiator_type_args = (val.ptr() == TypeArguments::null())
3104 ? &TypeArguments::null_type_arguments()
3105 : &TypeArguments::Cast(val);
3106 }
3107
3108 if (function_type_arguments()->BindsToConstant()) {
3110 function_type_args =
3111 (val.ptr() == TypeArguments::null())
3112 ? &TypeArguments::null_type_arguments()
3113 : &TypeArguments::Cast(function_type_arguments()->BoundConstant());
3114 }
3115
3116 // If instantiator_type_args are not constant, try to match the pattern
3117 // obj.field.:type_arguments, where the field's static type exactness state
3118 // tells us that all values stored in the field have an exact superclass.
3119 // In this case we know the prefix of the actual type arguments vector
3120 // and can try to instantiate the type using just the prefix.
3121 //
3122 // Note: TypeParameter::InstantiateFrom returns an error if we try
3123 // to instantiate it from a vector that is too short.
3124 if (instantiator_type_args == nullptr) {
3125 if (LoadFieldInstr* load_type_args =
3126 instantiator_type_arguments()->definition()->AsLoadField()) {
3127 if (load_type_args->slot().IsTypeArguments()) {
3128 if (LoadFieldInstr* load_field = load_type_args->instance()
3129 ->definition()
3131 ->AsLoadField()) {
3132 if (load_field->slot().IsDartField() &&
3133 load_field->slot()
3134 .field()
3135 .static_type_exactness_state()
3136 .IsHasExactSuperClass()) {
3137 instantiator_type_args = &TypeArguments::Handle(
3138 Z, Type::Cast(AbstractType::Handle(
3139 Z, load_field->slot().field().type()))
3140 .GetInstanceTypeArguments(thread));
3141 }
3142 }
3143 }
3144 }
3145 }
3146
3147 if ((instantiator_type_args != nullptr) && (function_type_args != nullptr)) {
3148 AbstractType& new_dst_type = AbstractType::Handle(
3149 Z, abs_type.InstantiateFrom(*instantiator_type_args,
3150 *function_type_args, kAllFree, Heap::kOld));
3151 if (new_dst_type.IsNull()) {
3152 // Failed instantiation in dead code.
3153 return this;
3154 }
3155 new_dst_type = new_dst_type.Canonicalize(Thread::Current());
3156
3157 // Successfully instantiated destination type: update the type attached
3158 // to this instruction and set type arguments to null because we no
3159 // longer need them (the type was instantiated).
3160 dst_type()->BindTo(flow_graph->GetConstant(new_dst_type));
3163
3164 if (new_dst_type.IsTopTypeForSubtyping() ||
3165 (FLAG_eliminate_type_checks &&
3166 value()->Type()->IsAssignableTo(new_dst_type))) {
3167 return value()->definition();
3168 }
3169 }
3170 return this;
3171}
3172
3174 return HasUses() ? this : nullptr;
3175}
3176
3178 bool opt) const {
3179 const intptr_t kNumInputs = 0;
3180 const intptr_t kNumTemps = 0;
3181 LocationSummary* locs = new (zone)
3182 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3183 return locs;
3184}
3185
3187 return nullptr;
3188}
3189
3191 ASSERT(!coverage_array_.IsNull());
3192 return coverage_array_.At(coverage_index_) != Smi::New(0) ? nullptr : this;
3193}
3194
3196 if (input_use_list() == nullptr) {
3197 // Environments can accommodate any representation. No need to box.
3198 return value()->definition();
3199 }
3200
3201 // Fold away Box<rep>(v) if v has a target representation already.
3202 Definition* value_defn = value()->definition();
3203 if (value_defn->representation() == representation()) {
3204 return value_defn;
3205 }
3206
3207 // Fold away Box<rep>(Unbox<rep>(v)) if value is known to be of the
3208 // right class.
3209 UnboxInstr* unbox_defn = value()->definition()->AsUnbox();
3210 if ((unbox_defn != nullptr) &&
3211 (unbox_defn->representation() == from_representation()) &&
3212 (unbox_defn->value()->Type()->ToCid() == Type()->ToCid())) {
3213 if (from_representation() == kUnboxedFloat) {
3214 // This is a narrowing conversion.
3215 return this;
3216 }
3217 return unbox_defn->value()->definition();
3218 }
3219
3220 if (value()->BindsToConstant()) {
3221 switch (representation()) {
3222 case kUnboxedFloat64x2:
3223 ASSERT(value()->BoundConstant().IsFloat64x2());
3224 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3225 case kUnboxedFloat32x4:
3226 ASSERT(value()->BoundConstant().IsFloat32x4());
3227 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3228 case kUnboxedInt32x4:
3229 ASSERT(value()->BoundConstant().IsInt32x4());
3230 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3231 default:
3232 return this;
3233 }
3234 }
3235
3236 return this;
3237}
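// Illustrative example: Box<double>(Unbox<double>(v)) folds back to v when
// v is already known to be a Double, and boxing an unboxed SIMD constant
// (e.g. Float32x4) is replaced by the corresponding tagged constant.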
3238
3240 return HasUses() ? this : nullptr;
3241}
3242
3244 if (!HasUses()) return nullptr;
3245
3246 if (BoxLanesInstr* box = value()->definition()->AsBoxLanes()) {
3247 return box->InputAt(lane())->definition();
3248 }
3249
3250 return this;
3251}
3252
3254 Range* range = value()->definition()->range();
3256}
3257
3259 if (input_use_list() == nullptr) {
3260 // Environments can accommodate any representation. No need to box.
3261 return value()->definition();
3262 }
3263
3264 // Fold away Box<rep>(v) if v has a target representation already.
3265 Definition* value_defn = value()->definition();
3266 if (value_defn->representation() == representation()) {
3267 return value_defn;
3268 }
3269
3270 return this;
3271}
3272
3274 Definition* replacement = BoxIntegerInstr::Canonicalize(flow_graph);
3275 if (replacement != this) {
3276 return replacement;
3277 }
3278
3279 // For all x, box(unbox(x)) = x.
3280 if (auto unbox = value()->definition()->AsUnboxInt64()) {
3281 if (unbox->SpeculativeModeOfInputs() == kNotSpeculative) {
3282 return unbox->value()->definition();
3283 }
3284 } else if (auto unbox = value()->definition()->AsUnboxedConstant()) {
3285 return flow_graph->GetConstant(unbox->value());
3286 }
3287
3288 // Find a more precise box instruction.
3289 if (auto conv = value()->definition()->AsIntConverter()) {
3290 Definition* replacement;
3291 if (conv->from() == kUntagged) {
3292 return this;
3293 }
3294 switch (conv->from()) {
3295 case kUnboxedInt32:
3296 replacement = new BoxInt32Instr(conv->value()->CopyWithType());
3297 break;
3298 case kUnboxedUint32:
3299 replacement = new BoxUint32Instr(conv->value()->CopyWithType());
3300 break;
3301 default:
3302 UNREACHABLE();
3303 break;
3304 }
3305 flow_graph->InsertBefore(this, replacement, nullptr, FlowGraph::kValue);
3306 return replacement;
3307 }
3308
3309 return this;
3310}
3311
3313 if (!HasUses() && !CanDeoptimize()) return nullptr;
3314
3315 // Fold away Unbox<rep>(v) if v has a target representation already.
3316 Definition* value_defn = value()->definition();
3317 if (value_defn->representation() == representation()) {
3318 return value_defn;
3319 }
3320
3321 BoxInstr* box_defn = value()->definition()->AsBox();
3322 if (box_defn != nullptr) {
3323 // Fold away Unbox<rep>(Box<rep>(v)).
3324 if (box_defn->from_representation() == representation()) {
3325 return box_defn->value()->definition();
3326 }
3327
3328 if ((box_defn->from_representation() == kUnboxedDouble) &&
3329 (representation() == kUnboxedFloat)) {
3330 Definition* replacement = new DoubleToFloatInstr(
3331 box_defn->value()->CopyWithType(), DeoptId::kNone);
3332 flow_graph->InsertBefore(this, replacement, nullptr, FlowGraph::kValue);
3333 return replacement;
3334 }
3335
3336 if ((box_defn->from_representation() == kUnboxedFloat) &&
3337 (representation() == kUnboxedDouble)) {
3338 Definition* replacement = new FloatToDoubleInstr(
3339 box_defn->value()->CopyWithType(), DeoptId::kNone);
3340 flow_graph->InsertBefore(this, replacement, nullptr, FlowGraph::kValue);
3341 return replacement;
3342 }
3343 }
3344
3345 if (representation() == kUnboxedDouble && value()->BindsToConstant()) {
3346 const Object& val = value()->BoundConstant();
3347 if (val.IsInteger()) {
3348 const Double& double_val = Double::ZoneHandle(
3349 flow_graph->zone(),
3350 Double::NewCanonical(Integer::Cast(val).AsDoubleValue()));
3351 return flow_graph->GetConstant(double_val, kUnboxedDouble);
3352 } else if (val.IsDouble()) {
3353 return flow_graph->GetConstant(val, kUnboxedDouble);
3354 }
3355 }
3356
3357 if (representation() == kUnboxedFloat && value()->BindsToConstant()) {
3358 const Object& val = value()->BoundConstant();
3359 if (val.IsInteger()) {
3360 double narrowed_val =
3361 static_cast<float>(Integer::Cast(val).AsDoubleValue());
3362 return flow_graph->GetConstant(
3364 kUnboxedFloat);
3365 } else if (val.IsDouble()) {
3366 double narrowed_val = static_cast<float>(Double::Cast(val).value());
3367 return flow_graph->GetConstant(
3369 kUnboxedFloat);
3370 }
3371 }
3372
3373 return this;
3374}
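// Illustrative example: Unbox<double>(Box<double>(v)) folds to v,
// Unbox<float>(Box<double>(v)) is rewritten as DoubleToFloat(v), and
// unboxing the integer constant 3 to kUnboxedDouble yields the unboxed
// constant 3.0.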
3375
3377 if (!HasUses() && !CanDeoptimize()) return nullptr;
3378
3379 // Fold away Unbox<rep>(v) if v has a target representation already.
3380 Definition* value_defn = value()->definition();
3381 if (value_defn->representation() == representation()) {
3382 return value_defn;
3383 }
3384
3385 // Do not attempt to fold this instruction if we have not matched
3386 // input/output representations yet.
3388 return this;
3389 }
3390
3391 // Fold away UnboxInteger<rep_to>(BoxInteger<rep_from>(v)).
3392 BoxIntegerInstr* box_defn = value()->definition()->AsBoxInteger();
3393 if (box_defn != nullptr && !box_defn->HasUnmatchedInputRepresentations()) {
3394 Representation from_representation =
3395 box_defn->value()->definition()->representation();
3396 if (from_representation == representation()) {
3397 return box_defn->value()->definition();
3398 } else {
3399 // Only operate on explicit unboxed operands.
3401 from_representation, representation(),
3402 box_defn->value()->CopyWithType(),
3403 (representation() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
3404 // TODO(vegorov): marking resulting converter as truncating when
3405 // unboxing can't deoptimize is a workaround for the missing
3406 // deoptimization environment when we insert converter after
3407 // EliminateEnvironments and there is a mismatch between predicates
3408 // UnboxIntConverterInstr::CanDeoptimize and UnboxInt32::CanDeoptimize.
3409 if ((representation() == kUnboxedInt32) &&
3410 (is_truncating() || !CanDeoptimize())) {
3411 converter->mark_truncating();
3412 }
3413 flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
3414 return converter;
3415 }
3416 }
3417
3419 // Remember if we ever learn that our input doesn't require checking, as
3420 // the input Value might later be changed in a way that would make us forget.
3422 }
3423
3424 if (value()->BindsToConstant()) {
3425 const auto& obj = value()->BoundConstant();
3426 if (obj.IsInteger()) {
3427 if (representation() == kUnboxedInt64) {
3428 return flow_graph->GetConstant(obj, representation());
3429 }
3430 const int64_t intval = Integer::Cast(obj).AsInt64Value();
3432 return flow_graph->GetConstant(obj, representation());
3433 }
3434 if (is_truncating()) {
3435 const int64_t result = Evaluator::TruncateTo(intval, representation());
3436 return flow_graph->GetConstant(
3437 Integer::ZoneHandle(flow_graph->zone(),
3439 representation());
3440 }
3441 }
3442 }
3443
3444 return this;
3445}
3446
3448 if (!HasUses()) return nullptr;
3449
3450 // Fold IntConverter({Unboxed}Constant(...)) to UnboxedConstant.
3451 if (auto constant = value()->definition()->AsConstant()) {
3452 if (from() != kUntagged && to() != kUntagged &&
3453 constant->representation() == from() && constant->value().IsInteger()) {
3454 const int64_t value = Integer::Cast(constant->value()).AsInt64Value();
3455 const int64_t result =
3457 if (is_truncating() || (value == result)) {
3459 box ^= box.Canonicalize(flow_graph->thread());
3460 return flow_graph->GetConstant(box, to());
3461 }
3462 }
3463 }
3464
3465 // Fold IntConverter(b->c, IntConverter(a->b, v)) into IntConverter(a->c, v).
3466 IntConverterInstr* first_converter = value()->definition()->AsIntConverter();
3467 if ((first_converter != nullptr) &&
3468 (first_converter->representation() == from())) {
3469 const auto intermediate_rep = first_converter->representation();
3470 // Only eliminate intermediate conversion if it does not change the value.
3471 auto src_defn = first_converter->value()->definition();
3472 if (intermediate_rep == kUntagged) {
3473 // Both conversions are no-ops, as the other representations must be
3474 // kUnboxedIntPtr.
3475 } else if (!Range::Fits(src_defn->range(), intermediate_rep)) {
3476 return this;
3477 }
3478
3479 // Otherwise it is safe to discard any other conversions from and then back
3480 // to the same integer type.
3481 if (first_converter->from() == to()) {
3482 return src_defn;
3483 }
3484
3485 // Do not merge conversions where the first starts from Untagged or the
3486 // second ends at Untagged, since we expect to see either UnboxedIntPtr
3487 // or UnboxedFfiIntPtr as the other type in an Untagged conversion.
3488 if ((first_converter->from() == kUntagged) || (to() == kUntagged)) {
3489 return this;
3490 }
3491
3493 first_converter->from(), representation(),
3494 first_converter->value()->CopyWithType(),
3495 (to() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
3496 if ((representation() == kUnboxedInt32) && is_truncating()) {
3497 converter->mark_truncating();
3498 }
3499 flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
3500 return converter;
3501 }
3502
3503 UnboxInt64Instr* unbox_defn = value()->definition()->AsUnboxInt64();
3504 if (unbox_defn != nullptr && (from() == kUnboxedInt64) &&
3505 (to() == kUnboxedInt32) && unbox_defn->HasOnlyInputUse(value())) {
3506 // TODO(vegorov): there is a duplication of code between UnboxedIntConverter
3507 // and code path that unboxes Mint into Int32. We should just schedule
3508 // these instructions close to each other instead of fusing them.
3509 Definition* replacement =
3512 unbox_defn->value()->CopyWithType(), GetDeoptId());
3513 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3514 return replacement;
3515 }
3516
3517 return this;
3518}
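// Illustrative example: IntConverter(int32->int64,
// IntConverter(int64->int32, v)) collapses to v when v's range fits in the
// intermediate 32-bit representation, and converting an integer constant
// simply yields the constant in the target representation (when the value
// survives truncation).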
3519
3520// Tests for a FP comparison that cannot be negated
3521// (to preserve NaN semantics).
3522static bool IsFpCompare(ComparisonInstr* comp) {
3523 if (comp->IsRelationalOp()) {
3524 return comp->operation_cid() == kDoubleCid;
3525 }
3526 return false;
3527}
3528
3530 Definition* defn = value()->definition();
3531 // Convert e.g. !(x > y) into (x <= y) for non-FP x, y.
3532 if (defn->IsComparison() && defn->HasOnlyUse(value()) &&
3533 defn->Type()->ToCid() == kBoolCid) {
3534 ComparisonInstr* comp = defn->AsComparison();
3535 if (!IsFpCompare(comp)) {
3536 comp->NegateComparison();
3537 return defn;
3538 }
3539 }
3540 return this;
3541}
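// Illustrative example: !(x > y) becomes (x <= y) when the comparison has
// no other uses and is not a double comparison; double comparisons are left
// alone to preserve NaN semantics.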
3542
3543static bool MayBeBoxableNumber(intptr_t cid) {
3544 return (cid == kDynamicCid) || (cid == kMintCid) || (cid == kDoubleCid);
3545}
3546
3548 if (type->IsNone()) {
3549 return false;
3550 }
3551 const AbstractType& unwrapped_type =
3552 AbstractType::Handle(type->ToAbstractType()->UnwrapFutureOr());
3553 // Note that type 'Number' is a subtype of itself.
3554 return unwrapped_type.IsTopTypeForSubtyping() ||
3555 unwrapped_type.IsObjectType() || unwrapped_type.IsTypeParameter() ||
3557 Heap::kOld);
3558}
3559
3560// Returns a replacement for a strict comparison and signals if the result has
3561// to be negated.
3563 bool* negated,
3564 bool is_branch) {
3565 // Use propagated cid and type information to eliminate number checks.
3566 // If one of the inputs is not a boxable number (Mint, Double), or
3567 // is not a subtype of num, no need for number checks.
3568 if (compare->needs_number_check()) {
3569 if (!MayBeBoxableNumber(compare->left()->Type()->ToCid()) ||
3570 !MayBeBoxableNumber(compare->right()->Type()->ToCid())) {
3571 compare->set_needs_number_check(false);
3572 } else if (!MayBeNumber(compare->left()->Type()) ||
3573 !MayBeNumber(compare->right()->Type())) {
3574 compare->set_needs_number_check(false);
3575 }
3576 }
3577 *negated = false;
3578 ConstantInstr* constant_defn = nullptr;
3579 Value* other = nullptr;
3580
3581 if (!compare->IsComparisonWithConstant(&other, &constant_defn)) {
3582 return compare;
3583 }
3584
3585 const Object& constant = constant_defn->value();
3586 const bool can_merge = is_branch || (other->Type()->ToCid() == kBoolCid);
3587 Definition* other_defn = other->definition();
3588 Token::Kind kind = compare->kind();
3589
3590 if (!constant.IsBool() || !can_merge) {
3591 return compare;
3592 }
3593
3594 const bool constant_value = Bool::Cast(constant).value();
3595
3596 // Handle `e === true` and `e !== false`: these cases don't require
3597 // negation and allow direct merge.
3598 if ((kind == Token::kEQ_STRICT) == constant_value) {
3599 return other_defn;
3600 }
3601
3602 // We now have `e !== true` or `e === false`: these cases require
3603 // negation.
3604 if (auto comp = other_defn->AsComparison()) {
3605 if (other_defn->HasOnlyUse(other) && !IsFpCompare(comp)) {
3606 *negated = true;
3607 return other_defn;
3608 }
3609 }
3610
3611 return compare;
3612}
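// Illustrative example: for a boolean expression e, (e === true) and
// (e !== false) are replaced by e itself, while (e === false) and
// (e !== true) are replaced by e with *negated set (provided e is a
// single-use, non-double comparison that can be negated).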
3613
3614static bool BindsToGivenConstant(Value* v, intptr_t expected) {
3615 return v->BindsToConstant() && v->BoundConstant().IsSmi() &&
3616 (Smi::Cast(v->BoundConstant()).Value() == expected);
3617}
3618
3619// Recognize patterns (a & b) == 0 and (a & 2^n) != 2^n.
3620static bool RecognizeTestPattern(Value* left, Value* right, bool* negate) {
3621 if (!right->BindsToConstant() || !right->BoundConstant().IsSmi()) {
3622 return false;
3623 }
3624
3625 const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
3626 if ((value != 0) && !Utils::IsPowerOfTwo(value)) {
3627 return false;
3628 }
3629
3630 auto mask_op = left->definition()->AsBinaryIntegerOp();
3631 if ((mask_op == nullptr) || (mask_op->op_kind() != Token::kBIT_AND) ||
3632 !mask_op->HasOnlyUse(left)) {
3633 return false;
3634 }
3635
3636 if (value == 0) {
3637 // Recognized (a & b) == 0 pattern.
3638 *negate = false;
3639 return true;
3640 }
3641
3642 // Recognize the (a & 2^n) == 2^n pattern.
3643 if (BindsToGivenConstant(mask_op->left(), value) ||
3644 BindsToGivenConstant(mask_op->right(), value)) {
3645 // Recognized the (a & 2^n) == 2^n pattern. It's equivalent to (a & 2^n) != 0,
3646 // so we need to negate the original comparison.
3647 *negate = true;
3648 return true;
3649 }
3650
3651 return false;
3652}
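// Illustrative example: (a & b) == 0 is recognized directly, while
// (a & 4) == 4 is recognized as the equivalent (a & 4) != 0 with *negate
// set, allowing the branch canonicalization below to fold the masking
// operation into the branch.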
3653
3655 Zone* zone = flow_graph->zone();
3656 if (comparison()->IsStrictCompare()) {
3657 bool negated = false;
3659 comparison()->AsStrictCompare(), &negated, /*is_branch=*/true);
3660 if (replacement == comparison()) {
3661 return this;
3662 }
3663 ComparisonInstr* comp = replacement->AsComparison();
3664 if ((comp == nullptr) || comp->CanDeoptimize() ||
3666 return this;
3667 }
3668
3669 // Replace the comparison if the replacement is used at this branch,
3670 // and has exactly one use.
3671 Value* use = comp->input_use_list();
3672 if ((use->instruction() == this) && comp->HasOnlyUse(use)) {
3673 if (negated) {
3674 comp->NegateComparison();
3675 }
3677 flow_graph->CopyDeoptTarget(this, comp);
3678 // Unlink environment from the comparison since it is copied to the
3679 // branch instruction.
3680 comp->RemoveEnvironment();
3681
3682 comp->RemoveFromGraph();
3683 SetComparison(comp);
3684 if (FLAG_trace_optimization && flow_graph->should_print()) {
3685 THR_Print("Merging comparison v%" Pd "\n", comp->ssa_temp_index());
3686 }
3687 // Clear the comparison's temp index and ssa temp index since the
3688 // value of the comparison is not used outside the branch anymore.
3689 ASSERT(comp->input_use_list() == nullptr);
3690 comp->ClearSSATempIndex();
3691 comp->ClearTempIndex();
3692 }
3693
3694 return this;
3695 }
3696
3697 if (comparison()->IsEqualityCompare() &&
3698 (comparison()->operation_cid() == kSmiCid ||
3699 comparison()->operation_cid() == kMintCid)) {
3700 const auto representation =
3701 comparison()->operation_cid() == kSmiCid ? kTagged : kUnboxedInt64;
3703 BinaryIntegerOpInstr* bit_and = nullptr;
3704 bool negate = false;
3705 if (RecognizeTestPattern(comparison()->left(), comparison()->right(),
3706 &negate)) {
3707 bit_and = comparison()->left()->definition()->AsBinaryIntegerOp();
3708 } else if (RecognizeTestPattern(comparison()->right(),
3709 comparison()->left(), &negate)) {
3710 bit_and = comparison()->right()->definition()->AsBinaryIntegerOp();
3711 }
3712 if (bit_and != nullptr) {
3713 if (FLAG_trace_optimization && flow_graph->should_print()) {
3714 THR_Print("Merging test integer v%" Pd "\n",
3715 bit_and->ssa_temp_index());
3716 }
3718 comparison()->source(),
3719 negate ? Token::NegateComparison(comparison()->kind())
3720 : comparison()->kind(),
3721 representation, bit_and->left()->Copy(zone),
3722 bit_and->right()->Copy(zone));
3725 flow_graph->CopyDeoptTarget(this, bit_and);
3727 bit_and->RemoveFromGraph();
3728 }
3729 }
3730 }
3731 return this;
3732}
3733
3735 if (!HasUses()) return nullptr;
3736
3737 bool negated = false;
3738 Definition* replacement = CanonicalizeStrictCompare(this, &negated,
3739 /*is_branch=*/false);
3740 if (negated && replacement->IsComparison()) {
3741 ASSERT(replacement != this);
3742 replacement->AsComparison()->NegateComparison();
3743 }
3744 return replacement;
3745}
3746
3748 return (use->definition()->IsUnbox() && use->IsSingleUse()) ||
3749 use->definition()->IsConstant();
3750}
3751
3753 if (is_null_aware()) {
3754 ASSERT(operation_cid() == kMintCid);
3755 // Select more efficient instructions based on operand types.
3756 CompileType* left_type = left()->Type();
3757 CompileType* right_type = right()->Type();
3758 if (left_type->IsNull() || left_type->IsNullableSmi() ||
3759 right_type->IsNull() || right_type->IsNullableSmi()) {
3760 auto replacement = new StrictCompareInstr(
3761 source(),
3762 (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
3763 left()->CopyWithType(), right()->CopyWithType(),
3764 /*needs_number_check=*/false, DeoptId::kNone);
3765 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3766 return replacement;
3767 } else {
3768 // Null-aware EqualityCompare takes boxed inputs, so make sure
3769 // unmatched representations are still allowed when converting
3770 // EqualityCompare to the unboxed instruction.
3771 if (!left_type->is_nullable() && !right_type->is_nullable() &&
3772 flow_graph->unmatched_representations_allowed()) {
3773 set_null_aware(false);
3774 }
3775 }
3776 } else {
3777 if ((operation_cid() == kMintCid) && IsSingleUseUnboxOrConstant(left()) &&
3778 IsSingleUseUnboxOrConstant(right()) &&
3779 (left()->Type()->IsNullableSmi() || right()->Type()->IsNullableSmi()) &&
3780 flow_graph->unmatched_representations_allowed()) {
3781 auto replacement = new StrictCompareInstr(
3782 source(),
3783 (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
3784 left()->CopyWithType(), right()->CopyWithType(),
3785 /*needs_number_check=*/false, DeoptId::kNone);
3786 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3787 return replacement;
3788 }
3789 }
3790 return this;
3791}
3792
3794 if (IsNoop()) {
3795 return base()->definition();
3796 }
3797 return this;
3798}
3799
3801 const intptr_t value_cid = value()->Type()->ToCid();
3802 if (value_cid == kDynamicCid) {
3803 return this;
3804 }
3805
3806 return cids().HasClassId(value_cid) ? nullptr : this;
3807}
3808
3810 if (!HasUses()) return nullptr;
3811
3812 const intptr_t cid = object()->Type()->ToCid();
3813 if (cid != kDynamicCid) {
3814 const auto& smi = Smi::ZoneHandle(flow_graph->zone(), Smi::New(cid));
3815 return flow_graph->GetConstant(smi, representation());
3816 }
3817 return this;
3818}
3819
3821 if (value()->BindsToConstant()) {
3822 const Object& constant_value = value()->BoundConstant();
3823 if (constant_value.IsSmi() &&
3824 cids_.Contains(Smi::Cast(constant_value).Value())) {
3825 return nullptr;
3826 }
3827 }
3828 return this;
3829}
3830
3832 Token::Kind kind,
3833 Value* value,
3834 const ZoneGrowableArray<intptr_t>& cid_results,
3835 intptr_t deopt_id)
3836 : TemplateComparison(source, kind, deopt_id), cid_results_(cid_results) {
3837 ASSERT((kind == Token::kIS) || (kind == Token::kISNOT));
3838 SetInputAt(0, value);
3839 set_operation_cid(kObjectCid);
3840#ifdef DEBUG
3841 ASSERT(cid_results[0] == kSmiCid);
3842 if (deopt_id == DeoptId::kNone) {
3843 // The entry for Smi can be special, but all other entries have
3844 // to match in the no-deopt case.
3845 for (intptr_t i = 4; i < cid_results.length(); i += 2) {
3846 ASSERT(cid_results[i + 1] == cid_results[3]);
3847 }
3848 }
3849#endif
3850}
3851
3853 CompileType* in_type = value()->Type();
3854 intptr_t cid = in_type->ToCid();
3855 if (cid == kDynamicCid) return this;
3856
3858 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
3859 for (intptr_t i = 0; i < data.length(); i += 2) {
3860 if (data[i] == cid) {
3861 return (data[i + 1] == true_result)
3862 ? flow_graph->GetConstant(Bool::True())
3863 : flow_graph->GetConstant(Bool::False());
3864 }
3865 }
3866
3867 if (!CanDeoptimize()) {
3869 return (data[data.length() - 1] == true_result)
3870 ? flow_graph->GetConstant(Bool::False())
3871 : flow_graph->GetConstant(Bool::True());
3872 }
3873
3874 // TODO(sra): Handle nullable input, possibly canonicalizing to a compare
3875 // against `null`.
3876 return this;
3877}
3878
3880 Value* value,
3881 uword lower,
3882 uword upper,
3883 Representation value_representation)
3885 lower_(lower),
3886 upper_(upper),
3887 value_representation_(value_representation) {
3888 ASSERT(lower < upper);
3889 ASSERT(value_representation == kTagged ||
3890 value_representation == kUnboxedUword);
3891 SetInputAt(0, value);
3892 set_operation_cid(kObjectCid);
3893}
3894
3896 if (value()->BindsToSmiConstant()) {
3897 uword val = Smi::Cast(value()->BoundConstant()).Value();
3898 bool in_range = lower_ <= val && val <= upper_;
3899 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
3900 return flow_graph->GetConstant(
3901 Bool::Get(in_range == (kind() == Token::kIS)));
3902 }
3903
3904 const Range* range = value()->definition()->range();
3905 if (range != nullptr) {
3906 if (range->IsWithin(lower_, upper_)) {
3907 return flow_graph->GetConstant(Bool::Get(kind() == Token::kIS));
3908 }
3909 if (!range->Overlaps(lower_, upper_)) {
3910 return flow_graph->GetConstant(Bool::Get(kind() != Token::kIS));
3911 }
3912 }
3913
3914 if (LoadClassIdInstr* load_cid = value()->definition()->AsLoadClassId()) {
3915 uword lower, upper;
3916 load_cid->InferRange(&lower, &upper);
3917 if (lower >= lower_ && upper <= upper_) {
3918 return flow_graph->GetConstant(Bool::Get(kind() == Token::kIS));
3919 } else if (lower > upper_ || upper < lower_) {
3920 return flow_graph->GetConstant(Bool::Get(kind() != Token::kIS));
3921 }
3922 }
3923
3924 return this;
3925}
3926
3928 if (field().guarded_cid() == kDynamicCid) {
3929 return nullptr; // Nothing to guard.
3930 }
3931
3932 if (field().is_nullable() && value()->Type()->IsNull()) {
3933 return nullptr;
3934 }
3935
3936 const intptr_t cid = field().is_nullable() ? value()->Type()->ToNullableCid()
3937 : value()->Type()->ToCid();
3938 if (field().guarded_cid() == cid) {
3939 return nullptr; // Value is guaranteed to have this cid.
3940 }
3941
3942 return this;
3943}
3944
3946 if (!field().needs_length_check()) {
3947 return nullptr; // Nothing to guard.
3948 }
3949
3950 const intptr_t expected_length = field().guarded_list_length();
3951 if (expected_length == Field::kUnknownFixedLength) {
3952 return this;
3953 }
3954
3955 // Check if length is statically known.
3956 StaticCallInstr* call = value()->definition()->AsStaticCall();
3957 if (call == nullptr) {
3958 return this;
3959 }
3960
3961 ConstantInstr* length = nullptr;
3962 if (call->is_known_list_constructor() &&
3963 LoadFieldInstr::IsFixedLengthArrayCid(call->Type()->ToCid())) {
3964 length = call->ArgumentAt(1)->AsConstant();
3965 } else if (call->function().recognized_kind() ==
3966 MethodRecognizer::kByteDataFactory) {
3967 length = call->ArgumentAt(1)->AsConstant();
3968 } else if (LoadFieldInstr::IsTypedDataViewFactory(call->function())) {
3969 length = call->ArgumentAt(3)->AsConstant();
3970 }
3971 if ((length != nullptr) && length->value().IsSmi() &&
3972 Smi::Cast(length->value()).Value() == expected_length) {
3973 return nullptr; // Expected length matched.
3974 }
3975
3976 return this;
3977}
3978
3981 : nullptr;
3982}
3983
3985 return (value()->Type()->ToCid() == kSmiCid) ? nullptr : this;
3986}
3987
3989 if ((left()->Type()->ToCid() == kDoubleCid) ||
3990 (right()->Type()->ToCid() == kDoubleCid)) {
3991 return nullptr; // Remove from the graph.
3992 }
3993 return this;
3994}
3995
3997 return (!value()->Type()->is_nullable()) ? value()->definition() : this;
3998}
3999
4001 auto const other_check = other.AsCheckNull();
4002 ASSERT(other_check != nullptr);
4003 return function_name().Equals(other_check->function_name()) &&
4004 exception_type() == other_check->exception_type();
4005}
4006
4008 switch (from) {
4009 case kUnboxedInt8:
4010 case kUnboxedUint8:
4011 case kUnboxedInt16:
4012 case kUnboxedUint16:
4013#if defined(HAS_SMI_63_BITS)
4014 case kUnboxedInt32:
4015 case kUnboxedUint32:
4016#endif
4017 return new BoxSmallIntInstr(from, value);
4018
4019#if !defined(HAS_SMI_63_BITS)
4020 case kUnboxedInt32:
4021 return new BoxInt32Instr(value);
4022
4023 case kUnboxedUint32:
4024 return new BoxUint32Instr(value);
4025#endif
4026
4027 case kUnboxedInt64:
4028 return new BoxInt64Instr(value);
4029
4030 case kUnboxedDouble:
4031 case kUnboxedFloat:
4032 case kUnboxedFloat32x4:
4033 case kUnboxedFloat64x2:
4034 case kUnboxedInt32x4:
4035 return new BoxInstr(from, value);
4036
4037 default:
4038 UNREACHABLE();
4039 return nullptr;
4040 }
4041}
4042
4044 Value* value,
4045 intptr_t deopt_id,
4046 SpeculativeMode speculative_mode) {
4047 switch (to) {
4048 case kUnboxedInt32:
4049 // We must truncate if we can't deoptimize.
4050 return new UnboxInt32Instr(
4051 speculative_mode == SpeculativeMode::kNotSpeculative
4054 value, deopt_id, speculative_mode);
4055
4056 case kUnboxedUint32:
4057 return new UnboxUint32Instr(value, deopt_id, speculative_mode);
4058
4059 case kUnboxedInt64:
4060 return new UnboxInt64Instr(value, deopt_id, speculative_mode);
4061
4062 case kUnboxedDouble:
4063 case kUnboxedFloat:
4064 case kUnboxedFloat32x4:
4065 case kUnboxedFloat64x2:
4066 case kUnboxedInt32x4:
4067 return new UnboxInstr(to, value, deopt_id, speculative_mode);
4068
4069 default:
4070 UNREACHABLE();
4071 return nullptr;
4072 }
4073}
4074
4075bool UnboxInstr::CanConvertSmi() const {
4076 switch (representation()) {
4077 case kUnboxedDouble:
4078 case kUnboxedFloat:
4079 case kUnboxedInt32:
4080 case kUnboxedInt64:
4081 return true;
4082
4083 case kUnboxedFloat32x4:
4084 case kUnboxedFloat64x2:
4085 case kUnboxedInt32x4:
4086 return false;
4087
4088 default:
4089 UNREACHABLE();
4090 return false;
4091 }
4092}
4093
4095 const ICData& ic_data) {
4096 BinaryFeedback* result = new (zone) BinaryFeedback(zone);
4097 if (ic_data.NumArgsTested() == 2) {
4098 for (intptr_t i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
4099 if (ic_data.GetCountAt(i) == 0) {
4100 continue;
4101 }
4103 ic_data.GetClassIdsAt(i, &arg_ids);
4104 result->feedback_.Add({arg_ids[0], arg_ids[1]});
4105 }
4106 }
4107 return result;
4108}
4109
4111 intptr_t receiver_cid,
4112 intptr_t argument_cid) {
4113 BinaryFeedback* result = new (zone) BinaryFeedback(zone);
4114 result->feedback_.Add({receiver_cid, argument_cid});
4115 return result;
4116}
4117
4119 intptr_t receiver_cid,
4120 const Function& target) {
4121 CallTargets* targets = new (zone) CallTargets(zone);
4122 const intptr_t count = 1;
4123 targets->cid_ranges_.Add(new (zone) TargetInfo(
4124 receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.ptr()),
4126 return targets;
4127}
4128
4129const CallTargets* CallTargets::Create(Zone* zone, const ICData& ic_data) {
4130 CallTargets* targets = new (zone) CallTargets(zone);
4131 targets->CreateHelper(zone, ic_data);
4132 targets->Sort(OrderById);
4133 targets->MergeIntoRanges();
4134 return targets;
4135}
4136
4138 const ICData& ic_data) {
4139 CallTargets& targets = *new (zone) CallTargets(zone);
4140 targets.CreateHelper(zone, ic_data);
4141
4142 if (targets.is_empty() || targets.IsMonomorphic()) {
4143 return &targets;
4144 }
4145
4146 targets.Sort(OrderById);
4147
4148 Array& args_desc_array = Array::Handle(zone, ic_data.arguments_descriptor());
4149 ArgumentsDescriptor args_desc(args_desc_array);
4150 String& name = String::Handle(zone, ic_data.target_name());
4151
4152 Function& fn = Function::Handle(zone);
4153
4154 intptr_t length = targets.length();
4155
4156 // Merging/extending cid ranges is also done in Cids::CreateAndExpand.
4157 // If changing this code, consider also adjusting Cids code.
4158
4159 // Spread class-ids to preceding classes where a lookup yields the same
4160 // method. A polymorphic target is not really the same method since its
4161 // behaviour depends on the receiver class-id, so we don't spread the
4162 // class-ids in that case.
4163 for (int idx = 0; idx < length; idx++) {
4164 int lower_limit_cid = (idx == 0) ? -1 : targets[idx - 1].cid_end;
4165 auto target_info = targets.TargetAt(idx);
4166 const Function& target = *target_info->target;
4167 if (target.is_polymorphic_target()) continue;
4168 for (int i = target_info->cid_start - 1; i > lower_limit_cid; i--) {
4169 bool class_is_abstract = false;
4170 if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
4171 &class_is_abstract) &&
4172 fn.ptr() == target.ptr()) {
4173 if (!class_is_abstract) {
4174 target_info->cid_start = i;
4175 target_info->exactness = StaticTypeExactnessState::NotTracking();
4176 }
4177 } else {
4178 break;
4179 }
4180 }
4181 }
4182
4183 // Spread class-ids to following classes where a lookup yields the same
4184 // method.
4185 const intptr_t max_cid = IsolateGroup::Current()->class_table()->NumCids();
4186 for (int idx = 0; idx < length; idx++) {
4187 int upper_limit_cid =
4188 (idx == length - 1) ? max_cid : targets[idx + 1].cid_start;
4189 auto target_info = targets.TargetAt(idx);
4190 const Function& target = *target_info->target;
4191 if (target.is_polymorphic_target()) continue;
4192 // The code below makes an attempt to avoid spreading a class-id range
4193 // into a suffix that consists purely of abstract classes, in order to
4194 // shorten the range.
4195 // However, such spreading is beneficial when it allows us to
4196 // merge two consecutive ranges.
4197 intptr_t cid_end_including_abstract = target_info->cid_end;
4198 for (int i = target_info->cid_end + 1; i < upper_limit_cid; i++) {
4199 bool class_is_abstract = false;
4200 if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
4201 &class_is_abstract) &&
4202 fn.ptr() == target.ptr()) {
4203 cid_end_including_abstract = i;
4204 if (!class_is_abstract) {
4205 target_info->cid_end = i;
4206 target_info->exactness = StaticTypeExactnessState::NotTracking();
4207 }
4208 } else {
4209 break;
4210 }
4211 }
4212
4213 // Check if we have a suffix that consists of abstract classes
4214 // and expand into it if that would allow us to merge this
4215 // range with the subsequent range.
4216 if ((cid_end_including_abstract > target_info->cid_end) &&
4217 (idx < length - 1) &&
4218 ((cid_end_including_abstract + 1) == targets[idx + 1].cid_start) &&
4219 (target.ptr() == targets.TargetAt(idx + 1)->target->ptr())) {
4220 target_info->cid_end = cid_end_including_abstract;
4221 target_info->exactness = StaticTypeExactnessState::NotTracking();
4222 }
4223 }
4224 targets.MergeIntoRanges();
4225 return &targets;
4226}
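
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): CreateAndExpand above widens each
// [cid_start, cid_end] entry as long as looking up the selector on the
// neighbouring class id still resolves to the same target function. A
// simplified standalone version of the downward-spreading loop, with a
// caller-supplied lookup callback (all names here are hypothetical):
struct SketchTargetRange {
  long long cid_start;
  long long cid_end;
  const void* target;  // Stand-in for the resolved Function.
};

template <typename LookupFn>
static void SketchSpreadRangeDown(SketchTargetRange* range,
                                  long long lower_limit_cid,
                                  LookupFn lookup) {
  // Extend the range to preceding cids while they resolve to the same target.
  for (long long cid = range->cid_start - 1; cid > lower_limit_cid; cid--) {
    if (lookup(cid) != range->target) break;
    range->cid_start = cid;
  }
}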
4227
4228void CallTargets::MergeIntoRanges() {
4229 if (length() == 0) {
4230 return; // For correctness not performance: must not update length to 1.
4231 }
4232
4233 // Merge adjacent class id ranges.
4234 int dest = 0;
4235 // We merge entries that dispatch to the same target, but polymorphic targets
4236 // are not really the same target since they depend on the class-id, so we
4237 // don't merge them.
4238 for (int src = 1; src < length(); src++) {
4239 const Function& target = *TargetAt(dest)->target;
4240 if (TargetAt(dest)->cid_end + 1 >= TargetAt(src)->cid_start &&
4241 target.ptr() == TargetAt(src)->target->ptr() &&
4242 !target.is_polymorphic_target()) {
4246 } else {
4247 dest++;
4248 if (src != dest) {
4249 // Use cid_ranges_ instead of TargetAt when updating the pointer.
4251 }
4252 }
4253 }
4254 SetLength(dest + 1);
4256}
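
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): MergeIntoRanges above compacts the
// sorted target list in place, merging neighbouring entries whose cid ranges
// touch or overlap and that dispatch to the same (non-polymorphic) target. A
// minimal standalone version over a plain array, returning the new length
// (names are hypothetical):
struct SketchCidRange {
  long long cid_start;
  long long cid_end;
  const void* target;
};

static int SketchMergeAdjacentRanges(SketchCidRange* ranges, int length) {
  if (length == 0) return 0;
  int dest = 0;
  for (int src = 1; src < length; src++) {
    if (ranges[dest].cid_end + 1 >= ranges[src].cid_start &&
        ranges[dest].target == ranges[src].target) {
      if (ranges[src].cid_end > ranges[dest].cid_end) {
        ranges[dest].cid_end = ranges[src].cid_end;  // Extend the range.
      }
    } else {
      ranges[++dest] = ranges[src];  // Keep as a separate range.
    }
  }
  return dest + 1;  // Number of merged entries.
}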
4257
4259 for (intptr_t i = 0; i < length(); i++) {
4260 THR_Print("cid = [%" Pd ", %" Pd "], count = %" Pd ", target = %s\n",
4261 TargetAt(i)->cid_start, TargetAt(i)->cid_end, TargetAt(i)->count,
4262 TargetAt(i)->target->ToQualifiedCString());
4263 }
4264}
4265
4266// Shared code generation methods (EmitNativeCode and
4267// MakeLocationSummary). Only assembly code that can be shared across all
4268// architectures can be used. Machine-specific register allocation and code
4269// generation are located in intermediate_language_<arch>.cc.
4270
4271#define __ compiler->assembler()->
4272
4273LocationSummary* GraphEntryInstr::MakeLocationSummary(Zone* zone,
4274 bool optimizing) const {
4275 UNREACHABLE();
4276 return nullptr;
4277}
4278
4279LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone,
4280 bool optimizing) const {
4281 UNREACHABLE();
4282 return nullptr;
4283}
4284
4285void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4286 __ Bind(compiler->GetJumpLabel(this));
4287 if (!compiler->is_optimizing()) {
4288 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4289 InstructionSource());
4290 }
4291 if (HasParallelMove()) {
4293 }
4294}
4295
4296LocationSummary* TargetEntryInstr::MakeLocationSummary(Zone* zone,
4297 bool optimizing) const {
4298 UNREACHABLE();
4299 return nullptr;
4300}
4301
4302void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4303 __ Bind(compiler->GetJumpLabel(this));
4304
4305 // TODO(kusterman): Remove duplicate between
4306 // {TargetEntryInstr,FunctionEntryInstr}::EmitNativeCode.
4307 if (!compiler->is_optimizing()) {
4308 if (compiler->NeedsEdgeCounter(this)) {
4309 compiler->EmitEdgeCounter(preorder_number());
4310 }
4311
4312 // The deoptimization descriptor points after the edge counter code for
4313 // uniformity with ARM, where we can reuse pattern matching code that
4314 // matches backwards from the end of the pattern.
4315 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4316 InstructionSource());
4317 }
4318 if (HasParallelMove()) {
4320 compiler->EmitComment(parallel_move());
4321 }
4323 }
4324}
4325
4327 Zone* zone,
4328 bool optimizing) const {
4329 UNREACHABLE();
4330 return nullptr;
4331}
4332
4333void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4334#if defined(TARGET_ARCH_X64)
4335 // Ensure the start of the monomorphic checked entry is 2-byte aligned (see
4336 // also Assembler::MonomorphicCheckedEntry()).
4337 if (__ CodeSize() % 2 == 1) {
4338 __ nop();
4339 }
4340#endif
4341 if (tag() == Instruction::kFunctionEntry) {
4342 __ Bind(compiler->GetJumpLabel(this));
4343 }
4344
4345 if (this == compiler->flow_graph().graph_entry()->unchecked_entry()) {
4346 __ BindUncheckedEntryPoint();
4347 }
4348
4349 // In the AOT compiler we want to reduce code size, so generate no
4350 // fall-through code in [FlowGraphCompiler::CompileGraph()].
4351 // (As opposed to here where we don't check for the return value of
4352 // [Intrinsify]).
4353 const Function& function = compiler->parsed_function().function();
4354
4355 if (function.NeedsMonomorphicCheckedEntry(compiler->zone())) {
4357 if (!FLAG_precompiled_mode) {
4358 __ MonomorphicCheckedEntryJIT();
4359 } else {
4360 __ MonomorphicCheckedEntryAOT();
4361 }
4363 }
4364
4365 // NOTE: Because of the presence of multiple entry-points, we generate the
4366 // same intrinsification & frame setup several times. That's why we cannot
4367 // rely on the constant pool being `false` when we come in here.
4368#if defined(TARGET_USES_OBJECT_POOL)
4369 __ set_constant_pool_allowed(false);
4370#endif
4371
4372 if (compiler->TryIntrinsify() && compiler->skip_body_compilation()) {
4373 return;
4374 }
4375 compiler->EmitPrologue();
4376
4377#if defined(TARGET_USES_OBJECT_POOL)
4378 ASSERT(__ constant_pool_allowed());
4379#endif
4380
4381 if (!compiler->is_optimizing()) {
4382 if (compiler->NeedsEdgeCounter(this)) {
4383 compiler->EmitEdgeCounter(preorder_number());
4384 }
4385
4386 // The deoptimization descriptor points after the edge counter code for
4387 // uniformity with ARM, where we can reuse pattern matching code that
4388 // matches backwards from the end of the pattern.
4389 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4390 InstructionSource());
4391 }
4392 if (HasParallelMove()) {
4394 compiler->EmitComment(parallel_move());
4395 }
4397 }
4398}
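
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): on X64 the monomorphic checked
// entry emitted above must start at an even code offset, so a single one-byte
// nop is inserted whenever the current code size is odd. The padding amount
// is simply:
static int SketchPaddingForTwoByteAlignment(int code_size_in_bytes) {
  return code_size_in_bytes % 2;  // 1 when odd (emit one nop), 0 when even.
}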
4399
4400LocationSummary* NativeEntryInstr::MakeLocationSummary(Zone* zone,
4401 bool optimizing) const {
4402 UNREACHABLE();
4403}
4404
4405void NativeEntryInstr::SaveArguments(FlowGraphCompiler* compiler) const {
4406 __ Comment("SaveArguments");
4407
4408 // Save the argument registers, in reverse order.
4409 const auto& return_loc = marshaller_.Location(compiler::ffi::kResultIndex);
4410 if (return_loc.IsPointerToMemory()) {
4411 SaveArgument(compiler, return_loc.AsPointerToMemory().pointer_location());
4412 }
4413 for (intptr_t i = marshaller_.num_args(); i-- > 0;) {
4414 SaveArgument(compiler, marshaller_.Location(i));
4415 }
4416
4417 __ Comment("SaveArgumentsEnd");
4418}
4419
4422 const compiler::ffi::NativeLocation& nloc) const {
4423 if (nloc.IsStack()) return;
4424
4425 if (nloc.IsRegisters()) {
4426 const auto& reg_loc = nloc.WidenTo4Bytes(compiler->zone()).AsRegisters();
4427 const intptr_t num_regs = reg_loc.num_regs();
4428 // Save higher-order component first, so bytes are in little-endian layout
4429 // overall.
4430 for (intptr_t i = num_regs - 1; i >= 0; i--) {
4431 __ PushRegister(reg_loc.reg_at(i));
4432 }
4433 } else if (nloc.IsFpuRegisters()) {
4434 // TODO(dartbug.com/40469): Reduce code size.
4435 __ AddImmediate(SPREG, -8);
4436 NoTemporaryAllocator temp_alloc;
4438 nloc.payload_type(), nloc.payload_type(), SPREG, 0);
4439 compiler->EmitNativeMove(dst, nloc, &temp_alloc);
4440 } else if (nloc.IsPointerToMemory()) {
4441 const auto& pointer_loc = nloc.AsPointerToMemory().pointer_location();
4442 if (pointer_loc.IsRegisters()) {
4443 const auto& regs_loc = pointer_loc.AsRegisters();
4444 ASSERT(regs_loc.num_regs() == 1);
4445 __ PushRegister(regs_loc.reg_at(0));
4446 } else {
4447 ASSERT(pointer_loc.IsStack());
4448 // It's already on the stack, so we don't have to save it.
4449 }
4450 } else if (nloc.IsMultiple()) {
4451 const auto& multiple = nloc.AsMultiple();
4452 const intptr_t num = multiple.locations().length();
4453 // Save the argument registers, in reverse order.
4454 for (intptr_t i = num; i-- > 0;) {
4455 SaveArgument(compiler, *multiple.locations().At(i));
4456 }
4457 } else {
4458 ASSERT(nloc.IsBoth());
4459 const auto& both = nloc.AsBoth();
4460 SaveArgument(compiler, both.location(0));
4461 }
4462}
4463
4465 bool optimizing) const {
4466 UNREACHABLE();
4467 return nullptr;
4468}
4469
4470void OsrEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4471 ASSERT(!CompilerState::Current().is_aot());
4472 ASSERT(compiler->is_optimizing());
4473 __ Bind(compiler->GetJumpLabel(this));
4474
4475 // NOTE: Because the graph can have multiple entrypoints, we generate the
4476 // same intrinsification & frame setup several times. That's why we cannot
4477 // rely on the constant pool being `false` when we come in here.
4478#if defined(TARGET_USES_OBJECT_POOL)
4479 __ set_constant_pool_allowed(false);
4480#endif
4481
4482 compiler->EmitPrologue();
4483
4484#if defined(TARGET_USES_OBJECT_POOL)
4485 ASSERT(__ constant_pool_allowed());
4486#endif
4487
4488 if (HasParallelMove()) {
4490 compiler->EmitComment(parallel_move());
4491 }
4493 }
4494}
4495
4497 ASSERT(SuccessorCount() == offsets_.Length());
4498 intptr_t element_size = offsets_.ElementSizeInBytes();
4499 for (intptr_t i = 0; i < SuccessorCount(); i++) {
4501 auto* label = compiler->GetJumpLabel(target);
4502 RELEASE_ASSERT(label != nullptr);
4503 RELEASE_ASSERT(label->IsBound());
4504 intptr_t offset = label->Position();
4506 offsets_.SetInt32(i * element_size, offset);
4507 }
4508}
4509
4511 Zone* zone,
4512 bool optimizing) const {
4513 return JoinEntryInstr::MakeLocationSummary(zone, optimizing);
4514}
4515
4516void IndirectEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4518}
4519
4520LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone,
4521 bool opt) const {
4522 const intptr_t kNumInputs = 0;
4523 const bool use_shared_stub = UseSharedSlowPathStub(opt);
4524 const intptr_t kNumTemps = calls_initializer() &&
4526 use_shared_stub
4527 ? 1
4528 : 0;
4529 LocationSummary* locs = new (zone) LocationSummary(
4530 zone, kNumInputs, kNumTemps,
4533 ? (use_shared_stub ? LocationSummary::kCallOnSharedSlowPath
4538 use_shared_stub) {
4539 locs->set_temp(
4541 }
4544 : Location::RequiresRegister());
4545 return locs;
4546}
4547
4548void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4549 const Register result = locs()->out(0).reg();
4550
4551 compiler->used_static_fields().Add(&field());
4552
4553 // Note: static field ids won't be changed by hot-reload.
4554 const intptr_t field_table_offset =
4555 field().is_shared()
4557 : compiler::target::Thread::field_table_values_offset();
4558 const intptr_t field_offset = compiler::target::FieldTable::OffsetOf(field());
4559
4560 __ LoadMemoryValue(result, THR, static_cast<int32_t>(field_table_offset));
4561 __ LoadMemoryValue(result, result, static_cast<int32_t>(field_offset));
4562
4563 if (calls_initializer()) {
4565 ThrowErrorSlowPathCode* slow_path =
4566 new LateInitializationErrorSlowPath(this);
4567 compiler->AddSlowPathCode(slow_path);
4568
4569 __ CompareObject(result, Object::sentinel());
4570 __ BranchIf(EQUAL, slow_path->entry_label());
4571 return;
4572 }
4573 ASSERT(field().has_initializer());
4574 auto object_store = compiler->isolate_group()->object_store();
4575 const Field& original_field = Field::ZoneHandle(field().Original());
4576
4577 compiler::Label no_call, call_initializer;
4578 __ CompareObject(result, Object::sentinel());
4579 if (!field().is_late()) {
4580 __ BranchIf(EQUAL, &call_initializer);
4581 __ CompareObject(result, Object::transition_sentinel());
4582 }
4583 __ BranchIf(NOT_EQUAL, &no_call);
4584
4585 auto& stub = Code::ZoneHandle(compiler->zone());
4586 __ Bind(&call_initializer);
4587 if (field().needs_load_guard()) {
4588 stub = object_store->init_static_field_stub();
4589 } else if (field().is_late()) {
4590 // The stubs below call the initializer function directly, so make sure
4591 // one is created.
4592 original_field.EnsureInitializerFunction();
4593 stub =
4594 field().is_shared()
4595 ? (field().is_final()
4596 ? object_store->init_shared_late_final_static_field_stub()
4597 : object_store->init_shared_late_static_field_stub())
4598 : (field().is_final()
4599 ? object_store->init_late_final_static_field_stub()
4600 : object_store->init_late_static_field_stub());
4601 } else {
4602 // We call into the runtime for non-late fields because the stub would need
4603 // to catch any exception generated by the initialization function to change
4604 // the value of the static field from the transition sentinel to null.
4605 stub = object_store->init_static_field_stub();
4606 }
4607
4608 __ LoadObject(InitStaticFieldABI::kFieldReg, original_field);
4609 compiler->GenerateStubCall(source(), stub,
4610 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4611 deopt_id(), env());
4612
4613 __ Bind(&no_call);
4614 }
4615}
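
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): the static-field load above boils
// down to "read the slot from the field table; if it still holds the
// sentinel, call an initializer stub". A plain-C++ rendering of that control
// flow, with the sentinel object and stub signature as stand-ins (all names
// are hypothetical, and the real stub writes the field itself):
static const void* const kSketchSentinel = &kSketchSentinel;

static const void* SketchLoadStaticFieldWithLazyInit(
    const void** field_slot,
    const void* (*init_stub)()) {
  const void* value = *field_slot;  // LoadMemoryValue in the code above.
  if (value == kSketchSentinel) {   // CompareObject + BranchIf above.
    value = init_stub();            // GenerateStubCall above.
    *field_slot = value;
  }
  return value;
}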
4616
4617LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
4618 bool opt) const {
4619 const intptr_t kNumInputs = 1;
4620 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
4622}
4623
4624void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4625 Register obj = locs()->in(0).reg();
4626 Register result = locs()->out(0).reg();
4627 ASSERT(object()->definition()->representation() == kUntagged);
4628 __ LoadFromOffset(result, obj, offset());
4629}
4630
4631LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
4632 bool opt) const {
4633 const intptr_t kNumInputs = 1;
4634 LocationSummary* locs = nullptr;
4635 auto const rep = slot().representation();
4636 if (calls_initializer()) {
4638 const bool using_shared_stub = UseSharedSlowPathStub(opt);
4639 const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
4640 locs = new (zone) LocationSummary(
4641 zone, kNumInputs, kNumTemps,
4642 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
4644 if (using_shared_stub) {
4647 }
4650 } else {
4651 const intptr_t kNumTemps = 0;
4652 locs = new (zone)
4653 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4654 locs->set_in(
4656 locs->set_out(
4658 }
4659 } else {
4660 const intptr_t kNumTemps = 0;
4661 locs = new (zone)
4662 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4664 if (rep == kTagged || rep == kUntagged) {
4666 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
4667 const size_t value_size = RepresentationUtils::ValueSize(rep);
4668 if (value_size <= compiler::target::kWordSize) {
4670 } else {
4671 ASSERT(value_size == 2 * compiler::target::kWordSize);
4674 }
4675 } else {
4677 }
4678 }
4679 return locs;
4680}
4681
4682void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4683 const Register instance_reg = locs()->in(0).reg();
4684 ASSERT(OffsetInBytes() >= 0); // Field is finalized.
4685 // For fields on Dart objects, the offset must point after the header.
4686 ASSERT(OffsetInBytes() != 0 || slot().has_untagged_instance());
4687
4688 auto const rep = slot().representation();
4689 if (calls_initializer()) {
4690 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4691 EmitNativeCodeForInitializerCall(compiler);
4692 } else if (rep == kTagged || rep == kUntagged) {
4693 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4694 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
4695 const size_t value_size = RepresentationUtils::ValueSize(rep);
4696 if (value_size <= compiler::target::kWordSize) {
4697 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4698 } else {
4699 auto const result_pair = locs()->out(0).AsPairLocation();
4700 const Register result_lo = result_pair->At(0).reg();
4701 const Register result_hi = result_pair->At(1).reg();
4702 __ LoadFieldFromOffset(result_lo, instance_reg, OffsetInBytes());
4703 __ LoadFieldFromOffset(result_hi, instance_reg,
4704 OffsetInBytes() + compiler::target::kWordSize);
4705 }
4706 } else {
4707 ASSERT(slot().IsDartField());
4708 const intptr_t cid = slot().field().guarded_cid();
4709 const FpuRegister result = locs()->out(0).fpu_reg();
4710 switch (cid) {
4711 case kDoubleCid:
4712 __ LoadUnboxedDouble(result, instance_reg,
4713 OffsetInBytes() - kHeapObjectTag);
4714 break;
4715 case kFloat32x4Cid:
4716 case kFloat64x2Cid:
4717 __ LoadUnboxedSimd128(result, instance_reg,
4718 OffsetInBytes() - kHeapObjectTag);
4719 break;
4720 default:
4721 UNREACHABLE();
4722 }
4723 }
4724}
4725
4726void LoadFieldInstr::EmitNativeCodeForInitializerCall(
4727 FlowGraphCompiler* compiler) {
4729
4731 ThrowErrorSlowPathCode* slow_path =
4732 new LateInitializationErrorSlowPath(this);
4733 compiler->AddSlowPathCode(slow_path);
4734
4735 const Register result_reg = locs()->out(0).reg();
4736 __ CompareObject(result_reg, Object::sentinel());
4737 __ BranchIf(EQUAL, slow_path->entry_label());
4738 return;
4739 }
4740
4743 ASSERT(slot().IsDartField());
4744 const Field& field = slot().field();
4745 const Field& original_field = Field::ZoneHandle(field.Original());
4746
4747 compiler::Label no_call;
4748 __ CompareObject(InitInstanceFieldABI::kResultReg, Object::sentinel());
4749 __ BranchIf(NOT_EQUAL, &no_call);
4750
4751 __ LoadObject(InitInstanceFieldABI::kFieldReg, original_field);
4752
4753 auto object_store = compiler->isolate_group()->object_store();
4754 auto& stub = Code::ZoneHandle(compiler->zone());
4755 if (field.needs_load_guard()) {
4756 stub = object_store->init_instance_field_stub();
4757 } else if (field.is_late()) {
4758 if (!field.has_nontrivial_initializer()) {
4759 stub = object_store->init_instance_field_stub();
4760 } else {
4761 // Stubs for late field initialization call the initializer
4762 // function directly, so make sure one is created.
4763 original_field.EnsureInitializerFunction();
4764
4765 if (field.is_final()) {
4766 stub = object_store->init_late_final_instance_field_stub();
4767 } else {
4768 stub = object_store->init_late_instance_field_stub();
4769 }
4770 }
4771 } else {
4772 UNREACHABLE();
4773 }
4774
4775 compiler->GenerateStubCall(source(), stub,
4776 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4777 deopt_id(), env());
4778 __ Bind(&no_call);
4779}
4780
4781LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4782 const intptr_t kNumInputs = 1;
4783 const intptr_t kNumTemps = 0;
4784 LocationSummary* summary = new (zone)
4785 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4787 return summary;
4788}
4789
4790void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4791 auto object_store = compiler->isolate_group()->object_store();
4792 const auto& throw_stub =
4793 Code::ZoneHandle(compiler->zone(), object_store->throw_stub());
4794
4795 compiler->GenerateStubCall(source(), throw_stub,
4796 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4797 deopt_id(), env());
4798 // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
4799 // instruction: The ThrowInstr will terminate the current block. The very
4800 // next machine code instruction might get a pc descriptor attached with a
4801 // different try-index. If we removed this breakpoint instruction, the
4802 // runtime might associate this call with the try-index of the next
4803 // instruction.
4804 __ Breakpoint();
4805}
4806
4807LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4808 const intptr_t kNumInputs = 2;
4809 const intptr_t kNumTemps = 0;
4810 LocationSummary* summary = new (zone)
4811 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4814 return summary;
4815}
4816
4817void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4818 auto object_store = compiler->isolate_group()->object_store();
4819 const auto& re_throw_stub =
4820 Code::ZoneHandle(compiler->zone(), object_store->re_throw_stub());
4821
4822 compiler->SetNeedsStackTrace(catch_try_index());
4823 compiler->GenerateStubCall(source(), re_throw_stub,
4824 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4825 deopt_id(), env());
4826 // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
4827 // instruction: The ReThrowInstr will terminate the current block. The very
4828 // next machine code instruction might get a pc descriptor attached with a
4829 // different try-index. If we removed this breakpoint instruction, the
4830 // runtime might associate this call with the try-index of the next
4831 // instruction.
4832 __ Breakpoint();
4833}
4834
4835LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone,
4836 bool opt) const {
4837 const intptr_t kNumInputs = 1;
4838 const intptr_t kNumTemps = 0;
4839 LocationSummary* locs = new (zone)
4840 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4843 return locs;
4844}
4845
4846LocationSummary* PhiInstr::MakeLocationSummary(Zone* zone,
4847 bool optimizing) const {
4848 UNREACHABLE();
4849 return nullptr;
4850}
4851
4852void PhiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4853 UNREACHABLE();
4854}
4855
4856LocationSummary* RedefinitionInstr::MakeLocationSummary(Zone* zone,
4857 bool optimizing) const {
4858 UNREACHABLE();
4859 return nullptr;
4860}
4861
4862void RedefinitionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4863 UNREACHABLE();
4864}
4865
4867 Zone* zone,
4868 bool optimizing) const {
4869 LocationSummary* summary = new (zone)
4870 LocationSummary(zone, 1, 0, LocationSummary::ContainsCall::kNoCall);
4871 // Keep the parameter alive and reachable, in any location.
4872 summary->set_in(0, Location::Any());
4873 return summary;
4874}
4875
4876void ReachabilityFenceInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4877 // No native code, but we rely on the parameter being passed in here so that
4878 // it stays alive and reachable.
4879}
4880
4881LocationSummary* ParameterInstr::MakeLocationSummary(Zone* zone,
4882 bool optimizing) const {
4883 UNREACHABLE();
4884 return nullptr;
4885}
4886
4887void ParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4888 UNREACHABLE();
4889}
4890
4891void NativeParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4892 // There are two frames between SaveArguments and the NativeParameterInstr
4893 // moves.
4894 constexpr intptr_t delta =
4895 kCallerSpSlotFromFp // second frame FP to exit link slot
4896 + -kExitLinkSlotFromEntryFp // exit link slot to first frame FP
4897 + kCallerSpSlotFromFp; // first frame FP to argument save SP
4898 compiler::ffi::FrameRebase rebase(compiler->zone(),
4899 /*old_base=*/SPREG, /*new_base=*/FPREG,
4901 const auto& location =
4902 marshaller_.NativeLocationOfNativeParameter(def_index_);
4903 const auto& src =
4904 rebase.Rebase(location.IsPointerToMemory()
4905 ? location.AsPointerToMemory().pointer_location()
4906 : location);
4907 NoTemporaryAllocator no_temp;
4908 const Location out_loc = locs()->out(0);
4909 const Representation out_rep = representation();
4910 compiler->EmitMoveFromNative(out_loc, out_rep, src, &no_temp);
4911}
4912
4913LocationSummary* NativeParameterInstr::MakeLocationSummary(Zone* zone,
4914 bool opt) const {
4915 ASSERT(opt);
4917 if (representation() == kUnboxedInt64 && compiler::target::kWordSize < 8) {
4920 } else {
4923 : Location::RequiresFpuRegister();
4924 }
4925 return LocationSummary::Make(zone, /*num_inputs=*/0, output,
4927}
4928
4930 for (intptr_t i = 0; i < moves_.length(); i++) {
4931 if (!moves_[i]->IsRedundant()) {
4932 return false;
4933 }
4934 }
4935 return true;
4936}
4937
4939 bool optimizing) const {
4940 return nullptr;
4941}
4942
4943void ParallelMoveInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4944 ParallelMoveEmitter(compiler, this).EmitNativeCode();
4945}
4946
4947LocationSummary* ConstraintInstr::MakeLocationSummary(Zone* zone,
4948 bool optimizing) const {
4949 UNREACHABLE();
4950 return nullptr;
4951}
4952
4953void ConstraintInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4954 UNREACHABLE();
4955}
4956
4958 Zone* zone,
4959 bool optimizing) const {
4960 UNREACHABLE();
4961 return nullptr;
4962}
4963
4964void MaterializeObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4965 UNREACHABLE();
4966}
4967
4968// This function should be kept in sync with
4969// FlowGraphCompiler::SlowPathEnvironmentFor().
4970void MaterializeObjectInstr::RemapRegisters(intptr_t* cpu_reg_slots,
4971 intptr_t* fpu_reg_slots) {
4972 if (registers_remapped_) {
4973 return;
4974 }
4975 registers_remapped_ = true;
4976
4977 for (intptr_t i = 0; i < InputCount(); i++) {
4978 locations_[i] = LocationRemapForSlowPath(
4979 LocationAt(i), InputAt(i)->definition(), cpu_reg_slots, fpu_reg_slots);
4980 }
4981}
4982
4984 bool optimizing) const {
4985 ASSERT(!optimizing);
4986 null_->InitializeLocationSummary(zone, optimizing);
4987 return null_->locs();
4988}
4989
4990void MakeTempInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4991 ASSERT(!compiler->is_optimizing());
4992 null_->EmitNativeCode(compiler);
4993}
4994
4995LocationSummary* DropTempsInstr::MakeLocationSummary(Zone* zone,
4996 bool optimizing) const {
4997 ASSERT(!optimizing);
4998 return (InputCount() == 1)
5001 : LocationSummary::Make(zone, 0, Location::NoLocation(),
5002 LocationSummary::kNoCall);
5003}
5004
5005void DropTempsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5006 ASSERT(!compiler->is_optimizing());
5007 // Assert that register assignment is correct.
5008 ASSERT((InputCount() == 0) || (locs()->out(0).reg() == locs()->in(0).reg()));
5009 __ Drop(num_temps());
5010}
5011
5012LocationSummary* BoxSmallIntInstr::MakeLocationSummary(Zone* zone,
5013 bool opt) const {
5016 const intptr_t kNumInputs = 1;
5017 const intptr_t kNumTemps = 0;
5018 LocationSummary* summary = new (zone)
5019 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5020 summary->set_in(0, Location::RequiresRegister());
5021 summary->set_out(0, Location::RequiresRegister());
5022 return summary;
5023}
5024
5025void BoxSmallIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5026 const Register value = locs()->in(0).reg();
5027 const Register out = locs()->out(0).reg();
5028 ASSERT(value != out);
5029
5030 __ ExtendAndSmiTagValue(
5032}
5033
5035 Token::Kind kind,
5036 Value* left,
5037 Value* right,
5038 bool needs_number_check,
5039 intptr_t deopt_id)
5040 : TemplateComparison(source, kind, deopt_id),
5041 needs_number_check_(needs_number_check) {
5042 ASSERT((kind == Token::kEQ_STRICT) || (kind == Token::kNE_STRICT));
5043 SetInputAt(0, left);
5044 SetInputAt(1, right);
5045}
5046
5047Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
5048 BranchLabels labels) {
5049 Location left = locs()->in(0);
5050 Location right = locs()->in(1);
5051 ASSERT(!left.IsConstant() || !right.IsConstant());
5052 Condition true_condition;
5053 if (left.IsConstant()) {
5054 if (TryEmitBoolTest(compiler, labels, 1, left.constant(),
5055 &true_condition)) {
5056 return true_condition;
5057 }
5058 true_condition = EmitComparisonCodeRegConstant(
5059 compiler, labels, right.reg(), left.constant());
5060 } else if (right.IsConstant()) {
5061 if (TryEmitBoolTest(compiler, labels, 0, right.constant(),
5062 &true_condition)) {
5063 return true_condition;
5064 }
5065 true_condition = EmitComparisonCodeRegConstant(compiler, labels, left.reg(),
5066 right.constant());
5067 } else {
5068 true_condition = compiler->EmitEqualityRegRegCompare(
5069 left.reg(), right.reg(), needs_number_check(), source(), deopt_id());
5070 }
5071 return true_condition != kInvalidCondition && (kind() != Token::kEQ_STRICT)
5072 ? InvertCondition(true_condition)
5073 : true_condition;
5074}
5075
5077 BranchLabels labels,
5078 intptr_t input_index,
5079 const Object& obj,
5080 Condition* true_condition_out) {
5081 CompileType* input_type = InputAt(input_index)->Type();
5082 if (input_type->ToCid() == kBoolCid && obj.GetClassId() == kBoolCid) {
5083 bool invert = (kind() != Token::kEQ_STRICT) ^ !Bool::Cast(obj).value();
5084 *true_condition_out =
5085 compiler->EmitBoolTest(locs()->in(input_index).reg(), labels, invert);
5086 return true;
5087 }
5088 return false;
5089}
5090
5092 bool opt) const {
5093 const intptr_t kNumInputs = 1;
5094 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
5096}
5097
5098void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5099 const Register object = locs()->in(0).reg();
5100 const Register result = locs()->out(0).reg();
5101 if (input_can_be_smi_ && this->object()->Type()->CanBeSmi()) {
5102 if (representation() == kTagged) {
5103 __ LoadTaggedClassIdMayBeSmi(result, object);
5104 } else {
5105 __ LoadClassIdMayBeSmi(result, object);
5106 }
5107 } else {
5108 __ LoadClassId(result, object);
5109 if (representation() == kTagged) {
5110 __ SmiTag(result);
5111 }
5112 }
5113}
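
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): the "may be Smi" path above exists
// because Smis carry no heap header -- their class id is implied by the tag
// bit, while heap objects have the class id stored in their header. Assuming
// the usual one-bit Smi tagging scheme (names and constants are hypothetical):
static long long SketchClassIdMayBeSmi(
    unsigned long long tagged_value,
    long long smi_cid,
    long long (*load_cid_from_header)(unsigned long long)) {
  const unsigned long long kSketchSmiTagMask = 1;
  if ((tagged_value & kSketchSmiTagMask) == 0) {
    return smi_cid;  // Smis are tagged with a clear low bit.
  }
  return load_cid_from_header(tagged_value);  // Heap object: read the header.
}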
5114
5115LocationSummary* TestRangeInstr::MakeLocationSummary(Zone* zone,
5116 bool opt) const {
5117#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
5118 defined(TARGET_ARCH_ARM)
5119 const bool needs_temp = (lower() != 0);
5120#else
5121 const bool needs_temp = false;
5122#endif
5123 const intptr_t kNumInputs = 1;
5124 const intptr_t kNumTemps = needs_temp ? 1 : 0;
5125 LocationSummary* locs = new (zone)
5126 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5128 if (needs_temp) {
5130 }
5132 return locs;
5133}
5134
5135Condition TestRangeInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
5136 BranchLabels labels) {
5137 intptr_t lower = lower_;
5138 intptr_t upper = upper_;
5139 if (value_representation_ == kTagged) {
5142 }
5143
5144 Register in = locs()->in(0).reg();
5145 if (lower == 0) {
5146 __ CompareImmediate(in, upper);
5147 } else {
5148#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
5149 defined(TARGET_ARCH_ARM)
5150 Register temp = locs()->temp(0).reg();
5151#else
5152 Register temp = TMP;
5153#endif
5154 __ AddImmediate(temp, in, -lower);
5155 __ CompareImmediate(temp, upper - lower);
5156 }
5157 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
5158 return kind() == Token::kIS ? UNSIGNED_LESS_EQUAL : UNSIGNED_GREATER;
5159}
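
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): the AddImmediate/CompareImmediate
// pair above folds the two signed comparisons `lower <= x && x <= upper` into
// a single unsigned comparison of (x - lower) against (upper - lower). The
// same trick as a standalone helper:
static bool SketchInRangeViaSingleUnsignedCompare(unsigned long long x,
                                                  unsigned long long lower,
                                                  unsigned long long upper) {
  // Requires lower <= upper. If x < lower, the subtraction wraps around to a
  // huge unsigned value, so the single comparison rejects it as well.
  return (x - lower) <= (upper - lower);
}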
5160
5161LocationSummary* InstanceCallInstr::MakeLocationSummary(Zone* zone,
5162 bool optimizing) const {
5163 return MakeCallSummary(zone, this);
5164}
5165
5167 if (!FLAG_two_args_smi_icd) {
5168 return Code::null();
5169 }
5170 switch (kind) {
5171 case Token::kADD:
5172 return StubCode::SmiAddInlineCache().ptr();
5173 case Token::kLT:
5174 return StubCode::SmiLessInlineCache().ptr();
5175 case Token::kEQ:
5176 return StubCode::SmiEqualInlineCache().ptr();
5177 default:
5178 return Code::null();
5179 }
5180}
5181
5183 Zone* zone) const {
5184 if (!interface_target().IsNull()) {
5185 // Note: target_type is a fully instantiated rare type (all type parameters
5186 // are replaced with dynamic), so checking whether Smi is assignable to
5187 // it correctly computes whether or not the receiver can be a Smi.
5188 const AbstractType& target_type = AbstractType::Handle(
5189 zone, Class::Handle(zone, interface_target().Owner()).RareType());
5190 if (!CompileType::Smi().IsAssignableTo(target_type)) {
5191 return false;
5192 }
5193 }
5194 // In all other cases conservatively assume that the receiver can be a smi.
5195 return true;
5196}
5197
5199 intptr_t idx) const {
5200 // The first input is the array of types
5201 // for generic functions
5202 if (type_args_len() > 0) {
5203 if (idx == 0) {
5204 return kTagged;
5205 }
5206 idx--;
5207 }
5209}
5210
5212 if (interface_target().IsNull()) {
5213 return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0);
5214 }
5215
5218 ((type_args_len() > 0) ? 1 : 0);
5219}
5220
5223}
5224
5226 if (CompilerState::Current().is_aot() && !receiver_is_not_smi()) {
5227 if (!Receiver()->Type()->CanBeSmi() ||
5230 }
5231 }
5232}
5233
5234static FunctionPtr FindBinarySmiOp(Zone* zone, const String& name) {
5235 const auto& smi_class = Class::Handle(zone, Smi::Class());
5236 return Resolver::ResolveDynamicAnyArgs(zone, smi_class, name,
5237 /*allow_add=*/true);
5238}
5239
5241 if (HasICData()) {
5242 return;
5243 }
5244
5245 const Array& arguments_descriptor =
5248 graph->zone(),
5249 ICData::New(graph->function(), function_name(), arguments_descriptor,
5250 deopt_id(), checked_argument_count(), ICData::kInstance));
5252}
5253
5255 Zone* zone = compiler->zone();
5256
5258
5259 auto& specialized_binary_smi_ic_stub = Code::ZoneHandle(zone);
5260 auto& binary_smi_op_target = Function::Handle(zone);
5261 if (!receiver_is_not_smi()) {
5262 specialized_binary_smi_ic_stub = TwoArgsSmiOpInlineCacheEntry(token_kind());
5263 if (!specialized_binary_smi_ic_stub.IsNull()) {
5264 binary_smi_op_target = FindBinarySmiOp(zone, function_name());
5265 }
5266 }
5267
5268 const ICData* call_ic_data = nullptr;
5269 if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
5270 (ic_data() == nullptr)) {
5271 const Array& arguments_descriptor =
5273
5274 AbstractType& receivers_static_type = AbstractType::Handle(zone);
5275 if (receivers_static_type_ != nullptr) {
5276 receivers_static_type = receivers_static_type_->ptr();
5277 }
5278
5279 call_ic_data = compiler->GetOrAddInstanceCallICData(
5280 deopt_id(), function_name(), arguments_descriptor,
5281 checked_argument_count(), receivers_static_type, binary_smi_op_target);
5282 } else {
5283 call_ic_data = &ICData::ZoneHandle(zone, ic_data()->ptr());
5284 }
5285
5286 if (compiler->is_optimizing() && HasICData()) {
5287 if (ic_data()->NumberOfUsedChecks() > 0) {
5288 const ICData& unary_ic_data =
5289 ICData::ZoneHandle(zone, ic_data()->AsUnaryClassChecks());
5290 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5291 unary_ic_data, entry_kind(),
5293 } else {
5294 // The call was not visited yet; use the original ICData in order to populate it.
5295 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5296 *call_ic_data, entry_kind(),
5298 }
5299 } else {
5300 // Unoptimized code.
5301 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id(),
5302 source());
5303
5304 // If the ICData already contains a (Smi, Smi, <binary-smi-op-target>) check,
5305 // we will call the specialized IC stub that works as a normal IC stub but
5306 // has an inlined fast path for the specific Smi operation.
5307 bool use_specialized_smi_ic_stub = false;
5308 if (!specialized_binary_smi_ic_stub.IsNull() &&
5309 call_ic_data->NumberOfChecksIs(1)) {
5310 GrowableArray<intptr_t> class_ids(2);
5311 auto& target = Function::Handle();
5312 call_ic_data->GetCheckAt(0, &class_ids, &target);
5313 if (class_ids[0] == kSmiCid && class_ids[1] == kSmiCid &&
5314 target.ptr() == binary_smi_op_target.ptr()) {
5315 use_specialized_smi_ic_stub = true;
5316 }
5317 }
5318
5319 if (use_specialized_smi_ic_stub) {
5320 ASSERT(ArgumentCount() == 2);
5321 compiler->EmitInstanceCallJIT(specialized_binary_smi_ic_stub,
5322 *call_ic_data, deopt_id(), source(), locs(),
5323 entry_kind());
5324 } else {
5325 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5326 *call_ic_data, entry_kind(),
5328 }
5329 }
5330}
5331
5334}
5335
5337 const Class& cls,
5338 bool allow_add /* = true */) {
5339 const Array& args_desc_array = Array::Handle(GetArgumentsDescriptor());
5340 ArgumentsDescriptor args_desc(args_desc_array);
5342 args_desc, allow_add);
5343}
5344
5346 if (targets_ == nullptr) {
5347 Zone* zone = Thread::Current()->zone();
5348 if (HasICData()) {
5349 targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
5350 } else {
5351 targets_ = new (zone) CallTargets(zone);
5352 ASSERT(targets_->is_empty());
5353 }
5354 }
5355 return *targets_;
5356}
5357
5359 if (binary_ == nullptr) {
5360 Zone* zone = Thread::Current()->zone();
5361 if (HasICData()) {
5363 } else {
5364 binary_ = new (zone) class BinaryFeedback(zone);
5365 }
5366 }
5367 return *binary_;
5368}
5369
5371 intptr_t idx) const {
5372 if (idx == (InputCount() - 1)) {
5373 return kUnboxedUword; // Receiver's CID.
5374 }
5375
5376 // The first input is the array of types
5377 // for generic functions
5378 if (type_args_len() > 0) {
5379 if (idx == 0) {
5380 return kTagged;
5381 }
5382 idx--;
5383 }
5385}
5386
5388 if (interface_target().IsNull()) {
5389 return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0);
5390 }
5391
5394 ((type_args_len() > 0) ? 1 : 0);
5395}
5396
5399}
5400
5402 Zone* zone,
5404 Value* cid,
5405 const Function& interface_target,
5406 const compiler::TableSelector* selector) {
5407 InputsArray args(zone, call->ArgumentCount() + 1);
5408 for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
5409 args.Add(call->ArgumentValueAt(i)->CopyWithType());
5410 }
5411 args.Add(cid);
5412 auto dispatch_table_call = new (zone) DispatchTableCallInstr(
5413 call->source(), interface_target, selector, std::move(args),
5414 call->type_args_len(), call->argument_names());
5415 return dispatch_table_call;
5416}
5417
5419 bool opt) const {
5420 const intptr_t kNumInputs = 1;
5421 const intptr_t kNumTemps = 0;
5422 LocationSummary* summary = new (zone)
5423 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
5424 summary->set_in(
5426 return MakeCallSummary(zone, this, summary);
5427}
5428
5429void DispatchTableCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5431 Array& arguments_descriptor = Array::ZoneHandle();
5432 if (selector()->requires_args_descriptor) {
5433 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5434 argument_names());
5435 arguments_descriptor = args_info.ToArgumentsDescriptor();
5436 }
5437 compiler->EmitDispatchTableCall(selector()->offset, arguments_descriptor);
5438 compiler->EmitCallsiteMetadata(source(), DeoptId::kNone,
5439 UntaggedPcDescriptors::kOther, locs(), env());
5440 if (selector()->called_on_null && !selector()->on_null_interface) {
5441 Value* receiver = ArgumentValueAt(FirstArgIndex());
5442 if (receiver->Type()->is_nullable()) {
5443 const String& function_name =
5445 compiler->AddNullCheck(source(), function_name);
5446 }
5447 }
5448 compiler->EmitDropArguments(ArgumentsSize());
5449 compiler->AddDispatchTableCallTarget(selector());
5450}
5451
5453 intptr_t idx) const {
5454 // The first input is the array of types
5455 // for generic functions
5456 if (type_args_len() > 0 || function().IsFactory()) {
5457 if (idx == 0) {
5458 return kTagged;
5459 }
5460 idx--;
5461 }
5463}
5464
5468 ((type_args_len() > 0) ? 1 : 0);
5469}
5470
5473}
5474
5476 if (targets_ == nullptr) {
5477 Zone* zone = Thread::Current()->zone();
5478 if (HasICData()) {
5479 targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
5480 } else {
5481 targets_ = new (zone) CallTargets(zone);
5482 ASSERT(targets_->is_empty());
5483 }
5484 }
5485 return *targets_;
5486}
5487
5489 if (binary_ == nullptr) {
5490 Zone* zone = Thread::Current()->zone();
5491 if (HasICData()) {
5493 } else {
5494 binary_ = new (zone) class BinaryFeedback(zone);
5495 }
5496 }
5497 return *binary_;
5498}
5499
5501 if (!HasSingleTarget()) return false;
5503}
5504
5506 if (length() == 0) return false;
5507 for (int i = 0; i < length(); i++) {
5508 if (TargetAt(i)->target->ptr() != TargetAt(0)->target->ptr()) return false;
5509 }
5510 return true;
5511}
5512
5514 ASSERT(length() != 0);
5515 DEBUG_ASSERT(TargetAt(0)->target->IsNotTemporaryScopedHandle());
5516 return *TargetAt(0)->target;
5517}
5518
5520 ASSERT(length() != 0);
5521 DEBUG_ASSERT(TargetAt(0)->target->IsNotTemporaryScopedHandle());
5522 for (int i = 1; i < length(); i++) {
5523 ASSERT(TargetAt(i)->count <= TargetAt(0)->count);
5524 }
5525 return *TargetAt(0)->target;
5526}
5527
5529 intptr_t sum = 0;
5530 for (int i = 0; i < length(); i++) {
5531 sum += TargetAt(i)->count;
5532 }
5533 return sum;
5534}
5535
5537 const {
5538 const intptr_t len = targets_.length();
5540 for (intptr_t i = 0; i < len; i++) {
5541 target = targets_.TargetAt(i)->target->ptr();
5542 if (!target.IsDispatcherOrImplicitAccessor()) {
5543 return false;
5544 }
5545 }
5546 return true;
5547}
5548
5550 return targets().AggregateCallCount();
5551}
5552
5554 Zone* zone,
5555 bool optimizing) const {
5556 return MakeCallSummary(zone, this);
5557}
5558
5560 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5561 argument_names());
5563 compiler->EmitPolymorphicInstanceCall(
5564 this, targets(), args_info, deopt_id(), source(), locs(), complete(),
5566}
5567
5569 const CallTargets& targets) {
5570 bool is_string = true;
5571 bool is_integer = true;
5572 bool is_double = true;
5573 bool is_type = true;
5574
5575 const intptr_t num_checks = targets.length();
5576 for (intptr_t i = 0; i < num_checks; i++) {
5578 targets.TargetAt(0)->target->ptr());
5579 const intptr_t start = targets[i].cid_start;
5580 const intptr_t end = targets[i].cid_end;
5581 for (intptr_t cid = start; cid <= end; cid++) {
5582 is_string = is_string && IsStringClassId(cid);
5584 is_double = is_double && (cid == kDoubleCid);
5585 is_type = is_type && IsTypeClassId(cid);
5586 }
5587 }
5588
5589 if (is_string) {
5591 ASSERT(!is_double);
5592 ASSERT(!is_type);
5593 return Type::StringType();
5594 } else if (is_integer) {
5595 ASSERT(!is_double);
5596 ASSERT(!is_type);
5597 return Type::IntType();
5598 } else if (is_double) {
5599 ASSERT(!is_type);
5600 return Type::Double();
5601 } else if (is_type) {
5602 return Type::DartTypeType();
5603 }
5604
5605 return Type::null();
5606}
5607
5609 const intptr_t receiver_cid = Receiver()->Type()->ToCid();
5610
5611 // We could turn cold call sites for known receiver cids into a StaticCall.
5612 // However, that keeps the ICData of the InstanceCall from being updated.
5613 //
5614 // This is fine if there is no later deoptimization, but if there is, then
5615 // the InstanceCall with the updated ICData for this receiver may then be
5616 // better optimized by the compiler.
5617 //
5618 // This optimization is safe to apply in AOT mode because deoptimization is
5619 // not a concern there.
5620 //
5621 // TODO(dartbug.com/37291): Allow this optimization, but accumulate affected
5622 // InstanceCallInstrs and the corresponding receiver cids during compilation.
5623 // After compilation, add receiver checks to the ICData for those call sites.
5624 if (!CompilerState::Current().is_aot() && Targets().is_empty()) {
5625 return this;
5626 }
5627
5628 const CallTargets* new_target =
5630 receiver_cid,
5631 String::Handle(flow_graph->zone(), ic_data()->target_name()),
5632 Array::Handle(flow_graph->zone(), ic_data()->arguments_descriptor()));
5633 if (new_target == nullptr) {
5634 // No specialization.
5635 return this;
5636 }
5637
5638 ASSERT(new_target->HasSingleTarget());
5639 const Function& target = new_target->FirstTarget();
5641 flow_graph->zone(), this, target, new_target->AggregateCallCount());
5642 flow_graph->InsertBefore(this, specialized, env(), FlowGraph::kValue);
5643 return specialized;
5644}
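
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): the canonicalization above
// devirtualizes an instance call only when the receiver's class id is
// statically known and resolves to exactly one target, and -- in JIT mode --
// only if the call site already has type feedback, so that the ICData can
// still be populated. The decision reduced to a standalone predicate (names
// are hypothetical):
static bool SketchShouldDevirtualize(bool is_aot,
                                     bool call_site_has_feedback,
                                     bool receiver_cid_known,
                                     bool resolves_to_single_target) {
  if (!is_aot && !call_site_has_feedback) {
    // Folding a JIT call site with no feedback would keep its ICData from
    // ever being updated, which can hurt later reoptimization.
    return false;
  }
  return receiver_cid_known && resolves_to_single_target;
}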
5645
5647 // TODO(dartbug.com/40188): Allow this to canonicalize into a StaticCall
5648 // when the input class id is constant.
5649 return this;
5650}
5651
5654 return this;
5655 }
5656
5657 const Function& target = targets().FirstTarget();
5658 if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
5659 const AbstractType& type =
5661 if (!type.IsNull()) {
5662 return flow_graph->GetConstant(type);
5663 }
5664 }
5665
5666 return this;
5667}
5668
5670 if (CompilerState::Current().is_aot() && !complete()) return false;
5671 return targets_.HasSingleRecognizedTarget();
5672}
5673
5675 const intptr_t list_cid = FactoryRecognizer::GetResultCidOfListFactory(
5676 zone, function(), ArgumentCount());
5677 if (list_cid != kDynamicCid) {
5678 SetResultType(zone, CompileType::FromCid(list_cid));
5680 return true;
5681 } else if (function().has_pragma()) {
5682 const intptr_t recognized_cid =
5684 if (recognized_cid != kDynamicCid) {
5685 SetResultType(zone, CompileType::FromCid(recognized_cid));
5686 return true;
5687 }
5688 }
5689 return false;
5690}
5691
5692static const String& EvaluateToString(Zone* zone, Definition* defn) {
5693 if (auto konst = defn->AsConstant()) {
5694 const Object& obj = konst->value();
5695 if (obj.IsString()) {
5696 return String::Cast(obj);
5697 } else if (obj.IsSmi()) {
5698 const char* cstr = obj.ToCString();
5699 return String::Handle(zone, String::New(cstr, Heap::kOld));
5700 } else if (obj.IsBool()) {
5701 return Bool::Cast(obj).value() ? Symbols::True() : Symbols::False();
5702 } else if (obj.IsNull()) {
5703 return Symbols::null();
5704 }
5705 }
5706 return String::null_string();
5707}
5708
5710 FlowGraph* flow_graph) {
5711 auto arg0 = call->ArgumentValueAt(0)->definition();
5712 auto create_array = arg0->AsCreateArray();
5713 if (create_array == nullptr) {
5714 // Do not try to fold the interpolation if the array is an OSR argument.
5715 ASSERT(flow_graph->IsCompiledForOsr());
5716 ASSERT(arg0->IsPhi() || arg0->IsParameter());
5717 return call;
5718 }
5719 // Check if the string interpolation has only constant inputs.
5720 Value* num_elements = create_array->num_elements();
5721 if (!num_elements->BindsToConstant() ||
5722 !num_elements->BoundConstant().IsSmi()) {
5723 return call;
5724 }
5725 const intptr_t length = Smi::Cast(num_elements->BoundConstant()).Value();
5726 Thread* thread = Thread::Current();
5727 Zone* zone = thread->zone();
5729 for (intptr_t i = 0; i < length; i++) {
5730 pieces.Add(Object::null_string());
5731 }
5732
5733 for (Value::Iterator it(create_array->input_use_list()); !it.Done();
5734 it.Advance()) {
5735 auto current = it.Current()->instruction();
5736 if (current == call) {
5737 continue;
5738 }
5739 auto store = current->AsStoreIndexed();
5740 if (store == nullptr || !store->index()->BindsToConstant() ||
5741 !store->index()->BoundConstant().IsSmi()) {
5742 return call;
5743 }
5744 intptr_t store_index = Smi::Cast(store->index()->BoundConstant()).Value();
5745 ASSERT(store_index < length);
5746 const String& piece =
5747 EvaluateToString(flow_graph->zone(), store->value()->definition());
5748 if (!piece.IsNull()) {
5749 pieces.SetAt(store_index, piece);
5750 } else {
5751 return call;
5752 }
5753 }
5754
5755 const String& concatenated =
5756 String::ZoneHandle(zone, Symbols::FromConcatAll(thread, pieces));
5757 return flow_graph->GetConstant(concatenated);
5758}
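
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of il.cc): CanonicalizeStringInterpolate
// above only folds the call when every element stored into the backing array
// is a compile-time constant, and then concatenates the pieces in index
// order. The same idea over plain C++ strings, with nullptr standing in for
// a non-constant piece (all names are hypothetical):
#include <string>
#include <vector>

static bool SketchTryFoldInterpolation(
    const std::vector<const std::string*>& pieces,
    std::string* out) {
  std::string result;
  for (const std::string* piece : pieces) {
    if (piece == nullptr) return false;  // Any non-constant piece: give up.
    result += *piece;
  }
  *out = result;
  return true;
}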
5759
5761 FlowGraph* flow_graph) {
5762 auto arg0 = call->ArgumentValueAt(0)->definition();
5763 const auto& result = EvaluateToString(flow_graph->zone(), arg0);
5764 if (!result.IsNull()) {
5765 return flow_graph->GetConstant(String::ZoneHandle(
5766 flow_graph->zone(), Symbols::New(flow_graph->thread(), result)));
5767 }
5768 return call;
5769}
5770
5772 auto& compiler_state = CompilerState::Current();
5773
5774 if (function().ptr() == compiler_state.StringBaseInterpolate().ptr()) {
5775 return CanonicalizeStringInterpolate(this, flow_graph);
5776 } else if (function().ptr() ==
5777 compiler_state.StringBaseInterpolateSingle().ptr()) {
5778 return CanonicalizeStringInterpolateSingle(this, flow_graph);
5779 }
5780
5781 const auto kind = function().recognized_kind();
5782
5783 if (kind != MethodRecognizer::kUnknown) {
5784 if (ArgumentCount() == 1) {
5785 const auto argument = ArgumentValueAt(0);
5786 if (argument->BindsToConstant()) {
5788 if (Evaluate(flow_graph, argument->BoundConstant(), &result)) {
5789 return flow_graph->TryCreateConstantReplacementFor(this, result);
5790 }
5791 }
5792 } else if (ArgumentCount() == 2) {
5793 const auto argument1 = ArgumentValueAt(0);
5794 const auto argument2 = ArgumentValueAt(1);
5795 if (argument1->BindsToConstant() && argument2->BindsToConstant()) {
5797 if (Evaluate(flow_graph, argument1->BoundConstant(),
5798 argument2->BoundConstant(), &result)) {
5799 return flow_graph->TryCreateConstantReplacementFor(this, result);
5800 }
5801 }
5802 }
5803 }
5804
5805 if (!compiler_state.is_aot()) {
5806 return this;
5807 }
5808
5809 if (kind == MethodRecognizer::kObjectRuntimeType) {
5810 if (input_use_list() == nullptr) {
5811 // This function has only environment uses. In precompiled mode it is
5812 // fine to remove it because we will never deoptimize.
5813 return flow_graph->constant_dead();
5814 }
5815 }
5816
5817 return this;
5818}
5819
5821 const Object& argument,
5822 Object* result) {
5823 const auto kind = function().recognized_kind();
5824 switch (kind) {
5825 case MethodRecognizer::kSmi_bitLength: {
5826 ASSERT(FirstArgIndex() == 0);
5827 if (argument.IsInteger()) {
5828 const Integer& value = Integer::Handle(
5829 flow_graph->zone(),
5831 flow_graph->thread()));
5832 if (!value.IsNull()) {
5833 *result = value.ptr();
5834 return true;
5835 }
5836 }
5837 break;
5838 }
5839 case MethodRecognizer::kStringBaseLength:
5840 case MethodRecognizer::kStringBaseIsEmpty: {
5841 ASSERT(FirstArgIndex() == 0);
5842 if (argument.IsString()) {
5843 const auto& str = String::Cast(argument);
5844 if (kind == MethodRecognizer::kStringBaseLength) {
5845 *result = Integer::New(str.Length());
5846 } else {
5847 *result = Bool::Get(str.Length() == 0).ptr();
5849 }
5850 return true;
5851 }
5852 break;
5853 }
5854 default:
5855 break;
5856 }
5857 return false;
5858}
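// Example: with a constant receiver, "abc".length folds to the constant 3 and
// "".isEmpty folds to the constant true via the cases above.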
5859
5861 const Object& argument1,
5862 const Object& argument2,
5863 Object* result) {
5864 const auto kind = function().recognized_kind();
5865 switch (kind) {
5866 case MethodRecognizer::kOneByteString_equality:
5867 case MethodRecognizer::kTwoByteString_equality: {
5868 if (argument1.IsString() && argument2.IsString()) {
5869 *result =
5870 Bool::Get(String::Cast(argument1).Equals(String::Cast(argument2)))
5871 .ptr();
5872 return true;
5873 }
5874 break;
5875 }
5876 default:
5877 break;
5878 }
5879 return false;
5880}
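// Example: a recognized one- or two-byte string equality with two constant
// string arguments folds to a constant bool, e.g. comparing "a" and "b"
// becomes the constant false.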
5881
5883 bool optimizing) const {
5884 return MakeCallSummary(zone, this);
5885}
5886
5887void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5888 Zone* zone = compiler->zone();
5889 const ICData* call_ic_data = nullptr;
5890 if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
5891 (ic_data() == nullptr)) {
5892 const Array& arguments_descriptor =
5894 const int num_args_checked =
5896 call_ic_data = compiler->GetOrAddStaticCallICData(
5897 deopt_id(), function(), arguments_descriptor, num_args_checked,
5898 rebind_rule_);
5899 } else {
5900 call_ic_data = &ICData::ZoneHandle(ic_data()->ptr());
5901 }
5902 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5903 argument_names());
5904 compiler->GenerateStaticCall(deopt_id(), source(), function(), args_info,
5905 locs(), *call_ic_data, rebind_rule_,
5906 entry_kind());
5907 if (function().IsFactory()) {
5908 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
5909 if (type_usage_info != nullptr) {
5910 const Class& klass = Class::Handle(function().Owner());
5911 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, klass,
5912 ArgumentAt(0));
5913 }
5914 }
5915}
5916
5919 Representation representation,
5920 const Function& function,
5921 intptr_t type_args_len,
5922 const Array& argument_names,
5923 InputsArray&& arguments,
5924 intptr_t deopt_id)
5925 : TemplateDartCall(deopt_id,
5926 type_args_len,
5927 argument_names,
5928 std::move(arguments),
5929 source),
5930 representation_(representation),
5931 function_(function),
5932 identity_(AliasIdentity::Unknown()) {
5933 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5934 // We use kUntagged for the internal use in FfiNativeLookupAddress
5935 // and kUnboxedAddress for pragma-annotated functions.
5937 function.ptr() ==
5938 IsolateGroup::Current()->object_store()->ffi_resolver_function());
5941#if defined(TARGET_ARCH_IA32)
5942 // No pool to cache in on IA32.
5943 FATAL("Not supported on IA32.");
5944#endif
5945}
5946
5948 intptr_t idx) const {
5949 // The first input is the array of types for generic functions.
5950 if (type_args_len() > 0 || function().IsFactory()) {
5951 if (idx == 0) {
5952 return kTagged;
5953 }
5954 idx--;
5955 }
5957}
5958
5962 ((type_args_len() > 0) ? 1 : 0);
5963}
5964
5966 return this;
5967}
5968
5970 Zone* zone,
5971 bool optimizing) const {
5972 return MakeCallSummary(zone, this);
5973}
5974
5976#if defined(TARGET_ARCH_IA32)
5977 UNREACHABLE();
5978#else
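  // Sketch of the caching scheme below: the call's untagged result is cached
  // in an object pool slot that starts out as 0. A non-zero slot means a
  // previous execution already computed the value, so the call is skipped;
  // otherwise the static call is performed and its unboxed result is stored
  // back into the slot (a result of 0 would merely cause the idempotent call
  // to be repeated).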
5979 compiler::Label drop_args, done;
5980 const intptr_t cacheable_pool_index = __ object_pool_builder().AddImmediate(
5983 const Register dst = locs()->out(0).reg();
5984
5985 // In optimized mode outgoing arguments are pushed to the end of the fixed
5986 // frame.
5987 const bool need_to_drop_args = !compiler->is_optimizing();
5988
5989 __ Comment(
5990 "CachableIdempotentCall pool load and check. pool_index = "
5991 "%" Pd,
5992 cacheable_pool_index);
5993 __ LoadWordFromPoolIndex(dst, cacheable_pool_index);
5994 __ CompareImmediate(dst, 0);
5995 __ BranchIf(NOT_EQUAL, need_to_drop_args ? &drop_args : &done);
5996 __ Comment("CachableIdempotentCall pool load and check - end");
5997
5998 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5999 argument_names());
6000 const auto& null_ic_data = ICData::ZoneHandle();
6001 compiler->GenerateStaticCall(deopt_id(), source(), function(), args_info,
6002 locs(), null_ic_data, ICData::kNoRebind,
6004
6005 __ Comment("CachableIdempotentCall pool store");
6006 if (!function().HasUnboxedReturnValue()) {
6007 __ LoadWordFromBoxOrSmi(dst, dst);
6008 }
6009 __ StoreWordToPoolIndex(dst, cacheable_pool_index);
6010 if (need_to_drop_args) {
6012 __ Bind(&drop_args);
6013 __ Drop(args_info.size_with_type_args);
6014 }
6015 __ Bind(&done);
6016 __ Comment("CachableIdempotentCall pool store - end");
6017#endif
6018}
6019
6021 switch (kind_) {
6022 case kParameterCheck:
6024 case kInsertedByFrontend:
6026 case kFromSource:
6028 case kUnknown:
6029 break;
6030 }
6031
6032 return tag();
6033}
6034
6036 compiler->GenerateAssertAssignable(value()->Type(), source(), deopt_id(),
6037 env(), dst_name(), locs());
6038 ASSERT(locs()->in(kInstancePos).reg() == locs()->out(0).reg());
6039}
6040
6041LocationSummary* AssertSubtypeInstr::MakeLocationSummary(Zone* zone,
6042 bool opt) const {
6043 const intptr_t kNumInputs = 5;
6044 const intptr_t kNumTemps = 0;
6045 LocationSummary* summary = new (zone)
6046 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6047 summary->set_in(kInstantiatorTAVPos,
6050 summary->set_in(
6053 summary->set_in(kSubTypePos,
6055 summary->set_in(kSuperTypePos,
6057 summary->set_in(kDstNamePos,
6059 return summary;
6060}
6061
6062void AssertSubtypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6063 compiler->GenerateStubCall(source(), StubCode::AssertSubtype(),
6064 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
6065 env());
6066}
6067
6068LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone,
6069 bool opt) const {
6070 const intptr_t kNumInputs = 2;
6071 const intptr_t kNumTemps = 0;
6072 LocationSummary* locs = new (zone)
6073 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6078 locs->set_out(0,
6080 return locs;
6081}
6082
6083void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6084 auto& stub = Code::ZoneHandle(StubCode::InstantiateType().ptr());
6085 if (type().IsTypeParameter()) {
6086 const auto& type_parameter = TypeParameter::Cast(type());
6087 const bool is_function_parameter = type_parameter.IsFunctionTypeParameter();
6088
6089 switch (type_parameter.nullability()) {
6091 stub = is_function_parameter
6092 ? StubCode::InstantiateTypeNonNullableFunctionTypeParameter()
6093 .ptr()
6094 : StubCode::InstantiateTypeNonNullableClassTypeParameter()
6095 .ptr();
6096 break;
6098 stub =
6099 is_function_parameter
6100 ? StubCode::InstantiateTypeNullableFunctionTypeParameter().ptr()
6101 : StubCode::InstantiateTypeNullableClassTypeParameter().ptr();
6102 break;
6103 }
6104 }
6105 __ LoadObject(InstantiateTypeABI::kTypeReg, type());
6106 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6107 locs(), deopt_id(), env());
6108}
6109
6111 Zone* zone,
6112 bool opt) const {
6113 const intptr_t kNumInputs = 3;
6114 const intptr_t kNumTemps = 0;
6115 LocationSummary* locs = new (zone)
6116 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6123 locs->set_out(
6125 return locs;
6126}
6127
6129 FlowGraphCompiler* compiler) {
 6130   // We should never try to instantiate a TAV known at compile time to be null,
6131 // so we can use a null value below for the dynamic case.
6132 ASSERT(!type_arguments()->BindsToConstant() ||
6133 !type_arguments()->BoundConstant().IsNull());
6134 const auto& type_args =
6136 ? TypeArguments::Cast(type_arguments()->BoundConstant())
6137 : Object::null_type_arguments();
6138 const intptr_t len = type_args.Length();
6139 const bool can_function_type_args_be_null =
6140 function_type_arguments()->CanBe(Object::null_object());
6141
6142 compiler::Label type_arguments_instantiated;
6143 if (type_args.IsNull()) {
6144 // Currently we only create dynamic InstantiateTypeArguments instructions
6145 // in cases where we know the type argument is uninstantiated at runtime,
6146 // so there are no extra checks needed to call the stub successfully.
6147 } else if (type_args.IsRawWhenInstantiatedFromRaw(len) &&
6148 can_function_type_args_be_null) {
6149 // If both the instantiator and function type arguments are null and if the
6150 // type argument vector instantiated from null becomes a vector of dynamic,
6151 // then use null as the type arguments.
6152 compiler::Label non_null_type_args;
6154 Object::null_object());
6157 if (!function_type_arguments()->BindsToConstant()) {
6158 __ BranchIf(NOT_EQUAL, &non_null_type_args,
6162 }
6163 __ BranchIf(EQUAL, &type_arguments_instantiated,
6165 __ Bind(&non_null_type_args);
6166 }
6167
6168 compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
6169 locs(), deopt_id(), env());
6170 __ Bind(&type_arguments_instantiated);
6171}
6172
6173LocationSummary* DeoptimizeInstr::MakeLocationSummary(Zone* zone,
6174 bool opt) const {
6175 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6176}
6177
6178void DeoptimizeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6179 __ Jump(compiler->AddDeoptStub(deopt_id(), deopt_reason_));
6180}
6181
6182void CheckClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6183 compiler::Label* deopt =
6184 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
6185 if (IsNullCheck()) {
6186 EmitNullCheck(compiler, deopt);
6187 return;
6188 }
6189
6190 ASSERT(!cids_.IsMonomorphic() || !cids_.HasClassId(kSmiCid));
6191 Register value = locs()->in(0).reg();
6192 Register temp = locs()->temp(0).reg();
6193 compiler::Label is_ok;
6194
6195 __ BranchIfSmi(value, cids_.HasClassId(kSmiCid) ? &is_ok : deopt);
6196
6197 __ LoadClassId(temp, value);
6198
6199 if (IsBitTest()) {
6200 intptr_t min = cids_.ComputeLowestCid();
6201 intptr_t max = cids_.ComputeHighestCid();
6203 } else {
6204 const intptr_t num_checks = cids_.length();
6205 const bool use_near_jump = num_checks < 5;
6206 int bias = 0;
6207 for (intptr_t i = 0; i < num_checks; i++) {
6208 intptr_t cid_start = cids_[i].cid_start;
6209 intptr_t cid_end = cids_[i].cid_end;
6210 if (cid_start == kSmiCid && cid_end == kSmiCid) {
6211 continue; // We already handled Smi above.
6212 }
6213 if (cid_start == kSmiCid) cid_start++;
6214 if (cid_end == kSmiCid) cid_end--;
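      // The final emitted range check can branch straight to deopt on
      // failure. A trailing Smi-only range does not count as the final check,
      // since Smis were already dispatched by BranchIfSmi above.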
6215 const bool is_last =
6216 (i == num_checks - 1) ||
6217 (i == num_checks - 2 && cids_[i + 1].cid_start == kSmiCid &&
6218 cids_[i + 1].cid_end == kSmiCid);
6219 bias = EmitCheckCid(compiler, bias, cid_start, cid_end, is_last, &is_ok,
6220 deopt, use_near_jump);
6221 }
6222 }
6223 __ Bind(&is_ok);
6224}
6225
6226LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone,
6227 bool opt) const {
6228 const intptr_t kNumInputs = 2;
6229 const intptr_t kNumTemps = 0;
6230 LocationSummary* locs = new (zone) LocationSummary(
6231 zone, kNumInputs, kNumTemps,
6237 return locs;
6238}
6239
6240void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6243
6244 RangeErrorSlowPath* slow_path = new RangeErrorSlowPath(this);
6245 compiler->AddSlowPathCode(slow_path);
6246 Location length_loc = locs()->in(kLengthPos);
6247 Location index_loc = locs()->in(kIndexPos);
6248 Register length = length_loc.reg();
6249 Register index = index_loc.reg();
6250 const intptr_t index_cid = this->index()->Type()->ToCid();
6251
6252 // The length comes from one of our variable-sized heap objects (e.g. typed
6253 // data array) and is therefore guaranteed to be in the positive Smi range.
6254 if (representation() == kTagged) {
6255 if (index_cid != kSmiCid) {
6256 __ BranchIfNotSmi(index, slow_path->entry_label());
6257 }
6258 __ CompareObjectRegisters(index, length);
6259 } else {
6260 ASSERT(representation() == kUnboxedInt64);
6261 __ CompareRegisters(index, length);
6262 }
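  // Since the length is a non-negative Smi, a single unsigned comparison also
  // catches negative indices: reinterpreted as unsigned they compare greater
  // than any valid length.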
6263 __ BranchIf(UNSIGNED_GREATER_EQUAL, slow_path->entry_label());
6264}
6265
6266LocationSummary* CheckNullInstr::MakeLocationSummary(Zone* zone,
6267 bool opt) const {
6268 const intptr_t kNumInputs = 1;
6269 const intptr_t kNumTemps = 0;
6270 LocationSummary* locs = new (zone) LocationSummary(
6271 zone, kNumInputs, kNumTemps,
6275 return locs;
6276}
6277
6280 compiler->AddNullCheck(check_null->source(), check_null->function_name());
6281}
6282
6285 __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
6286 cls_.ScrubbedNameCString());
6287 }
6288 __ Bind(entry_label());
6289 const auto& stub = Code::ZoneHandle(
6291
6292 LocationSummary* locs = instruction()->locs();
6293
6295 compiler->SaveLiveRegisters(locs);
6296 // Box allocation slow paths cannot lazy-deopt.
6297 ASSERT(!kAllocateMintRuntimeEntry.can_lazy_deopt() &&
6298 !kAllocateDoubleRuntimeEntry.can_lazy_deopt() &&
6299 !kAllocateFloat32x4RuntimeEntry.can_lazy_deopt() &&
6300 !kAllocateFloat64x2RuntimeEntry.can_lazy_deopt());
6301 compiler->GenerateNonLazyDeoptableStubCall(
6302 InstructionSource(), // No token position.
6303 stub, UntaggedPcDescriptors::kOther, locs);
6304 __ MoveRegister(result_, AllocateBoxABI::kResultReg);
6305 compiler->RestoreLiveRegisters(locs);
6306 __ Jump(exit_label());
6307}
6308
6310 Instruction* instruction,
6311 const Class& cls,
6313 Register temp) {
6314 if (compiler->intrinsic_mode()) {
6315 __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
6317 } else {
6319 auto slow_path = new BoxAllocationSlowPath(instruction, cls, result);
6320 compiler->AddSlowPathCode(slow_path);
6321
6322 if (FLAG_inline_alloc && !FLAG_use_slow_path) {
6323 __ TryAllocate(cls, slow_path->entry_label(),
6325 } else {
6326 __ Jump(slow_path->entry_label());
6327 }
6328 __ Bind(slow_path->exit_label());
6329 }
6330}
6331
6333 __ Comment("DoubleToIntegerSlowPath");
6334 __ Bind(entry_label());
6335
6336 LocationSummary* locs = instruction()->locs();
6337 locs->live_registers()->Remove(locs->out(0));
6338
6339 compiler->SaveLiveRegisters(locs);
6340
6341 auto slow_path_env =
6342 compiler->SlowPathEnvironmentFor(instruction(), /*num_slow_path_args=*/0);
6343
6344 __ MoveUnboxedDouble(DoubleToIntegerStubABI::kInputReg, value_reg_);
6345 __ LoadImmediate(
6347 compiler::target::ToRawSmi(instruction()->recognized_kind()));
6348 compiler->GenerateStubCall(instruction()->source(),
6350 UntaggedPcDescriptors::kOther, locs,
6351 instruction()->deopt_id(), slow_path_env);
6352 __ MoveRegister(instruction()->locs()->out(0).reg(),
6354 compiler->RestoreLiveRegisters(instruction()->locs());
6355 __ Jump(exit_label());
6356}
6357
6358void UnboxInstr::EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler) {
6359 const intptr_t box_cid = BoxCid();
6360 ASSERT(box_cid != kSmiCid); // Should never reach here with Smi-able ints.
6361 const Register box = locs()->in(0).reg();
6362 const Register temp =
6363 (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
6364 compiler::Label* deopt =
6365 compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnbox);
6366 compiler::Label is_smi;
6367
6368 if ((value()->Type()->ToNullableCid() == box_cid) &&
6369 value()->Type()->is_nullable()) {
6370 __ CompareObject(box, Object::null_object());
6371 __ BranchIf(EQUAL, deopt);
6372 } else {
6373 __ BranchIfSmi(box, CanConvertSmi() ? &is_smi : deopt);
6374 __ CompareClassId(box, box_cid, temp);
6375 __ BranchIf(NOT_EQUAL, deopt);
6376 }
6377
6378 EmitLoadFromBox(compiler);
6379
6380 if (is_smi.IsLinked()) {
6381 compiler::Label done;
6383 __ Bind(&is_smi);
6384 EmitSmiConversion(compiler);
6385 __ Bind(&done);
6386 }
6387}
6388
6389void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6391 if (BoxCid() == kSmiCid) {
6392 // Since the representation fits in a Smi, we can extract it directly.
6393 ASSERT_EQUAL(value()->Type()->ToCid(), kSmiCid);
6394 return EmitSmiConversion(compiler);
6395 }
6396 switch (representation()) {
6397 case kUnboxedDouble:
6398 case kUnboxedFloat:
6399 case kUnboxedFloat32x4:
6400 case kUnboxedFloat64x2:
6401 case kUnboxedInt32x4:
6402 EmitLoadFromBox(compiler);
6403 break;
6404
6405 case kUnboxedInt32:
6406 EmitLoadInt32FromBoxOrSmi(compiler);
6407 break;
6408
6409 case kUnboxedInt64: {
6410 if (value()->Type()->ToCid() == kSmiCid) {
6411 // Smi -> int64 conversion is more efficient than
6412 // handling arbitrary smi/mint.
6413 EmitSmiConversion(compiler);
6414 } else {
6415 EmitLoadInt64FromBoxOrSmi(compiler);
6416 }
6417 break;
6418 }
6419 default:
6420 UNREACHABLE();
6421 break;
6422 }
6423 } else {
6425 const intptr_t value_cid = value()->Type()->ToCid();
6426 const intptr_t box_cid = BoxCid();
6427
6428 if (box_cid == kSmiCid || (CanConvertSmi() && (value_cid == kSmiCid))) {
6429 ASSERT_EQUAL(value_cid, kSmiCid);
6430 EmitSmiConversion(compiler);
6431 } else if (representation() == kUnboxedInt32 && value()->Type()->IsInt()) {
6432 EmitLoadInt32FromBoxOrSmi(compiler);
6433 } else if (representation() == kUnboxedInt64 && value()->Type()->IsInt()) {
6434 EmitLoadInt64FromBoxOrSmi(compiler);
6435 } else if ((value_cid == box_cid) || !CanDeoptimize()) {
6436 EmitLoadFromBox(compiler);
6437 } else {
6438 EmitLoadFromBoxWithDeopt(compiler);
6439 }
6440 }
6441}
6442
6444 const GrowableArray<Definition*>& definitions,
6445 intptr_t fixed_parameter_count,
6446 intptr_t lazy_deopt_pruning_count,
6447 const ParsedFunction& parsed_function) {
6448 Environment* env = new (zone) Environment(
6449 definitions.length(), fixed_parameter_count, lazy_deopt_pruning_count,
6450 parsed_function.function(), nullptr);
6451 for (intptr_t i = 0; i < definitions.length(); ++i) {
6452 env->values_.Add(new (zone) Value(definitions[i]));
6453 }
6454 return env;
6455}
6456
6458 values_.Add(value);
6459}
6460
6462 ASSERT(length <= values_.length());
6463 Environment* copy = new (zone) Environment(
6464 length, fixed_parameter_count_, LazyDeoptPruneCount(), function_,
6465 (outer_ == nullptr) ? nullptr : outer_->DeepCopy(zone));
6466 copy->SetDeoptId(DeoptIdBits::decode(bitfield_));
6467 copy->SetLazyDeoptToBeforeDeoptId(LazyDeoptToBeforeDeoptId());
6468 if (IsHoisted()) {
6469 copy->MarkAsHoisted();
6470 }
6471 if (locations_ != nullptr) {
6472 Location* new_locations = zone->Alloc<Location>(length);
6473 copy->set_locations(new_locations);
6474 }
6475 for (intptr_t i = 0; i < length; ++i) {
6476 copy->values_.Add(values_[i]->CopyWithType(zone));
6477 if (locations_ != nullptr) {
6478 copy->locations_[i] = locations_[i].Copy();
6479 }
6480 }
6481 return copy;
6482}
6483
6484// Copies the environment and updates the environment use lists.
6485void Environment::DeepCopyTo(Zone* zone, Instruction* instr) const {
6486 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) {
6487 it.CurrentValue()->RemoveFromUseList();
6488 }
6489
6490 Environment* copy = DeepCopy(zone);
6491 instr->SetEnvironment(copy);
6492 for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) {
6493 Value* value = it.CurrentValue();
6494 value->definition()->AddEnvUse(value);
6495 }
6496}
6497
6499 Instruction* instr,
6500 intptr_t argc,
6501 Definition* dead,
6502 Definition* result) const {
6503 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) {
6504 it.CurrentValue()->RemoveFromUseList();
6505 }
6506
6507 Environment* copy =
6508 DeepCopy(zone, values_.length() - argc - LazyDeoptPruneCount());
6509 copy->SetLazyDeoptPruneCount(0);
6510 for (intptr_t i = 0; i < argc; i++) {
6511 copy->values_.Add(new (zone) Value(dead));
6512 }
6513 copy->values_.Add(new (zone) Value(result));
6514
6515 instr->SetEnvironment(copy);
6516 for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) {
6517 Value* value = it.CurrentValue();
6518 value->definition()->AddEnvUse(value);
6519 }
6520}
6521
6522// Copies the environment as outer on an inlined instruction and updates the
6523// environment use lists.
6525 Instruction* instr,
6526 intptr_t outer_deopt_id) const {
6527 // Create a deep copy removing caller arguments from the environment.
6528 ASSERT(instr->env()->outer() == nullptr);
6529 intptr_t argument_count = instr->env()->fixed_parameter_count();
6531 DeepCopy(zone, values_.length() - argument_count - LazyDeoptPruneCount());
6532 outer->SetDeoptId(outer_deopt_id);
6533 outer->SetLazyDeoptPruneCount(0);
6534 instr->env()->outer_ = outer;
6535 intptr_t use_index = instr->env()->Length(); // Start index after inner.
6536 for (Environment::DeepIterator it(outer); !it.Done(); it.Advance()) {
6537 Value* value = it.CurrentValue();
6538 value->set_instruction(instr);
6539 value->set_use_index(use_index++);
6540 value->definition()->AddEnvUse(value);
6541 }
6542}
6543
6545 Value* new_right) {
6546 UNREACHABLE();
6547 return nullptr;
6548}
6549
6551 Value* new_right) {
6552 return new EqualityCompareInstr(source(), kind(), new_left, new_right,
6553 operation_cid(), deopt_id(), is_null_aware(),
6554 speculative_mode_);
6555}
6556
6558 Value* new_right) {
6559 return new RelationalOpInstr(source(), kind(), new_left, new_right,
6560 operation_cid(), deopt_id(),
6562}
6563
6565 Value* new_right) {
6566 return new StrictCompareInstr(source(), kind(), new_left, new_right,
6568}
6569
6571 Value* new_right) {
6572 return new TestIntInstr(source(), kind(), representation_, new_left,
6573 new_right);
6574}
6575
6577 Value* new_right) {
6578 return new TestCidsInstr(source(), kind(), new_left, cid_results(),
6579 deopt_id());
6580}
6581
6583 Value* new_right) {
6584 return new TestRangeInstr(source(), new_left, lower_, upper_,
6585 value_representation_);
6586}
6587
6589 auto const other_instr = other.AsTestCids();
6591 return false;
6592 }
6593 if (cid_results().length() != other_instr->cid_results().length()) {
6594 return false;
6595 }
6596 for (intptr_t i = 0; i < cid_results().length(); i++) {
6597 if (cid_results()[i] != other_instr->cid_results()[i]) {
6598 return false;
6599 }
6600 }
6601 return true;
6602}
6603
6605 auto const other_instr = other.AsTestRange();
6607 return false;
6608 }
6609 return lower_ == other_instr->lower_ && upper_ == other_instr->upper_ &&
6610 value_representation_ == other_instr->value_representation_;
6611}
6612
6614 Value* v1,
6615 Value* v2) {
6616 bool is_smi_result = v1->BindsToSmiConstant() && v2->BindsToSmiConstant();
6617 if (comparison->IsStrictCompare()) {
6618 // Strict comparison with number checks calls a stub and is not supported
6619 // by if-conversion.
6620 return is_smi_result &&
6621 !comparison->AsStrictCompare()->needs_number_check();
6622 }
6623 if (comparison->operation_cid() != kSmiCid) {
6624 // Non-smi comparisons are not supported by if-conversion.
6625 return false;
6626 }
6627 return is_smi_result;
6628}
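// Example: if-conversion applies when both result values are Smi constants
// and the comparison is either a strict compare without a number check or a
// Smi comparison; a double comparison, for instance, is rejected.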
6629
6631 ASSERT(InputCount() > 1);
6632 Definition* first = InputAt(0)->definition();
6633 for (intptr_t i = 1; i < InputCount(); ++i) {
6634 Definition* def = InputAt(i)->definition();
6635 if (def != first) return false;
6636 }
6637 return true;
6638}
6639
6641 Definition* first = InputAt(0)->definition();
6642 if (InputCount() == 1) {
6643 return first;
6644 }
6645 ASSERT(InputCount() > 1);
6646 Definition* first_origin = first->OriginalDefinition();
6647 bool look_for_redefinition = false;
6648 for (intptr_t i = 1; i < InputCount(); ++i) {
6649 Definition* def = InputAt(i)->definition();
6650 if ((def != first) && (def != this)) {
6651 Definition* origin = def->OriginalDefinition();
6652 if ((origin != first_origin) && (origin != this)) return nullptr;
6653 look_for_redefinition = true;
6654 }
6655 }
6656 if (look_for_redefinition) {
6657 // Find the most specific redefinition which is common for all inputs
6658 // (the longest common chain).
6659 Definition* redef = first;
6660 for (intptr_t i = 1, n = InputCount(); redef != first_origin && i < n;) {
6661 Value* value = InputAt(i);
6662 bool found = false;
6663 do {
6664 Definition* def = value->definition();
6665 if ((def == redef) || (def == this)) {
6666 found = true;
6667 break;
6668 }
6669 value = def->RedefinedValue();
6670 } while (value != nullptr);
6671 if (found) {
6672 ++i;
6673 } else {
6674 ASSERT(redef != first_origin);
6675 redef = redef->RedefinedValue()->definition();
6676 }
6677 }
6678 return redef;
6679 } else {
6680 return first;
6681 }
6682}
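// Example (sketch): for phi(R2(R1(v)), R1(v)), where R1 and R2 are
// redefinitions of v, the replacement is R1(v): the most specific
// redefinition reachable from every input's redefinition chain.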
6683
6685 for (intptr_t i = 0; i < phi->InputCount(); i++) {
6686 if (phi->InputAt(i)->definition()->RedefinedValue() == nullptr) {
6687 return false;
6688 }
6689 }
6690 return true;
6691}
6692
6695 if (replacement != nullptr && flow_graph->is_licm_allowed() &&
6697 // If we are replacing a Phi which has redefinitions as all of its inputs
6698 // then to maintain the redefinition chain we are going to insert a
6699 // redefinition. If any input is *not* a redefinition that means that
6700 // whatever properties were inferred for a Phi also hold on a path
6701 // that does not pass through any redefinitions so there is no need
6702 // to redefine this value.
6703 auto zone = flow_graph->zone();
6704 auto redef = new (zone) RedefinitionInstr(new (zone) Value(replacement));
6705 flow_graph->InsertAfter(block(), redef, /*env=*/nullptr, FlowGraph::kValue);
6706
6707 // Redefinition is not going to dominate the block entry itself, so we
6708 // have to handle environment uses at the block entry specially.
6709 Value* next_use;
6710 for (Value* use = env_use_list(); use != nullptr; use = next_use) {
6711 next_use = use->next_use();
6712 if (use->instruction() == block()) {
6713 use->RemoveFromUseList();
6714 use->set_definition(replacement);
6715 replacement->AddEnvUse(use);
6716 }
6717 }
6718 return redef;
6719 }
6720
6721 return (replacement != nullptr) ? replacement : this;
6722}
6723
6724// Removes current phi from graph and sets current to previous phi.
6727 (*phis_)[index_] = phis_->Last();
6728 phis_->RemoveLast();
6729 --index_;
6730}
6731
6733 if (StrictCompareInstr* strict_compare = comparison()->AsStrictCompare()) {
6734 if ((InputAt(0)->definition()->OriginalDefinition() ==
6735 InputAt(1)->definition()->OriginalDefinition()) &&
6736 strict_compare->kind() == Token::kEQ_STRICT) {
6737 return nullptr;
6738 }
6739 }
6740 return this;
6741}
6742
6744 bool opt) const {
6747 return comparison()->locs();
6748}
6749
6750void CheckConditionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6751 compiler::Label if_true;
6752 compiler::Label* if_false =
6753 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnknown);
6754 BranchLabels labels = {&if_true, if_false, &if_true};
6755 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
6756 if (true_condition != kInvalidCondition) {
6757 __ BranchIf(InvertCondition(true_condition), if_false);
6758 }
6759 __ Bind(&if_true);
6760}
6761
6764}
6765
6767 return (flow_graph->should_remove_all_bounds_checks() || IsRedundant())
6768 ? index()->definition()
6769 : this;
6770}
6771
6772intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) {
6773 if (IsTypedDataBaseClassId(class_id)) {
6775 }
6776
6777 switch (class_id) {
6778 case kGrowableObjectArrayCid:
6780 case kOneByteStringCid:
6781 case kTwoByteStringCid:
6783 case kArrayCid:
6784 case kImmutableArrayCid:
6786 default:
6787 UNREACHABLE();
6788 return -1;
6789 }
6790}
6791
6793 if (kind_ == Kind::kDeeplyImmutableAttachNativeFinalizer) {
6794 return this;
6795 }
6796
6797 ASSERT(kind_ == Kind::kWriteUnmodifiableTypedData);
6798 intptr_t cid = value()->Type()->ToCid();
6799 if ((cid != kIllegalCid) && (cid != kDynamicCid) &&
6801 return value()->definition();
6802 }
6803 return this;
6804}
6805
6807 AlignmentType alignment) {
6809 case kUnboxedInt8:
6810 case kUnboxedUint8:
6811 // Don't need to worry about alignment for accessing bytes.
6812 return kAlignedAccess;
6813 case kUnboxedFloat32x4:
6814 case kUnboxedInt32x4:
6815 case kUnboxedFloat64x2:
6816 // TODO(rmacnak): Investigate alignment requirements of floating point
6817 // loads.
6818 return kAlignedAccess;
6819 default:
6820 return alignment;
6821 }
6822}
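// In other words, an unaligned-access request is kept only for element types
// where it can matter; byte-sized and SIMD accesses are always reported as
// aligned here.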
6823
6825 Value* index,
6826 bool index_unboxed,
6827 intptr_t index_scale,
6828 intptr_t class_id,
6829 AlignmentType alignment,
6830 intptr_t deopt_id,
6832 CompileType* result_type)
6833 : TemplateDefinition(source, deopt_id),
6834 index_unboxed_(index_unboxed),
6835 index_scale_(index_scale),
6836 class_id_(class_id),
6837 alignment_(StrengthenAlignment(class_id, alignment)),
6838 token_pos_(source.token_pos),
6839 result_type_(result_type) {
6840 // In particular, notice that kPointerCid is _not_ supported because it gives
6841 // no information about whether the elements are signed for elements with
6842 // unboxed integer representations. The constructor must take that
6843 // information separately to allow kPointerCid.
6844 ASSERT(class_id != kPointerCid);
6847}
6848
6850 flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
6851
6852 if (auto box = index()->definition()->AsBoxInt64()) {
 6853     // TODO(dartbug.com/39432): Make LoadIndexed fully support unboxed indices.
6854 if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
6855 auto Z = flow_graph->zone();
6856 auto load = new (Z) LoadIndexedInstr(
6857 array()->CopyWithType(Z), box->value()->CopyWithType(Z),
6858 /*index_unboxed=*/true, index_scale(), class_id(), alignment_,
6859 GetDeoptId(), source(), result_type_);
6860 flow_graph->InsertBefore(this, load, env(), FlowGraph::kValue);
6861 return load;
6862 }
6863 }
6864 return this;
6865}
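// Example: a load whose index is BoxInt64(i) is rewritten on 64-bit targets
// into an equivalent load that consumes i directly as an unboxed index,
// eliminating the box.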
6866
6870}
6871
6873 Value* index,
6874 Value* value,
6875 StoreBarrierType emit_store_barrier,
6876 bool index_unboxed,
6877 intptr_t index_scale,
6878 intptr_t class_id,
6879 AlignmentType alignment,
6880 intptr_t deopt_id,
6882 SpeculativeMode speculative_mode)
6883 : TemplateInstruction(source, deopt_id),
6884 emit_store_barrier_(emit_store_barrier),
6885 index_unboxed_(index_unboxed),
6886 index_scale_(index_scale),
6887 class_id_(class_id),
6888 alignment_(StrengthenAlignment(class_id, alignment)),
6889 token_pos_(source.token_pos),
6890 speculative_mode_(speculative_mode) {
6891 // In particular, notice that kPointerCid is _not_ supported because it gives
6892 // no information about whether the elements are signed for elements with
6893 // unboxed integer representations. The constructor must take that information
6894 // separately to allow kPointerCid.
6895 ASSERT(class_id != kPointerCid);
6899}
6900
6902 flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
6903
6904 if (auto box = index()->definition()->AsBoxInt64()) {
 6905     // TODO(dartbug.com/39432): Make StoreIndexed fully support unboxed indices.
6906 if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
6907 auto Z = flow_graph->zone();
6908 auto store = new (Z) StoreIndexedInstr(
6909 array()->CopyWithType(Z), box->value()->CopyWithType(Z),
6910 value()->CopyWithType(Z), emit_store_barrier_,
6911 /*index_unboxed=*/true, index_scale(), class_id(), alignment_,
6912 GetDeoptId(), source(), speculative_mode_);
6913 flow_graph->InsertBefore(this, store, env(), FlowGraph::kEffect);
6914 return nullptr;
6915 }
6916 }
6917 return this;
6918}
6919
6923}
6924
6926 intptr_t idx) const {
6927 // Array can be a Dart object or a pointer to external data.
6928 if (idx == 0) return kNoRepresentation; // Flexible input representation.
6929 if (idx == 1) {
6930 if (index_unboxed_) {
6931#if defined(TARGET_ARCH_IS_64_BIT)
6932 return kUnboxedInt64;
6933#else
6934 // TODO(dartbug.com/39432): kUnboxedInt32 || kUnboxedUint32 on 32-bit
6935 // architectures.
6936 return kNoRepresentation; // Index can be any unboxed representation.
6937#endif
6938 } else {
6939 return kTagged; // Index is a smi.
6940 }
6941 }
6942 ASSERT(idx == 2);
6943 return ValueRepresentation(class_id());
6944}
6945
6946#if defined(TARGET_ARCH_ARM64)
6947// We can emit a 16 byte move in a single instruction using LDP/STP.
6948static const intptr_t kMaxElementSizeForEfficientCopy = 16;
6949#else
6950static const intptr_t kMaxElementSizeForEfficientCopy =
6952#endif
6953
6955 flow_graph->ExtractExternalUntaggedPayload(this, src(), src_cid_);
6956 flow_graph->ExtractExternalUntaggedPayload(this, dest(), dest_cid_);
6957
6958 if (!length()->BindsToSmiConstant()) {
6959 return this;
6960 } else if (length()->BoundSmiConstant() == 0) {
6961 // Nothing to copy.
6962 return nullptr;
6963 }
6964
6965 if (!src_start()->BindsToSmiConstant() ||
6966 !dest_start()->BindsToSmiConstant()) {
6967 // TODO(https://dartbug.com/51031): Consider adding support for src/dest
6968 // starts to be in bytes rather than element size.
6969 return this;
6970 }
6971
6972 intptr_t new_length = length()->BoundSmiConstant();
6973 intptr_t new_src_start = src_start()->BoundSmiConstant();
6974 intptr_t new_dest_start = dest_start()->BoundSmiConstant();
6975 intptr_t new_element_size = element_size_;
6976 while (((new_length | new_src_start | new_dest_start) & 1) == 0 &&
6977 new_element_size < kMaxElementSizeForEfficientCopy) {
6978 new_length >>= 1;
6979 new_src_start >>= 1;
6980 new_dest_start >>= 1;
6981 new_element_size <<= 1;
6982 }
6983 if (new_element_size == element_size_) {
6984 return this;
6985 }
6986
6987 // The new element size is larger than the original one, so it must be > 1.
6988 // That means unboxed integers will always require a shift, but Smis
6989 // may not if element_size == 2, so always use Smis.
6990 auto* const Z = flow_graph->zone();
6991 auto* const length_instr =
6992 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_length)));
6993 auto* const src_start_instr =
6994 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_src_start)));
6995 auto* const dest_start_instr =
6996 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_dest_start)));
6997 length()->BindTo(length_instr);
6998 src_start()->BindTo(src_start_instr);
6999 dest_start()->BindTo(dest_start_instr);
7000 element_size_ = new_element_size;
7001 unboxed_inputs_ = false;
7002 return this;
7003}
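// Worked example (assuming a 64-bit target, where the efficient element limit
// is the 8-byte word size): copying 8 one-byte elements from offset 0 to
// offset 8 becomes a copy of 1 eight-byte element from offset 0 to offset 1,
// since the length and both start offsets stay even at every doubling step.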
7004
7006 const Location& length_loc = locs()->in(kLengthPos);
7007 // Note that for all architectures, constant_length is only true if
7008 // length() binds to a _small_ constant, so we can end up generating a loop
 7009   // if the constant that length() was bound to is too large.
7010 const bool constant_length = length_loc.IsConstant();
7011 const Register length_reg = constant_length ? kNoRegister : length_loc.reg();
7012 const intptr_t num_elements =
7013 constant_length ? Integer::Cast(length_loc.constant()).AsInt64Value()
7014 : -1;
7015
7016 // The zero constant case should be handled via canonicalization.
7017 ASSERT(!constant_length || num_elements > 0);
7018
7019#if defined(TARGET_ARCH_IA32)
7020 // We don't have enough registers to create temps for these, so we just
7021 // define them to be the same as src_reg and dest_reg below.
7022 const Register src_payload_reg = locs()->in(kSrcPos).reg();
7023 const Register dest_payload_reg = locs()->in(kDestPos).reg();
7024#else
7025 const Register src_payload_reg = locs()->temp(0).reg();
7026 const Register dest_payload_reg = locs()->temp(1).reg();
7027#endif
7028
7029 {
7030 const Register src_reg = locs()->in(kSrcPos).reg();
7031 const Register dest_reg = locs()->in(kDestPos).reg();
7032 const Representation src_rep = src()->definition()->representation();
7033 const Representation dest_rep = dest()->definition()->representation();
7034 const Location& src_start_loc = locs()->in(kSrcStartPos);
7035 const Location& dest_start_loc = locs()->in(kDestStartPos);
7036
7037 EmitComputeStartPointer(compiler, src_cid_, src_reg, src_payload_reg,
7038 src_rep, src_start_loc);
7039 EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_payload_reg,
7040 dest_rep, dest_start_loc);
7041 }
7042
7043 compiler::Label copy_forwards, done;
7044 if (!constant_length) {
7045#if defined(TARGET_ARCH_IA32)
7046 // Save ESI (THR), as we have to use it on the loop path.
7047 __ PushRegister(ESI);
7048#endif
7049 PrepareLengthRegForLoop(compiler, length_reg, &done);
7050 }
7051 // Omit the reversed loop for possible overlap if copying a single element.
7052 if (can_overlap() && num_elements != 1) {
7053 __ CompareRegisters(dest_payload_reg, src_payload_reg);
7054 // Both regions are the same size, so if there is an overlap, then either:
7055 //
7056 // * The destination region comes before the source, so copying from
7057 // front to back ensures that the data in the overlap is read and
7058 // copied before it is written.
7059 // * The source region comes before the destination, which requires
7060 // copying from back to front to ensure that the data in the overlap is
7061 // read and copied before it is written.
7062 //
7063 // To make the generated code smaller for the unrolled case, we do not
7064 // additionally verify here that there is an actual overlap. Instead, only
7065 // do that when we need to calculate the end address of the regions in
7066 // the loop case.
7067 const auto jump_distance = FLAG_target_memory_sanitizer
7070 __ BranchIf(UNSIGNED_LESS_EQUAL, &copy_forwards, jump_distance);
7071 __ Comment("Copying backwards");
7072 if (constant_length) {
7073 EmitUnrolledCopy(compiler, dest_payload_reg, src_payload_reg,
7074 num_elements, /*reversed=*/true);
7075 } else {
7076 EmitLoopCopy(compiler, dest_payload_reg, src_payload_reg, length_reg,
7077 &done, &copy_forwards);
7078 }
7079 __ Jump(&done, jump_distance);
7080 __ Comment("Copying forwards");
7081 }
7082 __ Bind(&copy_forwards);
7083 if (constant_length) {
7084 EmitUnrolledCopy(compiler, dest_payload_reg, src_payload_reg, num_elements,
7085 /*reversed=*/false);
7086 } else {
7087 EmitLoopCopy(compiler, dest_payload_reg, src_payload_reg, length_reg,
7088 &done);
7089 }
7090 __ Bind(&done);
7091#if defined(TARGET_ARCH_IA32)
7092 if (!constant_length) {
7093 // Restore ESI (THR).
7094 __ PopRegister(ESI);
7095 }
7096#endif
7097}
7098
7099// EmitUnrolledCopy on ARM is different enough that it is defined separately.
7100#if !defined(TARGET_ARCH_ARM)
7102 Register dest_reg,
7103 Register src_reg,
7104 intptr_t num_elements,
7105 bool reversed) {
7106 ASSERT(element_size_ <= 16);
7107 const intptr_t num_bytes = num_elements * element_size_;
7108#if defined(TARGET_ARCH_ARM64)
7109 // We use LDP/STP with TMP/TMP2 to handle 16-byte moves.
7110 const intptr_t mov_size = element_size_;
7111#else
7112 const intptr_t mov_size =
7113 Utils::Minimum<intptr_t>(element_size_, compiler::target::kWordSize);
7114#endif
7115 const intptr_t mov_repeat = num_bytes / mov_size;
7116 ASSERT(num_bytes % mov_size == 0);
7117
7118#if defined(TARGET_ARCH_IA32)
7119 // No TMP on IA32, so we have to allocate one instead.
7120 const Register temp_reg = locs()->temp(0).reg();
7121#else
7122 const Register temp_reg = TMP;
7123#endif
7124 for (intptr_t i = 0; i < mov_repeat; i++) {
7125 const intptr_t offset = (reversed ? (mov_repeat - (i + 1)) : i) * mov_size;
7126 switch (mov_size) {
7127 case 1:
7128 __ LoadFromOffset(temp_reg, src_reg, offset, compiler::kUnsignedByte);
7129 __ StoreToOffset(temp_reg, dest_reg, offset, compiler::kUnsignedByte);
7130 break;
7131 case 2:
7132 __ LoadFromOffset(temp_reg, src_reg, offset,
7134 __ StoreToOffset(temp_reg, dest_reg, offset,
7136 break;
7137 case 4:
7138 __ LoadFromOffset(temp_reg, src_reg, offset,
7140 __ StoreToOffset(temp_reg, dest_reg, offset,
7142 break;
7143 case 8:
7144#if defined(TARGET_ARCH_IS_64_BIT)
7145 __ LoadFromOffset(temp_reg, src_reg, offset, compiler::kEightBytes);
7146 __ StoreToOffset(temp_reg, dest_reg, offset, compiler::kEightBytes);
7147#else
7148 UNREACHABLE();
7149#endif
7150 break;
7151 case 16: {
7152#if defined(TARGET_ARCH_ARM64)
7153 __ ldp(
7154 TMP, TMP2,
7156 __ stp(
7157 TMP, TMP2,
7159#else
7160 UNREACHABLE();
7161#endif
7162 break;
7163 }
7164 default:
7165 UNREACHABLE();
7166 }
7167 }
7168
7170#if defined(TARGET_ARCH_X64)
7173 __ PushRegisters(kVolatileRegisterSet);
7174 __ MsanUnpoison(dest_reg, num_bytes);
7175 __ PopRegisters(kVolatileRegisterSet);
7176#endif
7177 }
7178}
7179#endif
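// Example for the unrolled copy above (assuming an X64 target): copying two
// 16-byte elements uses an 8-byte chunk size, so four load/store pairs are
// emitted through TMP; on ARM64 the same copy would instead use two 16-byte
// LDP/STP pairs.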
7180
7182 return RepresentationUtils::IsUnboxed(scan_flags_field_.representation());
7183}
7184
7187 intptr_t deopt_id,
7188 MethodRecognizer::Kind recognized_kind,
7190 : VariadicDefinition(std::move(inputs), source, deopt_id),
7191 recognized_kind_(recognized_kind),
7192 token_pos_(source.token_pos) {
7193 ASSERT(InputCount() == ArgumentCountFor(recognized_kind_));
7194}
7195
7198 switch (kind) {
7199 case MethodRecognizer::kDoubleTruncateToDouble:
7200 case MethodRecognizer::kDoubleFloorToDouble:
7201 case MethodRecognizer::kDoubleCeilToDouble:
7202 case MethodRecognizer::kDoubleRoundToDouble:
7203 case MethodRecognizer::kMathAtan:
7204 case MethodRecognizer::kMathTan:
7205 case MethodRecognizer::kMathAcos:
7206 case MethodRecognizer::kMathAsin:
7207 case MethodRecognizer::kMathSin:
7208 case MethodRecognizer::kMathCos:
7209 case MethodRecognizer::kMathExp:
7210 case MethodRecognizer::kMathLog:
7211 return 1;
7212 case MethodRecognizer::kDoubleMod:
7213 case MethodRecognizer::kDoubleRem:
7214 case MethodRecognizer::kMathDoublePow:
7215 case MethodRecognizer::kMathAtan2:
7216 return 2;
7217 default:
7218 UNREACHABLE();
7219 }
7220 return 0;
7221}
7222
7224 switch (recognized_kind_) {
7225 case MethodRecognizer::kDoubleTruncateToDouble:
7226 return kLibcTruncRuntimeEntry;
7227 case MethodRecognizer::kDoubleRoundToDouble:
7228 return kLibcRoundRuntimeEntry;
7229 case MethodRecognizer::kDoubleFloorToDouble:
7230 return kLibcFloorRuntimeEntry;
7231 case MethodRecognizer::kDoubleCeilToDouble:
7232 return kLibcCeilRuntimeEntry;
7233 case MethodRecognizer::kMathDoublePow:
7234 return kLibcPowRuntimeEntry;
7235 case MethodRecognizer::kDoubleMod:
7236 return kDartModuloRuntimeEntry;
7237 case MethodRecognizer::kDoubleRem:
7238 return kLibcFmodRuntimeEntry;
7239 case MethodRecognizer::kMathTan:
7240 return kLibcTanRuntimeEntry;
7241 case MethodRecognizer::kMathAsin:
7242 return kLibcAsinRuntimeEntry;
7243 case MethodRecognizer::kMathSin:
7244 return kLibcSinRuntimeEntry;
7245 case MethodRecognizer::kMathCos:
7246 return kLibcCosRuntimeEntry;
7247 case MethodRecognizer::kMathAcos:
7248 return kLibcAcosRuntimeEntry;
7249 case MethodRecognizer::kMathAtan:
7250 return kLibcAtanRuntimeEntry;
7251 case MethodRecognizer::kMathAtan2:
7252 return kLibcAtan2RuntimeEntry;
7253 case MethodRecognizer::kMathExp:
7254 return kLibcExpRuntimeEntry;
7255 case MethodRecognizer::kMathLog:
7256 return kLibcLogRuntimeEntry;
7257 default:
7258 UNREACHABLE();
7259 }
7260 return kLibcPowRuntimeEntry;
7261}
7262
7264 if (!CompilerState::Current().is_aot() &&
7266 Token::Kind op_kind = Token::kILLEGAL;
7267 switch (recognized_kind_) {
7268 case MethodRecognizer::kDoubleTruncateToDouble:
7269 op_kind = Token::kTRUNCATE;
7270 break;
7271 case MethodRecognizer::kDoubleFloorToDouble:
7272 op_kind = Token::kFLOOR;
7273 break;
7274 case MethodRecognizer::kDoubleCeilToDouble:
7275 op_kind = Token::kCEILING;
7276 break;
7277 default:
7278 return this;
7279 }
7280 auto* instr = new UnaryDoubleOpInstr(
7281 op_kind, new Value(InputAt(0)->definition()), GetDeoptId(),
7282 Instruction::kNotSpeculative, kUnboxedDouble);
7283 flow_graph->InsertBefore(this, instr, env(), FlowGraph::kValue);
7284 return instr;
7285 }
7286
7287 return this;
7288}
7289
7290bool DoubleToIntegerInstr::SupportsFloorAndCeil() {
7291#if defined(TARGET_ARCH_X64)
7292 return CompilerState::Current().is_aot() || FLAG_target_unknown_cpu;
7293#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
7294 defined(TARGET_ARCH_RISCV64)
7295 return true;
7296#else
7297 return false;
7298#endif
7299}
7300
7302 if (SupportsFloorAndCeil() &&
7303 (recognized_kind() == MethodRecognizer::kDoubleToInteger)) {
7304 if (auto* arg = value()->definition()->AsInvokeMathCFunction()) {
7305 switch (arg->recognized_kind()) {
7306 case MethodRecognizer::kDoubleFloorToDouble:
7307 // x.floorToDouble().toInt() => x.floor()
7308 recognized_kind_ = MethodRecognizer::kDoubleFloorToInt;
7309 value()->BindTo(arg->InputAt(0)->definition());
7310 break;
7311 case MethodRecognizer::kDoubleCeilToDouble:
7312 // x.ceilToDouble().toInt() => x.ceil()
7313 recognized_kind_ = MethodRecognizer::kDoubleCeilToInt;
7314 value()->BindTo(arg->InputAt(0)->definition());
7315 break;
7316 default:
7317 break;
7318 }
7319 }
7320 }
7321 return this;
7322}
7323
7324TruncDivModInstr::TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id)
7325 : TemplateDefinition(deopt_id) {
7326 SetInputAt(0, lhs);
7327 SetInputAt(1, rhs);
7328}
7329
7331 switch (token) {
7332 case Token::kTRUNCDIV:
7333 return 0;
7334 case Token::kMOD:
7335 return 1;
7336 default:
7337 UNIMPLEMENTED();
7338 return -1;
7339 }
7340}
7341
7343 bool optimizing) const {
7344 return MakeCallSummary(zone, this);
7345}
7346
7348 if (link_lazily()) {
7349 // Resolution will happen during NativeEntry::LinkNativeCall.
7350 return;
7351 }
7352
7353 Thread* thread = Thread::Current();
7354 Zone* zone = thread->zone();
7355
7356 // Currently we perform unoptimized compilations only on mutator threads. If
7357 // the compiler has to resolve a native to a function pointer it calls out to
7358 // the embedder to do so.
7359 //
7360 // Unfortunately that embedder API was designed by giving it a handle to a
7361 // string. So the embedder will have to call back into the VM to convert it to
7362 // a C string - which requires an active isolate.
7363 //
7364 // => To allow this `dart-->jit-compiler-->embedder-->dart api` we set the
7365 // active isolate again.
7366 //
7367 ActiveIsolateScope active_isolate(thread);
7368
7369 const Class& cls = Class::Handle(zone, function().Owner());
7370 const Library& library = Library::Handle(zone, cls.library());
7371
7375
7376 const int num_params =
7378 bool auto_setup_scope = true;
7380 library, native_name(), num_params, &auto_setup_scope);
7381 if (native_function == nullptr) {
7382 if (has_inlining_id()) {
7383 UNIMPLEMENTED();
7384 }
7387 "native function '%s' (%" Pd " arguments) cannot be found",
7388 native_name().ToCString(), function().NumParameters());
7389 }
7390 set_is_auto_scope(auto_setup_scope);
7391 set_native_c_function(native_function);
7392}
7393
7394#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) && \
7395 !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
7396
7398 UNREACHABLE();
7399}
7400
7401void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7402 UNREACHABLE();
7403}
7404
7405#endif // !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) && \
7406 // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
7407
7409 if (idx < TargetAddressIndex()) {
7410 // All input handles are passed as tagged values to FfiCallInstr and
7411 // are given stack locations. FfiCallInstr then passes an untagged pointer
7412 // to the handle on the stack (Dart_Handle) to the C function.
7413 if (marshaller_.IsHandleCType(marshaller_.ArgumentIndex(idx))) {
7414 return kTagged;
7415 }
7416 return marshaller_.RepInFfiCall(idx);
7417 } else if (idx == TargetAddressIndex()) {
7418#if defined(DEBUG)
7419 auto const rep =
7421 ASSERT(rep == kUntagged || rep == kUnboxedAddress);
7422#endif
7423 return kNoRepresentation; // Allows kUntagged or kUnboxedAddress.
7424 } else {
7426 return kTagged;
7427 }
7428}
7429
7430#define Z zone_
7431
7432LocationSummary* FfiCallInstr::MakeLocationSummaryInternal(
7433 Zone* zone,
7434 bool is_optimizing,
7435 const RegList temps) const {
7436 auto contains_call =
7438
7439 LocationSummary* summary = new (zone) LocationSummary(
7440 zone, /*num_inputs=*/InputCount(),
7441 /*num_temps=*/Utils::CountOneBitsWord(temps), contains_call);
7442
7443 intptr_t reg_i = 0;
7444 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
7445 if ((temps & (1 << reg)) != 0) {
7446 summary->set_temp(reg_i,
7447 Location::RegisterLocation(static_cast<Register>(reg)));
7448 reg_i++;
7449 }
7450 }
7451
7452#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
 7453   // Only use R13 if really needed; having R13 free causes less spilling.
7454 const Register target_address =
7455 marshaller_.contains_varargs()
7456 ? R13
7458#else
7460#endif
7461#define R(r) (1 << r)
7462 ASSERT_EQUAL(temps & R(target_address), 0x0);
7463#undef R
7464 summary->set_in(TargetAddressIndex(),
7465 Location::RegisterLocation(target_address));
7466 for (intptr_t i = 0, n = marshaller_.NumArgumentDefinitions(); i < n; ++i) {
7467 summary->set_in(i, marshaller_.LocInFfiCall(i));
7468 }
7469
7470 if (marshaller_.ReturnsCompound()) {
7471 summary->set_in(CompoundReturnTypedDataIndex(), Location::Any());
7472 }
7473 summary->set_out(0, marshaller_.LocInFfiCall(compiler::ffi::kResultIndex));
7474
7475 return summary;
7476}
7477
7479 const Register saved_fp,
7480 const Register temp0,
7481 const Register temp1) {
7482 __ Comment("EmitParamMoves");
7483
7484 // Moves for return pointer.
7485 const auto& return_location =
7486 marshaller_.Location(compiler::ffi::kResultIndex);
7487 if (return_location.IsPointerToMemory()) {
7488 __ Comment("return_location.IsPointerToMemory");
7489 const auto& pointer_location =
7490 return_location.AsPointerToMemory().pointer_location();
7491 const auto& pointer_register =
7492 pointer_location.IsRegisters()
7493 ? pointer_location.AsRegisters().reg_at(0)
7494 : temp0;
7495 __ MoveRegister(pointer_register, SPREG);
7496 __ AddImmediate(pointer_register, marshaller_.PassByPointerStackOffset(
7498
7499 if (pointer_location.IsStack()) {
7500 const auto& pointer_stack = pointer_location.AsStack();
7501 __ StoreMemoryValue(pointer_register, pointer_stack.base_register(),
7502 pointer_stack.offset_in_bytes());
7503 }
7504 }
7505
7506 // Moves for arguments.
7507 compiler::ffi::FrameRebase rebase(compiler->zone(), /*old_base=*/FPREG,
7508 /*new_base=*/saved_fp,
7509 /*stack_delta=*/0);
7510 intptr_t def_index = 0;
7511 for (intptr_t arg_index = 0; arg_index < marshaller_.num_args();
7512 arg_index++) {
7513 const intptr_t num_defs = marshaller_.NumDefinitions(arg_index);
7514 const auto& arg_target = marshaller_.Location(arg_index);
7515 __ Comment("arg_index %" Pd " arg_target %s", arg_index,
7516 arg_target.ToCString());
7517
7518 // First deal with moving all individual definitions passed in to the
7519 // FfiCall to the right native location based on calling convention.
7520 for (intptr_t i = 0; i < num_defs; i++) {
7521 if ((arg_target.IsPointerToMemory() ||
7522 marshaller_.IsCompoundPointer(arg_index)) &&
7523 i == 1) {
7524 // The offset_in_bytes is not an argument for C, so don't move it.
7525 // It is used as offset_in_bytes_loc below and moved there if
7526 // necessary.
7527 def_index++;
7528 continue;
7529 }
7530 __ Comment(" def_index %" Pd, def_index);
7531 Location origin = rebase.Rebase(locs()->in(def_index));
7532 const Representation origin_rep = RequiredInputRepresentation(def_index);
7533
7534 // Find the native location where this individual definition should be
7535 // moved to.
7536 const auto& def_target =
7537 arg_target.payload_type().IsPrimitive() ? arg_target
7538 : arg_target.IsMultiple() ? *arg_target.AsMultiple().locations()[i]
7539 : arg_target.IsPointerToMemory()
7540 ? arg_target.AsPointerToMemory().pointer_location()
7541 : /*arg_target.IsStack()*/ arg_target.Split(compiler->zone(),
7542 num_defs, i);
7543
7544 ConstantTemporaryAllocator temp_alloc(temp0);
7545 if (origin.IsConstant()) {
7546 __ Comment("origin.IsConstant()");
7547 ASSERT(!marshaller_.IsHandleCType(arg_index));
7548 ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
7549 ASSERT(!marshaller_.IsCompoundPointer(arg_index));
7550 compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
7551 } else if (origin.IsPairLocation() &&
7552 (origin.AsPairLocation()->At(0).IsConstant() ||
7553 origin.AsPairLocation()->At(1).IsConstant())) {
7554 // Note: half of the pair can be constant.
7555 __ Comment("origin.IsPairLocation() and constant");
7556 ASSERT(!marshaller_.IsHandleCType(arg_index));
7557 ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
7558 ASSERT(!marshaller_.IsCompoundPointer(arg_index));
7559 compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
7560 } else if (marshaller_.IsHandleCType(arg_index)) {
7561 __ Comment("marshaller_.IsHandleCType(arg_index)");
7562 // Handles are passed into FfiCalls as Tagged values on the stack, and
7563 // then we pass pointers to these handles to the native function here.
7564 ASSERT(origin_rep == kTagged);
7568 ASSERT(num_defs == 1);
7569 ASSERT(origin.IsStackSlot());
7570 if (def_target.IsRegisters()) {
7571 __ AddImmediate(def_target.AsLocation().reg(), origin.base_reg(),
7573 } else {
7574 ASSERT(def_target.IsStack());
7575 const auto& target_stack = def_target.AsStack();
7576 __ AddImmediate(temp0, origin.base_reg(),
7578 __ StoreToOffset(temp0, target_stack.base_register(),
7579 target_stack.offset_in_bytes());
7580 }
7581 } else {
7582 __ Comment("def_target %s <- origin %s %s",
7583 def_target.ToCString(compiler->zone()), origin.ToCString(),
7584 RepresentationUtils::ToCString(origin_rep));
7585#ifdef DEBUG
 7586       // Stack arguments are split into word-size chunks. These chunks can
 7587       // copy too much. However, that doesn't matter in practice because we
 7588       // process the stack in order.
 7589       // It only matters for the last chunk, which should not overwrite what
 7590       // was already on the stack.
7591 if (def_target.IsStack()) {
7592 const auto& def_target_stack = def_target.AsStack();
7593 ASSERT(def_target_stack.offset_in_bytes() +
7594 def_target.payload_type().SizeInBytes() <=
7595 marshaller_.RequiredStackSpaceInBytes());
7596 }
7597#endif
7598 if (marshaller_.IsTypedDataPointer(arg_index) ||
7599 marshaller_.IsCompoundPointer(arg_index)) {
7600 // Unwrap typed data before move to native location.
7601 __ Comment("Load typed data base address");
7602 if (origin.IsStackSlot()) {
7603 compiler->EmitMove(Location::RegisterLocation(temp0), origin,
7604 &temp_alloc);
7605 origin = Location::RegisterLocation(temp0);
7606 }
7607 ASSERT(origin.IsRegister());
7608 __ LoadFromSlot(origin.reg(), origin.reg(), Slot::PointerBase_data());
7609 if (marshaller_.IsCompoundPointer(arg_index)) {
7610 __ Comment("Load offset in bytes");
7611 const intptr_t offset_in_bytes_def_index = def_index + 1;
7612 const Location offset_in_bytes_loc =
7613 rebase.Rebase(locs()->in(offset_in_bytes_def_index));
7614 Register offset_in_bytes_reg = kNoRegister;
7615 if (offset_in_bytes_loc.IsRegister()) {
7616 offset_in_bytes_reg = offset_in_bytes_loc.reg();
7617 } else {
7618 offset_in_bytes_reg = temp1;
7619 NoTemporaryAllocator no_temp;
7620 compiler->EmitMove(
7621 Location::RegisterLocation(offset_in_bytes_reg),
7622 offset_in_bytes_loc, &no_temp);
7623 }
7624 __ AddRegisters(origin.reg(), offset_in_bytes_reg);
7625 }
7626 }
7627 compiler->EmitMoveToNative(def_target, origin, origin_rep, &temp_alloc);
7628 }
7629 def_index++;
7630 }
7631
7632 // Then make sure that any pointers passed through the calling convention
7633 // actually have a copy of the struct.
7634 // Note that the step above has already moved the pointer into the expected
7635 // native location.
7636 if (arg_target.IsPointerToMemory()) {
7637 __ Comment("arg_target.IsPointerToMemory");
7638 NoTemporaryAllocator temp_alloc;
7639 const auto& pointer_loc =
7640 arg_target.AsPointerToMemory().pointer_location();
7641
7642 // TypedData data pointed to in temp.
7644 compiler->zone(), pointer_loc.payload_type(),
7645 pointer_loc.container_type(), temp0);
7646 compiler->EmitNativeMove(dst, pointer_loc, &temp_alloc);
7647 __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
7648
7649 __ Comment("IsPointerToMemory add offset");
7650 const intptr_t offset_in_bytes_def_index =
7651 def_index - 1; // ++'d already.
7652 const Location offset_in_bytes_loc =
7653 rebase.Rebase(locs()->in(offset_in_bytes_def_index));
7654 Register offset_in_bytes_reg = kNoRegister;
7655 if (offset_in_bytes_loc.IsRegister()) {
7656 offset_in_bytes_reg = offset_in_bytes_loc.reg();
7657 } else {
7658 offset_in_bytes_reg = temp1;
7659 NoTemporaryAllocator no_temp;
7660 compiler->EmitMove(Location::RegisterLocation(offset_in_bytes_reg),
7661 offset_in_bytes_loc, &no_temp);
7662 }
7663 __ AddRegisters(temp0, offset_in_bytes_reg);
7664
7665 // Copy chunks. The destination may be rounded up to a multiple of the
7666 // word size, because we do the same rounding when we allocate the space
7667 // on the stack. The source, however, may not be allocated by the VM and
7668 // may end right at a page boundary, so we must not read past its end.
7669 __ Comment("IsPointerToMemory copy chunks");
7670 const intptr_t sp_offset =
7671 marshaller_.PassByPointerStackOffset(arg_index);
7672 __ UnrolledMemCopy(SPREG, sp_offset, temp0, 0,
7673 arg_target.payload_type().SizeInBytes(), temp1);
7674
7675 // Store the stack address in the argument location.
7676 __ MoveRegister(temp0, SPREG);
7677 __ AddImmediate(temp0, sp_offset);
7678 const auto& src = compiler::ffi::NativeRegistersLocation(
7679 compiler->zone(), pointer_loc.payload_type(),
7680 pointer_loc.container_type(), temp0);
7681 __ Comment("pointer_loc %s <- src %s", pointer_loc.ToCString(),
7682 src.ToCString());
7683 compiler->EmitNativeMove(pointer_loc, src, &temp_alloc);
7684 }
7685 }
7686
7687 __ Comment("EmitParamMovesEnd");
7688}
7689
7690void FfiCallInstr::EmitReturnMoves(FlowGraphCompiler* compiler,
7691 const Register temp0,
7692 const Register temp1) {
7693 const auto& returnLocation =
7694 marshaller_.Location(compiler::ffi::kResultIndex);
7695 if (returnLocation.payload_type().IsVoid()) {
7696 return;
7697 }
7698
7699 __ Comment("EmitReturnMoves");
7700
7701 NoTemporaryAllocator no_temp;
7702 if (returnLocation.IsRegisters() || returnLocation.IsFpuRegisters()) {
7703 const auto& src = returnLocation;
7704 const Location dst_loc = locs()->out(0);
7705 const Representation dst_type = representation();
7706 compiler->EmitMoveFromNative(dst_loc, dst_type, src, &no_temp);
7707 } else if (marshaller_.ReturnsCompound()) {
7708 ASSERT(returnLocation.payload_type().IsCompound());
7709
7710 // Get the typed data pointer which we have pinned to a stack slot.
7711 const Location typed_data_loc = locs()->in(CompoundReturnTypedDataIndex());
7712 if (typed_data_loc.IsStackSlot()) {
7713 ASSERT(typed_data_loc.base_reg() == FPREG);
7714 // If this is a leaf call there is no extra call frame to step through.
7715 if (is_leaf_) {
7716 __ LoadMemoryValue(temp0, FPREG, typed_data_loc.ToStackSlotOffset());
7717 } else {
7718 __ LoadMemoryValue(
7719 temp0, FPREG,
7721 __ LoadMemoryValue(temp0, temp0, typed_data_loc.ToStackSlotOffset());
7722 }
7723 } else {
7724 compiler->EmitMove(Location::RegisterLocation(temp0), typed_data_loc,
7725 &no_temp);
7726 }
7727 __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
7728
7729 if (returnLocation.IsPointerToMemory()) {
7730 // Copy blocks from the stack location to TypedData.
7731 // Struct size is rounded up to a multiple of target::kWordSize.
7732 // This is safe because we do the same rounding when we allocate the
7733 // TypedData in IL.
7734 const intptr_t sp_offset =
7735 marshaller_.PassByPointerStackOffset(compiler::ffi::kResultIndex);
7736 __ UnrolledMemCopy(temp0, 0, SPREG, sp_offset,
7737 marshaller_.CompoundReturnSizeInBytes(), temp1);
7738 } else {
7739 ASSERT(returnLocation.IsMultiple());
7740 // Copy to the struct from the native locations.
7741 const auto& multiple =
7742 marshaller_.Location(compiler::ffi::kResultIndex).AsMultiple();
7743
7744 int offset_in_bytes = 0;
7745 for (int i = 0; i < multiple.locations().length(); i++) {
7746 const auto& src = *multiple.locations().At(i);
7747 const auto& dst = compiler::ffi::NativeStackLocation(
7748 src.payload_type(), src.container_type(), temp0, offset_in_bytes);
7749 compiler->EmitNativeMove(dst, src, &no_temp);
7750 offset_in_bytes += src.payload_type().SizeInBytes();
7751 }
7752 }
7753 } else {
7754 UNREACHABLE();
7755 }
7756
7757 __ Comment("EmitReturnMovesEnd");
7758}
7759
7760LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
7761 bool opt) const {
7762 const intptr_t kNumInputs = 2;
7763#if defined(TARGET_ARCH_IA32)
7764 const intptr_t kNumTemps = ShouldEmitStoreBarrier() ? 1 : 0;
7765#else
7766 const intptr_t kNumTemps = 0;
7767#endif
7768 LocationSummary* summary = new (zone)
7769 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7770
7771 summary->set_in(kInstancePos, Location::RequiresRegister());
7772 const Representation rep = slot().representation();
7773 if (rep == kUntagged) {
7774 summary->set_in(kValuePos, Location::RequiresRegister());
7775 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
7776 const size_t value_size = RepresentationUtils::ValueSize(rep);
7777 if (value_size <= compiler::target::kWordSize) {
7778 summary->set_in(kValuePos, Location::RequiresRegister());
7779 } else {
7780 ASSERT(value_size == 2 * compiler::target::kWordSize);
7781 summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
7782 Location::RequiresRegister()));
7783 }
7784 } else if (RepresentationUtils::IsUnboxed(rep)) {
7785 summary->set_in(kValuePos, Location::RequiresFpuRegister());
7786 } else if (ShouldEmitStoreBarrier()) {
7787 summary->set_in(kValuePos,
7788 Location::RegisterLocation(kWriteBarrierValueReg));
7789 } else {
7790#if defined(TARGET_ARCH_IA32)
7791 // IA32 supports emitting `mov mem, Imm32` even for heap
7792 // pointer immediates.
7793 summary->set_in(kValuePos, LocationRegisterOrConstant(value()));
7794#elif defined(TARGET_ARCH_X64)
7795 // X64 supports emitting `mov mem, Imm32` only with non-pointer
7796 // immediate.
7797 summary->set_in(kValuePos, LocationRegisterOrSmiConstant(value()));
7798#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
7799 defined(TARGET_ARCH_RISCV64)
7800 // ARM64 and RISC-V have dedicated zero and null registers which can be
7801 // used in store instructions.
7802 Location value_loc = Location::RequiresRegister();
7803 if (auto constant = value()->definition()->AsConstant()) {
7804 const auto& value = constant->value();
7805 if (value.IsNull() || (value.IsSmi() && Smi::Cast(value).Value() == 0)) {
7806 value_loc = Location::Constant(constant);
7807 }
7808 }
7809 summary->set_in(kValuePos, value_loc);
7810#else
7811 // No support for moving immediate to memory directly.
7812 summary->set_in(kValuePos, Location::RequiresRegister());
7813#endif
7814 }
7815 if (kNumTemps == 1) {
7816 summary->set_temp(0, Location::RequiresRegister());
7817 } else {
7818 ASSERT(kNumTemps == 0);
7819 }
7820 return summary;
7821}
7822
7823void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7824 const Register instance_reg = locs()->in(kInstancePos).reg();
7825 ASSERT(OffsetInBytes() >= 0); // Field is finalized.
7826 // For fields on Dart objects, the offset must point after the header.
7827 ASSERT(OffsetInBytes() != 0 || slot().has_untagged_instance());
7828
7829 const Representation rep = slot().representation();
7830 if (rep == kUntagged) {
7831 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7832 memory_order_);
7833 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
7834 const size_t value_size = RepresentationUtils::ValueSize(rep);
7835 if (value_size <= compiler::target::kWordSize) {
7836 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7837 memory_order_);
7838 } else {
7839 ASSERT(slot().representation() == kUnboxedInt64);
7841 auto const value_pair = locs()->in(kValuePos).AsPairLocation();
7842 const Register value_lo = value_pair->At(0).reg();
7843 const Register value_hi = value_pair->At(1).reg();
7844 __ StoreFieldToOffset(value_lo, instance_reg, OffsetInBytes());
7845 __ StoreFieldToOffset(value_hi, instance_reg,
7846 OffsetInBytes() + compiler::target::kWordSize);
7847 }
7848 } else if (RepresentationUtils::IsUnboxed(rep)) {
7849 ASSERT(slot().IsDartField());
7850 const intptr_t cid = slot().field().guarded_cid();
7851 const FpuRegister value = locs()->in(kValuePos).fpu_reg();
7852 switch (cid) {
7853 case kDoubleCid:
7854 __ StoreUnboxedDouble(value, instance_reg,
7855 OffsetInBytes() - kHeapObjectTag);
7856 return;
7857 case kFloat32x4Cid:
7858 case kFloat64x2Cid:
7859 __ StoreUnboxedSimd128(value, instance_reg,
7860 OffsetInBytes() - kHeapObjectTag);
7861 return;
7862 default:
7863 UNREACHABLE();
7864 }
7865 } else if (ShouldEmitStoreBarrier()) {
7866 const Register scratch_reg =
7867 locs()->temp_count() > 0 ? locs()->temp(0).reg() : TMP;
7868 __ StoreToSlot(locs()->in(kValuePos).reg(), instance_reg, slot(),
7869 CanValueBeSmi(), memory_order_, scratch_reg);
7870 } else if (locs()->in(kValuePos).IsConstant()) {
7871 const auto& value = locs()->in(kValuePos).constant();
7872 auto const size =
7874 __ StoreObjectIntoObjectOffsetNoBarrier(instance_reg, OffsetInBytes(),
7875 value, memory_order_, size);
7876 } else {
7877 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7878 memory_order_);
7879 }
7880}
7881
7882LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
7883 Zone* zone,
7884 bool opt) const {
7885 const intptr_t kNumInputs = 3;
7886 const intptr_t kNumTemps = 0;
7887 auto* const summary = new (zone)
7888 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7889
7890 summary->set_in(kBasePos, Location::RequiresRegister());
7891 // Only use a Smi constant for the index if multiplying it by the index
7892 // scale would be an int32 constant.
7893 const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
7894 summary->set_in(kIndexPos, LocationRegisterOrSmiConstant(
7895 index(), kMinInt32 >> scale_shift,
7896 kMaxInt32 >> scale_shift));
7897 // Only use a Smi constant for the offset if it is an int32 constant.
7898 summary->set_in(kOffsetPos, LocationRegisterOrSmiConstant(offset(), kMinInt32,
7899 kMaxInt32));
7900 // Special case for when both inputs are appropriate constants.
7901 if (summary->in(kIndexPos).IsConstant() &&
7902 summary->in(kOffsetPos).IsConstant()) {
7903 const int64_t offset_in_bytes = Utils::AddWithWrapAround<int64_t>(
7904 Utils::MulWithWrapAround<int64_t>(index()->BoundSmiConstant(),
7905 index_scale()),
7906 offset()->BoundSmiConstant());
7907 if (!Utils::IsInt(32, offset_in_bytes)) {
7908 // The offset in bytes calculated from the index and offset cannot
7909 // fit in a 32-bit immediate, so pass the index as a register instead.
7910 summary->set_in(kIndexPos, Location::RequiresRegister());
7911 }
7912 }
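 // Illustrative example (not part of the original source): with
 // index_scale() == 8, a constant index of 0x0FFFFFFF and a constant offset
 // of 0x7FFFFFFF each fit their individual bounds, but the combined
 // displacement 0x0FFFFFFF * 8 + 0x7FFFFFFF exceeds the signed 32-bit
 // range, so the index is passed in a register instead.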
7913
7914 // Currently this instruction can only be used in optimized mode, as it
7915 // takes untagged values and puts untagged values on the stack, and the
7916 // canonicalization pass should always remove no-op uses of this
7917 // instruction. Flag this for handling if this ever changes.
7918 ASSERT(opt && !IsNoop());
7919 summary->set_out(0, Location::RequiresRegister());
7920
7921 return summary;
7922}
7923
7924void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7925 const Register base_reg = locs()->in(kBasePos).reg();
7926 const Location& index_loc = locs()->in(kIndexPos);
7927 const Location& offset_loc = locs()->in(kOffsetPos);
7928 const Register result_reg = locs()->out(0).reg();
7929
7930 ASSERT(!IsNoop());
7931
7932 if (index_loc.IsConstant()) {
7933 const int64_t index = Smi::Cast(index_loc.constant()).Value();
7935 const int64_t scaled_index = index * index_scale();
7936 ASSERT(Utils::IsInt(32, scaled_index));
7937 if (offset_loc.IsConstant()) {
7938 const int64_t disp =
7939 scaled_index + Smi::Cast(offset_loc.constant()).Value();
7940 ASSERT(Utils::IsInt(32, disp));
7941 __ AddScaled(result_reg, kNoRegister, base_reg, TIMES_1, disp);
7942 } else {
7943 __ AddScaled(result_reg, base_reg, offset_loc.reg(), TIMES_1,
7944 scaled_index);
7945 }
7946 } else {
7947 Register index_reg = index_loc.reg();
7950 auto scale = ToScaleFactor(index_scale(), /*index_unboxed=*/true);
7951#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
7952 if (scale == TIMES_16) {
7954 // A ScaleFactor of TIMES_16 is invalid for x86, so box the index as a Smi
7955 // (using the result register to store it to avoid allocating a writable
7956 // register for the index) to reduce the ScaleFactor to TIMES_8.
7957 __ MoveAndSmiTagRegister(result_reg, index_reg);
7958 index_reg = result_reg;
7959 scale = TIMES_8;
7960 }
7961#endif
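 // Illustrative note (not part of the original source), assuming a Smi tag
 // shift of 1: tagging the index as a Smi doubles its value, so scaling the
 // tagged index by TIMES_8 addresses the same bytes as scaling the untagged
 // index by TIMES_16.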
7962 if (offset_loc.IsConstant()) {
7963 const intptr_t disp = Smi::Cast(offset_loc.constant()).Value();
7964 ASSERT(Utils::IsInt(32, disp));
7965 __ AddScaled(result_reg, base_reg, index_reg, scale, disp);
7966 } else {
7967 // No architecture can do this case in a single instruction.
7968 __ AddScaled(result_reg, base_reg, index_reg, scale, /*disp=*/0);
7969 __ AddRegisters(result_reg, offset_loc.reg());
7970 }
7971 }
7972}
7973
7974const Code& DartReturnInstr::GetReturnStub(FlowGraphCompiler* compiler) const {
7975 const Function& function = compiler->parsed_function().function();
7976 ASSERT(function.IsSuspendableFunction());
7977 if (function.IsAsyncFunction()) {
7978 if (compiler->is_optimizing() && !value()->Type()->CanBeFuture()) {
7979 return Code::ZoneHandle(compiler->zone(),
7980 compiler->isolate_group()
7981 ->object_store()
7982 ->return_async_not_future_stub());
7983 }
7984 return Code::ZoneHandle(
7985 compiler->zone(),
7986 compiler->isolate_group()->object_store()->return_async_stub());
7987 } else if (function.IsAsyncGenerator()) {
7988 return Code::ZoneHandle(
7989 compiler->zone(),
7990 compiler->isolate_group()->object_store()->return_async_star_stub());
7991 } else {
7992 UNREACHABLE();
7993 }
7994}
7995
7996void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) {
7997 const auto& dst1 = marshaller_.Location(compiler::ffi::kResultIndex);
7998 if (dst1.payload_type().IsVoid()) {
7999 return;
8000 }
8001 if (dst1.IsMultiple()) {
8002 __ Comment("Load TypedDataBase data pointer and apply offset.");
8003 ASSERT_EQUAL(locs()->input_count(), 2);
8004 Register typed_data_reg = locs()->in(0).reg();
8005 // Load the data pointer out of the TypedData/Pointer.
8006 __ LoadFromSlot(typed_data_reg, typed_data_reg, Slot::PointerBase_data());
8007
8008 // Apply offset.
8009 Register offset_reg = locs()->in(1).reg();
8010 __ AddRegisters(typed_data_reg, offset_reg);
8011
8012 __ Comment("Copy loop");
8013 const auto& multiple = dst1.AsMultiple();
8014 int offset_in_bytes = 0;
8015 for (intptr_t i = 0; i < multiple.locations().length(); i++) {
8016 const auto& dst = *multiple.locations().At(i);
8017 ASSERT(!dst.IsRegisters() ||
8018 dst.AsRegisters().reg_at(0) != typed_data_reg);
8019 const auto& src = compiler::ffi::NativeStackLocation(
8020 dst.payload_type(), dst.container_type(), typed_data_reg,
8021 offset_in_bytes);
8022 NoTemporaryAllocator no_temp;
8023 compiler->EmitNativeMove(dst, src, &no_temp);
8024 offset_in_bytes += dst.payload_type().SizeInBytes();
8025 }
8026 return;
8027 }
8028 const auto& dst = dst1.IsPointerToMemory()
8029 ? dst1.AsPointerToMemory().pointer_return_location()
8030 : dst1;
8031
8032 const Location src_loc = locs()->in(0);
8033 const Representation src_type = RequiredInputRepresentation(0);
8034 NoTemporaryAllocator no_temp;
8035 compiler->EmitMoveToNative(dst, src_loc, src_type, &no_temp);
8036}
8037
8038LocationSummary* NativeReturnInstr::MakeLocationSummary(Zone* zone,
8039 bool opt) const {
8040 const intptr_t input_count = marshaller_.NumReturnDefinitions();
8041 const intptr_t kNumTemps = 0;
8042 LocationSummary* locs = new (zone)
8043 LocationSummary(zone, input_count, kNumTemps, LocationSummary::kNoCall);
8044 const auto& native_loc = marshaller_.Location(compiler::ffi::kResultIndex);
8045
8046 if (native_loc.IsMultiple()) {
8047 ASSERT_EQUAL(input_count, 2);
8048 // Pass in a typed data and offset for easy copying in machine code.
8049 // Can be any register which does not conflict with return registers.
8051 ASSERT(typed_data_reg != CallingConventions::kReturnReg);
8053 locs->set_in(0, Location::RegisterLocation(typed_data_reg));
8054
8056 ASSERT(offset_in_bytes_reg != CallingConventions::kReturnReg);
8057 ASSERT(offset_in_bytes_reg != CallingConventions::kSecondReturnReg);
8058 locs->set_in(1, Location::RegisterLocation(offset_in_bytes_reg));
8059 } else {
8060 ASSERT_EQUAL(input_count, 1);
8061 const auto& native_return_loc =
8062 native_loc.IsPointerToMemory()
8063 ? native_loc.AsPointerToMemory().pointer_return_location()
8064 : native_loc;
8065 locs->set_in(0, native_return_loc.AsLocation());
8066 }
8067 return locs;
8068}
8069
8070LocationSummary* RecordCoverageInstr::MakeLocationSummary(Zone* zone,
8071 bool opt) const {
8072 const intptr_t kNumInputs = 0;
8073 const intptr_t kNumTemps = 2;
8074 LocationSummary* locs = new (zone)
8075 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
8076 locs->set_temp(0, Location::RequiresRegister());
8077 locs->set_temp(1, Location::RequiresRegister());
8078 return locs;
8079}
8080
8081void RecordCoverageInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8082 const auto array_temp = locs()->temp(0).reg();
8083 const auto value_temp = locs()->temp(1).reg();
8084
8085 __ LoadObject(array_temp, coverage_array_);
8086 __ LoadImmediate(value_temp, Smi::RawValue(1));
8087 __ StoreFieldToOffset(
8088 value_temp, array_temp,
8091}
8092
8093#undef Z
8094
8095Representation FfiCallInstr::representation() const {
8096 if (marshaller_.ReturnsCompound()) {
8097 // Don't care, we're discarding the value.
8098 return kTagged;
8099 }
8100 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
8101 // The call returns a Dart_Handle, from which we need to extract the
8102 // tagged pointer using LoadField with an appropriate slot.
8103 return kUntagged;
8104 }
8105 return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex);
8106}
8107
8108// TODO(http://dartbug.com/48543): integrate with register allocator directly.
8110 __ MoveRegister(out, THR);
8111}
8112
8113LocationSummary* LeafRuntimeCallInstr::MakeLocationSummaryInternal(
8114 Zone* zone,
8115 const RegList temps) const {
8116 LocationSummary* summary =
8117 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
8118 /*num_temps=*/Utils::CountOneBitsWord(temps),
8119 LocationSummary::kNativeLeafCall);
8120
8121 intptr_t reg_i = 0;
8122 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
8123 if ((temps & (1 << reg)) != 0) {
8124 summary->set_temp(reg_i,
8125 Location::RegisterLocation(static_cast<Register>(reg)));
8126 reg_i++;
8127 }
8128 }
8129
8130 summary->set_in(TargetAddressIndex(),
8133
8134 const auto& argument_locations =
8135 native_calling_convention_.argument_locations();
8136 for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
8137 const auto& argument_location = *argument_locations.At(i);
8138 if (argument_location.IsRegisters()) {
8139 const auto& reg_location = argument_location.AsRegisters();
8140 ASSERT(reg_location.num_regs() == 1);
8141 summary->set_in(i, reg_location.AsLocation());
8142 } else if (argument_location.IsFpuRegisters()) {
8143 UNIMPLEMENTED();
8144 } else if (argument_location.IsStack()) {
8145 summary->set_in(i, Location::Any());
8146 } else {
8147 UNIMPLEMENTED();
8148 }
8149 }
8150 const auto& return_location = native_calling_convention_.return_location();
8151 ASSERT(return_location.IsRegisters());
8152 summary->set_out(0, return_location.AsLocation());
8153 return summary;
8154}
8155
8156LeafRuntimeCallInstr::LeafRuntimeCallInstr(
8157 Representation return_representation,
8158 const ZoneGrowableArray<Representation>& argument_representations,
8159 const compiler::ffi::NativeCallingConvention& native_calling_convention,
8162 return_representation_(return_representation),
8163 argument_representations_(argument_representations),
8164 native_calling_convention_(native_calling_convention) {
8165#if defined(DEBUG)
8166 const intptr_t num_inputs = argument_representations.length() + 1;
8167 ASSERT_EQUAL(InputCount(), num_inputs);
8168 // The target address should never be an unsafe untagged pointer.
8169 ASSERT(!InputAt(TargetAddressIndex())
8170 ->definition()
8171 ->MayCreateUnsafeUntaggedPointer());
8172#endif
8173}
8174
8176 Zone* zone,
8177 Representation return_representation,
8178 const ZoneGrowableArray<Representation>& argument_representations,
8179 InputsArray&& inputs) {
8180 const auto& native_function_type =
8182 zone, return_representation, argument_representations);
8183 const auto& native_calling_convention =
8185 zone, native_function_type);
8186 return new (zone)
8187 LeafRuntimeCallInstr(return_representation, argument_representations,
8188 native_calling_convention, std::move(inputs));
8189}
8190
8191void LeafRuntimeCallInstr::EmitParamMoves(FlowGraphCompiler* compiler,
8192 Register saved_fp,
8193 Register temp0) {
8194 if (native_calling_convention_.StackTopInBytes() == 0) {
8195 return;
8196 }
8197
8198 ConstantTemporaryAllocator temp_alloc(temp0);
8199 compiler::ffi::FrameRebase rebase(compiler->zone(), /*old_base=*/FPREG,
8200 /*new_base=*/saved_fp,
8201 /*stack_delta=*/0);
8202
8203 __ Comment("EmitParamMoves");
8204 const auto& argument_locations =
8205 native_calling_convention_.argument_locations();
8206 for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
8207 const auto& argument_location = *argument_locations.At(i);
8208 if (argument_location.IsRegisters()) {
8209 const auto& reg_location = argument_location.AsRegisters();
8210 ASSERT(reg_location.num_regs() == 1);
8211 const Location src_loc = rebase.Rebase(locs()->in(i));
8212 const Representation src_rep = RequiredInputRepresentation(i);
8213 compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
8214 &temp_alloc);
8215 } else if (argument_location.IsFpuRegisters()) {
8216 UNIMPLEMENTED();
8217 } else if (argument_location.IsStack()) {
8218 const Location src_loc = rebase.Rebase(locs()->in(i));
8219 const Representation src_rep = RequiredInputRepresentation(i);
8220 __ Comment("Param %" Pd ": %s %s -> %s", i, src_loc.ToCString(),
8221 RepresentationUtils::ToCString(src_rep),
8222 argument_location.ToCString());
8223 compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
8224 &temp_alloc);
8225 } else {
8226 UNIMPLEMENTED();
8227 }
8228 }
8229 __ Comment("EmitParamMovesEnd");
8230}
8231
8232// SIMD
8233
8234SimdOpInstr::Kind SimdOpInstr::KindForOperator(MethodRecognizer::Kind kind) {
8235 switch (kind) {
8236 case MethodRecognizer::kFloat32x4Mul:
8237 return SimdOpInstr::kFloat32x4Mul;
8238 case MethodRecognizer::kFloat32x4Div:
8239 return SimdOpInstr::kFloat32x4Div;
8240 case MethodRecognizer::kFloat32x4Add:
8241 return SimdOpInstr::kFloat32x4Add;
8242 case MethodRecognizer::kFloat32x4Sub:
8243 return SimdOpInstr::kFloat32x4Sub;
8244 case MethodRecognizer::kFloat64x2Mul:
8245 return SimdOpInstr::kFloat64x2Mul;
8246 case MethodRecognizer::kFloat64x2Div:
8247 return SimdOpInstr::kFloat64x2Div;
8248 case MethodRecognizer::kFloat64x2Add:
8249 return SimdOpInstr::kFloat64x2Add;
8250 case MethodRecognizer::kFloat64x2Sub:
8251 return SimdOpInstr::kFloat64x2Sub;
8252 default:
8253 break;
8254 }
8255 UNREACHABLE();
8256 return kIllegalSimdOp;
8257}
8258
8259SimdOpInstr* SimdOpInstr::CreateFromCall(Zone* zone,
8260 MethodRecognizer::Kind kind,
8261 Definition* receiver,
8262 Instruction* call,
8263 intptr_t mask /* = 0 */) {
8264 SimdOpInstr* op;
8265 switch (kind) {
8266 case MethodRecognizer::kFloat32x4Mul:
8267 case MethodRecognizer::kFloat32x4Div:
8268 case MethodRecognizer::kFloat32x4Add:
8269 case MethodRecognizer::kFloat32x4Sub:
8270 case MethodRecognizer::kFloat64x2Mul:
8271 case MethodRecognizer::kFloat64x2Div:
8272 case MethodRecognizer::kFloat64x2Add:
8273 case MethodRecognizer::kFloat64x2Sub:
8274 op = new (zone) SimdOpInstr(KindForOperator(kind), call->deopt_id());
8275 break;
8276#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
8277 case MethodRecognizer::kFloat32x4GreaterThan:
8278 // cmppsgt does not exist, and cmppsnlt gives the wrong NaN result, so flip
8279 // the operands at the IL level to get the right SameAsFirstInput.
8280 op = new (zone)
8281 SimdOpInstr(SimdOpInstr::kFloat32x4LessThan, call->deopt_id());
8282 op->SetInputAt(0, call->ArgumentValueAt(1)->CopyWithType(zone));
8283 op->SetInputAt(1, new (zone) Value(receiver));
8284 return op;
8285 case MethodRecognizer::kFloat32x4GreaterThanOrEqual:
8286 // cmppsge does not exist, and cmppsnle gives the wrong NaN result, so flip
8287 // the operands at the IL level to get the right SameAsFirstInput.
8288 op = new (zone)
8289 SimdOpInstr(SimdOpInstr::kFloat32x4LessThanOrEqual, call->deopt_id());
8290 op->SetInputAt(0, call->ArgumentValueAt(1)->CopyWithType(zone));
8291 op->SetInputAt(1, new (zone) Value(receiver));
8292 return op;
8293#endif
8294 default:
8295 op = new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id());
8296 break;
8297 }
8298
8299 if (receiver != nullptr) {
8300 op->SetInputAt(0, new (zone) Value(receiver));
8301 }
8302 for (intptr_t i = (receiver != nullptr ? 1 : 0); i < op->InputCount(); i++) {
8303 op->SetInputAt(i, call->ArgumentValueAt(i)->CopyWithType(zone));
8304 }
8305 if (op->HasMask()) {
8306 op->set_mask(mask);
8307 }
8308 ASSERT(call->ArgumentCount() == (op->InputCount() + (op->HasMask() ? 1 : 0)));
8309
8310 return op;
8311}
8312
8313SimdOpInstr* SimdOpInstr::CreateFromFactoryCall(Zone* zone,
8314 MethodRecognizer::Kind kind,
8315 Instruction* call) {
8316 SimdOpInstr* op =
8317 new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id());
8318 for (intptr_t i = 0; i < op->InputCount(); i++) {
8319 // Note: ArgumentAt(0) is the type arguments, which we don't need.
8320 op->SetInputAt(i, call->ArgumentValueAt(i + 1)->CopyWithType(zone));
8321 }
8322 ASSERT(call->ArgumentCount() == (op->InputCount() + 1));
8323 return op;
8324}
8325
8326SimdOpInstr::Kind SimdOpInstr::KindForOperator(intptr_t cid, Token::Kind op) {
8327 switch (cid) {
8328 case kFloat32x4Cid:
8329 switch (op) {
8330 case Token::kADD:
8331 return kFloat32x4Add;
8332 case Token::kSUB:
8333 return kFloat32x4Sub;
8334 case Token::kMUL:
8335 return kFloat32x4Mul;
8336 case Token::kDIV:
8337 return kFloat32x4Div;
8338 default:
8339 break;
8340 }
8341 break;
8342
8343 case kFloat64x2Cid:
8344 switch (op) {
8345 case Token::kADD:
8346 return kFloat64x2Add;
8347 case Token::kSUB:
8348 return kFloat64x2Sub;
8349 case Token::kMUL:
8350 return kFloat64x2Mul;
8351 case Token::kDIV:
8352 return kFloat64x2Div;
8353 default:
8354 break;
8355 }
8356 break;
8357
8358 case kInt32x4Cid:
8359 switch (op) {
8360 case Token::kADD:
8361 return kInt32x4Add;
8362 case Token::kSUB:
8363 return kInt32x4Sub;
8364 case Token::kBIT_AND:
8365 return kInt32x4BitAnd;
8366 case Token::kBIT_OR:
8367 return kInt32x4BitOr;
8368 case Token::kBIT_XOR:
8369 return kInt32x4BitXor;
8370 default:
8371 break;
8372 }
8373 break;
8374 }
8375
8376 UNREACHABLE();
8377 return kIllegalSimdOp;
8378}
8379
8380SimdOpInstr::Kind SimdOpInstr::KindForMethod(MethodRecognizer::Kind kind) {
8381 switch (kind) {
8382#define CASE_METHOD(Arity, Mask, Name, ...) \
8383 case MethodRecognizer::k##Name: \
8384 return k##Name;
8385#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
8386 SIMD_OP_LIST(CASE_METHOD, CASE_BINARY_OP)
8387#undef CASE_METHOD
8388#undef CASE_BINARY_OP
8389 default:
8390 break;
8391 }
8392
8393 FATAL("Not a SIMD method: %s", MethodRecognizer::KindToCString(kind));
8394 return kIllegalSimdOp;
8395}
8396
8397// The methods InputCount(), representation(), RequiredInputRepresentation()
8398// and HasMask() use an array of SimdOpInfo structures that holds all the
8399// necessary information about each SIMD instruction.
8400
8401struct SimdOpInfo {
8402 uint8_t arity;
8403 bool has_mask;
8404 Representation output;
8405 Representation inputs[4];
8406};
8407
8408static Representation SimdRepresentation(Representation rep) {
8409 // Keep the old semantics where kUnboxedInt8 was a locally created
8410 // alias for kUnboxedInt32, and pass everything else through unchanged.
8411 return rep == kUnboxedInt8 ? kUnboxedInt32 : rep;
8412}
8413
8414// Make representation from type name used by SIMD_OP_LIST.
8415#define REP(T) (SimdRepresentation(kUnboxed##T))
8416static const Representation kUnboxedBool = kTagged;
8417
8418#define ENCODE_INPUTS_0()
8419#define ENCODE_INPUTS_1(In0) REP(In0)
8420#define ENCODE_INPUTS_2(In0, In1) REP(In0), REP(In1)
8421#define ENCODE_INPUTS_3(In0, In1, In2) REP(In0), REP(In1), REP(In2)
8422#define ENCODE_INPUTS_4(In0, In1, In2, In3) \
8423 REP(In0), REP(In1), REP(In2), REP(In3)
8424
8425// Helpers for correct interpretation of the Mask field in the SIMD_OP_LIST.
8426#define HAS_MASK true
8427#define HAS__ false
8428
8429// Define the metadata array.
8430static const SimdOpInfo simd_op_information[] = {
8431#define CASE(Arity, Mask, Name, Args, Result) \
8432 {Arity, HAS_##Mask, REP(Result), {PP_APPLY(ENCODE_INPUTS_##Arity, Args)}},
8433 SIMD_OP_LIST(CASE, CASE)
8434#undef CASE
8435};
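// Illustrative example (a sketch, not part of the original source): for a
// hypothetical SIMD_OP_LIST entry (2, _, Float32x4Add, (Float32x4, Float32x4),
// Float32x4), the CASE macro above would expand to an initializer like
// {2, false, kUnboxedFloat32x4, {kUnboxedFloat32x4, kUnboxedFloat32x4}}.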
8436
8437// Undef all auxiliary macros.
8438#undef ENCODE_INFORMATION
8439#undef HAS__
8440#undef HAS_MASK
8441#undef ENCODE_INPUTS_0
8442#undef ENCODE_INPUTS_1
8443#undef ENCODE_INPUTS_2
8444#undef ENCODE_INPUTS_3
8445#undef ENCODE_INPUTS_4
8446#undef REP
8447
8448intptr_t SimdOpInstr::InputCount() const {
8449 return simd_op_information[kind()].arity;
8450}
8451
8452Representation SimdOpInstr::representation() const {
8453 return simd_op_information[kind()].output;
8454}
8455
8456Representation SimdOpInstr::RequiredInputRepresentation(intptr_t idx) const {
8457 ASSERT(0 <= idx && idx < InputCount());
8458 return simd_op_information[kind()].inputs[idx];
8459}
8460
8461bool SimdOpInstr::HasMask() const {
8462 return simd_op_information[kind()].has_mask;
8463}
8464
8465Definition* SimdOpInstr::Canonicalize(FlowGraph* flow_graph) {
8466 if ((kind() == SimdOpInstr::kFloat64x2FromDoubles) &&
8467 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant()) {
8468 const Object& x = InputAt(0)->BoundConstant();
8469 const Object& y = InputAt(1)->BoundConstant();
8470 if (x.IsDouble() && y.IsDouble()) {
8472 Double::Cast(x).value(), Double::Cast(y).value(), Heap::kOld));
8473 result ^= result.Canonicalize(Thread::Current());
8474 return flow_graph->GetConstant(result, kUnboxedFloat64x2);
8475 }
8476 }
8477 if ((kind() == SimdOpInstr::kFloat32x4FromDoubles) &&
8478 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
8479 InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
8480 const Object& x = InputAt(0)->BoundConstant();
8481 const Object& y = InputAt(1)->BoundConstant();
8482 const Object& z = InputAt(2)->BoundConstant();
8483 const Object& w = InputAt(3)->BoundConstant();
8484 if (x.IsDouble() && y.IsDouble() && z.IsDouble() && w.IsDouble()) {
8486 Double::Cast(x).value(), Double::Cast(y).value(),
8487 Double::Cast(z).value(), Double::Cast(w).value(), Heap::kOld));
8488 result ^= result.Canonicalize(Thread::Current());
8489 return flow_graph->GetConstant(result, kUnboxedFloat32x4);
8490 }
8491 }
8492 if ((kind() == SimdOpInstr::kInt32x4FromInts) &&
8493 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
8494 InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
8495 const Object& x = InputAt(0)->BoundConstant();
8496 const Object& y = InputAt(1)->BoundConstant();
8497 const Object& z = InputAt(2)->BoundConstant();
8498 const Object& w = InputAt(3)->BoundConstant();
8499 if (x.IsInteger() && y.IsInteger() && z.IsInteger() && w.IsInteger()) {
8501 Integer::Cast(x).AsInt64Value(), Integer::Cast(y).AsInt64Value(),
8502 Integer::Cast(z).AsInt64Value(), Integer::Cast(w).AsInt64Value(),
8503 Heap::kOld));
8504 result ^= result.Canonicalize(Thread::Current());
8505 return flow_graph->GetConstant(result, kUnboxedInt32x4);
8506 }
8507 }
8508
8509 return this;
8510}
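// Illustrative note (not part of the original source): this folding means,
// for example, that a Float64x2 constructed from two constant doubles such
// as 1.0 and 2.0 is replaced by a single canonical unboxed Float64x2
// constant in the flow graph.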
8511
8512LocationSummary* Call1ArgStubInstr::MakeLocationSummary(Zone* zone,
8513 bool opt) const {
8514 const intptr_t kNumInputs = 1;
8515 const intptr_t kNumTemps = 0;
8516 LocationSummary* locs = new (zone)
8517 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8518 switch (stub_id_) {
8520 locs->set_in(
8522 break;
8523 case StubId::kInitAsync:
8528 break;
8530 locs->set_in(
8532 break;
8533 }
8535 return locs;
8536}
8537
8538void Call1ArgStubInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8539 ObjectStore* object_store = compiler->isolate_group()->object_store();
8540 Code& stub = Code::ZoneHandle(compiler->zone());
8541 switch (stub_id_) {
8542 case StubId::kCloneSuspendState:
8543 stub = object_store->clone_suspend_state_stub();
8544 break;
8545 case StubId::kInitAsync:
8546 stub = object_store->init_async_stub();
8547 break;
8548 case StubId::kInitAsyncStar:
8549 stub = object_store->init_async_star_stub();
8550 break;
8551 case StubId::kInitSyncStar:
8552 stub = object_store->init_sync_star_stub();
8553 break;
8554 case StubId::kFfiAsyncCallbackSend:
8555 stub = object_store->ffi_async_callback_send_stub();
8556 break;
8557 }
8558 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8559 locs(), deopt_id(), env());
8560}
8561
8564 !operand()->Type()->CanBeFuture()) {
8566 stub_id_ = StubId::kAwait;
8567 }
8568 return this;
8569}
8570
8571LocationSummary* SuspendInstr::MakeLocationSummary(Zone* zone, bool opt) const {
8572 const intptr_t kNumInputs = has_type_args() ? 2 : 1;
8573 const intptr_t kNumTemps = 0;
8574 LocationSummary* locs = new (zone)
8575 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8577 if (has_type_args()) {
8579 }
8581 return locs;
8582}
8583
8584void SuspendInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8585 // Use deopt_id as a yield index.
8586 compiler->EmitYieldPositionMetadata(source(), deopt_id());
8587
8588 ObjectStore* object_store = compiler->isolate_group()->object_store();
8589 Code& stub = Code::ZoneHandle(compiler->zone());
8590 switch (stub_id_) {
8591 case StubId::kAwait:
8592 stub = object_store->await_stub();
8593 break;
8594 case StubId::kAwaitWithTypeCheck:
8595 stub = object_store->await_with_type_check_stub();
8596 break;
8597 case StubId::kYieldAsyncStar:
8598 stub = object_store->yield_async_star_stub();
8599 break;
8600 case StubId::kSuspendSyncStarAtStart:
8601 stub = object_store->suspend_sync_star_at_start_stub();
8602 break;
8603 case StubId::kSuspendSyncStarAtYield:
8604 stub = object_store->suspend_sync_star_at_yield_stub();
8605 break;
8606 }
8607 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8608 locs(), deopt_id(), env());
8609
8610#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
8611 // On x86 (X64 and IA32) a mismatch between calls and returns
8612 // significantly regresses performance, so the suspend stub
8613 // does not return directly to the caller. Instead, a small
8614 // epilogue is generated right after the call to the suspend stub,
8615 // and the resume stub adjusts the resume PC to skip this epilogue.
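 // Descriptive note (added for clarity, not in the original source): the
 // epilogue emitted below must have a known fixed size, which is what the
 // RELEASE_ASSERT on the code-size delta checks, so that the resume stub can
 // skip it by adjusting the resume PC by that constant distance.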
8616 const intptr_t start = compiler->assembler()->CodeSize();
8617 __ LeaveFrame();
8618 __ ret();
8619 RELEASE_ASSERT(compiler->assembler()->CodeSize() - start ==
8621 compiler->EmitCallsiteMetadata(source(), resume_deopt_id(),
8622 UntaggedPcDescriptors::kOther, locs(), env());
8623#endif
8624}
8625
8626LocationSummary* AllocateRecordInstr::MakeLocationSummary(Zone* zone,
8627 bool opt) const {
8628 const intptr_t kNumInputs = 0;
8629 const intptr_t kNumTemps = 0;
8630 LocationSummary* locs = new (zone)
8631 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8633 return locs;
8634}
8635
8636void AllocateRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8637 const Code& stub = Code::ZoneHandle(
8638 compiler->zone(),
8639 compiler->isolate_group()->object_store()->allocate_record_stub());
8640 __ LoadImmediate(AllocateRecordABI::kShapeReg,
8641 Smi::RawValue(shape().AsInt()));
8642 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8643 locs(), deopt_id(), env());
8644}
8645
8646LocationSummary* AllocateSmallRecordInstr::MakeLocationSummary(Zone* zone,
8647 bool opt) const {
8648 ASSERT(num_fields() == 2 || num_fields() == 3);
8649 const intptr_t kNumInputs = InputCount();
8650 const intptr_t kNumTemps = 0;
8651 LocationSummary* locs = new (zone)
8652 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8653 locs->set_in(0,
8655 locs->set_in(1,
8657 if (num_fields() > 2) {
8658 locs->set_in(
8660 }
8661 locs->set_out(0,
8663 return locs;
8664}
8665
8666void AllocateSmallRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8667 auto object_store = compiler->isolate_group()->object_store();
8668 Code& stub = Code::ZoneHandle(compiler->zone());
8669 if (shape().HasNamedFields()) {
8671 Smi::RawValue(shape().AsInt()));
8672 switch (num_fields()) {
8673 case 2:
8674 stub = object_store->allocate_record2_named_stub();
8675 break;
8676 case 3:
8677 stub = object_store->allocate_record3_named_stub();
8678 break;
8679 default:
8680 UNREACHABLE();
8681 }
8682 } else {
8683 switch (num_fields()) {
8684 case 2:
8685 stub = object_store->allocate_record2_stub();
8686 break;
8687 case 3:
8688 stub = object_store->allocate_record3_stub();
8689 break;
8690 default:
8691 UNREACHABLE();
8692 }
8693 }
8694 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8695 locs(), deopt_id(), env());
8696}
8697
8698LocationSummary* MakePairInstr::MakeLocationSummary(Zone* zone,
8699 bool opt) const {
8700 ASSERT(opt);
8701 const intptr_t kNumInputs = 2;
8702 const intptr_t kNumTemps = 0;
8703 LocationSummary* locs = new (zone)
8704 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
8705 // The MakePair instruction is used to combine two separate kTagged values
8706 // into a single kPairOfTagged value for the subsequent Return, so it uses
8707 // the fixed registers in which values are returned under the calling
8708 // convention in order to avoid any extra moves.
8710 locs->set_in(
8712 locs->set_out(
8713 0, Location::Pair(
8716 return locs;
8717}
8718
8719void MakePairInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8720 // No-op.
8721}
8722
8723int64_t TestIntInstr::ComputeImmediateMask() {
8724 int64_t mask = Integer::Cast(locs()->in(1).constant()).AsInt64Value();
8725
8726 switch (representation_) {
8727 case kTagged:
8728 // If the operand is tagged we need to tag the mask.
8729 if (!Smi::IsValid(mask)) {
8730 // The mask is not a valid Smi, which means the top bits are not all
8731 // equal to the sign bit and at least some of them are 1. If they were
8732 // all 0 then it would be a valid positive Smi.
8733 // Adjust the mask to make it a valid Smi: testing any bit above
8734 // kSmiBits is equivalent to testing the sign bit.
8735 mask = (mask & kSmiMax) | kSmiMin;
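 // Worked example (illustrative, assuming 31-bit Smis so kSmiMax ==
 // 0x3FFFFFFF): a 64-bit mask of 0x100000000 has no bits inside the Smi
 // payload, so it is adjusted to kSmiMin, i.e. just the sign bits; for any
 // Smi-range value, bit 32 is a copy of the sign bit, so the adjusted test
 // accepts exactly the same values as the original one.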
8736 }
8737 return compiler::target::ToRawSmi(mask);
8738
8739 case kUnboxedInt64:
8740 return mask;
8741
8742 default:
8743 UNREACHABLE();
8744 return -1;
8745 }
8746}
8747
8748#undef __
8749
8750} // namespace dart
static int step(int x, SkScalar min, SkScalar max)
Definition: BlurTest.cpp:215
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition: DM.cpp:263
int count
Definition: FontMgrTest.cpp:50
static void is_empty(skiatest::Reporter *reporter, const SkPath &p)
static float next(float f)
static float prev(float f)
static bool is_integer(SkScalar x)
static void copy(void *dst, const uint8_t *src, int width, int bpp, int deltaSrc, int offset, const SkPMColor ctable[])
Definition: SkSwizzler.cpp:31
Type
Definition: SortBench.cpp:56
SI void store(P *ptr, const T &val)
SI T load(const P *ptr)
Definition: Transform_inl.h:98
SI F table(const skcms_Curve *curve, F v)
static size_t element_size(Layout layout, SkSLType type)
Vec2Value v2
#define UNREACHABLE()
Definition: assert.h:248
#define DEBUG_ASSERT(cond)
Definition: assert.h:321
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
#define RELEASE_ASSERT(cond)
Definition: assert.h:327
GLenum type
bool IsSubtypeOf(const AbstractType &other, Heap::Space space, FunctionTypeMapping *function_type_equivalence=nullptr) const
Definition: object.cc:21550
bool IsTopTypeForSubtyping() const
Definition: object.cc:21396
virtual AbstractTypePtr Canonicalize(Thread *thread) const
Definition: object.cc:21240
bool IsObjectType() const
Definition: object.h:9201
static bool InstantiateAndTestSubtype(AbstractType *subtype, AbstractType *supertype, const TypeArguments &instantiator_type_args, const TypeArguments &function_type_args)
Definition: object.cc:4287
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:954
virtual intptr_t InputCount() const
Definition: il.h:7516
bool is_generic() const
Definition: il.h:7526
bool has_instantiator_type_args() const
Definition: il.h:7523
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:935
RecordShape shape() const
Definition: il.h:7629
intptr_t num_fields() const
Definition: il.h:7675
RecordShape shape() const
Definition: il.h:7674
virtual intptr_t InputCount() const
Definition: il.h:7677
classid_t class_id() const
Definition: il.h:7889
AllocateUninitializedContextInstr(const InstructionSource &source, intptr_t num_context_variables, intptr_t deopt_id)
Definition: il.cc:925
Value * dst_type() const
Definition: il.h:4423
static bool ParseKind(const char *str, Kind *out)
Definition: il.cc:829
const String & dst_name() const
Definition: il.h:4430
Value * value() const
Definition: il.h:4422
virtual intptr_t statistics_tag() const
Definition: il.cc:6020
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3073
virtual Value * RedefinedValue() const
Definition: il.cc:549
Value * function_type_arguments() const
Definition: il.h:4427
static const char * KindToCString(Kind kind)
Definition: il.cc:816
Value * instantiator_type_arguments() const
Definition: il.h:4424
Value * value() const
Definition: il.h:4491
virtual Value * RedefinedValue() const
Definition: il.cc:553
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3057
Value * function_type_arguments() const
Definition: il.h:4340
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:1064
Value * super_type() const
Definition: il.h:4342
Value * sub_type() const
Definition: il.h:4341
Value * instantiator_type_arguments() const
Definition: il.h:4337
void Add(const T &value)
const T & At(intptr_t index) const
intptr_t length() const
void SetAt(intptr_t index, const T &t)
Token::Kind op_kind() const
Definition: il.h:9038
Value * right() const
Definition: il.h:9036
virtual PRINT_OPERANDS_TO_SUPPORT Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2209
virtual Representation representation() const
Definition: il.h:9044
Value * left() const
Definition: il.h:9035
virtual intptr_t DeoptimizationTarget() const
Definition: il.h:9055
static const BinaryFeedback * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, intptr_t argument_cid)
Definition: il.cc:4110
BinaryFeedback(Zone *zone)
Definition: il.h:838
static const BinaryFeedback * Create(Zone *zone, const ICData &ic_data)
Definition: il.cc:4094
virtual bool ComputeCanDeoptimize() const
Definition: il.cc:2061
static bool IsSupported(Token::Kind op_kind, Value *left, Value *right)
Definition: il.h:9497
void set_can_overflow(bool overflow)
Definition: il.h:9401
bool can_overflow() const
Definition: il.h:9400
bool RightIsNonZero() const
Definition: il.cc:2116
Value * right() const
Definition: il.h:9398
Token::Kind op_kind() const
Definition: il.h:9396
Value * left() const
Definition: il.h:9397
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1113
bool RightIsPowerOfTwoConstant() const
Definition: il.cc:2125
static BinaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *left, Value *right, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.cc:2293
bool is_truncating() const
Definition: il.h:9406
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2409
virtual bool ComputeCanDeoptimize() const
Definition: il.cc:2086
Range * right_range() const
Definition: il.h:9473
void Add(intptr_t i)
Definition: bit_vector.h:63
bool Contains(intptr_t i) const
Definition: bit_vector.h:91
BlockEntryInstr * dominator() const
Definition: il.h:1670
intptr_t NestingDepth() const
Definition: il.cc:1838
void set_preorder_number(intptr_t number)
Definition: il.h:1656
bool FindOsrEntryAndRelink(GraphEntryInstr *graph_entry, Instruction *parent, BitVector *block_marks)
Definition: il.cc:1756
virtual void ClearPredecessors()=0
ParallelMoveInstr * parallel_move() const
Definition: il.h:1689
intptr_t preorder_number() const
Definition: il.h:1655
bool HasParallelMove() const
Definition: il.h:1691
intptr_t block_id() const
Definition: il.h:1661
BlockEntryInstr * ImmediateDominator() const
Definition: il.cc:1826
virtual void AddPredecessor(BlockEntryInstr *predecessor)=0
bool Dominates(BlockEntryInstr *other) const
Definition: il.cc:1815
void ReplaceAsPredecessorWith(BlockEntryInstr *new_block)
Definition: il.cc:1847
bool IsLoopHeader() const
Definition: il.cc:1834
void ClearAllInstructions()
Definition: il.cc:1905
void set_last_instruction(Instruction *instr)
Definition: il.h:1687
intptr_t stack_depth() const
Definition: il.h:1756
bool DiscoverBlock(BlockEntryInstr *predecessor, GrowableArray< BlockEntryInstr * > *preorder, GrowableArray< intptr_t > *parent)
Definition: il.cc:1699
Instruction * last_instruction() const
Definition: il.h:1686
GrowableArray< Definition * > * initial_definitions()
Definition: il.h:1917
static const Bool & False()
Definition: object.h:10799
static const Bool & Get(bool value)
Definition: object.h:10801
static const Bool & True()
Definition: object.h:10797
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3529
Value * value() const
Definition: il.h:7194
static bool IsBootstrapResolver(Dart_NativeEntryResolver resolver)
static void Allocate(FlowGraphCompiler *compiler, Instruction *instruction, const Class &cls, Register result, Register temp)
Definition: il.cc:6309
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition: il.cc:6283
BoxAllocationSlowPath(Instruction *instruction, const Class &cls, Register result)
static BoxInstr * Create(Representation from, Value *value)
Definition: il.cc:4007
Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3195
Value * value() const
Definition: il.h:8528
Representation from_representation() const
Definition: il.h:8529
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3273
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3258
virtual bool ValueFitsSmi() const
Definition: il.cc:3253
Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3239
virtual intptr_t SuccessorCount() const
Definition: il.cc:2010
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:2014
void SetComparison(ComparisonInstr *comp)
Definition: il.cc:1672
ComparisonInstr * comparison() const
Definition: il.h:4021
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3654
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:5947
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:5965
virtual Representation representation() const
Definition: il.h:5782
CachableIdempotentCallInstr(const InstructionSource &source, Representation representation, const Function &function, intptr_t type_args_len, const Array &argument_names, InputsArray &&arguments, intptr_t deopt_id)
Definition: il.cc:5917
virtual intptr_t ArgumentsSize() const
Definition: il.cc:5959
const Function & function() const
Definition: il.h:5746
Value * offset() const
Definition: il.h:8014
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:8002
intptr_t index_scale() const
Definition: il.h:8015
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3793
StringPtr target_name() const
Definition: object.h:2372
ArrayPtr arguments_descriptor() const
Definition: object.h:2373
void Print() const
Definition: il.cc:4258
CallTargets(Zone *zone)
Definition: il.h:782
static const CallTargets * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, const Function &target)
Definition: il.cc:4118
const Function & MostPopularTarget() const
Definition: il.cc:5519
static const CallTargets * Create(Zone *zone, const ICData &ic_data)
Definition: il.cc:4129
TargetInfo * TargetAt(int i) const
Definition: il.h:796
bool HasSingleTarget() const
Definition: il.cc:5505
static const CallTargets * CreateAndExpand(Zone *zone, const ICData &ic_data)
Definition: il.cc:4137
intptr_t AggregateCallCount() const
Definition: il.cc:5528
const Function & FirstTarget() const
Definition: il.cc:5513
StaticTypeExactnessState MonomorphicExactness() const
Definition: il.cc:811
bool HasSingleRecognizedTarget() const
Definition: il.cc:5500
static constexpr Register kSecondReturnReg
static constexpr RegList kVolatileXmmRegisters
static constexpr intptr_t kVolatileCpuRegisters
static constexpr Register kFirstNonArgumentRegister
static constexpr Register kFfiAnyNonAbiRegister
static constexpr Register kReturnReg
static constexpr Register kSecondNonArgumentRegister
const RuntimeEntry & TargetFunction() const
Definition: il.cc:1101
static intptr_t LengthOffsetFor(intptr_t class_id)
Definition: il.cc:6772
static bool IsFixedLengthArrayType(intptr_t class_id)
Definition: il.cc:6762
virtual Value * RedefinedValue() const
Definition: il.cc:557
Value * index() const
Definition: il.h:10797
Value * length() const
Definition: il.h:10796
bool IsRedundant(bool use_loops=false)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6766
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3820
Value * value() const
Definition: il.h:10755
bool IsDeoptIfNull() const
Definition: il.cc:863
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr, TemplateInstruction, FIELD_LIST) private void EmitBitTest(FlowGraphCompiler *compiler, intptr_t min, intptr_t max, intptr_t mask, compiler::Label *deopt)
Value * value() const
Definition: il.h:10594
void EmitNullCheck(FlowGraphCompiler *compiler, compiler::Label *deopt)
CheckClassInstr(Value *value, intptr_t deopt_id, const Cids &cids, const InstructionSource &source)
Definition: il.cc:840
const Cids & cids() const
Definition: il.h:10596
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3800
bool IsNullCheck() const
Definition: il.h:10600
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:857
bool IsDeoptIfNotNull() const
Definition: il.cc:877
intptr_t ComputeCidMask() const
Definition: il.cc:903
bool IsBitTest() const
Definition: il.cc:899
static bool IsCompactCidRange(const Cids &cids)
Definition: il.cc:885
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6732
virtual Value * InputAt(intptr_t i) const
Definition: il.h:10991
ComparisonInstr * comparison() const
Definition: il.h:10974
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3988
Value * right() const
Definition: il.h:8477
Value * left() const
Definition: il.h:8476
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:10706
const String & function_name() const
Definition: il.h:10703
static void AddMetadataForRuntimeCall(CheckNullInstr *check_null, FlowGraphCompiler *compiler)
Definition: il.cc:6278
Value * value() const
Definition: il.h:10701
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3996
virtual Value * RedefinedValue() const
Definition: il.cc:565
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:4000
ExceptionType exception_type() const
Definition: il.h:10704
Value * value() const
Definition: il.h:10654
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3984
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2653
virtual Value * RedefinedValue() const
Definition: il.cc:561
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6792
Value * value() const
Definition: il.h:10931
bool MustInclude(intptr_t cid)
Definition: il.cc:162
CidCheckerForRanges(Thread *thread, ClassTable *table, const Class &cls, bool include_abstract, bool exclude_null)
Definition: il.cc:136
bool MayInclude(intptr_t cid)
Definition: il.cc:149
Definition: il.h:736
Cids(Zone *zone)
Definition: il.h:738
void Sort(int compare(CidRange *const *a, CidRange *const *b))
Definition: il.h:764
bool HasClassId(intptr_t cid) const
Definition: il.cc:682
static Cids * CreateMonomorphic(Zone *zone, intptr_t cid)
Definition: il.cc:691
intptr_t MonomorphicReceiverCid() const
Definition: il.cc:806
static Cids * CreateForArgument(Zone *zone, const BinaryFeedback &binary_feedback, int argument_number)
Definition: il.cc:697
void SetLength(intptr_t len)
Definition: il.h:760
intptr_t ComputeLowestCid() const
Definition: il.cc:666
intptr_t length() const
Definition: il.h:758
intptr_t ComputeHighestCid() const
Definition: il.cc:674
GrowableArray< CidRange * > cid_ranges_
Definition: il.h:774
void Add(CidRange *target)
Definition: il.h:752
bool is_empty() const
Definition: il.h:762
bool Equals(const Cids &other) const
Definition: il.cc:655
bool IsMonomorphic() const
Definition: il.cc:801
ClassPtr At(intptr_t cid) const
Definition: class_table.h:362
intptr_t NumCids() const
Definition: class_table.h:447
bool HasValidClassAt(intptr_t cid) const
Definition: class_table.h:386
bool has_dynamically_extendable_subtypes() const
Definition: object.h:2196
const char * ScrubbedNameCString() const
Definition: object.cc:2985
LibraryPtr library() const
Definition: object.h:1333
GrowableObjectArrayPtr direct_subclasses() const
Definition: object.h:1537
intptr_t id() const
Definition: object.h:1233
TypePtr RareType() const
Definition: object.cc:3036
bool is_abstract() const
Definition: object.h:1696
bool IsGeneric() const
Definition: object.h:1358
ClassPtr SuperClass(ClassTable *class_table=nullptr) const
Definition: object.cc:3665
bool IsTopLevel() const
Definition: object.cc:6121
GrowableObjectArrayPtr direct_implementors() const
Definition: object.h:1520
intptr_t NumTypeParameters(Thread *thread) const
Definition: object.cc:3555
static bool IsOptimized(CodePtr code)
Definition: object.h:6821
virtual void NegateComparison()
Definition: il.h:3880
intptr_t operation_cid() const
Definition: il.h:3878
Value * right() const
Definition: il.h:3847
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.h:3885
virtual Condition EmitComparisonCode(FlowGraphCompiler *compiler, BranchLabels labels)=0
Value * left() const
Definition: il.h:3846
static CompileType FromCid(intptr_t cid)
intptr_t ToNullableCid()
bool is_nullable() const
Definition: compile_type.h:76
static CompileType Smi()
const AbstractType * ToAbstractType()
bool is_aot() const
static CompilerState & Current()
static bool IsBackgroundCompilation()
Definition: compiler.cc:298
static constexpr intptr_t kNoOSRDeoptId
Definition: compiler.h:73
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1164
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2732
const Object & value() const
Definition: il.h:4230
ConstantInstr(const Object &value)
Definition: il.h:4221
static ObjectPtr Unknown()
Value * value() const
Definition: il.h:3486
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3186
bool HasUses() const
Definition: il.h:2569
static bool IsArrayLength(Definition *def)
Definition: il.cc:585
Value * env_use_list() const
Definition: il.h:2578
void ReplaceWith(Definition *other, ForwardInstructionIterator *iterator)
Definition: il.cc:1662
Value * input_use_list() const
Definition: il.h:2575
Object & constant_value()
Definition: il.cc:525
Range * range() const
Definition: il.h:2636
void set_range(const Range &)
CompileType * Type()
Definition: il.h:2521
virtual Value * RedefinedValue() const
Definition: il.cc:541
void AddEnvUse(Value *value)
Definition: il.h:2586
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2633
Definition * OriginalDefinitionIgnoreBoxingAndConstraints()
Definition: il.cc:569
void ReplaceUsesWith(Definition *other)
Definition: il.cc:1493
bool HasOnlyInputUse(Value *use) const
Definition: il.cc:1489
bool HasSSATemp() const
Definition: il.h:2508
void AddInputUse(Value *value)
Definition: il.h:2585
Definition * OriginalDefinition()
Definition: il.cc:532
void set_ssa_temp_index(intptr_t index)
Definition: il.h:2504
void set_input_use_list(Value *head)
Definition: il.h:2576
bool HasOnlyUse(Value *use) const
Definition: il.cc:1477
void ClearTempIndex()
Definition: il.h:2500
void ReplaceWithResult(Instruction *replacement, Definition *replacement_for_uses, ForwardInstructionIterator *iterator)
Definition: il.cc:1632
virtual bool CanReplaceWithConstant() const
Definition: il.h:2599
ValueListIterable input_uses() const
Definition: il.h:2581
intptr_t ssa_temp_index() const
Definition: il.h:2503
friend class Value
Definition: il.h:2690
void set_env_use_list(Value *head)
Definition: il.h:2579
void ClearSSATempIndex()
Definition: il.h:2509
static constexpr intptr_t kNone
Definition: deopt_id.h:27
static intptr_t ToDeoptAfter(intptr_t deopt_id)
Definition: deopt_id.h:31
virtual Representation representation() const
Definition: il.cc:5397
virtual intptr_t ArgumentsSize() const
Definition: il.cc:5387
const Function & interface_target() const
Definition: il.h:5056
static DispatchTableCallInstr * FromCall(Zone *zone, const InstanceCallBaseInstr *call, Value *cid, const Function &interface_target, const compiler::TableSelector *selector)
Definition: il.cc:5401
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:5370
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:5646
const compiler::TableSelector * selector() const
Definition: il.h:5057
DispatchTableCallInstr(const InstructionSource &source, const Function &interface_target, const compiler::TableSelector *selector, InputsArray &&arguments, intptr_t type_args_len, const Array &argument_names)
Definition: il.h:5028
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2236
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6544
Value * value() const
Definition: il.h:10142
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2186
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10060
Value * value() const
Definition: il.h:10058
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:7301
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition: il.cc:6332
static DoublePtr NewCanonical(double d)
Definition: object.cc:23418
intptr_t num_temps() const
Definition: il.h:5863
virtual intptr_t InputCount() const
Definition: il.h:5855
void DeepCopyToOuter(Zone *zone, Instruction *instr, intptr_t outer_deopt_id) const
Definition: il.cc:6524
intptr_t Length() const
Definition: il.h:11712
void PushValue(Value *value)
Definition: il.cc:6457
intptr_t fixed_parameter_count() const
Definition: il.h:11730
intptr_t LazyDeoptPruneCount() const
Definition: il.h:11670
bool LazyDeoptToBeforeDeoptId() const
Definition: il.h:11674
Value * ValueAt(intptr_t ix) const
Definition: il.h:11708
void DeepCopyAfterTo(Zone *zone, Instruction *instr, intptr_t argc, Definition *dead, Definition *result) const
Definition: il.cc:6498
void DeepCopyTo(Zone *zone, Instruction *instr) const
Definition: il.cc:6485
Environment * DeepCopy(Zone *zone) const
Definition: il.h:11744
Environment * outer() const
Definition: il.h:11699
bool IsHoisted() const
Definition: il.h:11686
static Environment * From(Zone *zone, const GrowableArray< Definition * > &definitions, intptr_t fixed_parameter_count, intptr_t lazy_deopt_pruning_count, const ParsedFunction &parsed_function)
Definition: il.cc:6443
Environment(FlowGraphDeserializer *d)
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6550
EqualityCompareInstr(const InstructionSource &source, Token::Kind kind, Value *left, Value *right, intptr_t cid, intptr_t deopt_id, bool null_aware=false, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.h:5316
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3752
bool is_null_aware() const
Definition: il.h:5341
void set_null_aware(bool value)
Definition: il.h:5342
static int64_t TruncateTo(int64_t v, Representation r)
Definition: evaluator.cc:81
static IntegerPtr BitLengthEvaluate(const Object &value, Representation representation, Thread *thread)
Definition: evaluator.cc:164
static IntegerPtr BinaryIntegerEvaluate(const Object &left, const Object &right, Token::Kind token_kind, bool is_truncating, Representation representation, Thread *thread)
Definition: evaluator.cc:99
static bool ToIntegerConstant(Value *value, int64_t *result)
Definition: evaluator.cc:281
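The Evaluator entries above fold integer operations at compile time and truncate the result to the instruction's representation (see TruncateTo). A standalone sketch of that truncation, assuming a plain bit width instead of the VM's Representation enum:

#include <cstdint>

// Truncate a folded 64-bit result to |bits| and sign-extend, so e.g. adding 1
// to kMaxInt32 in a 32-bit representation wraps around to kMinInt32.
static int64_t TruncateToBits(int64_t value, int bits) {
  if (bits >= 64) return value;
  const uint64_t mask = (uint64_t{1} << bits) - 1;
  uint64_t truncated = static_cast<uint64_t>(value) & mask;
  if ((truncated & (uint64_t{1} << (bits - 1))) != 0) {
    truncated |= ~mask;  // sign-extend the truncated value
  }
  return static_cast<int64_t>(truncated);
}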
static intptr_t GetResultCidOfListFactory(Zone *zone, const Function &function, intptr_t argument_count)
intptr_t CompoundReturnTypedDataIndex() const
Definition: il.h:6105
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
Definition: il.cc:7690
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:7408
void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
Definition: il.cc:7478
virtual Representation representation() const
Definition: il.cc:8095
intptr_t TargetAddressIndex() const
Definition: il.h:6100
bool is_final() const
Definition: object.h:4442
ClassPtr Owner() const
Definition: object.cc:11860
bool IsOriginal() const
Definition: object.h:4418
bool is_nullable() const
Definition: object.cc:11770
@ kUnknownFixedLength
Definition: object.h:4728
StaticTypeExactnessState static_type_exactness_state() const
Definition: object.h:4633
intptr_t guarded_cid() const
Definition: object.cc:11749
intptr_t guarded_list_length() const
Definition: object.cc:12101
AbstractTypePtr type() const
Definition: object.h:4550
bool is_shared() const
Definition: object.h:4493
static Float32x4Ptr New(float value0, float value1, float value2, float value3, Heap::Space space=Heap::kNew)
Definition: object.cc:25307
static Float64x2Ptr New(double value0, double value1, Heap::Space space=Heap::kNew)
Definition: object.cc:25475
Value * value() const
Definition: il.h:10183
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2201
static bool LookupMethodFor(int class_id, const String &name, const ArgumentsDescriptor &args_desc, Function *fn_return, bool *class_is_abstract_return=nullptr)
static const CallTargets * ResolveCallTargetsForReceiverCid(intptr_t cid, const String &selector, const Array &args_desc_array)
ForwardInstructionIterator * current_iterator_
Definition: il.h:11859
virtual void VisitBlocks()
Definition: il.cc:1376
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
Definition: flow_graph.cc:187
bool should_print() const
Definition: flow_graph.h:503
bool IsCompiledForOsr() const
Definition: flow_graph.h:460
ConstantInstr * constant_dead() const
Definition: flow_graph.h:272
Zone * zone() const
Definition: flow_graph.h:261
static Representation ReturnRepresentationOf(const Function &function)
Definition: flow_graph.cc:125
bool should_remove_all_bounds_checks() const
Definition: flow_graph.h:510
Thread * thread() const
Definition: flow_graph.h:260
static intptr_t ComputeArgumentsSizeInWords(const Function &function, intptr_t arguments_count)
Definition: flow_graph.cc:96
static Representation ParameterRepresentationAt(const Function &function, intptr_t index)
Definition: flow_graph.cc:109
ConstantInstr * constant_null() const
Definition: flow_graph.h:270
const Function & function() const
Definition: flow_graph.h:130
bool is_licm_allowed() const
Definition: flow_graph.h:404
bool unmatched_representations_allowed() const
Definition: flow_graph.h:411
Definition * TryCreateConstantReplacementFor(Definition *op, const Object &value)
Definition: flow_graph.cc:236
bool ExtractExternalUntaggedPayload(Instruction *instr, Value *array, classid_t cid)
Definition: flow_graph.cc:2393
void CopyDeoptTarget(Instruction *to, Instruction *from)
Definition: flow_graph.h:395
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.h:312
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.cc:273
Instruction * Current() const
Definition: il.h:1853
MethodRecognizer::Kind recognized_kind() const
Definition: object.h:3619
AbstractTypePtr result_type() const
Definition: object.h:3099
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:10888
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:10902
virtual Representation representation() const
Definition: il.h:10884
JoinEntryInstr * successor() const
Definition: il.h:3713
virtual intptr_t SuccessorCount() const
Definition: il.cc:2021
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:2025
void RelinkToOsrEntry(Zone *zone, intptr_t max_block_id)
Definition: il.cc:1749
bool IsCompiledForOsr() const
Definition: il.cc:1257
FunctionEntryInstr * normal_entry() const
Definition: il.h:2001
FunctionEntryInstr * unchecked_entry() const
Definition: il.h:2002
void set_unchecked_entry(FunctionEntryInstr *target)
Definition: il.h:2004
void set_normal_entry(FunctionEntryInstr *entry)
Definition: il.h:2003
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:1994
intptr_t osr_id() const
Definition: il.h:1978
CatchBlockEntryInstr * GetCatchEntry(intptr_t index)
Definition: il.cc:1248
ConstantInstr * constant_null()
Definition: il.cc:1238
void set_osr_entry(OsrEntryInstr *entry)
Definition: il.h:2008
virtual intptr_t SuccessorCount() const
Definition: il.cc:1988
GraphEntryInstr(const ParsedFunction &parsed_function, intptr_t osr_id)
Definition: il.cc:1217
OsrEntryInstr * osr_entry() const
Definition: il.h:2007
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3927
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1052
const Field & field() const
Definition: il.h:6520
Value * value() const
Definition: il.h:6518
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1056
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3945
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3979
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1060
@ kNew
Definition: heap.h:38
@ kOld
Definition: heap.h:39
bool InstanceOfHasClassRange(const AbstractType &type, intptr_t *lower_limit, intptr_t *upper_limit)
Definition: il.cc:412
bool CanUseGenericSubtypeRangeCheckFor(const AbstractType &type)
Definition: il.cc:343
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
Definition: il.cc:110
bool CanUseRecordSubtypeRangeCheckFor(const AbstractType &type)
Definition: il.cc:395
bool CanUseSubtypeRangeCheckFor(const AbstractType &type)
Definition: il.cc:301
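The HierarchyInfo entries above implement type tests as checks against contiguous class-id ranges (CidRangeVector; see the CidRange cid_start/cid_end fields later in this index). A hedged standalone sketch of the core check, assuming a simple sorted vector of ranges rather than the VM's generated stub code:

#include <cstdint>
#include <vector>

struct CidRange { int64_t cid_start; int64_t cid_end; };  // mirrors the il.h fields

// True if |cid| falls inside any of the non-overlapping ranges. The VM emits
// this as a short sequence of subtract-and-compare checks instead of a loop.
static bool CidInRanges(int64_t cid, const std::vector<CidRange>& ranges) {
  for (const CidRange& range : ranges) {
    if (range.cid_start <= cid && cid <= range.cid_end) return true;
  }
  return false;
}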
intptr_t NumArgsTested() const
Definition: object.cc:16471
void GetClassIdsAt(intptr_t index, GrowableArray< intptr_t > *class_ids) const
Definition: object.cc:16973
intptr_t GetCountAt(intptr_t index) const
Definition: object.cc:17063
intptr_t NumberOfChecks() const
Definition: object.cc:16577
static bool Supports(ComparisonInstr *comparison, Value *v1, Value *v2)
Definition: il.cc:6613
ComparisonInstr * comparison() const
Definition: il.h:5483
void ComputeOffsetTable(FlowGraphCompiler *compiler)
Definition: il.cc:4496
virtual intptr_t SuccessorCount() const
Definition: il.h:3818
virtual TargetEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.h:3819
Value * offset() const
Definition: il.h:3829
void set_ic_data(const ICData *value)
Definition: il.h:4722
FunctionPtr ResolveForReceiverClass(const Class &cls, bool allow_add=true)
Definition: il.cc:5336
bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone *zone) const
Definition: il.cc:5182
Code::EntryKind entry_kind() const
Definition: il.h:4759
const ICData * ic_data() const
Definition: il.h:4716
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:5198
void set_receiver_is_not_smi(bool value)
Definition: il.h:4768
const Function & interface_target() const
Definition: il.h:4726
bool HasICData() const
Definition: il.h:4717
Token::Kind token_kind() const
Definition: il.h:4725
virtual intptr_t ArgumentsSize() const
Definition: il.cc:5211
bool receiver_is_not_smi() const
Definition: il.h:4767
void UpdateReceiverSminess(Zone *zone)
Definition: il.cc:5225
virtual Representation representation() const
Definition: il.cc:5221
const String & function_name() const
Definition: il.h:4724
const class BinaryFeedback * binary_
Definition: il.h:4916
const class BinaryFeedback & BinaryFeedback()
Definition: il.cc:5358
bool MatchesCoreName(const String &name)
Definition: il.cc:5332
const CallTargets & Targets()
Definition: il.cc:5345
void EnsureICData(FlowGraph *graph)
Definition: il.cc:5240
intptr_t checked_argument_count() const
Definition: il.h:4878
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:5608
Value * type_arguments() const
Definition: il.h:8308
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3173
Value * function_type_arguments() const
Definition: il.h:8307
const Code & GetStub() const
Definition: il.h:8348
const AbstractType & type() const
Definition: il.h:8256
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.h:1331
virtual void Accept(InstructionVisitor *visitor)=0
Instruction * next() const
Definition: il.h:1093
virtual intptr_t InputCount() const =0
intptr_t GetDeoptId() const
Definition: il.h:1409
void set_previous(Instruction *instr)
Definition: il.h:1088
void SetEnvironment(Environment *deopt_env)
Definition: il.cc:1272
void InheritDeoptTargetAfter(FlowGraph *flow_graph, Definition *call, Definition *result)
Definition: il.cc:1558
void LinkTo(Instruction *next)
Definition: il.h:1108
void InheritDeoptTarget(Zone *zone, Instruction *other)
Definition: il.cc:1569
virtual Value * InputAt(intptr_t i) const =0
void Goto(JoinEntryInstr *entry)
Definition: il.cc:2030
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition: il.cc:1981
virtual BlockEntryInstr * GetBlock()
Definition: il.cc:1352
virtual void CopyDeoptIdFrom(const Instruction &instr)
Definition: il.h:1411
Environment * env() const
Definition: il.h:1215
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
friend class StrictCompareInstr
Definition: il.h:1406
@ kGuardInputs
Definition: il.h:972
@ kNotSpeculative
Definition: il.h:975
void RemoveEnvironment()
Definition: il.cc:1282
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition: il.h:1213
bool HasUnmatchedInputRepresentations() const
Definition: il.cc:1609
const char * ToCString() const
Definition: il_printer.cc:1683
virtual uword Hash() const
Definition: il.cc:610
InputsIterable inputs()
Definition: il.h:1033
Instruction * AppendInstruction(Instruction *tail)
Definition: il.cc:1341
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition: il.h:1202
void CheckField(const Field &field) const
Definition: il.h:1153
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:1241
void UnuseAllInputs()
Definition: il.cc:1534
virtual bool MayHaveVisibleEffect() const
Definition: il.h:1352
virtual intptr_t ArgumentCount() const
Definition: il.h:1041
void set_next(Instruction *instr)
Definition: il.h:1094
static const intptr_t kInstructionAttrs[kNumInstructions]
Definition: il.h:968
bool IsDominatedBy(Instruction *dom)
Definition: il.cc:1581
bool Equals(const Instruction &other) const
Definition: il.cc:619
static const ICData * GetICData(const ZoneGrowableArray< const ICData * > &ic_data_array, intptr_t deopt_id, bool is_static_call)
Definition: il.cc:595
Definition * ArgumentAt(intptr_t index) const
Definition: il.h:3441
void Unsupported(FlowGraphCompiler *compiler)
Definition: il.cc:628
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2629
virtual Representation representation() const
Definition: il.h:1260
bool CanDeoptimize() const
Definition: il.h:1079
void RepairArgumentUsesInEnvironment() const
Definition: il.cc:1543
void ClearEnv()
Definition: il.h:1364
LocationSummary * locs()
Definition: il.h:1192
void ReplaceInEnvironment(Definition *current, Definition *replacement)
Definition: il.cc:1289
Location::Kind RegisterKindForResult() const
Definition: il.h:1384
virtual Tag tag() const =0
void SetInputAt(intptr_t i, Value *value)
Definition: il.h:1014
InstructionSource source() const
Definition: il.h:1008
Value * ArgumentValueAt(intptr_t index) const
Definition: il.h:3435
virtual bool has_inlining_id() const
Definition: il.h:1317
intptr_t deopt_id() const
Definition: il.h:993
void InsertAfter(Instruction *prev)
Definition: il.cc:1325
virtual intptr_t SuccessorCount() const
Definition: il.cc:1977
bool CanEliminate()
Definition: il.h:1360
Instruction * RemoveFromGraph(bool return_previous=true)
Definition: il.cc:1301
SpeculativeMode SpeculativeModeOfInputs() const
Definition: il.h:1245
virtual MoveArgumentsArray * GetMoveArguments() const
Definition: il.h:1050
virtual bool CanTriggerGC() const
Definition: il.cc:1628
Instruction * previous() const
Definition: il.h:1087
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
static Int32x4Ptr New(int32_t value0, int32_t value1, int32_t value2, int32_t value3, Heap::Space space=Heap::kNew)
Definition: object.cc:25391
Value * value() const
Definition: il.h:11044
IntConverterInstr(Representation from, Representation to, Value *value, intptr_t deopt_id)
Definition: il.h:11020
virtual Representation representation() const
Definition: il.h:11056
bool is_truncating() const
Definition: il.h:11048
virtual bool ComputeCanDeoptimize() const
Definition: il.cc:2034
Representation to() const
Definition: il.h:11047
Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3447
Representation from() const
Definition: il.h:11046
static IntegerPtr New(const String &str, Heap::Space space=Heap::kNew)
Definition: object.cc:22984
static IntegerPtr NewCanonical(const String &str)
Definition: object.cc:22999
InvokeMathCFunctionInstr(InputsArray &&inputs, intptr_t deopt_id, MethodRecognizer::Kind recognized_kind, const InstructionSource &source)
Definition: il.cc:7185
const RuntimeEntry & TargetFunction() const
Definition: il.cc:7223
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:7263
static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind_)
Definition: il.cc:7196
intptr_t optimization_counter_threshold() const
Definition: isolate.h:306
ObjectStore * object_store() const
Definition: isolate.h:510
static IsolateGroup * Current()
Definition: isolate.h:539
ClassTable * class_table() const
Definition: isolate.h:496
void RemoveDeadPhis(Definition *replacement)
Definition: il.cc:1952
PhiInstr * InsertPhi(intptr_t var_index, intptr_t var_count)
Definition: il.cc:1918
virtual void AddPredecessor(BlockEntryInstr *predecessor)
Definition: il.cc:1422
intptr_t IndexOfPredecessor(BlockEntryInstr *pred) const
Definition: il.cc:1439
GrowableArray< BlockEntryInstr * > predecessors_
Definition: il.h:2107
void RemovePhi(PhiInstr *phi)
Definition: il.cc:1941
virtual intptr_t PredecessorCount() const
Definition: il.h:2065
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:6210
intptr_t TargetAddressIndex() const
Definition: il.h:6198
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition: il.cc:8191
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition: il.cc:8113
virtual bool MayCreateUnsafeUntaggedPointer() const
Definition: il.h:6218
static LeafRuntimeCallInstr * Make(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations, InputsArray &&inputs)
Definition: il.cc:8175
Dart_NativeEntryResolver native_entry_resolver() const
Definition: object.h:5229
static bool IsPrivateCoreLibName(const String &name, const String &member)
Definition: object.cc:14680
Value * object() const
Definition: il.h:8066
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3809
virtual Representation representation() const
Definition: il.h:8060
bool IsImmutableLengthLoad() const
Definition: il.h:8187
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2868
void set_loads_inner_pointer(InnerPointerAccess value)
Definition: il.h:8149
const Slot & slot() const
Definition: il.h:8144
virtual bool MayCreateUnsafeUntaggedPointer() const
Definition: il.cc:2851
bool IsImmutableLoad() const
Definition: il.h:8177
static bool IsUnmodifiableTypedDataViewFactory(const Function &function)
Definition: il.cc:2707
InnerPointerAccess loads_inner_pointer() const
Definition: il.h:8146
bool Evaluate(const Object &instance_value, Object *result)
Definition: il.cc:2864
static bool IsFixedLengthArrayCid(intptr_t cid)
Definition: il.cc:2668
Value * instance() const
Definition: il.h:8143
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1121
virtual Representation representation() const
Definition: il.cc:921
static bool TryEvaluateLoad(const Object &instance, const Field &field, Object *result)
Definition: il.cc:2812
static bool IsTypedDataViewFactory(const Function &function)
Definition: il.cc:2683
bool MayCreateUntaggedAlias() const
Definition: il.cc:2835
intptr_t class_id() const
Definition: il.h:6803
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6849
static Representation ReturnRepresentation(intptr_t array_cid)
Definition: il.cc:6867
LoadIndexedInstr(Value *array, Value *index, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, CompileType *result_type=nullptr)
Definition: il.cc:6824
Value * array() const
Definition: il.h:6800
intptr_t index_scale() const
Definition: il.h:6802
Value * index() const
Definition: il.h:6801
virtual bool AllowsCSE() const
Definition: il.h:6685
const Field & field() const
Definition: il.h:6683
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1127
intptr_t offset() const
Definition: il.h:7946
virtual Representation representation() const
Definition: il.h:7936
Location temp(intptr_t index) const
Definition: locations.h:882
Location out(intptr_t index) const
Definition: locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition: locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition: locations.h:894
intptr_t temp_count() const
Definition: locations.h:880
RegisterSet * live_registers()
Definition: locations.h:941
void set_out(intptr_t index, Location loc)
Definition: locations.cc:232
Location in(intptr_t index) const
Definition: locations.h:866
void set_in(intptr_t index, Location loc)
Definition: locations.cc:205
static Location NoLocation()
Definition: locations.h:387
static Location SameAsFirstInput()
Definition: locations.h:382
bool IsRegister() const
Definition: locations.h:402
static Location Pair(Location first, Location second)
Definition: locations.cc:271
intptr_t ToStackSlotOffset() const
Definition: locations.cc:369
Register reg() const
Definition: locations.h:404
const char * ToCString() const
Definition: locations.cc:445
intptr_t stack_index() const
Definition: locations.h:485
Location Copy() const
Definition: locations.cc:468
bool IsConstant() const
Definition: locations.h:292
Register base_reg() const
Definition: locations.h:480
static Location RegisterLocation(Register reg)
Definition: locations.h:398
static Location Any()
Definition: locations.h:352
PairLocation * AsPairLocation() const
Definition: locations.cc:280
static Location RequiresRegister()
Definition: locations.h:365
bool IsPairLocation() const
Definition: locations.h:316
static Location RequiresFpuRegister()
Definition: locations.h:369
bool IsStackSlot() const
Definition: locations.h:456
FpuRegister fpu_reg() const
Definition: locations.h:416
const Object & constant() const
Definition: locations.cc:373
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition: locations.h:294
BlockEntryInstr * header() const
Definition: loops.h:252
intptr_t NestingDepth() const
Definition: loops.cc:1062
void RemapRegisters(intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
Definition: il.cc:4970
const Location & LocationAt(intptr_t i)
Definition: il.h:7730
intptr_t result_cid() const
Definition: il.h:8972
MethodRecognizer::Kind op_kind() const
Definition: il.h:8967
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1106
static MegamorphicCachePtr Lookup(Thread *thread, const String &name, const Array &descriptor)
Value * length() const
Definition: il.h:3211
Value * dest() const
Definition: il.h:3208
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6954
void EmitUnrolledCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, intptr_t num_elements, bool reversed)
Definition: il.cc:7101
Value * src_start() const
Definition: il.h:3209
bool can_overlap() const
Definition: il.h:3217
Value * src() const
Definition: il.h:3207
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition: il.h:3210
static intptr_t ResultCidFromPragma(const Object &function_or_field)
static intptr_t NumArgsCheckedForStaticCall(const Function &function)
static const char * KindToCString(Kind kind)
static intptr_t ParameterCountForResolution(const Function &function)
void set_is_bootstrap_native(bool value)
Definition: il.h:6056
void SetupNative()
Definition: il.cc:7347
const String & native_name() const
Definition: il.h:6022
virtual TokenPosition token_pos() const
Definition: il.h:6028
void set_is_auto_scope(bool value)
Definition: il.h:6057
bool is_bootstrap_native() const
Definition: il.h:6025
const Function & function() const
Definition: il.h:6023
bool link_lazily() const
Definition: il.h:6027
void SaveArgument(FlowGraphCompiler *compiler, const compiler::ffi::NativeLocation &loc) const
Definition: il.cc:4420
static NativeFunction ResolveNative(const Library &library, const String &function_name, int number_of_arguments, bool *auto_setup_scope)
Definition: native_entry.cc:37
virtual Representation representation() const
Definition: il.h:3005
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:3554
static ObjectPtr null()
Definition: object.h:433
intptr_t GetClassId() const
Definition: object.h:341
ObjectPtr ptr() const
Definition: object.h:332
bool Contains(uword addr) const
Definition: object.h:762
bool InVMIsolateHeap() const
Definition: object.h:395
bool IsCanonical() const
Definition: object.h:335
bool IsOld() const
Definition: object.h:391
virtual const char * ToCString() const
Definition: object.h:366
bool IsNull() const
Definition: object.h:363
static Object & Handle()
Definition: object.h:407
static Object & ZoneHandle()
Definition: object.h:419
Location At(intptr_t i) const
Definition: locations.h:618
bool IsRedundant() const
Definition: il.cc:4929
const Function & function() const
Definition: parser.h:73
JoinEntryInstr * block() const
Definition: il.h:2817
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6693
bool is_alive() const
Definition: il.h:2827
Definition * GetReplacementForRedundantPhi() const
Definition: il.cc:6640
bool IsRedundant() const
Definition: il.cc:6630
bool Done() const
Definition: il.h:2121
PhiInstr * Current() const
Definition: il.h:2125
void RemoveCurrentFromGraph()
Definition: il.cc:6725
virtual intptr_t CallCount() const
Definition: il.cc:5549
bool IsSureToCallSingleRecognizedTarget() const
Definition: il.cc:5669
static TypePtr ComputeRuntimeType(const CallTargets &targets)
Definition: il.cc:5568
virtual Definition * Canonicalize(FlowGraph *graph)
Definition: il.cc:5652
bool HasOnlyDispatcherOrImplicitAccessorTargets() const
Definition: il.cc:5536
const CallTargets & targets() const
Definition: il.h:4953
static RangeBoundary FromConstant(int64_t val)
static bool IsSingleton(Range *range)
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool Fits(Range *range, RangeBoundary::RangeSize size)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
bool IsWithin(int64_t min_int, int64_t max_int) const
static bool IsUnknown(const Range *other)
bool Fits(RangeBoundary::RangeSize size) const
bool Overlaps(int64_t min_int, int64_t max_int) const
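The Range entries above are closed-interval queries used by range analysis. A minimal sketch of the two predicates over plain int64 bounds (the VM's Range also tracks symbolic RangeBoundary values, which this ignores):

#include <cstdint>

// Whole range [lo, hi] lies inside [min, max].
static bool IsWithin(int64_t lo, int64_t hi, int64_t min, int64_t max) {
  return lo >= min && hi <= max;
}

// Ranges [lo, hi] and [min, max] share at least one value.
static bool Overlaps(int64_t lo, int64_t hi, int64_t min, int64_t max) {
  return lo <= max && hi >= min;
}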
intptr_t catch_try_index() const
Definition: il.h:3651
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3190
AbstractTypePtr FieldTypeAt(intptr_t index) const
Definition: object.cc:27380
intptr_t NumFields() const
Definition: object.h:13392
intptr_t num_fields() const
Definition: object.h:11425
ObjectPtr FieldAt(intptr_t field_index) const
Definition: object.h:11433
virtual Value * RedefinedValue() const
Definition: il.cc:545
Value * value() const
Definition: il.h:4109
CompileType * constrained_type() const
Definition: il.h:4117
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2637
void Remove(Location loc)
Definition: locations.h:766
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6557
RelationalOpInstr(const InstructionSource &source, Token::Kind kind, Value *left, Value *right, intptr_t cid, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.h:5382
static void MessageF(Kind kind, const Script &script, TokenPosition token_pos, bool report_after_token, const char *format,...)
Definition: report.cc:123
static constexpr bool AtLocation
Definition: report.h:29
static FunctionPtr ResolveDynamicAnyArgs(Zone *zone, const Class &receiver_class, const String &function_name, bool allow_add)
Definition: resolver.cc:185
static FunctionPtr ResolveDynamicForReceiverClass(const Class &receiver_class, const String &function_name, const ArgumentsDescriptor &args_desc, bool allow_add)
Definition: resolver.cc:148
bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition: il.cc:2112
Range * shift_range() const
Definition: il.h:9655
intptr_t mask() const
Definition: il.h:11359
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:8456
virtual intptr_t InputCount() const
Definition: il.cc:8448
virtual Value * InputAt(intptr_t i) const
Definition: il.h:11353
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:8465
static SimdOpInstr * CreateFromCall(Zone *zone, MethodRecognizer::Kind kind, Definition *receiver, Instruction *call, intptr_t mask=0)
Definition: il.cc:8259
Kind kind() const
Definition: il.h:11358
static SimdOpInstr * CreateFromFactoryCall(Zone *zone, MethodRecognizer::Kind kind, Instruction *call)
Definition: il.cc:8313
static Kind KindForMethod(MethodRecognizer::Kind method_kind)
Definition: il.cc:8380
static Kind KindForOperator(MethodRecognizer::Kind kind)
Definition: il.cc:8234
virtual Representation representation() const
Definition: il.cc:8452
Kind kind() const
Definition: slot.h:502
bool IsDartField() const
Definition: slot.h:503
const Field & field() const
Definition: slot.h:540
Representation representation() const
Definition: slot.h:519
intptr_t offset_in_bytes() const
Definition: slot.h:513
bool is_compressed() const
Definition: slot.h:529
compiler::Label * entry_label()
compiler::Label * exit_label()
static SmiPtr New(intptr_t value)
Definition: object.h:10006
intptr_t Value() const
Definition: object.h:9990
static intptr_t RawValue(intptr_t value)
Definition: object.h:10022
friend class Class
Definition: object.h:10047
static bool IsValid(int64_t value)
Definition: object.h:10026
const ICData * ic_data() const
Definition: il.h:5604
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:5452
const class BinaryFeedback * binary_
Definition: il.h:5718
bool Evaluate(FlowGraph *flow_graph, const Object &argument, Object *result)
Definition: il.cc:5820
virtual intptr_t ArgumentsSize() const
Definition: il.cc:5465
static StaticCallInstr * FromCall(Zone *zone, const C *call, const Function &target, intptr_t call_count)
Definition: il.h:5584
const Function & function() const
Definition: il.h:5623
const class BinaryFeedback & BinaryFeedback()
Definition: il.cc:5488
void SetResultType(Zone *zone, CompileType new_type)
Definition: il.h:5648
void set_is_known_list_constructor(bool value)
Definition: il.h:5662
Code::EntryKind entry_kind() const
Definition: il.h:5666
virtual Representation representation() const
Definition: il.cc:5471
bool InitResultType(Zone *zone)
Definition: il.cc:5674
const CallTargets & Targets()
Definition: il.cc:5475
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:5771
bool HasICData() const
Definition: il.h:5605
static StaticTypeExactnessState NotTracking()
InnerPointerAccess stores_inner_pointer() const
Definition: il.h:6450
virtual Representation RequiredInputRepresentation(intptr_t index) const
Definition: il.cc:1018
void set_stores_inner_pointer(InnerPointerAccess value)
Definition: il.h:6453
bool ShouldEmitStoreBarrier() const
Definition: il.h:6429
bool is_initialization() const
Definition: il.h:6427
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:1027
intptr_t OffsetInBytes() const
Definition: il.h:6498
compiler::Assembler::CanBeSmi CanValueBeSmi() const
Definition: il.h:6500
Value * value() const
Definition: il.h:6424
Value * instance() const
Definition: il.h:6422
const Slot & slot() const
Definition: il.h:6423
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:6925
StoreIndexedInstr(Value *array, Value *index, Value *value, StoreBarrierType emit_store_barrier, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.cc:6872
Value * value() const
Definition: il.h:7083
Value * array() const
Definition: il.h:7081
intptr_t class_id() const
Definition: il.h:7086
static Representation ValueRepresentation(intptr_t array_cid)
Definition: il.cc:6920
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:6901
intptr_t index_scale() const
Definition: il.h:7085
Value * index() const
Definition: il.h:7082
bool TryEmitBoolTest(FlowGraphCompiler *compiler, BranchLabels labels, intptr_t input_index, const Object &obj, Condition *condition_out)
Definition: il.cc:5076
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3734
bool AttributesEqual(const Instruction &other) const
Definition: il.cc:1094
bool needs_number_check() const
Definition: il.h:5125
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6564
bool Equals(const String &str) const
Definition: object.h:13337
static StringPtr New(const char *cstr, Heap::Space space=Heap::kNew)
Definition: object.cc:23698
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition: stub_code.cc:174
static CodePtr GetAllocationStubForTypedData(classid_t class_id)
Definition: stub_code.cc:279
SubtypeFinder(Zone *zone, GrowableArray< intptr_t > *cids, bool include_abstract)
Definition: il.cc:67
void ScanImplementorClasses(const Class &klass)
Definition: il.cc:75
bool has_type_args() const
Definition: il.h:11505
intptr_t resume_deopt_id() const
Definition: il.h:11515
StubId stub_id() const
Definition: il.h:11514
Value * type_args() const
Definition: il.h:11509
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:8562
Value * operand() const
Definition: il.h:11508
static const String & True()
Definition: symbols.h:693
static const String & False()
Definition: symbols.h:689
static StringPtr FromConcatAll(Thread *thread, const GrowableHandlePtrArray< const String > &strs)
Definition: symbols.cc:262
static StringPtr New(Thread *thread, const char *cstr)
Definition: symbols.h:723
static bool double_truncate_round_supported()
Definition: cpu_arm.h:72
virtual Value * InputAt(intptr_t i) const
Definition: il.h:3959
intptr_t type_args_len() const
Definition: il.h:4614
const Array & argument_names() const
Definition: il.h:4615
intptr_t ArgumentCount() const
Definition: il.h:4586
intptr_t FirstArgIndex() const
Definition: il.h:4576
intptr_t ArgumentCountWithoutTypeArgs() const
Definition: il.h:4578
Value * Receiver() const
Definition: il.h:4577
ArrayPtr GetArgumentsDescriptor() const
Definition: il.h:4617
bool calls_initializer() const
Definition: il.h:6623
bool throw_exception_on_initialization() const
Definition: il.h:6626
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:6631
Value * value() const
Definition: il.h:5250
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3852
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:6588
TestCidsInstr(const InstructionSource &source, Token::Kind kind, Value *value, const ZoneGrowableArray< intptr_t > &cid_results, intptr_t deopt_id)
Definition: il.cc:3831
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6576
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition: il.h:5234
TestIntInstr(const InstructionSource &source, Token::Kind kind, Representation representation, Value *left, Value *right)
Definition: il.h:5160
static bool IsSupported(Representation representation)
Definition: il.h:5188
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6570
uword upper() const
Definition: il.h:5281
TestRangeInstr(const InstructionSource &source, Value *value, uword lower, uword upper, Representation value_representation)
Definition: il.cc:3879
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition: il.cc:6582
virtual bool AttributesEqual(const Instruction &other) const
Definition: il.cc:6604
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3895
uword lower() const
Definition: il.h:5280
Value * value() const
Definition: il.h:5291
IsolateGroup * isolate_group() const
Zone * zone() const
Definition: thread_state.h:37
static Thread * Current()
Definition: thread.h:362
IsolateGroup * isolate_group() const
Definition: thread.h:541
static Token::Kind NegateComparison(Token::Kind op)
Definition: token.h:322
static intptr_t OutputIndexOf(Token::Kind token)
Definition: il.cc:7330
TruncDivModInstr(Value *lhs, Value *rhs, intptr_t deopt_id)
Definition: il.cc:7324
intptr_t Length() const
Definition: object.cc:7294
AbstractTypePtr TypeAt(intptr_t index) const
Definition: object.cc:7308
static TypePtr IntType()
static TypePtr Double()
static TypePtr StringType()
static TypePtr NullableNumber()
static TypePtr DartTypeType()
intptr_t Length() const
Definition: object.h:11518
intptr_t ElementSizeInBytes() const
Definition: object.h:11531
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:2393
Value * value() const
Definition: il.h:9240
static UnaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode, Range *range)
Definition: il.cc:2257
Token::Kind op_kind() const
Definition: il.h:9241
Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3312
virtual Representation representation() const
Definition: il.h:8703
Value * value() const
Definition: il.h:8678
void set_speculative_mode(SpeculativeMode value)
Definition: il.h:8741
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const
Definition: il.h:8699
static UnboxInstr * Create(Representation to, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.cc:4043
bool is_truncating() const
Definition: il.h:8772
virtual bool ComputeCanDeoptimize() const
Definition: il.cc:2040
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3376
intptr_t lane() const
Definition: il.h:10416
Value * value() const
Definition: il.h:10409
Definition * Canonicalize(FlowGraph *flow_graph)
Definition: il.cc:3243
uword constant_address_
Definition: il.h:4296
UnboxedConstantInstr(const Object &value, Representation representation)
Definition: il.cc:1171
bool IsScanFlagsUnboxed() const
Definition: il.cc:7181
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static T Abs(T x)
Definition: utils.h:49
static constexpr int CountOneBitsWord(uword x)
Definition: utils.h:176
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static constexpr int ShiftForPowerOfTwo(T x)
Definition: utils.h:81
static T Minimum(T x, T y)
Definition: utils.h:36
static constexpr bool IsPowerOfTwo(T x)
Definition: utils.h:76
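The Utils entries above are the bit tricks il.cc leans on when strength-reducing arithmetic, for example turning division by a power of two into a shift. A self-contained sketch of the two helpers involved:

#include <cstdint>

static bool IsPowerOfTwo(uint64_t x) {
  return x != 0 && (x & (x - 1)) == 0;
}

// Precondition: IsPowerOfTwo(x). Returns the shift amount, e.g. 8 -> 3,
// so x / 8 can be rewritten as x >> 3 for unsigned operands.
static int ShiftForPowerOfTwo(uint64_t x) {
  int shift = 0;
  while ((x & 1) == 0) {
    x >>= 1;
    ++shift;
  }
  return shift;
}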
bool Done() const
Definition: il.h:83
Definition: il.h:75
void BindToEnvironment(Definition *definition)
Definition: il.h:2724
void set_use_index(intptr_t index)
Definition: il.h:125
Value * Copy(Zone *zone)
Definition: il.h:134
bool IsSingleUse() const
Definition: il.h:117
bool NeedsWriteBarrier()
Definition: il.cc:1390
bool BindsToConstantNull() const
Definition: il.cc:1196
bool BindsToConstant() const
Definition: il.cc:1183
void set_previous_use(Value *previous)
Definition: il.h:112
bool CanBe(const Object &value)
Definition: il.h:11887
static void AddToList(Value *value, Value **list)
Definition: il.cc:1446
bool Equals(const Value &other) const
Definition: il.cc:633
intptr_t BoundSmiConstant() const
Definition: il.cc:1212
bool BindsToSmiConstant() const
Definition: il.cc:1208
Instruction * instruction() const
Definition: il.h:121
void set_next_use(Value *next)
Definition: il.h:115
Value * previous_use() const
Definition: il.h:111
const Object & BoundConstant() const
Definition: il.cc:1201
Value * next_use() const
Definition: il.h:114
void set_definition(Definition *definition)
Definition: il.h:104
Value * CopyWithType(Zone *zone)
Definition: il.h:138
void RemoveFromUseList()
Definition: il.cc:1457
Definition * definition() const
Definition: il.h:103
void BindTo(Definition *definition)
Definition: il.h:2718
CompileType * Type()
Value(Definition *definition)
Definition: il.h:95
void RefineReachingType(CompileType *type)
void set_instruction(Instruction *instruction)
Definition: il.h:122
intptr_t InputCount() const
Definition: il.h:2794
Value * InputAt(intptr_t i) const
Definition: il.h:2795
ElementType * Alloc(intptr_t length)
static bool EmittingComments()
const NativeLocation & Rebase(const NativeLocation &loc) const
Definition: frame_rebase.cc:13
const NativeLocations & locations() const
static const NativeCallingConvention & FromSignature(Zone *zone, const NativeFunctionType &signature)
static const NativeFunctionType * FromRepresentations(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations)
Definition: native_type.cc:637
virtual bool IsPointerToMemory() const
const MultipleNativeLocations & AsMultiple() const
const NativeRegistersLocation & AsRegisters() const
const PointerToMemoryLocation & AsPointerToMemory() const
NativeLocation & WidenTo4Bytes(Zone *zone) const
const BothNativeLocations & AsBoth() const
const NativeType & payload_type() const
const NativeLocation & pointer_location() const
static word element_offset(intptr_t index)
static word OffsetOf(const dart::Field &field)
static intptr_t field_index_at_offset(intptr_t offset_in_bytes)
static word shared_field_table_values_offset()
#define THR_Print(format,...)
Definition: log.h:20
Dart_NativeFunction(* Dart_NativeEntryResolver)(Dart_Handle name, int num_of_arguments, bool *auto_setup_scope)
Definition: dart_api.h:3234
#define UNIMPLEMENTED
#define ASSERT(E)
#define FATAL(error)
#define HANDLESCOPE(thread)
Definition: handles.h:321
static float max(float r, float g, float b)
Definition: hsl.cpp:49
static float min(float r, float g, float b)
Definition: hsl.cpp:48
#define __
Definition: il.cc:4271
#define DEFINE_ACCEPT(ShortName, Attrs)
Definition: il.cc:1263
#define INSTR_ATTRS(type, attrs)
#define BOXING_IN_SET_CASE(unboxed, boxed)
Definition: il.cc:449
#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M)
Definition: il.cc:442
#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
#define BOXING_CID_CASE(unboxed, boxed)
Definition: il.cc:455
#define Z
Definition: il.cc:7430
#define R(r)
#define KIND_CASE(name)
#define CASE(Arity, Mask, Name, Args, Result)
#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed)
Definition: il.cc:452
#define CASE_METHOD(Arity, Mask, Name,...)
#define FOR_EACH_INSTRUCTION(M)
Definition: il.h:405
#define SIMD_OP_LIST(M, BINARY_OP)
Definition: il.h:11249
size_t length
double y
double x
bool Contains(const Container &container, const Value &value)
const intptr_t kResultIndex
Definition: marshaller.h:28
word ToRawSmi(const dart::Object &a)
Definition: runtime_api.cc:960
static constexpr intptr_t kWordSize
Definition: runtime_api.h:274
static constexpr word kBitsPerWord
Definition: runtime_api.h:291
constexpr intptr_t kSmiBits
Definition: runtime_api.h:301
constexpr OperandSize kWordBytes
Definition: dart_vm.cc:33
static constexpr int kExitLinkSlotFromEntryFp
static AbstractTypePtr InstantiateType(const AbstractType &type, const AbstractType &instantiator)
Definition: mirrors.cc:614
bool IsTypedDataViewClassId(intptr_t index)
Definition: class_id.h:439
Location LocationRegisterOrConstant(Value *value)
Definition: locations.cc:289
static int64_t RepresentationMask(Representation r)
Definition: il.cc:2149
static Definition * CanonicalizeStrictCompare(StrictCompareInstr *compare, bool *negated, bool is_branch)
Definition: il.cc:3562
@ TIMES_16
const Register THR
const char *const name
uword FindDoubleConstant(double value)
IntegerPtr DoubleToInteger(Zone *zone, double val)
static Condition InvertCondition(Condition c)
const intptr_t kSmiMax
Definition: globals.h:28
static Definition * CanonicalizeCommutativeDoubleArithmetic(Token::Kind op, Value *left, Value *right)
Definition: il.cc:2154
static bool BindsToGivenConstant(Value *v, intptr_t expected)
Definition: il.cc:3614
bool IsTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:429
static constexpr const char * kNone
Definition: run_vm_tests.cc:43
static const Representation kUnboxedBool
Definition: il.cc:8416
static constexpr Representation kUnboxedUword
Definition: locations.h:171
static bool MayBeNumber(CompileType *type)
Definition: il.cc:3547
static const SimdOpInfo simd_op_information[]
Definition: il.cc:8430
static bool MayBeBoxableNumber(intptr_t cid)
Definition: il.cc:3543
constexpr int32_t kMinInt32
Definition: globals.h:482
const Register kWriteBarrierValueReg
DART_EXPORT bool IsNull(Dart_Handle object)
bool IsTypeClassId(intptr_t index)
Definition: class_id.h:370
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
Definition: hash.h:12
constexpr intptr_t kIntptrMin
Definition: globals.h:556
uint16_t RegList
static constexpr int kSavedCallerFpSlotFromFp
StoreBarrierType
Definition: il.h:6301
bool IsUnmodifiableTypedDataViewClassId(intptr_t index)
Definition: class_id.h:453
@ kIllegalCid
Definition: class_id.h:214
@ kNullCid
Definition: class_id.h:252
@ kVoidCid
Definition: class_id.h:254
@ kDynamicCid
Definition: class_id.h:253
@ kNeverCid
Definition: class_id.h:255
Representation
Definition: locations.h:66
constexpr intptr_t kBitsPerByte
Definition: globals.h:463
MallocGrowableArray< CidRangeValue > CidRangeVector
Definition: il.h:253
static int OrderByFrequencyThenId(CidRange *const *a, CidRange *const *b)
Definition: il.cc:644
uintptr_t uword
Definition: globals.h:501
@ UNSIGNED_GREATER
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ NOT_EQUAL
@ UNSIGNED_LESS_EQUAL
void RegisterTypeArgumentsUse(const Function &function, TypeUsageInfo *type_usage_info, const Class &klass, Definition *type_arguments)
const Register TMP2
static int OrderById(CidRange *const *a, CidRange *const *b)
Definition: il.cc:637
static bool RecognizeTestPattern(Value *left, Value *right, bool *negate)
Definition: il.cc:3620
@ kNumberOfCpuRegisters
Definition: constants_arm.h:98
@ kNoRegister
Definition: constants_arm.h:99
static bool IsMarked(BlockEntryInstr *block, GrowableArray< BlockEntryInstr * > *preorder)
Definition: il.cc:1688
Location LocationRemapForSlowPath(Location loc, Definition *def, intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
Definition: locations.cc:492
static AlignmentType StrengthenAlignment(intptr_t cid, AlignmentType alignment)
Definition: il.cc:6806
static constexpr int kCallerSpSlotFromFp
static Definition * CanonicalizeStringInterpolate(StaticCallInstr *call, FlowGraph *flow_graph)
Definition: il.cc:5709
bool IsExternalPayloadClassId(classid_t cid)
Definition: class_id.h:472
constexpr intptr_t kInt32Size
Definition: globals.h:450
const Register TMP
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
const Register FPREG
constexpr intptr_t kBitsPerInt32
Definition: globals.h:466
const intptr_t cid
static Definition * CanonicalizeStringInterpolateSingle(StaticCallInstr *call, FlowGraph *flow_graph)
Definition: il.cc:5760
static bool IsFpCompare(ComparisonInstr *comp)
Definition: il.cc:3522
DEFINE_BACKEND(LoadThread,(Register out))
Definition: il.cc:8109
uint32_t FinalizeHash(uint32_t hash, intptr_t hashbits=kBitsPerInt32)
Definition: hash.h:20
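CombineHashes and FinalizeHash (both listed in this index) are how instructions fold per-field hashes into the value returned by Instruction::Hash(). The sketch below shows the shape of that composition with a Jenkins-style mix; it is an illustration only and is not guaranteed to match the exact mixing in hash.h:

#include <cstdint>

static uint32_t Combine(uint32_t hash, uint32_t other) {
  hash += other;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

static uint32_t Finalize(uint32_t hash, int hashbits) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hashbits >= 32) return hash;
  return hash & ((uint32_t{1} << hashbits) - 1);
}

// Example: fold an opcode tag and an input count into one hash value.
// uint32_t h = Finalize(Combine(Combine(0, tag), input_count), 30);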
static constexpr Representation kUnboxedAddress
Definition: locations.h:182
static bool IsSingleUseUnboxOrConstant(Value *use)
Definition: il.cc:3747
static intptr_t RepresentationBits(Representation r)
Definition: il.cc:2134
constexpr int32_t kMaxInt32
Definition: globals.h:483
static const String & EvaluateToString(Zone *zone, Definition *defn)
Definition: il.cc:5692
static bool IsConstant(Definition *def, int64_t *val)
Definition: loops.cc:123
static const intptr_t kMaxElementSizeForEfficientCopy
Definition: il.cc:6950
QRegister FpuRegister
bool IsIntegerClassId(intptr_t index)
Definition: class_id.h:340
constexpr bool FLAG_target_memory_sanitizer
Definition: flags.h:174
static constexpr Representation SimdRepresentation(Representation rep)
Definition: il.cc:8408
const char *const function_name
static int8_t data[kExtLength]
void(* NativeFunction)(NativeArguments *arguments)
static intptr_t Usage(Thread *thread, const Function &function)
Definition: il.cc:729
@ kHeapObjectTag
@ kSmiTagShift
const intptr_t kSmiMin
Definition: globals.h:29
static ScaleFactor ToScaleFactor(intptr_t index_scale, bool index_unboxed)
Definition: constants.h:95
static bool IsCommutative(Token::Kind op)
Definition: il.cc:2240
static constexpr intptr_t kInvalidTryIndex
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition: locations.cc:297
ArrayOfTuplesView< MegamorphicCache::EntryType, std::tuple< Smi, Object > > MegamorphicCacheEntries
Definition: object.h:13561
constexpr intptr_t kBitsPerInt64
Definition: globals.h:467
const Register SPREG
bool IsExternalTypedDataClassId(intptr_t index)
Definition: class_id.h:447
AlignmentType
Definition: il.h:6764
@ kAlignedAccess
Definition: il.h:6766
static FunctionPtr FindBinarySmiOp(Zone *zone, const String &name)
Definition: il.cc:5234
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
constexpr intptr_t kIntptrMax
Definition: globals.h:557
bool IsStringClassId(intptr_t index)
Definition: class_id.h:350
static bool AllInputsAreRedefinitions(PhiInstr *phi)
Definition: il.cc:6684
@ kAllFree
Definition: object.h:2940
static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind)
Definition: il.cc:5166
DECLARE_FLAG(bool, show_invisible_frames)
#define FALL_THROUGH
Definition: globals.h:15
#define Pd
Definition: globals.h:408
static constexpr Register kResultReg
static constexpr Register kFunctionReg
static constexpr Register kContextReg
static constexpr Register kResultReg
static constexpr Register kInstantiatorTypeArgsReg
static constexpr Register kShapeReg
static constexpr Register kResultReg
static constexpr Register kResultReg
static constexpr Register kShapeReg
static constexpr Register kValue2Reg
static constexpr Register kValue0Reg
static constexpr Register kValue1Reg
static constexpr Register kLengthReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Register kSubTypeReg
static constexpr Register kSuperTypeReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kDstNameReg
static bool RequiresAllocation(Representation rep)
Definition: il.cc:470
static bool Supports(Representation rep)
Definition: il.cc:459
static constexpr Representation NativeRepresentation(Representation rep)
Definition: il.h:8504
static intptr_t BoxCid(Representation rep)
Definition: il.cc:493
static intptr_t ValueOffset(Representation rep)
Definition: il.cc:478
intptr_t cid_end
Definition: il.h:250
bool IsIllegalRange() const
Definition: il.h:241
intptr_t cid_start
Definition: il.h:249
intptr_t cid_start
Definition: il.h:220
intptr_t cid_end
Definition: il.h:221
static constexpr Register kSourceReg
static constexpr Register kClassIdReg
static constexpr Register kResultReg
static constexpr Register kRecognizedKindReg
static constexpr FpuRegister kInputReg
static constexpr Register kArgsReg
static constexpr Register kFieldReg
static constexpr Register kResultReg
static constexpr Register kInstanceReg
static constexpr Register kResultReg
static constexpr Register kFieldReg
static constexpr Register kTypeArgsReg
static constexpr Register kFunctionTypeArgumentsReg
Definition: constants.h:38
static constexpr Register kTypeReg
Definition: constants.h:34
static constexpr Register kInstantiatorTypeArgumentsReg
Definition: constants.h:36
static constexpr Register kResultTypeReg
Definition: constants.h:40
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kUninstantiatedTypeArgumentsReg
static constexpr Register kResultTypeArgumentsReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kFieldReg
static constexpr Register kLengthReg
static constexpr Register kIndexReg
static constexpr Register kStackTraceReg
static constexpr Register kExceptionReg
static constexpr size_t ValueSize(Representation rep)
Definition: locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition: locations.h:92
static bool IsRepresentable(Representation rep, int64_t value)
Definition: locations.cc:72
static int64_t MaxValue(Representation rep)
Definition: locations.cc:62
static compiler::OperandSize OperandSize(Representation rep)
Definition: locations.cc:16
static int64_t MinValue(Representation rep)
Definition: locations.cc:49
static constexpr bool IsUnboxed(Representation rep)
Definition: locations.h:101
static const char * ToCString(Representation rep)
Definition: locations.cc:129
static Representation RepresentationOfArrayElement(classid_t cid)
Definition: locations.cc:79
uint8_t arity
Definition: il.cc:8402
Representation inputs[4]
Definition: il.cc:8405
Representation output
Definition: il.cc:8404
bool has_mask
Definition: il.cc:8403
static constexpr intptr_t kResumePcDistance
static constexpr Register kArgumentReg
static constexpr Register kTypeArgsReg
const Function * target
Definition: il.h:727
StaticTypeExactnessState exactness
Definition: il.h:729
intptr_t count
Definition: il.h:728
static constexpr Register kExceptionReg