il.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/compiler/backend/il.h"
6
7#include "platform/assert.h"
8#include "platform/globals.h"
9#include "vm/bit_vector.h"
10#include "vm/bootstrap.h"
11#include "vm/code_entry_kind.h"
34#include "vm/constants.h"
35#include "vm/cpu.h"
36#include "vm/dart_entry.h"
37#include "vm/object.h"
38#include "vm/object_store.h"
39#include "vm/os.h"
41#include "vm/resolver.h"
42#include "vm/runtime_entry.h"
43#include "vm/scopes.h"
44#include "vm/stack_frame.h"
45#include "vm/stub_code.h"
46#include "vm/symbols.h"
48
50
51namespace dart {
52
53DEFINE_FLAG(bool,
54 propagate_ic_data,
55 true,
56 "Propagate IC data from unoptimized to optimized IC calls.");
57DEFINE_FLAG(bool,
58 two_args_smi_icd,
59 true,
60 "Generate special IC stubs for two args Smi operations");
61
62DECLARE_FLAG(bool, inline_alloc);
63DECLARE_FLAG(bool, use_slow_path);
64
65class SubtypeFinder {
66 public:
67 SubtypeFinder(Zone* zone,
68 GrowableArray<intptr_t>* cids,
69 bool include_abstract)
70 : array_handles_(zone),
71 class_handles_(zone),
72 cids_(cids),
73 include_abstract_(include_abstract) {}
74
75 void ScanImplementorClasses(const Class& klass) {
76 // An implementor of [klass] is
77 // * the [klass] itself.
78 // * all implementors of the direct subclasses of [klass].
79 // * all implementors of the direct implementors of [klass].
80 if (include_abstract_ || !klass.is_abstract()) {
81 cids_->Add(klass.id());
82 }
83
84 ScopedHandle<GrowableObjectArray> array(&array_handles_);
85 ScopedHandle<Class> subclass_or_implementor(&class_handles_);
86
87 *array = klass.direct_subclasses();
88 if (!array->IsNull()) {
89 for (intptr_t i = 0; i < array->Length(); ++i) {
90 *subclass_or_implementor ^= (*array).At(i);
91 ScanImplementorClasses(*subclass_or_implementor);
92 }
93 }
94 *array = klass.direct_implementors();
95 if (!array->IsNull()) {
96 for (intptr_t i = 0; i < array->Length(); ++i) {
97 *subclass_or_implementor ^= (*array).At(i);
98 ScanImplementorClasses(*subclass_or_implementor);
99 }
100 }
101 }
102
103 private:
104 ReusableHandleStack<GrowableObjectArray> array_handles_;
105 ReusableHandleStack<Class> class_handles_;
106 GrowableArray<intptr_t>* cids_;
107 const bool include_abstract_;
108};
109
110const CidRangeVector& HierarchyInfo::SubtypeRangesForClass(
111 const Class& klass,
112 bool include_abstract,
113 bool exclude_null) {
114 ClassTable* table = thread()->isolate_group()->class_table();
115 const intptr_t cid_count = table->NumCids();
116 std::unique_ptr<CidRangeVector[]>* cid_ranges = nullptr;
117 if (include_abstract) {
118 cid_ranges = exclude_null ? &cid_subtype_ranges_abstract_nonnullable_
119 : &cid_subtype_ranges_abstract_nullable_;
120 } else {
121 cid_ranges = exclude_null ? &cid_subtype_ranges_nonnullable_
122 : &cid_subtype_ranges_nullable_;
123 }
124 if (*cid_ranges == nullptr) {
125 cid_ranges->reset(new CidRangeVector[cid_count]);
126 }
127 CidRangeVector& ranges = (*cid_ranges)[klass.id()];
128 if (ranges.length() == 0) {
129 BuildRangesFor(table, &ranges, klass, include_abstract, exclude_null);
130 }
131 return ranges;
132}
133
134class CidCheckerForRanges : public ValueObject {
135 public:
136 CidCheckerForRanges(Thread* thread,
137 ClassTable* table,
138 const Class& cls,
139 bool include_abstract,
140 bool exclude_null)
141 : thread_(thread),
142 table_(table),
143 supertype_(AbstractType::Handle(zone(), cls.RareType())),
144 include_abstract_(include_abstract),
145 exclude_null_(exclude_null),
146 to_check_(Class::Handle(zone())),
147 subtype_(AbstractType::Handle(zone())) {}
148
149 bool MayInclude(intptr_t cid) {
150 if (!table_->HasValidClassAt(cid)) return true;
151 if (cid == kTypeArgumentsCid) return true;
152 if (cid == kVoidCid) return true;
153 if (cid == kDynamicCid) return true;
154 if (cid == kNeverCid) return true;
155 if (!exclude_null_ && cid == kNullCid) return true;
156 to_check_ = table_->At(cid);
157 ASSERT(!to_check_.IsNull());
158 if (!include_abstract_ && to_check_.is_abstract()) return true;
159 return to_check_.IsTopLevel();
160 }
161
162 bool MustInclude(intptr_t cid) {
164 if (cid == kNullCid) return false;
165 to_check_ = table_->At(cid);
166 subtype_ = to_check_.RareType();
167 // Create local zone because deep hierarchies may allocate lots of handles.
168 StackZone stack_zone(thread_);
169 HANDLESCOPE(thread_);
170 return subtype_.IsSubtypeOf(supertype_, Heap::kNew);
171 }
172
173 private:
174 Zone* zone() const { return thread_->zone(); }
175
176 Thread* const thread_;
177 ClassTable* const table_;
178 const AbstractType& supertype_;
179 const bool include_abstract_;
180 const bool exclude_null_;
181 Class& to_check_;
182 AbstractType& subtype_;
183};
184
185// Build the ranges either for:
186// "<obj> as <Type>", or
187// "<obj> is <Type>"
188void HierarchyInfo::BuildRangesUsingClassTableFor(ClassTable* table,
189 CidRangeVector* ranges,
190 const Class& klass,
191 bool include_abstract,
192 bool exclude_null) {
193 CidCheckerForRanges checker(thread(), table, klass, include_abstract,
194 exclude_null);
195 // Iterate over all cids to find the ones to be included in the ranges.
196 const intptr_t cid_count = table->NumCids();
197 intptr_t start = -1;
198 intptr_t end = -1;
199 for (intptr_t cid = kInstanceCid; cid < cid_count; ++cid) {
200 // Some cases are "don't care", i.e., they may or may not be included,
201 // whatever yields the least number of ranges for efficiency.
202 if (checker.MayInclude(cid)) continue;
203 if (checker.MustInclude(cid)) {
204 // On success, open a new or continue any open range.
205 if (start == -1) start = cid;
206 end = cid;
207 } else if (start != -1) {
208 // On failure, close any open range from start to end
209 // (the latter is the most recent successful "do-care" cid).
210 ranges->Add({start, end});
211 start = end = -1;
212 }
213 }
214
215 // Construct the last range if there is an open one.
216 if (start != -1) {
217 ranges->Add({start, end});
218 }
219}
220
221void HierarchyInfo::BuildRangesFor(ClassTable* table,
222 CidRangeVector* ranges,
223 const Class& dst_klass,
224 bool include_abstract,
225 bool exclude_null) {
226 // Use the class table in cases where the direct subclasses and implementors
227 // are not filled out.
228 if (dst_klass.InVMIsolateHeap() || dst_klass.id() == kInstanceCid) {
229 BuildRangesUsingClassTableFor(table, ranges, dst_klass, include_abstract,
230 exclude_null);
231 return;
232 }
233
234 Zone* zone = thread()->zone();
235 GrowableArray<intptr_t> cids;
236 SubtypeFinder finder(zone, &cids, include_abstract);
237 {
238 SafepointReadRwLocker ml(thread(),
239 thread()->isolate_group()->program_lock());
240 finder.ScanImplementorClasses(dst_klass);
241 }
242 if (cids.is_empty()) return;
243
244 // Sort all collected cids.
245 intptr_t* cids_array = cids.data();
246
247 qsort(cids_array, cids.length(), sizeof(intptr_t),
248 [](const void* a, const void* b) {
249 // MSAN seems unaware of allocations inside qsort. The linker flag
250 // -fsanitize=memory should give us a MSAN-aware version of libc...
251 MSAN_UNPOISON(static_cast<const intptr_t*>(a), sizeof(intptr_t));
252 MSAN_UNPOISON(static_cast<const intptr_t*>(b), sizeof(intptr_t));
253 return static_cast<int>(*static_cast<const intptr_t*>(a) -
254 *static_cast<const intptr_t*>(b));
255 });
256
257 // Build ranges of all the cids.
258 CidCheckerForRanges checker(thread(), table, dst_klass, include_abstract,
259 exclude_null);
260 intptr_t left_cid = -1;
261 intptr_t right_cid = -1;
262 intptr_t previous_cid = -1;
263 for (intptr_t i = 0; i < cids.length(); ++i) {
264 const intptr_t current_cid = cids[i];
265 if (current_cid == previous_cid) continue; // Skip duplicates.
266
267 // We sorted, after all!
268 RELEASE_ASSERT(previous_cid < current_cid);
269
270 if (left_cid != -1) {
271 ASSERT(previous_cid != -1);
272 // Check the cids between the previous cid from cids and this one.
273 for (intptr_t j = previous_cid + 1; j < current_cid; ++j) {
274 // Stop if we find a do-care class before reaching the current cid.
275 if (!checker.MayInclude(j)) {
276 ranges->Add({left_cid, right_cid});
277 left_cid = right_cid = -1;
278 break;
279 }
280 }
281 }
282 previous_cid = current_cid;
283
284 if (checker.MayInclude(current_cid)) continue;
285 if (checker.MustInclude(current_cid)) {
286 if (left_cid == -1) {
287 // Open a new range starting at this cid.
288 left_cid = current_cid;
289 }
290 right_cid = current_cid;
291 } else if (left_cid != -1) {
292 // Close the existing range.
293 ranges->Add({left_cid, right_cid});
294 left_cid = right_cid = -1;
295 }
296 }
297
298 // If there is an open cid-range which we haven't finished yet, we'll
299 // complete it.
300 if (left_cid != -1) {
301 ranges->Add(CidRange{left_cid, right_cid});
302 }
303}
304
305bool HierarchyInfo::CanUseSubtypeRangeCheckFor(const AbstractType& type) {
306 ASSERT(type.IsFinalized());
307
308 if (!type.IsInstantiated() || !type.IsType()) {
309 return false;
310 }
311
312 // The FutureOr<T> type cannot be handled by checking whether the instance is
313 // a subtype of FutureOr and then checking whether the type argument `T`
314 // matches.
315 //
316 // Instead we would need to perform multiple checks:
317 //
318 // instance is Null || instance is T || instance is Future<T>
319 //
320 if (type.IsFutureOrType()) {
321 return false;
322 }
323
324 Zone* zone = thread()->zone();
325 const Class& type_class = Class::Handle(zone, type.type_class());
326
327 // We can use class id range checks only if we don't have to test type
328 // arguments.
329 //
330 // This is e.g. true for "String" but also for "List<dynamic>". (A type for
331 // which the type arguments vector is instantiated to bounds is known as a
332 // rare type.)
333 if (type_class.IsGeneric()) {
334 const Type& rare_type = Type::Handle(zone, type_class.RareType());
335 if (!rare_type.IsSubtypeOf(type, Heap::kNew)) {
336 ASSERT(Type::Cast(type).arguments() != TypeArguments::null());
337 return false;
338 }
339 }
340
341 return true;
342}
343
344bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor(
345 const AbstractType& type) {
346 ASSERT(type.IsFinalized());
347
348 if (!type.IsType() || type.IsDartFunctionType()) {
349 return false;
350 }
351
352 // The FutureOr<T> type cannot be handled by checking whether the instance is
353 // a subtype of FutureOr and then checking whether the type argument `T`
354 // matches.
355 //
356 // Instead we would need to perform multiple checks:
357 //
358 // instance is Null || instance is T || instance is Future<T>
359 //
360 if (type.IsFutureOrType()) {
361 return false;
362 }
363
364 // NOTE: We do allow non-instantiated types here (in comparison to
365 // [CanUseSubtypeRangeCheckFor]), since we handle type parameters in the type
366 // expression in some cases (see below).
367
368 Zone* zone = thread()->zone();
369 const Class& type_class = Class::Handle(zone, type.type_class());
370 const intptr_t num_type_parameters = type_class.NumTypeParameters();
371
372 // This function should only be called for generic classes.
373 ASSERT(type_class.NumTypeParameters() > 0 &&
374 Type::Cast(type).arguments() != TypeArguments::null());
375
376 const TypeArguments& ta =
377 TypeArguments::Handle(zone, Type::Cast(type).arguments());
378 ASSERT(ta.Length() == num_type_parameters);
379
380 // Ensure we can handle all type arguments
381 // via [CidRange]-based checks or that it is a type parameter.
382 AbstractType& type_arg = AbstractType::Handle(zone);
383 for (intptr_t i = 0; i < num_type_parameters; ++i) {
384 type_arg = ta.TypeAt(i);
385 if (!CanUseSubtypeRangeCheckFor(type_arg) && !type_arg.IsTypeParameter()) {
386 return false;
387 }
388 }
389
390 return true;
391}
392
393bool HierarchyInfo::CanUseRecordSubtypeRangeCheckFor(const AbstractType& type) {
394 ASSERT(type.IsFinalized());
395 if (!type.IsRecordType()) {
396 return false;
397 }
398 const RecordType& rec = RecordType::Cast(type);
399 Zone* zone = thread()->zone();
400 auto& field_type = AbstractType::Handle(zone);
401 for (intptr_t i = 0, n = rec.NumFields(); i < n; ++i) {
402 field_type = rec.FieldTypeAt(i);
403 if (!CanUseSubtypeRangeCheckFor(field_type)) {
404 return false;
405 }
406 }
407 return true;
408}
409
410bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type,
411 intptr_t* lower_limit,
412 intptr_t* upper_limit) {
413 ASSERT(CompilerState::Current().is_aot());
414 if (type.IsNullable()) {
415 // 'is' test for nullable types should accept null cid in addition to the
416 // class range. In most cases it is not possible to extend class range to
417 // include kNullCid.
418 return false;
419 }
420 if (CanUseSubtypeRangeCheckFor(type)) {
421 const Class& type_class =
422 Class::Handle(thread()->zone(), type.type_class());
423 const CidRangeVector& ranges =
424 SubtypeRangesForClass(type_class,
425 /*include_abstract=*/false,
426 /*exclude_null=*/true);
427 if (ranges.length() == 1) {
428 const CidRangeValue& range = ranges[0];
429 ASSERT(!range.IsIllegalRange());
430 *lower_limit = range.cid_start;
431 *upper_limit = range.cid_end;
432 return true;
433 }
434 }
435 return false;
436}
437
438// The set of supported non-integer unboxed representations.
439// Format: (unboxed representations suffix, boxed class type)
440#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M) \
441 M(Double, Double) \
442 M(Float, Double) \
443 M(Float32x4, Float32x4) \
444 M(Float64x2, Float64x2) \
445 M(Int32x4, Int32x4)
446
447#define BOXING_IN_SET_CASE(unboxed, boxed) \
448 case kUnboxed##unboxed: \
449 return true;
450#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed) \
451 case kUnboxed##unboxed: \
452 return compiler::target::boxed::value_offset();
453#define BOXING_CID_CASE(unboxed, boxed) \
454 case kUnboxed##unboxed: \
455 return k##boxed##Cid;
456
457bool Boxing::Supports(Representation rep) {
458 if (RepresentationUtils::IsUnboxedInteger(rep)) {
459 return true;
460 }
461 switch (rep) {
462 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_IN_SET_CASE)
463 default:
464 return false;
465 }
466}
467
468bool Boxing::RequiresAllocation(Representation rep) {
469 if (RepresentationUtils::IsUnboxedInteger(rep)) {
470 return RepresentationUtils::ValueSize(rep) * kBitsPerByte >
471 compiler::target::kSmiBits;
472 }
473 return true;
474}
475
476intptr_t Boxing::ValueOffset(Representation rep) {
477 if (RepresentationUtils::IsUnboxedInteger(rep) &&
478 Boxing::RequiresAllocation(rep) &&
479 RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
480 return compiler::target::Mint::value_offset();
481 }
482 switch (rep) {
483 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_VALUE_OFFSET_CASE)
484 default:
485 UNREACHABLE();
486 return 0;
487 }
488}
489
490// Note that not all boxes require allocation (e.g., Smis).
491intptr_t Boxing::BoxCid(Representation rep) {
492 if (RepresentationUtils::IsUnboxedInteger(rep)) {
493 if (!Boxing::RequiresAllocation(rep)) {
494 return kSmiCid;
495 } else if (RepresentationUtils::ValueSize(rep) <= sizeof(int64_t)) {
496 return kMintCid;
497 }
498 }
499 switch (rep) {
500 FOR_EACH_NON_INT_BOXED_REPRESENTATION(BOXING_CID_CASE)
501 default:
502 UNREACHABLE();
503 return kIllegalCid;
504 }
505}
506
507#undef BOXING_CID_CASE
508#undef BOXING_VALUE_OFFSET_CASE
509#undef BOXING_IN_SET_CASE
510#undef FOR_EACH_NON_INT_BOXED_REPRESENTATION
511
512#if defined(DEBUG)
513void Instruction::CheckField(const Field& field) const {
514 DEBUG_ASSERT(field.IsNotTemporaryScopedHandle());
516}
517#endif // DEBUG
518
519// A value in the constant propagation lattice.
520// - non-constant sentinel
521// - a constant (any non-sentinel value)
522// - unknown sentinel
523const Object& Definition::constant_value() {
524 if (constant_value_ == nullptr) {
526 }
527 return *constant_value_;
528}
529
531 Definition* defn = this;
532 Value* unwrapped;
533 while ((unwrapped = defn->RedefinedValue()) != nullptr) {
534 defn = unwrapped->definition();
535 }
536 return defn;
537}
538
540 return nullptr;
541}
542
544 return value();
545}
546
550
552 return value();
553}
554
556 return index();
557}
558
560 return value();
561}
562
564 return value();
565}
566
568 Definition* def = this;
569 while (true) {
570 Definition* orig;
571 if (def->IsConstraint() || def->IsBox() || def->IsUnbox() ||
572 def->IsIntConverter() || def->IsFloatToDouble() ||
573 def->IsDoubleToFloat()) {
574 orig = def->InputAt(0)->definition();
575 } else {
576 orig = def->OriginalDefinition();
577 }
578 if (orig == def) return def;
579 def = orig;
580 }
581}
582
583bool Definition::IsArrayLength(Definition* def) {
584 if (def != nullptr) {
585 if (auto load = def->OriginalDefinitionIgnoreBoxingAndConstraints()
586 ->AsLoadField()) {
587 return load->IsImmutableLengthLoad();
588 }
589 }
590 return false;
591}
592
594 const ZoneGrowableArray<const ICData*>& ic_data_array,
595 intptr_t deopt_id,
596 bool is_static_call) {
597 // The deopt_id can be outside the range of the IC data array for
598 // computations added in the optimizing compiler.
600 if (deopt_id >= ic_data_array.length()) {
601 return nullptr;
602 }
603 const ICData* result = ic_data_array[deopt_id];
604 ASSERT(result == nullptr || is_static_call == result->is_static_call());
605 return result;
606}
607
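// Note: Hash() and Equals() below give a structural notion of instruction
// equivalence (same tag, same inputs, equal attributes); optimization passes
// such as common subexpression elimination use this pairing to detect
// equivalent instructions.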
608uword Instruction::Hash() const {
609 uword result = tag();
610 for (intptr_t i = 0; i < InputCount(); ++i) {
611 Value* value = InputAt(i);
612 result = CombineHashes(result, value->definition()->ssa_temp_index());
613 }
614 return FinalizeHash(result, kBitsPerInt32 - 1);
615}
616
617bool Instruction::Equals(const Instruction& other) const {
618 if (tag() != other.tag()) return false;
619 if (InputCount() != other.InputCount()) return false;
620 for (intptr_t i = 0; i < InputCount(); ++i) {
621 if (!InputAt(i)->Equals(*other.InputAt(i))) return false;
622 }
623 return AttributesEqual(other);
624}
625
630
631bool Value::Equals(const Value& other) const {
632 return definition() == other.definition();
633}
634
635static int OrderById(CidRange* const* a, CidRange* const* b) {
636 // Negative if 'a' should sort before 'b'.
637 ASSERT((*a)->IsSingleCid());
638 ASSERT((*b)->IsSingleCid());
639 return (*a)->cid_start - (*b)->cid_start;
640}
641
642static int OrderByFrequencyThenId(CidRange* const* a, CidRange* const* b) {
643 const TargetInfo* target_info_a = static_cast<const TargetInfo*>(*a);
644 const TargetInfo* target_info_b = static_cast<const TargetInfo*>(*b);
645 // Negative if 'a' should sort before 'b'.
646 if (target_info_b->count != target_info_a->count) {
647 return (target_info_b->count - target_info_a->count);
648 } else {
649 return (*a)->cid_start - (*b)->cid_start;
650 }
651}
652
653bool Cids::Equals(const Cids& other) const {
654 if (length() != other.length()) return false;
655 for (int i = 0; i < length(); i++) {
656 if (cid_ranges_[i]->cid_start != other.cid_ranges_[i]->cid_start ||
657 cid_ranges_[i]->cid_end != other.cid_ranges_[i]->cid_end) {
658 return false;
659 }
660 }
661 return true;
662}
663
664intptr_t Cids::ComputeLowestCid() const {
665 intptr_t min = kIntptrMax;
666 for (intptr_t i = 0; i < cid_ranges_.length(); ++i) {
667 min = Utils::Minimum(min, cid_ranges_[i]->cid_start);
668 }
669 return min;
670}
671
672intptr_t Cids::ComputeHighestCid() const {
673 intptr_t max = -1;
674 for (intptr_t i = 0; i < cid_ranges_.length(); ++i) {
675 max = Utils::Maximum(max, cid_ranges_[i]->cid_end);
676 }
677 return max;
678}
679
680bool Cids::HasClassId(intptr_t cid) const {
681 for (int i = 0; i < length(); i++) {
682 if (cid_ranges_[i]->Contains(cid)) {
683 return true;
684 }
685 }
686 return false;
687}
688
689Cids* Cids::CreateMonomorphic(Zone* zone, intptr_t cid) {
690 Cids* cids = new (zone) Cids(zone);
691 cids->Add(new (zone) CidRange(cid, cid));
692 return cids;
693}
694
695Cids* Cids::CreateForArgument(Zone* zone,
696 const BinaryFeedback& binary_feedback,
697 int argument_number) {
698 Cids* cids = new (zone) Cids(zone);
699 for (intptr_t i = 0; i < binary_feedback.feedback_.length(); i++) {
700 ASSERT((argument_number == 0) || (argument_number == 1));
701 const intptr_t cid = argument_number == 0
702 ? binary_feedback.feedback_[i].first
703 : binary_feedback.feedback_[i].second;
704 cids->Add(new (zone) CidRange(cid, cid));
705 }
706
707 if (cids->length() != 0) {
708 cids->Sort(OrderById);
709
710 // Merge adjacent class id ranges.
711 int dest = 0;
712 for (int src = 1; src < cids->length(); src++) {
713 if (cids->cid_ranges_[dest]->cid_end + 1 >=
714 cids->cid_ranges_[src]->cid_start) {
715 cids->cid_ranges_[dest]->cid_end = cids->cid_ranges_[src]->cid_end;
716 } else {
717 dest++;
718 if (src != dest) cids->cid_ranges_[dest] = cids->cid_ranges_[src];
719 }
720 }
721 cids->SetLength(dest + 1);
722 }
723
724 return cids;
725}
726
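// The helper below estimates how frequently `function` is called from its
// usage counter; negative counters and already-optimized code (which stops
// counting) are special-cased.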
727static intptr_t Usage(Thread* thread, const Function& function) {
728 intptr_t count = function.usage_counter();
729 if (count < 0) {
730 if (function.HasCode()) {
731 // 'function' is queued for optimized compilation
733 } else {
734 count = 0;
735 }
736 } else if (Code::IsOptimized(function.CurrentCode())) {
737 // 'function' was optimized and stopped counting
739 }
740 return count;
741}
742
743void CallTargets::CreateHelper(Zone* zone, const ICData& ic_data) {
744 Function& dummy = Function::Handle(zone);
745
746 const intptr_t num_args_tested = ic_data.NumArgsTested();
747
748 for (int i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
749 if (ic_data.GetCountAt(i) == 0) {
750 continue;
751 }
752
753 intptr_t id = kDynamicCid;
754 if (num_args_tested == 0) {
755 } else if (num_args_tested == 1) {
756 ic_data.GetOneClassCheckAt(i, &id, &dummy);
757 } else {
758 ASSERT(num_args_tested == 2);
759 GrowableArray<intptr_t> arg_ids;
760 ic_data.GetCheckAt(i, &arg_ids, &dummy);
761 id = arg_ids[0];
762 }
763 Function& function = Function::ZoneHandle(zone, ic_data.GetTargetAt(i));
764 intptr_t count = ic_data.GetCountAt(i);
765 cid_ranges_.Add(new (zone) TargetInfo(id, id, &function, count,
766 ic_data.GetExactnessAt(i)));
767 }
768
769 if (ic_data.is_megamorphic()) {
770 ASSERT(num_args_tested == 1); // Only 1-arg ICData will turn megamorphic.
771 const String& name = String::Handle(zone, ic_data.target_name());
772 const Array& descriptor =
773 Array::Handle(zone, ic_data.arguments_descriptor());
774 Thread* thread = Thread::Current();
775
776 const auto& cache = MegamorphicCache::Handle(
777 zone, MegamorphicCacheTable::Lookup(thread, name, descriptor));
778 {
779 SafepointMutexLocker ml(thread->isolate_group()->type_feedback_mutex());
780 MegamorphicCacheEntries entries(Array::Handle(zone, cache.buckets()));
781 for (intptr_t i = 0, n = entries.Length(); i < n; i++) {
782 const intptr_t id =
783 Smi::Value(entries[i].Get<MegamorphicCache::kClassIdIndex>());
784 if (id == kIllegalCid) {
785 continue;
786 }
787 Function& function = Function::ZoneHandle(zone);
789 const intptr_t filled_entry_count = cache.filled_entry_count();
790 ASSERT(filled_entry_count > 0);
791 cid_ranges_.Add(new (zone) TargetInfo(
792 id, id, &function, Usage(thread, function) / filled_entry_count,
794 }
795 }
796 }
797}
798
799bool Cids::IsMonomorphic() const {
800 if (length() != 1) return false;
801 return cid_ranges_[0]->IsSingleCid();
802}
803
804intptr_t Cids::MonomorphicReceiverCid() const {
805 ASSERT(IsMonomorphic());
806 return cid_ranges_[0]->cid_start;
807}
808
813
814const char* AssertAssignableInstr::KindToCString(Kind kind) {
815 switch (kind) {
816#define KIND_CASE(name) \
817 case k##name: \
818 return #name;
819 FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE)
820#undef KIND_CASE
821 default:
822 UNREACHABLE();
823 return nullptr;
824 }
825}
826
827bool AssertAssignableInstr::ParseKind(const char* str, Kind* out) {
828#define KIND_CASE(name) \
829 if (strcmp(str, #name) == 0) { \
830 *out = Kind::k##name; \
831 return true; \
832 }
833 FOR_EACH_ASSERT_ASSIGNABLE_KIND(KIND_CASE)
834#undef KIND_CASE
835 return false;
836}
837
838CheckClassInstr::CheckClassInstr(Value* value,
839 intptr_t deopt_id,
840 const Cids& cids,
841 const InstructionSource& source)
842 : TemplateInstruction(source, deopt_id),
843 cids_(cids),
844 is_bit_test_(IsCompactCidRange(cids)),
845 token_pos_(source.token_pos) {
846 // Expected useful check data.
847 const intptr_t number_of_checks = cids.length();
848 ASSERT(number_of_checks > 0);
849 SetInputAt(0, value);
850 // Otherwise use CheckSmiInstr.
851 ASSERT(number_of_checks != 1 || !cids[0].IsSingleCid() ||
852 cids[0].cid_start != kSmiCid);
853}
854
855bool CheckClassInstr::AttributesEqual(const Instruction& other) const {
856 auto const other_check = other.AsCheckClass();
857 ASSERT(other_check != nullptr);
858 return cids().Equals(other_check->cids());
859}
860
861bool CheckClassInstr::IsDeoptIfNull() const {
862 if (!cids().IsMonomorphic()) {
863 return false;
864 }
865 CompileType* in_type = value()->Type();
866 const intptr_t cid = cids().MonomorphicReceiverCid();
867 // Performance check: use CheckSmiInstr instead.
868 ASSERT(cid != kSmiCid);
869 return in_type->is_nullable() && (in_type->ToNullableCid() == cid);
870}
871
872// Null object is a singleton of null-class (except for some sentinel,
873// transitional temporaries). Instead of checking against the null class only,
874// we can check directly against the null instance.
875bool CheckClassInstr::IsDeoptIfNotNull() const {
876 if (!cids().IsMonomorphic()) {
877 return false;
878 }
879 const intptr_t cid = cids().MonomorphicReceiverCid();
880 return cid == kNullCid;
881}
882
883bool CheckClassInstr::IsCompactCidRange(const Cids& cids) {
884 const intptr_t number_of_checks = cids.length();
885 // If there are only two checks, the extra register pressure needed for the
886 // dense-cid-range code is not justified.
887 if (number_of_checks <= 2) return false;
888
889 // TODO(fschneider): Support smis in dense cid checks.
890 if (cids.HasClassId(kSmiCid)) return false;
891
892 intptr_t min = cids.ComputeLowestCid();
893 intptr_t max = cids.ComputeHighestCid();
894 return (max - min) < compiler::target::kBitsPerWord;
895}
896
897bool CheckClassInstr::IsBitTest() const {
898 return is_bit_test_;
899}
900
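// The mask computed below has bit (cid - lowest_cid) set for every class id
// accepted by this check, so a compact cid range can be tested with a single
// shift-and-bit-test sequence (see IsCompactCidRange above).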
901intptr_t CheckClassInstr::ComputeCidMask() const {
902 ASSERT(IsBitTest());
903 const uintptr_t one = 1;
904 intptr_t min = cids_.ComputeLowestCid();
905 intptr_t mask = 0;
906 for (intptr_t i = 0; i < cids_.length(); ++i) {
907 uintptr_t run;
908 uintptr_t range = one + cids_[i].Extent();
909 if (range >= static_cast<uintptr_t>(compiler::target::kBitsPerWord)) {
910 run = -1;
911 } else {
912 run = (one << range) - 1;
913 }
914 mask |= run << (cids_[i].cid_start - min);
915 }
916 return mask;
917}
918
922
925 intptr_t num_context_variables,
926 intptr_t deopt_id)
927 : TemplateAllocation(source, deopt_id),
928 num_context_variables_(num_context_variables) {
929 // This instruction is not used in AOT for code size reasons.
930 ASSERT(!CompilerState::Current().is_aot());
931}
932
934 if (!HasUses()) return nullptr;
935 // Remove AllocateContext if it is only used as an object in StoreField
936 // instructions.
937 if (env_use_list() != nullptr) return this;
938 for (auto use : input_uses()) {
939 auto store = use->instruction()->AsStoreField();
940 if ((store == nullptr) ||
941 (use->use_index() != StoreFieldInstr::kInstancePos)) {
942 return this;
943 }
944 }
945 // Cleanup all StoreField uses.
946 while (input_use_list() != nullptr) {
948 }
949 return nullptr;
950}
951
953 if (!HasUses()) return nullptr;
954 return this;
955}
956
957LocationSummary* AllocateClosureInstr::MakeLocationSummary(Zone* zone,
958 bool opt) const {
959 const intptr_t kNumInputs = InputCount();
960 const intptr_t kNumTemps = 0;
961 LocationSummary* locs = new (zone)
962 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
971 }
973 return locs;
974}
975
976void AllocateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
977 auto object_store = compiler->isolate_group()->object_store();
978 Code& stub = Code::ZoneHandle(compiler->zone());
980 if (is_generic()) {
981 stub = object_store->allocate_closure_ta_generic_stub();
982 } else {
983 stub = object_store->allocate_closure_ta_stub();
984 }
985 } else {
986 if (is_generic()) {
987 stub = object_store->allocate_closure_generic_stub();
988 } else {
989 stub = object_store->allocate_closure_stub();
990 }
991 }
992 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
993 locs(), deopt_id(), env());
994}
995
996LocationSummary* AllocateTypedDataInstr::MakeLocationSummary(Zone* zone,
997 bool opt) const {
998 const intptr_t kNumInputs = 1;
999 const intptr_t kNumTemps = 0;
1000 LocationSummary* locs = new (zone)
1001 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
1004 locs->set_out(
1006 return locs;
1007}
1008
1009void AllocateTypedDataInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1010 const Code& stub = Code::ZoneHandle(
1012 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
1013 locs(), deopt_id(), env());
1014}
1015
1016Representation StoreFieldInstr::RequiredInputRepresentation(
1017 intptr_t index) const {
1018 if (index == 0) {
1019 return slot_.has_untagged_instance() ? kUntagged : kTagged;
1020 }
1021 ASSERT_EQUAL(index, 1);
1022 return slot().representation();
1023}
1024
1025Instruction* StoreFieldInstr::Canonicalize(FlowGraph* flow_graph) {
1026 // Dart objects are allocated null-initialized, which means we can eliminate
1027 // all initializing stores that store the null value.
1028 // Context objects can be allocated uninitialized as a performance
1029 // optimization in JIT mode - however in AOT mode we always allocate them
1030 // null initialized.
1031 if (is_initialization_ && !slot().has_untagged_instance() &&
1032 slot().representation() == kTagged &&
1033 (!slot().IsContextSlot() ||
1034 !instance()->definition()->IsAllocateUninitializedContext()) &&
1035 value()->BindsToConstantNull()) {
1036 return nullptr;
1037 }
1038
1039 if (slot().kind() == Slot::Kind::kPointerBase_data &&
1041 const intptr_t cid = instance()->Type()->ToNullableCid();
1042 // Pointers and ExternalTypedData objects never contain inner pointers.
1043 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
1045 }
1046 }
1047 return this;
1048}
1049
1050bool GuardFieldClassInstr::AttributesEqual(const Instruction& other) const {
1051 return field().ptr() == other.AsGuardFieldClass()->field().ptr();
1052}
1053
1054bool GuardFieldLengthInstr::AttributesEqual(const Instruction& other) const {
1055 return field().ptr() == other.AsGuardFieldLength()->field().ptr();
1056}
1057
1058bool GuardFieldTypeInstr::AttributesEqual(const Instruction& other) const {
1059 return field().ptr() == other.AsGuardFieldType()->field().ptr();
1060}
1061
1063 // If all inputs needed to check instantiation are constant, instantiate the
1064 // sub and super type and remove the instruction if the subtype test succeeds.
1065 if (super_type()->BindsToConstant() && sub_type()->BindsToConstant() &&
1066 instantiator_type_arguments()->BindsToConstant() &&
1067 function_type_arguments()->BindsToConstant()) {
1068 auto Z = Thread::Current()->zone();
1069 const auto& constant_instantiator_type_args =
1071 ? TypeArguments::null_type_arguments()
1072 : TypeArguments::Cast(
1073 instantiator_type_arguments()->BoundConstant());
1074 const auto& constant_function_type_args =
1076 ? TypeArguments::null_type_arguments()
1077 : TypeArguments::Cast(function_type_arguments()->BoundConstant());
1078 auto& constant_sub_type = AbstractType::Handle(
1079 Z, AbstractType::Cast(sub_type()->BoundConstant()).ptr());
1080 auto& constant_super_type = AbstractType::Handle(
1081 Z, AbstractType::Cast(super_type()->BoundConstant()).ptr());
1082
1084 &constant_sub_type, &constant_super_type,
1085 constant_instantiator_type_args, constant_function_type_args)) {
1086 return nullptr;
1087 }
1088 }
1089 return this;
1090}
1091
1092bool StrictCompareInstr::AttributesEqual(const Instruction& other) const {
1093 auto const other_op = other.AsStrictCompare();
1094 ASSERT(other_op != nullptr);
1095 return ComparisonInstr::AttributesEqual(other) &&
1096 (needs_number_check() == other_op->needs_number_check());
1097}
1098
1100 return handle_surrogates_ ? kCaseInsensitiveCompareUTF16RuntimeEntry
1101 : kCaseInsensitiveCompareUCS2RuntimeEntry;
1102}
1103
1104bool MathMinMaxInstr::AttributesEqual(const Instruction& other) const {
1105 auto const other_op = other.AsMathMinMax();
1106 ASSERT(other_op != nullptr);
1107 return (op_kind() == other_op->op_kind()) &&
1108 (result_cid() == other_op->result_cid());
1109}
1110
1111bool BinaryIntegerOpInstr::AttributesEqual(const Instruction& other) const {
1112 ASSERT(other.tag() == tag());
1113 auto const other_op = other.AsBinaryIntegerOp();
1114 return (op_kind() == other_op->op_kind()) &&
1115 (can_overflow() == other_op->can_overflow()) &&
1116 (is_truncating() == other_op->is_truncating());
1117}
1118
1119bool LoadFieldInstr::AttributesEqual(const Instruction& other) const {
1120 auto const other_load = other.AsLoadField();
1121 ASSERT(other_load != nullptr);
1122 return &this->slot_ == &other_load->slot_;
1123}
1124
1125bool LoadStaticFieldInstr::AttributesEqual(const Instruction& other) const {
1126 ASSERT(AllowsCSE());
1127 return field().ptr() == other.AsLoadStaticField()->field().ptr();
1128}
1129
1132 : TemplateDefinition(source), value_(value), token_pos_(source.token_pos) {
1133 // Check that the value is not an incorrect Integer representation.
1134 ASSERT(!value.IsMint() || !Smi::IsValid(Mint::Cast(value).AsInt64Value()));
1135 // Check that clones of fields are not stored as constants.
1136 ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
1137 // Check that all non-Smi objects are heap allocated and in old space.
1138 ASSERT(value.IsSmi() || value.IsOld());
1139#if defined(DEBUG)
1140 // Generally, instances in the flow graph should be canonical. Smis, null
1141 // values, and sentinel values are canonical by construction and so we skip
1142 // them here.
1143 if (!value.IsNull() && !value.IsSmi() && value.IsInstance() &&
1144 !value.IsCanonical() && (value.ptr() != Object::sentinel().ptr())) {
1145 // Arrays in ConstantInstrs are usually immutable and canonicalized, but
1146 // the Arrays created as backing for ArgumentsDescriptors may not be
1147 // canonicalized for space reasons when inlined in the IL. However, they
1148 // are still immutable.
1149 //
1150 // IRRegExp compilation uses TypeData non-canonical values as "constants".
1151 // Specifically, the bit tables used for certain character classes are
1152 // represented as TypedData, and so those values are also neither immutable
1153 // (as there are no immutable TypedData values) nor canonical.
1154 //
1155 // LibraryPrefixes are also never canonicalized since their equality is
1156 // their identity.
1157 ASSERT(value.IsArray() || value.IsTypedData() || value.IsLibraryPrefix());
1158 }
1159#endif
1160}
1161
1162bool ConstantInstr::AttributesEqual(const Instruction& other) const {
1163 auto const other_constant = other.AsConstant();
1164 ASSERT(other_constant != nullptr);
1165 return (value().ptr() == other_constant->value().ptr() &&
1166 representation() == other_constant->representation());
1167}
1168
1170 Representation representation)
1172 representation_(representation),
1173 constant_address_(0) {
1174 if (representation_ == kUnboxedDouble) {
1175 ASSERT(value.IsDouble());
1177 }
1178}
1179
1180// Returns true if the value represents a constant.
1181bool Value::BindsToConstant() const {
1182 return definition()->OriginalDefinition()->IsConstant();
1183}
1184
1185bool Value::BindsToConstant(ConstantInstr** constant_defn) const {
1186 if (auto constant = definition()->OriginalDefinition()->AsConstant()) {
1187 *constant_defn = constant;
1188 return true;
1189 }
1190 return false;
1191}
1192
1193// Returns true if the value represents constant null.
1194bool Value::BindsToConstantNull() const {
1195 ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
1196 return (constant != nullptr) && constant->value().IsNull();
1197}
1198
1199const Object& Value::BoundConstant() const {
1200 ASSERT(BindsToConstant());
1201 ConstantInstr* constant = definition()->OriginalDefinition()->AsConstant();
1202 ASSERT(constant != nullptr);
1203 return constant->value();
1204}
1205
1206bool Value::BindsToSmiConstant() const {
1207 return BindsToConstant() && BoundConstant().IsSmi();
1208}
1209
1210intptr_t Value::BoundSmiConstant() const {
1211 ASSERT(BindsToSmiConstant());
1212 return Smi::Cast(BoundConstant()).Value();
1213}
1214
1216 intptr_t osr_id)
1217 : GraphEntryInstr(parsed_function,
1218 osr_id,
1219 CompilerState::Current().GetNextDeoptId()) {}
1220
1222 intptr_t osr_id,
1223 intptr_t deopt_id)
1226 deopt_id,
1227 /*stack_depth*/ 0),
1228 parsed_function_(parsed_function),
1229 catch_entries_(),
1230 indirect_entries_(),
1231 osr_id_(osr_id),
1232 entry_count_(0),
1233 spill_slot_count_(0),
1234 fixed_slot_count_(0) {}
1235
1238 for (intptr_t i = 0; i < initial_definitions()->length(); ++i) {
1239 ConstantInstr* defn = (*initial_definitions())[i]->AsConstant();
1240 if (defn != nullptr && defn->value().IsNull()) return defn;
1241 }
1242 UNREACHABLE();
1243 return nullptr;
1244}
1245
1247 // TODO(fschneider): Sort the catch entries by catch_try_index to avoid
1248 // searching.
1249 for (intptr_t i = 0; i < catch_entries_.length(); ++i) {
1250 if (catch_entries_[i]->catch_try_index() == index) return catch_entries_[i];
1251 }
1252 return nullptr;
1253}
1254
1256 return osr_id_ != Compiler::kNoOSRDeoptId;
1257}
1258
1259// ==== Support for visiting flow graphs.
1260
1261#define DEFINE_ACCEPT(ShortName, Attrs) \
1262 void ShortName##Instr::Accept(InstructionVisitor* visitor) { \
1263 visitor->Visit##ShortName(this); \
1264 }
1265
1267
1268#undef DEFINE_ACCEPT
1269
1271 intptr_t use_index = 0;
1272 for (Environment::DeepIterator it(deopt_env); !it.Done(); it.Advance()) {
1273 Value* use = it.CurrentValue();
1274 use->set_instruction(this);
1275 use->set_use_index(use_index++);
1276 }
1277 env_ = deopt_env;
1278}
1279
1281 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1282 it.CurrentValue()->RemoveFromUseList();
1283 }
1284 env_ = nullptr;
1285}
1286
1288 Definition* replacement) {
1289 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1290 Value* use = it.CurrentValue();
1291 if (use->definition() == current) {
1292 use->RemoveFromUseList();
1293 use->set_definition(replacement);
1294 replacement->AddEnvUse(use);
1295 }
1296 }
1297}
1298
1300 ASSERT(!IsBlockEntry());
1301 ASSERT(!IsBranch());
1302 ASSERT(!IsThrow());
1303 ASSERT(!IsReturnBase());
1304 ASSERT(!IsReThrow());
1305 ASSERT(!IsGoto());
1306 ASSERT(previous() != nullptr);
1307 // We cannot assert that the instruction, if it is a definition, has no
1308 // uses. This function is used to remove instructions from the graph and
1309 // reinsert them elsewhere (e.g., hoisting).
1310 Instruction* prev_instr = previous();
1311 Instruction* next_instr = next();
1312 ASSERT(next_instr != nullptr);
1313 ASSERT(!next_instr->IsBlockEntry());
1314 prev_instr->LinkTo(next_instr);
1316 // Reset the successor and previous instruction to indicate that the
1317 // instruction is removed from the graph.
1318 set_previous(nullptr);
1319 set_next(nullptr);
1320 return return_previous ? prev_instr : next_instr;
1321}
1322
1324 ASSERT(previous_ == nullptr);
1325 ASSERT(next_ == nullptr);
1326 previous_ = prev;
1327 next_ = prev->next_;
1328 next_->previous_ = this;
1329 previous_->next_ = this;
1330
1331 // Update def-use chains whenever instructions are added to the graph
1332 // after initial graph construction.
1333 for (intptr_t i = InputCount() - 1; i >= 0; --i) {
1334 Value* input = InputAt(i);
1335 input->definition()->AddInputUse(input);
1336 }
1337}
1338
1340 LinkTo(tail);
1341 // Update def-use chains whenever instructions are added to the graph
1342 // after initial graph construction.
1343 for (intptr_t i = tail->InputCount() - 1; i >= 0; --i) {
1344 Value* input = tail->InputAt(i);
1345 input->definition()->AddInputUse(input);
1346 }
1347 return tail;
1348}
1349
1351 // TODO(fschneider): Implement a faster way to get the block of an
1352 // instruction.
1354 while ((result != nullptr) && !result->IsBlockEntry()) {
1355 result = result->previous();
1356 }
1357 // InlineExitCollector::RemoveUnreachableExits may call
1358 // Instruction::GetBlock on instructions which are not properly linked
1359 // to the flow graph (as collected exits may belong to unreachable
1360 // fragments), so this code should gracefully handle the absence of
1361 // BlockEntry.
1362 return (result != nullptr) ? result->AsBlockEntry() : nullptr;
1363}
1364
1366 current_ = current_->RemoveFromGraph(true); // Set current_ to previous.
1367}
1368
1370 current_ = current_->RemoveFromGraph(false); // Set current_ to next.
1371}
1372
1373// Default implementation of visiting basic blocks. Can be overridden.
1375 ASSERT(current_iterator_ == nullptr);
1376 for (intptr_t i = 0; i < block_order_->length(); ++i) {
1377 BlockEntryInstr* entry = (*block_order_)[i];
1378 entry->Accept(this);
1380 current_iterator_ = &it;
1381 for (; !it.Done(); it.Advance()) {
1382 it.Current()->Accept(this);
1383 }
1384 current_iterator_ = nullptr;
1385 }
1386}
1387
1388bool Value::NeedsWriteBarrier() {
1389 Value* value = this;
1390 do {
1391 if (value->Type()->IsNull() ||
1392 (value->Type()->ToNullableCid() == kSmiCid) ||
1393 (value->Type()->ToNullableCid() == kBoolCid)) {
1394 return false;
1395 }
1396
1397 // Strictly speaking, the incremental barrier can only be skipped for
1398 // immediate objects (Smis) or permanent objects (vm-isolate heap or
1399 // image pages). Here we choose to skip the barrier for any constant on
1400 // the assumption it will remain reachable through the object pool.
1401 if (value->BindsToConstant()) {
1402 return false;
1403 }
1404
1405 // Follow the chain of redefinitions as redefined value could have a more
1406 // accurate type (for example, AssertAssignable of Smi to a generic T).
1407 value = value->definition()->RedefinedValue();
1408 } while (value != nullptr);
1409
1410 return true;
1411}
1412
1414 // Require the predecessors to be sorted by block_id to make managing
1415 // their corresponding phi inputs simpler.
1416 intptr_t pred_id = predecessor->block_id();
1417 intptr_t index = 0;
1418 while ((index < predecessors_.length()) &&
1419 (predecessors_[index]->block_id() < pred_id)) {
1420 ++index;
1421 }
1422#if defined(DEBUG)
1423 for (intptr_t i = index; i < predecessors_.length(); ++i) {
1424 ASSERT(predecessors_[i]->block_id() != pred_id);
1425 }
1426#endif
1427 predecessors_.InsertAt(index, predecessor);
1428}
1429
1431 for (intptr_t i = 0; i < predecessors_.length(); ++i) {
1432 if (predecessors_[i] == pred) return i;
1433 }
1434 return -1;
1435}
1436
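// Pushes `value` onto the front of the intrusive, doubly-linked use list
// rooted at *list (used for both the input-use and the environment-use lists
// of a definition).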
1437void Value::AddToList(Value* value, Value** list) {
1438 ASSERT(value->next_use() == nullptr);
1439 ASSERT(value->previous_use() == nullptr);
1440 Value* next = *list;
1441 ASSERT(value != next);
1442 *list = value;
1443 value->set_next_use(next);
1444 value->set_previous_use(nullptr);
1445 if (next != nullptr) next->set_previous_use(value);
1446}
1447
1449 Definition* def = definition();
1450 Value* next = next_use();
1451 if (this == def->input_use_list()) {
1453 if (next != nullptr) next->set_previous_use(nullptr);
1454 } else if (this == def->env_use_list()) {
1455 def->set_env_use_list(next);
1456 if (next != nullptr) next->set_previous_use(nullptr);
1457 } else if (Value* prev = previous_use()) {
1458 prev->set_next_use(next);
1459 if (next != nullptr) next->set_previous_use(prev);
1460 }
1461
1462 set_previous_use(nullptr);
1463 set_next_use(nullptr);
1464}
1465
1466// True if the definition has a single input use and is used only in
1467// environments at the same instruction as that input use.
1469 if (!HasOnlyInputUse(use)) {
1470 return false;
1471 }
1472
1473 Instruction* target = use->instruction();
1474 for (Value::Iterator it(env_use_list()); !it.Done(); it.Advance()) {
1475 if (it.Current()->instruction() != target) return false;
1476 }
1477 return true;
1478}
1479
1481 return (input_use_list() == use) && (use->next_use() == nullptr);
1482}
1483
1485 ASSERT(other != nullptr);
1486 ASSERT(this != other);
1487
1488 Value* current = nullptr;
1490 if (next != nullptr) {
1491 // Change all the definitions.
1492 while (next != nullptr) {
1493 current = next;
1494 current->set_definition(other);
1495 current->RefineReachingType(other->Type());
1496 next = current->next_use();
1497 }
1498
1499 // Concatenate the lists.
1500 next = other->input_use_list();
1501 current->set_next_use(next);
1502 if (next != nullptr) next->set_previous_use(current);
1504 set_input_use_list(nullptr);
1505 }
1506
1507 // Repeat for environment uses.
1508 current = nullptr;
1509 next = env_use_list();
1510 if (next != nullptr) {
1511 while (next != nullptr) {
1512 current = next;
1513 current->set_definition(other);
1514 current->RefineReachingType(other->Type());
1515 next = current->next_use();
1516 }
1517 next = other->env_use_list();
1518 current->set_next_use(next);
1519 if (next != nullptr) next->set_previous_use(current);
1521 set_env_use_list(nullptr);
1522 }
1523}
1524
1526 for (intptr_t i = InputCount() - 1; i >= 0; --i) {
1528 }
1529 for (Environment::DeepIterator it(env()); !it.Done(); it.Advance()) {
1530 it.CurrentValue()->RemoveFromUseList();
1531 }
1532}
1533
1535 // Some calls (e.g. closure calls) have more inputs than actual arguments.
1536 // Those extra inputs will be consumed from the stack before the call.
1537 const intptr_t after_args_input_count = env()->LazyDeoptPruneCount();
1538 MoveArgumentsArray* move_arguments = GetMoveArguments();
1539 ASSERT(move_arguments != nullptr);
1540 const intptr_t arg_count = ArgumentCount();
1541 ASSERT((arg_count + after_args_input_count) <= env()->Length());
1542 const intptr_t env_base =
1543 env()->Length() - arg_count - after_args_input_count;
1544 for (intptr_t i = 0; i < arg_count; ++i) {
1545 env()->ValueAt(env_base + i)->BindToEnvironment(move_arguments->At(i));
1546 }
1547}
1548
1550 Definition* call,
1551 Definition* result) {
1552 ASSERT(call->env() != nullptr);
1553 deopt_id_ = DeoptId::ToDeoptAfter(call->deopt_id_);
1554 call->env()->DeepCopyAfterTo(
1555 flow_graph->zone(), this, call->ArgumentCount(),
1556 flow_graph->constant_dead(),
1557 result != nullptr ? result : flow_graph->constant_dead());
1558}
1559
1561 ASSERT(other->env() != nullptr);
1562 CopyDeoptIdFrom(*other);
1563 other->env()->DeepCopyTo(zone, this);
1564}
1565
1567 ASSERT(const_cast<Instruction*>(this)->GetBlock() == block);
1568 return !MayHaveVisibleEffect() && !CanDeoptimize() &&
1569 this != block->last_instruction();
1570}
1571
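// The check below determines whether `dom` dominates this instruction:
// same-block cases are resolved by scanning forward from `dom`, and
// cross-block cases defer to the block dominator tree.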
1572bool Instruction::IsDominatedBy(Instruction* dom) {
1573 BlockEntryInstr* block = GetBlock();
1574 BlockEntryInstr* dom_block = dom->GetBlock();
1575
1576 if (dom->IsPhi()) {
1577 dom = dom_block;
1578 }
1579
1580 if (block == dom_block) {
1581 if ((block == dom) || (this == block->last_instruction())) {
1582 return true;
1583 }
1584
1585 if (IsPhi()) {
1586 return false;
1587 }
1588
1589 for (Instruction* curr = dom->next(); curr != nullptr;
1590 curr = curr->next()) {
1591 if (curr == this) return true;
1592 }
1593
1594 return false;
1595 }
1596
1597 return dom_block->Dominates(block);
1598}
1599
1601 for (intptr_t i = 0; i < InputCount(); i++) {
1602 Definition* input = InputAt(i)->definition();
1603 const Representation input_representation = RequiredInputRepresentation(i);
1604 if (input_representation != kNoRepresentation &&
1605 input_representation != input->representation()) {
1606 return true;
1607 }
1608 }
1609
1610 return false;
1611}
1612
1613const intptr_t Instruction::kInstructionAttrs[Instruction::kNumInstructions] = {
1614#define INSTR_ATTRS(type, attrs) InstrAttrs::attrs,
1616#undef INSTR_ATTRS
1617};
1618
1620 return (kInstructionAttrs[tag()] & InstrAttrs::kNoGC) == 0;
1621}
1622
1624 Definition* replacement_for_uses,
1625 ForwardInstructionIterator* iterator) {
1626 // Record replacement's input uses.
1627 for (intptr_t i = replacement->InputCount() - 1; i >= 0; --i) {
1628 Value* input = replacement->InputAt(i);
1629 input->definition()->AddInputUse(input);
1630 }
1631 // Take replacement's environment from this definition.
1632 ASSERT(replacement->env() == nullptr);
1633 replacement->SetEnvironment(env());
1634 ClearEnv();
1635 // Replace all uses of this definition with replacement_for_uses.
1636 ReplaceUsesWith(replacement_for_uses);
1637
1638 // Finally replace this one with the replacement instruction in the graph.
1639 previous()->LinkTo(replacement);
1640 if ((iterator != nullptr) && (this == iterator->Current())) {
1641 // Remove through the iterator.
1642 replacement->LinkTo(this);
1643 iterator->RemoveCurrentFromGraph();
1644 } else {
1645 replacement->LinkTo(next());
1646 // Remove this definition's input uses.
1648 }
1649 set_previous(nullptr);
1650 set_next(nullptr);
1651}
1652
1654 ForwardInstructionIterator* iterator) {
1655 // Reuse this instruction's SSA name for other.
1656 ASSERT(!other->HasSSATemp());
1657 if (HasSSATemp()) {
1659 }
1660 ReplaceWithResult(other, other, iterator);
1661}
1662
1664 for (intptr_t i = new_comparison->InputCount() - 1; i >= 0; --i) {
1665 Value* input = new_comparison->InputAt(i);
1666 input->definition()->AddInputUse(input);
1667 input->set_instruction(this);
1668 }
1669 // There should be no need to copy or unuse an environment.
1670 ASSERT(comparison()->env() == nullptr);
1671 ASSERT(new_comparison->env() == nullptr);
1672 // Remove the current comparison's input uses.
1674 ASSERT(!new_comparison->HasUses());
1675 comparison_ = new_comparison;
1676}
1677
1678// ==== Postorder graph traversal.
1679static bool IsMarked(BlockEntryInstr* block,
1681 // Detect that a block has been visited as part of the current
1682 // DiscoverBlocks (we can call DiscoverBlocks multiple times). The block
1683 // will be 'marked' by (1) having a preorder number in the range of the
1684 // preorder array and (2) being in the preorder array at that index.
1685 intptr_t i = block->preorder_number();
1686 return (i >= 0) && (i < preorder->length()) && ((*preorder)[i] == block);
1687}
1688
1689// Base class implementation used for JoinEntry and TargetEntry.
1692 GrowableArray<intptr_t>* parent) {
1693 // If this block has a predecessor (i.e., is not the graph entry) we can
1694 // assume the preorder array is non-empty.
1695 ASSERT((predecessor == nullptr) || !preorder->is_empty());
1696 // Blocks with a single predecessor cannot have been reached before.
1697 ASSERT(IsJoinEntry() || !IsMarked(this, preorder));
1698
1699 // 1. If the block has already been reached, add current_block as a
1700 // basic-block predecessor and we are done.
1701 if (IsMarked(this, preorder)) {
1702 ASSERT(predecessor != nullptr);
1703 AddPredecessor(predecessor);
1704 return false;
1705 }
1706
1707 // 2. Otherwise, clear the predecessors which might have been computed on
1708 // some earlier call to DiscoverBlocks and record this predecessor.
1710 if (predecessor != nullptr) AddPredecessor(predecessor);
1711
1712 // 3. The predecessor is the spanning-tree parent. The graph entry has no
1713 // parent, indicated by -1.
1714 intptr_t parent_number =
1715 (predecessor == nullptr) ? -1 : predecessor->preorder_number();
1716 parent->Add(parent_number);
1717
1718 // 4. Assign the preorder number and add the block entry to the list.
1719 set_preorder_number(preorder->length());
1720 preorder->Add(this);
1721
1722 // The preorder and parent arrays are indexed by
1723 // preorder block number, so they should stay in lockstep.
1724 ASSERT(preorder->length() == parent->length());
1725
1726 // 5. Iterate straight-line successors to record assigned variables and
1727 // find the last instruction in the block. The graph entry block consists
1728 // of only the entry instruction, so that is the last instruction in the
1729 // block.
1730 Instruction* last = this;
1731 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1732 last = it.Current();
1733 }
1735 if (last->IsGoto()) last->AsGoto()->set_block(this);
1736
1737 return true;
1738}
1739
1740void GraphEntryInstr::RelinkToOsrEntry(Zone* zone, intptr_t max_block_id) {
1741 ASSERT(osr_id_ != Compiler::kNoOSRDeoptId);
1742 BitVector* block_marks = new (zone) BitVector(zone, max_block_id + 1);
1743 bool found = FindOsrEntryAndRelink(this, /*parent=*/nullptr, block_marks);
1744 ASSERT(found);
1745}
1746
1748 Instruction* parent,
1749 BitVector* block_marks) {
1750 const intptr_t osr_id = graph_entry->osr_id();
1751
1752 // Search for the instruction with the OSR id. Use a depth first search
1753 // because basic blocks have not been discovered yet. Prune unreachable
1754 // blocks by replacing the normal entry with a jump to the block
1755 // containing the OSR entry point.
1756
1757 // Do not visit blocks more than once.
1758 if (block_marks->Contains(block_id())) return false;
1759 block_marks->Add(block_id());
1760
1761 // Search this block for the OSR id.
1762 Instruction* instr = this;
1763 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1764 instr = it.Current();
1765 if (instr->GetDeoptId() == osr_id) {
1766 // Sanity check that we found a stack check instruction.
1767 ASSERT(instr->IsCheckStackOverflow());
1768 // Loop stack check checks are always in join blocks so that they can
1769 // be the target of a goto.
1770 ASSERT(IsJoinEntry());
1771 // The instruction should be the first instruction in the block so
1772 // we can simply jump to the beginning of the block.
1773 ASSERT(instr->previous() == this);
1774
1775 ASSERT(stack_depth() == instr->AsCheckStackOverflow()->stack_depth());
1776 auto normal_entry = graph_entry->normal_entry();
1777 auto osr_entry = new OsrEntryInstr(
1778 graph_entry, normal_entry->block_id(), normal_entry->try_index(),
1779 normal_entry->deopt_id(), stack_depth());
1780
1781 auto goto_join = new GotoInstr(AsJoinEntry(),
1782 CompilerState::Current().GetNextDeoptId());
1783 ASSERT(parent != nullptr);
1784 goto_join->CopyDeoptIdFrom(*parent);
1785 osr_entry->LinkTo(goto_join);
1786
1787 // Remove normal function entries & add osr entry.
1788 graph_entry->set_normal_entry(nullptr);
1789 graph_entry->set_unchecked_entry(nullptr);
1790 graph_entry->set_osr_entry(osr_entry);
1791
1792 return true;
1793 }
1794 }
1795
1796 // Recursively search the successors.
1797 for (intptr_t i = instr->SuccessorCount() - 1; i >= 0; --i) {
1798 if (instr->SuccessorAt(i)->FindOsrEntryAndRelink(graph_entry, instr,
1799 block_marks)) {
1800 return true;
1801 }
1802 }
1803 return false;
1804}
1805
1807 // TODO(fschneider): Make this faster by e.g. storing dominators for each
1808 // block while computing the dominator tree.
1809 ASSERT(other != nullptr);
1810 BlockEntryInstr* current = other;
1811 while (current != nullptr && current != this) {
1812 current = current->dominator();
1813 }
1814 return current == this;
1815}
1816
1819 if ((last->SuccessorCount() == 1) && (last->SuccessorAt(0) == this)) {
1820 return dominator();
1821 }
1822 return nullptr;
1823}
1824
1826 return loop_info_ != nullptr && loop_info_->header() == this;
1827}
1828
1830 return loop_info_ == nullptr ? 0 : loop_info_->NestingDepth();
1831}
1832
1833// Helper to mutate the graph during inlining. This block should be
1834// replaced with new_block as a predecessor of all of this block's
1835// successors. For each successor, the predecessors will be reordered
1836// to preserve block-order sorting of the predecessors as well as the
1837// phis if the successor is a join.
1839 // Set the last instruction of the new block to that of the old block.
1840 Instruction* last = last_instruction();
1841 new_block->set_last_instruction(last);
1842 // For each successor, update the predecessors.
1843 for (intptr_t sidx = 0; sidx < last->SuccessorCount(); ++sidx) {
1844 // If the successor is a target, update its predecessor.
1845 TargetEntryInstr* target = last->SuccessorAt(sidx)->AsTargetEntry();
1846 if (target != nullptr) {
1847 target->predecessor_ = new_block;
1848 continue;
1849 }
1850 // If the successor is a join, update each predecessor and the phis.
1851 JoinEntryInstr* join = last->SuccessorAt(sidx)->AsJoinEntry();
1852 ASSERT(join != nullptr);
1853 // Find the old predecessor index.
1854 intptr_t old_index = join->IndexOfPredecessor(this);
1855 intptr_t pred_count = join->PredecessorCount();
1856 ASSERT(old_index >= 0);
1857 ASSERT(old_index < pred_count);
1858 // Find the new predecessor index while reordering the predecessors.
1859 intptr_t new_id = new_block->block_id();
1860 intptr_t new_index = old_index;
1861 if (block_id() < new_id) {
1862 // Search upwards, bubbling down intermediate predecessors.
1863 for (; new_index < pred_count - 1; ++new_index) {
1864 if (join->predecessors_[new_index + 1]->block_id() > new_id) break;
1865 join->predecessors_[new_index] = join->predecessors_[new_index + 1];
1866 }
1867 } else {
1868 // Search downwards, bubbling up intermediate predecessors.
1869 for (; new_index > 0; --new_index) {
1870 if (join->predecessors_[new_index - 1]->block_id() < new_id) break;
1871 join->predecessors_[new_index] = join->predecessors_[new_index - 1];
1872 }
1873 }
1874 join->predecessors_[new_index] = new_block;
1875 // If the new and old predecessor index match there is nothing to update.
1876 if ((join->phis() == nullptr) || (old_index == new_index)) return;
1877 // Otherwise, reorder the predecessor uses in each phi.
1878 for (PhiIterator it(join); !it.Done(); it.Advance()) {
1879 PhiInstr* phi = it.Current();
1880 ASSERT(phi != nullptr);
1881 ASSERT(pred_count == phi->InputCount());
1882 // Save the predecessor use.
1883 Value* pred_use = phi->InputAt(old_index);
1884 // Move uses between old and new.
1885 intptr_t step = (old_index < new_index) ? 1 : -1;
1886 for (intptr_t use_idx = old_index; use_idx != new_index;
1887 use_idx += step) {
1888 phi->SetInputAt(use_idx, phi->InputAt(use_idx + step));
1889 }
1890 // Write the predecessor use.
1891 phi->SetInputAt(new_index, pred_use);
1892 }
1893 }
1894}
1895
1897 JoinEntryInstr* join = this->AsJoinEntry();
1898 if (join != nullptr) {
1899 for (PhiIterator it(join); !it.Done(); it.Advance()) {
1900 it.Current()->UnuseAllInputs();
1901 }
1902 }
1903 UnuseAllInputs();
1904 for (ForwardInstructionIterator it(this); !it.Done(); it.Advance()) {
1905 it.Current()->UnuseAllInputs();
1906 }
1907}
1908
1909PhiInstr* JoinEntryInstr::InsertPhi(intptr_t var_index, intptr_t var_count) {
1910 // Lazily initialize the array of phis.
1911 // Currently, phis are stored in a sparse array that holds the phi
1912 // for variable with index i at position i.
1913 // TODO(fschneider): Store phis in a more compact way.
1914 if (phis_ == nullptr) {
1915 phis_ = new ZoneGrowableArray<PhiInstr*>(var_count);
1916 for (intptr_t i = 0; i < var_count; i++) {
1917 phis_->Add(nullptr);
1918 }
1919 }
1920 ASSERT((*phis_)[var_index] == nullptr);
1921 return (*phis_)[var_index] = new PhiInstr(this, PredecessorCount());
1922}
1923
1925 // Lazily initialize the array of phis.
1926 if (phis_ == nullptr) {
1927 phis_ = new ZoneGrowableArray<PhiInstr*>(1);
1928 }
1929 phis_->Add(phi);
1930}
1931
1933 ASSERT(phis_ != nullptr);
1934 for (intptr_t index = 0; index < phis_->length(); ++index) {
1935 if (phi == (*phis_)[index]) {
1936 (*phis_)[index] = phis_->Last();
1937 phis_->RemoveLast();
1938 return;
1939 }
1940 }
1941}
1942
1944 if (phis_ == nullptr) return;
1945
1946 intptr_t to_index = 0;
1947 for (intptr_t from_index = 0; from_index < phis_->length(); ++from_index) {
1948 PhiInstr* phi = (*phis_)[from_index];
1949 if (phi != nullptr) {
1950 if (phi->is_alive()) {
1951 (*phis_)[to_index++] = phi;
1952 for (intptr_t i = phi->InputCount() - 1; i >= 0; --i) {
1953 Value* input = phi->InputAt(i);
1954 input->definition()->AddInputUse(input);
1955 }
1956 } else {
1957 phi->ReplaceUsesWith(replacement);
1958 }
1959 }
1960 }
1961 if (to_index == 0) {
1962 phis_ = nullptr;
1963 } else {
1964 phis_->TruncateTo(to_index);
1965 }
1966}
1967
1969 return 0;
1970}
1971
1973 // Called only if index is in range. Only control-transfer instructions
1974 // can have non-zero successor counts and they override this function.
1975 UNREACHABLE();
1976 return nullptr;
1977}
1978
1980 return (normal_entry() == nullptr ? 0 : 1) +
1981 (unchecked_entry() == nullptr ? 0 : 1) +
1982 (osr_entry() == nullptr ? 0 : 1) + catch_entries_.length();
1983}
1984
1986 if (normal_entry() != nullptr) {
1987 if (index == 0) return normal_entry_;
1988 index--;
1989 }
1990 if (unchecked_entry() != nullptr) {
1991 if (index == 0) return unchecked_entry();
1992 index--;
1993 }
1994 if (osr_entry() != nullptr) {
1995 if (index == 0) return osr_entry();
1996 index--;
1997 }
1998 return catch_entries_[index];
1999}
2000
2002 return 2;
2003}
2004
2006 if (index == 0) return true_successor_;
2007 if (index == 1) return false_successor_;
2008 UNREACHABLE();
2009 return nullptr;
2010}
2011
2013 return 1;
2014}
2015
2017 ASSERT(index == 0);
2018 return successor();
2019}
2020
2022 LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId()));
2023}
2024
2026 return (to() == kUnboxedInt32) && !is_truncating() &&
2027 !RangeUtils::Fits(value()->definition()->range(),
2029}
2030
2032 if (SpeculativeModeOfInputs() == kNotSpeculative) {
2033 return false;
2034 }
2035 if (!value()->Type()->IsInt()) {
2036 return true;
2037 }
2038 if (representation() == kUnboxedInt64 || is_truncating()) {
2039 return false;
2040 }
2041 const intptr_t rep_bitsize =
2043 if (value()->Type()->ToCid() == kSmiCid &&
2044 compiler::target::kSmiBits <= rep_bitsize) {
2045 return false;
2046 }
2047 return !RangeUtils::IsWithin(value()->definition()->range(),
2050}
2051
2053 switch (op_kind()) {
2054 case Token::kBIT_AND:
2055 case Token::kBIT_OR:
2056 case Token::kBIT_XOR:
2057 return false;
2058
2059 case Token::kSHR:
2060 return false;
2061
2062 case Token::kUSHR:
2063 case Token::kSHL:
2064 // Currently only shifts by an in-range constant are supported, see
2065 // BinaryInt32OpInstr::IsSupported.
2066 return can_overflow();
2067
2068 case Token::kMOD: {
2069 UNREACHABLE();
2070 }
2071
2072 default:
2073 return can_overflow();
2074 }
2075}
2076
2078 switch (op_kind()) {
2079 case Token::kBIT_AND:
2080 case Token::kBIT_OR:
2081 case Token::kBIT_XOR:
2082 return false;
2083
2084 case Token::kSHR:
2086
2087 case Token::kUSHR:
2088 case Token::kSHL:
2090
2091 case Token::kMOD:
2093
2094 case Token::kTRUNCDIV:
2097
2098 default:
2099 return can_overflow();
2100 }
2101}
2102
2106
2108 if (right()->BindsToConstant()) {
2109 const auto& constant = right()->BoundConstant();
2110 if (!constant.IsInteger()) return false;
2111 return Integer::Cast(constant).AsInt64Value() != 0;
2112 }
2113 return !RangeUtils::CanBeZero(right()->definition()->range());
2114}
2115
2117 if (!right()->BindsToConstant()) return false;
2118 const Object& constant = right()->BoundConstant();
2119 if (!constant.IsSmi()) return false;
2120 const intptr_t int_value = Smi::Cast(constant).Value();
2121 ASSERT(int_value != kIntptrMin);
2122 return Utils::IsPowerOfTwo(Utils::Abs(int_value));
2123}
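
// Illustrative sketch (not part of this file): the helper above holds when the
// magnitude of the constant has exactly one bit set. A stand-alone equivalent
// of the Utils::IsPowerOfTwo(Utils::Abs(...)) check, assuming (as the ASSERT
// does) that the value is not kIntptrMin; the function name is hypothetical:
#include <cstdint>

static bool IsPowerOfTwoMagnitude(int64_t v) {
  const int64_t magnitude = (v < 0) ? -v : v;
  // A positive power of two has a single set bit, so clearing the lowest set
  // bit must leave zero.
  return (magnitude > 0) && ((magnitude & (magnitude - 1)) == 0);
}
// e.g. IsPowerOfTwoMagnitude(-8) is true, IsPowerOfTwoMagnitude(12) is false.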
2124
2126 switch (r) {
2127 case kTagged:
2128 return compiler::target::kSmiBits + 1;
2129 case kUnboxedInt32:
2130 case kUnboxedUint32:
2131 return 32;
2132 case kUnboxedInt64:
2133 return 64;
2134 default:
2135 UNREACHABLE();
2136 return 0;
2137 }
2138}
2139
2141 return static_cast<int64_t>(static_cast<uint64_t>(-1) >>
2142 (64 - RepresentationBits(r)));
2143}
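
// Illustrative sketch (not part of this file): RepresentationMask builds an
// all-ones value covering exactly RepresentationBits(r) low bits by shifting a
// fully set 64-bit word right. The same trick as a stand-alone helper (the
// name is hypothetical):
#include <cstdint>

static int64_t AllOnesMask(int bits) {  // Requires 0 < bits <= 64.
  return static_cast<int64_t>(~uint64_t{0} >> (64 - bits));
}
// AllOnesMask(32) == 0xFFFFFFFF and AllOnesMask(64) == -1 (all 64 bits set).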
2144
2146 Value* left,
2147 Value* right) {
2148 int64_t left_value;
2149 if (!Evaluator::ToIntegerConstant(left, &left_value)) {
2150 return nullptr;
2151 }
2152
2153 // Can't apply the 0.0 * x -> 0.0 equivalence to a double operation because
2154 // 0.0 * NaN is NaN, not 0.0.
2155 // Can't apply 0.0 + x -> x to doubles because 0.0 + (-0.0) is 0.0, not -0.0.
2156 switch (op) {
2157 case Token::kMUL:
2158 if (left_value == 1) {
2159 if (right->definition()->representation() != kUnboxedDouble) {
2160 // Can't yet apply the equivalence because representation selection
2161 // has not run yet. We need it to guarantee that the right value is
2162 // correctly coerced to double. The second canonicalization pass
2163 // will apply this equivalence.
2164 return nullptr;
2165 } else {
2166 return right->definition();
2167 }
2168 }
2169 break;
2170 default:
2171 break;
2172 }
2173
2174 return nullptr;
2175}
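
// Illustrative sketch (not part of this file): a stand-alone check of why only
// the 1.0 * x identity is applied above while 0.0 * x -> 0.0 and 0.0 + x -> x
// are rejected for doubles (the function name is hypothetical):
#include <cassert>
#include <cmath>
#include <limits>

static void DoubleIdentityPitfalls() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  assert(std::isnan(0.0 * nan));          // 0.0 * x is not always 0.0.
  assert(!std::signbit(0.0 + (-0.0)));    // 0.0 + x drops the sign of -0.0.
  assert(1.0 * 2.5 == 2.5);               // 1.0 * x is the identity being folded.
}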
2176
2178 if (!HasUses()) return nullptr;
2179 if (value()->definition()->IsFloatToDouble()) {
2180 // F2D(D2F(v)) == v.
2181 return value()->definition()->AsFloatToDouble()->value()->definition();
2182 }
2183 if (value()->BindsToConstant()) {
2184 double narrowed_val =
2185 static_cast<float>(Double::Cast(value()->BoundConstant()).value());
2186 return flow_graph->GetConstant(
2187 Double::ZoneHandle(Double::NewCanonical(narrowed_val)), kUnboxedFloat);
2188 }
2189 return this;
2190}
2191
2193 if (!HasUses()) return nullptr;
2194 if (value()->BindsToConstant()) {
2195 return flow_graph->GetConstant(value()->BoundConstant(), kUnboxedDouble);
2196 }
2197 return this;
2198}
2199
2201 if (!HasUses()) return nullptr;
2202
2203 Definition* result = nullptr;
2204
2206 if (result != nullptr) {
2207 return result;
2208 }
2209
2211 if (result != nullptr) {
2212 return result;
2213 }
2214
2215 if ((op_kind() == Token::kMUL) &&
2216 (left()->definition() == right()->definition())) {
2218 Token::kSQUARE, new Value(left()->definition()), DeoptimizationTarget(),
2219 speculative_mode_, representation());
2220 flow_graph->InsertBefore(this, square, env(), FlowGraph::kValue);
2221 return square;
2222 }
2223
2224 return this;
2225}
2226
2228 return HasUses() ? this : nullptr;
2229}
2230
2232 switch (op) {
2233 case Token::kMUL:
2235 case Token::kADD:
2237 case Token::kBIT_AND:
2239 case Token::kBIT_OR:
2241 case Token::kBIT_XOR:
2242 return true;
2243 default:
2244 return false;
2245 }
2246}
2247
2249 Token::Kind op_kind,
2250 Value* value,
2251 intptr_t deopt_id,
2252 SpeculativeMode speculative_mode,
2253 Range* range) {
2254 UnaryIntegerOpInstr* op = nullptr;
2255 switch (representation) {
2256 case kTagged:
2257 op = new UnarySmiOpInstr(op_kind, value, deopt_id);
2258 break;
2259 case kUnboxedInt32:
2260 return nullptr;
2261 case kUnboxedUint32:
2262 op = new UnaryUint32OpInstr(op_kind, value, deopt_id);
2263 break;
2264 case kUnboxedInt64:
2265 op = new UnaryInt64OpInstr(op_kind, value, deopt_id, speculative_mode);
2266 break;
2267 default:
2268 UNREACHABLE();
2269 return nullptr;
2270 }
2271
2272 if (op == nullptr) {
2273 return op;
2274 }
2275
2276 if (!Range::IsUnknown(range)) {
2277 op->set_range(*range);
2278 }
2279
2280 ASSERT(op->representation() == representation);
2281 return op;
2282}
2283
2285 Representation representation,
2286 Token::Kind op_kind,
2287 Value* left,
2288 Value* right,
2289 intptr_t deopt_id,
2290 SpeculativeMode speculative_mode) {
2291 BinaryIntegerOpInstr* op = nullptr;
2292 Range* right_range = nullptr;
2293 switch (op_kind) {
2294 case Token::kMOD:
2295 case Token::kTRUNCDIV:
2296 if (representation != kTagged) break;
2298 case Token::kSHL:
2299 case Token::kSHR:
2300 case Token::kUSHR:
2301 if (auto const const_def = right->definition()->AsConstant()) {
2302 right_range = new Range();
2303 const_def->InferRange(nullptr, right_range);
2304 }
2305 break;
2306 default:
2307 break;
2308 }
2309 switch (representation) {
2310 case kTagged:
2311 op = new BinarySmiOpInstr(op_kind, left, right, deopt_id, right_range);
2312 break;
2313 case kUnboxedInt32:
2315 return nullptr;
2316 }
2317 op = new BinaryInt32OpInstr(op_kind, left, right, deopt_id);
2318 break;
2319 case kUnboxedUint32:
2320 if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
2321 (op_kind == Token::kUSHR)) {
2322 if (speculative_mode == kNotSpeculative) {
2323 op = new ShiftUint32OpInstr(op_kind, left, right, deopt_id,
2324 right_range);
2325 } else {
2326 op = new SpeculativeShiftUint32OpInstr(op_kind, left, right, deopt_id,
2327 right_range);
2328 }
2329 } else {
2330 op = new BinaryUint32OpInstr(op_kind, left, right, deopt_id);
2331 }
2332 break;
2333 case kUnboxedInt64:
2334 if ((op_kind == Token::kSHL) || (op_kind == Token::kSHR) ||
2335 (op_kind == Token::kUSHR)) {
2336 if (speculative_mode == kNotSpeculative) {
2337 op = new ShiftInt64OpInstr(op_kind, left, right, deopt_id,
2338 right_range);
2339 } else {
2340 op = new SpeculativeShiftInt64OpInstr(op_kind, left, right, deopt_id,
2341 right_range);
2342 }
2343 } else {
2344 op = new BinaryInt64OpInstr(op_kind, left, right, deopt_id,
2345 speculative_mode);
2346 }
2347 break;
2348 default:
2349 UNREACHABLE();
2350 return nullptr;
2351 }
2352
2353 ASSERT(op->representation() == representation);
2354 return op;
2355}
2356
2358 Representation representation,
2359 Token::Kind op_kind,
2360 Value* left,
2361 Value* right,
2362 intptr_t deopt_id,
2363 bool can_overflow,
2364 bool is_truncating,
2365 Range* range,
2366 SpeculativeMode speculative_mode) {
2368 representation, op_kind, left, right, deopt_id, speculative_mode);
2369 if (op == nullptr) {
2370 return nullptr;
2371 }
2372 if (!Range::IsUnknown(range)) {
2373 op->set_range(*range);
2374 }
2375
2377 if (is_truncating) {
2378 op->mark_truncating();
2379 }
2380
2381 return op;
2382}
2383
2385 // If range analysis has already determined a single possible value for
2386 // this operation, then replace it if possible.
2387 if (RangeUtils::IsSingleton(range()) && CanReplaceWithConstant()) {
2388 const auto& value =
2389 Integer::Handle(Integer::NewCanonical(range()->Singleton()));
2390 auto* const replacement =
2391 flow_graph->TryCreateConstantReplacementFor(this, value);
2392 if (replacement != this) {
2393 return replacement;
2394 }
2395 }
2396
2397 return this;
2398}
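
// Illustrative sketch (not part of this file): the replacement above fires only
// when range analysis has pinned the result to a single value. A minimal
// stand-alone model of that decision (the type and names are hypothetical):
#include <cstdint>
#include <optional>

struct IntRange {
  int64_t min;
  int64_t max;
};

static std::optional<int64_t> SingletonOf(const IntRange& range) {
  if (range.min == range.max) return range.min;  // Fold to a constant.
  return std::nullopt;                           // Keep the instruction.
}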
2399
2401 // If range analysis has already determined a single possible value for
2402 // this operation, then replace it if possible.
2403 if (RangeUtils::IsSingleton(range()) && CanReplaceWithConstant()) {
2404 const auto& value =
2405 Integer::Handle(Integer::NewCanonical(range()->Singleton()));
2406 auto* const replacement =
2407 flow_graph->TryCreateConstantReplacementFor(this, value);
2408 if (replacement != this) {
2409 return replacement;
2410 }
2411 }
2412
2413 // If both operands are constants, evaluate this expression. This might
2414 // occur due to load forwarding after the constant propagation pass
2415 // has already been run.
2416
2417 if (left()->BindsToConstant() && right()->BindsToConstant()) {
2419 left()->BoundConstant(), right()->BoundConstant(), op_kind(),
2420 is_truncating(), representation(), Thread::Current()));
2421
2422 if (!result.IsNull()) {
2423 return flow_graph->TryCreateConstantReplacementFor(this, result);
2424 }
2425 }
2426
2427 if (left()->BindsToConstant() && !right()->BindsToConstant() &&
2429 Value* l = left();
2430 Value* r = right();
2431 SetInputAt(0, r);
2432 SetInputAt(1, l);
2433 }
2434
2435 int64_t rhs;
2436 if (!Evaluator::ToIntegerConstant(right(), &rhs)) {
2437 return this;
2438 }
2439
2440 if (is_truncating()) {
2441 switch (op_kind()) {
2442 case Token::kMUL:
2443 case Token::kSUB:
2444 case Token::kADD:
2445 case Token::kBIT_AND:
2446 case Token::kBIT_OR:
2447 case Token::kBIT_XOR:
2448 rhs = Evaluator::TruncateTo(rhs, representation());
2449 break;
2450 default:
2451 break;
2452 }
2453 }
2454
2455 if (IsBinaryUint32Op() && HasUnmatchedInputRepresentations()) {
2456 // Canonicalization may eliminate the instruction and lose truncation,
2457 // so it is illegal to canonicalize a truncating uint32 instruction
2458 // until all conversions for its inputs are inserted.
2459 return this;
2460 }
2461
2462 switch (op_kind()) {
2463 case Token::kMUL:
2464 if (rhs == 1) {
2465 return left()->definition();
2466 } else if (rhs == 0) {
2467 return right()->definition();
2468 } else if ((rhs > 0) && Utils::IsPowerOfTwo(rhs)) {
2469 const int64_t shift_amount = Utils::ShiftForPowerOfTwo(rhs);
2470 const Representation shift_amount_rep =
2471 (SpeculativeModeOfInputs() == kNotSpeculative) ? kUnboxedInt64
2472 : kTagged;
2473 ConstantInstr* constant_shift_amount = flow_graph->GetConstant(
2474 Smi::Handle(Smi::New(shift_amount)), shift_amount_rep);
2476 representation(), Token::kSHL, left()->CopyWithType(),
2477 new Value(constant_shift_amount), GetDeoptId(), can_overflow(),
2478 is_truncating(), range(), SpeculativeModeOfInputs());
2479 if (shift != nullptr) {
2480 // Assign a range to the shift factor, just in case range
2481 // analysis no longer runs after this rewriting.
2482 if (auto shift_with_range = shift->AsShiftIntegerOp()) {
2483 shift_with_range->set_shift_range(
2484 new Range(RangeBoundary::FromConstant(shift_amount),
2485 RangeBoundary::FromConstant(shift_amount)));
2486 }
2487 if (!MayThrow()) {
2488 ASSERT(!shift->MayThrow());
2489 }
2490 if (!CanDeoptimize()) {
2491 ASSERT(!shift->CanDeoptimize());
2492 }
2493 flow_graph->InsertBefore(this, shift, env(), FlowGraph::kValue);
2494 return shift;
2495 }
2496 }
2497
2498 break;
2499 case Token::kADD:
2500 if (rhs == 0) {
2501 return left()->definition();
2502 }
2503 break;
2504 case Token::kBIT_AND:
2505 if (rhs == 0) {
2506 return right()->definition();
2507 } else if (rhs == RepresentationMask(representation())) {
2508 return left()->definition();
2509 }
2510 break;
2511 case Token::kBIT_OR:
2512 if (rhs == 0) {
2513 return left()->definition();
2514 } else if (rhs == RepresentationMask(representation())) {
2515 return right()->definition();
2516 }
2517 break;
2518 case Token::kBIT_XOR:
2519 if (rhs == 0) {
2520 return left()->definition();
2521 } else if (rhs == RepresentationMask(representation())) {
2523 representation(), Token::kBIT_NOT, left()->CopyWithType(),
2524 GetDeoptId(), SpeculativeModeOfInputs(), range());
2525 if (bit_not != nullptr) {
2526 flow_graph->InsertBefore(this, bit_not, env(), FlowGraph::kValue);
2527 return bit_not;
2528 }
2529 }
2530 break;
2531
2532 case Token::kSUB:
2533 if (rhs == 0) {
2534 return left()->definition();
2535 }
2536 break;
2537
2538 case Token::kTRUNCDIV:
2539 if (rhs == 1) {
2540 return left()->definition();
2541 } else if (rhs == -1) {
2543 representation(), Token::kNEGATE, left()->CopyWithType(),
2544 GetDeoptId(), SpeculativeModeOfInputs(), range());
2545 if (negation != nullptr) {
2546 flow_graph->InsertBefore(this, negation, env(), FlowGraph::kValue);
2547 return negation;
2548 }
2549 }
2550 break;
2551
2552 case Token::kMOD:
2553 if (std::abs(rhs) == 1) {
2554 return flow_graph->TryCreateConstantReplacementFor(this,
2555 Object::smi_zero());
2556 }
2557 break;
2558
2559 case Token::kUSHR:
2560 if (rhs >= kBitsPerInt64) {
2561 return flow_graph->TryCreateConstantReplacementFor(this,
2562 Object::smi_zero());
2563 }
2565 case Token::kSHR:
2566 if (rhs == 0) {
2567 return left()->definition();
2568 } else if (rhs < 0) {
2569 // Instruction will always throw on negative rhs operand.
2570 if (!CanDeoptimize()) {
2571 // For non-speculative operations (no deopt), let
2572 // the code generator deal with throw on slowpath.
2573 break;
2574 }
2575 ASSERT(GetDeoptId() != DeoptId::kNone);
2576 DeoptimizeInstr* deopt =
2577 new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
2578 flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
2579 // Replace with zero since it always throws.
2580 return flow_graph->TryCreateConstantReplacementFor(this,
2581 Object::smi_zero());
2582 }
2583 break;
2584
2585 case Token::kSHL: {
2586 const intptr_t result_bits = RepresentationBits(representation());
2587 if (rhs == 0) {
2588 return left()->definition();
2589 } else if ((rhs >= kBitsPerInt64) ||
2590 ((rhs >= result_bits) && is_truncating())) {
2591 return flow_graph->TryCreateConstantReplacementFor(this,
2592 Object::smi_zero());
2593 } else if ((rhs < 0) || ((rhs >= result_bits) && !is_truncating())) {
2594 // Instruction will always throw on negative rhs operand or
2595 // deoptimize on large rhs operand.
2596 if (!CanDeoptimize()) {
2597 // For non-speculative operations (no deopt), let
2598 // the code generator deal with throw on slowpath.
2599 break;
2600 }
2601 ASSERT(GetDeoptId() != DeoptId::kNone);
2602 DeoptimizeInstr* deopt =
2603 new DeoptimizeInstr(ICData::kDeoptBinarySmiOp, GetDeoptId());
2604 flow_graph->InsertBefore(this, deopt, env(), FlowGraph::kEffect);
2605 // Replace with zero since it overshifted or always throws.
2606 return flow_graph->TryCreateConstantReplacementFor(this,
2607 Object::smi_zero());
2608 }
2609 break;
2610 }
2611
2612 default:
2613 break;
2614 }
2615
2616 return this;
2617}
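
// Illustrative sketch (not part of this file): the kMUL case above rewrites a
// multiplication by a positive power-of-two constant into a left shift. A
// stand-alone model of that strength reduction; both helper names are
// hypothetical, and the real rewrite additionally records a range for the
// shift amount and preserves overflow behavior:
#include <cstdint>

static int ShiftForPowerOfTwoValue(int64_t value) {  // value > 0, power of two.
  int shift = 0;
  while ((value & 1) == 0) {
    value >>= 1;
    ++shift;
  }
  return shift;
}

static int64_t MulByPowerOfTwo(int64_t x, int64_t rhs) {
  // x * rhs == x << shift when rhs == 1 << shift.
  const int shift = ShiftForPowerOfTwoValue(rhs);
  return static_cast<int64_t>(static_cast<uint64_t>(x) << shift);
}
// e.g. MulByPowerOfTwo(7, 8) == 56 without issuing a multiply.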
2618
2619// Optimizations that eliminate or simplify individual instructions.
2621 return this;
2622}
2623
2625 return this;
2626}
2627
2629 // Must not remove Redefinitions without uses until LICM: even though a
2630 // Redefinition might not have any uses itself, it can still dominate
2631 // uses of the value it redefines and must serve as a barrier for those
2632 // uses. RenameUsesDominatedByRedefinitions would normalize the graph and
2633 // route those uses through this redefinition.
2634 if (!HasUses() && !flow_graph->is_licm_allowed()) {
2635 return nullptr;
2636 }
2637 if (constrained_type() != nullptr &&
2638 constrained_type()->IsEqualTo(value()->Type())) {
2639 return value()->definition();
2640 }
2641 return this;
2642}
2643
2645 switch (kind_) {
2646 case kOsrAndPreemption:
2647 return this;
2648 case kOsrOnly:
2649 // Don't need OSR entries in the optimized code.
2650 return nullptr;
2651 }
2652
2653 // The switch above exhausts all possibilities but some compilers can't figure
2654 // it out.
2655 UNREACHABLE();
2656 return this;
2657}
2658
2661 return true;
2662 }
2663
2664 switch (cid) {
2665 case kArrayCid:
2666 case kImmutableArrayCid:
2667 case kTypeArgumentsCid:
2668 return true;
2669 default:
2670 return false;
2671 }
2672}
2673
2675 auto kind = function.recognized_kind();
2676 switch (kind) {
2677 case MethodRecognizer::kTypedData_ByteDataView_factory:
2678 case MethodRecognizer::kTypedData_Int8ArrayView_factory:
2679 case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
2680 case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
2681 case MethodRecognizer::kTypedData_Int16ArrayView_factory:
2682 case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
2683 case MethodRecognizer::kTypedData_Int32ArrayView_factory:
2684 case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
2685 case MethodRecognizer::kTypedData_Int64ArrayView_factory:
2686 case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
2687 case MethodRecognizer::kTypedData_Float32ArrayView_factory:
2688 case MethodRecognizer::kTypedData_Float64ArrayView_factory:
2689 case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
2690 case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
2691 case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
2692 return true;
2693 default:
2694 return false;
2695 }
2696}
2697
2699 const Function& function) {
2700 auto kind = function.recognized_kind();
2701 switch (kind) {
2702 case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory:
2703 case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory:
2704 case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory:
2705 case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory:
2706 case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory:
2707 case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory:
2708 case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory:
2709 case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory:
2710 case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory:
2711 case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory:
2712 case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory:
2713 case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory:
2714 case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory:
2715 case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory:
2716 case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory:
2717 return true;
2718 default:
2719 return false;
2720 }
2721}
2722
2724 return HasUses() ? this : nullptr;
2725}
2726
2728 const Slot& field,
2729 Object* result) {
2730 switch (field.kind()) {
2732 return TryEvaluateLoad(instance, field.field(), result);
2733
2734 case Slot::Kind::kArgumentsDescriptor_type_args_len:
2735 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2736 ArgumentsDescriptor desc(Array::Cast(instance));
2737 *result = Smi::New(desc.TypeArgsLen());
2738 return true;
2739 }
2740 return false;
2741
2742 case Slot::Kind::kArgumentsDescriptor_count:
2743 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2744 ArgumentsDescriptor desc(Array::Cast(instance));
2745 *result = Smi::New(desc.Count());
2746 return true;
2747 }
2748 return false;
2749
2750 case Slot::Kind::kArgumentsDescriptor_positional_count:
2751 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2752 ArgumentsDescriptor desc(Array::Cast(instance));
2753 *result = Smi::New(desc.PositionalCount());
2754 return true;
2755 }
2756 return false;
2757
2758 case Slot::Kind::kArgumentsDescriptor_size:
2759 // If a constant arguments descriptor appears, then either it is from
2760 // an invocation dispatcher (which always has tagged arguments and so
2761 // [host]Size() == [target]Size() == Count()) or the constant should
2762 // have the correct Size() in terms of the target architecture if any
2763 // spill slots are involved.
2764 if (instance.IsArray() && Array::Cast(instance).IsImmutable()) {
2765 ArgumentsDescriptor desc(Array::Cast(instance));
2766 *result = Smi::New(desc.Size());
2767 return true;
2768 }
2769 return false;
2770
2771 case Slot::Kind::kTypeArguments_length:
2772 if (instance.IsTypeArguments()) {
2773 *result = Smi::New(TypeArguments::Cast(instance).Length());
2774 return true;
2775 }
2776 return false;
2777
2778 case Slot::Kind::kRecord_shape:
2779 if (instance.IsRecord()) {
2780 *result = Record::Cast(instance).shape().AsSmi();
2781 return true;
2782 }
2783 return false;
2784
2786 if (instance.IsRecord()) {
2787 const intptr_t index = compiler::target::Record::field_index_at_offset(
2788 field.offset_in_bytes());
2789 const Record& record = Record::Cast(instance);
2790 if (index < record.num_fields()) {
2791 *result = record.FieldAt(index);
2792 }
2793 return true;
2794 }
2795 return false;
2796
2797 default:
2798 break;
2799 }
2800 return false;
2801}
2802
2804 const Field& field,
2805 Object* result) {
2806 if (!field.is_final() || !instance.IsInstance()) {
2807 return false;
2808 }
2809
2810 // Check that the instance really has the field which we
2811 // are trying to load from.
2812 Class& cls = Class::Handle(instance.clazz());
2813 while (cls.ptr() != Class::null() && cls.ptr() != field.Owner()) {
2814 cls = cls.SuperClass();
2815 }
2816 if (cls.ptr() != field.Owner()) {
2817 // Failed to find the field in class or its superclasses.
2818 return false;
2819 }
2820
2821 // Object has the field: execute the load.
2822 *result = Instance::Cast(instance).GetField(field);
2823 return true;
2824}
2825
2828 // If the load is guaranteed to never retrieve a GC-moveable address,
2829 // then the returned address can't alias the (GC-moveable) instance.
2830 return false;
2831 }
2832 if (slot().IsIdentical(Slot::PointerBase_data())) {
2833 // If we know statically that the instance is a typed data view, then the
2834 // data field doesn't alias the instance (but some other typed data object).
2835 const intptr_t cid = instance()->Type()->ToNullableCid();
2836 if (IsUnmodifiableTypedDataViewClassId(cid)) return false;
2837 if (IsTypedDataViewClassId(cid)) return false;
2838 }
2839 return true;
2840}
2841
2844 // The load is guaranteed to never retrieve a GC-moveable address.
2845 return false;
2846 }
2847 if (slot().IsIdentical(Slot::PointerBase_data())) {
2848 // If we know statically that the instance is an external array, then
2849 // the load retrieves a pointer to external memory.
2850 return !IsExternalPayloadClassId(instance()->Type()->ToNullableCid());
2851 }
2852 return true;
2853}
2854
2858
2860 if (!HasUses() && !calls_initializer()) return nullptr;
2861
2862 Definition* orig_instance = instance()->definition()->OriginalDefinition();
2863 if (IsImmutableLengthLoad()) {
2865 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2866 // For fixed-length arrays, if the array is the result of a known
2867 // constructor call, we can replace the length load with the length
2868 // argument passed to the constructor.
2869 if (call->is_known_list_constructor() &&
2870 IsFixedLengthArrayCid(call->Type()->ToCid())) {
2871 return call->ArgumentAt(1);
2872 } else if (call->function().recognized_kind() ==
2873 MethodRecognizer::kByteDataFactory) {
2874 // Similarly, we check for the ByteData constructor and forward its
2875 // explicit length argument appropriately.
2876 return call->ArgumentAt(1);
2877 } else if (IsTypedDataViewFactory(call->function())) {
2878 // Typed data view factories all take three arguments (after
2879 // the implicit type arguments parameter):
2880 //
2881 // 1) _TypedList buffer -- the underlying data for the view
2882 // 2) int offsetInBytes -- the offset into the buffer to start viewing
2883 // 3) int length -- the number of elements in the view
2884 //
2885 // Here, we forward the third.
2886 return call->ArgumentAt(3);
2887 }
2888 } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
2889 // For arrays with guarded lengths, replace the length load
2890 // with a constant.
2891 const Slot& slot = load_array->slot();
2892 if (slot.IsDartField()) {
2893 if (slot.field().guarded_list_length() >= 0) {
2894 return flow_graph->GetConstant(
2896 }
2897 }
2898 }
2899 }
2900
2901 switch (slot().kind()) {
2902 case Slot::Kind::kArray_length:
2903 if (CreateArrayInstr* create_array = orig_instance->AsCreateArray()) {
2904 return create_array->num_elements()->definition();
2905 }
2906 break;
2907 case Slot::Kind::kTypedDataBase_length:
2908 if (AllocateTypedDataInstr* alloc_typed_data =
2909 orig_instance->AsAllocateTypedData()) {
2910 return alloc_typed_data->num_elements()->definition();
2911 }
2912 break;
2913 case Slot::Kind::kTypedDataView_typed_data:
2914 // This case covers the first explicit argument to typed data view
2915 // factories, the data (buffer).
2917 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2918 if (IsTypedDataViewFactory(call->function()) ||
2919 IsUnmodifiableTypedDataViewFactory(call->function())) {
2920 return call->ArgumentAt(1);
2921 }
2922 }
2923 break;
2924 case Slot::Kind::kTypedDataView_offset_in_bytes:
2925 // This case covers the second explicit argument to typed data view
2926 // factories, the offset into the buffer.
2928 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2929 if (IsTypedDataViewFactory(call->function())) {
2930 return call->ArgumentAt(2);
2931 } else if (call->function().recognized_kind() ==
2932 MethodRecognizer::kByteDataFactory) {
2933 // A _ByteDataView returned from the ByteData constructor always
2934 // has an offset of 0.
2935 return flow_graph->GetConstant(Object::smi_zero());
2936 }
2937 }
2938 break;
2939 case Slot::Kind::kRecord_shape:
2941 if (auto* alloc_rec = orig_instance->AsAllocateRecord()) {
2942 return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
2943 } else if (auto* alloc_rec = orig_instance->AsAllocateSmallRecord()) {
2944 return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
2945 } else {
2946 const AbstractType* type = instance()->Type()->ToAbstractType();
2947 if (type->IsRecordType()) {
2948 return flow_graph->GetConstant(
2949 Smi::Handle(RecordType::Cast(*type).shape().AsSmi()));
2950 }
2951 }
2952 break;
2955 if (StaticCallInstr* call = orig_instance->AsStaticCall()) {
2956 if (call->is_known_list_constructor()) {
2957 return call->ArgumentAt(0);
2958 } else if (IsTypedDataViewFactory(call->function()) ||
2959 IsUnmodifiableTypedDataViewFactory(call->function())) {
2960 return flow_graph->constant_null();
2961 }
2962 switch (call->function().recognized_kind()) {
2963 case MethodRecognizer::kByteDataFactory:
2964 case MethodRecognizer::kLinkedHashBase_getData:
2965 case MethodRecognizer::kImmutableLinkedHashBase_getData:
2966 return flow_graph->constant_null();
2967 default:
2968 break;
2969 }
2970 } else if (CreateArrayInstr* create_array =
2971 orig_instance->AsCreateArray()) {
2972 return create_array->type_arguments()->definition();
2973 } else if (LoadFieldInstr* load_array = orig_instance->AsLoadField()) {
2974 const Slot& slot = load_array->slot();
2975 switch (slot.kind()) {
2977 // For trivially exact fields we know that type arguments match
2978 // static type arguments exactly.
2979 const Field& field = slot.field();
2981 return flow_graph->GetConstant(TypeArguments::Handle(
2982 Type::Cast(AbstractType::Handle(field.type()))
2983 .GetInstanceTypeArguments(flow_graph->thread())));
2984 }
2985 break;
2986 }
2987
2988 case Slot::Kind::kLinkedHashBase_data:
2989 return flow_graph->constant_null();
2990
2991 default:
2992 break;
2993 }
2994 }
2995 break;
2996 case Slot::Kind::kPointerBase_data:
2999 const intptr_t cid = instance()->Type()->ToNullableCid();
3000 // Pointers and ExternalTypedData objects never contain inner pointers.
3001 if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
3003 }
3004 }
3005 break;
3006 default:
3007 break;
3008 }
3009
3010 // Try folding away loads from constant objects.
3011 if (instance()->BindsToConstant()) {
3013 if (Evaluate(instance()->BoundConstant(), &result)) {
3014 if (result.IsSmi() || result.IsOld()) {
3015 return flow_graph->GetConstant(result);
3016 }
3017 }
3018 }
3019
3020 if (instance()->definition()->IsAllocateObject() && IsImmutableLoad()) {
3021 StoreFieldInstr* initializing_store = nullptr;
3022 for (auto use : instance()->definition()->input_uses()) {
3023 if (auto store = use->instruction()->AsStoreField()) {
3024 if ((use->use_index() == StoreFieldInstr::kInstancePos) &&
3025 store->slot().IsIdentical(slot())) {
3026 if (initializing_store == nullptr) {
3027 initializing_store = store;
3028 } else {
3029 initializing_store = nullptr;
3030 break;
3031 }
3032 }
3033 }
3034 }
3035
3036 // If we find an initializing store then it *must* by construction
3037 // dominate the load.
3038 if (initializing_store != nullptr &&
3039 initializing_store->is_initialization()) {
3040 ASSERT(IsDominatedBy(initializing_store));
3041 return initializing_store->value()->definition();
3042 }
3043 }
3044
3045 return this;
3046}
3047
3049 if (FLAG_eliminate_type_checks) {
3050 if (value()->Type()->ToCid() == kBoolCid) {
3051 return value()->definition();
3052 }
3053
3054 // In strong mode the type is already verified either by static analysis
3055 // or runtime checks, so AssertBoolean just ensures that the value is not null.
3056 if (!value()->Type()->is_nullable()) {
3057 return value()->definition();
3058 }
3059 }
3060
3061 return this;
3062}
3063
3065 // We need dst_type() to be a constant AbstractType to perform any
3066 // canonicalization.
3067 if (!dst_type()->BindsToConstant()) return this;
3068 const auto& abs_type = AbstractType::Cast(dst_type()->BoundConstant());
3069
3070 if (abs_type.IsTopTypeForSubtyping() ||
3071 (FLAG_eliminate_type_checks &&
3072 value()->Type()->IsAssignableTo(abs_type))) {
3073 return value()->definition();
3074 }
3075 if (abs_type.IsInstantiated()) {
3076 return this;
3077 }
3078
3079 // For uninstantiated target types: If the instantiator and function
3080 // type arguments are constant, instantiate the target type here.
3081 // Note: these constant type arguments might not necessarily correspond
3082 // to the correct instantiator because AssertAssignable might
3083 // be located in the unreachable part of the graph (e.g.
3084 // it might be dominated by CheckClass that always fails).
3085 // This means that the code below must guard against such a possibility.
3086 Thread* thread = Thread::Current();
3087 Zone* Z = thread->zone();
3088
3089 const TypeArguments* instantiator_type_args = nullptr;
3090 const TypeArguments* function_type_args = nullptr;
3091
3092 if (instantiator_type_arguments()->BindsToConstant()) {
3094 instantiator_type_args = (val.ptr() == TypeArguments::null())
3095 ? &TypeArguments::null_type_arguments()
3096 : &TypeArguments::Cast(val);
3097 }
3098
3099 if (function_type_arguments()->BindsToConstant()) {
3101 function_type_args =
3102 (val.ptr() == TypeArguments::null())
3103 ? &TypeArguments::null_type_arguments()
3104 : &TypeArguments::Cast(function_type_arguments()->BoundConstant());
3105 }
3106
3107 // If instantiator_type_args are not constant, try to match the pattern
3108 // obj.field.:type_arguments where the field's static type exactness state
3109 // tells us that all values stored in the field have an exact superclass.
3110 // In this case we know the prefix of the actual type arguments vector
3111 // and can try to instantiate the type using just the prefix.
3112 //
3113 // Note: TypeParameter::InstantiateFrom returns an error if we try
3114 // to instantiate it from a vector that is too short.
3115 if (instantiator_type_args == nullptr) {
3116 if (LoadFieldInstr* load_type_args =
3117 instantiator_type_arguments()->definition()->AsLoadField()) {
3118 if (load_type_args->slot().IsTypeArguments()) {
3119 if (LoadFieldInstr* load_field = load_type_args->instance()
3120 ->definition()
3122 ->AsLoadField()) {
3123 if (load_field->slot().IsDartField() &&
3124 load_field->slot()
3125 .field()
3126 .static_type_exactness_state()
3127 .IsHasExactSuperClass()) {
3128 instantiator_type_args = &TypeArguments::Handle(
3129 Z, Type::Cast(AbstractType::Handle(
3130 Z, load_field->slot().field().type()))
3131 .GetInstanceTypeArguments(thread));
3132 }
3133 }
3134 }
3135 }
3136 }
3137
3138 if ((instantiator_type_args != nullptr) && (function_type_args != nullptr)) {
3139 AbstractType& new_dst_type = AbstractType::Handle(
3140 Z, abs_type.InstantiateFrom(*instantiator_type_args,
3141 *function_type_args, kAllFree, Heap::kOld));
3142 if (new_dst_type.IsNull()) {
3143 // Failed instantiation in dead code.
3144 return this;
3145 }
3146 new_dst_type = new_dst_type.Canonicalize(Thread::Current());
3147
3148 // Successfully instantiated destination type: update the type attached
3149 // to this instruction and set type arguments to null because we no
3150 // longer need them (the type was instantiated).
3151 dst_type()->BindTo(flow_graph->GetConstant(new_dst_type));
3154
3155 if (new_dst_type.IsTopTypeForSubtyping() ||
3156 (FLAG_eliminate_type_checks &&
3157 value()->Type()->IsAssignableTo(new_dst_type))) {
3158 return value()->definition();
3159 }
3160 }
3161 return this;
3162}
3163
3165 return HasUses() ? this : nullptr;
3166}
3167
3168LocationSummary* DebugStepCheckInstr::MakeLocationSummary(Zone* zone,
3169 bool opt) const {
3170 const intptr_t kNumInputs = 0;
3171 const intptr_t kNumTemps = 0;
3172 LocationSummary* locs = new (zone)
3173 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3174 return locs;
3175}
3176
3178 return nullptr;
3179}
3180
3182 ASSERT(!coverage_array_.IsNull());
3183 return coverage_array_.At(coverage_index_) != Smi::New(0) ? nullptr : this;
3184}
3185
3187 if (input_use_list() == nullptr) {
3188 // Environments can accommodate any representation. No need to box.
3189 return value()->definition();
3190 }
3191
3192 // Fold away Box<rep>(v) if v has a target representation already.
3193 Definition* value_defn = value()->definition();
3194 if (value_defn->representation() == representation()) {
3195 return value_defn;
3196 }
3197
3198 // Fold away Box<rep>(Unbox<rep>(v)) if value is known to be of the
3199 // right class.
3200 UnboxInstr* unbox_defn = value()->definition()->AsUnbox();
3201 if ((unbox_defn != nullptr) &&
3202 (unbox_defn->representation() == from_representation()) &&
3203 (unbox_defn->value()->Type()->ToCid() == Type()->ToCid())) {
3204 if (from_representation() == kUnboxedFloat) {
3205 // This is a narrowing conversion.
3206 return this;
3207 }
3208 return unbox_defn->value()->definition();
3209 }
3210
3211 if (value()->BindsToConstant()) {
3212 switch (representation()) {
3213 case kUnboxedFloat64x2:
3214 ASSERT(value()->BoundConstant().IsFloat64x2());
3215 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3216 case kUnboxedFloat32x4:
3217 ASSERT(value()->BoundConstant().IsFloat32x4());
3218 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3219 case kUnboxedInt32x4:
3220 ASSERT(value()->BoundConstant().IsInt32x4());
3221 return flow_graph->GetConstant(value()->BoundConstant(), kTagged);
3222 default:
3223 return this;
3224 }
3225 }
3226
3227 return this;
3228}
3229
3231 return HasUses() ? this : NULL;
3232}
3233
3235 if (!HasUses()) return NULL;
3236
3237 if (BoxLanesInstr* box = value()->definition()->AsBoxLanes()) {
3238 return box->InputAt(lane())->definition();
3239 }
3240
3241 return this;
3242}
3243
3245 Range* range = value()->definition()->range();
3247}
3248
3250 if (input_use_list() == nullptr) {
3251 // Environments can accommodate any representation. No need to box.
3252 return value()->definition();
3253 }
3254
3255 // Fold away Box<rep>(v) if v has a target representation already.
3256 Definition* value_defn = value()->definition();
3257 if (value_defn->representation() == representation()) {
3258 return value_defn;
3259 }
3260
3261 return this;
3262}
3263
3265 Definition* replacement = BoxIntegerInstr::Canonicalize(flow_graph);
3266 if (replacement != this) {
3267 return replacement;
3268 }
3269
3270 // For all x, box(unbox(x)) = x.
3271 if (auto unbox = value()->definition()->AsUnboxInt64()) {
3272 if (unbox->SpeculativeModeOfInputs() == kNotSpeculative) {
3273 return unbox->value()->definition();
3274 }
3275 } else if (auto unbox = value()->definition()->AsUnboxedConstant()) {
3276 return flow_graph->GetConstant(unbox->value());
3277 }
3278
3279 // Find a more precise box instruction.
3280 if (auto conv = value()->definition()->AsIntConverter()) {
3281 Definition* replacement;
3282 if (conv->from() == kUntagged) {
3283 return this;
3284 }
3285 switch (conv->from()) {
3286 case kUnboxedInt32:
3287 replacement = new BoxInt32Instr(conv->value()->CopyWithType());
3288 break;
3289 case kUnboxedUint32:
3290 replacement = new BoxUint32Instr(conv->value()->CopyWithType());
3291 break;
3292 default:
3293 UNREACHABLE();
3294 break;
3295 }
3296 flow_graph->InsertBefore(this, replacement, nullptr, FlowGraph::kValue);
3297 return replacement;
3298 }
3299
3300 return this;
3301}
3302
3304 if (!HasUses() && !CanDeoptimize()) return nullptr;
3305
3306 // Fold away Unbox<rep>(v) if v has a target representation already.
3307 Definition* value_defn = value()->definition();
3308 if (value_defn->representation() == representation()) {
3309 return value_defn;
3310 }
3311
3312 BoxInstr* box_defn = value()->definition()->AsBox();
3313 if (box_defn != nullptr) {
3314 // Fold away Unbox<rep>(Box<rep>(v)).
3315 if (box_defn->from_representation() == representation()) {
3316 return box_defn->value()->definition();
3317 }
3318
3319 if ((box_defn->from_representation() == kUnboxedDouble) &&
3320 (representation() == kUnboxedFloat)) {
3321 Definition* replacement = new DoubleToFloatInstr(
3322 box_defn->value()->CopyWithType(), DeoptId::kNone);
3323 flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue);
3324 return replacement;
3325 }
3326
3327 if ((box_defn->from_representation() == kUnboxedFloat) &&
3328 (representation() == kUnboxedDouble)) {
3329 Definition* replacement = new FloatToDoubleInstr(
3330 box_defn->value()->CopyWithType(), DeoptId::kNone);
3331 flow_graph->InsertBefore(this, replacement, NULL, FlowGraph::kValue);
3332 return replacement;
3333 }
3334 }
3335
3336 if (representation() == kUnboxedDouble && value()->BindsToConstant()) {
3337 const Object& val = value()->BoundConstant();
3338 if (val.IsInteger()) {
3339 const Double& double_val = Double::ZoneHandle(
3340 flow_graph->zone(),
3341 Double::NewCanonical(Integer::Cast(val).AsDoubleValue()));
3342 return flow_graph->GetConstant(double_val, kUnboxedDouble);
3343 } else if (val.IsDouble()) {
3344 return flow_graph->GetConstant(val, kUnboxedDouble);
3345 }
3346 }
3347
3348 if (representation() == kUnboxedFloat && value()->BindsToConstant()) {
3349 const Object& val = value()->BoundConstant();
3350 if (val.IsInteger()) {
3351 double narrowed_val =
3352 static_cast<float>(Integer::Cast(val).AsDoubleValue());
3353 return flow_graph->GetConstant(
3355 kUnboxedFloat);
3356 } else if (val.IsDouble()) {
3357 double narrowed_val = static_cast<float>(Double::Cast(val).value());
3358 return flow_graph->GetConstant(
3360 kUnboxedFloat);
3361 }
3362 }
3363
3364 return this;
3365}
3366
3368 if (!HasUses() && !CanDeoptimize()) return nullptr;
3369
3370 // Fold away Unbox<rep>(v) if v has a target representation already.
3371 Definition* value_defn = value()->definition();
3372 if (value_defn->representation() == representation()) {
3373 return value_defn;
3374 }
3375
3376 // Do not attempt to fold this instruction if we have not matched
3377 // input/output representations yet.
3378 if (HasUnmatchedInputRepresentations()) {
3379 return this;
3380 }
3381
3382 // Fold away UnboxInteger<rep_to>(BoxInteger<rep_from>(v)).
3383 BoxIntegerInstr* box_defn = value()->definition()->AsBoxInteger();
3384 if (box_defn != nullptr && !box_defn->HasUnmatchedInputRepresentations()) {
3385 Representation from_representation =
3386 box_defn->value()->definition()->representation();
3387 if (from_representation == representation()) {
3388 return box_defn->value()->definition();
3389 } else {
3390 // Only operate on explicit unboxed operands.
3391 IntConverterInstr* converter = new IntConverterInstr(
3392 from_representation, representation(),
3393 box_defn->value()->CopyWithType(),
3394 (representation() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
3395 // TODO(vegorov): marking resulting converter as truncating when
3396 // unboxing can't deoptimize is a workaround for the missing
3397 // deoptimization environment when we insert converter after
3398 // EliminateEnvironments and there is a mismatch between predicates
3399 // UnboxIntConverterInstr::CanDeoptimize and UnboxInt32::CanDeoptimize.
3400 if ((representation() == kUnboxedInt32) &&
3401 (is_truncating() || !CanDeoptimize())) {
3402 converter->mark_truncating();
3403 }
3404 flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
3405 return converter;
3406 }
3407 }
3408
3409 if ((SpeculativeModeOfInput(0) == kGuardInputs) && !ComputeCanDeoptimize()) {
3410 // Remember if we ever learn that our input doesn't require checking, as
3411 // the input Value might later be changed in a way that would make us forget.
3412 set_speculative_mode(kNotSpeculative);
3413 }
3414
3415 if (value()->BindsToConstant()) {
3416 const auto& obj = value()->BoundConstant();
3417 if (obj.IsInteger()) {
3418 if (representation() == kUnboxedInt64) {
3419 return flow_graph->GetConstant(obj, representation());
3420 }
3421 const int64_t intval = Integer::Cast(obj).AsInt64Value();
3423 return flow_graph->GetConstant(obj, representation());
3424 }
3425 if (is_truncating()) {
3426 const int64_t result = Evaluator::TruncateTo(intval, representation());
3427 return flow_graph->GetConstant(
3428 Integer::ZoneHandle(flow_graph->zone(),
3430 representation());
3431 }
3432 }
3433 }
3434
3435 return this;
3436}
3437
3439 if (!HasUses()) return nullptr;
3440
3441 // Fold IntConverter({Unboxed}Constant(...)) to UnboxedConstant.
3442 if (auto constant = value()->definition()->AsConstant()) {
3443 if (from() != kUntagged && to() != kUntagged &&
3444 constant->representation() == from() && constant->value().IsInteger()) {
3445 const int64_t value = Integer::Cast(constant->value()).AsInt64Value();
3446 const int64_t result =
3448 if (is_truncating() || (value == result)) {
3450 box ^= box.Canonicalize(flow_graph->thread());
3451 return flow_graph->GetConstant(box, to());
3452 }
3453 }
3454 }
3455
3456 // Fold IntConverter(b->c, IntConverter(a->b, v)) into IntConverter(a->c, v).
3457 IntConverterInstr* first_converter = value()->definition()->AsIntConverter();
3458 if ((first_converter != nullptr) &&
3459 (first_converter->representation() == from())) {
3460 const auto intermediate_rep = first_converter->representation();
3461 // Only eliminate intermediate conversion if it does not change the value.
3462 auto src_defn = first_converter->value()->definition();
3463 if (intermediate_rep == kUntagged) {
3464 // Both conversions are no-ops, as the other representations must be
3465 // kUnboxedIntPtr.
3466 } else if (!Range::Fits(src_defn->range(), intermediate_rep)) {
3467 return this;
3468 }
3469
3470 // Otherwise it is safe to discard any other conversions from and then back
3471 // to the same integer type.
3472 if (first_converter->from() == to()) {
3473 return src_defn;
3474 }
3475
3476 // Do not merge conversions where the first starts from Untagged or the
3477 // second ends at Untagged, since we expect to see either UnboxedIntPtr
3478 // or UnboxedFfiIntPtr as the other type in an Untagged conversion.
3479 if ((first_converter->from() == kUntagged) || (to() == kUntagged)) {
3480 return this;
3481 }
3482
3483 IntConverterInstr* converter = new IntConverterInstr(
3484 first_converter->from(), representation(),
3485 first_converter->value()->CopyWithType(),
3486 (to() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
3487 if ((representation() == kUnboxedInt32) && is_truncating()) {
3488 converter->mark_truncating();
3489 }
3490 flow_graph->InsertBefore(this, converter, env(), FlowGraph::kValue);
3491 return converter;
3492 }
3493
3494 UnboxInt64Instr* unbox_defn = value()->definition()->AsUnboxInt64();
3495 if (unbox_defn != nullptr && (from() == kUnboxedInt64) &&
3496 (to() == kUnboxedInt32) && unbox_defn->HasOnlyInputUse(value())) {
3497 // TODO(vegorov): there is a duplication of code between UnboxedIntConverter
3498 // and code path that unboxes Mint into Int32. We should just schedule
3499 // these instructions close to each other instead of fusing them.
3500 Definition* replacement =
3503 unbox_defn->value()->CopyWithType(), GetDeoptId());
3504 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3505 return replacement;
3506 }
3507
3508 return this;
3509}
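
// Illustrative sketch (not part of this file): merging IntConverter(b->c,
// IntConverter(a->b, v)) into IntConverter(a->c, v) is only sound when the
// intermediate width b cannot have changed the value. The real check above
// uses the source definition's inferred range; this stand-alone model uses a
// concrete value instead (both helper names are hypothetical):
#include <cstdint>

static uint64_t TruncateToBits(uint64_t v, int bits) {
  return (bits >= 64) ? v : (v & ((uint64_t{1} << bits) - 1));
}

static bool IntermediateConversionIsNoOp(uint64_t v, int intermediate_bits) {
  // If truncating to the intermediate width leaves v unchanged, the pair of
  // conversions can be collapsed into a single one.
  return TruncateToBits(v, intermediate_bits) == v;
}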
3510
3511// Tests for an FP comparison that cannot be negated
3512// (to preserve NaN semantics).
3513static bool IsFpCompare(ComparisonInstr* comp) {
3514 if (comp->IsRelationalOp()) {
3515 return comp->operation_cid() == kDoubleCid;
3516 }
3517 return false;
3518}
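
// Illustrative sketch (not part of this file): a stand-alone demonstration of
// why a double comparison cannot simply be negated. With a NaN operand both
// x > y and its would-be negation x <= y are false (the function name is
// hypothetical):
#include <cassert>
#include <limits>

static void NegatedFpCompareDiffers() {
  const double x = std::numeric_limits<double>::quiet_NaN();
  const double y = 1.0;
  assert(!(x > y));   // The original comparison is false for NaN...
  assert(!(x <= y));  // ...and so is the "negated" one: !(x > y) != (x <= y).
}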
3519
3521 Definition* defn = value()->definition();
3522 // Convert e.g. !(x > y) into (x <= y) for non-FP x, y.
3523 if (defn->IsComparison() && defn->HasOnlyUse(value()) &&
3524 defn->Type()->ToCid() == kBoolCid) {
3525 ComparisonInstr* comp = defn->AsComparison();
3526 if (!IsFpCompare(comp)) {
3527 comp->NegateComparison();
3528 return defn;
3529 }
3530 }
3531 return this;
3532}
3533
3534static bool MayBeBoxableNumber(intptr_t cid) {
3535 return (cid == kDynamicCid) || (cid == kMintCid) || (cid == kDoubleCid);
3536}
3537
3539 if (type->IsNone()) {
3540 return false;
3541 }
3542 const AbstractType& unwrapped_type =
3543 AbstractType::Handle(type->ToAbstractType()->UnwrapFutureOr());
3544 // Note that type 'Number' is a subtype of itself.
3545 return unwrapped_type.IsTopTypeForSubtyping() ||
3546 unwrapped_type.IsObjectType() || unwrapped_type.IsTypeParameter() ||
3548 Heap::kOld);
3549}
3550
3551// Returns a replacement for a strict comparison and signals if the result has
3552// to be negated.
3554 bool* negated,
3555 bool is_branch) {
3556 // Use propagated cid and type information to eliminate number checks.
3557 // If one of the inputs is not a boxable number (Mint, Double), or
3558 // is not a subtype of num, no need for number checks.
3559 if (compare->needs_number_check()) {
3560 if (!MayBeBoxableNumber(compare->left()->Type()->ToCid()) ||
3561 !MayBeBoxableNumber(compare->right()->Type()->ToCid())) {
3562 compare->set_needs_number_check(false);
3563 } else if (!MayBeNumber(compare->left()->Type()) ||
3564 !MayBeNumber(compare->right()->Type())) {
3565 compare->set_needs_number_check(false);
3566 }
3567 }
3568 *negated = false;
3569 ConstantInstr* constant_defn = nullptr;
3570 Value* other = nullptr;
3571
3572 if (!compare->IsComparisonWithConstant(&other, &constant_defn)) {
3573 return compare;
3574 }
3575
3576 const Object& constant = constant_defn->value();
3577 const bool can_merge = is_branch || (other->Type()->ToCid() == kBoolCid);
3578 Definition* other_defn = other->definition();
3579 Token::Kind kind = compare->kind();
3580
3581 if (!constant.IsBool() || !can_merge) {
3582 return compare;
3583 }
3584
3585 const bool constant_value = Bool::Cast(constant).value();
3586
3587 // Handle `e === true` and `e !== false`: these cases don't require
3588 // negation and allow direct merge.
3589 if ((kind == Token::kEQ_STRICT) == constant_value) {
3590 return other_defn;
3591 }
3592
3593 // We now have `e !== true` or `e === false`: these cases require
3594 // negation.
3595 if (auto comp = other_defn->AsComparison()) {
3596 if (other_defn->HasOnlyUse(other) && !IsFpCompare(comp)) {
3597 *negated = true;
3598 return other_defn;
3599 }
3600 }
3601
3602 return compare;
3603}
3604
3605static bool BindsToGivenConstant(Value* v, intptr_t expected) {
3606 return v->BindsToConstant() && v->BoundConstant().IsSmi() &&
3607 (Smi::Cast(v->BoundConstant()).Value() == expected);
3608}
3609
3610// Recognize patterns (a & b) == 0 and (a & 2^n) != 2^n.
3611static bool RecognizeTestPattern(Value* left, Value* right, bool* negate) {
3612 if (!right->BindsToConstant() || !right->BoundConstant().IsSmi()) {
3613 return false;
3614 }
3615
3616 const intptr_t value = Smi::Cast(right->BoundConstant()).Value();
3617 if ((value != 0) && !Utils::IsPowerOfTwo(value)) {
3618 return false;
3619 }
3620
3621 BinarySmiOpInstr* mask_op = left->definition()->AsBinarySmiOp();
3622 if ((mask_op == nullptr) || (mask_op->op_kind() != Token::kBIT_AND) ||
3623 !mask_op->HasOnlyUse(left)) {
3624 return false;
3625 }
3626
3627 if (value == 0) {
3628 // Recognized (a & b) == 0 pattern.
3629 *negate = false;
3630 return true;
3631 }
3632
3633 // Recognize the (a & 2^n) == 2^n pattern.
3634 if (BindsToGivenConstant(mask_op->left(), value) ||
3635 BindsToGivenConstant(mask_op->right(), value)) {
3636 // Recognized (a & 2^n) == 2^n pattern. It's equivalent to (a & 2^n) != 0
3637 // so we need to negate the original comparison.
3638 *negate = true;
3639 return true;
3640 }
3641
3642 return false;
3643}
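
// Illustrative sketch (not part of this file): the second pattern relies on the
// fact that for a single-bit mask m == 2^n, (a & m) == m holds exactly when
// (a & m) != 0, which is why the rewritten test negates the original
// comparison. A stand-alone statement of that equivalence (the function name
// is hypothetical):
#include <cassert>
#include <cstdint>

static void SingleBitMaskEquivalence(uint64_t a, uint64_t m) {
  // Precondition: m is a power of two, so (a & m) is either 0 or m.
  assert(((a & m) == m) == ((a & m) != 0));
}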
3644
3646 Zone* zone = flow_graph->zone();
3647 if (comparison()->IsStrictCompare()) {
3648 bool negated = false;
3650 comparison()->AsStrictCompare(), &negated, /*is_branch=*/true);
3651 if (replacement == comparison()) {
3652 return this;
3653 }
3654 ComparisonInstr* comp = replacement->AsComparison();
3655 if ((comp == nullptr) || comp->CanDeoptimize() ||
3657 return this;
3658 }
3659
3660 // Replace the comparison if the replacement is used at this branch,
3661 // and has exactly one use.
3662 Value* use = comp->input_use_list();
3663 if ((use->instruction() == this) && comp->HasOnlyUse(use)) {
3664 if (negated) {
3665 comp->NegateComparison();
3666 }
3668 flow_graph->CopyDeoptTarget(this, comp);
3669 // Unlink environment from the comparison since it is copied to the
3670 // branch instruction.
3671 comp->RemoveEnvironment();
3672
3673 comp->RemoveFromGraph();
3674 SetComparison(comp);
3675 if (FLAG_trace_optimization && flow_graph->should_print()) {
3676 THR_Print("Merging comparison v%" Pd "\n", comp->ssa_temp_index());
3677 }
3678 // Clear the comparison's temp index and ssa temp index since the
3679 // value of the comparison is not used outside the branch anymore.
3680 ASSERT(comp->input_use_list() == nullptr);
3681 comp->ClearSSATempIndex();
3682 comp->ClearTempIndex();
3683 }
3684
3685 return this;
3686 }
3687
3688 if (comparison()->IsEqualityCompare() &&
3689 comparison()->operation_cid() == kSmiCid) {
3690 BinarySmiOpInstr* bit_and = nullptr;
3691 bool negate = false;
3693 &negate)) {
3694 bit_and = comparison()->left()->definition()->AsBinarySmiOp();
3695 } else if (RecognizeTestPattern(comparison()->right(), comparison()->left(),
3696 &negate)) {
3697 bit_and = comparison()->right()->definition()->AsBinarySmiOp();
3698 }
3699 if (bit_and != nullptr) {
3700 if (FLAG_trace_optimization && flow_graph->should_print()) {
3701 THR_Print("Merging test smi v%" Pd "\n", bit_and->ssa_temp_index());
3702 }
3704 comparison()->source(),
3705 negate ? Token::NegateComparison(comparison()->kind())
3706 : comparison()->kind(),
3707 bit_and->left()->Copy(zone), bit_and->right()->Copy(zone));
3710 flow_graph->CopyDeoptTarget(this, bit_and);
3712 bit_and->RemoveFromGraph();
3713 }
3714 }
3715 return this;
3716}
3717
3719 if (!HasUses()) return nullptr;
3720
3721 bool negated = false;
3722 Definition* replacement = CanonicalizeStrictCompare(this, &negated,
3723 /*is_branch=*/false);
3724 if (negated && replacement->IsComparison()) {
3725 ASSERT(replacement != this);
3726 replacement->AsComparison()->NegateComparison();
3727 }
3728 return replacement;
3729}
3730
3732 return (use->definition()->IsUnbox() && use->IsSingleUse()) ||
3733 use->definition()->IsConstant();
3734}
3735
3737 if (is_null_aware()) {
3738 ASSERT(operation_cid() == kMintCid);
3739 // Select more efficient instructions based on operand types.
3740 CompileType* left_type = left()->Type();
3741 CompileType* right_type = right()->Type();
3742 if (left_type->IsNull() || left_type->IsNullableSmi() ||
3743 right_type->IsNull() || right_type->IsNullableSmi()) {
3744 auto replacement = new StrictCompareInstr(
3745 source(),
3746 (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
3747 left()->CopyWithType(), right()->CopyWithType(),
3748 /*needs_number_check=*/false, DeoptId::kNone);
3749 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3750 return replacement;
3751 } else {
3752 // Null-aware EqualityCompare takes boxed inputs, so make sure
3753 // unmatched representations are still allowed when converting
3754 // EqualityCompare to the unboxed instruction.
3755 if (!left_type->is_nullable() && !right_type->is_nullable() &&
3756 flow_graph->unmatched_representations_allowed()) {
3757 set_null_aware(false);
3758 }
3759 }
3760 } else {
3761 if ((operation_cid() == kMintCid) && IsSingleUseUnboxOrConstant(left()) &&
3763 (left()->Type()->IsNullableSmi() || right()->Type()->IsNullableSmi()) &&
3764 flow_graph->unmatched_representations_allowed()) {
3765 auto replacement = new StrictCompareInstr(
3766 source(),
3767 (kind() == Token::kEQ) ? Token::kEQ_STRICT : Token::kNE_STRICT,
3768 left()->CopyWithType(), right()->CopyWithType(),
3769 /*needs_number_check=*/false, DeoptId::kNone);
3770 flow_graph->InsertBefore(this, replacement, env(), FlowGraph::kValue);
3771 return replacement;
3772 }
3773 }
3774 return this;
3775}
3776
3778 if (index()->BindsToSmiConstant() && offset()->BindsToSmiConstant()) {
3779 const intptr_t offset_in_bytes = Utils::AddWithWrapAround(
3780 Utils::MulWithWrapAround<intptr_t>(index()->BoundSmiConstant(),
3781 index_scale()),
3782 offset()->BoundSmiConstant());
3783
3784 if (offset_in_bytes == 0) return base()->definition();
3785
3786 if (compiler::target::IsSmi(offset_in_bytes)) {
3787 auto* const Z = flow_graph->zone();
3788 auto* const new_adjust = new (Z) CalculateElementAddressInstr(
3789 base()->CopyWithType(Z),
3790 new (Z) Value(
3791 flow_graph->GetConstant(Object::smi_zero(), kUnboxedIntPtr)),
3792 /*index_scale=*/1,
3793 new (Z) Value(flow_graph->GetConstant(
3794 Smi::Handle(Smi::New(offset_in_bytes)), kUnboxedIntPtr)));
3795 flow_graph->InsertBefore(this, new_adjust, env(), FlowGraph::kValue);
3796 return new_adjust;
3797 }
3798 }
3799 return this;
3800}
3801
3803 const intptr_t value_cid = value()->Type()->ToCid();
3804 if (value_cid == kDynamicCid) {
3805 return this;
3806 }
3807
3808 return cids().HasClassId(value_cid) ? nullptr : this;
3809}
3810
3812 if (!HasUses()) return nullptr;
3813
3814 const intptr_t cid = object()->Type()->ToCid();
3815 if (cid != kDynamicCid) {
3816 const auto& smi = Smi::ZoneHandle(flow_graph->zone(), Smi::New(cid));
3817 return flow_graph->GetConstant(smi, representation());
3818 }
3819 return this;
3820}
3821
3823 if (value()->BindsToConstant()) {
3824 const Object& constant_value = value()->BoundConstant();
3825 if (constant_value.IsSmi() &&
3826 cids_.Contains(Smi::Cast(constant_value).Value())) {
3827 return nullptr;
3828 }
3829 }
3830 return this;
3831}
3832
3834 Token::Kind kind,
3835 Value* value,
3836 const ZoneGrowableArray<intptr_t>& cid_results,
3837 intptr_t deopt_id)
3838 : TemplateComparison(source, kind, deopt_id), cid_results_(cid_results) {
3839 ASSERT((kind == Token::kIS) || (kind == Token::kISNOT));
3840 SetInputAt(0, value);
3841 set_operation_cid(kObjectCid);
3842#ifdef DEBUG
3843 ASSERT(cid_results[0] == kSmiCid);
3844 if (deopt_id == DeoptId::kNone) {
3845 // The entry for Smi can be special, but all other entries have
3846 // to match in the no-deopt case.
3847 for (intptr_t i = 4; i < cid_results.length(); i += 2) {
3848 ASSERT(cid_results[i + 1] == cid_results[3]);
3849 }
3850 }
3851#endif
3852}
3853
3854Definition* TestCidsInstr::Canonicalize(FlowGraph* flow_graph) {
3855 CompileType* in_type = value()->Type();
3856 intptr_t cid = in_type->ToCid();
3857 if (cid == kDynamicCid) return this;
3858
3859 const ZoneGrowableArray<intptr_t>& data = cid_results();
3860 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
3861 for (intptr_t i = 0; i < data.length(); i += 2) {
3862 if (data[i] == cid) {
3863 return (data[i + 1] == true_result)
3864 ? flow_graph->GetConstant(Bool::True())
3865 : flow_graph->GetConstant(Bool::False());
3866 }
3867 }
3868
3869 if (!CanDeoptimize()) {
3870 ASSERT(deopt_id() == DeoptId::kNone);
3871 return (data[data.length() - 1] == true_result)
3872 ? flow_graph->GetConstant(Bool::False())
3873 : flow_graph->GetConstant(Bool::True());
3874 }
3875
3876 // TODO(sra): Handle nullable input, possibly canonicalizing to a compare
3877 // against `null`.
3878 return this;
3879}
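// The cid_results_ array stores (cid, result) pairs. A statically known input
// cid is looked up directly. If the instruction cannot deoptimize, all
// non-Smi entries share one result (see the constructor assert above), so a
// cid that is absent from the table must yield the negated shared result and
// the test still folds to a constant.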
3880
3881TestRangeInstr::TestRangeInstr(const InstructionSource& source,
3882 Value* value,
3883 uword lower,
3884 uword upper,
3885 Representation value_representation)
3887 lower_(lower),
3888 upper_(upper),
3889 value_representation_(value_representation) {
3890 ASSERT(lower < upper);
3891 ASSERT(value_representation == kTagged ||
3892 value_representation == kUnboxedUword);
3893 SetInputAt(0, value);
3894 set_operation_cid(kObjectCid);
3895}
3896
3897Definition* TestRangeInstr::Canonicalize(FlowGraph* flow_graph) {
3898 if (value()->BindsToSmiConstant()) {
3899 uword val = Smi::Cast(value()->BoundConstant()).Value();
3900 bool in_range = lower_ <= val && val <= upper_;
3901 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
3902 return flow_graph->GetConstant(
3903 Bool::Get(in_range == (kind() == Token::kIS)));
3904 }
3905
3906 const Range* range = value()->definition()->range();
3907 if (range != nullptr) {
3908 if (range->IsWithin(lower_, upper_)) {
3909 return flow_graph->GetConstant(Bool::Get(kind() == Token::kIS));
3910 }
3911 if (!range->Overlaps(lower_, upper_)) {
3912 return flow_graph->GetConstant(Bool::Get(kind() != Token::kIS));
3913 }
3914 }
3915
3916 if (LoadClassIdInstr* load_cid = value()->definition()->AsLoadClassId()) {
3917 uword lower, upper;
3918 load_cid->InferRange(&lower, &upper);
3919 if (lower >= lower_ && upper <= upper_) {
3920 return flow_graph->GetConstant(Bool::Get(kind() == Token::kIS));
3921 } else if (lower > upper_ || upper < lower_) {
3922 return flow_graph->GetConstant(Bool::Get(kind() != Token::kIS));
3923 }
3924 }
3925
3926 return this;
3927}
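// Three folding opportunities above, in order: a constant Smi input is tested
// directly; an input whose inferred value range lies entirely inside or
// entirely outside [lower_, upper_] folds to a constant; and an input coming
// from LoadClassId folds when the inferred cid range is nested in, or
// disjoint from, [lower_, upper_].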
3928
3929Instruction* GuardFieldClassInstr::Canonicalize(FlowGraph* flow_graph) {
3930 if (field().guarded_cid() == kDynamicCid) {
3931 return nullptr; // Nothing to guard.
3932 }
3933
3934 if (field().is_nullable() && value()->Type()->IsNull()) {
3935 return nullptr;
3936 }
3937
3938 const intptr_t cid = field().is_nullable() ? value()->Type()->ToNullableCid()
3939 : value()->Type()->ToCid();
3940 if (field().guarded_cid() == cid) {
3941 return nullptr; // Value is guaranteed to have this cid.
3942 }
3943
3944 return this;
3945}
3946
3947Instruction* GuardFieldLengthInstr::Canonicalize(FlowGraph* flow_graph) {
3948 if (!field().needs_length_check()) {
3949 return nullptr; // Nothing to guard.
3950 }
3951
3952 const intptr_t expected_length = field().guarded_list_length();
3953 if (expected_length == Field::kUnknownFixedLength) {
3954 return this;
3955 }
3956
3957 // Check if length is statically known.
3958 StaticCallInstr* call = value()->definition()->AsStaticCall();
3959 if (call == nullptr) {
3960 return this;
3961 }
3962
3963 ConstantInstr* length = nullptr;
3964 if (call->is_known_list_constructor() &&
3965 LoadFieldInstr::IsFixedLengthArrayCid(call->Type()->ToCid())) {
3966 length = call->ArgumentAt(1)->AsConstant();
3967 } else if (call->function().recognized_kind() ==
3968 MethodRecognizer::kByteDataFactory) {
3969 length = call->ArgumentAt(1)->AsConstant();
3970 } else if (LoadFieldInstr::IsTypedDataViewFactory(call->function())) {
3971 length = call->ArgumentAt(3)->AsConstant();
3972 }
3973 if ((length != nullptr) && length->value().IsSmi() &&
3974 Smi::Cast(length->value()).Value() == expected_length) {
3975 return nullptr; // Expected length matched.
3976 }
3977
3978 return this;
3979}
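// The length guard above can only be dropped when the value flows straight
// out of a recognized fixed-length allocation (a known list constructor, the
// ByteData factory, or a typed-data view factory) whose constant length
// argument matches the guarded length; in every other case the guard stays.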
3980
3985
3986Instruction* CheckSmiInstr::Canonicalize(FlowGraph* flow_graph) {
3987 return (value()->Type()->ToCid() == kSmiCid) ? nullptr : this;
3988}
3989
3990Instruction* CheckEitherNonSmiInstr::Canonicalize(FlowGraph* flow_graph) {
3991 if ((left()->Type()->ToCid() == kDoubleCid) ||
3992 (right()->Type()->ToCid() == kDoubleCid)) {
3993 return nullptr; // Remove from the graph.
3994 }
3995 return this;
3996}
3997
3998Definition* CheckNullInstr::Canonicalize(FlowGraph* flow_graph) {
3999 return (!value()->Type()->is_nullable()) ? value()->definition() : this;
4000}
4001
4002bool CheckNullInstr::AttributesEqual(const Instruction& other) const {
4003 auto const other_check = other.AsCheckNull();
4004 ASSERT(other_check != nullptr);
4005 return function_name().Equals(other_check->function_name()) &&
4006 exception_type() == other_check->exception_type();
4007}
4008
4009BoxInstr* BoxInstr::Create(Representation from, Value* value) {
4010 switch (from) {
4011 case kUnboxedInt8:
4012 case kUnboxedUint8:
4013 case kUnboxedInt16:
4014 case kUnboxedUint16:
4015#if defined(HAS_SMI_63_BITS)
4016 case kUnboxedInt32:
4017 case kUnboxedUint32:
4018#endif
4019 return new BoxSmallIntInstr(from, value);
4020
4021#if !defined(HAS_SMI_63_BITS)
4022 case kUnboxedInt32:
4023 return new BoxInt32Instr(value);
4024
4025 case kUnboxedUint32:
4026 return new BoxUint32Instr(value);
4027#endif
4028
4029 case kUnboxedInt64:
4030 return new BoxInt64Instr(value);
4031
4032 case kUnboxedDouble:
4033 case kUnboxedFloat:
4034 case kUnboxedFloat32x4:
4035 case kUnboxedFloat64x2:
4036 case kUnboxedInt32x4:
4037 return new BoxInstr(from, value);
4038
4039 default:
4040 UNREACHABLE();
4041 return nullptr;
4042 }
4043}
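// Representation -> box instruction mapping used above: the small integer
// representations (plus 32-bit ints on targets with 63-bit Smis, i.e. when
// HAS_SMI_63_BITS is defined) always fit in a Smi and use BoxSmallIntInstr;
// 32-bit ints otherwise get dedicated BoxInt32/BoxUint32 instructions; 64-bit
// ints use BoxInt64Instr, and the floating point / SIMD representations use
// the generic BoxInstr.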
4044
4045UnboxInstr* UnboxInstr::Create(Representation to,
4046 Value* value,
4047 intptr_t deopt_id,
4048 SpeculativeMode speculative_mode) {
4049 switch (to) {
4050 case kUnboxedInt32:
4051 // We must truncate if we can't deoptimize.
4052 return new UnboxInt32Instr(
4053 speculative_mode == SpeculativeMode::kNotSpeculative
4054 ? UnboxInt32Instr::kTruncate
4055 : UnboxInt32Instr::kNoTruncation,
4056 value, deopt_id, speculative_mode);
4057
4058 case kUnboxedUint32:
4059 return new UnboxUint32Instr(value, deopt_id, speculative_mode);
4060
4061 case kUnboxedInt64:
4062 return new UnboxInt64Instr(value, deopt_id, speculative_mode);
4063
4064 case kUnboxedDouble:
4065 case kUnboxedFloat:
4066 case kUnboxedFloat32x4:
4067 case kUnboxedFloat64x2:
4068 case kUnboxedInt32x4:
4070 return new UnboxInstr(to, value, deopt_id, speculative_mode);
4071
4072 default:
4073 UNREACHABLE();
4074 return nullptr;
4075 }
4076}
4077
4078bool UnboxInstr::CanConvertSmi() const {
4079 switch (representation()) {
4080 case kUnboxedDouble:
4081 case kUnboxedFloat:
4082 case kUnboxedInt32:
4083 case kUnboxedInt64:
4084 return true;
4085
4086 case kUnboxedFloat32x4:
4087 case kUnboxedFloat64x2:
4088 case kUnboxedInt32x4:
4089 return false;
4090
4091 default:
4092 UNREACHABLE();
4093 return false;
4094 }
4095}
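// CanConvertSmi() above says which unboxed representations may legitimately
// see a Smi input that has to be converted on the fly (32-bit and 64-bit
// ints, double, float); the SIMD representations can never come from a Smi.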
4096
4097const BinaryFeedback* BinaryFeedback::Create(Zone* zone,
4098 const ICData& ic_data) {
4099 BinaryFeedback* result = new (zone) BinaryFeedback(zone);
4100 if (ic_data.NumArgsTested() == 2) {
4101 for (intptr_t i = 0, n = ic_data.NumberOfChecks(); i < n; i++) {
4102 if (ic_data.GetCountAt(i) == 0) {
4103 continue;
4104 }
4105 GrowableArray<intptr_t> arg_ids(2);
4106 ic_data.GetClassIdsAt(i, &arg_ids);
4107 result->feedback_.Add({arg_ids[0], arg_ids[1]});
4108 }
4109 }
4110 return result;
4111}
4112
4113const BinaryFeedback* BinaryFeedback::CreateMonomorphic(Zone* zone,
4114 intptr_t receiver_cid,
4115 intptr_t argument_cid) {
4116 BinaryFeedback* result = new (zone) BinaryFeedback(zone);
4117 result->feedback_.Add({receiver_cid, argument_cid});
4118 return result;
4119}
4120
4121const CallTargets* CallTargets::CreateMonomorphic(Zone* zone,
4122 intptr_t receiver_cid,
4123 const Function& target) {
4124 CallTargets* targets = new (zone) CallTargets(zone);
4125 const intptr_t count = 1;
4126 targets->cid_ranges_.Add(new (zone) TargetInfo(
4127 receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.ptr()),
4128 count, StaticTypeExactnessState::NotTracking()));
4129 return targets;
4130}
4131
4132const CallTargets* CallTargets::Create(Zone* zone, const ICData& ic_data) {
4133 CallTargets* targets = new (zone) CallTargets(zone);
4134 targets->CreateHelper(zone, ic_data);
4135 targets->Sort(OrderById);
4136 targets->MergeIntoRanges();
4137 return targets;
4138}
4139
4140const CallTargets* CallTargets::CreateAndExpand(Zone* zone,
4141 const ICData& ic_data) {
4142 CallTargets& targets = *new (zone) CallTargets(zone);
4143 targets.CreateHelper(zone, ic_data);
4144
4145 if (targets.is_empty() || targets.IsMonomorphic()) {
4146 return &targets;
4147 }
4148
4149 targets.Sort(OrderById);
4150
4151 Array& args_desc_array = Array::Handle(zone, ic_data.arguments_descriptor());
4152 ArgumentsDescriptor args_desc(args_desc_array);
4153 String& name = String::Handle(zone, ic_data.target_name());
4154
4155 Function& fn = Function::Handle(zone);
4156
4157 intptr_t length = targets.length();
4158
4159 // Merging/extending cid ranges is also done in Cids::CreateAndExpand.
4160 // If changing this code, consider also adjusting Cids code.
4161
4162 // Spread class-ids to preceding classes where a lookup yields the same
4163 // method. A polymorphic target is not really the same method since its
4164 // behaviour depends on the receiver class-id, so we don't spread the
4165 // class-ids in that case.
4166 for (int idx = 0; idx < length; idx++) {
4167 int lower_limit_cid = (idx == 0) ? -1 : targets[idx - 1].cid_end;
4168 auto target_info = targets.TargetAt(idx);
4169 const Function& target = *target_info->target;
4170 if (target.is_polymorphic_target()) continue;
4171 for (int i = target_info->cid_start - 1; i > lower_limit_cid; i--) {
4172 bool class_is_abstract = false;
4173 if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
4174 &class_is_abstract) &&
4175 fn.ptr() == target.ptr()) {
4176 if (!class_is_abstract) {
4177 target_info->cid_start = i;
4178 target_info->exactness = StaticTypeExactnessState::NotTracking();
4179 }
4180 } else {
4181 break;
4182 }
4183 }
4184 }
4185
4186 // Spread class-ids to following classes where a lookup yields the same
4187 // method.
4188 const intptr_t max_cid = IsolateGroup::Current()->class_table()->NumCids();
4189 for (int idx = 0; idx < length; idx++) {
4190 int upper_limit_cid =
4191 (idx == length - 1) ? max_cid : targets[idx + 1].cid_start;
4192 auto target_info = targets.TargetAt(idx);
4193 const Function& target = *target_info->target;
4194 if (target.is_polymorphic_target()) continue;
4195 // The code below attempts to avoid spreading a class-id range
4196 // into a suffix that consists purely of abstract classes, in order
4197 // to keep the range short.
4198 // However, such spreading is beneficial when it allows two
4199 // consecutive ranges to be merged.
4200 intptr_t cid_end_including_abstract = target_info->cid_end;
4201 for (int i = target_info->cid_end + 1; i < upper_limit_cid; i++) {
4202 bool class_is_abstract = false;
4203 if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
4204 &class_is_abstract) &&
4205 fn.ptr() == target.ptr()) {
4206 cid_end_including_abstract = i;
4207 if (!class_is_abstract) {
4208 target_info->cid_end = i;
4209 target_info->exactness = StaticTypeExactnessState::NotTracking();
4210 }
4211 } else {
4212 break;
4213 }
4214 }
4215
4216 // Check if we have a suffix that consists of abstract classes
4217 // and expand into it if that would allow us to merge this
4218 // range with the subsequent range.
4219 if ((cid_end_including_abstract > target_info->cid_end) &&
4220 (idx < length - 1) &&
4221 ((cid_end_including_abstract + 1) == targets[idx + 1].cid_start) &&
4222 (target.ptr() == targets.TargetAt(idx + 1)->target->ptr())) {
4223 target_info->cid_end = cid_end_including_abstract;
4224 target_info->exactness = StaticTypeExactnessState::NotTracking();
4225 }
4226 }
4227 targets.MergeIntoRanges();
4228 return &targets;
4229}
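// Worked example of the spreading above (hypothetical cids): suppose cids
// 10..12 dispatch to target T, cid 13 belongs to an abstract class whose
// lookup also resolves to T, and cids 14..20 were recorded as a separate
// range that dispatches to T as well. The downward pass can grow [14, 20]
// towards 13, the upward pass can absorb the abstract cid 13 because doing so
// makes the ranges adjacent, and MergeIntoRanges() then collapses everything
// into a single [10, 20] -> T entry, which later becomes one class-id range
// check at the call site.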
4230
4231void CallTargets::MergeIntoRanges() {
4232 if (length() == 0) {
4233 return; // For correctness not performance: must not update length to 1.
4234 }
4235
4236 // Merge adjacent class id ranges.
4237 int dest = 0;
4238 // We merge entries that dispatch to the same target, but polymorphic targets
4239 // are not really the same target since they depend on the class-id, so we
4240 // don't merge them.
4241 for (int src = 1; src < length(); src++) {
4242 const Function& target = *TargetAt(dest)->target;
4243 if (TargetAt(dest)->cid_end + 1 >= TargetAt(src)->cid_start &&
4244 target.ptr() == TargetAt(src)->target->ptr() &&
4245 !target.is_polymorphic_target()) {
4246 TargetAt(dest)->cid_end = TargetAt(src)->cid_end;
4247 TargetAt(dest)->count += TargetAt(src)->count;
4248 TargetAt(dest)->exactness = StaticTypeExactnessState::NotTracking();
4249 } else {
4250 dest++;
4251 if (src != dest) {
4252 // Use cid_ranges_ instead of TargetAt when updating the pointer.
4253 cid_ranges_[dest] = TargetAt(src);
4254 }
4255 }
4256 }
4257 SetLength(dest + 1);
4259}
4260
4261void CallTargets::Print() const {
4262 for (intptr_t i = 0; i < length(); i++) {
4263 THR_Print("cid = [%" Pd ", %" Pd "], count = %" Pd ", target = %s\n",
4264 TargetAt(i)->cid_start, TargetAt(i)->cid_end, TargetAt(i)->count,
4265 TargetAt(i)->target->ToQualifiedCString());
4266 }
4267}
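// Example of the line format produced above (hypothetical values):
//   cid = [52, 54], count = 17, target = <qualified name of the target>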
4268
4269// Shared code generation methods (EmitNativeCode and
4270// MakeLocationSummary). Only assembly code that can be shared across all
4271// architectures can be used. Machine-specific register allocation and code
4272// generation are located in intermediate_language_<arch>.cc
4273
4274#define __ compiler->assembler()->
4275
4276LocationSummary* GraphEntryInstr::MakeLocationSummary(Zone* zone,
4277 bool optimizing) const {
4278 UNREACHABLE();
4279 return nullptr;
4280}
4281
4282LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone,
4283 bool optimizing) const {
4284 UNREACHABLE();
4285 return nullptr;
4286}
4287
4288void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4289 __ Bind(compiler->GetJumpLabel(this));
4290 if (!compiler->is_optimizing()) {
4291 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4292 InstructionSource());
4293 }
4294 if (HasParallelMove()) {
4295 parallel_move()->EmitNativeCode(compiler);
4296 }
4297}
4298
4299LocationSummary* TargetEntryInstr::MakeLocationSummary(Zone* zone,
4300 bool optimizing) const {
4301 UNREACHABLE();
4302 return nullptr;
4303}
4304
4305void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4306 __ Bind(compiler->GetJumpLabel(this));
4307
4308 // TODO(kusterman): Remove duplicate between
4309 // {TargetEntryInstr,FunctionEntryInstr}::EmitNativeCode.
4310 if (!compiler->is_optimizing()) {
4311 if (compiler->NeedsEdgeCounter(this)) {
4312 compiler->EmitEdgeCounter(preorder_number());
4313 }
4314
4315 // The deoptimization descriptor points after the edge counter code for
4316 // uniformity with ARM, where we can reuse pattern matching code that
4317 // matches backwards from the end of the pattern.
4318 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4319 InstructionSource());
4320 }
4321 if (HasParallelMove()) {
4322 if (compiler::Assembler::EmittingComments()) {
4323 compiler->EmitComment(parallel_move());
4324 }
4325 parallel_move()->EmitNativeCode(compiler);
4326 }
4327}
4328
4329LocationSummary* FunctionEntryInstr::MakeLocationSummary(
4330 Zone* zone,
4331 bool optimizing) const {
4332 UNREACHABLE();
4333 return nullptr;
4334}
4335
4336void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4337#if defined(TARGET_ARCH_X64)
4338 // Ensure the start of the monomorphic checked entry is 2-byte aligned (see
4339 // also Assembler::MonomorphicCheckedEntry()).
4340 if (__ CodeSize() % 2 == 1) {
4341 __ nop();
4342 }
4343#endif
4344 if (tag() == Instruction::kFunctionEntry) {
4345 __ Bind(compiler->GetJumpLabel(this));
4346 }
4347
4348 if (this == compiler->flow_graph().graph_entry()->unchecked_entry()) {
4349 __ BindUncheckedEntryPoint();
4350 }
4351
4352 // In the AOT compiler we want to reduce code size, so generate no
4353 // fall-through code in [FlowGraphCompiler::CompileGraph()].
4354 // (As opposed to here where we don't check for the return value of
4355 // [Intrinsify]).
4356 const Function& function = compiler->parsed_function().function();
4357
4358 if (function.NeedsMonomorphicCheckedEntry(compiler->zone())) {
4360 if (!FLAG_precompiled_mode) {
4361 __ MonomorphicCheckedEntryJIT();
4362 } else {
4363 __ MonomorphicCheckedEntryAOT();
4364 }
4366 }
4367
4368 // NOTE: Because of the presence of multiple entry points, we generate the
4369 // same intrinsification & frame setup several times. That's why we cannot
4370 // rely on constant pool usage still being disallowed when we come in here.
4371#if defined(TARGET_USES_OBJECT_POOL)
4372 __ set_constant_pool_allowed(false);
4373#endif
4374
4375 if (compiler->TryIntrinsify() && compiler->skip_body_compilation()) {
4376 return;
4377 }
4378 compiler->EmitPrologue();
4379
4380#if defined(TARGET_USES_OBJECT_POOL)
4381 ASSERT(__ constant_pool_allowed());
4382#endif
4383
4384 if (!compiler->is_optimizing()) {
4385 if (compiler->NeedsEdgeCounter(this)) {
4386 compiler->EmitEdgeCounter(preorder_number());
4387 }
4388
4389 // The deoptimization descriptor points after the edge counter code for
4390 // uniformity with ARM, where we can reuse pattern matching code that
4391 // matches backwards from the end of the pattern.
4392 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
4393 InstructionSource());
4394 }
4395 if (HasParallelMove()) {
4396 if (compiler::Assembler::EmittingComments()) {
4397 compiler->EmitComment(parallel_move());
4398 }
4399 parallel_move()->EmitNativeCode(compiler);
4400 }
4401}
4402
4403LocationSummary* NativeEntryInstr::MakeLocationSummary(Zone* zone,
4404 bool optimizing) const {
4405 UNREACHABLE();
4406}
4407
4408void NativeEntryInstr::SaveArguments(FlowGraphCompiler* compiler) const {
4409 __ Comment("SaveArguments");
4410
4411 // Save the argument registers, in reverse order.
4412 const auto& return_loc = marshaller_.Location(compiler::ffi::kResultIndex);
4413 if (return_loc.IsPointerToMemory()) {
4414 SaveArgument(compiler, return_loc.AsPointerToMemory().pointer_location());
4415 }
4416 for (intptr_t i = marshaller_.num_args(); i-- > 0;) {
4417 SaveArgument(compiler, marshaller_.Location(i));
4418 }
4419
4420 __ Comment("SaveArgumentsEnd");
4421}
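// Ordering above: if the native return value is passed through a pointer to
// memory, that pointer location is saved first; then the argument locations
// are walked in reverse so that, together with SaveArgument() pushing
// higher-order registers first, the saved values form one contiguous
// little-endian image that NativeParameterInstr can later read back relative
// to the frame pointer.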
4422
4425 const compiler::ffi::NativeLocation& nloc) const {
4426 if (nloc.IsStack()) return;
4427
4428 if (nloc.IsRegisters()) {
4429 const auto& reg_loc = nloc.WidenTo4Bytes(compiler->zone()).AsRegisters();
4430 const intptr_t num_regs = reg_loc.num_regs();
4431 // Save higher-order component first, so bytes are in little-endian layout
4432 // overall.
4433 for (intptr_t i = num_regs - 1; i >= 0; i--) {
4434 __ PushRegister(reg_loc.reg_at(i));
4435 }
4436 } else if (nloc.IsFpuRegisters()) {
4437 // TODO(dartbug.com/40469): Reduce code size.
4438 __ AddImmediate(SPREG, -8);
4439 NoTemporaryAllocator temp_alloc;
4440 const auto& dst = compiler::ffi::NativeStackLocation(
4441 nloc.payload_type(), nloc.payload_type(), SPREG, 0);
4442 compiler->EmitNativeMove(dst, nloc, &temp_alloc);
4443 } else if (nloc.IsPointerToMemory()) {
4444 const auto& pointer_loc = nloc.AsPointerToMemory().pointer_location();
4445 if (pointer_loc.IsRegisters()) {
4446 const auto& regs_loc = pointer_loc.AsRegisters();
4447 ASSERT(regs_loc.num_regs() == 1);
4448 __ PushRegister(regs_loc.reg_at(0));
4449 } else {
4450 ASSERT(pointer_loc.IsStack());
4451 // It's already on the stack, so we don't have to save it.
4452 }
4453 } else if (nloc.IsMultiple()) {
4454 const auto& multiple = nloc.AsMultiple();
4455 const intptr_t num = multiple.locations().length();
4456 // Save the argument registers, in reverse order.
4457 for (intptr_t i = num; i-- > 0;) {
4458 SaveArgument(compiler, *multiple.locations().At(i));
4459 }
4460 } else {
4461 ASSERT(nloc.IsBoth());
4462 const auto& both = nloc.AsBoth();
4463 SaveArgument(compiler, both.location(0));
4464 }
4465}
4466
4467LocationSummary* OsrEntryInstr::MakeLocationSummary(Zone* zone,
4468 bool optimizing) const {
4469 UNREACHABLE();
4470 return nullptr;
4471}
4472
4473void OsrEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4474 ASSERT(!CompilerState::Current().is_aot());
4475 ASSERT(compiler->is_optimizing());
4476 __ Bind(compiler->GetJumpLabel(this));
4477
4478 // NOTE: Because the graph can have multiple entry points, we generate the
4479 // same intrinsification & frame setup several times. That's why we cannot
4480 // rely on constant pool usage still being disallowed when we come in here.
4481#if defined(TARGET_USES_OBJECT_POOL)
4482 __ set_constant_pool_allowed(false);
4483#endif
4484
4485 compiler->EmitPrologue();
4486
4487#if defined(TARGET_USES_OBJECT_POOL)
4488 ASSERT(__ constant_pool_allowed());
4489#endif
4490
4491 if (HasParallelMove()) {
4492 if (compiler::Assembler::EmittingComments()) {
4493 compiler->EmitComment(parallel_move());
4494 }
4495 parallel_move()->EmitNativeCode(compiler);
4496 }
4497}
4498
4499void IndirectGotoInstr::ComputeOffsetTable(FlowGraphCompiler* compiler) {
4500 ASSERT(SuccessorCount() == offsets_.Length());
4501 intptr_t element_size = offsets_.ElementSizeInBytes();
4502 for (intptr_t i = 0; i < SuccessorCount(); i++) {
4503 TargetEntryInstr* target = SuccessorAt(i);
4504 auto* label = compiler->GetJumpLabel(target);
4505 RELEASE_ASSERT(label != nullptr);
4506 RELEASE_ASSERT(label->IsBound());
4507 intptr_t offset = label->Position();
4509 offsets_.SetInt32(i * element_size, offset);
4510 }
4511}
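// After this runs, offsets_ holds one entry per successor: the bound code
// offset of that successor's join label. The IndirectGoto consuming this
// table can then pick entry i and jump to the corresponding position in the
// generated code.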
4512
4513LocationSummary* IndirectEntryInstr::MakeLocationSummary(
4514 Zone* zone,
4515 bool optimizing) const {
4516 return JoinEntryInstr::MakeLocationSummary(zone, optimizing);
4517}
4518
4519void IndirectEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4520 JoinEntryInstr::EmitNativeCode(compiler);
4521}
4522
4523LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone,
4524 bool opt) const {
4525 const intptr_t kNumInputs = 0;
4526 const bool use_shared_stub = UseSharedSlowPathStub(opt);
4527 const intptr_t kNumTemps = calls_initializer() &&
4529 use_shared_stub
4530 ? 1
4531 : 0;
4532 LocationSummary* locs = new (zone) LocationSummary(
4533 zone, kNumInputs, kNumTemps,
4536 ? (use_shared_stub ? LocationSummary::kCallOnSharedSlowPath
4538 : LocationSummary::kCall)
4539 : LocationSummary::kNoCall);
4541 use_shared_stub) {
4542 locs->set_temp(
4544 }
4545 locs->set_out(0, calls_initializer() ? Location::RegisterLocation(
4547 : Location::RequiresRegister());
4548 return locs;
4549}
4550
4551void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4552 const Register result = locs()->out(0).reg();
4553
4554 compiler->used_static_fields().Add(&field());
4555
4556 // Note: static field ids won't be changed by hot-reload.
4557 const intptr_t field_table_offset =
4558 compiler::target::Thread::field_table_values_offset();
4559 const intptr_t field_offset = compiler::target::FieldTable::OffsetOf(field());
4560
4561 __ LoadMemoryValue(result, THR, static_cast<int32_t>(field_table_offset));
4562 __ LoadMemoryValue(result, result, static_cast<int32_t>(field_offset));
4563
4564 if (calls_initializer()) {
4565 if (field().is_late() && !field().has_initializer()) {
4566 ThrowErrorSlowPathCode* slow_path =
4567 new LateInitializationErrorSlowPath(this);
4568 compiler->AddSlowPathCode(slow_path);
4569
4570 __ CompareObject(result, Object::sentinel());
4571 __ BranchIf(EQUAL, slow_path->entry_label());
4572 return;
4573 }
4574 ASSERT(field().has_initializer());
4575 auto object_store = compiler->isolate_group()->object_store();
4576 const Field& original_field = Field::ZoneHandle(field().Original());
4577
4578 compiler::Label no_call, call_initializer;
4579 __ CompareObject(result, Object::sentinel());
4580 if (!field().is_late()) {
4581 __ BranchIf(EQUAL, &call_initializer);
4582 __ CompareObject(result, Object::transition_sentinel());
4583 }
4584 __ BranchIf(NOT_EQUAL, &no_call);
4585
4586 auto& stub = Code::ZoneHandle(compiler->zone());
4587 __ Bind(&call_initializer);
4588 if (field().needs_load_guard()) {
4589 stub = object_store->init_static_field_stub();
4590 } else if (field().is_late()) {
4591 // The stubs below call the initializer function directly, so make sure
4592 // one is created.
4593 original_field.EnsureInitializerFunction();
4594 stub = field().is_final()
4595 ? object_store->init_late_final_static_field_stub()
4596 : object_store->init_late_static_field_stub();
4597 } else {
4598 // We call the runtime for non-late fields because the stub would need to
4599 // catch any exception generated by the initialization function to change
4600 // the value of the static field from the transition sentinel to null.
4601 stub = object_store->init_static_field_stub();
4602 }
4603
4604 __ LoadObject(InitStaticFieldABI::kFieldReg, original_field);
4605 compiler->GenerateStubCall(source(), stub,
4606 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4607 deopt_id(), env());
4608
4609 __ Bind(&no_call);
4610 }
4611}
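// Stub selection above, summarised: fields that still need a load guard go
// through the generic init_static_field stub; late fields with an initializer
// call their initializer function directly through the late (final) static
// field stubs; the remaining non-late fields go to the runtime so that an
// exception thrown by the initializer can move the field back from the
// transition sentinel to null.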
4612
4613LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
4614 bool opt) const {
4615 const intptr_t kNumInputs = 1;
4616 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
4618}
4619
4620void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4621 Register obj = locs()->in(0).reg();
4622 Register result = locs()->out(0).reg();
4623 ASSERT(object()->definition()->representation() == kUntagged);
4624 __ LoadFromOffset(result, obj, offset());
4625}
4626
4627LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
4628 bool opt) const {
4629 const intptr_t kNumInputs = 1;
4630 LocationSummary* locs = nullptr;
4631 auto const rep = slot().representation();
4632 if (calls_initializer()) {
4634 const bool using_shared_stub = UseSharedSlowPathStub(opt);
4635 const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
4636 locs = new (zone) LocationSummary(
4637 zone, kNumInputs, kNumTemps,
4638 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
4639 : LocationSummary::kCallOnSlowPath);
4640 if (using_shared_stub) {
4641 locs->set_temp(0, Location::RegisterLocation(
4643 }
4644 locs->set_in(0, Location::RequiresRegister());
4645 locs->set_out(0, Location::RequiresRegister());
4646 } else {
4647 const intptr_t kNumTemps = 0;
4648 locs = new (zone)
4649 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4650 locs->set_in(
4652 locs->set_out(
4654 }
4655 } else {
4656 const intptr_t kNumTemps = 0;
4657 locs = new (zone)
4658 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4659 locs->set_in(0, Location::RequiresRegister());
4660 if (rep == kTagged || rep == kUntagged) {
4661 locs->set_out(0, Location::RequiresRegister());
4662 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
4663 const size_t value_size = RepresentationUtils::ValueSize(rep);
4664 if (value_size <= compiler::target::kWordSize) {
4665 locs->set_out(0, Location::RequiresRegister());
4666 } else {
4667 ASSERT(value_size == 2 * compiler::target::kWordSize);
4668 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
4670 }
4671 } else {
4672 locs->set_out(0, Location::RequiresFpuRegister());
4673 }
4674 }
4675 return locs;
4676}
4677
4678void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4679 const Register instance_reg = locs()->in(0).reg();
4680 ASSERT(OffsetInBytes() >= 0); // Field is finalized.
4681 // For fields on Dart objects, the offset must point after the header.
4682 ASSERT(OffsetInBytes() != 0 || slot().has_untagged_instance());
4683
4684 auto const rep = slot().representation();
4685 if (calls_initializer()) {
4686 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4687 EmitNativeCodeForInitializerCall(compiler);
4688 } else if (rep == kTagged || rep == kUntagged) {
4689 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4690 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
4691 const size_t value_size = RepresentationUtils::ValueSize(rep);
4692 if (value_size <= compiler::target::kWordSize) {
4693 __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
4694 } else {
4695 auto const result_pair = locs()->out(0).AsPairLocation();
4696 const Register result_lo = result_pair->At(0).reg();
4697 const Register result_hi = result_pair->At(1).reg();
4698 __ LoadFieldFromOffset(result_lo, instance_reg, OffsetInBytes());
4699 __ LoadFieldFromOffset(result_hi, instance_reg,
4700 OffsetInBytes() + compiler::target::kWordSize);
4701 }
4702 } else {
4703 ASSERT(slot().IsDartField());
4704 const intptr_t cid = slot().field().guarded_cid();
4705 const FpuRegister result = locs()->out(0).fpu_reg();
4706 switch (cid) {
4707 case kDoubleCid:
4708 __ LoadUnboxedDouble(result, instance_reg,
4709 OffsetInBytes() - kHeapObjectTag);
4710 break;
4711 case kFloat32x4Cid:
4712 case kFloat64x2Cid:
4713 __ LoadUnboxedSimd128(result, instance_reg,
4714 OffsetInBytes() - kHeapObjectTag);
4715 break;
4716 default:
4717 UNREACHABLE();
4718 }
4719 }
4720}
4721
4722void LoadFieldInstr::EmitNativeCodeForInitializerCall(
4723 FlowGraphCompiler* compiler) {
4724 ASSERT(calls_initializer());
4725
4726 if (throw_exception_on_initialization()) {
4727 ThrowErrorSlowPathCode* slow_path =
4728 new LateInitializationErrorSlowPath(this);
4729 compiler->AddSlowPathCode(slow_path);
4730
4731 const Register result_reg = locs()->out(0).reg();
4732 __ CompareObject(result_reg, Object::sentinel());
4733 __ BranchIf(EQUAL, slow_path->entry_label());
4734 return;
4735 }
4736
4737 ASSERT(locs()->in(0).reg() == InitInstanceFieldABI::kInstanceReg);
4738 ASSERT(locs()->out(0).reg() == InitInstanceFieldABI::kResultReg);
4739 ASSERT(slot().IsDartField());
4740 const Field& field = slot().field();
4741 const Field& original_field = Field::ZoneHandle(field.Original());
4742
4743 compiler::Label no_call;
4744 __ CompareObject(InitInstanceFieldABI::kResultReg, Object::sentinel());
4745 __ BranchIf(NOT_EQUAL, &no_call);
4746
4747 __ LoadObject(InitInstanceFieldABI::kFieldReg, original_field);
4748
4749 auto object_store = compiler->isolate_group()->object_store();
4750 auto& stub = Code::ZoneHandle(compiler->zone());
4751 if (field.needs_load_guard()) {
4752 stub = object_store->init_instance_field_stub();
4753 } else if (field.is_late()) {
4754 if (!field.has_nontrivial_initializer()) {
4755 stub = object_store->init_instance_field_stub();
4756 } else {
4757 // Stubs for late field initialization call the initializer
4758 // function directly, so make sure one is created.
4759 original_field.EnsureInitializerFunction();
4760
4761 if (field.is_final()) {
4762 stub = object_store->init_late_final_instance_field_stub();
4763 } else {
4764 stub = object_store->init_late_instance_field_stub();
4765 }
4766 }
4767 } else {
4768 UNREACHABLE();
4769 }
4770
4771 compiler->GenerateStubCall(source(), stub,
4772 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4773 deopt_id(), env());
4774 __ Bind(&no_call);
4775}
4776
4777LocationSummary* ThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4778 const intptr_t kNumInputs = 1;
4779 const intptr_t kNumTemps = 0;
4780 LocationSummary* summary = new (zone)
4781 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4783 return summary;
4784}
4785
4786void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4787 auto object_store = compiler->isolate_group()->object_store();
4788 const auto& throw_stub =
4789 Code::ZoneHandle(compiler->zone(), object_store->throw_stub());
4790
4791 compiler->GenerateStubCall(source(), throw_stub,
4792 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4793 deopt_id(), env());
4794 // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
4795 // instruction: The ThrowInstr will terminate the current block. The very
4796 // next machine code instruction might get a pc descriptor attached with a
4797 // different try-index. If we removed this breakpoint instruction, the
4798 // runtime might associate this call with the try-index of the next
4799 // instruction.
4800 __ Breakpoint();
4801}
4802
4803LocationSummary* ReThrowInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4804 const intptr_t kNumInputs = 2;
4805 const intptr_t kNumTemps = 0;
4806 LocationSummary* summary = new (zone)
4807 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4810 return summary;
4811}
4812
4813void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4814 auto object_store = compiler->isolate_group()->object_store();
4815 const auto& re_throw_stub =
4816 Code::ZoneHandle(compiler->zone(), object_store->re_throw_stub());
4817
4818 compiler->SetNeedsStackTrace(catch_try_index());
4819 compiler->GenerateStubCall(source(), re_throw_stub,
4820 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
4821 deopt_id(), env());
4822 // Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
4823 // instruction: The ThrowInstr will terminate the current block. The very
4824 // next machine code instruction might get a pc descriptor attached with a
4825 // different try-index. If we removed this breakpoint instruction, the
4826 // runtime might associate this call with the try-index of the next
4827 // instruction.
4828 __ Breakpoint();
4829}
4830
4831LocationSummary* AssertBooleanInstr::MakeLocationSummary(Zone* zone,
4832 bool opt) const {
4833 const intptr_t kNumInputs = 1;
4834 const intptr_t kNumTemps = 0;
4835 LocationSummary* locs = new (zone)
4836 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
4839 return locs;
4840}
4841
4842LocationSummary* PhiInstr::MakeLocationSummary(Zone* zone,
4843 bool optimizing) const {
4844 UNREACHABLE();
4845 return nullptr;
4846}
4847
4848void PhiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4849 UNREACHABLE();
4850}
4851
4852LocationSummary* RedefinitionInstr::MakeLocationSummary(Zone* zone,
4853 bool optimizing) const {
4854 UNREACHABLE();
4855 return nullptr;
4856}
4857
4858void RedefinitionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4859 UNREACHABLE();
4860}
4861
4862LocationSummary* ReachabilityFenceInstr::MakeLocationSummary(
4863 Zone* zone,
4864 bool optimizing) const {
4865 LocationSummary* summary = new (zone)
4866 LocationSummary(zone, 1, 0, LocationSummary::ContainsCall::kNoCall);
4867 // Keep the parameter alive and reachable, in any location.
4868 summary->set_in(0, Location::Any());
4869 return summary;
4870}
4871
4872void ReachabilityFenceInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4873 // No native code, but we rely on the parameter being passed in here so that
4874 // it stays alive and reachable.
4875}
4876
4877LocationSummary* ParameterInstr::MakeLocationSummary(Zone* zone,
4878 bool optimizing) const {
4879 UNREACHABLE();
4880 return nullptr;
4881}
4882
4883void ParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4884 UNREACHABLE();
4885}
4886
4887void NativeParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4888 // There are two frames between SaveArguments and the NativeParameterInstr
4889 // moves.
4890 constexpr intptr_t delta =
4891 kCallerSpSlotFromFp // second frame FP to exit link slot
4892 + -kExitLinkSlotFromEntryFp // exit link slot to first frame FP
4893 + kCallerSpSlotFromFp; // first frame FP to argument save SP
4894 compiler::ffi::FrameRebase rebase(compiler->zone(),
4895 /*old_base=*/SPREG, /*new_base=*/FPREG,
4896 delta * compiler::target::kWordSize);
4897 const auto& location =
4898 marshaller_.NativeLocationOfNativeParameter(def_index_);
4899 const auto& src =
4900 rebase.Rebase(location.IsPointerToMemory()
4901 ? location.AsPointerToMemory().pointer_location()
4902 : location);
4903 NoTemporaryAllocator no_temp;
4904 const Location out_loc = locs()->out(0);
4905 const Representation out_rep = representation();
4906 compiler->EmitMoveFromNative(out_loc, out_rep, src, &no_temp);
4907}
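// The rebase above re-expresses the argument locations, originally recorded
// relative to SP at the time SaveArguments() ran, as FP-relative locations in
// the current frame: second frame FP to the exit link slot, exit link slot to
// the first frame FP, first frame FP to the argument save area (the three
// delta components listed above).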
4908
4909LocationSummary* NativeParameterInstr::MakeLocationSummary(Zone* zone,
4910 bool opt) const {
4911 ASSERT(opt);
4913 if (representation() == kUnboxedInt64 && compiler::target::kWordSize < 8) {
4916 } else {
4917 output = RegisterKindForResult() == Location::kRegister
4919 : Location::RequiresFpuRegister();
4920 }
4921 return LocationSummary::Make(zone, /*num_inputs=*/0, output,
4923}
4924
4926 for (intptr_t i = 0; i < moves_.length(); i++) {
4927 if (!moves_[i]->IsRedundant()) {
4928 return false;
4929 }
4930 }
4931 return true;
4932}
4933
4934LocationSummary* ParallelMoveInstr::MakeLocationSummary(Zone* zone,
4935 bool optimizing) const {
4936 return nullptr;
4937}
4938
4939void ParallelMoveInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4940 ParallelMoveEmitter(compiler, this).EmitNativeCode();
4941}
4942
4943LocationSummary* ConstraintInstr::MakeLocationSummary(Zone* zone,
4944 bool optimizing) const {
4945 UNREACHABLE();
4946 return nullptr;
4947}
4948
4949void ConstraintInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4950 UNREACHABLE();
4951}
4952
4954 Zone* zone,
4955 bool optimizing) const {
4956 UNREACHABLE();
4957 return nullptr;
4958}
4959
4960void MaterializeObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4961 UNREACHABLE();
4962}
4963
4964// This function should be kept in sync with
4965// FlowGraphCompiler::SlowPathEnvironmentFor().
4966void MaterializeObjectInstr::RemapRegisters(intptr_t* cpu_reg_slots,
4967 intptr_t* fpu_reg_slots) {
4968 if (registers_remapped_) {
4969 return;
4970 }
4971 registers_remapped_ = true;
4972
4973 for (intptr_t i = 0; i < InputCount(); i++) {
4974 locations_[i] = LocationRemapForSlowPath(
4975 LocationAt(i), InputAt(i)->definition(), cpu_reg_slots, fpu_reg_slots);
4976 }
4977}
4978
4979LocationSummary* MakeTempInstr::MakeLocationSummary(Zone* zone,
4980 bool optimizing) const {
4981 ASSERT(!optimizing);
4982 null_->InitializeLocationSummary(zone, optimizing);
4983 return null_->locs();
4984}
4985
4986void MakeTempInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4987 ASSERT(!compiler->is_optimizing());
4988 null_->EmitNativeCode(compiler);
4989}
4990
4991LocationSummary* DropTempsInstr::MakeLocationSummary(Zone* zone,
4992 bool optimizing) const {
4993 ASSERT(!optimizing);
4994 return (InputCount() == 1)
4997 : LocationSummary::Make(zone, 0, Location::NoLocation(),
4998 LocationSummary::kNoCall);
4999}
5000
5001void DropTempsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5002 ASSERT(!compiler->is_optimizing());
5003 // Assert that register assignment is correct.
5004 ASSERT((InputCount() == 0) || (locs()->out(0).reg() == locs()->in(0).reg()));
5005 __ Drop(num_temps());
5006}
5007
5008LocationSummary* BoxSmallIntInstr::MakeLocationSummary(Zone* zone,
5009 bool opt) const {
5011 compiler::target::kSmiBits);
5012 const intptr_t kNumInputs = 1;
5013 const intptr_t kNumTemps = 0;
5014 LocationSummary* summary = new (zone)
5015 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5016 summary->set_in(0, Location::RequiresRegister());
5017 summary->set_out(0, Location::RequiresRegister());
5018 return summary;
5019}
5020
5021void BoxSmallIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5022 const Register value = locs()->in(0).reg();
5023 const Register out = locs()->out(0).reg();
5024 ASSERT(value != out);
5025
5026 __ ExtendAndSmiTagValue(
5028}
5029
5030StrictCompareInstr::StrictCompareInstr(const InstructionSource& source,
5031 Token::Kind kind,
5032 Value* left,
5033 Value* right,
5034 bool needs_number_check,
5035 intptr_t deopt_id)
5036 : TemplateComparison(source, kind, deopt_id),
5037 needs_number_check_(needs_number_check) {
5038 ASSERT((kind == Token::kEQ_STRICT) || (kind == Token::kNE_STRICT));
5039 SetInputAt(0, left);
5040 SetInputAt(1, right);
5041}
5042
5043Condition StrictCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
5044 BranchLabels labels) {
5045 Location left = locs()->in(0);
5046 Location right = locs()->in(1);
5047 ASSERT(!left.IsConstant() || !right.IsConstant());
5048 Condition true_condition;
5049 if (left.IsConstant()) {
5050 if (TryEmitBoolTest(compiler, labels, 1, left.constant(),
5051 &true_condition)) {
5052 return true_condition;
5053 }
5054 true_condition = EmitComparisonCodeRegConstant(
5055 compiler, labels, right.reg(), left.constant());
5056 } else if (right.IsConstant()) {
5057 if (TryEmitBoolTest(compiler, labels, 0, right.constant(),
5058 &true_condition)) {
5059 return true_condition;
5060 }
5061 true_condition = EmitComparisonCodeRegConstant(compiler, labels, left.reg(),
5062 right.constant());
5063 } else {
5064 true_condition = compiler->EmitEqualityRegRegCompare(
5065 left.reg(), right.reg(), needs_number_check(), source(), deopt_id());
5066 }
5067 return true_condition != kInvalidCondition && (kind() != Token::kEQ_STRICT)
5068 ? InvertCondition(true_condition)
5069 : true_condition;
5070}
5071
5072bool StrictCompareInstr::TryEmitBoolTest(FlowGraphCompiler* compiler,
5073 BranchLabels labels,
5074 intptr_t input_index,
5075 const Object& obj,
5076 Condition* true_condition_out) {
5077 CompileType* input_type = InputAt(input_index)->Type();
5078 if (input_type->ToCid() == kBoolCid && obj.GetClassId() == kBoolCid) {
5079 bool invert = (kind() != Token::kEQ_STRICT) ^ !Bool::Cast(obj).value();
5080 *true_condition_out =
5081 compiler->EmitBoolTest(locs()->in(input_index).reg(), labels, invert);
5082 return true;
5083 }
5084 return false;
5085}
5086
5087LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
5088 bool opt) const {
5089 const intptr_t kNumInputs = 1;
5090 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
5092}
5093
5094void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5095 const Register object = locs()->in(0).reg();
5096 const Register result = locs()->out(0).reg();
5097 if (input_can_be_smi_ && this->object()->Type()->CanBeSmi()) {
5098 if (representation() == kTagged) {
5099 __ LoadTaggedClassIdMayBeSmi(result, object);
5100 } else {
5101 __ LoadClassIdMayBeSmi(result, object);
5102 }
5103 } else {
5104 __ LoadClassId(result, object);
5105 if (representation() == kTagged) {
5106 __ SmiTag(result);
5107 }
5108 }
5109}
5110
5111LocationSummary* TestRangeInstr::MakeLocationSummary(Zone* zone,
5112 bool opt) const {
5113#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
5114 defined(TARGET_ARCH_ARM)
5115 const bool needs_temp = true;
5116#else
5117 const bool needs_temp = false;
5118#endif
5119 const intptr_t kNumInputs = 1;
5120 const intptr_t kNumTemps = needs_temp ? 1 : 0;
5121 LocationSummary* locs = new (zone)
5122 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5123 locs->set_in(0, Location::RequiresRegister());
5124 if (needs_temp) {
5125 locs->set_temp(0, Location::RequiresRegister());
5126 }
5127 locs->set_out(0, Location::RequiresRegister());
5128 return locs;
5129}
5130
5131Condition TestRangeInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
5132 BranchLabels labels) {
5133 intptr_t lower = lower_;
5134 intptr_t upper = upper_;
5135 if (value_representation_ == kTagged) {
5136 lower = Smi::RawValue(lower);
5137 upper = Smi::RawValue(upper);
5138 }
5139
5140 Register in = locs()->in(0).reg();
5141#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
5142 defined(TARGET_ARCH_ARM)
5143 Register temp = locs()->temp(0).reg();
5144#else
5145 Register temp = TMP;
5146#endif
5147 __ AddImmediate(temp, in, -lower);
5148 __ CompareImmediate(temp, upper - lower);
5149 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
5150 return kind() == Token::kIS ? UNSIGNED_LESS_EQUAL : UNSIGNED_GREATER;
5151}
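// The emitted sequence uses the classic unsigned range trick:
//   lower <= x && x <= upper   <=>   (x - lower) <= (upper - lower)  (unsigned)
// so one AddImmediate plus one CompareImmediate covers both bounds. Worked
// example (hypothetical values): lower = 5, upper = 9, x = 3 gives
// 3 - 5 = 0xFFFF...FE, which is unsigned-greater than 4, so the test fails.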
5152
5153LocationSummary* InstanceCallInstr::MakeLocationSummary(Zone* zone,
5154 bool optimizing) const {
5155 return MakeCallSummary(zone, this);
5156}
5157
5158static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind) {
5159 if (!FLAG_two_args_smi_icd) {
5160 return Code::null();
5161 }
5162 switch (kind) {
5163 case Token::kADD:
5164 return StubCode::SmiAddInlineCache().ptr();
5165 case Token::kLT:
5166 return StubCode::SmiLessInlineCache().ptr();
5167 case Token::kEQ:
5168 return StubCode::SmiEqualInlineCache().ptr();
5169 default:
5170 return Code::null();
5171 }
5172}
5173
5175 Zone* zone) const {
5176 if (!interface_target().IsNull()) {
5177 // Note: target_type is the fully instantiated rare type (all type parameters
5178 // are replaced with dynamic), so checking whether Smi is assignable to
5179 // it correctly computes whether or not the receiver can be a Smi.
5180 const AbstractType& target_type = AbstractType::Handle(
5181 zone, Class::Handle(zone, interface_target().Owner()).RareType());
5182 if (!CompileType::Smi().IsAssignableTo(target_type)) {
5183 return false;
5184 }
5185 }
5186 // In all other cases conservatively assume that the receiver can be a smi.
5187 return true;
5188}
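// Example (hypothetical): if the interface target is declared on String, the
// Smi type is not assignable to String's rare type, so the answer is false
// and the caller may mark the receiver as definitely not a Smi, letting the
// dispatch sequence skip the Smi case.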
5189
5191 intptr_t idx) const {
5192 // The first input is the array of types
5193 // for generic functions
5194 if (type_args_len() > 0) {
5195 if (idx == 0) {
5196 return kTagged;
5197 }
5198 idx--;
5199 }
5201}
5202
5204 if (interface_target().IsNull()) {
5205 return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0);
5206 }
5207
5210 ((type_args_len() > 0) ? 1 : 0);
5211}
5212
5216
5218 if (CompilerState::Current().is_aot() && !receiver_is_not_smi()) {
5219 if (!Receiver()->Type()->CanBeSmi() ||
5220 !CanReceiverBeSmiBasedOnInterfaceTarget(zone)) {
5221 set_receiver_is_not_smi(true);
5222 }
5223 }
5224}
5225
5226static FunctionPtr FindBinarySmiOp(Zone* zone, const String& name) {
5227 const auto& smi_class = Class::Handle(zone, Smi::Class());
5228 auto& smi_op_target = Function::Handle(
5229 zone, Resolver::ResolveDynamicAnyArgs(zone, smi_class, name));
5230
5231#if !defined(DART_PRECOMPILED_RUNTIME)
5232 if (smi_op_target.IsNull() &&
5234 const String& demangled = String::Handle(
5236 smi_op_target = Resolver::ResolveDynamicAnyArgs(zone, smi_class, demangled);
5237 }
5238#endif
5239 return smi_op_target.ptr();
5240}
5241
5243 if (HasICData()) {
5244 return;
5245 }
5246
5247 const Array& arguments_descriptor =
5250 graph->zone(),
5251 ICData::New(graph->function(), function_name(), arguments_descriptor,
5252 deopt_id(), checked_argument_count(), ICData::kInstance));
5254}
5255
5257 Zone* zone = compiler->zone();
5258
5260
5261 auto& specialized_binary_smi_ic_stub = Code::ZoneHandle(zone);
5262 auto& binary_smi_op_target = Function::Handle(zone);
5263 if (!receiver_is_not_smi()) {
5264 specialized_binary_smi_ic_stub = TwoArgsSmiOpInlineCacheEntry(token_kind());
5265 if (!specialized_binary_smi_ic_stub.IsNull()) {
5266 binary_smi_op_target = FindBinarySmiOp(zone, function_name());
5267 }
5268 }
5269
5270 const ICData* call_ic_data = nullptr;
5271 if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
5272 (ic_data() == nullptr)) {
5273 const Array& arguments_descriptor =
5275
5276 AbstractType& receivers_static_type = AbstractType::Handle(zone);
5277 if (receivers_static_type_ != nullptr) {
5278 receivers_static_type = receivers_static_type_->ptr();
5279 }
5280
5281 call_ic_data = compiler->GetOrAddInstanceCallICData(
5282 deopt_id(), function_name(), arguments_descriptor,
5283 checked_argument_count(), receivers_static_type, binary_smi_op_target);
5284 } else {
5285 call_ic_data = &ICData::ZoneHandle(zone, ic_data()->ptr());
5286 }
5287
5288 if (compiler->is_optimizing() && HasICData()) {
5289 if (ic_data()->NumberOfUsedChecks() > 0) {
5290 const ICData& unary_ic_data =
5291 ICData::ZoneHandle(zone, ic_data()->AsUnaryClassChecks());
5292 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5293 unary_ic_data, entry_kind(),
5295 } else {
5296 // Call was not visited yet, use original ICData in order to populate it.
5297 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5298 *call_ic_data, entry_kind(),
5300 }
5301 } else {
5302 // Unoptimized code.
5303 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id(),
5304 source());
5305
5306 // If the ICData already contains a (Smi, Smi, <binary-smi-op-target>) entry
5307 // we will call the specialized IC stub, which works like a normal IC stub
5308 // but has an inlined fast path for the specific Smi operation.
5309 bool use_specialized_smi_ic_stub = false;
5310 if (!specialized_binary_smi_ic_stub.IsNull() &&
5311 call_ic_data->NumberOfChecksIs(1)) {
5312 GrowableArray<intptr_t> class_ids(2);
5313 auto& target = Function::Handle();
5314 call_ic_data->GetCheckAt(0, &class_ids, &target);
5315 if (class_ids[0] == kSmiCid && class_ids[1] == kSmiCid &&
5316 target.ptr() == binary_smi_op_target.ptr()) {
5317 use_specialized_smi_ic_stub = true;
5318 }
5319 }
5320
5321 if (use_specialized_smi_ic_stub) {
5322 ASSERT(ArgumentCount() == 2);
5323 compiler->EmitInstanceCallJIT(specialized_binary_smi_ic_stub,
5324 *call_ic_data, deopt_id(), source(), locs(),
5325 entry_kind());
5326 } else {
5327 compiler->GenerateInstanceCall(deopt_id(), source(), locs(),
5328 *call_ic_data, entry_kind(),
5330 }
5331 }
5332}
5333
5337
5339 const Class& cls,
5340 bool allow_add /* = true */) {
5341 const Array& args_desc_array = Array::Handle(GetArgumentsDescriptor());
5342 ArgumentsDescriptor args_desc(args_desc_array);
5344 args_desc, allow_add);
5345}
5346
5348 if (targets_ == nullptr) {
5349 Zone* zone = Thread::Current()->zone();
5350 if (HasICData()) {
5351 targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
5352 } else {
5353 targets_ = new (zone) CallTargets(zone);
5354 ASSERT(targets_->is_empty());
5355 }
5356 }
5357 return *targets_;
5358}
5359
5361 if (binary_ == nullptr) {
5362 Zone* zone = Thread::Current()->zone();
5363 if (HasICData()) {
5365 } else {
5366 binary_ = new (zone) class BinaryFeedback(zone);
5367 }
5368 }
5369 return *binary_;
5370}
5371
5373 intptr_t idx) const {
5374 if (idx == (InputCount() - 1)) {
5375 return kUnboxedUword; // Receiver's CID.
5376 }
5377
5378 // The first input is the array of types
5379 // for generic functions
5380 if (type_args_len() > 0) {
5381 if (idx == 0) {
5382 return kTagged;
5383 }
5384 idx--;
5385 }
5387}
5388
5390 if (interface_target().IsNull()) {
5391 return ArgumentCountWithoutTypeArgs() + ((type_args_len() > 0) ? 1 : 0);
5392 }
5393
5396 ((type_args_len() > 0) ? 1 : 0);
5397}
5398
5402
5404 Zone* zone,
5405 const InstanceCallBaseInstr* call,
5406 Value* cid,
5407 const Function& interface_target,
5408 const compiler::TableSelector* selector) {
5409 InputsArray args(zone, call->ArgumentCount() + 1);
5410 for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
5411 args.Add(call->ArgumentValueAt(i)->CopyWithType());
5412 }
5413 args.Add(cid);
5414 auto dispatch_table_call = new (zone) DispatchTableCallInstr(
5415 call->source(), interface_target, selector, std::move(args),
5416 call->type_args_len(), call->argument_names());
5417 return dispatch_table_call;
5418}
5419
5421 bool opt) const {
5422 const intptr_t kNumInputs = 1;
5423 const intptr_t kNumTemps = 0;
5424 LocationSummary* summary = new (zone)
5425 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
5426 summary->set_in(
5428 return MakeCallSummary(zone, this, summary);
5429}
5430
5431void DispatchTableCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5433 Array& arguments_descriptor = Array::ZoneHandle();
5434 if (selector()->requires_args_descriptor) {
5435 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5436 argument_names());
5437 arguments_descriptor = args_info.ToArgumentsDescriptor();
5438 }
5439 compiler->EmitDispatchTableCall(selector()->offset, arguments_descriptor);
5440 compiler->EmitCallsiteMetadata(source(), DeoptId::kNone,
5441 UntaggedPcDescriptors::kOther, locs(), env());
5442 if (selector()->called_on_null && !selector()->on_null_interface) {
5443 Value* receiver = ArgumentValueAt(FirstArgIndex());
5444 if (receiver->Type()->is_nullable()) {
5445 const String& function_name =
5447 compiler->AddNullCheck(source(), function_name);
5448 }
5449 }
5450 compiler->EmitDropArguments(ArgumentsSize());
5451 compiler->AddDispatchTableCallTarget(selector());
5452}
5453
5455 intptr_t idx) const {
5456 // The first input is the array of types
5457 // for generic functions
5458 if (type_args_len() > 0 || function().IsFactory()) {
5459 if (idx == 0) {
5460 return kTagged;
5461 }
5462 idx--;
5463 }
5465}
5466
5472
5476
5478 if (targets_ == nullptr) {
5479 Zone* zone = Thread::Current()->zone();
5480 if (HasICData()) {
5481 targets_ = CallTargets::CreateAndExpand(zone, *ic_data());
5482 } else {
5483 targets_ = new (zone) CallTargets(zone);
5484 ASSERT(targets_->is_empty());
5485 }
5486 }
5487 return *targets_;
5488}
5489
5491 if (binary_ == nullptr) {
5492 Zone* zone = Thread::Current()->zone();
5493 if (HasICData()) {
5495 } else {
5496 binary_ = new (zone) class BinaryFeedback(zone);
5497 }
5498 }
5499 return *binary_;
5500}
5501
5506
5508 if (length() == 0) return false;
5509 for (int i = 0; i < length(); i++) {
5510 if (TargetAt(i)->target->ptr() != TargetAt(0)->target->ptr()) return false;
5511 }
5512 return true;
5513}
5514
5516 ASSERT(length() != 0);
5517 DEBUG_ASSERT(TargetAt(0)->target->IsNotTemporaryScopedHandle());
5518 return *TargetAt(0)->target;
5519}
5520
5522 ASSERT(length() != 0);
5523 DEBUG_ASSERT(TargetAt(0)->target->IsNotTemporaryScopedHandle());
5524 for (int i = 1; i < length(); i++) {
5525 ASSERT(TargetAt(i)->count <= TargetAt(0)->count);
5526 }
5527 return *TargetAt(0)->target;
5528}
5529
5531 intptr_t sum = 0;
5532 for (int i = 0; i < length(); i++) {
5533 sum += TargetAt(i)->count;
5534 }
5535 return sum;
5536}
5537
5539 const {
5540 const intptr_t len = targets_.length();
5542 for (intptr_t i = 0; i < len; i++) {
5543 target = targets_.TargetAt(i)->target->ptr();
5544 if (!target.IsDispatcherOrImplicitAccessor()) {
5545 return false;
5546 }
5547 }
5548 return true;
5549}
5550
5554
5556 Zone* zone,
5557 bool optimizing) const {
5558 return MakeCallSummary(zone, this);
5559}
5560
5562 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5563 argument_names());
5565 compiler->EmitPolymorphicInstanceCall(
5566 this, targets(), args_info, deopt_id(), source(), locs(), complete(),
5568}
5569
5571 const CallTargets& targets) {
5572 bool is_string = true;
5573 bool is_integer = true;
5574 bool is_double = true;
5575 bool is_type = true;
5576
5577 const intptr_t num_checks = targets.length();
5578 for (intptr_t i = 0; i < num_checks; i++) {
5579 ASSERT(targets.TargetAt(i)->target->ptr() ==
5580 targets.TargetAt(0)->target->ptr());
5581 const intptr_t start = targets[i].cid_start;
5582 const intptr_t end = targets[i].cid_end;
5583 for (intptr_t cid = start; cid <= end; cid++) {
5584 is_string = is_string && IsStringClassId(cid);
5586 is_double = is_double && (cid == kDoubleCid);
5587 is_type = is_type && IsTypeClassId(cid);
5588 }
5589 }
5590
5591 if (is_string) {
5593 ASSERT(!is_double);
5594 ASSERT(!is_type);
5595 return Type::StringType();
5596 } else if (is_integer) {
5597 ASSERT(!is_double);
5598 ASSERT(!is_type);
5599 return Type::IntType();
5600 } else if (is_double) {
5601 ASSERT(!is_type);
5602 return Type::Double();
5603 } else if (is_type) {
5604 return Type::DartTypeType();
5605 }
5606
5607 return Type::null();
5608}
5609
5611 const intptr_t receiver_cid = Receiver()->Type()->ToCid();
5612
5613 // We could turn cold call sites for known receiver cids into a StaticCall.
5614 // However, that keeps the ICData of the InstanceCall from being updated.
5615 //
5616 // This is fine if there is no later deoptimization, but if there is, then
5617 // the InstanceCall with the updated ICData for this receiver may then be
5618 // better optimized by the compiler.
5619 //
5620 // This optimization is safe to apply in AOT mode because deoptimization is
5621 // not a concern there.
5622 //
5623 // TODO(dartbug.com/37291): Allow this optimization, but accumulate affected
5624 // InstanceCallInstrs and the corresponding receiver cids during compilation.
5625 // After compilation, add receiver checks to the ICData for those call sites.
5626 if (!CompilerState::Current().is_aot() && Targets().is_empty()) {
5627 return this;
5628 }
5629
5630 const CallTargets* new_target =
5632 receiver_cid,
5633 String::Handle(flow_graph->zone(), ic_data()->target_name()),
5634 Array::Handle(flow_graph->zone(), ic_data()->arguments_descriptor()));
5635 if (new_target == nullptr) {
5636 // No specialization.
5637 return this;
5638 }
5639
5640 ASSERT(new_target->HasSingleTarget());
5641 const Function& target = new_target->FirstTarget();
5643 flow_graph->zone(), this, target, new_target->AggregateCallCount());
5644 flow_graph->InsertBefore(this, specialized, env(), FlowGraph::kValue);
5645 return specialized;
5646}
5647
5649 // TODO(dartbug.com/40188): Allow this to canonicalize into a StaticCall
5650 // when the input class id is constant.
5651 return this;
5652}
5653
5656 return this;
5657 }
5658
5659 const Function& target = targets().FirstTarget();
5660 if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
5661 const AbstractType& type =
5663 if (!type.IsNull()) {
5664 return flow_graph->GetConstant(type);
5665 }
5666 }
5667
5668 return this;
5669}
5670
5672 if (CompilerState::Current().is_aot() && !complete()) return false;
5673 return targets_.HasSingleRecognizedTarget();
5674}
5675
5677 const intptr_t list_cid = FactoryRecognizer::GetResultCidOfListFactory(
5678 zone, function(), ArgumentCount());
5679 if (list_cid != kDynamicCid) {
5680 SetResultType(zone, CompileType::FromCid(list_cid));
5682 return true;
5683 } else if (function().has_pragma()) {
5684 const intptr_t recognized_cid =
5686 if (recognized_cid != kDynamicCid) {
5687 SetResultType(zone, CompileType::FromCid(recognized_cid));
5688 return true;
5689 }
5690 }
5691 return false;
5692}
5693
5694static const String& EvaluateToString(Zone* zone, Definition* defn) {
5695 if (auto konst = defn->AsConstant()) {
5696 const Object& obj = konst->value();
5697 if (obj.IsString()) {
5698 return String::Cast(obj);
5699 } else if (obj.IsSmi()) {
5700 const char* cstr = obj.ToCString();
5701 return String::Handle(zone, String::New(cstr, Heap::kOld));
5702 } else if (obj.IsBool()) {
5703 return Bool::Cast(obj).value() ? Symbols::True() : Symbols::False();
5704 } else if (obj.IsNull()) {
5705 return Symbols::null();
5706 }
5707 }
5708 return String::null_string();
5709}
5710
5712 FlowGraph* flow_graph) {
5713 auto arg0 = call->ArgumentValueAt(0)->definition();
5714 auto create_array = arg0->AsCreateArray();
5715 if (create_array == nullptr) {
5716 // Do not try to fold interpolate if array is an OSR argument.
5717 ASSERT(flow_graph->IsCompiledForOsr());
5718 ASSERT(arg0->IsPhi() || arg0->IsParameter());
5719 return call;
5720 }
5721 // Check if the string interpolation has only constant inputs.
5722 Value* num_elements = create_array->num_elements();
5723 if (!num_elements->BindsToConstant() ||
5724 !num_elements->BoundConstant().IsSmi()) {
5725 return call;
5726 }
5727 const intptr_t length = Smi::Cast(num_elements->BoundConstant()).Value();
5728 Thread* thread = Thread::Current();
5729 Zone* zone = thread->zone();
5730 GrowableHandlePtrArray<const String> pieces(zone, length);
5731 for (intptr_t i = 0; i < length; i++) {
5732 pieces.Add(Object::null_string());
5733 }
5734
5735 for (Value::Iterator it(create_array->input_use_list()); !it.Done();
5736 it.Advance()) {
5737 auto current = it.Current()->instruction();
5738 if (current == call) {
5739 continue;
5740 }
5741 auto store = current->AsStoreIndexed();
5742 if (store == nullptr || !store->index()->BindsToConstant() ||
5743 !store->index()->BoundConstant().IsSmi()) {
5744 return call;
5745 }
5746 intptr_t store_index = Smi::Cast(store->index()->BoundConstant()).Value();
5747 ASSERT(store_index < length);
5748 const String& piece =
5749 EvaluateToString(flow_graph->zone(), store->value()->definition());
5750 if (!piece.IsNull()) {
5751 pieces.SetAt(store_index, piece);
5752 } else {
5753 return call;
5754 }
5755 }
5756
5757 const String& concatenated =
5758 String::ZoneHandle(zone, Symbols::FromConcatAll(thread, pieces));
5759 return flow_graph->GetConstant(concatenated);
5760}
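// Illustrative example of the interpolation folding above, assuming every
// element stored into the backing array binds to a constant:
//
//   "x=${1}${true}"  =>  _interpolate(<"x=", 1, true>)
//
// EvaluateToString maps the slots to "x=", "1" and "true", so the call
// folds to the constant string "x=1true". A single non-constant element
// (or a non-constant array length) leaves the call untouched.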
5761
5763 FlowGraph* flow_graph) {
5764 auto arg0 = call->ArgumentValueAt(0)->definition();
5765 const auto& result = EvaluateToString(flow_graph->zone(), arg0);
5766 if (!result.IsNull()) {
5767 return flow_graph->GetConstant(String::ZoneHandle(
5768 flow_graph->zone(), Symbols::New(flow_graph->thread(), result)));
5769 }
5770 return call;
5771}
5772
5774 auto& compiler_state = CompilerState::Current();
5775
5776 if (function().ptr() == compiler_state.StringBaseInterpolate().ptr()) {
5777 return CanonicalizeStringInterpolate(this, flow_graph);
5778 } else if (function().ptr() ==
5779 compiler_state.StringBaseInterpolateSingle().ptr()) {
5780 return CanonicalizeStringInterpolateSingle(this, flow_graph);
5781 }
5782
5783 const auto kind = function().recognized_kind();
5784
5785 if (kind != MethodRecognizer::kUnknown) {
5786 if (ArgumentCount() == 1) {
5787 const auto argument = ArgumentValueAt(0);
5788 if (argument->BindsToConstant()) {
5789 Object& result = Object::ZoneHandle();
5790 if (Evaluate(flow_graph, argument->BoundConstant(), &result)) {
5791 return flow_graph->TryCreateConstantReplacementFor(this, result);
5792 }
5793 }
5794 } else if (ArgumentCount() == 2) {
5795 const auto argument1 = ArgumentValueAt(0);
5796 const auto argument2 = ArgumentValueAt(1);
5797 if (argument1->BindsToConstant() && argument2->BindsToConstant()) {
5798 Object& result = Object::ZoneHandle();
5799 if (Evaluate(flow_graph, argument1->BoundConstant(),
5800 argument2->BoundConstant(), &result)) {
5801 return flow_graph->TryCreateConstantReplacementFor(this, result);
5802 }
5803 }
5804 }
5805 }
5806
5807 if (!compiler_state.is_aot()) {
5808 return this;
5809 }
5810
5811 if (kind == MethodRecognizer::kObjectRuntimeType) {
5812 if (input_use_list() == nullptr) {
5813 // This function has only environment uses. In precompiled mode it is
5814 // fine to remove it - because we will never deoptimize.
5815 return flow_graph->constant_dead();
5816 }
5817 }
5818
5819 return this;
5820}
5821
5823 const Object& argument,
5824 Object* result) {
5825 const auto kind = function().recognized_kind();
5826 switch (kind) {
5827 case MethodRecognizer::kSmi_bitLength: {
5828 ASSERT(FirstArgIndex() == 0);
5829 if (argument.IsInteger()) {
5830 const Integer& value = Integer::Handle(
5831 flow_graph->zone(),
5832 Evaluator::BitLengthEvaluate(argument, representation(),
5833 flow_graph->thread()));
5834 if (!value.IsNull()) {
5835 *result = value.ptr();
5836 return true;
5837 }
5838 }
5839 break;
5840 }
5841 case MethodRecognizer::kStringBaseLength:
5842 case MethodRecognizer::kStringBaseIsEmpty: {
5843 ASSERT(FirstArgIndex() == 0);
5844 if (argument.IsString()) {
5845 const auto& str = String::Cast(argument);
5846 if (kind == MethodRecognizer::kStringBaseLength) {
5847 *result = Integer::New(str.Length());
5848 } else {
5849 *result = Bool::Get(str.Length() == 0).ptr();
5850 break;
5851 }
5852 return true;
5853 }
5854 break;
5855 }
5856 default:
5857 break;
5858 }
5859 return false;
5860}
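// Illustrative example of the single-argument evaluation above: a call to
// the recognized String length getter (kStringBaseLength) with the constant
// argument "abc" produces the integer 3, which Canonicalize then installs
// via TryCreateConstantReplacementFor.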
5861
5863 const Object& argument1,
5864 const Object& argument2,
5865 Object* result) {
5866 const auto kind = function().recognized_kind();
5867 switch (kind) {
5868 case MethodRecognizer::kOneByteString_equality:
5869 case MethodRecognizer::kTwoByteString_equality: {
5870 if (argument1.IsString() && argument2.IsString()) {
5871 *result =
5872 Bool::Get(String::Cast(argument1).Equals(String::Cast(argument2)))
5873 .ptr();
5874 return true;
5875 }
5876 break;
5877 }
5878 default:
5879 break;
5880 }
5881 return false;
5882}
5883
5885 bool optimizing) const {
5886 return MakeCallSummary(zone, this);
5887}
5888
5889void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5890 Zone* zone = compiler->zone();
5891 const ICData* call_ic_data = nullptr;
5892 if (!FLAG_propagate_ic_data || !compiler->is_optimizing() ||
5893 (ic_data() == nullptr)) {
5894 const Array& arguments_descriptor =
5896 const int num_args_checked =
5898 call_ic_data = compiler->GetOrAddStaticCallICData(
5899 deopt_id(), function(), arguments_descriptor, num_args_checked,
5900 rebind_rule_);
5901 } else {
5902 call_ic_data = &ICData::ZoneHandle(ic_data()->ptr());
5903 }
5904 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
5905 argument_names());
5906 compiler->GenerateStaticCall(deopt_id(), source(), function(), args_info,
5907 locs(), *call_ic_data, rebind_rule_,
5908 entry_kind());
5909 if (function().IsFactory()) {
5910 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
5911 if (type_usage_info != nullptr) {
5912 const Class& klass = Class::Handle(function().Owner());
5913 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, klass,
5914 ArgumentAt(0));
5915 }
5916 }
5917}
5918
5921 Representation representation,
5922 const Function& function,
5923 intptr_t type_args_len,
5924 const Array& argument_names,
5925 InputsArray&& arguments,
5926 intptr_t deopt_id)
5927 : TemplateDartCall(deopt_id,
5928 type_args_len,
5929 argument_names,
5930 std::move(arguments),
5931 source),
5932 representation_(representation),
5933 function_(function),
5934 identity_(AliasIdentity::Unknown()) {
5935 DEBUG_ASSERT(function.IsNotTemporaryScopedHandle());
5936 // We use kUntagged for the internal use in FfiNativeLookupAddress
5937 // and kUnboxedAddress for pragma-annotated functions.
5939 function.ptr() ==
5940 IsolateGroup::Current()->object_store()->ffi_resolver_function());
5943#if defined(TARGET_ARCH_IA32)
5944 // No pool to cache in on IA32.
5945 FATAL("Not supported on IA32.");
5946#endif
5947}
5948
5950 intptr_t idx) const {
5951 // The first input is the array of types for generic functions.
5952 if (type_args_len() > 0 || function().IsFactory()) {
5953 if (idx == 0) {
5954 return kTagged;
5955 }
5956 idx--;
5957 }
5959}
5960
5966
5970
5972 Zone* zone,
5973 bool optimizing) const {
5974 return MakeCallSummary(zone, this);
5975}
5976
5978#if defined(TARGET_ARCH_IA32)
5979 UNREACHABLE();
5980#else
5981 compiler::Label drop_args, done;
5982 const intptr_t cacheable_pool_index = __ object_pool_builder().AddImmediate(
5985 const Register dst = locs()->out(0).reg();
5986
5987 // In optimized mode outgoing arguments are pushed to the end of the fixed
5988 // frame.
5989 const bool need_to_drop_args = !compiler->is_optimizing();
5990
5991 __ Comment(
5992 "CachableIdempotentCall pool load and check. pool_index = "
5993 "%" Pd,
5994 cacheable_pool_index);
5995 __ LoadWordFromPoolIndex(dst, cacheable_pool_index);
5996 __ CompareImmediate(dst, 0);
5997 __ BranchIf(NOT_EQUAL, need_to_drop_args ? &drop_args : &done);
5998 __ Comment("CachableIdempotentCall pool load and check - end");
5999
6000 ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
6001 argument_names());
6002 const auto& null_ic_data = ICData::ZoneHandle();
6003 compiler->GenerateStaticCall(deopt_id(), source(), function(), args_info,
6004 locs(), null_ic_data, ICData::kNoRebind,
6005 Code::EntryKind::kNormal);
6006
6007 __ Comment("CachableIdempotentCall pool store");
6008 if (!function().HasUnboxedReturnValue()) {
6009 __ LoadWordFromBoxOrSmi(dst, dst);
6010 }
6011 __ StoreWordToPoolIndex(dst, cacheable_pool_index);
6012 if (need_to_drop_args) {
6014 __ Bind(&drop_args);
6015 __ Drop(args_info.size_with_type_args);
6016 }
6017 __ Bind(&done);
6018 __ Comment("CachableIdempotentCall pool store - end");
6019#endif
6020}
6021
6023 switch (kind_) {
6024 case kParameterCheck:
6026 case kInsertedByFrontend:
6028 case kFromSource:
6030 case kUnknown:
6031 break;
6032 }
6033
6034 return tag();
6035}
6036
6037void AssertAssignableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6038 compiler->GenerateAssertAssignable(value()->Type(), source(), deopt_id(),
6039 env(), dst_name(), locs());
6040 ASSERT(locs()->in(kInstancePos).reg() == locs()->out(0).reg());
6041}
6042
6043LocationSummary* AssertSubtypeInstr::MakeLocationSummary(Zone* zone,
6044 bool opt) const {
6045 const intptr_t kNumInputs = 5;
6046 const intptr_t kNumTemps = 0;
6047 LocationSummary* summary = new (zone)
6048 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6049 summary->set_in(kInstantiatorTAVPos,
6052 summary->set_in(
6055 summary->set_in(kSubTypePos,
6057 summary->set_in(kSuperTypePos,
6059 summary->set_in(kDstNamePos,
6061 return summary;
6062}
6063
6064void AssertSubtypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6065 compiler->GenerateStubCall(source(), StubCode::AssertSubtype(),
6066 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
6067 env());
6068}
6069
6070LocationSummary* InstantiateTypeInstr::MakeLocationSummary(Zone* zone,
6071 bool opt) const {
6072 const intptr_t kNumInputs = 2;
6073 const intptr_t kNumTemps = 0;
6074 LocationSummary* locs = new (zone)
6075 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6076 locs->set_in(0, Location::RegisterLocation(
6078 locs->set_in(1, Location::RegisterLocation(
6080 locs->set_out(0,
6082 return locs;
6083}
6084
6085void InstantiateTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6086 auto& stub = Code::ZoneHandle(StubCode::InstantiateType().ptr());
6087 if (type().IsTypeParameter()) {
6088 const auto& type_parameter = TypeParameter::Cast(type());
6089 const bool is_function_parameter = type_parameter.IsFunctionTypeParameter();
6090
6091 switch (type_parameter.nullability()) {
6092 case Nullability::kNonNullable:
6093 stub = is_function_parameter
6094 ? StubCode::InstantiateTypeNonNullableFunctionTypeParameter()
6095 .ptr()
6096 : StubCode::InstantiateTypeNonNullableClassTypeParameter()
6097 .ptr();
6098 break;
6099 case Nullability::kNullable:
6100 stub =
6101 is_function_parameter
6102 ? StubCode::InstantiateTypeNullableFunctionTypeParameter().ptr()
6103 : StubCode::InstantiateTypeNullableClassTypeParameter().ptr();
6104 break;
6105 case Nullability::kLegacy:
6106 stub =
6107 is_function_parameter
6108 ? StubCode::InstantiateTypeLegacyFunctionTypeParameter().ptr()
6109 : StubCode::InstantiateTypeLegacyClassTypeParameter().ptr();
6110 break;
6111 }
6112 }
6113 __ LoadObject(InstantiateTypeABI::kTypeReg, type());
6114 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6115 locs(), deopt_id(), env());
6116}
6117
6118LocationSummary* InstantiateTypeArgumentsInstr::MakeLocationSummary(
6119 Zone* zone,
6120 bool opt) const {
6121 const intptr_t kNumInputs = 3;
6122 const intptr_t kNumTemps = 0;
6123 LocationSummary* locs = new (zone)
6124 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6125 locs->set_in(0, Location::RegisterLocation(
6127 locs->set_in(1, Location::RegisterLocation(
6129 locs->set_in(2, Location::RegisterLocation(
6131 locs->set_out(
6133 return locs;
6134}
6135
6136void InstantiateTypeArgumentsInstr::EmitNativeCode(
6137 FlowGraphCompiler* compiler) {
6138 // We should never try to instantiate a TAV known at compile time to be null,
6139 // so we can use a null value below for the dynamic case.
6140 ASSERT(!type_arguments()->BindsToConstant() ||
6141 !type_arguments()->BoundConstant().IsNull());
6142 const auto& type_args =
6144 ? TypeArguments::Cast(type_arguments()->BoundConstant())
6145 : Object::null_type_arguments();
6146 const intptr_t len = type_args.Length();
6147 const bool can_function_type_args_be_null =
6148 function_type_arguments()->CanBe(Object::null_object());
6149
6150 compiler::Label type_arguments_instantiated;
6151 if (type_args.IsNull()) {
6152 // Currently we only create dynamic InstantiateTypeArguments instructions
6153 // in cases where we know the type argument is uninstantiated at runtime,
6154 // so there are no extra checks needed to call the stub successfully.
6155 } else if (type_args.IsRawWhenInstantiatedFromRaw(len) &&
6156 can_function_type_args_be_null) {
6157 // If both the instantiator and function type arguments are null and if the
6158 // type argument vector instantiated from null becomes a vector of dynamic,
6159 // then use null as the type arguments.
6160 compiler::Label non_null_type_args;
6162 Object::null_object());
6165 if (!function_type_arguments()->BindsToConstant()) {
6166 __ BranchIf(NOT_EQUAL, &non_null_type_args,
6170 }
6171 __ BranchIf(EQUAL, &type_arguments_instantiated,
6173 __ Bind(&non_null_type_args);
6174 }
6175
6176 compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
6177 locs(), deopt_id(), env());
6178 __ Bind(&type_arguments_instantiated);
6179}
6180
6181LocationSummary* DeoptimizeInstr::MakeLocationSummary(Zone* zone,
6182 bool opt) const {
6183 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6184}
6185
6186void DeoptimizeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6187 __ Jump(compiler->AddDeoptStub(deopt_id(), deopt_reason_));
6188}
6189
6190void CheckClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6191 compiler::Label* deopt =
6192 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
6193 if (IsNullCheck()) {
6194 EmitNullCheck(compiler, deopt);
6195 return;
6196 }
6197
6198 ASSERT(!cids_.IsMonomorphic() || !cids_.HasClassId(kSmiCid));
6199 Register value = locs()->in(0).reg();
6200 Register temp = locs()->temp(0).reg();
6201 compiler::Label is_ok;
6202
6203 __ BranchIfSmi(value, cids_.HasClassId(kSmiCid) ? &is_ok : deopt);
6204
6205 __ LoadClassId(temp, value);
6206
6207 if (IsBitTest()) {
6208 intptr_t min = cids_.ComputeLowestCid();
6209 intptr_t max = cids_.ComputeHighestCid();
6211 } else {
6212 const intptr_t num_checks = cids_.length();
6213 const bool use_near_jump = num_checks < 5;
6214 int bias = 0;
6215 for (intptr_t i = 0; i < num_checks; i++) {
6216 intptr_t cid_start = cids_[i].cid_start;
6217 intptr_t cid_end = cids_[i].cid_end;
6218 if (cid_start == kSmiCid && cid_end == kSmiCid) {
6219 continue; // We already handled Smi above.
6220 }
6221 if (cid_start == kSmiCid) cid_start++;
6222 if (cid_end == kSmiCid) cid_end--;
6223 const bool is_last =
6224 (i == num_checks - 1) ||
6225 (i == num_checks - 2 && cids_[i + 1].cid_start == kSmiCid &&
6226 cids_[i + 1].cid_end == kSmiCid);
6227 bias = EmitCheckCid(compiler, bias, cid_start, cid_end, is_last, &is_ok,
6228 deopt, use_near_jump);
6229 }
6230 }
6231 __ Bind(&is_ok);
6232}
6233
6234LocationSummary* GenericCheckBoundInstr::MakeLocationSummary(Zone* zone,
6235 bool opt) const {
6236 const intptr_t kNumInputs = 2;
6237 const intptr_t kNumTemps = 0;
6238 LocationSummary* locs = new (zone) LocationSummary(
6239 zone, kNumInputs, kNumTemps,
6242 locs->set_in(kLengthPos,
6245 return locs;
6246}
6247
6248void GenericCheckBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6251
6252 RangeErrorSlowPath* slow_path = new RangeErrorSlowPath(this);
6253 compiler->AddSlowPathCode(slow_path);
6254 Location length_loc = locs()->in(kLengthPos);
6255 Location index_loc = locs()->in(kIndexPos);
6256 Register length = length_loc.reg();
6257 Register index = index_loc.reg();
6258 const intptr_t index_cid = this->index()->Type()->ToCid();
6259
6260 // The length comes from one of our variable-sized heap objects (e.g. typed
6261 // data array) and is therefore guaranteed to be in the positive Smi range.
6262 if (representation() == kTagged) {
6263 if (index_cid != kSmiCid) {
6264 __ BranchIfNotSmi(index, slow_path->entry_label());
6265 }
6266 __ CompareObjectRegisters(index, length);
6267 } else {
6268 ASSERT(representation() == kUnboxedInt64);
6269 __ CompareRegisters(index, length);
6270 }
6271 __ BranchIf(UNSIGNED_GREATER_EQUAL, slow_path->entry_label());
6272}
6273
6274LocationSummary* CheckNullInstr::MakeLocationSummary(Zone* zone,
6275 bool opt) const {
6276 const intptr_t kNumInputs = 1;
6277 const intptr_t kNumTemps = 0;
6278 LocationSummary* locs = new (zone) LocationSummary(
6279 zone, kNumInputs, kNumTemps,
6282 locs->set_in(0, Location::RequiresRegister());
6283 return locs;
6284}
6285
6288 compiler->AddNullCheck(check_null->source(), check_null->function_name());
6289}
6290
6293 __ Comment("%s slow path allocation of %s", instruction()->DebugName(),
6294 cls_.ScrubbedNameCString());
6295 }
6296 __ Bind(entry_label());
6297 const auto& stub = Code::ZoneHandle(
6299
6300 LocationSummary* locs = instruction()->locs();
6301
6303 compiler->SaveLiveRegisters(locs);
6304 // Box allocation slow paths cannot lazy-deopt.
6305 ASSERT(!kAllocateMintRuntimeEntry.can_lazy_deopt() &&
6306 !kAllocateDoubleRuntimeEntry.can_lazy_deopt() &&
6307 !kAllocateFloat32x4RuntimeEntry.can_lazy_deopt() &&
6308 !kAllocateFloat64x2RuntimeEntry.can_lazy_deopt());
6309 compiler->GenerateNonLazyDeoptableStubCall(
6310 InstructionSource(), // No token position.
6311 stub, UntaggedPcDescriptors::kOther, locs);
6312 __ MoveRegister(result_, AllocateBoxABI::kResultReg);
6313 compiler->RestoreLiveRegisters(locs);
6314 __ Jump(exit_label());
6315}
6316
6318 Instruction* instruction,
6319 const Class& cls,
6321 Register temp) {
6322 if (compiler->intrinsic_mode()) {
6323 __ TryAllocate(cls, compiler->intrinsic_slow_path_label(),
6325 } else {
6327 auto slow_path = new BoxAllocationSlowPath(instruction, cls, result);
6328 compiler->AddSlowPathCode(slow_path);
6329
6330 if (FLAG_inline_alloc && !FLAG_use_slow_path) {
6331 __ TryAllocate(cls, slow_path->entry_label(),
6333 } else {
6334 __ Jump(slow_path->entry_label());
6335 }
6336 __ Bind(slow_path->exit_label());
6337 }
6338}
6339
6341 __ Comment("DoubleToIntegerSlowPath");
6342 __ Bind(entry_label());
6343
6344 LocationSummary* locs = instruction()->locs();
6345 locs->live_registers()->Remove(locs->out(0));
6346
6347 compiler->SaveLiveRegisters(locs);
6348
6349 auto slow_path_env =
6350 compiler->SlowPathEnvironmentFor(instruction(), /*num_slow_path_args=*/0);
6351
6352 __ MoveUnboxedDouble(DoubleToIntegerStubABI::kInputReg, value_reg_);
6353 __ LoadImmediate(
6355 compiler::target::ToRawSmi(instruction()->recognized_kind()));
6356 compiler->GenerateStubCall(instruction()->source(),
6357 StubCode::DoubleToInteger(),
6358 UntaggedPcDescriptors::kOther, locs,
6359 instruction()->deopt_id(), slow_path_env);
6360 __ MoveRegister(instruction()->locs()->out(0).reg(),
6362 compiler->RestoreLiveRegisters(instruction()->locs());
6363 __ Jump(exit_label());
6364}
6365
6366void UnboxInstr::EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler) {
6367 const intptr_t box_cid = BoxCid();
6368 ASSERT(box_cid != kSmiCid); // Should never reach here with Smi-able ints.
6369 const Register box = locs()->in(0).reg();
6370 const Register temp =
6371 (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
6372 compiler::Label* deopt =
6373 compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnbox);
6374 compiler::Label is_smi;
6375
6376 if ((value()->Type()->ToNullableCid() == box_cid) &&
6377 value()->Type()->is_nullable()) {
6378 __ CompareObject(box, Object::null_object());
6379 __ BranchIf(EQUAL, deopt);
6380 } else {
6381 __ BranchIfSmi(box, CanConvertSmi() ? &is_smi : deopt);
6382 __ CompareClassId(box, box_cid, temp);
6383 __ BranchIf(NOT_EQUAL, deopt);
6384 }
6385
6386 EmitLoadFromBox(compiler);
6387
6388 if (is_smi.IsLinked()) {
6389 compiler::Label done;
6391 __ Bind(&is_smi);
6392 EmitSmiConversion(compiler);
6393 __ Bind(&done);
6394 }
6395}
6396
6397void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6398 if (SpeculativeModeOfInputs() == kNotSpeculative) {
6399 if (BoxCid() == kSmiCid) {
6400 // Since the representation fits in a Smi, we can extract it directly.
6401 ASSERT_EQUAL(value()->Type()->ToCid(), kSmiCid);
6402 return EmitSmiConversion(compiler);
6403 }
6404 switch (representation()) {
6405 case kUnboxedDouble:
6406 case kUnboxedFloat:
6407 case kUnboxedFloat32x4:
6408 case kUnboxedFloat64x2:
6409 case kUnboxedInt32x4:
6410 EmitLoadFromBox(compiler);
6411 break;
6412
6413 case kUnboxedInt32:
6414 EmitLoadInt32FromBoxOrSmi(compiler);
6415 break;
6416
6417 case kUnboxedInt64: {
6418 if (value()->Type()->ToCid() == kSmiCid) {
6419 // Smi -> int64 conversion is more efficient than
6420 // handling arbitrary smi/mint.
6421 EmitSmiConversion(compiler);
6422 } else {
6423 EmitLoadInt64FromBoxOrSmi(compiler);
6424 }
6425 break;
6426 }
6427 default:
6428 UNREACHABLE();
6429 break;
6430 }
6431 } else {
6432 ASSERT(SpeculativeModeOfInputs() == kGuardInputs);
6433 const intptr_t value_cid = value()->Type()->ToCid();
6434 const intptr_t box_cid = BoxCid();
6435
6436 if (box_cid == kSmiCid || (CanConvertSmi() && (value_cid == kSmiCid))) {
6437 ASSERT_EQUAL(value_cid, kSmiCid);
6438 EmitSmiConversion(compiler);
6439 } else if (representation() == kUnboxedInt32 && value()->Type()->IsInt()) {
6440 EmitLoadInt32FromBoxOrSmi(compiler);
6441 } else if (representation() == kUnboxedInt64 && value()->Type()->IsInt()) {
6442 EmitLoadInt64FromBoxOrSmi(compiler);
6443 } else if ((value_cid == box_cid) || !CanDeoptimize()) {
6444 EmitLoadFromBox(compiler);
6445 } else {
6446 EmitLoadFromBoxWithDeopt(compiler);
6447 }
6448 }
6449}
6450
6452 const GrowableArray<Definition*>& definitions,
6453 intptr_t fixed_parameter_count,
6454 intptr_t lazy_deopt_pruning_count,
6455 const ParsedFunction& parsed_function) {
6456 Environment* env = new (zone) Environment(
6457 definitions.length(), fixed_parameter_count, lazy_deopt_pruning_count,
6458 parsed_function.function(), nullptr);
6459 for (intptr_t i = 0; i < definitions.length(); ++i) {
6460 env->values_.Add(new (zone) Value(definitions[i]));
6461 }
6462 return env;
6463}
6464
6466 values_.Add(value);
6467}
6468
6470 ASSERT(length <= values_.length());
6471 Environment* copy = new (zone) Environment(
6472 length, fixed_parameter_count_, LazyDeoptPruneCount(), function_,
6473 (outer_ == nullptr) ? nullptr : outer_->DeepCopy(zone));
6474 copy->SetDeoptId(DeoptIdBits::decode(bitfield_));
6475 copy->SetLazyDeoptToBeforeDeoptId(LazyDeoptToBeforeDeoptId());
6476 if (IsHoisted()) {
6477 copy->MarkAsHoisted();
6478 }
6479 if (locations_ != nullptr) {
6480 Location* new_locations = zone->Alloc<Location>(length);
6481 copy->set_locations(new_locations);
6482 }
6483 for (intptr_t i = 0; i < length; ++i) {
6484 copy->values_.Add(values_[i]->CopyWithType(zone));
6485 if (locations_ != nullptr) {
6486 copy->locations_[i] = locations_[i].Copy();
6487 }
6488 }
6489 return copy;
6490}
6491
6492// Copies the environment and updates the environment use lists.
6493void Environment::DeepCopyTo(Zone* zone, Instruction* instr) const {
6494 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) {
6495 it.CurrentValue()->RemoveFromUseList();
6496 }
6497
6498 Environment* copy = DeepCopy(zone);
6499 instr->SetEnvironment(copy);
6500 for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) {
6501 Value* value = it.CurrentValue();
6502 value->definition()->AddEnvUse(value);
6503 }
6504}
6505
6507 Instruction* instr,
6508 intptr_t argc,
6509 Definition* dead,
6510 Definition* result) const {
6511 for (Environment::DeepIterator it(instr->env()); !it.Done(); it.Advance()) {
6512 it.CurrentValue()->RemoveFromUseList();
6513 }
6514
6515 Environment* copy =
6516 DeepCopy(zone, values_.length() - argc - LazyDeoptPruneCount());
6517 copy->SetLazyDeoptPruneCount(0);
6518 for (intptr_t i = 0; i < argc; i++) {
6519 copy->values_.Add(new (zone) Value(dead));
6520 }
6521 copy->values_.Add(new (zone) Value(result));
6522
6523 instr->SetEnvironment(copy);
6524 for (Environment::DeepIterator it(copy); !it.Done(); it.Advance()) {
6525 Value* value = it.CurrentValue();
6526 value->definition()->AddEnvUse(value);
6527 }
6528}
6529
6530// Copies the environment as outer on an inlined instruction and updates the
6531// environment use lists.
6533 Instruction* instr,
6534 intptr_t outer_deopt_id) const {
6535 // Create a deep copy removing caller arguments from the environment.
6536 ASSERT(instr->env()->outer() == nullptr);
6537 intptr_t argument_count = instr->env()->fixed_parameter_count();
6539 DeepCopy(zone, values_.length() - argument_count - LazyDeoptPruneCount());
6540 outer->SetDeoptId(outer_deopt_id);
6541 outer->SetLazyDeoptPruneCount(0);
6542 instr->env()->outer_ = outer;
6543 intptr_t use_index = instr->env()->Length(); // Start index after inner.
6544 for (Environment::DeepIterator it(outer); !it.Done(); it.Advance()) {
6545 Value* value = it.CurrentValue();
6546 value->set_instruction(instr);
6547 value->set_use_index(use_index++);
6548 value->definition()->AddEnvUse(value);
6549 }
6550}
6551
6553 Value* new_right) {
6554 UNREACHABLE();
6555 return nullptr;
6556}
6557
6559 Value* new_right) {
6560 return new EqualityCompareInstr(source(), kind(), new_left, new_right,
6561 operation_cid(), deopt_id(), is_null_aware(),
6562 speculative_mode_);
6563}
6564
6566 Value* new_right) {
6567 return new RelationalOpInstr(source(), kind(), new_left, new_right,
6568 operation_cid(), deopt_id(),
6569 SpeculativeModeOfInputs());
6570}
6571
6573 Value* new_right) {
6574 return new StrictCompareInstr(source(), kind(), new_left, new_right,
6576}
6577
6579 Value* new_right) {
6580 return new TestSmiInstr(source(), kind(), new_left, new_right);
6581}
6582
6584 Value* new_right) {
6585 return new TestCidsInstr(source(), kind(), new_left, cid_results(),
6586 deopt_id());
6587}
6588
6590 Value* new_right) {
6591 return new TestRangeInstr(source(), new_left, lower_, upper_,
6592 value_representation_);
6593}
6594
6596 auto const other_instr = other.AsTestCids();
6598 return false;
6599 }
6600 if (cid_results().length() != other_instr->cid_results().length()) {
6601 return false;
6602 }
6603 for (intptr_t i = 0; i < cid_results().length(); i++) {
6604 if (cid_results()[i] != other_instr->cid_results()[i]) {
6605 return false;
6606 }
6607 }
6608 return true;
6609}
6610
6612 auto const other_instr = other.AsTestRange();
6614 return false;
6615 }
6616 return lower_ == other_instr->lower_ && upper_ == other_instr->upper_ &&
6617 value_representation_ == other_instr->value_representation_;
6618}
6619
6621 Value* v1,
6622 Value* v2) {
6623 bool is_smi_result = v1->BindsToSmiConstant() && v2->BindsToSmiConstant();
6624 if (comparison->IsStrictCompare()) {
6625 // Strict comparison with number checks calls a stub and is not supported
6626 // by if-conversion.
6627 return is_smi_result &&
6628 !comparison->AsStrictCompare()->needs_number_check();
6629 }
6630 if (comparison->operation_cid() != kSmiCid) {
6631 // Non-smi comparisons are not supported by if-conversion.
6632 return false;
6633 }
6634 return is_smi_result;
6635}
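// Illustrative consequence of the check above: a diamond computing
// `c ? 1 : 2`, where both values are Smi constants and the comparison is a
// Smi comparison (or a strict compare that needs no number check), is a
// candidate for if-conversion into a select; any other comparison keeps
// the branch.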
6636
6638 ASSERT(InputCount() > 1);
6639 Definition* first = InputAt(0)->definition();
6640 for (intptr_t i = 1; i < InputCount(); ++i) {
6641 Definition* def = InputAt(i)->definition();
6642 if (def != first) return false;
6643 }
6644 return true;
6645}
6646
6648 Definition* first = InputAt(0)->definition();
6649 if (InputCount() == 1) {
6650 return first;
6651 }
6652 ASSERT(InputCount() > 1);
6653 Definition* first_origin = first->OriginalDefinition();
6654 bool look_for_redefinition = false;
6655 for (intptr_t i = 1; i < InputCount(); ++i) {
6656 Definition* def = InputAt(i)->definition();
6657 if ((def != first) && (def != this)) {
6658 Definition* origin = def->OriginalDefinition();
6659 if ((origin != first_origin) && (origin != this)) return nullptr;
6660 look_for_redefinition = true;
6661 }
6662 }
6663 if (look_for_redefinition) {
6664 // Find the most specific redefinition which is common for all inputs
6665 // (the longest common chain).
6666 Definition* redef = first;
6667 for (intptr_t i = 1, n = InputCount(); redef != first_origin && i < n;) {
6668 Value* value = InputAt(i);
6669 bool found = false;
6670 do {
6671 Definition* def = value->definition();
6672 if ((def == redef) || (def == this)) {
6673 found = true;
6674 break;
6675 }
6676 value = def->RedefinedValue();
6677 } while (value != nullptr);
6678 if (found) {
6679 ++i;
6680 } else {
6681 ASSERT(redef != first_origin);
6682 redef = redef->RedefinedValue()->definition();
6683 }
6684 }
6685 return redef;
6686 } else {
6687 return first;
6688 }
6689}
6690
6692 for (intptr_t i = 0; i < phi->InputCount(); i++) {
6693 if (phi->InputAt(i)->definition()->RedefinedValue() == nullptr) {
6694 return false;
6695 }
6696 }
6697 return true;
6698}
6699
6702 if (replacement != nullptr && flow_graph->is_licm_allowed() &&
6704 // If we are replacing a Phi which has redefinitions as all of its inputs
6705 // then to maintain the redefinition chain we are going to insert a
6706 // redefinition. If any input is *not* a redefinition that means that
6707 // whatever properties were inferred for a Phi also hold on a path
6708 // that does not pass through any redefinitions so there is no need
6709 // to redefine this value.
6710 auto zone = flow_graph->zone();
6711 auto redef = new (zone) RedefinitionInstr(new (zone) Value(replacement));
6712 flow_graph->InsertAfter(block(), redef, /*env=*/nullptr, FlowGraph::kValue);
6713
6714 // Redefinition is not going to dominate the block entry itself, so we
6715 // have to handle environment uses at the block entry specially.
6716 Value* next_use;
6717 for (Value* use = env_use_list(); use != nullptr; use = next_use) {
6718 next_use = use->next_use();
6719 if (use->instruction() == block()) {
6720 use->RemoveFromUseList();
6721 use->set_definition(replacement);
6722 replacement->AddEnvUse(use);
6723 }
6724 }
6725 return redef;
6726 }
6727
6728 return (replacement != nullptr) ? replacement : this;
6729}
6730
6731// Removes current phi from graph and sets current to previous phi.
6734 (*phis_)[index_] = phis_->Last();
6735 phis_->RemoveLast();
6736 --index_;
6737}
6738
6740 if (StrictCompareInstr* strict_compare = comparison()->AsStrictCompare()) {
6741 if ((InputAt(0)->definition()->OriginalDefinition() ==
6742 InputAt(1)->definition()->OriginalDefinition()) &&
6743 strict_compare->kind() == Token::kEQ_STRICT) {
6744 return nullptr;
6745 }
6746 }
6747 return this;
6748}
6749
6751 bool opt) const {
6754 return comparison()->locs();
6755}
6756
6757void CheckConditionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6758 compiler::Label if_true;
6759 compiler::Label* if_false =
6760 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnknown);
6761 BranchLabels labels = {&if_true, if_false, &if_true};
6762 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
6763 if (true_condition != kInvalidCondition) {
6764 __ BranchIf(InvertCondition(true_condition), if_false);
6765 }
6766 __ Bind(&if_true);
6767}
6768
6772
6774 return (flow_graph->should_remove_all_bounds_checks() || IsRedundant())
6775 ? index()->definition()
6776 : this;
6777}
6778
6779intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) {
6780 if (IsTypedDataBaseClassId(class_id)) {
6781 return compiler::target::TypedDataBase::length_offset();
6782 }
6783
6784 switch (class_id) {
6785 case kGrowableObjectArrayCid:
6786 return compiler::target::GrowableObjectArray::length_offset();
6787 case kOneByteStringCid:
6788 case kTwoByteStringCid:
6789 return compiler::target::String::length_offset();
6790 case kArrayCid:
6791 case kImmutableArrayCid:
6792 return compiler::target::Array::length_offset();
6793 default:
6794 UNREACHABLE();
6795 return -1;
6796 }
6797}
6798
6801 return this;
6802 }
6803
6805 intptr_t cid = value()->Type()->ToCid();
6806 if ((cid != kIllegalCid) && (cid != kDynamicCid) &&
6808 return value()->definition();
6809 }
6810 return this;
6811}
6812
6814 AlignmentType alignment) {
6816 case kUnboxedInt8:
6817 case kUnboxedUint8:
6818 // Don't need to worry about alignment for accessing bytes.
6819 return kAlignedAccess;
6820 case kUnboxedFloat32x4:
6821 case kUnboxedInt32x4:
6822 case kUnboxedFloat64x2:
6823 // TODO(rmacnak): Investigate alignment requirements of floating point
6824 // loads.
6825 return kAlignedAccess;
6826 default:
6827 return alignment;
6828 }
6829}
6830
6832 Value* index,
6833 bool index_unboxed,
6834 intptr_t index_scale,
6835 intptr_t class_id,
6836 AlignmentType alignment,
6837 intptr_t deopt_id,
6839 CompileType* result_type)
6840 : TemplateDefinition(source, deopt_id),
6841 index_unboxed_(index_unboxed),
6842 index_scale_(index_scale),
6843 class_id_(class_id),
6844 alignment_(StrengthenAlignment(class_id, alignment)),
6845 token_pos_(source.token_pos),
6846 result_type_(result_type) {
6847 // In particular, notice that kPointerCid is _not_ supported because it gives
6848 // no information about whether the elements are signed for elements with
6849 // unboxed integer representations. The constructor must take that
6850 // information separately to allow kPointerCid.
6851 ASSERT(class_id != kPointerCid);
6852 SetInputAt(kArrayPos, array);
6853 SetInputAt(kIndexPos, index);
6854}
6855
6857 flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
6858
6859 if (auto box = index()->definition()->AsBoxInt64()) {
6860 // TODO(dartbug.com/39432): Make LoadIndexed fully support unboxed indices.
6861 if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
6862 auto Z = flow_graph->zone();
6863 auto load = new (Z) LoadIndexedInstr(
6864 array()->CopyWithType(Z), box->value()->CopyWithType(Z),
6865 /*index_unboxed=*/true, index_scale(), class_id(), alignment_,
6866 GetDeoptId(), source(), result_type_);
6867 flow_graph->InsertBefore(this, load, env(), FlowGraph::kValue);
6868 return load;
6869 }
6870 }
6871 return this;
6872}
6873
6878
6880 Value* index,
6881 Value* value,
6882 StoreBarrierType emit_store_barrier,
6883 bool index_unboxed,
6884 intptr_t index_scale,
6885 intptr_t class_id,
6886 AlignmentType alignment,
6887 intptr_t deopt_id,
6889 SpeculativeMode speculative_mode)
6890 : TemplateInstruction(source, deopt_id),
6891 emit_store_barrier_(emit_store_barrier),
6892 index_unboxed_(index_unboxed),
6893 index_scale_(index_scale),
6894 class_id_(class_id),
6895 alignment_(StrengthenAlignment(class_id, alignment)),
6896 token_pos_(source.token_pos),
6897 speculative_mode_(speculative_mode) {
6898 // In particular, notice that kPointerCid is _not_ supported because it gives
6899 // no information about whether the elements are signed for elements with
6900 // unboxed integer representations. The constructor must take that information
6901 // separately to allow kPointerCid.
6902 ASSERT(class_id != kPointerCid);
6903 SetInputAt(kArrayPos, array);
6904 SetInputAt(kIndexPos, index);
6905 SetInputAt(kValuePos, value);
6906}
6907
6909 flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
6910
6911 if (auto box = index()->definition()->AsBoxInt64()) {
6912 // TODO(dartbug.com/39432): Make StoreIndexed fully support unboxed indices.
6913 if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
6914 auto Z = flow_graph->zone();
6915 auto store = new (Z) StoreIndexedInstr(
6916 array()->CopyWithType(Z), box->value()->CopyWithType(Z),
6917 value()->CopyWithType(Z), emit_store_barrier_,
6918 /*index_unboxed=*/true, index_scale(), class_id(), alignment_,
6919 GetDeoptId(), source(), speculative_mode_);
6920 flow_graph->InsertBefore(this, store, env(), FlowGraph::kEffect);
6921 return nullptr;
6922 }
6923 }
6924 return this;
6925}
6926
6931
6933 intptr_t idx) const {
6934 // Array can be a Dart object or a pointer to external data.
6935 if (idx == 0) return kNoRepresentation; // Flexible input representation.
6936 if (idx == 1) {
6937 if (index_unboxed_) {
6938#if defined(TARGET_ARCH_IS_64_BIT)
6939 return kUnboxedInt64;
6940#else
6941 // TODO(dartbug.com/39432): kUnboxedInt32 || kUnboxedUint32 on 32-bit
6942 // architectures.
6943 return kNoRepresentation; // Index can be any unboxed representation.
6944#endif
6945 } else {
6946 return kTagged; // Index is a smi.
6947 }
6948 }
6949 ASSERT(idx == 2);
6950 return ValueRepresentation(class_id());
6951}
6952
6953#if defined(TARGET_ARCH_ARM64)
6954// We can emit a 16 byte move in a single instruction using LDP/STP.
6955static const intptr_t kMaxElementSizeForEfficientCopy = 16;
6956#else
6957static const intptr_t kMaxElementSizeForEfficientCopy =
6958 compiler::target::kWordSize;
6959#endif
6960
6962 flow_graph->ExtractExternalUntaggedPayload(this, src(), src_cid_);
6963 flow_graph->ExtractExternalUntaggedPayload(this, dest(), dest_cid_);
6964
6965 if (!length()->BindsToSmiConstant()) {
6966 return this;
6967 } else if (length()->BoundSmiConstant() == 0) {
6968 // Nothing to copy.
6969 return nullptr;
6970 }
6971
6972 if (!src_start()->BindsToSmiConstant() ||
6973 !dest_start()->BindsToSmiConstant()) {
6974 // TODO(https://dartbug.com/51031): Consider adding support for src/dest
6975 // starts to be in bytes rather than element size.
6976 return this;
6977 }
6978
6979 intptr_t new_length = length()->BoundSmiConstant();
6980 intptr_t new_src_start = src_start()->BoundSmiConstant();
6981 intptr_t new_dest_start = dest_start()->BoundSmiConstant();
6982 intptr_t new_element_size = element_size_;
6983 while (((new_length | new_src_start | new_dest_start) & 1) == 0 &&
6984 new_element_size < kMaxElementSizeForEfficientCopy) {
6985 new_length >>= 1;
6986 new_src_start >>= 1;
6987 new_dest_start >>= 1;
6988 new_element_size <<= 1;
6989 }
6990 if (new_element_size == element_size_) {
6991 return this;
6992 }
6993
6994 // The new element size is larger than the original one, so it must be > 1.
6995 // That means unboxed integers will always require a shift, but Smis
6996 // may not if element_size == 2, so always use Smis.
6997 auto* const Z = flow_graph->zone();
6998 auto* const length_instr =
6999 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_length)));
7000 auto* const src_start_instr =
7001 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_src_start)));
7002 auto* const dest_start_instr =
7003 flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_dest_start)));
7004 length()->BindTo(length_instr);
7005 src_start()->BindTo(src_start_instr);
7006 dest_start()->BindTo(dest_start_instr);
7007 element_size_ = new_element_size;
7008 unboxed_inputs_ = false;
7009 return this;
7010}
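// Illustrative example of the element-size coarsening above: a copy of 8
// two-byte elements with src_start == 0 and dest_start == 0 becomes, on a
// 64-bit target, a copy of 2 eight-byte elements (and on ARM64 a single
// 16-byte element), which lets the unrolled copy below use wider moves.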
7011
7012void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7013 const Location& length_loc = locs()->in(kLengthPos);
7014 // Note that for all architectures, constant_length is only true if
7015 // length() binds to a _small_ constant, so we can end up generating a loop
7016 // if the constant length() was bound to is too large.
7017 const bool constant_length = length_loc.IsConstant();
7018 const Register length_reg = constant_length ? kNoRegister : length_loc.reg();
7019 const intptr_t num_elements =
7020 constant_length ? Integer::Cast(length_loc.constant()).AsInt64Value()
7021 : -1;
7022
7023 // The zero constant case should be handled via canonicalization.
7024 ASSERT(!constant_length || num_elements > 0);
7025
7026#if defined(TARGET_ARCH_IA32)
7027 // We don't have enough registers to create temps for these, so we just
7028 // define them to be the same as src_reg and dest_reg below.
7029 const Register src_payload_reg = locs()->in(kSrcPos).reg();
7030 const Register dest_payload_reg = locs()->in(kDestPos).reg();
7031#else
7032 const Register src_payload_reg = locs()->temp(0).reg();
7033 const Register dest_payload_reg = locs()->temp(1).reg();
7034#endif
7035
7036 {
7037 const Register src_reg = locs()->in(kSrcPos).reg();
7038 const Register dest_reg = locs()->in(kDestPos).reg();
7039 const Representation src_rep = src()->definition()->representation();
7040 const Representation dest_rep = dest()->definition()->representation();
7041 const Location& src_start_loc = locs()->in(kSrcStartPos);
7042 const Location& dest_start_loc = locs()->in(kDestStartPos);
7043
7044 EmitComputeStartPointer(compiler, src_cid_, src_reg, src_payload_reg,
7045 src_rep, src_start_loc);
7046 EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_payload_reg,
7047 dest_rep, dest_start_loc);
7048 }
7049
7050 compiler::Label copy_forwards, done;
7051 if (!constant_length) {
7052#if defined(TARGET_ARCH_IA32)
7053 // Save ESI (THR), as we have to use it on the loop path.
7054 __ PushRegister(ESI);
7055#endif
7056 PrepareLengthRegForLoop(compiler, length_reg, &done);
7057 }
7058 // Omit the reversed loop for possible overlap if copying a single element.
7059 if (can_overlap() && num_elements != 1) {
7060 __ CompareRegisters(dest_payload_reg, src_payload_reg);
7061 // Both regions are the same size, so if there is an overlap, then either:
7062 //
7063 // * The destination region comes before the source, so copying from
7064 // front to back ensures that the data in the overlap is read and
7065 // copied before it is written.
7066 // * The source region comes before the destination, which requires
7067 // copying from back to front to ensure that the data in the overlap is
7068 // read and copied before it is written.
7069 //
7070 // To make the generated code smaller for the unrolled case, we do not
7071 // additionally verify here that there is an actual overlap. Instead, only
7072 // do that when we need to calculate the end address of the regions in
7073 // the loop case.
7074#if defined(USING_MEMORY_SANITIZER)
7075 const auto jump_distance = compiler::Assembler::kFarJump;
7076#else
7077 const auto jump_distance = compiler::Assembler::kNearJump;
7078#endif
7079 __ BranchIf(UNSIGNED_LESS_EQUAL, &copy_forwards, jump_distance);
7080 __ Comment("Copying backwards");
7081 if (constant_length) {
7082 EmitUnrolledCopy(compiler, dest_payload_reg, src_payload_reg,
7083 num_elements, /*reversed=*/true);
7084 } else {
7085 EmitLoopCopy(compiler, dest_payload_reg, src_payload_reg, length_reg,
7086 &done, &copy_forwards);
7087 }
7088 __ Jump(&done, jump_distance);
7089 __ Comment("Copying forwards");
7090 }
7091 __ Bind(&copy_forwards);
7092 if (constant_length) {
7093 EmitUnrolledCopy(compiler, dest_payload_reg, src_payload_reg, num_elements,
7094 /*reversed=*/false);
7095 } else {
7096 EmitLoopCopy(compiler, dest_payload_reg, src_payload_reg, length_reg,
7097 &done);
7098 }
7099 __ Bind(&done);
7100#if defined(TARGET_ARCH_IA32)
7101 if (!constant_length) {
7102 // Restore ESI (THR).
7103 __ PopRegister(ESI);
7104 }
7105#endif
7106}
7107
7108// EmitUnrolledCopy on ARM is different enough that it is defined separately.
7109#if !defined(TARGET_ARCH_ARM)
7111 Register dest_reg,
7112 Register src_reg,
7113 intptr_t num_elements,
7114 bool reversed) {
7115 ASSERT(element_size_ <= 16);
7116 const intptr_t num_bytes = num_elements * element_size_;
7117#if defined(TARGET_ARCH_ARM64)
7118 // We use LDP/STP with TMP/TMP2 to handle 16-byte moves.
7119 const intptr_t mov_size = element_size_;
7120#else
7121 const intptr_t mov_size =
7122 Utils::Minimum<intptr_t>(element_size_, compiler::target::kWordSize);
7123#endif
7124 const intptr_t mov_repeat = num_bytes / mov_size;
7125 ASSERT(num_bytes % mov_size == 0);
7126
7127#if defined(TARGET_ARCH_IA32)
7128 // No TMP on IA32, so we have to allocate one instead.
7129 const Register temp_reg = locs()->temp(0).reg();
7130#else
7131 const Register temp_reg = TMP;
7132#endif
7133 for (intptr_t i = 0; i < mov_repeat; i++) {
7134 const intptr_t offset = (reversed ? (mov_repeat - (i + 1)) : i) * mov_size;
7135 switch (mov_size) {
7136 case 1:
7137 __ LoadFromOffset(temp_reg, src_reg, offset, compiler::kUnsignedByte);
7138 __ StoreToOffset(temp_reg, dest_reg, offset, compiler::kUnsignedByte);
7139 break;
7140 case 2:
7141 __ LoadFromOffset(temp_reg, src_reg, offset,
7143 __ StoreToOffset(temp_reg, dest_reg, offset,
7145 break;
7146 case 4:
7147 __ LoadFromOffset(temp_reg, src_reg, offset,
7149 __ StoreToOffset(temp_reg, dest_reg, offset,
7151 break;
7152 case 8:
7153#if defined(TARGET_ARCH_IS_64_BIT)
7154 __ LoadFromOffset(temp_reg, src_reg, offset, compiler::kEightBytes);
7155 __ StoreToOffset(temp_reg, dest_reg, offset, compiler::kEightBytes);
7156#else
7157 UNREACHABLE();
7158#endif
7159 break;
7160 case 16: {
7161#if defined(TARGET_ARCH_ARM64)
7162 __ ldp(
7163 TMP, TMP2,
7165 __ stp(
7166 TMP, TMP2,
7168#else
7169 UNREACHABLE();
7170#endif
7171 break;
7172 }
7173 default:
7174 UNREACHABLE();
7175 }
7176 }
7177
7178#if defined(USING_MEMORY_SANITIZER) && defined(TARGET_ARCH_X64)
7181 __ PushRegisters(kVolatileRegisterSet);
7182 __ MsanUnpoison(dest_reg, num_bytes);
7183 __ PopRegisters(kVolatileRegisterSet);
7184#endif
7185}
7186#endif
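// Worked example for EmitUnrolledCopy above (illustrative): with
// num_elements == 2 and element_size_ == 8 on a 32-bit target, mov_size is
// Utils::Minimum(8, kWordSize) == 4 and mov_repeat is 4, so four word-sized
// load/store pairs are emitted; with reversed == true the offsets are
// visited as 12, 8, 4, 0 and the copy runs back to front.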
7187
7189 return RepresentationUtils::IsUnboxed(scan_flags_field_.representation());
7190}
7191
7193 InputsArray&& inputs,
7194 intptr_t deopt_id,
7195 MethodRecognizer::Kind recognized_kind,
7197 : VariadicDefinition(std::move(inputs), source, deopt_id),
7198 recognized_kind_(recognized_kind),
7199 token_pos_(source.token_pos) {
7200 ASSERT(InputCount() == ArgumentCountFor(recognized_kind_));
7201}
7202
7205 switch (kind) {
7206 case MethodRecognizer::kDoubleTruncateToDouble:
7207 case MethodRecognizer::kDoubleFloorToDouble:
7208 case MethodRecognizer::kDoubleCeilToDouble:
7209 case MethodRecognizer::kDoubleRoundToDouble:
7210 case MethodRecognizer::kMathAtan:
7211 case MethodRecognizer::kMathTan:
7212 case MethodRecognizer::kMathAcos:
7213 case MethodRecognizer::kMathAsin:
7214 case MethodRecognizer::kMathSin:
7215 case MethodRecognizer::kMathCos:
7216 case MethodRecognizer::kMathExp:
7217 case MethodRecognizer::kMathLog:
7218 return 1;
7219 case MethodRecognizer::kDoubleMod:
7220 case MethodRecognizer::kMathDoublePow:
7221 case MethodRecognizer::kMathAtan2:
7222 return 2;
7223 default:
7224 UNREACHABLE();
7225 }
7226 return 0;
7227}
7228
7230 switch (recognized_kind_) {
7231 case MethodRecognizer::kDoubleTruncateToDouble:
7232 return kLibcTruncRuntimeEntry;
7233 case MethodRecognizer::kDoubleRoundToDouble:
7234 return kLibcRoundRuntimeEntry;
7235 case MethodRecognizer::kDoubleFloorToDouble:
7236 return kLibcFloorRuntimeEntry;
7237 case MethodRecognizer::kDoubleCeilToDouble:
7238 return kLibcCeilRuntimeEntry;
7239 case MethodRecognizer::kMathDoublePow:
7240 return kLibcPowRuntimeEntry;
7241 case MethodRecognizer::kDoubleMod:
7242 return kDartModuloRuntimeEntry;
7243 case MethodRecognizer::kMathTan:
7244 return kLibcTanRuntimeEntry;
7245 case MethodRecognizer::kMathAsin:
7246 return kLibcAsinRuntimeEntry;
7247 case MethodRecognizer::kMathSin:
7248 return kLibcSinRuntimeEntry;
7249 case MethodRecognizer::kMathCos:
7250 return kLibcCosRuntimeEntry;
7251 case MethodRecognizer::kMathAcos:
7252 return kLibcAcosRuntimeEntry;
7253 case MethodRecognizer::kMathAtan:
7254 return kLibcAtanRuntimeEntry;
7255 case MethodRecognizer::kMathAtan2:
7256 return kLibcAtan2RuntimeEntry;
7257 case MethodRecognizer::kMathExp:
7258 return kLibcExpRuntimeEntry;
7259 case MethodRecognizer::kMathLog:
7260 return kLibcLogRuntimeEntry;
7261 default:
7262 UNREACHABLE();
7263 }
7264 return kLibcPowRuntimeEntry;
7265}
7266
7267TruncDivModInstr::TruncDivModInstr(Value* lhs, Value* rhs, intptr_t deopt_id)
7268 : TemplateDefinition(deopt_id) {
7269 SetInputAt(0, lhs);
7270 SetInputAt(1, rhs);
7271}
7272
7274 switch (token) {
7275 case Token::kTRUNCDIV:
7276 return 0;
7277 case Token::kMOD:
7278 return 1;
7279 default:
7280 UNIMPLEMENTED();
7281 return -1;
7282 }
7283}
7284
7286 bool optimizing) const {
7287 return MakeCallSummary(zone, this);
7288}
7289
7291 if (link_lazily()) {
7292 // Resolution will happen during NativeEntry::LinkNativeCall.
7293 return;
7294 }
7295
7296 Thread* thread = Thread::Current();
7297 Zone* zone = thread->zone();
7298
7299 // Currently we perform unoptimized compilations only on mutator threads. If
7300 // the compiler has to resolve a native to a function pointer it calls out to
7301 // the embedder to do so.
7302 //
7303 // Unfortunately that embedder API was designed to take a handle to a
7304 // string, so the embedder has to call back into the VM to convert it to
7305 // a C string - which requires an active isolate.
7306 //
7307 // => To allow this `dart-->jit-compiler-->embedder-->dart api` we set the
7308 // active isolate again.
7309 //
7310 ActiveIsolateScope active_isolate(thread);
7311
7312 const Class& cls = Class::Handle(zone, function().Owner());
7313 const Library& library = Library::Handle(zone, cls.library());
7314
7318
7319 const int num_params =
7321 bool auto_setup_scope = true;
7323 library, native_name(), num_params, &auto_setup_scope);
7324 if (native_function == nullptr) {
7325 if (has_inlining_id()) {
7326 UNIMPLEMENTED();
7327 }
7330 "native function '%s' (%" Pd " arguments) cannot be found",
7331 native_name().ToCString(), function().NumParameters());
7332 }
7333 set_is_auto_scope(auto_setup_scope);
7334 set_native_c_function(native_function);
7335}
7336
7337#if !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) && \
7338 !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
7339
7340LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7341 UNREACHABLE();
7342}
7343
7344void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7345 UNREACHABLE();
7346}
7347
7348#endif // !defined(TARGET_ARCH_ARM) && !defined(TARGET_ARCH_ARM64) && \
7349 // !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
7350
7352 if (idx < TargetAddressIndex()) {
7353 // All input handles are passed as tagged values to FfiCallInstr and
7354 // are given stack locations. FfiCallInstr then passes an untagged pointer
7355 // to the handle on the stack (Dart_Handle) to the C function.
7356 if (marshaller_.IsHandleCType(marshaller_.ArgumentIndex(idx))) {
7357 return kTagged;
7358 }
7359 return marshaller_.RepInFfiCall(idx);
7360 } else if (idx == TargetAddressIndex()) {
7361#if defined(DEBUG)
7362 auto const rep =
7364 ASSERT(rep == kUntagged || rep == kUnboxedAddress);
7365#endif
7366 return kNoRepresentation; // Allows kUntagged or kUnboxedAddress.
7367 } else {
7369 return kTagged;
7370 }
7371}
7372
7373#define Z zone_
7374
7375LocationSummary* FfiCallInstr::MakeLocationSummaryInternal(
7376 Zone* zone,
7377 bool is_optimizing,
7378 const RegList temps) const {
7379 auto contains_call =
7381
7382 LocationSummary* summary = new (zone) LocationSummary(
7383 zone, /*num_inputs=*/InputCount(),
7384 /*num_temps=*/Utils::CountOneBitsWord(temps), contains_call);
7385
7386 intptr_t reg_i = 0;
7387 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
7388 if ((temps & (1 << reg)) != 0) {
7389 summary->set_temp(reg_i,
7390 Location::RegisterLocation(static_cast<Register>(reg)));
7391 reg_i++;
7392 }
7393 }
7394
7395#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
7396 // Only use R13 if really needed, having R13 free causes less spilling.
7397 const Register target_address =
7398 marshaller_.contains_varargs()
7399 ? R13
7401#else
7403#endif
7404#define R(r) (1 << r)
7405 ASSERT_EQUAL(temps & R(target_address), 0x0);
7406#undef R
7407 summary->set_in(TargetAddressIndex(),
7408 Location::RegisterLocation(target_address));
7409 for (intptr_t i = 0, n = marshaller_.NumArgumentDefinitions(); i < n; ++i) {
7410 summary->set_in(i, marshaller_.LocInFfiCall(i));
7411 }
7412
7413 if (marshaller_.ReturnsCompound()) {
7414 summary->set_in(CompoundReturnTypedDataIndex(), Location::Any());
7415 }
7416 summary->set_out(0, marshaller_.LocInFfiCall(compiler::ffi::kResultIndex));
7417
7418 return summary;
7419}
7420
7422 const Register saved_fp,
7423 const Register temp0,
7424 const Register temp1) {
7425 __ Comment("EmitParamMoves");
7426
7427 // Moves for return pointer.
7428 const auto& return_location =
7429 marshaller_.Location(compiler::ffi::kResultIndex);
7430 if (return_location.IsPointerToMemory()) {
7431 __ Comment("return_location.IsPointerToMemory");
7432 const auto& pointer_location =
7433 return_location.AsPointerToMemory().pointer_location();
7434 const auto& pointer_register =
7435 pointer_location.IsRegisters()
7436 ? pointer_location.AsRegisters().reg_at(0)
7437 : temp0;
7438 __ MoveRegister(pointer_register, SPREG);
7439 __ AddImmediate(pointer_register, marshaller_.PassByPointerStackOffset(
7441
7442 if (pointer_location.IsStack()) {
7443 const auto& pointer_stack = pointer_location.AsStack();
7444 __ StoreMemoryValue(pointer_register, pointer_stack.base_register(),
7445 pointer_stack.offset_in_bytes());
7446 }
7447 }
7448
7449 // Moves for arguments.
7450 compiler::ffi::FrameRebase rebase(compiler->zone(), /*old_base=*/FPREG,
7451 /*new_base=*/saved_fp,
7452 /*stack_delta=*/0);
7453 intptr_t def_index = 0;
7454 for (intptr_t arg_index = 0; arg_index < marshaller_.num_args();
7455 arg_index++) {
7456 const intptr_t num_defs = marshaller_.NumDefinitions(arg_index);
7457 const auto& arg_target = marshaller_.Location(arg_index);
7458 __ Comment("arg_index %" Pd " arg_target %s", arg_index,
7459 arg_target.ToCString());
7460
7461 // First deal with moving all individual definitions passed in to the
7462 // FfiCall to the right native location based on calling convention.
7463 for (intptr_t i = 0; i < num_defs; i++) {
7464 if ((arg_target.IsPointerToMemory() ||
7465 marshaller_.IsCompoundPointer(arg_index)) &&
7466 i == 1) {
7467 // The offset_in_bytes is not an argument for C, so don't move it.
7468 // It is used as offset_in_bytes_loc below and moved there if
7469 // necessary.
7470 def_index++;
7471 continue;
7472 }
7473 __ Comment(" def_index %" Pd, def_index);
7474 Location origin = rebase.Rebase(locs()->in(def_index));
7475 const Representation origin_rep = RequiredInputRepresentation(def_index);
7476
7477 // Find the native location where this individual definition should be
7478 // moved to.
7479 const auto& def_target =
7480 arg_target.payload_type().IsPrimitive() ? arg_target
7481 : arg_target.IsMultiple() ? *arg_target.AsMultiple().locations()[i]
7482 : arg_target.IsPointerToMemory()
7483 ? arg_target.AsPointerToMemory().pointer_location()
7484 : /*arg_target.IsStack()*/ arg_target.Split(compiler->zone(),
7485 num_defs, i);
7486
7487 ConstantTemporaryAllocator temp_alloc(temp0);
7488 if (origin.IsConstant()) {
7489 __ Comment("origin.IsConstant()");
7490 ASSERT(!marshaller_.IsHandleCType(arg_index));
7491 ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
7492 ASSERT(!marshaller_.IsCompoundPointer(arg_index));
7493 compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
7494 } else if (origin.IsPairLocation() &&
7495 (origin.AsPairLocation()->At(0).IsConstant() ||
7496 origin.AsPairLocation()->At(1).IsConstant())) {
7497 // Note: half of the pair can be constant.
7498 __ Comment("origin.IsPairLocation() and constant");
7499 ASSERT(!marshaller_.IsHandleCType(arg_index));
7500 ASSERT(!marshaller_.IsTypedDataPointer(arg_index));
7501 ASSERT(!marshaller_.IsCompoundPointer(arg_index));
7502 compiler->EmitMoveConst(def_target, origin, origin_rep, &temp_alloc);
7503 } else if (marshaller_.IsHandleCType(arg_index)) {
7504 __ Comment("marshaller_.IsHandleCType(arg_index)");
7505 // Handles are passed into FfiCalls as Tagged values on the stack, and
7506 // then we pass pointers to these handles to the native function here.
7507 ASSERT(origin_rep == kTagged);
7508 ASSERT(compiler::target::LocalHandle::ptr_offset() == 0);
7509 ASSERT(compiler::target::LocalHandle::InstanceSize() ==
7510 compiler::target::kWordSize);
7511 ASSERT(num_defs == 1);
7512 ASSERT(origin.IsStackSlot());
7513 if (def_target.IsRegisters()) {
7514 __ AddImmediate(def_target.AsLocation().reg(), origin.base_reg(),
7515 origin.stack_index() * compiler::target::kWordSize);
7516 } else {
7517 ASSERT(def_target.IsStack());
7518 const auto& target_stack = def_target.AsStack();
7519 __ AddImmediate(temp0, origin.base_reg(),
7520 origin.stack_index() * compiler::target::kWordSize);
7521 __ StoreToOffset(temp0, target_stack.base_register(),
7522 target_stack.offset_in_bytes());
7523 }
7524 } else {
7525 __ Comment("def_target %s <- origin %s %s",
7526 def_target.ToCString(compiler->zone()), origin.ToCString(),
7527 RepresentationUtils::ToCString(origin_rep));
7528#ifdef DEBUG
7529 // Stack arguments are split into word-size chunks. These chunks can copy
7530 // too much. However, that doesn't matter in practice because we process
7531 // the stack in order.
7532 // It only matters for the last chunk, which should not overwrite what was
7533 // already on the stack.
7534 if (def_target.IsStack()) {
7535 const auto& def_target_stack = def_target.AsStack();
7536 ASSERT(def_target_stack.offset_in_bytes() +
7537 def_target.payload_type().SizeInBytes() <=
7538 marshaller_.RequiredStackSpaceInBytes());
7539 }
7540#endif
7541 if (marshaller_.IsTypedDataPointer(arg_index) ||
7542 marshaller_.IsCompoundPointer(arg_index)) {
7543 // Unwrap typed data before move to native location.
7544 __ Comment("Load typed data base address");
7545 if (origin.IsStackSlot()) {
7546 compiler->EmitMove(Location::RegisterLocation(temp0), origin,
7547 &temp_alloc);
7548 origin = Location::RegisterLocation(temp0);
7549 }
7550 ASSERT(origin.IsRegister());
7551 __ LoadFromSlot(origin.reg(), origin.reg(), Slot::PointerBase_data());
7552 if (marshaller_.IsCompoundPointer(arg_index)) {
7553 __ Comment("Load offset in bytes");
7554 const intptr_t offset_in_bytes_def_index = def_index + 1;
7555 const Location offset_in_bytes_loc =
7556 rebase.Rebase(locs()->in(offset_in_bytes_def_index));
7557 Register offset_in_bytes_reg = kNoRegister;
7558 if (offset_in_bytes_loc.IsRegister()) {
7559 offset_in_bytes_reg = offset_in_bytes_loc.reg();
7560 } else {
7561 offset_in_bytes_reg = temp1;
7562 NoTemporaryAllocator no_temp;
7563 compiler->EmitMove(
7564 Location::RegisterLocation(offset_in_bytes_reg),
7565 offset_in_bytes_loc, &no_temp);
7566 }
7567 __ AddRegisters(origin.reg(), offset_in_bytes_reg);
7568 }
7569 }
7570 compiler->EmitMoveToNative(def_target, origin, origin_rep, &temp_alloc);
7571 }
7572 def_index++;
7573 }
7574
7575 // Then make sure that any pointers passed through the calling convention
7576 // actually have a copy of the struct.
7577 // Note that the step above has already moved the pointer into the expected
7578 // native location.
7579 if (arg_target.IsPointerToMemory()) {
7580 __ Comment("arg_target.IsPointerToMemory");
7581 NoTemporaryAllocator temp_alloc;
7582 const auto& pointer_loc =
7583 arg_target.AsPointerToMemory().pointer_location();
7584
7585 // TypedData data pointed to in temp.
7587 compiler->zone(), pointer_loc.payload_type(),
7588 pointer_loc.container_type(), temp0);
7589 compiler->EmitNativeMove(dst, pointer_loc, &temp_alloc);
7590 __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
7591
7592 __ Comment("IsPointerToMemory add offset");
7593 const intptr_t offset_in_bytes_def_index =
7594 def_index - 1; // ++'d already.
7595 const Location offset_in_bytes_loc =
7596 rebase.Rebase(locs()->in(offset_in_bytes_def_index));
7597 Register offset_in_bytes_reg = kNoRegister;
7598 if (offset_in_bytes_loc.IsRegister()) {
7599 offset_in_bytes_reg = offset_in_bytes_loc.reg();
7600 } else {
7601 offset_in_bytes_reg = temp1;
7602 NoTemporaryAllocator no_temp;
7603 compiler->EmitMove(Location::RegisterLocation(offset_in_bytes_reg),
7604 offset_in_bytes_loc, &no_temp);
7605 }
7606 __ AddRegisters(temp0, offset_in_bytes_reg);
7607
7608 // Copy chunks. The destination may be rounded up to a multiple of the
7609 // word size, because we do the same rounding when we allocate the space
7610 // on the stack. But the source may not be allocated by the VM and may end
7611 // right at a page boundary, so the copy must not read past its end.
7612 __ Comment("IsPointerToMemory copy chunks");
7613 const intptr_t sp_offset =
7614 marshaller_.PassByPointerStackOffset(arg_index);
7615 __ UnrolledMemCopy(SPREG, sp_offset, temp0, 0,
7616 arg_target.payload_type().SizeInBytes(), temp1);
7617
7618 // Store the stack address in the argument location.
7619 __ MoveRegister(temp0, SPREG);
7620 __ AddImmediate(temp0, sp_offset);
7622 compiler->zone(), pointer_loc.payload_type(),
7623 pointer_loc.container_type(), temp0);
7624 __ Comment("pointer_loc %s <- src %s", pointer_loc.ToCString(),
7625 src.ToCString());
7626 compiler->EmitNativeMove(pointer_loc, src, &temp_alloc);
7627 }
7628 }
7629
7630 __ Comment("EmitParamMovesEnd");
7631}
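// Illustrative sketch (not part of il.cc): what the "pass by pointer to
// memory" handling above corresponds to at the C level. When a compound
// argument is passed indirectly, the caller makes a private copy (here: the
// UnrolledMemCopy into reserved stack space) and the callee only ever sees a
// pointer to that copy. All names below are hypothetical.
struct BigValue {
  long fields[8];
};
inline void native_callee(const struct BigValue* arg) {
  (void)arg;  // stand-in for the real native function
}
inline void caller_equivalent(const struct BigValue* src) {
  struct BigValue copy = *src;  // copy into the caller's own stack frame
  native_callee(&copy);         // the pointer to the copy is the real argument
}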
7632
7633 void FfiCallInstr::EmitReturnMoves(FlowGraphCompiler* compiler,
7634 const Register temp0,
7635 const Register temp1) {
7636 const auto& returnLocation =
7637 marshaller_.Location(compiler::ffi::kResultIndex);
7638 if (returnLocation.payload_type().IsVoid()) {
7639 return;
7640 }
7641
7642 __ Comment("EmitReturnMoves");
7643
7644 NoTemporaryAllocator no_temp;
7645 if (returnLocation.IsRegisters() || returnLocation.IsFpuRegisters()) {
7646 const auto& src = returnLocation;
7647 const Location dst_loc = locs()->out(0);
7648 const Representation dst_type = representation();
7649 compiler->EmitMoveFromNative(dst_loc, dst_type, src, &no_temp);
7650 } else if (marshaller_.ReturnsCompound()) {
7651 ASSERT(returnLocation.payload_type().IsCompound());
7652
7653 // Get the typed data pointer which we have pinned to a stack slot.
7654 const Location typed_data_loc = locs()->in(CompoundReturnTypedDataIndex());
7655 if (typed_data_loc.IsStackSlot()) {
7656 ASSERT(typed_data_loc.base_reg() == FPREG);
7657 // If this is a leaf call there is no extra call frame to step through.
7658 if (is_leaf_) {
7659 __ LoadMemoryValue(temp0, FPREG, typed_data_loc.ToStackSlotOffset());
7660 } else {
7661 __ LoadMemoryValue(
7662 temp0, FPREG,
7663 kSavedCallerFpSlotFromFp * compiler::target::kWordSize);
7664 __ LoadMemoryValue(temp0, temp0, typed_data_loc.ToStackSlotOffset());
7665 }
7666 } else {
7667 compiler->EmitMove(Location::RegisterLocation(temp0), typed_data_loc,
7668 &no_temp);
7669 }
7670 __ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
7671
7672 if (returnLocation.IsPointerToMemory()) {
7673 // Copy blocks from the stack location to TypedData.
7674 // Struct size is rounded up to a multiple of target::kWordSize.
7675 // This is safe because we do the same rounding when we allocate the
7676 // TypedData in IL.
7677 const intptr_t sp_offset =
7678 marshaller_.PassByPointerStackOffset(compiler::ffi::kResultIndex);
7679 __ UnrolledMemCopy(temp0, 0, SPREG, sp_offset,
7680 marshaller_.CompoundReturnSizeInBytes(), temp1);
7681 } else {
7682 ASSERT(returnLocation.IsMultiple());
7683 // Copy to the struct from the native locations.
7684 const auto& multiple =
7685 marshaller_.Location(compiler::ffi::kResultIndex).AsMultiple();
7686
7687 int offset_in_bytes = 0;
7688 for (int i = 0; i < multiple.locations().length(); i++) {
7689 const auto& src = *multiple.locations().At(i);
7690 const auto& dst = compiler::ffi::NativeStackLocation(
7691 src.payload_type(), src.container_type(), temp0, offset_in_bytes);
7692 compiler->EmitNativeMove(dst, src, &no_temp);
7693 offset_in_bytes += src.payload_type().SizeInBytes();
7694 }
7695 }
7696 } else {
7697 UNREACHABLE();
7698 }
7699
7700 __ Comment("EmitReturnMovesEnd");
7701}
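// Illustrative sketch (not part of il.cc): the ReturnsCompound() path above
// copies the native return value into the TypedData that will back the Dart
// struct. The hypothetical helper below mirrors the IsMultiple() branch, where
// each register-sized piece is appended at consecutive offsets.
#include <cstddef>
#include <cstdint>
#include <cstring>
struct TwoRegisterReturn {
  uint64_t r0;  // e.g. first integer return register
  uint64_t r1;  // e.g. second integer return register
};
inline void StoreCompoundReturn(const TwoRegisterReturn& regs,
                                uint8_t* typed_data_payload) {
  std::size_t offset = 0;
  std::memcpy(typed_data_payload + offset, &regs.r0, sizeof(regs.r0));
  offset += sizeof(regs.r0);
  std::memcpy(typed_data_payload + offset, &regs.r1, sizeof(regs.r1));
}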
7702
7703LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
7704 bool opt) const {
7705 const intptr_t kNumInputs = 2;
7706#if defined(TARGET_ARCH_IA32)
7707 const intptr_t kNumTemps = ShouldEmitStoreBarrier() ? 1 : 0;
7708#else
7709 const intptr_t kNumTemps = 0;
7710#endif
7711 LocationSummary* summary = new (zone)
7712 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7713
7714 summary->set_in(kInstancePos, Location::RequiresRegister());
7715 const Representation rep = slot().representation();
7716 if (rep == kUntagged) {
7717 summary->set_in(kValuePos, Location::RequiresRegister());
7718 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
7719 const size_t value_size = RepresentationUtils::ValueSize(rep);
7720 if (value_size <= compiler::target::kWordSize) {
7721 summary->set_in(kValuePos, Location::RequiresRegister());
7722 } else {
7723 ASSERT(value_size == 2 * compiler::target::kWordSize);
7724 summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
7726 }
7727 } else if (RepresentationUtils::IsUnboxed(rep)) {
7728 summary->set_in(kValuePos, Location::RequiresFpuRegister());
7729 } else if (ShouldEmitStoreBarrier()) {
7730 summary->set_in(kValuePos,
7732 } else {
7733#if defined(TARGET_ARCH_IA32)
7734 // IA32 supports emitting `mov mem, Imm32` even for heap
7735 // pointer immediates.
7736 summary->set_in(kValuePos, LocationRegisterOrConstant(value()));
7737#elif defined(TARGET_ARCH_X64)
7738 // X64 supports emitting `mov mem, Imm32` only with non-pointer
7739 // immediate.
7740 summary->set_in(kValuePos, LocationRegisterOrSmiConstant(value()));
7741#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
7742 defined(TARGET_ARCH_RISCV64)
7743 // ARM64 and RISC-V have dedicated zero and null registers which can be
7744 // used in store instructions.
7746 if (auto constant = value()->definition()->AsConstant()) {
7747 const auto& value = constant->value();
7748 if (value.IsNull() || (value.IsSmi() && Smi::Cast(value).Value() == 0)) {
7749 value_loc = Location::Constant(constant);
7750 }
7751 }
7752 summary->set_in(kValuePos, value_loc);
7753#else
7754 // No support for moving immediate to memory directly.
7755 summary->set_in(kValuePos, Location::RequiresRegister());
7756#endif
7757 }
7758 if (kNumTemps == 1) {
7759 summary->set_temp(0, Location::RequiresRegister());
7760 } else {
7761 ASSERT(kNumTemps == 0);
7762 }
7763 return summary;
7764}
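// Illustrative sketch (not part of il.cc): the target-specific #if chain above,
// restated as data. It decides which constants may be stored straight to
// memory without first being materialized in a register. The enum is
// hypothetical and only summarizes the comments in MakeLocationSummary.
enum class DirectlyStorableConstant {
  kAnyImmediate,      // IA32: `mov mem, imm32` works even for heap pointers
  kSmiImmediateOnly,  // X64: only non-pointer immediates
  kZeroOrNullOnly,    // ARM64 / RISC-V: via the dedicated zero/null registers
  kNone,              // other targets: load the value into a register first
};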
7765
7766void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7767 const Register instance_reg = locs()->in(kInstancePos).reg();
7768 ASSERT(OffsetInBytes() >= 0); // Field is finalized.
7769 // For fields on Dart objects, the offset must point after the header.
7770 ASSERT(OffsetInBytes() != 0 || slot().has_untagged_instance());
7771
7772 const Representation rep = slot().representation();
7773 if (rep == kUntagged) {
7774 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7775 memory_order_);
7776 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
7777 const size_t value_size = RepresentationUtils::ValueSize(rep);
7778 if (value_size <= compiler::target::kWordSize) {
7779 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7780 memory_order_);
7781 } else {
7782 ASSERT(slot().representation() == kUnboxedInt64);
7783 ASSERT_EQUAL(compiler::target::kWordSize, kInt32Size);
7784 auto const value_pair = locs()->in(kValuePos).AsPairLocation();
7785 const Register value_lo = value_pair->At(0).reg();
7786 const Register value_hi = value_pair->At(1).reg();
7787 __ StoreFieldToOffset(value_lo, instance_reg, OffsetInBytes());
7788 __ StoreFieldToOffset(value_hi, instance_reg,
7789 OffsetInBytes() + compiler::target::kWordSize);
7790 }
7791 } else if (RepresentationUtils::IsUnboxed(rep)) {
7792 ASSERT(slot().IsDartField());
7793 const intptr_t cid = slot().field().guarded_cid();
7794 const FpuRegister value = locs()->in(kValuePos).fpu_reg();
7795 switch (cid) {
7796 case kDoubleCid:
7797 __ StoreUnboxedDouble(value, instance_reg,
7799 return;
7800 case kFloat32x4Cid:
7801 case kFloat64x2Cid:
7802 __ StoreUnboxedSimd128(value, instance_reg,
7804 return;
7805 default:
7806 UNREACHABLE();
7807 }
7808 } else if (ShouldEmitStoreBarrier()) {
7809 const Register scratch_reg =
7810 locs()->temp_count() > 0 ? locs()->temp(0).reg() : TMP;
7811 __ StoreToSlot(locs()->in(kValuePos).reg(), instance_reg, slot(),
7812 CanValueBeSmi(), memory_order_, scratch_reg);
7813 } else if (locs()->in(kValuePos).IsConstant()) {
7814 const auto& value = locs()->in(kValuePos).constant();
7815 auto const size =
7817 __ StoreObjectIntoObjectOffsetNoBarrier(instance_reg, OffsetInBytes(),
7818 value, memory_order_, size);
7819 } else {
7820 __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
7821 memory_order_);
7822 }
7823}
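// Illustrative sketch (not part of il.cc): the unboxed-int64 branch above on
// 32-bit targets stores the value as two word-sized halves at consecutive
// offsets. A hypothetical plain-C++ equivalent for the VM's little-endian
// 32-bit targets:
#include <cstdint>
#include <cstring>
inline void StoreInt64FieldHalves(uint8_t* instance,
                                  intptr_t offset_in_bytes,
                                  int64_t value) {
  const uint32_t lo = static_cast<uint32_t>(value);        // low word first
  const uint32_t hi = static_cast<uint32_t>(value >> 32);  // then high word
  std::memcpy(instance + offset_in_bytes, &lo, sizeof(lo));
  std::memcpy(instance + offset_in_bytes + sizeof(lo), &hi, sizeof(hi));
}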
7824
7825const Code& DartReturnInstr::GetReturnStub(FlowGraphCompiler* compiler) const {
7826 const Function& function = compiler->parsed_function().function();
7827 ASSERT(function.IsSuspendableFunction());
7828 if (function.IsAsyncFunction()) {
7829 if (compiler->is_optimizing() && !value()->Type()->CanBeFuture()) {
7830 return Code::ZoneHandle(compiler->zone(),
7831 compiler->isolate_group()
7832 ->object_store()
7833 ->return_async_not_future_stub());
7834 }
7835 return Code::ZoneHandle(
7836 compiler->zone(),
7837 compiler->isolate_group()->object_store()->return_async_stub());
7838 } else if (function.IsAsyncGenerator()) {
7839 return Code::ZoneHandle(
7840 compiler->zone(),
7841 compiler->isolate_group()->object_store()->return_async_star_stub());
7842 } else {
7843 UNREACHABLE();
7844 }
7845}
7846
7847void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) {
7848 const auto& dst1 = marshaller_.Location(compiler::ffi::kResultIndex);
7849 if (dst1.payload_type().IsVoid()) {
7850 return;
7851 }
7852 if (dst1.IsMultiple()) {
7853 __ Comment("Load TypedDataBase data pointer and apply offset.");
7854 ASSERT_EQUAL(locs()->input_count(), 2);
7855 Register typed_data_reg = locs()->in(0).reg();
7856 // Load the data pointer out of the TypedData/Pointer.
7857 __ LoadFromSlot(typed_data_reg, typed_data_reg, Slot::PointerBase_data());
7858
7859 // Apply offset.
7860 Register offset_reg = locs()->in(1).reg();
7861 __ AddRegisters(typed_data_reg, offset_reg);
7862
7863 __ Comment("Copy loop");
7864 const auto& multiple = dst1.AsMultiple();
7865 int offset_in_bytes = 0;
7866 for (intptr_t i = 0; i < multiple.locations().length(); i++) {
7867 const auto& dst = *multiple.locations().At(i);
7868 ASSERT(!dst.IsRegisters() ||
7869 dst.AsRegisters().reg_at(0) != typed_data_reg);
7870 const auto& src = compiler::ffi::NativeStackLocation(
7871 dst.payload_type(), dst.container_type(), typed_data_reg,
7872 offset_in_bytes);
7873 NoTemporaryAllocator no_temp;
7874 compiler->EmitNativeMove(dst, src, &no_temp);
7875 offset_in_bytes += dst.payload_type().SizeInBytes();
7876 }
7877 return;
7878 }
7879 const auto& dst = dst1.IsPointerToMemory()
7880 ? dst1.AsPointerToMemory().pointer_return_location()
7881 : dst1;
7882
7883 const Location src_loc = locs()->in(0);
7884 const Representation src_type = RequiredInputRepresentation(0);
7885 NoTemporaryAllocator no_temp;
7886 compiler->EmitMoveToNative(dst, src_loc, src_type, &no_temp);
7887}
7888
7889LocationSummary* NativeReturnInstr::MakeLocationSummary(Zone* zone,
7890 bool opt) const {
7891 const intptr_t input_count = marshaller_.NumReturnDefinitions();
7892 const intptr_t kNumTemps = 0;
7893 LocationSummary* locs = new (zone)
7894 LocationSummary(zone, input_count, kNumTemps, LocationSummary::kNoCall);
7895 const auto& native_loc = marshaller_.Location(compiler::ffi::kResultIndex);
7896
7897 if (native_loc.IsMultiple()) {
7898 ASSERT_EQUAL(input_count, 2);
7899 // Pass in a typed data and offset for easy copying in machine code.
7900 // Can be any register which does not conflict with return registers.
7902 ASSERT(typed_data_reg != CallingConventions::kReturnReg);
7904 locs->set_in(0, Location::RegisterLocation(typed_data_reg));
7905
7907 ASSERT(offset_in_bytes_reg != CallingConventions::kReturnReg);
7908 ASSERT(offset_in_bytes_reg != CallingConventions::kSecondReturnReg);
7909 locs->set_in(1, Location::RegisterLocation(offset_in_bytes_reg));
7910 } else {
7911 ASSERT_EQUAL(input_count, 1);
7912 const auto& native_return_loc =
7913 native_loc.IsPointerToMemory()
7914 ? native_loc.AsPointerToMemory().pointer_return_location()
7915 : native_loc;
7916 locs->set_in(0, native_return_loc.AsLocation());
7917 }
7918 return locs;
7919}
7920
7921LocationSummary* RecordCoverageInstr::MakeLocationSummary(Zone* zone,
7922 bool opt) const {
7923 const intptr_t kNumInputs = 0;
7924 const intptr_t kNumTemps = 2;
7925 LocationSummary* locs = new (zone)
7926 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7928 locs->set_temp(1, Location::RequiresRegister());
7929 return locs;
7930}
7931
7932void RecordCoverageInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7933 const auto array_temp = locs()->temp(0).reg();
7934 const auto value_temp = locs()->temp(1).reg();
7935
7936 __ LoadObject(array_temp, coverage_array_);
7937 __ LoadImmediate(value_temp, Smi::RawValue(1));
7938 __ StoreFieldToOffset(
7939 value_temp, array_temp,
7940 compiler::target::Array::element_offset(coverage_index_),
7942}
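// Illustrative sketch (not part of il.cc): RecordCoverage above simply stores
// the Smi 1 into element coverage_index_ of the coverage array. Smis are
// tagged small integers (the value shifted left by one, with a zero tag bit),
// so the raw machine word written is 2. Hypothetical helper:
#include <cstdint>
constexpr intptr_t SmiRawValueSketch(intptr_t value) {
  return value << 1;  // low bit 0 marks the word as a Smi
}
static_assert(SmiRawValueSketch(1) == 2, "Smi 1 is encoded as the raw word 2");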
7943
7944#undef Z
7945
7946 Representation FfiCallInstr::representation() const {
7947 if (marshaller_.ReturnsCompound()) {
7948 // Don't care, we're discarding the value.
7949 return kTagged;
7950 }
7951 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
7952 // The call returns a Dart_Handle, from which we need to extract the
7953 // tagged pointer using LoadField with an appropriate slot.
7954 return kUntagged;
7955 }
7956 return marshaller_.RepInFfiCall(compiler::ffi::kResultIndex);
7957}
7958
7959// TODO(http://dartbug.com/48543): integrate with register allocator directly.
7960DEFINE_BACKEND(LoadThread, (Register out)) {
7961 __ MoveRegister(out, THR);
7962}
7963
7965 Zone* zone,
7966 const RegList temps) const {
7967 LocationSummary* summary =
7968 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7969 /*num_temps=*/Utils::CountOneBitsWord(temps),
7971
7972 intptr_t reg_i = 0;
7973 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
7974 if ((temps & (1 << reg)) != 0) {
7975 summary->set_temp(reg_i,
7976 Location::RegisterLocation(static_cast<Register>(reg)));
7977 reg_i++;
7978 }
7979 }
7980
7981 summary->set_in(TargetAddressIndex(),
7984
7985 const auto& argument_locations =
7986 native_calling_convention_.argument_locations();
7987 for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
7988 const auto& argument_location = *argument_locations.At(i);
7989 if (argument_location.IsRegisters()) {
7990 const auto& reg_location = argument_location.AsRegisters();
7991 ASSERT(reg_location.num_regs() == 1);
7992 summary->set_in(i, reg_location.AsLocation());
7993 } else if (argument_location.IsFpuRegisters()) {
7994 UNIMPLEMENTED();
7995 } else if (argument_location.IsStack()) {
7996 summary->set_in(i, Location::Any());
7997 } else {
7998 UNIMPLEMENTED();
7999 }
8000 }
8001 const auto& return_location = native_calling_convention_.return_location();
8002 ASSERT(return_location.IsRegisters());
8003 summary->set_out(0, return_location.AsLocation());
8004 return summary;
8005}
8006
8007LeafRuntimeCallInstr::LeafRuntimeCallInstr(
8008 Representation return_representation,
8009 const ZoneGrowableArray<Representation>& argument_representations,
8010 const compiler::ffi::NativeCallingConvention& native_calling_convention,
8011 InputsArray&& inputs)
8012 : VariadicDefinition(std::move(inputs), DeoptId::kNone),
8013 return_representation_(return_representation),
8014 argument_representations_(argument_representations),
8015 native_calling_convention_(native_calling_convention) {
8016#if defined(DEBUG)
8017 const intptr_t num_inputs = argument_representations.length() + 1;
8018 ASSERT_EQUAL(InputCount(), num_inputs);
8019 // The target address should never be an unsafe untagged pointer.
8021 ->definition()
8023#endif
8024}
8025
8027 Zone* zone,
8028 Representation return_representation,
8029 const ZoneGrowableArray<Representation>& argument_representations,
8030 InputsArray&& inputs) {
8031 const auto& native_function_type =
8033 zone, return_representation, argument_representations);
8034 const auto& native_calling_convention =
8036 zone, native_function_type);
8037 return new (zone)
8038 LeafRuntimeCallInstr(return_representation, argument_representations,
8039 native_calling_convention, std::move(inputs));
8040}
8041
8043 Register saved_fp,
8044 Register temp0) {
8045 if (native_calling_convention_.StackTopInBytes() == 0) {
8046 return;
8047 }
8048
8049 ConstantTemporaryAllocator temp_alloc(temp0);
8050 compiler::ffi::FrameRebase rebase(compiler->zone(), /*old_base=*/FPREG,
8051 /*new_base=*/saved_fp,
8052 /*stack_delta=*/0);
8053
8054 __ Comment("EmitParamMoves");
8055 const auto& argument_locations =
8056 native_calling_convention_.argument_locations();
8057 for (intptr_t i = 0, n = argument_locations.length(); i < n; ++i) {
8058 const auto& argument_location = *argument_locations.At(i);
8059 if (argument_location.IsRegisters()) {
8060 const auto& reg_location = argument_location.AsRegisters();
8061 ASSERT(reg_location.num_regs() == 1);
8062 const Location src_loc = rebase.Rebase(locs()->in(i));
8064 compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
8065 &temp_alloc);
8066 } else if (argument_location.IsFpuRegisters()) {
8067 UNIMPLEMENTED();
8068 } else if (argument_location.IsStack()) {
8069 const Location src_loc = rebase.Rebase(locs()->in(i));
8071 __ Comment("Param %" Pd ": %s %s -> %s", i, src_loc.ToCString(),
8073 argument_location.ToCString());
8074 compiler->EmitMoveToNative(argument_location, src_loc, src_rep,
8075 &temp_alloc);
8076 } else {
8077 UNIMPLEMENTED();
8078 }
8079 }
8080 __ Comment("EmitParamMovesEnd");
8081}
8082
8083// SIMD
8084
8086 switch (kind) {
8087 case MethodRecognizer::kFloat32x4Mul:
8088 return SimdOpInstr::kFloat32x4Mul;
8089 case MethodRecognizer::kFloat32x4Div:
8090 return SimdOpInstr::kFloat32x4Div;
8091 case MethodRecognizer::kFloat32x4Add:
8092 return SimdOpInstr::kFloat32x4Add;
8093 case MethodRecognizer::kFloat32x4Sub:
8094 return SimdOpInstr::kFloat32x4Sub;
8095 case MethodRecognizer::kFloat64x2Mul:
8096 return SimdOpInstr::kFloat64x2Mul;
8097 case MethodRecognizer::kFloat64x2Div:
8098 return SimdOpInstr::kFloat64x2Div;
8099 case MethodRecognizer::kFloat64x2Add:
8100 return SimdOpInstr::kFloat64x2Add;
8101 case MethodRecognizer::kFloat64x2Sub:
8102 return SimdOpInstr::kFloat64x2Sub;
8103 default:
8104 break;
8105 }
8106 UNREACHABLE();
8108}
8109
8112 Definition* receiver,
8113 Instruction* call,
8114 intptr_t mask /* = 0 */) {
8115 SimdOpInstr* op;
8116 switch (kind) {
8117 case MethodRecognizer::kFloat32x4Mul:
8118 case MethodRecognizer::kFloat32x4Div:
8119 case MethodRecognizer::kFloat32x4Add:
8120 case MethodRecognizer::kFloat32x4Sub:
8121 case MethodRecognizer::kFloat64x2Mul:
8122 case MethodRecognizer::kFloat64x2Div:
8123 case MethodRecognizer::kFloat64x2Add:
8124 case MethodRecognizer::kFloat64x2Sub:
8125 op = new (zone) SimdOpInstr(KindForOperator(kind), call->deopt_id());
8126 break;
8127#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64)
8128 case MethodRecognizer::kFloat32x4GreaterThan:
8129 // cmppsgt does not exist and cmppsnlt gives the wrong result for NaN, so
8130 // flip the operands at the IL level to get the right SameAsFirstInput.
8131 op = new (zone)
8132 SimdOpInstr(SimdOpInstr::kFloat32x4LessThan, call->deopt_id());
8133 op->SetInputAt(0, call->ArgumentValueAt(1)->CopyWithType(zone));
8134 op->SetInputAt(1, new (zone) Value(receiver));
8135 return op;
8136 case MethodRecognizer::kFloat32x4GreaterThanOrEqual:
8137 // cmppsge does not exist and cmppsnle gives the wrong result for NaN, so
8138 // flip the operands at the IL level to get the right SameAsFirstInput.
8139 op = new (zone)
8140 SimdOpInstr(SimdOpInstr::kFloat32x4LessThanOrEqual, call->deopt_id());
8141 op->SetInputAt(0, call->ArgumentValueAt(1)->CopyWithType(zone));
8142 op->SetInputAt(1, new (zone) Value(receiver));
8143 return op;
8144#endif
8145 default:
8146 op = new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id());
8147 break;
8148 }
8149
8150 if (receiver != nullptr) {
8151 op->SetInputAt(0, new (zone) Value(receiver));
8152 }
8153 for (intptr_t i = (receiver != nullptr ? 1 : 0); i < op->InputCount(); i++) {
8154 op->SetInputAt(i, call->ArgumentValueAt(i)->CopyWithType(zone));
8155 }
8156 if (op->HasMask()) {
8157 op->set_mask(mask);
8158 }
8159 ASSERT(call->ArgumentCount() == (op->InputCount() + (op->HasMask() ? 1 : 0)));
8160
8161 return op;
8162}
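// Illustrative sketch (not part of il.cc): the operand flip used for the
// GreaterThan / GreaterThanOrEqual cases above. SSE has no packed-float
// "greater than" compare with the desired NaN behaviour, but a > b holds
// exactly when b < a (and both are false when either operand is NaN), so the
// IL swaps the operands and emits a LessThan instead. Hypothetical scalar
// analogue:
inline bool GreaterThanViaLessThan(float a, float b) {
  return b < a;  // same truth table as (a > b), including NaN inputs
}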
8163
8166 Instruction* call) {
8167 SimdOpInstr* op =
8168 new (zone) SimdOpInstr(KindForMethod(kind), call->deopt_id());
8169 for (intptr_t i = 0; i < op->InputCount(); i++) {
8170 // Note: ArgumentAt(0) is type arguments which we don't need.
8171 op->SetInputAt(i, call->ArgumentValueAt(i + 1)->CopyWithType(zone));
8172 }
8173 ASSERT(call->ArgumentCount() == (op->InputCount() + 1));
8174 return op;
8175}
8176
8178 switch (cid) {
8179 case kFloat32x4Cid:
8180 switch (op) {
8181 case Token::kADD:
8182 return kFloat32x4Add;
8183 case Token::kSUB:
8184 return kFloat32x4Sub;
8185 case Token::kMUL:
8186 return kFloat32x4Mul;
8187 case Token::kDIV:
8188 return kFloat32x4Div;
8189 default:
8190 break;
8191 }
8192 break;
8193
8194 case kFloat64x2Cid:
8195 switch (op) {
8196 case Token::kADD:
8197 return kFloat64x2Add;
8198 case Token::kSUB:
8199 return kFloat64x2Sub;
8200 case Token::kMUL:
8201 return kFloat64x2Mul;
8202 case Token::kDIV:
8203 return kFloat64x2Div;
8204 default:
8205 break;
8206 }
8207 break;
8208
8209 case kInt32x4Cid:
8210 switch (op) {
8211 case Token::kADD:
8212 return kInt32x4Add;
8213 case Token::kSUB:
8214 return kInt32x4Sub;
8215 case Token::kBIT_AND:
8216 return kInt32x4BitAnd;
8217 case Token::kBIT_OR:
8218 return kInt32x4BitOr;
8219 case Token::kBIT_XOR:
8220 return kInt32x4BitXor;
8221 default:
8222 break;
8223 }
8224 break;
8225 }
8226
8227 UNREACHABLE();
8228 return kIllegalSimdOp;
8229}
8230
8232 switch (kind) {
8233#define CASE_METHOD(Arity, Mask, Name, ...) \
8234 case MethodRecognizer::k##Name: \
8235 return k##Name;
8236#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
8238#undef CASE_METHOD
8239#undef CASE_BINARY_OP
8240 default:
8241 break;
8242 }
8243
8244 FATAL("Not a SIMD method: %s", MethodRecognizer::KindToCString(kind));
8245 return kIllegalSimdOp;
8246}
8247
8248 // The methods InputCount(), representation(), RequiredInputRepresentation()
8249 // and HasMask() use an array of SimdOpInfo structures holding all the
8250 // necessary information about each instruction kind.
8251
8258
8260 // Keep the old semantics where kUnboxedInt8 was a locally created
8261 // alias for kUnboxedInt32, and pass everything else through unchanged.
8262 return rep == kUnboxedInt8 ? kUnboxedInt32 : rep;
8263}
8264
8265// Make representation from type name used by SIMD_OP_LIST.
8266#define REP(T) (SimdRepresentation(kUnboxed##T))
8267static const Representation kUnboxedBool = kTagged;
8268
8269#define ENCODE_INPUTS_0()
8270#define ENCODE_INPUTS_1(In0) REP(In0)
8271#define ENCODE_INPUTS_2(In0, In1) REP(In0), REP(In1)
8272#define ENCODE_INPUTS_3(In0, In1, In2) REP(In0), REP(In1), REP(In2)
8273#define ENCODE_INPUTS_4(In0, In1, In2, In3) \
8274 REP(In0), REP(In1), REP(In2), REP(In3)
8275
8276// Helpers for correct interpretation of the Mask field in the SIMD_OP_LIST.
8277#define HAS_MASK true
8278#define HAS__ false
8279
8280// Define the metadata array.
8282#define CASE(Arity, Mask, Name, Args, Result) \
8283 {Arity, HAS_##Mask, REP(Result), {PP_APPLY(ENCODE_INPUTS_##Arity, Args)}},
8285#undef CASE
8286};
8287
8288// Undef all auxiliary macros.
8289#undef ENCODE_INFORMATION
8290#undef HAS__
8291#undef HAS_MASK
8292#undef ENCODE_INPUTS_0
8293#undef ENCODE_INPUTS_1
8294#undef ENCODE_INPUTS_2
8295#undef ENCODE_INPUTS_3
8296#undef ENCODE_INPUTS_4
8297#undef REP
8298
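// Illustrative sketch (not part of il.cc): the table-driven pattern behind
// simd_op_information. Each SimdOp kind indexes one row of static metadata,
// so InputCount(), HasMask() and the representation queries below are plain
// array lookups. All names here are hypothetical.
#include <cstdint>
enum MiniSimdOp { kMiniAdd, kMiniShuffle, kMiniOpCount };
struct MiniSimdOpInfo {
  uint8_t arity;
  bool has_mask;
};
static constexpr MiniSimdOpInfo kMiniSimdOpInfo[kMiniOpCount] = {
    /*kMiniAdd*/ {2, false},
    /*kMiniShuffle*/ {1, true},
};
inline intptr_t MiniInputCount(MiniSimdOp op) {
  return kMiniSimdOpInfo[op].arity;
}
inline bool MiniHasMask(MiniSimdOp op) {
  return kMiniSimdOpInfo[op].has_mask;
}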
8299intptr_t SimdOpInstr::InputCount() const {
8300 return simd_op_information[kind()].arity;
8301}
8302
8306
8308 ASSERT(0 <= idx && idx < InputCount());
8309 return simd_op_information[kind()].inputs[idx];
8310}
8311
8312bool SimdOpInstr::HasMask() const {
8314}
8315
8317 if ((kind() == SimdOpInstr::kFloat64x2FromDoubles) &&
8318 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant()) {
8319 const Object& x = InputAt(0)->BoundConstant();
8320 const Object& y = InputAt(1)->BoundConstant();
8321 if (x.IsDouble() && y.IsDouble()) {
8323 Double::Cast(x).value(), Double::Cast(y).value(), Heap::kOld));
8324 result ^= result.Canonicalize(Thread::Current());
8325 return flow_graph->GetConstant(result, kUnboxedFloat64x2);
8326 }
8327 }
8328 if ((kind() == SimdOpInstr::kFloat32x4FromDoubles) &&
8329 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
8330 InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
8331 const Object& x = InputAt(0)->BoundConstant();
8332 const Object& y = InputAt(1)->BoundConstant();
8333 const Object& z = InputAt(2)->BoundConstant();
8334 const Object& w = InputAt(3)->BoundConstant();
8335 if (x.IsDouble() && y.IsDouble() && z.IsDouble() && w.IsDouble()) {
8337 Double::Cast(x).value(), Double::Cast(y).value(),
8338 Double::Cast(z).value(), Double::Cast(w).value(), Heap::kOld));
8339 result ^= result.Canonicalize(Thread::Current());
8340 return flow_graph->GetConstant(result, kUnboxedFloat32x4);
8341 }
8342 }
8343 if ((kind() == SimdOpInstr::kInt32x4FromInts) &&
8344 InputAt(0)->BindsToConstant() && InputAt(1)->BindsToConstant() &&
8345 InputAt(2)->BindsToConstant() && InputAt(3)->BindsToConstant()) {
8346 const Object& x = InputAt(0)->BoundConstant();
8347 const Object& y = InputAt(1)->BoundConstant();
8348 const Object& z = InputAt(2)->BoundConstant();
8349 const Object& w = InputAt(3)->BoundConstant();
8350 if (x.IsInteger() && y.IsInteger() && z.IsInteger() && w.IsInteger()) {
8352 Integer::Cast(x).AsInt64Value(), Integer::Cast(y).AsInt64Value(),
8353 Integer::Cast(z).AsInt64Value(), Integer::Cast(w).AsInt64Value(),
8354 Heap::kOld));
8355 result ^= result.Canonicalize(Thread::Current());
8356 return flow_graph->GetConstant(result, kUnboxedInt32x4);
8357 }
8358 }
8359
8360 return this;
8361}
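// Illustrative sketch (not part of il.cc): the constant folding above. When
// every input of a FromDoubles/FromInts constructor binds to a constant, the
// SIMD value can be built at compile time and the whole instruction replaced
// by a single constant. A hypothetical scalar-pair analogue:
#include <optional>
struct FoldedPair {
  double x;
  double y;
};
inline std::optional<FoldedPair> TryFoldPair(std::optional<double> x,
                                             std::optional<double> y) {
  if (x.has_value() && y.has_value()) {
    return FoldedPair{*x, *y};  // both inputs are constants: fold eagerly
  }
  return std::nullopt;  // otherwise keep the runtime construction
}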
8362
8363LocationSummary* Call1ArgStubInstr::MakeLocationSummary(Zone* zone,
8364 bool opt) const {
8365 const intptr_t kNumInputs = 1;
8366 const intptr_t kNumTemps = 0;
8367 LocationSummary* locs = new (zone)
8368 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8369 switch (stub_id_) {
8371 locs->set_in(
8373 break;
8374 case StubId::kInitAsync:
8379 break;
8381 locs->set_in(
8383 break;
8384 }
8386 return locs;
8387}
8388
8389void Call1ArgStubInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8390 ObjectStore* object_store = compiler->isolate_group()->object_store();
8391 Code& stub = Code::ZoneHandle(compiler->zone());
8392 switch (stub_id_) {
8394 stub = object_store->clone_suspend_state_stub();
8395 break;
8396 case StubId::kInitAsync:
8397 stub = object_store->init_async_stub();
8398 break;
8400 stub = object_store->init_async_star_stub();
8401 break;
8403 stub = object_store->init_sync_star_stub();
8404 break;
8406 stub = object_store->ffi_async_callback_send_stub();
8407 break;
8408 }
8409 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8410 locs(), deopt_id(), env());
8411}
8412
8415 !operand()->Type()->CanBeFuture()) {
8417 stub_id_ = StubId::kAwait;
8418 }
8419 return this;
8420}
8421
8422LocationSummary* SuspendInstr::MakeLocationSummary(Zone* zone, bool opt) const {
8423 const intptr_t kNumInputs = has_type_args() ? 2 : 1;
8424 const intptr_t kNumTemps = 0;
8425 LocationSummary* locs = new (zone)
8426 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8428 if (has_type_args()) {
8430 }
8432 return locs;
8433}
8434
8435void SuspendInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8436 // Use deopt_id as a yield index.
8437 compiler->EmitYieldPositionMetadata(source(), deopt_id());
8438
8439 ObjectStore* object_store = compiler->isolate_group()->object_store();
8440 Code& stub = Code::ZoneHandle(compiler->zone());
8441 switch (stub_id_) {
8442 case StubId::kAwait:
8443 stub = object_store->await_stub();
8444 break;
8446 stub = object_store->await_with_type_check_stub();
8447 break;
8449 stub = object_store->yield_async_star_stub();
8450 break;
8452 stub = object_store->suspend_sync_star_at_start_stub();
8453 break;
8455 stub = object_store->suspend_sync_star_at_yield_stub();
8456 break;
8457 }
8458 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8459 locs(), deopt_id(), env());
8460
8461#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
8462 // On x86 (X64 and IA32) a mismatch between calls and returns
8463 // significantly regresses performance. So the suspend stub
8464 // does not return directly to the caller. Instead, a small
8465 // epilogue is generated right after the call to the suspend stub,
8466 // and the resume stub adjusts the resume PC to skip this epilogue.
8467 const intptr_t start = compiler->assembler()->CodeSize();
8468 __ LeaveFrame();
8469 __ ret();
8470 RELEASE_ASSERT(compiler->assembler()->CodeSize() - start ==
8472 compiler->EmitCallsiteMetadata(source(), resume_deopt_id(),
8473 UntaggedPcDescriptors::kOther, locs(), env());
8474#endif
8475}
8476
8477LocationSummary* AllocateRecordInstr::MakeLocationSummary(Zone* zone,
8478 bool opt) const {
8479 const intptr_t kNumInputs = 0;
8480 const intptr_t kNumTemps = 0;
8481 LocationSummary* locs = new (zone)
8482 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8484 return locs;
8485}
8486
8487void AllocateRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8488 const Code& stub = Code::ZoneHandle(
8489 compiler->zone(),
8490 compiler->isolate_group()->object_store()->allocate_record_stub());
8491 __ LoadImmediate(AllocateRecordABI::kShapeReg,
8492 Smi::RawValue(shape().AsInt()));
8493 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8494 locs(), deopt_id(), env());
8495}
8496
8497LocationSummary* AllocateSmallRecordInstr::MakeLocationSummary(Zone* zone,
8498 bool opt) const {
8499 ASSERT(num_fields() == 2 || num_fields() == 3);
8500 const intptr_t kNumInputs = InputCount();
8501 const intptr_t kNumTemps = 0;
8502 LocationSummary* locs = new (zone)
8503 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
8504 locs->set_in(0,
8506 locs->set_in(1,
8508 if (num_fields() > 2) {
8509 locs->set_in(
8511 }
8512 locs->set_out(0,
8514 return locs;
8515}
8516
8517void AllocateSmallRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8518 auto object_store = compiler->isolate_group()->object_store();
8519 Code& stub = Code::ZoneHandle(compiler->zone());
8520 if (shape().HasNamedFields()) {
8522 Smi::RawValue(shape().AsInt()));
8523 switch (num_fields()) {
8524 case 2:
8525 stub = object_store->allocate_record2_named_stub();
8526 break;
8527 case 3:
8528 stub = object_store->allocate_record3_named_stub();
8529 break;
8530 default:
8531 UNREACHABLE();
8532 }
8533 } else {
8534 switch (num_fields()) {
8535 case 2:
8536 stub = object_store->allocate_record2_stub();
8537 break;
8538 case 3:
8539 stub = object_store->allocate_record3_stub();
8540 break;
8541 default:
8542 UNREACHABLE();
8543 }
8544 }
8545 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
8546 locs(), deopt_id(), env());
8547}
8548
8549LocationSummary* MakePairInstr::MakeLocationSummary(Zone* zone,
8550 bool opt) const {
8551 ASSERT(opt);
8552 const intptr_t kNumInputs = 2;
8553 const intptr_t kNumTemps = 0;
8554 LocationSummary* locs = new (zone)
8555 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
8556 // The MakePair instruction combines 2 separate kTagged values into
8557 // a single kPairOfTagged value for the subsequent Return. It uses the
8558 // fixed registers designated for return values by the calling convention
8559 // in order to avoid any extra moves.
8561 locs->set_in(
8563 locs->set_out(
8564 0, Location::Pair(
8567 return locs;
8568}
8569
8570void MakePairInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
8571 // No-op.
8572}
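// Illustrative sketch (not part of il.cc): why EmitNativeCode above is a
// no-op. The location summary pins both inputs to the two fixed return
// registers of the calling convention, so by the time MakePair "executes" the
// values are already exactly where the subsequent Return expects them.
// Hypothetical C++ analogue of a two-register return:
struct TaggedPair {
  long first;
  long second;
};
inline TaggedPair MakePairEquivalent(long a, long b) {
  return TaggedPair{a, b};  // no extra moves if a and b are already in place
}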
8573
8574#undef __
8575
8576} // namespace dart
static int step(int x, SkScalar min, SkScalar max)
Definition BlurTest.cpp:215
static bool compare(const SkBitmap &ref, const SkIRect &iref, const SkBitmap &test, const SkIRect &itest)
Definition BlurTest.cpp:100
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition DM.cpp:263
int count
static void is_empty(skiatest::Reporter *reporter, const SkPath &p)
static float next(float f)
static float prev(float f)
static std::unique_ptr< SkEncoder > Make(SkWStream *dst, const SkPixmap *src, const SkYUVAPixmaps *srcYUVA, const SkColorSpace *srcYUVAColorSpace, const SkJpegEncoder::Options &options)
static bool is_integer(SkScalar x)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
SI void store(P *ptr, const T &val)
SI T load(const P *ptr)
SI F table(const skcms_Curve *curve, F v)
static size_t element_size(Layout layout, SkSLType type)
Vec2Value v2
#define __
#define UNREACHABLE()
Definition assert.h:248
#define DEBUG_ASSERT(cond)
Definition assert.h:321
#define ASSERT_EQUAL(expected, actual)
Definition assert.h:309
#define RELEASE_ASSERT(cond)
Definition assert.h:327
#define Z
bool IsSubtypeOf(const AbstractType &other, Heap::Space space, FunctionTypeMapping *function_type_equivalence=nullptr) const
Definition object.cc:21611
bool IsTopTypeForSubtyping() const
Definition object.cc:21457
virtual AbstractTypePtr Canonicalize(Thread *thread) const
Definition object.cc:21297
bool IsObjectType() const
Definition object.h:9181
static bool InstantiateAndTestSubtype(AbstractType *subtype, AbstractType *supertype, const TypeArguments &instantiator_type_args, const TypeArguments &function_type_args)
Definition object.cc:4342
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:952
virtual intptr_t InputCount() const
Definition il.h:7477
bool is_generic() const
Definition il.h:7487
bool has_instantiator_type_args() const
Definition il.h:7484
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:933
RecordShape shape() const
Definition il.h:7590
intptr_t num_fields() const
Definition il.h:7636
RecordShape shape() const
Definition il.h:7635
virtual intptr_t InputCount() const
Definition il.h:7638
classid_t class_id() const
Definition il.h:7850
AllocateUninitializedContextInstr(const InstructionSource &source, intptr_t num_context_variables, intptr_t deopt_id)
Definition il.cc:923
Value * dst_type() const
Definition il.h:4405
static bool ParseKind(const char *str, Kind *out)
Definition il.cc:827
const String & dst_name() const
Definition il.h:4412
Value * value() const
Definition il.h:4404
virtual intptr_t statistics_tag() const
Definition il.cc:6022
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3064
virtual Value * RedefinedValue() const
Definition il.cc:547
Value * function_type_arguments() const
Definition il.h:4409
static const char * KindToCString(Kind kind)
Definition il.cc:814
Value * instantiator_type_arguments() const
Definition il.h:4406
Value * value() const
Definition il.h:4473
virtual Value * RedefinedValue() const
Definition il.cc:551
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3048
Value * function_type_arguments() const
Definition il.h:4322
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:1062
Value * super_type() const
Definition il.h:4324
Value * sub_type() const
Definition il.h:4323
Value * instantiator_type_arguments() const
Definition il.h:4319
void Add(const T &value)
const T & At(intptr_t index) const
intptr_t length() const
void SetAt(intptr_t index, const T &t)
Token::Kind op_kind() const
Definition il.h:8990
Value * right() const
Definition il.h:8988
virtual PRINT_OPERANDS_TO_SUPPORT Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2200
virtual Representation representation() const
Definition il.h:8996
Value * left() const
Definition il.h:8987
virtual intptr_t DeoptimizationTarget() const
Definition il.h:9007
static const BinaryFeedback * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, intptr_t argument_cid)
Definition il.cc:4113
static const BinaryFeedback * Create(Zone *zone, const ICData &ic_data)
Definition il.cc:4097
virtual bool ComputeCanDeoptimize() const
Definition il.cc:2052
static bool IsSupported(Token::Kind op_kind, Value *left, Value *right)
Definition il.h:9449
void set_can_overflow(bool overflow)
Definition il.h:9353
bool can_overflow() const
Definition il.h:9352
bool RightIsNonZero() const
Definition il.cc:2107
Value * right() const
Definition il.h:9350
Token::Kind op_kind() const
Definition il.h:9348
Value * left() const
Definition il.h:9349
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1111
bool RightIsPowerOfTwoConstant() const
Definition il.cc:2116
static BinaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *left, Value *right, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition il.cc:2284
bool is_truncating() const
Definition il.h:9358
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2400
virtual bool ComputeCanDeoptimize() const
Definition il.cc:2077
Range * right_range() const
Definition il.h:9425
void Add(intptr_t i)
Definition bit_vector.h:63
bool Contains(intptr_t i) const
Definition bit_vector.h:91
BlockEntryInstr * dominator() const
Definition il.h:1664
intptr_t NestingDepth() const
Definition il.cc:1829
void set_preorder_number(intptr_t number)
Definition il.h:1650
bool FindOsrEntryAndRelink(GraphEntryInstr *graph_entry, Instruction *parent, BitVector *block_marks)
Definition il.cc:1747
virtual void ClearPredecessors()=0
ParallelMoveInstr * parallel_move() const
Definition il.h:1683
intptr_t preorder_number() const
Definition il.h:1649
bool HasParallelMove() const
Definition il.h:1685
intptr_t block_id() const
Definition il.h:1655
BlockEntryInstr * ImmediateDominator() const
Definition il.cc:1817
virtual void AddPredecessor(BlockEntryInstr *predecessor)=0
bool Dominates(BlockEntryInstr *other) const
Definition il.cc:1806
void ReplaceAsPredecessorWith(BlockEntryInstr *new_block)
Definition il.cc:1838
bool IsLoopHeader() const
Definition il.cc:1825
void ClearAllInstructions()
Definition il.cc:1896
void set_last_instruction(Instruction *instr)
Definition il.h:1681
intptr_t stack_depth() const
Definition il.h:1750
bool DiscoverBlock(BlockEntryInstr *predecessor, GrowableArray< BlockEntryInstr * > *preorder, GrowableArray< intptr_t > *parent)
Definition il.cc:1690
Instruction * last_instruction() const
Definition il.h:1680
GrowableArray< Definition * > * initial_definitions()
Definition il.h:1911
static const Bool & False()
Definition object.h:10778
static const Bool & Get(bool value)
Definition object.h:10780
static const Bool & True()
Definition object.h:10776
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3520
Value * value() const
Definition il.h:7155
static bool IsBootstrapResolver(Dart_NativeEntryResolver resolver)
static void Allocate(FlowGraphCompiler *compiler, Instruction *instruction, const Class &cls, Register result, Register temp)
Definition il.cc:6317
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition il.cc:6291
static BoxInstr * Create(Representation from, Value *value)
Definition il.cc:4009
Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3186
Value * value() const
Definition il.h:8480
Representation from_representation() const
Definition il.h:8481
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3264
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3249
virtual bool ValueFitsSmi() const
Definition il.cc:3244
Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3230
virtual intptr_t SuccessorCount() const
Definition il.cc:2001
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition il.cc:2005
void SetComparison(ComparisonInstr *comp)
Definition il.cc:1663
ComparisonInstr * comparison() const
Definition il.h:4003
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3645
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:5949
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:5967
virtual Representation representation() const
Definition il.h:5733
CachableIdempotentCallInstr(const InstructionSource &source, Representation representation, const Function &function, intptr_t type_args_len, const Array &argument_names, InputsArray &&arguments, intptr_t deopt_id)
Definition il.cc:5919
virtual intptr_t ArgumentsSize() const
Definition il.cc:5961
const Function & function() const
Definition il.h:5697
intptr_t index_scale() const
Definition il.h:7972
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3777
StringPtr target_name() const
Definition object.h:2352
ArrayPtr arguments_descriptor() const
Definition object.h:2353
void Print() const
Definition il.cc:4261
static const CallTargets * CreateMonomorphic(Zone *zone, intptr_t receiver_cid, const Function &target)
Definition il.cc:4121
const Function & MostPopularTarget() const
Definition il.cc:5521
static const CallTargets * Create(Zone *zone, const ICData &ic_data)
Definition il.cc:4132
TargetInfo * TargetAt(int i) const
Definition il.h:790
bool HasSingleTarget() const
Definition il.cc:5507
static const CallTargets * CreateAndExpand(Zone *zone, const ICData &ic_data)
Definition il.cc:4140
intptr_t AggregateCallCount() const
Definition il.cc:5530
const Function & FirstTarget() const
Definition il.cc:5515
StaticTypeExactnessState MonomorphicExactness() const
Definition il.cc:809
bool HasSingleRecognizedTarget() const
Definition il.cc:5502
static constexpr Register kSecondReturnReg
static constexpr RegList kVolatileXmmRegisters
static constexpr intptr_t kVolatileCpuRegisters
static constexpr Register kFirstNonArgumentRegister
static constexpr Register kFfiAnyNonAbiRegister
static constexpr Register kReturnReg
static constexpr Register kSecondNonArgumentRegister
const RuntimeEntry & TargetFunction() const
Definition il.cc:1099
static intptr_t LengthOffsetFor(intptr_t class_id)
Definition il.cc:6779
static bool IsFixedLengthArrayType(intptr_t class_id)
Definition il.cc:6769
virtual Value * RedefinedValue() const
Definition il.cc:555
Value * index() const
Definition il.h:10743
Value * length() const
Definition il.h:10742
bool IsRedundant(bool use_loops=false)
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6773
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3822
Value * value() const
Definition il.h:10701
bool IsDeoptIfNull() const
Definition il.cc:861
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr, TemplateInstruction, FIELD_LIST) private void EmitBitTest(FlowGraphCompiler *compiler, intptr_t min, intptr_t max, intptr_t mask, compiler::Label *deopt)
Value * value() const
Definition il.h:10540
void EmitNullCheck(FlowGraphCompiler *compiler, compiler::Label *deopt)
CheckClassInstr(Value *value, intptr_t deopt_id, const Cids &cids, const InstructionSource &source)
Definition il.cc:838
const Cids & cids() const
Definition il.h:10542
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3802
bool IsNullCheck() const
Definition il.h:10546
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:855
bool IsDeoptIfNotNull() const
Definition il.cc:875
intptr_t ComputeCidMask() const
Definition il.cc:901
bool IsBitTest() const
Definition il.cc:897
static bool IsCompactCidRange(const Cids &cids)
Definition il.cc:883
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6739
virtual Value * InputAt(intptr_t i) const
Definition il.h:10937
ComparisonInstr * comparison() const
Definition il.h:10920
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3990
Value * right() const
Definition il.h:8429
Value * left() const
Definition il.h:8428
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition il.h:10652
const String & function_name() const
Definition il.h:10649
static void AddMetadataForRuntimeCall(CheckNullInstr *check_null, FlowGraphCompiler *compiler)
Definition il.cc:6286
Value * value() const
Definition il.h:10647
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3998
virtual Value * RedefinedValue() const
Definition il.cc:563
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:4002
ExceptionType exception_type() const
Definition il.h:10650
Value * value() const
Definition il.h:10600
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3986
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2644
virtual Value * RedefinedValue() const
Definition il.cc:559
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6799
Value * value() const
Definition il.h:10877
@ kDeeplyImmutableAttachNativeFinalizer
Definition il.h:10862
bool MustInclude(intptr_t cid)
Definition il.cc:162
CidCheckerForRanges(Thread *thread, ClassTable *table, const Class &cls, bool include_abstract, bool exclude_null)
Definition il.cc:136
bool MayInclude(intptr_t cid)
Definition il.cc:149
void Sort(int compare(CidRange *const *a, CidRange *const *b))
Definition il.h:758
bool HasClassId(intptr_t cid) const
Definition il.cc:680
static Cids * CreateMonomorphic(Zone *zone, intptr_t cid)
Definition il.cc:689
intptr_t MonomorphicReceiverCid() const
Definition il.cc:804
static Cids * CreateForArgument(Zone *zone, const BinaryFeedback &binary_feedback, int argument_number)
Definition il.cc:695
void SetLength(intptr_t len)
Definition il.h:754
intptr_t ComputeLowestCid() const
Definition il.cc:664
intptr_t length() const
Definition il.h:752
intptr_t ComputeHighestCid() const
Definition il.cc:672
GrowableArray< CidRange * > cid_ranges_
Definition il.h:768
void Add(CidRange *target)
Definition il.h:746
bool is_empty() const
Definition il.h:756
bool Equals(const Cids &other) const
Definition il.cc:653
bool IsMonomorphic() const
Definition il.cc:799
ClassPtr At(intptr_t cid) const
intptr_t NumCids() const
bool HasValidClassAt(intptr_t cid) const
const char * ScrubbedNameCString() const
Definition object.cc:3046
LibraryPtr library() const
Definition object.h:1335
GrowableObjectArrayPtr direct_subclasses() const
Definition object.h:1539
intptr_t id() const
Definition object.h:1235
TypePtr RareType() const
Definition object.cc:3097
bool is_abstract() const
Definition object.h:1698
bool IsGeneric() const
Definition object.h:1360
ClassPtr SuperClass(ClassTable *class_table=nullptr) const
Definition object.cc:3715
bool IsTopLevel() const
Definition object.cc:6176
GrowableObjectArrayPtr direct_implementors() const
Definition object.h:1522
intptr_t NumTypeParameters(Thread *thread) const
Definition object.cc:3605
static bool IsOptimized(CodePtr code)
Definition object.h:6794
virtual void NegateComparison()
Definition il.h:3862
intptr_t operation_cid() const
Definition il.h:3860
Value * right() const
Definition il.h:3829
virtual bool AttributesEqual(const Instruction &other) const
Definition il.h:3867
virtual Condition EmitComparisonCode(FlowGraphCompiler *compiler, BranchLabels labels)=0
Value * left() const
Definition il.h:3828
Token::Kind kind() const
Definition il.h:3832
static CompileType FromCid(intptr_t cid)
bool is_nullable() const
static CompileType Smi()
const AbstractType * ToAbstractType()
static CompilerState & Current()
static bool IsBackgroundCompilation()
Definition compiler.cc:299
static constexpr intptr_t kNoOSRDeoptId
Definition compiler.h:73
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1162
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2723
const Object & value() const
Definition il.h:4212
ConstantInstr(const Object &value)
Definition il.h:4203
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Value * value() const
Definition il.h:3468
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3177
bool HasUses() const
Definition il.h:2551
static bool IsArrayLength(Definition *def)
Definition il.cc:583
Value * env_use_list() const
Definition il.h:2560
void ReplaceWith(Definition *other, ForwardInstructionIterator *iterator)
Definition il.cc:1653
Value * input_use_list() const
Definition il.h:2557
Object & constant_value()
Definition il.cc:523
Range * range() const
Definition il.h:2618
CompileType * Type()
Definition il.h:2503
virtual Value * RedefinedValue() const
Definition il.cc:539
void AddEnvUse(Value *value)
Definition il.h:2568
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2624
Definition * OriginalDefinitionIgnoreBoxingAndConstraints()
Definition il.cc:567
void ReplaceUsesWith(Definition *other)
Definition il.cc:1484
bool HasOnlyInputUse(Value *use) const
Definition il.cc:1480
bool HasSSATemp() const
Definition il.h:2490
void AddInputUse(Value *value)
Definition il.h:2567
Definition * OriginalDefinition()
Definition il.cc:530
void set_ssa_temp_index(intptr_t index)
Definition il.h:2486
void set_input_use_list(Value *head)
Definition il.h:2558
bool HasOnlyUse(Value *use) const
Definition il.cc:1468
void ClearTempIndex()
Definition il.h:2482
void ReplaceWithResult(Instruction *replacement, Definition *replacement_for_uses, ForwardInstructionIterator *iterator)
Definition il.cc:1623
ValueListIterable input_uses() const
Definition il.h:2563
intptr_t ssa_temp_index() const
Definition il.h:2485
friend class Value
Definition il.h:2672
void set_env_use_list(Value *head)
Definition il.h:2561
void ClearSSATempIndex()
Definition il.h:2491
static constexpr intptr_t kNone
Definition deopt_id.h:27
static intptr_t ToDeoptAfter(intptr_t deopt_id)
Definition deopt_id.h:31
virtual Representation representation() const
Definition il.cc:5399
virtual intptr_t ArgumentsSize() const
Definition il.cc:5389
const Function & interface_target() const
Definition il.h:5038
static DispatchTableCallInstr * FromCall(Zone *zone, const InstanceCallBaseInstr *call, Value *cid, const Function &interface_target, const compiler::TableSelector *selector)
Definition il.cc:5403
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:5372
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:5648
const compiler::TableSelector * selector() const
Definition il.h:5039
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2227
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6552
Value * value() const
Definition il.h:10090
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2177
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition il.cc:6340
static DoublePtr NewCanonical(double d)
Definition object.cc:23497
intptr_t num_temps() const
Definition il.h:5814
virtual intptr_t InputCount() const
Definition il.h:5806
void DeepCopyToOuter(Zone *zone, Instruction *instr, intptr_t outer_deopt_id) const
Definition il.cc:6532
intptr_t Length() const
Definition il.h:11658
void PushValue(Value *value)
Definition il.cc:6465
intptr_t fixed_parameter_count() const
Definition il.h:11676
intptr_t LazyDeoptPruneCount() const
Definition il.h:11616
bool LazyDeoptToBeforeDeoptId() const
Definition il.h:11620
Value * ValueAt(intptr_t ix) const
Definition il.h:11654
void DeepCopyAfterTo(Zone *zone, Instruction *instr, intptr_t argc, Definition *dead, Definition *result) const
Definition il.cc:6506
void DeepCopyTo(Zone *zone, Instruction *instr) const
Definition il.cc:6493
Environment * DeepCopy(Zone *zone) const
Definition il.h:11690
Environment * outer() const
Definition il.h:11645
bool IsHoisted() const
Definition il.h:11632
static Environment * From(Zone *zone, const GrowableArray< Definition * > &definitions, intptr_t fixed_parameter_count, intptr_t lazy_deopt_pruning_count, const ParsedFunction &parsed_function)
Definition il.cc:6451
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6558
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3736
bool is_null_aware() const
Definition il.h:5292
void set_null_aware(bool value)
Definition il.h:5293
static int64_t TruncateTo(int64_t v, Representation r)
Definition evaluator.cc:81
static IntegerPtr BitLengthEvaluate(const Object &value, Representation representation, Thread *thread)
Definition evaluator.cc:164
static IntegerPtr BinaryIntegerEvaluate(const Object &left, const Object &right, Token::Kind token_kind, bool is_truncating, Representation representation, Thread *thread)
Definition evaluator.cc:99
static bool ToIntegerConstant(Value *value, int64_t *result)
Definition evaluator.cc:281
static intptr_t GetResultCidOfListFactory(Zone *zone, const Function &function, intptr_t argument_count)
intptr_t CompoundReturnTypedDataIndex() const
Definition il.h:6056
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
Definition il.cc:7633
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:7351
void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
Definition il.cc:7421
virtual Representation representation() const
Definition il.cc:7946
intptr_t TargetAddressIndex() const
Definition il.h:6051
bool is_final() const
Definition object.h:4420
ClassPtr Owner() const
Definition object.cc:11911
bool IsOriginal() const
Definition object.h:4396
bool is_nullable() const
Definition object.cc:11821
@ kUnknownFixedLength
Definition object.h:4701
StaticTypeExactnessState static_type_exactness_state() const
Definition object.h:4606
intptr_t guarded_cid() const
Definition object.cc:11800
intptr_t guarded_list_length() const
Definition object.cc:12152
AbstractTypePtr type() const
Definition object.h:4523
static Float32x4Ptr New(float value0, float value1, float value2, float value3, Heap::Space space=Heap::kNew)
Definition object.cc:25386
static Float64x2Ptr New(double value0, double value1, Heap::Space space=Heap::kNew)
Definition object.cc:25554
Value * value() const
Definition il.h:10131
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2192
static bool LookupMethodFor(int class_id, const String &name, const ArgumentsDescriptor &args_desc, Function *fn_return, bool *class_is_abstract_return=nullptr)
static const CallTargets * ResolveCallTargetsForReceiverCid(intptr_t cid, const String &selector, const Array &args_desc_array)
static bool SupportsUnboxedDoubles()
ForwardInstructionIterator * current_iterator_
Definition il.h:11805
virtual void VisitBlocks()
Definition il.cc:1374
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
bool should_print() const
Definition flow_graph.h:505
bool IsCompiledForOsr() const
Definition flow_graph.h:460
ConstantInstr * constant_dead() const
Definition flow_graph.h:272
Zone * zone() const
Definition flow_graph.h:261
static Representation ReturnRepresentationOf(const Function &function)
bool should_remove_all_bounds_checks() const
Definition flow_graph.h:512
Thread * thread() const
Definition flow_graph.h:260
static intptr_t ComputeArgumentsSizeInWords(const Function &function, intptr_t arguments_count)
static Representation ParameterRepresentationAt(const Function &function, intptr_t index)
ConstantInstr * constant_null() const
Definition flow_graph.h:270
const Function & function() const
Definition flow_graph.h:130
bool is_licm_allowed() const
Definition flow_graph.h:404
bool unmatched_representations_allowed() const
Definition flow_graph.h:411
Definition * TryCreateConstantReplacementFor(Definition *op, const Object &value)
bool ExtractExternalUntaggedPayload(Instruction *instr, Value *array, classid_t cid)
void CopyDeoptTarget(Instruction *to, Instruction *from)
Definition flow_graph.h:395
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
Definition flow_graph.h:312
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
Instruction * Current() const
Definition il.h:1847
static bool IsDynamicInvocationForwarderName(const String &name)
Definition object.cc:4240
static StringPtr DemangleDynamicInvocationForwarderName(const String &name)
Definition object.cc:4248
MethodRecognizer::Kind recognized_kind() const
Definition object.h:3599
AbstractTypePtr result_type() const
Definition object.h:3079
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:10834
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition il.h:10848
virtual Representation representation() const
Definition il.h:10830
JoinEntryInstr * successor() const
Definition il.h:3695
virtual intptr_t SuccessorCount() const
Definition il.cc:2012
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition il.cc:2016
void RelinkToOsrEntry(Zone *zone, intptr_t max_block_id)
Definition il.cc:1740
bool IsCompiledForOsr() const
Definition il.cc:1255
FunctionEntryInstr * normal_entry() const
Definition il.h:1986
FunctionEntryInstr * unchecked_entry() const
Definition il.h:1987
void set_unchecked_entry(FunctionEntryInstr *target)
Definition il.h:1989
void set_normal_entry(FunctionEntryInstr *entry)
Definition il.h:1988
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition il.cc:1985
intptr_t osr_id() const
Definition il.h:1963
CatchBlockEntryInstr * GetCatchEntry(intptr_t index)
Definition il.cc:1246
ConstantInstr * constant_null()
Definition il.cc:1236
void set_osr_entry(OsrEntryInstr *entry)
Definition il.h:1993
virtual intptr_t SuccessorCount() const
Definition il.cc:1979
GraphEntryInstr(const ParsedFunction &parsed_function, intptr_t osr_id)
Definition il.cc:1215
OsrEntryInstr * osr_entry() const
Definition il.h:1992
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3929
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1050
const Field & field() const
Definition il.h:6476
Value * value() const
Definition il.h:6474
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1054
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3947
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3981
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1058
@ kNew
Definition heap.h:38
@ kOld
Definition heap.h:39
bool InstanceOfHasClassRange(const AbstractType &type, intptr_t *lower_limit, intptr_t *upper_limit)
Definition il.cc:410
bool CanUseGenericSubtypeRangeCheckFor(const AbstractType &type)
Definition il.cc:344
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
Definition il.cc:110
bool CanUseRecordSubtypeRangeCheckFor(const AbstractType &type)
Definition il.cc:393
bool CanUseSubtypeRangeCheckFor(const AbstractType &type)
Definition il.cc:305
intptr_t NumArgsTested() const
Definition object.cc:16518
void GetClassIdsAt(intptr_t index, GrowableArray< intptr_t > *class_ids) const
Definition object.cc:17020
intptr_t GetCountAt(intptr_t index) const
Definition object.cc:17110
intptr_t NumberOfChecks() const
Definition object.cc:16624
static bool Supports(ComparisonInstr *comparison, Value *v1, Value *v2)
Definition il.cc:6620
ComparisonInstr * comparison() const
Definition il.h:5434
void ComputeOffsetTable(FlowGraphCompiler *compiler)
Definition il.cc:4499
virtual intptr_t SuccessorCount() const
Definition il.h:3800
virtual TargetEntryInstr * SuccessorAt(intptr_t index) const
Definition il.h:3801
Value * offset() const
Definition il.h:3811
void set_ic_data(const ICData *value)
Definition il.h:4704
FunctionPtr ResolveForReceiverClass(const Class &cls, bool allow_add=true)
Definition il.cc:5338
bool CanReceiverBeSmiBasedOnInterfaceTarget(Zone *zone) const
Definition il.cc:5174
Code::EntryKind entry_kind() const
Definition il.h:4741
const ICData * ic_data() const
Definition il.h:4698
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:5190
void set_receiver_is_not_smi(bool value)
Definition il.h:4750
const Function & interface_target() const
Definition il.h:4708
bool HasICData() const
Definition il.h:4699
Token::Kind token_kind() const
Definition il.h:4707
virtual intptr_t ArgumentsSize() const
Definition il.cc:5203
bool receiver_is_not_smi() const
Definition il.h:4749
void UpdateReceiverSminess(Zone *zone)
Definition il.cc:5217
virtual Representation representation() const
Definition il.cc:5213
const String & function_name() const
Definition il.h:4706
const class BinaryFeedback * binary_
Definition il.h:4898
const class BinaryFeedback & BinaryFeedback()
Definition il.cc:5360
bool MatchesCoreName(const String &name)
Definition il.cc:5334
const CallTargets & Targets()
Definition il.cc:5347
void EnsureICData(FlowGraph *graph)
Definition il.cc:5242
intptr_t checked_argument_count() const
Definition il.h:4860
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:5610
Value * type_arguments() const
Definition il.h:8260
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3164
Value * function_type_arguments() const
Definition il.h:8259
const Code & GetStub() const
Definition il.h:8300
const AbstractType & type() const
Definition il.h:8208
virtual bool AttributesEqual(const Instruction &other) const
Definition il.h:1325
virtual void Accept(InstructionVisitor *visitor)=0
Instruction * next() const
Definition il.h:1087
virtual intptr_t InputCount() const =0
intptr_t GetDeoptId() const
Definition il.h:1403
void set_previous(Instruction *instr)
Definition il.h:1082
void SetEnvironment(Environment *deopt_env)
Definition il.cc:1270
void InheritDeoptTargetAfter(FlowGraph *flow_graph, Definition *call, Definition *result)
Definition il.cc:1549
void LinkTo(Instruction *next)
Definition il.h:1102
void InheritDeoptTarget(Zone *zone, Instruction *other)
Definition il.cc:1560
virtual Value * InputAt(intptr_t i) const =0
void Goto(JoinEntryInstr *entry)
Definition il.cc:2021
virtual BlockEntryInstr * SuccessorAt(intptr_t index) const
Definition il.cc:1972
virtual BlockEntryInstr * GetBlock()
Definition il.cc:1350
virtual void CopyDeoptIdFrom(const Instruction &instr)
Definition il.h:1405
Environment * env() const
Definition il.h:1209
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
void RemoveEnvironment()
Definition il.cc:1280
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition il.h:1207
bool HasUnmatchedInputRepresentations() const
Definition il.cc:1600
const char * ToCString() const
virtual uword Hash() const
Definition il.cc:608
InputsIterable inputs()
Definition il.h:1027
Instruction * AppendInstruction(Instruction *tail)
Definition il.cc:1339
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition il.h:1196
void CheckField(const Field &field) const
Definition il.h:1147
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:1235
void UnuseAllInputs()
Definition il.cc:1525
virtual bool MayHaveVisibleEffect() const
Definition il.h:1346
virtual intptr_t ArgumentCount() const
Definition il.h:1035
void set_next(Instruction *instr)
Definition il.h:1088
static const intptr_t kInstructionAttrs[kNumInstructions]
Definition il.h:962
bool IsDominatedBy(Instruction *dom)
Definition il.cc:1572
bool Equals(const Instruction &other) const
Definition il.cc:617
static const ICData * GetICData(const ZoneGrowableArray< const ICData * > &ic_data_array, intptr_t deopt_id, bool is_static_call)
Definition il.cc:593
Definition * ArgumentAt(intptr_t index) const
Definition il.h:3423
void Unsupported(FlowGraphCompiler *compiler)
Definition il.cc:626
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2620
virtual Representation representation() const
Definition il.h:1254
bool CanDeoptimize() const
Definition il.h:1073
void RepairArgumentUsesInEnvironment() const
Definition il.cc:1534
void ClearEnv()
Definition il.h:1358
LocationSummary * locs()
Definition il.h:1186
void ReplaceInEnvironment(Definition *current, Definition *replacement)
Definition il.cc:1287
virtual Tag tag() const =0
void SetInputAt(intptr_t i, Value *value)
Definition il.h:1008
InstructionSource source() const
Definition il.h:1002
Value * ArgumentValueAt(intptr_t index) const
Definition il.h:3417
virtual bool has_inlining_id() const
Definition il.h:1311
intptr_t deopt_id() const
Definition il.h:987
void InsertAfter(Instruction *prev)
Definition il.cc:1323
virtual intptr_t SuccessorCount() const
Definition il.cc:1968
bool CanEliminate()
Definition il.h:1354
Instruction * RemoveFromGraph(bool return_previous=true)
Definition il.cc:1299
virtual MoveArgumentsArray * GetMoveArguments() const
Definition il.h:1044
virtual bool CanTriggerGC() const
Definition il.cc:1619
Instruction * previous() const
Definition il.h:1081
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
static Int32x4Ptr New(int32_t value0, int32_t value1, int32_t value2, int32_t value3, Heap::Space space=Heap::kNew)
Definition object.cc:25470
Value * value() const
Definition il.h:10990
virtual Representation representation() const
Definition il.h:11002
bool is_truncating() const
Definition il.h:10994
virtual bool ComputeCanDeoptimize() const
Definition il.cc:2025
Representation to() const
Definition il.h:10993
Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3438
Representation from() const
Definition il.h:10992
static IntegerPtr New(const String &str, Heap::Space space=Heap::kNew)
Definition object.cc:23063
static IntegerPtr NewCanonical(const String &str)
Definition object.cc:23078
InvokeMathCFunctionInstr(InputsArray &&inputs, intptr_t deopt_id, MethodRecognizer::Kind recognized_kind, const InstructionSource &source)
Definition il.cc:7192
const RuntimeEntry & TargetFunction() const
Definition il.cc:7229
static intptr_t ArgumentCountFor(MethodRecognizer::Kind recognized_kind_)
Definition il.cc:7203
intptr_t optimization_counter_threshold() const
Definition isolate.h:305
ObjectStore * object_store() const
Definition isolate.h:505
static IsolateGroup * Current()
Definition isolate.h:534
ClassTable * class_table() const
Definition isolate.h:491
void RemoveDeadPhis(Definition *replacement)
Definition il.cc:1943
PhiInstr * InsertPhi(intptr_t var_index, intptr_t var_count)
Definition il.cc:1909
virtual void AddPredecessor(BlockEntryInstr *predecessor)
Definition il.cc:1413
intptr_t IndexOfPredecessor(BlockEntryInstr *pred) const
Definition il.cc:1430
GrowableArray< BlockEntryInstr * > predecessors_
Definition il.h:2092
void RemovePhi(PhiInstr *phi)
Definition il.cc:1932
virtual intptr_t PredecessorCount() const
Definition il.h:2050
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:6161
intptr_t TargetAddressIndex() const
Definition il.h:6149
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition il.cc:8042
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition il.cc:7964
virtual bool MayCreateUnsafeUntaggedPointer() const
Definition il.h:6169
static LeafRuntimeCallInstr * Make(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations, InputsArray &&inputs)
Definition il.cc:8026
Dart_NativeEntryResolver native_entry_resolver() const
Definition object.h:5200
static bool IsPrivateCoreLibName(const String &name, const String &member)
Definition object.cc:14727
Value * object() const
Definition il.h:8018
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3811
virtual Representation representation() const
Definition il.h:8012
bool IsImmutableLengthLoad() const
Definition il.h:8139
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2859
void set_loads_inner_pointer(InnerPointerAccess value)
Definition il.h:8101
const Slot & slot() const
Definition il.h:8096
virtual bool MayCreateUnsafeUntaggedPointer() const
Definition il.cc:2842
bool IsImmutableLoad() const
Definition il.h:8129
static bool IsUnmodifiableTypedDataViewFactory(const Function &function)
Definition il.cc:2698
InnerPointerAccess loads_inner_pointer() const
Definition il.h:8098
bool Evaluate(const Object &instance_value, Object *result)
Definition il.cc:2855
static bool IsFixedLengthArrayCid(intptr_t cid)
Definition il.cc:2659
Value * instance() const
Definition il.h:8095
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1119
virtual Representation representation() const
Definition il.cc:919
static bool TryEvaluateLoad(const Object &instance, const Field &field, Object *result)
Definition il.cc:2803
static bool IsTypedDataViewFactory(const Function &function)
Definition il.cc:2674
bool MayCreateUntaggedAlias() const
Definition il.cc:2826
intptr_t class_id() const
Definition il.h:6759
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6856
static Representation ReturnRepresentation(intptr_t array_cid)
Definition il.cc:6874
LoadIndexedInstr(Value *array, Value *index, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, CompileType *result_type=nullptr)
Definition il.cc:6831
Value * array() const
Definition il.h:6756
intptr_t index_scale() const
Definition il.h:6758
Value * index() const
Definition il.h:6757
virtual bool AllowsCSE() const
Definition il.h:6641
const Field & field() const
Definition il.h:6639
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1125
intptr_t offset() const
Definition il.h:7907
virtual Representation representation() const
Definition il.h:7897
Location out(intptr_t index) const
Definition locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition locations.h:894
RegisterSet * live_registers()
Definition locations.h:941
void set_out(intptr_t index, Location loc)
Definition locations.cc:232
Location in(intptr_t index) const
Definition locations.h:866
void set_in(intptr_t index, Location loc)
Definition locations.cc:205
static Location NoLocation()
Definition locations.h:387
static Location SameAsFirstInput()
Definition locations.h:382
bool IsRegister() const
Definition locations.h:402
static Location Pair(Location first, Location second)
Definition locations.cc:271
intptr_t ToStackSlotOffset() const
Definition locations.cc:369
Register reg() const
Definition locations.h:404
const char * ToCString() const
Definition locations.cc:445
intptr_t stack_index() const
Definition locations.h:485
Location Copy() const
Definition locations.cc:468
bool IsConstant() const
Definition locations.h:292
Register base_reg() const
Definition locations.h:480
static Location RegisterLocation(Register reg)
Definition locations.h:398
static Location Any()
Definition locations.h:352
PairLocation * AsPairLocation() const
Definition locations.cc:280
static Location RequiresRegister()
Definition locations.h:365
bool IsPairLocation() const
Definition locations.h:316
static Location RequiresFpuRegister()
Definition locations.h:369
bool IsStackSlot() const
Definition locations.h:456
const Object & constant() const
Definition locations.cc:373
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition locations.h:294
BlockEntryInstr * header() const
Definition loops.h:252
intptr_t NestingDepth() const
Definition loops.cc:1062
void RemapRegisters(intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
Definition il.cc:4966
const Location & LocationAt(intptr_t i)
Definition il.h:7691
intptr_t result_cid() const
Definition il.h:8924
MethodRecognizer::Kind op_kind() const
Definition il.h:8919
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:1104
static MegamorphicCachePtr Lookup(Thread *thread, const String &name, const Array &descriptor)
Value * length() const
Definition il.h:3193
Value * dest() const
Definition il.h:3190
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6961
void EmitUnrolledCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, intptr_t num_elements, bool reversed)
Definition il.cc:7110
Value * src_start() const
Definition il.h:3191
bool can_overlap() const
Definition il.h:3199
Value * src() const
Definition il.h:3189
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition il.h:3192
static intptr_t ResultCidFromPragma(const Object &function_or_field)
static intptr_t NumArgsCheckedForStaticCall(const Function &function)
static const char * KindToCString(Kind kind)
static intptr_t ParameterCountForResolution(const Function &function)
void set_is_bootstrap_native(bool value)
Definition il.h:6007
const String & native_name() const
Definition il.h:5973
virtual TokenPosition token_pos() const
Definition il.h:5979
void set_is_auto_scope(bool value)
Definition il.h:6008
bool is_bootstrap_native() const
Definition il.h:5976
const Function & function() const
Definition il.h:5974
bool link_lazily() const
Definition il.h:5978
void SaveArgument(FlowGraphCompiler *compiler, const compiler::ffi::NativeLocation &loc) const
Definition il.cc:4423
static NativeFunction ResolveNative(const Library &library, const String &function_name, int number_of_arguments, bool *auto_setup_scope)
virtual Representation representation() const
Definition il.h:2987
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:3536
static ObjectPtr null()
Definition object.h:433
intptr_t GetClassId() const
Definition object.h:341
ObjectPtr ptr() const
Definition object.h:332
bool Contains(uword addr) const
Definition object.h:762
bool IsCanonical() const
Definition object.h:335
bool IsOld() const
Definition object.h:391
virtual const char * ToCString() const
Definition object.h:366
bool IsNull() const
Definition object.h:363
static Object & Handle()
Definition object.h:407
static Object & ZoneHandle()
Definition object.h:419
Location At(intptr_t i) const
Definition locations.h:618
bool IsRedundant() const
Definition il.cc:4925
const Function & function() const
Definition parser.h:73
JoinEntryInstr * block() const
Definition il.h:2799
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6700
bool is_alive() const
Definition il.h:2809
Definition * GetReplacementForRedundantPhi() const
Definition il.cc:6647
bool IsRedundant() const
Definition il.cc:6637
bool Done() const
Definition il.h:2106
PhiInstr * Current() const
Definition il.h:2110
void RemoveCurrentFromGraph()
Definition il.cc:6732
virtual intptr_t CallCount() const
Definition il.cc:5551
bool IsSureToCallSingleRecognizedTarget() const
Definition il.cc:5671
static TypePtr ComputeRuntimeType(const CallTargets &targets)
Definition il.cc:5570
virtual Definition * Canonicalize(FlowGraph *graph)
Definition il.cc:5654
bool HasOnlyDispatcherOrImplicitAccessorTargets() const
Definition il.cc:5538
const CallTargets & targets() const
Definition il.h:4935
static RangeBoundary FromConstant(int64_t val)
static bool IsSingleton(Range *range)
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool Fits(Range *range, RangeBoundary::RangeSize size)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
bool IsWithin(int64_t min_int, int64_t max_int) const
static bool IsUnknown(const Range *other)
bool Fits(RangeBoundary::RangeSize size) const
bool Overlaps(int64_t min_int, int64_t max_int) const
intptr_t catch_try_index() const
Definition il.h:3633
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3181
AbstractTypePtr FieldTypeAt(intptr_t index) const
Definition object.cc:27456
intptr_t NumFields() const
Definition object.h:13366
intptr_t num_fields() const
Definition object.h:11399
ObjectPtr FieldAt(intptr_t field_index) const
Definition object.h:11407
virtual Value * RedefinedValue() const
Definition il.cc:543
Value * value() const
Definition il.h:4091
CompileType * constrained_type() const
Definition il.h:4099
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2628
void Remove(Location loc)
Definition locations.h:766
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6565
static void MessageF(Kind kind, const Script &script, TokenPosition token_pos, bool report_after_token, const char *format,...)
Definition report.cc:123
static constexpr bool AtLocation
Definition report.h:29
static FunctionPtr ResolveDynamicAnyArgs(Zone *zone, const Class &receiver_class, const String &function_name, bool allow_add=true)
Definition resolver.cc:198
static FunctionPtr ResolveDynamicForReceiverClass(const Class &receiver_class, const String &function_name, const ArgumentsDescriptor &args_desc, bool allow_add=true)
Definition resolver.cc:160
bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition il.cc:2103
Range * shift_range() const
Definition il.h:9607
intptr_t mask() const
Definition il.h:11305
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:8307
virtual intptr_t InputCount() const
Definition il.cc:8299
virtual Value * InputAt(intptr_t i) const
Definition il.h:11299
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:8316
static SimdOpInstr * CreateFromCall(Zone *zone, MethodRecognizer::Kind kind, Definition *receiver, Instruction *call, intptr_t mask=0)
Definition il.cc:8110
Kind kind() const
Definition il.h:11304
static SimdOpInstr * CreateFromFactoryCall(Zone *zone, MethodRecognizer::Kind kind, Instruction *call)
Definition il.cc:8164
static Kind KindForMethod(MethodRecognizer::Kind method_kind)
Definition il.cc:8231
static Kind KindForOperator(MethodRecognizer::Kind kind)
Definition il.cc:8085
virtual Representation representation() const
Definition il.cc:8303
Kind kind() const
Definition slot.h:502
bool IsDartField() const
Definition slot.h:503
const Field & field() const
Definition slot.h:540
Representation representation() const
Definition slot.h:519
intptr_t offset_in_bytes() const
Definition slot.h:513
bool is_compressed() const
Definition slot.h:529
compiler::Label * entry_label()
compiler::Label * exit_label()
static SmiPtr New(intptr_t value)
Definition object.h:9985
intptr_t Value() const
Definition object.h:9969
static intptr_t RawValue(intptr_t value)
Definition object.h:10001
friend class Class
Definition object.h:10026
static bool IsValid(int64_t value)
Definition object.h:10005
const ICData * ic_data() const
Definition il.h:5555
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:5454
const class BinaryFeedback * binary_
Definition il.h:5669
bool Evaluate(FlowGraph *flow_graph, const Object &argument, Object *result)
Definition il.cc:5822
virtual intptr_t ArgumentsSize() const
Definition il.cc:5467
static StaticCallInstr * FromCall(Zone *zone, const C *call, const Function &target, intptr_t call_count)
Definition il.h:5535
const Function & function() const
Definition il.h:5574
const class BinaryFeedback & BinaryFeedback()
Definition il.cc:5490
void SetResultType(Zone *zone, CompileType new_type)
Definition il.h:5599
void set_is_known_list_constructor(bool value)
Definition il.h:5613
Code::EntryKind entry_kind() const
Definition il.h:5617
virtual Representation representation() const
Definition il.cc:5473
bool InitResultType(Zone *zone)
Definition il.cc:5676
const CallTargets & Targets()
Definition il.cc:5477
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:5773
bool HasICData() const
Definition il.h:5556
static StaticTypeExactnessState NotTracking()
InnerPointerAccess stores_inner_pointer() const
Definition il.h:6406
virtual Representation RequiredInputRepresentation(intptr_t index) const
Definition il.cc:1016
void set_stores_inner_pointer(InnerPointerAccess value)
Definition il.h:6409
bool ShouldEmitStoreBarrier() const
Definition il.h:6380
bool is_initialization() const
Definition il.h:6378
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:1025
intptr_t OffsetInBytes() const
Definition il.h:6454
compiler::Assembler::CanBeSmi CanValueBeSmi() const
Definition il.h:6456
Value * value() const
Definition il.h:6375
Value * instance() const
Definition il.h:6373
const Slot & slot() const
Definition il.h:6374
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:6932
StoreIndexedInstr(Value *array, Value *index, Value *value, StoreBarrierType emit_store_barrier, bool index_unboxed, intptr_t index_scale, intptr_t class_id, AlignmentType alignment, intptr_t deopt_id, const InstructionSource &source, SpeculativeMode speculative_mode=kGuardInputs)
Definition il.cc:6879
Value * value() const
Definition il.h:7039
Value * array() const
Definition il.h:7037
intptr_t class_id() const
Definition il.h:7042
static Representation ValueRepresentation(intptr_t array_cid)
Definition il.cc:6927
virtual Instruction * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:6908
intptr_t index_scale() const
Definition il.h:7041
Value * index() const
Definition il.h:7038
StrictCompareInstr(const InstructionSource &source, Token::Kind kind, Value *left, Value *right, bool needs_number_check, intptr_t deopt_id)
Definition il.cc:5030
bool TryEmitBoolTest(FlowGraphCompiler *compiler, BranchLabels labels, intptr_t input_index, const Object &obj, Condition *condition_out)
Definition il.cc:5072
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3718
bool AttributesEqual(const Instruction &other) const
Definition il.cc:1092
bool needs_number_check() const
Definition il.h:5107
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6572
bool Equals(const String &str) const
Definition object.h:13311
static StringPtr New(const char *cstr, Heap::Space space=Heap::kNew)
Definition object.cc:23777
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition stub_code.cc:174
static CodePtr GetAllocationStubForTypedData(classid_t class_id)
Definition stub_code.cc:279
SubtypeFinder(Zone *zone, GrowableArray< intptr_t > *cids, bool include_abstract)
Definition il.cc:67
void ScanImplementorClasses(const Class &klass)
Definition il.cc:75
bool has_type_args() const
Definition il.h:11451
intptr_t resume_deopt_id() const
Definition il.h:11461
StubId stub_id() const
Definition il.h:11460
Value * type_args() const
Definition il.h:11455
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:8413
Value * operand() const
Definition il.h:11454
static const String & True()
Definition symbols.h:692
static const String & False()
Definition symbols.h:688
static StringPtr FromConcatAll(Thread *thread, const GrowableHandlePtrArray< const String > &strs)
Definition symbols.cc:262
static StringPtr New(Thread *thread, const char *cstr)
Definition symbols.h:722
virtual Value * InputAt(intptr_t i) const
Definition il.h:3941
intptr_t type_args_len() const
Definition il.h:4596
const Array & argument_names() const
Definition il.h:4597
intptr_t ArgumentCount() const
Definition il.h:4568
intptr_t FirstArgIndex() const
Definition il.h:4558
intptr_t ArgumentCountWithoutTypeArgs() const
Definition il.h:4560
Value * Receiver() const
Definition il.h:4559
ArrayPtr GetArgumentsDescriptor() const
Definition il.h:4599
bool calls_initializer() const
Definition il.h:6579
bool throw_exception_on_initialization() const
Definition il.h:6582
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition il.h:6587
Value * value() const
Definition il.h:5201
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3854
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:6595
TestCidsInstr(const InstructionSource &source, Token::Kind kind, Value *value, const ZoneGrowableArray< intptr_t > &cid_results, intptr_t deopt_id)
Definition il.cc:3833
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6583
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition il.h:5185
uword upper() const
Definition il.h:5232
TestRangeInstr(const InstructionSource &source, Value *value, uword lower, uword upper, Representation value_representation)
Definition il.cc:3881
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6589
virtual bool AttributesEqual(const Instruction &other) const
Definition il.cc:6611
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3897
uword lower() const
Definition il.h:5231
Value * value() const
Definition il.h:5242
virtual ComparisonInstr * CopyWithNewOperands(Value *left, Value *right)
Definition il.cc:6578
IsolateGroup * isolate_group() const
Zone * zone() const
static Thread * Current()
Definition thread.h:361
IsolateGroup * isolate_group() const
Definition thread.h:540
static Token::Kind NegateComparison(Token::Kind op)
Definition token.h:322
static intptr_t OutputIndexOf(Token::Kind token)
Definition il.cc:7273
TruncDivModInstr(Value *lhs, Value *rhs, intptr_t deopt_id)
Definition il.cc:7267
intptr_t Length() const
Definition object.cc:7352
AbstractTypePtr TypeAt(intptr_t index) const
Definition object.cc:7366
static TypePtr DartTypeType()
Definition object.cc:21942
static TypePtr NullableNumber()
Definition object.cc:21926
static TypePtr Double()
Definition object.cc:21902
static TypePtr StringType()
Definition object.cc:21930
static TypePtr IntType()
Definition object.cc:21886
intptr_t Length() const
Definition object.h:11492
intptr_t ElementSizeInBytes() const
Definition object.h:11505
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:2384
Value * value() const
Definition il.h:9192
static UnaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode, Range *range)
Definition il.cc:2248
Token::Kind op_kind() const
Definition il.h:9193
Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3303
virtual Representation representation() const
Definition il.h:8655
Value * value() const
Definition il.h:8630
void set_speculative_mode(SpeculativeMode value)
Definition il.h:8693
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const
Definition il.h:8651
static UnboxInstr * Create(Representation to, Value *value, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition il.cc:4045
bool is_truncating() const
Definition il.h:8724
virtual bool ComputeCanDeoptimize() const
Definition il.cc:2031
virtual Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3367
intptr_t lane() const
Definition il.h:10362
Value * value() const
Definition il.h:10355
Definition * Canonicalize(FlowGraph *flow_graph)
Definition il.cc:3234
uword constant_address_
Definition il.h:4278
UnboxedConstantInstr(const Object &value, Representation representation)
Definition il.cc:1169
bool IsScanFlagsUnboxed() const
Definition il.cc:7188
static T Abs(T x)
Definition utils.h:34
static constexpr int CountOneBitsWord(uword x)
Definition utils.h:161
static constexpr T Maximum(T x, T y)
Definition utils.h:26
static constexpr int ShiftForPowerOfTwo(T x)
Definition utils.h:66
static T Minimum(T x, T y)
Definition utils.h:21
static T AddWithWrapAround(T a, T b)
Definition utils.h:416
static constexpr bool IsPowerOfTwo(T x)
Definition utils.h:61
bool Done() const
Definition il.h:83
void BindToEnvironment(Definition *definition)
Definition il.h:2706
void set_use_index(intptr_t index)
Definition il.h:125
Value * Copy(Zone *zone)
Definition il.h:134
bool IsSingleUse() const
Definition il.h:117
bool NeedsWriteBarrier()
Definition il.cc:1388
bool BindsToConstantNull() const
Definition il.cc:1194
bool BindsToConstant() const
Definition il.cc:1181
void set_previous_use(Value *previous)
Definition il.h:112
bool CanBe(const Object &value)
Definition il.h:11833
static void AddToList(Value *value, Value **list)
Definition il.cc:1437
bool Equals(const Value &other) const
Definition il.cc:631
intptr_t BoundSmiConstant() const
Definition il.cc:1210
bool BindsToSmiConstant() const
Definition il.cc:1206
Instruction * instruction() const
Definition il.h:121
void set_next_use(Value *next)
Definition il.h:115
Value * previous_use() const
Definition il.h:111
const Object & BoundConstant() const
Definition il.cc:1199
Value * next_use() const
Definition il.h:114
void set_definition(Definition *definition)
Definition il.h:104
Value * CopyWithType(Zone *zone)
Definition il.h:138
void RemoveFromUseList()
Definition il.cc:1448
Definition * definition() const
Definition il.h:103
void BindTo(Definition *definition)
Definition il.h:2700
CompileType * Type()
Value(Definition *definition)
Definition il.h:95
void RefineReachingType(CompileType *type)
void set_instruction(Instruction *instruction)
Definition il.h:122
intptr_t InputCount() const
Definition il.h:2776
Value * InputAt(intptr_t i) const
Definition il.h:2777
ElementType * Alloc(intptr_t length)
static bool EmittingComments()
const NativeLocation & Rebase(const NativeLocation &loc) const
const NativeLocations & locations() const
static const NativeCallingConvention & FromSignature(Zone *zone, const NativeFunctionType &signature)
static const NativeFunctionType * FromRepresentations(Zone *zone, Representation return_representation, const ZoneGrowableArray< Representation > &argument_representations)
const MultipleNativeLocations & AsMultiple() const
const NativeRegistersLocation & AsRegisters() const
const PointerToMemoryLocation & AsPointerToMemory() const
NativeLocation & WidenTo4Bytes(Zone *zone) const
const BothNativeLocations & AsBoth() const
const NativeType & payload_type() const
const NativeLocation & pointer_location() const
#define THR_Print(format,...)
Definition log.h:20
Dart_NativeFunction(* Dart_NativeEntryResolver)(Dart_Handle name, int num_of_arguments, bool *auto_setup_scope)
Definition dart_api.h:3225
#define UNIMPLEMENTED
#define ASSERT(E)
static int square(int x)
Definition etc1.cpp:302
VkInstance instance
Definition main.cc:48
SkBitmap source
Definition examples.cpp:28
static bool b
struct MyStruct a[10]
#define FATAL(error)
gboolean invert
glong end
FlValue * args
uint8_t value
GAsyncResult * result
uint32_t * target
#define DECLARE_FLAG(type, name)
Definition flags.h:14
#define DEFINE_FLAG(type, name, default_value, comment)
Definition flags.h:16
Dart_NativeFunction function
Definition fuchsia.cc:51
int argument_count
Definition fuchsia.cc:52
#define HANDLESCOPE(thread)
Definition handles.h:321
static float max(float r, float g, float b)
Definition hsl.cpp:49
static float min(float r, float g, float b)
Definition hsl.cpp:48
#define DEFINE_ACCEPT(ShortName, Attrs)
Definition il.cc:1261
#define INSTR_ATTRS(type, attrs)
#define BOXING_IN_SET_CASE(unboxed, boxed)
Definition il.cc:447
#define FOR_EACH_NON_INT_BOXED_REPRESENTATION(M)
Definition il.cc:440
#define CASE_BINARY_OP(Arity, Mask, Name, Args, Result)
#define BOXING_CID_CASE(unboxed, boxed)
Definition il.cc:453
#define R(r)
#define KIND_CASE(name)
#define CASE(Arity, Mask, Name, Args, Result)
#define BOXING_VALUE_OFFSET_CASE(unboxed, boxed)
Definition il.cc:450
#define CASE_METHOD(Arity, Mask, Name,...)
#define FOR_EACH_INSTRUCTION(M)
Definition il.h:405
#define SIMD_OP_LIST(M, BINARY_OP)
Definition il.h:11195
size_t length
#define DEFINE_BACKEND(Name, Args)
double y
double x
Definition copy.py:1
const intptr_t kResultIndex
Definition marshaller.h:28
word ToRawSmi(const dart::Object &a)
bool IsSmi(int64_t v)
constexpr OperandSize kWordBytes
static constexpr int kExitLinkSlotFromEntryFp
static bool Equals(const Object &expected, const Object &actual)
bool IsTypedDataViewClassId(intptr_t index)
Definition class_id.h:439
Location LocationRegisterOrConstant(Value *value)
Definition locations.cc:289
static int64_t RepresentationMask(Representation r)
Definition il.cc:2140
static Definition * CanonicalizeStrictCompare(StrictCompareInstr *compare, bool *negated, bool is_branch)
Definition il.cc:3553
const Register THR
const char *const name
uword FindDoubleConstant(double value)
static Condition InvertCondition(Condition c)
static Definition * CanonicalizeCommutativeDoubleArithmetic(Token::Kind op, Value *left, Value *right)
Definition il.cc:2145
static bool BindsToGivenConstant(Value *v, intptr_t expected)
Definition il.cc:3605
bool IsTypedDataBaseClassId(intptr_t index)
Definition class_id.h:429
static constexpr const char * kNone
static const Representation kUnboxedBool
Definition il.cc:8267
static constexpr Representation kUnboxedUword
Definition locations.h:171
static bool MayBeNumber(CompileType *type)
Definition il.cc:3538
static const SimdOpInfo simd_op_information[]
Definition il.cc:8281
static bool MayBeBoxableNumber(intptr_t cid)
Definition il.cc:3534
const Register kWriteBarrierValueReg
DART_EXPORT bool IsNull(Dart_Handle object)
bool IsTypeClassId(intptr_t index)
Definition class_id.h:370
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
Definition hash.h:12
constexpr intptr_t kIntptrMin
Definition globals.h:556
uint16_t RegList
static constexpr int kSavedCallerFpSlotFromFp
StoreBarrierType
Definition il.h:6252
bool IsUnmodifiableTypedDataViewClassId(intptr_t index)
Definition class_id.h:453
@ kIllegalCid
Definition class_id.h:214
@ kNullCid
Definition class_id.h:252
@ kVoidCid
Definition class_id.h:254
@ kDynamicCid
Definition class_id.h:253
@ kNeverCid
Definition class_id.h:255
Representation
Definition locations.h:66
constexpr intptr_t kBitsPerByte
Definition globals.h:463
@ kHeapObjectTag
MallocGrowableArray< CidRangeValue > CidRangeVector
Definition il.h:253
static int OrderByFrequencyThenId(CidRange *const *a, CidRange *const *b)
Definition il.cc:642
uintptr_t uword
Definition globals.h:501
@ UNSIGNED_GREATER
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ UNSIGNED_LESS_EQUAL
void RegisterTypeArgumentsUse(const Function &function, TypeUsageInfo *type_usage_info, const Class &klass, Definition *type_arguments)
const Register TMP2
static int OrderById(CidRange *const *a, CidRange *const *b)
Definition il.cc:635
static bool RecognizeTestPattern(Value *left, Value *right, bool *negate)
Definition il.cc:3611
@ kNumberOfCpuRegisters
@ kNoRegister
static bool IsMarked(BlockEntryInstr *block, GrowableArray< BlockEntryInstr * > *preorder)
Definition il.cc:1679
Location LocationRemapForSlowPath(Location loc, Definition *def, intptr_t *cpu_reg_slots, intptr_t *fpu_reg_slots)
Definition locations.cc:492
static AlignmentType StrengthenAlignment(intptr_t cid, AlignmentType alignment)
Definition il.cc:6813
static constexpr int kCallerSpSlotFromFp
static Definition * CanonicalizeStringInterpolate(StaticCallInstr *call, FlowGraph *flow_graph)
Definition il.cc:5711
bool IsExternalPayloadClassId(classid_t cid)
Definition class_id.h:472
constexpr intptr_t kInt32Size
Definition globals.h:450
@ kAllFree
Definition object.h:2920
const Register TMP
const Register FPREG
constexpr intptr_t kBitsPerInt32
Definition globals.h:466
const intptr_t cid
static Definition * CanonicalizeStringInterpolateSingle(StaticCallInstr *call, FlowGraph *flow_graph)
Definition il.cc:5762
static bool IsFpCompare(ComparisonInstr *comp)
Definition il.cc:3513
uint32_t FinalizeHash(uint32_t hash, intptr_t hashbits=kBitsPerInt32)
Definition hash.h:20
static constexpr Representation kUnboxedAddress
Definition locations.h:182
static bool IsSingleUseUnboxOrConstant(Value *use)
Definition il.cc:3731
static intptr_t RepresentationBits(Representation r)
Definition il.cc:2125
static const String & EvaluateToString(Zone *zone, Definition *defn)
Definition il.cc:5694
static bool IsConstant(Definition *def, int64_t *val)
Definition loops.cc:123
static constexpr Representation kUnboxedIntPtr
Definition locations.h:176
static const intptr_t kMaxElementSizeForEfficientCopy
Definition il.cc:6957
QRegister FpuRegister
bool IsIntegerClassId(intptr_t index)
Definition class_id.h:340
static constexpr Representation SimdRepresentation(Representation rep)
Definition il.cc:8259
const char *const function_name
static int8_t data[kExtLength]
void(* NativeFunction)(NativeArguments *arguments)
static bool IsCommutative(Token::Kind op)
Definition il.cc:2231
static constexpr intptr_t kInvalidTryIndex
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition locations.cc:297
ArrayOfTuplesView< MegamorphicCache::EntryType, std::tuple< Smi, Object > > MegamorphicCacheEntries
Definition object.h:13535
constexpr intptr_t kBitsPerInt64
Definition globals.h:467
const Register SPREG
bool IsExternalTypedDataClassId(intptr_t index)
Definition class_id.h:447
AlignmentType
Definition il.h:6720
@ kAlignedAccess
Definition il.h:6722
static FunctionPtr FindBinarySmiOp(Zone *zone, const String &name)
Definition il.cc:5226
constexpr intptr_t kIntptrMax
Definition globals.h:557
bool IsStringClassId(intptr_t index)
Definition class_id.h:350
static bool AllInputsAreRedefinitions(PhiInstr *phi)
Definition il.cc:6691
static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind)
Definition il.cc:5158
DEF_SWITCHES_START
Definition switches.h:191
dst
Definition cp.py:12
dest
Definition zip.py:79
SkScalar w
#define FALL_THROUGH
Definition globals.h:15
#define Pd
Definition globals.h:408
Point offset
static constexpr Register kResultReg
static constexpr Register kFunctionReg
static constexpr Register kContextReg
static constexpr Register kResultReg
static constexpr Register kInstantiatorTypeArgsReg
static constexpr Register kShapeReg
static constexpr Register kResultReg
static constexpr Register kResultReg
static constexpr Register kShapeReg
static constexpr Register kValue2Reg
static constexpr Register kValue0Reg
static constexpr Register kValue1Reg
static constexpr Register kLengthReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Register kSubTypeReg
static constexpr Register kSuperTypeReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kDstNameReg
static bool RequiresAllocation(Representation rep)
Definition il.cc:468
static bool Supports(Representation rep)
Definition il.cc:457
static constexpr Representation NativeRepresentation(Representation rep)
Definition il.h:8456
static intptr_t BoxCid(Representation rep)
Definition il.cc:491
static intptr_t ValueOffset(Representation rep)
Definition il.cc:476
intptr_t cid_end
Definition il.h:250
bool IsIllegalRange() const
Definition il.h:241
intptr_t cid_start
Definition il.h:249
intptr_t cid_start
Definition il.h:220
intptr_t cid_end
Definition il.h:221
static constexpr Register kSourceReg
static constexpr Register kClassIdReg
static constexpr Register kResultReg
static constexpr Register kRecognizedKindReg
static constexpr FpuRegister kInputReg
static constexpr Register kArgsReg
static constexpr Register kFieldReg
static constexpr Register kResultReg
static constexpr Register kInstanceReg
static constexpr Register kResultReg
static constexpr Register kFieldReg
static constexpr Register kTypeArgsReg
static constexpr Register kFunctionTypeArgumentsReg
Definition constants.h:38
static constexpr Register kTypeReg
Definition constants.h:34
static constexpr Register kInstantiatorTypeArgumentsReg
Definition constants.h:36
static constexpr Register kResultTypeReg
Definition constants.h:40
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kUninstantiatedTypeArgumentsReg
static constexpr Register kResultTypeArgumentsReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kFieldReg
static constexpr Register kLengthReg
static constexpr Register kIndexReg
static constexpr Register kStackTraceReg
static constexpr Register kExceptionReg
static constexpr size_t ValueSize(Representation rep)
Definition locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition locations.h:92
static bool IsRepresentable(Representation rep, int64_t value)
Definition locations.cc:72
static int64_t MaxValue(Representation rep)
Definition locations.cc:62
static compiler::OperandSize OperandSize(Representation rep)
Definition locations.cc:16
static int64_t MinValue(Representation rep)
Definition locations.cc:49
static constexpr bool IsUnboxed(Representation rep)
Definition locations.h:101
static const char * ToCString(Representation rep)
Definition locations.cc:129
static Representation RepresentationOfArrayElement(classid_t cid)
Definition locations.cc:79
uint8_t arity
Definition il.cc:8253
Representation inputs[4]
Definition il.cc:8256
Representation output
Definition il.cc:8255
bool has_mask
Definition il.cc:8254
static constexpr intptr_t kResumePcDistance
static constexpr Register kArgumentReg
static constexpr Register kTypeArgsReg
const Function * target
Definition il.h:721
StaticTypeExactnessState exactness
Definition il.h:723
intptr_t count
Definition il.h:722
static constexpr Register kExceptionReg
void Usage()
Definition main.cc:42