Flutter Engine
The Flutter Engine
range_analysis.cc
Go to the documentation of this file.
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
7#include "vm/bit_vector.h"
10
11namespace dart {
12
14 array_bounds_check_elimination,
15 true,
16 "Eliminate redundant bounds checks.");
17DEFINE_FLAG(bool, trace_range_analysis, false, "Trace range analysis progress");
19 trace_integer_ir_selection,
20 false,
21 "Print integer IR selection optimization pass.");
22DECLARE_FLAG(bool, trace_constant_propagation);
23
24// Quick access to the locally defined zone() method.
25#define Z (zone())
26
27#if defined(DEBUG)
// DEBUG-only helper: verifies that |range| contains only values that are
// representable in representation |rep|; fails |assert| with a diagnostic
// naming the offending instruction otherwise.
static void CheckRangeForRepresentation(const Assert& assert,
                                        const Instruction* instr,
                                        const Range* range,
                                        Representation rep) {
  // The full range representable by |rep|; |range| must lie entirely inside.
  const Range other = Range::Full(rep);
  if (!RangeUtils::IsWithin(range, &other)) {
    assert.Fail(
        "During range analysis for:\n  %s\n"
        "expected range containing only %s-representable values, but got %s",
        instr->ToCString(), RepresentationUtils::ToCString(rep),
        Range::ToCString(range));
  }
}
41
42#define ASSERT_VALID_RANGE_FOR_REPRESENTATION(instr, range, representation) \
43 do { \
44 CheckRangeForRepresentation(dart::Assert(__FILE__, __LINE__), instr, \
45 range, representation); \
46 } while (false)
47#else
48#define ASSERT_VALID_RANGE_FOR_REPRESENTATION(instr, range, representation) \
49 do { \
50 USE(instr); \
51 USE(range); \
52 USE(representation); \
53 } while (false)
54#endif
55
57 CollectValues();
58 InsertConstraints();
59 flow_graph_->GetLoopHierarchy().ComputeInduction();
60 InferRanges();
61 EliminateRedundantBoundsChecks();
62 MarkUnreachableBlocks();
63
64 NarrowMintToInt32();
65
66 IntegerInstructionSelector iis(flow_graph_);
67 iis.Select();
68
69 RemoveConstraints();
70}
71
72// Helper method to chase to a constrained definition.
74 while (defn->IsConstraint()) {
75 defn = defn->AsConstraint()->value()->definition();
76 }
77 return defn;
78}
79
// Collects every integer-valued definition in the graph into values_. Also
// records int64 binary/shift operations (for later integer IR selection) and
// all bounds checks (for bounds check elimination).
void RangeAnalysis::CollectValues() {
  auto graph_entry = flow_graph_->graph_entry();

  // Initial definitions attached directly to the graph entry.
  auto& initial = *graph_entry->initial_definitions();
  for (intptr_t i = 0; i < initial.length(); ++i) {
    Definition* current = initial[i];
    if (IsIntegerDefinition(current)) {
      values_.Add(current);
    }
  }

  // Initial definitions attached to entry blocks reachable from the graph
  // entry (e.g. function entries).
  for (intptr_t i = 0; i < graph_entry->SuccessorCount(); ++i) {
    auto successor = graph_entry->SuccessorAt(i);
    if (auto entry = successor->AsBlockEntryWithInitialDefs()) {
      const auto& initial = *entry->initial_definitions();
      for (intptr_t j = 0; j < initial.length(); ++j) {
        Definition* current = initial[j];
        if (IsIntegerDefinition(current)) {
          values_.Add(current);
        }
      }
    }
  }

  // Walk all blocks in reverse postorder collecting integer phis, integer
  // SSA definitions, and bounds checks.
  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();
    JoinEntryInstr* join = block->AsJoinEntry();
    if (join != nullptr) {
      for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
        PhiInstr* current = phi_it.Current();
        if (current->Type()->IsInt()) {
          values_.Add(current);
        }
      }
    }

    for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
         instr_it.Advance()) {
      Instruction* current = instr_it.Current();
      Definition* defn = current->AsDefinition();
      if (defn != nullptr) {
        // Only definitions with SSA temps participate in range inference.
        if (defn->HasSSATemp() && IsIntegerDefinition(defn)) {
          values_.Add(defn);
          // Remember 64-bit ops separately for integer IR selection.
          if (defn->IsBinaryInt64Op()) {
            binary_int64_ops_.Add(defn->AsBinaryInt64Op());
          } else if (defn->IsShiftInt64Op() ||
                     defn->IsSpeculativeShiftInt64Op()) {
            shift_int64_ops_.Add(defn->AsShiftIntegerOp());
          }
        }
      }
      if (auto check = current->AsCheckBoundBase()) {
        bounds_checks_.Add(check);
      }
    }
  }
}
138
139// Given a boundary (right operand) and a comparison operation return
140// a symbolic range constraint for the left operand of the comparison assuming
141// that it evaluated to true.
142// For example for the comparison a < b symbol a is constrained with range
143// [Smi::kMinValue, b - 1].
144Range* RangeAnalysis::ConstraintSmiRange(Token::Kind op, Definition* boundary) {
145 switch (op) {
146 case Token::kEQ:
147 return new (Z) Range(RangeBoundary::FromDefinition(boundary),
149 case Token::kNE:
151 case Token::kLT:
152 return new (Z) Range(RangeBoundary::MinSmi(),
153 RangeBoundary::FromDefinition(boundary, -1));
154 case Token::kGT:
155 return new (Z) Range(RangeBoundary::FromDefinition(boundary, 1),
157 case Token::kLTE:
158 return new (Z) Range(RangeBoundary::MinSmi(),
160 case Token::kGTE:
161 return new (Z) Range(RangeBoundary::FromDefinition(boundary),
163 default:
164 UNREACHABLE();
165 return nullptr;
166 }
167}
168
169ConstraintInstr* RangeAnalysis::InsertConstraintFor(Value* use,
170 Definition* defn,
171 Range* constraint_range,
172 Instruction* after) {
173 // No need to constrain constants.
174 if (defn->IsConstant()) return nullptr;
175
176 // Check if the value is already constrained to avoid inserting duplicated
177 // constraints.
178 ConstraintInstr* constraint = after->next()->AsConstraint();
179 while (constraint != nullptr) {
180 if ((constraint->value()->definition() == defn) &&
181 constraint->constraint()->Equals(constraint_range)) {
182 return nullptr;
183 }
184 constraint = constraint->next()->AsConstraint();
185 }
186
187 constraint = new (Z) ConstraintInstr(use->CopyWithType(), constraint_range);
188
189 flow_graph_->InsertAfter(after, constraint, nullptr, FlowGraph::kValue);
190 FlowGraph::RenameDominatedUses(defn, constraint, constraint);
191 constraints_.Add(constraint);
192 return constraint;
193}
194
// If |use| is an operand of a smi relational comparison driving a branch,
// constrain |defn| in both branch successors using the other comparison
// operand as a symbolic boundary. Returns true when constraints were
// attempted (i.e. the branch was a smi relational op).
bool RangeAnalysis::ConstrainValueAfterBranch(Value* use, Definition* defn) {
  BranchInstr* branch = use->instruction()->AsBranch();
  RelationalOpInstr* rel_op = branch->comparison()->AsRelationalOp();
  if ((rel_op != nullptr) && (rel_op->operation_cid() == kSmiCid)) {
    // Found comparison of two smis. Constrain defn at true and false
    // successors using the other operand as a boundary.
    Definition* boundary;
    Token::Kind op_kind;
    if (use->use_index() == 0) {  // Left operand.
      boundary = rel_op->InputAt(1)->definition();
      op_kind = rel_op->kind();
    } else {
      ASSERT(use->use_index() == 1);  // Right operand.
      boundary = rel_op->InputAt(0)->definition();
      // InsertConstraintFor assumes that defn is left operand of a
      // comparison if it is right operand flip the comparison.
      op_kind = Token::FlipComparison(rel_op->kind());
    }

    // Constrain definition at the true successor.
    ConstraintInstr* true_constraint =
        InsertConstraintFor(use, defn, ConstraintSmiRange(op_kind, boundary),
                            branch->true_successor());
    if (true_constraint != nullptr) {
      true_constraint->set_target(branch->true_successor());
    }

    // Constrain definition with a negated condition at the false successor.
    ConstraintInstr* false_constraint = InsertConstraintFor(
        use, defn,
        ConstraintSmiRange(Token::NegateComparison(op_kind), boundary),
        branch->false_successor());
    if (false_constraint != nullptr) {
      false_constraint->set_target(branch->false_successor());
    }

    return true;
  }

  return false;
}
236
237void RangeAnalysis::InsertConstraintsFor(Definition* defn) {
238 for (Value* use = defn->input_use_list(); use != nullptr;
239 use = use->next_use()) {
240 if (auto branch = use->instruction()->AsBranch()) {
241 if (ConstrainValueAfterBranch(use, defn)) {
242 Value* other_value = branch->InputAt(1 - use->use_index());
243 if (!IsIntegerDefinition(other_value->definition())) {
244 ConstrainValueAfterBranch(other_value, other_value->definition());
245 }
246 }
247 } else if (auto check = use->instruction()->AsCheckBoundBase()) {
248 ConstrainValueAfterCheckBound(use, check, defn);
249 }
250 }
251}
252
253void RangeAnalysis::ConstrainValueAfterCheckBound(Value* use,
254 CheckBoundBaseInstr* check,
255 Definition* defn) {
256 const intptr_t use_index = use->use_index();
257
258 Range* constraint_range = nullptr;
259 if (use_index == CheckBoundBaseInstr::kIndexPos) {
260 Definition* length = check->length()->definition();
261 constraint_range = new (Z) Range(RangeBoundary::FromConstant(0),
263 } else {
265 Definition* index = check->index()->definition();
266 constraint_range = new (Z)
268 }
269 InsertConstraintFor(use, defn, constraint_range, check);
270}
271
272void RangeAnalysis::InsertConstraints() {
273 for (intptr_t i = 0; i < values_.length(); i++) {
274 InsertConstraintsFor(values_[i]);
275 }
276
277 for (intptr_t i = 0; i < constraints_.length(); i++) {
278 InsertConstraintsFor(constraints_[i]);
279 }
280}
281
283 Definition* defn = value->definition();
284 const Range* range = defn->range();
285
286 if ((range == nullptr) && (defn->Type()->ToCid() != kSmiCid)) {
287 // Type propagator determined that reaching type for this use is Smi.
288 // However the definition itself is not a smi-definition and
289 // thus it will never have range assigned to it. Just return the widest
290 // range possible for this value.
291 // We don't need to handle kMintCid here because all external mints
292 // (e.g. results of loads or function call) can be used only after they
293 // pass through UnboxInt64Instr which is considered as mint-definition
294 // and will have a range assigned to it.
295 // Note: that we can't return nullptr here because it is used as lattice's
296 // bottom element to indicate that the range was not computed *yet*.
297 return &smi_range_;
298 }
299
300 return range;
301}
302
304 Definition* defn = value->definition();
305 const Range* range = defn->range();
306
307 if ((range == nullptr) && !defn->Type()->IsInt()) {
308 // Type propagator determined that reaching type for this use is int.
309 // However the definition itself is not a int-definition and
310 // thus it will never have range assigned to it. Just return the widest
311 // range possible for this value.
312 // Note: that we can't return nullptr here because it is used as lattice's
313 // bottom element to indicate that the range was not computed *yet*.
314 return &int64_range_;
315 }
316
317 return range;
318}
319
323 return (a == b) || (a->AllowsCSE() && b->AllowsCSE() && a->Equals(*b));
324}
325
326static bool DependOnSameSymbol(const RangeBoundary& a, const RangeBoundary& b) {
327 return a.IsSymbol() && b.IsSymbol() &&
328 AreEqualDefinitions(a.symbol(), b.symbol());
329}
330
331// Given the current range of a phi and a newly computed range check
332// if it is growing towards negative infinity, if it does widen it to
333// MinSmi.
334static RangeBoundary WidenMin(const Range* range,
335 const Range* new_range,
337 RangeBoundary min = range->min();
338 RangeBoundary new_min = new_range->min();
339
340 if (min.IsSymbol()) {
341 if (min.LowerBound().Overflowed(size)) {
343 } else if (DependOnSameSymbol(min, new_min)) {
344 return min.offset() <= new_min.offset()
345 ? min
347 } else if (min.UpperBound(size) <= new_min.LowerBound(size)) {
348 return min;
349 }
350 }
351
352 min = Range::ConstantMin(range, size);
353 new_min = Range::ConstantMin(new_range, size);
354
355 return (min.ConstantValue() <= new_min.ConstantValue())
356 ? min
358}
359
360// Given the current range of a phi and a newly computed range check
361// if it is growing towards positive infinity, if it does widen it to
362// MaxSmi.
363static RangeBoundary WidenMax(const Range* range,
364 const Range* new_range,
366 RangeBoundary max = range->max();
367 RangeBoundary new_max = new_range->max();
368
369 if (max.IsSymbol()) {
370 if (max.UpperBound().Overflowed(size)) {
372 } else if (DependOnSameSymbol(max, new_max)) {
373 return max.offset() >= new_max.offset()
374 ? max
376 } else if (max.LowerBound(size) >= new_max.UpperBound(size)) {
377 return max;
378 }
379 }
380
381 max = Range::ConstantMax(range, size);
382 new_max = Range::ConstantMax(new_range, size);
383
384 return (max.ConstantValue() >= new_max.ConstantValue())
385 ? max
387}
388
389// Given the current range of a phi and a newly computed range check
390// if we can perform narrowing: use newly computed minimum to improve precision
391// of the computed range. We do it only if current minimum was widened and is
392// equal to MinSmi.
393// Newly computed minimum is expected to be greater or equal than old one as
394// we are running after widening phase.
395static RangeBoundary NarrowMin(const Range* range,
396 const Range* new_range,
399 const RangeBoundary new_min = Range::ConstantMin(new_range, size);
400 if (min.ConstantValue() > new_min.ConstantValue()) return range->min();
401
402 // TODO(vegorov): consider using negative infinity to indicate widened bound.
403 return range->min().IsMinimumOrBelow(size) ? new_range->min() : range->min();
404}
405
406// Given the current range of a phi and a newly computed range check
407// if we can perform narrowing: use newly computed maximum to improve precision
408// of the computed range. We do it only if current maximum was widened and is
409// equal to MaxSmi.
410// Newly computed maximum is expected to be less or equal than old one as
411// we are running after widening phase.
412static RangeBoundary NarrowMax(const Range* range,
413 const Range* new_range,
416 const RangeBoundary new_max = Range::ConstantMax(new_range, size);
417 if (max.ConstantValue() < new_max.ConstantValue()) return range->max();
418
419 // TODO(vegorov): consider using positive infinity to indicate widened bound.
420 return range->max().IsMaximumOrAbove(size) ? new_range->max() : range->max();
421}
422
423char RangeAnalysis::OpPrefix(JoinOperator op) {
424 switch (op) {
425 case WIDEN:
426 return 'W';
427 case NARROW:
428 return 'N';
429 case NONE:
430 return 'I';
431 }
432 UNREACHABLE();
433 return ' ';
434}
435
437 ASSERT(phi->IsPhi());
438 if (phi->Type()->ToCid() == kSmiCid) {
440 } else if (phi->representation() == kUnboxedInt32) {
442 } else if (phi->Type()->IsInt()) {
444 } else {
445 UNREACHABLE();
447 }
448}
449
450bool RangeAnalysis::InferRange(JoinOperator op,
451 Definition* defn,
452 intptr_t iteration) {
453 Range range;
454 defn->InferRange(this, &range);
455
456 if (!Range::IsUnknown(&range)) {
457 if (!Range::IsUnknown(defn->range()) && defn->IsPhi()) {
459 if (op == WIDEN) {
460 range = Range(WidenMin(defn->range(), &range, size),
461 WidenMax(defn->range(), &range, size));
462 } else if (op == NARROW) {
463 range = Range(NarrowMin(defn->range(), &range, size),
464 NarrowMax(defn->range(), &range, size));
465 }
466 }
467
468 if (!range.Equals(defn->range())) {
469#ifndef PRODUCT
470 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
471 THR_Print("%c [%" Pd "] %s: %s => %s\n", OpPrefix(op), iteration,
472 defn->ToCString(), Range::ToCString(defn->range()),
473 Range::ToCString(&range));
474 }
475#endif // !PRODUCT
476 defn->set_range(range);
477 return true;
478 }
479 }
480
481 return false;
482}
483
// Appends to definitions_ (in reverse postorder) every phi and every SSA
// definition whose ssa_temp_index is a member of |set|.
void RangeAnalysis::CollectDefinitions(BitVector* set) {
  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();

    // Phis live on join entries; collect tracked ones first so they precede
    // the block's body definitions.
    JoinEntryInstr* join = block->AsJoinEntry();
    if (join != nullptr) {
      for (PhiIterator it(join); !it.Done(); it.Advance()) {
        PhiInstr* phi = it.Current();
        if (set->Contains(phi->ssa_temp_index())) {
          definitions_.Add(phi);
        }
      }
    }

    for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
      Definition* defn = it.Current()->AsDefinition();
      if ((defn != nullptr) && defn->HasSSATemp() &&
          set->Contains(defn->ssa_temp_index())) {
        definitions_.Add(defn);
      }
    }
  }
}
508
509void RangeAnalysis::Iterate(JoinOperator op, intptr_t max_iterations) {
510 // TODO(vegorov): switch to worklist if this becomes performance bottleneck.
511 intptr_t iteration = 0;
512 bool changed;
513 do {
514 changed = false;
515 for (intptr_t i = 0; i < definitions_.length(); i++) {
516 Definition* defn = definitions_[i];
517 if (InferRange(op, defn, iteration)) {
518 changed = true;
519 }
520 }
521
522 iteration++;
523 } while (changed && (iteration < max_iterations));
524}
525
// Drives range inference to a fix-point: builds the set of tracked SSA
// indices, collects all tracked definitions in reverse postorder, then runs
// plain inference, widening, and narrowing passes in sequence.
void RangeAnalysis::InferRanges() {
  Zone* zone = flow_graph_->zone();
  // Initialize bitvector for quick filtering of int values.
  BitVector* set =
      new (zone) BitVector(zone, flow_graph_->current_ssa_temp_index());
  for (intptr_t i = 0; i < values_.length(); i++) {
    set->Add(values_[i]->ssa_temp_index());
  }
  for (intptr_t i = 0; i < constraints_.length(); i++) {
    set->Add(constraints_[i]->ssa_temp_index());
  }

  // Collect integer definitions (including constraints) in the reverse
  // postorder. This improves convergence speed compared to iterating
  // values_ and constraints_ array separately.
  auto graph_entry = flow_graph_->graph_entry();
  const auto& initial = *graph_entry->initial_definitions();
  for (intptr_t i = 0; i < initial.length(); ++i) {
    Definition* definition = initial[i];
    if (set->Contains(definition->ssa_temp_index())) {
      definitions_.Add(definition);
    }
  }

  // Initial definitions on function entries are processed before block
  // bodies for the same convergence reason.
  for (intptr_t i = 0; i < graph_entry->SuccessorCount(); ++i) {
    auto successor = graph_entry->SuccessorAt(i);
    if (auto function_entry = successor->AsFunctionEntry()) {
      const auto& initial = *function_entry->initial_definitions();
      for (intptr_t j = 0; j < initial.length(); ++j) {
        Definition* definition = initial[j];
        if (set->Contains(definition->ssa_temp_index())) {
          definitions_.Add(definition);
        }
      }
    }
  }

  CollectDefinitions(set);

  // Perform an iteration of range inference just propagating ranges
  // through the graph as-is without applying widening or narrowing.
  // This helps to improve precision of initial bounds.
  // We are doing 2 iterations to hit common cases where phi range
  // stabilizes quickly and yields a better precision than after
  // widening and narrowing.
  Iterate(NONE, 2);

  // Perform fix-point iteration of range inference applying widening
  // operator to phis to ensure fast convergence.
  // Widening simply maps growing bounds to the respective range bound.
  Iterate(WIDEN, kMaxInt32);

  // Perform fix-point iteration of range inference applying narrowing
  // to phis to compute more accurate range.
  // Narrowing only improves those boundaries that were widened up to
  // range boundary and leaves other boundaries intact.
  Iterate(NARROW, kMaxInt32);
}
584
586 if (!Range::IsUnknown(defn->range())) {
587 return;
588 }
589
590 if (!IsIntegerDefinition(defn)) {
591 return;
592 }
593
594 for (intptr_t i = 0; i < defn->InputCount(); i++) {
595 Definition* input_defn = defn->InputAt(i)->definition();
596 if (!input_defn->HasSSATemp() || input_defn->IsConstant()) {
597 AssignRangesRecursively(input_defn);
598 }
599 }
600
601 Range new_range;
602 defn->InferRange(this, &new_range);
603 if (!Range::IsUnknown(&new_range)) {
604 defn->set_range(new_range);
605 }
606}
607
608// Scheduler is a helper class that inserts floating control-flow less
609// subgraphs into the flow graph.
610// It always attempts to schedule instructions into the loop preheader in the
611// way similar to LICM optimization pass.
612// Scheduler supports rollback - that is it keeps track of instructions it
613// schedules and can remove all instructions it inserted from the graph.
615 public:
616 explicit Scheduler(FlowGraph* flow_graph)
617 : flow_graph_(flow_graph),
618 loop_headers_(flow_graph->GetLoopHierarchy().headers()),
619 pre_headers_(loop_headers_.length()) {
620 for (intptr_t i = 0; i < loop_headers_.length(); i++) {
621 pre_headers_.Add(loop_headers_[i]->ImmediateDominator());
622 }
623 }
624
625 // Clear the list of emitted instructions.
626 void Start() { emitted_.Clear(); }
627
628 // Given the floating instruction attempt to schedule it into one of the
629 // loop preheaders that dominates given post_dominator instruction.
630 // Some of the instruction inputs can potentially be unscheduled as well.
631 // Returns nullptr is the scheduling fails (e.g. inputs are not invariant for
632 // any loop containing post_dominator).
633 // Resulting schedule should be equivalent to one obtained by inserting
634 // instructions right before post_dominator and running CSE and LICM passes.
635 template <typename T>
636 T* Emit(T* instruction, Instruction* post_dominator) {
637 return static_cast<T*>(EmitRecursively(instruction, post_dominator));
638 }
639
640 // Undo all insertions recorded in the list of emitted instructions.
641 void Rollback() {
642 for (intptr_t i = emitted_.length() - 1; i >= 0; i--) {
643 emitted_[i]->RemoveFromGraph();
644 }
645 emitted_.Clear();
646 }
647
648 private:
649 Instruction* EmitRecursively(Instruction* instruction, Instruction* sink) {
650 // Schedule all unscheduled inputs and unwrap all constrained inputs.
651 for (intptr_t i = 0; i < instruction->InputCount(); i++) {
652 Definition* defn = instruction->InputAt(i)->definition();
653
654 // Instruction is not in the graph yet which means that none of
655 // its input uses should be recorded at defn's use chains.
656 // Verify this assumption to ensure that we are not going to
657 // leave use-lists in an inconsistent state when we start
658 // rewriting inputs via set_definition.
659 ASSERT(instruction->InputAt(i)->IsSingleUse() &&
660 !defn->HasOnlyInputUse(instruction->InputAt(i)));
661
662 if (!defn->HasSSATemp()) {
663 Definition* scheduled = Emit(defn, sink);
664 if (scheduled == nullptr) {
665 return nullptr;
666 }
667 instruction->InputAt(i)->set_definition(scheduled);
668 } else if (defn->IsConstraint()) {
669 instruction->InputAt(i)->set_definition(UnwrapConstraint(defn));
670 }
671 }
672
673 // Attempt to find equivalent instruction that was already scheduled.
674 // If the instruction is still in the graph (it could have been
675 // un-scheduled by a rollback action) and it dominates the sink - use it.
676 Instruction* emitted = map_.LookupValue(instruction);
677 if (emitted != nullptr && !emitted->WasEliminated() &&
678 sink->IsDominatedBy(emitted)) {
679 return emitted;
680 }
681
682 // Attempt to find suitable pre-header. Iterate loop headers backwards to
683 // attempt scheduling into the outermost loop first.
684 for (intptr_t i = loop_headers_.length() - 1; i >= 0; i--) {
685 BlockEntryInstr* header = loop_headers_[i];
686 BlockEntryInstr* pre_header = pre_headers_[i];
687
688 if (pre_header == nullptr) {
689 continue;
690 }
691
692 if (!sink->IsDominatedBy(header)) {
693 continue;
694 }
695
696 Instruction* last = pre_header->last_instruction();
697
698 bool inputs_are_invariant = true;
699 for (intptr_t j = 0; j < instruction->InputCount(); j++) {
700 Definition* defn = instruction->InputAt(j)->definition();
701 if (!last->IsDominatedBy(defn)) {
702 inputs_are_invariant = false;
703 break;
704 }
705 }
706
707 if (inputs_are_invariant) {
708 EmitTo(pre_header, instruction);
709 return instruction;
710 }
711 }
712
713 return nullptr;
714 }
715
716 void EmitTo(BlockEntryInstr* block, Instruction* instr) {
717 GotoInstr* last = block->last_instruction()->AsGoto();
718 flow_graph_->InsertBefore(
719 last, instr, last->env(),
720 instr->IsDefinition() ? FlowGraph::kValue : FlowGraph::kEffect);
721 instr->CopyDeoptIdFrom(*last);
722
723 map_.Insert(instr);
724 emitted_.Add(instr);
725 }
726
727 FlowGraph* flow_graph_;
728 PointerSet<Instruction> map_;
729 const ZoneGrowableArray<BlockEntryInstr*>& loop_headers_;
730 GrowableArray<BlockEntryInstr*> pre_headers_;
731 GrowableArray<Instruction*> emitted_;
732};
733
734// If bounds check 0 <= index < length is not redundant we attempt to
735// replace it with a sequence of checks that guarantee
736//
737// 0 <= LowerBound(index) < UpperBound(index) < length
738//
739// and hoist all of those checks out of the enclosing loop.
740//
741// Upper/Lower bounds are symbolic arithmetic expressions with +, -, *
742// operations.
744 public:
  // Sets up generalization over |flow_graph| using the ranges computed by
  // |range_analysis|; the scheduler hoists generated checks into preheaders.
  BoundsCheckGeneralizer(RangeAnalysis* range_analysis, FlowGraph* flow_graph)
      : range_analysis_(range_analysis),
        flow_graph_(flow_graph),
        scheduler_(flow_graph) {}
749
751 Definition* upper_bound =
752 ConstructUpperBound(check->index()->definition(), check);
753 if (upper_bound == UnwrapConstraint(check->index()->definition())) {
754 // Unable to construct upper bound for the index.
755 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
756 THR_Print("Failed to construct upper bound for %s index\n",
757 check->ToCString());
758 }
759 return;
760 }
761
762 // Re-associate subexpressions inside upper_bound to collect all constants
763 // together. This will expose more redundancies when we are going to emit
764 // upper bound through scheduler.
765 if (!Simplify(&upper_bound, nullptr)) {
766 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
767 THR_Print("Failed to simplify upper bound for %s index\n",
768 check->ToCString());
769 }
770 return;
771 }
772 upper_bound = ApplyConstraints(upper_bound, check);
773 range_analysis_->AssignRangesRecursively(upper_bound);
774
775 // We are going to constrain any symbols participating in + and * operations
776 // to guarantee that they are positive. Find all symbols that need
777 // constraining. If there is a subtraction subexpression with non-positive
778 // range give up on generalization for simplicity.
779 GrowableArray<Definition*> non_positive_symbols;
780 if (!FindNonPositiveSymbols(&non_positive_symbols, upper_bound)) {
781#ifndef PRODUCT
782 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
783 THR_Print(
784 "Failed to generalize %s index to %s"
785 " (can't ensure positivity)\n",
786 check->ToCString(), IndexBoundToCString(upper_bound));
787 }
788#endif // !PRODUCT
789 return;
790 }
791
792 // Check that we can statically prove that lower bound of the index is
793 // non-negative under the assumption that all potentially non-positive
794 // symbols are positive.
795 GrowableArray<ConstraintInstr*> positive_constraints(
796 non_positive_symbols.length());
797 Range* positive_range =
800 for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
801 Definition* symbol = non_positive_symbols[i];
802 positive_constraints.Add(
803 new ConstraintInstr(new Value(symbol), positive_range));
804 }
805
806 Definition* lower_bound =
807 ConstructLowerBound(check->index()->definition(), check);
808 // No need to simplify lower bound before applying constraints as
809 // we are not going to emit it.
810 lower_bound = ApplyConstraints(lower_bound, check, &positive_constraints);
811 range_analysis_->AssignRangesRecursively(lower_bound);
812
813 if (!RangeUtils::IsPositive(lower_bound->range())) {
814// Can't prove that lower bound is positive even with additional checks
815// against potentially non-positive symbols. Give up.
816#ifndef PRODUCT
817 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
818 THR_Print(
819 "Failed to generalize %s index to %s"
820 " (lower bound is not positive)\n",
821 check->ToCString(), IndexBoundToCString(upper_bound));
822 }
823#endif // !PRODUCT
824 return;
825 }
826
827#ifndef PRODUCT
828 if (FLAG_support_il_printer && FLAG_trace_range_analysis) {
829 THR_Print("For %s computed index bounds [%s, %s]\n", check->ToCString(),
830 IndexBoundToCString(lower_bound),
831 IndexBoundToCString(upper_bound));
832 }
833#endif // !PRODUCT
834
835 // At this point we know that 0 <= index < UpperBound(index) under
836 // certain preconditions. Start by emitting this preconditions.
837 scheduler_.Start();
838
839 // AOT should only see non-deopting GenericCheckBound.
840 ASSERT(!CompilerState::Current().is_aot());
841
842 ConstantInstr* max_smi = flow_graph_->GetConstant(
844 for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
845 CheckArrayBoundInstr* precondition = new CheckArrayBoundInstr(
846 new Value(max_smi), new Value(non_positive_symbols[i]),
848 precondition->mark_generalized();
849 precondition = scheduler_.Emit(precondition, check);
850 if (precondition == nullptr) {
851 if (FLAG_trace_range_analysis) {
852 THR_Print(" => failed to insert positivity constraint\n");
853 }
854 scheduler_.Rollback();
855 return;
856 }
857 }
858
860 new Value(UnwrapConstraint(check->length()->definition())),
861 new Value(upper_bound), DeoptId::kNone);
862 new_check->mark_generalized();
863 if (new_check->IsRedundant()) {
864 if (FLAG_trace_range_analysis) {
865 THR_Print(" => generalized check is redundant\n");
866 }
868 return;
869 }
870
871 new_check = scheduler_.Emit(new_check, check);
872 if (new_check != nullptr) {
873 if (FLAG_trace_range_analysis) {
874 THR_Print(" => generalized check was hoisted into B%" Pd "\n",
875 new_check->GetBlock()->block_id());
876 }
878 } else {
879 if (FLAG_trace_range_analysis) {
880 THR_Print(" => generalized check can't be hoisted\n");
881 }
882 scheduler_.Rollback();
883 }
884 }
885
887 BinarySmiOpInstr* binary_op = check->index()->definition()->AsBinarySmiOp();
888 if (binary_op != nullptr) {
889 binary_op->set_can_overflow(false);
890 }
891 check->ReplaceUsesWith(check->index()->definition());
892 check->RemoveFromGraph();
893 }
894
895 private:
896 BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
897 Definition* left,
898 Definition* right) {
899 return new BinarySmiOpInstr(op_kind, new Value(left), new Value(right),
901 }
902
  // Convenience overload: wraps |right| into a Smi constant and delegates to
  // the Definition* overload.
  BinarySmiOpInstr* MakeBinaryOp(Token::Kind op_kind,
                                 Definition* left,
                                 intptr_t right) {
    ConstantInstr* constant_right =
        flow_graph_->GetConstant(Smi::Handle(Smi::New(right)));
    return MakeBinaryOp(op_kind, left, constant_right);
  }
910
911 Definition* RangeBoundaryToDefinition(const RangeBoundary& bound) {
912 Definition* symbol = UnwrapConstraint(bound.symbol());
913 if (bound.offset() == 0) {
914 return symbol;
915 } else {
916 return MakeBinaryOp(Token::kADD, symbol, bound.offset());
917 }
918 }
919
920 typedef Definition* (BoundsCheckGeneralizer::* PhiBoundFunc)(PhiInstr*,
921 LoopInfo*,
922 InductionVar*,
923 Instruction*);
924
925 // Construct symbolic lower bound for a value at the given point.
  // Construct symbolic lower bound for a value at the given point.
  Definition* ConstructLowerBound(Definition* value, Instruction* point) {
    // Delegates to ConstructBound with the lower-bound induction handler.
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableLowerBound,
                          value, point);
  }
930
931 // Construct symbolic upper bound for a value at the given point.
  // Construct symbolic upper bound for a value at the given point.
  Definition* ConstructUpperBound(Definition* value, Instruction* point) {
    // Delegates to ConstructBound with the upper-bound induction handler.
    return ConstructBound(&BoundsCheckGeneralizer::InductionVariableUpperBound,
                          value, point);
  }
936
937 // Helper methods to implement "older" business logic.
938 // TODO(ajcbik): generalize with new induction variable information
939
940 // Only accept loops with a smi constraint on smi induction.
  // Returns the loop enclosing |phi| when that loop has a smi constraint on
  // smi induction: a limit constraint of the shape [MinSmi, symbol + offset]
  // whose constrained definition is a unit-stride smi induction variable.
  // Returns nullptr otherwise.
  LoopInfo* GetSmiBoundedLoop(PhiInstr* phi) {
    LoopInfo* loop = phi->GetBlock()->loop_info();
    if (loop == nullptr) {
      return nullptr;
    }
    ConstraintInstr* limit = loop->limit();
    if (limit == nullptr) {
      return nullptr;
    }
    Definition* def = UnwrapConstraint(limit->value()->definition());
    Range* constraining_range = limit->constraint();
    // Require: constrained def is smi induction, the constraint only bounds
    // from above by a symbol, and that symbol dominates the def.
    if (GetSmiInduction(loop, def) != nullptr &&
        constraining_range->min().Equals(RangeBoundary::MinSmi()) &&
        constraining_range->max().IsSymbol() &&
        def->IsDominatedBy(constraining_range->max().symbol())) {
      return loop;
    }
    return nullptr;
  }
960
961 // Returns true if x is invariant and is either based on a Smi definition
962 // or is a Smi constant.
963 static bool IsSmiInvariant(const InductionVar* x) {
964 return InductionVar::IsInvariant(x) && Smi::IsValid(x->offset()) &&
965 Smi::IsValid(x->mult()) &&
966 (x->mult() == 0 || x->def()->Type()->ToCid() == kSmiCid);
967 }
968
969 // Only accept smi linear induction with unit stride.
  // Only accept smi linear induction with unit stride.
  // Returns the induction descriptor for def in loop when def is a smi,
  // linear with stride exactly 1, and its initial value is a smi invariant;
  // returns nullptr otherwise (including when loop itself is null).
  InductionVar* GetSmiInduction(LoopInfo* loop, Definition* def) {
    if (loop != nullptr && def->Type()->ToCid() == kSmiCid) {
      InductionVar* induc = loop->LookupInduction(def);
      int64_t stride;
      if (induc != nullptr && InductionVar::IsLinear(induc, &stride) &&
          stride == 1 && IsSmiInvariant(induc->initial())) {
        return induc;
      }
    }
    return nullptr;
  }
981
982 // Reconstruct invariant.
  // Reconstruct invariant (mult * def + offset) as IL:
  // a plain constant when mult == 0, otherwise the defining value with
  // multiplication/addition applied only when they are non-trivial.
  Definition* GenerateInvariant(InductionVar* induc) {
    Definition* res = nullptr;
    if (induc->mult() == 0) {
      // Pure constant: materialize offset as a Smi constant.
      res =
          flow_graph_->GetConstant(Smi::ZoneHandle(Smi::New(induc->offset())));
    } else {
      res = induc->def();
      if (induc->mult() != 1) {
        res = MakeBinaryOp(Token::kMUL, res, induc->mult());
      }
      if (induc->offset() != 0) {
        res = MakeBinaryOp(Token::kADD, res, induc->offset());
      }
    }
    return res;
  }
999
1000 // Construct symbolic bound for a value at the given point:
1001 //
1002 // 1. if value is an induction variable use its bounds;
1003 // 2. if value is addition or multiplication construct bounds for left
1004 // and right hand sides separately and use addition/multiplication
1005 // of bounds as a bound (addition and multiplication are monotone
1006 // operations for both operands);
1007 // 3. if value is a substraction then construct bound for the left hand
1008 // side and use substraction of the right hand side from the left hand
1009 // side bound as a bound for an expression (substraction is monotone for
1010 // the left hand side operand).
1011 //
1012 Definition* ConstructBound(PhiBoundFunc phi_bound_func,
1013 Definition* value,
1014 Instruction* point) {
1016 if (value->IsPhi()) {
1017 PhiInstr* phi = value->AsPhi();
1018 LoopInfo* loop = GetSmiBoundedLoop(phi);
1019 InductionVar* induc = GetSmiInduction(loop, phi);
1020 if (induc != nullptr) {
1021 return (this->*phi_bound_func)(phi, loop, induc, point);
1022 }
1023 } else if (value->IsBinarySmiOp()) {
1024 BinarySmiOpInstr* bin_op = value->AsBinarySmiOp();
1025 if ((bin_op->op_kind() == Token::kADD) ||
1026 (bin_op->op_kind() == Token::kMUL) ||
1027 (bin_op->op_kind() == Token::kSUB)) {
1028 Definition* new_left =
1029 ConstructBound(phi_bound_func, bin_op->left()->definition(), point);
1030 Definition* new_right =
1031 (bin_op->op_kind() != Token::kSUB)
1032 ? ConstructBound(phi_bound_func, bin_op->right()->definition(),
1033 point)
1034 : UnwrapConstraint(bin_op->right()->definition());
1035
1036 if ((new_left != UnwrapConstraint(bin_op->left()->definition())) ||
1037 (new_right != UnwrapConstraint(bin_op->right()->definition()))) {
1038 return MakeBinaryOp(bin_op->op_kind(), new_left, new_right);
1039 }
1040 }
1041 }
1042 return value;
1043 }
1044
  // Symbolic upper bound for a unit-stride smi induction phi, valid at
  // points dominated by the loop's limit constraint. Falls back to the phi
  // itself when the limit does not dominate the point.
  Definition* InductionVariableUpperBound(PhiInstr* phi,
                                          LoopInfo* loop,
                                          InductionVar* induc,
                                          Instruction* point) {
    // Test if limit dominates given point.
    ConstraintInstr* limit = loop->limit();
    if (!point->IsDominatedBy(limit)) {
      return phi;
    }
    // Decide between direct or indirect bound.
    Definition* bounded_def = UnwrapConstraint(limit->value()->definition());
    if (bounded_def == phi) {
      // Given a smi bounded loop with smi induction variable
      //
      //          x <- phi(x0, x + 1)
      //
      // and a constraint x <= M that dominates the given
      // point we conclude that M is an upper bound for x.
      return RangeBoundaryToDefinition(limit->constraint()->max());
    } else {
      // Given a smi bounded loop with two smi induction variables
      //
      //          x <- phi(x0, x + 1)
      //          y <- phi(y0, y + 1)
      //
      // and a constraint x <= M that dominates the given
      // point we can conclude that
      //
      //          y <= y0 + (M - x0)
      //
      InductionVar* bounded_induc = GetSmiInduction(loop, bounded_def);
      Definition* x0 = GenerateInvariant(bounded_induc->initial());
      Definition* y0 = GenerateInvariant(induc->initial());
      Definition* m = RangeBoundaryToDefinition(limit->constraint()->max());
      // loop_length = UpperBound(M) - LowerBound(x0); both sides are
      // themselves bounded recursively at |point|.
      BinarySmiOpInstr* loop_length =
          MakeBinaryOp(Token::kSUB, ConstructUpperBound(m, point),
                       ConstructLowerBound(x0, point));
      return MakeBinaryOp(Token::kADD, ConstructUpperBound(y0, point),
                          loop_length);
    }
  }
1086
  // Symbolic lower bound for a unit-stride (monotonically increasing) smi
  // induction phi: its initial value.
  Definition* InductionVariableLowerBound(PhiInstr* phi,
                                          LoopInfo* loop,
                                          InductionVar* induc,
                                          Instruction* point) {
    // Given a smi bounded loop with smi induction variable
    //
    //          x <- phi(x0, x + 1)
    //
    // we can conclude that LowerBound(x) == x0.
    return ConstructLowerBound(GenerateInvariant(induc->initial()), point);
  }
1098
1099 // Try to re-associate binary operations in the floating DAG of operations
1100 // to collect all constants together, e.g. x + C0 + y + C1 is simplified into
1101 // x + y + (C0 + C1).
1102 bool Simplify(Definition** defn, intptr_t* constant) {
1103 if ((*defn)->IsBinarySmiOp()) {
1104 BinarySmiOpInstr* binary_op = (*defn)->AsBinarySmiOp();
1105 Definition* left = binary_op->left()->definition();
1106 Definition* right = binary_op->right()->definition();
1107
1108 intptr_t c = 0;
1109 if (binary_op->op_kind() == Token::kADD) {
1110 intptr_t left_const = 0;
1111 intptr_t right_const = 0;
1112 if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) {
1113 return false;
1114 }
1115
1116 c = left_const + right_const;
1117 if (Utils::WillAddOverflow(left_const, right_const) ||
1119 return false; // Abort.
1120 }
1121
1122 if (constant != nullptr) {
1123 *constant = c;
1124 }
1125
1126 if ((left == nullptr) && (right == nullptr)) {
1127 if (constant != nullptr) {
1128 *defn = nullptr;
1129 } else {
1130 *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
1131 }
1132 return true;
1133 }
1134
1135 if (left == nullptr) {
1136 if ((constant != nullptr) || (c == 0)) {
1137 *defn = right;
1138 return true;
1139 } else {
1140 left = right;
1141 right = nullptr;
1142 }
1143 }
1144
1145 if (right == nullptr) {
1146 if ((constant != nullptr) || (c == 0)) {
1147 *defn = left;
1148 return true;
1149 } else {
1150 right = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
1151 c = 0;
1152 }
1153 }
1154 } else if (binary_op->op_kind() == Token::kSUB) {
1155 intptr_t left_const = 0;
1156 intptr_t right_const = 0;
1157 if (!Simplify(&left, &left_const) || !Simplify(&right, &right_const)) {
1158 return false;
1159 }
1160
1161 c = (left_const - right_const);
1162 if (Utils::WillSubOverflow(left_const, right_const) ||
1164 return false; // Abort.
1165 }
1166
1167 if (constant != nullptr) {
1168 *constant = c;
1169 }
1170
1171 if ((left == nullptr) && (right == nullptr)) {
1172 if (constant != nullptr) {
1173 *defn = nullptr;
1174 } else {
1175 *defn = flow_graph_->GetConstant(Smi::Handle(Smi::New(c)));
1176 }
1177 return true;
1178 }
1179
1180 if (left == nullptr) {
1181 left = flow_graph_->GetConstant(Object::smi_zero());
1182 }
1183
1184 if (right == nullptr) {
1185 if ((constant != nullptr) || (c == 0)) {
1186 *defn = left;
1187 return true;
1188 } else {
1189 right = flow_graph_->GetConstant(Smi::Handle(Smi::New(-c)));
1190 c = 0;
1191 }
1192 }
1193 } else if (binary_op->op_kind() == Token::kMUL) {
1194 if (!Simplify(&left, nullptr) || !Simplify(&right, nullptr)) {
1195 return false;
1196 }
1197 } else {
1198 // Don't attempt to simplify any other binary operation.
1199 return true;
1200 }
1201
1202 ASSERT(left != nullptr);
1203 ASSERT(right != nullptr);
1204
1205 const bool left_changed = (left != binary_op->left()->definition());
1206 const bool right_changed = (right != binary_op->right()->definition());
1207 if (left_changed || right_changed) {
1208 if (!(*defn)->HasSSATemp()) {
1209 if (left_changed) binary_op->left()->set_definition(left);
1210 if (right_changed) binary_op->right()->set_definition(right);
1211 *defn = binary_op;
1212 } else {
1213 *defn = MakeBinaryOp(binary_op->op_kind(), UnwrapConstraint(left),
1214 UnwrapConstraint(right));
1215 }
1216 }
1217
1218 if ((c != 0) && (constant == nullptr)) {
1219 *defn = MakeBinaryOp(Token::kADD, *defn, c);
1220 }
1221 } else if ((*defn)->IsConstant()) {
1222 ConstantInstr* constant_defn = (*defn)->AsConstant();
1223 if ((constant != nullptr) && constant_defn->IsSmi()) {
1224 *defn = nullptr;
1225 *constant = Smi::Cast(constant_defn->value()).Value();
1226 }
1227 }
1228
1229 return true;
1230 }
1231
1232 // If possible find a set of symbols that need to be non-negative to
1233 // guarantee that expression as whole is non-negative.
  // If possible find a set of symbols that need to be non-negative to
  // guarantee that expression as whole is non-negative.
  // Returns false when no such set exists (e.g. the expression contains a
  // subtraction that is not already provably positive).
  bool FindNonPositiveSymbols(GrowableArray<Definition*>* symbols,
                              Definition* defn) {
    if (defn->IsConstant()) {
      // A constant contributes nothing to collect; it only needs to be a
      // non-negative smi itself.
      const Object& value = defn->AsConstant()->value();
      return compiler::target::IsSmi(value) && (Smi::Cast(value).Value() >= 0);
    } else if (defn->HasSSATemp()) {
      // Leaf SSA value: record it unless its range already proves >= 0.
      if (!RangeUtils::IsPositive(defn->range())) {
        symbols->Add(defn);
      }
      return true;
    } else if (defn->IsBinarySmiOp()) {
      BinarySmiOpInstr* binary_op = defn->AsBinarySmiOp();
      ASSERT((binary_op->op_kind() == Token::kADD) ||
             (binary_op->op_kind() == Token::kSUB) ||
             (binary_op->op_kind() == Token::kMUL));

      if (RangeUtils::IsPositive(defn->range())) {
        // We can statically prove that this subexpression is always positive.
        // No need to inspect its subexpressions.
        return true;
      }

      if (binary_op->op_kind() == Token::kSUB) {
        // For addition and multiplication it's enough to ensure that
        // lhs and rhs are positive to guarantee that defn as whole is
        // positive. This does not work for substraction so just give up.
        return false;
      }

      return FindNonPositiveSymbols(symbols, binary_op->left()->definition()) &&
             FindNonPositiveSymbols(symbols, binary_op->right()->definition());
    }
    UNREACHABLE();
    return false;
  }
1269
1270 // Find innermost constraint for the given definition dominating given
1271 // instruction.
  // Find innermost constraint for the given definition dominating given
  // instruction. Recurses through constraints-on-constraints; returns defn
  // itself when no dominating constraint use exists.
  static Definition* FindInnermostConstraint(Definition* defn,
                                             Instruction* post_dominator) {
    for (Value* use = defn->input_use_list(); use != nullptr;
         use = use->next_use()) {
      ConstraintInstr* constraint = use->instruction()->AsConstraint();
      if ((constraint != nullptr) &&
          post_dominator->IsDominatedBy(constraint)) {
        return FindInnermostConstraint(constraint, post_dominator);
      }
    }
    return defn;
  }
1284
1285 // Replace symbolic parts of the boundary with respective constraints
1286 // that hold at the given point in the flow graph signified by
1287 // post_dominator.
1288 // Constraints array allows to provide a set of additional floating
1289 // constraints that were not inserted into the graph.
  // Replace symbolic parts of the boundary with respective constraints
  // that hold at the given point in the flow graph signified by
  // post_dominator.
  // Constraints array allows to provide a set of additional floating
  // constraints that were not inserted into the graph.
  static Definition* ApplyConstraints(
      Definition* defn,
      Instruction* post_dominator,
      GrowableArray<ConstraintInstr*>* constraints = nullptr) {
    if (defn->HasSSATemp()) {
      // Leaf value: prefer an in-graph dominating constraint, then check
      // the floating constraints for an exact match.
      defn = FindInnermostConstraint(defn, post_dominator);
      if (constraints != nullptr) {
        for (intptr_t i = 0; i < constraints->length(); i++) {
          ConstraintInstr* constraint = (*constraints)[i];
          if (constraint->value()->definition() == defn) {
            return constraint;
          }
        }
      }
      return defn;
    }

    // Floating expression: constrain each input in place, recursively.
    for (intptr_t i = 0; i < defn->InputCount(); i++) {
      defn->InputAt(i)->set_definition(ApplyConstraints(
          defn->InputAt(i)->definition(), post_dominator, constraints));
    }

    return defn;
  }
1314
1315#ifndef PRODUCT
  // Debug printer: renders an index-bound expression tree as a fully
  // parenthesized infix string, with each node's computed range appended.
  static void PrettyPrintIndexBoundRecursively(BaseTextBuffer* f,
                                               Definition* index_bound) {
    BinarySmiOpInstr* binary_op = index_bound->AsBinarySmiOp();
    if (binary_op != nullptr) {
      f->AddString("(");
      PrettyPrintIndexBoundRecursively(f, binary_op->left()->definition());
      f->Printf(" %s ", Token::Str(binary_op->op_kind()));
      PrettyPrintIndexBoundRecursively(f, binary_op->right()->definition());
      f->AddString(")");
    } else if (index_bound->IsConstant()) {
      f->Printf("%" Pd "",
                Smi::Cast(index_bound->AsConstant()->value()).Value());
    } else {
      f->Printf("v%" Pd "", index_bound->ssa_temp_index());
    }
    f->Printf(" {%s}", Range::ToCString(index_bound->range()));
  }
1333
1334 static const char* IndexBoundToCString(Definition* index_bound) {
1335 char buffer[1024];
1336 BufferFormatter f(buffer, sizeof(buffer));
1337 PrettyPrintIndexBoundRecursively(&f, index_bound);
1339 }
1340#endif // !PRODUCT
1341
  // Owning range analysis pass (source of computed ranges/constraints).
  RangeAnalysis* range_analysis_;
  // Graph being optimized; used to materialize constants and new IL.
  FlowGraph* flow_graph_;
  // Scheduler used by the generalization machinery (its use is in code not
  // visible in this chunk — presumably hoisting generalized checks).
  Scheduler scheduler_;
};
1346
1347void RangeAnalysis::EliminateRedundantBoundsChecks() {
1348 if (FLAG_array_bounds_check_elimination) {
1349 const Function& function = flow_graph_->function();
1350 // Generalization only if we have not deoptimized on a generalized
1351 // check earlier and we are not compiling precompiled code
1352 // (no optimistic hoisting of checks possible)
1353 const bool try_generalization =
1355 !function.ProhibitsBoundsCheckGeneralization();
1356 BoundsCheckGeneralizer generalizer(this, flow_graph_);
1357 for (CheckBoundBaseInstr* check : bounds_checks_) {
1358 if (check->IsRedundant(/*use_loops=*/true)) {
1359 check->ReplaceUsesWith(check->index()->definition());
1360 check->RemoveFromGraph();
1361 } else if (try_generalization) {
1362 if (auto jit_check = check->AsCheckArrayBound()) {
1363 generalizer.TryGeneralize(jit_check);
1364 }
1365 }
1366 }
1367 }
1368}
1369
// Folds branches whose constraint has an unknown (empty) range: such a
// constraint proves its target edge can never be taken, so the branch is
// redirected to the other successor.
void RangeAnalysis::MarkUnreachableBlocks() {
  for (intptr_t i = 0; i < constraints_.length(); i++) {
    if (Range::IsUnknown(constraints_[i]->range())) {
      TargetEntryInstr* target = constraints_[i]->target();
      if (target == nullptr) {
        // TODO(vegorov): replace Constraint with an unconditional
        // deoptimization and kill all dominated dead code.
        continue;
      }

      BranchInstr* branch =
          target->PredecessorAt(0)->last_instruction()->AsBranch();
      if (target == branch->true_successor()) {
        // True unreachable.
        if (FLAG_trace_constant_propagation && flow_graph_->should_print()) {
          THR_Print("Range analysis: True unreachable (B%" Pd ")\n",
                    branch->true_successor()->block_id());
        }
        branch->set_constant_target(branch->false_successor());
      } else {
        ASSERT(target == branch->false_successor());
        // False unreachable.
        if (FLAG_trace_constant_propagation && flow_graph_->should_print()) {
          THR_Print("Range analysis: False unreachable (B%" Pd ")\n",
                    branch->false_successor()->block_id());
        }
        branch->set_constant_target(branch->true_successor());
      }
    }
  }
}
1401
1402void RangeAnalysis::RemoveConstraints() {
1403 for (intptr_t i = 0; i < constraints_.length(); i++) {
1404 Definition* def = constraints_[i]->value()->definition();
1405 // Some constraints might be constraining constraints. Unwind the chain of
1406 // constraints until we reach the actual definition.
1407 while (def->IsConstraint()) {
1408 def = def->AsConstraint()->value()->definition();
1409 }
1410 constraints_[i]->ReplaceUsesWith(def);
1411 constraints_[i]->RemoveFromGraph();
1412 }
1413}
1414
1417 RangeUtils::Fits(int64_op->left()->definition()->range(),
1419 RangeUtils::Fits(int64_op->right()->definition()->range(),
1421 BinaryInt32OpInstr::IsSupported(int64_op->op_kind(), int64_op->left(),
1422 int64_op->right())) {
1423 BinaryInt32OpInstr* int32_op = new BinaryInt32OpInstr(
1424 int64_op->op_kind(), int64_op->left()->CopyWithType(),
1425 int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
1426 int32_op->set_range(*int64_op->range());
1427 int32_op->set_can_overflow(false);
1428 int64_op->ReplaceWith(int32_op, nullptr);
1429 }
1430}
1431
1434 RangeUtils::Fits(int64_op->left()->definition()->range(),
1436 RangeUtils::Fits(int64_op->right()->definition()->range(),
1438 BinaryInt32OpInstr::IsSupported(int64_op->op_kind(), int64_op->left(),
1439 int64_op->right())) {
1440 BinaryInt32OpInstr* int32_op = new BinaryInt32OpInstr(
1441 int64_op->op_kind(), int64_op->left()->CopyWithType(),
1442 int64_op->right()->CopyWithType(), int64_op->DeoptimizationTarget());
1443 int32_op->set_range(*int64_op->range());
1444 int32_op->set_can_overflow(false);
1445 int64_op->ReplaceWith(int32_op, nullptr);
1446 }
1447}
1448
1449void RangeAnalysis::NarrowMintToInt32() {
1450 for (intptr_t i = 0; i < binary_int64_ops_.length(); i++) {
1451 NarrowBinaryInt64Op(binary_int64_ops_[i]);
1452 }
1453
1454 for (intptr_t i = 0; i < shift_int64_ops_.length(); i++) {
1455 NarrowShiftInt64Op(shift_int64_ops_[i]);
1456 }
1457}
1458
1460 : flow_graph_(flow_graph) {
1461 ASSERT(flow_graph_ != nullptr);
1462 zone_ = flow_graph_->zone();
1463 selected_uint32_defs_ =
1464 new (zone_) BitVector(zone_, flow_graph_->current_ssa_temp_index());
1465}
1466
1468 if (FLAG_trace_integer_ir_selection) {
1469 THR_Print("---- starting integer ir selection -------\n");
1470 }
1471 FindPotentialUint32Definitions();
1472 FindUint32NarrowingDefinitions();
1473 Propagate();
1474 ReplaceInstructions();
1475 if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
1476 THR_Print("---- after integer ir selection -------\n");
1477 FlowGraphPrinter printer(*flow_graph_);
1478 printer.PrintBlocks();
1479 }
1480}
1481
// Returns true for int64-producing instructions that have a uint32
// counterpart this pass knows how to substitute.
bool IntegerInstructionSelector::IsPotentialUint32Definition(Definition* def) {
  // TODO(johnmccutchan): Consider Smi operations, to avoid unnecessary tagging
  // & untagged of intermediate results.
  // TODO(johnmccutchan): Consider phis.
  return def->IsBoxInt64() || def->IsUnboxInt64() || def->IsShiftInt64Op() ||
         def->IsSpeculativeShiftInt64Op() ||
         (def->IsBinaryInt64Op() && BinaryUint32OpInstr::IsSupported(
                                        def->AsBinaryInt64Op()->op_kind())) ||
         (def->IsUnaryInt64Op() &&
          UnaryUint32OpInstr::IsSupported(def->AsUnaryInt64Op()->op_kind()));
}
1493
// Scans every instruction in reverse postorder and records each SSA
// definition that could be replaced by a uint32 equivalent.
void IntegerInstructionSelector::FindPotentialUint32Definitions() {
  if (FLAG_trace_integer_ir_selection) {
    THR_Print("++++ Finding potential Uint32 definitions:\n");
  }

  for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    BlockEntryInstr* block = block_it.Current();

    for (ForwardInstructionIterator instr_it(block); !instr_it.Done();
         instr_it.Advance()) {
      Instruction* current = instr_it.Current();
      Definition* defn = current->AsDefinition();
      // Only SSA definitions participate (they are tracked by temp index).
      if ((defn != nullptr) && defn->HasSSATemp()) {
        if (IsPotentialUint32Definition(defn)) {
          if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
            THR_Print("Adding %s\n", current->ToCString());
          }
          potential_uint32_defs_.Add(defn);
        }
      }
    }
  }
}
1518
1519// BinaryInt64Op masks and stores into unsigned typed arrays that truncate the
1520// value into a Uint32 range.
// BinaryInt64Op masks and stores into unsigned typed arrays that truncate the
// value into a Uint32 range.
// Currently only recognizes kBIT_AND whose computed range fits in
// [0, kMaxUint32]; these definitions seed the uint32 candidate set.
bool IntegerInstructionSelector::IsUint32NarrowingDefinition(Definition* def) {
  if (def->IsBinaryInt64Op()) {
    BinaryInt64OpInstr* op = def->AsBinaryInt64Op();
    // Must be a mask operation.
    if (op->op_kind() != Token::kBIT_AND) {
      return false;
    }
    Range* range = op->range();
    if ((range == nullptr) ||
        !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
      return false;
    }
    return true;
  }
  // TODO(johnmccutchan): Add typed array stores.
  return false;
}
1538
// Seeds the candidate bit vector with definitions that already provably
// narrow their value into the Uint32 range.
void IntegerInstructionSelector::FindUint32NarrowingDefinitions() {
  ASSERT(selected_uint32_defs_ != nullptr);
  if (FLAG_trace_integer_ir_selection) {
    THR_Print("++++ Selecting Uint32 definitions:\n");
    THR_Print("++++ Initial set:\n");
  }
  for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
    Definition* defn = potential_uint32_defs_[i];
    if (IsUint32NarrowingDefinition(defn)) {
      if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
        THR_Print("Adding %s\n", defn->ToCString());
      }
      selected_uint32_defs_->Add(defn->ssa_temp_index());
    }
  }
}
1555
// Returns true if every use in the given use list is itself a selected
// uint32 candidate, i.e. truncating this value to 32 bits is unobservable.
bool IntegerInstructionSelector::AllUsesAreUint32Narrowing(Value* list_head) {
  for (Value::Iterator it(list_head); !it.Done(); it.Advance()) {
    Value* use = it.Current();
    Definition* defn = use->instruction()->AsDefinition();
    if ((defn == nullptr) || !defn->HasSSATemp() ||
        !selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
      return false;
    }
    // Right-hand side operand of ShiftInt64Op is not narrowing (all its bits
    // should be taken into account).
    if (ShiftIntegerOpInstr* shift = defn->AsShiftIntegerOp()) {
      if (use == shift->right()) {
        return false;
      }
    }
  }
  return true;
}
1574
// Decides whether a potential definition may join the uint32 candidate
// set: boxes follow their input, right shifts require a Uint32-range
// input (high bits would otherwise be needed), and all uses must narrow.
bool IntegerInstructionSelector::CanBecomeUint32(Definition* def) {
  ASSERT(IsPotentialUint32Definition(def));
  if (def->IsBoxInt64()) {
    // If a BoxInt64's input is a candidate, the box is a candidate.
    Definition* box_input = def->AsBoxInt64()->value()->definition();
    return selected_uint32_defs_->Contains(box_input->ssa_temp_index());
  }
  // A right shift with an input outside of Uint32 range cannot be converted
  // because we need the high bits.
  if (def->IsShiftInt64Op() || def->IsSpeculativeShiftInt64Op()) {
    ShiftIntegerOpInstr* op = def->AsShiftIntegerOp();
    if ((op->op_kind() == Token::kSHR) || (op->op_kind() == Token::kUSHR)) {
      Definition* shift_input = op->left()->definition();
      ASSERT(shift_input != nullptr);
      Range* range = shift_input->range();
      if ((range == nullptr) ||
          !range->IsWithin(0, static_cast<int64_t>(kMaxUint32))) {
        return false;
      }
    }
  }
  if (!def->HasUses()) {
    // No uses, skip.
    return false;
  }
  // Both regular and environment uses must tolerate truncation.
  return AllUsesAreUint32Narrowing(def->input_use_list()) &&
         AllUsesAreUint32Narrowing(def->env_use_list());
}
1603
// Grows the uint32 candidate set to a fixed point: repeatedly admits any
// potential definition whose eligibility (CanBecomeUint32) is implied by
// the current set, until no iteration adds a member.
void IntegerInstructionSelector::Propagate() {
  ASSERT(selected_uint32_defs_ != nullptr);
  bool changed = true;
  intptr_t iteration = 0;
  while (changed) {
    if (FLAG_trace_integer_ir_selection) {
      THR_Print("+++ Iteration: %" Pd "\n", iteration++);
    }
    changed = false;
    for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
      Definition* defn = potential_uint32_defs_[i];
      if (selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
        // Already marked as a candidate, skip.
        continue;
      }
      if (defn->IsConstant()) {
        // Skip constants.
        continue;
      }
      if (CanBecomeUint32(defn)) {
        if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
          THR_Print("Adding %s\n", defn->ToCString());
        }
        // Found a new candidate.
        selected_uint32_defs_->Add(defn->ssa_temp_index());
        // Haven't reached fixed point yet.
        changed = true;
      }
    }
  }
  if (FLAG_trace_integer_ir_selection) {
    THR_Print("Reached fixed point\n");
  }
}
1638
1639Definition* IntegerInstructionSelector::ConstructReplacementFor(
1640 Definition* def) {
1641 // Should only see mint definitions.
1642 ASSERT(IsPotentialUint32Definition(def));
1643 // Should not see constant instructions.
1644 ASSERT(!def->IsConstant());
1645 if (def->IsBinaryIntegerOp()) {
1646 BinaryIntegerOpInstr* op = def->AsBinaryIntegerOp();
1647 Token::Kind op_kind = op->op_kind();
1648 Value* left = op->left()->CopyWithType();
1649 Value* right = op->right()->CopyWithType();
1650 intptr_t deopt_id = op->DeoptimizationTarget();
1652 kUnboxedUint32, op_kind, left, right, deopt_id,
1653 def->IsSpeculativeShiftInt64Op() ? Instruction::kGuardInputs
1655 } else if (def->IsBoxInt64()) {
1656 Value* value = def->AsBoxInt64()->value()->CopyWithType();
1657 return new (Z) BoxUint32Instr(value);
1658 } else if (def->IsUnboxInt64()) {
1659 UnboxInstr* unbox = def->AsUnboxInt64();
1660 Value* value = unbox->value()->CopyWithType();
1661 intptr_t deopt_id = unbox->DeoptimizationTarget();
1662 return new (Z)
1663 UnboxUint32Instr(value, deopt_id, def->SpeculativeModeOfInputs());
1664 } else if (def->IsUnaryInt64Op()) {
1665 UnaryInt64OpInstr* op = def->AsUnaryInt64Op();
1666 Token::Kind op_kind = op->op_kind();
1667 Value* value = op->value()->CopyWithType();
1668 intptr_t deopt_id = op->DeoptimizationTarget();
1669 return new (Z) UnaryUint32OpInstr(op_kind, value, deopt_id);
1670 }
1671 UNREACHABLE();
1672 return nullptr;
1673}
1674
1675void IntegerInstructionSelector::ReplaceInstructions() {
1676 if (FLAG_trace_integer_ir_selection) {
1677 THR_Print("++++ Replacing instructions:\n");
1678 }
1679 for (intptr_t i = 0; i < potential_uint32_defs_.length(); i++) {
1680 Definition* defn = potential_uint32_defs_[i];
1681 if (!selected_uint32_defs_->Contains(defn->ssa_temp_index())) {
1682 // Not a candidate.
1683 continue;
1684 }
1685 Definition* replacement = ConstructReplacementFor(defn);
1686 ASSERT(replacement != nullptr);
1687 if (!Range::IsUnknown(defn->range())) {
1688 if (defn->range()->IsPositive()) {
1689 replacement->set_range(*defn->range());
1690 } else {
1691 replacement->set_range(Range(RangeBoundary::FromConstant(0),
1693 }
1694 }
1695 if (FLAG_support_il_printer && FLAG_trace_integer_ir_selection) {
1696 THR_Print("Replacing %s with %s\n", defn->ToCString(),
1697 replacement->ToCString());
1698 }
1699 defn->ReplaceWith(replacement, nullptr);
1700 }
1701}
1702
1704 if (defn->IsConstant() && defn->AsConstant()->IsSmi()) {
1705 return FromConstant(Smi::Cast(defn->AsConstant()->value()).Value() + offs);
1706 }
1708 return RangeBoundary(kSymbol, reinterpret_cast<intptr_t>(defn), offs);
1709}
1710
1712 if (IsConstant()) return *this;
1713 return Add(Range::ConstantMinSmi(symbol()->range()),
1715}
1716
1718 if (IsConstant()) return *this;
1719
1720 return Add(Range::ConstantMaxSmi(symbol()->range()),
1722}
1723
1725 const RangeBoundary& b) {
1726 ASSERT(a.IsConstant() && b.IsConstant());
1727 return Utils::WillAddOverflow(a.ConstantValue(), b.ConstantValue());
1728}
1729
1731 const RangeBoundary& b) {
1732 ASSERT(a.IsConstant() && b.IsConstant());
1734 const int64_t result = a.ConstantValue() + b.ConstantValue();
1736}
1737
1739 const RangeBoundary& b) {
1740 ASSERT(a.IsConstant() && b.IsConstant());
1741 return Utils::WillSubOverflow(a.ConstantValue(), b.ConstantValue());
1742}
1743
1745 const RangeBoundary& b) {
1746 ASSERT(a.IsConstant() && b.IsConstant());
1748 const int64_t result = a.ConstantValue() - b.ConstantValue();
1750}
1751
1753 const RangeBoundary& b,
1755 if (a.IsSymbol() && b.IsConstant()) {
1756 if (Utils::WillAddOverflow(a.offset(), b.ConstantValue())) {
1757 return false;
1758 }
1759
1760 const int64_t offset = a.offset() + b.ConstantValue();
1761
1763 return false;
1764 }
1765
1767 return true;
1768 } else if (b.IsSymbol() && a.IsConstant()) {
1769 return SymbolicAdd(b, a, result);
1770 }
1771 return false;
1772}
1773
1775 const RangeBoundary& b,
1777 if (a.IsSymbol() && b.IsConstant()) {
1778 if (Utils::WillSubOverflow(a.offset(), b.ConstantValue())) {
1779 return false;
1780 }
1781
1782 const int64_t offset = a.offset() - b.ConstantValue();
1783
1785 return false;
1786 }
1787
1789 return true;
1790 }
1791 return false;
1792}
1793
1794bool RangeBoundary::Equals(const RangeBoundary& other) const {
1795 if (IsConstant() && other.IsConstant()) {
1796 return ConstantValue() == other.ConstantValue();
1797 } else if (IsSymbol() && other.IsSymbol()) {
1798 return (offset() == other.offset()) && DependOnSameSymbol(*this, other);
1799 } else if (IsUnknown() && other.IsUnknown()) {
1800 return true;
1801 }
1802 return false;
1803}
1804
1806 int64_t shift_count) {
1807 ASSERT(value_boundary.IsConstant());
1808 ASSERT(shift_count >= 0);
1809 const int64_t value = value_boundary.ConstantValue();
1810 if (value == 0) {
1811 return false;
1812 }
1813 return (shift_count >= kBitsPerInt64) ||
1814 !Utils::IsInt(static_cast<int>(kBitsPerInt64 - shift_count), value);
1815}
1816
1818 int64_t shift_count) {
1819 ASSERT(value_boundary.IsConstant());
1820 ASSERT(!WillShlOverflow(value_boundary, shift_count));
1821 ASSERT(shift_count >= 0);
1822 int64_t value = value_boundary.ConstantValue();
1823
1824 if (value == 0) {
1826 } else {
1827 // Result stays in 64 bit range.
1828 const int64_t result = static_cast<uint64_t>(value) << shift_count;
1829 return RangeBoundary(result);
1830 }
1831}
1832
1834 int64_t shift_count) {
1835 ASSERT(value_boundary.IsConstant());
1836 ASSERT(shift_count >= 0);
1837 const int64_t value = static_cast<int64_t>(value_boundary.ConstantValue());
1838 const int64_t result = (shift_count <= 63)
1839 ? (value >> shift_count)
1840 : (value >= 0 ? 0 : -1); // Dart semantics
1841 return RangeBoundary(result);
1842}
1843
1845 const RangeBoundary& overflow) {
1846 if (a.IsConstant()) {
1847 return a;
1848 }
1849
1850 int64_t offset = a.offset();
1851 Definition* symbol = a.symbol();
1852
1853 bool changed;
1854 do {
1855 changed = false;
1856 if (symbol->IsConstraint()) {
1857 symbol = symbol->AsConstraint()->value()->definition();
1858 changed = true;
1859 } else if (symbol->IsBinarySmiOp()) {
1860 BinarySmiOpInstr* op = symbol->AsBinarySmiOp();
1861 Definition* left = op->left()->definition();
1862 Definition* right = op->right()->definition();
1863 switch (op->op_kind()) {
1864 case Token::kADD:
1865 if (right->IsConstant()) {
1866 int64_t rhs = Smi::Cast(right->AsConstant()->value()).Value();
1867 if (Utils::WillAddOverflow(offset, rhs)) {
1868 return overflow;
1869 }
1870 offset += rhs;
1871 symbol = left;
1872 changed = true;
1873 } else if (left->IsConstant()) {
1874 int64_t rhs = Smi::Cast(left->AsConstant()->value()).Value();
1875 if (Utils::WillAddOverflow(offset, rhs)) {
1876 return overflow;
1877 }
1878 offset += rhs;
1879 symbol = right;
1880 changed = true;
1881 }
1882 break;
1883
1884 case Token::kSUB:
1885 if (right->IsConstant()) {
1886 int64_t rhs = Smi::Cast(right->AsConstant()->value()).Value();
1887 if (Utils::WillSubOverflow(offset, rhs)) {
1888 return overflow;
1889 }
1890 offset -= rhs;
1891 symbol = left;
1892 changed = true;
1893 }
1894 break;
1895
1896 default:
1897 break;
1898 }
1899 }
1900 } while (changed);
1901
1903 return overflow;
1904 }
1905
1906 return RangeBoundary::FromDefinition(symbol, offset);
1907}
1908
1910 if (!a->IsSymbol()) return false;
1911
1912 Range* range = a->symbol()->range();
1913 if ((range == nullptr) || !range->max().IsSymbol()) return false;
1914
1915 if (Utils::WillAddOverflow(range->max().offset(), a->offset())) {
1917 return true;
1918 }
1919
1920 const int64_t offset = range->max().offset() + a->offset();
1921
1924 return true;
1925 }
1926
1930
1931 return true;
1932}
1933
1935 if (!a->IsSymbol()) return false;
1936
1937 Range* range = a->symbol()->range();
1938 if ((range == nullptr) || !range->min().IsSymbol()) return false;
1939
1940 if (Utils::WillAddOverflow(range->min().offset(), a->offset())) {
1942 return true;
1943 }
1944
1945 const int64_t offset = range->min().offset() + a->offset();
1946
1949 return true;
1950 }
1951
1955
1956 return true;
1957}
1958
// Per-boundary canonicalization step used by CanonicalizeForComparison;
// returns true when it rewrote the boundary (i.e. made progress).
typedef bool (*BoundaryOp)(RangeBoundary*);
1960
1963 BoundaryOp op,
1964 const RangeBoundary& overflow) {
1965 if (!a->IsSymbol() || !b->IsSymbol()) {
1966 return false;
1967 }
1968
1969 RangeBoundary canonical_a = *a;
1970 RangeBoundary canonical_b = *b;
1971
1972 do {
1973 if (DependOnSameSymbol(canonical_a, canonical_b)) {
1974 *a = canonical_a;
1975 *b = canonical_b;
1976 return true;
1977 }
1978 } while (op(&canonical_a) || op(&canonical_b));
1979
1980 return false;
1981}
1982
1986 if (a.Equals(b)) {
1987 return b;
1988 }
1989
1992 return (a.offset() <= b.offset()) ? a : b;
1993 }
1994
1995 const int64_t inf_a = a.LowerBound(size);
1996 const int64_t inf_b = b.LowerBound(size);
1997 const int64_t sup_a = a.UpperBound(size);
1998 const int64_t sup_b = b.UpperBound(size);
1999
2000 if ((sup_a <= inf_b) && !a.LowerBound().Overflowed(size)) {
2001 return a;
2002 } else if ((sup_b <= inf_a) && !b.LowerBound().Overflowed(size)) {
2003 return b;
2004 } else {
2005 return RangeBoundary::FromConstant(Utils::Minimum(inf_a, inf_b));
2006 }
2007}
2008
2012 if (a.Equals(b)) {
2013 return b;
2014 }
2015
2019 return (a.offset() >= b.offset()) ? a : b;
2020 }
2021
2022 const int64_t inf_a = a.LowerBound(size);
2023 const int64_t inf_b = b.LowerBound(size);
2024 const int64_t sup_a = a.UpperBound(size);
2025 const int64_t sup_b = b.UpperBound(size);
2026
2027 if ((sup_a <= inf_b) && !b.UpperBound().Overflowed(size)) {
2028 return b;
2029 } else if ((sup_b <= inf_a) && !a.UpperBound().Overflowed(size)) {
2030 return a;
2031 } else {
2032 return RangeBoundary::FromConstant(Utils::Maximum(sup_a, sup_b));
2033 }
2034}
2035
2037 ASSERT(!a.IsUnknown() && !b.IsUnknown());
2038
2039 if (a.Equals(b)) {
2040 return a;
2041 }
2042
2043 if (a.IsConstant() && b.IsConstant()) {
2044 return RangeBoundary(Utils::Maximum(a.ConstantValue(), b.ConstantValue()));
2045 }
2046
2047 if (a.IsMinimumOrBelow(RangeBoundary::kRangeBoundarySmi)) {
2048 return b;
2049 } else if (b.IsMinimumOrBelow(RangeBoundary::kRangeBoundarySmi)) {
2050 return a;
2051 }
2052
2056 return (a.offset() >= b.offset()) ? a : b;
2057 }
2058
2059 const int64_t inf_a = a.SmiLowerBound();
2060 const int64_t inf_b = b.SmiLowerBound();
2061
2062 return (inf_a >= inf_b) ? a : b;
2063}
2064
2066 ASSERT(!a.IsUnknown() && !b.IsUnknown());
2067
2068 if (a.Equals(b)) {
2069 return a;
2070 }
2071
2072 if (a.IsConstant() && b.IsConstant()) {
2073 return RangeBoundary(Utils::Minimum(a.ConstantValue(), b.ConstantValue()));
2074 }
2075
2076 if (a.IsMaximumOrAbove(RangeBoundary::kRangeBoundarySmi)) {
2077 return b;
2078 } else if (b.IsMaximumOrAbove(RangeBoundary::kRangeBoundarySmi)) {
2079 return a;
2080 }
2081
2085 return (a.offset() <= b.offset()) ? a : b;
2086 }
2087
2088 const int64_t sup_a = a.SmiUpperBound();
2089 const int64_t sup_b = b.SmiUpperBound();
2090
2091 return (sup_a <= sup_b) ? a : b;
2092}
2093
2095 ASSERT(IsConstant());
2096 return value_;
2097}
2098
2100 switch (r) {
2101 case kTagged:
2103 case kUnboxedInt8:
2105 case kUnboxedInt16:
2106 case kUnboxedUint8: // Overapproximate Uint8 as Int16.
2108 case kUnboxedInt32:
2109 case kUnboxedUint16: // Overapproximate Uint16 as Int32.
2111 case kUnboxedInt64:
2112 case kUnboxedUint32: // Overapproximate Uint32 as Int64.
2114 default:
2115 UNREACHABLE();
2117 }
2118}
2119
2124}
2125
// Returns true if every value in this range is >= 0.
bool Range::IsPositive() const {
  return OnlyGreaterThanOrEqualTo(0);
}
2129
// Returns true if every value in this range is <= -1 (strictly negative).
bool Range::IsNegative() const {
  return OnlyLessThanOrEqualTo(-1);
}
2133
2134bool Range::OnlyLessThanOrEqualTo(int64_t val) const {
2135 const RangeBoundary upper_bound = max().UpperBound();
2136 return upper_bound.ConstantValue() <= val;
2137}
2138
2139bool Range::OnlyGreaterThanOrEqualTo(int64_t val) const {
2140 const RangeBoundary lower_bound = min().LowerBound();
2141 return lower_bound.ConstantValue() >= val;
2142}
2143
// Inclusive.
// Returns true if every value in this range lies inside [min_int, max_int].
bool Range::IsWithin(int64_t min_int, int64_t max_int) const {
  return OnlyGreaterThanOrEqualTo(min_int) && OnlyLessThanOrEqualTo(max_int);
}
2148
2149bool Range::IsWithin(const Range* other) const {
2150 auto const lower_bound = other->min().LowerBound();
2151 auto const upper_bound = other->max().UpperBound();
2152 return IsWithin(other->min().ConstantValue(), other->max().ConstantValue());
2153}
2154
// Returns true if this range and the interval [min_int, max_int] share at
// least one value.
bool Range::Overlaps(int64_t min_int, int64_t max_int) const {
  // NOTE(review): the line 'RangeBoundary lower = min().LowerBound();' was
  // dropped by extraction — restore from upstream before compiling.
  RangeBoundary upper = max().UpperBound();
  const int64_t this_min = lower.ConstantValue();
  const int64_t this_max = upper.ConstantValue();
  // Either endpoint of the query interval falls inside this range...
  if ((this_min <= min_int) && (min_int <= this_max)) return true;
  if ((this_min <= max_int) && (max_int <= this_max)) return true;
  // ...or the query interval strictly contains this range.
  if ((min_int < this_min) && (max_int > this_max)) return true;
  return false;
}
2165
2167 // Constant case: For example [0, -1].
2168 if (Range::ConstantMin(this).ConstantValue() >
2169 Range::ConstantMax(this).ConstantValue()) {
2170 return true;
2171 }
2172 // Symbol case: For example [v+1, v].
2173 return DependOnSameSymbol(min(), max()) && min().offset() > max().offset();
2174}
2175
2177 min_ = min_.Clamp(size);
2178 max_ = max_.Clamp(size);
2179}
2180
2182 min_ = min_.LowerBound().Clamp(size);
2183 max_ = max_.UpperBound().Clamp(size);
2184}
2185
// Computes [*result_min, *result_max] for a left shift given the operand
// ranges. On potential overflow both bounds saturate (see note below).
void Range::Shl(const Range* left,
                const Range* right,
                RangeBoundary* result_min,
                RangeBoundary* result_max) {
  ASSERT(left != nullptr);
  ASSERT(right != nullptr);
  ASSERT(result_min != nullptr);
  ASSERT(result_max != nullptr);
  RangeBoundary left_max = Range::ConstantMax(left);
  RangeBoundary left_min = Range::ConstantMin(left);
  // A negative shift count always deoptimizes (and throws), so the minimum
  // shift count is zero.
  int64_t right_max = Utils::Maximum(Range::ConstantMax(right).ConstantValue(),
                                     static_cast<int64_t>(0));
  int64_t right_min = Utils::Maximum(Range::ConstantMin(right).ConstantValue(),
                                     static_cast<int64_t>(0));
  bool overflow = false;
  {
    // A positive minimum stays smallest with the smallest shift; a
    // non-positive minimum goes furthest down with the largest shift.
    const auto shift_amount =
        left_min.ConstantValue() > 0 ? right_min : right_max;
    if (RangeBoundary::WillShlOverflow(left_min, shift_amount)) {
      overflow = true;
    } else {
      *result_min = RangeBoundary::Shl(left_min, shift_amount);
    }
  }
  {
    // Symmetric reasoning for the upper bound.
    const auto shift_amount =
        left_max.ConstantValue() > 0 ? right_max : right_min;
    if (RangeBoundary::WillShlOverflow(left_max, shift_amount)) {
      overflow = true;
    } else {
      *result_max = RangeBoundary::Shl(left_max, shift_amount);
    }
  }
  if (overflow) {
    // NOTE(review): the right-hand sides of these two assignments were
    // dropped by extraction — presumably they saturate both bounds to the
    // full int64 range; restore from upstream before compiling.
    *result_min =
    *result_max =
  }
}
2228
2229void Range::Shr(const Range* left,
2230 const Range* right,
2231 RangeBoundary* result_min,
2232 RangeBoundary* result_max) {
2233 RangeBoundary left_max = Range::ConstantMax(left);
2234 RangeBoundary left_min = Range::ConstantMin(left);
2235 // A negative shift count always deoptimizes (and throws), so the minimum
2236 // shift count is zero.
2237 int64_t right_max = Utils::Maximum(Range::ConstantMax(right).ConstantValue(),
2238 static_cast<int64_t>(0));
2239 int64_t right_min = Utils::Maximum(Range::ConstantMin(right).ConstantValue(),
2240 static_cast<int64_t>(0));
2241
2242 *result_min = RangeBoundary::Shr(
2243 left_min, left_min.ConstantValue() > 0 ? right_max : right_min);
2244
2245 *result_max = RangeBoundary::Shr(
2246 left_max, left_max.ConstantValue() > 0 ? right_min : right_max);
2247}
2248
2249static void ConvertRangeToUnsigned(int64_t a,
2250 int64_t b,
2251 uint64_t* ua,
2252 uint64_t* ub) {
2253 ASSERT(a <= b);
2254 if ((a < 0) && (b >= 0)) {
2255 // Range contains -1 and 0 and wraps-around as unsigned.
2256 *ua = 0;
2257 *ub = kMaxUint64;
2258 } else {
2259 // Range is fully in the negative or non-negative part
2260 // and doesn't wrap-around if interpreted as unsigned.
2261 *ua = static_cast<uint64_t>(a);
2262 *ub = static_cast<uint64_t>(b);
2263 }
2264}
2265
2266static void ConvertRangeToSigned(uint64_t a,
2267 uint64_t b,
2268 int64_t* sa,
2269 int64_t* sb) {
2270 ASSERT(a <= b);
2271 if ((a <= static_cast<uint64_t>(kMaxInt64)) &&
2272 (b >= static_cast<uint64_t>(kMinInt64))) {
2273 // Range contains kMinInt64 and kMaxInt64 and wraps-around as signed.
2274 *sa = kMinInt64;
2275 *sb = kMaxInt64;
2276 } else {
2277 // Range is fully in the negative or non-negative part
2278 // and doesn't wrap-around if interpreted as signed.
2279 *sa = static_cast<int64_t>(a);
2280 *sb = static_cast<int64_t>(b);
2281 }
2282}
2283
2284void Range::Ushr(const Range* left,
2285 const Range* right,
2286 RangeBoundary* result_min,
2287 RangeBoundary* result_max) {
2288 const int64_t left_max = Range::ConstantMax(left).ConstantValue();
2289 const int64_t left_min = Range::ConstantMin(left).ConstantValue();
2290 // A negative shift count always deoptimizes (and throws), so the minimum
2291 // shift count is zero.
2292 const int64_t right_max = Utils::Maximum(
2293 Range::ConstantMax(right).ConstantValue(), static_cast<int64_t>(0));
2294 const int64_t right_min = Utils::Maximum(
2295 Range::ConstantMin(right).ConstantValue(), static_cast<int64_t>(0));
2296
2297 uint64_t unsigned_left_min, unsigned_left_max;
2298 ConvertRangeToUnsigned(left_min, left_max, &unsigned_left_min,
2299 &unsigned_left_max);
2300
2301 const uint64_t unsigned_result_min =
2302 (right_max >= kBitsPerInt64)
2303 ? 0
2304 : unsigned_left_min >> static_cast<uint64_t>(right_max);
2305 const uint64_t unsigned_result_max =
2306 (right_min >= kBitsPerInt64)
2307 ? 0
2308 : unsigned_left_max >> static_cast<uint64_t>(right_min);
2309
2310 int64_t signed_result_min, signed_result_max;
2311 ConvertRangeToSigned(unsigned_result_min, unsigned_result_max,
2312 &signed_result_min, &signed_result_max);
2313
2314 *result_min = RangeBoundary(signed_result_min);
2315 *result_max = RangeBoundary(signed_result_max);
2316}
2317
2318void Range::And(const Range* left_range,
2319 const Range* right_range,
2320 RangeBoundary* result_min,
2321 RangeBoundary* result_max) {
2322 ASSERT(left_range != nullptr);
2323 ASSERT(right_range != nullptr);
2324 ASSERT(result_min != nullptr);
2325 ASSERT(result_max != nullptr);
2326
2327 if (Range::ConstantMin(right_range).ConstantValue() >= 0) {
2328 *result_min = RangeBoundary::FromConstant(0);
2329 *result_max = Range::ConstantMax(right_range);
2330 return;
2331 }
2332
2333 if (Range::ConstantMin(left_range).ConstantValue() >= 0) {
2334 *result_min = RangeBoundary::FromConstant(0);
2335 *result_max = Range::ConstantMax(left_range);
2336 return;
2337 }
2338
2339 BitwiseOp(left_range, right_range, result_min, result_max);
2340}
2341
// Returns the number of bits needed to represent every value in |range|.
static int BitSize(const Range* range) {
  const int64_t min = Range::ConstantMin(range).ConstantValue();
  const int64_t max = Range::ConstantMax(range).ConstantValue();
  // NOTE(review): the return statement was dropped by extraction —
  // presumably it returns the maximum of the bit lengths of |min| and
  // |max|; restore from upstream before compiling.
}
2347
2348void Range::BitwiseOp(const Range* left_range,
2349 const Range* right_range,
2350 RangeBoundary* result_min,
2351 RangeBoundary* result_max) {
2352 const int bitsize = Utils::Maximum(BitSize(left_range), BitSize(right_range));
2353
2354 if (left_range->IsPositive() && right_range->IsPositive()) {
2355 *result_min = RangeBoundary::FromConstant(0);
2356 } else {
2357 *result_min =
2358 RangeBoundary::FromConstant(-(static_cast<uint64_t>(1) << bitsize));
2359 }
2360
2361 *result_max =
2362 RangeBoundary::FromConstant((static_cast<uint64_t>(1) << bitsize) - 1);
2363}
2364
// Computes [*result_min, *result_max] = left + right. When the left operand
// is an array length, symbolic boundaries referring to the length definition
// are preferred so later bounds-check elimination can reason symbolically.
void Range::Add(const Range* left_range,
                const Range* right_range,
                RangeBoundary* result_min,
                RangeBoundary* result_max,
                Definition* left_defn) {
  ASSERT(left_range != nullptr);
  ASSERT(right_range != nullptr);
  ASSERT(result_min != nullptr);
  ASSERT(result_max != nullptr);

  // NOTE(review): the '?' arms of the two conditionals below (which build
  // symbolic boundaries from left_defn) were dropped by extraction —
  // restore from upstream before compiling.
  RangeBoundary left_min = Definition::IsArrayLength(left_defn)
                               : left_range->min();

  RangeBoundary left_max = Definition::IsArrayLength(left_defn)
                               : left_range->max();

  bool overflow = false;
  // Prefer a symbolic sum; otherwise fall back to adding constant bounds.
  if (!RangeBoundary::SymbolicAdd(left_min, right_range->min(), result_min)) {
    const auto left_min_bound = left_range->min().LowerBound();
    const auto right_min_bound = right_range->min().LowerBound();
    if (RangeBoundary::WillAddOverflow(left_min_bound, right_min_bound)) {
      overflow = true;
    } else {
      *result_min = RangeBoundary::Add(left_min_bound, right_min_bound);
    }
  }
  if (!RangeBoundary::SymbolicAdd(left_max, right_range->max(), result_max)) {
    const auto left_max_bound = left_range->max().UpperBound();
    const auto right_max_bound = right_range->max().UpperBound();
    if (RangeBoundary::WillAddOverflow(left_max_bound, right_max_bound)) {
      overflow = true;
    } else {
      *result_max = RangeBoundary::Add(left_max_bound, right_max_bound);
    }
  }
  if (overflow) {
    // NOTE(review): right-hand sides dropped by extraction — presumably
    // both bounds saturate to the full int64 range here.
    *result_min =
    *result_max =
  }
}
2409
// Computes [*result_min, *result_max] = left - right. Mirrors Range::Add:
// min(l - r) = min(l) - max(r) and max(l - r) = max(l) - min(r).
void Range::Sub(const Range* left_range,
                const Range* right_range,
                RangeBoundary* result_min,
                RangeBoundary* result_max,
                Definition* left_defn) {
  ASSERT(left_range != nullptr);
  ASSERT(right_range != nullptr);
  ASSERT(result_min != nullptr);
  ASSERT(result_max != nullptr);

  // NOTE(review): the '?' arms of the two conditionals below (which build
  // symbolic boundaries from left_defn) were dropped by extraction —
  // restore from upstream before compiling.
  RangeBoundary left_min = Definition::IsArrayLength(left_defn)
                               : left_range->min();

  RangeBoundary left_max = Definition::IsArrayLength(left_defn)
                               : left_range->max();

  bool overflow = false;
  // Prefer a symbolic difference; otherwise subtract constant bounds.
  if (!RangeBoundary::SymbolicSub(left_min, right_range->max(), result_min)) {
    const auto left_min_bound = left_range->min().LowerBound();
    const auto right_max_bound = right_range->max().UpperBound();
    if (RangeBoundary::WillSubOverflow(left_min_bound, right_max_bound)) {
      overflow = true;
    } else {
      *result_min = RangeBoundary::Sub(left_min_bound, right_max_bound);
    }
  }
  if (!RangeBoundary::SymbolicSub(left_max, right_range->min(), result_max)) {
    const auto left_max_bound = left_range->max().UpperBound();
    const auto right_min_bound = right_range->min().LowerBound();
    if (RangeBoundary::WillSubOverflow(left_max_bound, right_min_bound)) {
      overflow = true;
    } else {
      *result_max = RangeBoundary::Sub(left_max_bound, right_min_bound);
    }
  }
  if (overflow) {
    // NOTE(review): right-hand sides dropped by extraction — presumably
    // both bounds saturate to the full int64 range here.
    *result_min =
    *result_max =
  }
}
2454
// Computes [*result_min, *result_max] = left * right, handling precisely
// only the case where the product of absolute maxima provably fits in
// 64 bits.
void Range::Mul(const Range* left_range,
                const Range* right_range,
                RangeBoundary* result_min,
                RangeBoundary* result_max) {
  ASSERT(left_range != nullptr);
  ASSERT(right_range != nullptr);
  ASSERT(result_min != nullptr);
  ASSERT(result_max != nullptr);

  const int64_t left_max = ConstantAbsMax(left_range);
  const int64_t right_max = ConstantAbsMax(right_range);
  // Both absolute maxima must be bounded by -kSmiMin, and their product must
  // not exceed kMaxInt64 (the division guard avoids overflow in the check
  // itself; left_max == 0 short-circuits division by zero).
  if ((left_max <= -compiler::target::kSmiMin) &&
      (right_max <= -compiler::target::kSmiMin) &&
      ((left_max == 0) || (right_max <= kMaxInt64 / left_max))) {
    // Product of left and right max values stays in 64 bit range.
    const int64_t mul_max = left_max * right_max;
    if (OnlyPositiveOrZero(*left_range, *right_range) ||
        OnlyNegativeOrZero(*left_range, *right_range)) {
      // If both ranges are of the same sign then the range of the result
      // is positive and is between multiplications of absolute minimums
      // and absolute maximums.
      const int64_t mul_min =
          ConstantAbsMin(left_range) * ConstantAbsMin(right_range);
      *result_min = RangeBoundary::FromConstant(mul_min);
      *result_max = RangeBoundary::FromConstant(mul_max);
    } else {
      // If ranges have mixed signs then use conservative approximation:
      // absolute value of the result is less or equal to multiplication
      // of absolute maximums.
      *result_min = RangeBoundary::FromConstant(-mul_max);
      *result_max = RangeBoundary::FromConstant(mul_max);
    }
    return;
  }

  // NOTE(review): the fallback assignments were dropped by extraction —
  // presumably they assign the full int64 range when the product may
  // overflow; restore from upstream before compiling.
}
2493
// Computes [*result_min, *result_max] for truncating division. Only the
// easy case — non-negative dividend, strictly positive divisor — is handled
// precisely, since there the quotient is monotone in both operands.
void Range::TruncDiv(const Range* left_range,
                     const Range* right_range,
                     RangeBoundary* result_min,
                     RangeBoundary* result_max) {
  ASSERT(left_range != nullptr);
  ASSERT(right_range != nullptr);
  ASSERT(result_min != nullptr);
  ASSERT(result_max != nullptr);

  if (left_range->OnlyGreaterThanOrEqualTo(0) &&
      right_range->OnlyGreaterThanOrEqualTo(1)) {
    const int64_t left_max = ConstantAbsMax(left_range);
    const int64_t left_min = ConstantAbsMin(left_range);
    const int64_t right_max = ConstantAbsMax(right_range);
    const int64_t right_min = ConstantAbsMin(right_range);

    // Largest quotient: biggest dividend over smallest divisor; smallest
    // quotient: smallest dividend over biggest divisor.
    *result_max = RangeBoundary::FromConstant(left_max / right_min);
    *result_min = RangeBoundary::FromConstant(left_min / right_max);
    return;
  }

  // NOTE(review): the fallback assignments were dropped by extraction —
  // presumably they assign the full int64 range for all other sign
  // combinations; restore from upstream before compiling.
}
2518
2519void Range::Mod(const Range* right_range,
2520 RangeBoundary* result_min,
2521 RangeBoundary* result_max) {
2522 ASSERT(right_range != nullptr);
2523 ASSERT(result_min != nullptr);
2524 ASSERT(result_max != nullptr);
2525 // Each modulo result is positive and bounded by one less than
2526 // the maximum of the right-hand-side (it is unlikely that the
2527 // left-hand-side further refines this in typical programs).
2528 // Note that x % MinInt can be MaxInt and x % 0 always throws.
2529 const int64_t kModMin = 0;
2530 int64_t mod_max = kMaxInt64;
2531 if (Range::ConstantMin(right_range).ConstantValue() != kMinInt64) {
2532 const int64_t right_max = ConstantAbsMax(right_range);
2533 mod_max = Utils::Maximum(right_max - 1, kModMin);
2534 }
2535 *result_min = RangeBoundary::FromConstant(kModMin);
2536 *result_max = RangeBoundary::FromConstant(mod_max);
2537}
2538
2539// Both the a and b ranges are >= 0.
2541 return a.OnlyGreaterThanOrEqualTo(0) && b.OnlyGreaterThanOrEqualTo(0);
2542}
2543
2544// Both the a and b ranges are <= 0.
2546 return a.OnlyLessThanOrEqualTo(0) && b.OnlyLessThanOrEqualTo(0);
2547}
2548
2549// Return the maximum absolute value included in range.
2550int64_t Range::ConstantAbsMax(const Range* range) {
2551 if (range == nullptr) {
2552 return RangeBoundary::kMax;
2553 }
2554 const int64_t abs_min =
2555 Utils::AbsWithSaturation(Range::ConstantMin(range).ConstantValue());
2556 const int64_t abs_max =
2557 Utils::AbsWithSaturation(Range::ConstantMax(range).ConstantValue());
2558 return Utils::Maximum(abs_min, abs_max);
2559}
2560
2561// Return the minimum absolute value included in range.
2562int64_t Range::ConstantAbsMin(const Range* range) {
2563 if (range == nullptr) {
2564 return 0;
2565 }
2566 const int64_t abs_min =
2567 Utils::AbsWithSaturation(Range::ConstantMin(range).ConstantValue());
2568 const int64_t abs_max =
2569 Utils::AbsWithSaturation(Range::ConstantMax(range).ConstantValue());
2570 return Utils::Minimum(abs_min, abs_max);
2571}
2572
2574 const Range* left_range,
2575 const Range* right_range,
2576 Definition* left_defn,
2577 Range* result) {
2578 ASSERT(left_range != nullptr);
2579 ASSERT(right_range != nullptr);
2580
2584
2585 switch (op) {
2586 case Token::kADD:
2587 Range::Add(left_range, right_range, &min, &max, left_defn);
2588 break;
2589
2590 case Token::kSUB:
2591 Range::Sub(left_range, right_range, &min, &max, left_defn);
2592 break;
2593
2594 case Token::kMUL:
2595 Range::Mul(left_range, right_range, &min, &max);
2596 break;
2597
2598 case Token::kTRUNCDIV:
2599 Range::TruncDiv(left_range, right_range, &min, &max);
2600 break;
2601
2602 case Token::kMOD:
2603 Range::Mod(right_range, &min, &max);
2604 break;
2605
2606 case Token::kSHL:
2607 Range::Shl(left_range, right_range, &min, &max);
2608 break;
2609
2610 case Token::kSHR:
2611 Range::Shr(left_range, right_range, &min, &max);
2612 break;
2613
2614 case Token::kUSHR:
2615 Range::Ushr(left_range, right_range, &min, &max);
2616 break;
2617
2618 case Token::kBIT_AND:
2619 Range::And(left_range, right_range, &min, &max);
2620 break;
2621
2622 case Token::kBIT_XOR:
2623 case Token::kBIT_OR:
2624 Range::BitwiseOp(left_range, right_range, &min, &max);
2625 break;
2626
2627 default:
2628 *result =
2631 return;
2632 }
2633
2634 ASSERT(!min.IsUnknown() && !max.IsUnknown());
2635
2636 // Sanity: avoid [l, u] with constants l > u.
2637 ASSERT(!min.IsConstant() || !max.IsConstant() ||
2639
2640 *result = Range(min, max);
2641}
2642
2643void Definition::set_range(const Range& range) {
2644 if (range_ == nullptr) {
2645 range_ = new Range();
2646 }
2647 *range_ = range;
2648}
2649
2651 if (Type()->ToCid() == kSmiCid) {
2653 } else if (IsInt64Definition()) {
2655 } else if (IsInt32Definition()) {
2657 } else if (Type()->IsInt()) {
2659 } else {
2660 // Only Smi and Mint supported.
2661 FATAL("Unsupported type in: %s", ToCString());
2662 }
2663
2664 // If the representation also gives us range information, then refine
2665 // the range from the type by using the intersection of the two.
2668 }
2669}
2670
2671static bool DependsOnSymbol(const RangeBoundary& a, Definition* symbol) {
2672 return a.IsSymbol() && (UnwrapConstraint(a.symbol()) == symbol);
2673}
2674
// Given the range and definition update the range so that
// it covers both original range and definitions range.
//
// The following should also hold:
//
// [_|_, _|_] U a = a U [_|_, _|_] = a
//
// NOTE(review): the trailing parameter line (presumably
// 'RangeBoundary::RangeSize size) {') was dropped by extraction — restore
// from upstream before compiling.
static void Join(Range* range,
                 Definition* defn,
                 const Range* defn_range,
  if (Range::IsUnknown(defn_range)) {
    return;
  }

  if (Range::IsUnknown(range)) {
    // Unknown acts as the bottom element: the join is just the other range.
    *range = *defn_range;
    return;
  }

  Range other = *defn_range;

  // Handle patterns where range already depends on defn as a symbol:
  //
  // (..., S+o] U range(S) and [S+o, ...) U range(S)
  //
  // To improve precision of the computed join use [S, S] instead of
  // using range(S). It will be canonicalized away by JoinMin/JoinMax
  // functions.
  Definition* unwrapped = UnwrapConstraint(defn);
  if (DependsOnSymbol(range->min(), unwrapped) ||
      DependsOnSymbol(range->max(), unwrapped)) {
    // NOTE(review): the second Range(...) constructor argument line was
    // dropped by extraction.
    other = Range(RangeBoundary::FromDefinition(defn, 0),
  }

  // First try to compare ranges based on their upper and lower bounds.
  const int64_t inf_range = range->min().LowerBound(size);
  const int64_t inf_other = other.min().LowerBound(size);
  const int64_t sup_range = range->max().UpperBound(size);
  const int64_t sup_other = other.max().UpperBound(size);

  if (sup_range <= inf_other) {
    // The range is fully below defn's range. Keep the minimum and
    // expand the maximum.
    range->set_max(other.max());
  } else if (sup_other <= inf_range) {
    // The range is fully above defn's range. Keep the maximum and
    // expand the minimum.
    range->set_min(other.min());
  } else {
    // Can't compare ranges as whole. Join minimum and maximum separately.
    *range = Range(RangeBoundary::JoinMin(range->min(), other.min(), size),
                   RangeBoundary::JoinMax(range->max(), other.max(), size));
  }
}
2731
2732// A definition dominates a phi if its block dominates the phi's block
2733// and the two blocks are different.
2735 return a->Dominates(phi_block) && (a != phi_block);
2736}
2737
2738// When assigning range to a phi we must take care to avoid self-reference
2739// cycles when phi's range depends on the phi itself.
2740// To prevent such cases we impose additional restriction on symbols that
2741// can be used as boundaries for phi's range: they must dominate
2742// phi's definition.
2744 const RangeBoundary& a,
2745 const RangeBoundary& limit) {
2746 if (!a.IsSymbol() || DominatesPhi(a.symbol()->GetBlock(), phi_block)) {
2747 return a;
2748 }
2749
2750 // Symbol does not dominate phi. Try unwrapping constraint and check again.
2751 Definition* unwrapped = UnwrapConstraint(a.symbol());
2752 if ((unwrapped != a.symbol()) &&
2753 DominatesPhi(unwrapped->GetBlock(), phi_block)) {
2754 return RangeBoundary::FromDefinition(unwrapped, a.offset());
2755 }
2756
2757 return limit;
2758}
2759
2760static const Range* GetInputRange(RangeAnalysis* analysis,
2762 Value* input) {
2763 switch (size) {
2765 return analysis->GetSmiRange(input);
2769 return input->definition()->range();
2771 return analysis->GetIntRange(input);
2772 default:
2773 UNREACHABLE();
2774 return nullptr;
2775 }
2776}
2777
2780 for (intptr_t i = 0; i < InputCount(); i++) {
2781 Value* input = InputAt(i);
2782 Join(range, input->definition(), GetInputRange(analysis, size, input),
2783 size);
2784 }
2785
2786 BlockEntryInstr* phi_block = GetBlock();
2787 range->set_min(
2789 range->set_max(
2791}
2792
2794 if (value_.IsSmi()) {
2795 int64_t value = Smi::Cast(value_).Value();
2798 } else if (value_.IsMint()) {
2799 int64_t value = Mint::Cast(value_).value();
2802 } else {
2803 // Only Smi and Mint supported.
2804 FATAL("Unexpected constant: %s\n", value_.ToCString());
2805 }
2806}
2807
2809 const Range* value_range = analysis->GetSmiRange(value());
2810 if (Range::IsUnknown(value_range)) {
2811 return;
2812 }
2813
2814 // TODO(vegorov) check if precision of the analysis can be improved by
2815 // recognizing intersections of the form:
2816 //
2817 // (..., S+x] ^ [S+x, ...) = [S+x, S+x]
2818 //
2819 Range result = value_range->Intersect(constraint());
2820
2821 if (result.IsUnsatisfiable()) {
2822 return;
2823 }
2824
2825 *range = result;
2826}
2827
2829 switch (slot().kind()) {
2830 case Slot::Kind::kArray_length:
2831 case Slot::Kind::kGrowableObjectArray_length:
2832 *range = Range(
2835 break;
2836
2837 case Slot::Kind::kTypedDataBase_length:
2838 case Slot::Kind::kTypedDataView_offset_in_bytes:
2840 break;
2841
2842 case Slot::Kind::kAbstractType_hash:
2843 case Slot::Kind::kTypeArguments_hash:
2845 break;
2846
2847 case Slot::Kind::kTypeArguments_length:
2851 break;
2852
2853 case Slot::Kind::kRecord_shape:
2855 break;
2856
2857 case Slot::Kind::kString_length:
2858 *range = Range(
2861 break;
2862
2866 // Use default value.
2867 Definition::InferRange(analysis, range);
2868 break;
2869
2872#define NATIVE_SLOT_CASE(ClassName, __, FieldName, ___, ____) \
2873 case Slot::Kind::k##ClassName##_##FieldName:
2875#undef NATIVE_SLOT_CASE
2876 // Not an integer valued field.
2877 UNREACHABLE();
2878 break;
2879
2881 // Should not be used in LoadField instructions.
2882 UNREACHABLE();
2883 break;
2884
2885#define UNBOXED_NATIVE_SLOT_CASE(Class, __, Field, ___, ____) \
2886 case Slot::Kind::k##Class##_##Field:
2888#undef UNBOXED_NATIVE_SLOT_CASE
2889 *range = Range::Full(slot().representation());
2890 break;
2891
2892 case Slot::Kind::kClosure_hash:
2893 case Slot::Kind::kLinkedHashBase_hash_mask:
2894 case Slot::Kind::kLinkedHashBase_used_data:
2895 case Slot::Kind::kLinkedHashBase_deleted_keys:
2897 break;
2898
2899 case Slot::Kind::kArgumentsDescriptor_type_args_len:
2900 case Slot::Kind::kArgumentsDescriptor_positional_count:
2901 case Slot::Kind::kArgumentsDescriptor_count:
2902 case Slot::Kind::kArgumentsDescriptor_size:
2904 break;
2905 }
2906}
2907
2909 // Use the precise array element representation instead of the returned
2910 // representation to avoid overapproximating the range for small elements.
2911 auto const rep =
2914 *range = Range::Full(rep);
2915 } else {
2916 Definition::InferRange(analysis, range);
2917 }
2918}
2919
2921 ASSERT(kIllegalCid == 0);
2922 *lower = 1;
2923 *upper = kClassIdTagMax;
2924
2925 CompileType* ctype = object()->Type();
2926 intptr_t cid = ctype->ToCid();
2927 if (cid != kDynamicCid) {
2928 *lower = *upper = cid;
2929 } else if (CompilerState::Current().is_aot()) {
2930 IsolateGroup* isolate_group = IsolateGroup::Current();
2931 *upper = isolate_group->has_dynamically_extendable_classes()
2933 : isolate_group->class_table()->NumCids();
2934
2936 if (hi != nullptr) {
2937 const auto& type = *ctype->ToAbstractType();
2938 if (type.IsType() && !type.IsFutureOrType() &&
2940 const auto& type_class = Class::Handle(type.type_class());
2941 if (!type_class.has_dynamically_extendable_subtypes()) {
2942 const auto& ranges =
2943 hi->SubtypeRangesForClass(type_class, /*include_abstract=*/false,
2944 /*exclude_null=*/true);
2945 if (ranges.length() > 0) {
2946 *lower = ranges[0].cid_start;
2947 *upper = ranges[ranges.length() - 1].cid_end;
2948 }
2949 }
2950 }
2951 }
2952 }
2953}
2954
2956 uword lower, upper;
2957 InferRange(&lower, &upper);
2960}
2961
2965 // Take the number of loaded characters into account when determining the
2966 // range of the result.
2967 ASSERT(element_count_ > 0);
2968 switch (class_id()) {
2969 case kOneByteStringCid:
2970 ASSERT(element_count_ <= 4);
2971 *range = Range(zero, RangeBoundary::FromConstant(
2972 Utils::NBitMask(kBitsPerByte * element_count_)));
2973 break;
2974 case kTwoByteStringCid:
2975 ASSERT(element_count_ <= 2);
2977 2 * kBitsPerByte * element_count_)));
2978 break;
2979 default:
2980 UNREACHABLE();
2981 break;
2982 }
2983}
2984
2986 // The input bytes given to the Utf8Scan instruction are in non-negative Smi
2987 // range and so is the resulting computed length.
2989}
2990
2992 const intptr_t min = Utils::Minimum(if_true_, if_false_);
2993 const intptr_t max = Utils::Maximum(if_true_, if_false_);
2994 *range =
2996}
2997
// Computes this binary op's result range from the operand ranges, updates
// the can_overflow flag, and clamps the result to the operation's range
// size.
void BinaryIntegerOpInstr::InferRangeHelper(const Range* left_range,
                                            const Range* right_range,
                                            Range* range) {
  // TODO(vegorov): canonicalize BinaryIntegerOp to always have constant on the
  // right and a non-constant on the left.
  if (Range::IsUnknown(left_range) || Range::IsUnknown(right_range)) {
    return;
  }

  Range::BinaryOp(op_kind(), left_range, right_range, left()->definition(),
                  range);
  // NOTE(review): one line was dropped by extraction here — presumably a
  // debug-mode range/representation assertion.

  const RangeBoundary::RangeSize range_size =
  // NOTE(review): the initializer line was dropped by extraction — restore
  // from upstream before compiling.

  // Calculate overflowed status before clamping if operation is
  // not truncating.
  if (!is_truncating()) {
    set_can_overflow(!range->Fits(range_size));
  }

  range->Clamp(range_size);
}
3022
// Caches a copy of |range| in |*slot| (allocating on first use), stripping
// symbolic dependencies so the cached copy stays valid after analysis. A
// null |range| clears any previously cached information.
// NOTE(review): the trailing parameter line (presumably
// 'RangeBoundary::RangeSize size) {') was dropped by extraction — restore
// from upstream before compiling.
static void CacheRange(Range** slot,
                       const Range* range,
  if (range != nullptr) {
    if (*slot == nullptr) {
      *slot = new Range();
    }
    **slot = *range;

    // Eliminate any symbolic dependencies from the range information.
    (*slot)->ClampToConstant(size);
  } else if (*slot != nullptr) {
    **slot = Range();  // Clear cached range information.
  }
}
3038
3040 auto const left_size =
3042 auto const right_size =
3044 InferRangeHelper(GetInputRange(analysis, left_size, left()),
3045 GetInputRange(analysis, right_size, right()), range);
3046}
3047
3049 const Range* right_smi_range = analysis->GetSmiRange(right());
3050 // TODO(vegorov) completely remove this once GetSmiRange is eliminated.
3051 if (op_kind() == Token::kSHL || op_kind() == Token::kSHR ||
3052 op_kind() == Token::kUSHR || op_kind() == Token::kMOD ||
3053 op_kind() == Token::kTRUNCDIV) {
3054 CacheRange(&right_range_, right_smi_range,
3056 }
3057 InferRangeHelper(analysis->GetSmiRange(left()), right_smi_range, range);
3058}
3059
3061 const Range* right_range = RequiredInputRepresentation(1) == kTagged
3062 ? analysis->GetSmiRange(right())
3063 : right()->definition()->range();
3064 CacheRange(&shift_range_, right()->definition()->range(),
3066 InferRangeHelper(left()->definition()->range(), right_range, range);
3067}
3068
3070 const Range* value_range = value()->definition()->range();
3071 if (Range::IsUnknown(value_range)) {
3073 } else {
3074 ASSERT_VALID_RANGE_FOR_REPRESENTATION(value()->definition(), value_range,
3076 *range = *value_range;
3077 }
3078}
3079
3081 auto* const value_range = value()->Type()->ToCid() == kSmiCid
3082 ? analysis->GetSmiRange(value())
3083 : value()->definition()->range();
3084 const Range to_range = Range::Full(representation());
3085
3086 if (Range::IsUnknown(value_range)) {
3087 *range = to_range;
3088 } else if (value_range->IsWithin(&to_range)) {
3089 *range = *value_range;
3090 } else if (is_truncating()) {
3091 // If truncating, then in most cases any non-representable values means
3092 // no assumption can be made about the truncated value.
3093 *range = to_range;
3094 } else {
3095 // When not truncating, then unboxing deoptimizes if the value is outside
3096 // the range representation.
3097 *range = value_range->Intersect(&to_range);
3098 }
3100}
3101
3103 ASSERT(to() != kUntagged); // Not an integer-valued definition.
3106
3107 const Range* const value_range = value()->definition()->range();
3108 const Range to_range = Range::Full(to());
3109
3110 if (from() == kUntagged) {
3111 ASSERT(value_range == nullptr); // Not an integer-valued definition.
3112 *range = to_range;
3113 } else if (Range::IsUnknown(value_range)) {
3114 *range = to_range;
3115 } else if (RepresentationUtils::ValueSize(to()) >
3119 // All signed unboxed ints of larger sizes can represent all values for
3120 // signed or unsigned unboxed ints of smaller sizes, and all unsigned
3121 // unboxed ints of larger sizes can represent all values for unsigned
3122 // boxed ints of smaller sizes.
3123 *range = *value_range;
3124 } else if (is_truncating()) {
3125 // Either the bits are being reinterpreted (if the two representations
3126 // are the same size) or a larger value is being truncated. That means
3127 // we need to determine whether or not the value range lies within the
3128 // range of numbers that have the same representation (modulo truncation).
3129 const Range common_range = Range::Full(from()).Intersect(&to_range);
3130 if (value_range->IsWithin(&common_range)) {
3131 *range = *value_range;
3132 } else {
3133 // In most cases, if there are non-representable values, then no
3134 // assumptions can be made about the converted value.
3135 *range = to_range;
3136 }
3137 } else {
3138 // The conversion deoptimizes if the value is outside the range represented
3139 // by to(), so we can just take the intersection.
3140 *range = value_range->Intersect(&to_range);
3141 }
3142
3144}
3145
3147 const Range* value_range = value()->definition()->range();
3148 if (!Range::IsUnknown(value_range)) {
3149 *range = *value_range;
3150 } else {
3152 }
3153}
3154
3156 if (index->BindsToSmiConstant() && length->BindsToSmiConstant()) {
3157 const auto index_val = index->BoundSmiConstant();
3158 const auto length_val = length->BoundSmiConstant();
3159 return (0 <= index_val && index_val < length_val);
3160 }
3161
3162 // Range of the index is unknown can't decide if the check is redundant.
3163 Definition* index_defn = index->definition();
3164 Range* index_range = index_defn->range();
3165 if (index_range == nullptr) {
3166 if (!index->BindsToSmiConstant()) {
3167 return false;
3168 }
3169 // index_defn itself is not necessarily the constant.
3170 index_defn = index_defn->OriginalDefinition();
3171 Range range;
3172 index_defn->InferRange(nullptr, &range);
3173 ASSERT(!Range::IsUnknown(&range));
3174 index_defn->set_range(range);
3175 index_range = index_defn->range();
3176 }
3177
3178 // Range of the index is not positive. Check can't be redundant.
3179 if (Range::ConstantMinSmi(index_range).ConstantValue() < 0) {
3180 return false;
3181 }
3182
3184 RangeBoundary max_upper = max.UpperBound();
3185 RangeBoundary array_length =
3187 RangeBoundary length_lower = array_length.LowerBound();
3188 if (max_upper.OverflowedSmi() || length_lower.OverflowedSmi()) {
3189 return false;
3190 }
3191
3192 // Try to compare constant boundaries.
3193 if (max_upper.ConstantValue() < length_lower.ConstantValue()) {
3194 return true;
3195 }
3196
3197 RangeBoundary canonical_length = CanonicalizeBoundary(
3198 array_length,
3200 if (canonical_length.OverflowedSmi()) {
3201 return false;
3202 }
3203
3204 // Try symbolic comparison.
3205 do {
3206 if (DependOnSameSymbol(max, canonical_length)) {
3207 return max.offset() < canonical_length.offset();
3208 }
3209 } while (CanonicalizeMaxBoundary(&max) ||
3210 CanonicalizeMinBoundary(&canonical_length));
3211
3212 // Failed to prove that maximum is bounded with array length.
3213 return false;
3214}
3215
3217 // First, try to prove redundancy with the results of range analysis.
3219 return true;
3220 } else if (!use_loops) {
3221 return false;
3222 }
3223 // Next, try to prove redundancy with the results of induction analysis.
3224 LoopInfo* loop = GetBlock()->loop_info();
3225 if (loop != nullptr) {
3226 return loop->IsInRange(this, index(), length());
3227 }
3228 return false;
3229}
3230
3231} // namespace dart
#define check(reporter, ref, unref, make, kill)
Definition: RefCntTest.cpp:85
#define UNREACHABLE()
Definition: assert.h:248
GLenum type
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Value * value() const
Definition: il.h:4422
void Add(const T &value)
intptr_t length() const
static bool IsSupported(Token::Kind op_kind, Value *left, Value *right)
Definition: il.h:9497
void set_can_overflow(bool overflow)
Definition: il.h:9401
Value * right() const
Definition: il.h:9398
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Token::Kind op_kind() const
Definition: il.h:9396
Value * left() const
Definition: il.h:9397
static BinaryIntegerOpInstr * Make(Representation representation, Token::Kind op_kind, Value *left, Value *right, intptr_t deopt_id, SpeculativeMode speculative_mode=kGuardInputs)
Definition: il.cc:2293
bool is_truncating() const
Definition: il.h:9406
virtual intptr_t DeoptimizationTarget() const
Definition: il.h:9424
virtual void InferRange(RangeAnalysis *analysis, Range *range)
static bool IsSupported(Token::Kind op_kind)
Definition: il.h:9566
void Add(intptr_t i)
Definition: bit_vector.h:63
bool Contains(intptr_t i) const
Definition: bit_vector.h:91
intptr_t block_id() const
Definition: il.h:1661
LoopInfo * loop_info() const
Definition: il.h:1737
GrowableArray< Definition * > * initial_definitions()
Definition: il.h:1917
bool Done() const
Definition: flow_graph.h:46
static void RemoveGeneralizedCheck(CheckArrayBoundInstr *check)
BoundsCheckGeneralizer(RangeAnalysis *range_analysis, FlowGraph *flow_graph)
void TryGeneralize(CheckArrayBoundInstr *check)
Value * value() const
Definition: il.h:8528
Representation from_representation() const
Definition: il.h:8529
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Value * index() const
Definition: il.h:10797
Value * length() const
Definition: il.h:10796
bool IsRedundant(bool use_loops=false)
intptr_t NumCids() const
Definition: class_table.h:447
const AbstractType * ToAbstractType()
bool is_aot() const
static CompilerState & Current()
const Object & value() const
Definition: il.h:4230
virtual void InferRange(RangeAnalysis *analysis, Range *range)
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Value * value() const
Definition: il.h:4192
Range * constraint() const
Definition: il.h:4193
bool IsInt64Definition()
Definition: il.h:8884
static bool IsArrayLength(Definition *def)
Definition: il.cc:585
void ReplaceWith(Definition *other, ForwardInstructionIterator *iterator)
Definition: il.cc:1662
Range * range() const
Definition: il.h:2636
void set_range(const Range &)
CompileType * Type()
Definition: il.h:2521
bool IsInt32Definition()
Definition: il.h:2534
bool HasOnlyInputUse(Value *use) const
Definition: il.cc:1489
bool HasSSATemp() const
Definition: il.h:2508
Range * range_
Definition: il.h:2692
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Definition * OriginalDefinition()
Definition: il.cc:532
friend class Value
Definition: il.h:2690
static constexpr intptr_t kNone
Definition: deopt_id.h:27
GraphEntryInstr * graph_entry() const
Definition: flow_graph.h:268
ConstantInstr * GetConstant(const Object &object, Representation representation=kTagged)
Definition: flow_graph.cc:187
bool should_print() const
Definition: flow_graph.h:503
Zone * zone() const
Definition: flow_graph.h:261
intptr_t current_ssa_temp_index() const
Definition: flow_graph.h:243
const LoopHierarchy & GetLoopHierarchy()
Definition: flow_graph.h:430
const Function & function() const
Definition: flow_graph.h:130
BlockIterator reverse_postorder_iterator() const
Definition: flow_graph.h:219
void InsertBefore(Instruction *next, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.h:312
static void RenameDominatedUses(Definition *def, Instruction *dom, Definition *other)
Definition: flow_graph.cc:2642
void InsertAfter(Instruction *prev, Instruction *instr, Environment *env, UseKind use_kind)
Definition: flow_graph.cc:273
const CidRangeVector & SubtypeRangesForClass(const Class &klass, bool include_abstract, bool exclude_null)
Definition: il.cc:110
virtual void InferRange(RangeAnalysis *analysis, Range *range)
static bool IsInvariant(const InductionVar *x)
Definition: loops.h:135
static bool IsLinear(const InductionVar *x)
Definition: loops.h:154
static bool NullIsAssignableTo(const AbstractType &other)
Definition: object.cc:20674
virtual intptr_t InputCount() const =0
virtual Value * InputAt(intptr_t i) const =0
virtual BlockEntryInstr * GetBlock()
Definition: il.cc:1352
@ kGuardInputs
Definition: il.h:972
@ kNotSpeculative
Definition: il.h:975
const char * ToCString() const
Definition: il_printer.cc:1683
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:1241
bool IsDominatedBy(Instruction *dom)
Definition: il.cc:1581
virtual Representation representation() const
Definition: il.h:1260
Value * value() const
Definition: il.h:11044
bool is_truncating() const
Definition: il.h:11048
virtual void InferRange(RangeAnalysis *analysis, Range *range)
Representation to() const
Definition: il.h:11047
Representation from() const
Definition: il.h:11046
IntegerInstructionSelector(FlowGraph *flow_graph)
bool has_dynamically_extendable_classes() const
Definition: isolate.h:720
static IsolateGroup * Current()
Definition: isolate.h:539
ClassTable * class_table() const
Definition: isolate.h:496
Value * object() const
Definition: il.h:8066
void InferRange(uword *lower, uword *upper)
virtual void InferRange(RangeAnalysis *analysis, Range *range)
intptr_t class_id() const
Definition: il.h:6899
virtual void InferRange(RangeAnalysis *analysis, Range *range)
const Slot & slot() const
Definition: il.h:8144
virtual Representation representation() const
Definition: il.cc:921
intptr_t class_id() const
Definition: il.h:6803
virtual void InferRange(RangeAnalysis *analysis, Range *range)
void ComputeInduction() const
Definition: loops.cc:1207
bool IsInRange(Instruction *pos, Value *index, Value *length)
Definition: loops.cc:1093
static Object & Handle()
Definition: object.h:407
static Object & ZoneHandle()
Definition: object.h:419
virtual void InferRange(RangeAnalysis *analysis, Range *range)
virtual BlockEntryInstr * GetBlock()
Definition: il.h:2816
void AssignRangesRecursively(Definition *defn)
const Range * GetIntRange(Value *value) const
static bool IsIntegerDefinition(Definition *defn)
const Range * GetSmiRange(Value *value) const
bool IsUnknown() const
bool Equals(const RangeBoundary &other) const
bool IsConstant() const
bool OverflowedSmi() const
static bool WillSubOverflow(const RangeBoundary &a, const RangeBoundary &b)
static RangeBoundary FromDefinition(Definition *defn, int64_t offs=0)
int64_t offset() const
static RangeBoundary MaxSmi()
RangeBoundary Clamp(RangeSize size) const
static bool SymbolicAdd(const RangeBoundary &a, const RangeBoundary &b, RangeBoundary *result)
static bool IsValidOffsetForSymbolicRangeBoundary(int64_t offset)
bool IsMaximumOrAbove(RangeSize size) const
static bool WillAddOverflow(const RangeBoundary &a, const RangeBoundary &b)
static RangeBoundary MinConstant(RangeSize size)
static RangeBoundary Add(const RangeBoundary &a, const RangeBoundary &b)
RangeBoundary UpperBound() const
static RangeBoundary JoinMax(RangeBoundary a, RangeBoundary b, RangeBoundary::RangeSize size)
RangeBoundary LowerBound() const
static bool SymbolicSub(const RangeBoundary &a, const RangeBoundary &b, RangeBoundary *result)
static RangeBoundary IntersectionMin(RangeBoundary a, RangeBoundary b)
static RangeBoundary MinSmi()
static RangeBoundary Shr(const RangeBoundary &value_boundary, int64_t shift_count)
static RangeBoundary IntersectionMax(RangeBoundary a, RangeBoundary b)
int64_t ConstantValue() const
static RangeBoundary JoinMin(RangeBoundary a, RangeBoundary b, RangeBoundary::RangeSize size)
static constexpr int64_t kMax
static RangeBoundary Sub(const RangeBoundary &a, const RangeBoundary &b)
bool IsSymbol() const
static RangeBoundary Shl(const RangeBoundary &value_boundary, int64_t shift_count)
static RangeBoundary MaxConstant(RangeSize size)
static bool WillShlOverflow(const RangeBoundary &a, int64_t shift_count)
bool IsMinimumOrBelow(RangeSize size) const
static RangeBoundary FromConstant(int64_t val)
Definition * symbol() const
static bool Fits(Range *range, RangeBoundary::RangeSize size)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static RangeBoundary ConstantMinSmi(const Range *range)
static int64_t ConstantAbsMax(const Range *range)
static bool OnlyNegativeOrZero(const Range &a, const Range &b)
static void BitwiseOp(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
bool IsPositive() const
static const char * ToCString(const Range *range)
void set_max(const RangeBoundary &value)
static void Shr(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
static RangeBoundary ConstantMaxSmi(const Range *range)
bool IsWithin(int64_t min_int, int64_t max_int) const
Range Intersect(const Range *other) const
void ClampToConstant(RangeBoundary::RangeSize size)
static void Mul(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
bool IsNegative() const
static bool OnlyPositiveOrZero(const Range &a, const Range &b)
static bool IsUnknown(const Range *other)
static void TruncDiv(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
static void Ushr(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
const RangeBoundary & min() const
bool IsUnsatisfiable() const
bool Fits(RangeBoundary::RangeSize size) const
static void BinaryOp(const Token::Kind op, const Range *left_range, const Range *right_range, Definition *left_defn, Range *result)
const RangeBoundary & max() const
static void Add(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max, Definition *left_defn)
static void And(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
static Range Full(RangeBoundary::RangeSize size)
static void Sub(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max, Definition *left_defn)
void Clamp(RangeBoundary::RangeSize size)
static void Shl(const Range *left_range, const Range *right_range, RangeBoundary *min, RangeBoundary *max)
bool OnlyGreaterThanOrEqualTo(int64_t val) const
static RangeBoundary ConstantMin(const Range *range)
void set_min(const RangeBoundary &value)
static void Mod(const Range *right_range, RangeBoundary *min, RangeBoundary *max)
bool OnlyLessThanOrEqualTo(int64_t val) const
static RangeBoundary ConstantMax(const Range *range)
bool Overlaps(int64_t min_int, int64_t max_int) const
static int64_t ConstantAbsMin(const Range *range)
T * Emit(T *instruction, Instruction *post_dominator)
Scheduler(FlowGraph *flow_graph)
virtual void InferRange(RangeAnalysis *analysis, Range *range)
static SmiPtr New(intptr_t value)
Definition: object.h:10006
static bool IsValid(int64_t value)
Definition: object.h:10026
Zone * zone() const
Definition: thread_state.h:37
HierarchyInfo * hierarchy_info() const
Definition: thread.h:593
static Thread * Current()
Definition: thread.h:362
static Token::Kind FlipComparison(Token::Kind op)
Definition: token.h:352
static Token::Kind NegateComparison(Token::Kind op)
Definition: token.h:322
static const char * Str(Kind tok)
Definition: token.h:269
static bool IsSupported(Token::Kind op_kind)
Definition: il.h:9308
virtual Representation representation() const
Definition: il.h:8703
Value * value() const
Definition: il.h:8678
bool is_truncating() const
Definition: il.h:8772
virtual void InferRange(RangeAnalysis *analysis, Range *range)
virtual void InferRange(RangeAnalysis *analysis, Range *range)
static T AbsWithSaturation(T x)
Definition: utils.h:62
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static constexpr T NBitMask(size_t n)
Definition: utils.h:548
static bool WillSubOverflow(int64_t a, int64_t b)
Definition: utils.h:423
static T Minimum(T x, T y)
Definition: utils.h:36
static constexpr size_t BitLength(int64_t value)
Definition: utils.h:213
static bool WillAddOverflow(int64_t a, int64_t b)
Definition: utils.h:418
Definition: il.h:75
bool IsSingleUse() const
Definition: il.h:117
intptr_t BoundSmiConstant() const
Definition: il.cc:1212
bool BindsToSmiConstant() const
Definition: il.cc:1208
void set_definition(Definition *definition)
Definition: il.h:104
Value * CopyWithType(Zone *zone)
Definition: il.h:138
Definition * definition() const
Definition: il.h:103
CompileType * Type()
intptr_t InputCount() const
Definition: il.h:2794
Value * InputAt(intptr_t i) const
Definition: il.h:2795
char * MakeCopyOfString(const char *str)
Definition: zone.cc:270
static const word kMaxElements
Definition: runtime_api.h:594
static const word kMaxElements
Definition: runtime_api.h:783
#define THR_Print(format,...)
Definition: log.h:20
#define ASSERT(E)
static bool b
struct MyStruct a[10]
#define FATAL(error)
uint8_t value
GAsyncResult * result
uint32_t * target
constexpr bool FLAG_support_il_printer
Definition: flag_list.h:48
Dart_NativeFunction function
Definition: fuchsia.cc:51
static float max(float r, float g, float b)
Definition: hsl.cpp:49
static float min(float r, float g, float b)
Definition: hsl.cpp:48
size_t length
double x
constexpr word kSmiMin
Definition: runtime_api.h:306
bool IsSmi(int64_t v)
Definition: runtime_api.cc:31
constexpr word kSmiMax
Definition: runtime_api.h:305
Definition: dart_vm.cc:33
static void ConvertRangeToSigned(uint64_t a, uint64_t b, int64_t *sa, int64_t *sb)
constexpr int64_t kMaxInt64
Definition: globals.h:486
constexpr int64_t kMinInt64
Definition: globals.h:485
static void NarrowBinaryInt64Op(BinaryInt64OpInstr *int64_op)
static bool IsRedundantBasedOnRangeInformation(Value *index, Value *length)
static RangeBoundary NarrowMin(const Range *range, const Range *new_range, RangeBoundary::RangeSize size)
static bool AreEqualDefinitions(Definition *a, Definition *b)
static void ConvertRangeToUnsigned(int64_t a, int64_t b, uint64_t *ua, uint64_t *ub)
static void Join(Range *range, Definition *defn, const Range *defn_range, RangeBoundary::RangeSize size)
static const Range * GetInputRange(RangeAnalysis *analysis, RangeBoundary::RangeSize size, Value *input)
static RangeBoundary WidenMax(const Range *range, const Range *new_range, RangeBoundary::RangeSize size)
constexpr uint64_t kMaxUint64
Definition: globals.h:487
@ kIllegalCid
Definition: class_id.h:214
@ kDynamicCid
Definition: class_id.h:253
Representation
Definition: locations.h:66
static void CacheRange(Range **slot, const Range *range, RangeBoundary::RangeSize size)
constexpr uint32_t kMaxUint32
Definition: globals.h:484
constexpr intptr_t kBitsPerByte
Definition: globals.h:463
static bool CanonicalizeMaxBoundary(RangeBoundary *a)
uintptr_t uword
Definition: globals.h:501
static RangeBoundary EnsureAcyclicSymbol(BlockEntryInstr *phi_block, const RangeBoundary &a, const RangeBoundary &limit)
static bool CanonicalizeForComparison(RangeBoundary *a, RangeBoundary *b, BoundaryOp op, const RangeBoundary &overflow)
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
const intptr_t cid
static Definition * UnwrapConstraint(Definition *defn)
static int BitSize(const Range *range)
static bool DominatesPhi(BlockEntryInstr *a, BlockEntryInstr *phi_block)
static RangeBoundary WidenMin(const Range *range, const Range *new_range, RangeBoundary::RangeSize size)
constexpr int32_t kMaxInt32
Definition: globals.h:483
static void NarrowShiftInt64Op(ShiftIntegerOpInstr *int64_op)
static RangeBoundary::RangeSize RepresentationToRangeSize(Representation r)
static RangeBoundary::RangeSize RangeSizeForPhi(Definition *phi)
static bool DependOnSameSymbol(const RangeBoundary &a, const RangeBoundary &b)
static RangeBoundary CanonicalizeBoundary(const RangeBoundary &a, const RangeBoundary &overflow)
static RangeBoundary NarrowMax(const Range *range, const Range *new_range, RangeBoundary::RangeSize size)
static constexpr intptr_t kClassIdTagMax
Definition: class_id.h:22
constexpr intptr_t kBitsPerInt64
Definition: globals.h:467
static bool DependsOnSymbol(const RangeBoundary &a, Definition *symbol)
bool IsStringClassId(intptr_t index)
Definition: class_id.h:350
bool(* BoundaryOp)(RangeBoundary *)
static bool CanonicalizeMinBoundary(RangeBoundary *a)
DECLARE_FLAG(bool, show_invisible_frames)
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not defaults to or::depending on whether ipv6 is specified vm service A custom Dart VM Service port The default is to pick a randomly available open port disable vm Disable the Dart VM Service The Dart VM Service is never available in release mode disable vm service Disable mDNS Dart VM Service publication Bind to the IPv6 localhost address for the Dart VM Service Ignored if vm service host is set endless trace buffer
Definition: switches.h:126
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not set
Definition: switches.h:76
dictionary headers
Definition: find_headers.py:71
#define Pd
Definition: globals.h:408
#define T
Definition: precompiler.cc:65
#define ASSERT_VALID_RANGE_FOR_REPRESENTATION(instr, range, representation)
#define Z
#define UNBOXED_NATIVE_SLOT_CASE(Class, __, Field, ___, ____)
#define NATIVE_SLOT_CASE(ClassName, __, FieldName, ___, ____)
static const char header[]
Definition: skpbench.cpp:88
static SkString join(const CommandLineFlags::StringArray &)
Definition: skpbench.cpp:741
#define UNBOXED_NATIVE_SLOTS_LIST(V)
Definition: slot.h:297
#define NOT_INT_NATIVE_SLOTS_LIST(V)
Definition: slot.h:335
SeparatedVector2 offset
static constexpr size_t ValueSize(Representation rep)
Definition: locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition: locations.h:92
static int64_t MaxValue(Representation rep)
Definition: locations.cc:62
static int64_t MinValue(Representation rep)
Definition: locations.cc:49
static constexpr bool IsUnboxed(Representation rep)
Definition: locations.h:101
static const char * ToCString(Representation rep)
Definition: locations.cc:129
static bool IsUnsignedInteger(Representation rep)
Definition: locations.h:126
static Representation RepresentationOfArrayElement(classid_t cid)
Definition: locations.cc:79