Flutter Engine
The Flutter Engine
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
aot_call_specializer.cc
Go to the documentation of this file.
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6
7#include <utility>
8
9#include "vm/bit_vector.h"
17#include "vm/compiler/cha.h"
22#include "vm/cpu.h"
23#include "vm/dart_entry.h"
24#include "vm/exceptions.h"
25#include "vm/hash_map.h"
26#include "vm/object.h"
27#include "vm/object_store.h"
28#include "vm/parser.h"
29#include "vm/resolver.h"
30#include "vm/scopes.h"
31#include "vm/stack_frame.h"
32#include "vm/symbols.h"
33
34namespace dart {
35
37 max_exhaustive_polymorphic_checks,
38 5,
39 "If a call receiver is known to be of at most this many classes, "
40 "generate exhaustive class tests instead of a megamorphic call");
41
42// Quick access to the current isolate and zone.
43#define IG (isolate_group())
44#define Z (zone())
45
46#ifdef DART_PRECOMPILER
47
48// Returns named function that is a unique dynamic target, i.e.,
49// - the target is identified by its name alone, since it occurs only once.
50// - target's class has no subclasses, and neither is subclassed, i.e.,
51// the receiver type can be only the function's class.
52// Returns Function::null() if there is no unique dynamic target for
53// given 'fname'. 'fname' must be a symbol.
54static void GetUniqueDynamicTarget(IsolateGroup* isolate_group,
55 const String& fname,
56 Object* function) {
57 UniqueFunctionsMap functions_map(
58 isolate_group->object_store()->unique_dynamic_targets());
59 ASSERT(fname.IsSymbol());
60 *function = functions_map.GetOrNull(fname);
61 ASSERT(functions_map.Release().ptr() ==
62 isolate_group->object_store()->unique_dynamic_targets());
63}
64
66 Precompiler* precompiler,
67 FlowGraph* flow_graph,
68 SpeculativeInliningPolicy* speculative_policy)
69 : CallSpecializer(flow_graph,
70 speculative_policy,
71 /* should_clone_fields=*/false),
72 precompiler_(precompiler),
73 has_unique_no_such_method_(false) {
74 Function& target_function = Function::Handle();
75 if (isolate_group()->object_store()->unique_dynamic_targets() !=
76 Array::null()) {
77 GetUniqueDynamicTarget(isolate_group(), Symbols::NoSuchMethod(),
78 &target_function);
79 has_unique_no_such_method_ = !target_function.IsNull();
80 }
81}
82
// Tries to turn a dynamic call into a monomorphic call when the precompiler
// recorded that the selector resolves to exactly one function program-wide
// and CHA proves a single concrete receiver class. Returns true on success.
bool AotCallSpecializer::TryCreateICDataForUniqueTarget(
    InstanceCallInstr* call) {
  // Unique-target metadata is only present when the precompiler recorded it.
  if (isolate_group()->object_store()->unique_dynamic_targets() ==
      Array::null()) {
    return false;
  }

  // Check if the target is unique.
  Function& target_function = Function::Handle(Z);
  GetUniqueDynamicTarget(isolate_group(), call->function_name(),
                         &target_function);

  if (target_function.IsNull()) {
    return false;
  }

  // Calls passing named arguments and calls to a function taking named
  // arguments must be resolved/checked at runtime.
  // Calls passing a type argument vector and calls to a generic function must
  // be resolved/checked at runtime.
  if (target_function.HasOptionalNamedParameters() ||
      target_function.IsGeneric() ||
      !target_function.AreValidArgumentCounts(
          call->type_args_len(), call->ArgumentCountWithoutTypeArgs(),
          call->argument_names().IsNull() ? 0 : call->argument_names().Length(),
          /* error_message = */ nullptr)) {
    return false;
  }

  // Monomorphic dispatch requires a single concrete implementation of the
  // target's class in the whole hierarchy.
  const Class& cls = Class::Handle(Z, target_function.Owner());
  intptr_t implementor_cid = kIllegalCid;
  if (!CHA::HasSingleConcreteImplementation(cls, &implementor_cid)) {
    return false;
  }

  call->SetTargets(
      CallTargets::CreateMonomorphic(Z, implementor_cid, target_function));
  ASSERT(call->Targets().IsMonomorphic());

  // If we know that the only noSuchMethod is Object.noSuchMethod then
  // this call is guaranteed to either succeed or throw.
  if (has_unique_no_such_method_) {
    call->set_has_unique_selector(true);

    // Add redefinition of the receiver to prevent code motion across
    // this call.
    const intptr_t receiver_index = call->FirstArgIndex();
    RedefinitionInstr* redefinition = new (Z)
        RedefinitionInstr(new (Z) Value(call->ArgumentAt(receiver_index)));
    flow_graph()->AllocateSSAIndex(redefinition);
    redefinition->InsertAfter(call);
    // Replace all uses of the receiver dominated by this call.
    FlowGraph::RenameDominatedUses(call->ArgumentAt(receiver_index),
                                   redefinition, redefinition);
    // A redefinition that ended up with no uses is dead code; drop it.
    if (!redefinition->HasUses()) {
      redefinition->RemoveFromGraph();
    }
  }

  return true;
}
144
145bool AotCallSpecializer::TryCreateICData(InstanceCallInstr* call) {
146 if (TryCreateICDataForUniqueTarget(call)) {
147 return true;
148 }
149
150 return CallSpecializer::TryCreateICData(call);
151}
152
153bool AotCallSpecializer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
154 if ((precompiler_ == nullptr) ||
155 !precompiler_->get_runtime_type_is_unique()) {
156 return false;
157 }
158
159 if (call->function_name().ptr() != Symbols::GetRuntimeType().ptr()) {
160 return false;
161 }
162
163 // There is only a single function Object.get:runtimeType that can be invoked
164 // by this call. Convert dynamic invocation to a static one.
165 const Class& cls = Class::Handle(Z, IG->object_store()->object_class());
166 const Function& function =
167 Function::Handle(Z, call->ResolveForReceiverClass(cls));
168 ASSERT(!function.IsNull());
169 const Function& target = Function::ZoneHandle(Z, function.ptr());
170 StaticCallInstr* static_call =
171 StaticCallInstr::FromCall(Z, call, target, call->CallCount());
172 // Since the result is either a Type or a FunctionType, we cannot pin it.
173 call->ReplaceWith(static_call, current_iterator());
174 return true;
175}
176
177static bool IsGetRuntimeType(Definition* defn) {
178 StaticCallInstr* call = defn->AsStaticCall();
179 return (call != nullptr) && (call->function().recognized_kind() ==
180 MethodRecognizer::kObjectRuntimeType);
181}
182
183// Recognize a.runtimeType == b.runtimeType and fold it into
184// Object._haveSameRuntimeType(a, b).
185// Note: this optimization is not speculative.
bool AotCallSpecializer::TryReplaceWithHaveSameRuntimeType(
    TemplateDartCall<0>* call) {
  // Caller guarantees a two-argument equality-style call with no type
  // arguments: an instance call with two checked args, or a static call.
  ASSERT((call->IsInstanceCall() &&
          (call->AsInstanceCall()->ic_data()->NumArgsTested() == 2)) ||
         call->IsStaticCall());
  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);

  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);

  // Only fold when both operands are runtimeType-getter results whose sole
  // use is this comparison; otherwise the getters must remain observable.
  if (IsGetRuntimeType(left) && left->input_use_list()->IsSingleUse() &&
      IsGetRuntimeType(right) && right->input_use_list()->IsSingleUse()) {
    const Class& cls = Class::Handle(Z, IG->object_store()->object_class());
    const Function& have_same_runtime_type = Function::ZoneHandle(
        Z,
        cls.LookupStaticFunctionAllowPrivate(Symbols::HaveSameRuntimeType()));
    ASSERT(!have_same_runtime_type.IsNull());

    // Forward the original receivers of the two runtimeType getters.
    InputsArray args(Z, 2);
    args.Add(left->ArgumentValueAt(0)->CopyWithType(Z));
    args.Add(right->ArgumentValueAt(0)->CopyWithType(Z));
    const intptr_t kTypeArgsLen = 0;
    StaticCallInstr* static_call = new (Z)
        StaticCallInstr(call->source(), have_same_runtime_type, kTypeArgsLen,
                        Object::null_array(),  // argument_names
                        std::move(args), call->deopt_id(), call->CallCount(),
                        ICData::kOptimized);
    static_call->SetResultType(Z, CompileType::FromCid(kBoolCid));
    ReplaceCall(call, static_call);
    // ReplaceCall moved environment from 'call' to 'static_call'.
    // Update arguments of 'static_call' in the environment.
    Environment* env = static_call->env();
    env->ValueAt(env->Length() - 2)
        ->BindToEnvironment(static_call->ArgumentAt(0));
    env->ValueAt(env->Length() - 1)
        ->BindToEnvironment(static_call->ArgumentAt(1));
    return true;
  }

  return false;
}
228
229bool AotCallSpecializer::TryInlineFieldAccess(InstanceCallInstr* call) {
230 const Token::Kind op_kind = call->token_kind();
231 if ((op_kind == Token::kGET) && TryInlineInstanceGetter(call)) {
232 return true;
233 }
234 if ((op_kind == Token::kSET) && TryInlineInstanceSetter(call)) {
235 return true;
236 }
237 return false;
238}
239
240bool AotCallSpecializer::TryInlineFieldAccess(StaticCallInstr* call) {
241 if (call->function().IsImplicitGetterFunction()) {
242 Field& field = Field::ZoneHandle(call->function().accessor_field());
243 if (field.is_late()) {
244 // TODO(dartbug.com/40447): Inline implicit getters for late fields.
245 return false;
246 }
247 if (should_clone_fields_) {
248 field = field.CloneFromOriginal();
249 }
250 InlineImplicitInstanceGetter(call, field);
251 return true;
252 }
253
254 return false;
255}
256
257bool AotCallSpecializer::IsSupportedIntOperandForStaticDoubleOp(
258 CompileType* operand_type) {
259 if (operand_type->IsNullableInt()) {
260 if (operand_type->ToNullableCid() == kSmiCid) {
261 return true;
262 }
263
265 return true;
266 }
267 }
268
269 return false;
270}
271
272Value* AotCallSpecializer::PrepareStaticOpInput(Value* input,
273 intptr_t cid,
274 Instruction* call) {
275 ASSERT((cid == kDoubleCid) || (cid == kMintCid));
276
277 if (input->Type()->is_nullable()) {
278 const String& function_name =
279 (call->IsInstanceCall()
280 ? call->AsInstanceCall()->function_name()
281 : String::ZoneHandle(Z, call->AsStaticCall()->function().name()));
282 AddCheckNull(input, function_name, call->deopt_id(), call->env(), call);
283 }
284
285 input = input->CopyWithType(Z);
286
287 if (cid == kDoubleCid && input->Type()->IsNullableInt()) {
288 Definition* conversion = nullptr;
289
290 if (input->Type()->ToNullableCid() == kSmiCid) {
291 conversion = new (Z) SmiToDoubleInstr(input, call->source());
293 conversion = new (Z) Int64ToDoubleInstr(input, DeoptId::kNone,
294 Instruction::kNotSpeculative);
295 } else {
296 UNREACHABLE();
297 }
298
299 if (FLAG_trace_strong_mode_types) {
300 THR_Print("[Strong mode] Inserted %s\n", conversion->ToCString());
301 }
302 InsertBefore(call, conversion, /* env = */ nullptr, FlowGraph::kValue);
303 return new (Z) Value(conversion);
304 }
305
306 return input;
307}
308
309CompileType AotCallSpecializer::BuildStrengthenedReceiverType(Value* input,
310 intptr_t cid) {
311 CompileType* old_type = input->Type();
312 CompileType* refined_type = old_type;
313
314 CompileType type = CompileType::None();
315 if (cid == kSmiCid) {
316 type = CompileType::NullableSmi();
317 refined_type = CompileType::ComputeRefinedType(old_type, &type);
318 } else if (cid == kMintCid) {
319 type = CompileType::NullableMint();
320 refined_type = CompileType::ComputeRefinedType(old_type, &type);
321 } else if (cid == kIntegerCid && !input->Type()->IsNullableInt()) {
322 type = CompileType::NullableInt();
323 refined_type = CompileType::ComputeRefinedType(old_type, &type);
324 } else if (cid == kDoubleCid && !input->Type()->IsNullableDouble()) {
325 type = CompileType::NullableDouble();
326 refined_type = CompileType::ComputeRefinedType(old_type, &type);
327 }
328
329 if (refined_type != old_type) {
330 return *refined_type;
331 }
332 return CompileType::None();
333}
334
335// After replacing a call with a specialized instruction, make sure to
336// update types at all uses, as specialized instruction can provide a more
337// specific type.
338static void RefineUseTypes(Definition* instr) {
339 CompileType* new_type = instr->Type();
340 for (Value::Iterator it(instr->input_use_list()); !it.Done(); it.Advance()) {
341 it.Current()->RefineReachingType(new_type);
342 }
343}
344
345bool AotCallSpecializer::TryOptimizeInstanceCallUsingStaticTypes(
346 InstanceCallInstr* instr) {
347 const Token::Kind op_kind = instr->token_kind();
348 return TryOptimizeIntegerOperation(instr, op_kind) ||
349 TryOptimizeDoubleOperation(instr, op_kind);
350}
351
// Tries to specialize a static call (typically a TFA-de-virtualized instance
// method call) into int/double IL operations, after first attempting the
// runtimeType-equality fold and tightening the receiver's static type.
bool AotCallSpecializer::TryOptimizeStaticCallUsingStaticTypes(
    StaticCallInstr* instr) {
  const String& name = String::Handle(Z, instr->function().name());
  const Token::Kind op_kind = MethodTokenRecognizer::RecognizeTokenKind(name);

  if (op_kind == Token::kEQ && TryReplaceWithHaveSameRuntimeType(instr)) {
    return true;
  }

  // We only specialize instance methods for int/double operations.
  const auto& target = instr->function();
  if (!target.IsDynamicFunction()) {
    return false;
  }

  // For de-virtualized instance calls, we strengthen the type here manually
  // because it might not be attached to the receiver.
  // See http://dartbug.com/35179 for preserving the receiver type information.
  const Class& owner = Class::Handle(Z, target.Owner());
  const intptr_t cid = owner.id();
  if (cid == kSmiCid || cid == kMintCid || cid == kIntegerCid ||
      cid == kDoubleCid) {
    // Sometimes TFA de-virtualizes instance calls to static calls. In such
    // cases the VM might have a looser type on the receiver, so we explicitly
    // tighten it (this is safe since it was proven that the receiver is either
    // null or will end up with that target).
    const intptr_t receiver_index = instr->FirstArgIndex();
    const intptr_t argument_count = instr->ArgumentCountWithoutTypeArgs();
    if (argument_count >= 1) {
      auto receiver_value = instr->ArgumentValueAt(receiver_index);
      auto receiver = receiver_value->definition();
      auto type = BuildStrengthenedReceiverType(receiver_value, cid);
      if (!type.IsNone()) {
        // Insert (or reuse) a redefinition carrying the tightened type, then
        // push the refined type to all dominated uses.
        auto redefinition =
            flow_graph()->EnsureRedefinition(instr->previous(), receiver, type);
        if (redefinition != nullptr) {
          RefineUseTypes(redefinition);
        }
      }
    }
  }

  return TryOptimizeIntegerOperation(instr, op_kind) ||
         TryOptimizeDoubleOperation(instr, op_kind);
}
397
398Definition* AotCallSpecializer::TryOptimizeDivisionOperation(
399 TemplateDartCall<0>* instr,
400 Token::Kind op_kind,
401 Value* left_value,
402 Value* right_value) {
403 auto unboxed_constant = [&](int64_t value) -> Definition* {
405#if defined(TARGET_ARCH_IS_32_BIT)
406 Definition* const const_def = new (Z) UnboxedConstantInstr(
407 Smi::ZoneHandle(Z, Smi::New(value)), kUnboxedInt32);
408 InsertBefore(instr, const_def, /*env=*/nullptr, FlowGraph::kValue);
409 return new (Z) IntConverterInstr(kUnboxedInt32, kUnboxedInt64,
410 new (Z) Value(const_def), DeoptId::kNone);
411#else
412 return new (Z) UnboxedConstantInstr(Smi::ZoneHandle(Z, Smi::New(value)),
413 kUnboxedInt64);
414#endif
415 };
416
417 if (!right_value->BindsToConstant()) {
418 return nullptr;
419 }
420
421 const Object& rhs = right_value->BoundConstant();
422 const int64_t value = Integer::Cast(rhs).AsInt64Value(); // smi and mint
423
424 if (value == kMinInt64) {
425 return nullptr; // The absolute value can't be held in an int64_t.
426 }
427
428 const int64_t magnitude = Utils::Abs(value);
429 // The replacements for both operations assume that the magnitude of the
430 // value is a power of two and that the mask derived from the magnitude
431 // can fit in a Smi.
432 if (!Utils::IsPowerOfTwo(magnitude) ||
434 return nullptr;
435 }
436
437 if (op_kind == Token::kMOD) {
438 // Modulo against a constant power-of-two can be optimized into a mask.
439 // x % y -> x & (|y| - 1) for smi masks only
440 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
441
442 Definition* right_definition = unboxed_constant(magnitude - 1);
443 if (magnitude == 1) return right_definition;
444 InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
445 right_value = new (Z) Value(right_definition);
446 return new (Z)
447 BinaryInt64OpInstr(Token::kBIT_AND, left_value, right_value,
448 DeoptId::kNone, Instruction::kNotSpeculative);
449 } else {
450 ASSERT_EQUAL(op_kind, Token::kTRUNCDIV);
451#if !defined(TARGET_ARCH_IS_32_BIT)
452 // If BinaryInt64Op(kTRUNCDIV, ...) is supported, then only perform the
453 // simplest replacements and use the instruction otherwise.
454 if (magnitude != 1) return nullptr;
455#endif
456
457 // If the divisor is negative, then we need to negate the final result.
458 const bool negate = value < 0;
459 Definition* result = nullptr;
460
461 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
462 if (magnitude > 1) {
463 // For two's complement signed arithmetic where the bit width is k
464 // and the divisor is 2^n for some n in [0, k), we can perform a simple
465 // shift if m is non-negative:
466 // m ~/ 2^n => m >> n
467 // For negative m, however, this won't work since just shifting m rounds
468 // towards negative infinity. Instead, we add (2^n - 1) first before
469 // shifting, which rounds the result towards positive infinity
470 // (and thus rounding towards zero, since m is negative):
471 // m ~/ 2^n => (m + (2^n - 1)) >> n
472 // By sign extending the sign bit (the (k-1)-bit) and using that as a
473 // mask, we get a non-branching computation that only adds (2^n - 1)
474 // when m is negative, rounding towards zero in both cases:
475 // m ~/ 2^n => (m + ((m >> (k - 1)) & (2^n - 1))) >> n
476 auto* const sign_bit_position = unboxed_constant(63);
477 InsertBefore(instr, sign_bit_position, /*env=*/nullptr,
478 FlowGraph::kValue);
479 auto* const sign_bit_extended = new (Z)
480 ShiftInt64OpInstr(Token::kSHR, left_value,
481 new (Z) Value(sign_bit_position), DeoptId::kNone);
482 InsertBefore(instr, sign_bit_extended, /*env=*/nullptr,
483 FlowGraph::kValue);
484 auto* rounding_adjustment = unboxed_constant(magnitude - 1);
485 InsertBefore(instr, rounding_adjustment, /*env=*/nullptr,
486 FlowGraph::kValue);
487 rounding_adjustment = new (Z)
488 BinaryInt64OpInstr(Token::kBIT_AND, new (Z) Value(sign_bit_extended),
489 new (Z) Value(rounding_adjustment), DeoptId::kNone,
490 Instruction::kNotSpeculative);
491 InsertBefore(instr, rounding_adjustment, /*env=*/nullptr,
492 FlowGraph::kValue);
493 auto* const left_definition = new (Z)
494 BinaryInt64OpInstr(Token::kADD, left_value->CopyWithType(Z),
495 new (Z) Value(rounding_adjustment), DeoptId::kNone,
496 Instruction::kNotSpeculative);
497 InsertBefore(instr, left_definition, /*env=*/nullptr, FlowGraph::kValue);
498 left_value = new (Z) Value(left_definition);
499 auto* const right_definition =
500 unboxed_constant(Utils::ShiftForPowerOfTwo(magnitude));
501 InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
502 right_value = new (Z) Value(right_definition);
503 result = new (Z) ShiftInt64OpInstr(Token::kSHR, left_value, right_value,
505 } else {
507 // No division needed, just redefine the value.
508 result = new (Z) RedefinitionInstr(left_value);
509 }
510 if (negate) {
511 InsertBefore(instr, result, /*env=*/nullptr, FlowGraph::kValue);
512 result = new (Z) UnaryInt64OpInstr(Token::kNEGATE, new (Z) Value(result),
514 }
515 return result;
516 }
517}
518
519bool AotCallSpecializer::TryOptimizeIntegerOperation(TemplateDartCall<0>* instr,
520 Token::Kind op_kind) {
521 if (instr->type_args_len() != 0) {
522 // Arithmetic operations don't have type arguments.
523 return false;
524 }
525
526 Definition* replacement = nullptr;
527 if (instr->ArgumentCount() == 2) {
528 Value* left_value = instr->ArgumentValueAt(0);
529 Value* right_value = instr->ArgumentValueAt(1);
530 CompileType* left_type = left_value->Type();
531 CompileType* right_type = right_value->Type();
532
533 bool has_nullable_int_args =
534 left_type->IsNullableInt() && right_type->IsNullableInt();
535
536 if (auto* call = instr->AsInstanceCall()) {
537 if (!call->CanReceiverBeSmiBasedOnInterfaceTarget(Z)) {
538 has_nullable_int_args = false;
539 }
540 }
541
542 // We only support binary operations if both operands are nullable integers
543 // or when we can use a cheap strict comparison operation.
544 if (!has_nullable_int_args) {
545 return false;
546 }
547
548 switch (op_kind) {
549 case Token::kEQ:
550 case Token::kNE: {
551 const bool either_can_be_null =
552 left_type->is_nullable() || right_type->is_nullable();
553 replacement = new (Z) EqualityCompareInstr(
554 instr->source(), op_kind, left_value->CopyWithType(Z),
555 right_value->CopyWithType(Z), kMintCid, DeoptId::kNone,
556 /*null_aware=*/either_can_be_null, Instruction::kNotSpeculative);
557 break;
558 }
559 case Token::kLT:
560 case Token::kLTE:
561 case Token::kGT:
562 case Token::kGTE:
563 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
564 right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
565 replacement = new (Z) RelationalOpInstr(
566 instr->source(), op_kind, left_value, right_value, kMintCid,
567 DeoptId::kNone, Instruction::kNotSpeculative);
568 break;
569 case Token::kMOD:
570 case Token::kTRUNCDIV:
571 replacement = TryOptimizeDivisionOperation(instr, op_kind, left_value,
572 right_value);
573 if (replacement != nullptr) break;
574#if defined(TARGET_ARCH_IS_32_BIT)
575 // Truncating 64-bit division and modulus via BinaryInt64OpInstr are
576 // not implemented on 32-bit architectures, so we can only optimize
577 // certain cases and otherwise must leave the call in.
578 break;
579#else
581#endif
582 case Token::kSHL:
584 case Token::kSHR:
586 case Token::kUSHR:
588 case Token::kBIT_OR:
590 case Token::kBIT_XOR:
592 case Token::kBIT_AND:
594 case Token::kADD:
596 case Token::kSUB:
598 case Token::kMUL: {
599 if (op_kind == Token::kSHL || op_kind == Token::kSHR ||
600 op_kind == Token::kUSHR) {
601 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
602 right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
603 replacement = new (Z) ShiftInt64OpInstr(op_kind, left_value,
604 right_value, DeoptId::kNone);
605 } else {
606 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
607 right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
608 replacement = new (Z)
609 BinaryInt64OpInstr(op_kind, left_value, right_value,
610 DeoptId::kNone, Instruction::kNotSpeculative);
611 }
612 break;
613 }
614
615 default:
616 break;
617 }
618 } else if (instr->ArgumentCount() == 1) {
619 Value* left_value = instr->ArgumentValueAt(0);
620 CompileType* left_type = left_value->Type();
621
622 // We only support unary operations on nullable integers.
623 if (!left_type->IsNullableInt()) {
624 return false;
625 }
626
627 if (op_kind == Token::kNEGATE || op_kind == Token::kBIT_NOT) {
628 left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
629 replacement = new (Z) UnaryInt64OpInstr(
630 op_kind, left_value, DeoptId::kNone, Instruction::kNotSpeculative);
631 }
632 }
633
634 if (replacement != nullptr && !replacement->ComputeCanDeoptimize()) {
635 if (FLAG_trace_strong_mode_types) {
636 THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
637 instr->ToCString(), replacement->ToCString());
638 }
639 ReplaceCall(instr, replacement);
640 RefineUseTypes(replacement);
641 return true;
642 }
643
644 return false;
645}
646
647bool AotCallSpecializer::TryOptimizeDoubleOperation(TemplateDartCall<0>* instr,
648 Token::Kind op_kind) {
649 if (instr->type_args_len() != 0) {
650 // Arithmetic operations don't have type arguments.
651 return false;
652 }
653
654 Definition* replacement = nullptr;
655
656 if (instr->ArgumentCount() == 2) {
657 Value* left_value = instr->ArgumentValueAt(0);
658 Value* right_value = instr->ArgumentValueAt(1);
659 CompileType* left_type = left_value->Type();
660 CompileType* right_type = right_value->Type();
661
662 if (!left_type->IsNullableDouble() &&
663 !IsSupportedIntOperandForStaticDoubleOp(left_type)) {
664 return false;
665 }
666 if (!right_type->IsNullableDouble() &&
667 !IsSupportedIntOperandForStaticDoubleOp(right_type)) {
668 return false;
669 }
670
671 switch (op_kind) {
672 case Token::kEQ:
674 case Token::kNE: {
675 // TODO(dartbug.com/32166): Support EQ, NE for nullable doubles.
676 // (requires null-aware comparison instruction).
677 if (!left_type->is_nullable() && !right_type->is_nullable()) {
678 left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
679 right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
680 replacement = new (Z) EqualityCompareInstr(
681 instr->source(), op_kind, left_value, right_value, kDoubleCid,
682 DeoptId::kNone, /*null_aware=*/false,
683 Instruction::kNotSpeculative);
684 break;
685 }
686 break;
687 }
688 case Token::kLT:
690 case Token::kLTE:
692 case Token::kGT:
694 case Token::kGTE: {
695 left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
696 right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
697 replacement = new (Z) RelationalOpInstr(
698 instr->source(), op_kind, left_value, right_value, kDoubleCid,
699 DeoptId::kNone, Instruction::kNotSpeculative);
700 break;
701 }
702 case Token::kADD:
704 case Token::kSUB:
706 case Token::kMUL:
708 case Token::kDIV: {
709 left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
710 right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
711 replacement = new (Z) BinaryDoubleOpInstr(
712 op_kind, left_value, right_value, DeoptId::kNone, instr->source(),
713 Instruction::kNotSpeculative);
714 break;
715 }
716
717 case Token::kBIT_OR:
719 case Token::kBIT_XOR:
721 case Token::kBIT_AND:
723 case Token::kMOD:
725 case Token::kTRUNCDIV:
727 default:
728 break;
729 }
730 } else if (instr->ArgumentCount() == 1) {
731 Value* left_value = instr->ArgumentValueAt(0);
732 CompileType* left_type = left_value->Type();
733
734 // We only support unary operations on nullable doubles.
735 if (!left_type->IsNullableDouble()) {
736 return false;
737 }
738
739 if (op_kind == Token::kNEGATE) {
740 left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
741 replacement = new (Z)
742 UnaryDoubleOpInstr(Token::kNEGATE, left_value, instr->deopt_id(),
743 Instruction::kNotSpeculative);
744 }
745 }
746
747 if (replacement != nullptr && !replacement->ComputeCanDeoptimize()) {
748 if (FLAG_trace_strong_mode_types) {
749 THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
750 instr->ToCString(), replacement->ToCString());
751 }
752 ReplaceCall(instr, replacement);
753 RefineUseTypes(replacement);
754 return true;
755 }
756
757 return false;
758}
759
760// Tries to optimize instance call by replacing it with a faster instruction
761// (e.g, binary op, field load, ..).
762// TODO(dartbug.com/30635) Evaluate how much this can be shared with
763// JitCallSpecializer.
764void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
765 // Type test is special as it always gets converted into inlined code.
766 const Token::Kind op_kind = instr->token_kind();
767 if (Token::IsTypeTestOperator(op_kind)) {
768 ReplaceWithInstanceOf(instr);
769 return;
770 }
771
772 if (TryInlineFieldAccess(instr)) {
773 return;
774 }
775
776 if (RecognizeRuntimeTypeGetter(instr)) {
777 return;
778 }
779
780 if ((op_kind == Token::kEQ) && TryReplaceWithHaveSameRuntimeType(instr)) {
781 return;
782 }
783
784 const CallTargets& targets = instr->Targets();
785 const intptr_t receiver_idx = instr->FirstArgIndex();
786
787 if (TryOptimizeInstanceCallUsingStaticTypes(instr)) {
788 return;
789 }
790
791 bool has_one_target = targets.HasSingleTarget();
792 if (has_one_target) {
793 // Check if the single target is a polymorphic target, if it is,
794 // we don't have one target.
795 const Function& target = targets.FirstTarget();
796 has_one_target =
797 !target.is_polymorphic_target() && !target.IsDynamicallyOverridden();
798 }
799
800 if (has_one_target) {
801 const Function& target = targets.FirstTarget();
802 UntaggedFunction::Kind function_kind = target.kind();
803 if (flow_graph()->CheckForInstanceCall(instr, function_kind) ==
804 FlowGraph::ToCheck::kNoCheck) {
805 StaticCallInstr* call = StaticCallInstr::FromCall(
806 Z, instr, target, targets.AggregateCallCount());
807 instr->ReplaceWith(call, current_iterator());
808 return;
809 }
810 }
811
812 // No IC data checks. Try resolve target using the propagated cid.
813 const intptr_t receiver_cid =
814 instr->ArgumentValueAt(receiver_idx)->Type()->ToCid();
815 if (receiver_cid != kDynamicCid && receiver_cid != kSentinelCid) {
816 const Class& receiver_class =
817 Class::Handle(Z, isolate_group()->class_table()->At(receiver_cid));
818 const Function& function =
819 Function::Handle(Z, instr->ResolveForReceiverClass(receiver_class));
820 if (!function.IsNull()) {
821 const Function& target = Function::ZoneHandle(Z, function.ptr());
822 StaticCallInstr* call =
823 StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
824 instr->ReplaceWith(call, current_iterator());
825 return;
826 }
827 }
828
829 // Check for x == y, where x has type T?, there are no subtypes of T, and
830 // T does not override ==. Replace with StrictCompare.
831 if (instr->token_kind() == Token::kEQ || instr->token_kind() == Token::kNE) {
832 GrowableArray<intptr_t> class_ids(6);
833 if (instr->ArgumentValueAt(receiver_idx)->Type()->Specialize(&class_ids)) {
834 bool is_object_eq = true;
835 for (intptr_t i = 0; i < class_ids.length(); i++) {
836 const intptr_t cid = class_ids[i];
837 // Skip sentinel cid. It may appear in the unreachable code after
838 // inlining a method which doesn't return.
839 if (cid == kSentinelCid) continue;
840 const Class& cls =
841 Class::Handle(Z, isolate_group()->class_table()->At(cid));
842 const Function& target =
843 Function::Handle(Z, instr->ResolveForReceiverClass(cls));
844 if (target.recognized_kind() != MethodRecognizer::kObjectEquals) {
845 is_object_eq = false;
846 break;
847 }
848 }
849 if (is_object_eq) {
850 auto* replacement = new (Z) StrictCompareInstr(
851 instr->source(),
852 (instr->token_kind() == Token::kEQ) ? Token::kEQ_STRICT
853 : Token::kNE_STRICT,
854 instr->ArgumentValueAt(0)->CopyWithType(Z),
855 instr->ArgumentValueAt(1)->CopyWithType(Z),
856 /*needs_number_check=*/false, DeoptId::kNone);
857 ReplaceCall(instr, replacement);
858 RefineUseTypes(replacement);
859 return;
860 }
861 }
862 }
863
864 Definition* callee_receiver = instr->ArgumentAt(receiver_idx);
865 const Function& function = flow_graph()->function();
866 Class& receiver_class = Class::Handle(Z);
867
868 if (function.IsDynamicFunction() &&
869 flow_graph()->IsReceiver(callee_receiver)) {
870 // Call receiver is method receiver.
871 receiver_class = function.Owner();
872 } else {
873 // Check if we have an non-nullable compile type for the receiver.
874 CompileType* type = instr->ArgumentAt(receiver_idx)->Type();
875 if (type->ToAbstractType()->IsType() &&
876 !type->ToAbstractType()->IsDynamicType() && !type->is_nullable()) {
877 receiver_class = type->ToAbstractType()->type_class();
878 if (receiver_class.is_implemented()) {
879 receiver_class = Class::null();
880 }
881 }
882 }
883 if (!receiver_class.IsNull()) {
884 GrowableArray<intptr_t> class_ids(6);
885 if (thread()->compiler_state().cha().ConcreteSubclasses(receiver_class,
886 &class_ids)) {
887 // First check if all subclasses end up calling the same method.
888 // If this is the case we will replace instance call with a direct
889 // static call.
890 // Otherwise we will try to create ICData that contains all possible
891 // targets with appropriate checks.
892 Function& single_target = Function::Handle(Z);
893 ICData& ic_data = ICData::Handle(Z);
894 const Array& args_desc_array =
895 Array::Handle(Z, instr->GetArgumentsDescriptor());
896 Function& target = Function::Handle(Z);
897 Class& cls = Class::Handle(Z);
898 for (intptr_t i = 0; i < class_ids.length(); i++) {
899 const intptr_t cid = class_ids[i];
900 cls = isolate_group()->class_table()->At(cid);
901 target = instr->ResolveForReceiverClass(cls);
902 ASSERT(target.IsNull() || !target.IsInvokeFieldDispatcher());
903 if (target.IsNull()) {
904 single_target = Function::null();
905 ic_data = ICData::null();
906 break;
907 } else if (ic_data.IsNull()) {
908 // First we are trying to compute a single target for all subclasses.
909 if (single_target.IsNull()) {
910 ASSERT(i == 0);
911 single_target = target.ptr();
912 continue;
913 } else if (single_target.ptr() == target.ptr()) {
914 continue;
915 }
916
917 // The call does not resolve to a single target within the hierarchy.
918 // If we have too many subclasses abort the optimization.
919 if (class_ids.length() > FLAG_max_exhaustive_polymorphic_checks) {
920 single_target = Function::null();
921 break;
922 }
923
924 // Create an ICData and map all previously seen classes (< i) to
925 // the computed single_target.
926 ic_data = ICData::New(function, instr->function_name(),
927 args_desc_array, DeoptId::kNone,
928 /* args_tested = */ 1, ICData::kOptimized);
929 for (intptr_t j = 0; j < i; j++) {
930 ic_data.AddReceiverCheck(class_ids[j], single_target);
931 }
932
933 single_target = Function::null();
934 }
935
936 ASSERT(ic_data.ptr() != ICData::null());
937 ASSERT(single_target.ptr() == Function::null());
938 ic_data.AddReceiverCheck(cid, target);
939 }
940
941 if (single_target.ptr() != Function::null()) {
942 // If this is a getter or setter invocation try inlining it right away
943 // instead of replacing it with a static call.
944 if ((op_kind == Token::kGET) || (op_kind == Token::kSET)) {
945 // Create fake IC data with the resolved target.
946 const ICData& ic_data = ICData::Handle(
947 ICData::New(flow_graph()->function(), instr->function_name(),
948 args_desc_array, DeoptId::kNone,
949 /* args_tested = */ 1, ICData::kOptimized));
950 cls = single_target.Owner();
951 ic_data.AddReceiverCheck(cls.id(), single_target);
952 instr->set_ic_data(&ic_data);
953
954 if (TryInlineFieldAccess(instr)) {
955 return;
956 }
957 }
958
959 // We have computed that there is only a single target for this call
960 // within the whole hierarchy. Replace InstanceCall with StaticCall.
961 const Function& target = Function::ZoneHandle(Z, single_target.ptr());
962 StaticCallInstr* call =
963 StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
964 instr->ReplaceWith(call, current_iterator());
965 return;
966 } else if ((ic_data.ptr() != ICData::null()) &&
967 !ic_data.NumberOfChecksIs(0)) {
968 const CallTargets* targets = CallTargets::Create(Z, ic_data);
969 ASSERT(!targets->is_empty());
970 PolymorphicInstanceCallInstr* call =
971 PolymorphicInstanceCallInstr::FromCall(Z, instr, *targets,
972 /* complete = */ true);
973 instr->ReplaceWith(call, current_iterator());
974 return;
975 }
976 }
977
978 // Detect if o.m(...) is a call through a getter and expand it
979 // into o.get:m().call(...).
980 if (TryExpandCallThroughGetter(receiver_class, instr)) {
981 return;
982 }
983 }
984
985 // More than one target. Generate generic polymorphic call without
986 // deoptimization.
987 if (targets.length() > 0) {
988 ASSERT(!FLAG_polymorphic_with_deopt);
989 // OK to use checks with PolymorphicInstanceCallInstr since no
990 // deoptimization is allowed.
991 PolymorphicInstanceCallInstr* call =
992 PolymorphicInstanceCallInstr::FromCall(Z, instr, targets,
993 /* complete = */ false);
994 instr->ReplaceWith(call, current_iterator());
995 return;
996 }
997}
998
999void AotCallSpecializer::VisitStaticCall(StaticCallInstr* instr) {
1000 if (TryInlineFieldAccess(instr)) {
1001 return;
1002 }
1003 CallSpecializer::VisitStaticCall(instr);
1004}
1005
// Attempts to expand an instance call o.m(...) that actually resolves to a
// getter into the two-call sequence o.get:m().call(...).
//
// Returns true (and rewrites the graph) when 'receiver_class' declares a
// getter named "get:m"; since methods cannot override getters, o.m(...) must
// then be a call through the closure the getter returns. Returns false when
// the transformation does not apply and 'call' is left untouched.
bool AotCallSpecializer::TryExpandCallThroughGetter(const Class& receiver_class,
                                                    InstanceCallInstr* call) {
  // If it's an accessor call it can't be a call through getter.
  if (call->token_kind() == Token::kGET || call->token_kind() == Token::kSET) {
    return false;
  }

  // Ignore callsites like f.call() for now. Those need to be handled
  // specially if f is a closure.
  if (call->function_name().ptr() == Symbols::call().ptr()) {
    return false;
  }

  Function& target = Function::Handle(Z);

  // "m" -> "get:m".
  const String& getter_name =
      String::ZoneHandle(Z, Symbols::FromGet(thread(), call->function_name()));

  // A getter takes exactly one argument (the receiver) and no type arguments.
  const Array& args_desc_array = Array::Handle(
      Z,
      ArgumentsDescriptor::NewBoxed(/*type_args_len=*/0, /*num_arguments=*/1));
  ArgumentsDescriptor args_desc(args_desc_array);
  target = Resolver::ResolveDynamicForReceiverClass(
      receiver_class, getter_name, args_desc, /*allow_add=*/false);
  // A method extractor means "get:m" is just the implicit tearoff of method m
  // itself, not a user-defined getter, so this is a plain method call.
  if (target.ptr() == Function::null() || target.IsMethodExtractor()) {
    return false;
  }

  // We found a getter with the same name as the method this
  // call tries to invoke. This implies call through getter
  // because methods can't override getters. Build
  // o.get:m().call(...) sequence and replace o.m(...) invocation.

  const intptr_t receiver_idx = call->type_args_len() > 0 ? 1 : 0;

  // First call: invoke the getter on the original receiver.
  InputsArray get_arguments(Z, 1);
  get_arguments.Add(call->ArgumentValueAt(receiver_idx)->CopyWithType(Z));
  InstanceCallInstr* invoke_get = new (Z) InstanceCallInstr(
      call->source(), getter_name, Token::kGET, std::move(get_arguments),
      /*type_args_len=*/0,
      /*argument_names=*/Object::empty_array(),
      /*checked_argument_count=*/1,
      thread()->compiler_state().GetNextDeoptId());

  // Arguments to the .call() are the same as arguments to the
  // original call (including type arguments), but receiver
  // is replaced with the result of the get.
  InputsArray call_arguments(Z, call->ArgumentCount());
  if (call->type_args_len() > 0) {
    call_arguments.Add(call->ArgumentValueAt(0)->CopyWithType(Z));
  }
  call_arguments.Add(new (Z) Value(invoke_get));
  for (intptr_t i = receiver_idx + 1; i < call->ArgumentCount(); i++) {
    call_arguments.Add(call->ArgumentValueAt(i)->CopyWithType(Z));
  }

  InstanceCallInstr* invoke_call = new (Z) InstanceCallInstr(
      call->source(), Symbols::call(), Token::kILLEGAL,
      std::move(call_arguments), call->type_args_len(), call->argument_names(),
      /*checked_argument_count=*/1,
      thread()->compiler_state().GetNextDeoptId());

  // Create environment and insert 'invoke_get'.
  // Start from the original environment minus the original call's pushed
  // arguments, then push the getter call's own arguments.
  Environment* get_env =
      call->env()->DeepCopy(Z, call->env()->Length() - call->ArgumentCount());
  for (intptr_t i = 0, n = invoke_get->ArgumentCount(); i < n; i++) {
    get_env->PushValue(new (Z) Value(invoke_get->ArgumentAt(i)));
  }
  InsertBefore(call, invoke_get, get_env, FlowGraph::kValue);

  // Replace original call with .call(...) invocation.
  call->ReplaceWith(invoke_call, current_iterator());

  // ReplaceWith moved environment from 'call' to 'invoke_call'.
  // Update receiver argument in the environment.
  Environment* invoke_env = invoke_call->env();
  invoke_env
      ->ValueAt(invoke_env->Length() - invoke_call->ArgumentCount() +
                receiver_idx)
      ->BindToEnvironment(invoke_get);

  // AOT compiler expects all calls to have an ICData.
  invoke_get->EnsureICData(flow_graph());
  invoke_call->EnsureICData(flow_graph());

  // Specialize newly inserted calls.
  TryCreateICData(invoke_get);
  VisitInstanceCall(invoke_get);
  TryCreateICData(invoke_call);
  VisitInstanceCall(invoke_call);

  // Success.
  return true;
}
1100
1101void AotCallSpecializer::VisitPolymorphicInstanceCall(
1102 PolymorphicInstanceCallInstr* call) {
1103 const intptr_t receiver_idx = call->type_args_len() > 0 ? 1 : 0;
1104 const intptr_t receiver_cid =
1105 call->ArgumentValueAt(receiver_idx)->Type()->ToCid();
1106 if (receiver_cid != kDynamicCid && receiver_cid != kSentinelCid) {
1107 const Class& receiver_class =
1108 Class::Handle(Z, isolate_group()->class_table()->At(receiver_cid));
1109 const Function& function =
1110 Function::ZoneHandle(Z, call->ResolveForReceiverClass(receiver_class));
1111 if (!function.IsNull()) {
1112 // Only one target. Replace by static call.
1113 StaticCallInstr* new_call =
1114 StaticCallInstr::FromCall(Z, call, function, call->CallCount());
1115 call->ReplaceWith(new_call, current_iterator());
1116 }
1117 }
1118}
1119
1120bool AotCallSpecializer::TryReplaceInstanceOfWithRangeCheck(
1121 InstanceCallInstr* call,
1122 const AbstractType& type) {
1123 HierarchyInfo* hi = thread()->hierarchy_info();
1124 if (hi == nullptr) {
1125 return false;
1126 }
1127
1128 intptr_t lower_limit, upper_limit;
1129 if (!hi->InstanceOfHasClassRange(type, &lower_limit, &upper_limit)) {
1130 return false;
1131 }
1132
1133 Definition* left = call->ArgumentAt(0);
1134 LoadClassIdInstr* load_cid =
1135 new (Z) LoadClassIdInstr(new (Z) Value(left), kUnboxedUword);
1136 InsertBefore(call, load_cid, nullptr, FlowGraph::kValue);
1137
1138 ComparisonInstr* check_range;
1139 if (lower_limit == upper_limit) {
1140 ConstantInstr* cid_constant = flow_graph()->GetConstant(
1141 Smi::Handle(Z, Smi::New(lower_limit)), kUnboxedUword);
1142 check_range = new (Z) EqualityCompareInstr(
1143 call->source(), Token::kEQ, new Value(load_cid),
1144 new Value(cid_constant), kIntegerCid, DeoptId::kNone, false,
1145 Instruction::kNotSpeculative);
1146 } else {
1147 check_range =
1148 new (Z) TestRangeInstr(call->source(), new (Z) Value(load_cid),
1149 lower_limit, upper_limit, kUnboxedUword);
1150 }
1151 ReplaceCall(call, check_range);
1152
1153 return true;
1154}
1155
// Walks the whole flow graph and rewrites instance calls (and polymorphic
// instance calls) into dispatch-table calls where possible.
//
// Must not be invoked while another pass holds an instruction iterator, since
// it installs its own iterator into current_iterator_.
void AotCallSpecializer::ReplaceInstanceCallsWithDispatchTableCalls() {
  ASSERT(current_iterator_ == nullptr);
  // Remember the block count so we can detect whether any replacement
  // introduced new blocks (conditional dispatch creates a diamond).
  const intptr_t max_block_id = flow_graph()->max_block_id();
  for (BlockIterator block_it = flow_graph()->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    ForwardInstructionIterator it(block_it.Current());
    current_iterator_ = &it;
    while (!it.Done()) {
      Instruction* instr = it.Current();
      // Advance to the next instruction before replacing a call,
      // as call can be replaced with a diamond and the rest of
      // the instructions can be moved to a new basic block.
      if (!it.Done()) it.Advance();

      if (auto call = instr->AsInstanceCall()) {
        TryReplaceWithDispatchTableCall(call);
      } else if (auto call = instr->AsPolymorphicInstanceCall()) {
        TryReplaceWithDispatchTableCall(call);
      }
    }
    current_iterator_ = nullptr;
  }
  // New blocks were allocated: recompute block order and related structures.
  if (flow_graph()->max_block_id() != max_block_id) {
    flow_graph()->DiscoverBlocks();
  }
}
1182
1183const Function& AotCallSpecializer::InterfaceTargetForTableDispatch(
1184 InstanceCallBaseInstr* call) {
1185 const Function& interface_target = call->interface_target();
1186 if (!interface_target.IsNull()) {
1187 return interface_target;
1188 }
1189
1190 // Dynamic call or tearoff.
1191 const Function& tearoff_interface_target = call->tearoff_interface_target();
1192 if (!tearoff_interface_target.IsNull()) {
1193 // Tearoff.
1194 return Function::ZoneHandle(
1195 Z, tearoff_interface_target.GetMethodExtractor(call->function_name()));
1196 }
1197
1198 // Dynamic call.
1199 return Function::null_function();
1200}
1201
// Replaces 'call' with a dispatch-table call when the precompiler assigned a
// table selector to its interface target.
//
// Leaves the call untouched for dynamic calls (no interface target). A
// missing selector means all targets were tree-shaken, so the call is dead
// code or the receiver is always null; in DEBUG builds a null check plus a
// Stop instruction are inserted to catch this at runtime.
void AotCallSpecializer::TryReplaceWithDispatchTableCall(
    InstanceCallBaseInstr* call) {
  const Function& interface_target = InterfaceTargetForTableDispatch(call);
  if (interface_target.IsNull()) {
    // Dynamic call.
    return;
  }

  Value* receiver = call->ArgumentValueAt(call->FirstArgIndex());
  const compiler::TableSelector* selector =
      precompiler_->selector_map()->GetSelector(interface_target);

  if (selector == nullptr) {
#if defined(DEBUG)
    if (!interface_target.IsDynamicallyOverridden()) {
      // Target functions were removed by tree shaking. This call is dead code,
      // or the receiver is always null.
      AddCheckNull(receiver->CopyWithType(Z), call->function_name(),
                   DeoptId::kNone, call->env(), call);
      StopInstr* stop = new (Z) StopInstr("Dead instance call executed.");
      InsertBefore(call, stop, call->env(), FlowGraph::kEffect);
    }
#endif
    return;
  }

  // The dispatch table is indexed by class id, so load the receiver's cid
  // (accounting for a possible Smi receiver per the interface target).
  const bool receiver_can_be_smi =
      call->CanReceiverBeSmiBasedOnInterfaceTarget(Z);
  auto load_cid = new (Z) LoadClassIdInstr(receiver->CopyWithType(Z),
                                           kUnboxedUword, receiver_can_be_smi);
  InsertBefore(call, load_cid, call->env(), FlowGraph::kValue);

  const auto& cls = Class::Handle(Z, interface_target.Owner());
  if (cls.has_dynamically_extendable_subtypes()) {
    // Classes may still be loaded at runtime, so the table may not cover all
    // receivers; emit a guarded (conditional) dispatch-table call instead.
    ReplaceWithConditionalDispatchTableCall(call, load_cid, interface_target,
                                            selector);
    return;
  }

  auto dispatch_table_call = DispatchTableCallInstr::FromCall(
      Z, call, new (Z) Value(load_cid), interface_target, selector);
  call->ReplaceWith(dispatch_table_call, current_iterator());
}
1245
1246static void InheritDeoptTargetIfNeeded(Zone* zone,
1247 Instruction* instr,
1248 Instruction* from) {
1249 if (from->env() != nullptr) {
1250 instr->InheritDeoptTarget(zone, from);
1251 }
1252}
1253
// Rewrites 'call' into a control-flow diamond guarded by a cid range test:
//
//   if (cid in [0, num_cids])            // class known at compile time
//     result = DispatchTableCall(...)    // fast path (true branch)
//   else
//     result = <original instance call>  // slow path (false branch)
//
// Used when the interface target's class has dynamically extendable subtypes,
// i.e. classes loaded after AOT compilation may have cids outside the table.
// The original call instruction is re-linked into the false branch rather
// than copied, and a phi merges the two results when the call has uses.
void AotCallSpecializer::ReplaceWithConditionalDispatchTableCall(
    InstanceCallBaseInstr* call,
    LoadClassIdInstr* load_cid,
    const Function& interface_target,
    const compiler::TableSelector* selector) {
  BlockEntryInstr* current_block = call->GetBlock();
  const bool has_uses = call->HasUses();

  // Guard: is the receiver's cid within the statically known class table?
  // NOTE(review): TestRangeInstr is given [0, num_cids]; confirm whether the
  // upper bound is intended to be inclusive of num_cids itself.
  const intptr_t num_cids = isolate_group()->class_table()->NumCids();
  auto* compare = new (Z) TestRangeInstr(
      call->source(), new (Z) Value(load_cid), 0, num_cids, kUnboxedUword);

  BranchInstr* branch = new (Z) BranchInstr(compare, DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, branch, call);

  // Fast-path entry (cid in range -> dispatch table call).
  TargetEntryInstr* true_target =
      new (Z) TargetEntryInstr(flow_graph()->allocate_block_id(),
                               current_block->try_index(), DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, true_target, call);
  *branch->true_successor_address() = true_target;

  // Slow-path entry (cid out of range -> original instance call).
  TargetEntryInstr* false_target =
      new (Z) TargetEntryInstr(flow_graph()->allocate_block_id(),
                               current_block->try_index(), DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, false_target, call);
  *branch->false_successor_address() = false_target;

  // Join block receiving the instructions that followed the call.
  JoinEntryInstr* join =
      new (Z) JoinEntryInstr(flow_graph()->allocate_block_id(),
                             current_block->try_index(), DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, join, call);

  current_block->ReplaceAsPredecessorWith(join);

  // Fix up the dominator tree: blocks formerly dominated by current_block are
  // now dominated by the join; the diamond's blocks hang off current_block.
  for (intptr_t i = 0, n = current_block->dominated_blocks().length(); i < n;
       ++i) {
    BlockEntryInstr* block = current_block->dominated_blocks()[i];
    join->AddDominatedBlock(block);
  }
  current_block->ClearDominatedBlocks();
  current_block->AddDominatedBlock(join);
  current_block->AddDominatedBlock(true_target);
  current_block->AddDominatedBlock(false_target);

  // If the call's result is used, merge the two results through a phi.
  PhiInstr* phi = nullptr;
  if (has_uses) {
    phi = new (Z) PhiInstr(join, 2);
    phi->mark_alive();
    flow_graph()->AllocateSSAIndex(phi);
    join->InsertPhi(phi);
    phi->UpdateType(*call->Type());
    phi->set_representation(call->representation());
    call->ReplaceUsesWith(phi);
  }

  GotoInstr* true_goto = new (Z) GotoInstr(join, DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, true_goto, call);
  true_target->LinkTo(true_goto);
  true_target->set_last_instruction(true_goto);

  GotoInstr* false_goto = new (Z) GotoInstr(join, DeoptId::kNone);
  InheritDeoptTargetIfNeeded(Z, false_goto, call);
  false_target->LinkTo(false_goto);
  false_target->set_last_instruction(false_goto);

  // Fast path: the dispatch-table call goes into the true branch.
  auto dispatch_table_call = DispatchTableCallInstr::FromCall(
      Z, call, new (Z) Value(load_cid), interface_target, selector);
  ASSERT(dispatch_table_call->representation() == call->representation());
  InsertBefore(true_goto, dispatch_table_call, call->env(),
               has_uses ? FlowGraph::kValue : FlowGraph::kEffect);

  // Splice the branch in where the call used to be, route the fall-through
  // code to the join, and move the original call into the false branch.
  call->previous()->AppendInstruction(branch);
  call->set_previous(nullptr);
  join->LinkTo(call->next());
  call->set_next(nullptr);
  call->UnuseAllInputs();  // So it can be re-added to the graph.
  call->InsertBefore(false_goto);
  InheritDeoptTargetIfNeeded(Z, call, call);  // Restore env use list.

  if (has_uses) {
    phi->SetInputAt(0, new (Z) Value(dispatch_table_call));
    dispatch_table_call->AddInputUse(phi->InputAt(0));
    phi->SetInputAt(1, new (Z) Value(call));
    call->AddInputUse(phi->InputAt(1));
  }
}
1340
1341#endif // DART_PRECOMPILER
1342
1343} // namespace dart
static sk_sp< Effect > Create()
Definition: RefCntTest.cpp:117
static double magnitude(double a)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
#define IG
#define Z
#define UNREACHABLE()
Definition: assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
GLenum type
AotCallSpecializer(Precompiler *precompiler, FlowGraph *flow_graph, SpeculativeInliningPolicy *speculative_policy)
#define THR_Print(format,...)
Definition: log.h:20
#define ASSERT(E)
G_BEGIN_DECLS G_MODULE_EXPORT FlValue * args
uint8_t value
GAsyncResult * result
uint32_t * target
Dart_NativeFunction function
Definition: fuchsia.cc:51
int argument_count
Definition: fuchsia.cc:52
bool IsSmi(int64_t v)
Definition: runtime_api.cc:31
Definition: dart_vm.cc:33
static bool CanConvertInt64ToDouble()
Definition: flow_graph.cc:1934
constexpr int64_t kMinInt64
Definition: globals.h:485
static constexpr Representation kUnboxedUword
Definition: locations.h:171
static void RefineUseTypes(Definition *instr)
@ kIllegalCid
Definition: class_id.h:214
@ kDynamicCid
Definition: class_id.h:253
GrowableArray< Value * > InputsArray
Definition: il.h:901
UnorderedHashMap< FunctionsTraits > UniqueFunctionsMap
Definition: precompiler.h:430
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
const intptr_t cid
const char *const function_name
def call(args)
Definition: dom.py:159
Definition: __init__.py:1
@ kNone
Definition: layer.h:53
DEF_SWITCHES_START aot vmservice shared library name
Definition: switches.h:32
#define FALL_THROUGH
Definition: globals.h:15
int compare(const void *untyped_lhs, const void *untyped_rhs)
Definition: skdiff.h:161
static SkString join(const CommandLineFlags::StringArray &)
Definition: skpbench.cpp:741