aot_call_specializer.cc
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/aot/aot_call_specializer.h"

#include <utility>

#include "vm/bit_vector.h"
#include "vm/compiler/aot/precompiler.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/cha.h"
#include "vm/compiler/compiler_state.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/exceptions.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/resolver.h"
#include "vm/scopes.h"
#include "vm/stack_frame.h"
#include "vm/symbols.h"

namespace dart {

DEFINE_FLAG(int,
            max_exhaustive_polymorphic_checks,
            5,
            "If a call receiver is known to be of at most this many classes, "
            "generate exhaustive class tests instead of a megamorphic call");

// Quick access to the current isolate group and zone.
#define IG (isolate_group())
#define Z (zone())

#ifdef DART_PRECOMPILER

// Returns (via 'function') the named function that is a unique dynamic
// target, i.e.,
// - the target is identified by its name alone, since that name occurs only
//   once in the program, and
// - the target's class has no subclasses and does not subclass anything
//   itself, so the receiver's type can only be the function's class.
// Sets 'function' to Function::null() if there is no unique dynamic target
// for the given 'fname'. 'fname' must be a symbol.
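// Illustrative example (hypothetical Dart program, not part of this file):
// if the selector 'foo' occurs exactly once in the whole program,
//
//   class A { void foo() {} }
//   dynamic x = A(); x.foo();  // can only dispatch to A.foo
//
// then A.foo is the unique dynamic target for the symbol 'foo'.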
static void GetUniqueDynamicTarget(IsolateGroup* isolate_group,
                                   const String& fname,
                                   Object* function) {
  UniqueFunctionsMap functions_map(
      isolate_group->object_store()->unique_dynamic_targets());
  ASSERT(fname.IsSymbol());
  *function = functions_map.GetOrNull(fname);
  ASSERT(functions_map.Release().ptr() ==
         isolate_group->object_store()->unique_dynamic_targets());
}

AotCallSpecializer::AotCallSpecializer(
    Precompiler* precompiler,
    FlowGraph* flow_graph,
    SpeculativeInliningPolicy* speculative_policy)
    : CallSpecializer(flow_graph,
                      speculative_policy,
                      /*should_clone_fields=*/false),
      precompiler_(precompiler),
      has_unique_no_such_method_(false) {
  Function& target_function = Function::Handle();
  if (isolate_group()->object_store()->unique_dynamic_targets() !=
      Array::null()) {
    GetUniqueDynamicTarget(isolate_group(), Symbols::NoSuchMethod(),
                           &target_function);
    has_unique_no_such_method_ = !target_function.IsNull();
  }
}

bool AotCallSpecializer::TryCreateICDataForUniqueTarget(
    InstanceCallInstr* call) {
  if (isolate_group()->object_store()->unique_dynamic_targets() ==
      Array::null()) {
    return false;
  }

  // Check if the target is unique.
  Function& target_function = Function::Handle(Z);
  GetUniqueDynamicTarget(isolate_group(), call->function_name(),
                         &target_function);

  if (target_function.IsNull()) {
    return false;
  }

  // Calls passing named arguments and calls to a function taking named
  // arguments must be resolved/checked at runtime.
  // Calls passing a type argument vector and calls to a generic function must
  // be resolved/checked at runtime.
  if (target_function.HasOptionalNamedParameters() ||
      target_function.IsGeneric() ||
      !target_function.AreValidArgumentCounts(
          call->type_args_len(), call->ArgumentCountWithoutTypeArgs(),
          call->argument_names().IsNull() ? 0 : call->argument_names().Length(),
          /*error_message=*/nullptr)) {
    return false;
  }

  const Class& cls = Class::Handle(Z, target_function.Owner());
  intptr_t implementor_cid = kIllegalCid;
  if (!CHA::HasSingleConcreteImplementation(cls, &implementor_cid)) {
    return false;
  }

  call->SetTargets(
      CallTargets::CreateMonomorphic(Z, implementor_cid, target_function));
  ASSERT(call->Targets().IsMonomorphic());

  // If we know that the only noSuchMethod is Object.noSuchMethod then
  // this call is guaranteed to either succeed or throw.
  if (has_unique_no_such_method_) {
    call->set_has_unique_selector(true);

    // Add redefinition of the receiver to prevent code motion across
    // this call.
    const intptr_t receiver_index = call->FirstArgIndex();
    RedefinitionInstr* redefinition = new (Z)
        RedefinitionInstr(new (Z) Value(call->ArgumentAt(receiver_index)));
    flow_graph()->AllocateSSAIndex(redefinition);
    redefinition->InsertAfter(call);
    // Replace all uses of the receiver dominated by this call.
    FlowGraph::RenameDominatedUses(call->ArgumentAt(receiver_index),
                                   redefinition, redefinition);
    if (!redefinition->HasUses()) {
      redefinition->RemoveFromGraph();
    }
  }

  return true;
}

bool AotCallSpecializer::TryCreateICData(InstanceCallInstr* call) {
  if (TryCreateICDataForUniqueTarget(call)) {
    return true;
  }

  return CallSpecializer::TryCreateICData(call);
}

bool AotCallSpecializer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
  if ((precompiler_ == nullptr) ||
      !precompiler_->get_runtime_type_is_unique()) {
    return false;
  }

  if (call->function_name().ptr() != Symbols::GetRuntimeType().ptr()) {
    return false;
  }

  // There is only a single function Object.get:runtimeType that can be invoked
  // by this call. Convert dynamic invocation to a static one.
  const Class& cls = Class::Handle(Z, IG->object_store()->object_class());
  const Function& function =
      Function::Handle(Z, call->ResolveForReceiverClass(cls));
  ASSERT(!function.IsNull());
  const Function& target = Function::ZoneHandle(Z, function.ptr());
  StaticCallInstr* static_call =
      StaticCallInstr::FromCall(Z, call, target, call->CallCount());
  // Since the result is either a Type or a FunctionType, we cannot pin it.
  call->ReplaceWith(static_call, current_iterator());
  return true;
}

static bool IsGetRuntimeType(Definition* defn) {
  StaticCallInstr* call = defn->AsStaticCall();
  return (call != nullptr) && (call->function().recognized_kind() ==
                               MethodRecognizer::kObjectRuntimeType);
}

// Recognize a.runtimeType == b.runtimeType and fold it into
// Object._haveSameRuntimeType(a, b).
// Note: this optimization is not speculative.
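// Illustrative example (hypothetical Dart code, not part of this file):
//
//   bool sameType(Object a, Object b) => a.runtimeType == b.runtimeType;
//
// After the fold, neither receiver needs to materialize a Type object for
// the comparison; presumably _haveSameRuntimeType can inspect the two
// receivers' classes directly.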
bool AotCallSpecializer::TryReplaceWithHaveSameRuntimeType(
    TemplateDartCall<0>* call) {
  ASSERT((call->IsInstanceCall() &&
          (call->AsInstanceCall()->ic_data()->NumArgsTested() == 2)) ||
         call->IsStaticCall());
  ASSERT(call->type_args_len() == 0);
  ASSERT(call->ArgumentCount() == 2);

  Definition* left = call->ArgumentAt(0);
  Definition* right = call->ArgumentAt(1);

  if (IsGetRuntimeType(left) && left->input_use_list()->IsSingleUse() &&
      IsGetRuntimeType(right) && right->input_use_list()->IsSingleUse()) {
    const Class& cls = Class::Handle(Z, IG->object_store()->object_class());
    const Function& have_same_runtime_type = Function::ZoneHandle(
        Z,
        cls.LookupStaticFunctionAllowPrivate(Symbols::HaveSameRuntimeType()));
    ASSERT(!have_same_runtime_type.IsNull());

    InputsArray args(Z, 2);
    args.Add(left->ArgumentValueAt(0)->CopyWithType(Z));
    args.Add(right->ArgumentValueAt(0)->CopyWithType(Z));
    const intptr_t kTypeArgsLen = 0;
    StaticCallInstr* static_call = new (Z)
        StaticCallInstr(call->source(), have_same_runtime_type, kTypeArgsLen,
                        Object::null_array(),  // argument_names
                        std::move(args), call->deopt_id(), call->CallCount(),
                        ICData::kOptimized);
    static_call->SetResultType(Z, CompileType::FromCid(kBoolCid));
    ReplaceCall(call, static_call);
    // ReplaceCall moved environment from 'call' to 'static_call'.
    // Update arguments of 'static_call' in the environment.
    Environment* env = static_call->env();
    env->ValueAt(env->Length() - 2)
        ->BindToEnvironment(static_call->ArgumentAt(0));
    env->ValueAt(env->Length() - 1)
        ->BindToEnvironment(static_call->ArgumentAt(1));
    return true;
  }

  return false;
}

bool AotCallSpecializer::TryInlineFieldAccess(InstanceCallInstr* call) {
  const Token::Kind op_kind = call->token_kind();
  if ((op_kind == Token::kGET) && TryInlineInstanceGetter(call)) {
    return true;
  }
  if ((op_kind == Token::kSET) && TryInlineInstanceSetter(call)) {
    return true;
  }
  return false;
}

bool AotCallSpecializer::TryInlineFieldAccess(StaticCallInstr* call) {
  if (call->function().IsImplicitGetterFunction()) {
    Field& field = Field::ZoneHandle(call->function().accessor_field());
    if (field.is_late()) {
      // TODO(dartbug.com/40447): Inline implicit getters for late fields.
      return false;
    }
    if (should_clone_fields_) {
      field = field.CloneFromOriginal();
    }
    InlineImplicitInstanceGetter(call, field);
    return true;
  }

  return false;
}

bool AotCallSpecializer::IsSupportedIntOperandForStaticDoubleOp(
    CompileType* operand_type) {
  if (operand_type->IsNullableInt()) {
    if (operand_type->ToNullableCid() == kSmiCid) {
      return true;
    }

    if (FlowGraphCompiler::CanConvertInt64ToDouble()) {
      return true;
    }
  }

  return false;
}

Value* AotCallSpecializer::PrepareStaticOpInput(Value* input,
                                                intptr_t cid,
                                                Instruction* call) {
  ASSERT((cid == kDoubleCid) || (cid == kMintCid));

  if (input->Type()->is_nullable()) {
    const String& function_name =
        (call->IsInstanceCall()
             ? call->AsInstanceCall()->function_name()
             : String::ZoneHandle(Z, call->AsStaticCall()->function().name()));
    AddCheckNull(input, function_name, call->deopt_id(), call->env(), call);
  }

  input = input->CopyWithType(Z);

  if (cid == kDoubleCid && input->Type()->IsNullableInt()) {
    Definition* conversion = nullptr;

    if (input->Type()->ToNullableCid() == kSmiCid) {
      conversion = new (Z) SmiToDoubleInstr(input, call->source());
    } else if (FlowGraphCompiler::CanConvertInt64ToDouble()) {
      conversion = new (Z) Int64ToDoubleInstr(input, DeoptId::kNone,
                                              Instruction::kNotSpeculative);
    } else {
      UNREACHABLE();
    }

    if (FLAG_trace_strong_mode_types) {
      THR_Print("[Strong mode] Inserted %s\n", conversion->ToCString());
    }
    InsertBefore(call, conversion, /*env=*/nullptr, FlowGraph::kValue);
    return new (Z) Value(conversion);
  }

  return input;
}

CompileType AotCallSpecializer::BuildStrengthenedReceiverType(Value* input,
                                                              intptr_t cid) {
  CompileType* old_type = input->Type();
  CompileType* refined_type = old_type;

  CompileType type = CompileType::None();
  if (cid == kSmiCid) {
    type = CompileType::NullableSmi();
    refined_type = CompileType::ComputeRefinedType(old_type, &type);
  } else if (cid == kMintCid) {
    type = CompileType::NullableMint();
    refined_type = CompileType::ComputeRefinedType(old_type, &type);
  } else if (cid == kIntegerCid && !input->Type()->IsNullableInt()) {
    type = CompileType::NullableInt();
    refined_type = CompileType::ComputeRefinedType(old_type, &type);
  } else if (cid == kDoubleCid && !input->Type()->IsNullableDouble()) {
    type = CompileType::NullableDouble();
    refined_type = CompileType::ComputeRefinedType(old_type, &type);
  }

  if (refined_type != old_type) {
    return *refined_type;
  }
  return CompileType::None();
}

// After replacing a call with a specialized instruction, make sure to
// update the types at all uses, as the specialized instruction can provide
// a more specific type.
static void RefineUseTypes(Definition* instr) {
  CompileType* new_type = instr->Type();
  for (Value::Iterator it(instr->input_use_list()); !it.Done(); it.Advance()) {
    it.Current()->RefineReachingType(new_type);
  }
}

bool AotCallSpecializer::TryOptimizeInstanceCallUsingStaticTypes(
    InstanceCallInstr* instr) {
  const Token::Kind op_kind = instr->token_kind();
  return TryOptimizeIntegerOperation(instr, op_kind) ||
         TryOptimizeDoubleOperation(instr, op_kind);
}

bool AotCallSpecializer::TryOptimizeStaticCallUsingStaticTypes(
    StaticCallInstr* instr) {
  const String& name = String::Handle(Z, instr->function().name());
  const Token::Kind op_kind = MethodTokenRecognizer::RecognizeTokenKind(name);

  if (op_kind == Token::kEQ && TryReplaceWithHaveSameRuntimeType(instr)) {
    return true;
  }

  // We only specialize instance methods for int/double operations.
  const auto& target = instr->function();
  if (!target.IsDynamicFunction()) {
    return false;
  }

  // For de-virtualized instance calls, we strengthen the type here manually
  // because it might not be attached to the receiver.
  // See http://dartbug.com/35179 for preserving the receiver type information.
  const Class& owner = Class::Handle(Z, target.Owner());
  const intptr_t cid = owner.id();
  if (cid == kSmiCid || cid == kMintCid || cid == kIntegerCid ||
      cid == kDoubleCid) {
    // Sometimes TFA de-virtualizes instance calls to static calls. In such
    // cases the VM might have a looser type on the receiver, so we explicitly
    // tighten it (this is safe since it was proven that the receiver is
    // either null or will end up with that target).
    const intptr_t receiver_index = instr->FirstArgIndex();
    const intptr_t argument_count = instr->ArgumentCountWithoutTypeArgs();
    if (argument_count >= 1) {
      auto receiver_value = instr->ArgumentValueAt(receiver_index);
      auto receiver = receiver_value->definition();
      auto type = BuildStrengthenedReceiverType(receiver_value, cid);
      if (!type.IsNone()) {
        auto redefinition =
            flow_graph()->EnsureRedefinition(instr->previous(), receiver, type);
        if (redefinition != nullptr) {
          RefineUseTypes(redefinition);
        }
      }
    }
  }

  return TryOptimizeIntegerOperation(instr, op_kind) ||
         TryOptimizeDoubleOperation(instr, op_kind);
}

Definition* AotCallSpecializer::TryOptimizeDivisionOperation(
    TemplateDartCall<0>* instr,
    Token::Kind op_kind,
    Value* left_value,
    Value* right_value) {
  auto unboxed_constant = [&](int64_t value) -> Definition* {
    ASSERT(compiler::target::IsSmi(value));
#if defined(TARGET_ARCH_IS_32_BIT)
    Definition* const const_def = new (Z) UnboxedConstantInstr(
        Smi::ZoneHandle(Z, Smi::New(value)), kUnboxedInt32);
    InsertBefore(instr, const_def, /*env=*/nullptr, FlowGraph::kValue);
    return new (Z) IntConverterInstr(kUnboxedInt32, kUnboxedInt64,
                                     new (Z) Value(const_def), DeoptId::kNone);
#else
    return new (Z) UnboxedConstantInstr(Smi::ZoneHandle(Z, Smi::New(value)),
                                        kUnboxedInt64);
#endif
  };

  if (!right_value->BindsToConstant()) {
    return nullptr;
  }

  const Object& rhs = right_value->BoundConstant();
  const int64_t value = Integer::Cast(rhs).AsInt64Value();  // smi and mint

  if (value == kMinInt64) {
    return nullptr;  // The absolute value can't be held in an int64_t.
  }

  const int64_t magnitude = Utils::Abs(value);
  // The replacements for both operations assume that the magnitude of the
  // value is a power of two and that the mask derived from the magnitude
  // can fit in a Smi.
  if (!Utils::IsPowerOfTwo(magnitude) ||
      !compiler::target::IsSmi(magnitude - 1)) {
    return nullptr;
  }

  if (op_kind == Token::kMOD) {
    // Modulo against a constant power-of-two can be optimized into a mask.
    // x % y -> x & (|y| - 1) for smi masks only
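    // Worked example (added for illustration): x % 8 and x % -8 both become
    // x & 7. Two's complement also makes this correct for negative x,
    // matching Dart's non-negative remainder for a power-of-two divisor:
    // -5 % 8 == 3 and, bitwise, -5 & 7 == 3.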
    left_value = PrepareStaticOpInput(left_value, kMintCid, instr);

    Definition* right_definition = unboxed_constant(magnitude - 1);
    if (magnitude == 1) return right_definition;
    InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
    right_value = new (Z) Value(right_definition);
    return new (Z)
        BinaryInt64OpInstr(Token::kBIT_AND, left_value, right_value,
                           DeoptId::kNone, Instruction::kNotSpeculative);
  } else {
    ASSERT_EQUAL(op_kind, Token::kTRUNCDIV);
#if !defined(TARGET_ARCH_IS_32_BIT)
    // If BinaryInt64Op(kTRUNCDIV, ...) is supported, then only perform the
    // simplest replacements and use the instruction otherwise.
    if (magnitude != 1) return nullptr;
#endif

    // If the divisor is negative, then we need to negate the final result.
    const bool negate = value < 0;
    Definition* result = nullptr;

    left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
    if (magnitude > 1) {
      // For two's complement signed arithmetic where the bit width is k
      // and the divisor is 2^n for some n in [0, k), we can perform a simple
      // shift if m is non-negative:
      //   m ~/ 2^n => m >> n
      // For negative m, however, this won't work, since just shifting m
      // rounds towards negative infinity. Instead, we add (2^n - 1) before
      // shifting, which rounds the result towards positive infinity
      // (and thus towards zero, since m is negative):
      //   m ~/ 2^n => (m + (2^n - 1)) >> n
      // By sign-extending the sign bit (the (k-1)-bit) and using that as a
      // mask, we get a non-branching computation that only adds (2^n - 1)
      // when m is negative, rounding towards zero in both cases:
      //   m ~/ 2^n => (m + ((m >> (k - 1)) & (2^n - 1))) >> n
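      // Worked example (added for illustration; k = 64, n = 2, i.e. m ~/ 4):
      // for m = -7, m >> 63 is -1 (all ones), so the mask yields 3 and
      // (-7 + 3) >> 2 == -4 >> 2 == -1 == -7 ~/ 4. For m = 7, m >> 63 is 0,
      // so the adjustment is 0 and 7 >> 2 == 1 == 7 ~/ 4.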
      auto* const sign_bit_position = unboxed_constant(63);
      InsertBefore(instr, sign_bit_position, /*env=*/nullptr,
                   FlowGraph::kValue);
      auto* const sign_bit_extended = new (Z)
          ShiftInt64OpInstr(Token::kSHR, left_value,
                            new (Z) Value(sign_bit_position), DeoptId::kNone);
      InsertBefore(instr, sign_bit_extended, /*env=*/nullptr,
                   FlowGraph::kValue);
      auto* rounding_adjustment = unboxed_constant(magnitude - 1);
      InsertBefore(instr, rounding_adjustment, /*env=*/nullptr,
                   FlowGraph::kValue);
      rounding_adjustment = new (Z)
          BinaryInt64OpInstr(Token::kBIT_AND, new (Z) Value(sign_bit_extended),
                             new (Z) Value(rounding_adjustment), DeoptId::kNone,
                             Instruction::kNotSpeculative);
      InsertBefore(instr, rounding_adjustment, /*env=*/nullptr,
                   FlowGraph::kValue);
      auto* const left_definition = new (Z)
          BinaryInt64OpInstr(Token::kADD, left_value->CopyWithType(Z),
                             new (Z) Value(rounding_adjustment), DeoptId::kNone,
                             Instruction::kNotSpeculative);
      InsertBefore(instr, left_definition, /*env=*/nullptr, FlowGraph::kValue);
      left_value = new (Z) Value(left_definition);
      auto* const right_definition =
          unboxed_constant(Utils::ShiftForPowerOfTwo(magnitude));
      InsertBefore(instr, right_definition, /*env=*/nullptr, FlowGraph::kValue);
      right_value = new (Z) Value(right_definition);
      result = new (Z) ShiftInt64OpInstr(Token::kSHR, left_value, right_value,
                                         DeoptId::kNone);
    } else {
      ASSERT_EQUAL(magnitude, 1);
      // No division needed, just redefine the value.
      result = new (Z) RedefinitionInstr(left_value);
    }
    if (negate) {
      InsertBefore(instr, result, /*env=*/nullptr, FlowGraph::kValue);
      result = new (Z) UnaryInt64OpInstr(Token::kNEGATE, new (Z) Value(result),
                                         DeoptId::kNone);
    }
    return result;
  }
}

bool AotCallSpecializer::TryOptimizeIntegerOperation(TemplateDartCall<0>* instr,
                                                     Token::Kind op_kind) {
  if (instr->type_args_len() != 0) {
    // Arithmetic operations don't have type arguments.
    return false;
  }

  Definition* replacement = nullptr;
  if (instr->ArgumentCount() == 2) {
    Value* left_value = instr->ArgumentValueAt(0);
    Value* right_value = instr->ArgumentValueAt(1);
    CompileType* left_type = left_value->Type();
    CompileType* right_type = right_value->Type();

    bool has_nullable_int_args =
        left_type->IsNullableInt() && right_type->IsNullableInt();

    if (auto* call = instr->AsInstanceCall()) {
      if (!call->CanReceiverBeSmiBasedOnInterfaceTarget(zone())) {
        has_nullable_int_args = false;
      }
    }

    // We only support binary operations if both operands are nullable integers
    // or when we can use a cheap strict comparison operation.
    if (!has_nullable_int_args) {
      return false;
    }

    switch (op_kind) {
      case Token::kEQ:
      case Token::kNE: {
        const bool either_can_be_null =
            left_type->is_nullable() || right_type->is_nullable();
        replacement = new (Z) EqualityCompareInstr(
            instr->source(), op_kind, left_value->CopyWithType(Z),
            right_value->CopyWithType(Z), kMintCid, DeoptId::kNone,
            /*null_aware=*/either_can_be_null, Instruction::kNotSpeculative);
        break;
      }
      case Token::kLT:
      case Token::kLTE:
      case Token::kGT:
      case Token::kGTE:
        left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
        right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
        replacement = new (Z) RelationalOpInstr(
            instr->source(), op_kind, left_value, right_value, kMintCid,
            DeoptId::kNone, Instruction::kNotSpeculative);
        break;
      case Token::kMOD:
      case Token::kTRUNCDIV:
        replacement = TryOptimizeDivisionOperation(instr, op_kind, left_value,
                                                   right_value);
        if (replacement != nullptr) break;
#if defined(TARGET_ARCH_IS_32_BIT)
        // Truncating 64-bit division and modulus via BinaryInt64OpInstr are
        // not implemented on 32-bit architectures, so we can only optimize
        // certain cases and otherwise must leave the call in.
        break;
#else
        FALL_THROUGH;
#endif
      case Token::kSHL:
        FALL_THROUGH;
      case Token::kSHR:
        FALL_THROUGH;
      case Token::kUSHR:
        FALL_THROUGH;
      case Token::kBIT_OR:
        FALL_THROUGH;
      case Token::kBIT_XOR:
        FALL_THROUGH;
      case Token::kBIT_AND:
        FALL_THROUGH;
      case Token::kADD:
        FALL_THROUGH;
      case Token::kSUB:
        FALL_THROUGH;
      case Token::kMUL: {
        if (op_kind == Token::kSHL || op_kind == Token::kSHR ||
            op_kind == Token::kUSHR) {
          left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
          right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
          replacement = new (Z) ShiftInt64OpInstr(op_kind, left_value,
                                                  right_value, DeoptId::kNone);
        } else {
          left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
          right_value = PrepareStaticOpInput(right_value, kMintCid, instr);
          replacement = new (Z)
              BinaryInt64OpInstr(op_kind, left_value, right_value,
                                 DeoptId::kNone, Instruction::kNotSpeculative);
        }
        break;
      }

      default:
        break;
    }
  } else if (instr->ArgumentCount() == 1) {
    Value* left_value = instr->ArgumentValueAt(0);
    CompileType* left_type = left_value->Type();

    // We only support unary operations on nullable integers.
    if (!left_type->IsNullableInt()) {
      return false;
    }

    if (op_kind == Token::kNEGATE || op_kind == Token::kBIT_NOT) {
      left_value = PrepareStaticOpInput(left_value, kMintCid, instr);
      replacement = new (Z) UnaryInt64OpInstr(
          op_kind, left_value, DeoptId::kNone, Instruction::kNotSpeculative);
    }
  }

  if (replacement != nullptr && !replacement->ComputeCanDeoptimize()) {
    if (FLAG_trace_strong_mode_types) {
      THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
                instr->ToCString(), replacement->ToCString());
    }
    ReplaceCall(instr, replacement);
    RefineUseTypes(replacement);
    return true;
  }

  return false;
}

bool AotCallSpecializer::TryOptimizeDoubleOperation(TemplateDartCall<0>* instr,
                                                    Token::Kind op_kind) {
  if (instr->type_args_len() != 0) {
    // Arithmetic operations don't have type arguments.
    return false;
  }

  if (!FlowGraphCompiler::SupportsUnboxedDoubles()) {
    return false;
  }

  Definition* replacement = nullptr;

  if (instr->ArgumentCount() == 2) {
    Value* left_value = instr->ArgumentValueAt(0);
    Value* right_value = instr->ArgumentValueAt(1);
    CompileType* left_type = left_value->Type();
    CompileType* right_type = right_value->Type();

    if (!left_type->IsNullableDouble() &&
        !IsSupportedIntOperandForStaticDoubleOp(left_type)) {
      return false;
    }
    if (!right_type->IsNullableDouble() &&
        !IsSupportedIntOperandForStaticDoubleOp(right_type)) {
      return false;
    }

    switch (op_kind) {
      case Token::kEQ:
        FALL_THROUGH;
      case Token::kNE: {
        // TODO(dartbug.com/32166): Support EQ, NE for nullable doubles.
        // (requires null-aware comparison instruction).
        if (!left_type->is_nullable() && !right_type->is_nullable()) {
          left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
          right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
          replacement = new (Z) EqualityCompareInstr(
              instr->source(), op_kind, left_value, right_value, kDoubleCid,
              DeoptId::kNone, /*null_aware=*/false,
              Instruction::kNotSpeculative);
          break;
        }
        break;
      }
      case Token::kLT:
        FALL_THROUGH;
      case Token::kLTE:
        FALL_THROUGH;
      case Token::kGT:
        FALL_THROUGH;
      case Token::kGTE: {
        left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
        right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
        replacement = new (Z) RelationalOpInstr(
            instr->source(), op_kind, left_value, right_value, kDoubleCid,
            DeoptId::kNone, Instruction::kNotSpeculative);
        break;
      }
      case Token::kADD:
        FALL_THROUGH;
      case Token::kSUB:
        FALL_THROUGH;
      case Token::kMUL:
        FALL_THROUGH;
      case Token::kDIV: {
        left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
        right_value = PrepareStaticOpInput(right_value, kDoubleCid, instr);
        replacement = new (Z) BinaryDoubleOpInstr(
            op_kind, left_value, right_value, DeoptId::kNone, instr->source(),
            Instruction::kNotSpeculative);
        break;
      }

      case Token::kBIT_OR:
        FALL_THROUGH;
      case Token::kBIT_XOR:
        FALL_THROUGH;
      case Token::kBIT_AND:
        FALL_THROUGH;
      case Token::kMOD:
        FALL_THROUGH;
      case Token::kTRUNCDIV:
        FALL_THROUGH;
      default:
        break;
    }
  } else if (instr->ArgumentCount() == 1) {
    Value* left_value = instr->ArgumentValueAt(0);
    CompileType* left_type = left_value->Type();

    // We only support unary operations on nullable doubles.
    if (!left_type->IsNullableDouble()) {
      return false;
    }

    if (op_kind == Token::kNEGATE) {
      left_value = PrepareStaticOpInput(left_value, kDoubleCid, instr);
      replacement = new (Z)
          UnaryDoubleOpInstr(Token::kNEGATE, left_value, instr->deopt_id(),
                             Instruction::kNotSpeculative);
    }
  }

  if (replacement != nullptr && !replacement->ComputeCanDeoptimize()) {
    if (FLAG_trace_strong_mode_types) {
      THR_Print("[Strong mode] Optimization: replacing %s with %s\n",
                instr->ToCString(), replacement->ToCString());
    }
    ReplaceCall(instr, replacement);
    RefineUseTypes(replacement);
    return true;
  }

  return false;
}

// Tries to optimize an instance call by replacing it with a faster
// instruction (e.g., a binary op, a field load, ...).
// TODO(dartbug.com/30635) Evaluate how much this can be shared with
// JitCallSpecializer.
void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
  // Type test is special as it always gets converted into inlined code.
  const Token::Kind op_kind = instr->token_kind();
  if (Token::IsTypeTestOperator(op_kind)) {
    ReplaceWithInstanceOf(instr);
    return;
  }

  if (TryInlineFieldAccess(instr)) {
    return;
  }

  if (RecognizeRuntimeTypeGetter(instr)) {
    return;
  }

  if ((op_kind == Token::kEQ) && TryReplaceWithHaveSameRuntimeType(instr)) {
    return;
  }

  const CallTargets& targets = instr->Targets();
  const intptr_t receiver_idx = instr->FirstArgIndex();

  if (TryOptimizeInstanceCallUsingStaticTypes(instr)) {
    return;
  }

  bool has_one_target = targets.HasSingleTarget();
  if (has_one_target) {
    // Check if the single target is a polymorphic target; if it is,
    // we don't have one target.
    const Function& target = targets.FirstTarget();
    has_one_target = !target.is_polymorphic_target();
  }

  if (has_one_target) {
    const Function& target = targets.FirstTarget();
    UntaggedFunction::Kind function_kind = target.kind();
    if (flow_graph()->CheckForInstanceCall(instr, function_kind) ==
        FlowGraph::ToCheck::kNoCheck) {
      StaticCallInstr* call = StaticCallInstr::FromCall(
          Z, instr, target, targets.AggregateCallCount());
      instr->ReplaceWith(call, current_iterator());
      return;
    }
  }

  // No IC data checks. Try to resolve the target using the propagated cid.
  const intptr_t receiver_cid =
      instr->ArgumentValueAt(receiver_idx)->Type()->ToCid();
  if (receiver_cid != kDynamicCid && receiver_cid != kSentinelCid) {
    const Class& receiver_class =
        Class::Handle(Z, isolate_group()->class_table()->At(receiver_cid));
    const Function& function =
        Function::Handle(Z, instr->ResolveForReceiverClass(receiver_class));
    if (!function.IsNull()) {
      const Function& target = Function::ZoneHandle(Z, function.ptr());
      StaticCallInstr* call =
          StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
      instr->ReplaceWith(call, current_iterator());
      return;
    }
  }

  // Check for x == y, where x has static type T?, T has no subtypes, and
  // T does not override ==. Replace the call with a StrictCompare.
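  // Illustrative example (hypothetical Dart code, not part of this file):
  //
  //   class Point { final int x; Point(this.x); }  // no operator== override
  //   bool same(Point? p, Point? q) => p == q;     // same as identical(p, q)
  //
  // Every possible receiver class inherits Object.==, so equality here is
  // identity and a strict compare is safe.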
  if (instr->token_kind() == Token::kEQ || instr->token_kind() == Token::kNE) {
    GrowableArray<intptr_t> class_ids(6);
    if (instr->ArgumentValueAt(receiver_idx)->Type()->Specialize(&class_ids)) {
      bool is_object_eq = true;
      for (intptr_t i = 0; i < class_ids.length(); i++) {
        const intptr_t cid = class_ids[i];
        // Skip sentinel cid. It may appear in the unreachable code after
        // inlining a method which doesn't return.
        if (cid == kSentinelCid) continue;
        const Class& cls =
            Class::Handle(Z, isolate_group()->class_table()->At(cid));
        const Function& target =
            Function::Handle(Z, instr->ResolveForReceiverClass(cls));
        if (target.recognized_kind() != MethodRecognizer::kObjectEquals) {
          is_object_eq = false;
          break;
        }
      }
      if (is_object_eq) {
        auto* replacement = new (Z) StrictCompareInstr(
            instr->source(),
            (instr->token_kind() == Token::kEQ) ? Token::kEQ_STRICT
                                                : Token::kNE_STRICT,
            instr->ArgumentValueAt(0)->CopyWithType(Z),
            instr->ArgumentValueAt(1)->CopyWithType(Z),
            /*needs_number_check=*/false, DeoptId::kNone);
        ReplaceCall(instr, replacement);
        RefineUseTypes(replacement);
        return;
      }
    }
  }

  Definition* callee_receiver = instr->ArgumentAt(receiver_idx);
  const Function& function = flow_graph()->function();
  Class& receiver_class = Class::Handle(Z);

  if (function.IsDynamicFunction() &&
      flow_graph()->IsReceiver(callee_receiver)) {
    // Call receiver is the method receiver.
    receiver_class = function.Owner();
  } else {
    // Check if we have a non-nullable compile type for the receiver.
    CompileType* type = instr->ArgumentAt(receiver_idx)->Type();
    if (type->ToAbstractType()->IsType() &&
        !type->ToAbstractType()->IsDynamicType() && !type->is_nullable()) {
      receiver_class = type->ToAbstractType()->type_class();
      if (receiver_class.is_implemented()) {
        receiver_class = Class::null();
      }
    }
  }
  if (!receiver_class.IsNull()) {
    GrowableArray<intptr_t> class_ids(6);
    if (thread()->compiler_state().cha().ConcreteSubclasses(receiver_class,
                                                            &class_ids)) {
      // First check if all subclasses end up calling the same method.
      // If this is the case, we will replace the instance call with a direct
      // static call.
      // Otherwise we will try to create ICData that contains all possible
      // targets with appropriate checks.
      Function& single_target = Function::Handle(Z);
      ICData& ic_data = ICData::Handle(Z);
      const Array& args_desc_array =
          Array::Handle(Z, instr->GetArgumentsDescriptor());
      Function& target = Function::Handle(Z);
      Class& cls = Class::Handle(Z);
      for (intptr_t i = 0; i < class_ids.length(); i++) {
        const intptr_t cid = class_ids[i];
        cls = isolate_group()->class_table()->At(cid);
        target = instr->ResolveForReceiverClass(cls);
        ASSERT(target.IsNull() || !target.IsInvokeFieldDispatcher());
        if (target.IsNull()) {
          single_target = Function::null();
          ic_data = ICData::null();
          break;
        } else if (ic_data.IsNull()) {
          // First we are trying to compute a single target for all subclasses.
          if (single_target.IsNull()) {
            ASSERT(i == 0);
            single_target = target.ptr();
            continue;
          } else if (single_target.ptr() == target.ptr()) {
            continue;
          }

          // The call does not resolve to a single target within the hierarchy.
          // If we have too many subclasses, abort the optimization.
          if (class_ids.length() > FLAG_max_exhaustive_polymorphic_checks) {
            single_target = Function::null();
            break;
          }

          // Create an ICData and map all previously seen classes (< i) to
          // the computed single_target.
          ic_data = ICData::New(function, instr->function_name(),
                                args_desc_array, DeoptId::kNone,
                                /*args_tested=*/1, ICData::kOptimized);
          for (intptr_t j = 0; j < i; j++) {
            ic_data.AddReceiverCheck(class_ids[j], single_target);
          }

          single_target = Function::null();
        }

        ASSERT(ic_data.ptr() != ICData::null());
        ASSERT(single_target.ptr() == Function::null());
        ic_data.AddReceiverCheck(cid, target);
      }

      if (single_target.ptr() != Function::null()) {
        // If this is a getter or setter invocation, try inlining it right
        // away instead of replacing it with a static call.
        if ((op_kind == Token::kGET) || (op_kind == Token::kSET)) {
          // Create fake IC data with the resolved target.
          const ICData& ic_data = ICData::Handle(
              ICData::New(flow_graph()->function(), instr->function_name(),
                          args_desc_array, DeoptId::kNone,
                          /*args_tested=*/1, ICData::kOptimized));
          cls = single_target.Owner();
          ic_data.AddReceiverCheck(cls.id(), single_target);
          instr->set_ic_data(&ic_data);

          if (TryInlineFieldAccess(instr)) {
            return;
          }
        }

        // We have computed that there is only a single target for this call
        // within the whole hierarchy. Replace InstanceCall with StaticCall.
        const Function& target = Function::ZoneHandle(Z, single_target.ptr());
        StaticCallInstr* call =
            StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
        instr->ReplaceWith(call, current_iterator());
        return;
      } else if ((ic_data.ptr() != ICData::null()) &&
                 !ic_data.NumberOfChecksIs(0)) {
        const CallTargets* targets = CallTargets::Create(Z, ic_data);
        ASSERT(!targets->is_empty());
        PolymorphicInstanceCallInstr* call =
            PolymorphicInstanceCallInstr::FromCall(Z, instr, *targets,
                                                   /*complete=*/true);
        instr->ReplaceWith(call, current_iterator());
        return;
      }
    }

    // Detect if o.m(...) is a call through a getter and expand it
    // into o.get:m().call(...).
    if (TryExpandCallThroughGetter(receiver_class, instr)) {
      return;
    }
  }

  // More than one target. Generate a generic polymorphic call without
  // deoptimization.
  if (targets.length() > 0) {
    ASSERT(!FLAG_polymorphic_with_deopt);
    // OK to use checks with PolymorphicInstanceCallInstr since no
    // deoptimization is allowed.
    PolymorphicInstanceCallInstr* call =
        PolymorphicInstanceCallInstr::FromCall(Z, instr, targets,
                                               /*complete=*/false);
    instr->ReplaceWith(call, current_iterator());
    return;
  }
}

void AotCallSpecializer::VisitStaticCall(StaticCallInstr* instr) {
  if (TryInlineFieldAccess(instr)) {
    return;
  }
  CallSpecializer::VisitStaticCall(instr);
}

bool AotCallSpecializer::TryExpandCallThroughGetter(const Class& receiver_class,
                                                    InstanceCallInstr* call) {
  // If it's an accessor call, it can't be a call through a getter.
  if (call->token_kind() == Token::kGET || call->token_kind() == Token::kSET) {
    return false;
  }

  // Ignore call sites like f.call() for now. Those need to be handled
  // specially if f is a closure.
  if (call->function_name().ptr() == Symbols::call().ptr()) {
    return false;
  }

  Function& target = Function::Handle(Z);

  const String& getter_name =
      String::ZoneHandle(Z, Symbols::FromGet(thread(), call->function_name()));

  const Array& args_desc_array = Array::Handle(
      Z,
      ArgumentsDescriptor::NewBoxed(/*type_args_len=*/0, /*num_arguments=*/1));
  ArgumentsDescriptor args_desc(args_desc_array);
  target = Resolver::ResolveDynamicForReceiverClass(
      receiver_class, getter_name, args_desc, /*allow_add=*/false);
  if (target.ptr() == Function::null() || target.IsMethodExtractor()) {
    return false;
  }

  // We found a getter with the same name as the method this call tries to
  // invoke. This implies a call through a getter, because methods can't
  // override getters. Build an o.get:m().call(...) sequence and replace the
  // o.m(...) invocation.
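  // Illustrative example (hypothetical Dart code, not part of this file):
  //
  //   class A { void Function() get m => () {}; }
  //   a.m();  // A has no method 'm', so this is really a.get:m().call()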

  const intptr_t receiver_idx = call->type_args_len() > 0 ? 1 : 0;

  InputsArray get_arguments(Z, 1);
  get_arguments.Add(call->ArgumentValueAt(receiver_idx)->CopyWithType(Z));
  InstanceCallInstr* invoke_get = new (Z) InstanceCallInstr(
      call->source(), getter_name, Token::kGET, std::move(get_arguments),
      /*type_args_len=*/0,
      /*argument_names=*/Object::empty_array(),
      /*checked_argument_count=*/1,
      thread()->compiler_state().GetNextDeoptId());

  // Arguments to the .call() are the same as the arguments to the
  // original call (including type arguments), but the receiver
  // is replaced with the result of the get.
  InputsArray call_arguments(Z, call->ArgumentCount());
  if (call->type_args_len() > 0) {
    call_arguments.Add(call->ArgumentValueAt(0)->CopyWithType(Z));
  }
  call_arguments.Add(new (Z) Value(invoke_get));
  for (intptr_t i = receiver_idx + 1; i < call->ArgumentCount(); i++) {
    call_arguments.Add(call->ArgumentValueAt(i)->CopyWithType(Z));
  }

  InstanceCallInstr* invoke_call = new (Z) InstanceCallInstr(
      call->source(), Symbols::call(), Token::kILLEGAL,
      std::move(call_arguments), call->type_args_len(), call->argument_names(),
      /*checked_argument_count=*/1,
      thread()->compiler_state().GetNextDeoptId());

  // Create an environment and insert 'invoke_get'.
  Environment* get_env =
      call->env()->DeepCopy(Z, call->env()->Length() - call->ArgumentCount());
  for (intptr_t i = 0, n = invoke_get->ArgumentCount(); i < n; i++) {
    get_env->PushValue(new (Z) Value(invoke_get->ArgumentAt(i)));
  }
  InsertBefore(call, invoke_get, get_env, FlowGraph::kValue);

  // Replace the original call with the .call(...) invocation.
  call->ReplaceWith(invoke_call, current_iterator());

  // ReplaceWith moved the environment from 'call' to 'invoke_call'.
  // Update the receiver argument in the environment.
  Environment* invoke_env = invoke_call->env();
  invoke_env
      ->ValueAt(invoke_env->Length() - invoke_call->ArgumentCount() +
                receiver_idx)
      ->BindToEnvironment(invoke_get);

  // The AOT compiler expects all calls to have an ICData.
  invoke_get->EnsureICData(flow_graph());
  invoke_call->EnsureICData(flow_graph());

  // Specialize the newly inserted calls.
  TryCreateICData(invoke_get);
  VisitInstanceCall(invoke_get);
  TryCreateICData(invoke_call);
  VisitInstanceCall(invoke_call);

  // Success.
  return true;
}

void AotCallSpecializer::VisitPolymorphicInstanceCall(
    PolymorphicInstanceCallInstr* call) {
  const intptr_t receiver_idx = call->type_args_len() > 0 ? 1 : 0;
  const intptr_t receiver_cid =
      call->ArgumentValueAt(receiver_idx)->Type()->ToCid();
  if (receiver_cid != kDynamicCid && receiver_cid != kSentinelCid) {
    const Class& receiver_class =
        Class::Handle(Z, isolate_group()->class_table()->At(receiver_cid));
    const Function& function =
        Function::ZoneHandle(Z, call->ResolveForReceiverClass(receiver_class));
    if (!function.IsNull()) {
      // Only one target. Replace by a static call.
      StaticCallInstr* new_call =
          StaticCallInstr::FromCall(Z, call, function, call->CallCount());
      call->ReplaceWith(new_call, current_iterator());
    }
  }
}

bool AotCallSpecializer::TryReplaceInstanceOfWithRangeCheck(
    InstanceCallInstr* call,
    const AbstractType& type) {
  HierarchyInfo* hi = thread()->hierarchy_info();
  if (hi == nullptr) {
    return false;
  }

  intptr_t lower_limit, upper_limit;
  if (!hi->InstanceOfHasClassRange(type, &lower_limit, &upper_limit)) {
    return false;
  }

  Definition* left = call->ArgumentAt(0);
  LoadClassIdInstr* load_cid =
      new (Z) LoadClassIdInstr(new (Z) Value(left), kUnboxedUword);
  InsertBefore(call, load_cid, nullptr, FlowGraph::kValue);

  ComparisonInstr* check_range;
  if (lower_limit == upper_limit) {
    ConstantInstr* cid_constant = flow_graph()->GetConstant(
        Smi::Handle(Z, Smi::New(lower_limit)), kUnboxedUword);
    check_range = new (Z) EqualityCompareInstr(
        call->source(), Token::kEQ, new Value(load_cid),
        new Value(cid_constant), kIntegerCid, DeoptId::kNone, false,
        Instruction::kNotSpeculative);
  } else {
    check_range =
        new (Z) TestRangeInstr(call->source(), new (Z) Value(load_cid),
                               lower_limit, upper_limit, kUnboxedUword);
  }
  ReplaceCall(call, check_range);

  return true;
}

void AotCallSpecializer::ReplaceInstanceCallsWithDispatchTableCalls() {
  ASSERT(current_iterator_ == nullptr);
  for (BlockIterator block_it = flow_graph()->reverse_postorder_iterator();
       !block_it.Done(); block_it.Advance()) {
    ForwardInstructionIterator it(block_it.Current());
    current_iterator_ = &it;
    for (; !it.Done(); it.Advance()) {
      Instruction* instr = it.Current();
      if (auto call = instr->AsInstanceCall()) {
        TryReplaceWithDispatchTableCall(call);
      } else if (auto call = instr->AsPolymorphicInstanceCall()) {
        TryReplaceWithDispatchTableCall(call);
      }
    }
    current_iterator_ = nullptr;
  }
}

const Function& AotCallSpecializer::InterfaceTargetForTableDispatch(
    InstanceCallBaseInstr* call) {
  const Function& interface_target = call->interface_target();
  if (!interface_target.IsNull()) {
    return interface_target;
  }

  // Dynamic call or tearoff.
  const Function& tearoff_interface_target = call->tearoff_interface_target();
  if (!tearoff_interface_target.IsNull()) {
    // Tearoff.
    return Function::ZoneHandle(
        Z, tearoff_interface_target.GetMethodExtractor(call->function_name()));
  }

  // Dynamic call.
  return Function::null_function();
}

void AotCallSpecializer::TryReplaceWithDispatchTableCall(
    InstanceCallBaseInstr* call) {
  const Function& interface_target = InterfaceTargetForTableDispatch(call);
  if (interface_target.IsNull()) {
    // Dynamic call.
    return;
  }

  Value* receiver = call->ArgumentValueAt(call->FirstArgIndex());
  const compiler::TableSelector* selector =
      precompiler_->selector_map()->GetSelector(interface_target);

  if (selector == nullptr) {
    // Target functions were removed by tree shaking. This call is dead code,
    // or the receiver is always null.
#if defined(DEBUG)
    AddCheckNull(receiver->CopyWithType(Z), call->function_name(),
                 DeoptId::kNone, call->env(), call);
    StopInstr* stop = new (Z) StopInstr("Dead instance call executed.");
    InsertBefore(call, stop, call->env(), FlowGraph::kEffect);
#endif
    return;
  }

  const bool receiver_can_be_smi =
      call->CanReceiverBeSmiBasedOnInterfaceTarget(zone());
  auto load_cid = new (Z) LoadClassIdInstr(receiver->CopyWithType(Z),
                                           kUnboxedUword, receiver_can_be_smi);
  InsertBefore(call, load_cid, call->env(), FlowGraph::kValue);

  auto dispatch_table_call = DispatchTableCallInstr::FromCall(
      Z, call, new (Z) Value(load_cid), interface_target, selector);
  call->ReplaceWith(dispatch_table_call, current_iterator());
}

#endif  // DART_PRECOMPILER

}  // namespace dart