flow_graph_compiler_ia32.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
6#if defined(TARGET_ARCH_IA32)
7
9
10#include "vm/code_patcher.h"
17#include "vm/cpu.h"
18#include "vm/dart_entry.h"
20#include "vm/instructions.h"
21#include "vm/object_store.h"
22#include "vm/parser.h"
23#include "vm/stack_frame.h"
24#include "vm/stub_code.h"
25#include "vm/symbols.h"
26
27namespace dart {
28
29DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
30
31DECLARE_FLAG(bool, enable_simd_inline);
32
34
35FlowGraphCompiler::~FlowGraphCompiler() {
36 // BlockInfos are zone-allocated, so their destructors are not called.
37 // Verify the labels explicitly here.
38 for (int i = 0; i < block_info_.length(); ++i) {
39 ASSERT(!block_info_[i]->jump_label()->IsLinked());
40 ASSERT(!block_info_[i]->jump_label()->HasNear());
41 }
42}
43
44bool FlowGraphCompiler::SupportsUnboxedSimd128() {
45 return FLAG_enable_simd_inline;
46}
47
48bool FlowGraphCompiler::CanConvertInt64ToDouble() {
49 return true;
50}
51
52void FlowGraphCompiler::EnterIntrinsicMode() {
53 ASSERT(!intrinsic_mode_);
54 intrinsic_mode_ = true;
55}
56
57void FlowGraphCompiler::ExitIntrinsicMode() {
58 ASSERT(intrinsic_mode_);
59 intrinsic_mode_ = false;
60}
61
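// CreateDeoptInfo below emits the deoptimization translation: first the
// kMaterializeObject instructions (as a prefix), then, starting at the frame
// start, for the innermost frame a pc marker, caller fp and return address,
// the values needed for materialization, and its outgoing arguments and
// locals; then, for every outer (inlined-into) environment, its return
// address, the inlined call's outgoing arguments, its locals, pc marker and
// caller fp; and finally the caller pc and incoming arguments of the
// outermost frame.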
62TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
63 DeoptInfoBuilder* builder,
64 const Array& deopt_table) {
65 if (deopt_env_ == nullptr) {
66 ++builder->current_info_number_;
67 return TypedData::null();
68 }
69
70 AllocateOutgoingArguments(deopt_env_);
71
72 intptr_t slot_ix = 0;
73 Environment* current = deopt_env_;
74
75 // Emit all kMaterializeObject instructions describing objects to be
76 // materialized on the deoptimization as a prefix to the deoptimization info.
77 EmitMaterializations(deopt_env_, builder);
78
79 // The real frame starts here.
80 builder->MarkFrameStart();
81
82 Zone* zone = compiler->zone();
83
84 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
85 builder->AddCallerFp(slot_ix++);
86 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
87
88 // Emit all values that are needed for materialization as a part of the
89 // expression stack for the bottom-most frame. This guarantees that GC
90 // will be able to find them during materialization.
91 slot_ix = builder->EmitMaterializationArguments(slot_ix);
92
93 // For the innermost environment, set outgoing arguments and the locals.
94 for (intptr_t i = current->Length() - 1;
95 i >= current->fixed_parameter_count(); i--) {
96 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
97 }
98
99 builder->AddPcMarker(current->function(), slot_ix++);
100 builder->AddCallerFp(slot_ix++);
101
102 Environment* previous = current;
103 current = current->outer();
104 while (current != nullptr) {
105 // For any outer environment the deopt id is that of the call instruction
106 // which is recorded in the outer environment.
107 builder->AddReturnAddress(current->function(),
108 DeoptId::ToDeoptAfter(current->GetDeoptId()),
109 slot_ix++);
110
111 // The values of outgoing arguments can be changed by the inlined call, so
112 // we must read them from the previous environment.
113 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
114 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
115 slot_ix++);
116 }
117
118 // Set the locals, note that outgoing arguments are not in the environment.
119 for (intptr_t i = current->Length() - 1;
120 i >= current->fixed_parameter_count(); i--) {
121 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
122 }
123
124 builder->AddPcMarker(current->function(), slot_ix++);
125 builder->AddCallerFp(slot_ix++);
126
127 // Iterate on the outer environment.
128 previous = current;
129 current = current->outer();
130 }
131 // The previous pointer is now the outermost environment.
132 ASSERT(previous != nullptr);
133
134 // For the outermost environment, set caller PC.
135 builder->AddCallerPc(slot_ix++);
136
137 // For the outermost environment, set the incoming arguments.
138 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
139 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
140 }
141
142 return builder->CreateDeoptInfo(deopt_table);
143}
144
145void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
146 intptr_t stub_ix) {
147 // Calls do not need stubs, they share a deoptimization trampoline.
148 ASSERT(reason() != ICData::kDeoptAtCall);
149 compiler::Assembler* assembler = compiler->assembler();
150#define __ assembler->
151 __ Comment("%s", Name());
152 __ Bind(entry_label());
153 if (FLAG_trap_on_deoptimization) {
154 __ int3();
155 }
156
157 ASSERT(deopt_env() != nullptr);
158 __ pushl(CODE_REG);
159 __ Call(StubCode::Deoptimize());
160 set_pc_offset(assembler->CodeSize());
161 __ int3();
162#undef __
163}
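// Each deopt stub above amounts to: push CODE_REG, call the shared Deoptimize
// stub, record pc_offset at the return address of that call, and trap on the
// trailing int3 if control ever falls through.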
164
165#define __ assembler()->
166
167// Fall through if bool_register contains null.
168void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
169 compiler::Label* is_true,
170 compiler::Label* is_false) {
171 const compiler::Immediate& raw_null =
172 compiler::Immediate(static_cast<intptr_t>(Object::null()));
173 compiler::Label fall_through;
174 __ cmpl(bool_register, raw_null);
175 __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
176 BranchLabels labels = {is_true, is_false, &fall_through};
177 Condition true_condition =
178 EmitBoolTest(bool_register, labels, /*invert=*/false);
179 ASSERT(true_condition != kInvalidCondition);
180 __ j(true_condition, is_true);
181 __ jmp(is_false);
182 __ Bind(&fall_through);
183}
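// GenerateBoolToJump gives three-way control flow on a register that holds
// true, false or null: null falls through to the code that follows, while
// true and false jump to is_true and is_false respectively.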
184
185// Input registers (from TypeTestABI):
186// - kInstanceReg: instance.
187// - kDstTypeReg: destination type (for test_kind == kTestTypeSevenArg).
188// - kInstantiatorTypeArgumentsReg: instantiator type arguments
189// (for test_kind >= kTestTypeThreeArg).
190// - kFunctionTypeArgumentsReg: function type arguments
191// (for test_kind >= kTestTypeFourArg).
192//
193// Only preserves kInstanceReg from TypeTestABI, all other TypeTestABI
194// registers may be used and thus must be saved by the caller.
195SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
196 TypeTestStubKind test_kind,
197 compiler::Label* is_instance_lbl,
198 compiler::Label* is_not_instance_lbl) {
199 const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
200 const SubtypeTestCache& type_test_cache =
201 SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New(num_inputs));
202 const auto& stub_entry =
203 StubCode::SubtypeTestCacheStubForUsedInputs(num_inputs);
204 __ LoadObject(TypeTestABI::kSubtypeTestCacheReg, type_test_cache);
205 __ pushl(TypeTestABI::kSubtypeTestCacheReg);
206 __ pushl(TypeTestABI::kInstanceReg);
207 // Registers for unused inputs may not have GC-safe values to push, so push
208 // the null object if the input is unused instead.
209 if (num_inputs >= 7) {
210 __ pushl(TypeTestABI::kDstTypeReg);
211 } else {
212 __ PushObject(Object::null_object());
213 }
214 if (num_inputs >= 3) {
215 __ pushl(TypeTestABI::kInstantiatorTypeArgumentsReg);
216 } else {
217 __ PushObject(Object::null_object());
218 }
219 if (num_inputs >= 4) {
220 __ pushl(TypeTestABI::kFunctionTypeArgumentsReg);
221 } else {
222 __ PushObject(Object::null_object());
223 }
224 __ Call(stub_entry);
225 // Restore all but kSubtypeTestCacheReg (since it is the same as
226 // kSubtypeTestCacheResultReg). Since the generated code is documented as
227 // clobbering all non-kInstanceReg TypeTestABI registers, it's okay to pop
228 // null into the registers that didn't have guaranteed GC-safe values prior.
229 static_assert(TypeTestABI::kSubtypeTestCacheReg ==
230 TypeTestABI::kSubtypeTestCacheResultReg,
231 "Code assumes cache and result register are the same");
232 __ popl(TypeTestABI::kFunctionTypeArgumentsReg);
233 __ popl(TypeTestABI::kInstantiatorTypeArgumentsReg);
234 __ popl(TypeTestABI::kDstTypeReg);
235 __ popl(TypeTestABI::kInstanceReg); // Restore receiver.
236 __ Drop(1);
237 GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
238 is_not_instance_lbl);
239 return type_test_cache.ptr();
240}
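// On ia32 the SubtypeTestCache inputs are passed on the stack, with null
// pushed in place of any unused input. The stub leaves its answer
// (null/true/false) in TypeTestABI::kSubtypeTestCacheResultReg, which
// GenerateBoolToJump then turns into a jump to is_instance_lbl or
// is_not_instance_lbl.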
241
242// Optimize assignable type check by adding inlined tests for:
243// - null -> return null.
244// - Smi -> compile time subtype check (only if dst class is not parameterized).
245// - Class equality (only if class is not parameterized).
246// Inputs:
247// - EAX: object.
248// - EBX: destination type (if non-constant).
249// - EDX: instantiator type arguments or raw_null.
250// - ECX: function type arguments or raw_null.
251// Returns:
252// - object in EAX for successful assignable check (or throws TypeError).
253// Performance notes: positive checks must be quick, negative checks can be slow
254// as they throw an exception.
255void FlowGraphCompiler::GenerateAssertAssignable(
256 CompileType* receiver_type,
257 const InstructionSource& source,
258 intptr_t deopt_id,
259 Environment* env,
260 const String& dst_name,
261 LocationSummary* locs) {
262 ASSERT(!source.token_pos.IsClassifying());
263 ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs));
264
265 const auto& dst_type =
266 locs->in(AssertAssignableInstr::kDstTypePos).IsConstant()
267 ? AbstractType::Cast(
268 locs->in(AssertAssignableInstr::kDstTypePos).constant())
269 : Object::null_abstract_type();
270
271 if (!dst_type.IsNull()) {
272 ASSERT(dst_type.IsFinalized());
273 if (dst_type.IsTopTypeForSubtyping()) return; // No code needed.
274 }
275
276 compiler::Label is_assignable, runtime_call;
277 auto& test_cache = SubtypeTestCache::ZoneHandle(zone());
278 if (dst_type.IsNull()) {
279 __ Comment("AssertAssignable for runtime type");
280 // kDstTypeReg should already contain the destination type.
281 GenerateNonLazyDeoptableStubCall(source,
282 StubCode::TypeIsTopTypeForSubtyping(),
283 UntaggedPcDescriptors::kOther, locs);
284 // TypeTestABI::kSubtypeTestCacheReg is 0 if the type is a top type.
285 __ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
286 compiler::Assembler::kNearJump);
287
288 GenerateNonLazyDeoptableStubCall(source, StubCode::NullIsAssignableToType(),
289 UntaggedPcDescriptors::kOther, locs);
290 // TypeTestABI::kSubtypeTestCacheReg is 0 if the object is null and is
291 // assignable.
292 __ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
293 compiler::Assembler::kNearJump);
294
295 // Use the full-arg version of the cache.
296 test_cache = GenerateCallSubtypeTestStub(kTestTypeMaxArgs, &is_assignable,
297 &runtime_call);
298 } else {
299 __ Comment("AssertAssignable for compile-time type");
300
301 if (Instance::NullIsAssignableTo(dst_type)) {
302 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
303 __ BranchIf(EQUAL, &is_assignable);
304 }
305
306 // Generate inline type check, linking to runtime call if not assignable.
307 test_cache = GenerateInlineInstanceof(source, dst_type, &is_assignable,
308 &runtime_call);
309 }
310
311 __ Bind(&runtime_call);
312
313 // We push the inputs of [AssertAssignable] in the same order as they lie on
314 // the stack in unoptimized code.
315 // That makes the deopt environment we emit as metadata correct, so it
316 // doesn't need pruning (unlike on other architectures).
317
318 static_assert(AssertAssignableInstr::kNumInputs == 4,
319 "Expected AssertAssignable to have 4 inputs");
320
321 __ PushRegister(TypeTestABI::kInstanceReg);
322 if (!dst_type.IsNull()) {
323 __ PushObject(dst_type);
324 } else {
325 __ PushRegister(TypeTestABI::kDstTypeReg);
326 }
327 __ PushRegister(TypeTestABI::kInstantiatorTypeArgumentsReg);
328 __ PushRegister(TypeTestABI::kFunctionTypeArgumentsReg);
329
330 // Pass destination name and subtype test reg as register arguments.
331 __ LoadObject(AssertAssignableStubABI::kDstNameReg, dst_name);
332 __ LoadObject(AssertAssignableStubABI::kSubtypeTestReg, test_cache);
333
334 GenerateStubCall(source, StubCode::AssertAssignable(),
335 UntaggedPcDescriptors::kOther, locs, deopt_id, env);
336
337 __ Drop(AssertAssignableInstr::kNumInputs - 1);
338 __ PopRegister(TypeTestABI::kInstanceReg);
339
340 __ Bind(&is_assignable);
341}
342
343// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
344// needs to be updated to match.
345void FlowGraphCompiler::EmitFrameEntry() {
346 RELEASE_ASSERT(flow_graph().graph_entry()->NeedsFrame());
347
348 const Function& function = parsed_function().function();
349 if (CanOptimizeFunction() && function.IsOptimizable() &&
350 (!is_optimizing() || may_reoptimize())) {
351 __ Comment("Invocation Count Check");
352 const Register function_reg = EBX;
353 __ LoadObject(function_reg, function);
354
355 // Reoptimization of an optimized function is triggered by counting in
356 // IC stubs, but not at the entry of the function.
357 if (!is_optimizing()) {
358 __ incl(compiler::FieldAddress(function_reg,
359 Function::usage_counter_offset()));
360 }
361 __ cmpl(
362 compiler::FieldAddress(function_reg, Function::usage_counter_offset()),
363 compiler::Immediate(GetOptimizationThreshold()));
364 ASSERT(function_reg == EBX);
365 compiler::Label dont_optimize;
366 __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
367 __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
368 __ Bind(&dont_optimize);
369 }
370 __ Comment("Enter frame");
371 if (flow_graph().IsCompiledForOsr()) {
372 intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
373 ASSERT(extra_slots >= 0);
374 __ EnterOsrFrame(extra_slots * kWordSize);
375 } else {
376 ASSERT(StackSize() >= 0);
377 __ EnterDartFrame(StackSize() * kWordSize);
378 }
379}
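// In unoptimized (and reoptimizable) code the frame entry compares the
// function's usage counter against GetOptimizationThreshold(), incrementing
// it first when not optimizing, and jumps to the thread's optimize entry
// instead of entering the frame normally once the threshold is reached.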
380
381const InstructionSource& PrologueSource() {
382 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
383 /*inlining_id=*/0);
384 return prologue_source;
385}
386
387void FlowGraphCompiler::EmitPrologue() {
388 BeginCodeSourceRange(PrologueSource());
389
390 EmitFrameEntry();
391
392 // In unoptimized code, initialize (non-argument) stack allocated slots.
393 if (!is_optimizing()) {
394 const int num_locals = parsed_function().num_stack_locals();
395
396 intptr_t args_desc_slot = -1;
397 if (parsed_function().has_arg_desc_var()) {
398 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
399 parsed_function().arg_desc_var());
400 }
401
402 __ Comment("Initialize spill slots");
403 if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
404 const compiler::Immediate& raw_null =
405 compiler::Immediate(static_cast<intptr_t>(Object::null()));
406 __ movl(EAX, raw_null);
407 }
408 for (intptr_t i = 0; i < num_locals; ++i) {
409 const intptr_t slot_index =
410 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
411 Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
412 __ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
413 }
414 } else if (parsed_function().suspend_state_var() != nullptr &&
415 !flow_graph().IsCompiledForOsr()) {
416 // Initialize synthetic :suspend_state variable early
417 // as it may be accessed by GC and exception handling before
418 // InitSuspendableFunction stub is called.
419 const intptr_t slot_index =
420 compiler::target::frame_layout.FrameSlotForVariable(
421 parsed_function().suspend_state_var());
422 __ LoadObject(EAX, Object::null_object());
423 __ movl(compiler::Address(EBP, slot_index * kWordSize), EAX);
424 }
425
426 EndCodeSourceRange(PrologueSource());
427}
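// Unoptimized frames null out every stack-allocated local up front (and seed
// the arguments-descriptor slot from ARGS_DESC_REG) so the GC never observes
// uninitialized slots; optimized frames only need the synthetic
// :suspend_state slot initialized this early.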
428
429void FlowGraphCompiler::EmitCallToStub(
430 const Code& stub,
431 ObjectPool::SnapshotBehavior snapshot_behavior) {
432 if (stub.InVMIsolateHeap()) {
433 __ CallVmStub(stub);
434 } else {
435 // Ignore snapshot_behavior, ia32 doesn't do snapshots.
436 __ Call(stub);
437 }
438 AddStubCallTarget(stub);
439}
440
441void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
442 ASSERT(!stub.IsNull());
443 __ LoadObject(CODE_REG, stub);
444 __ jmp(compiler::FieldAddress(CODE_REG,
445 Code::entry_point_offset()));
446 AddStubCallTarget(stub);
447}
448
449void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
450 const InstructionSource& source,
451 const Code& stub,
452 UntaggedPcDescriptors::Kind kind,
453 LocationSummary* locs,
454 Code::EntryKind entry_kind) {
455 ASSERT(CanCallDart());
456 __ Call(stub, /*moveable_target=*/false, entry_kind);
457 EmitCallsiteMetadata(source, deopt_id, kind, locs,
458 pending_deoptimization_env_);
459}
460
461void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
462 const InstructionSource& source,
463 UntaggedPcDescriptors::Kind kind,
464 LocationSummary* locs,
465 const Function& target,
466 Code::EntryKind entry_kind) {
467 ASSERT(CanCallDart());
468 const auto& stub = StubCode::CallStaticFunction();
469 __ Call(stub, /*movable_target=*/true, entry_kind);
470 EmitCallsiteMetadata(source, deopt_id, kind, locs,
471 pending_deoptimization_env_);
472 AddStaticCallTarget(target, entry_kind);
473}
474
475void FlowGraphCompiler::EmitUnoptimizedStaticCall(
476 intptr_t size_with_type_args,
477 intptr_t deopt_id,
478 const InstructionSource& source,
479 LocationSummary* locs,
480 const ICData& ic_data,
481 Code::EntryKind entry_kind) {
482 ASSERT(CanCallDart());
483 const Code& stub =
484 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
485 __ LoadObject(ECX, ic_data);
486 GenerateDartCall(deopt_id, source, stub,
487 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
488 EmitDropArguments(size_with_type_args);
489}
490
491void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
492 // We do not check for overflow when incrementing the edge counter. The
493 // function should normally be optimized long before the counter can
494 // overflow; and though we do not reset the counters when we optimize or
495 // deoptimize, there is a bound on the number of
496 // optimization/deoptimization cycles we will attempt.
497 ASSERT(!edge_counters_array_.IsNull());
498 __ Comment("Edge counter");
499 __ LoadObject(EAX, edge_counters_array_);
500 __ IncrementSmiField(
501 compiler::FieldAddress(EAX, Array::element_offset(edge_id)), 1);
502}
503
504void FlowGraphCompiler::EmitOptimizedInstanceCall(
505 const Code& stub,
506 const ICData& ic_data,
507 intptr_t deopt_id,
508 const InstructionSource& source,
509 LocationSummary* locs,
510 Code::EntryKind entry_kind) {
511 ASSERT(CanCallDart());
512 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
513 // Each ICData propagated from unoptimized to optimized code contains the
514 // function that corresponds to the Dart function of that IC call. Due
515 // to inlining in optimized code, that function may not correspond to the
516 // top-level function (parsed_function().function()) which could be
517 // reoptimized and which counter needs to be incremented.
518 // Pass the function explicitly, it is used in IC stub.
519 __ LoadObject(EAX, parsed_function().function());
520 // Load receiver into EBX.
521 __ movl(EBX, compiler::Address(
522 ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
523 __ LoadObject(IC_DATA_REG, ic_data);
524 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
525 entry_kind);
526 EmitDropArguments(ic_data.SizeWithTypeArgs());
527}
528
529void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
530 const ICData& ic_data,
531 intptr_t deopt_id,
532 const InstructionSource& source,
533 LocationSummary* locs,
534 Code::EntryKind entry_kind) {
535 ASSERT(CanCallDart());
536 ASSERT(entry_kind == Code::EntryKind::kNormal ||
537 entry_kind == Code::EntryKind::kUnchecked);
538 ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
539 // Load receiver into EBX.
540 __ movl(EBX, compiler::Address(
541 ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
542 __ LoadObject(IC_DATA_REG, ic_data, true);
543 __ LoadObject(CODE_REG, stub, true);
544 const intptr_t entry_point_offset =
545 entry_kind == Code::EntryKind::kNormal
546 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
547 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
548 __ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
549 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
550 pending_deoptimization_env_);
551 EmitDropArguments(ic_data.SizeWithTypeArgs());
552}
553
554void FlowGraphCompiler::EmitMegamorphicInstanceCall(
555 const String& name,
556 const Array& arguments_descriptor,
557 intptr_t deopt_id,
558 const InstructionSource& source,
559 LocationSummary* locs) {
560 ASSERT(CanCallDart());
561 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
562 ASSERT(!FLAG_precompiled_mode);
563 const ArgumentsDescriptor args_desc(arguments_descriptor);
564 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
565 zone(),
566 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
567
568 __ Comment("MegamorphicCall");
569 // Load receiver into EBX.
570 __ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
571 __ LoadObject(IC_DATA_REG, cache, true);
572 __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
573 __ call(compiler::FieldAddress(
574 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
575
576 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
577 RecordSafepoint(locs);
578 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
579 // Precompilation not implemented on ia32 platform.
580 ASSERT(!FLAG_precompiled_mode);
581 if (is_optimizing()) {
582 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
583 } else {
584 // Add deoptimization continuation point after the call and before the
585 // arguments are removed.
586 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
587 }
588 RecordCatchEntryMoves(pending_deoptimization_env_);
589 EmitDropArguments(args_desc.SizeWithTypeArgs());
590}
591
592void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
593 intptr_t deopt_id,
594 const InstructionSource& source,
595 LocationSummary* locs,
596 Code::EntryKind entry_kind,
597 bool receiver_can_be_smi) {
598 // Only generated with precompilation.
599 UNREACHABLE();
600}
601
602void FlowGraphCompiler::EmitOptimizedStaticCall(
603 const Function& function,
604 const Array& arguments_descriptor,
605 intptr_t size_with_type_args,
606 intptr_t deopt_id,
607 const InstructionSource& source,
608 LocationSummary* locs,
609 Code::EntryKind entry_kind) {
610 ASSERT(CanCallDart());
611 if (function.PrologueNeedsArgumentsDescriptor()) {
612 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
613 } else {
614 __ xorl(ARGS_DESC_REG, ARGS_DESC_REG); // GC safe smi zero because of stub.
615 }
616 // Do not use the code from the function, but let the code be patched so that
617 // we can record the outgoing edges to other code.
618 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
619 function, entry_kind);
620 EmitDropArguments(size_with_type_args);
621}
622
623void FlowGraphCompiler::EmitDispatchTableCall(
624 int32_t selector_offset,
625 const Array& arguments_descriptor) {
626 // Only generated with precompilation.
627 UNREACHABLE();
628}
629
630Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
631 Register reg,
632 const Object& obj,
633 bool needs_number_check,
634 const InstructionSource& source,
635 intptr_t deopt_id) {
636 ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));
637
638 if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
639 ASSERT(!needs_number_check);
640 __ testl(reg, reg);
641 return EQUAL;
642 }
643
644 if (needs_number_check) {
645 __ pushl(reg);
646 __ PushObject(obj);
647 if (is_optimizing()) {
648 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
649 } else {
650 __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
651 }
652 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
653 // Stub returns result in flags (result of a cmpl, we need ZF computed).
654 __ popl(reg); // Discard constant.
655 __ popl(reg); // Restore 'reg'.
656 } else {
657 __ CompareObject(reg, obj);
658 }
659 return EQUAL;
660}
661
662Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
663 Register left,
664 Register right,
665 bool needs_number_check,
666 const InstructionSource& source,
667 intptr_t deopt_id) {
668 if (needs_number_check) {
669 __ pushl(left);
670 __ pushl(right);
671 if (is_optimizing()) {
672 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
673 } else {
674 __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
675 }
676 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
677 // Stub returns result in flags (result of a cmpl, we need ZF computed).
678 __ popl(right);
679 __ popl(left);
680 } else {
681 __ cmpl(left, right);
682 }
683 return EQUAL;
684}
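// Both equality helpers return EQUAL as the "is identical" condition: either
// from a direct testl/cmpl/CompareObject, or, when a number check is needed,
// from the Optimized/UnoptimizedIdenticalWithNumberCheck stubs, which set the
// flags before returning.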
685
686Condition FlowGraphCompiler::EmitBoolTest(Register value,
687 BranchLabels labels,
688 bool invert) {
689 __ Comment("BoolTest");
690 __ testl(value, compiler::Immediate(
691 compiler::target::ObjectAlignment::kBoolValueMask));
692 return invert ? NOT_EQUAL : EQUAL;
693}
694
695// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
696// FlowGraphCompiler::SlowPathEnvironmentFor.
697void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
698#if defined(DEBUG)
699 locs->CheckWritableInputs();
700 ClobberDeadTempRegisters(locs);
701#endif
702
703 // TODO(vegorov): consider saving only caller save (volatile) registers.
704 const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
705 if (xmm_regs_count > 0) {
706 __ subl(ESP, compiler::Immediate(xmm_regs_count * kFpuRegisterSize));
707 // Store XMM registers with the lowest register number at the lowest
708 // address.
709 intptr_t offset = 0;
710 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
711 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
712 if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
713 __ movups(compiler::Address(ESP, offset), xmm_reg);
714 offset += kFpuRegisterSize;
715 }
716 }
717 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
718 }
719
720 // The order in which the registers are pushed must match the order
721 // in which the registers are encoded in the safe point's stack map.
722 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
723 Register reg = static_cast<Register>(i);
724 if (locs->live_registers()->ContainsRegister(reg)) {
725 __ pushl(reg);
726 }
727 }
728}
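// SaveLiveRegisters reserves a block for the live XMM registers first (lowest
// register number at the lowest address) and then pushes the live CPU
// registers from highest to lowest index, so the CPU registers end up on top
// of the FPU block. RestoreLiveRegisters undoes this in reverse, and
// RecordSafepoint relies on exactly this layout for its stack maps.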
729
730void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
731 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
732 Register reg = static_cast<Register>(i);
733 if (locs->live_registers()->ContainsRegister(reg)) {
734 __ popl(reg);
735 }
736 }
737
738 const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
739 if (xmm_regs_count > 0) {
740 // XMM registers have the lowest register number at the lowest address.
741 intptr_t offset = 0;
742 for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
743 XmmRegister xmm_reg = static_cast<XmmRegister>(i);
744 if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
745 __ movups(xmm_reg, compiler::Address(ESP, offset));
746 offset += kFpuRegisterSize;
747 }
748 }
749 ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
750 __ addl(ESP, compiler::Immediate(offset));
751 }
752}
753
754#if defined(DEBUG)
755void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
756 // Clobber temporaries that have not been manually preserved.
757 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
758 Location tmp = locs->temp(i);
759 // TODO(zerny): clobber non-live temporary FPU registers.
760 if (tmp.IsRegister() &&
761 !locs->live_registers()->ContainsRegister(tmp.reg())) {
762 __ movl(tmp.reg(), compiler::Immediate(0xf7));
763 }
764 }
765}
766#endif
767
768Register FlowGraphCompiler::EmitTestCidRegister() {
769 return EDI;
770}
771
772void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
773 intptr_t count_without_type_args,
774 const Array& arguments_descriptor) {
775 __ Comment("EmitTestAndCall");
776 // Load receiver into EAX.
777 __ movl(EAX,
778 compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
779 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
780}
781
782void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
783 bool if_smi) {
784 __ testl(EAX, compiler::Immediate(kSmiTagMask));
785 // Jump if receiver is (not) Smi.
786 __ j(if_smi ? ZERO : NOT_ZERO, label);
787}
788
789void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
790 ASSERT(class_id_reg != EAX);
791 __ LoadClassId(class_id_reg, EAX);
792}
793
794void FlowGraphCompiler::EmitMove(Location destination,
795 Location source,
796 TemporaryRegisterAllocator* tmp) {
797 if (destination.Equals(source)) return;
798
799 if (source.IsRegister()) {
800 if (destination.IsRegister()) {
801 __ movl(destination.reg(), source.reg());
802 } else {
803 ASSERT(destination.IsStackSlot());
804 __ movl(LocationToStackSlotAddress(destination), source.reg());
805 }
806 } else if (source.IsStackSlot()) {
807 if (destination.IsRegister()) {
808 __ movl(destination.reg(), LocationToStackSlotAddress(source));
809 } else if (destination.IsFpuRegister()) {
810 // 32-bit float
811 __ movss(destination.fpu_reg(), LocationToStackSlotAddress(source));
812 } else {
813 ASSERT(destination.IsStackSlot());
814 Register scratch = tmp->AllocateTemporary();
815 __ MoveMemoryToMemory(LocationToStackSlotAddress(destination),
816 LocationToStackSlotAddress(source), scratch);
817 tmp->ReleaseTemporary();
818 }
819 } else if (source.IsFpuRegister()) {
820 if (destination.IsFpuRegister()) {
821 // Optimization manual recommends using MOVAPS for register
822 // to register moves.
823 __ movaps(destination.fpu_reg(), source.fpu_reg());
824 } else {
825 if (destination.IsDoubleStackSlot()) {
826 __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg());
827 } else if (destination.IsStackSlot()) {
828 // 32-bit float
829 __ movss(LocationToStackSlotAddress(destination), source.fpu_reg());
830 } else {
831 ASSERT(destination.IsQuadStackSlot());
832 __ movups(LocationToStackSlotAddress(destination), source.fpu_reg());
833 }
834 }
835 } else if (source.IsDoubleStackSlot()) {
836 if (destination.IsFpuRegister()) {
837 __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source));
838 } else if (destination.IsStackSlot()) {
839 // Source holds a 32-bit float, take only the lower 32-bits
840 __ movss(FpuTMP, LocationToStackSlotAddress(source));
841 __ movss(LocationToStackSlotAddress(destination), FpuTMP);
842 } else {
843 ASSERT(destination.IsDoubleStackSlot());
844 __ movsd(FpuTMP, LocationToStackSlotAddress(source));
845 __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
846 }
847 } else if (source.IsQuadStackSlot()) {
848 if (destination.IsFpuRegister()) {
849 __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source));
850 } else {
851 ASSERT(destination.IsQuadStackSlot());
852 __ movups(FpuTMP, LocationToStackSlotAddress(source));
853 __ movups(LocationToStackSlotAddress(destination), FpuTMP);
854 }
855 } else if (source.IsPairLocation()) {
856 ASSERT(destination.IsPairLocation());
857 for (intptr_t i : {0, 1}) {
858 EmitMove(destination.Component(i), source.Component(i), tmp);
859 }
860 } else {
861 ASSERT(source.IsConstant());
862 source.constant_instruction()->EmitMoveToLocation(
863 this, destination, kNoRegister, source.pair_index());
864 }
865}
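// EmitMove handles every pairing of register, FPU register, stack slot,
// double/quad stack slot, pair location and constant: memory-to-memory CPU
// moves go through a temporary register from the allocator, and
// memory-to-memory FPU moves stage their value in FpuTMP.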
866
867void FlowGraphCompiler::EmitNativeMoveArchitecture(
868 const compiler::ffi::NativeLocation& destination,
869 const compiler::ffi::NativeLocation& source) {
870 const auto& src_type = source.payload_type();
871 const auto& dst_type = destination.payload_type();
872 ASSERT(src_type.IsFloat() == dst_type.IsFloat());
873 ASSERT(src_type.IsInt() == dst_type.IsInt());
874 ASSERT(src_type.IsSigned() == dst_type.IsSigned());
875 ASSERT(src_type.IsPrimitive());
876 ASSERT(dst_type.IsPrimitive());
877 const intptr_t src_size = src_type.SizeInBytes();
878 const intptr_t dst_size = dst_type.SizeInBytes();
879 const bool sign_or_zero_extend = dst_size > src_size;
880
881 if (source.IsRegisters()) {
882 const auto& src = source.AsRegisters();
883 ASSERT(src.num_regs() == 1);
884 ASSERT(src_size <= 4);
885 const auto src_reg = src.reg_at(0);
886
887 if (destination.IsRegisters()) {
888 const auto& dst = destination.AsRegisters();
889 ASSERT(dst.num_regs() == 1);
890 const auto dst_reg = dst.reg_at(0);
891 ASSERT(destination.container_type().SizeInBytes() <= 4);
892 if (!sign_or_zero_extend) {
893 __ MoveRegister(dst_reg, src_reg);
894 } else {
895 switch (src_type.AsPrimitive().representation()) {
896 case compiler::ffi::kInt8: // Sign extend operand.
897 __ ExtendValue(dst_reg, src_reg, compiler::kByte);
898 return;
899 case compiler::ffi::kInt16:
900 __ ExtendValue(dst_reg, src_reg, compiler::kTwoBytes);
901 return;
903 __ MoveRegister(dst_reg, src_reg);
904 __ shll(dst_reg, compiler::Immediate(8));
905 __ sarl(dst_reg, compiler::Immediate(8));
906 return;
907 case compiler::ffi::kUint8: // Zero extend operand.
908 __ ExtendValue(dst_reg, src_reg, compiler::kUnsignedByte);
909 return;
910 case compiler::ffi::kUint16:
911 __ ExtendValue(dst_reg, src_reg, compiler::kUnsignedTwoBytes);
912 return;
914 __ MoveRegister(dst_reg, src_reg);
915 __ shll(dst_reg, compiler::Immediate(8));
916 __ shrl(dst_reg, compiler::Immediate(8));
917 return;
918 default:
919 // 32 to 64 bit is covered in IL by Representation conversions.
921 }
922 }
923
924 } else if (destination.IsFpuRegisters()) {
925 // Fpu Registers should only contain doubles and registers only ints.
926 UNREACHABLE();
927
928 } else {
929 ASSERT(destination.IsStack());
930 ASSERT(!sign_or_zero_extend);
931 const auto& dst = destination.AsStack();
932 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
933 switch (destination.container_type().SizeInBytes()) {
934 case 4:
935 __ movl(dst_addr, src_reg);
936 return;
937 case 2:
938 __ movw(dst_addr, src_reg);
939 return;
940 case 1:
941 __ movb(dst_addr, ByteRegisterOf(src_reg));
942 return;
943 default:
944 UNREACHABLE();
945 }
946 }
947
948 } else if (source.IsFpuRegisters()) {
949 const auto& src = source.AsFpuRegisters();
950 // We have not implemented conversions here, use IL convert instructions.
951 ASSERT(src_type.Equals(dst_type));
952
953 if (destination.IsRegisters()) {
954 // Fpu Registers should only contain doubles and registers only ints.
955 UNREACHABLE();
956
957 } else if (destination.IsFpuRegisters()) {
958 const auto& dst = destination.AsFpuRegisters();
959 // Optimization manual recommends using MOVAPS for register
960 // to register moves.
961 __ movaps(dst.fpu_reg(), src.fpu_reg());
962
963 } else {
964 ASSERT(destination.IsStack());
965 ASSERT(src_type.IsFloat());
966 const auto& dst = destination.AsStack();
967 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
968 switch (dst_size) {
969 case 8:
970 __ movsd(dst_addr, src.fpu_reg());
971 return;
972 case 4:
973 __ movss(dst_addr, src.fpu_reg());
974 return;
975 default:
976 UNREACHABLE();
977 }
978 }
979
980 } else {
981 ASSERT(source.IsStack());
982 const auto& src = source.AsStack();
983 const auto src_addr = NativeLocationToStackSlotAddress(src);
984 if (destination.IsRegisters()) {
985 const auto& dst = destination.AsRegisters();
986 ASSERT(dst.num_regs() == 1);
987 ASSERT(dst_size <= 4);
988 const auto dst_reg = dst.reg_at(0);
989 if (!sign_or_zero_extend) {
990 ASSERT(dst_size == 4);
991 __ movl(dst_reg, src_addr);
992 } else {
993 switch (src_type.AsPrimitive().representation()) {
994 case compiler::ffi::kInt8: // Sign extend operand.
995 __ movsxb(dst_reg, src_addr);
996 return;
997 case compiler::ffi::kInt16:
998 __ movsxw(dst_reg, src_addr);
999 return;
1000 case compiler::ffi::kUint8: // Zero extend operand.
1001 __ movzxb(dst_reg, src_addr);
1002 return;
1003 case compiler::ffi::kUint16:
1004 __ movzxw(dst_reg, src_addr);
1005 return;
1006 default:
1007 // 32 to 64 bit is covered in IL by Representation conversions.
1008 UNIMPLEMENTED();
1009 }
1010 }
1011
1012 } else if (destination.IsFpuRegisters()) {
1013 ASSERT(src_type.Equals(dst_type));
1014 ASSERT(src_type.IsFloat());
1015 const auto& dst = destination.AsFpuRegisters();
1016 switch (dst_size) {
1017 case 8:
1018 __ movsd(dst.fpu_reg(), src_addr);
1019 return;
1020 case 4:
1021 __ movss(dst.fpu_reg(), src_addr);
1022 return;
1023 default:
1024 UNREACHABLE();
1025 }
1026
1027 } else {
1028 ASSERT(destination.IsStack());
1029 UNREACHABLE();
1030 }
1031 }
1032}
1033
1034#undef __
1035#define __ compiler_->assembler()->
1036
1037void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1038 const Location source = move.src();
1039 const Location destination = move.dest();
1040
1041 if (source.IsRegister() && destination.IsRegister()) {
1042 __ xchgl(destination.reg(), source.reg());
1043 } else if (source.IsRegister() && destination.IsStackSlot()) {
1044 Exchange(source.reg(), LocationToStackSlotAddress(destination));
1045 } else if (source.IsStackSlot() && destination.IsRegister()) {
1046 Exchange(destination.reg(), LocationToStackSlotAddress(source));
1047 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1048 Exchange(LocationToStackSlotAddress(destination),
1049 LocationToStackSlotAddress(source));
1050 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1051 __ movaps(FpuTMP, source.fpu_reg());
1052 __ movaps(source.fpu_reg(), destination.fpu_reg());
1053 __ movaps(destination.fpu_reg(), FpuTMP);
1054 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1055 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1056 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1057 bool double_width =
1058 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1059 XmmRegister reg =
1060 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1061 const compiler::Address& slot_address =
1062 source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
1063 : LocationToStackSlotAddress(source);
1064
1065 if (double_width) {
1066 __ movsd(FpuTMP, slot_address);
1067 __ movsd(slot_address, reg);
1068 } else {
1069 __ movups(FpuTMP, slot_address);
1070 __ movups(slot_address, reg);
1071 }
1072 __ movaps(reg, FpuTMP);
1073 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1074 const compiler::Address& source_slot_address =
1075 LocationToStackSlotAddress(source);
1076 const compiler::Address& destination_slot_address =
1077 LocationToStackSlotAddress(destination);
1078
1079 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1080 __ movsd(FpuTMP, source_slot_address);
1081 __ movsd(ensure_scratch.reg(), destination_slot_address);
1082 __ movsd(destination_slot_address, FpuTMP);
1083 __ movsd(source_slot_address, ensure_scratch.reg());
1084 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1085 const compiler::Address& source_slot_address =
1086 LocationToStackSlotAddress(source);
1087 const compiler::Address& destination_slot_address =
1088 LocationToStackSlotAddress(destination);
1089
1090 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1091 __ movups(FpuTMP, source_slot_address);
1092 __ movups(ensure_scratch.reg(), destination_slot_address);
1093 __ movups(destination_slot_address, FpuTMP);
1094 __ movups(source_slot_address, ensure_scratch.reg());
1095 } else {
1096 UNREACHABLE();
1097 }
1098}
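// EmitSwap uses xchgl for register-register swaps, one or two scratch CPU
// registers for swaps involving stack slots, and FpuTMP (plus a scratch XMM
// register for the memory-to-memory case) for FPU and double/quad
// stack-slot swaps.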
1099
1100void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1101 const compiler::Address& src) {
1102 ScratchRegisterScope ensure_scratch(this, kNoRegister);
1103 __ MoveMemoryToMemory(dst, src, ensure_scratch.reg());
1104}
1105
1106void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1107 ScratchRegisterScope ensure_scratch(this, reg);
1108 __ movl(ensure_scratch.reg(), mem);
1109 __ movl(mem, reg);
1110 __ movl(reg, ensure_scratch.reg());
1111}
1112
1113void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1114 const compiler::Address& mem2) {
1115 ScratchRegisterScope ensure_scratch1(this, kNoRegister);
1116 ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg());
1117 __ movl(ensure_scratch1.reg(), mem1);
1118 __ movl(ensure_scratch2.reg(), mem2);
1119 __ movl(mem2, ensure_scratch1.reg());
1120 __ movl(mem1, ensure_scratch2.reg());
1121}
1122
1123void ParallelMoveEmitter::Exchange(Register reg,
1124 Register base_reg,
1125 intptr_t stack_offset) {
1126 UNREACHABLE();
1127}
1128
1129void ParallelMoveEmitter::Exchange(Register base_reg1,
1130 intptr_t stack_offset1,
1131 Register base_reg2,
1132 intptr_t stack_offset2) {
1133 UNREACHABLE();
1134}
1135
1136void ParallelMoveEmitter::SpillScratch(Register reg) {
1137 __ pushl(reg);
1138}
1139
1140void ParallelMoveEmitter::RestoreScratch(Register reg) {
1141 __ popl(reg);
1142}
1143
1144void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1145 __ subl(ESP, compiler::Immediate(kFpuRegisterSize));
1146 __ movups(compiler::Address(ESP, 0), reg);
1147}
1148
1149void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1150 __ movups(reg, compiler::Address(ESP, 0));
1151 __ addl(ESP, compiler::Immediate(kFpuRegisterSize));
1152}
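// Scratch registers for the parallel-move resolver are preserved on the
// stack: CPU scratches via pushl/popl, FPU scratches in a kFpuRegisterSize
// slot carved out of ESP.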
1153
1154#undef __
1155
1156} // namespace dart
1157
1158#endif // defined(TARGET_ARCH_IA32)