flow_graph_compiler_ia32.cc
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_IA32.
#if defined(TARGET_ARCH_IA32)

#include "vm/compiler/backend/flow_graph_compiler.h"

#include "vm/code_patcher.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"

namespace dart {

DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");

DECLARE_FLAG(bool, enable_simd_inline);

FlowGraphCompiler::~FlowGraphCompiler() {
  // BlockInfos are zone-allocated, so their destructors are not called.
  // Verify the labels explicitly here.
  for (int i = 0; i < block_info_.length(); ++i) {
    ASSERT(!block_info_[i]->jump_label()->IsLinked());
    ASSERT(!block_info_[i]->jump_label()->HasNear());
  }
}

bool FlowGraphCompiler::SupportsUnboxedDoubles() {
  return true;
}

bool FlowGraphCompiler::SupportsUnboxedSimd128() {
  return FLAG_enable_simd_inline;
}

bool FlowGraphCompiler::CanConvertInt64ToDouble() {
  return true;
}

void FlowGraphCompiler::EnterIntrinsicMode() {
  ASSERT(!intrinsic_mode());
  intrinsic_mode_ = true;
}

void FlowGraphCompiler::ExitIntrinsicMode() {
  ASSERT(intrinsic_mode());
  intrinsic_mode_ = false;
}
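
// Builds the deoptimization instructions describing how to reconstruct the
// unoptimized frame(s) for this deopt point: materializations first, then
// outgoing arguments, locals, PC markers and caller frames, from the
// innermost to the outermost environment.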
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
                                                DeoptInfoBuilder* builder,
                                                const Array& deopt_table) {
  if (deopt_env_ == nullptr) {
    ++builder->current_info_number_;
    return TypedData::null();
  }

  AllocateOutgoingArguments(deopt_env_);

  intptr_t slot_ix = 0;
  Environment* current = deopt_env_;

  // Emit all kMaterializeObject instructions describing objects to be
  // materialized on the deoptimization as a prefix to the deoptimization info.
  EmitMaterializations(deopt_env_, builder);

  // The real frame starts here.
  builder->MarkFrameStart();

  Zone* zone = compiler->zone();

  builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
  builder->AddCallerFp(slot_ix++);
  builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);

  // Emit all values that are needed for materialization as a part of the
  // expression stack for the bottom-most frame. This guarantees that GC
  // will be able to find them during materialization.
  slot_ix = builder->EmitMaterializationArguments(slot_ix);

  // For the innermost environment, set outgoing arguments and the locals.
  for (intptr_t i = current->Length() - 1;
       i >= current->fixed_parameter_count(); i--) {
    builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
  }

  builder->AddPcMarker(current->function(), slot_ix++);
  builder->AddCallerFp(slot_ix++);

  Environment* previous = current;
  current = current->outer();
  while (current != nullptr) {
    // For any outer environment the deopt id is that of the call instruction
    // which is recorded in the outer environment.
    builder->AddReturnAddress(current->function(),
                              DeoptId::ToDeoptAfter(current->GetDeoptId()),
                              slot_ix++);

    // The values of outgoing arguments can be changed from the inlined call so
    // we must read them from the previous environment.
    for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
      builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
                       slot_ix++);
    }

    // Set the locals, note that outgoing arguments are not in the environment.
    for (intptr_t i = current->Length() - 1;
         i >= current->fixed_parameter_count(); i--) {
      builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
    }

    builder->AddPcMarker(current->function(), slot_ix++);
    builder->AddCallerFp(slot_ix++);

    // Iterate on the outer environment.
    previous = current;
    current = current->outer();
  }
  // The previous pointer is now the outermost environment.
  ASSERT(previous != nullptr);

  // For the outermost environment, set caller PC.
  builder->AddCallerPc(slot_ix++);

  // For the outermost environment, set the incoming arguments.
  for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
    builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
  }

  return builder->CreateDeoptInfo(deopt_table);
}
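
// Emits the out-of-line deoptimization stub for this deopt point: binds the
// deopt entry label, optionally traps for debugging, and calls the shared
// Deoptimize stub.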
void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
                                             intptr_t stub_ix) {
  // Calls do not need stubs, they share a deoptimization trampoline.
  ASSERT(reason() != ICData::kDeoptAtCall);
  compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
  __ Comment("%s", Name());
  __ Bind(entry_label());
  if (FLAG_trap_on_deoptimization) {
    __ int3();
  }

  ASSERT(deopt_env() != nullptr);
  __ pushl(CODE_REG);
  __ Call(StubCode::Deoptimize());
  set_pc_offset(assembler->CodeSize());
  __ int3();
#undef __
}
#define __ assembler()->

// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
                                           compiler::Label* is_true,
                                           compiler::Label* is_false) {
  const compiler::Immediate& raw_null =
      compiler::Immediate(static_cast<intptr_t>(Object::null()));
  compiler::Label fall_through;
  __ cmpl(bool_register, raw_null);
  __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
  BranchLabels labels = {is_true, is_false, &fall_through};
  Condition true_condition =
      EmitBoolTest(bool_register, labels, /*invert=*/false);
  ASSERT(true_condition != kInvalidCondition);
  __ j(true_condition, is_true);
  __ jmp(is_false);
  __ Bind(&fall_through);
}
// Input registers (from TypeTestABI):
// - kInstanceReg: instance.
// - kDstTypeReg: destination type (for test_kind == kTestTypeSevenArg).
// - kInstantiatorTypeArgumentsReg: instantiator type arguments
//   (for test_kind >= kTestTypeThreeArg).
// - kFunctionTypeArgumentsReg: function type arguments
//   (for test_kind >= kTestTypeFourArg).
//
// Only preserves kInstanceReg from TypeTestABI, all other TypeTestABI
// registers may be used and thus must be saved by the caller.
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
    TypeTestStubKind test_kind,
    compiler::Label* is_instance_lbl,
    compiler::Label* is_not_instance_lbl) {
  const intptr_t num_inputs = UsedInputsForTTSKind(test_kind);
  const SubtypeTestCache& type_test_cache =
      SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New(num_inputs));
  const auto& stub_entry =
      StubCode::SubtypeTestCacheStubForUsedInputs(num_inputs);
  __ LoadObject(TypeTestABI::kSubtypeTestCacheReg, type_test_cache);
  __ pushl(TypeTestABI::kSubtypeTestCacheReg);
  __ pushl(TypeTestABI::kInstanceReg);
  // Registers for unused inputs may not have GC-safe values to push, so push
  // the null object if the input is unused instead.
  if (num_inputs >= 7) {
    __ pushl(TypeTestABI::kDstTypeReg);
  } else {
    __ PushObject(Object::null_object());
  }
  if (num_inputs >= 3) {
    __ pushl(TypeTestABI::kInstantiatorTypeArgumentsReg);
  } else {
    __ PushObject(Object::null_object());
  }
  if (num_inputs >= 4) {
    __ pushl(TypeTestABI::kFunctionTypeArgumentsReg);
  } else {
    __ PushObject(Object::null_object());
  }
  __ Call(stub_entry);
  // Restore all but kSubtypeTestCacheReg (since it is the same as
  // kSubtypeTestCacheResultReg). Since the generated code is documented as
  // clobbering all non-kInstanceReg TypeTestABI registers, it's okay to pop
  // null into the registers that didn't have guaranteed GC-safe values prior.
  static_assert(TypeTestABI::kSubtypeTestCacheReg ==
                    TypeTestABI::kSubtypeTestCacheResultReg,
                "Code assumes cache and result register are the same");
  __ popl(TypeTestABI::kFunctionTypeArgumentsReg);
  __ popl(TypeTestABI::kInstantiatorTypeArgumentsReg);
  __ popl(TypeTestABI::kDstTypeReg);
  __ popl(TypeTestABI::kInstanceReg);  // Restore receiver.
  __ Drop(1);
  GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
                     is_not_instance_lbl);
  return type_test_cache.ptr();
}
// Optimize assignable type check by adding inlined tests for:
// - null -> return null.
// - Smi -> compile time subtype check (only if dst class is not parameterized).
// - Class equality (only if class is not parameterized).
// Inputs:
// - EAX: object.
// - EBX: destination type (if non-constant).
// - EDX: instantiator type arguments or raw_null.
// - ECX: function type arguments or raw_null.
// Returns:
// - object in EAX for successful assignable check (or throws TypeError).
// Performance notes: positive checks must be quick, negative checks can be
// slow as they throw an exception.
void FlowGraphCompiler::GenerateAssertAssignable(
    CompileType* receiver_type,
    const InstructionSource& source,
    intptr_t deopt_id,
    Environment* env,
    const String& dst_name,
    LocationSummary* locs) {
  ASSERT(!source.token_pos.IsClassifying());
  ASSERT(CheckAssertAssignableTypeTestingABILocations(*locs));

  const auto& dst_type =
      locs->in(AssertAssignableInstr::kDstTypePos).IsConstant()
          ? AbstractType::Cast(
                locs->in(AssertAssignableInstr::kDstTypePos).constant())
          : Object::null_abstract_type();

  if (!dst_type.IsNull()) {
    ASSERT(dst_type.IsFinalized());
    if (dst_type.IsTopTypeForSubtyping()) return;  // No code needed.
  }

  compiler::Label is_assignable, runtime_call;
  auto& test_cache = SubtypeTestCache::ZoneHandle(zone());
  if (dst_type.IsNull()) {
    __ Comment("AssertAssignable for runtime type");
    // kDstTypeReg should already contain the destination type.
    GenerateNonLazyDeoptableStubCall(source,
                                     StubCode::TypeIsTopTypeForSubtyping(),
                                     UntaggedPcDescriptors::kOther, locs);
    // TypeTestABI::kSubtypeTestCacheReg is 0 if the type is a top type.
    __ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
                    compiler::Assembler::kNearJump);

    GenerateNonLazyDeoptableStubCall(source, StubCode::NullIsAssignableToType(),
                                     UntaggedPcDescriptors::kOther, locs);
    // TypeTestABI::kSubtypeTestCacheReg is 0 if the object is null and is
    // assignable.
    __ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
                    compiler::Assembler::kNearJump);

    // Use the full-arg version of the cache.
    test_cache = GenerateCallSubtypeTestStub(kTestTypeMaxArgs, &is_assignable,
                                             &runtime_call);
  } else {
    __ Comment("AssertAssignable for compile-time type");

    if (Instance::NullIsAssignableTo(dst_type)) {
      __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
      __ BranchIf(EQUAL, &is_assignable);
    }

    // Generate inline type check, linking to runtime call if not assignable.
    test_cache = GenerateInlineInstanceof(source, dst_type, &is_assignable,
                                          &runtime_call);
  }

  __ Bind(&runtime_call);

  // We push the inputs of [AssertAssignable] in the same order as they lie on
  // the stack in unoptimized code.
  // That makes the deopt environment we emit as metadata correct, so it
  // doesn't need pruning (as on other architectures).

  static_assert(AssertAssignableInstr::kNumInputs == 4,
                "Expected AssertAssignable to have 4 inputs");

  __ PushRegister(TypeTestABI::kInstanceReg);
  if (!dst_type.IsNull()) {
    __ PushObject(dst_type);
  } else {
    __ PushRegister(TypeTestABI::kDstTypeReg);
  }
  __ PushRegister(TypeTestABI::kInstantiatorTypeArgumentsReg);
  __ PushRegister(TypeTestABI::kFunctionTypeArgumentsReg);

  // Pass destination name and subtype test reg as register arguments.
  __ LoadObject(AssertAssignableStubABI::kDstNameReg, dst_name);
  __ LoadObject(AssertAssignableStubABI::kSubtypeTestReg, test_cache);

  GenerateStubCall(source, StubCode::AssertAssignable(),
                   UntaggedPcDescriptors::kOther, locs, deopt_id, env);

  __ Drop(AssertAssignableInstr::kNumInputs - 1);
  __ PopRegister(TypeTestABI::kInstanceReg);

  __ Bind(&is_assignable);
}
// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
// needs to be updated to match.
void FlowGraphCompiler::EmitFrameEntry() {
  RELEASE_ASSERT(flow_graph().graph_entry()->NeedsFrame());

  const Function& function = parsed_function().function();
  if (CanOptimizeFunction() && function.IsOptimizable() &&
      (!is_optimizing() || may_reoptimize())) {
    __ Comment("Invocation Count Check");
    const Register function_reg = EBX;
    __ LoadObject(function_reg, function);

    // Reoptimization of an optimized function is triggered by counting in
    // IC stubs, but not at the entry of the function.
    if (!is_optimizing()) {
      __ incl(compiler::FieldAddress(function_reg,
                                     Function::usage_counter_offset()));
    }
    __ cmpl(
        compiler::FieldAddress(function_reg, Function::usage_counter_offset()),
        compiler::Immediate(GetOptimizationThreshold()));
    ASSERT(function_reg == EBX);
    compiler::Label dont_optimize;
    __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
    __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
    __ Bind(&dont_optimize);
  }
  __ Comment("Enter frame");
  if (flow_graph().IsCompiledForOsr()) {
    intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
    ASSERT(extra_slots >= 0);
    __ EnterOsrFrame(extra_slots * kWordSize);
  } else {
    ASSERT(StackSize() >= 0);
    __ EnterDartFrame(StackSize() * kWordSize);
  }
}

const InstructionSource& PrologueSource() {
  static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
                                           /*inlining_id=*/0);
  return prologue_source;
}
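
// Emits the function prologue: the frame entry above plus initialization of
// the stack-allocated spill slots (unoptimized code) or of the synthetic
// :suspend_state slot (optimized suspendable functions).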
void FlowGraphCompiler::EmitPrologue() {
  BeginCodeSourceRange(PrologueSource());

  EmitFrameEntry();

  // In unoptimized code, initialize (non-argument) stack allocated slots.
  if (!is_optimizing()) {
    const int num_locals = parsed_function().num_stack_locals();

    intptr_t args_desc_slot = -1;
    if (parsed_function().has_arg_desc_var()) {
      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
          parsed_function().arg_desc_var());
    }

    __ Comment("Initialize spill slots");
    if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
      const compiler::Immediate& raw_null =
          compiler::Immediate(static_cast<intptr_t>(Object::null()));
      __ movl(EAX, raw_null);
    }
    for (intptr_t i = 0; i < num_locals; ++i) {
      const intptr_t slot_index =
          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
      __ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
    }
  } else if (parsed_function().suspend_state_var() != nullptr &&
             !flow_graph().IsCompiledForOsr()) {
    // Initialize synthetic :suspend_state variable early
    // as it may be accessed by GC and exception handling before
    // InitSuspendableFunction stub is called.
    const intptr_t slot_index =
        compiler::target::frame_layout.FrameSlotForVariable(
            parsed_function().suspend_state_var());
    __ LoadObject(EAX, Object::null_object());
    __ movl(compiler::Address(EBP, slot_index * kWordSize), EAX);
  }

  EndCodeSourceRange(PrologueSource());
}
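
// Calls the given stub, using CallVmStub for stubs in the VM isolate heap.
// The snapshot behavior is ignored because ia32 does not produce snapshots.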
void FlowGraphCompiler::EmitCallToStub(
    const Code& stub,
    ObjectPool::SnapshotBehavior snapshot_behavior) {
  if (stub.InVMIsolateHeap()) {
    __ CallVmStub(stub);
  } else {
    // Ignore snapshot_behavior, ia32 doesn't do snapshots.
    __ Call(stub);
  }
  AddStubCallTarget(stub);
}
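
// Tail-calls the given stub: loads its Code object into CODE_REG and jumps
// through its entry point.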
void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
  ASSERT(!stub.IsNull());
  __ LoadObject(CODE_REG, stub);
  __ jmp(compiler::FieldAddress(CODE_REG,
                                compiler::target::Code::entry_point_offset()));
  AddStubCallTarget(stub);
}
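
// Emits a call into Dart code through the given stub and records the
// call-site metadata (PC descriptor, safepoint and deopt environment).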
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
                                         const InstructionSource& source,
                                         const Code& stub,
                                         UntaggedPcDescriptors::Kind kind,
                                         LocationSummary* locs,
                                         Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  __ Call(stub, /*moveable_target=*/false, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
}
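
// Like GenerateDartCall, but calls through the patchable CallStaticFunction
// stub and records the static call target so the call can be patched later.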
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
                                               const InstructionSource& source,
                                               UntaggedPcDescriptors::Kind kind,
                                               LocationSummary* locs,
                                               const Function& target,
                                               Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  const auto& stub = StubCode::CallStaticFunction();
  __ Call(stub, /*movable_target=*/true, entry_kind);
  EmitCallsiteMetadata(source, deopt_id, kind, locs,
                       pending_deoptimization_env_);
  AddStaticCallTarget(target, entry_kind);
}
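
// Emits an unoptimized static call: the ICData is passed in ECX and the call
// goes through the n-argument unoptimized static call stub.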
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    const ICData& ic_data,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  const Code& stub =
      StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
  __ LoadObject(ECX, ic_data);
  GenerateDartCall(deopt_id, source, stub,
                   UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
  // We do not check for overflow when incrementing the edge counter. The
  // function should normally be optimized long before the counter can
  // overflow; and though we do not reset the counters when we optimize or
  // deoptimize, there is a bound on the number of
  // optimization/deoptimization cycles we will attempt.
  ASSERT(!edge_counters_array_.IsNull());
  __ Comment("Edge counter");
  __ LoadObject(EAX, edge_counters_array_);
  __ IncrementSmiField(
      compiler::FieldAddress(EAX, Array::element_offset(edge_id)), 1);
}
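
// Emits an instance call through an IC stub from optimized code. EAX carries
// the top-level function so the IC stub can increment its usage counter.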
void FlowGraphCompiler::EmitOptimizedInstanceCall(
    const Code& stub,
    const ICData& ic_data,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
  // Each ICData propagated from unoptimized to optimized code contains the
  // function that corresponds to the Dart function of that IC call. Due
  // to inlining in optimized code, that function may not correspond to the
  // top-level function (parsed_function().function()) which could be
  // reoptimized and which counter needs to be incremented.
  // Pass the function explicitly, it is used in IC stub.
  __ LoadObject(EAX, parsed_function().function());
  // Load receiver into EBX.
  __ movl(EBX, compiler::Address(
                   ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  __ LoadObject(IC_DATA_REG, ic_data);
  GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
                   entry_kind);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
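
// Emits a JIT instance call through the monomorphic entry point of the given
// stub, with the receiver in EBX and the ICData in IC_DATA_REG.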
void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
                                            const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  ASSERT(entry_kind == Code::EntryKind::kNormal ||
         entry_kind == Code::EntryKind::kUnchecked);
  ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
  // Load receiver into EBX.
  __ movl(EBX, compiler::Address(
                   ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
  __ LoadObject(IC_DATA_REG, ic_data, true);
  __ LoadObject(CODE_REG, stub, true);
  const intptr_t entry_point_offset =
      entry_kind == Code::EntryKind::kNormal
          ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
          : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
  __ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
  EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
                       pending_deoptimization_env_);
  EmitDropArguments(ic_data.SizeWithTypeArgs());
}
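
// Emits an instance call through the megamorphic cache for the given selector
// and arguments descriptor.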
void FlowGraphCompiler::EmitMegamorphicInstanceCall(
    const String& name,
    const Array& arguments_descriptor,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs) {
  ASSERT(CanCallDart());
  ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
  ASSERT(!FLAG_precompiled_mode);
  const ArgumentsDescriptor args_desc(arguments_descriptor);
  const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
      zone(),
      MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));

  __ Comment("MegamorphicCall");
  // Load receiver into EBX.
  __ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
  __ LoadObject(IC_DATA_REG, cache, true);
  __ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
  __ call(compiler::FieldAddress(
      CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));

  AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
  RecordSafepoint(locs);
  const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
  // Precompilation not implemented on ia32 platform.
  ASSERT(!FLAG_precompiled_mode);
  if (is_optimizing()) {
    AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
  } else {
    // Add deoptimization continuation point after the call and before the
    // arguments are removed.
    AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
  }
  RecordCatchEntryMoves(pending_deoptimization_env_);
  EmitDropArguments(args_desc.SizeWithTypeArgs());
}
void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
                                            intptr_t deopt_id,
                                            const InstructionSource& source,
                                            LocationSummary* locs,
                                            Code::EntryKind entry_kind,
                                            bool receiver_can_be_smi) {
  // Only generated with precompilation.
  UNREACHABLE();
}
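
// Emits a static call from optimized code through the patchable static call
// stub; the arguments descriptor is only loaded when the callee's prologue
// needs it.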
void FlowGraphCompiler::EmitOptimizedStaticCall(
    const Function& function,
    const Array& arguments_descriptor,
    intptr_t size_with_type_args,
    intptr_t deopt_id,
    const InstructionSource& source,
    LocationSummary* locs,
    Code::EntryKind entry_kind) {
  ASSERT(CanCallDart());
  if (function.PrologueNeedsArgumentsDescriptor()) {
    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
  } else {
    __ xorl(ARGS_DESC_REG, ARGS_DESC_REG);  // GC safe smi zero because of stub.
  }
  // Do not use the code from the function, but let the code be patched so that
  // we can record the outgoing edges to other code.
  GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
                         function, entry_kind);
  EmitDropArguments(size_with_type_args);
}
void FlowGraphCompiler::EmitDispatchTableCall(
    int32_t selector_offset,
    const Array& arguments_descriptor) {
  // Only generated with precompilation.
  UNREACHABLE();
}
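
// Compares reg against a constant object and returns the condition that holds
// when the two are identical. Falls back to an identity-with-number-check stub
// when numbers must compare by value.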
Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
    Register reg,
    const Object& obj,
    bool needs_number_check,
    const InstructionSource& source,
    intptr_t deopt_id) {
  ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));

  if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
    ASSERT(!needs_number_check);
    __ testl(reg, reg);
    return EQUAL;
  }

  if (needs_number_check) {
    __ pushl(reg);
    __ PushObject(obj);
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    // Stub returns result in flags (result of a cmpl, we need ZF computed).
    __ popl(reg);  // Discard constant.
    __ popl(reg);  // Restore 'reg'.
  } else {
    __ CompareObject(reg, obj);
  }
  return EQUAL;
}
Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
    Register left,
    Register right,
    bool needs_number_check,
    const InstructionSource& source,
    intptr_t deopt_id) {
  if (needs_number_check) {
    __ pushl(left);
    __ pushl(right);
    if (is_optimizing()) {
      __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
    } else {
      __ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
    }
    AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
    // Stub returns result in flags (result of a cmpl, we need ZF computed).
    __ popl(right);
    __ popl(left);
  } else {
    __ cmpl(left, right);
  }
  return EQUAL;
}
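
// The true and false Bool instances are allocated so that their addresses
// differ only in the kBoolValueMask bit, so a single test instruction is
// enough to tell them apart.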
Condition FlowGraphCompiler::EmitBoolTest(Register value,
                                          BranchLabels labels,
                                          bool invert) {
  __ Comment("BoolTest");
  __ testl(value, compiler::Immediate(
                      compiler::target::ObjectAlignment::kBoolValueMask));
  return invert ? NOT_EQUAL : EQUAL;
}
// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
// FlowGraphCompiler::SlowPathEnvironmentFor.
void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
#if defined(DEBUG)
  locs->CheckWritableInputs();
  ClobberDeadTempRegisters(locs);
#endif

  // TODO(vegorov): consider saving only caller save (volatile) registers.
  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
    __ subl(ESP, compiler::Immediate(xmm_regs_count * kFpuRegisterSize));
    // Store XMM registers with the lowest register number at the lowest
    // address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
      XmmRegister xmm_reg = static_cast<XmmRegister>(i);
      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
        __ movups(compiler::Address(ESP, offset), xmm_reg);
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
  }

  // The order in which the registers are pushed must match the order
  // in which the registers are encoded in the safe point's stack map.
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
    Register reg = static_cast<Register>(i);
    if (locs->live_registers()->ContainsRegister(reg)) {
      __ pushl(reg);
    }
  }
}
void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
  for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
    Register reg = static_cast<Register>(i);
    if (locs->live_registers()->ContainsRegister(reg)) {
      __ popl(reg);
    }
  }

  const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
  if (xmm_regs_count > 0) {
    // XMM registers have the lowest register number at the lowest address.
    intptr_t offset = 0;
    for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
      XmmRegister xmm_reg = static_cast<XmmRegister>(i);
      if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
        __ movups(xmm_reg, compiler::Address(ESP, offset));
        offset += kFpuRegisterSize;
      }
    }
    ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
    __ addl(ESP, compiler::Immediate(offset));
  }
}
#if defined(DEBUG)
void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
  // Clobber temporaries that have not been manually preserved.
  for (intptr_t i = 0; i < locs->temp_count(); ++i) {
    Location tmp = locs->temp(i);
    // TODO(zerny): clobber non-live temporary FPU registers.
    if (tmp.IsRegister() &&
        !locs->live_registers()->ContainsRegister(tmp.reg())) {
      __ movl(tmp.reg(), compiler::Immediate(0xf7));
    }
  }
}
#endif

Register FlowGraphCompiler::EmitTestCidRegister() {
  return EDI;
}

void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
    intptr_t count_without_type_args,
    const Array& arguments_descriptor) {
  __ Comment("EmitTestAndCall");
  // Load receiver into EAX.
  __ movl(EAX,
          compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
  __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}

void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
                                                 bool if_smi) {
  __ testl(EAX, compiler::Immediate(kSmiTagMask));
  // Jump if receiver is (not) Smi.
  __ j(if_smi ? ZERO : NOT_ZERO, label);
}

void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
  ASSERT(class_id_reg != EAX);
  __ LoadClassId(class_id_reg, EAX);
}
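
// Moves a value between two arbitrary locations (CPU registers, FPU registers,
// stack slots of various widths, pair locations or constants), using tmp to
// allocate a scratch register for memory-to-memory moves.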
void FlowGraphCompiler::EmitMove(Location destination,
                                 Location source,
                                 TemporaryRegisterAllocator* tmp) {
  if (destination.Equals(source)) return;

  if (source.IsRegister()) {
    if (destination.IsRegister()) {
      __ movl(destination.reg(), source.reg());
    } else {
      ASSERT(destination.IsStackSlot());
      __ movl(LocationToStackSlotAddress(destination), source.reg());
    }
  } else if (source.IsStackSlot()) {
    if (destination.IsRegister()) {
      __ movl(destination.reg(), LocationToStackSlotAddress(source));
    } else if (destination.IsFpuRegister()) {
      // 32-bit float
      __ movss(destination.fpu_reg(), LocationToStackSlotAddress(source));
    } else {
      ASSERT(destination.IsStackSlot());
      Register scratch = tmp->AllocateTemporary();
      __ MoveMemoryToMemory(LocationToStackSlotAddress(destination),
                            LocationToStackSlotAddress(source), scratch);
      tmp->ReleaseTemporary();
    }
  } else if (source.IsFpuRegister()) {
    if (destination.IsFpuRegister()) {
      // Optimization manual recommends using MOVAPS for register
      // to register moves.
      __ movaps(destination.fpu_reg(), source.fpu_reg());
    } else {
      if (destination.IsDoubleStackSlot()) {
        __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg());
      } else if (destination.IsStackSlot()) {
        // 32-bit float
        __ movss(LocationToStackSlotAddress(destination), source.fpu_reg());
      } else {
        ASSERT(destination.IsQuadStackSlot());
        __ movups(LocationToStackSlotAddress(destination), source.fpu_reg());
      }
    }
  } else if (source.IsDoubleStackSlot()) {
    if (destination.IsFpuRegister()) {
      __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source));
    } else if (destination.IsStackSlot()) {
      // Source holds a 32-bit float, take only the lower 32-bits
      __ movsd(FpuTMP, LocationToStackSlotAddress(source));
      __ movss(LocationToStackSlotAddress(destination), FpuTMP);
    } else {
      ASSERT(destination.IsDoubleStackSlot());
      __ movsd(FpuTMP, LocationToStackSlotAddress(source));
      __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
    }
  } else if (source.IsQuadStackSlot()) {
    if (destination.IsFpuRegister()) {
      __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source));
    } else {
      ASSERT(destination.IsQuadStackSlot());
      __ movups(FpuTMP, LocationToStackSlotAddress(source));
      __ movups(LocationToStackSlotAddress(destination), FpuTMP);
    }
  } else if (source.IsPairLocation()) {
    ASSERT(destination.IsPairLocation());
    for (intptr_t i : {0, 1}) {
      EmitMove(destination.Component(i), source.Component(i), tmp);
    }
  } else {
    ASSERT(source.IsConstant());
    source.constant_instruction()->EmitMoveToLocation(
        this, destination, kNoRegister, source.pair_index());
  }
}
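
// Moves a primitive value between native locations (registers, FPU registers
// and stack slots) for FFI calls, sign- or zero-extending small integer
// payloads where the destination is wider than the source.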
void FlowGraphCompiler::EmitNativeMoveArchitecture(
    const compiler::ffi::NativeLocation& destination,
    const compiler::ffi::NativeLocation& source) {
  const auto& src_type = source.payload_type();
  const auto& dst_type = destination.payload_type();
  ASSERT(src_type.IsFloat() == dst_type.IsFloat());
  ASSERT(src_type.IsInt() == dst_type.IsInt());
  ASSERT(src_type.IsSigned() == dst_type.IsSigned());
  ASSERT(src_type.IsPrimitive());
  ASSERT(dst_type.IsPrimitive());
  const intptr_t src_size = src_type.SizeInBytes();
  const intptr_t dst_size = dst_type.SizeInBytes();
  const bool sign_or_zero_extend = dst_size > src_size;

  if (source.IsRegisters()) {
    const auto& src = source.AsRegisters();
    ASSERT(src.num_regs() == 1);
    ASSERT(src_size <= 4);
    const auto src_reg = src.reg_at(0);

    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      const auto dst_reg = dst.reg_at(0);
      ASSERT(destination.container_type().SizeInBytes() <= 4);
      if (!sign_or_zero_extend) {
        __ MoveRegister(dst_reg, src_reg);
      } else {
        switch (src_type.AsPrimitive().representation()) {
          case compiler::ffi::kInt8:  // Sign extend operand.
            __ ExtendValue(dst_reg, src_reg, compiler::kByte);
            return;
          case compiler::ffi::kInt16:
            __ ExtendValue(dst_reg, src_reg, compiler::kTwoBytes);
            return;
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ sarl(dst_reg, compiler::Immediate(8));
            return;
          case compiler::ffi::kUint8:  // Zero extend operand.
            __ ExtendValue(dst_reg, src_reg, compiler::kUnsignedByte);
            return;
          case compiler::ffi::kUint16:
            __ ExtendValue(dst_reg, src_reg, compiler::kUnsignedTwoBytes);
            return;
            __ MoveRegister(dst_reg, src_reg);
            __ shll(dst_reg, compiler::Immediate(8));
            __ shrl(dst_reg, compiler::Immediate(8));
            return;
          default:
            // 32 to 64 bit is covered in IL by Representation conversions.
            UNIMPLEMENTED();
        }
      }

    } else if (destination.IsFpuRegisters()) {
      // Fpu Registers should only contain doubles and registers only ints.
      UNREACHABLE();

    } else {
      ASSERT(destination.IsStack());
      ASSERT(!sign_or_zero_extend);
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      switch (destination.container_type().SizeInBytes()) {
        case 4:
          __ movl(dst_addr, src_reg);
          return;
        case 2:
          __ movw(dst_addr, src_reg);
          return;
        case 1:
          __ movb(dst_addr, ByteRegisterOf(src_reg));
          return;
        default:
          UNREACHABLE();
      }
    }

  } else if (source.IsFpuRegisters()) {
    const auto& src = source.AsFpuRegisters();
    // We have not implemented conversions here, use IL convert instructions.
    ASSERT(src_type.Equals(dst_type));

    if (destination.IsRegisters()) {
      // Fpu Registers should only contain doubles and registers only ints.
      UNREACHABLE();

    } else if (destination.IsFpuRegisters()) {
      const auto& dst = destination.AsFpuRegisters();
      // Optimization manual recommends using MOVAPS for register
      // to register moves.
      __ movaps(dst.fpu_reg(), src.fpu_reg());

    } else {
      ASSERT(destination.IsStack());
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsStack();
      const auto dst_addr = NativeLocationToStackSlotAddress(dst);
      switch (dst_size) {
        case 8:
          __ movsd(dst_addr, src.fpu_reg());
          return;
        case 4:
          __ movss(dst_addr, src.fpu_reg());
          return;
        default:
          UNREACHABLE();
      }
    }

  } else {
    ASSERT(source.IsStack());
    const auto& src = source.AsStack();
    const auto src_addr = NativeLocationToStackSlotAddress(src);
    if (destination.IsRegisters()) {
      const auto& dst = destination.AsRegisters();
      ASSERT(dst.num_regs() == 1);
      ASSERT(dst_size <= 4);
      const auto dst_reg = dst.reg_at(0);
      if (!sign_or_zero_extend) {
        ASSERT(dst_size == 4);
        __ movl(dst_reg, src_addr);
      } else {
        switch (src_type.AsPrimitive().representation()) {
          case compiler::ffi::kInt8:  // Sign extend operand.
            __ movsxb(dst_reg, src_addr);
            return;
          case compiler::ffi::kInt16:
            __ movsxw(dst_reg, src_addr);
            return;
          case compiler::ffi::kUint8:  // Zero extend operand.
            __ movzxb(dst_reg, src_addr);
            return;
          case compiler::ffi::kUint16:
            __ movzxw(dst_reg, src_addr);
            return;
          default:
            // 32 to 64 bit is covered in IL by Representation conversions.
            UNIMPLEMENTED();
        }
      }

    } else if (destination.IsFpuRegisters()) {
      ASSERT(src_type.Equals(dst_type));
      ASSERT(src_type.IsFloat());
      const auto& dst = destination.AsFpuRegisters();
      switch (dst_size) {
        case 8:
          __ movsd(dst.fpu_reg(), src_addr);
          return;
        case 4:
          __ movss(dst.fpu_reg(), src_addr);
          return;
        default:
          UNREACHABLE();
      }

    } else {
      ASSERT(destination.IsStack());
      UNREACHABLE();
    }
  }
}

#undef __
#define __ compiler_->assembler()->
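
// Swaps the contents of two locations as part of a parallel move, using
// FpuTMP and scratch registers for swaps that involve memory.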
void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
  const Location source = move.src();
  const Location destination = move.dest();

  if (source.IsRegister() && destination.IsRegister()) {
    __ xchgl(destination.reg(), source.reg());
  } else if (source.IsRegister() && destination.IsStackSlot()) {
    Exchange(source.reg(), LocationToStackSlotAddress(destination));
  } else if (source.IsStackSlot() && destination.IsRegister()) {
    Exchange(destination.reg(), LocationToStackSlotAddress(source));
  } else if (source.IsStackSlot() && destination.IsStackSlot()) {
    Exchange(LocationToStackSlotAddress(destination),
             LocationToStackSlotAddress(source));
  } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
    __ movaps(FpuTMP, source.fpu_reg());
    __ movaps(source.fpu_reg(), destination.fpu_reg());
    __ movaps(destination.fpu_reg(), FpuTMP);
  } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
    ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
           source.IsDoubleStackSlot() || source.IsQuadStackSlot());
    bool double_width =
        destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
    XmmRegister reg =
        source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
    const compiler::Address& slot_address =
        source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
                               : LocationToStackSlotAddress(source);

    if (double_width) {
      __ movsd(FpuTMP, slot_address);
      __ movsd(slot_address, reg);
    } else {
      __ movups(FpuTMP, slot_address);
      __ movups(slot_address, reg);
    }
    __ movaps(reg, FpuTMP);
  } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movsd(FpuTMP, source_slot_address);
    __ movsd(ensure_scratch.reg(), destination_slot_address);
    __ movsd(destination_slot_address, FpuTMP);
    __ movsd(source_slot_address, ensure_scratch.reg());
  } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
    const compiler::Address& source_slot_address =
        LocationToStackSlotAddress(source);
    const compiler::Address& destination_slot_address =
        LocationToStackSlotAddress(destination);

    ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
    __ movups(FpuTMP, source_slot_address);
    __ movups(ensure_scratch.reg(), destination_slot_address);
    __ movups(destination_slot_address, FpuTMP);
    __ movups(source_slot_address, ensure_scratch.reg());
  } else {
    UNREACHABLE();
  }
}

void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
                                             const compiler::Address& src) {
  ScratchRegisterScope ensure_scratch(this, kNoRegister);
  __ MoveMemoryToMemory(dst, src, ensure_scratch.reg());
}

void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
  ScratchRegisterScope ensure_scratch(this, reg);
  __ movl(ensure_scratch.reg(), mem);
  __ movl(mem, reg);
  __ movl(reg, ensure_scratch.reg());
}

void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
                                   const compiler::Address& mem2) {
  ScratchRegisterScope ensure_scratch1(this, kNoRegister);
  ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg());
  __ movl(ensure_scratch1.reg(), mem1);
  __ movl(ensure_scratch2.reg(), mem2);
  __ movl(mem2, ensure_scratch1.reg());
  __ movl(mem1, ensure_scratch2.reg());
}

void ParallelMoveEmitter::Exchange(Register reg,
                                   Register base_reg,
                                   intptr_t stack_offset) {
  UNREACHABLE();
}

void ParallelMoveEmitter::Exchange(Register base_reg1,
                                   intptr_t stack_offset1,
                                   Register base_reg2,
                                   intptr_t stack_offset2) {
  UNREACHABLE();
}

void ParallelMoveEmitter::SpillScratch(Register reg) {
  __ pushl(reg);
}

void ParallelMoveEmitter::RestoreScratch(Register reg) {
  __ popl(reg);
}

void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
  __ subl(ESP, compiler::Immediate(kFpuRegisterSize));
  __ movups(compiler::Address(ESP, 0), reg);
}

void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
  __ movups(reg, compiler::Address(ESP, 0));
  __ addl(ESP, compiler::Immediate(kFpuRegisterSize));
}

#undef __

}  // namespace dart

#endif  // defined(TARGET_ARCH_IA32)