Flutter Engine
flow_graph_compiler_x64.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6#if defined(TARGET_ARCH_X64)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
16#include "vm/dart_entry.h"
17#include "vm/deopt_instructions.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
31void FlowGraphCompiler::ArchSpecificInitialization() {
32  if (FLAG_precompiled_mode) {
33 auto object_store = isolate_group()->object_store();
34
35 const auto& stub =
36 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
37 if (CanPcRelativeCall(stub)) {
38 assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
39        const intptr_t offset_into_target =
40            Thread::WriteBarrierWrappersOffsetForRegister(reg);
41        assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
42 AddPcRelativeCallStubTarget(stub);
43 };
44 }
45
46 const auto& array_stub =
47 Code::ZoneHandle(object_store->array_write_barrier_stub());
48 if (CanPcRelativeCall(stub)) {
49      assembler_->generate_invoke_array_write_barrier_ = [&]() {
50        assembler_->GenerateUnRelocatedPcRelativeCall();
51        AddPcRelativeCallStubTarget(array_stub);
52 };
53 }
54 }
55}
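// Note: in precompiled (AOT) mode, the callbacks installed above make the
// assembler emit write-barrier invocations as unrelocated PC-relative calls
// into the shared write-barrier stubs (recorded via AddPcRelativeCallStubTarget
// and resolved when the final image is laid out), provided CanPcRelativeCall()
// reports that the stubs are reachable that way.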
56
57FlowGraphCompiler::~FlowGraphCompiler() {
58  // BlockInfos are zone-allocated, so their destructors are not called.
59 // Verify the labels explicitly here.
60 for (int i = 0; i < block_info_.length(); ++i) {
61 ASSERT(!block_info_[i]->jump_label()->IsLinked());
62 ASSERT(!block_info_[i]->jump_label()->HasNear());
63 }
64}
65
66bool FlowGraphCompiler::SupportsUnboxedSimd128() {
67  return FLAG_enable_simd_inline;
68}
69
70bool FlowGraphCompiler::CanConvertInt64ToDouble() {
71  return true;
72}
73
74void FlowGraphCompiler::EnterIntrinsicMode() {
75  ASSERT(!intrinsic_mode());
76  intrinsic_mode_ = true;
77 ASSERT(!assembler()->constant_pool_allowed());
78}
79
80void FlowGraphCompiler::ExitIntrinsicMode() {
81  ASSERT(intrinsic_mode());
82  intrinsic_mode_ = false;
83}
84
85TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
86 DeoptInfoBuilder* builder,
87 const Array& deopt_table) {
88 if (deopt_env_ == nullptr) {
89 ++builder->current_info_number_;
90 return TypedData::null();
91 }
92
93 AllocateOutgoingArguments(deopt_env_);
94
95 intptr_t slot_ix = 0;
96 Environment* current = deopt_env_;
97
98 // Emit all kMaterializeObject instructions describing objects to be
99 // materialized on the deoptimization as a prefix to the deoptimization info.
100 EmitMaterializations(deopt_env_, builder);
101
102 // The real frame starts here.
103 builder->MarkFrameStart();
104
105 Zone* zone = compiler->zone();
106
107 builder->AddPp(current->function(), slot_ix++);
108 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
109 builder->AddCallerFp(slot_ix++);
110 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
111
112 // Emit all values that are needed for materialization as a part of the
113 // expression stack for the bottom-most frame. This guarantees that GC
114 // will be able to find them during materialization.
115 slot_ix = builder->EmitMaterializationArguments(slot_ix);
116
117 // For the innermost environment, set outgoing arguments and the locals.
118 for (intptr_t i = current->Length() - 1;
119 i >= current->fixed_parameter_count(); i--) {
120 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
121 }
122
123 Environment* previous = current;
124 current = current->outer();
125 while (current != nullptr) {
126 builder->AddPp(current->function(), slot_ix++);
127 builder->AddPcMarker(previous->function(), slot_ix++);
128 builder->AddCallerFp(slot_ix++);
129
130 // For any outer environment the deopt id is that of the call instruction
131 // which is recorded in the outer environment.
132 builder->AddReturnAddress(current->function(),
133 DeoptId::ToDeoptAfter(current->GetDeoptId()),
134 slot_ix++);
135
136    // The values of outgoing arguments can be changed by the inlined call,
137    // so we must read them from the previous environment.
138 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
139 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
140 slot_ix++);
141 }
142
143 // Set the locals, note that outgoing arguments are not in the environment.
144 for (intptr_t i = current->Length() - 1;
145 i >= current->fixed_parameter_count(); i--) {
146 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
147 }
148
149 // Iterate on the outer environment.
150 previous = current;
151 current = current->outer();
152 }
153 // The previous pointer is now the outermost environment.
154 ASSERT(previous != nullptr);
155
156 // Set slots for the outermost environment.
157 builder->AddCallerPp(slot_ix++);
158 builder->AddPcMarker(previous->function(), slot_ix++);
159 builder->AddCallerFp(slot_ix++);
160 builder->AddCallerPc(slot_ix++);
161
162 // For the outermost environment, set the incoming arguments.
163 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
164 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
165 }
166
167 return builder->CreateDeoptInfo(deopt_table);
168}
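// Note: the deopt info built above is laid out innermost frame first. Each
// inlined frame contributes [pp, pc marker, caller fp, return address]
// followed by its locals; a callee's outgoing arguments are copied from the
// inner (previous) environment because the inlined call may have changed them.
// The outermost frame is closed off with the caller's pp, pc marker, fp and pc
// plus the incoming arguments.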
169
170void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
171                                             intptr_t stub_ix) {
172 // Calls do not need stubs, they share a deoptimization trampoline.
173 ASSERT(reason() != ICData::kDeoptAtCall);
174 compiler::Assembler* assembler = compiler->assembler();
175#define __ assembler->
176  __ Comment("%s", Name());
177  __ Bind(entry_label());
178  if (FLAG_trap_on_deoptimization) {
179 __ int3();
180 }
181
182 ASSERT(deopt_env() != nullptr);
183 __ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
184 set_pc_offset(assembler->CodeSize());
185 __ int3();
186#undef __
187}
188
189#define __ assembler->
190// Static methods of FlowGraphCompiler that take an assembler.
191
192void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
193 Register reg_to_call,
194 intptr_t sub_type_cache_index) {
195 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
196 sub_type_cache_index);
197  __ Call(compiler::FieldAddress(
198      reg_to_call,
199      compiler::target::AbstractType::type_test_stub_entry_point_offset()));
200}
201
202#undef __
203#define __ assembler()->
204// Instance methods of FlowGraphCompiler.
205
206// Fall through if bool_register contains null.
207void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
208 compiler::Label* is_true,
209 compiler::Label* is_false) {
210 compiler::Label fall_through;
211 __ CompareObject(bool_register, Object::null_object());
212 __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
213 BranchLabels labels = {is_true, is_false, &fall_through};
214 Condition true_condition =
215 EmitBoolTest(bool_register, labels, /*invert=*/false);
216 ASSERT(true_condition != kInvalidCondition);
217 __ j(true_condition, is_true);
218 __ jmp(is_false);
219 __ Bind(&fall_through);
220}
221
222// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
223// needs to be updated to match.
224void FlowGraphCompiler::EmitFrameEntry() {
225 if (!flow_graph().graph_entry()->NeedsFrame()) {
226    if (FLAG_precompiled_mode) {
227      assembler()->set_constant_pool_allowed(true);
228    }
229 return;
230 }
231
232 if (flow_graph().IsCompiledForOsr()) {
233 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
234 ASSERT(extra_slots >= 0);
235 __ EnterOsrFrame(extra_slots * kWordSize);
236 } else {
237    const Function& function = parsed_function().function();
238    if (CanOptimizeFunction() && function.IsOptimizable() &&
239        (!is_optimizing() || may_reoptimize())) {
240 __ Comment("Invocation Count Check");
241 const Register function_reg = RDI;
242 __ movq(function_reg,
243 compiler::FieldAddress(CODE_REG, Code::owner_offset()));
244
245 // Reoptimization of an optimized function is triggered by counting in
246 // IC stubs, but not at the entry of the function.
247 if (!is_optimizing()) {
248 __ incl(compiler::FieldAddress(function_reg,
249 Function::usage_counter_offset()));
250 }
251 __ cmpl(compiler::FieldAddress(function_reg,
252 Function::usage_counter_offset()),
253 compiler::Immediate(GetOptimizationThreshold()));
254 ASSERT(function_reg == RDI);
255 compiler::Label dont_optimize;
256 __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
257 __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
258 __ Bind(&dont_optimize);
259 }
260 ASSERT(StackSize() >= 0);
261 __ Comment("Enter frame");
262 __ EnterDartFrame(StackSize() * kWordSize);
263 }
264}
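// Note: the "Invocation Count Check" above is what triggers JIT optimization:
// unoptimized code increments Function::usage_counter_ and, once it reaches
// GetOptimizationThreshold(), jumps to the thread's optimize entry instead of
// setting up this frame. OSR compilations skip the check entirely and enter
// through EnterOsrFrame with the extra stack slots already reserved.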
265
266const InstructionSource& PrologueSource() {
267 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
268 /*inlining_id=*/0);
269 return prologue_source;
270}
271
272void FlowGraphCompiler::EmitPrologue() {
273  BeginCodeSourceRange(PrologueSource());
274
275 EmitFrameEntry();
276 ASSERT(assembler()->constant_pool_allowed());
277
278 // In unoptimized code, initialize (non-argument) stack allocated slots.
279 if (!is_optimizing()) {
280 const int num_locals = parsed_function().num_stack_locals();
281
282 intptr_t args_desc_slot = -1;
283    if (parsed_function().has_arg_desc_var()) {
284      args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
285          parsed_function().arg_desc_var());
286 }
287
288 __ Comment("Initialize spill slots");
289 if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
290 __ LoadObject(RAX, Object::null_object());
291 }
292 for (intptr_t i = 0; i < num_locals; ++i) {
293      const intptr_t slot_index =
294          compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
295      Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
296 __ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
297 }
298 } else if (parsed_function().suspend_state_var() != nullptr &&
299 !flow_graph().IsCompiledForOsr()) {
300 // Initialize synthetic :suspend_state variable early
301 // as it may be accessed by GC and exception handling before
302 // InitSuspendableFunction stub is called.
303    const intptr_t slot_index =
304        compiler::target::frame_layout.FrameSlotForVariable(
305            parsed_function().suspend_state_var());
306 __ LoadObject(RAX, Object::null_object());
307 __ movq(compiler::Address(RBP, slot_index * kWordSize), RAX);
308 }
309
310 EndCodeSourceRange(PrologueSource());
311}
312
313void FlowGraphCompiler::EmitCallToStub(
314    const Code& stub,
315 ObjectPool::SnapshotBehavior snapshot_behavior) {
316 ASSERT(!stub.IsNull());
317 if (CanPcRelativeCall(stub)) {
318 __ GenerateUnRelocatedPcRelativeCall();
319 AddPcRelativeCallStubTarget(stub);
320 } else {
321 __ Call(stub, snapshot_behavior);
322 AddStubCallTarget(stub);
323 }
324}
325
326void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
327 ASSERT(!stub.IsNull());
328 if (CanPcRelativeCall(stub)) {
329 __ GenerateUnRelocatedPcRelativeTailCall();
330 AddPcRelativeTailCallStubTarget(stub);
331 } else {
332 __ LoadObject(CODE_REG, stub);
333    __ jmp(compiler::FieldAddress(
334        CODE_REG, compiler::target::Code::entry_point_offset()));
335    AddStubCallTarget(stub);
336 }
337}
338
339void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
340 ASSERT(!stub.IsNull());
341 if (CanPcRelativeCall(stub)) {
342 if (flow_graph().graph_entry()->NeedsFrame()) {
343 __ LeaveDartFrame();
344 }
345 __ GenerateUnRelocatedPcRelativeTailCall();
346 AddPcRelativeTailCallStubTarget(stub);
347#if defined(DEBUG)
348 __ Breakpoint();
349#endif
350 } else {
351 __ LoadObject(CODE_REG, stub);
352 if (flow_graph().graph_entry()->NeedsFrame()) {
353 __ LeaveDartFrame();
354 }
355    __ jmp(compiler::FieldAddress(
356        CODE_REG, compiler::target::Code::entry_point_offset()));
357    AddStubCallTarget(stub);
358 }
359}
360
361void FlowGraphCompiler::GeneratePatchableCall(
362    const InstructionSource& source,
363    const Code& stub,
364    UntaggedPcDescriptors::Kind kind,
365    LocationSummary* locs,
366    ObjectPool::SnapshotBehavior snapshot_behavior) {
367  __ CallPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
368  EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
369                       pending_deoptimization_env_);
370}
371
372void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
373 const InstructionSource& source,
374 const Code& stub,
375                                         UntaggedPcDescriptors::Kind kind,
376                                         LocationSummary* locs,
377 Code::EntryKind entry_kind) {
378 ASSERT(CanCallDart());
379 __ CallPatchable(stub, entry_kind);
380 EmitCallsiteMetadata(source, deopt_id, kind, locs,
381 pending_deoptimization_env_);
382}
383
384void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
385 const InstructionSource& source,
386                                               UntaggedPcDescriptors::Kind kind,
387                                               LocationSummary* locs,
388 const Function& target,
389 Code::EntryKind entry_kind) {
390 ASSERT(CanCallDart());
392 if (CanPcRelativeCall(target)) {
393 __ GenerateUnRelocatedPcRelativeCall();
394 AddPcRelativeCallTarget(target, entry_kind);
395 EmitCallsiteMetadata(source, deopt_id, kind, locs,
396 pending_deoptimization_env_);
397 } else {
398 // Call sites to the same target can share object pool entries. These
399 // call sites are never patched for breakpoints: the function is deoptimized
400 // and the unoptimized code with IC calls for static calls is patched
401 // instead.
402 const auto& stub_entry = StubCode::CallStaticFunction();
403 __ CallWithEquivalence(stub_entry, target, entry_kind);
404 EmitCallsiteMetadata(source, deopt_id, kind, locs,
405 pending_deoptimization_env_);
406 AddStaticCallTarget(target, entry_kind);
407 }
408}
409
410void FlowGraphCompiler::EmitUnoptimizedStaticCall(
411 intptr_t size_with_type_args,
412 intptr_t deopt_id,
413 const InstructionSource& source,
414 LocationSummary* locs,
415 const ICData& ic_data,
416 Code::EntryKind entry_kind) {
417 ASSERT(CanCallDart());
418 const Code& stub =
419 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
420 __ LoadObject(RBX, ic_data);
421 GenerateDartCall(deopt_id, source, stub,
422 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
423 EmitDropArguments(size_with_type_args);
424}
425
426void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
427 // We do not check for overflow when incrementing the edge counter. The
428 // function should normally be optimized long before the counter can
429 // overflow; and though we do not reset the counters when we optimize or
430 // deoptimize, there is a bound on the number of
431 // optimization/deoptimization cycles we will attempt.
432 ASSERT(!edge_counters_array_.IsNull());
433 ASSERT(assembler_->constant_pool_allowed());
434 __ Comment("Edge counter");
435 __ LoadObject(RAX, edge_counters_array_);
436 __ IncrementCompressedSmiField(
437 compiler::FieldAddress(RAX, Array::element_offset(edge_id)), 1);
438}
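// Note: each profiled flow-graph edge owns one Smi element in
// edge_counters_array_; the increment above adds 1 to the Smi stored at
// Array::element_offset(edge_id) and, as explained in the comment at the top
// of this function, deliberately performs no overflow check.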
439
440void FlowGraphCompiler::EmitOptimizedInstanceCall(
441    const Code& stub,
442 const ICData& ic_data,
443 intptr_t deopt_id,
444 const InstructionSource& source,
445 LocationSummary* locs,
446 Code::EntryKind entry_kind) {
447 ASSERT(CanCallDart());
448 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
449 // Each ICData propagated from unoptimized to optimized code contains the
450 // function that corresponds to the Dart function of that IC call. Due
451 // to inlining in optimized code, that function may not correspond to the
452 // top-level function (parsed_function().function()) which could be
453  // reoptimized and whose counter needs to be incremented.
454  // Pass the function explicitly; it is used by the IC stub.
455 __ LoadObject(RDI, parsed_function().function());
456 // Load receiver into RDX.
457 __ movq(RDX, compiler::Address(
458 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
459 __ LoadUniqueObject(IC_DATA_REG, ic_data);
460 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
461 entry_kind);
462 EmitDropArguments(ic_data.SizeWithTypeArgs());
463}
464
465void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
466 const ICData& ic_data,
467 intptr_t deopt_id,
468 const InstructionSource& source,
469 LocationSummary* locs,
470 Code::EntryKind entry_kind) {
471 ASSERT(CanCallDart());
472 ASSERT(entry_kind == Code::EntryKind::kNormal ||
473 entry_kind == Code::EntryKind::kUnchecked);
474 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
475 // Load receiver into RDX.
476 __ movq(RDX, compiler::Address(
477 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
478 __ LoadUniqueObject(IC_DATA_REG, ic_data);
479 __ LoadUniqueObject(CODE_REG, stub);
480 const intptr_t entry_point_offset =
481 entry_kind == Code::EntryKind::kNormal
482 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
483 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
484 __ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
485 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
486 pending_deoptimization_env_);
487 EmitDropArguments(ic_data.SizeWithTypeArgs());
488}
489
490void FlowGraphCompiler::EmitMegamorphicInstanceCall(
491    const String& name,
492 const Array& arguments_descriptor,
493 intptr_t deopt_id,
494 const InstructionSource& source,
495 LocationSummary* locs) {
496 ASSERT(CanCallDart());
497 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
498 ASSERT(!FLAG_precompiled_mode);
499 const ArgumentsDescriptor args_desc(arguments_descriptor);
500 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
501 zone(),
502 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
503 __ Comment("MegamorphicCall");
504 // Load receiver into RDX.
505 __ movq(RDX, compiler::Address(RSP, (args_desc.Count() - 1) * kWordSize));
506
507 // Use same code pattern as instance call so it can be parsed by code patcher.
508 __ LoadUniqueObject(IC_DATA_REG, cache);
509 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
510 __ call(compiler::FieldAddress(
511 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
512
513 RecordSafepoint(locs);
514 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
515 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
516 if (is_optimizing()) {
517 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
518 } else {
519 // Add deoptimization continuation point after the call and before the
520 // arguments are removed.
521 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
522 }
523 RecordCatchEntryMoves(pending_deoptimization_env_);
524 EmitDropArguments(args_desc.SizeWithTypeArgs());
525}
526
527void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
528 intptr_t deopt_id,
529 const InstructionSource& source,
530 LocationSummary* locs,
531 Code::EntryKind entry_kind,
532 bool receiver_can_be_smi) {
533 ASSERT(CanCallDart());
534 ASSERT(entry_kind == Code::EntryKind::kNormal ||
535 entry_kind == Code::EntryKind::kUnchecked);
536 ASSERT(ic_data.NumArgsTested() == 1);
537 const Code& initial_stub = StubCode::SwitchableCallMiss();
538 const char* switchable_call_mode = "smiable";
539 if (!receiver_can_be_smi) {
540 switchable_call_mode = "non-smi";
541 ic_data.set_receiver_cannot_be_smi(true);
542 }
543 const UnlinkedCall& data =
544 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
545
546 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
547 __ movq(RDX, compiler::Address(
548 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
549 // The AOT runtime will replace the slot in the object pool with the
550 // entrypoint address - see app_snapshot.cc.
551  const auto snapshot_behavior =
552      compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
553  __ LoadUniqueObject(RCX, initial_stub, snapshot_behavior);
554 __ LoadUniqueObject(RBX, data);
555 __ call(RCX);
556
557 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
558 pending_deoptimization_env_);
559 EmitDropArguments(ic_data.SizeWithTypeArgs());
560}
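// Note (roughly): an AOT switchable call starts out with the SwitchableCallMiss
// stub in RCX and its UnlinkedCall data in RBX. When the miss handler runs it
// patches the corresponding object-pool entries, so subsequent calls at this
// site dispatch directly without re-entering the runtime.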
561
562void FlowGraphCompiler::EmitOptimizedStaticCall(
563    const Function& function,
564 const Array& arguments_descriptor,
565 intptr_t size_with_type_args,
566 intptr_t deopt_id,
567 const InstructionSource& source,
568 LocationSummary* locs,
569 Code::EntryKind entry_kind) {
570  ASSERT(CanCallDart());
571  ASSERT(!function.IsClosureFunction());
572  if (function.PrologueNeedsArgumentsDescriptor()) {
573    __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
574 } else {
575 if (!FLAG_precompiled_mode) {
576 __ xorl(ARGS_DESC_REG,
577 ARGS_DESC_REG); // GC safe smi zero because of stub.
578 }
579 }
580 // Do not use the code from the function, but let the code be patched so that
581 // we can record the outgoing edges to other code.
582 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
583 function, entry_kind);
584 EmitDropArguments(size_with_type_args);
585}
586
587void FlowGraphCompiler::EmitDispatchTableCall(
588    int32_t selector_offset,
589 const Array& arguments_descriptor) {
590 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
591 ASSERT(CanCallDart());
592 const Register table_reg = RAX;
593 ASSERT(cid_reg != table_reg);
594 ASSERT(cid_reg != ARGS_DESC_REG);
595 if (!arguments_descriptor.IsNull()) {
596 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
597 }
598  const intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
599                          compiler::target::kWordSize;
600  __ LoadDispatchTable(table_reg);
601 __ call(compiler::Address(table_reg, cid_reg, TIMES_8, offset));
602}
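// Note: with the TIMES_8 scaling of cid_reg (kWordSize on x64), the call above
// targets the address
//   dispatch_table + (class_id + selector_offset - kOriginElement) * kWordSize,
// i.e. the selector displacement is folded into the constant part of the
// addressing mode while the class id supplies the scaled index.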
603
604Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
605    Register reg,
606 const Object& obj,
607 bool needs_number_check,
608 const InstructionSource& source,
609 intptr_t deopt_id) {
610 ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));
611
612 if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
613 ASSERT(!needs_number_check);
614 __ OBJ(test)(reg, reg);
615 return EQUAL;
616 }
617
618 if (needs_number_check) {
619 __ pushq(reg);
620 __ PushObject(obj);
621 if (is_optimizing()) {
622 // No breakpoints in optimized code.
623 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
624 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
625 } else {
626 // Patchable to support breakpoints.
627 __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
628 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
629 source);
630 }
631 // Stub returns result in flags (result of a cmpq, we need ZF computed).
632 __ popq(reg); // Discard constant.
633 __ popq(reg); // Restore 'reg'.
634 } else {
635 __ CompareObject(reg, obj);
636 }
637 return EQUAL;
638}
639
640Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
641    Register left,
642 Register right,
643 bool needs_number_check,
644 const InstructionSource& source,
645 intptr_t deopt_id) {
646 if (needs_number_check) {
647 __ pushq(left);
648 __ pushq(right);
649 if (is_optimizing()) {
650 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
651 } else {
652 __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
653 }
654 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
655 // Stub returns result in flags (result of a cmpq, we need ZF computed).
656 __ popq(right);
657 __ popq(left);
658 } else {
659 __ CompareObjectRegisters(left, right);
660 }
661 return EQUAL;
662}
663
664Condition FlowGraphCompiler::EmitBoolTest(Register value,
665                                          BranchLabels labels,
666 bool invert) {
667 __ Comment("BoolTest");
668  __ testq(value, compiler::Immediate(
669                      compiler::target::ObjectAlignment::kBoolValueMask));
670  return invert ? NOT_EQUAL : EQUAL;
671}
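// Note: the VM lays out the true and false objects so that their tagged
// pointers differ in a single bit selected by kBoolValueMask, which is why one
// masked testq is enough to distinguish them. Callers are expected to have
// dealt with null beforehand, as GenerateBoolToJump does.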
672
673// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
674// FlowGraphCompiler::SlowPathEnvironmentFor.
675void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
676#if defined(DEBUG)
677 locs->CheckWritableInputs();
678 ClobberDeadTempRegisters(locs);
679#endif
680
681 // TODO(vegorov): avoid saving non-volatile registers.
682 __ PushRegisters(*locs->live_registers());
683}
684
685void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
686 __ PopRegisters(*locs->live_registers());
687}
688
689#if defined(DEBUG)
690void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
691 // Clobber temporaries that have not been manually preserved.
692 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
693 Location tmp = locs->temp(i);
694 // TODO(zerny): clobber non-live temporary FPU registers.
695 if (tmp.IsRegister() &&
696 !locs->live_registers()->ContainsRegister(tmp.reg())) {
697 __ movq(tmp.reg(), compiler::Immediate(0xf7));
698 }
699 }
700}
701#endif
702
703Register FlowGraphCompiler::EmitTestCidRegister() {
704 return RDI;
705}
706
707void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
708 intptr_t count_without_type_args,
709 const Array& arguments_descriptor) {
710 __ Comment("EmitTestAndCall");
711 // Load receiver into RAX.
712 __ movq(RAX,
713 compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
714 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
715}
716
717void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
718 bool if_smi) {
719 __ testq(RAX, compiler::Immediate(kSmiTagMask));
720 // Jump if receiver is (not) Smi.
721 __ j(if_smi ? ZERO : NOT_ZERO, label);
722}
723
724void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
725 ASSERT(class_id_reg != RAX);
726 __ LoadClassId(class_id_reg, RAX);
727}
728
729void FlowGraphCompiler::EmitMove(Location destination,
730                                 Location source,
731                                 TemporaryRegisterAllocator* tmp) {
732 if (destination.Equals(source)) return;
733
734 if (source.IsRegister()) {
735 if (destination.IsRegister()) {
736 __ movq(destination.reg(), source.reg());
737 } else {
738 ASSERT(destination.IsStackSlot());
739 __ movq(LocationToStackSlotAddress(destination), source.reg());
740 }
741 } else if (source.IsStackSlot()) {
742 if (destination.IsRegister()) {
743 __ movq(destination.reg(), LocationToStackSlotAddress(source));
744 } else if (destination.IsFpuRegister()) {
745      // 32-bit float
746      __ movq(TMP, LocationToStackSlotAddress(source));
747      __ movq(destination.fpu_reg(), TMP);
748 } else {
749 ASSERT(destination.IsStackSlot());
750      __ MoveMemoryToMemory(LocationToStackSlotAddress(destination),
751                            LocationToStackSlotAddress(source));
752    }
753 } else if (source.IsFpuRegister()) {
754 if (destination.IsFpuRegister()) {
755 // Optimization manual recommends using MOVAPS for register
756 // to register moves.
757 __ movaps(destination.fpu_reg(), source.fpu_reg());
758 } else {
759 if (destination.IsDoubleStackSlot()) {
760 __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg());
761 } else {
762 ASSERT(destination.IsQuadStackSlot());
763 __ movups(LocationToStackSlotAddress(destination), source.fpu_reg());
764 }
765 }
766 } else if (source.IsDoubleStackSlot()) {
767 if (destination.IsFpuRegister()) {
768 __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source));
769 } else {
770      ASSERT(destination.IsDoubleStackSlot() ||
771             destination.IsStackSlot() /*32-bit float*/);
772      __ movsd(FpuTMP, LocationToStackSlotAddress(source));
773      __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
774 }
775 } else if (source.IsQuadStackSlot()) {
776 if (destination.IsFpuRegister()) {
777 __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source));
778 } else {
779      ASSERT(destination.IsQuadStackSlot());
780      __ movups(FpuTMP, LocationToStackSlotAddress(source));
781      __ movups(LocationToStackSlotAddress(destination), FpuTMP);
782 }
783 } else {
784 ASSERT(!source.IsInvalid());
785 ASSERT(source.IsConstant());
786 source.constant_instruction()->EmitMoveToLocation(this, destination);
787 }
788}
789
790void FlowGraphCompiler::EmitNativeMoveArchitecture(
791 const compiler::ffi::NativeLocation& destination,
792 const compiler::ffi::NativeLocation& source) {
793 const auto& src_type = source.payload_type();
794 const auto& dst_type = destination.payload_type();
795 ASSERT(src_type.IsSigned() == dst_type.IsSigned());
796 ASSERT(src_type.IsPrimitive());
797 ASSERT(dst_type.IsPrimitive());
798 const intptr_t src_size = src_type.SizeInBytes();
799 const intptr_t dst_size = dst_type.SizeInBytes();
800 const bool sign_or_zero_extend = dst_size > src_size;
801
802 if (source.IsRegisters()) {
803 const auto& src = source.AsRegisters();
804 ASSERT(src.num_regs() == 1);
805 const auto src_reg = src.reg_at(0);
806
807 if (destination.IsRegisters()) {
808 const auto& dst = destination.AsRegisters();
809 ASSERT(dst.num_regs() == 1);
810 const auto dst_reg = dst.reg_at(0);
811 ASSERT(destination.container_type().SizeInBytes() <= 8);
812 if (!sign_or_zero_extend) {
813 __ MoveRegister(dst_reg, src_reg);
814 return;
815 } else {
816        switch (src_type.AsPrimitive().representation()) {
817          case compiler::ffi::kInt8:  // Sign extend operand.
818            __ movsxb(dst_reg, src_reg);
819            return;
820          case compiler::ffi::kInt16:
821            __ movsxw(dst_reg, src_reg);
822            return;
823          case compiler::ffi::kInt32:
824            __ movsxd(dst_reg, src_reg);
825            return;
826          case compiler::ffi::kInt24:
827          case compiler::ffi::kInt40:
828          case compiler::ffi::kInt48:
829          case compiler::ffi::kInt56:
830            __ MoveRegister(dst_reg, src_reg);
831            __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
832            __ sarq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
833            return;
834          case compiler::ffi::kUint8:  // Zero extend operand.
835            __ movzxb(dst_reg, src_reg);
836            return;
837          case compiler::ffi::kUint16:
838            __ movzxw(dst_reg, src_reg);
839            return;
840          case compiler::ffi::kUint32:
841            __ movl(dst_reg, src_reg);
842            return;
843          case compiler::ffi::kUint24:
844          case compiler::ffi::kUint40:
845          case compiler::ffi::kUint48:
846          case compiler::ffi::kUint56:
847            __ MoveRegister(dst_reg, src_reg);
848            __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
849            __ shrq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
850            return;
851          default:
852            UNREACHABLE();
853 }
854 }
855
856 } else if (destination.IsFpuRegisters()) {
857 const auto& dst = destination.AsFpuRegisters();
858 ASSERT(src_size == dst_size);
859 switch (dst_size) {
860 case 8:
861 __ movq(dst.fpu_reg(), src_reg);
862 return;
863 case 4:
864 __ movd(dst.fpu_reg(), src_reg);
865 return;
866 default:
867 UNREACHABLE();
868 }
869
870 } else {
871 ASSERT(destination.IsStack());
872 const auto& dst = destination.AsStack();
873 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
874 ASSERT(!sign_or_zero_extend);
875 switch (destination.container_type().SizeInBytes()) {
876 case 8:
877 __ movq(dst_addr, src_reg);
878 return;
879 case 4:
880 __ movl(dst_addr, src_reg);
881 return;
882 case 2:
883 __ movw(dst_addr, src_reg);
884 return;
885 case 1:
886 __ movb(dst_addr, ByteRegisterOf(src_reg));
887 return;
888 default:
889 UNREACHABLE();
890 }
891 }
892
893 } else if (source.IsFpuRegisters()) {
894 const auto& src = source.AsFpuRegisters();
895 // We have not implemented conversions here, use IL convert instructions.
896 ASSERT(src_type.Equals(dst_type));
897
898 if (destination.IsRegisters()) {
899 ASSERT(src_size == dst_size);
900 const auto& dst = destination.AsRegisters();
901 ASSERT(dst.num_regs() == 1);
902 const auto dst_reg = dst.reg_at(0);
903 switch (dst_size) {
904 case 8:
905 __ movq(dst_reg, src.fpu_reg());
906 return;
907 case 4:
908 __ movl(dst_reg, src.fpu_reg());
909 return;
910 default:
911 UNREACHABLE();
912 }
913
914 } else if (destination.IsFpuRegisters()) {
915 const auto& dst = destination.AsFpuRegisters();
916 // Optimization manual recommends using MOVAPS for register
917 // to register moves.
918 __ movaps(dst.fpu_reg(), src.fpu_reg());
919
920 } else {
921 ASSERT(destination.IsStack());
922 ASSERT(src_type.IsFloat());
923 const auto& dst = destination.AsStack();
924 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
925 switch (dst_size) {
926 case 8:
927 __ movsd(dst_addr, src.fpu_reg());
928 return;
929 case 4:
930 __ movss(dst_addr, src.fpu_reg());
931 return;
932 default:
933 UNREACHABLE();
934 }
935 }
936
937 } else {
938 ASSERT(source.IsStack());
939 const auto& src = source.AsStack();
940 const auto src_addr = NativeLocationToStackSlotAddress(src);
941 if (destination.IsRegisters()) {
942 const auto& dst = destination.AsRegisters();
943 ASSERT(dst.num_regs() == 1);
944 const auto dst_reg = dst.reg_at(0);
945 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
946 src_type.AsPrimitive().representation());
947 } else if (destination.IsFpuRegisters()) {
948 ASSERT(src_type.Equals(dst_type));
949 ASSERT(src_type.IsFloat());
950 const auto& dst = destination.AsFpuRegisters();
951 switch (dst_size) {
952 case 8:
953 __ movsd(dst.fpu_reg(), src_addr);
954 return;
955 case 4:
956 __ movss(dst.fpu_reg(), src_addr);
957 return;
958 default:
959 UNREACHABLE();
960 }
961
962 } else {
963 ASSERT(destination.IsStack());
964 UNREACHABLE();
965 }
966 }
967}
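// Note: for general-purpose register moves, the extension strategy above picks
// the instruction by source width: 1-, 2- and 4-byte integers use
// movsx/movzx/movl, while the odd FFI widths (3, 5, 6 and 7 bytes) are widened
// by shifting the value to the top of the 64-bit register and shifting it back
// with sarq (signed) or shrq (unsigned).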
968
969void FlowGraphCompiler::EmitNativeLoad(Register dst,
970                                       Register base,
971                                       intptr_t offset,
972                                       compiler::ffi::PrimitiveType type) {
973  switch (type) {
974    case compiler::ffi::kInt8:
975      __ LoadFromOffset(dst, base, offset, compiler::kByte);
976      break;
977    case compiler::ffi::kUint8:
978      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
979      break;
980    case compiler::ffi::kInt16:
981      __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
982      break;
983    case compiler::ffi::kUint16:
984      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
985      break;
986    case compiler::ffi::kInt32:
987      __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
988      break;
989    case compiler::ffi::kUint32:
990    case compiler::ffi::kFloat:
991      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
992      break;
993    case compiler::ffi::kInt64:
994    case compiler::ffi::kUint64:
995    case compiler::ffi::kDouble:
996      __ LoadFromOffset(dst, base, offset, compiler::kEightBytes);
997      break;
998
999    case compiler::ffi::kInt24:
1000      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1001      __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1002      __ shlq(TMP, compiler::Immediate(16));
1003      __ orq(dst, TMP);
1004      break;
1005    case compiler::ffi::kUint24:
1006      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1007      __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1008      __ shlq(TMP, compiler::Immediate(16));
1009      __ orq(dst, TMP);
1010      break;
1011    case compiler::ffi::kInt40:
1012      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1013      __ LoadFromOffset(TMP, base, offset + 4, compiler::kByte);
1014      __ shlq(TMP, compiler::Immediate(32));
1015      __ orq(dst, TMP);
1016      break;
1017    case compiler::ffi::kUint40:
1018      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1019      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedByte);
1020      __ shlq(TMP, compiler::Immediate(32));
1021      __ orq(dst, TMP);
1022      break;
1023    case compiler::ffi::kInt48:
1024      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1025      __ LoadFromOffset(TMP, base, offset + 4, compiler::kTwoBytes);
1026      __ shlq(TMP, compiler::Immediate(32));
1027      __ orq(dst, TMP);
1028      break;
1029    case compiler::ffi::kUint48:
1030      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1031      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1032      __ shlq(TMP, compiler::Immediate(32));
1033      __ orq(dst, TMP);
1034      break;
1035    case compiler::ffi::kInt56:
1036      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1037      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1038      __ shlq(TMP, compiler::Immediate(32));
1039      __ orq(dst, TMP);
1040      __ LoadFromOffset(TMP, base, offset + 6, compiler::kByte);
1041      __ shlq(TMP, compiler::Immediate(48));
1042      __ orq(dst, TMP);
1043      break;
1044    case compiler::ffi::kUint56:
1045      __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1046      __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1047      __ shlq(TMP, compiler::Immediate(32));
1048      __ orq(dst, TMP);
1049      __ LoadFromOffset(TMP, base, offset + 6, compiler::kUnsignedByte);
1050      __ shlq(TMP, compiler::Immediate(48));
1051      __ orq(dst, TMP);
1052      break;
1053    default:
1054      UNREACHABLE();
1055  }
1056}
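// Worked example: a kUint48 (6-byte) load at `offset` is assembled from two
// naturally sized loads: dst = 32-bit unsigned load at offset, TMP = 16-bit
// unsigned load at offset + 4, then dst |= TMP << 32. The signed variant
// (kInt48) differs only in using a sign-extending load for the upper part.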
1057
1058void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1059                                     Register dst,
1060                                     Register tmp) {
1061 compiler::Label skip_reloc;
1062 __ jmp(&skip_reloc);
1063 InsertBSSRelocation(relocation);
1064 const intptr_t reloc_end = __ CodeSize();
1065 __ Bind(&skip_reloc);
1066
1067  const intptr_t kLeaqLength = 7;
1068  __ leaq(dst, compiler::Address::AddressRIPRelative(
1069                   -kLeaqLength - compiler::target::kWordSize));
1070 ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);
1071
1072 // dst holds the address of the relocation.
1073 __ movq(tmp, compiler::Address(dst, 0));
1074
1075 // tmp holds the relocation itself: dst - bss_start.
1076 // dst = dst + (bss_start - dst) = bss_start
1077 __ addq(dst, tmp);
1078
1079 // dst holds the start of the BSS section.
1080 // Load the routine.
1081 __ movq(dst, compiler::Address(dst, 0));
1082}
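// Note: the RIP-relative leaq above lands `dst` exactly on the word planted by
// InsertBSSRelocation() (RIP minus the leaq length minus one word). That word
// holds, in effect, the displacement which, when added to the relocation's own
// address, yields the start of the BSS section; the final movq then reads the
// routine's entry point out of the BSS.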
1083
1084#undef __
1085#define __ compiler_->assembler()->
1086
1087void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1088 const Location source = move.src();
1089 const Location destination = move.dest();
1090
1091 if (source.IsRegister() && destination.IsRegister()) {
1092 __ xchgq(destination.reg(), source.reg());
1093 } else if (source.IsRegister() && destination.IsStackSlot()) {
1094 Exchange(source.reg(), LocationToStackSlotAddress(destination));
1095 } else if (source.IsStackSlot() && destination.IsRegister()) {
1096 Exchange(destination.reg(), LocationToStackSlotAddress(source));
1097 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1098 Exchange(LocationToStackSlotAddress(destination),
1100 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1101 __ movaps(FpuTMP, source.fpu_reg());
1102 __ movaps(source.fpu_reg(), destination.fpu_reg());
1103 __ movaps(destination.fpu_reg(), FpuTMP);
1104 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1105 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1106 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1107 bool double_width =
1108 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1109 XmmRegister reg =
1110 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1111 compiler::Address slot_address =
1112        source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
1113                               : LocationToStackSlotAddress(source);
1114
1115 if (double_width) {
1116 __ movsd(FpuTMP, slot_address);
1117 __ movsd(slot_address, reg);
1118 } else {
1119 __ movups(FpuTMP, slot_address);
1120 __ movups(slot_address, reg);
1121 }
1122 __ movaps(reg, FpuTMP);
1123 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1124    const compiler::Address& source_slot_address =
1125        LocationToStackSlotAddress(source);
1126    const compiler::Address& destination_slot_address =
1127 LocationToStackSlotAddress(destination);
1128
1129 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1130 __ movsd(FpuTMP, source_slot_address);
1131 __ movsd(ensure_scratch.reg(), destination_slot_address);
1132 __ movsd(destination_slot_address, FpuTMP);
1133 __ movsd(source_slot_address, ensure_scratch.reg());
1134 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1135    const compiler::Address& source_slot_address =
1136        LocationToStackSlotAddress(source);
1137    const compiler::Address& destination_slot_address =
1138 LocationToStackSlotAddress(destination);
1139
1140 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1141 __ movups(FpuTMP, source_slot_address);
1142 __ movups(ensure_scratch.reg(), destination_slot_address);
1143 __ movups(destination_slot_address, FpuTMP);
1144 __ movups(source_slot_address, ensure_scratch.reg());
1145 } else {
1146 UNREACHABLE();
1147 }
1148}
1149
1150void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1151 const compiler::Address& src) {
1152 __ MoveMemoryToMemory(dst, src);
1153}
1154
1155void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1156 __ Exchange(reg, mem);
1157}
1158
1159void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1160 const compiler::Address& mem2) {
1161 __ Exchange(mem1, mem2);
1162}
1163
1164void ParallelMoveEmitter::Exchange(Register reg,
1165 Register base_reg,
1166 intptr_t stack_offset) {
1167 UNREACHABLE();
1168}
1169
1170void ParallelMoveEmitter::Exchange(Register base_reg1,
1171 intptr_t stack_offset1,
1172 Register base_reg2,
1173 intptr_t stack_offset2) {
1174 UNREACHABLE();
1175}
1176
1177void ParallelMoveEmitter::SpillScratch(Register reg) {
1178 __ pushq(reg);
1179}
1180
1181void ParallelMoveEmitter::RestoreScratch(Register reg) {
1182 __ popq(reg);
1183}
1184
1185void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1186 __ AddImmediate(RSP, compiler::Immediate(-kFpuRegisterSize));
1187 __ movups(compiler::Address(RSP, 0), reg);
1188}
1189
1190void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1191 __ movups(reg, compiler::Address(RSP, 0));
1192 __ AddImmediate(RSP, compiler::Immediate(kFpuRegisterSize));
1193}
1194
1195#undef __
1196
1197} // namespace dart
1198
1199#endif // defined(TARGET_ARCH_X64)