Flutter Engine
flow_graph_compiler_x64.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
6#if defined(TARGET_ARCH_X64)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
16#include "vm/dart_entry.h"
17#include "vm/deopt_instructions.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
31void FlowGraphCompiler::ArchSpecificInitialization() {
32 if (FLAG_precompiled_mode) {
33 auto object_store = isolate_group()->object_store();
34
35 const auto& stub =
36 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
37 if (CanPcRelativeCall(stub)) {
38 assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
39 const intptr_t offset_into_target =
40 Thread::WriteBarrierWrappersOffsetForRegister(reg);
41 assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
42 AddPcRelativeCallStubTarget(stub);
43 };
44 }
45
46 const auto& array_stub =
47 Code::ZoneHandle(object_store->array_write_barrier_stub());
48 if (CanPcRelativeCall(array_stub)) {
49 assembler_->generate_invoke_array_write_barrier_ = [&]() {
50 assembler_->GenerateUnRelocatedPcRelativeCall();
51 AddPcRelativeCallStubTarget(array_stub);
52 };
53 }
54 }
55}
56
57FlowGraphCompiler::~FlowGraphCompiler() {
58 // BlockInfos are zone-allocated, so their destructors are not called.
59 // Verify the labels explicitly here.
60 for (int i = 0; i < block_info_.length(); ++i) {
61 ASSERT(!block_info_[i]->jump_label()->IsLinked());
62 ASSERT(!block_info_[i]->jump_label()->HasNear());
63 }
64}
65
66bool FlowGraphCompiler::SupportsUnboxedDoubles() {
67 return true;
68}
69
70bool FlowGraphCompiler::SupportsUnboxedSimd128() {
71 return FLAG_enable_simd_inline;
72}
73
74bool FlowGraphCompiler::CanConvertInt64ToDouble() {
75 return true;
76}
77
78void FlowGraphCompiler::EnterIntrinsicMode() {
79 ASSERT(!intrinsic_mode());
80 intrinsic_mode_ = true;
81 ASSERT(!assembler()->constant_pool_allowed());
82}
83
84void FlowGraphCompiler::ExitIntrinsicMode() {
85 ASSERT(intrinsic_mode());
86 intrinsic_mode_ = false;
87}
88
89TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
90 DeoptInfoBuilder* builder,
91 const Array& deopt_table) {
92 if (deopt_env_ == nullptr) {
93 ++builder->current_info_number_;
94 return TypedData::null();
95 }
96
97 AllocateOutgoingArguments(deopt_env_);
98
99 intptr_t slot_ix = 0;
100 Environment* current = deopt_env_;
101
102 // Emit all kMaterializeObject instructions describing objects to be
103 // materialized on the deoptimization as a prefix to the deoptimization info.
104 EmitMaterializations(deopt_env_, builder);
105
106 // The real frame starts here.
107 builder->MarkFrameStart();
108
109 Zone* zone = compiler->zone();
110
111 builder->AddPp(current->function(), slot_ix++);
112 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
113 builder->AddCallerFp(slot_ix++);
114 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
115
116 // Emit all values that are needed for materialization as a part of the
117 // expression stack for the bottom-most frame. This guarantees that GC
118 // will be able to find them during materialization.
119 slot_ix = builder->EmitMaterializationArguments(slot_ix);
120
121 // For the innermost environment, set outgoing arguments and the locals.
122 for (intptr_t i = current->Length() - 1;
123 i >= current->fixed_parameter_count(); i--) {
124 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
125 }
126
127 Environment* previous = current;
128 current = current->outer();
129 while (current != nullptr) {
130 builder->AddPp(current->function(), slot_ix++);
131 builder->AddPcMarker(previous->function(), slot_ix++);
132 builder->AddCallerFp(slot_ix++);
133
134 // For any outer environment the deopt id is that of the call instruction
135 // which is recorded in the outer environment.
136 builder->AddReturnAddress(current->function(),
137 DeoptId::ToDeoptAfter(current->GetDeoptId()),
138 slot_ix++);
139
140 // The values of outgoing arguments can be changed from the inlined call so
141 // we must read them from the previous environment.
142 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
143 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
144 slot_ix++);
145 }
146
147 // Set the locals, note that outgoing arguments are not in the environment.
148 for (intptr_t i = current->Length() - 1;
149 i >= current->fixed_parameter_count(); i--) {
150 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
151 }
152
153 // Iterate on the outer environment.
154 previous = current;
155 current = current->outer();
156 }
157 // The previous pointer is now the outermost environment.
158 ASSERT(previous != nullptr);
159
160 // Set slots for the outermost environment.
161 builder->AddCallerPp(slot_ix++);
162 builder->AddPcMarker(previous->function(), slot_ix++);
163 builder->AddCallerFp(slot_ix++);
164 builder->AddCallerPc(slot_ix++);
165
166 // For the outermost environment, set the incoming arguments.
167 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
168 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
169 }
170
171 return builder->CreateDeoptInfo(deopt_table);
172}
173
174void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
175 intptr_t stub_ix) {
176 // Calls do not need stubs, they share a deoptimization trampoline.
177 ASSERT(reason() != ICData::kDeoptAtCall);
178 compiler::Assembler* assembler = compiler->assembler();
179#define __ assembler->
180 __ Comment("%s", Name());
181 __ Bind(entry_label());
182 if (FLAG_trap_on_deoptimization) {
183 __ int3();
184 }
185
186 ASSERT(deopt_env() != nullptr);
187 __ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
188 set_pc_offset(assembler->CodeSize());
189 __ int3();
190#undef __
191}
192
193#define __ assembler->
194// Static methods of FlowGraphCompiler that take an assembler.
195
196void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
197 Register reg_to_call,
198 intptr_t sub_type_cache_index) {
199 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
200 sub_type_cache_index);
201 __ Call(compiler::FieldAddress(
202 reg_to_call,
203 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
204}
205
206#undef __
207#define __ assembler()->
208// Instance methods of FlowGraphCompiler.
209
210// Fall through if bool_register contains null.
211void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
212 compiler::Label* is_true,
213 compiler::Label* is_false) {
214 compiler::Label fall_through;
215 __ CompareObject(bool_register, Object::null_object());
216 __ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
217 BranchLabels labels = {is_true, is_false, &fall_through};
218 Condition true_condition =
219 EmitBoolTest(bool_register, labels, /*invert=*/false);
220 ASSERT(true_condition != kInvalidCondition);
221 __ j(true_condition, is_true);
222 __ jmp(is_false);
223 __ Bind(&fall_through);
224}
225
226// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
227// needs to be updated to match.
228void FlowGraphCompiler::EmitFrameEntry() {
229 if (!flow_graph().graph_entry()->NeedsFrame()) {
230 if (FLAG_precompiled_mode) {
231 assembler()->set_constant_pool_allowed(true);
232 }
233 return;
234 }
235
236 if (flow_graph().IsCompiledForOsr()) {
237 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
238 ASSERT(extra_slots >= 0);
239 __ EnterOsrFrame(extra_slots * kWordSize);
240 } else {
241 const Function& function = parsed_function().function();
242 if (function.IsOptimizable() &&
243 (!is_optimizing() || may_reoptimize())) {
244 __ Comment("Invocation Count Check");
245 const Register function_reg = RDI;
246 __ movq(function_reg,
247 compiler::FieldAddress(CODE_REG, Code::owner_offset()));
248
249 // Reoptimization of an optimized function is triggered by counting in
250 // IC stubs, but not at the entry of the function.
251 if (!is_optimizing()) {
252 __ incl(compiler::FieldAddress(function_reg,
253 Function::usage_counter_offset()));
254 }
255 __ cmpl(compiler::FieldAddress(function_reg,
256 Function::usage_counter_offset()),
257 compiler::Immediate(GetOptimizationThreshold()));
258 ASSERT(function_reg == RDI);
259 compiler::Label dont_optimize;
260 __ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
261 __ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
262 __ Bind(&dont_optimize);
263 }
264 ASSERT(StackSize() >= 0);
265 __ Comment("Enter frame");
266 __ EnterDartFrame(StackSize() * kWordSize);
267 }
268}
269
270const InstructionSource& PrologueSource() {
271 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
272 /*inlining_id=*/0);
273 return prologue_source;
274}
275
276void FlowGraphCompiler::EmitPrologue() {
277 BeginCodeSourceRange(PrologueSource());
278
279 EmitFrameEntry();
280 ASSERT(assembler()->constant_pool_allowed());
281
282 // In unoptimized code, initialize (non-argument) stack allocated slots.
283 if (!is_optimizing()) {
284 const int num_locals = parsed_function().num_stack_locals();
285
286 intptr_t args_desc_slot = -1;
287 if (parsed_function().has_arg_desc_var()) {
288 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
289 parsed_function().arg_desc_var());
290 }
291
292 __ Comment("Initialize spill slots");
293 if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
294 __ LoadObject(RAX, Object::null_object());
295 }
296 for (intptr_t i = 0; i < num_locals; ++i) {
297 const intptr_t slot_index =
298 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
299 Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
300 __ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
301 }
302 } else if (parsed_function().suspend_state_var() != nullptr &&
303 !flow_graph().IsCompiledForOsr()) {
304 // Initialize synthetic :suspend_state variable early
305 // as it may be accessed by GC and exception handling before
306 // InitSuspendableFunction stub is called.
307 const intptr_t slot_index =
308 compiler::target::frame_layout.FrameSlotForVariable(
309 parsed_function().suspend_state_var());
310 __ LoadObject(RAX, Object::null_object());
311 __ movq(compiler::Address(RBP, slot_index * kWordSize), RAX);
312 }
313
314 EndCodeSourceRange(PrologueSource());
315}
316
317void FlowGraphCompiler::EmitCallToStub(
318 const Code& stub,
319 ObjectPool::SnapshotBehavior snapshot_behavior) {
320 ASSERT(!stub.IsNull());
321 if (CanPcRelativeCall(stub)) {
322 __ GenerateUnRelocatedPcRelativeCall();
323 AddPcRelativeCallStubTarget(stub);
324 } else {
325 __ Call(stub, snapshot_behavior);
326 AddStubCallTarget(stub);
327 }
328}
329
330void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
331 ASSERT(!stub.IsNull());
332 if (CanPcRelativeCall(stub)) {
333 __ GenerateUnRelocatedPcRelativeTailCall();
334 AddPcRelativeTailCallStubTarget(stub);
335 } else {
336 __ LoadObject(CODE_REG, stub);
337 __ jmp(compiler::FieldAddress(
338 CODE_REG, compiler::target::Code::entry_point_offset()));
339 AddStubCallTarget(stub);
340 }
341}
342
343void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
344 ASSERT(!stub.IsNull());
345 if (CanPcRelativeCall(stub)) {
346 if (flow_graph().graph_entry()->NeedsFrame()) {
347 __ LeaveDartFrame();
348 }
349 __ GenerateUnRelocatedPcRelativeTailCall();
350 AddPcRelativeTailCallStubTarget(stub);
351#if defined(DEBUG)
352 __ Breakpoint();
353#endif
354 } else {
355 __ LoadObject(CODE_REG, stub);
356 if (flow_graph().graph_entry()->NeedsFrame()) {
357 __ LeaveDartFrame();
358 }
359 __ jmp(compiler::FieldAddress(
360 CODE_REG, compiler::target::Code::entry_point_offset()));
361 AddStubCallTarget(stub);
362 }
363}
364
365void FlowGraphCompiler::GeneratePatchableCall(
366 const InstructionSource& source,
367 const Code& stub,
368 UntaggedPcDescriptors::Kind kind,
369 LocationSummary* locs,
370 ObjectPool::SnapshotBehavior snapshot_behavior) {
371 __ CallPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
372 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
373 pending_deoptimization_env_);
374}
375
376void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
377 const InstructionSource& source,
378 const Code& stub,
379 UntaggedPcDescriptors::Kind kind,
380 LocationSummary* locs,
381 Code::EntryKind entry_kind) {
382 ASSERT(CanCallDart());
383 __ CallPatchable(stub, entry_kind);
384 EmitCallsiteMetadata(source, deopt_id, kind, locs,
385 pending_deoptimization_env_);
386}
387
388void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
389 const InstructionSource& source,
390 UntaggedPcDescriptors::Kind kind,
391 LocationSummary* locs,
392 const Function& target,
393 Code::EntryKind entry_kind) {
394 ASSERT(CanCallDart());
396 if (CanPcRelativeCall(target)) {
397 __ GenerateUnRelocatedPcRelativeCall();
398 AddPcRelativeCallTarget(target, entry_kind);
399 EmitCallsiteMetadata(source, deopt_id, kind, locs,
400 pending_deoptimization_env_);
401 } else {
402 // Call sites to the same target can share object pool entries. These
403 // call sites are never patched for breakpoints: the function is deoptimized
404 // and the unoptimized code with IC calls for static calls is patched
405 // instead.
406 const auto& stub_entry = StubCode::CallStaticFunction();
407 __ CallWithEquivalence(stub_entry, target, entry_kind);
408 EmitCallsiteMetadata(source, deopt_id, kind, locs,
409 pending_deoptimization_env_);
410 AddStaticCallTarget(target, entry_kind);
411 }
412}
413
414void FlowGraphCompiler::EmitUnoptimizedStaticCall(
415 intptr_t size_with_type_args,
416 intptr_t deopt_id,
417 const InstructionSource& source,
418 LocationSummary* locs,
419 const ICData& ic_data,
420 Code::EntryKind entry_kind) {
421 ASSERT(CanCallDart());
422 const Code& stub =
423 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
424 __ LoadObject(RBX, ic_data);
425 GenerateDartCall(deopt_id, source, stub,
426 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
427 EmitDropArguments(size_with_type_args);
428}
429
430void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
431 // We do not check for overflow when incrementing the edge counter. The
432 // function should normally be optimized long before the counter can
433 // overflow; and though we do not reset the counters when we optimize or
434 // deoptimize, there is a bound on the number of
435 // optimization/deoptimization cycles we will attempt.
436 ASSERT(!edge_counters_array_.IsNull());
437 ASSERT(assembler_->constant_pool_allowed());
438 __ Comment("Edge counter");
439 __ LoadObject(RAX, edge_counters_array_);
440 __ IncrementCompressedSmiField(
441 compiler::FieldAddress(RAX, Array::element_offset(edge_id)), 1);
442}
443
444void FlowGraphCompiler::EmitOptimizedInstanceCall(
445 const Code& stub,
446 const ICData& ic_data,
447 intptr_t deopt_id,
448 const InstructionSource& source,
449 LocationSummary* locs,
450 Code::EntryKind entry_kind) {
451 ASSERT(CanCallDart());
452 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
453 // Each ICData propagated from unoptimized to optimized code contains the
454 // function that corresponds to the Dart function of that IC call. Due
455 // to inlining in optimized code, that function may not correspond to the
456 // top-level function (parsed_function().function()) which could be
457 // reoptimized and which counter needs to be incremented.
458 // Pass the function explicitly, it is used in IC stub.
459 __ LoadObject(RDI, parsed_function().function());
460 // Load receiver into RDX.
461 __ movq(RDX, compiler::Address(
462 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
463 __ LoadUniqueObject(IC_DATA_REG, ic_data);
464 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
465 entry_kind);
466 EmitDropArguments(ic_data.SizeWithTypeArgs());
467}
468
469void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
470 const ICData& ic_data,
471 intptr_t deopt_id,
472 const InstructionSource& source,
473 LocationSummary* locs,
474 Code::EntryKind entry_kind) {
475 ASSERT(CanCallDart());
476 ASSERT(entry_kind == Code::EntryKind::kNormal ||
477 entry_kind == Code::EntryKind::kUnchecked);
478 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
479 // Load receiver into RDX.
480 __ movq(RDX, compiler::Address(
481 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
482 __ LoadUniqueObject(IC_DATA_REG, ic_data);
483 __ LoadUniqueObject(CODE_REG, stub);
484 const intptr_t entry_point_offset =
485 entry_kind == Code::EntryKind::kNormal
486 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
487 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
488 __ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
489 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
490 pending_deoptimization_env_);
491 EmitDropArguments(ic_data.SizeWithTypeArgs());
492}
493
494void FlowGraphCompiler::EmitMegamorphicInstanceCall(
495 const String& name,
496 const Array& arguments_descriptor,
497 intptr_t deopt_id,
498 const InstructionSource& source,
499 LocationSummary* locs) {
500 ASSERT(CanCallDart());
501 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
502 ASSERT(!FLAG_precompiled_mode);
503 const ArgumentsDescriptor args_desc(arguments_descriptor);
504 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
505 zone(),
506 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
507 __ Comment("MegamorphicCall");
508 // Load receiver into RDX.
509 __ movq(RDX, compiler::Address(RSP, (args_desc.Count() - 1) * kWordSize));
510
511 // Use same code pattern as instance call so it can be parsed by code patcher.
512 __ LoadUniqueObject(IC_DATA_REG, cache);
513 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
514 __ call(compiler::FieldAddress(
515 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
516
517 RecordSafepoint(locs);
518 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
519 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
520 if (is_optimizing()) {
521 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
522 } else {
523 // Add deoptimization continuation point after the call and before the
524 // arguments are removed.
525 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
526 }
527 RecordCatchEntryMoves(pending_deoptimization_env_);
528 EmitDropArguments(args_desc.SizeWithTypeArgs());
529}
530
531void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
532 intptr_t deopt_id,
533 const InstructionSource& source,
534 LocationSummary* locs,
535 Code::EntryKind entry_kind,
536 bool receiver_can_be_smi) {
537 ASSERT(CanCallDart());
538 ASSERT(entry_kind == Code::EntryKind::kNormal ||
539 entry_kind == Code::EntryKind::kUnchecked);
540 ASSERT(ic_data.NumArgsTested() == 1);
541 const Code& initial_stub = StubCode::SwitchableCallMiss();
542 const char* switchable_call_mode = "smiable";
543 if (!receiver_can_be_smi) {
544 switchable_call_mode = "non-smi";
545 ic_data.set_receiver_cannot_be_smi(true);
546 }
547 const UnlinkedCall& data =
548 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
549
550 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
551 __ movq(RDX, compiler::Address(
552 RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
553 if (FLAG_precompiled_mode) {
554 // The AOT runtime will replace the slot in the object pool with the
555 // entrypoint address - see app_snapshot.cc.
556 const auto snapshot_behavior =
557 compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
558 __ LoadUniqueObject(RCX, initial_stub, snapshot_behavior);
559 } else {
560 const intptr_t entry_point_offset =
561 entry_kind == Code::EntryKind::kNormal
562 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
563 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
564 __ LoadUniqueObject(CODE_REG, initial_stub);
565 __ movq(RCX, compiler::FieldAddress(CODE_REG, entry_point_offset));
566 }
567 __ LoadUniqueObject(RBX, data);
568 __ call(RCX);
569
570 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs,
571 pending_deoptimization_env_);
572 EmitDropArguments(ic_data.SizeWithTypeArgs());
573}
574
575void FlowGraphCompiler::EmitOptimizedStaticCall(
576 const Function& function,
577 const Array& arguments_descriptor,
578 intptr_t size_with_type_args,
579 intptr_t deopt_id,
580 const InstructionSource& source,
581 LocationSummary* locs,
582 Code::EntryKind entry_kind) {
583 ASSERT(CanCallDart());
584 ASSERT(!function.IsClosureFunction());
585 if (function.PrologueNeedsArgumentsDescriptor()) {
586 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
587 } else {
588 if (!FLAG_precompiled_mode) {
589 __ xorl(ARGS_DESC_REG,
590 ARGS_DESC_REG); // GC safe smi zero because of stub.
591 }
592 }
593 // Do not use the code from the function, but let the code be patched so that
594 // we can record the outgoing edges to other code.
595 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
596 function, entry_kind);
597 EmitDropArguments(size_with_type_args);
598}
599
600void FlowGraphCompiler::EmitDispatchTableCall(
601 int32_t selector_offset,
602 const Array& arguments_descriptor) {
603 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
604 ASSERT(CanCallDart());
605 const Register table_reg = RAX;
606 ASSERT(cid_reg != table_reg);
607 ASSERT(cid_reg != ARGS_DESC_REG);
608 if (!arguments_descriptor.IsNull()) {
609 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
610 }
611 const intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
612 compiler::target::kWordSize;
613 __ LoadDispatchTable(table_reg);
614 __ call(compiler::Address(table_reg, cid_reg, TIMES_8, offset));
615}
616
617Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
618 Register reg,
619 const Object& obj,
620 bool needs_number_check,
621 const InstructionSource& source,
622 intptr_t deopt_id) {
623 ASSERT(!needs_number_check || (!obj.IsMint() && !obj.IsDouble()));
624
625 if (obj.IsSmi() && (Smi::Cast(obj).Value() == 0)) {
626 ASSERT(!needs_number_check);
627 __ OBJ(test)(reg, reg);
628 return EQUAL;
629 }
630
631 if (needs_number_check) {
632 __ pushq(reg);
633 __ PushObject(obj);
634 if (is_optimizing()) {
635 // No breakpoints in optimized code.
636 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
637 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
638 } else {
639 // Patchable to support breakpoints.
640 __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
641 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
642 source);
643 }
644 // Stub returns result in flags (result of a cmpq, we need ZF computed).
645 __ popq(reg); // Discard constant.
646 __ popq(reg); // Restore 'reg'.
647 } else {
648 __ CompareObject(reg, obj);
649 }
650 return EQUAL;
651}
652
653Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
654 Register left,
655 Register right,
656 bool needs_number_check,
657 const InstructionSource& source,
658 intptr_t deopt_id) {
659 if (needs_number_check) {
660 __ pushq(left);
661 __ pushq(right);
662 if (is_optimizing()) {
663 __ Call(StubCode::OptimizedIdenticalWithNumberCheck());
664 } else {
665 __ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
666 }
667 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
668 // Stub returns result in flags (result of a cmpq, we need ZF computed).
669 __ popq(right);
670 __ popq(left);
671 } else {
672 __ CompareObjectRegisters(left, right);
673 }
674 return EQUAL;
675}
676
677Condition FlowGraphCompiler::EmitBoolTest(Register value,
678 BranchLabels labels,
679 bool invert) {
680 __ Comment("BoolTest");
681 __ testq(value, compiler::Immediate(
682 compiler::target::ObjectAlignment::kBoolValueMask));
683 return invert ? NOT_EQUAL : EQUAL;
684}
685
686// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
687// FlowGraphCompiler::SlowPathEnvironmentFor.
688void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
689#if defined(DEBUG)
690 locs->CheckWritableInputs();
691 ClobberDeadTempRegisters(locs);
692#endif
693
694 // TODO(vegorov): avoid saving non-volatile registers.
695 __ PushRegisters(*locs->live_registers());
696}
697
698void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
699 __ PopRegisters(*locs->live_registers());
700}
701
702#if defined(DEBUG)
703void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
704 // Clobber temporaries that have not been manually preserved.
705 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
706 Location tmp = locs->temp(i);
707 // TODO(zerny): clobber non-live temporary FPU registers.
708 if (tmp.IsRegister() &&
709 !locs->live_registers()->ContainsRegister(tmp.reg())) {
710 __ movq(tmp.reg(), compiler::Immediate(0xf7));
711 }
712 }
713}
714#endif
715
716Register FlowGraphCompiler::EmitTestCidRegister() {
717 return RDI;
718}
719
720void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
721 intptr_t count_without_type_args,
722 const Array& arguments_descriptor) {
723 __ Comment("EmitTestAndCall");
724 // Load receiver into RAX.
725 __ movq(RAX,
726 compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
727 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
728}
729
730void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
731 bool if_smi) {
732 __ testq(RAX, compiler::Immediate(kSmiTagMask));
733 // Jump if receiver is (not) Smi.
734 __ j(if_smi ? ZERO : NOT_ZERO, label);
735}
736
737void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
738 ASSERT(class_id_reg != RAX);
739 __ LoadClassId(class_id_reg, RAX);
740}
741
742void FlowGraphCompiler::EmitMove(Location destination,
743 Location source,
744 TemporaryRegisterAllocator* tmp) {
745 if (destination.Equals(source)) return;
746
747 if (source.IsRegister()) {
748 if (destination.IsRegister()) {
749 __ movq(destination.reg(), source.reg());
750 } else {
751 ASSERT(destination.IsStackSlot());
752 __ movq(LocationToStackSlotAddress(destination), source.reg());
753 }
754 } else if (source.IsStackSlot()) {
755 if (destination.IsRegister()) {
756 __ movq(destination.reg(), LocationToStackSlotAddress(source));
757 } else if (destination.IsFpuRegister()) {
758 // 32-bit float
759 __ movq(TMP, LocationToStackSlotAddress(source));
760 __ movq(destination.fpu_reg(), TMP);
761 } else {
762 ASSERT(destination.IsStackSlot());
763 __ MoveMemoryToMemory(LocationToStackSlotAddress(destination),
764 LocationToStackSlotAddress(source));
765 }
766 } else if (source.IsFpuRegister()) {
767 if (destination.IsFpuRegister()) {
768 // Optimization manual recommends using MOVAPS for register
769 // to register moves.
770 __ movaps(destination.fpu_reg(), source.fpu_reg());
771 } else {
772 if (destination.IsDoubleStackSlot()) {
773 __ movsd(LocationToStackSlotAddress(destination), source.fpu_reg());
774 } else {
775 ASSERT(destination.IsQuadStackSlot());
776 __ movups(LocationToStackSlotAddress(destination), source.fpu_reg());
777 }
778 }
779 } else if (source.IsDoubleStackSlot()) {
780 if (destination.IsFpuRegister()) {
781 __ movsd(destination.fpu_reg(), LocationToStackSlotAddress(source));
782 } else {
783 ASSERT(destination.IsDoubleStackSlot() ||
784 destination.IsStackSlot() /*32-bit float*/);
785 __ movsd(FpuTMP, LocationToStackSlotAddress(source));
786 __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
787 }
788 } else if (source.IsQuadStackSlot()) {
789 if (destination.IsFpuRegister()) {
790 __ movups(destination.fpu_reg(), LocationToStackSlotAddress(source));
791 } else {
792 ASSERT(destination.IsQuadStackSlot());
793 __ movups(FpuTMP, LocationToStackSlotAddress(source));
794 __ movups(LocationToStackSlotAddress(destination), FpuTMP);
795 }
796 } else {
797 ASSERT(!source.IsInvalid());
798 ASSERT(source.IsConstant());
799 source.constant_instruction()->EmitMoveToLocation(this, destination);
800 }
801}
802
803void FlowGraphCompiler::EmitNativeMoveArchitecture(
804 const compiler::ffi::NativeLocation& destination,
805 const compiler::ffi::NativeLocation& source) {
806 const auto& src_type = source.payload_type();
807 const auto& dst_type = destination.payload_type();
808 ASSERT(src_type.IsSigned() == dst_type.IsSigned());
809 ASSERT(src_type.IsPrimitive());
810 ASSERT(dst_type.IsPrimitive());
811 const intptr_t src_size = src_type.SizeInBytes();
812 const intptr_t dst_size = dst_type.SizeInBytes();
813 const bool sign_or_zero_extend = dst_size > src_size;
814
815 if (source.IsRegisters()) {
816 const auto& src = source.AsRegisters();
817 ASSERT(src.num_regs() == 1);
818 const auto src_reg = src.reg_at(0);
819
820 if (destination.IsRegisters()) {
821 const auto& dst = destination.AsRegisters();
822 ASSERT(dst.num_regs() == 1);
823 const auto dst_reg = dst.reg_at(0);
824 ASSERT(destination.container_type().SizeInBytes() <= 8);
825 if (!sign_or_zero_extend) {
826 __ MoveRegister(dst_reg, src_reg);
827 return;
828 } else {
829 switch (src_type.AsPrimitive().representation()) {
830 case compiler::ffi::kInt8: // Sign extend operand.
831 __ movsxb(dst_reg, src_reg);
832 return;
833 case compiler::ffi::kInt16:
834 __ movsxw(dst_reg, src_reg);
835 return;
836 case compiler::ffi::kInt32:
837 __ movsxd(dst_reg, src_reg);
838 return;
839 case compiler::ffi::kInt24:
840 case compiler::ffi::kInt40:
841 case compiler::ffi::kInt48:
842 case compiler::ffi::kInt56:
843 __ MoveRegister(dst_reg, src_reg);
844 __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
845 __ sarq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
846 return;
847 case compiler::ffi::kUint8: // Zero extend operand.
848 __ movzxb(dst_reg, src_reg);
849 return;
850 case compiler::ffi::kUint16:
851 __ movzxw(dst_reg, src_reg);
852 return;
853 case compiler::ffi::kUint32:
854 __ movl(dst_reg, src_reg);
855 return;
856 case compiler::ffi::kUint24:
857 case compiler::ffi::kUint40:
858 case compiler::ffi::kUint48:
859 case compiler::ffi::kUint56:
860 __ MoveRegister(dst_reg, src_reg);
861 __ shlq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
862 __ shrq(dst_reg, compiler::Immediate(64 - src_size * kBitsPerByte));
863 return;
864 default:
865 UNREACHABLE();
866 }
867 }
868
869 } else if (destination.IsFpuRegisters()) {
870 const auto& dst = destination.AsFpuRegisters();
871 ASSERT(src_size == dst_size);
872 switch (dst_size) {
873 case 8:
874 __ movq(dst.fpu_reg(), src_reg);
875 return;
876 case 4:
877 __ movd(dst.fpu_reg(), src_reg);
878 return;
879 default:
880 UNREACHABLE();
881 }
882
883 } else {
884 ASSERT(destination.IsStack());
885 const auto& dst = destination.AsStack();
886 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
887 ASSERT(!sign_or_zero_extend);
888 switch (destination.container_type().SizeInBytes()) {
889 case 8:
890 __ movq(dst_addr, src_reg);
891 return;
892 case 4:
893 __ movl(dst_addr, src_reg);
894 return;
895 case 2:
896 __ movw(dst_addr, src_reg);
897 return;
898 case 1:
899 __ movb(dst_addr, ByteRegisterOf(src_reg));
900 return;
901 default:
902 UNREACHABLE();
903 }
904 }
905
906 } else if (source.IsFpuRegisters()) {
907 const auto& src = source.AsFpuRegisters();
908 // We have not implemented conversions here, use IL convert instructions.
909 ASSERT(src_type.Equals(dst_type));
910
911 if (destination.IsRegisters()) {
912 ASSERT(src_size == dst_size);
913 const auto& dst = destination.AsRegisters();
914 ASSERT(dst.num_regs() == 1);
915 const auto dst_reg = dst.reg_at(0);
916 switch (dst_size) {
917 case 8:
918 __ movq(dst_reg, src.fpu_reg());
919 return;
920 case 4:
921 __ movl(dst_reg, src.fpu_reg());
922 return;
923 default:
924 UNREACHABLE();
925 }
926
927 } else if (destination.IsFpuRegisters()) {
928 const auto& dst = destination.AsFpuRegisters();
929 // Optimization manual recommends using MOVAPS for register
930 // to register moves.
931 __ movaps(dst.fpu_reg(), src.fpu_reg());
932
933 } else {
934 ASSERT(destination.IsStack());
935 ASSERT(src_type.IsFloat());
936 const auto& dst = destination.AsStack();
937 const auto dst_addr = NativeLocationToStackSlotAddress(dst);
938 switch (dst_size) {
939 case 8:
940 __ movsd(dst_addr, src.fpu_reg());
941 return;
942 case 4:
943 __ movss(dst_addr, src.fpu_reg());
944 return;
945 default:
946 UNREACHABLE();
947 }
948 }
949
950 } else {
951 ASSERT(source.IsStack());
952 const auto& src = source.AsStack();
953 const auto src_addr = NativeLocationToStackSlotAddress(src);
954 if (destination.IsRegisters()) {
955 const auto& dst = destination.AsRegisters();
956 ASSERT(dst.num_regs() == 1);
957 const auto dst_reg = dst.reg_at(0);
958 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
959 src_type.AsPrimitive().representation());
960 } else if (destination.IsFpuRegisters()) {
961 ASSERT(src_type.Equals(dst_type));
962 ASSERT(src_type.IsFloat());
963 const auto& dst = destination.AsFpuRegisters();
964 switch (dst_size) {
965 case 8:
966 __ movsd(dst.fpu_reg(), src_addr);
967 return;
968 case 4:
969 __ movss(dst.fpu_reg(), src_addr);
970 return;
971 default:
972 UNREACHABLE();
973 }
974
975 } else {
976 ASSERT(destination.IsStack());
977 UNREACHABLE();
978 }
979 }
980}
981
982void FlowGraphCompiler::EmitNativeLoad(Register dst,
983 Register base,
984 intptr_t offset,
985 compiler::ffi::PrimitiveType type) {
986 switch (type) {
987 case compiler::ffi::kInt8:
988 __ LoadFromOffset(dst, base, offset, compiler::kByte);
989 break;
990 case compiler::ffi::kUint8:
991 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
992 break;
993 case compiler::ffi::kInt16:
994 __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
995 break;
996 case compiler::ffi::kUint16:
997 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
998 break;
999 case compiler::ffi::kInt32:
1000 __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
1001 break;
1002 case compiler::ffi::kUint32:
1003 case compiler::ffi::kFloat:
1004 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1005 break;
1006 case compiler::ffi::kInt64:
1007 case compiler::ffi::kUint64:
1008 case compiler::ffi::kDouble:
1009 __ LoadFromOffset(dst, base, offset, compiler::kEightBytes);
1010 break;
1011
1012 case compiler::ffi::kInt24:
1013 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1014 __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1015 __ shlq(TMP, compiler::Immediate(16));
1016 __ orq(dst, TMP);
1017 break;
1018 case compiler::ffi::kUint24:
1019 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1020 __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1021 __ shlq(TMP, compiler::Immediate(16));
1022 __ orq(dst, TMP);
1023 break;
1024 case compiler::ffi::kInt40:
1025 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1026 __ LoadFromOffset(TMP, base, offset + 4, compiler::kByte);
1027 __ shlq(TMP, compiler::Immediate(32));
1028 __ orq(dst, TMP);
1029 break;
1030 case compiler::ffi::kUint40:
1031 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1032 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedByte);
1033 __ shlq(TMP, compiler::Immediate(32));
1034 __ orq(dst, TMP);
1035 break;
1036 case compiler::ffi::kInt48:
1037 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1038 __ LoadFromOffset(TMP, base, offset + 4, compiler::kTwoBytes);
1039 __ shlq(TMP, compiler::Immediate(32));
1040 __ orq(dst, TMP);
1041 break;
1042 case compiler::ffi::kUint48:
1043 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1044 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1045 __ shlq(TMP, compiler::Immediate(32));
1046 __ orq(dst, TMP);
1047 break;
1048 case compiler::ffi::kInt56:
1049 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1050 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1051 __ shlq(TMP, compiler::Immediate(32));
1052 __ orq(dst, TMP);
1053 __ LoadFromOffset(TMP, base, offset + 6, compiler::kByte);
1054 __ shlq(TMP, compiler::Immediate(48));
1055 __ orq(dst, TMP);
1056 break;
1057 case compiler::ffi::kUint56:
1058 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1059 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1060 __ shlq(TMP, compiler::Immediate(32));
1061 __ orq(dst, TMP);
1062 __ LoadFromOffset(TMP, base, offset + 6, compiler::kUnsignedByte);
1063 __ shlq(TMP, compiler::Immediate(48));
1064 __ orq(dst, TMP);
1065 break;
1066 default:
1067 UNREACHABLE();
1068 }
1069}
1070
1071void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1072 Register dst,
1073 Register tmp) {
1074 compiler::Label skip_reloc;
1075 __ jmp(&skip_reloc);
1076 InsertBSSRelocation(relocation);
1077 const intptr_t reloc_end = __ CodeSize();
1078 __ Bind(&skip_reloc);
1079
1080 const intptr_t kLeaqLength = 7;
1081 __ leaq(dst, compiler::Address::AddressRIPRelative(
1082 -kLeaqLength - compiler::target::kWordSize));
1083 ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);
1084
1085 // dst holds the address of the relocation.
1086 __ movq(tmp, compiler::Address(dst, 0));
1087
1088 // tmp holds the relocation itself: dst - bss_start.
1089 // dst = dst + (bss_start - dst) = bss_start
1090 __ addq(dst, tmp);
1091
1092 // dst holds the start of the BSS section.
1093 // Load the routine.
1094 __ movq(dst, compiler::Address(dst, 0));
1095}
1096
1097#undef __
1098#define __ compiler_->assembler()->
1099
1100void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1101 const Location source = move.src();
1102 const Location destination = move.dest();
1103
1104 if (source.IsRegister() && destination.IsRegister()) {
1105 __ xchgq(destination.reg(), source.reg());
1106 } else if (source.IsRegister() && destination.IsStackSlot()) {
1107 Exchange(source.reg(), LocationToStackSlotAddress(destination));
1108 } else if (source.IsStackSlot() && destination.IsRegister()) {
1109 Exchange(destination.reg(), LocationToStackSlotAddress(source));
1110 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1111 Exchange(LocationToStackSlotAddress(destination),
1112 LocationToStackSlotAddress(source));
1113 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1114 __ movaps(FpuTMP, source.fpu_reg());
1115 __ movaps(source.fpu_reg(), destination.fpu_reg());
1116 __ movaps(destination.fpu_reg(), FpuTMP);
1117 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1118 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1119 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1120 bool double_width =
1121 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1122 XmmRegister reg =
1123 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1124 compiler::Address slot_address =
1125 source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
1126 : LocationToStackSlotAddress(source);
1127
1128 if (double_width) {
1129 __ movsd(FpuTMP, slot_address);
1130 __ movsd(slot_address, reg);
1131 } else {
1132 __ movups(FpuTMP, slot_address);
1133 __ movups(slot_address, reg);
1134 }
1135 __ movaps(reg, FpuTMP);
1136 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1137 const compiler::Address& source_slot_address =
1138 LocationToStackSlotAddress(source);
1139 const compiler::Address& destination_slot_address =
1140 LocationToStackSlotAddress(destination);
1141
1142 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1143 __ movsd(FpuTMP, source_slot_address);
1144 __ movsd(ensure_scratch.reg(), destination_slot_address);
1145 __ movsd(destination_slot_address, FpuTMP);
1146 __ movsd(source_slot_address, ensure_scratch.reg());
1147 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1148 const compiler::Address& source_slot_address =
1149 LocationToStackSlotAddress(source);
1150 const compiler::Address& destination_slot_address =
1151 LocationToStackSlotAddress(destination);
1152
1153 ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
1154 __ movups(FpuTMP, source_slot_address);
1155 __ movups(ensure_scratch.reg(), destination_slot_address);
1156 __ movups(destination_slot_address, FpuTMP);
1157 __ movups(source_slot_address, ensure_scratch.reg());
1158 } else {
1159 UNREACHABLE();
1160 }
1161}
1162
1163void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1164 const compiler::Address& src) {
1165 __ MoveMemoryToMemory(dst, src);
1166}
1167
1168void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1169 __ Exchange(reg, mem);
1170}
1171
1172void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1173 const compiler::Address& mem2) {
1174 __ Exchange(mem1, mem2);
1175}
1176
1177void ParallelMoveEmitter::Exchange(Register reg,
1178 Register base_reg,
1179 intptr_t stack_offset) {
1180 UNREACHABLE();
1181}
1182
1183void ParallelMoveEmitter::Exchange(Register base_reg1,
1184 intptr_t stack_offset1,
1185 Register base_reg2,
1186 intptr_t stack_offset2) {
1187 UNREACHABLE();
1188}
1189
1190void ParallelMoveEmitter::SpillScratch(Register reg) {
1191 __ pushq(reg);
1192}
1193
1194void ParallelMoveEmitter::RestoreScratch(Register reg) {
1195 __ popq(reg);
1196}
1197
1198void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1199 __ AddImmediate(RSP, compiler::Immediate(-kFpuRegisterSize));
1200 __ movups(compiler::Address(RSP, 0), reg);
1201}
1202
1203void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1204 __ movups(reg, compiler::Address(RSP, 0));
1205 __ AddImmediate(RSP, compiler::Immediate(kFpuRegisterSize));
1206}
1207
1208#undef __
1209
1210} // namespace dart
1211
1212#endif // defined(TARGET_ARCH_X64)