flow_graph_compiler_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6#if defined(TARGET_ARCH_ARM64)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
10#include "vm/compiler/api/type_check_mode.h"
11#include "vm/compiler/backend/il_printer.h"
12#include "vm/compiler/backend/locations.h"
13#include "vm/compiler/ffi/native_location.h"
14#include "vm/compiler/jit/compiler.h"
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
31void FlowGraphCompiler::ArchSpecificInitialization() {
32 if (FLAG_precompiled_mode) {
33 auto object_store = isolate_group()->object_store();
34
35 const auto& stub =
36 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
37 if (CanPcRelativeCall(stub)) {
38 assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
39 const intptr_t offset_into_target =
40 Thread::WriteBarrierWrappersOffsetForRegister(reg);
41 assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
42 AddPcRelativeCallStubTarget(stub);
43 };
44 }
45
46 const auto& array_stub =
47 Code::ZoneHandle(object_store->array_write_barrier_stub());
48 if (CanPcRelativeCall(array_stub)) {
49 assembler_->generate_invoke_array_write_barrier_ = [&]() {
50 assembler_->GenerateUnRelocatedPcRelativeCall();
51 AddPcRelativeCallStubTarget(array_stub);
52 };
53 }
54 }
55}
56
57FlowGraphCompiler::~FlowGraphCompiler() {
58 // BlockInfos are zone-allocated, so their destructors are not called.
59 // Verify the labels explicitly here.
60 for (int i = 0; i < block_info_.length(); ++i) {
61 ASSERT(!block_info_[i]->jump_label()->IsLinked());
62 }
63}
64
65bool FlowGraphCompiler::SupportsUnboxedDoubles() {
66 return true;
67}
68
69bool FlowGraphCompiler::SupportsUnboxedSimd128() {
70 return FLAG_enable_simd_inline;
71}
72
73bool FlowGraphCompiler::CanConvertInt64ToDouble() {
74 return true;
75}
76
77void FlowGraphCompiler::EnterIntrinsicMode() {
78 ASSERT(!intrinsic_mode());
79 intrinsic_mode_ = true;
80 ASSERT(!assembler()->constant_pool_allowed());
81}
82
83void FlowGraphCompiler::ExitIntrinsicMode() {
84 ASSERT(intrinsic_mode());
85 intrinsic_mode_ = false;
86}
87
88TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
89 DeoptInfoBuilder* builder,
90 const Array& deopt_table) {
91 if (deopt_env_ == nullptr) {
92 ++builder->current_info_number_;
93 return TypedData::null();
94 }
95
96 AllocateOutgoingArguments(deopt_env_);
97
98 intptr_t slot_ix = 0;
99 Environment* current = deopt_env_;
100
101 // Emit all kMaterializeObject instructions describing objects to be
102 // materialized on the deoptimization as a prefix to the deoptimization info.
103 EmitMaterializations(deopt_env_, builder);
104
105 // The real frame starts here.
106 builder->MarkFrameStart();
107
108 Zone* zone = compiler->zone();
109
110 builder->AddPp(current->function(), slot_ix++);
111 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
112 builder->AddCallerFp(slot_ix++);
113 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
114
115 // Emit all values that are needed for materialization as a part of the
116 // expression stack for the bottom-most frame. This guarantees that GC
117 // will be able to find them during materialization.
118 slot_ix = builder->EmitMaterializationArguments(slot_ix);
119
120 // For the innermost environment, set outgoing arguments and the locals.
121 for (intptr_t i = current->Length() - 1;
122 i >= current->fixed_parameter_count(); i--) {
123 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
124 }
125
126 Environment* previous = current;
127 current = current->outer();
128 while (current != nullptr) {
129 builder->AddPp(current->function(), slot_ix++);
130 builder->AddPcMarker(previous->function(), slot_ix++);
131 builder->AddCallerFp(slot_ix++);
132
133 // For any outer environment the deopt id is that of the call instruction
134 // which is recorded in the outer environment.
135 builder->AddReturnAddress(current->function(),
136 DeoptId::ToDeoptAfter(current->GetDeoptId()),
137 slot_ix++);
138
139 // The values of outgoing arguments can be changed by the inlined call, so
140 // we must read them from the previous environment.
141 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
142 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
143 slot_ix++);
144 }
145
146 // Set the locals, note that outgoing arguments are not in the environment.
147 for (intptr_t i = current->Length() - 1;
148 i >= current->fixed_parameter_count(); i--) {
149 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
150 }
151
152 // Iterate on the outer environment.
153 previous = current;
154 current = current->outer();
155 }
156 // The previous pointer is now the outermost environment.
157 ASSERT(previous != nullptr);
158
159 // Add slots for the outermost environment.
160 builder->AddCallerPp(slot_ix++);
161 builder->AddPcMarker(previous->function(), slot_ix++);
162 builder->AddCallerFp(slot_ix++);
163 builder->AddCallerPc(slot_ix++);
164
165 // For the outermost environment, set the incoming arguments.
166 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
167 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
168 }
169
170 return builder->CreateDeoptInfo(deopt_table);
171}
172
173void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
174 intptr_t stub_ix) {
175 // Calls do not need stubs, they share a deoptimization trampoline.
176 ASSERT(reason() != ICData::kDeoptAtCall);
177 compiler::Assembler* assembler = compiler->assembler();
178#define __ assembler->
179 __ Comment("%s", Name());
180 __ Bind(entry_label());
181 if (FLAG_trap_on_deoptimization) {
182 __ brk(0);
183 }
184
185 ASSERT(deopt_env() != nullptr);
186 __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
187 set_pc_offset(assembler->CodeSize());
188#undef __
189}
190
191#define __ assembler->
192// Static methods of FlowGraphCompiler that take an assembler.
193
194void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
195 Register reg_to_call,
196 intptr_t sub_type_cache_index) {
197 __ LoadField(
198 TypeTestABI::kScratchReg,
199 compiler::FieldAddress(
200 reg_to_call,
201 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
202 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
203 sub_type_cache_index);
204 __ Call(TypeTestABI::kScratchReg);
205}
206
207#undef __
208#define __ assembler()->
209// Instance methods of FlowGraphCompiler.
210
211// Fall through if bool_register contains null.
212void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
213 compiler::Label* is_true,
214 compiler::Label* is_false) {
215 compiler::Label fall_through;
216 __ CompareObject(bool_register, Object::null_object());
217 __ b(&fall_through, EQ);
218 BranchLabels labels = {is_true, is_false, &fall_through};
219 Condition true_condition =
220 EmitBoolTest(bool_register, labels, /*invert=*/false);
221 ASSERT(true_condition == kInvalidCondition);
222 __ Bind(&fall_through);
223}
224
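// Emits the frame setup for a function: in code that may (re)optimize, checks
// the usage counter and jumps to the optimizing entry once the threshold is
// reached, then enters a Dart frame (or an OSR frame when compiling for OSR).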
225void FlowGraphCompiler::EmitFrameEntry() {
226 const Function& function = parsed_function().function();
227 if (CanOptimizeFunction() && function.IsOptimizable() &&
228 (!is_optimizing() || may_reoptimize())) {
229 __ Comment("Invocation Count Check");
230 const Register function_reg = R6;
231 __ ldr(function_reg,
232 compiler::FieldAddress(CODE_REG, Code::owner_offset()));
233
234 __ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
235 compiler::kFourBytes);
236 // Reoptimization of an optimized function is triggered by counting in
237 // IC stubs, but not at the entry of the function.
238 if (!is_optimizing()) {
239 __ add(R7, R7, compiler::Operand(1));
240 __ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
241 compiler::kFourBytes);
242 }
243 __ CompareImmediate(R7, GetOptimizationThreshold());
244 ASSERT(function_reg == R6);
245 compiler::Label dont_optimize;
246 __ b(&dont_optimize, LT);
247 __ ldr(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
248 __ br(TMP);
249 __ Bind(&dont_optimize);
250 }
251
252 if (flow_graph().graph_entry()->NeedsFrame()) {
253 __ Comment("Enter frame");
254 if (flow_graph().IsCompiledForOsr()) {
255 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
256 ASSERT(extra_slots >= 0);
257 __ EnterOsrFrame(extra_slots * kWordSize);
258 } else {
259 ASSERT(StackSize() >= 0);
260 __ EnterDartFrame(StackSize() * kWordSize);
261 }
262 } else if (FLAG_precompiled_mode) {
263 assembler()->set_constant_pool_allowed(true);
264 }
265}
266
267const InstructionSource& PrologueSource() {
268 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
269 /*inlining_id=*/0);
270 return prologue_source;
271}
272
273void FlowGraphCompiler::EmitPrologue() {
274 BeginCodeSourceRange(PrologueSource());
275
276 EmitFrameEntry();
277 ASSERT(assembler()->constant_pool_allowed());
278
279 // In unoptimized code, initialize (non-argument) stack allocated slots.
280 if (!is_optimizing()) {
281 const int num_locals = parsed_function().num_stack_locals();
282
283 intptr_t args_desc_slot = -1;
284 if (parsed_function().has_arg_desc_var()) {
285 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
286 parsed_function().arg_desc_var());
287 }
288
289 __ Comment("Initialize spill slots");
290 for (intptr_t i = 0; i < num_locals; ++i) {
291 const intptr_t slot_index =
292 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
293 Register value_reg =
294 slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
295 __ StoreToOffset(value_reg, FP, slot_index * kWordSize);
296 }
297 } else if (parsed_function().suspend_state_var() != nullptr &&
298 !flow_graph().IsCompiledForOsr()) {
299 // Initialize synthetic :suspend_state variable early
300 // as it may be accessed by GC and exception handling before
301 // InitSuspendableFunction stub is called.
302 const intptr_t slot_index =
303 compiler::target::frame_layout.FrameSlotForVariable(
304 parsed_function().suspend_state_var());
305 __ StoreToOffset(NULL_REG, FP, slot_index * kWordSize);
306 }
307
308 EndCodeSourceRange(PrologueSource());
309}
310
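// Calls a stub, preferring an un-relocated PC-relative call when the stub can
// be reached that way; otherwise the call goes through the object pool and is
// patchable.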
311void FlowGraphCompiler::EmitCallToStub(
312 const Code& stub,
313 ObjectPool::SnapshotBehavior snapshot_behavior) {
314 ASSERT(!stub.IsNull());
315 if (CanPcRelativeCall(stub)) {
316 __ GenerateUnRelocatedPcRelativeCall();
317 AddPcRelativeCallStubTarget(stub);
318 } else {
319 __ BranchLinkPatchable(stub,
320 CodeEntryKind::kNormal, snapshot_behavior);
321 AddStubCallTarget(stub);
322 }
323}
324
325void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
326 ASSERT(!stub.IsNull());
327 if (CanPcRelativeCall(stub)) {
328 __ GenerateUnRelocatedPcRelativeTailCall();
329 AddPcRelativeTailCallStubTarget(stub);
330 } else {
331 __ LoadObject(CODE_REG, stub);
332 __ ldr(TMP, compiler::FieldAddress(
333 CODE_REG, compiler::target::Code::entry_point_offset()));
334 __ br(TMP);
335 AddStubCallTarget(stub);
336 }
337}
338
339void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
340 ASSERT(!stub.IsNull());
341 if (CanPcRelativeCall(stub)) {
342 if (flow_graph().graph_entry()->NeedsFrame()) {
343 __ LeaveDartFrame();
344 }
345 __ GenerateUnRelocatedPcRelativeTailCall();
346 AddPcRelativeTailCallStubTarget(stub);
347#if defined(DEBUG)
348 __ Breakpoint();
349#endif
350 } else {
351 __ LoadObject(CODE_REG, stub);
352 if (flow_graph().graph_entry()->NeedsFrame()) {
353 __ LeaveDartFrame();
354 }
355 __ ldr(TMP, compiler::FieldAddress(
356 CODE_REG, compiler::target::Code::entry_point_offset()));
357 __ br(TMP);
358 AddStubCallTarget(stub);
359 }
360}
361
362void FlowGraphCompiler::GeneratePatchableCall(
363 const InstructionSource& source,
364 const Code& stub,
365 UntaggedPcDescriptors::Kind kind,
366 LocationSummary* locs,
367 ObjectPool::SnapshotBehavior snapshot_behavior) {
368 __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
369 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
370 pending_deoptimization_env_);
371}
372
373void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
374 const InstructionSource& source,
375 const Code& stub,
376 UntaggedPcDescriptors::Kind kind,
377 LocationSummary* locs,
378 Code::EntryKind entry_kind) {
379 ASSERT(CanCallDart());
380 __ BranchLinkPatchable(stub, entry_kind);
381 EmitCallsiteMetadata(source, deopt_id, kind, locs,
382 pending_deoptimization_env_);
383}
384
385void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
386 const InstructionSource& source,
387 UntaggedPcDescriptors::Kind kind,
388 LocationSummary* locs,
389 const Function& target,
390 Code::EntryKind entry_kind) {
391 ASSERT(CanCallDart());
392 if (CanPcRelativeCall(target)) {
393 __ GenerateUnRelocatedPcRelativeCall();
394 AddPcRelativeCallTarget(target, entry_kind);
395 EmitCallsiteMetadata(source, deopt_id, kind, locs,
396 pending_deoptimization_env_);
397 } else {
398 // Call sites to the same target can share object pool entries. These
399 // call sites are never patched for breakpoints: the function is deoptimized
400 // and the unoptimized code with IC calls for static calls is patched
401 // instead.
402 ASSERT(is_optimizing());
403 const auto& stub = StubCode::CallStaticFunction();
404 __ BranchLinkWithEquivalence(stub, target, entry_kind);
405 EmitCallsiteMetadata(source, deopt_id, kind, locs,
406 pending_deoptimization_env_);
407 AddStaticCallTarget(target, entry_kind);
408 }
409}
410
411void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
412 // We do not check for overflow when incrementing the edge counter. The
413 // function should normally be optimized long before the counter can
414 // overflow; and though we do not reset the counters when we optimize or
415 // deoptimize, there is a bound on the number of
416 // optimization/deoptimization cycles we will attempt.
417 ASSERT(!edge_counters_array_.IsNull());
418 ASSERT(assembler_->constant_pool_allowed());
419 __ Comment("Edge counter");
420 __ LoadObject(R0, edge_counters_array_);
421 __ LoadCompressedSmiFieldFromOffset(TMP, R0, Array::element_offset(edge_id));
422 __ add(TMP, TMP, compiler::Operand(Smi::RawValue(1)), compiler::kObjectBytes);
423 __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id),
424 compiler::kObjectBytes);
425}
426
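// IC call from optimized code: loads the top-level function (used by the IC
// stub for usage counting), the receiver, and the ICData, then calls the given
// stub and drops the call arguments.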
427void FlowGraphCompiler::EmitOptimizedInstanceCall(
428 const Code& stub,
429 const ICData& ic_data,
430 intptr_t deopt_id,
431 const InstructionSource& source,
432 LocationSummary* locs,
433 Code::EntryKind entry_kind) {
434 ASSERT(CanCallDart());
435 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
436 // Each ICData propagated from unoptimized to optimized code contains the
437 // function that corresponds to the Dart function of that IC call. Due
438 // to inlining in optimized code, that function may not correspond to the
439 // top-level function (parsed_function().function()) which could be
440 // reoptimized and whose counter needs to be incremented.
441 // Pass the function explicitly; it is used by the IC stub.
442
443 __ LoadObject(R6, parsed_function().function());
444 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
445 __ LoadUniqueObject(IC_DATA_REG, ic_data);
446 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
447 entry_kind);
448 EmitDropArguments(ic_data.SizeWithTypeArgs());
449}
450
451void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
452 const ICData& ic_data,
453 intptr_t deopt_id,
454 const InstructionSource& source,
455 LocationSummary* locs,
456 Code::EntryKind entry_kind) {
457 ASSERT(CanCallDart());
458 ASSERT(entry_kind == Code::EntryKind::kNormal ||
459 entry_kind == Code::EntryKind::kUnchecked);
460 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
461 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
462
463 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
464 const intptr_t ic_data_index =
465 op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
466 const intptr_t stub_index =
467 op.AddObject(stub, ObjectPool::Patchability::kPatchable);
468 ASSERT((ic_data_index + 1) == stub_index);
469 __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, ic_data_index);
470 const intptr_t entry_point_offset =
471 entry_kind == Code::EntryKind::kNormal
472 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
473 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
474 __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
475 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
476 pending_deoptimization_env_);
477 EmitDropArguments(ic_data.SizeWithTypeArgs());
478}
479
480void FlowGraphCompiler::EmitMegamorphicInstanceCall(
481 const String& name,
482 const Array& arguments_descriptor,
483 intptr_t deopt_id,
484 const InstructionSource& source,
485 LocationSummary* locs) {
486 ASSERT(CanCallDart());
487 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
488 ASSERT(!FLAG_precompiled_mode);
489 const ArgumentsDescriptor args_desc(arguments_descriptor);
490 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
491 zone(),
492 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
493
494 __ Comment("MegamorphicCall");
495 // Load receiver into R0.
496 __ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);
497
498 // Use same code pattern as instance call so it can be parsed by code patcher.
499 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
500 const intptr_t data_index =
501 op.AddObject(cache, ObjectPool::Patchability::kPatchable);
502 const intptr_t stub_index = op.AddObject(
503 StubCode::MegamorphicCall(), ObjectPool::Patchability::kPatchable);
504 ASSERT((data_index + 1) == stub_index);
505 __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, data_index);
506 CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
507 CODE_REG, Code::entry_point_offset(
508 Code::EntryKind::kMonomorphic))));
509 CLOBBERS_LR(__ blr(LR));
510
511 RecordSafepoint(locs);
512 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
513 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
514 if (is_optimizing()) {
515 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
516 } else {
517 // Add deoptimization continuation point after the call and before the
518 // arguments are removed.
519 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
520 }
521 RecordCatchEntryMoves(pending_deoptimization_env_);
522 EmitDropArguments(args_desc.SizeWithTypeArgs());
523}
524
525void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
526 intptr_t deopt_id,
527 const InstructionSource& source,
528 LocationSummary* locs,
529 Code::EntryKind entry_kind,
530 bool receiver_can_be_smi) {
531 ASSERT(CanCallDart());
532 ASSERT(ic_data.NumArgsTested() == 1);
533 const Code& initial_stub = StubCode::SwitchableCallMiss();
534 const char* switchable_call_mode = "smiable";
535 if (!receiver_can_be_smi) {
536 switchable_call_mode = "non-smi";
537 ic_data.set_receiver_cannot_be_smi(true);
538 }
539 const UnlinkedCall& data =
540 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
541
542 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
543
544 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
545 // Clear argument descriptor to keep gc happy when it gets pushed on to
546 // the stack.
547 __ LoadImmediate(R4, 0);
548 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
549
550 const auto snapshot_behavior =
551 FLAG_precompiled_mode ? compiler::ObjectPoolBuilderEntry::
552 kResetToSwitchableCallMissEntryPoint
553 : compiler::ObjectPoolBuilderEntry::kSnapshotable;
554 const intptr_t data_index =
555 op.AddObject(data, ObjectPool::Patchability::kPatchable);
556 const intptr_t initial_stub_index = op.AddObject(
557 initial_stub, ObjectPool::Patchability::kPatchable, snapshot_behavior);
558 ASSERT((data_index + 1) == initial_stub_index);
559
560 if (FLAG_precompiled_mode) {
561 // The AOT runtime will replace the slot in the object pool with the
562 // entrypoint address - see app_snapshot.cc.
563 CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
564 } else {
565 __ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
566 const intptr_t entry_point_offset =
567 entry_kind == Code::EntryKind::kNormal
568 ? compiler::target::Code::entry_point_offset(
569 Code::EntryKind::kMonomorphic)
570 : compiler::target::Code::entry_point_offset(
571 Code::EntryKind::kMonomorphicUnchecked);
572 CLOBBERS_LR(
573 __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset)));
574 }
575 CLOBBERS_LR(__ blr(LR));
576
577 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
578 locs, pending_deoptimization_env_);
579 EmitDropArguments(ic_data.SizeWithTypeArgs());
580}
581
582void FlowGraphCompiler::EmitUnoptimizedStaticCall(
583 intptr_t size_with_type_args,
584 intptr_t deopt_id,
585 const InstructionSource& source,
586 LocationSummary* locs,
587 const ICData& ic_data,
588 Code::EntryKind entry_kind) {
589 ASSERT(CanCallDart());
590 const Code& stub =
591 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
592 __ LoadObject(R5, ic_data);
593 GenerateDartCall(deopt_id, source, stub,
594 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
595 EmitDropArguments(size_with_type_args);
596}
597
598void FlowGraphCompiler::EmitOptimizedStaticCall(
599 const Function& function,
600 const Array& arguments_descriptor,
601 intptr_t size_with_type_args,
602 intptr_t deopt_id,
603 const InstructionSource& source,
604 LocationSummary* locs,
605 Code::EntryKind entry_kind) {
606 ASSERT(CanCallDart());
607 ASSERT(!function.IsClosureFunction());
608 if (function.PrologueNeedsArgumentsDescriptor()) {
609 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
610 } else {
611 if (!FLAG_precompiled_mode) {
612 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
613 }
614 }
615 // Do not use the code from the function, but let the code be patched so that
616 // we can record the outgoing edges to other code.
617 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
618 function, entry_kind);
619 EmitDropArguments(size_with_type_args);
620}
621
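// AOT dispatch-table call: the selector offset is added to the receiver's
// class id (already in cid_reg) and the result is used as a scaled index into
// the table pointed to by DISPATCH_TABLE_REG.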
622void FlowGraphCompiler::EmitDispatchTableCall(
623 int32_t selector_offset,
624 const Array& arguments_descriptor) {
625 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
626 ASSERT(CanCallDart());
627 ASSERT(cid_reg != ARGS_DESC_REG);
628 if (!arguments_descriptor.IsNull()) {
629 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
630 }
631 const intptr_t offset = selector_offset - DispatchTable::kOriginElement;
632 CLOBBERS_LR({
633 // Would like cid_reg to be available on entry to the target function
634 // for checking purposes.
635 ASSERT(cid_reg != LR);
636 __ AddImmediate(LR, cid_reg, offset);
637 __ Call(compiler::Address(DISPATCH_TABLE_REG, LR, UXTX,
638 compiler::Address::Scaled));
639 });
640}
641
642Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
643 Register reg,
644 const Object& obj,
645 bool needs_number_check,
646 const InstructionSource& source,
647 intptr_t deopt_id) {
648 if (needs_number_check) {
649 ASSERT(!obj.IsMint() && !obj.IsDouble());
650 __ LoadObject(TMP, obj);
651 __ PushPair(TMP, reg);
652 if (is_optimizing()) {
653 // No breakpoints in optimized code.
654 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
655 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
656 } else {
657 // Patchable to support breakpoints.
658 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
659 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
660 source);
661 }
662 // Stub returns result in flags (result of a cmp, we need Z computed).
663 // Discard constant.
664 // Restore 'reg'.
665 __ PopPair(ZR, reg);
666 } else {
667 __ CompareObject(reg, obj);
668 }
669 return EQ;
670}
671
672Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
673 Register left,
674 Register right,
675 bool needs_number_check,
676 const InstructionSource& source,
677 intptr_t deopt_id) {
678 if (needs_number_check) {
679 __ PushPair(right, left);
680 if (is_optimizing()) {
681 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
682 } else {
683 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
684 }
685 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
686 // Stub returns result in flags (result of a cmp, we need Z computed).
687 __ PopPair(right, left);
688 } else {
689 __ CompareObjectRegisters(left, right);
690 }
691 return EQ;
692}
693
694Condition FlowGraphCompiler::EmitBoolTest(Register value,
695 BranchLabels labels,
696 bool invert) {
697 __ Comment("BoolTest");
698 if (labels.true_label == nullptr || labels.false_label == nullptr) {
699 __ tsti(value, compiler::Immediate(
700 compiler::target::ObjectAlignment::kBoolValueMask));
701 return invert ? NE : EQ;
702 }
703 const intptr_t bool_bit =
704 compiler::target::ObjectAlignment::kBoolValueBitPosition;
705 if (labels.fall_through == labels.false_label) {
706 if (invert) {
707 __ tbnz(labels.true_label, value, bool_bit);
708 } else {
709 __ tbz(labels.true_label, value, bool_bit);
710 }
711 } else {
712 if (invert) {
713 __ tbz(labels.false_label, value, bool_bit);
714 } else {
715 __ tbnz(labels.false_label, value, bool_bit);
716 }
717 if (labels.fall_through != labels.true_label) {
718 __ b(labels.true_label);
719 }
720 }
721 return kInvalidCondition;
722}
723
724// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
725// FlowGraphCompiler::SlowPathEnvironmentFor.
726void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
727#if defined(DEBUG)
728 locs->CheckWritableInputs();
729 ClobberDeadTempRegisters(locs);
730#endif
731 // TODO(vegorov): consider saving only caller save (volatile) registers.
732 __ PushRegisters(*locs->live_registers());
733}
734
735void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
736 __ PopRegisters(*locs->live_registers());
737}
738
739#if defined(DEBUG)
740void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
741 // Clobber temporaries that have not been manually preserved.
742 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
743 Location tmp = locs->temp(i);
744 // TODO(zerny): clobber non-live temporary FPU registers.
745 if (tmp.IsRegister() &&
746 !locs->live_registers()->ContainsRegister(tmp.reg())) {
747 __ movz(tmp.reg(), compiler::Immediate(0xf7), 0);
748 }
749 }
750}
751#endif
752
753Register FlowGraphCompiler::EmitTestCidRegister() {
754 return R2;
755}
756
757void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
758 intptr_t count_without_type_args,
759 const Array& arguments_descriptor) {
760 __ Comment("EmitTestAndCall");
761 // Load receiver into R0.
762 __ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
763 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
764}
765
766void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
767 bool if_smi) {
768 if (if_smi) {
769 __ BranchIfSmi(R0, label);
770 } else {
771 __ BranchIfNotSmi(R0, label);
772 }
773}
774
775void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
776 ASSERT(class_id_reg != R0);
777 __ LoadClassId(class_id_reg, R0);
778}
779
780void FlowGraphCompiler::EmitMove(Location destination,
781 Location source,
782 TemporaryRegisterAllocator* allocator) {
783 if (destination.Equals(source)) return;
784
785 if (source.IsRegister()) {
786 if (destination.IsRegister()) {
787 __ mov(destination.reg(), source.reg());
788 } else {
789 ASSERT(destination.IsStackSlot());
790 const intptr_t dest_offset = destination.ToStackSlotOffset();
791 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
792 }
793 } else if (source.IsStackSlot()) {
794 if (destination.IsRegister()) {
795 const intptr_t source_offset = source.ToStackSlotOffset();
796 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
797 } else if (destination.IsFpuRegister()) {
798 const intptr_t src_offset = source.ToStackSlotOffset();
799 VRegister dst = destination.fpu_reg();
800 __ LoadDFromOffset(dst, source.base_reg(), src_offset);
801 } else {
802 ASSERT(destination.IsStackSlot());
803 const intptr_t source_offset = source.ToStackSlotOffset();
804 const intptr_t dest_offset = destination.ToStackSlotOffset();
805 Register tmp = allocator->AllocateTemporary();
806 __ LoadFromOffset(tmp, source.base_reg(), source_offset);
807 __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
808 allocator->ReleaseTemporary();
809 }
810 } else if (source.IsFpuRegister()) {
811 if (destination.IsFpuRegister()) {
812 __ vmov(destination.fpu_reg(), source.fpu_reg());
813 } else {
814 if (destination.IsStackSlot() /*32-bit float*/ ||
815 destination.IsDoubleStackSlot()) {
816 const intptr_t dest_offset = destination.ToStackSlotOffset();
817 VRegister src = source.fpu_reg();
818 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
819 } else {
820 ASSERT(destination.IsQuadStackSlot());
821 const intptr_t dest_offset = destination.ToStackSlotOffset();
822 __ StoreQToOffset(source.fpu_reg(), destination.base_reg(),
823 dest_offset);
824 }
825 }
826 } else if (source.IsDoubleStackSlot()) {
827 if (destination.IsFpuRegister()) {
828 const intptr_t source_offset = source.ToStackSlotOffset();
829 const VRegister dst = destination.fpu_reg();
830 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
831 } else {
832 ASSERT(destination.IsDoubleStackSlot() ||
833 destination.IsStackSlot() /*32-bit float*/);
834 const intptr_t source_offset = source.ToStackSlotOffset();
835 const intptr_t dest_offset = destination.ToStackSlotOffset();
836 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
837 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
838 }
839 } else if (source.IsQuadStackSlot()) {
840 if (destination.IsFpuRegister()) {
841 const intptr_t source_offset = source.ToStackSlotOffset();
842 __ LoadQFromOffset(destination.fpu_reg(), source.base_reg(),
843 source_offset);
844 } else {
845 ASSERT(destination.IsQuadStackSlot());
846 const intptr_t source_offset = source.ToStackSlotOffset();
847 const intptr_t dest_offset = destination.ToStackSlotOffset();
848 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
849 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
850 }
851 } else {
852 ASSERT(source.IsConstant());
853 if (destination.IsStackSlot()) {
854 Register tmp = allocator->AllocateTemporary();
855 source.constant_instruction()->EmitMoveToLocation(this, destination, tmp);
856 allocator->ReleaseTemporary();
857 } else {
858 source.constant_instruction()->EmitMoveToLocation(this, destination);
859 }
860 }
861}
862
863static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
864 switch (bytes) {
865 case 8:
866 return compiler::OperandSize::kEightBytes;
867 case 4:
868 return compiler::OperandSize::kFourBytes;
869 case 2:
870 return compiler::OperandSize::kTwoBytes;
871 case 1:
872 return compiler::OperandSize::kByte;
873 default:
874 UNREACHABLE();
875 }
876}
877
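// Moves a primitive value between native locations (core registers, FPU
// registers, and stack slots) for FFI, sign- or zero-extending when the
// destination payload is wider than the source.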
878void FlowGraphCompiler::EmitNativeMoveArchitecture(
879 const compiler::ffi::NativeLocation& destination,
880 const compiler::ffi::NativeLocation& source) {
881 const auto& src_payload_type = source.payload_type();
882 const auto& dst_payload_type = destination.payload_type();
883 const auto& src_container_type = source.container_type();
884 const auto& dst_container_type = destination.container_type();
885 ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
886 ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
887 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
888 ASSERT(src_payload_type.IsPrimitive());
889 ASSERT(dst_payload_type.IsPrimitive());
890 const intptr_t src_size = src_payload_type.SizeInBytes();
891 const intptr_t dst_size = dst_payload_type.SizeInBytes();
892 const bool sign_or_zero_extend = dst_size > src_size;
893
894 if (source.IsRegisters()) {
895 const auto& src = source.AsRegisters();
896 ASSERT(src.num_regs() == 1);
897 const auto src_reg = src.reg_at(0);
898
899 if (destination.IsRegisters()) {
900 const auto& dst = destination.AsRegisters();
901 ASSERT(dst.num_regs() == 1);
902 const auto dst_reg = dst.reg_at(0);
903 ASSERT(destination.container_type().SizeInBytes() <= 8);
904 if (!sign_or_zero_extend) {
905 __ MoveRegister(dst_reg, src_reg);
906 } else {
907 if (src_payload_type.IsSigned()) {
908 __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
909 } else {
910 __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
911 }
912 }
913
914 } else if (destination.IsFpuRegisters()) {
915 // Fpu Registers should only contain doubles and registers only ints.
916 UNIMPLEMENTED();
917
918 } else {
919 ASSERT(destination.IsStack());
920 const auto& dst = destination.AsStack();
921 ASSERT(!sign_or_zero_extend);
922 auto const op_size =
923 BytesToOperandSize(destination.container_type().SizeInBytes());
924 __ StoreToOffset(src.reg_at(0), dst.base_register(),
925 dst.offset_in_bytes(), op_size);
926 }
927
928 } else if (source.IsFpuRegisters()) {
929 const auto& src = source.AsFpuRegisters();
930 // We have not implemented conversions here, use IL convert instructions.
931 ASSERT(src_payload_type.Equals(dst_payload_type));
932
933 if (destination.IsRegisters()) {
934 // Fpu Registers should only contain doubles and registers only ints.
935 UNIMPLEMENTED();
936
937 } else if (destination.IsFpuRegisters()) {
938 const auto& dst = destination.AsFpuRegisters();
939 __ vmov(dst.fpu_reg(), src.fpu_reg());
940
941 } else {
942 ASSERT(destination.IsStack());
943 ASSERT(src_payload_type.IsFloat());
944 const auto& dst = destination.AsStack();
945 switch (dst_size) {
946 case 8:
947 __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
948 dst.offset_in_bytes());
949 return;
950 case 4:
951 __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
952 dst.offset_in_bytes());
953 return;
954 default:
955 UNREACHABLE();
956 }
957 }
958
959 } else {
960 ASSERT(source.IsStack());
961 const auto& src = source.AsStack();
962 if (destination.IsRegisters()) {
963 const auto& dst = destination.AsRegisters();
964 ASSERT(dst.num_regs() == 1);
965 const auto dst_reg = dst.reg_at(0);
966 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
967 src_payload_type.AsPrimitive().representation());
968 } else if (destination.IsFpuRegisters()) {
969 ASSERT(src_payload_type.Equals(dst_payload_type));
970 ASSERT(src_payload_type.IsFloat());
971 const auto& dst = destination.AsFpuRegisters();
972 switch (src_size) {
973 case 8:
974 __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
975 src.offset_in_bytes());
976 return;
977 case 4:
978 __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
979 src.offset_in_bytes());
980 return;
981 default:
982 UNREACHABLE();
983 }
984
985 } else {
986 ASSERT(destination.IsStack());
987 UNREACHABLE();
988 }
989 }
990}
991
992void FlowGraphCompiler::EmitNativeLoad(Register dst,
993 Register base,
994 intptr_t offset,
995 compiler::ffi::PrimitiveType type) {
996 switch (type) {
997 case compiler::ffi::kInt8:
998 __ LoadFromOffset(dst, base, offset, compiler::kByte);
999 break;
1000 case compiler::ffi::kUint8:
1001 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
1002 break;
1003 case compiler::ffi::kInt16:
1004 __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
1005 break;
1006 case compiler::ffi::kUint16:
1007 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1008 break;
1009 case compiler::ffi::kInt32:
1010 __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
1011 break;
1012 case compiler::ffi::kUint32:
1013 case compiler::ffi::kFloat:
1014 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1015 break;
1016 case compiler::ffi::kInt64:
1017 case compiler::ffi::kUint64:
1018 case compiler::ffi::kDouble:
1019 __ LoadFromOffset(dst, base, offset, compiler::kEightBytes);
1020 break;
1021
1022 case compiler::ffi::kInt24:
1023 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1024 __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1025 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1026 break;
1027 case compiler::ffi::kUint24:
1028 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1029 __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1030 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1031 break;
1032 case compiler::ffi::kInt40:
1033 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1034 __ LoadFromOffset(TMP, base, offset + 4, compiler::kByte);
1035 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1036 break;
1037 case compiler::ffi::kUint40:
1038 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1039 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedByte);
1040 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1041 break;
1042 case compiler::ffi::kInt48:
1043 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1044 __ LoadFromOffset(TMP, base, offset + 4, compiler::kTwoBytes);
1045 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1046 break;
1047 case compiler::ffi::kUint48:
1048 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1049 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1050 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1051 break;
1052 case compiler::ffi::kInt56:
1053 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1054 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1055 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1056 __ LoadFromOffset(TMP, base, offset + 6, compiler::kByte);
1057 __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
1058 break;
1059 case compiler::ffi::kUint56:
1060 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1061 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1062 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1063 __ LoadFromOffset(TMP, base, offset + 6, compiler::kUnsignedByte);
1064 __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
1065 break;
1066 default:
1067 UNREACHABLE();
1068 }
1069}
1070
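// Resolves a BSS relocation at run time: recovers the start of the BSS section
// from the PC-relative relocation emitted inline, then loads the routine
// address stored at that location.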
1071void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1072 Register dst,
1073 Register tmp) {
1074 compiler::Label skip_reloc;
1075 __ b(&skip_reloc);
1076 InsertBSSRelocation(relocation);
1077 __ Bind(&skip_reloc);
1078
1079 __ adr(tmp, compiler::Immediate(-compiler::target::kWordSize));
1080
1081 // tmp holds the address of the relocation.
1082 __ ldr(dst, compiler::Address(tmp));
1083
1084 // dst holds the relocation itself: tmp - bss_start.
1085 // tmp = tmp + (bss_start - tmp) = bss_start
1086 __ add(tmp, tmp, compiler::Operand(dst));
1087
1088 // tmp holds the start of the BSS section.
1089 // Load the "get-thread" routine: *bss_start.
1090 __ ldr(dst, compiler::Address(tmp));
1091}
1092
1093#undef __
1094#define __ compiler_->assembler()->
1095
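// Swaps the contents of two locations (registers, FPU registers, or stack
// slots) using TMP/VTMP and, for memory-to-memory swaps, scratch registers.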
1096void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1097 const Location source = move.src();
1098 const Location destination = move.dest();
1099
1100 if (source.IsRegister() && destination.IsRegister()) {
1101 ASSERT(source.reg() != TMP);
1102 ASSERT(destination.reg() != TMP);
1103 __ mov(TMP, source.reg());
1104 __ mov(source.reg(), destination.reg());
1105 __ mov(destination.reg(), TMP);
1106 } else if (source.IsRegister() && destination.IsStackSlot()) {
1107 Exchange(source.reg(), destination.base_reg(),
1108 destination.ToStackSlotOffset());
1109 } else if (source.IsStackSlot() && destination.IsRegister()) {
1110 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1111 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1112 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1113 destination.base_reg(), destination.ToStackSlotOffset());
1114 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1115 const VRegister dst = destination.fpu_reg();
1116 const VRegister src = source.fpu_reg();
1117 __ vmov(VTMP, src);
1118 __ vmov(src, dst);
1119 __ vmov(dst, VTMP);
1120 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1121 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1122 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1123 bool double_width =
1124 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1125 VRegister reg =
1126 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1127 Register base_reg =
1128 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1129 const intptr_t slot_offset = source.IsFpuRegister()
1130 ? destination.ToStackSlotOffset()
1131 : source.ToStackSlotOffset();
1132
1133 if (double_width) {
1134 __ LoadDFromOffset(VTMP, base_reg, slot_offset);
1135 __ StoreDToOffset(reg, base_reg, slot_offset);
1136 __ fmovdd(reg, VTMP);
1137 } else {
1138 __ LoadQFromOffset(VTMP, base_reg, slot_offset);
1139 __ StoreQToOffset(reg, base_reg, slot_offset);
1140 __ vmov(reg, VTMP);
1141 }
1142 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1143 const intptr_t source_offset = source.ToStackSlotOffset();
1144 const intptr_t dest_offset = destination.ToStackSlotOffset();
1145
1146 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1147 VRegister scratch = ensure_scratch.reg();
1148 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
1149 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1150 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
1151 __ StoreDToOffset(scratch, source.base_reg(), source_offset);
1152 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1153 const intptr_t source_offset = source.ToStackSlotOffset();
1154 const intptr_t dest_offset = destination.ToStackSlotOffset();
1155
1156 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1157 VRegister scratch = ensure_scratch.reg();
1158 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
1159 __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset);
1160 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
1161 __ StoreQToOffset(scratch, source.base_reg(), source_offset);
1162 } else {
1163 UNREACHABLE();
1164 }
1165}
1166
1167void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1168 const compiler::Address& src) {
1169 UNREACHABLE();
1170}
1171
1172// Do not call or implement this function. Instead, use the form below that
1173// uses an offset from the frame pointer instead of an Address.
1174void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1175 UNREACHABLE();
1176}
1177
1178// Do not call or implement this function. Instead, use the form below that
1179// uses offsets from the frame pointer instead of Addresses.
1180void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1181 const compiler::Address& mem2) {
1182 UNREACHABLE();
1183}
1184
1185void ParallelMoveEmitter::Exchange(Register reg,
1186 Register base_reg,
1187 intptr_t stack_offset) {
1188 ScratchRegisterScope tmp(this, reg);
1189 __ mov(tmp.reg(), reg);
1190 __ LoadFromOffset(reg, base_reg, stack_offset);
1191 __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
1192}
1193
1194void ParallelMoveEmitter::Exchange(Register base_reg1,
1195 intptr_t stack_offset1,
1196 Register base_reg2,
1197 intptr_t stack_offset2) {
1198 ScratchRegisterScope tmp1(this, kNoRegister);
1199 ScratchRegisterScope tmp2(this, tmp1.reg());
1200 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1201 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1202 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1203 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1204}
1205
1206void ParallelMoveEmitter::SpillScratch(Register reg) {
1207 __ Push(reg);
1208}
1209
1210void ParallelMoveEmitter::RestoreScratch(Register reg) {
1211 __ Pop(reg);
1212}
1213
1214void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1215 __ PushQuad(reg);
1216}
1217
1218void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1219 __ PopQuad(reg);
1220}
1221
1222#undef __
1223
1224} // namespace dart
1225
1226#endif // defined(TARGET_ARCH_ARM64)