flow_graph_compiler_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
17#include "vm/deopt_instructions.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DEFINE_FLAG(bool, unbox_doubles, true, "Optimize double arithmetic.");
30DECLARE_FLAG(bool, enable_simd_inline);
31
32void FlowGraphCompiler::ArchSpecificInitialization() {
33 if (FLAG_precompiled_mode) {
34 auto object_store = isolate_group()->object_store();
35
36 const auto& stub =
37 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
38 if (CanPcRelativeCall(stub)) {
39 assembler_->generate_invoke_write_barrier_wrapper_ =
40 [&](Condition condition, Register reg) {
41 const intptr_t offset_into_target =
42 Thread::WriteBarrierWrappersOffsetForRegister(reg);
43 assembler_->GenerateUnRelocatedPcRelativeCall(condition,
44 offset_into_target);
45 AddPcRelativeCallStubTarget(stub);
46 };
47 }
48
49 const auto& array_stub =
50 Code::ZoneHandle(object_store->array_write_barrier_stub());
51 if (CanPcRelativeCall(array_stub)) {
52 assembler_->generate_invoke_array_write_barrier_ =
53 [&](Condition condition) {
54 assembler_->GenerateUnRelocatedPcRelativeCall(condition);
55 AddPcRelativeCallStubTarget(array_stub);
56 };
57 }
58 }
59}
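// In precompiled (AOT) mode the callbacks installed above let the assembler
// reach the shared write-barrier stubs through un-relocated PC-relative BL
// instructions; the matching AddPcRelativeCallStubTarget() call records the
// stub so the relocation pass can patch the final displacement into each
// call site.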
60
61FlowGraphCompiler::~FlowGraphCompiler() {
62 // BlockInfos are zone-allocated, so their destructors are not called.
63 // Verify the labels explicitly here.
64 for (int i = 0; i < block_info_.length(); ++i) {
65 ASSERT(!block_info_[i]->jump_label()->IsLinked());
66 }
67}
68
69bool FlowGraphCompiler::SupportsUnboxedDoubles() {
70 return FLAG_unbox_doubles;
71}
72
73bool FlowGraphCompiler::SupportsUnboxedSimd128() {
74 return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline;
75}
76
77bool FlowGraphCompiler::CanConvertInt64ToDouble() {
78 // ARM does not have a short instruction sequence for converting int64 to
79 // double.
80 return false;
81}
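// (ARM's VFP vcvt instructions convert from 32-bit integers held in S
// registers, so an int64-to-double conversion needs a multi-instruction
// sequence rather than a single instruction; hence `false` here.)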
82
83void FlowGraphCompiler::EnterIntrinsicMode() {
84 ASSERT(!intrinsic_mode());
85 intrinsic_mode_ = true;
86 ASSERT(!assembler()->constant_pool_allowed());
87}
88
89void FlowGraphCompiler::ExitIntrinsicMode() {
90 ASSERT(intrinsic_mode());
91 intrinsic_mode_ = false;
92}
93
94TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
95 DeoptInfoBuilder* builder,
96 const Array& deopt_table) {
97 if (deopt_env_ == nullptr) {
98 ++builder->current_info_number_;
99 return TypedData::null();
100 }
101
102 AllocateOutgoingArguments(deopt_env_);
103
104 intptr_t slot_ix = 0;
105 Environment* current = deopt_env_;
106
107 // Emit all kMaterializeObject instructions describing objects to be
108 // materialized during deoptimization, as a prefix to the deoptimization info.
109 EmitMaterializations(deopt_env_, builder);
110
111 // The real frame starts here.
112 builder->MarkFrameStart();
113
114 Zone* zone = compiler->zone();
115
116 builder->AddPp(current->function(), slot_ix++);
117 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
118 builder->AddCallerFp(slot_ix++);
119 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
120
121 // Emit all values that are needed for materialization as a part of the
122 // expression stack for the bottom-most frame. This guarantees that GC
123 // will be able to find them during materialization.
124 slot_ix = builder->EmitMaterializationArguments(slot_ix);
125
126 // For the innermost environment, set outgoing arguments and the locals.
127 for (intptr_t i = current->Length() - 1;
128 i >= current->fixed_parameter_count(); i--) {
129 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
130 }
131
132 Environment* previous = current;
133 current = current->outer();
134 while (current != nullptr) {
135 builder->AddPp(current->function(), slot_ix++);
136 builder->AddPcMarker(previous->function(), slot_ix++);
137 builder->AddCallerFp(slot_ix++);
138
139 // For any outer environment the deopt id is that of the call instruction
140 // which is recorded in the outer environment.
141 builder->AddReturnAddress(current->function(),
142 DeoptId::ToDeoptAfter(current->GetDeoptId()),
143 slot_ix++);
144
145 // The values of outgoing arguments can be changed from the inlined call so
146 // we must read them from the previous environment.
147 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
148 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
149 slot_ix++);
150 }
151
152 // Set the locals, note that outgoing arguments are not in the environment.
153 for (intptr_t i = current->Length() - 1;
154 i >= current->fixed_parameter_count(); i--) {
155 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
156 }
157
158 // Iterate on the outer environment.
159 previous = current;
160 current = current->outer();
161 }
162 // The previous pointer is now the outermost environment.
163 ASSERT(previous != nullptr);
164
165 // Set slots for the outermost environment.
166 builder->AddCallerPp(slot_ix++);
167 builder->AddPcMarker(previous->function(), slot_ix++);
168 builder->AddCallerFp(slot_ix++);
169 builder->AddCallerPc(slot_ix++);
170
171 // For the outermost environment, set the incoming arguments.
172 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
173 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
174 }
175
176 return builder->CreateDeoptInfo(deopt_table);
177}
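// The deoptimization info built above lists, per frame and in slot order:
// PP, a PC marker, the caller's FP and the return address, followed by the
// frame's expression stack and locals (with the materialization arguments
// first for the innermost frame). For the outermost frame the caller's PP,
// FP and PC are recorded instead, together with the incoming arguments.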
178
179void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
180 intptr_t stub_ix) {
181 // Calls do not need stubs, they share a deoptimization trampoline.
182 ASSERT(reason() != ICData::kDeoptAtCall);
183 compiler::Assembler* assembler = compiler->assembler();
184#define __ assembler->
185 __ Comment("%s", Name());
186 __ Bind(entry_label());
187 if (FLAG_trap_on_deoptimization) {
188 __ bkpt(0);
189 }
190
191 ASSERT(deopt_env() != nullptr);
192 __ Call(compiler::Address(
193 THR, compiler::target::Thread::deoptimize_entry_offset()));
194 set_pc_offset(assembler->CodeSize());
195#undef __
196}
197
198#define __ assembler->
199// Static methods of FlowGraphCompiler that take an assembler.
200
201void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
202 Register reg_to_call,
203 intptr_t sub_type_cache_index) {
204 __ LoadField(
205 TypeTestABI::kScratchReg,
206 compiler::FieldAddress(
207 reg_to_call,
208 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
209 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
210 sub_type_cache_index);
211 __ blx(TypeTestABI::kScratchReg);
212}
213
214#undef __
215#define __ assembler()->
216// Instance methods of FlowGraphCompiler.
217
218// Fall through if bool_register contains null.
219void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
220 compiler::Label* is_true,
221 compiler::Label* is_false) {
222 compiler::Label fall_through;
223 __ CompareObject(bool_register, Object::null_object());
224 __ b(&fall_through, EQ);
225 BranchLabels labels = {is_true, is_false, &fall_through};
226 Condition true_condition =
227 EmitBoolTest(bool_register, labels, /*invert=*/false);
228 ASSERT(true_condition != kInvalidCondition);
229 __ b(is_true, true_condition);
230 __ b(is_false);
231 __ Bind(&fall_through);
232}
233
234void FlowGraphCompiler::EmitFrameEntry() {
235 const Function& function = parsed_function().function();
236 if (CanOptimizeFunction() && function.IsOptimizable() &&
237 (!is_optimizing() || may_reoptimize())) {
238 __ Comment("Invocation Count Check");
239 const Register function_reg = R8;
240 __ ldr(function_reg, compiler::FieldAddress(
241 CODE_REG, compiler::target::Code::owner_offset()));
242 __ ldr(R3, compiler::FieldAddress(
243 function_reg,
244 compiler::target::Function::usage_counter_offset()));
245 // Reoptimization of an optimized function is triggered by counting in
246 // IC stubs, but not at the entry of the function.
247 if (!is_optimizing()) {
248 __ add(R3, R3, compiler::Operand(1));
249 __ str(R3, compiler::FieldAddress(
250 function_reg,
251 compiler::target::Function::usage_counter_offset()));
252 }
253 __ CompareImmediate(R3, GetOptimizationThreshold());
254 ASSERT(function_reg == R8);
255 __ Branch(compiler::Address(
256 THR, compiler::target::Thread::optimize_entry_offset()),
257 GE);
258 }
259
260 if (flow_graph().graph_entry()->NeedsFrame()) {
261 __ Comment("Enter frame");
262 if (flow_graph().IsCompiledForOsr()) {
263 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
264 ASSERT(extra_slots >= 0);
265 __ EnterOsrFrame(extra_slots * compiler::target::kWordSize);
266 } else {
267 ASSERT(StackSize() >= 0);
268 __ EnterDartFrame(StackSize() * compiler::target::kWordSize);
269 }
270 } else if (FLAG_precompiled_mode) {
271 assembler()->set_constant_pool_allowed(true);
272 }
273}
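// Roughly: unoptimized (or re-optimizable) code increments
// Function::usage_counter on entry and tail-branches to the Thread's
// optimize entry once the counter reaches GetOptimizationThreshold();
// otherwise a regular Dart frame (or an OSR frame) is entered.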
274
275const InstructionSource& PrologueSource() {
276 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
277 /*inlining_id=*/0);
278 return prologue_source;
279}
280
281void FlowGraphCompiler::EmitPrologue() {
282 BeginCodeSourceRange(PrologueSource());
283
284 EmitFrameEntry();
285 ASSERT(assembler()->constant_pool_allowed());
286
287 // In unoptimized code, initialize (non-argument) stack allocated slots.
288 if (!is_optimizing()) {
289 const int num_locals = parsed_function().num_stack_locals();
290
291 intptr_t args_desc_slot = -1;
292 if (parsed_function().has_arg_desc_var()) {
293 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
294 parsed_function().arg_desc_var());
295 }
296
297 __ Comment("Initialize spill slots");
298 if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
299 __ LoadObject(R0, Object::null_object());
300 }
301 for (intptr_t i = 0; i < num_locals; ++i) {
302 const intptr_t slot_index =
303 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
304 Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
305 __ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
306 }
307 } else if (parsed_function().suspend_state_var() != nullptr &&
308 !flow_graph().IsCompiledForOsr()) {
309 // Initialize synthetic :suspend_state variable early
310 // as it may be accessed by GC and exception handling before
311 // InitSuspendableFunction stub is called.
312 const intptr_t slot_index =
313 compiler::target::frame_layout.FrameSlotForVariable(
314 parsed_function().suspend_state_var());
315 __ LoadObject(R0, Object::null_object());
316 __ StoreToOffset(R0, FP, slot_index * compiler::target::kWordSize);
317 }
318
319 EndCodeSourceRange(PrologueSource());
320}
321
322void FlowGraphCompiler::EmitCallToStub(
323 const Code& stub,
324 ObjectPool::SnapshotBehavior snapshot_behavior) {
325 ASSERT(!stub.IsNull());
326 if (CanPcRelativeCall(stub)) {
327 __ GenerateUnRelocatedPcRelativeCall();
328 AddPcRelativeCallStubTarget(stub);
329 } else {
330 __ BranchLinkPatchable(stub,
331 CodeEntryKind::kNormal, snapshot_behavior);
332 AddStubCallTarget(stub);
333 }
334}
335
336void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
337 ASSERT(!stub.IsNull());
338 if (CanPcRelativeCall(stub)) {
339 __ GenerateUnRelocatedPcRelativeTailCall();
340 AddPcRelativeTailCallStubTarget(stub);
341 } else {
342 __ LoadObject(CODE_REG, stub);
343 __ ldr(PC, compiler::FieldAddress(
344 CODE_REG, compiler::target::Code::entry_point_offset()));
345 AddStubCallTarget(stub);
346 }
347}
348
349void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
350 ASSERT(!stub.IsNull());
351 if (CanPcRelativeCall(stub)) {
352 if (flow_graph().graph_entry()->NeedsFrame()) {
353 __ LeaveDartFrame();
354 }
355 __ GenerateUnRelocatedPcRelativeTailCall();
356 AddPcRelativeTailCallStubTarget(stub);
357#if defined(DEBUG)
358 __ Breakpoint();
359#endif
360 } else {
361 __ LoadObject(CODE_REG, stub);
362 if (flow_graph().graph_entry()->NeedsFrame()) {
363 __ LeaveDartFrame();
364 }
365 __ ldr(PC, compiler::FieldAddress(
366 CODE_REG, compiler::target::Code::entry_point_offset()));
367 AddStubCallTarget(stub);
368 }
369}
370
371void FlowGraphCompiler::GeneratePatchableCall(
372 const InstructionSource& source,
373 const Code& stub,
374 UntaggedPcDescriptors::Kind kind,
375 LocationSummary* locs,
376 ObjectPool::SnapshotBehavior snapshot_behavior) {
377 __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
378 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
379 pending_deoptimization_env_);
380}
381
382void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
383 const InstructionSource& source,
384 const Code& stub,
385 UntaggedPcDescriptors::Kind kind,
386 LocationSummary* locs,
387 Code::EntryKind entry_kind) {
388 ASSERT(CanCallDart());
389 __ BranchLinkPatchable(stub, entry_kind);
390 EmitCallsiteMetadata(source, deopt_id, kind, locs,
391 pending_deoptimization_env_);
392}
393
394void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
395 const InstructionSource& source,
396 UntaggedPcDescriptors::Kind kind,
397 LocationSummary* locs,
398 const Function& target,
399 Code::EntryKind entry_kind) {
400 ASSERT(CanCallDart());
401 if (CanPcRelativeCall(target)) {
402 __ GenerateUnRelocatedPcRelativeCall();
403 AddPcRelativeCallTarget(target, entry_kind);
404 EmitCallsiteMetadata(source, deopt_id, kind, locs,
405 pending_deoptimization_env_);
406 } else {
408 // Call sites to the same target can share object pool entries. These
409 // call sites are never patched for breakpoints: the function is deoptimized
410 // and the unoptimized code with IC calls for static calls is patched
411 // instead.
412 const auto& stub = StubCode::CallStaticFunction();
413 __ BranchLinkWithEquivalence(stub, target, entry_kind);
414 EmitCallsiteMetadata(source, deopt_id, kind, locs,
415 pending_deoptimization_env_);
416 AddStaticCallTarget(target, entry_kind);
417 }
418}
419
420void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
421 // We do not check for overflow when incrementing the edge counter. The
422 // function should normally be optimized long before the counter can
423 // overflow; and though we do not reset the counters when we optimize or
424 // deoptimize, there is a bound on the number of
425 // optimization/deoptimization cycles we will attempt.
426 ASSERT(!edge_counters_array_.IsNull());
427 ASSERT(assembler_->constant_pool_allowed());
428 __ Comment("Edge counter");
429 __ LoadObject(R0, edge_counters_array_);
430#if defined(DEBUG)
431 bool old_use_far_branches = assembler_->use_far_branches();
432 assembler_->set_use_far_branches(true);
433#endif // DEBUG
434 __ LoadFieldFromOffset(R1, R0,
435 compiler::target::Array::element_offset(edge_id));
436 __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
437 __ StoreIntoObjectOffsetNoBarrier(
438 R0, compiler::target::Array::element_offset(edge_id), R1);
439#if defined(DEBUG)
440 assembler_->set_use_far_branches(old_use_far_branches);
441#endif // DEBUG
442}
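// The emitted sequence is approximately:
//   LoadObject R0, <edge counters array>
//   ldr  R1, [R0 + element_offset(edge_id)]
//   add  R1, R1, #2                           ; Smi-encoded 1: Smi::RawValue(1) == 2
//   str  R1, [R0 + element_offset(edge_id)]   ; no write barrier: Smis never need one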
443
444void FlowGraphCompiler::EmitOptimizedInstanceCall(
445 const Code& stub,
446 const ICData& ic_data,
447 intptr_t deopt_id,
448 const InstructionSource& source,
449 LocationSummary* locs,
450 Code::EntryKind entry_kind) {
451 ASSERT(CanCallDart());
452 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
453 // Each ICData propagated from unoptimized to optimized code contains the
454 // function that corresponds to the Dart function of that IC call. Due
455 // to inlining in optimized code, that function may not correspond to the
456 // top-level function (parsed_function().function()) which could be
457 // reoptimized and whose counter needs to be incremented.
458 // Pass the function explicitly; it is used by the IC stub.
459
460 __ LoadObject(R8, parsed_function().function());
461 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
462 __ LoadUniqueObject(IC_DATA_REG, ic_data);
463 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
464 entry_kind);
465 EmitDropArguments(ic_data.SizeWithTypeArgs());
466}
467
468void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
469 const ICData& ic_data,
470 intptr_t deopt_id,
471 const InstructionSource& source,
472 LocationSummary* locs,
473 Code::EntryKind entry_kind) {
474 ASSERT(CanCallDart());
475 ASSERT(entry_kind == Code::EntryKind::kNormal ||
476 entry_kind == Code::EntryKind::kUnchecked);
477 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
478 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
479 __ LoadUniqueObject(IC_DATA_REG, ic_data);
480 __ LoadUniqueObject(CODE_REG, stub);
481 const intptr_t entry_point_offset =
482 entry_kind == Code::EntryKind::kNormal
483 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
484 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
485 __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
486 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
487 pending_deoptimization_env_);
488 EmitDropArguments(ic_data.SizeWithTypeArgs());
489}
490
491void FlowGraphCompiler::EmitMegamorphicInstanceCall(
492 const String& name,
493 const Array& arguments_descriptor,
494 intptr_t deopt_id,
495 const InstructionSource& source,
496 LocationSummary* locs) {
497 ASSERT(CanCallDart());
498 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
499 ASSERT(!FLAG_precompiled_mode);
500 const ArgumentsDescriptor args_desc(arguments_descriptor);
501 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
502 zone(),
503 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
504
505 __ Comment("MegamorphicCall");
506 // Load receiver into R0.
507 __ LoadFromOffset(R0, SP,
508 (args_desc.Count() - 1) * compiler::target::kWordSize);
509 // Use same code pattern as instance call so it can be parsed by code patcher.
510 __ LoadUniqueObject(IC_DATA_REG, cache);
511 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
512 __ Call(compiler::FieldAddress(
513 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
514
515 RecordSafepoint(locs);
516 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
517 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
518 if (is_optimizing()) {
519 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
520 } else {
521 // Add deoptimization continuation point after the call and before the
522 // arguments are removed.
523 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
524 }
525 RecordCatchEntryMoves(pending_deoptimization_env_);
526 EmitDropArguments(args_desc.SizeWithTypeArgs());
527}
528
529void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
530 intptr_t deopt_id,
531 const InstructionSource& source,
532 LocationSummary* locs,
533 Code::EntryKind entry_kind,
534 bool receiver_can_be_smi) {
535 ASSERT(CanCallDart());
536 ASSERT(entry_kind == Code::EntryKind::kNormal ||
537 entry_kind == Code::EntryKind::kUnchecked);
538 ASSERT(ic_data.NumArgsTested() == 1);
539 const Code& initial_stub = StubCode::SwitchableCallMiss();
540 const char* switchable_call_mode = "smiable";
541 if (!receiver_can_be_smi) {
542 switchable_call_mode = "non-smi";
543 ic_data.set_receiver_cannot_be_smi(true);
544 }
545 const UnlinkedCall& data =
546 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
547
548 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
549 __ LoadFromOffset(
550 R0, SP,
551 (ic_data.SizeWithoutTypeArgs() - 1) * compiler::target::kWordSize);
552 if (FLAG_precompiled_mode) {
553 // The AOT runtime will replace the slot in the object pool with the
554 // entrypoint address - see app_snapshot.cc.
555 const auto snapshot_behavior =
556 compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
557 CLOBBERS_LR(__ LoadUniqueObject(LR, initial_stub, AL, snapshot_behavior));
558 } else {
559 __ LoadUniqueObject(CODE_REG, initial_stub);
560 const intptr_t entry_point_offset =
561 entry_kind == Code::EntryKind::kNormal
562 ? compiler::target::Code::entry_point_offset(
563 Code::EntryKind::kMonomorphic)
564 : compiler::target::Code::entry_point_offset(
565 Code::EntryKind::kMonomorphicUnchecked);
566 CLOBBERS_LR(
567 __ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset)));
568 }
569 __ LoadUniqueObject(R9, data);
570 CLOBBERS_LR(__ blx(LR));
571
572 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
573 locs, pending_deoptimization_env_);
574 EmitDropArguments(ic_data.SizeWithTypeArgs());
575}
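// The switchable call site above leaves the receiver in R0, the UnlinkedCall
// (or ICData) object in R9 and the target entry point in LR before the blx,
// so the runtime can re-bind the site by rewriting the object-pool entries
// that feed R9 and LR without touching the instruction stream.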
576
577void FlowGraphCompiler::EmitUnoptimizedStaticCall(
578 intptr_t size_with_type_args,
579 intptr_t deopt_id,
580 const InstructionSource& source,
581 LocationSummary* locs,
582 const ICData& ic_data,
583 Code::EntryKind entry_kind) {
584 ASSERT(CanCallDart());
585 const Code& stub =
586 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
587 __ LoadObject(R9, ic_data);
588 GenerateDartCall(deopt_id, source, stub,
589 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
590 EmitDropArguments(size_with_type_args);
591}
592
593void FlowGraphCompiler::EmitOptimizedStaticCall(
594 const Function& function,
595 const Array& arguments_descriptor,
596 intptr_t size_with_type_args,
597 intptr_t deopt_id,
598 const InstructionSource& source,
599 LocationSummary* locs,
600 Code::EntryKind entry_kind) {
601 ASSERT(CanCallDart());
602 ASSERT(!function.IsClosureFunction());
603 if (function.PrologueNeedsArgumentsDescriptor()) {
604 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
605 } else {
606 if (!FLAG_precompiled_mode) {
607 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
608 }
609 }
610 // Do not use the code from the function, but let the code be patched so that
611 // we can record the outgoing edges to other code.
612 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
613 function, entry_kind);
614 EmitDropArguments(size_with_type_args);
615}
616
617void FlowGraphCompiler::EmitDispatchTableCall(
618 int32_t selector_offset,
619 const Array& arguments_descriptor) {
620 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
621 ASSERT(CanCallDart());
622 ASSERT(cid_reg != ARGS_DESC_REG);
623 if (!arguments_descriptor.IsNull()) {
624 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
625 }
626 intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
627 compiler::target::kWordSize;
628 CLOBBERS_LR({
629 // Would like cid_reg to be available on entry to the target function
630 // for checking purposes.
631 ASSERT(cid_reg != LR);
632 if (offset == 0) {
633 __ ldr(LR, compiler::Address(DISPATCH_TABLE_REG, cid_reg, LSL,
634 compiler::target::kWordSizeLog2));
635 } else {
636 __ add(LR, DISPATCH_TABLE_REG,
637 compiler::Operand(cid_reg, LSL, compiler::target::kWordSizeLog2));
638 if (!Utils::MagnitudeIsUint(12, offset)) {
639 const intptr_t adjust = offset & -(1 << 12);
640 __ AddImmediate(LR, LR, adjust);
641 offset -= adjust;
642 }
643 __ ldr(LR, compiler::Address(LR, offset));
644 }
645 __ blx(LR);
646 });
647}
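// The slot address is DISPATCH_TABLE_REG + (selector_offset - kOriginElement
// + cid) * kWordSize. An ARM load offset carries at most 12 bits of
// magnitude, so a larger displacement is split into an AddImmediate of the
// 4KB-aligned part plus the remaining small offset.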
648
649Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
650 Register reg,
651 const Object& obj,
652 bool needs_number_check,
653 const InstructionSource& source,
654 intptr_t deopt_id) {
655 if (needs_number_check) {
656 ASSERT(!obj.IsMint() && !obj.IsDouble());
657 __ Push(reg);
658 __ PushObject(obj);
659 if (is_optimizing()) {
660 // No breakpoints in optimized code.
661 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
662 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
663 } else {
664 // Patchable to support breakpoints.
665 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
666 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
667 source);
668 }
669 // Stub returns result in flags (result of a cmp, we need Z computed).
670 __ Drop(1); // Discard constant.
671 __ Pop(reg); // Restore 'reg'.
672 } else {
673 __ CompareObject(reg, obj);
674 }
675 return EQ;
676}
677
678Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
679 Register left,
680 Register right,
681 bool needs_number_check,
682 const InstructionSource& source,
683 intptr_t deopt_id) {
684 if (needs_number_check) {
685 __ Push(left);
686 __ Push(right);
687 if (is_optimizing()) {
688 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
689 } else {
690 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
691 }
692 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
693 // Stub returns result in flags (result of a cmp, we need Z computed).
694 __ Pop(right);
695 __ Pop(left);
696 } else {
697 __ cmp(left, compiler::Operand(right));
698 }
699 return EQ;
700}
701
702Condition FlowGraphCompiler::EmitBoolTest(Register value,
703 BranchLabels labels,
704 bool invert) {
705 __ Comment("BoolTest");
706 __ tst(value,
707 compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
708 return invert ? NE : EQ;
709}
710
711// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
712// FlowGraphCompiler::SlowPathEnvironmentFor.
713void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
714#if defined(DEBUG)
715 locs->CheckWritableInputs();
716 ClobberDeadTempRegisters(locs);
717#endif
718 // TODO(vegorov): consider saving only caller save (volatile) registers.
719 __ PushRegisters(*locs->live_registers());
720}
721
722void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
723 __ PopRegisters(*locs->live_registers());
724}
725
726#if defined(DEBUG)
727void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
728 // Clobber temporaries that have not been manually preserved.
729 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
730 Location tmp = locs->temp(i);
731 // TODO(zerny): clobber non-live temporary FPU registers.
732 if (tmp.IsRegister() &&
733 !locs->live_registers()->ContainsRegister(tmp.reg())) {
734 __ mov(tmp.reg(), compiler::Operand(0xf7));
735 }
736 }
737}
738#endif
739
740Register FlowGraphCompiler::EmitTestCidRegister() {
741 return R2;
742}
743
744void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
745 intptr_t count_without_type_args,
746 const Array& arguments_descriptor) {
747 __ Comment("EmitTestAndCall");
748 // Load receiver into R0.
749 __ LoadFromOffset(
750 R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
751 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
752}
753
754void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
755 bool if_smi) {
756 __ tst(R0, compiler::Operand(kSmiTagMask));
757 // Jump to the label if the receiver is a Smi (if_smi) or not a Smi (!if_smi).
758 __ b(label, if_smi ? EQ : NE);
759}
760
761void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
762 ASSERT(class_id_reg != R0);
763 __ LoadClassId(class_id_reg, R0);
764}
765
766void FlowGraphCompiler::EmitMove(Location destination,
767 Location source,
768 TemporaryRegisterAllocator* allocator) {
769 if (destination.Equals(source)) return;
770
771 if (source.IsRegister()) {
772 if (destination.IsRegister()) {
773 __ mov(destination.reg(), compiler::Operand(source.reg()));
774 } else {
775 ASSERT(destination.IsStackSlot());
776 const intptr_t dest_offset = destination.ToStackSlotOffset();
777 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
778 }
779 } else if (source.IsStackSlot()) {
780 if (destination.IsRegister()) {
781 const intptr_t source_offset = source.ToStackSlotOffset();
782 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
783 } else {
784 ASSERT(destination.IsStackSlot());
785 const intptr_t source_offset = source.ToStackSlotOffset();
786 const intptr_t dest_offset = destination.ToStackSlotOffset();
787
788 CLOBBERS_LR({
789 // LR not used by register allocator.
790 COMPILE_ASSERT(((1 << LR) & kDartAvailableCpuRegs) == 0);
791 // StoreToOffset uses TMP in the case where dest_offset is too large or
792 // small in order to calculate a new base. We fall back to using LR as a
793 // temporary as we know we're in a ParallelMove.
794 const Register temp_reg = LR;
795
796 __ LoadFromOffset(temp_reg, source.base_reg(), source_offset);
797 __ StoreToOffset(temp_reg, destination.base_reg(), dest_offset);
798 });
799 }
800 } else if (source.IsFpuRegister()) {
801 if (destination.IsFpuRegister()) {
802 if (TargetCPUFeatures::neon_supported()) {
803 __ vmovq(destination.fpu_reg(), source.fpu_reg());
804 } else {
805 // If we're not inlining simd values, then only the even-numbered D
806 // register will have anything in it.
807 __ vmovd(EvenDRegisterOf(destination.fpu_reg()),
808 EvenDRegisterOf(source.fpu_reg()));
809 }
810 } else if (destination.IsStackSlot()) {
811 // 32-bit float
812 const intptr_t dest_offset = destination.ToStackSlotOffset();
813 const SRegister src = EvenSRegisterOf(EvenDRegisterOf(source.fpu_reg()));
814 __ StoreSToOffset(src, destination.base_reg(), dest_offset);
815 } else if (destination.IsDoubleStackSlot()) {
816 const intptr_t dest_offset = destination.ToStackSlotOffset();
817 DRegister src = EvenDRegisterOf(source.fpu_reg());
818 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
819 } else {
820 ASSERT(destination.IsQuadStackSlot());
821 const intptr_t dest_offset = destination.ToStackSlotOffset();
822 const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
823 __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(), dest_offset);
824 }
825 } else if (source.IsDoubleStackSlot()) {
826 if (destination.IsFpuRegister()) {
827 const intptr_t source_offset = source.ToStackSlotOffset();
828 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
829 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
830 } else if (destination.IsStackSlot()) {
831 // 32-bit float
832 const intptr_t source_offset = source.ToStackSlotOffset();
833 const intptr_t dest_offset = destination.ToStackSlotOffset();
834 __ LoadSFromOffset(STMP, source.base_reg(), source_offset);
835 __ StoreSToOffset(STMP, destination.base_reg(), dest_offset);
836 } else {
837 ASSERT(destination.IsDoubleStackSlot());
838 const intptr_t source_offset = source.ToStackSlotOffset();
839 const intptr_t dest_offset = destination.ToStackSlotOffset();
840 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
841 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
842 }
843 } else if (source.IsQuadStackSlot()) {
844 if (destination.IsFpuRegister()) {
845 const intptr_t source_offset = source.ToStackSlotOffset();
846 const DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
847 __ LoadMultipleDFromOffset(dst0, 2, source.base_reg(), source_offset);
848 } else {
849 ASSERT(destination.IsQuadStackSlot());
850 const intptr_t source_offset = source.ToStackSlotOffset();
851 const intptr_t dest_offset = destination.ToStackSlotOffset();
852 const DRegister dtmp0 = DTMP;
853 __ LoadMultipleDFromOffset(dtmp0, 2, source.base_reg(), source_offset);
854 __ StoreMultipleDToOffset(dtmp0, 2, destination.base_reg(), dest_offset);
855 }
856 } else if (source.IsPairLocation()) {
857 ASSERT(destination.IsPairLocation());
858 for (intptr_t i : {0, 1}) {
859 EmitMove(destination.Component(i), source.Component(i), allocator);
860 }
861 } else {
862 ASSERT(source.IsConstant());
863 if (destination.IsFpuRegister() || destination.IsDoubleStackSlot() ||
864 destination.IsStackSlot()) {
865 Register tmp = allocator->AllocateTemporary();
866 source.constant_instruction()->EmitMoveToLocation(this, destination, tmp,
867 source.pair_index());
868 allocator->ReleaseTemporary();
869 } else {
870 source.constant_instruction()->EmitMoveToLocation(
871 this, destination, kNoRegister, source.pair_index());
872 }
873 }
874}
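// Register aliasing note: each Q register overlaps two D registers and each
// D register overlaps two S registers (Q0 = D0:D1, D0 = S0:S1), which is why
// the non-NEON and 32-bit paths above operate on EvenDRegisterOf() /
// EvenSRegisterOf() of the allocated FPU register.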
875
876static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
877 switch (bytes) {
878 case 4:
879 return compiler::kFourBytes;
880 case 2:
881 return compiler::kTwoBytes;
882 case 1:
883 return compiler::kByte;
884 default:
885 UNIMPLEMENTED();
886 }
887}
888
889void FlowGraphCompiler::EmitNativeMoveArchitecture(
890 const compiler::ffi::NativeLocation& destination,
891 const compiler::ffi::NativeLocation& source) {
892 const auto& src_payload_type = source.payload_type();
893 const auto& dst_payload_type = destination.payload_type();
894 const auto& src_container_type = source.container_type();
895 const auto& dst_container_type = destination.container_type();
896 ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
897 ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
898 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
899 ASSERT(src_payload_type.IsPrimitive());
900 ASSERT(dst_payload_type.IsPrimitive());
901 const intptr_t src_size = src_payload_type.SizeInBytes();
902 const intptr_t dst_size = dst_payload_type.SizeInBytes();
903 const bool sign_or_zero_extend = dst_size > src_size;
904
905 if (source.IsRegisters()) {
906 const auto& src = source.AsRegisters();
907 ASSERT(src.num_regs() == 1);
908 ASSERT(src_size <= 4);
909 const auto src_reg = src.reg_at(0);
910
911 if (destination.IsRegisters()) {
912 const auto& dst = destination.AsRegisters();
913 ASSERT(dst.num_regs() == 1);
914 const auto dst_reg = dst.reg_at(0);
915 ASSERT(destination.container_type().SizeInBytes() <= 4);
916 if (!sign_or_zero_extend) {
917 __ MoveRegister(dst_reg, src_reg);
918 } else {
919 if (src_payload_type.IsSigned()) {
920 __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
921 } else {
922 __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
923 }
924 }
925
926 } else if (destination.IsFpuRegisters()) {
927 // Fpu Registers should only contain doubles and registers only ints.
928 // The bit casts are done with a BitCastInstr.
929 // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
930 UNIMPLEMENTED();
931
932 } else {
933 ASSERT(destination.IsStack());
934 const auto& dst = destination.AsStack();
935 ASSERT(!sign_or_zero_extend);
936 auto const op_size =
937 BytesToOperandSize(destination.container_type().SizeInBytes());
938 __ StoreToOffset(src.reg_at(0), dst.base_register(),
939 dst.offset_in_bytes(), op_size);
940 }
941
942 } else if (source.IsFpuRegisters()) {
943 const auto& src = source.AsFpuRegisters();
944 // We have not implemented conversions here, use IL convert instructions.
945 ASSERT(src_payload_type.Equals(dst_payload_type));
946
947 if (destination.IsRegisters()) {
948 // Fpu Registers should only contain doubles and registers only ints.
949 // The bit casts are done with a BitCastInstr.
950 // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
951 UNIMPLEMENTED();
952
953 } else if (destination.IsFpuRegisters()) {
954 const auto& dst = destination.AsFpuRegisters();
955 switch (dst_size) {
956 case 16:
957 __ vmovq(dst.fpu_reg(), src.fpu_reg());
958 return;
959 case 8:
960 __ vmovd(dst.fpu_as_d_reg(), src.fpu_as_d_reg());
961 return;
962 case 4:
963 __ vmovs(dst.fpu_as_s_reg(), src.fpu_as_s_reg());
964 return;
965 default:
966 UNREACHABLE();
967 }
968
969 } else {
970 ASSERT(destination.IsStack());
971 ASSERT(src_payload_type.IsFloat());
972 const auto& dst = destination.AsStack();
973 switch (dst_size) {
974 case 8:
975 __ StoreDToOffset(src.fpu_as_d_reg(), dst.base_register(),
976 dst.offset_in_bytes());
977 return;
978 case 4:
979 __ StoreSToOffset(src.fpu_as_s_reg(), dst.base_register(),
980 dst.offset_in_bytes());
981 return;
982 default:
983 // TODO(dartbug.com/37470): Case 16 for simd packed data.
984 UNREACHABLE();
985 }
986 }
987
988 } else {
989 ASSERT(source.IsStack());
990 const auto& src = source.AsStack();
991 if (destination.IsRegisters()) {
992 const auto& dst = destination.AsRegisters();
993 ASSERT(dst.num_regs() == 1);
994 const auto dst_reg = dst.reg_at(0);
995 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
996 src_payload_type.AsPrimitive().representation());
997 } else if (destination.IsFpuRegisters()) {
998 ASSERT(src_payload_type.Equals(dst_payload_type));
999 ASSERT(src_payload_type.IsFloat());
1000 const auto& dst = destination.AsFpuRegisters();
1001 switch (src_size) {
1002 case 8:
1003 __ LoadDFromOffset(dst.fpu_as_d_reg(), src.base_register(),
1004 src.offset_in_bytes());
1005 return;
1006 case 4:
1007 __ LoadSFromOffset(dst.fpu_as_s_reg(), src.base_register(),
1008 src.offset_in_bytes());
1009 return;
1010 default:
1011 UNIMPLEMENTED();
1012 }
1013
1014 } else {
1015 ASSERT(destination.IsStack());
1016 UNREACHABLE();
1017 }
1018 }
1019}
1020
1021void FlowGraphCompiler::EmitNativeLoad(Register dst,
1022 Register base,
1023 intptr_t offset,
1024 compiler::ffi::PrimitiveType type) {
1025 switch (type) {
1026 case compiler::ffi::kInt8:
1027 __ LoadFromOffset(dst, base, offset, compiler::kByte);
1028 break;
1029 case compiler::ffi::kUint8:
1030 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
1031 break;
1032 case compiler::ffi::kInt16:
1033 __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
1034 break;
1035 case compiler::ffi::kUint16:
1036 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1037 break;
1038 case compiler::ffi::kInt32:
1039 __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
1040 break;
1044 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1045 break;
1046
1048 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1049 __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1050 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1051 break;
1053 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1054 __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1055 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1056 break;
1057 default:
1058 UNREACHABLE();
1059 }
1060}
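// Sizes without a single load instruction are assembled piecewise above: an
// unsigned halfword load, then a byte load of the remaining byte that is
// shifted left by 16 and OR-ed into the result (sign- or zero-extended
// depending on the case).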
1061
1062void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1063 Register dst,
1064 Register tmp) {
1065 compiler::Label skip_reloc;
1066 __ b(&skip_reloc);
1067 InsertBSSRelocation(relocation);
1068 __ Bind(&skip_reloc);
1069
1070 // For historical reasons, the PC on ARM points 8 bytes (two instructions)
1071 // past the current instruction.
1072 __ sub(tmp, PC,
1073 compiler::Operand(Instr::kPCReadOffset + compiler::target::kWordSize));
1074
1075 // tmp holds the address of the relocation.
1076 __ ldr(dst, compiler::Address(tmp));
1077
1078 // dst holds the relocation itself: tmp - bss_start.
1079 // tmp = tmp + (bss_start - tmp) = bss_start
1080 __ add(tmp, tmp, compiler::Operand(dst));
1081
1082 // tmp holds the start of the BSS section.
1083 // Load the "get-thread" routine: *bss_start.
1084 __ ldr(dst, compiler::Address(tmp));
1085}
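// Sketch of the computation: tmp = PC - (kPCReadOffset + kWordSize) is the
// address of the relocation word emitted just above; that word holds
// (bss_start - its own address), so adding it to tmp yields bss_start, and
// the final load fetches the first BSS entry (the "get thread" routine).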
1086
1087#undef __
1088#define __ compiler_->assembler()->
1089
1090void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1091 const Location source = move.src();
1092 const Location destination = move.dest();
1093
1094 if (source.IsRegister() && destination.IsRegister()) {
1095 ASSERT(source.reg() != IP);
1096 ASSERT(destination.reg() != IP);
1097 __ mov(IP, compiler::Operand(source.reg()));
1098 __ mov(source.reg(), compiler::Operand(destination.reg()));
1099 __ mov(destination.reg(), compiler::Operand(IP));
1100 } else if (source.IsRegister() && destination.IsStackSlot()) {
1101 Exchange(source.reg(), destination.base_reg(),
1102 destination.ToStackSlotOffset());
1103 } else if (source.IsStackSlot() && destination.IsRegister()) {
1104 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1105 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1106 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1107 destination.base_reg(), destination.ToStackSlotOffset());
1108 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1109 if (TargetCPUFeatures::neon_supported()) {
1110 const QRegister dst = destination.fpu_reg();
1111 const QRegister src = source.fpu_reg();
1112 ASSERT(dst != QTMP && src != QTMP);
1113 __ vmovq(QTMP, src);
1114 __ vmovq(src, dst);
1115 __ vmovq(dst, QTMP);
1116 } else {
1117 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
1118 const DRegister src = EvenDRegisterOf(source.fpu_reg());
1119 ASSERT(dst != DTMP && src != DTMP);
1120 __ vmovd(DTMP, src);
1121 __ vmovd(src, dst);
1122 __ vmovd(dst, DTMP);
1123 }
1124 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1125 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1126 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1127 bool double_width =
1128 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1129 QRegister qreg =
1130 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1131 DRegister reg = EvenDRegisterOf(qreg);
1132 Register base_reg =
1133 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1134 const intptr_t slot_offset = source.IsFpuRegister()
1135 ? destination.ToStackSlotOffset()
1136 : source.ToStackSlotOffset();
1137
1138 if (double_width) {
1139 __ LoadDFromOffset(DTMP, base_reg, slot_offset);
1140 __ StoreDToOffset(reg, base_reg, slot_offset);
1141 __ vmovd(reg, DTMP);
1142 } else {
1143 __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
1144 __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
1145 __ vmovq(qreg, QTMP);
1146 }
1147 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1148 const intptr_t source_offset = source.ToStackSlotOffset();
1149 const intptr_t dest_offset = destination.ToStackSlotOffset();
1150
1151 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1152 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1153 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
1154 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1155 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1156 __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
1157 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1158 const intptr_t source_offset = source.ToStackSlotOffset();
1159 const intptr_t dest_offset = destination.ToStackSlotOffset();
1160
1161 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1162 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1163 __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
1164 __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
1165 __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
1166 __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
1167 source_offset);
1168 } else {
1169 UNREACHABLE();
1170 }
1171}
1172
1173void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1174 const compiler::Address& src) {
1175 UNREACHABLE();
1176}
1177
1178// Do not call or implement this function. Instead, use the form below that
1179// uses an offset from the frame pointer instead of an Address.
1180void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1181 UNREACHABLE();
1182}
1183
1184// Do not call or implement this function. Instead, use the form below that
1185// uses offsets from the frame pointer instead of Addresses.
1186void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1187 const compiler::Address& mem2) {
1188 UNREACHABLE();
1189}
1190
1191void ParallelMoveEmitter::Exchange(Register reg,
1192 Register base_reg,
1193 intptr_t stack_offset) {
1194 ScratchRegisterScope tmp(this, reg);
1195 __ mov(tmp.reg(), compiler::Operand(reg));
1196 __ LoadFromOffset(reg, base_reg, stack_offset);
1197 __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
1198}
1199
1200void ParallelMoveEmitter::Exchange(Register base_reg1,
1201 intptr_t stack_offset1,
1202 Register base_reg2,
1203 intptr_t stack_offset2) {
1204 ScratchRegisterScope tmp1(this, kNoRegister);
1205 ScratchRegisterScope tmp2(this, tmp1.reg());
1206 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1207 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1208 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1209 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1210}
1211
1212void ParallelMoveEmitter::SpillScratch(Register reg) {
1213 __ Push(reg);
1214}
1215
1216void ParallelMoveEmitter::RestoreScratch(Register reg) {
1217 __ Pop(reg);
1218}
1219
1220void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1221 __ PushQuad(reg);
1222}
1223
1224void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1225 __ PopQuad(reg);
1226}
1227
1228#undef __
1229
1230} // namespace dart
1231
1232#endif // defined(TARGET_ARCH_ARM)