flow_graph_compiler_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
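// In precompiled (AOT) mode, install callbacks on the assembler so that the
// write-barrier and array-write-barrier stubs can be invoked with unrelocated
// PC-relative calls when the stubs are reachable that way.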
31void FlowGraphCompiler::ArchSpecificInitialization() {
32 if (FLAG_precompiled_mode) {
33 auto object_store = isolate_group()->object_store();
34
35 const auto& stub =
36 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
37 if (CanPcRelativeCall(stub)) {
38 assembler_->generate_invoke_write_barrier_wrapper_ =
39 [&](Condition condition, Register reg) {
40 const intptr_t offset_into_target =
41 Thread::WriteBarrierWrappersOffsetForRegister(reg);
42 assembler_->GenerateUnRelocatedPcRelativeCall(condition,
43 offset_into_target);
44 AddPcRelativeCallStubTarget(stub);
45 };
46 }
47
48 const auto& array_stub =
49 Code::ZoneHandle(object_store->array_write_barrier_stub());
50 if (CanPcRelativeCall(stub)) {
51 assembler_->generate_invoke_array_write_barrier_ =
52 [&](Condition condition) {
53 assembler_->GenerateUnRelocatedPcRelativeCall(condition);
54 AddPcRelativeCallStubTarget(array_stub);
55 };
56 }
57 }
58}
59
60FlowGraphCompiler::~FlowGraphCompiler() {
61 // BlockInfos are zone-allocated, so their destructors are not called.
62 // Verify the labels explicitly here.
63 for (int i = 0; i < block_info_.length(); ++i) {
64 ASSERT(!block_info_[i]->jump_label()->IsLinked());
65 }
66}
67
68bool FlowGraphCompiler::SupportsUnboxedSimd128() {
69 return TargetCPUFeatures::neon_supported() && FLAG_enable_simd_inline;
70}
71
72bool FlowGraphCompiler::CanConvertInt64ToDouble() {
73 // ARM does not have a short instruction sequence for converting int64 to
74 // double.
75 return false;
76}
77
78void FlowGraphCompiler::EnterIntrinsicMode() {
79 ASSERT(!intrinsic_mode());
80 intrinsic_mode_ = true;
81 ASSERT(!assembler()->constant_pool_allowed());
82}
83
84void FlowGraphCompiler::ExitIntrinsicMode() {
85 ASSERT(intrinsic_mode());
86 intrinsic_mode_ = false;
87}
88
89TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
90 DeoptInfoBuilder* builder,
91 const Array& deopt_table) {
92 if (deopt_env_ == nullptr) {
93 ++builder->current_info_number_;
94 return TypedData::null();
95 }
96
97 AllocateOutgoingArguments(deopt_env_);
98
99 intptr_t slot_ix = 0;
100 Environment* current = deopt_env_;
101
102 // Emit all kMaterializeObject instructions describing objects to be
103 // materialized on the deoptimization as a prefix to the deoptimization info.
104 EmitMaterializations(deopt_env_, builder);
105
106 // The real frame starts here.
107 builder->MarkFrameStart();
108
109 Zone* zone = compiler->zone();
110
111 builder->AddPp(current->function(), slot_ix++);
112 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
113 builder->AddCallerFp(slot_ix++);
114 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
115
116 // Emit all values that are needed for materialization as a part of the
117 // expression stack for the bottom-most frame. This guarantees that GC
118 // will be able to find them during materialization.
119 slot_ix = builder->EmitMaterializationArguments(slot_ix);
120
121 // For the innermost environment, set outgoing arguments and the locals.
122 for (intptr_t i = current->Length() - 1;
123 i >= current->fixed_parameter_count(); i--) {
124 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
125 }
126
127 Environment* previous = current;
128 current = current->outer();
129 while (current != nullptr) {
130 builder->AddPp(current->function(), slot_ix++);
131 builder->AddPcMarker(previous->function(), slot_ix++);
132 builder->AddCallerFp(slot_ix++);
133
134 // For any outer environment the deopt id is that of the call instruction
135 // which is recorded in the outer environment.
136 builder->AddReturnAddress(current->function(),
137 DeoptId::ToDeoptAfter(current->GetDeoptId()),
138 slot_ix++);
139
140 // The values of outgoing arguments can be changed from the inlined call so
141 // we must read them from the previous environment.
142 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
143 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
144 slot_ix++);
145 }
146
147 // Set the locals, note that outgoing arguments are not in the environment.
148 for (intptr_t i = current->Length() - 1;
149 i >= current->fixed_parameter_count(); i--) {
150 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
151 }
152
153 // Iterate on the outer environment.
154 previous = current;
155 current = current->outer();
156 }
157 // The previous pointer is now the outermost environment.
158 ASSERT(previous != nullptr);
159
160 // Set slots for the outermost environment.
161 builder->AddCallerPp(slot_ix++);
162 builder->AddPcMarker(previous->function(), slot_ix++);
163 builder->AddCallerFp(slot_ix++);
164 builder->AddCallerPc(slot_ix++);
165
166 // For the outermost environment, set the incoming arguments.
167 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
168 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
169 }
170
171 return builder->CreateDeoptInfo(deopt_table);
172}
173
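// Emits the out-of-line stub for a deoptimization point: an optional trap
// (--trap_on_deoptimization), then a call through THR to the shared
// deoptimization entry; the code offset after the call is recorded so the
// deopt info can be located from the return address.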
174void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
175 intptr_t stub_ix) {
176 // Calls do not need stubs, they share a deoptimization trampoline.
177 ASSERT(reason() != ICData::kDeoptAtCall);
178 compiler::Assembler* assembler = compiler->assembler();
179#define __ assembler->
180 __ Comment("%s", Name());
181 __ Bind(entry_label());
182 if (FLAG_trap_on_deoptimization) {
183 __ bkpt(0);
184 }
185
186 ASSERT(deopt_env() != nullptr);
187 __ Call(compiler::Address(
188 THR, compiler::target::Thread::deoptimize_entry_offset()));
189 set_pc_offset(assembler->CodeSize());
190#undef __
191}
192
193#define __ assembler->
194// Static methods of FlowGraphCompiler that take an assembler.
195
196void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
197 Register reg_to_call,
198 intptr_t sub_type_cache_index) {
199 __ LoadField(
200 TypeTestABI::kScratchReg,
201 compiler::FieldAddress(
202 reg_to_call,
203 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
204 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
205 sub_type_cache_index);
206 __ blx(TypeTestABI::kScratchReg);
207}
208
209#undef __
210#define __ assembler()->
211// Instance methods of FlowGraphCompiler.
212
213// Fall through if bool_register contains null.
214void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
215 compiler::Label* is_true,
216 compiler::Label* is_false) {
217 compiler::Label fall_through;
218 __ CompareObject(bool_register, Object::null_object());
219 __ b(&fall_through, EQ);
220 BranchLabels labels = {is_true, is_false, &fall_through};
221 Condition true_condition =
222 EmitBoolTest(bool_register, labels, /*invert=*/false);
223 ASSERT(true_condition != kInvalidCondition);
224 __ b(is_true, true_condition);
225 __ b(is_false);
226 __ Bind(&fall_through);
227}
228
229void FlowGraphCompiler::EmitFrameEntry() {
230 const Function& function = parsed_function().function();
231 if (CanOptimizeFunction() && function.IsOptimizable() &&
232 (!is_optimizing() || may_reoptimize())) {
233 __ Comment("Invocation Count Check");
234 const Register function_reg = R8;
235 __ ldr(function_reg, compiler::FieldAddress(
236 CODE_REG, compiler::target::Code::owner_offset()));
237 __ ldr(R3, compiler::FieldAddress(
238 function_reg,
239 compiler::target::Function::usage_counter_offset()));
240 // Reoptimization of an optimized function is triggered by counting in
241 // IC stubs, but not at the entry of the function.
242 if (!is_optimizing()) {
243 __ add(R3, R3, compiler::Operand(1));
244 __ str(R3, compiler::FieldAddress(
245 function_reg,
246 compiler::target::Function::usage_counter_offset()));
247 }
248 __ CompareImmediate(R3, GetOptimizationThreshold());
249 ASSERT(function_reg == R8);
250 __ Branch(compiler::Address(
251 THR, compiler::target::Thread::optimize_entry_offset()),
252 GE);
253 }
254
255 if (flow_graph().graph_entry()->NeedsFrame()) {
256 __ Comment("Enter frame");
257 if (flow_graph().IsCompiledForOsr()) {
258 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
259 ASSERT(extra_slots >= 0);
260 __ EnterOsrFrame(extra_slots * compiler::target::kWordSize);
261 } else {
262 ASSERT(StackSize() >= 0);
263 __ EnterDartFrame(StackSize() * compiler::target::kWordSize);
264 }
265 } else if (FLAG_precompiled_mode) {
266 assembler()->set_constant_pool_allowed(true);
267 }
268}
269
270const InstructionSource& PrologueSource() {
271 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
272 /*inlining_id=*/0);
273 return prologue_source;
274}
275
276void FlowGraphCompiler::EmitPrologue() {
277 BeginCodeSourceRange(PrologueSource());
278
279 EmitFrameEntry();
280 ASSERT(assembler()->constant_pool_allowed());
281
282 // In unoptimized code, initialize (non-argument) stack allocated slots.
283 if (!is_optimizing()) {
284 const int num_locals = parsed_function().num_stack_locals();
285
286 intptr_t args_desc_slot = -1;
287 if (parsed_function().has_arg_desc_var()) {
288 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
289 parsed_function().arg_desc_var());
290 }
291
292 __ Comment("Initialize spill slots");
293 if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
294 __ LoadObject(R0, Object::null_object());
295 }
296 for (intptr_t i = 0; i < num_locals; ++i) {
297 const intptr_t slot_index =
298 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
299 Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
300 __ StoreToOffset(value_reg, FP, slot_index * compiler::target::kWordSize);
301 }
302 } else if (parsed_function().suspend_state_var() != nullptr &&
303 !flow_graph().IsCompiledForOsr()) {
304 // Initialize synthetic :suspend_state variable early
305 // as it may be accessed by GC and exception handling before
306 // InitSuspendableFunction stub is called.
307 const intptr_t slot_index =
308 compiler::target::frame_layout.FrameSlotForVariable(
309 parsed_function().suspend_state_var());
310 __ LoadObject(R0, Object::null_object());
311 __ StoreToOffset(R0, FP, slot_index * compiler::target::kWordSize);
312 }
313
314 EndCodeSourceRange(PrologueSource());
315}
316
317void FlowGraphCompiler::EmitCallToStub(
318 const Code& stub,
319 ObjectPool::SnapshotBehavior snapshot_behavior) {
320 ASSERT(!stub.IsNull());
321 if (CanPcRelativeCall(stub)) {
322 __ GenerateUnRelocatedPcRelativeCall();
323 AddPcRelativeCallStubTarget(stub);
324 } else {
325 __ BranchLink(stub, ObjectPool::Patchability::kNotPatchable,
326 CodeEntryKind::kNormal, snapshot_behavior);
327 AddStubCallTarget(stub);
328 }
329}
330
331void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
332 ASSERT(!stub.IsNull());
333 if (CanPcRelativeCall(stub)) {
334 __ GenerateUnRelocatedPcRelativeTailCall();
335 AddPcRelativeTailCallStubTarget(stub);
336 } else {
337 __ LoadObject(CODE_REG, stub);
338 __ ldr(PC, compiler::FieldAddress(
339 CODE_REG, compiler::target::Code::entry_point_offset()));
340 AddStubCallTarget(stub);
341 }
342}
343
344void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
345 ASSERT(!stub.IsNull());
346 if (CanPcRelativeCall(stub)) {
347 if (flow_graph().graph_entry()->NeedsFrame()) {
348 __ LeaveDartFrame();
349 }
350 __ GenerateUnRelocatedPcRelativeTailCall();
351 AddPcRelativeTailCallStubTarget(stub);
352#if defined(DEBUG)
353 __ Breakpoint();
354#endif
355 } else {
356 __ LoadObject(CODE_REG, stub);
357 if (flow_graph().graph_entry()->NeedsFrame()) {
358 __ LeaveDartFrame();
359 }
360 __ ldr(PC, compiler::FieldAddress(
361 CODE_REG, compiler::target::Code::entry_point_offset()));
362 AddStubCallTarget(stub);
363 }
364}
365
366void FlowGraphCompiler::GeneratePatchableCall(
367 const InstructionSource& source,
368 const Code& stub,
369 UntaggedPcDescriptors::Kind kind,
370 LocationSummary* locs,
371 ObjectPool::SnapshotBehavior snapshot_behavior) {
372 __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
373 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
374 pending_deoptimization_env_);
375}
376
377void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
378 const InstructionSource& source,
379 const Code& stub,
380 UntaggedPcDescriptors::Kind kind,
381 LocationSummary* locs,
382 Code::EntryKind entry_kind) {
383 ASSERT(CanCallDart());
384 __ BranchLinkPatchable(stub, entry_kind);
385 EmitCallsiteMetadata(source, deopt_id, kind, locs,
386 pending_deoptimization_env_);
387}
388
389void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
390 const InstructionSource& source,
391 UntaggedPcDescriptors::Kind kind,
392 LocationSummary* locs,
393 const Function& target,
394 Code::EntryKind entry_kind) {
395 ASSERT(CanCallDart());
396 if (CanPcRelativeCall(target)) {
397 __ GenerateUnRelocatedPcRelativeCall();
398 AddPcRelativeCallTarget(target, entry_kind);
399 EmitCallsiteMetadata(source, deopt_id, kind, locs,
400 pending_deoptimization_env_);
401 } else {
402 ASSERT(is_optimizing());
403 // Call sites to the same target can share object pool entries. These
404 // call sites are never patched for breakpoints: the function is deoptimized
405 // and the unoptimized code with IC calls for static calls is patched
406 // instead.
407 const auto& stub = StubCode::CallStaticFunction();
408 __ BranchLinkWithEquivalence(stub, target, entry_kind);
409 EmitCallsiteMetadata(source, deopt_id, kind, locs,
410 pending_deoptimization_env_);
411 AddStaticCallTarget(target, entry_kind);
412 }
413}
414
415void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
416 // We do not check for overflow when incrementing the edge counter. The
417 // function should normally be optimized long before the counter can
418 // overflow; and though we do not reset the counters when we optimize or
419 // deoptimize, there is a bound on the number of
420 // optimization/deoptimization cycles we will attempt.
421 ASSERT(!edge_counters_array_.IsNull());
422 ASSERT(assembler_->constant_pool_allowed());
423 __ Comment("Edge counter");
424 __ LoadObject(R0, edge_counters_array_);
425#if defined(DEBUG)
426 bool old_use_far_branches = assembler_->use_far_branches();
427 assembler_->set_use_far_branches(true);
428#endif // DEBUG
429 __ LoadFieldFromOffset(R1, R0,
430 compiler::target::Array::element_offset(edge_id));
431 __ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
432 __ StoreIntoObjectOffsetNoBarrier(
433 R0, compiler::target::Array::element_offset(edge_id), R1);
434#if defined(DEBUG)
435 assembler_->set_use_far_branches(old_use_far_branches);
436#endif // DEBUG
437}
438
439void FlowGraphCompiler::EmitOptimizedInstanceCall(
440 const Code& stub,
441 const ICData& ic_data,
442 intptr_t deopt_id,
443 const InstructionSource& source,
444 LocationSummary* locs,
445 Code::EntryKind entry_kind) {
446 ASSERT(CanCallDart());
447 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
448 // Each ICData propagated from unoptimized to optimized code contains the
449 // function that corresponds to the Dart function of that IC call. Due
450 // to inlining in optimized code, that function may not correspond to the
451 // top-level function (parsed_function().function()) which could be
452 // reoptimized and which counter needs to be incremented.
453 // Pass the function explicitly, it is used in IC stub.
454
455 __ LoadObject(R8, parsed_function().function());
456 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
457 __ LoadUniqueObject(IC_DATA_REG, ic_data);
458 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
459 entry_kind);
460 EmitDropArguments(ic_data.SizeWithTypeArgs());
461}
462
463void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
464 const ICData& ic_data,
465 intptr_t deopt_id,
466 const InstructionSource& source,
467 LocationSummary* locs,
468 Code::EntryKind entry_kind) {
469 ASSERT(CanCallDart());
470 ASSERT(entry_kind == Code::EntryKind::kNormal ||
471 entry_kind == Code::EntryKind::kUnchecked);
472 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
473 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
474 __ LoadUniqueObject(IC_DATA_REG, ic_data);
475 __ LoadUniqueObject(CODE_REG, stub);
476 const intptr_t entry_point_offset =
477 entry_kind == Code::EntryKind::kNormal
478 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
479 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
480 __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
481 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
482 pending_deoptimization_env_);
483 EmitDropArguments(ic_data.SizeWithTypeArgs());
484}
485
486void FlowGraphCompiler::EmitMegamorphicInstanceCall(
487 const String& name,
488 const Array& arguments_descriptor,
489 intptr_t deopt_id,
490 const InstructionSource& source,
491 LocationSummary* locs) {
492 ASSERT(CanCallDart());
493 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
494 ASSERT(!FLAG_precompiled_mode);
495 const ArgumentsDescriptor args_desc(arguments_descriptor);
496 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
497 zone(),
498 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
499
500 __ Comment("MegamorphicCall");
501 // Load receiver into R0.
502 __ LoadFromOffset(R0, SP,
503 (args_desc.Count() - 1) * compiler::target::kWordSize);
504 // Use same code pattern as instance call so it can be parsed by code patcher.
505 __ LoadUniqueObject(IC_DATA_REG, cache);
506 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
507 __ Call(compiler::FieldAddress(
508 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
509
510 RecordSafepoint(locs);
511 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
512 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
513 if (is_optimizing()) {
514 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
515 } else {
516 // Add deoptimization continuation point after the call and before the
517 // arguments are removed.
518 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
519 }
520 RecordCatchEntryMoves(pending_deoptimization_env_);
521 EmitDropArguments(args_desc.SizeWithTypeArgs());
522}
523
524void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
525 intptr_t deopt_id,
526 const InstructionSource& source,
527 LocationSummary* locs,
528 Code::EntryKind entry_kind,
529 bool receiver_can_be_smi) {
530 ASSERT(CanCallDart());
531 ASSERT(entry_kind == Code::EntryKind::kNormal ||
532 entry_kind == Code::EntryKind::kUnchecked);
533 ASSERT(ic_data.NumArgsTested() == 1);
534 const Code& initial_stub = StubCode::SwitchableCallMiss();
535 const char* switchable_call_mode = "smiable";
536 if (!receiver_can_be_smi) {
537 switchable_call_mode = "non-smi";
538 ic_data.set_receiver_cannot_be_smi(true);
539 }
540 const UnlinkedCall& data =
541 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
542
543 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
544 __ LoadFromOffset(
545 R0, SP,
546 (ic_data.SizeWithoutTypeArgs() - 1) * compiler::target::kWordSize);
547 // The AOT runtime will replace the slot in the object pool with the
548 // entrypoint address - see app_snapshot.cc.
549 const auto snapshot_behavior =
550 compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
551 CLOBBERS_LR(__ LoadUniqueObject(LR, initial_stub, AL, snapshot_behavior));
552 __ LoadUniqueObject(R9, data);
553 CLOBBERS_LR(__ blx(LR));
554
555 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
556 locs, pending_deoptimization_env_);
557 EmitDropArguments(ic_data.SizeWithTypeArgs());
558}
559
560void FlowGraphCompiler::EmitUnoptimizedStaticCall(
561 intptr_t size_with_type_args,
562 intptr_t deopt_id,
563 const InstructionSource& source,
564 LocationSummary* locs,
565 const ICData& ic_data,
566 Code::EntryKind entry_kind) {
567 ASSERT(CanCallDart());
568 const Code& stub =
569 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
570 __ LoadObject(R9, ic_data);
571 GenerateDartCall(deopt_id, source, stub,
572 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
573 EmitDropArguments(size_with_type_args);
574}
575
576void FlowGraphCompiler::EmitOptimizedStaticCall(
577 const Function& function,
578 const Array& arguments_descriptor,
579 intptr_t size_with_type_args,
580 intptr_t deopt_id,
581 const InstructionSource& source,
582 LocationSummary* locs,
583 Code::EntryKind entry_kind) {
584 ASSERT(CanCallDart());
585 ASSERT(!function.IsClosureFunction());
586 if (function.PrologueNeedsArgumentsDescriptor()) {
587 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
588 } else {
589 if (!FLAG_precompiled_mode) {
590 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
591 }
592 }
593 // Do not use the code from the function, but let the code be patched so that
594 // we can record the outgoing edges to other code.
595 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
596 function, entry_kind);
597 EmitDropArguments(size_with_type_args);
598}
599
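// Emits an AOT dispatch-table call: the entry point for the given selector and
// receiver class id is loaded from DISPATCH_TABLE_REG and invoked via blx. The
// class id register is kept live so it is still available on entry to the
// target for checking purposes.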
600void FlowGraphCompiler::EmitDispatchTableCall(
601 int32_t selector_offset,
602 const Array& arguments_descriptor) {
603 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
604 ASSERT(CanCallDart());
605 ASSERT(cid_reg != ARGS_DESC_REG);
606 if (!arguments_descriptor.IsNull()) {
607 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
608 }
609 intptr_t offset = (selector_offset - DispatchTable::kOriginElement) *
610 compiler::target::kWordSize;
611 CLOBBERS_LR({
612 // Would like cid_reg to be available on entry to the target function
613 // for checking purposes.
614 ASSERT(cid_reg != LR);
615 if (offset == 0) {
616 __ ldr(LR, compiler::Address(DISPATCH_TABLE_REG, cid_reg, LSL,
617 compiler::target::kWordSizeLog2));
618 } else {
619 __ add(LR, DISPATCH_TABLE_REG,
620 compiler::Operand(cid_reg, LSL, compiler::target::kWordSizeLog2));
621 if (!Utils::MagnitudeIsUint(12, offset)) {
622 const intptr_t adjust = offset & -(1 << 12);
623 __ AddImmediate(LR, LR, adjust);
624 offset -= adjust;
625 }
626 __ ldr(LR, compiler::Address(LR, offset));
627 }
628 __ blx(LR);
629 });
630}
631
632Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
633 Register reg,
634 const Object& obj,
635 bool needs_number_check,
636 const InstructionSource& source,
637 intptr_t deopt_id) {
638 if (needs_number_check) {
639 ASSERT(!obj.IsMint() && !obj.IsDouble());
640 __ Push(reg);
641 __ PushObject(obj);
642 if (is_optimizing()) {
643 // No breakpoints in optimized code.
644 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
645 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
646 } else {
647 // Patchable to support breakpoints.
648 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
649 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
650 source);
651 }
652 // Stub returns result in flags (result of a cmp, we need Z computed).
653 __ Drop(1); // Discard constant.
654 __ Pop(reg); // Restore 'reg'.
655 } else {
656 __ CompareObject(reg, obj);
657 }
658 return EQ;
659}
660
661Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
662 Register left,
663 Register right,
664 bool needs_number_check,
665 const InstructionSource& source,
666 intptr_t deopt_id) {
667 if (needs_number_check) {
668 __ Push(left);
669 __ Push(right);
670 if (is_optimizing()) {
671 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
672 } else {
673 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
674 }
675 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
676 // Stub returns result in flags (result of a cmp, we need Z computed).
677 __ Pop(right);
678 __ Pop(left);
679 } else {
680 __ cmp(left, compiler::Operand(right));
681 }
682 return EQ;
683}
684
685Condition FlowGraphCompiler::EmitBoolTest(Register value,
686 BranchLabels labels,
687 bool invert) {
688 __ Comment("BoolTest");
689 __ tst(value,
690 compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
691 return invert ? NE : EQ;
692}
693
694// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
695// FlowGraphCompiler::SlowPathEnvironmentFor.
696void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
697#if defined(DEBUG)
698 locs->CheckWritableInputs();
699 ClobberDeadTempRegisters(locs);
700#endif
701 // TODO(vegorov): consider saving only caller save (volatile) registers.
702 __ PushRegisters(*locs->live_registers());
703}
704
705void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
706 __ PopRegisters(*locs->live_registers());
707}
708
709#if defined(DEBUG)
710void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
711 // Clobber temporaries that have not been manually preserved.
712 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
713 Location tmp = locs->temp(i);
714 // TODO(zerny): clobber non-live temporary FPU registers.
715 if (tmp.IsRegister() &&
716 !locs->live_registers()->ContainsRegister(tmp.reg())) {
717 __ mov(tmp.reg(), compiler::Operand(0xf7));
718 }
719 }
720}
721#endif
722
723Register FlowGraphCompiler::EmitTestCidRegister() {
724 return R2;
725}
726
727void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
728 intptr_t count_without_type_args,
729 const Array& arguments_descriptor) {
730 __ Comment("EmitTestAndCall");
731 // Load receiver into R0.
732 __ LoadFromOffset(
733 R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
734 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
735}
736
737void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
738 bool if_smi) {
739 __ tst(R0, compiler::Operand(kSmiTagMask));
740 // Jump if receiver is not Smi.
741 __ b(label, if_smi ? EQ : NE);
742}
743
744void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
745 ASSERT(class_id_reg != R0);
746 __ LoadClassId(class_id_reg, R0);
747}
748
749void FlowGraphCompiler::EmitMove(Location destination,
750 Location source,
751 TemporaryRegisterAllocator* allocator) {
752 if (destination.Equals(source)) return;
753
754 if (source.IsRegister()) {
755 if (destination.IsRegister()) {
756 __ mov(destination.reg(), compiler::Operand(source.reg()));
757 } else {
758 ASSERT(destination.IsStackSlot());
759 const intptr_t dest_offset = destination.ToStackSlotOffset();
760 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
761 }
762 } else if (source.IsStackSlot()) {
763 if (destination.IsRegister()) {
764 const intptr_t source_offset = source.ToStackSlotOffset();
765 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
766 } else {
767 ASSERT(destination.IsStackSlot());
768 const intptr_t source_offset = source.ToStackSlotOffset();
769 const intptr_t dest_offset = destination.ToStackSlotOffset();
770
771 CLOBBERS_LR({
772 // LR not used by register allocator.
773 COMPILE_ASSERT(((1 << LR) & kDartAvailableCpuRegs) == 0);
774 // StoreToOffset uses TMP in the case where dest_offset is too large or
775 // small in order to calculate a new base. We fall back to using LR as a
776 // temporary as we know we're in a ParallelMove.
777 const Register temp_reg = LR;
778
779 __ LoadFromOffset(temp_reg, source.base_reg(), source_offset);
780 __ StoreToOffset(temp_reg, destination.base_reg(), dest_offset);
781 });
782 }
783 } else if (source.IsFpuRegister()) {
784 if (destination.IsFpuRegister()) {
785 if (TargetCPUFeatures::neon_supported()) {
786 __ vmovq(destination.fpu_reg(), source.fpu_reg());
787 } else {
788 // If we're not inlining simd values, then only the even numbered D
789 // register will have anything in them.
790 __ vmovd(EvenDRegisterOf(destination.fpu_reg()),
791 EvenDRegisterOf(source.fpu_reg()));
792 }
793 } else if (destination.IsStackSlot()) {
794 // 32-bit float
795 const intptr_t dest_offset = destination.ToStackSlotOffset();
796 const SRegister src = EvenSRegisterOf(EvenDRegisterOf(source.fpu_reg()));
797 __ StoreSToOffset(src, destination.base_reg(), dest_offset);
798 } else if (destination.IsDoubleStackSlot()) {
799 const intptr_t dest_offset = destination.ToStackSlotOffset();
800 DRegister src = EvenDRegisterOf(source.fpu_reg());
801 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
802 } else {
803 ASSERT(destination.IsQuadStackSlot());
804 const intptr_t dest_offset = destination.ToStackSlotOffset();
805 const DRegister dsrc0 = EvenDRegisterOf(source.fpu_reg());
806 __ StoreMultipleDToOffset(dsrc0, 2, destination.base_reg(), dest_offset);
807 }
808 } else if (source.IsDoubleStackSlot()) {
809 if (destination.IsFpuRegister()) {
810 const intptr_t source_offset = source.ToStackSlotOffset();
811 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
812 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
813 } else if (destination.IsStackSlot()) {
814 // 32-bit float
815 const intptr_t source_offset = source.ToStackSlotOffset();
816 const intptr_t dest_offset = destination.ToStackSlotOffset();
817 __ LoadSFromOffset(STMP, source.base_reg(), source_offset);
818 __ StoreSToOffset(STMP, destination.base_reg(), dest_offset);
819 } else {
820 ASSERT(destination.IsDoubleStackSlot());
821 const intptr_t source_offset = source.ToStackSlotOffset();
822 const intptr_t dest_offset = destination.ToStackSlotOffset();
823 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
824 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
825 }
826 } else if (source.IsQuadStackSlot()) {
827 if (destination.IsFpuRegister()) {
828 const intptr_t source_offset = source.ToStackSlotOffset();
829 const DRegister dst0 = EvenDRegisterOf(destination.fpu_reg());
830 __ LoadMultipleDFromOffset(dst0, 2, source.base_reg(), source_offset);
831 } else {
832 ASSERT(destination.IsQuadStackSlot());
833 const intptr_t source_offset = source.ToStackSlotOffset();
834 const intptr_t dest_offset = destination.ToStackSlotOffset();
835 const DRegister dtmp0 = DTMP;
836 __ LoadMultipleDFromOffset(dtmp0, 2, source.base_reg(), source_offset);
837 __ StoreMultipleDToOffset(dtmp0, 2, destination.base_reg(), dest_offset);
838 }
839 } else if (source.IsPairLocation()) {
840 ASSERT(destination.IsPairLocation());
841 for (intptr_t i : {0, 1}) {
842 EmitMove(destination.Component(i), source.Component(i), allocator);
843 }
844 } else {
845 ASSERT(source.IsConstant());
846 if (destination.IsFpuRegister() || destination.IsDoubleStackSlot() ||
847 destination.IsStackSlot()) {
848 Register tmp = allocator->AllocateTemporary();
849 source.constant_instruction()->EmitMoveToLocation(this, destination, tmp,
850 source.pair_index());
851 allocator->ReleaseTemporary();
852 } else {
853 source.constant_instruction()->EmitMoveToLocation(
854 this, destination, kNoRegister, source.pair_index());
855 }
856 }
857}
858
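// Maps a byte count to the operand size used for the integer stack stores
// emitted by EmitNativeMoveArchitecture below.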
859static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
860 switch (bytes) {
861 case 4:
862 return compiler::kFourBytes;
863 case 2:
864 return compiler::kTwoBytes;
865 case 1:
866 return compiler::kByte;
867 default:
868 UNREACHABLE();
869 }
870}
871
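// Moves a value between two native (FFI) locations: core registers, FPU
// registers, and stack slots. Narrow integers are sign- or zero-extended when
// the destination payload is wider than the source payload.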
872void FlowGraphCompiler::EmitNativeMoveArchitecture(
873 const compiler::ffi::NativeLocation& destination,
874 const compiler::ffi::NativeLocation& source) {
875 const auto& src_payload_type = source.payload_type();
876 const auto& dst_payload_type = destination.payload_type();
877 const auto& src_container_type = source.container_type();
878 const auto& dst_container_type = destination.container_type();
879 ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
880 ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
881 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
882 ASSERT(src_payload_type.IsPrimitive());
883 ASSERT(dst_payload_type.IsPrimitive());
884 const intptr_t src_size = src_payload_type.SizeInBytes();
885 const intptr_t dst_size = dst_payload_type.SizeInBytes();
886 const bool sign_or_zero_extend = dst_size > src_size;
887
888 if (source.IsRegisters()) {
889 const auto& src = source.AsRegisters();
890 ASSERT(src.num_regs() == 1);
891 ASSERT(src_size <= 4);
892 const auto src_reg = src.reg_at(0);
893
894 if (destination.IsRegisters()) {
895 const auto& dst = destination.AsRegisters();
896 ASSERT(dst.num_regs() == 1);
897 const auto dst_reg = dst.reg_at(0);
898 ASSERT(destination.container_type().SizeInBytes() <= 4);
899 if (!sign_or_zero_extend) {
900 __ MoveRegister(dst_reg, src_reg);
901 } else {
902 if (src_payload_type.IsSigned()) {
903 __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
904 } else {
905 __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
906 }
907 }
908
909 } else if (destination.IsFpuRegisters()) {
910 // Fpu Registers should only contain doubles and registers only ints.
911 // The bit casts are done with a BitCastInstr.
912 // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
913 UNIMPLEMENTED();
914
915 } else {
916 ASSERT(destination.IsStack());
917 const auto& dst = destination.AsStack();
918 ASSERT(!sign_or_zero_extend);
919 auto const op_size =
920 BytesToOperandSize(destination.container_type().SizeInBytes());
921 __ StoreToOffset(src.reg_at(0), dst.base_register(),
922 dst.offset_in_bytes(), op_size);
923 }
924
925 } else if (source.IsFpuRegisters()) {
926 const auto& src = source.AsFpuRegisters();
927 // We have not implemented conversions here, use IL convert instructions.
928 ASSERT(src_payload_type.Equals(dst_payload_type));
929
930 if (destination.IsRegisters()) {
931 // Fpu Registers should only contain doubles and registers only ints.
932 // The bit casts are done with a BitCastInstr.
933 // TODO(dartbug.com/40371): Remove BitCastInstr and implement here.
934 UNIMPLEMENTED();
935
936 } else if (destination.IsFpuRegisters()) {
937 const auto& dst = destination.AsFpuRegisters();
938 switch (dst_size) {
939 case 16:
940 __ vmovq(dst.fpu_reg(), src.fpu_reg());
941 return;
942 case 8:
943 __ vmovd(dst.fpu_as_d_reg(), src.fpu_as_d_reg());
944 return;
945 case 4:
946 __ vmovs(dst.fpu_as_s_reg(), src.fpu_as_s_reg());
947 return;
948 default:
949 UNREACHABLE();
950 }
951
952 } else {
953 ASSERT(destination.IsStack());
954 ASSERT(src_payload_type.IsFloat());
955 const auto& dst = destination.AsStack();
956 switch (dst_size) {
957 case 8:
958 __ StoreDToOffset(src.fpu_as_d_reg(), dst.base_register(),
959 dst.offset_in_bytes());
960 return;
961 case 4:
962 __ StoreSToOffset(src.fpu_as_s_reg(), dst.base_register(),
963 dst.offset_in_bytes());
964 return;
965 default:
966 // TODO(dartbug.com/37470): Case 16 for simd packed data.
967 UNREACHABLE();
968 }
969 }
970
971 } else {
972 ASSERT(source.IsStack());
973 const auto& src = source.AsStack();
974 if (destination.IsRegisters()) {
975 const auto& dst = destination.AsRegisters();
976 ASSERT(dst.num_regs() == 1);
977 const auto dst_reg = dst.reg_at(0);
978 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
979 src_payload_type.AsPrimitive().representation());
980 } else if (destination.IsFpuRegisters()) {
981 ASSERT(src_payload_type.Equals(dst_payload_type));
982 ASSERT(src_payload_type.IsFloat());
983 const auto& dst = destination.AsFpuRegisters();
984 switch (src_size) {
985 case 8:
986 __ LoadDFromOffset(dst.fpu_as_d_reg(), src.base_register(),
987 src.offset_in_bytes());
988 return;
989 case 4:
990 __ LoadSFromOffset(dst.fpu_as_s_reg(), src.base_register(),
991 src.offset_in_bytes());
992 return;
993 default:
994 UNREACHABLE();
995 }
996
997 } else {
998 ASSERT(destination.IsStack());
999 UNREACHABLE();
1000 }
1001 }
1002}
1003
1004void FlowGraphCompiler::EmitNativeLoad(Register dst,
1005 Register base,
1006 intptr_t offset,
1007 compiler::ffi::PrimitiveType type) {
1008 switch (type) {
1009 case compiler::ffi::kInt8:
1010 __ LoadFromOffset(dst, base, offset, compiler::kByte);
1011 break;
1012 case compiler::ffi::kUint8:
1013 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
1014 break;
1015 case compiler::ffi::kInt16:
1016 __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
1017 break;
1018 case compiler::ffi::kUint16:
1019 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1020 break;
1021 case compiler::ffi::kInt32:
1022 __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
1023 break;
1027 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1028 break;
1029
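 // Three-byte loads are assembled from a two-byte load and a one-byte load
 // whose result is shifted into the upper bits with an orr.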
1031 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1032 __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1033 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1034 break;
1036 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1037 __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1038 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1039 break;
1040 default:
1041 UNREACHABLE();
1042 }
1043}
1044
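// Resolves a BSS relocation: computes the address of the relocation word
// emitted by InsertBSSRelocation, adds the stored delta to reach the start of
// the BSS section, and loads the routine stored there (such as the
// "get-thread" entry) into dst.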
1045void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1046 Register dst,
1047 Register tmp) {
1048 compiler::Label skip_reloc;
1049 __ b(&skip_reloc);
1050 InsertBSSRelocation(relocation);
1051 __ Bind(&skip_reloc);
1052
1053 // For historical reasons, the PC on ARM points 8 bytes (two instructions)
1054 // past the current instruction.
1055 __ sub(tmp, PC,
1056 compiler::Operand(Instr::kPCReadOffset + compiler::target::kWordSize));
1057
1058 // tmp holds the address of the relocation.
1059 __ ldr(dst, compiler::Address(tmp));
1060
1061 // dst holds the relocation itself: tmp - bss_start.
1062 // tmp = tmp + (bss_start - tmp) = bss_start
1063 __ add(tmp, tmp, compiler::Operand(dst));
1064
1065 // tmp holds the start of the BSS section.
1066 // Load the "get-thread" routine: *bss_start.
1067 __ ldr(dst, compiler::Address(tmp));
1068}
1069
1070#undef __
1071#define __ compiler_->assembler()->
1072
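// Swaps two locations in place: core registers go through IP, FPU values
// through QTMP/DTMP, and stack-to-stack swaps allocate scratch registers via
// ScratchRegisterScope / ScratchFpuRegisterScope.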
1073void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1074 const Location source = move.src();
1075 const Location destination = move.dest();
1076
1077 if (source.IsRegister() && destination.IsRegister()) {
1078 ASSERT(source.reg() != IP);
1079 ASSERT(destination.reg() != IP);
1080 __ mov(IP, compiler::Operand(source.reg()));
1081 __ mov(source.reg(), compiler::Operand(destination.reg()));
1082 __ mov(destination.reg(), compiler::Operand(IP));
1083 } else if (source.IsRegister() && destination.IsStackSlot()) {
1084 Exchange(source.reg(), destination.base_reg(),
1085 destination.ToStackSlotOffset());
1086 } else if (source.IsStackSlot() && destination.IsRegister()) {
1087 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1088 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1089 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1090 destination.base_reg(), destination.ToStackSlotOffset());
1091 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1092 if (TargetCPUFeatures::neon_supported()) {
1093 const QRegister dst = destination.fpu_reg();
1094 const QRegister src = source.fpu_reg();
1095 ASSERT(dst != QTMP && src != QTMP);
1096 __ vmovq(QTMP, src);
1097 __ vmovq(src, dst);
1098 __ vmovq(dst, QTMP);
1099 } else {
1100 const DRegister dst = EvenDRegisterOf(destination.fpu_reg());
1101 const DRegister src = EvenDRegisterOf(source.fpu_reg());
1102 ASSERT(dst != DTMP && src != DTMP);
1103 __ vmovd(DTMP, src);
1104 __ vmovd(src, dst);
1105 __ vmovd(dst, DTMP);
1106 }
1107 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1108 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1109 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1110 bool double_width =
1111 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1112 QRegister qreg =
1113 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1114 DRegister reg = EvenDRegisterOf(qreg);
1115 Register base_reg =
1116 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1117 const intptr_t slot_offset = source.IsFpuRegister()
1118 ? destination.ToStackSlotOffset()
1119 : source.ToStackSlotOffset();
1120
1121 if (double_width) {
1122 __ LoadDFromOffset(DTMP, base_reg, slot_offset);
1123 __ StoreDToOffset(reg, base_reg, slot_offset);
1124 __ vmovd(reg, DTMP);
1125 } else {
1126 __ LoadMultipleDFromOffset(DTMP, 2, base_reg, slot_offset);
1127 __ StoreMultipleDToOffset(reg, 2, base_reg, slot_offset);
1128 __ vmovq(qreg, QTMP);
1129 }
1130 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1131 const intptr_t source_offset = source.ToStackSlotOffset();
1132 const intptr_t dest_offset = destination.ToStackSlotOffset();
1133
1134 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1135 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1136 __ LoadDFromOffset(DTMP, source.base_reg(), source_offset);
1137 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1138 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
1139 __ StoreDToOffset(scratch, destination.base_reg(), source_offset);
1140 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1141 const intptr_t source_offset = source.ToStackSlotOffset();
1142 const intptr_t dest_offset = destination.ToStackSlotOffset();
1143
1144 ScratchFpuRegisterScope ensure_scratch(this, kNoQRegister);
1145 DRegister scratch = EvenDRegisterOf(ensure_scratch.reg());
1146 __ LoadMultipleDFromOffset(DTMP, 2, source.base_reg(), source_offset);
1147 __ LoadMultipleDFromOffset(scratch, 2, destination.base_reg(), dest_offset);
1148 __ StoreMultipleDToOffset(DTMP, 2, destination.base_reg(), dest_offset);
1149 __ StoreMultipleDToOffset(scratch, 2, destination.base_reg(),
1150 source_offset);
1151 } else {
1152 UNREACHABLE();
1153 }
1154}
1155
1156void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1157 const compiler::Address& src) {
1158 UNREACHABLE();
1159}
1160
1161// Do not call or implement this function. Instead, use the form below that
1162// uses an offset from the frame pointer instead of an Address.
1163void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1164 UNREACHABLE();
1165}
1166
1167// Do not call or implement this function. Instead, use the form below that
1168// uses offsets from the frame pointer instead of Addresses.
1169void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1170 const compiler::Address& mem2) {
1171 UNREACHABLE();
1172}
1173
1174void ParallelMoveEmitter::Exchange(Register reg,
1175 Register base_reg,
1176 intptr_t stack_offset) {
1177 ScratchRegisterScope tmp(this, reg);
1178 __ mov(tmp.reg(), compiler::Operand(reg));
1179 __ LoadFromOffset(reg, base_reg, stack_offset);
1180 __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
1181}
1182
1183void ParallelMoveEmitter::Exchange(Register base_reg1,
1184 intptr_t stack_offset1,
1185 Register base_reg2,
1186 intptr_t stack_offset2) {
1187 ScratchRegisterScope tmp1(this, kNoRegister);
1188 ScratchRegisterScope tmp2(this, tmp1.reg());
1189 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1190 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1191 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1192 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1193}
1194
1195void ParallelMoveEmitter::SpillScratch(Register reg) {
1196 __ Push(reg);
1197}
1198
1199void ParallelMoveEmitter::RestoreScratch(Register reg) {
1200 __ Pop(reg);
1201}
1202
1203void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1204 __ PushQuad(reg);
1205}
1206
1207void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1208 __ PopQuad(reg);
1209}
1210
1211#undef __
1212
1213} // namespace dart
1214
1215#endif // defined(TARGET_ARCH_ARM)