flow_graph_compiler_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6#if defined(TARGET_ARCH_ARM64)
7
9#include "vm/compiler/backend/flow_graph_compiler.h"
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
17#include "vm/deopt_instructions.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
31void FlowGraphCompiler::ArchSpecificInitialization() {
32 if (FLAG_precompiled_mode) {
33 auto object_store = isolate_group()->object_store();
34
35 const auto& stub =
36 Code::ZoneHandle(object_store->write_barrier_wrappers_stub());
37 if (CanPcRelativeCall(stub)) {
38 assembler_->generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
39 const intptr_t offset_into_target =
40 Thread::WriteBarrierWrappersOffsetForRegister(reg);
41 assembler_->GenerateUnRelocatedPcRelativeCall(offset_into_target);
42 AddPcRelativeCallStubTarget(stub);
43 };
44 }
45
46 const auto& array_stub =
47 Code::ZoneHandle(object_store->array_write_barrier_stub());
48 if (CanPcRelativeCall(array_stub)) {
49 assembler_->generate_invoke_array_write_barrier_ = [&]() {
50 assembler_->GenerateUnRelocatedPcRelativeCall();
51 AddPcRelativeCallStubTarget(array_stub);
52 };
53 }
54 }
55}
56
57FlowGraphCompiler::~FlowGraphCompiler() {
58 // BlockInfos are zone-allocated, so their destructors are not called.
59 // Verify the labels explicitly here.
60 for (int i = 0; i < block_info_.length(); ++i) {
61 ASSERT(!block_info_[i]->jump_label()->IsLinked());
62 }
63}
64
65bool FlowGraphCompiler::SupportsUnboxedSimd128() {
66 return FLAG_enable_simd_inline;
67}
68
69bool FlowGraphCompiler::CanConvertInt64ToDouble() {
70 return true;
71}
72
73void FlowGraphCompiler::EnterIntrinsicMode() {
74 ASSERT(!intrinsic_mode());
75 intrinsic_mode_ = true;
76 ASSERT(!assembler()->constant_pool_allowed());
77}
78
79void FlowGraphCompiler::ExitIntrinsicMode() {
80 ASSERT(intrinsic_mode());
81 intrinsic_mode_ = false;
82}
83
84TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
85 DeoptInfoBuilder* builder,
86 const Array& deopt_table) {
87 if (deopt_env_ == nullptr) {
88 ++builder->current_info_number_;
89 return TypedData::null();
90 }
91
92 AllocateOutgoingArguments(deopt_env_);
93
94 intptr_t slot_ix = 0;
95 Environment* current = deopt_env_;
96
97 // Emit all kMaterializeObject instructions describing objects to be
98 // materialized on the deoptimization as a prefix to the deoptimization info.
99 EmitMaterializations(deopt_env_, builder);
100
101 // The real frame starts here.
102 builder->MarkFrameStart();
103
104 Zone* zone = compiler->zone();
105
106 builder->AddPp(current->function(), slot_ix++);
107 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
108 builder->AddCallerFp(slot_ix++);
109 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
110
111 // Emit all values that are needed for materialization as a part of the
112 // expression stack for the bottom-most frame. This guarantees that GC
113 // will be able to find them during materialization.
114 slot_ix = builder->EmitMaterializationArguments(slot_ix);
115
116 // For the innermost environment, set outgoing arguments and the locals.
117 for (intptr_t i = current->Length() - 1;
118 i >= current->fixed_parameter_count(); i--) {
119 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
120 }
121
122 Environment* previous = current;
123 current = current->outer();
124 while (current != nullptr) {
125 builder->AddPp(current->function(), slot_ix++);
126 builder->AddPcMarker(previous->function(), slot_ix++);
127 builder->AddCallerFp(slot_ix++);
128
129 // For any outer environment the deopt id is that of the call instruction
130 // which is recorded in the outer environment.
131 builder->AddReturnAddress(current->function(),
132 DeoptId::ToDeoptAfter(current->GetDeoptId()),
133 slot_ix++);
134
135 // The values of outgoing arguments can be changed from the inlined call so
136 // we must read them from the previous environment.
137 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
138 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
139 slot_ix++);
140 }
141
142 // Set the locals, note that outgoing arguments are not in the environment.
143 for (intptr_t i = current->Length() - 1;
144 i >= current->fixed_parameter_count(); i--) {
145 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
146 }
147
148 // Iterate on the outer environment.
149 previous = current;
150 current = current->outer();
151 }
152 // The previous pointer is now the outermost environment.
153 ASSERT(previous != nullptr);
154
155 // Add slots for the outermost environment.
156 builder->AddCallerPp(slot_ix++);
157 builder->AddPcMarker(previous->function(), slot_ix++);
158 builder->AddCallerFp(slot_ix++);
159 builder->AddCallerPc(slot_ix++);
160
161 // For the outermost environment, set the incoming arguments.
162 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
163 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
164 }
165
166 return builder->CreateDeoptInfo(deopt_table);
167}
168
169void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
170 intptr_t stub_ix) {
171 // Calls do not need stubs, they share a deoptimization trampoline.
172 ASSERT(reason() != ICData::kDeoptAtCall);
173 compiler::Assembler* assembler = compiler->assembler();
174#define __ assembler->
175 __ Comment("%s", Name());
176 __ Bind(entry_label());
177 if (FLAG_trap_on_deoptimization) {
178 __ brk(0);
179 }
180
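  // Call the shared deoptimization stub through the entry stored on the
  // current Thread; the code size recorded below ties this call site to its
  // deoptimization info.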
181 ASSERT(deopt_env() != nullptr);
182 __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
183 set_pc_offset(assembler->CodeSize());
184#undef __
185}
186
187#define __ assembler->
188// Static methods of FlowGraphCompiler that take an assembler.
189
190void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
191 Register reg_to_call,
192 intptr_t sub_type_cache_index) {
193 __ LoadField(
194 TTSInternalRegs::kScratchReg,
195 compiler::FieldAddress(
196 reg_to_call,
197 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
198 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
199 sub_type_cache_index);
200 __ blr(TTSInternalRegs::kScratchReg);
201}
202
203#undef __
204#define __ assembler()->
205// Instance methods of FlowGraphCompiler.
206
207// Fall through if bool_register contains null.
208void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
209 compiler::Label* is_true,
210 compiler::Label* is_false) {
211 compiler::Label fall_through;
212 __ CompareObject(bool_register, Object::null_object());
213 __ b(&fall_through, EQ);
214 BranchLabels labels = {is_true, is_false, &fall_through};
215 Condition true_condition =
216 EmitBoolTest(bool_register, labels, /*invert=*/false);
217 ASSERT(true_condition == kInvalidCondition);
218 __ Bind(&fall_through);
219}
220
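// Emits the invocation-count check for unoptimized (or re-optimizable) code
// and then sets up the Dart frame (or OSR frame) for this function.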
221void FlowGraphCompiler::EmitFrameEntry() {
222 const Function& function = parsed_function().function();
223 if (CanOptimizeFunction() && function.IsOptimizable() &&
224 (!is_optimizing() || may_reoptimize())) {
225 __ Comment("Invocation Count Check");
226 const Register function_reg = R6;
227 __ ldr(function_reg,
228 compiler::FieldAddress(CODE_REG, Code::owner_offset()));
229
230 __ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
231 compiler::kFourBytes);
232 // Reoptimization of an optimized function is triggered by counting in
233 // IC stubs, but not at the entry of the function.
234 if (!is_optimizing()) {
235 __ add(R7, R7, compiler::Operand(1));
236 __ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
237 compiler::kFourBytes);
238 }
239 __ CompareImmediate(R7, GetOptimizationThreshold());
240 ASSERT(function_reg == R6);
241 compiler::Label dont_optimize;
242 __ b(&dont_optimize, LT);
243 __ ldr(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
244 __ br(TMP);
245 __ Bind(&dont_optimize);
246 }
247
248 if (flow_graph().graph_entry()->NeedsFrame()) {
249 __ Comment("Enter frame");
250 if (flow_graph().IsCompiledForOsr()) {
251 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
252 ASSERT(extra_slots >= 0);
253 __ EnterOsrFrame(extra_slots * kWordSize);
254 } else {
255 ASSERT(StackSize() >= 0);
256 __ EnterDartFrame(StackSize() * kWordSize);
257 }
258 } else if (FLAG_precompiled_mode) {
259 assembler()->set_constant_pool_allowed(true);
260 }
261}
262
263const InstructionSource& PrologueSource() {
264 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
265 /*inlining_id=*/0);
266 return prologue_source;
267}
268
269void FlowGraphCompiler::EmitPrologue() {
270 BeginCodeSourceRange(PrologueSource());
271
272 EmitFrameEntry();
273 ASSERT(assembler()->constant_pool_allowed());
274
275 // In unoptimized code, initialize (non-argument) stack allocated slots.
276 if (!is_optimizing()) {
277 const int num_locals = parsed_function().num_stack_locals();
278
279 intptr_t args_desc_slot = -1;
280 if (parsed_function().has_arg_desc_var()) {
281 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
282 parsed_function().arg_desc_var());
283 }
284
285 __ Comment("Initialize spill slots");
286 for (intptr_t i = 0; i < num_locals; ++i) {
287 const intptr_t slot_index =
288 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
289 Register value_reg =
290 slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
291 __ StoreToOffset(value_reg, FP, slot_index * kWordSize);
292 }
293 } else if (parsed_function().suspend_state_var() != nullptr &&
294 !flow_graph().IsCompiledForOsr()) {
295 // Initialize synthetic :suspend_state variable early
296 // as it may be accessed by GC and exception handling before
297 // InitSuspendableFunction stub is called.
298 const intptr_t slot_index =
299 compiler::target::frame_layout.FrameSlotForVariable(
300 parsed_function().suspend_state_var());
301 __ StoreToOffset(NULL_REG, FP, slot_index * kWordSize);
302 }
303
304 EndCodeSourceRange(PrologueSource());
305}
306
307void FlowGraphCompiler::EmitCallToStub(
308 const Code& stub,
309 ObjectPool::SnapshotBehavior snapshot_behavior) {
310 ASSERT(!stub.IsNull());
311 if (CanPcRelativeCall(stub)) {
312 __ GenerateUnRelocatedPcRelativeCall();
313 AddPcRelativeCallStubTarget(stub);
314 } else {
316 CodeEntryKind::kNormal, snapshot_behavior);
317 AddStubCallTarget(stub);
318 }
319}
320
321void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
322 ASSERT(!stub.IsNull());
323 if (CanPcRelativeCall(stub)) {
324 __ GenerateUnRelocatedPcRelativeTailCall();
325 AddPcRelativeTailCallStubTarget(stub);
326 } else {
327 __ LoadObject(CODE_REG, stub);
328 __ ldr(TMP, compiler::FieldAddress(
330 __ br(TMP);
331 AddStubCallTarget(stub);
332 }
333}
334
335void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
336 ASSERT(!stub.IsNull());
337 if (CanPcRelativeCall(stub)) {
338 if (flow_graph().graph_entry()->NeedsFrame()) {
339 __ LeaveDartFrame();
340 }
341 __ GenerateUnRelocatedPcRelativeTailCall();
342 AddPcRelativeTailCallStubTarget(stub);
343#if defined(DEBUG)
344 __ Breakpoint();
345#endif
346 } else {
347 __ LoadObject(CODE_REG, stub);
348 if (flow_graph().graph_entry()->NeedsFrame()) {
349 __ LeaveDartFrame();
350 }
351 __ ldr(TMP, compiler::FieldAddress(
353 __ br(TMP);
354 AddStubCallTarget(stub);
355 }
356}
357
358void FlowGraphCompiler::GeneratePatchableCall(
359 const InstructionSource& source,
360 const Code& stub,
361 UntaggedPcDescriptors::Kind kind,
362 LocationSummary* locs,
363 ObjectPool::SnapshotBehavior snapshot_behavior) {
364 __ BranchLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
365 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
366 pending_deoptimization_env_);
367}
368
369void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
370 const InstructionSource& source,
371 const Code& stub,
372 UntaggedPcDescriptors::Kind kind,
373 LocationSummary* locs,
374 Code::EntryKind entry_kind) {
375 ASSERT(CanCallDart());
376 __ BranchLinkPatchable(stub, entry_kind);
377 EmitCallsiteMetadata(source, deopt_id, kind, locs,
378 pending_deoptimization_env_);
379}
380
381void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
382 const InstructionSource& source,
383 UntaggedPcDescriptors::Kind kind,
384 LocationSummary* locs,
385 const Function& target,
386 Code::EntryKind entry_kind) {
387 ASSERT(CanCallDart());
388 if (CanPcRelativeCall(target)) {
389 __ GenerateUnRelocatedPcRelativeCall();
390 AddPcRelativeCallTarget(target, entry_kind);
391 EmitCallsiteMetadata(source, deopt_id, kind, locs,
392 pending_deoptimization_env_);
393 } else {
394 // Call sites to the same target can share object pool entries. These
395 // call sites are never patched for breakpoints: the function is deoptimized
396 // and the unoptimized code with IC calls for static calls is patched
397 // instead.
398 ASSERT(is_optimizing());
399 const auto& stub = StubCode::CallStaticFunction();
400 __ BranchLinkWithEquivalence(stub, target, entry_kind);
401 EmitCallsiteMetadata(source, deopt_id, kind, locs,
402 pending_deoptimization_env_);
403 AddStaticCallTarget(target, entry_kind);
404 }
405}
406
407void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
408 // We do not check for overflow when incrementing the edge counter. The
409 // function should normally be optimized long before the counter can
410 // overflow; and though we do not reset the counters when we optimize or
411 // deoptimize, there is a bound on the number of
412 // optimization/deoptimization cycles we will attempt.
413 ASSERT(!edge_counters_array_.IsNull());
414 ASSERT(assembler_->constant_pool_allowed());
415 __ Comment("Edge counter");
416 __ LoadObject(R0, edge_counters_array_);
417 __ LoadCompressedSmiFieldFromOffset(TMP, R0, Array::element_offset(edge_id));
418 __ add(TMP, TMP, compiler::Operand(Smi::RawValue(1)), compiler::kObjectBytes);
419 __ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id),
420 compiler::kObjectBytes);
421}
422
423void FlowGraphCompiler::EmitOptimizedInstanceCall(
424 const Code& stub,
425 const ICData& ic_data,
426 intptr_t deopt_id,
427 const InstructionSource& source,
428 LocationSummary* locs,
429 Code::EntryKind entry_kind) {
430 ASSERT(CanCallDart());
431 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
432 // Each ICData propagated from unoptimized to optimized code contains the
433 // function that corresponds to the Dart function of that IC call. Due
434 // to inlining in optimized code, that function may not correspond to the
435 // top-level function (parsed_function().function()) which could be
436 // reoptimized and which counter needs to be incremented.
437 // Pass the function explicitly, it is used in IC stub.
438
439 __ LoadObject(R6, parsed_function().function());
440 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
441 __ LoadUniqueObject(IC_DATA_REG, ic_data);
442 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
443 entry_kind);
444 EmitDropArguments(ic_data.SizeWithTypeArgs());
445}
446
447void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
448 const ICData& ic_data,
449 intptr_t deopt_id,
450 const InstructionSource& source,
451 LocationSummary* locs,
452 Code::EntryKind entry_kind) {
453 ASSERT(CanCallDart());
454 ASSERT(entry_kind == Code::EntryKind::kNormal ||
455 entry_kind == Code::EntryKind::kUnchecked);
456 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
457 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
458
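  // The IC data and the call stub are placed in consecutive object pool
  // slots so the pair can be loaded with a single LoadDoubleWordFromPoolIndex
  // below and located by the code patcher.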
459 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
460 const intptr_t ic_data_index =
461 op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
462 const intptr_t stub_index =
463 op.AddObject(stub, ObjectPool::Patchability::kPatchable);
464 ASSERT((ic_data_index + 1) == stub_index);
465 __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, ic_data_index);
466 const intptr_t entry_point_offset =
467 entry_kind == Code::EntryKind::kNormal
468 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
469 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
470 __ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
471 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
472 pending_deoptimization_env_);
473 EmitDropArguments(ic_data.SizeWithTypeArgs());
474}
475
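// Looks up (or creates) the MegamorphicCache for this selector and calls
// through the MegamorphicCall stub, mirroring the instance-call pattern so
// the code patcher can parse the call site.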
476void FlowGraphCompiler::EmitMegamorphicInstanceCall(
477 const String& name,
478 const Array& arguments_descriptor,
479 intptr_t deopt_id,
480 const InstructionSource& source,
481 LocationSummary* locs) {
482 ASSERT(CanCallDart());
483 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
484 ASSERT(!FLAG_precompiled_mode);
485 const ArgumentsDescriptor args_desc(arguments_descriptor);
486 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
487 zone(),
488 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
489
490 __ Comment("MegamorphicCall");
491 // Load receiver into R0.
492 __ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);
493
494 // Use same code pattern as instance call so it can be parsed by code patcher.
495 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
496 const intptr_t data_index =
497 op.AddObject(cache, ObjectPool::Patchability::kPatchable);
498 const intptr_t stub_index = op.AddObject(
499 StubCode::MegamorphicCall(), ObjectPool::Patchability::kPatchable);
500 ASSERT((data_index + 1) == stub_index);
501 __ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, data_index);
502 CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
503 CODE_REG, Code::entry_point_offset(
504 Code::EntryKind::kMonomorphic))));
505 CLOBBERS_LR(__ blr(LR));
506
507 RecordSafepoint(locs);
508 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
509 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
510 if (is_optimizing()) {
511 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
512 } else {
513 // Add deoptimization continuation point after the call and before the
514 // arguments are removed.
515 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
516 }
517 RecordCatchEntryMoves(pending_deoptimization_env_);
518 EmitDropArguments(args_desc.SizeWithTypeArgs());
519}
520
521void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
522 intptr_t deopt_id,
523 const InstructionSource& source,
524 LocationSummary* locs,
525 Code::EntryKind entry_kind,
526 bool receiver_can_be_smi) {
527 ASSERT(CanCallDart());
528 ASSERT(ic_data.NumArgsTested() == 1);
529 const Code& initial_stub = StubCode::SwitchableCallMiss();
530 const char* switchable_call_mode = "smiable";
531 if (!receiver_can_be_smi) {
532 switchable_call_mode = "non-smi";
533 ic_data.set_receiver_cannot_be_smi(true);
534 }
535 const UnlinkedCall& data =
536 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
537
538 compiler::ObjectPoolBuilder& op = __ object_pool_builder();
539
540 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
541 // Clear argument descriptor to keep gc happy when it gets pushed on to
542 // the stack.
543 __ LoadImmediate(R4, 0);
544 __ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
545
546 const auto snapshot_behavior =
547 FLAG_precompiled_mode ? compiler::ObjectPoolBuilderEntry::
548 kResetToSwitchableCallMissEntryPoint
549 : compiler::ObjectPoolBuilderEntry::kSnapshotable;
550 const intptr_t data_index =
551 op.AddObject(data, ObjectPool::Patchability::kPatchable);
552 const intptr_t initial_stub_index = op.AddObject(
553 initial_stub, ObjectPool::Patchability::kPatchable, snapshot_behavior);
554 ASSERT((data_index + 1) == initial_stub_index);
555
556 // The AOT runtime will replace the slot in the object pool with the
557 // entrypoint address - see app_snapshot.cc.
558 CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
559 CLOBBERS_LR(__ blr(LR));
560
561 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
562 locs, pending_deoptimization_env_);
563 EmitDropArguments(ic_data.SizeWithTypeArgs());
564}
565
566void FlowGraphCompiler::EmitUnoptimizedStaticCall(
567 intptr_t size_with_type_args,
568 intptr_t deopt_id,
569 const InstructionSource& source,
570 LocationSummary* locs,
571 const ICData& ic_data,
572 Code::EntryKind entry_kind) {
573 ASSERT(CanCallDart());
574 const Code& stub =
575 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
576 __ LoadObject(R5, ic_data);
577 GenerateDartCall(deopt_id, source, stub,
578 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
579 EmitDropArguments(size_with_type_args);
580}
581
582void FlowGraphCompiler::EmitOptimizedStaticCall(
583 const Function& function,
584 const Array& arguments_descriptor,
585 intptr_t size_with_type_args,
586 intptr_t deopt_id,
587 const InstructionSource& source,
588 LocationSummary* locs,
589 Code::EntryKind entry_kind) {
590 ASSERT(CanCallDart());
591 ASSERT(!function.IsClosureFunction());
592 if (function.PrologueNeedsArgumentsDescriptor()) {
593 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
594 } else {
595 if (!FLAG_precompiled_mode) {
596 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
597 }
598 }
599 // Do not use the code from the function, but let the code be patched so that
600 // we can record the outgoing edges to other code.
601 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
602 function, entry_kind);
603 EmitDropArguments(size_with_type_args);
604}
605
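// Emits an AOT dispatch-table call: the selector offset is added to the
// receiver's class id and the result is used to index DISPATCH_TABLE_REG.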
606void FlowGraphCompiler::EmitDispatchTableCall(
607 int32_t selector_offset,
608 const Array& arguments_descriptor) {
609 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
610 ASSERT(CanCallDart());
611 ASSERT(cid_reg != ARGS_DESC_REG);
612 if (!arguments_descriptor.IsNull()) {
613 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
614 }
615 const intptr_t offset = selector_offset - DispatchTable::kOriginElement;
616 CLOBBERS_LR({
617 // Would like cid_reg to be available on entry to the target function
618 // for checking purposes.
619 ASSERT(cid_reg != LR);
620 __ AddImmediate(LR, cid_reg, offset);
621 __ Call(compiler::Address(DISPATCH_TABLE_REG, LR, UXTX,
622 compiler::Address::Scaled));
623 });
624}
625
626Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
627 Register reg,
628 const Object& obj,
629 bool needs_number_check,
630 const InstructionSource& source,
631 intptr_t deopt_id) {
632 if (needs_number_check) {
633 ASSERT(!obj.IsMint() && !obj.IsDouble());
634 __ LoadObject(TMP, obj);
635 __ PushPair(TMP, reg);
636 if (is_optimizing()) {
637 // No breakpoints in optimized code.
638 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
639 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
640 } else {
641 // Patchable to support breakpoints.
642 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
643 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
644 source);
645 }
646 // Stub returns result in flags (result of a cmp, we need Z computed).
647 // Discard constant.
648 // Restore 'reg'.
649 __ PopPair(ZR, reg);
650 } else {
651 __ CompareObject(reg, obj);
652 }
653 return EQ;
654}
655
656Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
657 Register left,
658 Register right,
659 bool needs_number_check,
660 const InstructionSource& source,
661 intptr_t deopt_id) {
662 if (needs_number_check) {
663 __ PushPair(right, left);
664 if (is_optimizing()) {
665 __ BranchLink(StubCode::OptimizedIdenticalWithNumberCheck());
666 } else {
667 __ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
668 }
669 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
670 // Stub returns result in flags (result of a cmp, we need Z computed).
671 __ PopPair(right, left);
672 } else {
673 __ CompareObjectRegisters(left, right);
674 }
675 return EQ;
676}
677
678Condition FlowGraphCompiler::EmitBoolTest(Register value,
679 BranchLabels labels,
680 bool invert) {
681 __ Comment("BoolTest");
682 if (labels.true_label == nullptr || labels.false_label == nullptr) {
683 __ tsti(value, compiler::Immediate(
684 compiler::target::ObjectAlignment::kBoolValueMask));
685 return invert ? NE : EQ;
686 }
687 const intptr_t bool_bit =
688 compiler::target::ObjectAlignment::kBoolValueBitPosition;
689 if (labels.fall_through == labels.false_label) {
690 if (invert) {
691 __ tbnz(labels.true_label, value, bool_bit);
692 } else {
693 __ tbz(labels.true_label, value, bool_bit);
694 }
695 } else {
696 if (invert) {
697 __ tbz(labels.false_label, value, bool_bit);
698 } else {
699 __ tbnz(labels.false_label, value, bool_bit);
700 }
701 if (labels.fall_through != labels.true_label) {
702 __ b(labels.true_label);
703 }
704 }
705 return kInvalidCondition;
706}
707
708// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
709// FlowGraphCompiler::SlowPathEnvironmentFor.
710void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
711#if defined(DEBUG)
712 locs->CheckWritableInputs();
713 ClobberDeadTempRegisters(locs);
714#endif
715 // TODO(vegorov): consider saving only caller save (volatile) registers.
716 __ PushRegisters(*locs->live_registers());
717}
718
719void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
720 __ PopRegisters(*locs->live_registers());
721}
722
723#if defined(DEBUG)
724void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
725 // Clobber temporaries that have not been manually preserved.
726 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
727 Location tmp = locs->temp(i);
728 // TODO(zerny): clobber non-live temporary FPU registers.
729 if (tmp.IsRegister() &&
730 !locs->live_registers()->ContainsRegister(tmp.reg())) {
731 __ movz(tmp.reg(), compiler::Immediate(0xf7), 0);
732 }
733 }
734}
735#endif
736
737Register FlowGraphCompiler::EmitTestCidRegister() {
738 return R2;
739}
740
741void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
742 intptr_t count_without_type_args,
743 const Array& arguments_descriptor) {
744 __ Comment("EmitTestAndCall");
745 // Load receiver into R0.
746 __ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
747 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
748}
749
750void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
751 bool if_smi) {
752 if (if_smi) {
753 __ BranchIfSmi(R0, label);
754 } else {
755 __ BranchIfNotSmi(R0, label);
756 }
757}
758
759void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
760 ASSERT(class_id_reg != R0);
761 __ LoadClassId(class_id_reg, R0);
762}
763
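// Emits a move between any combination of register, FPU register, stack
// slot, and constant locations, using `allocator` when a scratch register
// is required.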
764void FlowGraphCompiler::EmitMove(Location destination,
765 Location source,
766 TemporaryRegisterAllocator* allocator) {
767 if (destination.Equals(source)) return;
768
769 if (source.IsRegister()) {
770 if (destination.IsRegister()) {
771 __ mov(destination.reg(), source.reg());
772 } else {
773 ASSERT(destination.IsStackSlot());
774 const intptr_t dest_offset = destination.ToStackSlotOffset();
775 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
776 }
777 } else if (source.IsStackSlot()) {
778 if (destination.IsRegister()) {
779 const intptr_t source_offset = source.ToStackSlotOffset();
780 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
781 } else if (destination.IsFpuRegister()) {
782 const intptr_t src_offset = source.ToStackSlotOffset();
783 VRegister dst = destination.fpu_reg();
784 __ LoadDFromOffset(dst, source.base_reg(), src_offset);
785 } else {
786 ASSERT(destination.IsStackSlot());
787 const intptr_t source_offset = source.ToStackSlotOffset();
788 const intptr_t dest_offset = destination.ToStackSlotOffset();
789 Register tmp = allocator->AllocateTemporary();
790 __ LoadFromOffset(tmp, source.base_reg(), source_offset);
791 __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
792 allocator->ReleaseTemporary();
793 }
794 } else if (source.IsFpuRegister()) {
795 if (destination.IsFpuRegister()) {
796 __ vmov(destination.fpu_reg(), source.fpu_reg());
797 } else {
798 if (destination.IsStackSlot() /*32-bit float*/ ||
799 destination.IsDoubleStackSlot()) {
800 const intptr_t dest_offset = destination.ToStackSlotOffset();
801 VRegister src = source.fpu_reg();
802 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
803 } else {
804 ASSERT(destination.IsQuadStackSlot());
805 const intptr_t dest_offset = destination.ToStackSlotOffset();
806 __ StoreQToOffset(source.fpu_reg(), destination.base_reg(),
807 dest_offset);
808 }
809 }
810 } else if (source.IsDoubleStackSlot()) {
811 if (destination.IsFpuRegister()) {
812 const intptr_t source_offset = source.ToStackSlotOffset();
813 const VRegister dst = destination.fpu_reg();
814 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
815 } else {
816 ASSERT(destination.IsDoubleStackSlot() ||
817 destination.IsStackSlot() /*32-bit float*/);
818 const intptr_t source_offset = source.ToStackSlotOffset();
819 const intptr_t dest_offset = destination.ToStackSlotOffset();
820 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
821 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
822 }
823 } else if (source.IsQuadStackSlot()) {
824 if (destination.IsFpuRegister()) {
825 const intptr_t source_offset = source.ToStackSlotOffset();
826 __ LoadQFromOffset(destination.fpu_reg(), source.base_reg(),
827 source_offset);
828 } else {
829 ASSERT(destination.IsQuadStackSlot());
830 const intptr_t source_offset = source.ToStackSlotOffset();
831 const intptr_t dest_offset = destination.ToStackSlotOffset();
832 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
833 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
834 }
835 } else {
836 ASSERT(source.IsConstant());
837 if (destination.IsStackSlot()) {
838 Register tmp = allocator->AllocateTemporary();
839 source.constant_instruction()->EmitMoveToLocation(this, destination, tmp);
840 allocator->ReleaseTemporary();
841 } else {
842 source.constant_instruction()->EmitMoveToLocation(this, destination);
843 }
844 }
845}
846
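// Maps a byte count to the operand size used for integer loads and stores.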
847static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
848 switch (bytes) {
849 case 8:
850 return compiler::kEightBytes;
851 case 4:
852 return compiler::kFourBytes;
853 case 2:
854 return compiler::kTwoBytes;
855 case 1:
856 return compiler::kByte;
857 default:
858 UNREACHABLE();
859 }
860}
861
862void FlowGraphCompiler::EmitNativeMoveArchitecture(
863 const compiler::ffi::NativeLocation& destination,
864 const compiler::ffi::NativeLocation& source) {
865 const auto& src_payload_type = source.payload_type();
866 const auto& dst_payload_type = destination.payload_type();
867 const auto& src_container_type = source.container_type();
868 const auto& dst_container_type = destination.container_type();
869 ASSERT(src_container_type.IsFloat() == dst_container_type.IsFloat());
870 ASSERT(src_container_type.IsInt() == dst_container_type.IsInt());
871 ASSERT(src_payload_type.IsSigned() == dst_payload_type.IsSigned());
872 ASSERT(src_payload_type.IsPrimitive());
873 ASSERT(dst_payload_type.IsPrimitive());
874 const intptr_t src_size = src_payload_type.SizeInBytes();
875 const intptr_t dst_size = dst_payload_type.SizeInBytes();
876 const bool sign_or_zero_extend = dst_size > src_size;
877
878 if (source.IsRegisters()) {
879 const auto& src = source.AsRegisters();
880 ASSERT(src.num_regs() == 1);
881 const auto src_reg = src.reg_at(0);
882
883 if (destination.IsRegisters()) {
884 const auto& dst = destination.AsRegisters();
885 ASSERT(dst.num_regs() == 1);
886 const auto dst_reg = dst.reg_at(0);
887 ASSERT(destination.container_type().SizeInBytes() <= 8);
888 if (!sign_or_zero_extend) {
889 __ MoveRegister(dst_reg, src_reg);
890 } else {
891 if (src_payload_type.IsSigned()) {
892 __ sbfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
893 } else {
894 __ ubfx(dst_reg, src_reg, 0, src_size * kBitsPerByte);
895 }
896 }
897
898 } else if (destination.IsFpuRegisters()) {
899 // Fpu Registers should only contain doubles and registers only ints.
900 UNREACHABLE();
901
902 } else {
903 ASSERT(destination.IsStack());
904 const auto& dst = destination.AsStack();
905 ASSERT(!sign_or_zero_extend);
906 auto const op_size =
907 BytesToOperandSize(destination.container_type().SizeInBytes());
908 __ StoreToOffset(src.reg_at(0), dst.base_register(),
909 dst.offset_in_bytes(), op_size);
910 }
911
912 } else if (source.IsFpuRegisters()) {
913 const auto& src = source.AsFpuRegisters();
914 // We have not implemented conversions here, use IL convert instructions.
915 ASSERT(src_payload_type.Equals(dst_payload_type));
916
917 if (destination.IsRegisters()) {
918 // Fpu Registers should only contain doubles and registers only ints.
919 UNREACHABLE();
920
921 } else if (destination.IsFpuRegisters()) {
922 const auto& dst = destination.AsFpuRegisters();
923 __ vmov(dst.fpu_reg(), src.fpu_reg());
924
925 } else {
926 ASSERT(destination.IsStack());
927 ASSERT(src_payload_type.IsFloat());
928 const auto& dst = destination.AsStack();
929 switch (dst_size) {
930 case 8:
931 __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
932 dst.offset_in_bytes());
933 return;
934 case 4:
935 __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
936 dst.offset_in_bytes());
937 return;
938 default:
939 UNREACHABLE();
940 }
941 }
942
943 } else {
944 ASSERT(source.IsStack());
945 const auto& src = source.AsStack();
946 if (destination.IsRegisters()) {
947 const auto& dst = destination.AsRegisters();
948 ASSERT(dst.num_regs() == 1);
949 const auto dst_reg = dst.reg_at(0);
950 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
951 src_payload_type.AsPrimitive().representation());
952 } else if (destination.IsFpuRegisters()) {
953 ASSERT(src_payload_type.Equals(dst_payload_type));
954 ASSERT(src_payload_type.IsFloat());
955 const auto& dst = destination.AsFpuRegisters();
956 switch (src_size) {
957 case 8:
958 __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
959 src.offset_in_bytes());
960 return;
961 case 4:
962 __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
963 src.offset_in_bytes());
964 return;
965 default:
966 UNREACHABLE();
967 }
968
969 } else {
970 ASSERT(destination.IsStack());
971 UNREACHABLE();
972 }
973 }
974}
975
976void FlowGraphCompiler::EmitNativeLoad(Register dst,
977 Register base,
978 intptr_t offset,
979 compiler::ffi::PrimitiveType type) {
980 switch (type) {
981 case compiler::ffi::kInt8:
982 __ LoadFromOffset(dst, base, offset, compiler::kByte);
983 break;
984 case compiler::ffi::kUint8:
985 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedByte);
986 break;
987 case compiler::ffi::kInt16:
988 __ LoadFromOffset(dst, base, offset, compiler::kTwoBytes);
989 break;
990 case compiler::ffi::kUint16:
991 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
992 break;
993 case compiler::ffi::kInt32:
994 __ LoadFromOffset(dst, base, offset, compiler::kFourBytes);
995 break;
996 case compiler::ffi::kUint32:
997 case compiler::ffi::kFloat:
998 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
999 break;
1000 case compiler::ffi::kInt64:
1001 case compiler::ffi::kUint64:
1002 case compiler::ffi::kDouble:
1003 __ LoadFromOffset(dst, base, offset, compiler::kEightBytes);
1004 break;
1005
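    // The remaining cases assemble loads whose widths (3, 5, 6, and 7 bytes)
    // have no single load instruction, by combining smaller loads with
    // shifted ors.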
1006 case compiler::ffi::kInt24:
1007 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1008 __ LoadFromOffset(TMP, base, offset + 2, compiler::kByte);
1009 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1010 break;
1011 case compiler::ffi::kUint24:
1012 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedTwoBytes);
1013 __ LoadFromOffset(TMP, base, offset + 2, compiler::kUnsignedByte);
1014 __ orr(dst, dst, compiler::Operand(TMP, LSL, 16));
1015 break;
1016 case compiler::ffi::kInt40:
1017 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1018 __ LoadFromOffset(TMP, base, offset + 4, compiler::kByte);
1019 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1020 break;
1021 case compiler::ffi::kUint40:
1022 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1023 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedByte);
1024 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1025 break;
1026 case compiler::ffi::kInt48:
1027 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1028 __ LoadFromOffset(TMP, base, offset + 4, compiler::kTwoBytes);
1029 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1030 break;
1031 case compiler::ffi::kUint48:
1032 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1033 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1034 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1035 break;
1036 case compiler::ffi::kInt56:
1037 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1038 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1039 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1040 __ LoadFromOffset(TMP, base, offset + 6, compiler::kByte);
1041 __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
1042 break;
1043 case compiler::ffi::kUint56:
1044 __ LoadFromOffset(dst, base, offset, compiler::kUnsignedFourBytes);
1045 __ LoadFromOffset(TMP, base, offset + 4, compiler::kUnsignedTwoBytes);
1046 __ orr(dst, dst, compiler::Operand(TMP, LSL, 32));
1047 __ LoadFromOffset(TMP, base, offset + 6, compiler::kUnsignedByte);
1048 __ orr(dst, dst, compiler::Operand(TMP, LSL, 48));
1049 break;
1050 default:
1051 UNREACHABLE();
1052 }
1053}
1054
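// Loads the BSS entry for `relocation` PC-relatively: the relocation slot
// stores the distance from itself to the start of the BSS section.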
1055void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1056 Register dst,
1057 Register tmp) {
1058 compiler::Label skip_reloc;
1059 __ b(&skip_reloc);
1060 InsertBSSRelocation(relocation);
1061 __ Bind(&skip_reloc);
1062
1063 __ adr(tmp, compiler::Immediate(-compiler::target::kWordSize));
1064
1065 // tmp holds the address of the relocation.
1066 __ ldr(dst, compiler::Address(tmp));
1067
1068 // dst holds the relocation itself: tmp - bss_start.
1069 // tmp = tmp + (bss_start - tmp) = bss_start
1070 __ add(tmp, tmp, compiler::Operand(dst));
1071
1072 // tmp holds the start of the BSS section.
1073 // Load the "get-thread" routine: *bss_start.
1074 __ ldr(dst, compiler::Address(tmp));
1075}
1076
1077#undef __
1078#define __ compiler_->assembler()->
1079
1080void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1081 const Location source = move.src();
1082 const Location destination = move.dest();
1083
1084 if (source.IsRegister() && destination.IsRegister()) {
1085 ASSERT(source.reg() != TMP);
1086 ASSERT(destination.reg() != TMP);
1087 __ mov(TMP, source.reg());
1088 __ mov(source.reg(), destination.reg());
1089 __ mov(destination.reg(), TMP);
1090 } else if (source.IsRegister() && destination.IsStackSlot()) {
1091 Exchange(source.reg(), destination.base_reg(),
1092 destination.ToStackSlotOffset());
1093 } else if (source.IsStackSlot() && destination.IsRegister()) {
1094 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1095 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1096 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1097 destination.base_reg(), destination.ToStackSlotOffset());
1098 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1099 const VRegister dst = destination.fpu_reg();
1100 const VRegister src = source.fpu_reg();
1101 __ vmov(VTMP, src);
1102 __ vmov(src, dst);
1103 __ vmov(dst, VTMP);
1104 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1105 ASSERT(destination.IsDoubleStackSlot() || destination.IsQuadStackSlot() ||
1106 source.IsDoubleStackSlot() || source.IsQuadStackSlot());
1107 bool double_width =
1108 destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
1109 VRegister reg =
1110 source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
1111 Register base_reg =
1112 source.IsFpuRegister() ? destination.base_reg() : source.base_reg();
1113 const intptr_t slot_offset = source.IsFpuRegister()
1114 ? destination.ToStackSlotOffset()
1115 : source.ToStackSlotOffset();
1116
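    // Swap the FPU register with the stack slot through VTMP: load the slot
    // into VTMP, store the register into the slot, then move VTMP into the
    // register.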
1117 if (double_width) {
1118 __ LoadDFromOffset(VTMP, base_reg, slot_offset);
1119 __ StoreDToOffset(reg, base_reg, slot_offset);
1120 __ fmovdd(reg, VTMP);
1121 } else {
1122 __ LoadQFromOffset(VTMP, base_reg, slot_offset);
1123 __ StoreQToOffset(reg, base_reg, slot_offset);
1124 __ vmov(reg, VTMP);
1125 }
1126 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1127 const intptr_t source_offset = source.ToStackSlotOffset();
1128 const intptr_t dest_offset = destination.ToStackSlotOffset();
1129
1130 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1131 VRegister scratch = ensure_scratch.reg();
1132 __ LoadDFromOffset(VTMP, source.base_reg(), source_offset);
1133 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1134 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
1135 __ StoreDToOffset(scratch, source.base_reg(), source_offset);
1136 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1137 const intptr_t source_offset = source.ToStackSlotOffset();
1138 const intptr_t dest_offset = destination.ToStackSlotOffset();
1139
1140 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1141 VRegister scratch = ensure_scratch.reg();
1142 __ LoadQFromOffset(VTMP, source.base_reg(), source_offset);
1143 __ LoadQFromOffset(scratch, destination.base_reg(), dest_offset);
1144 __ StoreQToOffset(VTMP, destination.base_reg(), dest_offset);
1145 __ StoreQToOffset(scratch, source.base_reg(), source_offset);
1146 } else {
1147 UNREACHABLE();
1148 }
1149}
1150
1151void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1152 const compiler::Address& src) {
1153 UNREACHABLE();
1154}
1155
1156// Do not call or implement this function. Instead, use the form below that
1157// uses an offset from the frame pointer instead of an Address.
1158void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1159 UNREACHABLE();
1160}
1161
1162// Do not call or implement this function. Instead, use the form below that
1163// uses offsets from the frame pointer instead of Addresses.
1164void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1165 const compiler::Address& mem2) {
1166 UNREACHABLE();
1167}
1168
1169void ParallelMoveEmitter::Exchange(Register reg,
1170 Register base_reg,
1171 intptr_t stack_offset) {
1172 ScratchRegisterScope tmp(this, reg);
1173 __ mov(tmp.reg(), reg);
1174 __ LoadFromOffset(reg, base_reg, stack_offset);
1175 __ StoreToOffset(tmp.reg(), base_reg, stack_offset);
1176}
1177
1178void ParallelMoveEmitter::Exchange(Register base_reg1,
1179 intptr_t stack_offset1,
1180 Register base_reg2,
1181 intptr_t stack_offset2) {
1182 ScratchRegisterScope tmp1(this, kNoRegister);
1183 ScratchRegisterScope tmp2(this, tmp1.reg());
1184 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1185 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1186 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1187 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1188}
1189
1190void ParallelMoveEmitter::SpillScratch(Register reg) {
1191 __ Push(reg);
1192}
1193
1194void ParallelMoveEmitter::RestoreScratch(Register reg) {
1195 __ Pop(reg);
1196}
1197
1198void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1199 __ PushQuad(reg);
1200}
1201
1202void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1203 __ PopQuad(reg);
1204}
1205
1206#undef __
1207
1208} // namespace dart
1209
1210#endif // defined(TARGET_ARCH_ARM64)