flow_graph_compiler_riscv.cc
1// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_RISCV.
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
9
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
32 // Note: Unlike the other architectures, we are not using PC-relative calls
33 // in AOT to call the write barrier stubs. We are making use of TMP as an
34 // alternate link register to avoid spilling RA inline and don't want to
35 // introduce another relocation type.
36}
37
39 // BlockInfos are zone-allocated, so their destructors are not called.
40 // Verify the labels explicitly here.
41 for (int i = 0; i < block_info_.length(); ++i) {
42 ASSERT(!block_info_[i]->jump_label()->IsLinked());
43 }
44}
45
47 // TODO(riscv): Dynamically test for the vector extension and otherwise
48 // allocate SIMD values to register-pairs or quads?
49 return false;
50}
51
53#if XLEN == 32
54 return false;
55#else
56 return true;
57#endif
58}
59
62 intrinsic_mode_ = true;
63 ASSERT(!assembler()->constant_pool_allowed());
64}
65
68 intrinsic_mode_ = false;
69}
70
71TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
72 DeoptInfoBuilder* builder,
73 const Array& deopt_table) {
74 if (deopt_env_ == nullptr) {
75 ++builder->current_info_number_;
76 return TypedData::null();
77 }
78
79 AllocateOutgoingArguments(deopt_env_);
80
81 intptr_t slot_ix = 0;
82 Environment* current = deopt_env_;
83
84 // Emit all kMaterializeObject instructions describing objects to be
85 // materialized on the deoptimization as a prefix to the deoptimization info.
86 EmitMaterializations(deopt_env_, builder);
87
88 // The real frame starts here.
89 builder->MarkFrameStart();
90
91 Zone* zone = compiler->zone();
92
93 builder->AddPp(current->function(), slot_ix++);
94 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
95 builder->AddCallerFp(slot_ix++);
96 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
97
98 // Emit all values that are needed for materialization as a part of the
99 // expression stack for the bottom-most frame. This guarantees that GC
100 // will be able to find them during materialization.
101 slot_ix = builder->EmitMaterializationArguments(slot_ix);
102
103 // For the innermost environment, set outgoing arguments and the locals.
104 for (intptr_t i = current->Length() - 1;
105 i >= current->fixed_parameter_count(); i--) {
106 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
107 }
108
109 Environment* previous = current;
110 current = current->outer();
111 while (current != nullptr) {
112 builder->AddPp(current->function(), slot_ix++);
113 builder->AddPcMarker(previous->function(), slot_ix++);
114 builder->AddCallerFp(slot_ix++);
115
116 // For any outer environment the deopt id is that of the call instruction
117 // which is recorded in the outer environment.
118 builder->AddReturnAddress(current->function(),
119 DeoptId::ToDeoptAfter(current->GetDeoptId()),
120 slot_ix++);
121
122 // The values of outgoing arguments can be changed from the inlined call so
123 // we must read them from the previous environment.
124 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
125 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
126 slot_ix++);
127 }
128
129 // Set the locals, note that outgoing arguments are not in the environment.
130 for (intptr_t i = current->Length() - 1;
131 i >= current->fixed_parameter_count(); i--) {
132 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
133 }
134
135 // Iterate on the outer environment.
136 previous = current;
137 current = current->outer();
138 }
139 // The previous pointer is now the outermost environment.
140 ASSERT(previous != nullptr);
141
142 // Add slots for the outermost environment.
143 builder->AddCallerPp(slot_ix++);
144 builder->AddPcMarker(previous->function(), slot_ix++);
145 builder->AddCallerFp(slot_ix++);
146 builder->AddCallerPc(slot_ix++);
147
148 // For the outermost environment, set the incoming arguments.
149 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
150 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
151 }
152
153 return builder->CreateDeoptInfo(deopt_table);
154}
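// Illustrative sketch (not part of this file): the slot order produced by
// CreateDeoptInfo above for one inlined call (one outer environment), written
// out as plain strings. Only the ordering is meaningful; the helper name is
// made up, and the builder methods above are what actually encode each slot.
#include <string>
#include <vector>

std::vector<std::string> DeoptSlotOrderSketch() {
  return {
      // Innermost (deoptimizing) frame.
      "pp", "pc marker", "caller fp", "return address",
      "materialization arguments",
      "outgoing arguments + locals of the innermost frame",
      // One block per outer (inlined-into) environment.
      "pp", "pc marker", "caller fp", "return address (deopt-after id)",
      "outgoing arguments of the inlined call (read from the inner frame)",
      "locals of the outer frame",
      // Slots describing the caller of the outermost frame.
      "caller pp", "pc marker", "caller fp", "caller pc",
      "incoming arguments of the outermost frame",
  };
}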
155
157 intptr_t stub_ix) {
158 // Calls do not need stubs, they share a deoptimization trampoline.
159 ASSERT(reason() != ICData::kDeoptAtCall);
160 compiler::Assembler* assembler = compiler->assembler();
161#define __ assembler->
162 __ Comment("%s", Name());
164 if (FLAG_trap_on_deoptimization) {
165 __ trap();
166 }
167
168 ASSERT(deopt_env() != nullptr);
169 __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
170 set_pc_offset(assembler->CodeSize());
171#undef __
172}
173
174#define __ assembler->
175// Static methods of FlowGraphCompiler that take an assembler.
176
177void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
178 Register reg_to_call,
179 intptr_t sub_type_cache_index) {
180 __ LoadField(
182 compiler::FieldAddress(
183 reg_to_call,
185 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
186 sub_type_cache_index);
188}
189
190#undef __
191#define __ assembler()->
192// Instance methods of FlowGraphCompiler.
193
194// Fall through if bool_register contains null.
195void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
196 compiler::Label* is_true,
197 compiler::Label* is_false) {
198 compiler::Label fall_through;
199 __ beq(bool_register, NULL_REG, &fall_through,
201 BranchLabels labels = {is_true, is_false, &fall_through};
202 Condition true_condition =
203 EmitBoolTest(bool_register, labels, /*invert=*/false);
204 ASSERT(true_condition != kInvalidCondition);
205 __ BranchIf(true_condition, is_true);
206 __ j(is_false);
207 __ Bind(&fall_through);
208}
209
210void FlowGraphCompiler::EmitFrameEntry() {
211 const Function& function = parsed_function().function();
213 (!is_optimizing() || may_reoptimize())) {
214 __ Comment("Invocation Count Check");
215 const Register function_reg = A0;
216 const Register usage_reg = A1;
217 __ lx(function_reg, compiler::FieldAddress(CODE_REG, Code::owner_offset()));
218
219 __ LoadFieldFromOffset(usage_reg, function_reg,
220 Function::usage_counter_offset(),
222 // Reoptimization of an optimized function is triggered by counting in
223 // IC stubs, but not at the entry of the function.
224 if (!is_optimizing()) {
225 __ addi(usage_reg, usage_reg, 1);
226 __ StoreFieldToOffset(usage_reg, function_reg,
227 Function::usage_counter_offset(),
229 }
230 __ CompareImmediate(usage_reg, GetOptimizationThreshold());
231 compiler::Label dont_optimize;
232 __ BranchIf(LT, &dont_optimize, compiler::Assembler::kNearJump);
233 __ lx(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
234 __ jr(TMP);
235 __ Bind(&dont_optimize);
236 }
237
238 if (flow_graph().graph_entry()->NeedsFrame()) {
239 __ Comment("Enter frame");
240 if (flow_graph().IsCompiledForOsr()) {
241 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
242 ASSERT(extra_slots >= 0);
243 __ EnterOsrFrame(extra_slots * kWordSize);
244 } else {
245 ASSERT(StackSize() >= 0);
246 __ EnterDartFrame(StackSize() * kWordSize);
247 }
248 } else if (FLAG_precompiled_mode) {
250 }
251}
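// Illustrative sketch (not part of this file): the invocation-count check the
// block above emits, as plain C++ with made-up names. "threshold" stands in
// for GetOptimizationThreshold(); the generated code performs the same
// increment and comparison on the function's usage counter and, instead of
// returning a flag, jumps straight to the optimize entry stored in the Thread.
#include <cstdint>

bool ShouldTriggerOptimization(int32_t* usage_counter,
                               int32_t threshold,
                               bool is_optimizing) {
  if (!is_optimizing) {
    // Only unoptimized code bumps the counter here; reoptimization of already
    // optimized code is counted in IC stubs, not at function entry.
    *usage_counter += 1;
  }
  return *usage_counter >= threshold;
}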
252
253const InstructionSource& PrologueSource() {
254 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
255 /*inlining_id=*/0);
256 return prologue_source;
257}
258
260 BeginCodeSourceRange(PrologueSource());
261
262 EmitFrameEntry();
263 ASSERT(assembler()->constant_pool_allowed());
264
265 // In unoptimized code, initialize (non-argument) stack allocated slots.
266 if (!is_optimizing()) {
267 const int num_locals = parsed_function().num_stack_locals();
268
269 intptr_t args_desc_slot = -1;
270 if (parsed_function().has_arg_desc_var()) {
272 parsed_function().arg_desc_var());
273 }
274
275 __ Comment("Initialize spill slots");
276 const intptr_t fp_to_sp_delta =
278 for (intptr_t i = 0; i < num_locals; ++i) {
279 const intptr_t slot_index =
281 Register value_reg =
282 slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
283 // SP-relative addresses allow for compressed instructions.
284 __ StoreToOffset(value_reg, SP,
285 (slot_index + fp_to_sp_delta) * kWordSize);
286 }
287 } else if (parsed_function().suspend_state_var() != nullptr &&
288 !flow_graph().IsCompiledForOsr()) {
289 // Initialize synthetic :suspend_state variable early
290 // as it may be accessed by GC and exception handling before
291 // InitSuspendableFunction stub is called.
292 const intptr_t slot_index =
294 parsed_function().suspend_state_var());
295 const intptr_t fp_to_sp_delta =
297 __ StoreToOffset(NULL_REG, SP, (slot_index + fp_to_sp_delta) * kWordSize);
298 }
299
300 EndCodeSourceRange(PrologueSource());
301}
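// Illustrative sketch (not part of this file): how the spill-slot loop above
// picks the initial value and the SP-relative byte offset for each slot. The
// struct and helper names are made up; slot indices are FP-relative frame
// slot indices and fp_to_sp_delta is the distance, in words, between FP- and
// SP-relative indexing for this frame.
#include <cstdint>

struct SpillSlotInit {
  intptr_t sp_offset_in_bytes;  // used by StoreToOffset(value, SP, ...)
  bool use_args_descriptor;     // ARGS_DESC_REG instead of NULL_REG
};

SpillSlotInit InitialValueForSpillSlot(intptr_t fp_slot_index,
                                       intptr_t fp_to_sp_delta_in_words,
                                       intptr_t args_desc_slot,  // -1 if none
                                       intptr_t word_size) {
  return {(fp_slot_index + fp_to_sp_delta_in_words) * word_size,
          fp_slot_index == args_desc_slot};
}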
302
304 const Code& stub,
305 ObjectPool::SnapshotBehavior snapshot_behavior) {
306 ASSERT(!stub.IsNull());
307 if (CanPcRelativeCall(stub)) {
308 __ GenerateUnRelocatedPcRelativeCall();
309 AddPcRelativeCallStubTarget(stub);
310 } else {
312 CodeEntryKind::kNormal, snapshot_behavior);
313 AddStubCallTarget(stub);
314 }
315}
316
317void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
318 ASSERT(!stub.IsNull());
319 if (CanPcRelativeCall(stub)) {
320 __ GenerateUnRelocatedPcRelativeTailCall();
321 AddPcRelativeTailCallStubTarget(stub);
322 } else {
323 __ LoadObject(CODE_REG, stub);
324 __ lx(TMP, compiler::FieldAddress(
326 __ jr(TMP);
327 AddStubCallTarget(stub);
328 }
329}
330
331void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
332 ASSERT(!stub.IsNull());
333 if (CanPcRelativeCall(stub)) {
334 if (flow_graph().graph_entry()->NeedsFrame()) {
335 __ LeaveDartFrame();
336 }
337 __ GenerateUnRelocatedPcRelativeTailCall();
338 AddPcRelativeTailCallStubTarget(stub);
339#if defined(DEBUG)
340 __ Breakpoint();
341#endif
342 } else {
343 __ LoadObject(CODE_REG, stub);
344 if (flow_graph().graph_entry()->NeedsFrame()) {
345 __ LeaveDartFrame();
346 }
347 __ lx(TMP, compiler::FieldAddress(
349 __ jr(TMP);
350 AddStubCallTarget(stub);
351 }
352}
353
355 const InstructionSource& source,
356 const Code& stub,
358 LocationSummary* locs,
359 ObjectPool::SnapshotBehavior snapshot_behavior) {
360 __ JumpAndLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
362 pending_deoptimization_env_);
363}
364
365void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
366 const InstructionSource& source,
367 const Code& stub,
369 LocationSummary* locs,
370 Code::EntryKind entry_kind) {
371 ASSERT(CanCallDart());
372 __ JumpAndLinkPatchable(stub, entry_kind);
373 EmitCallsiteMetadata(source, deopt_id, kind, locs,
374 pending_deoptimization_env_);
375}
376
377void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
378 const InstructionSource& source,
380 LocationSummary* locs,
381 const Function& target,
382 Code::EntryKind entry_kind) {
383 ASSERT(CanCallDart());
384 if (CanPcRelativeCall(target)) {
385 __ GenerateUnRelocatedPcRelativeCall();
386 AddPcRelativeCallTarget(target, entry_kind);
387 EmitCallsiteMetadata(source, deopt_id, kind, locs,
388 pending_deoptimization_env_);
389 } else {
390 // Call sites to the same target can share object pool entries. These
391 // call sites are never patched for breakpoints: the function is deoptimized
392 // and the unoptimized code with IC calls for static calls is patched
393 // instead.
395 const auto& stub = StubCode::CallStaticFunction();
396 __ JumpAndLinkWithEquivalence(stub, target, entry_kind);
397 EmitCallsiteMetadata(source, deopt_id, kind, locs,
398 pending_deoptimization_env_);
399 AddStaticCallTarget(target, entry_kind);
400 }
401}
402
403void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
404 // We do not check for overflow when incrementing the edge counter. The
405 // function should normally be optimized long before the counter can
406 // overflow; and though we do not reset the counters when we optimize or
407 // deoptimize, there is a bound on the number of
408 // optimization/deoptimization cycles we will attempt.
409 ASSERT(!edge_counters_array_.IsNull());
410 ASSERT(assembler_->constant_pool_allowed());
411 __ Comment("Edge counter");
412 __ LoadObject(A0, edge_counters_array_);
413 __ LoadFieldFromOffset(TMP, A0, Array::element_offset(edge_id));
414 __ addi(TMP, TMP, Smi::RawValue(1));
415 __ StoreFieldToOffset(TMP, A0, Array::element_offset(edge_id));
416}
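// Illustrative sketch (not part of this file): why the increment above adds
// Smi::RawValue(1) rather than 1. Smis are stored shifted left by one with a
// clear tag bit, so adding the tagged representation of 1 (i.e. 2) to a
// tagged counter bumps the untagged value by exactly 1 and keeps it a valid
// Smi. kSmiTagShift and the helper names are assumed for this sketch.
#include <cstdint>

constexpr intptr_t kSmiTagShift = 1;
constexpr intptr_t SmiRawValue(intptr_t value) { return value << kSmiTagShift; }
constexpr intptr_t SmiUntag(intptr_t raw) { return raw >> kSmiTagShift; }

static_assert(SmiUntag(SmiRawValue(41) + SmiRawValue(1)) == 42,
              "adding RawValue(1) increments the untagged counter by one");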
417
419 const Code& stub,
420 const ICData& ic_data,
421 intptr_t deopt_id,
422 const InstructionSource& source,
423 LocationSummary* locs,
424 Code::EntryKind entry_kind) {
425 ASSERT(CanCallDart());
426 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
427 // Each ICData propagated from unoptimized to optimized code contains the
428 // function that corresponds to the Dart function of that IC call. Due
429 // to inlining in optimized code, that function may not correspond to the
430 // top-level function (parsed_function().function()), which could be
431 // reoptimized and whose counter needs to be incremented.
432 // Pass the function explicitly; it is used by the IC stub.
433
434 __ LoadObject(A6, parsed_function().function());
435 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
436 __ LoadUniqueObject(IC_DATA_REG, ic_data);
437 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
438 entry_kind);
439 EmitDropArguments(ic_data.SizeWithTypeArgs());
440}
441
442void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
443 const ICData& ic_data,
444 intptr_t deopt_id,
445 const InstructionSource& source,
446 LocationSummary* locs,
447 Code::EntryKind entry_kind) {
448 ASSERT(CanCallDart());
449 ASSERT(entry_kind == Code::EntryKind::kNormal ||
450 entry_kind == Code::EntryKind::kUnchecked);
451 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
452 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
453 __ LoadUniqueObject(IC_DATA_REG, ic_data);
454 __ LoadUniqueObject(CODE_REG, stub);
455 const intptr_t entry_point_offset =
456 entry_kind == Code::EntryKind::kNormal
457 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
458 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
459 __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
460 __ jalr(RA);
461 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
462 pending_deoptimization_env_);
463 EmitDropArguments(ic_data.SizeWithTypeArgs());
464}
465
467 const String& name,
468 const Array& arguments_descriptor,
469 intptr_t deopt_id,
470 const InstructionSource& source,
471 LocationSummary* locs) {
472 ASSERT(CanCallDart());
473 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
474 ASSERT(!FLAG_precompiled_mode);
475 const ArgumentsDescriptor args_desc(arguments_descriptor);
476 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
477 zone(),
478 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
479
480 __ Comment("MegamorphicCall");
481 // Load receiver into A0.
482 __ LoadFromOffset(A0, SP,
483 (args_desc.Count() - 1) * compiler::target::kWordSize);
484 // Use same code pattern as instance call so it can be parsed by code patcher.
485 __ LoadUniqueObject(IC_DATA_REG, cache);
486 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
487 __ Call(compiler::FieldAddress(
488 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
489
490 RecordSafepoint(locs);
491 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
492 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
493 if (is_optimizing()) {
494 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
495 } else {
496 // Add deoptimization continuation point after the call and before the
497 // arguments are removed.
498 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
499 }
500 RecordCatchEntryMoves(pending_deoptimization_env_);
501 EmitDropArguments(args_desc.SizeWithTypeArgs());
502}
503
504void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
505 intptr_t deopt_id,
506 const InstructionSource& source,
507 LocationSummary* locs,
508 Code::EntryKind entry_kind,
509 bool receiver_can_be_smi) {
510 ASSERT(CanCallDart());
511 ASSERT(ic_data.NumArgsTested() == 1);
512 const Code& initial_stub = StubCode::SwitchableCallMiss();
513 const char* switchable_call_mode = "smiable";
514 if (!receiver_can_be_smi) {
515 switchable_call_mode = "non-smi";
516 ic_data.set_receiver_cannot_be_smi(true);
517 }
518 const UnlinkedCall& data =
519 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
520
521 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
522 // Clear the arguments descriptor to keep the GC happy when it gets pushed
523 // onto the stack.
524 __ LoadImmediate(ARGS_DESC_REG, 0);
525 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
526 // The AOT runtime will replace the slot in the object pool with the
527 // entrypoint address - see app_snapshot.cc.
528 const auto snapshot_behavior =
530 __ LoadUniqueObject(RA, initial_stub, snapshot_behavior);
531 __ LoadUniqueObject(IC_DATA_REG, data);
532 __ jalr(RA);
533
534 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
535 locs, pending_deoptimization_env_);
536 EmitDropArguments(ic_data.SizeWithTypeArgs());
537}
538
539void FlowGraphCompiler::EmitUnoptimizedStaticCall(
540 intptr_t size_with_type_args,
541 intptr_t deopt_id,
542 const InstructionSource& source,
543 LocationSummary* locs,
544 const ICData& ic_data,
545 Code::EntryKind entry_kind) {
546 ASSERT(CanCallDart());
547 const Code& stub =
548 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
549 __ LoadObject(IC_DATA_REG, ic_data);
550 GenerateDartCall(deopt_id, source, stub,
551 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
552 EmitDropArguments(size_with_type_args);
553}
554
556 const Function& function,
557 const Array& arguments_descriptor,
558 intptr_t size_with_type_args,
559 intptr_t deopt_id,
560 const InstructionSource& source,
561 LocationSummary* locs,
562 Code::EntryKind entry_kind) {
563 ASSERT(CanCallDart());
566 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
567 } else {
568 if (!FLAG_precompiled_mode) {
569 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
570 }
571 }
572 // Do not use the code from the function, but let the code be patched so that
573 // we can record the outgoing edges to other code.
574 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
575 function, entry_kind);
576 EmitDropArguments(size_with_type_args);
577}
578
580 int32_t selector_offset,
581 const Array& arguments_descriptor) {
582 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
583 ASSERT(CanCallDart());
584 ASSERT(cid_reg != ARGS_DESC_REG);
585 if (!arguments_descriptor.IsNull()) {
586 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
587 }
588 const uintptr_t offset = selector_offset - DispatchTable::kOriginElement;
589 // Would like cid_reg to be available on entry to the target function
590 // for checking purposes.
591 ASSERT(cid_reg != TMP);
592 __ AddShifted(TMP, DISPATCH_TABLE_REG, cid_reg,
594 __ LoadFromOffset(TMP, TMP, offset << compiler::target::kWordSizeLog2);
595 __ jalr(TMP);
596}
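// Illustrative sketch (not part of this file): the address computation behind
// the AddShifted/LoadFromOffset pair above, with made-up names.
// DISPATCH_TABLE_REG points at the table's "origin" element, so the entry for
// a call is indexed by the receiver's class id plus the bias
// (selector_offset - kOriginElement).
#include <cstdint>

uintptr_t DispatchTableSlotAddress(uintptr_t table_origin,
                                   uintptr_t class_id,
                                   intptr_t selector_offset,
                                   intptr_t origin_element,
                                   int word_size_log2) {
  const uintptr_t bias =
      static_cast<uintptr_t>(selector_offset - origin_element);
  // AddShifted: table_origin + (class_id << log2); the load then adds the
  // byte offset (bias << log2). The word at that address is the entry point.
  return table_origin + (class_id << word_size_log2) + (bias << word_size_log2);
}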
597
599 Register reg,
600 const Object& obj,
601 bool needs_number_check,
602 const InstructionSource& source,
603 intptr_t deopt_id) {
604 if (needs_number_check) {
605 ASSERT(!obj.IsMint() && !obj.IsDouble());
606 __ LoadObject(TMP, obj);
607 __ PushRegisterPair(TMP, reg);
608 if (is_optimizing()) {
609 // No breakpoints in optimized code.
610 __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
611 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
612 } else {
613 // Patchable to support breakpoints.
614 __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
615 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
616 source);
617 }
618 __ PopRegisterPair(ZR, reg);
619 // RISC-V has no condition flags, so the result is instead returned in
620 // TMP: zero if equal, non-zero if not equal.
621 ASSERT(reg != TMP);
622 __ CompareImmediate(TMP, 0);
623 } else {
624 __ CompareObject(reg, obj);
625 }
626 return EQ;
627}
628
630 Register left,
631 Register right,
632 bool needs_number_check,
633 const InstructionSource& source,
634 intptr_t deopt_id) {
635 if (needs_number_check) {
636 __ PushRegisterPair(right, left);
637 if (is_optimizing()) {
638 __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
639 } else {
640 __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
641 }
642 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
643 __ PopRegisterPair(right, left);
644 // RISC-V has no condition flags, so the result is instead returned in
645 // TMP: zero if equal, non-zero if not equal.
646 ASSERT(left != TMP);
647 ASSERT(right != TMP);
648 __ CompareImmediate(TMP, 0);
649 } else {
650 __ CompareObjectRegisters(left, right);
651 }
652 return EQ;
653}
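// Illustrative sketch (not part of this file): since RISC-V has no condition
// flags, the identical-with-number-check stubs above return their verdict in
// a register (TMP, zero means "identical"), and the Condition handed back to
// the caller is just "compare that register against zero". Enum and function
// names below are made up for the sketch.
#include <cstdint>

enum SketchCondition { kEqual, kNotEqual };

bool ConditionHolds(intptr_t tmp_after_stub, SketchCondition cond) {
  const bool identical = (tmp_after_stub == 0);
  return cond == kEqual ? identical : !identical;
}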
654
656 BranchLabels labels,
657 bool invert) {
658 __ Comment("BoolTest");
660 return invert ? NE : EQ;
661}
662
663// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
664// FlowGraphCompiler::SlowPathEnvironmentFor.
665void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
666#if defined(DEBUG)
667 locs->CheckWritableInputs();
668 ClobberDeadTempRegisters(locs);
669#endif
670 // TODO(vegorov): consider saving only caller save (volatile) registers.
671 __ PushRegisters(*locs->live_registers());
672}
673
674void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
675 __ PopRegisters(*locs->live_registers());
676}
677
678#if defined(DEBUG)
679void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
680 // Clobber temporaries that have not been manually preserved.
681 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
682 Location tmp = locs->temp(i);
683 // TODO(zerny): clobber non-live temporary FPU registers.
684 if (tmp.IsRegister() &&
685 !locs->live_registers()->ContainsRegister(tmp.reg())) {
686 __ li(tmp.reg(), 0xf7);
687 }
688 }
689}
690#endif
691
692Register FlowGraphCompiler::EmitTestCidRegister() {
693 return A1;
694}
695
696void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
697 intptr_t count_without_type_args,
698 const Array& arguments_descriptor) {
699 __ Comment("EmitTestAndCall");
700 // Load receiver into A0.
701 __ LoadFromOffset(A0, SP, (count_without_type_args - 1) * kWordSize);
702 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
703}
704
705void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
706 bool if_smi) {
707 if (if_smi) {
708 __ BranchIfSmi(A0, label);
709 } else {
710 __ BranchIfNotSmi(A0, label);
711 }
712}
713
714void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
715 ASSERT(class_id_reg != A0);
716 __ LoadClassId(class_id_reg, A0);
717}
718
719Location FlowGraphCompiler::RebaseIfImprovesAddressing(Location loc) const {
720 if (loc.IsStackSlot() && (loc.base_reg() == FP)) {
721 intptr_t fp_sp_dist =
723 __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
724 return Location::StackSlot(loc.stack_index() - fp_sp_dist, SP);
725 }
726 if (loc.IsDoubleStackSlot() && (loc.base_reg() == FP)) {
727 intptr_t fp_sp_dist =
729 __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
730 return Location::DoubleStackSlot(loc.stack_index() - fp_sp_dist, SP);
731 }
732 return loc;
733}
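// Illustrative sketch (not part of this file): the rebasing performed above,
// with a made-up helper name. If SP == FP + fp_sp_dist * kWordSize for the
// current frame, then the FP-relative slot at index i names the same memory
// as the SP-relative slot at index (i - fp_sp_dist); SP-based addresses are
// preferred because they can use compressed RISC-V load/store encodings.
#include <cstdint>

intptr_t RebaseFpSlotToSp(intptr_t fp_slot_index, intptr_t fp_sp_dist) {
  return fp_slot_index - fp_sp_dist;
}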
734
737 TemporaryRegisterAllocator* allocator) {
738 if (destination.Equals(source)) return;
739
740 if (source.IsRegister()) {
741 if (destination.IsRegister()) {
742 __ mv(destination.reg(), source.reg());
743 } else {
744 ASSERT(destination.IsStackSlot());
745 const intptr_t dest_offset = destination.ToStackSlotOffset();
746 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
747 }
748 } else if (source.IsStackSlot()) {
749 if (destination.IsRegister()) {
750 const intptr_t source_offset = source.ToStackSlotOffset();
751 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
752 } else if (destination.IsFpuRegister()) {
753 const intptr_t src_offset = source.ToStackSlotOffset();
754 FRegister dst = destination.fpu_reg();
755 __ LoadDFromOffset(dst, source.base_reg(), src_offset);
756 } else {
757 ASSERT(destination.IsStackSlot());
758 const intptr_t source_offset = source.ToStackSlotOffset();
759 const intptr_t dest_offset = destination.ToStackSlotOffset();
760 __ LoadFromOffset(TMP, source.base_reg(), source_offset);
761 __ StoreToOffset(TMP, destination.base_reg(), dest_offset);
762 }
763 } else if (source.IsFpuRegister()) {
764 if (destination.IsFpuRegister()) {
765 __ fmvd(destination.fpu_reg(), source.fpu_reg());
766 } else {
767 if (destination.IsStackSlot() /*32-bit float*/ ||
768 destination.IsDoubleStackSlot()) {
769 const intptr_t dest_offset = destination.ToStackSlotOffset();
770 FRegister src = source.fpu_reg();
771 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
772 } else {
773 ASSERT(destination.IsQuadStackSlot());
775 }
776 }
777 } else if (source.IsDoubleStackSlot()) {
778 if (destination.IsFpuRegister()) {
779 const intptr_t source_offset = source.ToStackSlotOffset();
780 const FRegister dst = destination.fpu_reg();
781 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
782 } else {
783 ASSERT(destination.IsDoubleStackSlot() ||
784 destination.IsStackSlot() /*32-bit float*/);
785 const intptr_t source_offset = source.ToStackSlotOffset();
786 const intptr_t dest_offset = destination.ToStackSlotOffset();
787 __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
788 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
789 }
790 } else if (source.IsQuadStackSlot()) {
792 } else if (source.IsPairLocation()) {
793#if XLEN == 32
794 ASSERT(destination.IsPairLocation());
795 for (intptr_t i : {0, 1}) {
796 EmitMove(destination.Component(i), source.Component(i), allocator);
797 }
798#else
799 UNREACHABLE();
800#endif
801 } else {
802 ASSERT(source.IsConstant());
803 source.constant_instruction()->EmitMoveToLocation(this, destination, TMP,
804 source.pair_index());
805 }
806}
807
808static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
809 switch (bytes) {
810 case 8:
812 case 4:
814 case 2:
816 case 1:
818 default:
820 }
821}
822
823void FlowGraphCompiler::EmitNativeMoveArchitecture(
824 const compiler::ffi::NativeLocation& destination,
825 const compiler::ffi::NativeLocation& source) {
826 const auto& src_type = source.payload_type();
827 const auto& dst_type = destination.payload_type();
828
829 ASSERT(src_type.IsSigned() == dst_type.IsSigned());
830 ASSERT(src_type.IsPrimitive());
831 ASSERT(dst_type.IsPrimitive());
832 const intptr_t src_size = src_type.SizeInBytes();
833 const intptr_t dst_size = dst_type.SizeInBytes();
834 const bool sign_or_zero_extend = dst_size > src_size;
835
836 if (source.IsRegisters()) {
837 const auto& src = source.AsRegisters();
838 ASSERT(src.num_regs() == 1);
839 const auto src_reg = src.reg_at(0);
840
841 if (destination.IsRegisters()) {
842 const auto& dst = destination.AsRegisters();
843 ASSERT(dst.num_regs() == 1);
844 const auto dst_reg = dst.reg_at(0);
845 ASSERT(destination.container_type().SizeInBytes() <=
847 if (!sign_or_zero_extend) {
848#if XLEN == 32
849 __ MoveRegister(dst_reg, src_reg);
850#else
851 if (src_size <= 4) {
852 // Sign-extended to XLEN, even for unsigned types.
853 __ addiw(dst_reg, src_reg, 0);
854 } else {
855 __ MoveRegister(dst_reg, src_reg);
856 }
857#endif
858 } else {
859 switch (src_type.AsPrimitive().representation()) {
860 // Calling convention: scalars are extended according to the sign of
861 // their type to 32-bits, then sign-extended to XLEN bits.
863 __ slli(dst_reg, src_reg, XLEN - 8);
864 __ srai(dst_reg, dst_reg, XLEN - 8);
865 return;
867 __ slli(dst_reg, src_reg, XLEN - 16);
868 __ srai(dst_reg, dst_reg, XLEN - 16);
869 return;
871 __ andi(dst_reg, src_reg, 0xFF);
872 return;
874 __ slli(dst_reg, src_reg, 16);
875#if XLEN == 32
876 __ srli(dst_reg, dst_reg, 16);
877#else
878 __ srliw(dst_reg, dst_reg, 16);
879#endif
880 return;
881#if XLEN >= 64
884 // Note even uint32 is sign-extended to XLEN.
885 __ addiw(dst_reg, src_reg, 0);
886 return;
887#endif
889#if XLEN >= 64
893#endif
894 __ slli(dst_reg, src_reg, XLEN - src_size * kBitsPerByte);
895 __ srai(dst_reg, dst_reg, XLEN - src_size * kBitsPerByte);
896 return;
898#if XLEN >= 64
902#endif
903 __ slli(dst_reg, src_reg, XLEN - src_size * kBitsPerByte);
904 __ srli(dst_reg, dst_reg, XLEN - src_size * kBitsPerByte);
905 return;
906 default:
907 UNREACHABLE();
908 }
909 }
910
911 } else if (destination.IsFpuRegisters()) {
912 const auto& dst = destination.AsFpuRegisters();
913 ASSERT(src_size == dst_size);
914 ASSERT(src.num_regs() == 1);
915 switch (src_size) {
916 case 4:
917 __ fmvwx(dst.fpu_reg(), src.reg_at(0));
918 return;
919 case 8:
920#if XLEN == 32
922#else
923 __ fmvdx(dst.fpu_reg(), src.reg_at(0));
924#endif
925 return;
926 default:
927 UNREACHABLE();
928 }
929
930 } else {
931 ASSERT(destination.IsStack());
932 const auto& dst = destination.AsStack();
933 ASSERT(!sign_or_zero_extend);
934 auto const op_size =
935 BytesToOperandSize(destination.container_type().SizeInBytes());
936 __ StoreToOffset(src.reg_at(0), dst.base_register(),
937 dst.offset_in_bytes(), op_size);
938 }
939 } else if (source.IsFpuRegisters()) {
940 const auto& src = source.AsFpuRegisters();
941 // We have not implemented conversions here, use IL convert instructions.
942 ASSERT(src_type.Equals(dst_type));
943
944 if (destination.IsRegisters()) {
945 const auto& dst = destination.AsRegisters();
946 ASSERT(src_size == dst_size);
947 ASSERT(dst.num_regs() == 1);
948 switch (src_size) {
949 case 4:
950 __ fmvxw(dst.reg_at(0), src.fpu_reg());
951 return;
952 case 8:
953#if XLEN == 32
955#else
956 __ fmvxd(dst.reg_at(0), src.fpu_reg());
957#endif
958 return;
959 default:
960 UNREACHABLE();
961 }
962
963 } else if (destination.IsFpuRegisters()) {
964 const auto& dst = destination.AsFpuRegisters();
965 __ fmvd(dst.fpu_reg(), src.fpu_reg());
966
967 } else {
968 ASSERT(destination.IsStack());
969 ASSERT(src_type.IsFloat());
970 const auto& dst = destination.AsStack();
971 switch (dst_size) {
972 case 8:
973 __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
974 dst.offset_in_bytes());
975 return;
976 case 4:
977 __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
978 dst.offset_in_bytes());
979 return;
980 default:
981 UNREACHABLE();
982 }
983 }
984
985 } else {
986 ASSERT(source.IsStack());
987 const auto& src = source.AsStack();
988 if (destination.IsRegisters()) {
989 const auto& dst = destination.AsRegisters();
990 ASSERT(dst.num_regs() == 1);
991 const auto dst_reg = dst.reg_at(0);
992 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
993 src_type.AsPrimitive().representation());
994 } else if (destination.IsFpuRegisters()) {
995 ASSERT(src_type.Equals(dst_type));
996 ASSERT(src_type.IsFloat());
997 const auto& dst = destination.AsFpuRegisters();
998 switch (src_size) {
999 case 8:
1000 __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
1001 src.offset_in_bytes());
1002 return;
1003 case 4:
1004 __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
1005 src.offset_in_bytes());
1006 return;
1007 default:
1008 UNIMPLEMENTED();
1009 }
1010 } else {
1011 ASSERT(destination.IsStack());
1012 UNREACHABLE();
1013 }
1014 }
1015}
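// Illustrative sketch (not part of this file): what the slli/srai and
// slli/srli pairs in the register-to-register path above compute, written for
// XLEN == 64 with made-up helper names. It assumes arithmetic right shift for
// signed values, which is exactly what srai provides.
#include <cstdint>

int64_t SignExtendToXlen(uint64_t reg, int src_bits) {
  const int shift = 64 - src_bits;                      // e.g. 56 for int8
  return static_cast<int64_t>(reg << shift) >> shift;   // slli + srai
}

uint64_t ZeroExtendToXlen(uint64_t reg, int src_bits) {
  const int shift = 64 - src_bits;                      // e.g. 48 for uint16
  return (reg << shift) >> shift;                       // slli + srli
}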
1016
1017void FlowGraphCompiler::EmitNativeLoad(Register dst,
1018 Register base,
1019 intptr_t offset,
1021 switch (type) {
1023 __ lb(dst, compiler::Address(base, offset));
1024 return;
1026 __ lbu(dst, compiler::Address(base, offset));
1027 return;
1029 __ lh(dst, compiler::Address(base, offset));
1030 return;
1032 __ lhu(dst, compiler::Address(base, offset));
1033 return;
1035 __ lw(dst, compiler::Address(base, offset));
1036 return;
1039#if XLEN == 32
1040 __ lw(dst, compiler::Address(base, offset));
1041#else
1042 __ lwu(dst, compiler::Address(base, offset));
1043#endif
1044 return;
1045#if XLEN >= 64
1049 __ ld(dst, compiler::Address(base, offset));
1050 return;
1051#endif
1052 default:
1053 break;
1054 }
1055
1056 Register tmp = kNoRegister;
1057 if (dst != T1 && base != T1) tmp = T1;
1058 if (dst != T2 && base != T2) tmp = T2;
1059 if (dst != T3 && base != T3) tmp = T3;
1060 ASSERT(tmp != kNoRegister);
1062 __ PushRegister(tmp);
1063
1064 switch (type) {
1066 __ lhu(dst, compiler::Address(base, offset));
1067 __ lb(tmp, compiler::Address(base, offset + 2));
1068 __ slli(tmp, tmp, 16);
1069 __ or_(dst, dst, tmp);
1070 break;
1072 __ lhu(dst, compiler::Address(base, offset));
1073 __ lbu(tmp, compiler::Address(base, offset + 2));
1074 __ slli(tmp, tmp, 16);
1075 __ or_(dst, dst, tmp);
1076 break;
1077#if XLEN >= 64
1079 __ lwu(dst, compiler::Address(base, offset));
1080 __ lb(tmp, compiler::Address(base, offset + 4));
1081 __ slli(tmp, tmp, 32);
1082 __ or_(dst, dst, tmp);
1083 break;
1085 __ lwu(dst, compiler::Address(base, offset));
1086 __ lbu(tmp, compiler::Address(base, offset + 4));
1087 __ slli(tmp, tmp, 32);
1088 __ or_(dst, dst, tmp);
1089 break;
1091 __ lwu(dst, compiler::Address(base, offset));
1092 __ lh(tmp, compiler::Address(base, offset + 4));
1093 __ slli(tmp, tmp, 32);
1094 __ or_(dst, dst, tmp);
1095 break;
1097 __ lwu(dst, compiler::Address(base, offset));
1098 __ lhu(tmp, compiler::Address(base, offset + 4));
1099 __ slli(tmp, tmp, 32);
1100 __ or_(dst, dst, tmp);
1101 break;
1103 __ lwu(dst, compiler::Address(base, offset));
1104 __ lhu(tmp, compiler::Address(base, offset + 4));
1105 __ slli(tmp, tmp, 32);
1106 __ or_(dst, dst, tmp);
1107 __ lb(tmp, compiler::Address(base, offset + 6));
1108 __ slli(tmp, tmp, 48);
1109 __ or_(dst, dst, tmp);
1110 break;
1112 __ lwu(dst, compiler::Address(base, offset));
1113 __ lhu(tmp, compiler::Address(base, offset + 4));
1114 __ slli(tmp, tmp, 32);
1115 __ or_(dst, dst, tmp);
1116 __ lbu(tmp, compiler::Address(base, offset + 6));
1117 __ slli(tmp, tmp, 48);
1118 __ or_(dst, dst, tmp);
1119 break;
1120#endif
1121 default:
1122 UNREACHABLE();
1123 }
1124
1125 __ PopRegister(tmp);
1126}
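// Illustrative sketch (not part of this file): how the fallback path above
// assembles an odd-sized integer from two naturally sized loads, shown for a
// signed 3-byte value with a made-up helper name. Memory is little-endian on
// RISC-V, so the low half is loaded unsigned and the high byte supplies the
// sign.
#include <cstdint>
#include <cstring>

int32_t LoadSignedInt24(const uint8_t* base, intptr_t offset) {
  uint16_t low;                                               // lhu dst, off(base)
  std::memcpy(&low, base + offset, sizeof(low));
  const int8_t high = static_cast<int8_t>(base[offset + 2]);  // lb tmp, off+2
  const uint32_t combined =
      (static_cast<uint32_t>(static_cast<int32_t>(high)) << 16) | low;  // slli; or
  return static_cast<int32_t>(combined);
}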
1127
1129 Register dst,
1130 Register tmp) {
1131 compiler::Label skip_reloc;
1132 __ j(&skip_reloc, compiler::Assembler::kNearJump);
1133 InsertBSSRelocation(relocation);
1134 __ Bind(&skip_reloc);
1135
1136 __ auipc(tmp, 0);
1137 __ addi(tmp, tmp, -compiler::target::kWordSize);
1138
1139 // tmp holds the address of the relocation.
1140 __ lx(dst, compiler::Address(tmp));
1141
1142 // dst holds the relocation itself: bss_start - tmp.
1143 // tmp = tmp + (bss_start - tmp) = bss_start
1144 __ add(tmp, tmp, dst);
1145
1146 // tmp holds the start of the BSS section.
1147 // Load the "get-thread" routine: *bss_start.
1148 __ lx(dst, compiler::Address(tmp));
1149}
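// Illustrative sketch (not part of this file): the address arithmetic in
// LoadBSSEntry above, with a made-up helper name. The word planted by
// InsertBSSRelocation holds the displacement (bss_start - address_of_that_word),
// so adding the loaded value to the word's own address (recovered with auipc)
// yields the start of the BSS section, whose entry is then loaded.
#include <cstdint>

uintptr_t ResolveBssEntrySketch(uintptr_t relocation_word_address) {
  // lx dst, 0(tmp): read the stored displacement.
  const intptr_t displacement =
      *reinterpret_cast<const intptr_t*>(relocation_word_address);
  // add tmp, tmp, dst: relocation address + displacement == bss_start.
  const uintptr_t bss_start =
      relocation_word_address + static_cast<uintptr_t>(displacement);
  // lx dst, 0(tmp): the BSS slot holds the routine's address.
  return *reinterpret_cast<const uintptr_t*>(bss_start);
}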
1150
1151#undef __
1152#define __ compiler_->assembler()->
1153
1154void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1155 const Location source = move.src();
1156 const Location destination = move.dest();
1157
1158 if (source.IsRegister() && destination.IsRegister()) {
1159 ASSERT(source.reg() != TMP);
1160 ASSERT(destination.reg() != TMP);
1161 __ mv(TMP, source.reg());
1162 __ mv(source.reg(), destination.reg());
1163 __ mv(destination.reg(), TMP);
1164 } else if (source.IsRegister() && destination.IsStackSlot()) {
1165 Exchange(source.reg(), destination.base_reg(),
1166 destination.ToStackSlotOffset());
1167 } else if (source.IsStackSlot() && destination.IsRegister()) {
1168 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1169 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1170 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1171 destination.base_reg(), destination.ToStackSlotOffset());
1172 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1173 const FRegister dst = destination.fpu_reg();
1174 const FRegister src = source.fpu_reg();
1175 __ fmvd(FTMP, src);
1176 __ fmvd(src, dst);
1177 __ fmvd(dst, FTMP);
1178 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1179 UNIMPLEMENTED();
1180 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1181 const intptr_t source_offset = source.ToStackSlotOffset();
1182 const intptr_t dest_offset = destination.ToStackSlotOffset();
1183
1184 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1185 FRegister scratch = ensure_scratch.reg();
1186 __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
1187 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1188 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
1189 __ StoreDToOffset(scratch, source.base_reg(), source_offset);
1190 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1191 UNIMPLEMENTED();
1192 } else {
1193 UNREACHABLE();
1194 }
1195}
1196
1197void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1198 const compiler::Address& src) {
1199 UNREACHABLE();
1200}
1201
1202// Do not call or implement this function. Instead, use the form below that
1203// uses an offset from the frame pointer instead of an Address.
1204void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1205 UNREACHABLE();
1206}
1207
1208// Do not call or implement this function. Instead, use the form below that
1209// uses offsets from the frame pointer instead of Addresses.
1210void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1211 const compiler::Address& mem2) {
1212 UNREACHABLE();
1213}
1214
1215void ParallelMoveEmitter::Exchange(Register reg,
1216 Register base_reg,
1217 intptr_t stack_offset) {
1218 __ mv(TMP, reg);
1219 __ LoadFromOffset(reg, base_reg, stack_offset);
1220 __ StoreToOffset(TMP, base_reg, stack_offset);
1221}
1222
1223void ParallelMoveEmitter::Exchange(Register base_reg1,
1224 intptr_t stack_offset1,
1225 Register base_reg2,
1226 intptr_t stack_offset2) {
1227 ScratchRegisterScope tmp1(this, kNoRegister);
1228 ScratchRegisterScope tmp2(this, tmp1.reg());
1229 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1230 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1231 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1232 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1233}
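// Illustrative sketch (not part of this file): the memory-to-memory exchange
// above, with a made-up helper name. Two scratch registers are needed so that
// both slots are read before either one is overwritten.
#include <cstdint>

void ExchangeStackSlotsSketch(intptr_t* slot1, intptr_t* slot2) {
  const intptr_t tmp1 = *slot1;  // LoadFromOffset(tmp1, base_reg1, offset1)
  const intptr_t tmp2 = *slot2;  // LoadFromOffset(tmp2, base_reg2, offset2)
  *slot2 = tmp1;                 // StoreToOffset(tmp1, base_reg2, offset2)
  *slot1 = tmp2;                 // StoreToOffset(tmp2, base_reg1, offset1)
}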
1234
1235void ParallelMoveEmitter::SpillScratch(Register reg) {
1236 __ PushRegister(reg);
1237}
1238
1239void ParallelMoveEmitter::RestoreScratch(Register reg) {
1240 __ PopRegister(reg);
1241}
1242
1243void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1244 __ subi(SP, SP, sizeof(double));
1245 __ fsd(reg, compiler::Address(SP, 0));
1246}
1247
1248void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1249 __ fld(reg, compiler::Address(SP, 0));
1250 __ addi(SP, SP, sizeof(double));
1251}
1252
1253#undef __
1254
1255} // namespace dart
1256
1257#endif // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)