flow_graph_compiler_riscv.cc
1// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_RISCV.
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
8#include "vm/compiler/backend/flow_graph_compiler.h"
9
10#include "vm/compiler/api/type_check_mode.h"
11#include "vm/compiler/backend/il_printer.h"
12#include "vm/compiler/backend/locations.h"
13#include "vm/compiler/ffi/native_location.h"
14#include "vm/compiler/jit/compiler.h"
15#include "vm/cpu.h"
16#include "vm/dart_entry.h"
17#include "vm/deopt_instructions.h"
18#include "vm/dispatch_table.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/stack_frame.h"
23#include "vm/stub_code.h"
24#include "vm/symbols.h"
25
26namespace dart {
27
28DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
29DECLARE_FLAG(bool, enable_simd_inline);
30
31void FlowGraphCompiler::ArchSpecificInitialization() {
32 // Note: Unlike the other architectures, we are not using PC-relative calls
33 // in AOT to call the write barrier stubs. We are making use of TMP as an
34 // alternate link register to avoid spilling RA inline and don't want to
35 // introduce another relocation type.
36}
37
38FlowGraphCompiler::~FlowGraphCompiler() {
39 // BlockInfos are zone-allocated, so their destructors are not called.
40 // Verify the labels explicitly here.
41 for (int i = 0; i < block_info_.length(); ++i) {
42 ASSERT(!block_info_[i]->jump_label()->IsLinked());
43 }
44}
45
46bool FlowGraphCompiler::SupportsUnboxedDoubles() {
47 return true;
48}
49
50bool FlowGraphCompiler::SupportsUnboxedSimd128() {
51 // TODO(riscv): Dynamically test for the vector extension and otherwise
52 // allocate SIMD values to register-pairs or quads?
53 return false;
54}
55
56bool FlowGraphCompiler::CanConvertInt64ToDouble() {
57#if XLEN == 32
58 return false;
59#else
60 return true;
61#endif
62}
63
64void FlowGraphCompiler::EnterIntrinsicMode() {
65 ASSERT(!intrinsic_mode());
66 intrinsic_mode_ = true;
67 ASSERT(!assembler()->constant_pool_allowed());
68}
69
70void FlowGraphCompiler::ExitIntrinsicMode() {
71 ASSERT(intrinsic_mode());
72 intrinsic_mode_ = false;
73}
74
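// Builds the deoptimization translation for this deopt point: object
// materializations are emitted first, then the innermost frame (PP, PC
// marker, caller FP, return address, outgoing arguments and locals), then
// each outer environment, and finally the caller's PP, PC marker, FP and PC
// for the outermost frame.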
75TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
76 DeoptInfoBuilder* builder,
77 const Array& deopt_table) {
78 if (deopt_env_ == nullptr) {
79 ++builder->current_info_number_;
80 return TypedData::null();
81 }
82
83 AllocateOutgoingArguments(deopt_env_);
84
85 intptr_t slot_ix = 0;
86 Environment* current = deopt_env_;
87
88 // Emit all kMaterializeObject instructions describing objects to be
89 // materialized on the deoptimization as a prefix to the deoptimization info.
90 EmitMaterializations(deopt_env_, builder);
91
92 // The real frame starts here.
93 builder->MarkFrameStart();
94
95 Zone* zone = compiler->zone();
96
97 builder->AddPp(current->function(), slot_ix++);
98 builder->AddPcMarker(Function::ZoneHandle(zone), slot_ix++);
99 builder->AddCallerFp(slot_ix++);
100 builder->AddReturnAddress(current->function(), deopt_id(), slot_ix++);
101
102 // Emit all values that are needed for materialization as a part of the
103 // expression stack for the bottom-most frame. This guarantees that GC
104 // will be able to find them during materialization.
105 slot_ix = builder->EmitMaterializationArguments(slot_ix);
106
107 // For the innermost environment, set outgoing arguments and the locals.
108 for (intptr_t i = current->Length() - 1;
109 i >= current->fixed_parameter_count(); i--) {
110 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
111 }
112
113 Environment* previous = current;
114 current = current->outer();
115 while (current != nullptr) {
116 builder->AddPp(current->function(), slot_ix++);
117 builder->AddPcMarker(previous->function(), slot_ix++);
118 builder->AddCallerFp(slot_ix++);
119
120 // For any outer environment the deopt id is that of the call instruction
121 // which is recorded in the outer environment.
122 builder->AddReturnAddress(current->function(),
123 DeoptId::ToDeoptAfter(current->GetDeoptId()),
124 slot_ix++);
125
126 // The values of outgoing arguments can be changed from the inlined call so
127 // we must read them from the previous environment.
128 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
129 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i),
130 slot_ix++);
131 }
132
133 // Set the locals, note that outgoing arguments are not in the environment.
134 for (intptr_t i = current->Length() - 1;
135 i >= current->fixed_parameter_count(); i--) {
136 builder->AddCopy(current->ValueAt(i), current->LocationAt(i), slot_ix++);
137 }
138
139 // Iterate on the outer environment.
140 previous = current;
141 current = current->outer();
142 }
143 // The previous pointer is now the outermost environment.
144 ASSERT(previous != nullptr);
145
146 // Add slots for the outermost environment.
147 builder->AddCallerPp(slot_ix++);
148 builder->AddPcMarker(previous->function(), slot_ix++);
149 builder->AddCallerFp(slot_ix++);
150 builder->AddCallerPc(slot_ix++);
151
152 // For the outermost environment, set the incoming arguments.
153 for (intptr_t i = previous->fixed_parameter_count() - 1; i >= 0; i--) {
154 builder->AddCopy(previous->ValueAt(i), previous->LocationAt(i), slot_ix++);
155 }
156
157 return builder->CreateDeoptInfo(deopt_table);
158}
159
160void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
161 intptr_t stub_ix) {
162 // Calls do not need stubs, they share a deoptimization trampoline.
163 ASSERT(reason() != ICData::kDeoptAtCall);
164 compiler::Assembler* assembler = compiler->assembler();
165#define __ assembler->
166 __ Comment("%s", Name());
167 __ Bind(entry_label());
168 if (FLAG_trap_on_deoptimization) {
169 __ trap();
170 }
171
172 ASSERT(deopt_env() != nullptr);
173 __ Call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
174 set_pc_offset(assembler->CodeSize());
175#undef __
176}
177
178#define __ assembler->
179// Static methods of FlowGraphCompiler that take an assembler.
180
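// Loads the entry point of the type-testing stub from the AbstractType held
// in reg_to_call, loads the SubtypeTestCache from the object pool at
// sub_type_cache_index, and calls the stub through a scratch register.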
181void FlowGraphCompiler::GenerateIndirectTTSCall(compiler::Assembler* assembler,
182 Register reg_to_call,
183 intptr_t sub_type_cache_index) {
184 __ LoadField(
185 TTSInternalRegs::kScratchReg,
186 compiler::FieldAddress(
187 reg_to_call,
188 compiler::target::AbstractType::type_test_stub_entry_point_offset()));
189 __ LoadWordFromPoolIndex(TypeTestABI::kSubtypeTestCacheReg,
190 sub_type_cache_index);
191 __ jalr(TTSInternalRegs::kScratchReg);
192}
193
194#undef __
195#define __ assembler()->
196// Instance methods of FlowGraphCompiler.
197
198// Fall through if bool_register contains null.
199void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
200 compiler::Label* is_true,
201 compiler::Label* is_false) {
202 compiler::Label fall_through;
203 __ beq(bool_register, NULL_REG, &fall_through,
204 compiler::Assembler::kNearJump);
205 BranchLabels labels = {is_true, is_false, &fall_through};
206 Condition true_condition =
207 EmitBoolTest(bool_register, labels, /*invert=*/false);
208 ASSERT(true_condition != kInvalidCondition);
209 __ BranchIf(true_condition, is_true);
210 __ j(is_false);
211 __ Bind(&fall_through);
212}
213
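// Emits the frame entry: for optimizable code the function's usage counter
// is loaded (and incremented when unoptimized), and once it reaches the
// optimization threshold control jumps to the Thread's optimize entry;
// otherwise a Dart (or OSR) frame is entered.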
214void FlowGraphCompiler::EmitFrameEntry() {
215 const Function& function = parsed_function().function();
216 if (CanOptimizeFunction() && function.IsOptimizable() &&
217 (!is_optimizing() || may_reoptimize())) {
218 __ Comment("Invocation Count Check");
219 const Register function_reg = A0;
220 const Register usage_reg = A1;
221 __ lx(function_reg, compiler::FieldAddress(CODE_REG, Code::owner_offset()));
222
223 __ LoadFieldFromOffset(usage_reg, function_reg,
224 Function::usage_counter_offset(),
225 compiler::kFourBytes);
226 // Reoptimization of an optimized function is triggered by counting in
227 // IC stubs, but not at the entry of the function.
228 if (!is_optimizing()) {
229 __ addi(usage_reg, usage_reg, 1);
230 __ StoreFieldToOffset(usage_reg, function_reg,
231 Function::usage_counter_offset(),
232 compiler::kFourBytes);
233 }
234 __ CompareImmediate(usage_reg, GetOptimizationThreshold());
235 compiler::Label dont_optimize;
236 __ BranchIf(LT, &dont_optimize, compiler::Assembler::kNearJump);
237 __ lx(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
238 __ jr(TMP);
239 __ Bind(&dont_optimize);
240 }
241
242 if (flow_graph().graph_entry()->NeedsFrame()) {
243 __ Comment("Enter frame");
244 if (flow_graph().IsCompiledForOsr()) {
245 const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
246 ASSERT(extra_slots >= 0);
247 __ EnterOsrFrame(extra_slots * kWordSize);
248 } else {
249 ASSERT(StackSize() >= 0);
250 __ EnterDartFrame(StackSize() * kWordSize);
251 }
252 } else if (FLAG_precompiled_mode) {
253 assembler()->set_constant_pool_allowed(true);
254 }
255}
256
257const InstructionSource& PrologueSource() {
258 static InstructionSource prologue_source(TokenPosition::kDartCodePrologue,
259 /*inlining_id=*/0);
260 return prologue_source;
261}
262
263void FlowGraphCompiler::EmitPrologue() {
264 BeginCodeSourceRange(PrologueSource());
265
266 EmitFrameEntry();
267 ASSERT(assembler()->constant_pool_allowed());
268
269 // In unoptimized code, initialize (non-argument) stack allocated slots.
270 if (!is_optimizing()) {
271 const int num_locals = parsed_function().num_stack_locals();
272
273 intptr_t args_desc_slot = -1;
274 if (parsed_function().has_arg_desc_var()) {
275 args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
276 parsed_function().arg_desc_var());
277 }
278
279 __ Comment("Initialize spill slots");
280 const intptr_t fp_to_sp_delta =
281 num_locals + compiler::target::frame_layout.dart_fixed_frame_size;
282 for (intptr_t i = 0; i < num_locals; ++i) {
283 const intptr_t slot_index =
284 compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
285 Register value_reg =
286 slot_index == args_desc_slot ? ARGS_DESC_REG : NULL_REG;
287 // SP-relative addresses allow for compressed instructions.
288 __ StoreToOffset(value_reg, SP,
289 (slot_index + fp_to_sp_delta) * kWordSize);
290 }
291 } else if (parsed_function().suspend_state_var() != nullptr &&
292 !flow_graph().IsCompiledForOsr()) {
293 // Initialize synthetic :suspend_state variable early
294 // as it may be accessed by GC and exception handling before
295 // InitSuspendableFunction stub is called.
296 const intptr_t slot_index =
297 compiler::target::frame_layout.FrameSlotForVariable(
298 parsed_function().suspend_state_var());
299 const intptr_t fp_to_sp_delta =
300 StackSize() + compiler::target::frame_layout.dart_fixed_frame_size;
301 __ StoreToOffset(NULL_REG, SP, (slot_index + fp_to_sp_delta) * kWordSize);
302 }
303
304 EndCodeSourceRange(PrologueSource());
305}
306
307void FlowGraphCompiler::EmitCallToStub(
308 const Code& stub,
309 ObjectPool::SnapshotBehavior snapshot_behavior) {
310 ASSERT(!stub.IsNull());
311 if (CanPcRelativeCall(stub)) {
312 __ GenerateUnRelocatedPcRelativeCall();
313 AddPcRelativeCallStubTarget(stub);
314 } else {
315 __ JumpAndLinkPatchable(stub,
316 CodeEntryKind::kNormal, snapshot_behavior);
317 AddStubCallTarget(stub);
318 }
319}
320
321void FlowGraphCompiler::EmitJumpToStub(const Code& stub) {
322 ASSERT(!stub.IsNull());
323 if (CanPcRelativeCall(stub)) {
324 __ GenerateUnRelocatedPcRelativeTailCall();
325 AddPcRelativeTailCallStubTarget(stub);
326 } else {
327 __ LoadObject(CODE_REG, stub);
328 __ lx(TMP, compiler::FieldAddress(
329 CODE_REG, compiler::target::Code::entry_point_offset()));
330 __ jr(TMP);
331 AddStubCallTarget(stub);
332 }
333}
334
335void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
336 ASSERT(!stub.IsNull());
337 if (CanPcRelativeCall(stub)) {
338 if (flow_graph().graph_entry()->NeedsFrame()) {
339 __ LeaveDartFrame();
340 }
341 __ GenerateUnRelocatedPcRelativeTailCall();
342 AddPcRelativeTailCallStubTarget(stub);
343#if defined(DEBUG)
344 __ Breakpoint();
345#endif
346 } else {
347 __ LoadObject(CODE_REG, stub);
348 if (flow_graph().graph_entry()->NeedsFrame()) {
349 __ LeaveDartFrame();
350 }
351 __ lx(TMP, compiler::FieldAddress(
352 CODE_REG, compiler::target::Code::entry_point_offset()));
353 __ jr(TMP);
354 AddStubCallTarget(stub);
355 }
356}
357
358void FlowGraphCompiler::GeneratePatchableCall(
359 const InstructionSource& source,
360 const Code& stub,
361 UntaggedPcDescriptors::Kind kind,
362 LocationSummary* locs,
363 ObjectPool::SnapshotBehavior snapshot_behavior) {
364 __ JumpAndLinkPatchable(stub, CodeEntryKind::kNormal, snapshot_behavior);
365 EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs,
366 pending_deoptimization_env_);
367}
368
369void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
370 const InstructionSource& source,
371 const Code& stub,
372 UntaggedPcDescriptors::Kind kind,
373 LocationSummary* locs,
374 Code::EntryKind entry_kind) {
375 ASSERT(CanCallDart());
376 __ JumpAndLinkPatchable(stub, entry_kind);
377 EmitCallsiteMetadata(source, deopt_id, kind, locs,
378 pending_deoptimization_env_);
379}
380
381void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
382 const InstructionSource& source,
383 UntaggedPcDescriptors::Kind kind,
384 LocationSummary* locs,
385 const Function& target,
386 Code::EntryKind entry_kind) {
387 ASSERT(CanCallDart());
388 if (CanPcRelativeCall(target)) {
389 __ GenerateUnRelocatedPcRelativeCall();
390 AddPcRelativeCallTarget(target, entry_kind);
391 EmitCallsiteMetadata(source, deopt_id, kind, locs,
392 pending_deoptimization_env_);
393 } else {
394 // Call sites to the same target can share object pool entries. These
395 // call sites are never patched for breakpoints: the function is deoptimized
396 // and the unoptimized code with IC calls for static calls is patched
397 // instead.
398 ASSERT(is_optimizing());
399 const auto& stub = StubCode::CallStaticFunction();
400 __ JumpAndLinkWithEquivalence(stub, target, entry_kind);
401 EmitCallsiteMetadata(source, deopt_id, kind, locs,
402 pending_deoptimization_env_);
403 AddStaticCallTarget(target, entry_kind);
404 }
405}
406
407void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
408 // We do not check for overflow when incrementing the edge counter. The
409 // function should normally be optimized long before the counter can
410 // overflow; and though we do not reset the counters when we optimize or
411 // deoptimize, there is a bound on the number of
412 // optimization/deoptimization cycles we will attempt.
413 ASSERT(!edge_counters_array_.IsNull());
414 ASSERT(assembler_->constant_pool_allowed());
415 __ Comment("Edge counter");
416 __ LoadObject(A0, edge_counters_array_);
417 __ LoadFieldFromOffset(TMP, A0, Array::element_offset(edge_id));
418 __ addi(TMP, TMP, Smi::RawValue(1));
419 __ StoreFieldToOffset(TMP, A0, Array::element_offset(edge_id));
420}
421
422void FlowGraphCompiler::EmitOptimizedInstanceCall(
423 const Code& stub,
424 const ICData& ic_data,
425 intptr_t deopt_id,
426 const InstructionSource& source,
427 LocationSummary* locs,
428 Code::EntryKind entry_kind) {
429 ASSERT(CanCallDart());
430 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
431 // Each ICData propagated from unoptimized to optimized code contains the
432 // function that corresponds to the Dart function of that IC call. Due
433 // to inlining in optimized code, that function may not correspond to the
434 // top-level function (parsed_function().function()) which could be
435 // reoptimized and which counter needs to be incremented.
436 // Pass the function explicitly, it is used in IC stub.
437
438 __ LoadObject(A6, parsed_function().function());
439 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
440 __ LoadUniqueObject(IC_DATA_REG, ic_data);
441 GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
442 entry_kind);
443 EmitDropArguments(ic_data.SizeWithTypeArgs());
444}
445
446void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
447 const ICData& ic_data,
448 intptr_t deopt_id,
449 const InstructionSource& source,
450 LocationSummary* locs,
451 Code::EntryKind entry_kind) {
452 ASSERT(CanCallDart());
453 ASSERT(entry_kind == Code::EntryKind::kNormal ||
454 entry_kind == Code::EntryKind::kUnchecked);
455 ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
456 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
457 __ LoadUniqueObject(IC_DATA_REG, ic_data);
458 __ LoadUniqueObject(CODE_REG, stub);
459 const intptr_t entry_point_offset =
460 entry_kind == Code::EntryKind::kNormal
461 ? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
462 : Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
463 __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
464 __ jalr(RA);
465 EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs,
466 pending_deoptimization_env_);
467 EmitDropArguments(ic_data.SizeWithTypeArgs());
468}
469
470void FlowGraphCompiler::EmitMegamorphicInstanceCall(
471 const String& name,
472 const Array& arguments_descriptor,
473 intptr_t deopt_id,
474 const InstructionSource& source,
475 LocationSummary* locs) {
476 ASSERT(CanCallDart());
477 ASSERT(!arguments_descriptor.IsNull() && (arguments_descriptor.Length() > 0));
478 ASSERT(!FLAG_precompiled_mode);
479 const ArgumentsDescriptor args_desc(arguments_descriptor);
480 const MegamorphicCache& cache = MegamorphicCache::ZoneHandle(
481 zone(),
482 MegamorphicCacheTable::Lookup(thread(), name, arguments_descriptor));
483
484 __ Comment("MegamorphicCall");
485 // Load receiver into A0.
486 __ LoadFromOffset(A0, SP,
487 (args_desc.Count() - 1) * compiler::target::kWordSize);
488 // Use same code pattern as instance call so it can be parsed by code patcher.
489 __ LoadUniqueObject(IC_DATA_REG, cache);
490 __ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
491 __ Call(compiler::FieldAddress(
492 CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
493
494 RecordSafepoint(locs);
495 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
496 const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
497 if (is_optimizing()) {
498 AddDeoptIndexAtCall(deopt_id_after, pending_deoptimization_env_);
499 } else {
500 // Add deoptimization continuation point after the call and before the
501 // arguments are removed.
502 AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
503 }
504 RecordCatchEntryMoves(pending_deoptimization_env_);
505 EmitDropArguments(args_desc.SizeWithTypeArgs());
506}
507
508void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
509 intptr_t deopt_id,
510 const InstructionSource& source,
511 LocationSummary* locs,
512 Code::EntryKind entry_kind,
513 bool receiver_can_be_smi) {
514 ASSERT(CanCallDart());
515 ASSERT(ic_data.NumArgsTested() == 1);
516 const Code& initial_stub = StubCode::SwitchableCallMiss();
517 const char* switchable_call_mode = "smiable";
518 if (!receiver_can_be_smi) {
519 switchable_call_mode = "non-smi";
520 ic_data.set_receiver_cannot_be_smi(true);
521 }
522 const UnlinkedCall& data =
523 UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
524
525 __ Comment("InstanceCallAOT (%s)", switchable_call_mode);
526 // Clear argument descriptor to keep gc happy when it gets pushed on to
527 // the stack.
528 __ LoadImmediate(ARGS_DESC_REG, 0);
529 __ LoadFromOffset(A0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
530 if (FLAG_precompiled_mode) {
531 // The AOT runtime will replace the slot in the object pool with the
532 // entrypoint address - see app_snapshot.cc.
533 const auto snapshot_behavior =
534 compiler::ObjectPoolBuilderEntry::kResetToSwitchableCallMissEntryPoint;
535 __ LoadUniqueObject(RA, initial_stub, snapshot_behavior);
536 } else {
537 __ LoadUniqueObject(CODE_REG, initial_stub);
538 const intptr_t entry_point_offset =
539 entry_kind == Code::EntryKind::kNormal
540 ? compiler::target::Code::entry_point_offset(
541 Code::EntryKind::kMonomorphic)
542 : compiler::target::Code::entry_point_offset(
543 Code::EntryKind::kMonomorphicUnchecked);
544 __ lx(RA, compiler::FieldAddress(CODE_REG, entry_point_offset));
545 }
546 __ LoadUniqueObject(IC_DATA_REG, data);
547 __ jalr(RA);
548
549 EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
550 locs, pending_deoptimization_env_);
551 EmitDropArguments(ic_data.SizeWithTypeArgs());
552}
553
554void FlowGraphCompiler::EmitUnoptimizedStaticCall(
555 intptr_t size_with_type_args,
556 intptr_t deopt_id,
557 const InstructionSource& source,
558 LocationSummary* locs,
559 const ICData& ic_data,
560 Code::EntryKind entry_kind) {
561 ASSERT(CanCallDart());
562 const Code& stub =
563 StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
564 __ LoadObject(IC_DATA_REG, ic_data);
565 GenerateDartCall(deopt_id, source, stub,
566 UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
567 EmitDropArguments(size_with_type_args);
568}
569
570void FlowGraphCompiler::EmitOptimizedStaticCall(
571 const Function& function,
572 const Array& arguments_descriptor,
573 intptr_t size_with_type_args,
574 intptr_t deopt_id,
575 const InstructionSource& source,
576 LocationSummary* locs,
577 Code::EntryKind entry_kind) {
578 ASSERT(CanCallDart());
579 ASSERT(!function.IsClosureFunction());
580 if (function.PrologueNeedsArgumentsDescriptor()) {
581 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
582 } else {
583 if (!FLAG_precompiled_mode) {
584 __ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
585 }
586 }
587 // Do not use the code from the function, but let the code be patched so that
588 // we can record the outgoing edges to other code.
589 GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
590 function, entry_kind);
591 EmitDropArguments(size_with_type_args);
592}
593
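// Loads the call target from the dispatch table at
// DISPATCH_TABLE_REG + (class_id << kWordSizeLog2), offset by
// (selector_offset - kOriginElement) words, and calls it through TMP. The
// receiver's class id is expected in DispatchTableNullErrorABI::kClassIdReg.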
594void FlowGraphCompiler::EmitDispatchTableCall(
595 int32_t selector_offset,
596 const Array& arguments_descriptor) {
597 const auto cid_reg = DispatchTableNullErrorABI::kClassIdReg;
598 ASSERT(CanCallDart());
599 ASSERT(cid_reg != ARGS_DESC_REG);
600 if (!arguments_descriptor.IsNull()) {
601 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
602 }
603 const uintptr_t offset = selector_offset - DispatchTable::kOriginElement;
604 // Would like cid_reg to be available on entry to the target function
605 // for checking purposes.
606 ASSERT(cid_reg != TMP);
607 __ AddShifted(TMP, DISPATCH_TABLE_REG, cid_reg,
608 compiler::target::kWordSizeLog2);
609 __ LoadFromOffset(TMP, TMP, offset << compiler::target::kWordSizeLog2);
610 __ jalr(TMP);
611}
612
613Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
614 Register reg,
615 const Object& obj,
616 bool needs_number_check,
617 const InstructionSource& source,
618 intptr_t deopt_id) {
619 if (needs_number_check) {
620 ASSERT(!obj.IsMint() && !obj.IsDouble());
621 __ LoadObject(TMP, obj);
622 __ PushRegisterPair(TMP, reg);
623 if (is_optimizing()) {
624 // No breakpoints in optimized code.
625 __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
626 AddCurrentDescriptor(UntaggedPcDescriptors::kOther, deopt_id, source);
627 } else {
628 // Patchable to support breakpoints.
629 __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
630 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id,
631 source);
632 }
633 __ PopRegisterPair(ZR, reg);
634 // RISC-V has no condition flags, so the result is instead returned as
635 // TMP zero if equal, non-zero if non-equal.
636 ASSERT(reg != TMP);
637 __ CompareImmediate(TMP, 0);
638 } else {
639 __ CompareObject(reg, obj);
640 }
641 return EQ;
642}
643
644Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
645 Register left,
646 Register right,
647 bool needs_number_check,
648 const InstructionSource& source,
649 intptr_t deopt_id) {
650 if (needs_number_check) {
651 __ PushRegisterPair(right, left);
652 if (is_optimizing()) {
653 __ JumpAndLink(StubCode::OptimizedIdenticalWithNumberCheck());
654 } else {
655 __ JumpAndLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
656 }
657 AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
658 __ PopRegisterPair(right, left);
659 // RISC-V has no condition flags, so the result is instead returned as
660 // TMP zero if equal, non-zero if non-equal.
661 ASSERT(left != TMP);
662 ASSERT(right != TMP);
663 __ CompareImmediate(TMP, 0);
664 } else {
665 __ CompareObjectRegisters(left, right);
666 }
667 return EQ;
668}
669
670Condition FlowGraphCompiler::EmitBoolTest(Register value,
671 BranchLabels labels,
672 bool invert) {
673 __ Comment("BoolTest");
674 __ TestImmediate(value, compiler::target::ObjectAlignment::kBoolValueMask);
675 return invert ? NE : EQ;
676}
677
678// This function must be in sync with FlowGraphCompiler::RecordSafepoint and
679// FlowGraphCompiler::SlowPathEnvironmentFor.
680void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
681#if defined(DEBUG)
682 locs->CheckWritableInputs();
683 ClobberDeadTempRegisters(locs);
684#endif
685 // TODO(vegorov): consider saving only caller save (volatile) registers.
686 __ PushRegisters(*locs->live_registers());
687}
688
689void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
690 __ PopRegisters(*locs->live_registers());
691}
692
693#if defined(DEBUG)
694void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
695 // Clobber temporaries that have not been manually preserved.
696 for (intptr_t i = 0; i < locs->temp_count(); ++i) {
697 Location tmp = locs->temp(i);
698 // TODO(zerny): clobber non-live temporary FPU registers.
699 if (tmp.IsRegister() &&
700 !locs->live_registers()->ContainsRegister(tmp.reg())) {
701 __ li(tmp.reg(), 0xf7);
702 }
703 }
704}
705#endif
706
707Register FlowGraphCompiler::EmitTestCidRegister() {
708 return A1;
709}
710
711void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
712 intptr_t count_without_type_args,
713 const Array& arguments_descriptor) {
714 __ Comment("EmitTestAndCall");
715 // Load receiver into A0.
716 __ LoadFromOffset(A0, SP, (count_without_type_args - 1) * kWordSize);
717 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
718}
719
720void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
721 bool if_smi) {
722 if (if_smi) {
723 __ BranchIfSmi(A0, label);
724 } else {
725 __ BranchIfNotSmi(A0, label);
726 }
727}
728
729void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
730 ASSERT(class_id_reg != A0);
731 __ LoadClassId(class_id_reg, A0);
732}
733
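// Rewrites an FP-relative stack slot as the equivalent SP-relative slot when
// the FP-to-SP distance is known; as in EmitPrologue, SP-relative addresses
// allow for compressed instructions.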
734Location FlowGraphCompiler::RebaseIfImprovesAddressing(Location loc) const {
735 if (loc.IsStackSlot() && (loc.base_reg() == FP)) {
736 intptr_t fp_sp_dist =
737 (compiler::target::frame_layout.first_local_from_fp + 1 - StackSize());
738 __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
739 return Location::StackSlot(loc.stack_index() - fp_sp_dist, SP);
740 }
741 if (loc.IsDoubleStackSlot() && (loc.base_reg() == FP)) {
742 intptr_t fp_sp_dist =
743 (compiler::target::frame_layout.first_local_from_fp + 1 - StackSize());
744 __ CheckFpSpDist(fp_sp_dist * compiler::target::kWordSize);
745 return Location::DoubleStackSlot(loc.stack_index() - fp_sp_dist, SP);
746 }
747 return loc;
748}
749
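// Emits a move between two arbitrary locations (registers, FPU registers,
// stack slots, pair locations on RV32, or constants), using TMP/FTMP as
// scratch for memory-to-memory transfers.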
750void FlowGraphCompiler::EmitMove(Location destination,
751 Location source,
752 TemporaryRegisterAllocator* allocator) {
753 if (destination.Equals(source)) return;
754
755 if (source.IsRegister()) {
756 if (destination.IsRegister()) {
757 __ mv(destination.reg(), source.reg());
758 } else {
759 ASSERT(destination.IsStackSlot());
760 const intptr_t dest_offset = destination.ToStackSlotOffset();
761 __ StoreToOffset(source.reg(), destination.base_reg(), dest_offset);
762 }
763 } else if (source.IsStackSlot()) {
764 if (destination.IsRegister()) {
765 const intptr_t source_offset = source.ToStackSlotOffset();
766 __ LoadFromOffset(destination.reg(), source.base_reg(), source_offset);
767 } else if (destination.IsFpuRegister()) {
768 const intptr_t src_offset = source.ToStackSlotOffset();
769 FRegister dst = destination.fpu_reg();
770 __ LoadDFromOffset(dst, source.base_reg(), src_offset);
771 } else {
772 ASSERT(destination.IsStackSlot());
773 const intptr_t source_offset = source.ToStackSlotOffset();
774 const intptr_t dest_offset = destination.ToStackSlotOffset();
775 __ LoadFromOffset(TMP, source.base_reg(), source_offset);
776 __ StoreToOffset(TMP, destination.base_reg(), dest_offset);
777 }
778 } else if (source.IsFpuRegister()) {
779 if (destination.IsFpuRegister()) {
780 __ fmvd(destination.fpu_reg(), source.fpu_reg());
781 } else {
782 if (destination.IsStackSlot() /*32-bit float*/ ||
783 destination.IsDoubleStackSlot()) {
784 const intptr_t dest_offset = destination.ToStackSlotOffset();
785 FRegister src = source.fpu_reg();
786 __ StoreDToOffset(src, destination.base_reg(), dest_offset);
787 } else {
788 ASSERT(destination.IsQuadStackSlot());
789 UNREACHABLE();
790 }
791 }
792 } else if (source.IsDoubleStackSlot()) {
793 if (destination.IsFpuRegister()) {
794 const intptr_t source_offset = source.ToStackSlotOffset();
795 const FRegister dst = destination.fpu_reg();
796 __ LoadDFromOffset(dst, source.base_reg(), source_offset);
797 } else {
798 ASSERT(destination.IsDoubleStackSlot() ||
799 destination.IsStackSlot() /*32-bit float*/);
800 const intptr_t source_offset = source.ToStackSlotOffset();
801 const intptr_t dest_offset = destination.ToStackSlotOffset();
802 __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
803 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
804 }
805 } else if (source.IsQuadStackSlot()) {
806 UNREACHABLE();
807 } else if (source.IsPairLocation()) {
808#if XLEN == 32
809 ASSERT(destination.IsPairLocation());
810 for (intptr_t i : {0, 1}) {
811 EmitMove(destination.Component(i), source.Component(i), allocator);
812 }
813#else
814 UNREACHABLE();
815#endif
816 } else {
817 ASSERT(source.IsConstant());
818 source.constant_instruction()->EmitMoveToLocation(this, destination, TMP,
819 source.pair_index());
820 }
821}
822
823static compiler::OperandSize BytesToOperandSize(intptr_t bytes) {
824 switch (bytes) {
825 case 8:
826 return compiler::kEightBytes;
827 case 4:
828 return compiler::kFourBytes;
829 case 2:
830 return compiler::kTwoBytes;
831 case 1:
832 return compiler::kByte;
833 default:
834 UNREACHABLE();
835 }
836}
837
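// FFI marshalling move between native locations (registers, FPU registers,
// stack). When widening integer values, they are extended per the RISC-V
// calling convention: to 32 bits according to the sign of their type, then
// sign-extended to XLEN bits.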
838void FlowGraphCompiler::EmitNativeMoveArchitecture(
839 const compiler::ffi::NativeLocation& destination,
840 const compiler::ffi::NativeLocation& source) {
841 const auto& src_type = source.payload_type();
842 const auto& dst_type = destination.payload_type();
843
844 ASSERT(src_type.IsSigned() == dst_type.IsSigned());
845 ASSERT(src_type.IsPrimitive());
846 ASSERT(dst_type.IsPrimitive());
847 const intptr_t src_size = src_type.SizeInBytes();
848 const intptr_t dst_size = dst_type.SizeInBytes();
849 const bool sign_or_zero_extend = dst_size > src_size;
850
851 if (source.IsRegisters()) {
852 const auto& src = source.AsRegisters();
853 ASSERT(src.num_regs() == 1);
854 const auto src_reg = src.reg_at(0);
855
856 if (destination.IsRegisters()) {
857 const auto& dst = destination.AsRegisters();
858 ASSERT(dst.num_regs() == 1);
859 const auto dst_reg = dst.reg_at(0);
860 ASSERT(destination.container_type().SizeInBytes() <=
861 compiler::target::kWordSize);
862 if (!sign_or_zero_extend) {
863#if XLEN == 32
864 __ MoveRegister(dst_reg, src_reg);
865#else
866 if (src_size <= 4) {
867 // Signed-extended to XLEN, even unsigned types.
868 __ addiw(dst_reg, src_reg, 0);
869 } else {
870 __ MoveRegister(dst_reg, src_reg);
871 }
872#endif
873 } else {
874 switch (src_type.AsPrimitive().representation()) {
875 // Calling convention: scalars are extended according to the sign of
876 // their type to 32-bits, then sign-extended to XLEN bits.
877 case compiler::ffi::kInt8:
878 __ slli(dst_reg, src_reg, XLEN - 8);
879 __ srai(dst_reg, dst_reg, XLEN - 8);
880 return;
881 case compiler::ffi::kInt16:
882 __ slli(dst_reg, src_reg, XLEN - 16);
883 __ srai(dst_reg, dst_reg, XLEN - 16);
884 return;
885 case compiler::ffi::kUint8:
886 __ andi(dst_reg, src_reg, 0xFF);
887 return;
888 case compiler::ffi::kUint16:
889 __ slli(dst_reg, src_reg, 16);
890#if XLEN == 32
891 __ srli(dst_reg, dst_reg, 16);
892#else
893 __ srliw(dst_reg, dst_reg, 16);
894#endif
895 return;
896#if XLEN >= 64
897 case compiler::ffi::kInt32:
898 case compiler::ffi::kUint32:
899 // Note even uint32 is sign-extended to XLEN.
900 __ addiw(dst_reg, src_reg, 0);
901 return;
902#endif
903 case compiler::ffi::kInt24:
904#if XLEN >= 64
905 case compiler::ffi::kInt40:
906 case compiler::ffi::kInt48:
907 case compiler::ffi::kInt56:
908#endif
909 __ slli(dst_reg, src_reg, XLEN - src_size * kBitsPerByte);
910 __ srai(dst_reg, dst_reg, XLEN - src_size * kBitsPerByte);
911 return;
912 case compiler::ffi::kUint24:
913#if XLEN >= 64
914 case compiler::ffi::kUint40:
915 case compiler::ffi::kUint48:
916 case compiler::ffi::kUint56:
917#endif
918 __ slli(dst_reg, src_reg, XLEN - src_size * kBitsPerByte);
919 __ srli(dst_reg, dst_reg, XLEN - src_size * kBitsPerByte);
920 return;
921 default:
922 UNREACHABLE();
923 }
924 }
925
926 } else if (destination.IsFpuRegisters()) {
927 const auto& dst = destination.AsFpuRegisters();
928 ASSERT(src_size == dst_size);
929 ASSERT(src.num_regs() == 1);
930 switch (src_size) {
931 case 4:
932 __ fmvwx(dst.fpu_reg(), src.reg_at(0));
933 return;
934 case 8:
935#if XLEN == 32
936 UNIMPLEMENTED();
937#else
938 __ fmvdx(dst.fpu_reg(), src.reg_at(0));
939#endif
940 return;
941 default:
942 UNREACHABLE();
943 }
944
945 } else {
946 ASSERT(destination.IsStack());
947 const auto& dst = destination.AsStack();
948 ASSERT(!sign_or_zero_extend);
949 auto const op_size =
950 BytesToOperandSize(destination.container_type().SizeInBytes());
951 __ StoreToOffset(src.reg_at(0), dst.base_register(),
952 dst.offset_in_bytes(), op_size);
953 }
954 } else if (source.IsFpuRegisters()) {
955 const auto& src = source.AsFpuRegisters();
956 // We have not implemented conversions here, use IL convert instructions.
957 ASSERT(src_type.Equals(dst_type));
958
959 if (destination.IsRegisters()) {
960 const auto& dst = destination.AsRegisters();
961 ASSERT(src_size == dst_size);
962 ASSERT(dst.num_regs() == 1);
963 switch (src_size) {
964 case 4:
965 __ fmvxw(dst.reg_at(0), src.fpu_reg());
966 return;
967 case 8:
968#if XLEN == 32
969 UNIMPLEMENTED();
970#else
971 __ fmvxd(dst.reg_at(0), src.fpu_reg());
972#endif
973 return;
974 default:
975 UNREACHABLE();
976 }
977
978 } else if (destination.IsFpuRegisters()) {
979 const auto& dst = destination.AsFpuRegisters();
980 __ fmvd(dst.fpu_reg(), src.fpu_reg());
981
982 } else {
983 ASSERT(destination.IsStack());
984 ASSERT(src_type.IsFloat());
985 const auto& dst = destination.AsStack();
986 switch (dst_size) {
987 case 8:
988 __ StoreDToOffset(src.fpu_reg(), dst.base_register(),
989 dst.offset_in_bytes());
990 return;
991 case 4:
992 __ StoreSToOffset(src.fpu_reg(), dst.base_register(),
993 dst.offset_in_bytes());
994 return;
995 default:
996 UNREACHABLE();
997 }
998 }
999
1000 } else {
1001 ASSERT(source.IsStack());
1002 const auto& src = source.AsStack();
1003 if (destination.IsRegisters()) {
1004 const auto& dst = destination.AsRegisters();
1005 ASSERT(dst.num_regs() == 1);
1006 const auto dst_reg = dst.reg_at(0);
1007 EmitNativeLoad(dst_reg, src.base_register(), src.offset_in_bytes(),
1008 src_type.AsPrimitive().representation());
1009 } else if (destination.IsFpuRegisters()) {
1010 ASSERT(src_type.Equals(dst_type));
1011 ASSERT(src_type.IsFloat());
1012 const auto& dst = destination.AsFpuRegisters();
1013 switch (src_size) {
1014 case 8:
1015 __ LoadDFromOffset(dst.fpu_reg(), src.base_register(),
1016 src.offset_in_bytes());
1017 return;
1018 case 4:
1019 __ LoadSFromOffset(dst.fpu_reg(), src.base_register(),
1020 src.offset_in_bytes());
1021 return;
1022 default:
1023 UNIMPLEMENTED();
1024 }
1025 } else {
1026 ASSERT(destination.IsStack());
1027 UNREACHABLE();
1028 }
1029 }
1030}
1031
1032void FlowGraphCompiler::EmitNativeLoad(Register dst,
1033 Register base,
1034 intptr_t offset,
1035 compiler::ffi::PrimitiveType type) {
1036 switch (type) {
1037 case compiler::ffi::kInt8:
1038 __ lb(dst, compiler::Address(base, offset));
1039 return;
1040 case compiler::ffi::kUint8:
1041 __ lbu(dst, compiler::Address(base, offset));
1042 return;
1043 case compiler::ffi::kInt16:
1044 __ lh(dst, compiler::Address(base, offset));
1045 return;
1046 case compiler::ffi::kUint16:
1047 __ lhu(dst, compiler::Address(base, offset));
1048 return;
1049 case compiler::ffi::kInt32:
1050 __ lw(dst, compiler::Address(base, offset));
1051 return;
1052 case compiler::ffi::kUint32:
1053 case compiler::ffi::kFloat:
1054#if XLEN == 32
1055 __ lw(dst, compiler::Address(base, offset));
1056#else
1057 __ lwu(dst, compiler::Address(base, offset));
1058#endif
1059 return;
1060#if XLEN >= 64
1061 case compiler::ffi::kInt64:
1062 case compiler::ffi::kUint64:
1063 case compiler::ffi::kDouble:
1064 __ ld(dst, compiler::Address(base, offset));
1065 return;
1066#endif
1067 default:
1068 break;
1069 }
1070
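// Sizes without a single load instruction (3, 5, 6 and 7 bytes) are
// assembled from a word or half-word load plus narrower loads that are
// shifted into place and OR-ed together, using a temporary register that is
// preserved around the sequence.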
1071 Register tmp = kNoRegister;
1072 if (dst != T1 && base != T1) tmp = T1;
1073 if (dst != T2 && base != T2) tmp = T2;
1074 if (dst != T3 && base != T3) tmp = T3;
1075 ASSERT(tmp != kNoRegister);
1076 if (base == SP) offset += compiler::target::kWordSize;
1077 __ PushRegister(tmp);
1078
1079 switch (type) {
1080 case compiler::ffi::kInt24:
1081 __ lhu(dst, compiler::Address(base, offset));
1082 __ lb(tmp, compiler::Address(base, offset + 2));
1083 __ slli(tmp, tmp, 16);
1084 __ or_(dst, dst, tmp);
1085 break;
1086 case compiler::ffi::kUint24:
1087 __ lhu(dst, compiler::Address(base, offset));
1088 __ lbu(tmp, compiler::Address(base, offset + 2));
1089 __ slli(tmp, tmp, 16);
1090 __ or_(dst, dst, tmp);
1091 break;
1092#if XLEN >= 64
1093 case compiler::ffi::kInt40:
1094 __ lwu(dst, compiler::Address(base, offset));
1095 __ lb(tmp, compiler::Address(base, offset + 4));
1096 __ slli(tmp, tmp, 32);
1097 __ or_(dst, dst, tmp);
1098 break;
1099 case compiler::ffi::kUint40:
1100 __ lwu(dst, compiler::Address(base, offset));
1101 __ lbu(tmp, compiler::Address(base, offset + 4));
1102 __ slli(tmp, tmp, 32);
1103 __ or_(dst, dst, tmp);
1104 break;
1105 case compiler::ffi::kInt48:
1106 __ lwu(dst, compiler::Address(base, offset));
1107 __ lh(tmp, compiler::Address(base, offset + 4));
1108 __ slli(tmp, tmp, 32);
1109 __ or_(dst, dst, tmp);
1110 break;
1111 case compiler::ffi::kUint48:
1112 __ lwu(dst, compiler::Address(base, offset));
1113 __ lhu(tmp, compiler::Address(base, offset + 4));
1114 __ slli(tmp, tmp, 32);
1115 __ or_(dst, dst, tmp);
1116 break;
1117 case compiler::ffi::kInt56:
1118 __ lwu(dst, compiler::Address(base, offset));
1119 __ lhu(tmp, compiler::Address(base, offset + 4));
1120 __ slli(tmp, tmp, 32);
1121 __ or_(dst, dst, tmp);
1122 __ lb(tmp, compiler::Address(base, offset + 6));
1123 __ slli(tmp, tmp, 48);
1124 __ or_(dst, dst, tmp);
1125 break;
1126 case compiler::ffi::kUint56:
1127 __ lwu(dst, compiler::Address(base, offset));
1128 __ lhu(tmp, compiler::Address(base, offset + 4));
1129 __ slli(tmp, tmp, 32);
1130 __ or_(dst, dst, tmp);
1131 __ lbu(tmp, compiler::Address(base, offset + 6));
1132 __ slli(tmp, tmp, 48);
1133 __ or_(dst, dst, tmp);
1134 break;
1135#endif
1136 default:
1137 UNREACHABLE();
1138 }
1139
1140 __ PopRegister(tmp);
1141}
1142
1143void FlowGraphCompiler::LoadBSSEntry(BSS::Relocation relocation,
1144 Register dst,
1145 Register tmp) {
1146 compiler::Label skip_reloc;
1147 __ j(&skip_reloc, compiler::Assembler::kNearJump);
1148 InsertBSSRelocation(relocation);
1149 __ Bind(&skip_reloc);
1150
1151 __ auipc(tmp, 0);
1152 __ addi(tmp, tmp, -compiler::target::kWordSize);
1153
1154 // tmp holds the address of the relocation.
1155 __ lx(dst, compiler::Address(tmp));
1156
1157 // dst holds the relocation itself: tmp - bss_start.
1158 // tmp = tmp + (bss_start - tmp) = bss_start
1159 __ add(tmp, tmp, dst);
1160
1161 // tmp holds the start of the BSS section.
1162 // Load the "get-thread" routine: *bss_start.
1163 __ lx(dst, compiler::Address(tmp));
1164}
1165
1166#undef __
1167#define __ compiler_->assembler()->
1168
1169void ParallelMoveEmitter::EmitSwap(const MoveOperands& move) {
1170 const Location source = move.src();
1171 const Location destination = move.dest();
1172
1173 if (source.IsRegister() && destination.IsRegister()) {
1174 ASSERT(source.reg() != TMP);
1175 ASSERT(destination.reg() != TMP);
1176 __ mv(TMP, source.reg());
1177 __ mv(source.reg(), destination.reg());
1178 __ mv(destination.reg(), TMP);
1179 } else if (source.IsRegister() && destination.IsStackSlot()) {
1180 Exchange(source.reg(), destination.base_reg(),
1181 destination.ToStackSlotOffset());
1182 } else if (source.IsStackSlot() && destination.IsRegister()) {
1183 Exchange(destination.reg(), source.base_reg(), source.ToStackSlotOffset());
1184 } else if (source.IsStackSlot() && destination.IsStackSlot()) {
1185 Exchange(source.base_reg(), source.ToStackSlotOffset(),
1186 destination.base_reg(), destination.ToStackSlotOffset());
1187 } else if (source.IsFpuRegister() && destination.IsFpuRegister()) {
1188 const FRegister dst = destination.fpu_reg();
1189 const FRegister src = source.fpu_reg();
1190 __ fmvd(FTMP, src);
1191 __ fmvd(src, dst);
1192 __ fmvd(dst, FTMP);
1193 } else if (source.IsFpuRegister() || destination.IsFpuRegister()) {
1194 UNIMPLEMENTED();
1195 } else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
1196 const intptr_t source_offset = source.ToStackSlotOffset();
1197 const intptr_t dest_offset = destination.ToStackSlotOffset();
1198
1199 ScratchFpuRegisterScope ensure_scratch(this, kNoFpuRegister);
1200 FRegister scratch = ensure_scratch.reg();
1201 __ LoadDFromOffset(FTMP, source.base_reg(), source_offset);
1202 __ LoadDFromOffset(scratch, destination.base_reg(), dest_offset);
1203 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
1204 __ StoreDToOffset(scratch, source.base_reg(), source_offset);
1205 } else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
1206 UNIMPLEMENTED();
1207 } else {
1208 UNREACHABLE();
1209 }
1210}
1211
1212void ParallelMoveEmitter::MoveMemoryToMemory(const compiler::Address& dst,
1213 const compiler::Address& src) {
1214 UNREACHABLE();
1215}
1216
1217// Do not call or implement this function. Instead, use the form below that
1218// uses an offset from the frame pointer instead of an Address.
1219void ParallelMoveEmitter::Exchange(Register reg, const compiler::Address& mem) {
1220 UNREACHABLE();
1221}
1222
1223// Do not call or implement this function. Instead, use the form below that
1224// uses offsets from the frame pointer instead of Addresses.
1225void ParallelMoveEmitter::Exchange(const compiler::Address& mem1,
1226 const compiler::Address& mem2) {
1227 UNREACHABLE();
1228}
1229
1230void ParallelMoveEmitter::Exchange(Register reg,
1231 Register base_reg,
1232 intptr_t stack_offset) {
1233 __ mv(TMP, reg);
1234 __ LoadFromOffset(reg, base_reg, stack_offset);
1235 __ StoreToOffset(TMP, base_reg, stack_offset);
1236}
1237
1238void ParallelMoveEmitter::Exchange(Register base_reg1,
1239 intptr_t stack_offset1,
1240 Register base_reg2,
1241 intptr_t stack_offset2) {
1242 ScratchRegisterScope tmp1(this, kNoRegister);
1243 ScratchRegisterScope tmp2(this, tmp1.reg());
1244 __ LoadFromOffset(tmp1.reg(), base_reg1, stack_offset1);
1245 __ LoadFromOffset(tmp2.reg(), base_reg2, stack_offset2);
1246 __ StoreToOffset(tmp1.reg(), base_reg2, stack_offset2);
1247 __ StoreToOffset(tmp2.reg(), base_reg1, stack_offset1);
1248}
1249
1250void ParallelMoveEmitter::SpillScratch(Register reg) {
1251 __ PushRegister(reg);
1252}
1253
1254void ParallelMoveEmitter::RestoreScratch(Register reg) {
1255 __ PopRegister(reg);
1256}
1257
1258void ParallelMoveEmitter::SpillFpuScratch(FpuRegister reg) {
1259 __ subi(SP, SP, sizeof(double));
1260 __ fsd(reg, compiler::Address(SP, 0));
1261}
1262
1263void ParallelMoveEmitter::RestoreFpuScratch(FpuRegister reg) {
1264 __ fld(reg, compiler::Address(SP, 0));
1265 __ addi(SP, SP, sizeof(double));
1266}
1267
1268#undef __
1269
1270} // namespace dart
1271
1272#endif // defined(TARGET_ARCH_RISCV)