Flutter Engine
stub_code_compiler_riscv.cc
1// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h"
6
7// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
8// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
9#include "vm/compiler/backend/il.h"
10
11#define SHOULD_NOT_INCLUDE_RUNTIME
12
13#include "vm/compiler/stub_code_compiler.h"
14
15#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
16
17#include "vm/class_id.h"
18#include "vm/code_entry_kind.h"
22#include "vm/constants.h"
24#include "vm/instructions.h"
26#include "vm/tags.h"
27
28#define __ assembler->
29
30namespace dart {
31namespace compiler {
32
33// Ensures that [A0] is a new object; if not, it is added to the remembered
34// set via a leaf runtime call.
35//
36// WARNING: This might clobber all registers except for [A0], [THR] and [FP].
37// The caller should simply call LeaveStubFrame() and return.
38void StubCodeCompiler::EnsureIsNewOrRemembered() {
39 // If the object is not in an active TLAB, we call a leaf-runtime to add it to
40 // the remembered set and/or deferred marking worklist. This test assumes a
41 // Page's TLAB use is always ascending.
42 Label done;
43 __ AndImmediate(TMP, A0, target::kPageMask);
44 __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
45 __ CompareRegisters(A0, TMP);
46 __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
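 // A0 & kPageMask is the base of the page holding the object. An object at or
 // above the page's original top was allocated from the currently active TLAB
 // and is therefore new, so the leaf runtime call below can be skipped.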
47
48 {
49 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
50 /*preserve_registers=*/false);
51 // A0 already loaded.
52 __ mv(A1, THR);
53 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry,
54 /*argument_count=*/2);
55 }
56
57 __ Bind(&done);
58}
59
60// Input parameters:
61// RA : return address.
62// SP : address of last argument in argument array.
63// SP + 8*T4 - 8 : address of first argument in argument array.
64// SP + 8*T4 : address of return value.
65// T5 : address of the runtime function to call.
66// T4 : number of arguments to the call.
67void StubCodeCompiler::GenerateCallToRuntimeStub() {
68 const intptr_t thread_offset = target::NativeArguments::thread_offset();
69 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
70 const intptr_t argv_offset = target::NativeArguments::argv_offset();
71 const intptr_t retval_offset = target::NativeArguments::retval_offset();
72
73 __ Comment("CallToRuntimeStub");
74 __ lx(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
75 __ SetPrologueOffset();
76 __ EnterStubFrame();
77
78 // Save exit frame information to enable stack walking as we are about
79 // to transition to Dart VM C++ code.
80 __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
81
82 // Mark that the thread exited generated code through a runtime call.
83 __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
84 __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
85
86#if defined(DEBUG)
87 {
88 Label ok;
89 // Check that we are always entering from Dart code.
90 __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
91 __ CompareImmediate(TMP, VMTag::kDartTagId);
92 __ BranchIf(EQ, &ok);
93 __ Stop("Not coming from Dart code.");
94 __ Bind(&ok);
95 }
96#endif
97
98 // Mark that the thread is executing VM code.
99 __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
100
101 // Reserve space for arguments and align frame before entering C++ world.
102 // target::NativeArguments are passed in registers.
103 __ Comment("align stack");
104 // Reserve space for arguments.
105 ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
106 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
107
108 // Pass the target::NativeArguments structure by value and call the runtime.
109 // The four fields are written into the stack slots reserved above.
110
111 ASSERT(thread_offset == 0 * target::kWordSize);
112 ASSERT(argc_tag_offset == 1 * target::kWordSize);
113 ASSERT(argv_offset == 2 * target::kWordSize);
114 __ slli(T2, T4, target::kWordSizeLog2);
115 __ add(T2, FP, T2); // Compute argv.
116 // Set argv in target::NativeArguments.
117 __ AddImmediate(T2,
118 target::frame_layout.param_end_from_fp * target::kWordSize);
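 // T2 = FP + (param_end_from_fp + argc) * kWordSize, i.e. the address of the
 // first argument in the caller's frame; the return-value slot set up below
 // sits one word above it, matching the register contract in the header
 // comment.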
119
120 ASSERT(retval_offset == 3 * target::kWordSize);
121 __ AddImmediate(T3, T2, target::kWordSize);
122
123 __ StoreToOffset(THR, SP, thread_offset);
124 __ StoreToOffset(T4, SP, argc_tag_offset);
125 __ StoreToOffset(T2, SP, argv_offset);
126 __ StoreToOffset(T3, SP, retval_offset);
127 __ mv(A0, SP); // Pass the pointer to the target::NativeArguments.
128
130 __ jalr(T5);
131 __ Comment("CallToRuntimeStub return");
132
133 // Refresh pinned register values (incl. write barrier mask and null object).
134 __ RestorePinnedRegisters();
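 // The runtime call may have started or finished a GC phase, which changes the
 // write-barrier mask cached in a pinned register, hence the reload above.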
135
136 // Retval is next to 1st argument.
137 // Mark that the thread is executing Dart code.
138 __ LoadImmediate(TMP, VMTag::kDartTagId);
139 __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
140
141 // Mark that the thread has not exited generated Dart code.
142 __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());
143
144 // Reset exit frame information in Isolate's mutator thread structure.
145 __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
146
147 // Restore the global object pool after returning from runtime (old space is
148 // moving, so the GOP could have been relocated).
149 if (FLAG_precompiled_mode) {
150 __ SetupGlobalPoolAndDispatchTable();
151 }
152
153 __ LeaveStubFrame();
154
155 // The following return can jump to a lazy-deopt stub, which assumes A0
156 // contains a return value and will save it in a GC-visible way. We therefore
157 // have to ensure A0 does not contain any garbage value left from the C
158 // function we called (which has return type "void").
159 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
160 __ LoadImmediate(A0, 0);
161 __ ret();
162}
163
164void StubCodeCompiler::GenerateSharedStubGeneric(
165 bool save_fpu_registers,
166 intptr_t self_code_stub_offset_from_thread,
167 bool allow_return,
168 std::function<void()> perform_runtime_call) {
169 // We want the saved registers to appear like part of the caller's frame, so
170 // we push them before calling EnterStubFrame.
171 RegisterSet all_registers;
172 all_registers.AddAllNonReservedRegisters(save_fpu_registers);
173
174 // To make the stack map calculation architecture-independent, we do the same
175 // as on Intel.
176 __ PushRegister(RA);
177 __ PushRegisters(all_registers);
178 __ lx(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
179 __ EnterStubFrame();
180 perform_runtime_call();
181 if (!allow_return) {
182 __ Breakpoint();
183 return;
184 }
185 __ LeaveStubFrame();
186 __ PopRegisters(all_registers);
187 __ Drop(1); // We use the RA restored via LeaveStubFrame.
188 __ ret();
189}
190
191void StubCodeCompiler::GenerateSharedStub(
192 bool save_fpu_registers,
193 const RuntimeEntry* target,
194 intptr_t self_code_stub_offset_from_thread,
195 bool allow_return,
196 bool store_runtime_result_in_result_register) {
197 ASSERT(!store_runtime_result_in_result_register || allow_return);
198 auto perform_runtime_call = [&]() {
199 if (store_runtime_result_in_result_register) {
200 __ PushRegister(NULL_REG);
201 }
202 __ CallRuntime(*target, /*argument_count=*/0);
203 if (store_runtime_result_in_result_register) {
204 __ PopRegister(A0);
205 __ sx(A0, Address(FP, target::kWordSize *
208 }
209 };
210 GenerateSharedStubGeneric(save_fpu_registers,
211 self_code_stub_offset_from_thread, allow_return,
212 perform_runtime_call);
213}
214
215void StubCodeCompiler::GenerateEnterSafepointStub() {
216 RegisterSet all_registers;
217 all_registers.AddAllGeneralRegisters();
218
219 __ PushRegisters(all_registers);
220 __ EnterFrame(0);
221
222 __ ReserveAlignedFrameSpace(0);
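 // ReserveAlignedFrameSpace(0) reserves no argument space but realigns SP to
 // the C calling convention before the call below.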
223
224 __ lx(TMP, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
225 __ jalr(TMP);
226
227 __ LeaveFrame();
228 __ PopRegisters(all_registers);
229 __ ret();
230}
231
232static void GenerateExitSafepointStubCommon(Assembler* assembler,
233 uword runtime_entry_offset) {
234 RegisterSet all_registers;
235 all_registers.AddAllGeneralRegisters();
236
237 __ PushRegisters(all_registers);
238 __ EnterFrame(0);
239
240 __ ReserveAlignedFrameSpace(0);
241
242 // Set the execution state to VM while waiting for the safepoint to end.
243 // This isn't strictly necessary but enables tests to check that we're not
244 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
245 __ LoadImmediate(TMP, target::Thread::vm_execution_state());
246 __ sx(TMP, Address(THR, target::Thread::execution_state_offset()));
247
248 __ lx(TMP, Address(THR, runtime_entry_offset));
249 __ jalr(TMP);
250
251 __ LeaveFrame();
252 __ PopRegisters(all_registers);
253 __ ret();
254}
255
256void StubCodeCompiler::GenerateExitSafepointStub() {
257 GenerateExitSafepointStubCommon(
258 assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
259}
260
261void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
262 GenerateExitSafepointStubCommon(
263 assembler,
264 kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
265}
266
267// Calls native code within a safepoint.
268//
269// On entry:
270// T0: target to call
271// Stack: set up for native call (SP), aligned, CSP < SP
272//
273// On exit:
274// S3: clobbered, although normally callee-saved
275// Stack: preserved, CSP == SP
276void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
278 __ mv(S3, RA);
279 __ LoadImmediate(T1, target::Thread::exit_through_ffi());
280 __ TransitionGeneratedToNative(T0, FPREG, T1 /*volatile*/,
281 /*enter_safepoint=*/true);
282
283#if defined(DEBUG)
284 // Check SP alignment.
285 __ andi(T2 /*volatile*/, SP, ~(OS::ActivationFrameAlignment() - 1));
286 Label done;
287 __ beq(T2, SP, &done);
288 __ Breakpoint();
289 __ Bind(&done);
290#endif
291
292 __ jalr(T0);
293
294 __ TransitionNativeToGenerated(T1, /*leave_safepoint=*/true);
295 __ jr(S3);
296}
297
298void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
299 Register dst,
300 Register tmp) {
301 compiler::Label skip_reloc;
302 __ j(&skip_reloc, compiler::Assembler::kNearJump);
303 InsertBSSRelocation(relocation);
304 __ Bind(&skip_reloc);
305
306 __ auipc(tmp, 0);
307 __ addi(tmp, tmp, -compiler::target::kWordSize);
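 // `auipc tmp, 0` materializes the PC of the auipc instruction itself;
 // stepping back one word lands on the relocation emitted above.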
308
309 // tmp holds the address of the relocation.
310 __ lx(dst, compiler::Address(tmp));
311
312 // dst holds the relocation itself: tmp - bss_start.
313 // tmp = tmp + (bss_start - tmp) = bss_start
314 __ add(tmp, tmp, dst);
315
316 // tmp holds the start of the BSS section.
317 // Load the "get-thread" routine: *bss_start.
318 __ lx(dst, compiler::Address(tmp));
319}
320
321void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
322 uword function_index,
323 Register dst) {
324 // Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
325 // Note: If the stub was aligned, this could be a single PC relative load.
326
327 // Load a pointer to the beginning of the stub into dst.
328 const intptr_t code_size = __ CodeSize();
329 __ auipc(dst, 0);
330 __ AddImmediate(dst, -code_size);
331
332 // Round dst down to the page size.
333 __ AndImmediate(dst, FfiCallbackMetadata::kPageMask);
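 // The stub page is aligned, so masking the stub's start address yields the
 // page base; the runtime-function table lives at a fixed offset from it and
 // is read below.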
334
335 // Load the function from the function table.
336 __ LoadFromOffset(dst, dst,
338}
339
340void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
341#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
342 // TODO(37299): FFI is not supported in SIMRISCV32/64.
343 __ ebreak();
344#else
345 Label body;
346
347 // T1 is volatile and not used for passing any arguments.
350 ++i) {
351 // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
352 // look up the current PC, then jump to the shared section.
353 __ auipc(T1, 0);
354 __ j(&body);
355 }
356
357 ASSERT_EQUAL(__ CodeSize(),
358 FfiCallbackMetadata::kNativeCallbackTrampolineSize *
360
361 const intptr_t shared_stub_start = __ CodeSize();
362
363 __ Bind(&body);
364
365 // Save THR (callee-saved) and RA. Keeps stack aligned.
366 COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 2);
367 __ PushRegisterPair(RA, THR);
369
370 // Load the thread, verify the callback ID and exit the safepoint.
371 //
372 // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
373 // code size on this shared stub.
374 {
375 // Push arguments and callback id.
376 __ subi(SP, SP, 9 * target::kWordSize);
377 __ sx(T1, Address(SP, 8 * target::kWordSize));
378 __ sx(A7, Address(SP, 7 * target::kWordSize));
379 __ sx(A6, Address(SP, 6 * target::kWordSize));
380 __ sx(A5, Address(SP, 5 * target::kWordSize));
381 __ sx(A4, Address(SP, 4 * target::kWordSize));
382 __ sx(A3, Address(SP, 3 * target::kWordSize));
383 __ sx(A2, Address(SP, 2 * target::kWordSize));
384 __ sx(A1, Address(SP, 1 * target::kWordSize));
385 __ sx(A0, Address(SP, 0 * target::kWordSize));
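 // T1 still holds the entry-point PC captured by auipc in the per-trampoline
 // prologue; it is the key used to look up this callback's metadata, so it has
 // to survive until the call below.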
386
387 __ EnterFrame(0);
388 // Reserve one slot for the entry point and one for the tramp abi.
389 __ ReserveAlignedFrameSpace(2 * target::kWordSize);
390
391 // Since DLRT_GetFfiCallbackMetadata can theoretically be loaded anywhere,
392 // we use the same trick as before to ensure a predictable instruction
393 // sequence.
394 Label call;
395 __ mv(A0, T1); // trampoline
396 __ mv(A1, SPREG); // out_entry_point
397 __ addi(A2, SPREG, target::kWordSize); // out_trampoline_type
398
399#if defined(DART_TARGET_OS_FUCHSIA)
400 // TODO(https://dartbug.com/52579): Remove.
401 if (FLAG_precompiled_mode) {
402 GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, T1, T2);
403 } else {
404 const intptr_t kPCRelativeLoadOffset = 12;
405 intptr_t start = __ CodeSize();
406 __ auipc(T1, 0);
407 __ lx(T1, Address(T1, kPCRelativeLoadOffset));
408 __ j(&call);
409
410 ASSERT_EQUAL(__ CodeSize() - start, kPCRelativeLoadOffset);
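 // The word emitted below is data, not an instruction: it holds the absolute
 // address of DLRT_GetFfiCallbackMetadata. The auipc/lx pair above reads it
 // PC-relatively, and the preceding jump skips over it.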
411#if XLEN == 32
412 __ Emit32(reinterpret_cast<int32_t>(&DLRT_GetFfiCallbackMetadata));
413#else
414 __ Emit64(reinterpret_cast<int64_t>(&DLRT_GetFfiCallbackMetadata));
415#endif
416 }
417#else
418 GenerateLoadFfiCallbackMetadataRuntimeFunction(
420#endif // defined(DART_TARGET_OS_FUCHSIA)
421
422 __ Bind(&call);
423 __ jalr(T1);
424 __ mv(THR, A0);
425 __ lx(T2, Address(SPREG, 0)); // entry_point
426 __ lx(T3, Address(SPREG, target::kWordSize)); // trampoline_type
427
428 __ LeaveFrame();
429
430 // Restore arguments and callback id.
431 __ lx(A0, Address(SP, 0 * target::kWordSize));
432 __ lx(A1, Address(SP, 1 * target::kWordSize));
433 __ lx(A2, Address(SP, 2 * target::kWordSize));
434 __ lx(A3, Address(SP, 3 * target::kWordSize));
435 __ lx(A4, Address(SP, 4 * target::kWordSize));
436 __ lx(A5, Address(SP, 5 * target::kWordSize));
437 __ lx(A6, Address(SP, 6 * target::kWordSize));
438 __ lx(A7, Address(SP, 7 * target::kWordSize));
439 __ lx(T1, Address(SP, 8 * target::kWordSize));
440 __ addi(SP, SP, 9 * target::kWordSize);
441 }
442
445
446 Label async_callback;
447 Label done;
448
449 // If GetFfiCallbackMetadata returned a null thread, it means that the
450 // callback was invoked after it was deleted. In this case, do nothing.
452
453 // Check the trampoline type to see how the callback should be invoked.
457
458 // Sync callback. The entry point contains the target function, so just call
459 // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
460 // re-enter it afterwards.
461
462 // Clobbers all volatile registers, including the callback ID in T1.
463 __ jalr(T2);
464
465 // Clobbers TMP, TMP2 and T1 -- all volatile and not holding return values.
466 __ EnterFullSafepoint(/*scratch=*/T1);
467
469 __ Bind(&async_callback);
470
471 // Async callback. The entrypoint marshals the arguments into a message and
472 // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
473 // entered a temporary isolate, so exit it afterwards.
474
475 // Clobbers all volatile registers, including the callback ID in T1.
476 __ jalr(T2);
477
478 // Exit the temporary isolate.
479 {
480 __ EnterFrame(0);
481 __ ReserveAlignedFrameSpace(0);
482
483 Label call;
484
485#if defined(DART_TARGET_OS_FUCHSIA)
486 // TODO(https://dartbug.com/52579): Remove.
487 if (FLAG_precompiled_mode) {
488 GenerateLoadBSSEntry(BSS::Relocation::DRT_ExitTemporaryIsolate, T1, T2);
489 } else {
490 const intptr_t kPCRelativeLoadOffset = 12;
491 intptr_t start = __ CodeSize();
492 __ auipc(T1, 0);
493 __ lx(T1, Address(T1, kPCRelativeLoadOffset));
494 __ j(&call);
495
496 ASSERT_EQUAL(__ CodeSize() - start, kPCRelativeLoadOffset);
497#if XLEN == 32
498 __ Emit32(reinterpret_cast<int32_t>(&DLRT_ExitTemporaryIsolate));
499#else
500 __ Emit64(reinterpret_cast<int64_t>(&DLRT_ExitTemporaryIsolate));
501#endif
502 }
503#else
504 GenerateLoadFfiCallbackMetadataRuntimeFunction(
506#endif // defined(DART_TARGET_OS_FUCHSIA)
507
508 __ Bind(&call);
509 __ jalr(T1);
510
511 __ LeaveFrame();
512 }
513
514 __ Bind(&done);
515 __ PopRegisterPair(RA, THR);
516 __ ret();
517
518 ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
519 FfiCallbackMetadata::kNativeCallbackSharedStubSize);
521
522#if defined(DEBUG)
523 while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
524 __ ebreak();
525 }
526#endif
527#endif
528}
529
530void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
531 __ EnterStubFrame();
534 __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
535 // The NullError runtime entry does not return.
536 __ Breakpoint();
537}
538
539void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
540 auto perform_runtime_call = [&]() {
541 // If the generated code has unboxed index/length we need to box them before
542 // calling the runtime entry.
543#if XLEN == 32
545#else
547 Label length, smi_case;
548
549 // The user-controlled index might not fit into a Smi.
553 __ beq(TMP, TMP2, &length); // No overflow.
554 {
555 // Allocate a mint, reload the two registers and populate the mint.
556 __ PushRegister(NULL_REG);
557 __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0);
558 __ PopRegister(RangeErrorABI::kIndexReg);
559 __ lx(TMP,
560 Address(FP, target::kWordSize *
563 __ sx(TMP, FieldAddress(RangeErrorABI::kIndexReg,
564 target::Mint::value_offset()));
566 Address(FP, target::kWordSize *
569 }
570
571 // Length is guaranteed to be in positive Smi range (it comes from a load
572 // of a vm recognized array).
573 __ Bind(&length);
575 }
576#endif // XLEN != 32
577 __ PushRegistersInOrder(
579 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
580 __ Breakpoint();
581 };
582
583 GenerateSharedStubGeneric(
584 /*save_fpu_registers=*/with_fpu_regs,
585 with_fpu_regs
586 ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
587 : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
588 /*allow_return=*/false, perform_runtime_call);
589}
590
591void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
592 auto perform_runtime_call = [&]() {
593 __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/2);
594 __ Breakpoint();
595 };
596
597 GenerateSharedStubGeneric(
598 /*save_fpu_registers=*/with_fpu_regs,
599 with_fpu_regs
600 ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
601 : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
602 /*allow_return=*/false, perform_runtime_call);
603}
604
605// Input parameters:
606// RA : return address.
607// SP : address of return value.
608// T5 : address of the native function to call.
609// T2 : address of first argument in argument array.
610// T1 : argc_tag including number of arguments and function kind.
611static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
612 Address wrapper) {
613 const intptr_t thread_offset = target::NativeArguments::thread_offset();
614 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
615 const intptr_t argv_offset = target::NativeArguments::argv_offset();
616 const intptr_t retval_offset = target::NativeArguments::retval_offset();
617
618 __ EnterStubFrame();
619
620 // Save exit frame information to enable stack walking as we are about
621 // to transition to native code.
622 __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
623
624 // Mark that the thread exited generated code through a runtime call.
625 __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
626 __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
627
628#if defined(DEBUG)
629 {
630 Label ok;
631 // Check that we are always entering from Dart code.
632 __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
633 __ CompareImmediate(TMP, VMTag::kDartTagId);
634 __ BranchIf(EQ, &ok);
635 __ Stop("Not coming from Dart code.");
636 __ Bind(&ok);
637 }
638#endif
639
640 // Mark that the thread is executing native code.
641 __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
642
643 // Reserve space for the native arguments structure passed on the stack (the
644 // outgoing pointer parameter to the native arguments structure is passed in
645 // A0) and align the frame before entering the C++ world.
646 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
647
648 // Initialize target::NativeArguments structure and call native function.
649 ASSERT(thread_offset == 0 * target::kWordSize);
650 ASSERT(argc_tag_offset == 1 * target::kWordSize);
651 // Set argc in target::NativeArguments: T1 already contains argc.
652 ASSERT(argv_offset == 2 * target::kWordSize);
653 // Set argv in target::NativeArguments: T2 already contains argv.
654 // Set retval in NativeArgs.
655 ASSERT(retval_offset == 3 * target::kWordSize);
656 __ AddImmediate(
657 T3, FP, (target::frame_layout.param_end_from_fp + 1) * target::kWordSize);
658
659 // Passing the structure by value as in runtime calls would require changing
660 // Dart API for native functions.
661 // For now, space is reserved on the stack and we pass a pointer to it.
662 __ StoreToOffset(THR, SP, thread_offset);
663 __ StoreToOffset(T1, SP, argc_tag_offset);
664 __ StoreToOffset(T2, SP, argv_offset);
665 __ StoreToOffset(T3, SP, retval_offset);
666 __ mv(A0, SP); // Pass the pointer to the target::NativeArguments.
667 __ mv(A1, T5); // Pass the function entrypoint to call.
668
669 // Call native function invocation wrapper or redirection via simulator.
671 __ Call(wrapper);
672
673 // Refresh pinned register values (incl. write barrier mask and null object).
674 __ RestorePinnedRegisters();
675
676 // Mark that the thread is executing Dart code.
677 __ LoadImmediate(TMP, VMTag::kDartTagId);
678 __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
679
680 // Mark that the thread has not exited generated Dart code.
681 __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());
682
683 // Reset exit frame information in Isolate's mutator thread structure.
684 __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
685
686 // Restore the global object pool after returning from runtime (old space is
687 // moving, so the GOP could have been relocated).
688 if (FLAG_precompiled_mode) {
689 __ SetupGlobalPoolAndDispatchTable();
690 }
691
692 __ LeaveStubFrame();
693 __ ret();
694}
695
696void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
697 GenerateCallNativeWithWrapperStub(
698 assembler,
699 Address(THR,
700 target::Thread::no_scope_native_wrapper_entry_point_offset()));
701}
702
703void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
704 GenerateCallNativeWithWrapperStub(
705 assembler,
706 Address(THR,
707 target::Thread::auto_scope_native_wrapper_entry_point_offset()));
708}
709
710// Input parameters:
711// RA : return address.
712// SP : address of return value.
713// T5 : address of the native function to call.
714// T2 : address of first argument in argument array.
715// T1 : argc_tag including number of arguments and function kind.
716void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
717 GenerateCallNativeWithWrapperStub(
718 assembler,
719 Address(THR,
720 target::Thread::bootstrap_native_wrapper_entry_point_offset()));
721}
722
723// Input parameters:
724// ARGS_DESC_REG: arguments descriptor array.
725void StubCodeCompiler::GenerateCallStaticFunctionStub() {
726 // Create a stub frame as we are pushing some objects on the stack before
727 // calling into the runtime.
728 __ EnterStubFrame();
729 __ subi(SP, SP, 2 * target::kWordSize);
730 __ sx(ARGS_DESC_REG,
731 Address(SP, 1 * target::kWordSize)); // Preserve args descriptor.
732 __ sx(ZR, Address(SP, 0 * target::kWordSize)); // Result slot.
733 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
734 __ lx(CODE_REG, Address(SP, 0 * target::kWordSize)); // Result.
735 __ lx(ARGS_DESC_REG,
736 Address(SP, 1 * target::kWordSize)); // Restore args descriptor.
737 __ addi(SP, SP, 2 * target::kWordSize);
738 __ LeaveStubFrame();
739 // Jump to the dart function.
740 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
741 __ jr(TMP);
742}
743
744// Called from a static call only when invalid code has been entered
745// (invalid because its function was optimized or deoptimized).
746// ARGS_DESC_REG: arguments descriptor array.
747void StubCodeCompiler::GenerateFixCallersTargetStub() {
748 Label monomorphic;
749 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
750
751 // Load code pointer to this stub from the thread:
752 // The one that is passed in is not correct: it points to the code object
753 // that needs to be replaced.
754 __ lx(CODE_REG,
755 Address(THR, target::Thread::fix_callers_target_code_offset()));
756 // Create a stub frame as we are pushing some objects on the stack before
757 // calling into the runtime.
758 __ EnterStubFrame();
759 // Setup space on stack for return value and preserve arguments descriptor.
760 __ PushRegistersInOrder({ARGS_DESC_REG, ZR});
761 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
762 // Get Code object result and restore arguments descriptor array.
763 __ PopRegister(CODE_REG);
764 __ PopRegister(ARGS_DESC_REG);
765 // Remove the stub frame.
766 __ LeaveStubFrame();
767 // Jump to the dart function.
768 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
769 __ jr(TMP);
770
771 __ Bind(&monomorphic);
772 // Load code pointer to this stub from the thread:
773 // The one that is passed in is not correct: it points to the code object
774 // that needs to be replaced.
775 __ lx(CODE_REG,
776 Address(THR, target::Thread::fix_callers_target_code_offset()));
777 // Create a stub frame as we are pushing some objects on the stack before
778 // calling into the runtime.
779 __ EnterStubFrame();
780 // Setup result slot, preserve receiver and
781 // push old cache value (also 2nd return value).
782 __ PushRegistersInOrder({ZR, A0, S5});
783 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
784 __ PopRegister(S5); // Get target cache object.
785 __ PopRegister(A0); // Restore receiver.
786 __ PopRegister(CODE_REG); // Get target Code object.
787 // Remove the stub frame.
788 __ LeaveStubFrame();
789 // Jump to the dart function.
790 __ LoadFieldFromOffset(
791 TMP, CODE_REG,
792 target::Code::entry_point_offset(CodeEntryKind::kMonomorphic));
793 __ jr(TMP);
794}
795
796// Called from object allocate instruction when the allocation stub has been
797// disabled.
798void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
799 // Load code pointer to this stub from the thread:
800 // The one that is passed in is not correct: it points to the code object
801 // that needs to be replaced.
802 __ lx(CODE_REG,
803 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
804 __ EnterStubFrame();
805 // Setup space on stack for return value.
806 __ PushRegister(ZR);
807 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
808 // Get Code object result.
809 __ PopRegister(CODE_REG);
810 // Remove the stub frame.
811 __ LeaveStubFrame();
812 // Jump to the dart function.
813 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
814 __ jr(TMP);
815}
816
817// Called from object allocate instruction when the allocation stub for a
818// generic class has been disabled.
819void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
820 // Load code pointer to this stub from the thread:
821 // The one that is passed in is not correct: it points to the code object
822 // that needs to be replaced.
823 __ lx(CODE_REG,
824 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
825 __ EnterStubFrame();
826 // Preserve type arguments register.
828 // Setup space on stack for return value.
829 __ PushRegister(ZR);
830 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
831 // Get Code object result.
832 __ PopRegister(CODE_REG);
833 // Restore type arguments register.
835 // Remove the stub frame.
836 __ LeaveStubFrame();
837 // Jump to the dart function.
838 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
839 __ jr(TMP);
840}
841
842// Input parameters:
843// T2: smi-tagged argument count, may be zero.
844// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
845static void PushArrayOfArguments(Assembler* assembler) {
848
849 // Allocate array to store arguments of caller.
850 __ LoadObject(T1, NullObject());
851 // T1: null element type for raw Array.
852 // T2: smi-tagged argument count, may be zero.
853 __ JumpAndLink(StubCodeAllocateArray());
854 // A0: newly allocated array.
855 // T2: smi-tagged argument count, may be zero (was preserved by the stub).
856 __ PushRegister(A0); // Array is in A0 and on top of stack.
857 __ SmiUntag(T2);
858 __ slli(T1, T2, target::kWordSizeLog2);
859 __ add(T1, T1, FP);
860 __ AddImmediate(T1,
861 target::frame_layout.param_end_from_fp * target::kWordSize);
862 __ AddImmediate(T3, A0, target::Array::data_offset() - kHeapObjectTag);
863 // T1: address of first argument on stack.
864 // T3: address of first argument in array.
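 // Arguments were pushed in order, so the first argument sits at the highest
 // stack address. The loop below walks T1 down the stack while T3 advances
 // through the array, making array element 0 the first argument.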
865
866 Label loop, loop_exit;
867 __ Bind(&loop);
868 __ beqz(T2, &loop_exit);
869 __ lx(T6, Address(T1, 0));
870 __ addi(T1, T1, -target::kWordSize);
871 __ StoreCompressedIntoObject(A0, Address(T3, 0), T6);
872 __ addi(T3, T3, target::kCompressedWordSize);
873 __ addi(T2, T2, -1);
874 __ j(&loop);
875 __ Bind(&loop_exit);
876}
877
878// Used by eager and lazy deoptimization. Preserve result in A0 if necessary.
879// This stub translates optimized frame into unoptimized frame. The optimized
880// frame can contain values in registers and on stack, the unoptimized
881// frame contains all values on stack.
882// Deoptimization occurs in following steps:
883// - Push all registers that can contain values.
884// - Call C routine to copy the stack and saved registers into temporary buffer.
885// - Adjust caller's frame to correct unoptimized frame size.
886// - Fill the unoptimized frame.
887// - Materialize objects that require allocation (e.g. Double instances).
888// GC can occur only after frame is fully rewritten.
889// Stack after TagAndPushPP() below:
890// +------------------+
891// | Saved PP | <- PP
892// +------------------+
893// | PC marker | <- TOS
894// +------------------+
895// | Saved FP |
896// +------------------+
897// | return-address | (deoptimization point)
898// +------------------+
899// | Saved CODE_REG | <- FP of stub
900// +------------------+
901// | ... | <- SP of optimized frame
902//
903// Parts of the code cannot GC, part of the code can GC.
904static void GenerateDeoptimizationSequence(Assembler* assembler,
905 DeoptStubKind kind) {
906 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
907 // is no need to set the correct PC marker or load PP, since they get patched.
908 __ EnterStubFrame();
909
910 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
911 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
912 const intptr_t saved_result_slot_from_fp =
913 target::frame_layout.first_local_from_fp + 1 -
915 const intptr_t saved_exception_slot_from_fp =
916 target::frame_layout.first_local_from_fp + 1 -
918 const intptr_t saved_stacktrace_slot_from_fp =
919 target::frame_layout.first_local_from_fp + 1 -
921 // Result in A0 is preserved as part of pushing all registers below.
922
923 // Push registers in their enumeration order: lowest register number at
924 // lowest address.
925 __ subi(SP, SP, kNumberOfCpuRegisters * target::kWordSize);
926 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
927 const Register r = static_cast<Register>(i);
928 if (r == CODE_REG) {
929 // Save the original value of CODE_REG pushed before invoking this stub
930 // instead of the value used to call this stub.
931 COMPILE_ASSERT(TMP > CODE_REG); // TMP saved first
932 __ lx(TMP, Address(FP, 0 * target::kWordSize));
933 __ sx(TMP, Address(SP, i * target::kWordSize));
934 } else {
935 __ sx(r, Address(SP, i * target::kWordSize));
936 }
937 }
938
939 __ subi(SP, SP, kNumberOfFpuRegisters * kFpuRegisterSize);
940 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
941 FRegister freg = static_cast<FRegister>(i);
942 __ fsd(freg, Address(SP, i * kFpuRegisterSize));
943 }
944
945 {
946 __ mv(A0, SP); // Pass address of saved registers block.
947 LeafRuntimeScope rt(assembler,
948 /*frame_size=*/0,
949 /*preserve_registers=*/false);
950 bool is_lazy =
951 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
952 __ li(A1, is_lazy ? 1 : 0);
953 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
954 // Result (A0) is stack-size (FP - SP) in bytes.
955 }
956
957 if (kind == kLazyDeoptFromReturn) {
958 // Restore result into T1 temporarily.
959 __ LoadFromOffset(T1, FP, saved_result_slot_from_fp * target::kWordSize);
960 } else if (kind == kLazyDeoptFromThrow) {
961 // Restore result into T1 temporarily.
962 __ LoadFromOffset(T1, FP, saved_exception_slot_from_fp * target::kWordSize);
963 __ LoadFromOffset(T2, FP,
964 saved_stacktrace_slot_from_fp * target::kWordSize);
965 }
966
967 // There is a Dart Frame on the stack. We must restore PP and leave frame.
968 __ RestoreCodePointer();
969 __ LeaveStubFrame();
970 __ sub(SP, FP, A0);
971
972 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
973 // is no need to set the correct PC marker or load PP, since they get patched.
974 __ EnterStubFrame();
975
976 if (kind == kLazyDeoptFromReturn) {
977 __ PushRegister(T1); // Preserve result as first local.
978 } else if (kind == kLazyDeoptFromThrow) {
979 // Preserve exception as first local.
980 // Preserve stacktrace as second local.
981 __ PushRegistersInOrder({T1, T2});
982 }
983 {
984 __ mv(A0, FP); // Pass last FP as parameter in R0.
985 LeafRuntimeScope rt(assembler,
986 /*frame_size=*/0,
987 /*preserve_registers=*/false);
988 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
989 }
990 if (kind == kLazyDeoptFromReturn) {
991 // Restore result into T1.
992 __ LoadFromOffset(
993 T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
994 } else if (kind == kLazyDeoptFromThrow) {
995 // Restore result into T1.
996 __ LoadFromOffset(
997 T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
998 __ LoadFromOffset(
999 T2, FP,
1000 (target::frame_layout.first_local_from_fp - 1) * target::kWordSize);
1001 }
1002 // Code above cannot cause GC.
1003 // There is a Dart Frame on the stack. We must restore PP and leave frame.
1004 __ RestoreCodePointer();
1005 __ LeaveStubFrame();
1006
1007 // Frame is fully rewritten at this point and it is safe to perform a GC.
1008 // Materialize any objects that were deferred by FillFrame because they
1009 // require allocation.
1010 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
1011 __ EnterStubFrame();
1012 if (kind == kLazyDeoptFromReturn) {
1013 __ PushRegister(T1); // Preserve result, it will be GC-d here.
1014 } else if (kind == kLazyDeoptFromThrow) {
1015 // Preserve CODE_REG for one more runtime call.
1016 __ PushRegister(CODE_REG);
1017 // Preserve exception, it will be GC-d here.
1018 // Preserve stacktrace, it will be GC-d here.
1019 __ PushRegistersInOrder({T1, T2});
1020 }
1021
1022 __ PushRegister(ZR); // Space for the result.
1023 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
1024 // Result tells stub how many bytes to remove from the expression stack
1025 // of the bottom-most frame. They were used as materialization arguments.
1026 __ PopRegister(T2);
1027 __ SmiUntag(T2);
1028 if (kind == kLazyDeoptFromReturn) {
1029 __ PopRegister(A0); // Restore result.
1030 } else if (kind == kLazyDeoptFromThrow) {
1031 __ PopRegister(A1); // Restore stacktrace.
1032 __ PopRegister(A0); // Restore exception.
1033 __ PopRegister(CODE_REG);
1034 }
1035 __ LeaveStubFrame();
1036 // Remove materialization arguments.
1037 __ add(SP, SP, T2);
1038 // The caller is responsible for emitting the return instruction.
1039
1040 if (kind == kLazyDeoptFromThrow) {
1041 // Unoptimized frame is now ready to accept the exception. Rethrow it to
1042 // find the right handler. Ask the rethrow machinery to bypass the debugger;
1043 // it was already notified about this exception.
1044 __ EnterStubFrame();
1045 __ PushRegister(ZR); // Space for the result value (unused)
1046 __ PushRegister(A0); // Exception
1047 __ PushRegister(A1); // Stacktrace
1048 __ PushImmediate(target::ToRawSmi(1)); // Bypass debugger.
1049 __ CallRuntime(kReThrowRuntimeEntry, 3);
1050 __ LeaveStubFrame();
1051 }
1052}
1053
1054// A0: result, must be preserved
1055void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
1056 // Push zap value instead of CODE_REG for lazy deopt.
1057 __ LoadImmediate(TMP, kZapCodeReg);
1058 __ PushRegister(TMP);
1059 // Return address for "call" to deopt stub.
1060 __ LoadImmediate(RA, kZapReturnAddress);
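 // Lazy deoptimization is entered through a patched return address rather than
 // a normal call, so this frame has no meaningful CODE_REG or RA; recognizable
 // zap constants are used instead so the deopt sequence still sees a
 // well-formed frame.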
1061 __ lx(CODE_REG,
1062 Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
1063 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
1064 __ ret();
1065}
1066
1067// A0: exception, must be preserved
1068// A1: stacktrace, must be preserved
1069void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
1070 // Push zap value instead of CODE_REG for lazy deopt.
1071 __ LoadImmediate(TMP, kZapCodeReg);
1072 __ PushRegister(TMP);
1073 // Return address for "call" to deopt stub.
1074 __ LoadImmediate(RA, kZapReturnAddress);
1075 __ lx(CODE_REG,
1076 Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
1077 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
1078 __ ret();
1079}
1080
1081void StubCodeCompiler::GenerateDeoptimizeStub() {
1082 __ PushRegister(CODE_REG);
1083 __ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
1084 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
1085 __ ret();
1086}
1087
1088// IC_DATA_REG: ICData/MegamorphicCache
1089static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
1090 __ EnterStubFrame();
1091
1092 __ lx(ARGS_DESC_REG,
1093 FieldAddress(IC_DATA_REG,
1094 target::CallSiteData::arguments_descriptor_offset()));
1095
1096 // Load the receiver.
1097 __ LoadCompressedSmiFieldFromOffset(
1098 T2, ARGS_DESC_REG, target::ArgumentsDescriptor::size_offset());
1099 __ AddShifted(TMP, FP, T2, target::kWordSizeLog2 - 1); // T2 is Smi.
1100 __ LoadFromOffset(A0, TMP,
1101 target::frame_layout.param_end_from_fp * target::kWordSize);
1102 // Push: result slot, receiver, ICData/MegamorphicCache,
1103 // arguments descriptor.
1104 __ PushRegistersInOrder({ZR, A0, IC_DATA_REG, ARGS_DESC_REG});
1105
1106 // Adjust arguments count.
1107 __ LoadCompressedSmiFieldFromOffset(
1108 T3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
1109 Label args_count_ok;
1110 __ beqz(T3, &args_count_ok, Assembler::kNearJump);
1111 // Include the type arguments.
1112 __ addi(T2, T2, target::ToRawSmi(1));
1113 __ Bind(&args_count_ok);
1114
1115 // T2: Smi-tagged arguments array length.
1116 PushArrayOfArguments(assembler);
1117 const intptr_t kNumArgs = 4;
1118 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1119 __ Drop(4);
1120 __ PopRegister(A0); // Return value.
1121 __ LeaveStubFrame();
1122 __ ret();
1123}
1124
1125static void GenerateDispatcherCode(Assembler* assembler,
1126 Label* call_target_function) {
1127 __ Comment("NoSuchMethodDispatch");
1128 // When lazily generated invocation dispatchers are disabled, the
1129 // miss-handler may return null.
1130 __ bne(T0, NULL_REG, call_target_function);
1131
1132 GenerateNoSuchMethodDispatcherBody(assembler);
1133}
1134
1135// Input:
1136// ARGS_DESC_REG - arguments descriptor
1137// IC_DATA_REG - icdata/megamorphic_cache
1138void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
1139 GenerateNoSuchMethodDispatcherBody(assembler);
1140}
1141
1142// Called for inline allocation of arrays.
1143// Input registers (preserved):
1144// RA: return address.
1145// AllocateArrayABI::kLengthReg: array length as Smi.
1146// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1147// Output registers:
1148// AllocateArrayABI::kResultReg: newly allocated array.
1149// Clobbered:
1150// T3, T4, T5
1151void StubCodeCompiler::GenerateAllocateArrayStub() {
1152 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1153 Label slow_case;
1154 // Compute the size to be allocated, it is based on the array length
1155 // and is computed as:
1156 // RoundedAllocationSize(
1157 // (array_length * kCompressedWordSize) + target::Array::header_size()).
1158 // Check that length is a Smi.
1159 __ BranchIfNotSmi(AllocateArrayABI::kLengthReg, &slow_case);
1160
1161 // Check length >= 0 && length <= kMaxNewSpaceElements
1162 const intptr_t max_len =
1163 target::ToRawSmi(target::Array::kMaxNewSpaceElements);
1164 __ CompareImmediate(AllocateArrayABI::kLengthReg, max_len, kObjectBytes);
1165 __ BranchIf(HI, &slow_case);
1166
1167 const intptr_t cid = kArrayCid;
1168 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case, T4));
1169
1170 // Calculate and align allocation size.
1171 // Load new object start and calculate next object start.
1172 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1173 // AllocateArrayABI::kLengthReg: array length as Smi.
1174 __ lx(AllocateArrayABI::kResultReg,
1175 Address(THR, target::Thread::top_offset()));
1176 intptr_t fixed_size_plus_alignment_padding =
1177 target::Array::header_size() +
1179 // AllocateArrayABI::kLengthReg is Smi.
1181 target::kWordSizeLog2 - kSmiTagSize);
1182 __ AddImmediate(T3, fixed_size_plus_alignment_padding);
1184 // AllocateArrayABI::kResultReg: potential new object start.
1185 // T3: object size in bytes.
1187 // Branch if unsigned overflow.
1188 __ bltu(T4, AllocateArrayABI::kResultReg, &slow_case);
1189
1190 // Check if the allocation fits into the remaining space.
1191 // AllocateArrayABI::kResultReg: potential new object start.
1192 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1193 // AllocateArrayABI::kLengthReg: array length as Smi.
1194 // T3: array size.
1195 // T4: potential next object start.
1196 __ LoadFromOffset(TMP, THR, target::Thread::end_offset());
1197 __ bgeu(T4, TMP, &slow_case); // Branch if unsigned higher or equal.
1198 __ CheckAllocationCanary(AllocateArrayABI::kResultReg);
1199
1200 // Successfully allocated the object(s), now update top to point to
1201 // next object start and initialize the object.
1202 // AllocateArrayABI::kResultReg: potential new object start.
1203 // T3: array size.
1204 // T4: potential next object start.
1205 __ sx(T4, Address(THR, target::Thread::top_offset()));
1208
1209 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1210 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1211 // AllocateArrayABI::kLengthReg: array length as Smi.
1212 // T3: array size.
1213 // T4: new object end address.
1214
1215 // Store the type argument field.
1216 __ StoreCompressedIntoObjectOffsetNoBarrier(
1217 AllocateArrayABI::kResultReg, target::Array::type_arguments_offset(),
1219
1220 // Set the length field.
1221 __ StoreCompressedIntoObjectOffsetNoBarrier(AllocateArrayABI::kResultReg,
1222 target::Array::length_offset(),
1224
1225 // Calculate the size tag.
1226 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1227 // AllocateArrayABI::kLengthReg: array length as Smi.
1228 // T3: array size.
1229 // T4: new object end address.
1230 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1232 __ li(T5, 0);
1233 __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
1234 compiler::Label zero_tag;
1235 __ BranchIf(UNSIGNED_GREATER, &zero_tag);
1236 __ slli(T5, T3, shift);
1237 __ Bind(&zero_tag);
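 // Objects too large for the size-tag field keep a zero tag here (T5 stays 0);
 // their size is recomputed from the length field when it is needed.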
1238
1239 // Get the class index and insert it into the tags.
1240 const uword tags =
1241 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1242
1243 __ OrImmediate(T5, T5, tags);
1244 __ StoreFieldToOffset(T5, AllocateArrayABI::kResultReg,
1245 target::Array::tags_offset());
1246
1247 // Initialize all array elements to raw_null.
1248 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1249 // T4: new object end address.
1250 // AllocateArrayABI::kLengthReg: array length as Smi.
1251 __ AddImmediate(T3, AllocateArrayABI::kResultReg,
1252 target::Array::data_offset() - kHeapObjectTag);
1253 // T3: iterator which initially points to the start of the variable
1254 // data area to be initialized.
1255 Label loop;
1256 __ Bind(&loop);
1257 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1258 offset += target::kCompressedWordSize) {
1259 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
1260 Address(T3, offset), NULL_REG);
1261 }
1262 // Safe to only check every kObjectAlignment bytes instead of each word.
1263 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
1264 __ addi(T3, T3, target::kObjectAlignment);
1265 __ bltu(T3, T4, &loop);
1266 __ WriteAllocationCanary(T4); // Fix overshoot.
1267
1268 // Done allocating and initializing the array.
1269 // AllocateArrayABI::kResultReg: new object.
1270 // AllocateArrayABI::kLengthReg: array length as Smi (preserved).
1271 __ ret();
1272
1273 // Unable to allocate the array using the fast inline code, just call
1274 // into the runtime.
1275 __ Bind(&slow_case);
1276 }
1277
1278 // Create a stub frame as we are pushing some objects on the stack before
1279 // calling into the runtime.
1280 __ EnterStubFrame();
1281 __ subi(SP, SP, 3 * target::kWordSize);
1282 __ sx(ZR, Address(SP, 2 * target::kWordSize)); // Result slot.
1283 __ sx(AllocateArrayABI::kLengthReg, Address(SP, 1 * target::kWordSize));
1284 __ sx(AllocateArrayABI::kTypeArgumentsReg,
1285 Address(SP, 0 * target::kWordSize));
1286 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1287
1288 // Write-barrier elimination might be enabled for this array (depending on the
1289 // array length). To be sure we will check if the allocated object is in old
1290 // space and if so call a leaf runtime to add it to the remembered set.
1292 __ lx(AllocateArrayABI::kResultReg, Address(SP, 2 * target::kWordSize));
1294
1295 __ lx(AllocateArrayABI::kTypeArgumentsReg,
1296 Address(SP, 0 * target::kWordSize));
1297 __ lx(AllocateArrayABI::kLengthReg, Address(SP, 1 * target::kWordSize));
1298 __ lx(AllocateArrayABI::kResultReg, Address(SP, 2 * target::kWordSize));
1299 __ addi(SP, SP, 3 * target::kWordSize);
1300 __ LeaveStubFrame();
1301 __ ret();
1302}
1303
1304void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
1305 // For test purpose call allocation stub without inline allocation attempt.
1306 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1307 Label slow_case;
1308 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1310 __ ret();
1311
1312 __ Bind(&slow_case);
1313 }
1316 GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
1317 target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
1318 /*allow_return=*/true,
1319 /*store_runtime_result_in_result_register=*/true);
1320}
1321
1322void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
1323 // For test purpose call allocation stub without inline allocation attempt.
1324 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1325 Label slow_case;
1326 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1328 __ ret();
1329
1330 __ Bind(&slow_case);
1331 }
1334 GenerateSharedStub(
1335 /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1336 target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
1337 /*allow_return=*/true,
1338 /*store_runtime_result_in_result_register=*/true);
1339}
1340
1341// Called when invoking Dart code from C++ (VM code).
1342// Input parameters:
1343// RA : points to return address.
1344// A0 : target code or entry point (in bare instructions mode).
1345// A1 : arguments descriptor array.
1346// A2 : arguments array.
1347// A3 : current thread.
1348// Beware! TMP == A3
1349void StubCodeCompiler::GenerateInvokeDartCodeStub() {
1350 __ Comment("InvokeDartCodeStub");
1351
1352 __ EnterFrame(1 * target::kWordSize);
1353
1354 // Push code object to PC marker slot.
1355 __ lx(TMP2, Address(A3, target::Thread::invoke_dart_code_stub_offset()));
1356 __ sx(TMP2, Address(SP, 0 * target::kWordSize));
1357
1358#if defined(DART_TARGET_OS_FUCHSIA) || defined(DART_TARGET_OS_ANDROID)
1359 __ sx(GP, Address(A3, target::Thread::saved_shadow_call_stack_offset()));
1360#elif defined(USING_SHADOW_CALL_STACK)
1361#error Unimplemented
1362#endif
1363
1364 // TODO(riscv): Consider using only volatile FPU registers in Dart code so we
1365 // don't need to save the preserved FPU registers here.
1366 __ PushNativeCalleeSavedRegisters();
1367
1368 // Set up THR, which caches the current thread in Dart code.
1369 if (THR != A3) {
1370 __ mv(THR, A3);
1371 }
1372
1373 // Refresh pinned register values (incl. write barrier mask and null object).
1374 __ RestorePinnedRegisters();
1375
1376 // Save the current VMTag, top resource and top exit frame info on the stack.
1377 // StackFrameIterator reads the top exit frame info saved in this frame.
1378 __ subi(SP, SP, 4 * target::kWordSize);
1379 __ lx(TMP, Address(THR, target::Thread::vm_tag_offset()));
1380 __ sx(TMP, Address(SP, 3 * target::kWordSize));
1381 __ lx(TMP, Address(THR, target::Thread::top_resource_offset()));
1382 __ sx(ZR, Address(THR, target::Thread::top_resource_offset()));
1383 __ sx(TMP, Address(SP, 2 * target::kWordSize));
1384 __ lx(TMP, Address(THR, target::Thread::exit_through_ffi_offset()));
1385 __ sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
1386 __ sx(TMP, Address(SP, 1 * target::kWordSize));
1387 __ lx(TMP, Address(THR, target::Thread::top_exit_frame_info_offset()));
1388 __ sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
1389 __ sx(TMP, Address(SP, 0 * target::kWordSize));
1390 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1391 // with the code below.
1392#if XLEN == 32
1393 ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -42);
1394#elif XLEN == 64
1395 ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -30);
1396#endif
1397 // In debug mode, verify that we've pushed the top exit frame info at the
1398 // correct offset from FP.
1399 __ EmitEntryFrameVerification();
1400
1401 // Mark that the thread is executing Dart code. Do this after initializing the
1402 // exit link for the profiler.
1403 __ LoadImmediate(TMP, VMTag::kDartTagId);
1404 __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
1405
1406 // Load arguments descriptor array, which is passed to Dart code.
1407 __ mv(ARGS_DESC_REG, A1);
1408
1409 // Load number of arguments into T5 and adjust count for type arguments.
1410 __ LoadFieldFromOffset(T5, ARGS_DESC_REG,
1411 target::ArgumentsDescriptor::count_offset());
1412 __ LoadFieldFromOffset(T3, ARGS_DESC_REG,
1413 target::ArgumentsDescriptor::type_args_len_offset());
1414 __ SmiUntag(T5);
1415 // Include the type arguments.
1416 __ snez(T3, T3); // T3 <- T3 == 0 ? 0 : 1
1417 __ add(T5, T5, T3);
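 // T3 is 1 exactly when a type-argument vector is passed; the vector occupies
 // one extra slot in the arguments array, so the argument count is bumped by
 // one.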
1418
1419 // Compute address of 'arguments array' data area into A2.
1420 __ AddImmediate(A2, A2, target::Array::data_offset() - kHeapObjectTag);
1421
1422 // Set up arguments for the Dart call.
1423 Label push_arguments;
1424 Label done_push_arguments;
1425 __ beqz(T5, &done_push_arguments); // check if there are arguments.
1426 __ LoadImmediate(T2, 0);
1427 __ Bind(&push_arguments);
1428 __ lx(T3, Address(A2, 0));
1429 __ PushRegister(T3);
1430 __ addi(T2, T2, 1);
1431 __ addi(A2, A2, target::kWordSize);
1432 __ blt(T2, T5, &push_arguments, compiler::Assembler::kNearJump);
1433 __ Bind(&done_push_arguments);
1434
1435 if (FLAG_precompiled_mode) {
1436 __ SetupGlobalPoolAndDispatchTable();
1437 __ mv(CODE_REG, ZR); // GC-safe value into CODE_REG.
1438 } else {
1439 // We now load the pool pointer (PP) with a GC-safe value as we are about to
1440 // invoke Dart code. We don't need a real object pool here.
1441 __ li(PP, 1); // PP is untagged, callee will tag and spill PP.
1442 __ mv(CODE_REG, A0);
1443 __ lx(A0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1444 }
1445
1446 // Call the Dart code entrypoint.
1447 __ jalr(A0); // ARGS_DESC_REG is the arguments descriptor array.
1448 __ Comment("InvokeDartCodeStub return");
1449
1450 // Get rid of arguments pushed on the stack.
1451 __ addi(
1452 SP, FP,
1453 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
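 // This pops the Dart arguments pushed above together with everything else up
 // to the exit-link slot, so the four values saved on entry can be restored
 // from SP below.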
1454
1455 // Restore the current VMTag, the saved top exit frame info and top resource
1456 // back into the Thread structure.
1457 __ lx(TMP, Address(SP, 0 * target::kWordSize));
1458 __ sx(TMP, Address(THR, target::Thread::top_exit_frame_info_offset()));
1459 __ lx(TMP, Address(SP, 1 * target::kWordSize));
1460 __ sx(TMP, Address(THR, target::Thread::exit_through_ffi_offset()));
1461 __ lx(TMP, Address(SP, 2 * target::kWordSize));
1462 __ sx(TMP, Address(THR, target::Thread::top_resource_offset()));
1463 __ lx(TMP, Address(SP, 3 * target::kWordSize));
1464 __ sx(TMP, Address(THR, target::Thread::vm_tag_offset()));
1465 __ addi(SP, SP, 4 * target::kWordSize);
1466
1467 __ PopNativeCalleeSavedRegisters();
1468
1469 // Restore the frame pointer and C stack pointer and return.
1470 __ LeaveFrame();
1471 __ ret();
1472}
1473
1474// Helper that generates the space allocation for the context stubs.
1475// It does not initialise the fields of the context.
1476// Input:
1477// T1: number of context variables.
1478// Output:
1479// A0: new allocated Context object.
1480// Clobbered:
1481// T2, T3, T4, TMP
1482static void GenerateAllocateContextSpaceStub(Assembler* assembler,
1483 Label* slow_case) {
1484 // First compute the rounded instance size.
1485 // T1: number of context variables.
1486 intptr_t fixed_size_plus_alignment_padding =
1487 target::Context::header_size() +
1490 __ AddImmediate(T2, fixed_size_plus_alignment_padding);
1492
1493 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, T4));
1494 // Now allocate the object.
1495 // T1: number of context variables.
1496 // T2: object size.
1497 __ lx(A0, Address(THR, target::Thread::top_offset()));
1498 __ add(T3, T2, A0);
1499 // Check if the allocation fits into the remaining space.
1500 // A0: potential new object.
1501 // T1: number of context variables.
1502 // T2: object size.
1503 // T3: potential next object start.
1504 __ lx(TMP, Address(THR, target::Thread::end_offset()));
1505 __ CompareRegisters(T3, TMP);
1506 __ BranchIf(CS, slow_case); // Branch if unsigned higher or equal.
1507 __ CheckAllocationCanary(A0);
1508
1509 // Successfully allocated the object, now update top to point to
1510 // next object start and initialize the object.
1511 // A0: new object.
1512 // T1: number of context variables.
1513 // T2: object size.
1514 // T3: next object start.
1515 __ sx(T3, Address(THR, target::Thread::top_offset()));
1516 __ addi(A0, A0, kHeapObjectTag);
1517
1518 // Calculate the size tag.
1519 // A0: new object.
1520 // T1: number of context variables.
1521 // T2: object size.
1522 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1524 __ li(T3, 0);
1525 __ CompareImmediate(T2, target::UntaggedObject::kSizeTagMaxSizeTag);
1526 // If there is no size-tag overflow, shift T2 left into T3; otherwise T3 stays zero.
1527 compiler::Label zero_tag;
1528 __ BranchIf(HI, &zero_tag);
1529 __ slli(T3, T2, shift);
1530 __ Bind(&zero_tag);
1531
1532 // Get the class index and insert it into the tags.
1533 // T3: size and bit tags.
1534 const uword tags =
1535 target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
1536
1537 __ OrImmediate(T3, T3, tags);
1538 __ StoreFieldToOffset(T3, A0, target::Object::tags_offset());
1539
1540 // Set up the number-of-context-variables field.
1541 // A0: new object.
1542 // T1: number of context variables as integer value (not object).
1543 __ StoreFieldToOffset(T1, A0, target::Context::num_variables_offset(),
1544 kFourBytes);
1545}
1546
1547// Called for inline allocation of contexts.
1548// Input:
1549// T1: number of context variables.
1550// Output:
1551// A0: newly allocated Context object.
1552void StubCodeCompiler::GenerateAllocateContextStub() {
1553 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1554 Label slow_case;
1555
1556 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1557
1558 // Setup the parent field.
1559 // A0: new object.
1560 // T1: number of context variables.
1561 __ StoreCompressedIntoObjectOffset(A0, target::Context::parent_offset(),
1562 NULL_REG);
1563
1564 // Initialize the context variables.
1565 // A0: new object.
1566 // T1: number of context variables.
1567 {
1568 Label loop, done;
1569 __ AddImmediate(T3, A0,
1570 target::Context::variable_offset(0) - kHeapObjectTag);
1571 __ Bind(&loop);
1572 __ subi(T1, T1, 1);
1573 __ bltz(T1, &done);
1574 __ sx(NULL_REG, Address(T3, 0));
1575 __ addi(T3, T3, target::kCompressedWordSize);
1576 __ j(&loop);
1577 __ Bind(&done);
1578 }
1579
1580 // Done allocating and initializing the context.
1581 // A0: new object.
1582 __ ret();
1583
1584 __ Bind(&slow_case);
1585 }
1586
1587 // Create a stub frame as we are pushing some objects on the stack before
1588 // calling into the runtime.
1589 __ EnterStubFrame();
1590 // Set up space on the stack for the return value.
1591 __ SmiTag(T1);
1592 __ PushObject(NullObject());
1593 __ PushRegister(T1);
1594 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1595 __ Drop(1); // Pop number of context variables argument.
1596 __ PopRegister(A0); // Pop the new context object.
1597
1598 // Write-barrier elimination might be enabled for this context (depending on
1599 // the size). To be sure we will check if the allocated object is in old
1600 // space and if so call a leaf runtime to add it to the remembered set.
1601 EnsureIsNewOrRemembered();
1602
1603 // A0: new object
1604 // Restore the frame pointer.
1605 __ LeaveStubFrame();
1606 __ ret();
1607}
1608
1609// Called to clone a Context.
1610// Input:
1611// T5: the Context to clone.
1612// Output:
1613// A0: newly allocated Context object.
1614void StubCodeCompiler::GenerateCloneContextStub() {
1615 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1616 Label slow_case;
1617
1618 // Load the number of variables (int32) from the existing context.
1619 __ lw(T1, FieldAddress(T5, target::Context::num_variables_offset()));
1620
1621 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1622
1623 // Load parent in the existing context.
1624 __ LoadCompressed(T3, FieldAddress(T5, target::Context::parent_offset()));
1625 // Setup the parent field.
1626 // A0: new context.
1627 __ StoreCompressedIntoObjectNoBarrier(
1628 A0, FieldAddress(A0, target::Context::parent_offset()), T3);
1629
1630 // Clone the context variables.
1631 // A0: new context.
1632 // T1: number of context variables.
1633 {
1634 Label loop, done;
1635 // T3: Variable array address, new context.
1636 __ AddImmediate(T3, A0,
1637 target::Context::variable_offset(0) - kHeapObjectTag);
1638 // T4: Variable array address, old context.
1639 __ AddImmediate(T4, T5,
1640 target::Context::variable_offset(0) - kHeapObjectTag);
1641
1642 __ Bind(&loop);
1643 __ subi(T1, T1, 1);
1644 __ bltz(T1, &done);
1645 __ lx(T5, Address(T4, 0));
1646 __ addi(T4, T4, target::kCompressedWordSize);
1647 __ sx(T5, Address(T3, 0));
1648 __ addi(T3, T3, target::kCompressedWordSize);
1649 __ j(&loop);
1650
1651 __ Bind(&done);
1652 }
1653
1654 // Done allocating and initializing the context.
1655 // A0: new object.
1656 __ ret();
1657
1658 __ Bind(&slow_case);
1659 }
1660
1661 __ EnterStubFrame();
1662
1663 __ subi(SP, SP, 2 * target::kWordSize);
1664 __ sx(NULL_REG, Address(SP, 1 * target::kWordSize)); // Result slot.
1665 __ sx(T5, Address(SP, 0 * target::kWordSize)); // Context argument.
1666 __ CallRuntime(kCloneContextRuntimeEntry, 1);
1667 __ lx(A0, Address(SP, 1 * target::kWordSize)); // Context result.
1668 __ addi(SP, SP, 2 * target::kWordSize);
1669
1670 // Write-barrier elimination might be enabled for this context (depending on
1671 // the size). To be sure we will check if the allocated object is in old
1672 // space and if so call a leaf runtime to add it to the remembered set.
1673 EnsureIsNewOrRemembered();
1674
1675 // A0: new object
1676 __ LeaveStubFrame();
1677 __ ret();
1678}
1679
1680void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
1681 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1682 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1683
1684 Register reg = static_cast<Register>(i);
1685 intptr_t start = __ CodeSize();
1686 __ addi(SP, SP, -3 * target::kWordSize);
1687 __ sx(RA, Address(SP, 2 * target::kWordSize));
1688 __ sx(TMP, Address(SP, 1 * target::kWordSize));
1689 __ sx(kWriteBarrierObjectReg, Address(SP, 0 * target::kWordSize));
1690 __ mv(kWriteBarrierObjectReg, reg);
1691 __ Call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
1692 __ lx(kWriteBarrierObjectReg, Address(SP, 0 * target::kWordSize));
1693 __ lx(TMP, Address(SP, 1 * target::kWordSize));
1694 __ lx(RA, Address(SP, 2 * target::kWordSize));
1695 __ addi(SP, SP, 3 * target::kWordSize);
1696 __ jr(TMP); // Return.
1697 intptr_t end = __ CodeSize();
1699 }
1700}
1701
1702// Helper stub to implement Assembler::StoreIntoObject/Array.
1703// Input parameters:
1704// A0: Object (old)
1705// A1: Value (old or new)
1706// A6: Slot
1707// If A1 is new, add A0 to the store buffer. Otherwise A1 is old, mark A1
1708// and add it to the mark list.
1712static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
1713 RegisterSet spill_set((1 << T2) | (1 << T3) | (1 << T4), 0);
1714
1715 Label skip_marking;
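  // Incremental (marking) barrier: taken only when the value (A1) is still
  // unmarked and the thread's write barrier mask indicates that concurrent
  // marking is in progress.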
1716 __ lbu(TMP, FieldAddress(A1, target::Object::tags_offset()));
1717 __ lbu(TMP2, Address(THR, target::Thread::write_barrier_mask_offset()));
1718 __ and_(TMP, TMP, TMP2);
1719 __ andi(TMP, TMP, target::UntaggedObject::kIncrementalBarrierMask);
1720 __ beqz(TMP, &skip_marking);
1721
1722 {
1723 // Atomically clear kNotMarkedBit.
1724 Label done;
1725 __ PushRegisters(spill_set);
1726 __ addi(T3, A1, target::Object::tags_offset() - kHeapObjectTag);
1727 // T3: Untagged address of header word (amo's do not support offsets).
1728 __ li(TMP2, ~(1 << target::UntaggedObject::kNotMarkedBit));
1729#if XLEN == 32
1730 __ amoandw(TMP2, TMP2, Address(T3, 0));
1731#else
1732 __ amoandd(TMP2, TMP2, Address(T3, 0));
1733#endif
1734 __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kNotMarkedBit);
1735 __ beqz(TMP2, &done); // Was already clear -> lost race.
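    // This thread won the race to clear the not-marked bit, so it is
    // responsible for pushing A1 onto the thread's marking stack block
    // (processed by the runtime when the block is full).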
1736
1737 __ lx(T4, Address(THR, target::Thread::marking_stack_block_offset()));
1739 __ slli(T3, T2, target::kWordSizeLog2);
1740 __ add(T3, T4, T3);
1742 __ addi(T2, T2, 1);
1744 __ CompareImmediate(T2, target::MarkingStackBlock::kSize);
1745 __ BranchIf(NE, &done);
1746
1747 {
1748 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1749 /*preserve_registers=*/true);
1750 __ mv(A0, THR);
1751 rt.Call(kMarkingStackBlockProcessRuntimeEntry, /*argument_count=*/1);
1752 }
1753
1754 __ Bind(&done);
1755 __ PopRegisters(spill_set);
1756 }
1757
1758 Label add_to_remembered_set, remember_card;
1759 __ Bind(&skip_marking);
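  // Generational barrier: A0 needs to be remembered only if it is
  // old-and-not-remembered and the value A1 is new; shifting A0's tags by
  // kBarrierOverlapShift lines the two relevant bits up so one AND tests both.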
1760 __ lbu(TMP, FieldAddress(A0, target::Object::tags_offset()));
1761 __ lbu(TMP2, FieldAddress(A1, target::Object::tags_offset()));
1762 __ srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
1763 __ and_(TMP, TMP2, TMP);
1764 __ andi(TMP, TMP, target::UntaggedObject::kGenerationalBarrierMask);
1765 __ bnez(TMP, &add_to_remembered_set);
1766 __ ret();
1767
1768 __ Bind(&add_to_remembered_set);
1769 if (cards) {
1770 __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
1771 __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
1772 __ bnez(TMP2, &remember_card);
1773 } else {
1774#if defined(DEBUG)
1775 Label ok;
1776 __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
1777 __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
1778 __ beqz(TMP2, &ok, Assembler::kNearJump);
1779 __ Stop("Wrong barrier!");
1780 __ Bind(&ok);
1781#endif
1782 }
1783 {
1784 // Atomically clear kOldAndNotRememberedBit.
1785 Label done;
1786 __ PushRegisters(spill_set);
1787 __ addi(T3, A0, target::Object::tags_offset() - kHeapObjectTag);
1788 // T3: Untagged address of header word (amo's do not support offsets).
1789 __ li(TMP2, ~(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1790#if XLEN == 32
1791 __ amoandw(TMP2, TMP2, Address(T3, 0));
1792#else
1793 __ amoandd(TMP2, TMP2, Address(T3, 0));
1794#endif
1795 __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
1796 __ beqz(TMP2, &done); // Was already clear -> lost race.
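    // This thread won the race to clear the old-and-not-remembered bit, so it
    // must add A0 to the thread's store buffer block (processed by the runtime
    // when the block is full).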
1797
1798 __ lx(T4, Address(THR, target::Thread::store_buffer_block_offset()));
1800 __ slli(T3, T2, target::kWordSizeLog2);
1801 __ add(T3, T4, T3);
1803 __ addi(T2, T2, 1);
1805 __ CompareImmediate(T2, target::StoreBufferBlock::kSize);
1806 __ BranchIf(NE, &done);
1807
1808 {
1809 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1810 /*preserve_registers=*/true);
1811 __ mv(A0, THR);
1812 rt.Call(kStoreBufferBlockProcessRuntimeEntry, /*argument_count=*/1);
1813 }
1814
1815 __ Bind(&done);
1816 __ PopRegisters(spill_set);
1817 __ ret();
1818 }
1819 if (cards) {
1820 Label remember_card_slow;
1821
1822 // Get card table.
1823 __ Bind(&remember_card);
1824 __ AndImmediate(TMP, A0, target::kPageMask); // Page.
1825 __ lx(TMP2,
1826 Address(TMP, target::Page::card_table_offset())); // Card table.
1827 __ beqz(TMP2, &remember_card_slow);
1828
1829 // Dirty the card. Not atomic: we assume mutable arrays are not shared
1830 // between threads.
1831 __ sub(A6, A6, TMP); // Offset in page.
1832 __ srli(A6, A6, target::Page::kBytesPerCardLog2); // Card index.
1833 __ li(TMP, 1);
1834 __ sll(TMP, TMP, A6); // Bit mask. (Shift amount is mod XLEN.)
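    // The card index splits into a bit position (low bits, already consumed by
    // the mod-XLEN shift above) and a word index; the next two shifts turn the
    // word index into a byte offset into the card table.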
1835 __ srli(A6, A6, target::kBitsPerWordLog2);
1836 __ slli(A6, A6, target::kWordSizeLog2);
1837 __ add(TMP2, TMP2, A6); // Card word address.
1838 __ lx(A6, Address(TMP2, 0));
1839 __ or_(A6, A6, TMP);
1840 __ sx(A6, Address(TMP2, 0));
1841 __ ret();
1842
1843 // Card table not yet allocated.
1844 __ Bind(&remember_card_slow);
1845 {
1846 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1847 /*preserve_registers=*/true);
1848 __ mv(A0, A0); // Arg0 = Object
1849 __ mv(A1, A6); // Arg1 = Slot
1850 rt.Call(kRememberCardRuntimeEntry, /*argument_count=*/2);
1851 }
1852 __ ret();
1853 }
1854}
1855
1856void StubCodeCompiler::GenerateWriteBarrierStub() {
1857 GenerateWriteBarrierStubHelper(assembler, false);
1858}
1859
1860void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
1861 GenerateWriteBarrierStubHelper(assembler, true);
1862}
1863
1864static void GenerateAllocateObjectHelper(Assembler* assembler,
1865 bool is_cls_parameterized) {
1866 const Register kTagsReg = AllocateObjectABI::kTagsReg;
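  // The caller has already placed the new object's header word (class id and
  // instance size encoded as tags) in kTagsReg; this helper extracts what it
  // needs from those tags and only bump-allocates and initializes the body.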
1867
1868 {
1869 Label slow_case;
1870
1871#if !defined(PRODUCT)
1872 {
1873 const Register kCidRegister = TMP2;
1874 __ ExtractClassIdFromTags(kCidRegister, AllocateObjectABI::kTagsReg);
1875 __ MaybeTraceAllocation(kCidRegister, &slow_case, TMP);
1876 }
1877#endif
1878
1879 const Register kNewTopReg = T3;
1880
1881 // Bump allocation.
1882 {
1883 const Register kInstanceSizeReg = T4;
1884 const Register kEndReg = T5;
1885
1886 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
1887
1888 // Load two words from Thread::top: top and end.
1889 // AllocateObjectABI::kResultReg: potential next object start.
1891 __ lx(AllocateObjectABI::kResultReg, Address(THR, target::Thread::top_offset()));
1892 __ lx(kEndReg, Address(THR, target::Thread::end_offset()));
1893
1894 __ add(kNewTopReg, AllocateObjectABI::kResultReg, kInstanceSizeReg);
1895
1896 __ CompareRegisters(kEndReg, kNewTopReg);
1897 __ BranchIf(UNSIGNED_LESS_EQUAL, &slow_case);
1898 __ CheckAllocationCanary(AllocateObjectABI::kResultReg);
1899
1900 // Successfully allocated the object, now update top to point to
1901 // next object start and store the class in the class field of object.
1902 __ sx(kNewTopReg, Address(THR, target::Thread::top_offset()));
1903 } // kInstanceSizeReg = T4, kEndReg = T5
1904
1905 // Tags.
1906 __ sx(kTagsReg, Address(AllocateObjectABI::kResultReg,
1907 target::Object::tags_offset()));
1908
1909 // Initialize the remaining words of the object.
1910 {
1911 const Register kFieldReg = T4;
1912
1913 __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
1914 target::Instance::first_field_offset());
1915 Label loop;
1916 __ Bind(&loop);
1917 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1918 offset += target::kCompressedWordSize) {
1919 __ StoreCompressedIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
1920 Address(kFieldReg, offset),
1921 NULL_REG);
1922 }
1923 // Safe to only check every kObjectAlignment bytes instead of each word.
1924 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
1925 __ addi(kFieldReg, kFieldReg, target::kObjectAlignment);
1926 __ bltu(kFieldReg, kNewTopReg, &loop);
1927 __ WriteAllocationCanary(kNewTopReg); // Fix overshoot.
1928 } // kFieldReg = T4
1929
1930 if (is_cls_parameterized) {
1931 Label not_parameterized_case;
1932
1933 const Register kClsIdReg = T4;
1934 const Register kTypeOffsetReg = T5;
1935
1936 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
1937
1938 // Load class' type_arguments_field offset in words.
1939 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
1940 __ lw(
1941 kTypeOffsetReg,
1942 FieldAddress(kTypeOffsetReg,
1943 target::Class::
1944 host_type_arguments_field_offset_in_words_offset()));
1945
1946 // Set the type arguments in the new object.
1947 __ slli(kTypeOffsetReg, kTypeOffsetReg, target::kWordSizeLog2);
1948 __ add(kTypeOffsetReg, kTypeOffsetReg, AllocateObjectABI::kResultReg);
1949 __ sx(AllocateObjectABI::kTypeArgumentsReg, Address(kTypeOffsetReg, 0));
1950
1951 __ Bind(&not_parameterized_case);
1952 } // kClsIdReg = T4, kTypeOffsetReg = T5
1953
1954 __ AddImmediate(AllocateObjectABI::kResultReg, kHeapObjectTag);
1956
1957 __ ret();
1958
1959 __ Bind(&slow_case);
1960 } // kNewTopReg = T3
1961
1962 // Fall back on slow case:
1963 if (!is_cls_parameterized) {
1964 __ mv(AllocateObjectABI::kTypeArgumentsReg, NULL_REG);
1965 }
1966 // Tail call to generic allocation stub.
1967 __ lx(
1968 TMP,
1969 Address(THR, target::Thread::allocate_object_slow_entry_point_offset()));
1970 __ jr(TMP);
1971}
1972
1973// Called for inline allocation of objects (any class).
1974void StubCodeCompiler::GenerateAllocateObjectStub() {
1975 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
1976}
1977
1978void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
1979 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
1980}
1981
1982void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
1983 if (!FLAG_precompiled_mode) {
1984 __ lx(CODE_REG,
1985 Address(THR, target::Thread::call_to_runtime_stub_offset()));
1986 }
1987
1988 // Create a stub frame as we are pushing some objects on the stack before
1989 // calling into the runtime.
1990 __ EnterStubFrame();
1991
1992 __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg, AllocateObjectABI::kTagsReg);
1994 __ LoadClassById(A0, AllocateObjectABI::kTagsReg);
1995
1996 __ subi(SP, SP, 3 * target::kWordSize);
1997 __ sx(ZR, Address(SP, 2 * target::kWordSize)); // Result slot.
1998 __ sx(A0, Address(SP, 1 * target::kWordSize)); // Arg0: Class object.
1999 __ sx(AllocateObjectABI::kTypeArgumentsReg,
2000 Address(SP, 0 * target::kWordSize)); // Arg1: Type args or null.
2001 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
2002 __ lx(AllocateObjectABI::kResultReg, Address(SP, 2 * target::kWordSize));
2003 __ addi(SP, SP, 3 * target::kWordSize);
2004
2005 // Write-barrier elimination is enabled for [cls] and we therefore need to
2006 // ensure that the object is in new-space or has remembered bit set.
2007 EnsureIsNewOrRemembered();
2008
2009 __ LeaveStubFrame();
2010
2011 __ ret();
2012}
2013
2014// Called for inline allocation of objects.
2015 void StubCodeCompiler::GenerateAllocationStubForClass(
2016 UnresolvedPcRelativeCalls* unresolved_calls,
2017 const Class& cls,
2018 const Code& allocate_object,
2019 const Code& allocat_object_parametrized) {
2020 classid_t cls_id = target::Class::GetId(cls);
2021 ASSERT(cls_id != kIllegalCid);
2022
2023 // The generated code is different if the class is parameterized.
2024 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
2025 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
2026 cls) != target::Class::kNoTypeArguments);
2027
2028 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
2029 ASSERT(instance_size > 0);
2030
2031 const uword tags =
2032 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
2033
2034 // Note: Keep in sync with helper function.
2035 const Register kTagsReg = AllocateObjectABI::kTagsReg;
2037
2038 __ LoadImmediate(kTagsReg, tags);
2039
2040 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
2041 !target::Class::TraceAllocation(cls) &&
2042 target::SizeFitsInSizeTag(instance_size)) {
2044 RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
2045 if (is_cls_parameterized) {
2046 if (!IsSameObject(NullObject(),
2047 CastHandle<Object>(allocat_object_parametrized))) {
2048 __ GenerateUnRelocatedPcRelativeTailCall();
2049 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2050 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
2051 } else {
2052 __ lx(TMP,
2053 Address(THR,
2054 target::Thread::
2055 allocate_object_parameterized_entry_point_offset()));
2056 __ jr(TMP);
2057 }
2058 } else {
2059 if (!IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
2060 __ GenerateUnRelocatedPcRelativeTailCall();
2061 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2062 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
2063 } else {
2064 __ lx(
2065 TMP,
2066 Address(THR, target::Thread::allocate_object_entry_point_offset()));
2067 __ jr(TMP);
2068 }
2069 }
2070 } else {
2071 if (!is_cls_parameterized) {
2073 }
2074 __ lx(TMP,
2075 Address(THR,
2076 target::Thread::allocate_object_slow_entry_point_offset()));
2077 __ jr(TMP);
2078 }
2079}
2080
2081// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
2082// from the entry code of a Dart function after an error in the passed
2083// argument names or count is detected.
2084// Input parameters:
2085// RA : return address.
2086// SP : address of last argument.
2087// S4: arguments descriptor array.
2088void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
2089 __ EnterStubFrame();
2090
2091 // Load the receiver.
2092 __ LoadCompressedSmiFieldFromOffset(
2093 T2, S4, target::ArgumentsDescriptor::size_offset());
2094 __ AddShifted(TMP, FP, T2, target::kWordSizeLog2 - 1); // T2 is Smi
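  // TMP = FP + argument_count * kWordSize (T2 is a Smi, hence the shift by one
  // less); the receiver is the first (outermost) argument, so it is loaded from
  // param_end_from_fp words beyond that.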
2095 __ LoadFromOffset(A0, TMP,
2096 target::frame_layout.param_end_from_fp * target::kWordSize);
2097
2098 // Load the function.
2099 __ LoadCompressedFieldFromOffset(TMP, A0, target::Closure::function_offset());
2100
2101 // Push result slot, receiver, function, arguments descriptor.
2102 __ PushRegistersInOrder({ZR, A0, TMP, S4});
2103
2104 // Adjust arguments count.
2105 __ LoadCompressedSmiFieldFromOffset(
2106 T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
2107 Label args_count_ok;
2108 __ beqz(T3, &args_count_ok, Assembler::kNearJump);
2109 // Include the type arguments.
2110 __ addi(T2, T2, target::ToRawSmi(1));
2111 __ Bind(&args_count_ok);
2112
2113 // T2: Smi-tagged arguments array length.
2114 PushArrayOfArguments(assembler);
2115
2116 const intptr_t kNumArgs = 4;
2117 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2118 // noSuchMethod on closures always throws an error, so it will never return.
2119 __ ebreak();
2120}
2121
2122// A6: function object.
2123// S5: inline cache data object.
2124// Cannot use function object from ICData as it may be the inlined
2125// function and not the top-scope function.
2126 void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
2127 if (FLAG_precompiled_mode) {
2128 __ Breakpoint();
2129 return;
2130 }
2131 if (FLAG_trace_optimized_ic_calls) {
2132 __ Stop("Unimplemented");
2133 }
2134 __ LoadFieldFromOffset(TMP, A6, target::Function::usage_counter_offset(),
2135 kFourBytes);
2136 __ addi(TMP, TMP, 1);
2137 __ StoreFieldToOffset(TMP, A6, target::Function::usage_counter_offset(),
2138 kFourBytes);
2139}
2140
2141// Loads function into 'func_reg'.
2142 void StubCodeCompiler::GenerateUsageCounterIncrement(Register func_reg) {
2143 if (FLAG_precompiled_mode) {
2144 __ trap();
2145 return;
2146 }
2147 if (FLAG_optimization_counter_threshold >= 0) {
2148 __ Comment("Increment function counter");
2149 __ LoadFieldFromOffset(func_reg, IC_DATA_REG,
2150 target::ICData::owner_offset());
2151 __ LoadFieldFromOffset(
2152 A1, func_reg, target::Function::usage_counter_offset(), kFourBytes);
2153 __ AddImmediate(A1, 1);
2154 __ StoreFieldToOffset(A1, func_reg,
2155 target::Function::usage_counter_offset(), kFourBytes);
2156 }
2157}
2158
2159// Note: S5 must be preserved.
2160// Attempt a quick Smi operation for known operations ('kind'). The ICData
2161// must have been primed with a Smi/Smi check that will be used for counting
2162// the invocations.
2163static void EmitFastSmiOp(Assembler* assembler,
2164 Token::Kind kind,
2165 intptr_t num_args,
2166 Label* not_smi_or_overflow) {
2167 __ Comment("Fast Smi op");
2168 __ lx(A0, Address(SP, +1 * target::kWordSize)); // Left.
2169 __ lx(A1, Address(SP, +0 * target::kWordSize)); // Right.
2170 __ or_(TMP2, A0, A1);
2171 __ andi(TMP2, TMP2, kSmiTagMask);
2172 __ bnez(TMP2, not_smi_or_overflow);
2173 switch (kind) {
2174 case Token::kADD: {
2175 __ AddBranchOverflow(A0, A0, A1, not_smi_or_overflow);
2176 break;
2177 }
2178 case Token::kLT: {
2179 // TODO(riscv): Bit tricks with slt and NULL_REG.
2180 Label load_true, done;
2181 __ blt(A0, A1, &load_true, compiler::Assembler::kNearJump);
2182 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2183 __ j(&done, Assembler::kNearJump);
2184 __ Bind(&load_true);
2185 __ LoadObject(A0, CastHandle<Object>(TrueObject()));
2186 __ Bind(&done);
2187 break;
2188 }
2189 case Token::kEQ: {
2190 // TODO(riscv): Bit tricks with slt and NULL_REG.
2191 Label load_true, done;
2192 __ beq(A0, A1, &load_true, Assembler::kNearJump);
2193 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2194 __ j(&done, Assembler::kNearJump);
2195 __ Bind(&load_true);
2196 __ LoadObject(A0, CastHandle<Object>(TrueObject()));
2197 __ Bind(&done);
2198 break;
2199 }
2200 default:
2201 UNIMPLEMENTED();
2202 }
2203
2204 // S5: IC data object (preserved).
2205 __ LoadFieldFromOffset(A6, IC_DATA_REG, target::ICData::entries_offset());
2206 // A6: ic_data_array with check entries: classes and target functions.
2207 __ AddImmediate(A6, target::Array::data_offset() - kHeapObjectTag);
2208// A6: points directly to the first ic data array element.
2209#if defined(DEBUG)
2210 // Check that first entry is for Smi/Smi.
2211 Label error, ok;
2212 const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
2213 __ LoadCompressedSmiFromOffset(TMP, A6, 0);
2214 __ CompareImmediate(TMP, imm_smi_cid);
2215 __ BranchIf(NE, &error);
2216 __ LoadCompressedSmiFromOffset(TMP, A6, target::kCompressedWordSize);
2217 __ CompareImmediate(TMP, imm_smi_cid);
2218 __ BranchIf(EQ, &ok);
2219 __ Bind(&error);
2220 __ Stop("Incorrect IC data");
2221 __ Bind(&ok);
2222#endif
2223 if (FLAG_optimization_counter_threshold >= 0) {
2224 const intptr_t count_offset =
2225 target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
2226 // Update counter, ignore overflow.
2227 __ LoadCompressedSmiFromOffset(A1, A6, count_offset);
2228 __ addi(A1, A1, target::ToRawSmi(1));
2229 __ StoreToOffset(A1, A6, count_offset);
2230 }
2231
2232 __ ret();
2233}
2234
2235// Saves the offset of the target entry-point (from the Function) into T6.
2236//
2237 // Must be the first code generated, since any code emitted before it would be
2238 // skipped when entering through the unchecked entry point.
2239static void GenerateRecordEntryPoint(Assembler* assembler) {
2240 Label done;
2241 __ LoadImmediate(T6, target::Function::entry_point_offset() - kHeapObjectTag);
2242 __ j(&done);
2243 __ BindUncheckedEntryPoint();
2244 __ LoadImmediate(
2245 T6, target::Function::entry_point_offset(CodeEntryKind::kUnchecked) -
2246 kHeapObjectTag);
2247 __ Bind(&done);
2248}
2249
2250// Generate inline cache check for 'num_args'.
2251// A0: receiver (if instance call)
2252// S5: ICData
2253// RA: return address
2254// Control flow:
2255// - If receiver is null -> jump to IC miss.
2256// - If receiver is Smi -> load Smi class.
2257// - If receiver is not-Smi -> load receiver's class.
2258// - Check if 'num_args' (including receiver) match any IC data group.
2259// - Match found -> jump to target.
2260// - Match not found -> jump to IC miss.
2261 void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
2262 intptr_t num_args,
2263 const RuntimeEntry& handle_ic_miss,
2264 Token::Kind kind,
2265 Optimized optimized,
2266 CallType type,
2267 Exactness exactness) {
2268 const bool save_entry_point = kind == Token::kILLEGAL;
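  // Only the generic (Token::kILLEGAL) stubs have an unchecked entry point;
  // they record in T6 the Function entry-point offset (checked or unchecked)
  // they were entered through, so the target is later invoked through the
  // matching entry.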
2269 if (FLAG_precompiled_mode) {
2270 __ Breakpoint();
2271 return;
2272 }
2273
2274 if (save_entry_point) {
2275 GenerateRecordEntryPoint(assembler);
2276 // T6: untagged entry point offset
2277 }
2278
2279 if (optimized == kOptimized) {
2280 GenerateOptimizedUsageCounterIncrement();
2281 } else {
2282 GenerateUsageCounterIncrement(/*scratch=*/T0);
2283 }
2284
2285 ASSERT(num_args == 1 || num_args == 2);
2286#if defined(DEBUG)
2287 {
2288 Label ok;
2289 // Check that the IC data array has NumArgsTested() == num_args.
2290 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2291 __ LoadFromOffset(TMP, IC_DATA_REG,
2292 target::ICData::state_bits_offset() - kHeapObjectTag,
2294 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2295 __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
2296 __ CompareImmediate(TMP, num_args);
2297 __ BranchIf(EQ, &ok, Assembler::kNearJump);
2298 __ Stop("Incorrect stub for IC data");
2299 __ Bind(&ok);
2300 }
2301#endif // DEBUG
2302
2303#if !defined(PRODUCT)
2304 Label stepping, done_stepping;
2305 if (optimized == kUnoptimized) {
2306 __ Comment("Check single stepping");
2307 __ LoadIsolate(TMP);
2308 __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
2310 __ bnez(TMP, &stepping);
2311 __ Bind(&done_stepping);
2312 }
2313#endif
2314
2315 Label not_smi_or_overflow;
2316 if (kind != Token::kILLEGAL) {
2317 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2318 }
2319 __ Bind(&not_smi_or_overflow);
2320
2321 __ Comment("Extract ICData initial values and receiver cid");
2322 // S5: IC data object (preserved).
2323 __ LoadFieldFromOffset(A1, IC_DATA_REG, target::ICData::entries_offset());
2324 // A1: ic_data_array with check entries: classes and target functions.
2325 __ AddImmediate(A1, target::Array::data_offset() - kHeapObjectTag);
2326 // A1: points directly to the first ic data array element.
2327
2328 if (type == kInstanceCall) {
2329 __ LoadTaggedClassIdMayBeSmi(T1, A0);
2330 __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
2331 target::CallSiteData::arguments_descriptor_offset());
2332 if (num_args == 2) {
2333 __ LoadCompressedSmiFieldFromOffset(
2334 A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
2335 __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
2336 __ add(A7, SP, A7);
2337 __ lx(A6, Address(A7, -2 * target::kWordSize));
2338 __ LoadTaggedClassIdMayBeSmi(T2, A6);
2339 }
2340 } else {
2341 __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
2342 target::CallSiteData::arguments_descriptor_offset());
2343 __ LoadCompressedSmiFieldFromOffset(
2344 A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
2345 __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
2346 __ add(A7, A7, SP);
2347 __ lx(A6, Address(A7, -1 * target::kWordSize));
2348 __ LoadTaggedClassIdMayBeSmi(T1, A6);
2349 if (num_args == 2) {
2350 __ lx(A6, Address(A7, -2 * target::kWordSize));
2351 __ LoadTaggedClassIdMayBeSmi(T2, A6);
2352 }
2353 }
2354 // T1: first argument class ID as Smi.
2355 // T2: second argument class ID as Smi.
2356 // S4: args descriptor
2357
2358 // Unroll the generic stub (which is only generated once) more than the others.
2359 const bool optimize = kind == Token::kILLEGAL;
2360
2361 // Loop that checks if there is an IC data match.
2362 Label loop, found, miss;
2363 __ Comment("ICData loop");
2364
2365 __ Bind(&loop);
2366 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2367 Label update;
2368
2369 __ LoadCompressedSmiFromOffset(A7, A1, 0);
2370 if (num_args == 1) {
2371 __ beq(A7, T1, &found); // Class id match?
2372 } else {
2373 __ bne(A7, T1, &update); // Continue.
2374 __ LoadCompressedSmiFromOffset(A7, A1, target::kCompressedWordSize);
2375 __ beq(A7, T2, &found); // Class id match?
2376 }
2377 __ Bind(&update);
2378
2379 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2380 num_args, exactness == kCheckExactness) *
2381 target::kCompressedWordSize;
2382 __ AddImmediate(A1, entry_size); // Next entry.
2383
2384 __ CompareImmediate(A7, target::ToRawSmi(kIllegalCid)); // Done?
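    // The IC data entry array is terminated by a kIllegalCid sentinel;
    // reaching it means no class-id match, i.e. an IC miss.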
2385 if (unroll == 0) {
2386 __ BranchIf(NE, &loop);
2387 } else {
2388 __ BranchIf(EQ, &miss);
2389 }
2390 }
2391
2392 __ Bind(&miss);
2393 __ Comment("IC miss");
2394
2395 // Compute address of arguments.
2396 __ LoadCompressedSmiFieldFromOffset(
2397 A7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
2398 __ slli(A7, A7, target::kWordSizeLog2 - kSmiTagSize);
2399 __ add(A7, A7, SP);
2400 __ subi(A7, A7, 1 * target::kWordSize);
2401
2402 // A7: address of receiver
2403 // Create a stub frame as we are pushing some objects on the stack before
2404 // calling into the runtime.
2405 __ EnterStubFrame();
2406 // Preserve IC data object and arguments descriptor array and
2407 // setup space on stack for result (target code object).
2408 __ PushRegistersInOrder({ARGS_DESC_REG, IC_DATA_REG});
2409 if (save_entry_point) {
2410 __ SmiTag(T6);
2411 __ PushRegister(T6);
2412 }
2413 // Setup space on stack for the result (target code object).
2414 __ PushRegister(ZR);
2415 // Push call arguments.
2416 for (intptr_t i = 0; i < num_args; i++) {
2417 __ LoadFromOffset(TMP, A7, -target::kWordSize * i);
2418 __ PushRegister(TMP);
2419 }
2420 // Pass IC data object.
2421 __ PushRegister(IC_DATA_REG);
2422 __ CallRuntime(handle_ic_miss, num_args + 1);
2423 // Remove the call arguments pushed earlier, including the IC data object.
2424 __ Drop(num_args + 1);
2425 // Pop returned function object into FUNCTION_REG (T0).
2426 // Restore arguments descriptor array and IC data array.
2427 __ PopRegister(FUNCTION_REG); // Pop returned function object into T0.
2428 if (save_entry_point) {
2429 __ PopRegister(T6);
2430 __ SmiUntag(T6);
2431 }
2432 __ PopRegister(IC_DATA_REG); // Restore IC Data.
2433 __ PopRegister(ARGS_DESC_REG); // Restore arguments descriptor array.
2434 __ RestoreCodePointer();
2435 __ LeaveStubFrame();
2436 Label call_target_function;
2437 if (!FLAG_lazy_dispatchers) {
2438 GenerateDispatcherCode(assembler, &call_target_function);
2439 } else {
2440 __ j(&call_target_function);
2441 }
2442
2443 __ Bind(&found);
2444 // A1: pointer to an IC data check group.
2445 const intptr_t target_offset =
2446 target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
2447 const intptr_t count_offset =
2448 target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
2449 const intptr_t exactness_offset =
2450 target::ICData::ExactnessIndexFor(num_args) * target::kCompressedWordSize;
2451
2452 Label call_target_function_through_unchecked_entry;
2453 if (exactness == kCheckExactness) {
2454 Label exactness_ok;
2455 ASSERT(num_args == 1);
2456 __ LoadCompressedSmi(T1, Address(A1, exactness_offset));
2457 __ LoadImmediate(
2460 __ blt(T1, TMP, &exactness_ok);
2461 __ beq(T1, TMP, &call_target_function_through_unchecked_entry);
2462
2463 // Check trivial exactness.
2464 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2465 // because we only emit calls to this stub when it is not null.
2466 __ LoadCompressed(
2467 T2, FieldAddress(S5, target::ICData::receivers_static_type_offset()));
2468 __ LoadCompressed(T2, FieldAddress(T2, target::Type::arguments_offset()));
2469 // T1 contains an offset to type arguments in words as a smi,
2470 // hence TIMES_4. A0 is guaranteed to be non-smi because it is expected
2471 // to have type arguments.
2472 __ LoadIndexedPayload(TMP, A0, 0, T1, TIMES_COMPRESSED_HALF_WORD_SIZE,
2473 kObjectBytes);
2474 __ beq(T2, TMP, &call_target_function_through_unchecked_entry);
2475
2476 // Update exactness state (not-exact anymore).
2477 __ LoadImmediate(
2479 __ StoreToOffset(TMP, A1, exactness_offset, kObjectBytes);
2480 __ Bind(&exactness_ok);
2481 }
2482 __ LoadCompressedFromOffset(FUNCTION_REG, A1, target_offset);
2483
2484 if (FLAG_optimization_counter_threshold >= 0) {
2485 __ Comment("Update caller's counter");
2486 __ LoadCompressedSmiFromOffset(TMP, A1, count_offset);
2487 __ addi(TMP, TMP, target::ToRawSmi(1)); // Ignore overflow.
2488 __ StoreToOffset(TMP, A1, count_offset, kObjectBytes);
2489 }
2490
2491 __ Comment("Call target");
2492 __ Bind(&call_target_function);
2493 // T0: target function.
2494 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2495 target::Function::code_offset());
2496 if (save_entry_point) {
2497 __ add(A7, FUNCTION_REG, T6);
2498 __ lx(A7, Address(A7, 0));
2499 } else {
2500 __ LoadFieldFromOffset(A7, FUNCTION_REG,
2501 target::Function::entry_point_offset());
2502 }
2503 __ jr(A7); // FUNCTION_REG: Function, argument to lazy compile stub.
2504
2505 if (exactness == kCheckExactness) {
2506 __ Bind(&call_target_function_through_unchecked_entry);
2507 if (FLAG_optimization_counter_threshold >= 0) {
2508 __ Comment("Update ICData counter");
2509 __ LoadCompressedSmiFromOffset(TMP, A1, count_offset);
2510 __ addi(TMP, TMP, target::ToRawSmi(1)); // Ignore overflow.
2511 __ StoreToOffset(TMP, A1, count_offset, kObjectBytes);
2512 }
2513 __ Comment("Call target (via unchecked entry point)");
2514 __ LoadCompressedFromOffset(FUNCTION_REG, A1, target_offset);
2515 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2516 target::Function::code_offset());
2517 __ LoadFieldFromOffset(
2518 A7, FUNCTION_REG,
2519 target::Function::entry_point_offset(CodeEntryKind::kUnchecked));
2520 __ jr(A7);
2521 }
2522
2523#if !defined(PRODUCT)
2524 if (optimized == kUnoptimized) {
2525 __ Bind(&stepping);
2526 __ EnterStubFrame();
2527 if (type == kInstanceCall) {
2528 __ PushRegister(A0); // Preserve receiver.
2529 }
2530 if (save_entry_point) {
2531 __ SmiTag(T6);
2532 __ PushRegister(T6);
2533 }
2534 __ PushRegister(IC_DATA_REG); // Preserve IC data.
2535 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2536 __ PopRegister(IC_DATA_REG);
2537 if (save_entry_point) {
2538 __ PopRegister(T6);
2539 __ SmiUntag(T6);
2540 }
2541 if (type == kInstanceCall) {
2542 __ PopRegister(A0);
2543 }
2544 __ RestoreCodePointer();
2545 __ LeaveStubFrame();
2546 __ j(&done_stepping);
2547 }
2548#endif
2549}
2550
2551// A0: receiver
2552// S5: ICData
2553// RA: return address
2554void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2556 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2558}
2559
2560// A0: receiver
2561// S5: ICData
2562// RA: return address
2563void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2565 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2567}
2568
2569// A0: receiver
2570// S5: ICData
2571// RA: return address
2572void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2574 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2576}
2577
2578// A0: receiver
2579// S5: ICData
2580// RA: return address
2581void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2583 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2585}
2586
2587// A0: receiver
2588// S5: ICData
2589// RA: return address
2590void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2592 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2594}
2595
2596// A0: receiver
2597// S5: ICData
2598// RA: return address
2599void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2601 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2603}
2604
2605// A0: receiver
2606// S5: ICData
2607// A6: Function
2608// RA: return address
2609void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2611 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2613}
2614
2615// A0: receiver
2616// S5: ICData
2617// A6: Function
2618// RA: return address
2619void StubCodeCompiler::
2620 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2622 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2624}
2625
2626// A0: receiver
2627// S5: ICData
2628// A6: Function
2629// RA: return address
2630void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2632 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2634}
2635
2636// S5: ICData
2637// RA: return address
2638void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2639 GenerateRecordEntryPoint(assembler);
2640 GenerateUsageCounterIncrement(/* scratch */ T0);
2641
2642#if defined(DEBUG)
2643 {
2644 Label ok;
2645 // Check that the IC data array has NumArgsTested() == 0.
2646 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2647 __ LoadFromOffset(TMP, IC_DATA_REG,
2648 target::ICData::state_bits_offset() - kHeapObjectTag,
2650 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2651 __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
2652 __ CompareImmediate(TMP, 0);
2653 __ BranchIf(EQ, &ok);
2654 __ Stop("Incorrect IC data for unoptimized static call");
2655 __ Bind(&ok);
2656 }
2657#endif // DEBUG
2658
2659 // Check single stepping.
2660#if !defined(PRODUCT)
2661 Label stepping, done_stepping;
2662 __ LoadIsolate(TMP);
2663 __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
2665 __ bnez(TMP, &stepping, Assembler::kNearJump);
2666 __ Bind(&done_stepping);
2667#endif
2668
2669 // S5: IC data object (preserved).
2670 __ LoadFieldFromOffset(A0, IC_DATA_REG, target::ICData::entries_offset());
2671 // A0: ic_data_array with entries: target functions and count.
2672 __ AddImmediate(A0, target::Array::data_offset() - kHeapObjectTag);
2673 // A0: points directly to the first ic data array element.
2674 const intptr_t target_offset =
2675 target::ICData::TargetIndexFor(0) * target::kCompressedWordSize;
2676 const intptr_t count_offset =
2677 target::ICData::CountIndexFor(0) * target::kCompressedWordSize;
2678
2679 if (FLAG_optimization_counter_threshold >= 0) {
2680 // Increment count for this call, ignore overflow.
2681 __ LoadCompressedSmiFromOffset(TMP, A0, count_offset);
2682 __ addi(TMP, TMP, target::ToRawSmi(1));
2683 __ StoreToOffset(TMP, A0, count_offset);
2684 }
2685
2686 // Load arguments descriptor into ARGS_DESC_REG (S4).
2687 __ LoadFieldFromOffset(ARGS_DESC_REG, IC_DATA_REG,
2688 target::CallSiteData::arguments_descriptor_offset());
2689
2690 // Get function and call it, if possible.
2691 __ LoadCompressedFromOffset(FUNCTION_REG, A0, target_offset);
2692 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2693 target::Function::code_offset());
2694 __ add(A0, FUNCTION_REG, T6);
2695 __ lx(TMP, Address(A0, 0));
2696 __ jr(TMP); // FUNCTION_REG: Function, argument to lazy compile stub.
2697
2698#if !defined(PRODUCT)
2699 __ Bind(&stepping);
2700 __ EnterStubFrame();
2701 __ PushRegister(IC_DATA_REG); // Preserve IC data.
2702 __ SmiTag(T6);
2703 __ PushRegister(T6);
2704 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2705 __ PopRegister(T6);
2706 __ SmiUntag(T6);
2707 __ PopRegister(IC_DATA_REG);
2708 __ RestoreCodePointer();
2709 __ LeaveStubFrame();
2710 __ j(&done_stepping);
2711#endif
2712}
2713
2714// S5: ICData
2715// RA: return address
2716void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2717 GenerateUsageCounterIncrement(/* scratch */ T0);
2718 GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
2719 Token::kILLEGAL, kUnoptimized, kStaticCall,
2721}
2722
2723// S5: ICData
2724// RA: return address
2725void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2726 GenerateUsageCounterIncrement(/* scratch */ T0);
2728 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2730}
2731
2732// Stub for compiling a function and jumping to the compiled code.
2733// ARGS_DESC_REG: Arguments descriptor.
2734// FUNCTION_REG: Function.
2735void StubCodeCompiler::GenerateLazyCompileStub() {
2736 // Preserve arg desc.
2737 __ EnterStubFrame();
2738 // Save arguments descriptor and pass function.
2739 __ PushRegistersInOrder({ARGS_DESC_REG, FUNCTION_REG});
2740 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2741 __ PopRegister(FUNCTION_REG); // Restore function.
2742 __ PopRegister(ARGS_DESC_REG); // Restore arg desc.
2743 __ LeaveStubFrame();
2744
2745 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2746 target::Function::code_offset());
2747 __ LoadFieldFromOffset(TMP, FUNCTION_REG,
2748 target::Function::entry_point_offset());
2749 __ jr(TMP);
2750}
2751
2752// A0: Receiver
2753// S5: ICData
2754void StubCodeCompiler::GenerateICCallBreakpointStub() {
2755#if defined(PRODUCT)
2756 __ Stop("No debugging in PRODUCT mode");
2757#else
2758 __ EnterStubFrame();
2759 __ subi(SP, SP, 3 * target::kWordSize);
2760 __ sx(A0, Address(SP, 2 * target::kWordSize)); // Preserve receiver.
2761 __ sx(S5, Address(SP, 1 * target::kWordSize)); // Preserve IC data.
2762 __ sx(ZR, Address(SP, 0 * target::kWordSize)); // Space for result.
2763 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2764 __ lx(CODE_REG, Address(SP, 0 * target::kWordSize)); // Original stub.
2765 __ lx(S5, Address(SP, 1 * target::kWordSize)); // Restore IC data.
2766 __ lx(A0, Address(SP, 2 * target::kWordSize)); // Restore receiver.
2767 __ LeaveStubFrame();
2768 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
2769 __ jr(TMP);
2770#endif
2771}
2772
2773// S5: ICData
2774void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2775#if defined(PRODUCT)
2776 __ Stop("No debugging in PRODUCT mode");
2777#else
2778 __ EnterStubFrame();
2779 __ subi(SP, SP, 2 * target::kWordSize);
2780 __ sx(S5, Address(SP, 1 * target::kWordSize)); // Preserve IC data.
2781 __ sx(ZR, Address(SP, 0 * target::kWordSize)); // Space for result.
2782 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2783 __ lx(CODE_REG, Address(SP, 0 * target::kWordSize)); // Original stub.
2784 __ lx(S5, Address(SP, 1 * target::kWordSize)); // Restore IC data.
2785 __ LeaveStubFrame();
2786 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
2787 __ jr(TMP);
2788#endif // defined(PRODUCT)
2789}
2790
2791void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2792#if defined(PRODUCT)
2793 __ Stop("No debugging in PRODUCT mode");
2794#else
2795 __ EnterStubFrame();
2796 __ subi(SP, SP, 1 * target::kWordSize);
2797 __ sx(ZR, Address(SP, 0 * target::kWordSize)); // Space for result.
2798 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2799 __ lx(CODE_REG, Address(SP, 0 * target::kWordSize));
2800 __ LeaveStubFrame();
2801 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
2802 __ jr(TMP);
2803#endif // defined(PRODUCT)
2804}
2805
2806// Called only from unoptimized code. All relevant registers have been saved.
2807void StubCodeCompiler::GenerateDebugStepCheckStub() {
2808#if defined(PRODUCT)
2809 __ Stop("No debugging in PRODUCT mode");
2810#else
2811 // Check single stepping.
2812 Label stepping, done_stepping;
2813 __ LoadIsolate(A1);
2814 __ LoadFromOffset(A1, A1, target::Isolate::single_step_offset(),
2816 __ bnez(A1, &stepping, compiler::Assembler::kNearJump);
2817 __ Bind(&done_stepping);
2818 __ ret();
2819
2820 __ Bind(&stepping);
2821 __ EnterStubFrame();
2822 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2823 __ LeaveStubFrame();
2824 __ j(&done_stepping);
2825#endif // defined(PRODUCT)
2826}
2827
2828// Used to check class and type arguments. Arguments passed in registers:
2829//
2830// Inputs (all preserved, mostly from TypeTestABI struct):
2831// - kSubtypeTestCacheReg: UntaggedSubtypeTestCache
2832// - kInstanceReg: instance to test against.
2833// - kDstTypeReg: destination type (for n>=7).
2834// - kInstantiatorTypeArgumentsReg: instantiator type arguments (for n>=3).
2835// - kFunctionTypeArgumentsReg: function type arguments (for n>=4).
2836// - RA: return address.
2837//
2838// Outputs (from TypeTestABI struct):
2839// - kSubtypeTestCacheResultReg: the cached result, or null if not found.
2840void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2841 int n) {
2842 ASSERT(n >= 1);
2844 // If we need the parent function type arguments for a closure, we also need
2845 // the delayed type arguments, so this case will never happen.
2846 ASSERT(n != 5);
2847
2848 // We could initialize kSubtypeTestCacheResultReg with null and use that as
2849 // the null register up until exit, which means we'd just need to return
2850 // without setting it in the not_found case.
2851 //
2852 // However, that would mean the expense of keeping another register live
2853 // across the loop to hold the cache entry address, and the not_found case
2854 // means we're going to runtime, so optimize for the found case instead.
2855 //
2856 // Thus, we use it to store the current cache entry, since it's distinct from
2857 // all the preserved input registers and the scratch register, and the last
2858 // use of the current cache entry is to set kSubtypeTestCacheResultReg.
2859 const Register kCacheArrayReg = TypeTestABI::kSubtypeTestCacheResultReg;
2860
2861 Label not_found;
2862 GenerateSubtypeTestCacheSearch(
2863 assembler, n, NULL_REG, kCacheArrayReg,
2871 [&](Assembler* assembler, int n) {
2872 __ LoadCompressed(
2873 TypeTestABI::kSubtypeTestCacheResultReg,
2874 Address(kCacheArrayReg, target::kCompressedWordSize *
2875 target::SubtypeTestCache::kTestResult));
2876 __ Ret();
2877 },
2878 [&](Assembler* assembler, int n) {
2879 __ mv(TypeTestABI::kSubtypeTestCacheResultReg, NULL_REG);
2880 __ Ret();
2881 });
2882}
2883
2884void StubCodeCompiler::GenerateGetCStackPointerStub() {
2885 __ mv(A0, SP);
2886 __ ret();
2887}
2888
2889// Jump to a frame on the call stack.
2890// RA: return address.
2891// A0: program_counter.
2892// A1: stack_pointer.
2893// A2: frame_pointer.
2894// A3: thread.
2895// Does not return.
2896//
2897// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
2898void StubCodeCompiler::GenerateJumpToFrameStub() {
2901 __ mv(CALLEE_SAVED_TEMP, A0); // Program counter.
2902 __ mv(SP, A1); // Stack pointer.
2903 __ mv(FP, A2); // Frame_pointer.
2904 __ mv(THR, A3);
2905#if defined(DART_TARGET_OS_FUCHSIA) || defined(DART_TARGET_OS_ANDROID)
2906 // We need to restore the shadow call stack pointer like longjmp would,
2907 // effectively popping all the return addresses between the Dart exit frame
2908 // and Exceptions::JumpToFrame, otherwise the shadow call stack might
2909 // eventually overflow.
2910 __ lx(GP, Address(THR, target::Thread::saved_shadow_call_stack_offset()));
2911#elif defined(USING_SHADOW_CALL_STACK)
2912#error Unimplemented
2913#endif
2914 Label exit_through_non_ffi;
2915 // Check if we exited generated code through FFI. If so, do the transition - this is needed
2916 // because normally runtime calls transition back to generated via destructor
2917 // of TransitionGeneratedToVM/Native that is part of runtime boilerplate
2918 // code (see DEFINE_RUNTIME_ENTRY_IMPL in runtime_entry.h). FFI calls don't
2919 // have this boilerplate or this stack resource, so they have to transition
2920 // explicitly.
2921 __ LoadFromOffset(TMP, THR,
2922 compiler::target::Thread::exit_through_ffi_offset());
2923 __ LoadImmediate(TMP2, target::Thread::exit_through_ffi());
2924 __ bne(TMP, TMP2, &exit_through_non_ffi);
2925 __ TransitionNativeToGenerated(TMP, /*leave_safepoint=*/true,
2926 /*ignore_unwind_in_progress=*/true);
2927 __ Bind(&exit_through_non_ffi);
2928
2929 // Refresh pinned registers values (inc. write barrier mask and null object).
2930 __ RestorePinnedRegisters();
2931 // Set the tag.
2932 __ LoadImmediate(TMP, VMTag::kDartTagId);
2933 __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());
2934 // Clear top exit frame.
2935 __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
2936 // Restore the pool pointer.
2937 __ RestoreCodePointer();
2938 if (FLAG_precompiled_mode) {
2939 __ SetupGlobalPoolAndDispatchTable();
2940 } else {
2941 __ LoadPoolPointer();
2942 }
2943 __ jr(CALLEE_SAVED_TEMP); // Jump to continuation point.
2944}
2945
2946// Run an exception handler. Execution comes from JumpToFrame
2947// stub or from the simulator.
2948//
2949// The arguments are stored in the Thread object.
2950// Does not return.
2951void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
2952 // Exception object.
2954 __ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
2955 __ StoreToOffset(NULL_REG, THR, target::Thread::active_exception_offset());
2956
2957 // StackTrace object.
2959 __ LoadFromOffset(A1, THR, target::Thread::active_stacktrace_offset());
2960 __ StoreToOffset(NULL_REG, THR, target::Thread::active_stacktrace_offset());
2961
2962 __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());
2963 __ ret(); // Jump to the exception handler code.
2964}
2965
2966// Deoptimize a frame on the call stack before rewinding.
2967// The arguments are stored in the Thread object.
2968// No result.
2969void StubCodeCompiler::GenerateDeoptForRewindStub() {
2970 // Push zap value instead of CODE_REG.
2971 __ LoadImmediate(TMP, kZapCodeReg);
2972 __ PushRegister(TMP);
2973
2974 // Load the deopt pc into RA.
2975 __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());
2976 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2977
2978 // After we have deoptimized, jump to the correct frame.
2979 __ EnterStubFrame();
2980 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2981 __ LeaveStubFrame();
2982 __ ebreak();
2983}
2984
2985// Calls to the runtime to optimize the given function.
2986// A0: function to be re-optimized.
2987// ARGS_DESC_REG: argument descriptor (preserved).
2988void StubCodeCompiler::GenerateOptimizeFunctionStub() {
2989 __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
2990 __ EnterStubFrame();
2991
2992 __ subi(SP, SP, 3 * target::kWordSize);
2993 __ sx(ARGS_DESC_REG,
2994 Address(SP, 2 * target::kWordSize)); // Preserves args descriptor.
2995 __ sx(ZR, Address(SP, 1 * target::kWordSize)); // Result slot.
2996 __ sx(A0, Address(SP, 0 * target::kWordSize)); // Function argument.
2997 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
2998 __ lx(FUNCTION_REG, Address(SP, 1 * target::kWordSize)); // Function result.
2999 __ lx(ARGS_DESC_REG,
3000 Address(SP, 2 * target::kWordSize)); // Restore args descriptor.
3001 __ addi(SP, SP, 3 * target::kWordSize);
3002
3003 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
3004 target::Function::code_offset());
3005 __ LoadFieldFromOffset(A1, FUNCTION_REG,
3006 target::Function::entry_point_offset());
3007 __ LeaveStubFrame();
3008 __ jr(A1);
3009 __ ebreak();
3010}
3011
3012// Does identical check (object references are equal or not equal) with special
3013// checks for boxed numbers and returns with TMP = 0 iff left and right are
3014// identical.
3015static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
3016 const Register left,
3017 const Register right) {
3018 Label reference_compare, check_mint, done;
3019 // If any of the arguments is Smi do reference compare.
3020 // Note: A Mint cannot contain a value that would fit in Smi.
3021 __ BranchIfSmi(left, &reference_compare, Assembler::kNearJump);
3022 __ BranchIfSmi(right, &reference_compare, Assembler::kNearJump);
3023
3024 // Value compare for two doubles.
3025 __ CompareClassId(left, kDoubleCid, /*scratch*/ TMP);
3026 __ BranchIf(NOT_EQUAL, &check_mint, Assembler::kNearJump);
3027 __ CompareClassId(right, kDoubleCid, /*scratch*/ TMP);
3028 __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
3029
3030 // Double values bitwise compare.
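  // XOR accumulates the difference of the raw bit patterns in TMP, so TMP == 0
  // iff the two doubles are bit-identical (the caller tests TMP against zero).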
3031#if XLEN == 32
3032 __ lw(T0, FieldAddress(left, target::Double::value_offset()));
3033 __ lw(T1, FieldAddress(right, target::Double::value_offset()));
3034 __ xor_(TMP, T0, T1);
3035 __ lw(T0, FieldAddress(left, target::Double::value_offset() + 4));
3036 __ lw(T1, FieldAddress(right, target::Double::value_offset() + 4));
3037 __ xor_(TMP2, T0, T1);
3038 __ or_(TMP, TMP, TMP2);
3039#else
3040 __ ld(T0, FieldAddress(left, target::Double::value_offset()));
3041 __ ld(T1, FieldAddress(right, target::Double::value_offset()));
3042 __ xor_(TMP, T0, T1);
3043#endif
3044 __ j(&done, Assembler::kNearJump);
3045
3046 __ Bind(&check_mint);
3047 __ CompareClassId(left, kMintCid, /*scratch*/ TMP);
3048 __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
3049 __ CompareClassId(right, kMintCid, /*scratch*/ TMP);
3050 __ BranchIf(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
3051#if XLEN == 32
3052 __ lw(T0, FieldAddress(left, target::Mint::value_offset()));
3053 __ lw(T1, FieldAddress(right, target::Mint::value_offset()));
3054 __ xor_(TMP, T0, T1);
3055 __ lw(T0, FieldAddress(left, target::Mint::value_offset() + 4));
3056 __ lw(T1, FieldAddress(right, target::Mint::value_offset() + 4));
3057 __ xor_(TMP2, T0, T1);
3058 __ or_(TMP, TMP, TMP2);
3059#else
3060 __ ld(T0, FieldAddress(left, target::Mint::value_offset()));
3061 __ ld(T1, FieldAddress(right, target::Mint::value_offset()));
3062 __ xor_(TMP, T0, T1);
3063#endif
3064 __ j(&done, Assembler::kNearJump);
3065
3066 __ Bind(&reference_compare);
3067 __ xor_(TMP, left, right);
3068 __ Bind(&done);
3069}
3070
3071// Called only from unoptimized code. All relevant registers have been saved.
3072// RA: return address.
3073// SP + 4: left operand.
3074// SP + 0: right operand.
3075// Return TMP set to 0 if equal.
3076void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
3077#if !defined(PRODUCT)
3078 // Check single stepping.
3079 Label stepping, done_stepping;
3080 __ LoadIsolate(TMP);
3081 __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
3083 __ bnez(TMP, &stepping);
3084 __ Bind(&done_stepping);
3085#endif
3086
3087 const Register left = A0;
3088 const Register right = A1;
3089 __ LoadFromOffset(left, SP, 1 * target::kWordSize);
3090 __ LoadFromOffset(right, SP, 0 * target::kWordSize);
3091 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3092 __ ret();
3093
3094#if !defined(PRODUCT)
3095 __ Bind(&stepping);
3096 __ EnterStubFrame();
3097 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3098 __ RestoreCodePointer();
3099 __ LeaveStubFrame();
3100 __ j(&done_stepping);
3101#endif
3102}
3103
3104// Called from optimized code only.
3105// RA: return address.
3106// SP + 4: left operand.
3107// SP + 0: right operand.
3108// Return TMP set to 0 if equal.
3109void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
3110 const Register left = A0;
3111 const Register right = A1;
3112 __ LoadFromOffset(left, SP, 1 * target::kWordSize);
3113 __ LoadFromOffset(right, SP, 0 * target::kWordSize);
3114 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3115 __ ret();
3116}
3117
3118// Called from megamorphic call sites.
3119// A0: receiver (passed to target)
3120// IC_DATA_REG: MegamorphicCache (preserved)
3121// Passed to target:
3122// FUNCTION_REG: target function
3123// CODE_REG: target Code
3124// ARGS_DESC_REG: arguments descriptor
3125void StubCodeCompiler::GenerateMegamorphicCallStub() {
3126 // Jump if receiver is a smi.
3127 Label smi_case;
3128 __ BranchIfSmi(A0, &smi_case);
3129
3130 // Loads the cid of the object.
3131 __ LoadClassId(T5, A0);
3132
3133 Label cid_loaded;
3134 __ Bind(&cid_loaded);
3135 __ lx(T2,
3136 FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
3137 __ lx(T1, FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
3138 // T2: cache buckets array.
3139 // T1: mask as a smi.
3140
3141 // Make the cid into a smi.
3142 __ SmiTag(T5);
3143 // T5: class ID of the receiver (smi).
3144
3145 // Compute the table index.
3146 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
3147 // Use slli and sub to multiply by 7 == 8 - 1.
3148 __ slli(T3, T5, 3);
3149 __ sub(T3, T3, T5);
3150 // T3: probe.
3151 Label loop;
3152 __ Bind(&loop);
3153 __ and_(T3, T3, T1);
3154
3155 const intptr_t base = target::Array::data_offset();
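  // Each cache bucket is a pair of compressed words: the receiver class id as
  // a Smi, then the target (a Function, or the miss handler for the
  // kIllegalCid sentinel entry).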
3156 // T3 is a Smi (value * 2) and each entry is two compressed words, so shifting
3157 // by kCompressedWordSizeLog2 scales the probe index to a byte offset.
3157 __ AddShifted(TMP, T2, T3, kCompressedWordSizeLog2);
3158 __ LoadCompressedSmiFieldFromOffset(T4, TMP, base);
3159 Label probe_failed;
3160 __ CompareObjectRegisters(T4, T5);
3161 __ BranchIf(NE, &probe_failed);
3162
3163 Label load_target;
3164 __ Bind(&load_target);
3165 // Call the target found in the cache. For a class id match, this is a
3166 // proper target for the given name and arguments descriptor. If the
3167 // illegal class id was found, the target is a cache miss handler that can
3168 // be invoked as a normal Dart function.
3169 __ LoadCompressed(FUNCTION_REG,
3170 FieldAddress(TMP, base + target::kCompressedWordSize));
3171 __ lx(A1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
3172 __ lx(ARGS_DESC_REG,
3173 FieldAddress(IC_DATA_REG,
3174 target::CallSiteData::arguments_descriptor_offset()));
3175 if (!FLAG_precompiled_mode) {
3176 __ LoadCompressed(
3177 CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
3178 }
3179 __ jr(A1); // T0: Function, argument to lazy compile stub.
3180
3181 // Probe failed, check if it is a miss.
3182 __ Bind(&probe_failed);
3183 ASSERT(kIllegalCid == 0);
3184 Label miss;
3185 __ beqz(T4, &miss); // branch if miss.
3186
3187 // Try next entry in the table.
3188 __ AddImmediate(T3, target::ToRawSmi(1));
3189 __ j(&loop);
3190
3191 // Load cid for the Smi case.
3192 __ Bind(&smi_case);
3193 __ LoadImmediate(T5, kSmiCid);
3194 __ j(&cid_loaded);
3195
3196 __ Bind(&miss);
3197 GenerateSwitchableCallMissStub();
3198}
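// Illustrative sketch (not from the Dart sources): a scalar C++ model of the
// megamorphic cache probe above. It assumes a flat table of (class id, target)
// pairs whose length is a power of two; the first slot is cid * 7 (the
// kSpreadFactor multiply done with slli/sub), masked to the table size, and
// collisions step linearly to the next slot. A stored class id of 0
// (kIllegalCid) marks an empty slot, i.e. a cache miss. Names are illustrative.
#include <cstddef>
#include <cstdint>
#include <vector>

namespace megamorphic_sketch {

struct Entry {
  intptr_t cid;  // 0 (illegal cid) means "empty slot".
  void* target;  // Function to invoke on a hit.
};

inline void* Probe(const std::vector<Entry>& buckets, intptr_t cid,
                   void* miss_handler) {
  const size_t mask = buckets.size() - 1;       // Size is a power of two.
  size_t probe = static_cast<size_t>(cid) * 7;  // kSpreadFactor == 7.
  while (true) {
    probe &= mask;
    const Entry& entry = buckets[probe];
    if (entry.cid == cid) return entry.target;  // Class id match: call target.
    if (entry.cid == 0) return miss_handler;    // Empty slot: cache miss.
    ++probe;                                    // Otherwise try the next slot.
  }
}

}  // namespace megamorphic_sketch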
3199
3200// Input:
3201// A0 - receiver
3202// IC_DATA_REG - icdata
3203void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3204 Label loop, found, miss;
3205 __ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
3206 __ lx(ARGS_DESC_REG,
3207 FieldAddress(IC_DATA_REG,
3208 target::CallSiteData::arguments_descriptor_offset()));
3209 __ AddImmediate(T1, target::Array::data_offset() - kHeapObjectTag);
3210 // T1: first IC entry
3211 __ LoadTaggedClassIdMayBeSmi(A1, A0);
3212 // A1: receiver cid as Smi
3213
3214 __ Bind(&loop);
3215 __ LoadCompressedSmi(T2, Address(T1, 0));
3216 __ beq(A1, T2, &found);
3217 __ CompareImmediate(T2, target::ToRawSmi(kIllegalCid));
3218 __ BranchIf(EQ, &miss);
3219
3220 const intptr_t entry_length =
3221 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3222 target::kCompressedWordSize;
3223 __ AddImmediate(T1, entry_length); // Next entry.
3224 __ j(&loop);
3225
3226 __ Bind(&found);
3227 if (FLAG_precompiled_mode) {
3228 const intptr_t entry_offset =
3229 target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
3230 __ LoadCompressed(A1, Address(T1, entry_offset));
3231 __ lx(A1, FieldAddress(A1, target::Function::entry_point_offset()));
3232 } else {
3233 const intptr_t code_offset =
3234 target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
3235 __ LoadCompressed(CODE_REG, Address(T1, code_offset));
3236 __ lx(A1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3237 }
3238 __ jr(A1);
3239
3240 __ Bind(&miss);
3241 __ lx(A1, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
3242 __ jr(A1);
3243}
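// Illustrative sketch (not from the Dart sources): the loop above is a linear
// scan over fixed-length IC entries, each starting with a receiver class id.
// A match yields that entry's target (its entry point in AOT, its Code object
// in JIT); the kIllegalCid sentinel that terminates the array means a miss.
// The record layout below is a simplification.
#include <cstdint>

namespace ic_scan_sketch {

struct IcEntry {
  intptr_t receiver_cid;  // kIllegalCid (0) terminates the array.
  void* target;           // Entry point (AOT) or Code object (JIT).
};

inline void* Lookup(const IcEntry* entries, intptr_t receiver_cid,
                    void* miss_handler) {
  for (const IcEntry* entry = entries;; ++entry) {
    if (entry->receiver_cid == receiver_cid) return entry->target;
    if (entry->receiver_cid == 0) return miss_handler;  // Hit the sentinel.
  }
}

}  // namespace ic_scan_sketch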
3244
3245// Implement the monomorphic entry check for call-sites where the receiver
3246// might be a Smi.
3247//
3248// A0: receiver
3249// S5: MonomorphicSmiableCall object
3250//
3251// T1,T2: clobbered
3252void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3253 Label miss;
3254 __ LoadClassIdMayBeSmi(T1, A0);
3255
3256 // Note: this stub is only used in AOT mode, hence the direct (bare) call.
3257 __ LoadField(
3258 T2,
3259 FieldAddress(S5, target::MonomorphicSmiableCall::expected_cid_offset()));
3260 __ LoadField(
3261 TMP,
3262 FieldAddress(S5, target::MonomorphicSmiableCall::entrypoint_offset()));
3263 __ bne(T1, T2, &miss);
3264 __ jr(TMP);
3265
3266 __ Bind(&miss);
3267 __ lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
3268 __ jr(TMP);
3269}
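// Illustrative sketch (not from the Dart sources): the monomorphic-smiable
// check reduces to one compare against the cached expected class id; a
// hypothetical scalar equivalent of the dispatch decision:
#include <cstdint>

namespace monomorphic_smiable_sketch {

inline uintptr_t Dispatch(intptr_t receiver_cid, intptr_t expected_cid,
                          uintptr_t entrypoint, uintptr_t miss_entry) {
  // Mirrors bne(T1, T2, &miss): match -> cached entry point, else miss entry.
  return (receiver_cid == expected_cid) ? entrypoint : miss_entry;
}

}  // namespace monomorphic_smiable_sketch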
3270
3271// Called from switchable IC calls.
3272// A0: receiver
3273void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3274 __ lx(CODE_REG,
3275 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3276 __ EnterStubFrame();
3277 // Preserve receiver, setup result slot,
3278 // pass Arg0: stub out and Arg1: Receiver.
3279 __ PushRegistersInOrder({A0, ZR, ZR, A0});
3280 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3281 __ Drop(1);
3282 __ PopRegister(CODE_REG); // result = stub
3283 __ PopRegister(IC_DATA_REG); // result = IC
3284
3285 __ PopRegister(A0); // Restore receiver.
3286 __ LeaveStubFrame();
3287
3288 __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3289 CodeEntryKind::kNormal)));
3290 __ jr(TMP);
3291}
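// Illustrative sketch (not from the Dart sources): a toy stack model of the
// PushRegistersInOrder/PopRegister sequence above, just to make the slot
// layout of the runtime call explicit. The slot roles follow the stub's own
// comments ("Arg0: stub out", "result = IC"); the strings are placeholders.
#include <cassert>
#include <vector>

namespace switchable_miss_sketch {

inline void Demo() {
  std::vector<const char*> stack;
  // PushRegistersInOrder({A0, ZR, ZR, A0}):
  stack.push_back("preserved receiver (A0)");
  stack.push_back("result slot - runtime writes the new cache/IC data");
  stack.push_back("arg0 out-slot - runtime writes the new target stub");
  stack.push_back("arg1 - receiver");
  // After kSwitchableCallMissRuntimeEntry returns:
  stack.pop_back();  // Drop(1): discard arg1.
  stack.pop_back();  // PopRegister(CODE_REG): the new target stub.
  stack.pop_back();  // PopRegister(IC_DATA_REG): the new cache/IC data.
  stack.pop_back();  // PopRegister(A0): the preserved receiver.
  assert(stack.empty());
}

}  // namespace switchable_miss_sketch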
3292
3293// Called from switchable IC calls.
3294// A0: receiver
3295// S5: SingleTargetCache
3296// Passed to target:
3297// CODE_REG: target Code object
3298void StubCodeCompiler::GenerateSingleTargetCallStub() {
3299 Label miss;
3300 __ LoadClassIdMayBeSmi(A1, A0);
3301 __ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
3302 __ lhu(T3, FieldAddress(S5, target::SingleTargetCache::upper_limit_offset()));
3303
3304 __ blt(A1, T2, &miss);
3305 __ bgt(A1, T3, &miss);
3306
3307 __ lx(TMP, FieldAddress(S5, target::SingleTargetCache::entry_point_offset()));
3308 __ lx(CODE_REG, FieldAddress(S5, target::SingleTargetCache::target_offset()));
3309 __ jr(TMP);
3310
3311 __ Bind(&miss);
3312 __ EnterStubFrame();
3313 // Preserve receiver, setup result slot,
3314 // pass Arg0: Stub out and Arg1: Receiver.
3315 __ PushRegistersInOrder({A0, ZR, ZR, A0});
3316 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3317 __ Drop(1);
3318 __ PopRegister(CODE_REG); // result = stub
3319 __ PopRegister(S5); // result = IC
3320
3321 __ PopRegister(A0); // Restore receiver.
3322 __ LeaveStubFrame();
3323
3324 __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3325 CodeEntryKind::kMonomorphic)));
3326 __ jr(TMP);
3327}
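// Illustrative sketch (not from the Dart sources): the fast path above is an
// inclusive class-id range check against the SingleTargetCache limits; a
// hypothetical scalar version of the decision:
#include <cstdint>

namespace single_target_sketch {

inline bool HitsSingleTarget(intptr_t receiver_cid, intptr_t lower_limit,
                             intptr_t upper_limit) {
  // blt/bgt in the stub branch to the miss path outside this range.
  return lower_limit <= receiver_cid && receiver_cid <= upper_limit;
}

}  // namespace single_target_sketch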
3328
3329static int GetScaleFactor(intptr_t size) {
3330 switch (size) {
3331 case 1:
3332 return 0;
3333 case 2:
3334 return 1;
3335 case 4:
3336 return 2;
3337 case 8:
3338 return 3;
3339 case 16:
3340 return 4;
3341 }
3342 UNREACHABLE();
3343 return -1;
3344}
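// Illustrative sketch (not from the Dart sources): GetScaleFactor returns
// log2 of the element size, so the allocation path can shift the untagged
// length instead of multiplying (e.g. 4-byte elements -> shift 2, so
// length << 2 is the payload size in bytes). A loop-based equivalent:
#include <cassert>
#include <cstdint>

namespace scale_factor_sketch {

inline int ScaleShift(intptr_t element_size) {
  int shift = 0;
  while ((intptr_t{1} << shift) < element_size) ++shift;
  assert((intptr_t{1} << shift) == element_size);  // Power-of-two sizes only.
  return shift;  // 1->0, 2->1, 4->2, 8->3, 16->4, matching the switch above.
}

}  // namespace scale_factor_sketch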
3345
3346 void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3347 const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3348 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3349 const intptr_t scale_shift = GetScaleFactor(element_size);
3350
3353
3354 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3355 Label call_runtime;
3356 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, T3));
3358 /* Check that length is a positive Smi. */
3359 /* T3: requested array length argument. */
3360 __ BranchIfNotSmi(T3, &call_runtime);
3361 __ SmiUntag(T3);
3362 /* Check for length >= 0 && length <= max_len. */
3363 /* T3: untagged array length. */
3364 __ CompareImmediate(T3, max_len, kObjectBytes);
3365 __ BranchIf(UNSIGNED_GREATER, &call_runtime);
3366 if (scale_shift != 0) {
3367 __ slli(T3, T3, scale_shift);
3368 }
3369 const intptr_t fixed_size_plus_alignment_padding =
3370 target::TypedData::HeaderSize() +
3371 target::ObjectAlignment::kObjectAlignment - 1;
3372 __ AddImmediate(T3, fixed_size_plus_alignment_padding);
3374 __ lx(A0, Address(THR, target::Thread::top_offset()));
3375
3376 /* T3: allocation size. */
3377 __ add(T4, A0, T3);
3378 __ bltu(T4, A0, &call_runtime); /* Fail on unsigned overflow. */
3379
3380 /* Check if the allocation fits into the remaining space. */
3381 /* A0: potential new object start. */
3382 /* T4: potential next object start. */
3383 /* T3: allocation size. */
3384 __ lx(TMP, Address(THR, target::Thread::end_offset()));
3385 __ bgeu(T4, TMP, &call_runtime);
3386 __ CheckAllocationCanary(A0);
3387
3388 /* Successfully allocated the object(s), now update top to point to */
3389 /* next object start and initialize the object. */
3390 __ sx(T4, Address(THR, target::Thread::top_offset()));
3391 __ AddImmediate(A0, kHeapObjectTag);
3392 /* Initialize the tags. */
3393 /* A0: new object start as a tagged pointer. */
3394 /* T4: new object end address. */
3395 /* T3: allocation size. */
3396 {
3397 __ li(T5, 0);
3398 __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
3399 compiler::Label zero_tags;
3400 __ BranchIf(HI, &zero_tags);
3401 __ slli(T5, T3,
3402 target::UntaggedObject::kTagBitsSizeTagPos -
3403 target::ObjectAlignment::kObjectAlignmentLog2);
3404 __ Bind(&zero_tags);
3405
3406 /* Get the class index and insert it into the tags. */
3407 uword tags =
3408 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3409 __ OrImmediate(T5, T5, tags);
3410 __ sx(T5, FieldAddress(A0, target::Object::tags_offset())); /* Tags. */
3411 }
3412 /* Set the length field. */
3413 /* A0: new object start as a tagged pointer. */
3414 /* T4: new object end address. */
3415 __ mv(T3, AllocateTypedDataArrayABI::kLengthReg); /* Array length. */
3416 __ StoreCompressedIntoObjectNoBarrier(
3417 A0, FieldAddress(A0, target::TypedDataBase::length_offset()), T3);
3418 /* Initialize all array elements to 0. */
3419 /* A0: new object start as a tagged pointer. */
3420 /* T4: new object end address. */
3421 /* T3: iterator which initially points to the start of the variable */
3422 /* data area to be initialized. */
3423 /* R3: scratch register. */
3424 __ AddImmediate(T3, A0, target::TypedData::HeaderSize() - 1);
3425 __ StoreInternalPointer(
3426 A0, FieldAddress(A0, target::PointerBase::data_offset()), T3);
3427 Label loop;
3428 __ Bind(&loop);
3429 for (intptr_t offset = 0; offset < target::kObjectAlignment;
3430 offset += target::kWordSize) {
3431 __ sx(ZR, Address(T3, offset));
3432 }
3433 // Safe to only check every kObjectAlignment bytes instead of each word.
3434 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
3435 __ addi(T3, T3, target::kObjectAlignment);
3436 __ bltu(T3, T4, &loop);
3437 __ WriteAllocationCanary(T4); // Fix overshoot.
3438
3439 __ Ret();
3440
3441 __ Bind(&call_runtime);
3442 }
3443
3444 __ EnterStubFrame();
3445 __ PushRegister(ZR); // Result slot.
3446 __ PushImmediate(target::ToRawSmi(cid)); // Cid
3447 __ PushRegister(AllocateTypedDataArrayABI::kLengthReg); // Array length
3448 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3449 __ Drop(2); // Drop arguments.
3450 __ PopRegister(AllocateTypedDataArrayABI::kResultReg);
3451 __ LeaveStubFrame();
3452 __ Ret();
3453}
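// Illustrative sketch (not from the Dart sources): the inline allocation size
// computed above is the typed-data header plus the payload, rounded up to the
// object alignment. A scalar restatement of that arithmetic; header size and
// alignment are passed in because the real values are target constants.
#include <cstdint>

namespace typed_data_size_sketch {

inline intptr_t AllocationSize(intptr_t length, int scale_shift,
                               intptr_t header_size, intptr_t alignment) {
  const intptr_t payload = length << scale_shift;  // length * element size.
  const intptr_t unrounded = payload + header_size + alignment - 1;
  return unrounded & ~(alignment - 1);             // Round up to alignment.
}

// Example: 10 four-byte elements with a 16-byte header and 16-byte alignment
// give AllocationSize(10, 2, 16, 16) == 64.
}  // namespace typed_data_size_sketch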
3454
3455} // namespace compiler
3456
3457} // namespace dart
3458
3459#endif // defined(TARGET_ARCH_RISCV)