stub_code_compiler_arm64.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h"
6
7// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
8// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
10
11#define SHOULD_NOT_INCLUDE_RUNTIME
12
14
15#if defined(TARGET_ARCH_ARM64)
16
17#include "vm/class_id.h"
18#include "vm/code_entry_kind.h"
22#include "vm/constants.h"
24#include "vm/instructions.h"
26#include "vm/tags.h"
27
28#define __ assembler->
29
30namespace dart {
31namespace compiler {
32
33// Ensures that [R0] is a new object; if not, it will be added to the remembered
34// set via a leaf runtime call.
35//
36// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
37// The caller should simply call LeaveStubFrame() and return.
38void StubCodeCompiler::EnsureIsNewOrRemembered() {
39 // If the object is not in an active TLAB, we call a leaf-runtime to add it to
40 // the remembered set and/or deferred marking worklist. This test assumes a
41 // Page's TLAB use is always ascending.
42 Label done;
43 __ AndImmediate(TMP, R0, target::kPageMask);
44 __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
45 __ CompareRegisters(R0, TMP);
46 __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
47
48 {
49 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
50 /*preserve_registers=*/false);
51 // R0 already loaded.
52 __ mov(R1, THR);
53 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry,
54 /*argument_count=*/2);
55 }
56
57 __ Bind(&done);
58}
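// A minimal illustrative sketch (not VM code) of the check performed above,
// assuming an object is "new" exactly when its address lies at or above the
// original TLAB top of the page that contains it; the page size, mask and the
// field offset here are stand-ins, not the real target:: values.
#include <cstdint>

namespace sketch {

constexpr uintptr_t kAssumedPageSize = 512 * 1024;               // assumption
constexpr uintptr_t kAssumedPageMask = ~(kAssumedPageSize - 1);  // assumption

struct Page {
  uintptr_t original_top;  // placed at offset 0 for illustration only
};

// Mirrors: TMP = R0 & kPageMask; TMP = [TMP + original_top]; branch if R0 >= TMP.
inline bool IsFreshlyAllocated(uintptr_t object_addr) {
  const Page* page =
      reinterpret_cast<const Page*>(object_addr & kAssumedPageMask);
  return object_addr >= page->original_top;  // else: leaf call to remember it
}

}  // namespace sketch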
59
60// In TSAN mode the runtime will throw an exception using an intermediary
61// longjmp() call to unwind the C frames in a way that TSAN can understand.
62//
63// This wrapper will set up a [jmp_buf] on the stack and initialize it to be a
64// target for a possible longjmp(). In the exceptional case we'll forward
65// control of execution to the usual JumpToFrame stub.
66//
67// In non-TSAN mode this will do nothing and the runtime will call the
68// JumpToFrame stub directly.
69//
70// The callback [fun] may be invoked with a modified [SP] due to allocating
71// a [jmp_buf] structure on the stack (as well as the saved old
72// [Thread::tsan_utils_->setjmp_buffer_]).
73static void WithExceptionCatchingTrampoline(Assembler* assembler,
74 std::function<void()> fun) {
75#if !defined(USING_SIMULATOR)
76 const Register kTsanUtilsReg = R3;
77
78 // Reserve space for arguments and align frame before entering C++ world.
79 const intptr_t kJumpBufferSize = sizeof(jmp_buf);
80 // Save & Restore the volatile CPU registers across the setjmp() call.
81 const RegisterSet volatile_registers(
82 kAbiVolatileCpuRegs & ~(1 << R0) & ~(1 << SP),
83 /*fpu_registers=*/0);
84
85 const Register kSavedRspReg = R20;
87 // We rely on THR being preserved across the setjmp() call.
89
90 if (FLAG_target_thread_sanitizer) {
91 Label do_native_call;
92
93 // Save old jmp_buf.
94 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
95 __ ldr(TMP,
96 Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
97 __ Push(TMP);
98
99 // Allocate a jmp_buf struct on the stack & remember a pointer to it in
100 // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
101 // to).
102 __ AddImmediate(SP, -kJumpBufferSize);
103 __ str(SP,
104 Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
105
106 // Call setjmp() with a pointer to the allocated jmp_buf struct.
107 __ MoveRegister(R0, SP);
108 __ PushRegisters(volatile_registers);
109 __ EnterCFrame(0);
110 __ mov(R25, CSP);
111 __ mov(CSP, SP);
112 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
113 __ CallCFunction(
114 Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()));
115 __ mov(SP, CSP);
116 __ mov(CSP, R25);
117 __ LeaveCFrame();
118 __ PopRegisters(volatile_registers);
119
120 // We are the target of a longjmp() iff setjmp() returns non-0.
121 __ cbz(&do_native_call, R0);
122
123 // We are the target of a longjmp: Clean up the stack and tail-call the
124 // JumpToFrame stub which will take care of unwinding the stack and hand
125 // execution to the catch entry.
126 __ AddImmediate(SP, kJumpBufferSize);
127 __ ldr(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
128 __ Pop(TMP);
129 __ str(TMP,
130 Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
131
132 __ ldr(R0,
133 Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
134 __ ldr(R1,
135 Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
136 __ ldr(R2,
137 Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
138 __ MoveRegister(R3, THR);
140
141 // We leave the created [jmp_buf] structure on the stack as well as the
142 // pushed old [Thread::tsan_utils_->setjmp_buffer_].
143 __ Bind(&do_native_call);
144 __ MoveRegister(kSavedRspReg, SP);
145 }
146#endif // !defined(USING_SIMULATOR)
147
148 fun();
149
150#if !defined(USING_SIMULATOR)
151 if (FLAG_target_thread_sanitizer) {
152 __ MoveRegister(SP, kSavedRspReg);
153 __ AddImmediate(SP, kJumpBufferSize);
154 const Register kTsanUtilsReg2 = kSavedRspReg;
155 __ ldr(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
156 __ Pop(TMP);
157 __ str(TMP,
158 Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
159 }
160#endif // !defined(USING_SIMULATOR)
161}
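// A minimal, self-contained sketch (not VM code) of the setjmp()/longjmp()
// protocol the trampoline above sets up under TSAN. The names here are
// illustrative; in the VM the longjmp() is issued by exceptions.cc and the
// "else" branch tail-calls the JumpToFrame stub.
#include <csetjmp>
#include <cstdio>

namespace sketch {

static std::jmp_buf buffer;  // stands in for Thread::tsan_utils_->setjmp_buffer_

inline void RuntimeCallThatMayThrow(bool do_throw) {
  if (do_throw) std::longjmp(buffer, 1);  // unwinds C frames visibly to TSAN
}

inline void Demo() {
  if (setjmp(buffer) == 0) {
    RuntimeCallThatMayThrow(true);  // the wrapped native/runtime call
  } else {
    std::puts("longjmp target: would forward to the JumpToFrame stub here");
  }
}

}  // namespace sketch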
162
163// Input parameters:
164// LR : return address.
165// SP : address of last argument in argument array.
166// SP + 8*R4 - 8 : address of first argument in argument array.
167// SP + 8*R4 : address of return value.
168// R5 : address of the runtime function to call.
169// R4 : number of arguments to the call.
170void StubCodeCompiler::GenerateCallToRuntimeStub() {
171 const intptr_t thread_offset = target::NativeArguments::thread_offset();
172 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
173 const intptr_t argv_offset = target::NativeArguments::argv_offset();
174 const intptr_t retval_offset = target::NativeArguments::retval_offset();
175
176 __ Comment("CallToRuntimeStub");
178 __ SetPrologueOffset();
179 __ EnterStubFrame();
180
181 // Save exit frame information to enable stack walking as we are about
182 // to transition to Dart VM C++ code.
184
185 // Mark that the thread exited generated code through a runtime call.
188
189#if defined(DEBUG)
190 {
191 Label ok;
192 // Check that we are always entering from Dart code.
193 __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
194 __ CompareImmediate(R8, VMTag::kDartTagId);
195 __ b(&ok, EQ);
196 __ Stop("Not coming from Dart code.");
197 __ Bind(&ok);
198 }
199#endif
200
201 // Mark that the thread is executing VM code.
202 __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
203
204 WithExceptionCatchingTrampoline(assembler, [&]() {
205 // Reserve space for arguments and align frame before entering C++ world.
206 // target::NativeArguments are passed in registers.
207 __ Comment("align stack");
208 // Reserve space for arguments.
210 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
211
212 // Pass target::NativeArguments structure by value and call runtime.
213 // Registers R0, R1, R2, and R3 are used.
214
215 ASSERT(thread_offset == 0 * target::kWordSize);
216 // Set thread in NativeArgs.
217 __ mov(R0, THR);
218
219 ASSERT(argc_tag_offset == 1 * target::kWordSize);
220 __ mov(R1, R4); // Set argc in target::NativeArguments.
221
222 ASSERT(argv_offset == 2 * target::kWordSize);
223 __ add(R2, ZR, Operand(R4, LSL, 3));
224 __ add(R2, FP, Operand(R2)); // Compute argv.
225 // Set argv in target::NativeArguments.
226 __ AddImmediate(R2,
227 target::frame_layout.param_end_from_fp * target::kWordSize);
228
229 ASSERT(retval_offset == 3 * target::kWordSize);
230 __ AddImmediate(R3, R2, target::kWordSize);
231
232 __ StoreToOffset(R0, SP, thread_offset);
233 __ StoreToOffset(R1, SP, argc_tag_offset);
234 __ StoreToOffset(R2, SP, argv_offset);
235 __ StoreToOffset(R3, SP, retval_offset);
236 __ mov(R0, SP); // Pass the pointer to the target::NativeArguments.
237
238 // We are entering runtime code, so the C stack pointer must be restored
239 // from the stack limit to the top of the stack. We cache the stack limit
240 // address in a callee-saved register.
241 __ mov(R25, CSP);
242 __ mov(CSP, SP);
243
244 __ blr(R5);
245 __ Comment("CallToRuntimeStub return");
246
247 // Restore SP and CSP.
248 __ mov(SP, CSP);
249 __ mov(CSP, R25);
250
251 // Refresh pinned registers (write barrier mask, null, dispatch table, etc).
252 __ RestorePinnedRegisters();
253
254 // Retval is next to 1st argument.
255 // Mark that the thread is executing Dart code.
256 __ LoadImmediate(R2, VMTag::kDartTagId);
257 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
258
259 // Mark that the thread has not exited generated Dart code.
261
262 // Reset exit frame information in Isolate's mutator thread structure.
264
265 // Restore the global object pool after returning from runtime (old space is
266 // moving, so the GOP could have been relocated).
267 if (FLAG_precompiled_mode) {
268 __ SetupGlobalPoolAndDispatchTable();
269 }
270 });
271
272 __ LeaveStubFrame();
273
274 // The following return can jump to a lazy-deopt stub, which assumes R0
275 // contains a return value and will save it in a GC-visible way. We therefore
276 // have to ensure R0 does not contain any garbage value left from the C
277 // function we called (which has return type "void").
278 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
279 __ LoadImmediate(R0, 0);
280 __ ret();
281}
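// A minimal sketch (not VM code) of the stack layout the ASSERTs above pin
// down: four word-sized slots written at SP and then handed to the runtime
// entry by pointer in R0. The field names are illustrative.
#include <cstdint>

namespace sketch {

struct NativeArgumentsLayout {
  void* thread;       // thread_offset   == 0 * kWordSize  (from R0)
  intptr_t argc_tag;  // argc_tag_offset == 1 * kWordSize  (from R1)
  void** argv;        // argv_offset     == 2 * kWordSize  (from R2)
  void** retval;      // retval_offset   == 3 * kWordSize  (from R3)
};

static_assert(sizeof(NativeArgumentsLayout) == 4 * sizeof(void*),
              "four word-sized slots, as asserted in the stub");

}  // namespace sketch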
282
283void StubCodeCompiler::GenerateSharedStubGeneric(
284 bool save_fpu_registers,
285 intptr_t self_code_stub_offset_from_thread,
286 bool allow_return,
287 std::function<void()> perform_runtime_call) {
288 // We want the saved registers to appear like part of the caller's frame, so
289 // we push them before calling EnterStubFrame.
290 RegisterSet all_registers;
291 all_registers.AddAllNonReservedRegisters(save_fpu_registers);
292
293 // To make the stack map calculation architecture-independent we do the same
294 // as on Intel.
295 READS_RETURN_ADDRESS_FROM_LR(__ Push(LR));
296 __ PushRegisters(all_registers);
297 __ ldr(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
298 __ EnterStubFrame();
299 perform_runtime_call();
300 if (!allow_return) {
301 __ Breakpoint();
302 return;
303 }
304 __ LeaveStubFrame();
305 __ PopRegisters(all_registers);
306 __ Drop(1); // We use the LR restored via LeaveStubFrame.
307 READS_RETURN_ADDRESS_FROM_LR(__ ret(LR));
308}
309
310void StubCodeCompiler::GenerateSharedStub(
311 bool save_fpu_registers,
312 const RuntimeEntry* target,
313 intptr_t self_code_stub_offset_from_thread,
314 bool allow_return,
315 bool store_runtime_result_in_result_register) {
316 ASSERT(!store_runtime_result_in_result_register || allow_return);
317 auto perform_runtime_call = [&]() {
318 if (store_runtime_result_in_result_register) {
319 __ PushRegister(NULL_REG);
320 }
321 __ CallRuntime(*target, /*argument_count=*/0);
322 if (store_runtime_result_in_result_register) {
323 __ PopRegister(R0);
324 __ str(R0,
325 Address(FP, target::kWordSize *
328 }
329 };
330 GenerateSharedStubGeneric(save_fpu_registers,
331 self_code_stub_offset_from_thread, allow_return,
332 perform_runtime_call);
333}
334
335void StubCodeCompiler::GenerateEnterSafepointStub() {
336 RegisterSet all_registers;
337 all_registers.AddAllGeneralRegisters();
338
339 __ EnterFrame(0);
340 __ PushRegisters(all_registers);
341
344 __ ReserveAlignedFrameSpace(0);
345 __ mov(CSP, SP);
346
347 __ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
348 __ blr(R0);
349
352
353 __ PopRegisters(all_registers);
354 __ LeaveFrame();
355
356 __ Ret();
357}
358
359static void GenerateExitSafepointStubCommon(Assembler* assembler,
360 uword runtime_entry_offset) {
361 RegisterSet all_registers;
362 all_registers.AddAllGeneralRegisters();
363
364 __ EnterFrame(0);
365 __ PushRegisters(all_registers);
366
369 __ ReserveAlignedFrameSpace(0);
370 __ mov(CSP, SP);
371
372 // Set the execution state to VM while waiting for the safepoint to end.
373 // This isn't strictly necessary but enables tests to check that we're not
374 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
375 __ LoadImmediate(R0, target::Thread::vm_execution_state());
377
378 __ ldr(R0, Address(THR, runtime_entry_offset));
379 __ blr(R0);
380
383
384 __ PopRegisters(all_registers);
385 __ LeaveFrame();
386
387 __ Ret();
388}
389
390void StubCodeCompiler::GenerateExitSafepointStub() {
391 GenerateExitSafepointStubCommon(
392 assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
393}
394
395void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
396 GenerateExitSafepointStubCommon(
397 assembler,
398 kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
399}
400
401// Calls native code within a safepoint.
402//
403// On entry:
404// R9: target to call
405// Stack: set up for native call (SP), aligned, CSP < SP
406//
407// On exit:
408// R19: clobbered, although normally callee-saved
409// Stack: preserved, CSP == SP
410void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
412
413 SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R19, LR));
414 __ LoadImmediate(R10, target::Thread::exit_through_ffi());
415 __ TransitionGeneratedToNative(R9, FPREG, R10 /*volatile*/,
416 /*enter_safepoint=*/true);
417 __ mov(R25, CSP);
418 __ mov(CSP, SP);
419
420#if defined(DEBUG)
421 // Check CSP alignment.
422 __ andi(R11 /*volatile*/, SP,
423 Immediate(~(OS::ActivationFrameAlignment() - 1)));
424 __ cmp(R11, Operand(SP));
425 Label done;
426 __ b(&done, EQ);
427 __ Breakpoint();
428 __ Bind(&done);
429#endif
430
431 __ blr(R9);
432
433 __ mov(SP, CSP);
434 __ mov(CSP, R25);
435
436 __ TransitionNativeToGenerated(R10, /*leave_safepoint=*/true);
437 __ ret(R19);
438}
439
440void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
441 Register dst,
442 Register tmp) {
443 compiler::Label skip_reloc;
444 __ b(&skip_reloc);
445 InsertBSSRelocation(relocation);
446 __ Bind(&skip_reloc);
447
448 __ adr(tmp, compiler::Immediate(-compiler::target::kWordSize));
449
450 // tmp holds the address of the relocation.
451 __ ldr(dst, compiler::Address(tmp));
452
453 // dst holds the relocation itself: bss_start - tmp.
454 // tmp = tmp + (bss_start - tmp) = bss_start
455 __ add(tmp, tmp, compiler::Operand(dst));
456
457 // tmp holds the start of the BSS section.
458 // Load the "get-thread" routine: *bss_start.
459 __ ldr(dst, compiler::Address(tmp));
460}
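// A minimal sketch (not VM code) of the pointer arithmetic above, assuming
// the relocation slot is patched to hold (bss_start - reloc_addr), which is
// what makes the add recover bss_start.
#include <cstdint>

namespace sketch {

inline uintptr_t LoadGetThreadRoutine(uintptr_t reloc_addr) {
  // dst = [tmp]: the relocation value itself.
  const intptr_t relocation = *reinterpret_cast<const intptr_t*>(reloc_addr);
  // tmp = tmp + dst = bss_start.
  const uintptr_t bss_start = reloc_addr + static_cast<uintptr_t>(relocation);
  // dst = [tmp]: the first BSS slot, the "get-thread" routine.
  return *reinterpret_cast<const uintptr_t*>(bss_start);
}

}  // namespace sketch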
461
462void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
463 uword function_index,
464 Register dst) {
465 // Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
466 // Note: If the stub was aligned, this could be a single PC relative load.
467
468 // Load a pointer to the beginning of the stub into dst.
469 const intptr_t code_size = __ CodeSize();
470 __ adr(dst, Immediate(-code_size));
471
472 // Round dst down to the page size.
473 __ andi(dst, dst, Immediate(FfiCallbackMetadata::kPageMask));
474
475 // Load the function from the function table.
476 __ LoadFromOffset(dst, dst,
478}
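// A minimal sketch (not VM code) of the lookup above: round the stub's own PC
// down to its page and index a per-page function table. The table offset and
// layout are assumptions, not the real FfiCallbackMetadata layout.
#include <cstdint>

namespace sketch {

inline uintptr_t LookupRuntimeFunction(uintptr_t pc_in_stub,
                                       uintptr_t page_mask,
                                       uintptr_t assumed_table_offset,
                                       uintptr_t function_index) {
  const uintptr_t page_start = pc_in_stub & page_mask;  // andi(dst, dst, kPageMask)
  const uintptr_t* table =
      reinterpret_cast<const uintptr_t*>(page_start + assumed_table_offset);
  return table[function_index];  // LoadFromOffset(dst, dst, ...)
}

}  // namespace sketch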
479
480void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
481#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
482 // TODO(37299): FFI is not supported in SIMARM64.
483 __ Breakpoint();
484#else
485 Label body;
486
487 // R9 is volatile and not used for passing any arguments.
490 ++i) {
491 // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
492 // look up the current PC, then jump to the shared section.
493 __ adr(R9, Immediate(0));
494 __ b(&body);
495 }
496
497 ASSERT_EQUAL(__ CodeSize(),
498 FfiCallbackMetadata::kNativeCallbackTrampolineSize *
500
501 __ Bind(&body);
502
503 const intptr_t shared_stub_start = __ CodeSize();
504
505 // Save THR (callee-saved) and LR on the real C stack (CSP). Keeps it
506 // aligned.
507 COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 2);
508 SPILLS_LR_TO_FRAME(__ stp(
510
512
513 RegisterSet all_registers;
514 all_registers.AddAllArgumentRegisters();
515 all_registers.Add(Location::RegisterLocation(
517
518 // Load the thread, verify the callback ID and exit the safepoint.
519 //
520 // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
521 // code size on this shared stub.
522 {
523 __ SetupDartSP();
524
525 __ EnterFrame(0);
526 __ PushRegisters(all_registers);
527
528 __ mov(R0, R9);
529
530 // We also need to look up the entry point for the trampoline. This is
531 // returned using a pointer passed to the second arg of the C function
532 // below. We aim that pointer at a reserved stack slot.
533 __ AddImmediate(SP, SP, -compiler::target::kWordSize);
534 __ mov(R1, SP);
535
536 // We also need to know if this is a sync or async callback. This is also
537 // returned by pointer.
538 __ AddImmediate(SP, SP, -compiler::target::kWordSize);
539 __ mov(R2, SP);
540
541 __ EnterFrame(0);
542 __ ReserveAlignedFrameSpace(0);
543
544#if defined(DART_TARGET_OS_FUCHSIA)
545 // TODO(https://dartbug.com/52579): Remove.
546 if (FLAG_precompiled_mode) {
547 GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, R4, R9);
548 } else {
549 Label call;
551 __ b(&call);
552 __ Emit64(reinterpret_cast<int64_t>(&DLRT_GetFfiCallbackMetadata));
553 __ Bind(&call);
554 }
555#else
556 GenerateLoadFfiCallbackMetadataRuntimeFunction(
558#endif // defined(DART_TARGET_OS_FUCHSIA)
559
560 __ mov(CSP, SP);
561 __ blr(R4);
562 __ mov(SP, CSP);
563 __ mov(THR, R0);
564
565 __ LeaveFrame();
566
567 // The trampoline type is at the top of the stack. Pop it into R9.
568 __ Pop(R9);
569
570 // Entry point is now at the top of the stack. Pop it into R10.
572 __ Pop(R10);
573
574 __ PopRegisters(all_registers);
575 __ LeaveFrame();
576
577 __ RestoreCSP();
578 }
579
580 Label async_callback;
581 Label done;
582
583 // If GetFfiCallbackMetadata returned a null thread, it means that the async
584 // callback was invoked after it was deleted. In this case, do nothing.
585 __ cmp(THR, Operand(0));
586 __ b(&done, EQ);
587
588 // Check the trampoline type to see how the callback should be invoked.
589 __ cmp(
590 R9,
592 __ b(&async_callback, EQ);
593
594 // Sync callback. The entry point contains the target function, so just call
595 // it. DLRT_GetFfiCallbackMetadata exited the safepoint, so re-enter it
596 // afterwards.
597
598 // Clobbers all volatile registers, including the callback ID in R9.
599 // Resets CSP and SP, important for EnterSafepoint below.
600 __ blr(R10);
601
602 // Clobbers TMP, TMP2 and R9 -- all volatile and not holding return values.
603 __ EnterFullSafepoint(/*scratch=*/R9);
604
605 __ b(&done);
607
608 // Async callback. The entry point marshals the arguments into a message and
609 // sends it over the send port. DLRT_GetFfiCallbackMetadata
610 // entered a temporary isolate, so exit it afterwards.
611
612 // Clobbers all volatile registers, including the callback ID in R9.
613 // Resets CSP and SP, important for EnterSafepoint below.
614 __ blr(R10);
615
616 // Exit the temporary isolate.
617 {
618 __ SetupDartSP();
619 __ EnterFrame(0);
620 __ ReserveAlignedFrameSpace(0);
621
622#if defined(DART_TARGET_OS_FUCHSIA)
623 // TODO(https://dartbug.com/52579): Remove.
624 if (FLAG_precompiled_mode) {
625 GenerateLoadBSSEntry(BSS::Relocation::DRT_ExitTemporaryIsolate, R4, R9);
626 } else {
627 Label call;
629 __ b(&call);
630 __ Emit64(reinterpret_cast<int64_t>(&DLRT_ExitTemporaryIsolate));
631 __ Bind(&call);
632 }
633#else
634 GenerateLoadFfiCallbackMetadataRuntimeFunction(
636#endif
637
638 __ mov(CSP, SP);
639 __ blr(R4);
640 __ mov(SP, CSP);
641 __ mov(THR, R0);
642
643 __ LeaveFrame();
644 __ RestoreCSP();
645 }
646
647 __ Bind(&done);
648
649 // Pop LR and THR from the real stack (CSP).
650 RESTORES_LR_FROM_FRAME(__ ldp(
652
653 __ ret();
654
655 ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
656 FfiCallbackMetadata::kNativeCallbackSharedStubSize);
658
659#if defined(DEBUG)
660 while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
661 __ Breakpoint();
662 }
663#endif
664#endif // defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
665}
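// A minimal sketch (not VM code) of one way an entry PC can be mapped back to
// its trampoline slot on the page, which is the property that allows the
// FfiCallbackMetadata table to be keyed by trampoline entry point. The sizes
// are parameters here; the real constants and keying live in
// FfiCallbackMetadata.
#include <cstdint>

namespace sketch {

inline uintptr_t TrampolineIndexOnPage(uintptr_t entry_pc,
                                       uintptr_t page_size,
                                       uintptr_t trampoline_size) {
  const uintptr_t page_start = entry_pc & ~(page_size - 1);
  return (entry_pc - page_start) / trampoline_size;
}

}  // namespace sketch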
666
667void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
668 __ EnterStubFrame();
671 __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
672 // The NullError runtime entry does not return.
673 __ Breakpoint();
674}
675
676void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
677 auto perform_runtime_call = [&]() {
678 // If the generated code has unboxed index/length we need to box them before
679 // calling the runtime entry.
681 Label length, smi_case;
682
683 // The user-controlled index might not fit into a Smi.
684#if !defined(DART_COMPRESSED_POINTERS)
686 compiler::Operand(RangeErrorABI::kIndexReg));
687 __ BranchIf(NO_OVERFLOW, &length);
688#else
692 __ cmp(TMP,
693 compiler::Operand(RangeErrorABI::kIndexReg, ASR, kSmiTagSize));
694 __ BranchIf(EQ, &length);
695#endif
696 {
697 // Allocate a mint, reload the two registers and populate the mint.
698 __ PushRegister(NULL_REG);
699 __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0);
700 __ PopRegister(RangeErrorABI::kIndexReg);
701 __ ldr(TMP,
702 Address(FP, target::kWordSize *
705 __ str(TMP, FieldAddress(RangeErrorABI::kIndexReg,
708 Address(FP, target::kWordSize *
711 }
712
713 // Length is guaranteed to be in positive Smi range (it comes from a load
714 // of a VM-recognized array).
715 __ Bind(&length);
717 }
718 __ PushRegistersInOrder(
720 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
721 __ Breakpoint();
722 };
723
724 GenerateSharedStubGeneric(
725 /*save_fpu_registers=*/with_fpu_regs,
726 with_fpu_regs
729 /*allow_return=*/false, perform_runtime_call);
730}
731
732void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
733 auto perform_runtime_call = [&]() {
734 __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/2);
735 __ Breakpoint();
736 };
737
738 GenerateSharedStubGeneric(
739 /*save_fpu_registers=*/with_fpu_regs,
740 with_fpu_regs
743 /*allow_return=*/false, perform_runtime_call);
744}
745
746// Input parameters:
747// LR : return address.
748// SP : address of return value.
749// R5 : address of the native function to call.
750// R2 : address of first argument in argument array.
751// R1 : argc_tag including number of arguments and function kind.
752static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
753 Address wrapper) {
754 const intptr_t thread_offset = target::NativeArguments::thread_offset();
755 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
756 const intptr_t argv_offset = target::NativeArguments::argv_offset();
757 const intptr_t retval_offset = target::NativeArguments::retval_offset();
758
759 __ EnterStubFrame();
760
761 // Save exit frame information to enable stack walking as we are about
762 // to transition to native code.
764
765 // Mark that the thread exited generated code through a runtime call.
768
769#if defined(DEBUG)
770 {
771 Label ok;
772 // Check that we are always entering from Dart code.
773 __ LoadFromOffset(R6, THR, target::Thread::vm_tag_offset());
774 __ CompareImmediate(R6, VMTag::kDartTagId);
775 __ b(&ok, EQ);
776 __ Stop("Not coming from Dart code.");
777 __ Bind(&ok);
778 }
779#endif
780
781 // Mark that the thread is executing native code.
782 __ StoreToOffset(R5, THR, target::Thread::vm_tag_offset());
783
784 WithExceptionCatchingTrampoline(assembler, [&]() {
785 // Reserve space for the native arguments structure passed on the stack (the
786 // outgoing pointer parameter to the native arguments structure is passed in
787 // R0) and align frame before entering the C++ world.
788 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
789
790 // Initialize target::NativeArguments structure and call native function.
791 // Registers R0, R1, R2, and R3 are used.
792
793 ASSERT(thread_offset == 0 * target::kWordSize);
794 // Set thread in NativeArgs.
795 __ mov(R0, THR);
796
797 ASSERT(argc_tag_offset == 1 * target::kWordSize);
798 // Set argc in target::NativeArguments: R1 already contains argc.
799
800 ASSERT(argv_offset == 2 * target::kWordSize);
801 // Set argv in target::NativeArguments: R2 already contains argv.
802
803 // Set retval in NativeArgs.
804 ASSERT(retval_offset == 3 * target::kWordSize);
805 __ AddImmediate(
806 R3, FP,
807 (target::frame_layout.param_end_from_fp + 1) * target::kWordSize);
808
809 // Passing the structure by value as in runtime calls would require changing
810 // the Dart API for native functions.
811 // For now, space is reserved on the stack and we pass a pointer to it.
812 __ StoreToOffset(R0, SP, thread_offset);
813 __ StoreToOffset(R1, SP, argc_tag_offset);
814 __ StoreToOffset(R2, SP, argv_offset);
815 __ StoreToOffset(R3, SP, retval_offset);
816 __ mov(R0, SP); // Pass the pointer to the target::NativeArguments.
817
818 // We are entering runtime code, so the C stack pointer must be restored
819 // from the stack limit to the top of the stack. We cache the stack limit
820 // address in a callee-saved register.
821 __ mov(R25, CSP);
822 __ mov(CSP, SP);
823
824 __ mov(R1, R5); // Pass the function entrypoint to call.
825
826 // Call native function invocation wrapper or redirection via simulator.
827 __ Call(wrapper);
828
829 // Restore SP and CSP.
830 __ mov(SP, CSP);
831 __ mov(CSP, R25);
832
833 // Refresh pinned registers (write barrier mask, null, dispatch table, etc).
834 __ RestorePinnedRegisters();
835
836 // Mark that the thread is executing Dart code.
837 __ LoadImmediate(R2, VMTag::kDartTagId);
838 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
839
840 // Mark that the thread has not exited generated Dart code.
842
843 // Reset exit frame information in Isolate's mutator thread structure.
845
846 // Restore the global object pool after returning from runtime (old space is
847 // moving, so the GOP could have been relocated).
848 if (FLAG_precompiled_mode) {
849 __ SetupGlobalPoolAndDispatchTable();
850 }
851 });
852
853 __ LeaveStubFrame();
854 __ ret();
855}
856
857void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
858 GenerateCallNativeWithWrapperStub(
859 assembler,
860 Address(THR,
862}
863
864void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
865 GenerateCallNativeWithWrapperStub(
866 assembler,
867 Address(THR,
869}
870
871// Input parameters:
872// LR : return address.
873// SP : address of return value.
874// R5 : address of the native function to call.
875// R2 : address of first argument in argument array.
876// R1 : argc_tag including number of arguments and function kind.
877void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
878 GenerateCallNativeWithWrapperStub(
879 assembler,
880 Address(THR,
882}
883
884// Input parameters:
885// ARGS_DESC_REG: arguments descriptor array.
886void StubCodeCompiler::GenerateCallStaticFunctionStub() {
887 // Create a stub frame as we are pushing some objects on the stack before
888 // calling into the runtime.
889 __ EnterStubFrame();
890 // Set up space on the stack for the return value and preserve the arguments descriptor.
891 __ Push(ARGS_DESC_REG);
892 __ Push(ZR);
893 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
894 // Get Code object result and restore arguments descriptor array.
895 __ Pop(CODE_REG);
896 __ Pop(ARGS_DESC_REG);
897 // Remove the stub frame.
898 __ LeaveStubFrame();
899 // Jump to the dart function.
900 __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
901 __ br(R0);
902}
903
904// Called from a static call only when an invalid code object has been entered
905// (invalid because its function was optimized or deoptimized).
906// ARGS_DESC_REG: arguments descriptor array.
907void StubCodeCompiler::GenerateFixCallersTargetStub() {
908 Label monomorphic;
909 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
910
911 // Load code pointer to this stub from the thread:
912 // The one that is passed in is not correct: it points to the code object
913 // that needs to be replaced.
914 __ ldr(CODE_REG,
916 // Create a stub frame as we are pushing some objects on the stack before
917 // calling into the runtime.
918 __ EnterStubFrame();
919 // Set up space on the stack for the return value and preserve the arguments descriptor.
920 __ Push(ARGS_DESC_REG);
921 __ Push(ZR);
922 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
923 // Get Code object result and restore arguments descriptor array.
924 __ Pop(CODE_REG);
925 __ Pop(ARGS_DESC_REG);
926 // Remove the stub frame.
927 __ LeaveStubFrame();
928 // Jump to the dart function.
929 __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
930 __ br(R0);
931
932 __ Bind(&monomorphic);
933 // Load code pointer to this stub from the thread:
934 // The one that is passed in is not correct: it points to the code object
935 // that needs to be replaced.
936 __ ldr(CODE_REG,
938 // Create a stub frame as we are pushing some objects on the stack before
939 // calling into the runtime.
940 __ EnterStubFrame();
941 __ Push(ZR); // Result slot.
942 __ Push(R0); // Preserve receiver.
943 __ Push(R5); // Old cache value (also 2nd return value).
944 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
945 __ Pop(R5); // Get target cache object.
946 __ Pop(R0); // Restore receiver.
947 __ Pop(CODE_REG); // Get target Code object.
948 // Remove the stub frame.
949 __ LeaveStubFrame();
950 // Jump to the dart function.
951 __ LoadFieldFromOffset(
952 R1, CODE_REG,
954 __ br(R1);
955}
956
957// Called from an object allocation instruction when the allocation stub has been
958// disabled.
959void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
960 // Load code pointer to this stub from the thread:
961 // The one that is passed in is not correct: it points to the code object
962 // that needs to be replaced.
963 __ ldr(CODE_REG,
965 __ EnterStubFrame();
966 // Set up space on the stack for the return value.
967 __ Push(ZR);
968 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
969 // Get Code object result.
970 __ Pop(CODE_REG);
971 // Remove the stub frame.
972 __ LeaveStubFrame();
973 // Jump to the dart function.
974 __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
975 __ br(R0);
976}
977
978// Called from an object allocation instruction when the allocation stub for a
979// generic class has been disabled.
980void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
981 // Load code pointer to this stub from the thread:
982 // The one that is passed in is not correct: it points to the code object
983 // that needs to be replaced.
984 __ ldr(CODE_REG,
986 __ EnterStubFrame();
987 // Preserve type arguments register.
989 // Set up space on the stack for the return value.
990 __ Push(ZR);
991 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
992 // Get Code object result.
993 __ Pop(CODE_REG);
994 // Restore type arguments register.
996 // Remove the stub frame.
997 __ LeaveStubFrame();
998 // Jump to the dart function.
999 __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
1000 __ br(R0);
1001}
1002
1003// Input parameters:
1004// R2: smi-tagged argument count, may be zero.
1005// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
1006static void PushArrayOfArguments(Assembler* assembler) {
1007 // Allocate array to store arguments of caller.
1008 __ LoadObject(R1, NullObject());
1009 // R1: null element type for raw Array.
1010 // R2: smi-tagged argument count, may be zero.
1011 __ BranchLink(StubCodeAllocateArray());
1012 // R0: newly allocated array.
1013 // R2: smi-tagged argument count, may be zero (was preserved by the stub).
1014 __ Push(R0); // Array is in R0 and on top of stack.
1015 __ SmiUntag(R2);
1016 __ add(R1, FP, Operand(R2, LSL, target::kWordSizeLog2));
1017 __ AddImmediate(R1,
1018 target::frame_layout.param_end_from_fp * target::kWordSize);
1019 __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag);
1020 // R1: address of first argument on stack.
1021 // R3: address of first argument in array.
1022
1023 Label loop, loop_exit;
1024 __ Bind(&loop);
1025 __ CompareRegisters(R2, ZR);
1026 __ b(&loop_exit, LE);
1027 __ ldr(R7, Address(R1));
1028 __ AddImmediate(R1, -target::kWordSize);
1029 __ AddImmediate(R3, target::kCompressedWordSize);
1030 __ AddImmediate(R2, R2, -1);
1031 __ StoreCompressedIntoObject(R0, Address(R3, -target::kCompressedWordSize),
1032 R7);
1033 __ b(&loop);
1034 __ Bind(&loop_exit);
1035}
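// A minimal sketch (not VM code) of the copy loop above: the arguments live on
// the caller's stack with the first argument at the highest address, so the
// source pointer walks down while the destination index walks up. Compressed
// stores and write barriers are ignored here.
#include <cstdint>

namespace sketch {

inline void CopyArgumentsIntoArray(const uintptr_t* first_arg_addr,  // highest address
                                   uintptr_t* array_data,
                                   intptr_t argc) {
  for (intptr_t i = 0; i < argc; ++i) {
    array_data[i] = first_arg_addr[-i];  // walk down the stack, fill the array upward
  }
}

}  // namespace sketch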
1036
1037// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
1038// This stub translates an optimized frame into an unoptimized frame. The
1039// optimized frame can contain values in registers and on the stack; the
1040// unoptimized frame contains all values on the stack.
1041// Deoptimization occurs in the following steps:
1042// - Push all registers that can contain values.
1043// - Call C routine to copy the stack and saved registers into temporary buffer.
1044// - Adjust caller's frame to correct unoptimized frame size.
1045// - Fill the unoptimized frame.
1046// - Materialize objects that require allocation (e.g. Double instances).
1047// GC can occur only after frame is fully rewritten.
1048// Stack after TagAndPushPP() below:
1049// +------------------+
1050// | Saved PP | <- PP
1051// +------------------+
1052// | PC marker | <- TOS
1053// +------------------+
1054// | Saved FP | <- FP of stub
1055// +------------------+
1056// | return-address | (deoptimization point)
1057// +------------------+
1058// | Saved CODE_REG |
1059// +------------------+
1060// | ... | <- SP of optimized frame
1061//
1062// Parts of the code cannot trigger GC; other parts can.
1063static void GenerateDeoptimizationSequence(Assembler* assembler,
1064 DeoptStubKind kind) {
1065 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
1066 // is no need to set the correct PC marker or load PP, since they get patched.
1067 __ EnterStubFrame();
1068
1069 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
1070 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
1071 const intptr_t saved_result_slot_from_fp =
1074 const intptr_t saved_exception_slot_from_fp =
1077 const intptr_t saved_stacktrace_slot_from_fp =
1080 // Result in R0 is preserved as part of pushing all registers below.
1081
1082 // Push registers in their enumeration order: lowest register number at
1083 // lowest address.
1084 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
1085 const Register r = static_cast<Register>(i);
1086 if (r == CODE_REG) {
1087 // Save the original value of CODE_REG pushed before invoking this stub
1088 // instead of the value used to call this stub.
1090 __ ldr(R25, Address(FP, 2 * target::kWordSize));
1091 __ str(R25, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1092 } else if (r == R15) {
1093 // Because we save registers in decreasing order, IP0 will already be
1094 // saved.
1096 __ mov(IP0, R15);
1097 __ str(IP0, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1098 } else if (r == R31) {
1099 __ str(ZR, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1100 } else {
1101 __ str(r, Address(SP, -1 * target::kWordSize, Address::PreIndex));
1102 }
1103 }
1104
1105 for (intptr_t reg_idx = kNumberOfVRegisters - 1; reg_idx >= 0; reg_idx--) {
1106 VRegister vreg = static_cast<VRegister>(reg_idx);
1107 __ PushQuad(vreg);
1108 }
1109
1110 {
1111 __ mov(R0, SP); // Pass address of saved registers block.
1112 LeafRuntimeScope rt(assembler,
1113 /*frame_size=*/0,
1114 /*preserve_registers=*/false);
1115 bool is_lazy =
1116 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
1117 __ LoadImmediate(R1, is_lazy ? 1 : 0);
1118 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
1119 // Result (R0) is stack-size (FP - SP) in bytes.
1120 }
1121
1122 if (kind == kLazyDeoptFromReturn) {
1123 // Restore result into R1 temporarily.
1124 __ LoadFromOffset(R1, FP, saved_result_slot_from_fp * target::kWordSize);
1125 } else if (kind == kLazyDeoptFromThrow) {
1126 // Restore result into R1 temporarily.
1127 __ LoadFromOffset(R1, FP, saved_exception_slot_from_fp * target::kWordSize);
1128 __ LoadFromOffset(R2, FP,
1129 saved_stacktrace_slot_from_fp * target::kWordSize);
1130 }
1131
1132 // There is a Dart Frame on the stack. We must restore PP and leave frame.
1133 __ RestoreCodePointer();
1134 __ LeaveStubFrame();
1135 __ sub(SP, FP, Operand(R0));
1136
1137 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
1138 // is no need to set the correct PC marker or load PP, since they get patched.
1139 __ EnterStubFrame();
1140
1141 if (kind == kLazyDeoptFromReturn) {
1142 __ Push(R1); // Preserve result as first local.
1143 } else if (kind == kLazyDeoptFromThrow) {
1144 __ Push(R1); // Preserve exception as first local.
1145 __ Push(R2); // Preserve stacktrace as second local.
1146 }
1147 {
1148 __ mov(R0, FP); // Pass last FP as parameter in R0.
1149 LeafRuntimeScope rt(assembler,
1150 /*frame_size=*/0,
1151 /*preserve_registers=*/false);
1152 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
1153 }
1154 if (kind == kLazyDeoptFromReturn) {
1155 // Restore result into R1.
1156 __ LoadFromOffset(
1157 R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
1158 } else if (kind == kLazyDeoptFromThrow) {
1159 // Restore result into R1.
1160 __ LoadFromOffset(
1161 R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
1162 __ LoadFromOffset(
1163 R2, FP,
1164 (target::frame_layout.first_local_from_fp - 1) * target::kWordSize);
1165 }
1166 // Code above cannot cause GC.
1167 // There is a Dart Frame on the stack. We must restore PP and leave frame.
1168 __ RestoreCodePointer();
1169 __ LeaveStubFrame();
1170
1171 // Frame is fully rewritten at this point and it is safe to perform a GC.
1172 // Materialize any objects that were deferred by FillFrame because they
1173 // require allocation.
1174 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
1175 __ EnterStubFrame();
1176 if (kind == kLazyDeoptFromReturn) {
1177 __ Push(R1); // Preserve result, it will be GC-d here.
1178 } else if (kind == kLazyDeoptFromThrow) {
1179 // Preserve CODE_REG for one more runtime call.
1180 __ Push(CODE_REG);
1181 __ Push(R1); // Preserve exception, it will be GC-d here.
1182 __ Push(R2); // Preserve stacktrace, it will be GC-d here.
1183 }
1184
1185 __ Push(ZR); // Space for the result.
1186 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
1187 // Result tells stub how many bytes to remove from the expression stack
1188 // of the bottom-most frame. They were used as materialization arguments.
1189 __ Pop(R2);
1190 __ SmiUntag(R2);
1191 if (kind == kLazyDeoptFromReturn) {
1192 __ Pop(R0); // Restore result.
1193 } else if (kind == kLazyDeoptFromThrow) {
1194 __ Pop(R1); // Restore stacktrace.
1195 __ Pop(R0); // Restore exception.
1196 __ Pop(CODE_REG);
1197 }
1198 __ LeaveStubFrame();
1199 // Remove materialization arguments.
1200 __ add(SP, SP, Operand(R2));
1201 // The caller is responsible for emitting the return instruction.
1202
1203 if (kind == kLazyDeoptFromThrow) {
1204 // Unoptimized frame is now ready to accept the exception. Rethrow it to
1205 // find the right handler. Ask the rethrow machinery to bypass the debugger,
1206 // which was already notified about this exception.
1207 __ EnterStubFrame();
1208 __ Push(ZR); // Space for the return value (unused).
1209 __ Push(R0); // Exception
1210 __ Push(R1); // Stacktrace
1211 __ PushImmediate(target::ToRawSmi(1)); // Bypass debugger
1212 __ CallRuntime(kReThrowRuntimeEntry, 3);
1213 __ LeaveStubFrame();
1214 }
1215}
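// A minimal sketch (not VM code) of the three-phase shape of the sequence
// above and of its stack-pointer bookkeeping. The three functions are
// illustrative stand-ins for the runtime entries the stub calls; only the
// arithmetic mirrors the assembly.
#include <cstdint>

namespace sketch {

inline uintptr_t DeoptimizeCopyFrameStandIn(uintptr_t /*regs*/, bool /*lazy*/) {
  return 0;  // would return the unoptimized frame size in bytes
}
inline void DeoptimizeFillFrameStandIn(uintptr_t /*fp*/) {}
inline intptr_t DeoptimizeMaterializeStandIn() {
  return 0;  // would return bytes of materialization arguments to drop
}

inline uintptr_t Deoptimize(uintptr_t fp, bool is_lazy) {
  const uintptr_t frame_size = DeoptimizeCopyFrameStandIn(fp, is_lazy);  // phase 1
  uintptr_t sp = fp - frame_size;          // stub: __ sub(SP, FP, Operand(R0))
  DeoptimizeFillFrameStandIn(fp);          // phase 2: rewrite the frame in place
  const intptr_t drop = DeoptimizeMaterializeStandIn();  // phase 3
  sp += static_cast<uintptr_t>(drop);      // stub: __ add(SP, SP, Operand(R2))
  return sp;
}

}  // namespace sketch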
1216
1217// R0: result, must be preserved
1218void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
1219 // Push zap value instead of CODE_REG for lazy deopt.
1220 __ LoadImmediate(TMP, kZapCodeReg);
1221 __ Push(TMP);
1222 // Return address for "call" to deopt stub.
1223 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
1224 __ ldr(CODE_REG,
1226 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
1227 __ ret();
1228}
1229
1230// R0: exception, must be preserved
1231// R1: stacktrace, must be preserved
1232void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
1233 // Push zap value instead of CODE_REG for lazy deopt.
1234 __ LoadImmediate(TMP, kZapCodeReg);
1235 __ Push(TMP);
1236 // Return address for "call" to deopt stub.
1237 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
1238 __ ldr(CODE_REG,
1240 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
1241 __ ret();
1242}
1243
1244void StubCodeCompiler::GenerateDeoptimizeStub() {
1245 __ Push(CODE_REG);
1247 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
1248 __ ret();
1249}
1250
1251// IC_DATA_REG: ICData/MegamorphicCache
1252static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
1253 __ EnterStubFrame();
1254
1255 __ ldr(ARGS_DESC_REG,
1256 FieldAddress(IC_DATA_REG,
1258
1259 // Load the receiver.
1260 __ LoadCompressedSmiFieldFromOffset(
1262 __ add(TMP, FP, Operand(R2, LSL, target::kWordSizeLog2 - 1)); // R2 is Smi.
1263 __ LoadFromOffset(R6, TMP,
1264 target::frame_layout.param_end_from_fp * target::kWordSize);
1265 __ Push(ZR); // Result slot.
1266 __ Push(R6); // Receiver.
1267 __ Push(IC_DATA_REG); // ICData/MegamorphicCache.
1268 __ Push(ARGS_DESC_REG); // Arguments descriptor.
1269
1270 // Adjust arguments count.
1271 __ LoadCompressedSmiFieldFromOffset(
1273 __ AddImmediate(TMP, R2, 1, kObjectBytes); // Include the type arguments.
1274 __ cmp(R3, Operand(0), kObjectBytes);
1275 // R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
1276 __ csinc(R2, R2, TMP, EQ, kObjectBytes);
1277
1278 // R2: Smi-tagged arguments array length.
1279 PushArrayOfArguments(assembler);
1280 const intptr_t kNumArgs = 4;
1281 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1282 __ Drop(4);
1283 __ Pop(R0); // Return value.
1284 __ LeaveStubFrame();
1285 __ ret();
1286}
1287
1288static void GenerateDispatcherCode(Assembler* assembler,
1289 Label* call_target_function) {
1290 __ Comment("NoSuchMethodDispatch");
1291 // When lazily generated invocation dispatchers are disabled, the
1292 // miss-handler may return null.
1293 __ CompareObject(R0, NullObject());
1294 __ b(call_target_function, NE);
1295
1296 GenerateNoSuchMethodDispatcherBody(assembler);
1297}
1298
1299// Input:
1300// ARGS_DESC_REG - arguments descriptor
1301// IC_DATA_REG - icdata/megamorphic_cache
1302void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
1303 GenerateNoSuchMethodDispatcherBody(assembler);
1304}
1305
1306// Called for inline allocation of arrays.
1307// Input registers (preserved):
1308// LR: return address.
1309// AllocateArrayABI::kLengthReg: array length as Smi.
1310// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1311// Output registers:
1312// AllocateArrayABI::kResultReg: newly allocated array.
1313// Clobbered:
1314// R3, R7
1315void StubCodeCompiler::GenerateAllocateArrayStub() {
1316 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1317 Label slow_case;
1318 // Compute the size to be allocated; it is based on the array length
1319 // and is computed as:
1320 // RoundedAllocationSize(
1321 // (array_length * kCompressedWordSize) + target::Array::header_size()).
1322 // Check that length is a Smi.
1323 __ BranchIfNotSmi(AllocateArrayABI::kLengthReg, &slow_case);
1324
1325 // Check length >= 0 && length <= kMaxNewSpaceElements
1326 const intptr_t max_len =
1328 __ CompareImmediate(AllocateArrayABI::kLengthReg, max_len, kObjectBytes);
1329 __ b(&slow_case, HI);
1330
1331 const intptr_t cid = kArrayCid;
1332 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case, R4));
1333
1334 // Calculate and align allocation size.
1335 // Load new object start and calculate next object start.
1336 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1337 // AllocateArrayABI::kLengthReg: array length as Smi.
1339 Address(THR, target::Thread::top_offset()));
1340 intptr_t fixed_size_plus_alignment_padding =
1343 __ LoadImmediate(R3, fixed_size_plus_alignment_padding);
1344// AllocateArrayABI::kLengthReg is Smi.
1345#if defined(DART_COMPRESSED_POINTERS)
1346 __ add(R3, R3, Operand(AllocateArrayABI::kLengthReg, LSL, 1), kObjectBytes);
1347#else
1348 __ add(R3, R3, Operand(AllocateArrayABI::kLengthReg, LSL, 2), kObjectBytes);
1349#endif
1350 ASSERT(kSmiTagShift == 1);
1351 __ andi(R3, R3,
1353 // AllocateArrayABI::kResultReg: potential new object start.
1354 // R3: object size in bytes.
1355 __ adds(R7, R3, Operand(AllocateArrayABI::kResultReg));
1356 __ b(&slow_case, CS); // Branch if unsigned overflow.
1357
1358 // Check if the allocation fits into the remaining space.
1359 // AllocateArrayABI::kResultReg: potential new object start.
1360 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1361 // AllocateArrayABI::kLengthReg: array length as Smi.
1362 // R3: array size.
1363 // R7: potential next object start.
1364 __ LoadFromOffset(TMP, THR, target::Thread::end_offset());
1365 __ CompareRegisters(R7, TMP);
1366 __ b(&slow_case, CS); // Branch if unsigned higher or equal.
1367 __ CheckAllocationCanary(AllocateArrayABI::kResultReg);
1368
1369 // Successfully allocated the object(s); now update top to point to the
1370 // next object start and initialize the object.
1371 // AllocateArrayABI::kResultReg: potential new object start.
1372 // R3: array size.
1373 // R7: potential next object start.
1374 __ str(R7, Address(THR, target::Thread::top_offset()));
1376 Operand(kHeapObjectTag));
1377
1378 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1379 // AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1380 // AllocateArrayABI::kLengthReg: array length as Smi.
1381 // R3: array size.
1382 // R7: new object end address.
1383
1384 // Calculate the size tag.
1385 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1386 // AllocateArrayABI::kLengthReg: array length as Smi.
1387 // R3: array size.
1388 // R7: new object end address.
1389 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1392 // If no size tag overflow, shift R3 left, else set R3 to zero.
1393 __ LslImmediate(TMP, R3, shift);
1394 __ csel(R3, TMP, R3, LS);
1395 __ csel(R3, ZR, R3, HI);
1396
1397 // Get the class index and insert it into the tags.
1398 const uword tags =
1399 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1400
1401 __ LoadImmediate(TMP, tags);
1402 __ orr(R3, R3, Operand(TMP));
1403 __ StoreFieldToOffset(R3, AllocateArrayABI::kResultReg,
1405
1406 // Store the type argument field.
1407 __ StoreCompressedIntoObjectOffsetNoBarrier(
1410
1411 // Set the length field.
1412 __ StoreCompressedIntoObjectOffsetNoBarrier(AllocateArrayABI::kResultReg,
1415
1416 // Initialize all array elements to raw_null.
1417 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1418 // R7: new object end address.
1419 // AllocateArrayABI::kLengthReg: array length as Smi.
1420 __ AddImmediate(R3, AllocateArrayABI::kResultReg,
1422 // R3: iterator which initially points to the start of the variable
1423 // data area to be initialized.
1424#if defined(DART_COMPRESSED_POINTERS)
1425 const Register kWordOfNulls = TMP;
1426 __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
1427 __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
1428#else
1429 const Register kWordOfNulls = NULL_REG;
1430#endif
1431 Label loop;
1432 __ Bind(&loop);
1434 __ stp(kWordOfNulls, kWordOfNulls,
1436 // Safe to only check every kObjectAlignment bytes instead of each word.
1438 __ CompareRegisters(R3, R7);
1439 __ b(&loop, UNSIGNED_LESS);
1440 __ WriteAllocationCanary(R7); // Fix overshoot.
1441
1442 // Done allocating and initializing the array.
1443 // AllocateArrayABI::kResultReg: new object.
1444 // AllocateArrayABI::kLengthReg: array length as Smi (preserved).
1445 __ ret();
1446
1447 // Unable to allocate the array using the fast inline code; just call
1448 // into the runtime.
1449 __ Bind(&slow_case);
1450 }
1451 // Create a stub frame as we are pushing some objects on the stack before
1452 // calling into the runtime.
1453 __ EnterStubFrame();
1454 // Set up space on the stack for the return value.
1455 // Push array length as Smi and element type.
1456 __ Push(ZR);
1459 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1460
1461 // Write-barrier elimination might be enabled for this array (depending on
1462 // the array length). To be sure, we check whether the allocated object is in
1463 // old space and, if so, call a leaf runtime to add it to the remembered set.
1466
1467 // Pop arguments; result is popped in IP.
1471 __ LeaveStubFrame();
1472 __ ret();
1473}
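// A minimal sketch (not VM code) of the fast-path size computation above:
// header size plus one (compressed) word per element, rounded up to the object
// alignment. The Smi-tag shifts in the assembly fold the untagging into the
// multiply; this sketch works on an untagged length.
#include <cstdint>

namespace sketch {

inline uintptr_t ArrayAllocationSize(uintptr_t length,
                                     uintptr_t header_size,
                                     uintptr_t element_size,
                                     uintptr_t object_alignment) {
  const uintptr_t unrounded = header_size + length * element_size;
  return (unrounded + object_alignment - 1) & ~(object_alignment - 1);
}

}  // namespace sketch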
1474
1475void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
1476 // For test purposes, call the allocation stub without the inline allocation attempt.
1477 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1478 Label slow_case;
1479 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1481 __ Ret();
1482
1483 __ Bind(&slow_case);
1484 }
1487 GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
1489 /*allow_return=*/true,
1490 /*store_runtime_result_in_result_register=*/true);
1491}
1492
1493void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
1494 // For test purposes, call the allocation stub without the inline allocation attempt.
1495 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1496 Label slow_case;
1497 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1499 __ Ret();
1500
1501 __ Bind(&slow_case);
1502 }
1505 GenerateSharedStub(
1506 /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1508 /*allow_return=*/true,
1509 /*store_runtime_result_in_result_register=*/true);
1510}
1511
1512// Called when invoking Dart code from C++ (VM code).
1513// Input parameters:
1514// LR : points to return address.
1515// R0 : target code or entry point (in bare instructions mode).
1516// R1 : arguments descriptor array.
1517// R2 : arguments array.
1518// R3 : current thread.
1519void StubCodeCompiler::GenerateInvokeDartCodeStub() {
1520 __ Comment("InvokeDartCodeStub");
1521
1522 // Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually
1523 // use to access the stack (SP/R15) and set the C stack pointer to near the
1524 // stack limit, loaded from the Thread held in R3, to prevent signal handlers
1525 // from overwriting Dart frames.
1526 __ mov(SP, CSP);
1527 __ SetupCSPFromThread(R3);
1528 __ EnterFrame(0);
1529
1530 // Push code object to PC marker slot.
1532 __ Push(TMP);
1533
1534#if defined(DART_TARGET_OS_FUCHSIA)
1536#elif defined(USING_SHADOW_CALL_STACK)
1537#error Unimplemented
1538#endif
1539
1540 __ PushNativeCalleeSavedRegisters();
1541
1542 // Set up THR, which caches the current thread in Dart code.
1543 if (THR != R3) {
1544 __ mov(THR, R3);
1545 }
1546
1547 // Refresh pinned registers (write barrier mask, null, dispatch table, etc).
1548 __ RestorePinnedRegisters();
1549
1550 // Save the current VMTag on the stack.
1551 __ LoadFromOffset(R4, THR, target::Thread::vm_tag_offset());
1552 __ Push(R4);
1553
1554 // Save top resource and top exit frame info. Use R6 as a temporary register.
1555 // StackFrameIterator reads the top exit frame info saved in this frame.
1556 __ LoadFromOffset(R6, THR, target::Thread::top_resource_offset());
1557 __ StoreToOffset(ZR, THR, target::Thread::top_resource_offset());
1558 __ Push(R6);
1559
1561 __ Push(R6);
1563
1566 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1567 // with the code below.
1568#if defined(DART_TARGET_OS_FUCHSIA)
1569 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -24);
1570#else
1571 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -23);
1572#endif
1573 __ Push(R6);
1574 // In debug mode, verify that we've pushed the top exit frame info at the
1575 // correct offset from FP.
1576 __ EmitEntryFrameVerification();
1577
1578 // Mark that the thread is executing Dart code. Do this after initializing the
1579 // exit link for the profiler.
1580 __ LoadImmediate(R6, VMTag::kDartTagId);
1581 __ StoreToOffset(R6, THR, target::Thread::vm_tag_offset());
1582
1583 // Load arguments descriptor array into R4, which is passed to Dart code.
1584 __ mov(R4, R1);
1585
1586 // Load number of arguments into R5 and adjust count for type arguments.
1587 __ LoadCompressedSmiFieldFromOffset(
1589 __ LoadCompressedSmiFieldFromOffset(
1591 __ SmiUntag(R5);
1592 // Include the type arguments.
1593 __ cmp(R3, Operand(0), kObjectBytes);
1594 __ csinc(R5, R5, R5, EQ); // R5 <- (R3 == 0) ? R5 : R5 + 1
1595
1596 // Compute address of 'arguments array' data area into R2.
1597 __ AddImmediate(R2, R2, target::Array::data_offset() - kHeapObjectTag);
1598
1599 // Set up arguments for the Dart call.
1600 Label push_arguments;
1601 Label done_push_arguments;
1602 __ cmp(R5, Operand(0));
1603 __ b(&done_push_arguments, EQ); // check if there are arguments.
1604 __ LoadImmediate(R1, 0);
1605 __ Bind(&push_arguments);
1606 __ LoadCompressed(R3, Address(R2));
1607 __ Push(R3);
1608 __ add(R1, R1, Operand(1));
1609 __ add(R2, R2, Operand(target::kCompressedWordSize));
1610 __ cmp(R1, Operand(R5));
1611 __ b(&push_arguments, LT);
1612 __ Bind(&done_push_arguments);
1613
1614 if (FLAG_precompiled_mode) {
1615 __ SetupGlobalPoolAndDispatchTable();
1616 __ mov(CODE_REG, ZR); // GC-safe value into CODE_REG.
1617 } else {
1618 // We now load the pool pointer (PP) with a GC-safe value as we are about to
1619 // invoke Dart code. We don't need a real object pool here.
1620 // Smi zero does not work because ARM64 assumes PP to be untagged.
1621 __ LoadObject(PP, NullObject());
1622 __ mov(CODE_REG, R0);
1623 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1624 }
1625
1626 // Call the Dart code entrypoint.
1627 __ blr(R0); // R4 is the arguments descriptor array.
1628 __ Comment("InvokeDartCodeStub return");
1629
1630 // Get rid of arguments pushed on the stack.
1631 __ AddImmediate(
1632 SP, FP,
1633 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
1634
1635 // Restore the saved top exit frame info and top resource back into the
1636 // Thread structure. Uses R6 as a temporary register for this.
1637 __ Pop(R6);
1639 __ Pop(R6);
1641 __ Pop(R6);
1642 __ StoreToOffset(R6, THR, target::Thread::top_resource_offset());
1643
1644 // Restore the current VMTag from the stack.
1645 __ Pop(R4);
1646 __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
1647
1648 __ PopNativeCalleeSavedRegisters();
1649
1650 // Restore the frame pointer and C stack pointer and return.
1651 __ LeaveFrame();
1652 __ RestoreCSP();
1653 __ ret();
1654}
1655
1656// Helper that generates the space allocation for the AllocateContext stub.
1657// This does not initialize the fields of the context.
1658// Input:
1659// R1: number of context variables.
1660// Output:
1661// R0: new allocated Context object.
1662// Clobbered:
1663// R2, R3, R4, TMP
1664static void GenerateAllocateContextSpaceStub(Assembler* assembler,
1665 Label* slow_case) {
1666 // First compute the rounded instance size.
1667 // R1: number of context variables.
1668 intptr_t fixed_size_plus_alignment_padding =
1671 __ LoadImmediate(R2, fixed_size_plus_alignment_padding);
1672 __ add(R2, R2, Operand(R1, LSL, kCompressedWordSizeLog2));
1673 ASSERT(kSmiTagShift == 1);
1674 __ andi(R2, R2, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
1675
1676 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, R4));
1677 // Now allocate the object.
1678 // R1: number of context variables.
1679 // R2: object size.
1680 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1681 __ add(R3, R2, Operand(R0));
1682 // Check if the allocation fits into the remaining space.
1683 // R0: potential new object.
1684 // R1: number of context variables.
1685 // R2: object size.
1686 // R3: potential next object start.
1687 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
1688 __ CompareRegisters(R3, TMP);
1689 __ b(slow_case, CS); // Branch if unsigned higher or equal.
1690 __ CheckAllocationCanary(R0);
1691
1692 // Successfully allocated the object; now update top to point to the
1693 // next object start and initialize the object.
1694 // R0: new object.
1695 // R1: number of context variables.
1696 // R2: object size.
1697 // R3: next object start.
1698 __ str(R3, Address(THR, target::Thread::top_offset()));
1699 __ add(R0, R0, Operand(kHeapObjectTag));
1700
1701 // Calculate the size tag.
1702 // R0: new object.
1703 // R1: number of context variables.
1704 // R2: object size.
1705 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1708 // If the size fits in the size tag, shift R2 into place; else set R2 to zero.
1709 __ LslImmediate(TMP, R2, shift);
1710 __ csel(R2, TMP, R2, LS);
1711 __ csel(R2, ZR, R2, HI);
1712
1713 // Get the class index and insert it into the tags.
1714 // R2: size and bit tags.
1715 const uword tags =
1716 target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
1717
1718 __ LoadImmediate(TMP, tags);
1719 __ orr(R2, R2, Operand(TMP));
1720 __ StoreFieldToOffset(R2, R0, target::Object::tags_offset());
1721
1722 // Set up the number-of-context-variables field.
1723 // R0: new object.
1724 // R1: number of context variables as integer value (not object).
1725 __ StoreFieldToOffset(R1, R0, target::Context::num_variables_offset(),
1726 kFourBytes);
1727}
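
// Editor's note: a minimal sketch of the rounded-size computation done by
// GenerateAllocateContextSpaceStub above. The header size, compressed word
// size, and object alignment below are illustrative placeholder values, not
// the VM's authoritative constants.
static constexpr intptr_t ExampleContextAllocationSize(intptr_t num_vars) {
  constexpr intptr_t kAssumedHeaderSize = 16;         // fixed Context fields
  constexpr intptr_t kAssumedCompressedWordSize = 4;  // one slot per variable
  constexpr intptr_t kAssumedObjectAlignment = 16;    // allocation granularity
  const intptr_t unrounded =
      kAssumedHeaderSize + num_vars * kAssumedCompressedWordSize;
  // Same rounding as adding the alignment padding and masking with
  // ~(kObjectAlignment - 1) in the stub.
  return (unrounded + kAssumedObjectAlignment - 1) &
         ~(kAssumedObjectAlignment - 1);
}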
1728
1729// Called for inline allocation of contexts.
1730// Input:
1731// R1: number of context variables.
1732// Output:
1733// R0: newly allocated Context object.
1734// Clobbered:
1735// R2, R3, R4, TMP
1736void StubCodeCompiler::GenerateAllocateContextStub() {
1737 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1738 Label slow_case;
1739
1740 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1741
1742 // Set up the parent field.
1743 // R0: new object.
1744 // R1: number of context variables.
1745 __ StoreCompressedIntoObjectOffset(R0, target::Context::parent_offset(),
1746 NULL_REG);
1747
1748 // Initialize the context variables.
1749 // R0: new object.
1750 // R1: number of context variables.
1751 __ AddImmediate(R3, R0,
1753#if defined(DART_COMPRESSED_POINTERS)
1754 const Register kWordOfNulls = TMP;
1755 __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
1756 __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
1757#else
1758 const Register kWordOfNulls = NULL_REG;
1759#endif
1760 Label loop;
1761 __ Bind(&loop);
1763 __ stp(kWordOfNulls, kWordOfNulls,
1765 // Safe to only check every kObjectAlignment bytes instead of each word.
1767 __ subs(R1, R1,
1769 __ b(&loop, HI);
1770#if defined(DEBUG)
1771 __ ldr(TMP2, Address(THR, target::Thread::top_offset()));
1772 __ WriteAllocationCanary(TMP2); // Fix overshoot.
1773#endif
1774
1775 // Done allocating and initializing the context.
1776 // R0: new object.
1777 __ ret();
1778
1779 __ Bind(&slow_case);
1780 }
1781 // Create a stub frame as we are pushing some objects on the stack before
1782 // calling into the runtime.
1783 __ EnterStubFrame();
1784 // Set up space on stack for the return value.
1785 __ SmiTag(R1);
1786 __ PushObject(NullObject());
1787 __ Push(R1);
1788 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1789 __ Drop(1); // Pop number of context variables argument.
1790 __ Pop(R0); // Pop the new context object.
1791
1792 // Write-barrier elimination might be enabled for this context (depending on
1793 // its size). To be safe, check whether the allocated object is in old space
1794 // and, if so, call a leaf runtime entry to add it to the remembered set.
1796
1797 // R0: new object
1798 // Restore the frame pointer.
1799 __ LeaveStubFrame();
1800
1801 __ ret();
1802}
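
// Editor's note: a sketch of the "word of nulls" trick used by the fill loop
// above when DART_COMPRESSED_POINTERS is defined: two 32-bit compressed null
// pointers are packed into one 64-bit value so the loop can store them with a
// single stp. The compressed-null encoding is left abstract here.
static constexpr uint64_t ExamplePackCompressedNulls(uint32_t compressed_null) {
  // Mirrors: andi(kWordOfNulls, NULL_REG, 0xFFFFFFFF) followed by
  // orr(kWordOfNulls, kWordOfNulls, kWordOfNulls LSL 32).
  return static_cast<uint64_t>(compressed_null) |
         (static_cast<uint64_t>(compressed_null) << 32);
}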
1803
1804// Called for clone of contexts.
1805// Input:
1806// R5: context to clone.
1807// Output:
1808// R0: newly allocated Context object.
1809// Clobbered:
1810// R1, (R2), R3, R4, (TMP)
1811void StubCodeCompiler::GenerateCloneContextStub() {
1812 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1813 Label slow_case;
1814
1815 // Load the number of variables (int32) from the existing context.
1816 __ ldr(R1, FieldAddress(R5, target::Context::num_variables_offset()),
1817 kFourBytes);
1818
1819 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1820
1821 // Load parent in the existing context.
1822 __ LoadCompressed(R3, FieldAddress(R5, target::Context::parent_offset()));
1823 // Set up the parent field.
1824 // R0: new context.
1825 __ StoreCompressedIntoObjectNoBarrier(
1826 R0, FieldAddress(R0, target::Context::parent_offset()), R3);
1827
1828 // Clone the context variables.
1829 // R0: new context.
1830 // R1: number of context variables.
1831 {
1832 Label loop, done;
1833 // R3: Variable array address, new context.
1834 __ AddImmediate(R3, R0,
1836 // R4: Variable array address, old context.
1837 __ AddImmediate(R4, R5,
1839
1840 __ Bind(&loop);
1841 __ subs(R1, R1, Operand(1));
1842 __ b(&done, MI);
1843
1844 __ ldr(R5, Address(R4, R1, UXTX, Address::Scaled), kObjectBytes);
1845 __ str(R5, Address(R3, R1, UXTX, Address::Scaled), kObjectBytes);
1846 __ b(&loop, NE); // Loop if R1 not zero.
1847
1848 __ Bind(&done);
1849 }
1850
1851 // Done allocating and initializing the context.
1852 // R0: new object.
1853 __ ret();
1854
1855 __ Bind(&slow_case);
1856 }
1857
1858 // Create a stub frame as we are pushing some objects on the stack before
1859 // calling into the runtime.
1860 __ EnterStubFrame();
1861 // Set up space on stack for the return value.
1862 __ PushPair(R5, NULL_REG);
1863 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1864 // Pop number of context variables argument.
1865 // Pop the new context object.
1866 __ PopPair(R1, R0);
1867
1868 // Write-barrier elimination might be enabled for this context (depending on
1869 // its size). To be safe, check whether the allocated object is in old space
1870 // and, if so, call a leaf runtime entry to add it to the remembered set.
1872
1873 // R0: new object
1874 // Restore the frame pointer.
1875 __ LeaveStubFrame();
1876 __ ret();
1877}
1878
1879void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
1880 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1881 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1882
1883 Register reg = static_cast<Register>(i);
1884 intptr_t start = __ CodeSize();
1885 SPILLS_LR_TO_FRAME(__ Push(LR));
1887 __ mov(kWriteBarrierObjectReg, reg);
1890 RESTORES_LR_FROM_FRAME(__ Pop(LR));
1891 READS_RETURN_ADDRESS_FROM_LR(__ ret(LR));
1892 intptr_t end = __ CodeSize();
1893
1895 }
1896}
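
// Editor's note: an illustrative sketch of the register-selection loop in
// GenerateWriteBarrierWrappersStub above: a wrapper is emitted only for
// registers whose bit is set in the availability mask. The mask argument is a
// stand-in for kDartAvailableCpuRegs.
static inline int ExampleCountWriteBarrierWrappers(uint32_t available_mask,
                                                   int num_registers) {
  int wrappers = 0;
  for (int i = 0; i < num_registers; ++i) {
    if ((available_mask & (1u << i)) == 0) continue;  // register not usable
    ++wrappers;  // the stub emits Push(LR); mov; call; Pop(LR); ret here
  }
  return wrappers;
}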
1897
1898// Helper stub to implement Assembler::StoreIntoObject/Array.
1899// Input parameters:
1900// R1: Object (old)
1901// R0: Value (old or new)
1902// R25: Slot
1903// If R0 needs marking, mark it and add it to the marking stack. If R1 is old
1904// and not remembered and R0 is new, add R1 to the store buffer.
1908static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
1909 RegisterSet spill_set((1 << R2) | (1 << R3) | (1 << R4), 0);
1910
1911 Label skip_marking;
1912 __ ldr(TMP, FieldAddress(R0, target::Object::tags_offset()));
1914 __ and_(TMP, TMP, Operand(TMP2));
1916 __ b(&skip_marking, ZERO);
1917
1918 {
1919 // Atomically clear kNotMarkedBit.
1920 Label retry, is_new, done;
1921 __ PushRegisters(spill_set);
1923 // R3: Untagged address of header word (atomics do not support offsets).
1925 __ LoadImmediate(TMP, 1 << target::UntaggedObject::kNotMarkedBit);
1926 __ ldclr(TMP, TMP, R3);
1928 } else {
1929 __ Bind(&retry);
1930 __ ldxr(R2, R3, kEightBytes);
1932 __ AndImmediate(R2, R2, ~(1 << target::UntaggedObject::kNotMarkedBit));
1933 __ stxr(R4, R2, R3, kEightBytes);
1934 __ cbnz(&retry, R4);
1935 }
1936
1938
1939 auto mark_stack_push = [&](intptr_t offset, const RuntimeEntry& entry) {
1940 __ LoadFromOffset(R4, THR, offset);
1941 __ LoadFromOffset(R2, R4, target::MarkingStackBlock::top_offset(),
1943 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1945 __ add(R2, R2, Operand(1));
1948 __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
1949 __ b(&done, NE);
1950
1951 {
1952 LeafRuntimeScope rt(assembler,
1953 /*frame_size=*/0,
1954 /*preserve_registers=*/true);
1955 __ mov(R0, THR);
1956 rt.Call(entry, /*argument_count=*/1);
1957 }
1958 };
1959
1961 kOldMarkingStackBlockProcessRuntimeEntry);
1962 __ b(&done);
1963
1964 __ Bind(&is_new);
1966 kNewMarkingStackBlockProcessRuntimeEntry);
1967
1968 __ Bind(&done);
1969 __ clrex();
1970 __ PopRegisters(spill_set);
1971 }
1972
1973 Label add_to_remembered_set, remember_card;
1974 __ Bind(&skip_marking);
1975 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1976 __ ldr(TMP2, FieldAddress(R0, target::Object::tags_offset()));
1977 __ and_(TMP, TMP2,
1980 __ b(&add_to_remembered_set, NOT_ZERO);
1981 __ ret();
1982
1983 __ Bind(&add_to_remembered_set);
1984 if (cards) {
1985 __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kFourBytes);
1986 __ tbnz(&remember_card, TMP, target::UntaggedObject::kCardRememberedBit);
1987 } else {
1988#if defined(DEBUG)
1989 Label ok;
1990 __ LoadFieldFromOffset(TMP, R1, target::Object::tags_offset(), kFourBytes);
1992 __ Stop("Wrong barrier");
1993 __ Bind(&ok);
1994#endif
1995 }
1996 {
1997 // Atomically clear kOldAndNotRememberedBit.
1998 Label retry, done;
1999 __ PushRegisters(spill_set);
2001 // R3: Untagged address of header word (atomics do not support offsets).
2003 __ LoadImmediate(TMP,
2005 __ ldclr(TMP, TMP, R3);
2007 } else {
2008 __ Bind(&retry);
2009 __ ldxr(R2, R3, kEightBytes);
2011 __ AndImmediate(R2, R2,
2013 __ stxr(R4, R2, R3, kEightBytes);
2014 __ cbnz(&retry, R4);
2015 }
2016
2017 // Load the StoreBuffer block out of the thread. Then load top_ out of the
2018 // StoreBufferBlock and add the address to the pointers_.
2020 __ LoadFromOffset(R2, R4, target::StoreBufferBlock::top_offset(),
2022 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
2024
2025 // Increment top_ and check for overflow.
2026 // R2: top_.
2027 // R4: StoreBufferBlock.
2028 __ add(R2, R2, Operand(1));
2029 __ StoreToOffset(R2, R4, target::StoreBufferBlock::top_offset(),
2031 __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
2032 __ b(&done, NE);
2033
2034 {
2035 LeafRuntimeScope rt(assembler,
2036 /*frame_size=*/0,
2037 /*preserve_registers=*/true);
2038 __ mov(R0, THR);
2039 rt.Call(kStoreBufferBlockProcessRuntimeEntry, /*argument_count=*/1);
2040 }
2041
2042 __ Bind(&done);
2043 __ PopRegisters(spill_set);
2044 __ ret();
2045 }
2046 if (cards) {
2047 Label remember_card_slow, retry;
2048
2049 // Get card table.
2050 __ Bind(&remember_card);
2051 __ AndImmediate(TMP, R1, target::kPageMask); // Page.
2052 __ ldr(TMP2,
2053 Address(TMP, target::Page::card_table_offset())); // Card table.
2054 __ cbz(&remember_card_slow, TMP2);
2055
2056 // Atomically dirty the card.
2057 __ sub(R25, R25, Operand(TMP)); // Offset in page.
2058 __ LsrImmediate(R25, R25, target::Page::kBytesPerCardLog2); // Index.
2059 __ LoadImmediate(TMP, 1);
2060 __ lslv(TMP, TMP, R25); // Bit mask. (Shift amount is mod 64.)
2061 __ LsrImmediate(R25, R25, target::kBitsPerWordLog2); // Word index.
2062 __ add(TMP2, TMP2, Operand(R25, LSL, target::kWordSizeLog2)); // Word addr.
2063
2065 __ ldset(TMP, ZR, TMP2);
2066 } else {
2067 __ PushRegister(R0);
2068 __ Bind(&retry);
2069 __ ldxr(R25, TMP2);
2070 __ orr(R25, R25, Operand(TMP));
2071 __ stxr(R0, R25, TMP2);
2072 __ cbnz(&retry, R0);
2073 __ PopRegister(R0);
2074 }
2075 __ ret();
2076
2077 // Card table not yet allocated.
2078 __ Bind(&remember_card_slow);
2079 {
2080 LeafRuntimeScope rt(assembler,
2081 /*frame_size=*/0,
2082 /*preserve_registers=*/true);
2083 __ mov(R0, R1); // Arg0 = Object
2084 __ mov(R1, R25); // Arg1 = Slot
2085 rt.Call(kRememberCardRuntimeEntry, /*argument_count=*/2);
2086 }
2087 __ ret();
2088 }
2089}
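
// Editor's note: a sketch of the card-marking address arithmetic used in the
// `cards` path above. The shift amounts are illustrative stand-ins for
// Page::kBytesPerCardLog2 and kBitsPerWordLog2.
static inline void ExampleCardBitFor(uintptr_t slot,
                                     uintptr_t page_start,
                                     uintptr_t* word_index,
                                     uint64_t* bit_mask) {
  constexpr uintptr_t kAssumedBytesPerCardLog2 = 9;  // assumed 512-byte cards
  constexpr uintptr_t kAssumedBitsPerWordLog2 = 6;   // 64-bit card-table words
  const uintptr_t card = (slot - page_start) >> kAssumedBytesPerCardLog2;
  *bit_mask = uint64_t{1} << (card & 63);  // lslv shift amount is taken mod 64
  *word_index = card >> kAssumedBitsPerWordLog2;
}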
2090
2091void StubCodeCompiler::GenerateWriteBarrierStub() {
2092 GenerateWriteBarrierStubHelper(assembler, false);
2093}
2094
2095void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
2096 GenerateWriteBarrierStubHelper(assembler, true);
2097}
2098
2099static void GenerateAllocateObjectHelper(Assembler* assembler,
2100 bool is_cls_parameterized) {
2101 const Register kTagsReg = AllocateObjectABI::kTagsReg;
2102
2103 {
2104 Label slow_case;
2105
2106#if !defined(PRODUCT)
2107 {
2108 const Register kTraceAllocationTempReg = R8;
2109 const Register kCidRegister = R9;
2110 __ ExtractClassIdFromTags(kCidRegister, AllocateObjectABI::kTagsReg);
2111 __ MaybeTraceAllocation(kCidRegister, &slow_case,
2112 kTraceAllocationTempReg);
2113 }
2114#endif
2115
2116 const Register kNewTopReg = R3;
2117
2118 // Bump allocation.
2119 {
2120 const Register kInstanceSizeReg = R4;
2121 const Register kEndReg = R5;
2122
2123 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
2124
2125 // Load two words from Thread::top: top and end.
2126 // AllocateObjectABI::kResultReg: potential next object start.
2127 __ ldp(AllocateObjectABI::kResultReg, kEndReg,
2129
2130 __ add(kNewTopReg, AllocateObjectABI::kResultReg,
2131 Operand(kInstanceSizeReg));
2132
2133 __ CompareRegisters(kEndReg, kNewTopReg);
2134 __ b(&slow_case, UNSIGNED_LESS_EQUAL);
2135
2136 // Successfully allocated the object, now update top to point to
2137 // next object start and store the class in the class field of object.
2138 __ str(kNewTopReg, Address(THR, target::Thread::top_offset()));
2139 } // kInstanceSizeReg = R4, kEndReg = R5
2140
2141 // Tags.
2142 __ str(kTagsReg, Address(AllocateObjectABI::kResultReg,
2144
2145 // Initialize the remaining words of the object.
2146 {
2147 const Register kFieldReg = R4;
2148 __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
2150#if defined(DART_COMPRESSED_POINTERS)
2151 const Register kWordOfNulls = TMP;
2152 __ andi(kWordOfNulls, NULL_REG, Immediate(0xFFFFFFFF));
2153 __ orr(kWordOfNulls, kWordOfNulls, Operand(kWordOfNulls, LSL, 32));
2154#else
2155 const Register kWordOfNulls = NULL_REG;
2156#endif
2157 Label loop;
2158 __ Bind(&loop);
2160 __ stp(kWordOfNulls, kWordOfNulls,
2161 Address(kFieldReg, 2 * target::kWordSize, Address::PairPostIndex));
2162 // Safe to only check every kObjectAlignment bytes instead of each word.
2164 __ CompareRegisters(kFieldReg, kNewTopReg);
2165 __ b(&loop, UNSIGNED_LESS);
2166 __ WriteAllocationCanary(kNewTopReg); // Fix overshoot.
2167 } // kFieldReg = R4
2168
2169 __ AddImmediate(AllocateObjectABI::kResultReg,
2171
2172 if (is_cls_parameterized) {
2173 Label not_parameterized_case;
2174
2175 const Register kClsIdReg = R4;
2176 const Register kTypeOffsetReg = R5;
2177
2178 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
2179
2180 // Load class' type_arguments_field offset in words.
2181 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
2182 __ ldr(
2183 kTypeOffsetReg,
2184 FieldAddress(kTypeOffsetReg,
2185 target::Class::
2186 host_type_arguments_field_offset_in_words_offset()),
2187 kFourBytes);
2188
2189 // Set the type arguments in the new object.
2190 __ add(kTypeOffsetReg, AllocateObjectABI::kResultReg,
2191 Operand(kTypeOffsetReg, LSL, target::kCompressedWordSizeLog2));
2192 __ StoreCompressedIntoObjectNoBarrier(
2193 AllocateObjectABI::kResultReg, FieldAddress(kTypeOffsetReg, 0),
2195
2196 __ Bind(&not_parameterized_case);
2197 } // kClsIdReg = R4, kTypeOffsetReg = R5
2198
2199 __ ret();
2200
2201 __ Bind(&slow_case);
2202 } // kNewTopReg = R3
2203
2204 // Fall back on slow case:
2205 if (!is_cls_parameterized) {
2207 }
2208 // Tail call to generic allocation stub.
2209 __ ldr(
2210 R3,
2212 __ br(R3);
2213}
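
// Editor's note: the bump-allocation fast path of GenerateAllocateObjectHelper
// above, rendered as plain C++ (a sketch, not the VM's allocator). `top` and
// `end` stand in for the Thread's top/end words; returning 0 models falling
// through to the slow case.
static inline uintptr_t ExampleTryBumpAllocate(uintptr_t* top,
                                               uintptr_t end,
                                               uintptr_t instance_size) {
  const uintptr_t result = *top;
  const uintptr_t new_top = result + instance_size;
  if (end <= new_top) {
    return 0;  // Does not fit: the stub branches to its slow case instead.
  }
  *top = new_top;  // Reserve [result, new_top); the stub then stores the tags.
  return result;   // Untagged address; the stub adds kHeapObjectTag afterwards.
}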
2214
2215// Called for inline allocation of objects (any class).
2216void StubCodeCompiler::GenerateAllocateObjectStub() {
2217 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
2218}
2219
2220void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
2221 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
2222}
2223
2224void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
2225 if (!FLAG_precompiled_mode) {
2226 __ ldr(CODE_REG,
2228 }
2229
2230 __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg,
2232
2233 // Create a stub frame as we are pushing some objects on the stack before
2234 // calling into the runtime.
2235 __ EnterStubFrame();
2236
2237 __ LoadClassById(R0, AllocateObjectABI::kTagsReg);
2238 __ PushPair(R0, NULL_REG); // Pushes result slot, then class object.
2239
2240 // Should be Object::null() if class is non-parameterized.
2242
2243 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
2244
2245 // Load result off the stack into result register.
2247
2248 // Write-barrier elimination is enabled for [cls], so we need to ensure that
2249 // the object is in new-space or has its remembered bit set.
2251
2252 __ LeaveStubFrame();
2253
2254 __ ret();
2255}
2256
2257// Called for inline allocation of objects.
2259 UnresolvedPcRelativeCalls* unresolved_calls,
2260 const Class& cls,
2261 const Code& allocate_object,
2262 const Code& allocat_object_parametrized) {
2263 classid_t cls_id = target::Class::GetId(cls);
2264 ASSERT(cls_id != kIllegalCid);
2265
2266 // The generated code is different if the class is parameterized.
2267 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
2268 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
2270
2271 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
2272 ASSERT(instance_size > 0);
2273
2274 const uword tags =
2275 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
2276
2277 // Note: Keep in sync with helper function.
2278 const Register kTagsReg = AllocateObjectABI::kTagsReg;
2279
2280 __ LoadImmediate(kTagsReg, tags);
2281
2282 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
2284 target::SizeFitsInSizeTag(instance_size)) {
2287
2288 if (is_cls_parameterized) {
2289 if (!IsSameObject(NullObject(),
2290 CastHandle<Object>(allocat_object_parametrized))) {
2291 __ GenerateUnRelocatedPcRelativeTailCall();
2292 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2293 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
2294 } else {
2295 __ ldr(R4,
2296 Address(THR,
2297 target::Thread::
2298 allocate_object_parameterized_entry_point_offset()));
2299 __ br(R4);
2300 }
2301 } else {
2302 if (!IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
2303 __ GenerateUnRelocatedPcRelativeTailCall();
2304 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2305 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
2306 } else {
2307 __ ldr(
2308 R4,
2310 __ br(R4);
2311 }
2312 }
2313 } else {
2314 if (!is_cls_parameterized) {
2316 }
2317 __ ldr(R4,
2318 Address(THR,
2320 __ br(R4);
2321 }
2322}
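
// Editor's note: an illustrative sketch of how a new-space tag word such as
// the one loaded into kTagsReg above combines a size tag and a class id. The
// bit positions are hypothetical, not the VM's actual tag layout.
static constexpr uint64_t ExampleMakeTagWord(uint32_t class_id,
                                             uint32_t size_tag) {
  constexpr int kAssumedSizeTagPos = 8;      // hypothetical position
  constexpr int kAssumedClassIdTagPos = 16;  // hypothetical position
  return (static_cast<uint64_t>(size_tag) << kAssumedSizeTagPos) |
         (static_cast<uint64_t>(class_id) << kAssumedClassIdTagPos);
}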
2323
2324// Called to invoke the "dynamic noSuchMethod(Invocation invocation)" function
2325// from the entry code of a Dart function after an error in the passed argument
2326// names or count has been detected.
2327// Input parameters:
2328// LR : return address.
2329// SP : address of last argument.
2330// R4: arguments descriptor array.
2331void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
2332 __ EnterStubFrame();
2333
2334 // Load the receiver.
2335 __ LoadCompressedSmiFieldFromOffset(
2337 __ add(TMP, FP, Operand(R2, LSL, target::kWordSizeLog2 - 1));
2338 __ LoadFromOffset(R6, TMP,
2339 target::frame_layout.param_end_from_fp * target::kWordSize);
2340
2341 // Load the function.
2342 __ LoadCompressedFieldFromOffset(TMP, R6, target::Closure::function_offset());
2343
2344 __ Push(ZR); // Result slot.
2345 __ Push(R6); // Receiver.
2346 __ Push(TMP); // Function
2347 __ Push(R4); // Arguments descriptor.
2348
2349 // Adjust arguments count.
2350 __ LoadCompressedSmiFieldFromOffset(
2352 __ AddImmediate(TMP, R2, 1, kObjectBytes); // Include the type arguments.
2353 __ cmp(R3, Operand(0), kObjectBytes);
2354 // R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
2355 __ csinc(R2, R2, TMP, EQ, kObjectBytes);
2356
2357 // R2: Smi-tagged arguments array length.
2358 PushArrayOfArguments(assembler);
2359
2360 const intptr_t kNumArgs = 4;
2361 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2362 // noSuchMethod on closures always throws an error, so it will never return.
2363 __ brk(0);
2364}
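
// Editor's note: a sketch of the csinc-based argument-count adjustment used in
// GenerateCallClosureNoSuchMethodStub above. Both the count and the result are
// Smi-tagged (shifted left by one), so adding 2 corresponds to one extra slot
// for the type arguments vector when type_args_len is non-zero.
static constexpr intptr_t ExampleAdjustSmiArgumentCount(intptr_t smi_count,
                                                        intptr_t type_args_len) {
  // Mirrors: csinc(R2, R2, TMP, EQ) with TMP = R2 + 1,
  // i.e. R2 = (R3 == 0) ? R2 : R2 + 2.
  return (type_args_len == 0) ? smi_count : smi_count + 2;
}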
2365
2366// R6: function object.
2367// R5: inline cache data object.
2368// Cannot use function object from ICData as it may be the inlined
2369// function and not the top-scope function.
2371 Register ic_reg = R5;
2372 Register func_reg = R6;
2373 if (FLAG_precompiled_mode) {
2374 __ Breakpoint();
2375 return;
2376 }
2377 if (FLAG_trace_optimized_ic_calls) {
2378 __ EnterStubFrame();
2379 __ Push(R6); // Preserve.
2380 __ Push(R5); // Preserve.
2381 __ Push(ic_reg); // Argument.
2382 __ Push(func_reg); // Argument.
2383 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
2384 __ Drop(2); // Discard arguments.
2385 __ Pop(R5); // Restore.
2386 __ Pop(R6); // Restore.
2387 __ LeaveStubFrame();
2388 }
2389 __ LoadFieldFromOffset(R7, func_reg, target::Function::usage_counter_offset(),
2390 kFourBytes);
2391 __ add(R7, R7, Operand(1));
2392 __ StoreFieldToOffset(R7, func_reg, target::Function::usage_counter_offset(),
2393 kFourBytes);
2394}
2395
2396// Loads function into 'temp_reg'.
2398 if (FLAG_precompiled_mode) {
2399 __ Breakpoint();
2400 return;
2401 }
2402 if (FLAG_optimization_counter_threshold >= 0) {
2403 Register func_reg = temp_reg;
2404 ASSERT(temp_reg == R6);
2405 __ Comment("Increment function counter");
2406 __ LoadFieldFromOffset(func_reg, IC_DATA_REG,
2408 __ LoadFieldFromOffset(
2410 __ AddImmediate(R7, 1);
2411 __ StoreFieldToOffset(R7, func_reg,
2413 }
2414}
2415
2416// Note: R5 must be preserved.
2417// Attempt a quick Smi operation for known operations ('kind'). The ICData
2418// must have been primed with a Smi/Smi check that will be used for counting
2419// the invocations.
2420static void EmitFastSmiOp(Assembler* assembler,
2421 Token::Kind kind,
2422 intptr_t num_args,
2423 Label* not_smi_or_overflow) {
2424 __ Comment("Fast Smi op");
2425 __ ldr(R0, Address(SP, +1 * target::kWordSize)); // Left.
2426 __ ldr(R1, Address(SP, +0 * target::kWordSize)); // Right.
2427 __ orr(TMP, R0, Operand(R1));
2428 __ BranchIfNotSmi(TMP, not_smi_or_overflow);
2429 switch (kind) {
2430 case Token::kADD: {
2431 __ adds(R0, R1, Operand(R0), kObjectBytes); // Add.
2432 __ b(not_smi_or_overflow, VS); // Branch if overflow.
2433 break;
2434 }
2435 case Token::kLT: {
2436 __ CompareObjectRegisters(R0, R1);
2437 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
2438 __ LoadObject(R1, CastHandle<Object>(FalseObject()));
2439 __ csel(R0, R0, R1, LT);
2440 break;
2441 }
2442 case Token::kEQ: {
2443 __ CompareObjectRegisters(R0, R1);
2444 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
2445 __ LoadObject(R1, CastHandle<Object>(FalseObject()));
2446 __ csel(R0, R0, R1, EQ);
2447 break;
2448 }
2449 default:
2450 UNIMPLEMENTED();
2451 }
2452
2453 // R5: IC data object (preserved).
2454 __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset());
2455 // R6: ic_data_array with check entries: classes and target functions.
2457 // R6: points directly to the first ic data array element.
2458#if defined(DEBUG)
2459 // Check that first entry is for Smi/Smi.
2460 Label error, ok;
2461 const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
2462 __ LoadCompressedSmiFromOffset(R1, R6, 0);
2463 __ CompareImmediate(R1, imm_smi_cid, kObjectBytes);
2464 __ b(&error, NE);
2465 __ LoadCompressedSmiFromOffset(R1, R6, target::kCompressedWordSize);
2466 __ CompareImmediate(R1, imm_smi_cid, kObjectBytes);
2467 __ b(&ok, EQ);
2468 __ Bind(&error);
2469 __ Stop("Incorrect IC data");
2470 __ Bind(&ok);
2471#endif
2472 if (FLAG_optimization_counter_threshold >= 0) {
2473 const intptr_t count_offset =
2475 // Update counter, ignore overflow.
2476 __ LoadCompressedSmiFromOffset(R1, R6, count_offset);
2477 __ adds(R1, R1, Operand(target::ToRawSmi(1)), kObjectBytes);
2478 __ StoreToOffset(R1, R6, count_offset, kObjectBytes);
2479 }
2480
2481 __ ret();
2482}
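
// Editor's note: a sketch of the "both operands are Smis" guard at the top of
// EmitFastSmiOp above: or-ing the two tagged values and testing the low tag
// bit rejects the pair if either operand is a heap object. The tag convention
// (Smi tag == 0 in bit 0) is assumed for this sketch.
static constexpr bool ExampleBothAreSmis(intptr_t left, intptr_t right) {
  constexpr intptr_t kAssumedSmiTagMask = 1;
  return ((left | right) & kAssumedSmiTagMask) == 0;
}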
2483
2484// Saves the offset of the target entry-point (from the Function) into R8.
2485//
2486// Must be the first code generated, since any code before will be skipped in
2487// the unchecked entry-point.
2488static void GenerateRecordEntryPoint(Assembler* assembler) {
2489 Label done;
2491 __ b(&done);
2492 __ BindUncheckedEntryPoint();
2493 __ LoadImmediate(
2496 __ Bind(&done);
2497}
2498
2499// Generate inline cache check for 'num_args'.
2500// R0: receiver (if instance call)
2501// R5: ICData
2502// LR: return address
2503// Control flow:
2504// - If receiver is null -> jump to IC miss.
2505// - If receiver is Smi -> load Smi class.
2506// - If receiver is not-Smi -> load receiver's class.
2507// - Check if 'num_args' (including receiver) match any IC data group.
2508// - Match found -> jump to target.
2509// - Match not found -> jump to IC miss.
2511 intptr_t num_args,
2512 const RuntimeEntry& handle_ic_miss,
2513 Token::Kind kind,
2514 Optimized optimized,
2515 CallType type,
2516 Exactness exactness) {
2517 const bool save_entry_point = kind == Token::kILLEGAL;
2518 if (FLAG_precompiled_mode) {
2519 __ Breakpoint();
2520 return;
2521 }
2522
2523 if (save_entry_point) {
2524 GenerateRecordEntryPoint(assembler);
2525 }
2526
2527 if (optimized == kOptimized) {
2529 } else {
2530 GenerateUsageCounterIncrement(/*scratch=*/R6);
2531 }
2532
2533 ASSERT(num_args == 1 || num_args == 2);
2534#if defined(DEBUG)
2535 {
2536 Label ok;
2537 // Check that the IC data array has NumArgsTested() == num_args.
2538 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2539 __ LoadFromOffset(R6, R5,
2542 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2543 __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
2544 __ CompareImmediate(R6, num_args);
2545 __ b(&ok, EQ);
2546 __ Stop("Incorrect stub for IC data");
2547 __ Bind(&ok);
2548 }
2549#endif // DEBUG
2550
2551#if !defined(PRODUCT)
2552 Label stepping, done_stepping;
2553 if (optimized == kUnoptimized) {
2554 __ Comment("Check single stepping");
2555 __ LoadIsolate(R6);
2556 __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(),
2558 __ CompareRegisters(R6, ZR);
2559 __ b(&stepping, NE);
2560 __ Bind(&done_stepping);
2561 }
2562#endif
2563
2564 Label not_smi_or_overflow;
2565 if (kind != Token::kILLEGAL) {
2566 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2567 }
2568 __ Bind(&not_smi_or_overflow);
2569
2570 __ Comment("Extract ICData initial values and receiver cid");
2571 // R5: IC data object (preserved).
2572 __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset());
2573 // R6: ic_data_array with check entries: classes and target functions.
2575 // R6: points directly to the first ic data array element.
2576
2577 if (type == kInstanceCall) {
2578 __ LoadTaggedClassIdMayBeSmi(R3, R0);
2579 __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
2581 if (num_args == 2) {
2582 __ LoadCompressedSmiFieldFromOffset(
2584 __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
2585 __ sub(R7, R7, Operand(2));
2586 // R1 <- [SP + (R7 << 3)]
2587 __ ldr(R1, Address(SP, R7, UXTX, Address::Scaled));
2588 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2589 }
2590 } else {
2591 __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
2593 // Get the receiver's class ID (first read number of arguments from
2594 // arguments descriptor array and then access the receiver from the stack).
2595 __ LoadCompressedSmiFieldFromOffset(
2597 __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
2598 __ sub(R7, R7, Operand(1));
2599 // R0 <- [SP + (R7 << 3)]
2600 __ ldr(R0, Address(SP, R7, UXTX, Address::Scaled));
2601 __ LoadTaggedClassIdMayBeSmi(R3, R0);
2602 if (num_args == 2) {
2603 __ AddImmediate(R1, R7, -1);
2604 // R1 <- [SP + (R1 << 3)]
2605 __ ldr(R1, Address(SP, R1, UXTX, Address::Scaled));
2606 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2607 }
2608 }
2609 // R3: first argument class ID as Smi.
2610 // R1: second argument class ID as Smi.
2611 // R4: args descriptor
2612
2613 // Unroll the loop more for the generic stub, since it is generated only once.
2614 const bool optimize = kind == Token::kILLEGAL;
2615
2616 // Loop that checks if there is an IC data match.
2617 Label loop, found, miss;
2618 __ Comment("ICData loop");
2619
2620 __ Bind(&loop);
2621 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2622 Label update;
2623
2624 __ LoadCompressedSmiFromOffset(R2, R6, 0);
2625 __ CompareObjectRegisters(R3, R2); // Class id match?
2626 if (num_args == 2) {
2627 __ b(&update, NE); // Continue.
2628 __ LoadCompressedSmiFromOffset(R2, R6, target::kCompressedWordSize);
2629 __ CompareObjectRegisters(R1, R2); // Class id match?
2630 }
2631 __ b(&found, EQ); // Break.
2632
2633 __ Bind(&update);
2634
2635 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2636 num_args, exactness == kCheckExactness) *
2638 __ AddImmediate(R6, entry_size); // Next entry.
2639
2640 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
2641 if (unroll == 0) {
2642 __ b(&loop, NE);
2643 } else {
2644 __ b(&miss, EQ);
2645 }
2646 }
2647
2648 __ Bind(&miss);
2649 __ Comment("IC miss");
2650
2651 // Compute address of arguments.
2652 __ LoadCompressedSmiFieldFromOffset(
2654 __ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
2655 __ sub(R7, R7, Operand(1));
2656 // R7: argument_count - 1 (untagged).
2657 // R7 <- SP + (R7 << 3)
2658 __ add(R7, SP, Operand(R7, UXTX, 3)); // R7 is Untagged.
2659 // R7: address of receiver.
2660 // Create a stub frame as we are pushing some objects on the stack before
2661 // calling into the runtime.
2662 __ EnterStubFrame();
2663 // Preserve IC data object and arguments descriptor array and
2664 // setup space on stack for result (target code object).
2665 __ Push(ARGS_DESC_REG); // Preserve arguments descriptor array.
2666 __ Push(R5); // Preserve IC Data.
2667 if (save_entry_point) {
2668 __ SmiTag(R8);
2669 __ Push(R8);
2670 }
2671 // Set up space on stack for the result (target code object).
2672 __ Push(ZR);
2673 // Push call arguments.
2674 for (intptr_t i = 0; i < num_args; i++) {
2675 __ LoadFromOffset(TMP, R7, -i * target::kWordSize);
2676 __ Push(TMP);
2677 }
2678 // Pass IC data object.
2679 __ Push(R5);
2680 __ CallRuntime(handle_ic_miss, num_args + 1);
2681 // Remove the call arguments pushed earlier, including the IC data object.
2682 __ Drop(num_args + 1);
2683 // Pop returned function object into R0.
2684 // Restore arguments descriptor array and IC data array.
2685 __ Pop(FUNCTION_REG); // Pop returned function object into R0.
2686 if (save_entry_point) {
2687 __ Pop(R8);
2688 __ SmiUntag(R8);
2689 }
2690 __ Pop(R5); // Restore IC Data.
2691 __ Pop(ARGS_DESC_REG); // Restore arguments descriptor array.
2692 __ RestoreCodePointer();
2693 __ LeaveStubFrame();
2694 Label call_target_function;
2695 if (FLAG_precompiled_mode) {
2696 GenerateDispatcherCode(assembler, &call_target_function);
2697 } else {
2698 __ b(&call_target_function);
2699 }
2700
2701 __ Bind(&found);
2702 // R6: pointer to an IC data check group.
2703 const intptr_t target_offset =
2705 const intptr_t count_offset =
2707 const intptr_t exactness_offset =
2709
2710 Label call_target_function_through_unchecked_entry;
2711 if (exactness == kCheckExactness) {
2712 Label exactness_ok;
2713 ASSERT(num_args == 1);
2714 __ LoadCompressedSmi(R1, Address(R6, exactness_offset));
2715 __ CompareImmediate(
2716 R1,
2719 kObjectBytes);
2720 __ BranchIf(LESS, &exactness_ok);
2721 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2722
2723 // Check trivial exactness.
2724 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2725 // because we only emit calls to this stub when it is not null.
2726 __ LoadCompressed(
2728 __ LoadCompressed(R2, FieldAddress(R2, target::Type::arguments_offset()));
2729 // R1 contains the offset to the type arguments in words as a Smi, hence the
2730 // TIMES_COMPRESSED_HALF_WORD_SIZE scaling. R0 is guaranteed to be non-Smi
2731 // because it is expected to have type arguments.
2732#if defined(DART_COMPRESSED_POINTERS)
2733 __ sxtw(R1, R1);
2734#endif
2735 __ LoadIndexedPayload(R3, R0, 0, R1, TIMES_COMPRESSED_HALF_WORD_SIZE,
2736 kObjectBytes);
2737 __ CompareObjectRegisters(R2, R3);
2738 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2739
2740 // Update exactness state (not-exact anymore).
2741 __ LoadImmediate(
2743 __ StoreToOffset(R1, R6, exactness_offset, kObjectBytes);
2744 __ Bind(&exactness_ok);
2745 }
2746 __ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
2747
2748 if (FLAG_optimization_counter_threshold >= 0) {
2749 __ Comment("Update caller's counter");
2750 __ LoadCompressedSmiFromOffset(R1, R6, count_offset);
2751 // Ignore overflow.
2752 __ add(R1, R1, Operand(target::ToRawSmi(1)), kObjectBytes);
2753 __ StoreToOffset(R1, R6, count_offset, kObjectBytes);
2754 }
2755
2756 __ Comment("Call target");
2757 __ Bind(&call_target_function);
2758 // R0: target function.
2759 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2761 if (save_entry_point) {
2762 __ add(R2, FUNCTION_REG, Operand(R8));
2763 __ ldr(R2, Address(R2, 0));
2764 } else {
2765 __ LoadFieldFromOffset(R2, FUNCTION_REG,
2767 }
2768 __ br(R2);
2769
2770 if (exactness == kCheckExactness) {
2771 __ Bind(&call_target_function_through_unchecked_entry);
2772 if (FLAG_optimization_counter_threshold >= 0) {
2773 __ Comment("Update ICData counter");
2774 __ LoadCompressedSmiFromOffset(R1, R6, count_offset);
2775 // Ignore overflow.
2776 __ add(R1, R1, Operand(target::ToRawSmi(1)), kObjectBytes);
2777 __ StoreToOffset(R1, R6, count_offset, kObjectBytes);
2778 }
2779 __ Comment("Call target (via unchecked entry point)");
2780 __ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
2781 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2783 __ LoadFieldFromOffset(
2786 __ br(R2);
2787 }
2788
2789#if !defined(PRODUCT)
2790 if (optimized == kUnoptimized) {
2791 __ Bind(&stepping);
2792 __ EnterStubFrame();
2793 if (type == kInstanceCall) {
2794 __ Push(R0); // Preserve receiver.
2795 }
2796 if (save_entry_point) {
2797 __ SmiTag(R8);
2798 __ Push(R8);
2799 }
2800 __ Push(R5); // Preserve IC data.
2801 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2802 __ Pop(R5);
2803 if (save_entry_point) {
2804 __ Pop(R8);
2805 __ SmiUntag(R8);
2806 }
2807 if (type == kInstanceCall) {
2808 __ Pop(R0);
2809 }
2810 __ RestoreCodePointer();
2811 __ LeaveStubFrame();
2812 __ b(&done_stepping);
2813 }
2814#endif
2815}
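
// Editor's note: an illustrative C++ rendering of the IC-data scan loop in
// GenerateNArgsCheckInlineCacheStub above, simplified to the one-argument
// case. The entry layout and the illegal-cid sentinel are assumptions for the
// sketch; the real entries are Smi-encoded in a compressed array.
struct ExampleICEntry {
  intptr_t cid;     // receiver class id
  intptr_t target;  // target function
  intptr_t count;   // call count
};

static inline const ExampleICEntry* ExampleFindICEntry(
    const ExampleICEntry* entries,
    intptr_t receiver_cid,
    intptr_t illegal_cid_sentinel) {
  for (const ExampleICEntry* entry = entries;; ++entry) {
    if (entry->cid == receiver_cid) return entry;            // hit
    if (entry->cid == illegal_cid_sentinel) return nullptr;  // miss: go slow
  }
}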
2816
2817// R0: receiver
2818// R5: ICData
2819// LR: return address
2820void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2822 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2824}
2825
2826// R0: receiver
2827// R5: ICData
2828// LR: return address
2829void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2831 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2833}
2834
2835// R0: receiver
2836// R5: ICData
2837// LR: return address
2838void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2840 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2842}
2843
2844// R0: receiver
2845// R5: ICData
2846// LR: return address
2847void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2849 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2851}
2852
2853// R0: receiver
2854// R5: ICData
2855// LR: return address
2856void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2858 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2860}
2861
2862// R0: receiver
2863// R5: ICData
2864// LR: return address
2865void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2867 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2869}
2870
2871// R0: receiver
2872// R5: ICData
2873// R6: Function
2874// LR: return address
2875void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2877 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2879}
2880
2881// R0: receiver
2882// R5: ICData
2883// R6: Function
2884// LR: return address
2885void StubCodeCompiler::
2886 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2888 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2890}
2891
2892// R0: receiver
2893// R5: ICData
2894// R6: Function
2895// LR: return address
2896void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2898 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2900}
2901
2902// R5: ICData
2903// LR: return address
2904void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2905 GenerateRecordEntryPoint(assembler);
2906 GenerateUsageCounterIncrement(/* scratch */ R6);
2907#if defined(DEBUG)
2908 {
2909 Label ok;
2910 // Check that the IC data array has NumArgsTested() == 0.
2911 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2912 __ LoadFromOffset(R6, R5,
2915 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2916 __ andi(R6, R6, Immediate(target::ICData::NumArgsTestedMask()));
2917 __ CompareImmediate(R6, 0);
2918 __ b(&ok, EQ);
2919 __ Stop("Incorrect IC data for unoptimized static call");
2920 __ Bind(&ok);
2921 }
2922#endif // DEBUG
2923
2924 // Check single stepping.
2925#if !defined(PRODUCT)
2926 Label stepping, done_stepping;
2927 __ LoadIsolate(R6);
2928 __ LoadFromOffset(R6, R6, target::Isolate::single_step_offset(),
2930 __ CompareImmediate(R6, 0);
2931 __ b(&stepping, NE);
2932 __ Bind(&done_stepping);
2933#endif
2934
2935 // R5: IC data object (preserved).
2936 __ LoadFieldFromOffset(R6, R5, target::ICData::entries_offset());
2937 // R6: ic_data_array with entries: target functions and count.
2939 // R6: points directly to the first ic data array element.
2940 const intptr_t target_offset =
2942 const intptr_t count_offset =
2944
2945 if (FLAG_optimization_counter_threshold >= 0) {
2946 // Increment count for this call, ignore overflow.
2947 __ LoadCompressedSmiFromOffset(R1, R6, count_offset);
2948 __ adds(R1, R1, Operand(target::ToRawSmi(1)), kObjectBytes);
2949 __ StoreToOffset(R1, R6, count_offset, kObjectBytes);
2950 }
2951
2952 // Load arguments descriptor into R4.
2953 __ LoadFieldFromOffset(ARGS_DESC_REG, R5,
2955
2956 // Get function and call it, if possible.
2957 __ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
2958 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
2960 __ add(R2, FUNCTION_REG, Operand(R8));
2961 __ ldr(R2, Address(R2, 0));
2962 __ br(R2);
2963
2964#if !defined(PRODUCT)
2965 __ Bind(&stepping);
2966 __ EnterStubFrame();
2967 __ Push(R5); // Preserve IC data.
2968 __ SmiTag(R8);
2969 __ Push(R8);
2970 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2971 __ Pop(R8);
2972 __ SmiUntag(R8);
2973 __ Pop(R5);
2974 __ RestoreCodePointer();
2975 __ LeaveStubFrame();
2976 __ b(&done_stepping);
2977#endif
2978}
2979
2980// R5: ICData
2981// LR: return address
2982void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2983 GenerateUsageCounterIncrement(/* scratch */ R6);
2984 GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
2985 Token::kILLEGAL, kUnoptimized, kStaticCall,
2987}
2988
2989// R5: ICData
2990// LR: return address
2991void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2992 GenerateUsageCounterIncrement(/* scratch */ R6);
2994 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2996}
2997
2998// Stub for compiling a function and jumping to the compiled code.
2999// ARGS_DESC_REG: Arguments descriptor.
3000// FUNCTION_REG: Function.
3001void StubCodeCompiler::GenerateLazyCompileStub() {
3002 // Preserve arg desc.
3003 __ EnterStubFrame();
3004 __ Push(ARGS_DESC_REG); // Save arg. desc.
3005 __ Push(FUNCTION_REG); // Pass function.
3006 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
3007 __ Pop(FUNCTION_REG); // Restore function.
3008 __ Pop(ARGS_DESC_REG); // Restore arg desc.
3009 __ LeaveStubFrame();
3010
3011 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
3013 __ LoadFieldFromOffset(R2, FUNCTION_REG,
3015 __ br(R2);
3016}
3017
3018// R5: Contains an ICData.
3019void StubCodeCompiler::GenerateICCallBreakpointStub() {
3020#if defined(PRODUCT)
3021 __ Stop("No debugging in PRODUCT mode");
3022#else
3023 __ EnterStubFrame();
3024 __ Push(R0); // Preserve receiver.
3025 __ Push(R5); // Preserve IC data.
3026 __ Push(ZR); // Space for result.
3027 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
3028 __ Pop(CODE_REG); // Original stub.
3029 __ Pop(R5); // Restore IC data.
3030 __ Pop(R0); // Restore receiver.
3031 __ LeaveStubFrame();
3032 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
3033 __ br(TMP);
3034#endif // defined(PRODUCT)
3035}
3036
3037void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
3038#if defined(PRODUCT)
3039 __ Stop("No debugging in PRODUCT mode");
3040#else
3041 __ EnterStubFrame();
3042 __ Push(R5); // Preserve IC data.
3043 __ Push(ZR); // Space for result.
3044 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
3045 __ Pop(CODE_REG); // Original stub.
3046 __ Pop(R5); // Restore IC data.
3047 __ LeaveStubFrame();
3048 __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
3049 __ br(TMP);
3050#endif // defined(PRODUCT)
3051}
3052
3053void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
3054#if defined(PRODUCT)
3055 __ Stop("No debugging in PRODUCT mode");
3056#else
3057 __ EnterStubFrame();
3058 __ Push(ZR); // Space for result.
3059 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
3060 __ Pop(CODE_REG);
3061 __ LeaveStubFrame();
3062 __ LoadFieldFromOffset(R0, CODE_REG, target::Code::entry_point_offset());
3063 __ br(R0);
3064#endif // defined(PRODUCT)
3065}
3066
3067// Called only from unoptimized code. All relevant registers have been saved.
3068void StubCodeCompiler::GenerateDebugStepCheckStub() {
3069#if defined(PRODUCT)
3070 __ Stop("No debugging in PRODUCT mode");
3071#else
3072 // Check single stepping.
3073 Label stepping, done_stepping;
3074 __ LoadIsolate(R1);
3075 __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(),
3077 __ CompareImmediate(R1, 0);
3078 __ b(&stepping, NE);
3079 __ Bind(&done_stepping);
3080 __ ret();
3081
3082 __ Bind(&stepping);
3083 __ EnterStubFrame();
3084 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3085 __ LeaveStubFrame();
3086 __ b(&done_stepping);
3087#endif // defined(PRODUCT)
3088}
3089
3090// Used to check class and type arguments. Arguments passed in registers:
3091//
3092// Inputs (all preserved, mostly from TypeTestABI struct):
3093// - kSubtypeTestCacheReg: UntaggedSubtypeTestCache
3094// - kInstanceReg: instance to test against.
3095// - kDstTypeReg: destination type (for n>=7).
3096// - kInstantiatorTypeArgumentsReg: instantiator type arguments (for n>=3).
3097// - kFunctionTypeArgumentsReg: function type arguments (for n>=4).
3098// - LR: return address.
3099//
3100// Outputs (from TypeTestABI struct):
3101// - kSubtypeTestCacheResultReg: the cached result, or null if not found.
3102void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
3103 int n) {
3104 ASSERT(n >= 1);
3106 // If we need the parent function type arguments for a closure, we also need
3107 // the delayed type arguments, so this case will never happen.
3108 ASSERT(n != 5);
3109
3110 // We could initialize kSubtypeTestCacheResultReg with null and use that as
3111 // the null register up until exit, which means we'd just need to return
3112 // without setting it in the not_found case.
3113 //
3114 // However, that would mean the expense of keeping another register live
3115 // across the loop to hold the cache entry address, and the not_found case
3116 // means we're going to runtime, so optimize for the found case instead.
3117 //
3118 // Thus, we use it to store the current cache entry, since it's distinct from
3119 // all the preserved input registers and the scratch register, and the last
3120 // use of the current cache entry is to set kSubtypeTestCacheResultReg.
3122
3123 Label not_found;
3124 GenerateSubtypeTestCacheSearch(
3125 assembler, n, NULL_REG, kCacheArrayReg,
3133 [&](Assembler* assembler, int n) {
3134 __ LoadCompressed(
3136 Address(kCacheArrayReg, target::kCompressedWordSize *
3138 __ Ret();
3139 },
3140 [&](Assembler* assembler, int n) {
3142 __ Ret();
3143 });
3144}
3145
3146void StubCodeCompiler::GenerateGetCStackPointerStub() {
3147 __ mov(R0, CSP);
3148 __ ret();
3149}
3150
3151// Jump to a frame on the call stack.
3152// LR: return address.
3153// R0: program_counter.
3154// R1: stack_pointer.
3155// R2: frame_pointer.
3156// R3: thread.
3157// Does not return.
3158//
3159// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
3160void StubCodeCompiler::GenerateJumpToFrameStub() {
3163 __ set_lr_state(compiler::LRState::Clobbered());
3164 __ mov(CALLEE_SAVED_TEMP, R0); // Program counter.
3165 __ mov(SP, R1); // Stack pointer.
3166 __ mov(FP, R2); // Frame_pointer.
3167 __ mov(THR, R3);
3168 __ SetupCSPFromThread(THR);
3169#if defined(DART_TARGET_OS_FUCHSIA)
3170 // We need to restore the shadow call stack pointer like longjmp would,
3171 // effectively popping all the return addresses between the Dart exit frame
3172 // and Exceptions::JumpToFrame, otherwise the shadow call stack might
3173 // eventually overflow.
3175#elif defined(USING_SHADOW_CALL_STACK)
3176#error Unimplemented
3177#endif
3178 Label exit_through_non_ffi;
3179 Register tmp1 = R0, tmp2 = R1;
3180 // Check if we exited generated code through FFI. If so, do the transition
3181 // here: normally runtime calls transition back to generated code via the
3182 // destructor of TransitionGeneratedToVM/Native, which is part of the runtime
3183 // boilerplate (see DEFINE_RUNTIME_ENTRY_IMPL in runtime_entry.h). FFI calls
3184 // don't have this boilerplate or this stack resource, so they have to
3185 // transition explicitly.
3186 __ LoadFromOffset(tmp1, THR,
3188 __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
3189 __ cmp(tmp1, Operand(tmp2));
3190 __ b(&exit_through_non_ffi, NE);
3191 __ TransitionNativeToGenerated(tmp1, /*leave_safepoint=*/true,
3192 /*ignore_unwind_in_progress=*/true);
3193 __ Bind(&exit_through_non_ffi);
3194
3195 // Refresh pinned registers (write barrier mask, null, dispatch table, etc).
3196 __ RestorePinnedRegisters();
3197 // Set the tag.
3198 __ LoadImmediate(R2, VMTag::kDartTagId);
3199 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
3200 // Clear top exit frame.
3202 // Restore the pool pointer.
3203 __ RestoreCodePointer();
3204 if (FLAG_precompiled_mode) {
3205 __ SetupGlobalPoolAndDispatchTable();
3206 } else {
3207 __ LoadPoolPointer();
3208 }
3209 __ ret(CALLEE_SAVED_TEMP); // Jump to continuation point.
3210}
3211
3212// Run an exception handler. Execution comes from JumpToFrame
3213// stub or from the simulator.
3214//
3215// The arguments are stored in the Thread object.
3216// Does not return.
3217void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
3218 WRITES_RETURN_ADDRESS_TO_LR(
3219 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
3220
3221 word offset_from_thread = 0;
3222 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
3223 ASSERT(ok);
3224 __ LoadFromOffset(R2, THR, offset_from_thread);
3225
3226 // Exception object.
3229
3230 // StackTrace object.
3233
3234 __ ret(); // Jump to the exception handler code.
3235}
3236
3237// Deoptimize a frame on the call stack before rewinding.
3238// The arguments are stored in the Thread object.
3239// No result.
3240void StubCodeCompiler::GenerateDeoptForRewindStub() {
3241 // Push zap value instead of CODE_REG.
3242 __ LoadImmediate(TMP, kZapCodeReg);
3243 __ Push(TMP);
3244
3245 // Load the deopt pc into LR.
3246 WRITES_RETURN_ADDRESS_TO_LR(
3247 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
3248 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
3249
3250 // After we have deoptimized, jump to the correct frame.
3251 __ EnterStubFrame();
3252 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
3253 __ LeaveStubFrame();
3254 __ brk(0);
3255}
3256
3257// Calls into the runtime to optimize the given function.
3258// R6: function to be re-optimized.
3259// ARGS_DESC_REG: argument descriptor (preserved).
3260void StubCodeCompiler::GenerateOptimizeFunctionStub() {
3262 __ EnterStubFrame();
3263 __ Push(ARGS_DESC_REG);
3264 // Set up space on stack for the return value.
3265 __ Push(ZR);
3266 __ Push(R6);
3267 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
3268 __ Pop(R0); // Discard argument.
3269 __ Pop(FUNCTION_REG); // Get Function object
3270 __ Pop(ARGS_DESC_REG); // Restore argument descriptor.
3271 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
3273 __ LoadFieldFromOffset(R1, FUNCTION_REG,
3275 __ LeaveStubFrame();
3276 __ br(R1);
3277 __ brk(0);
3278}
3279
3280// Performs an identity check (are the object references identical?) with
3281// special handling for boxed numbers, and returns with the Z flag set iff
3282// left and right are identical.
3283static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
3284 const Register left,
3285 const Register right) {
3286 Label reference_compare, check_mint;
3287 // If any of the arguments is Smi do reference compare.
3288 // Note: A Mint cannot contain a value that would fit in Smi.
3289 __ BranchIfSmi(left, &reference_compare);
3290 __ BranchIfSmi(right, &reference_compare);
3291
3292 // Value compare for two doubles.
3293 __ CompareClassId(left, kDoubleCid);
3294 __ b(&check_mint, NE);
3295 __ CompareClassId(right, kDoubleCid);
3296 __ b(&reference_compare, NE); // Do not branch directly to ret! See below.
3297
3298 // Double values bitwise compare.
3299 __ LoadFieldFromOffset(left, left, target::Double::value_offset());
3300 __ LoadFieldFromOffset(right, right, target::Double::value_offset());
3301 __ CompareRegisters(left, right);
3302 __ ret();
3303
3304 __ Bind(&check_mint);
3305 __ CompareClassId(left, kMintCid);
3306 __ b(&reference_compare, NE);
3307 __ CompareClassId(right, kMintCid);
3308 __ b(&reference_compare, NE); // Do not branch directly to ret! See below.
3309 __ LoadFieldFromOffset(left, left, target::Mint::value_offset());
3310 __ LoadFieldFromOffset(right, right, target::Mint::value_offset());
3311 __ CompareRegisters(left, right);
3312 __ ret();
3313
3314 __ Bind(&reference_compare);
3315 __ CompareObjectRegisters(left, right);
3316 // None of the branches above go directly here to avoid generating a
3317 // conditional branch to a ret instruction.
3318 // This is an attempt to work around a possible CPU bug on the Exynos 2100 SoC.
3319 // See https://github.com/flutter/flutter/issues/88261
3320 __ ret();
3321}
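
// Editor's note: a sketch of the decision structure implemented by
// GenerateIdenticalWithNumberCheckStub above. The class checks and unboxing
// are left abstract; the point is that boxed doubles are compared by raw bits
// and boxed mints by value before falling back to reference identity.
static inline bool ExampleIdenticalWithNumberCheck(bool both_are_doubles,
                                                   uint64_t left_double_bits,
                                                   uint64_t right_double_bits,
                                                   bool both_are_mints,
                                                   int64_t left_mint_value,
                                                   int64_t right_mint_value,
                                                   uintptr_t left_ref,
                                                   uintptr_t right_ref) {
  if (both_are_doubles) return left_double_bits == right_double_bits;
  if (both_are_mints) return left_mint_value == right_mint_value;
  return left_ref == right_ref;  // Plain reference (pointer) identity.
}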
3322
3323// Called only from unoptimized code. All relevant registers have been saved.
3324// LR: return address.
3325// SP + 8: left operand.
3326// SP + 0: right operand.
3327// Return Zero condition flag set if equal.
3328void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
3329#if !defined(PRODUCT)
3330 // Check single stepping.
3331 Label stepping, done_stepping;
3332 __ LoadIsolate(R1);
3333 __ LoadFromOffset(R1, R1, target::Isolate::single_step_offset(),
3335 __ CompareImmediate(R1, 0);
3336 __ b(&stepping, NE);
3337 __ Bind(&done_stepping);
3338#endif
3339
3340 const Register left = R1;
3341 const Register right = R0;
3342 __ LoadFromOffset(left, SP, 1 * target::kWordSize);
3343 __ LoadFromOffset(right, SP, 0 * target::kWordSize);
3344 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3345
3346#if !defined(PRODUCT)
3347 __ Bind(&stepping);
3348 __ EnterStubFrame();
3349 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3350 __ RestoreCodePointer();
3351 __ LeaveStubFrame();
3352 __ b(&done_stepping);
3353#endif
3354}
3355
3356// Called from optimized code only.
3357// LR: return address.
3358// SP + 8: left operand.
3359// SP + 0: right operand.
3360// Return Zero condition flag set if equal.
3361void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
3362 const Register left = R1;
3363 const Register right = R0;
3364 __ LoadFromOffset(left, SP, 1 * target::kWordSize);
3365 __ LoadFromOffset(right, SP, 0 * target::kWordSize);
3366 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3367}
3368
3369// Called from megamorphic call sites.
3370// R0: receiver (passed to target)
3371// IC_DATA_REG: MegamorphicCache (preserved)
3372// Passed to target:
3373// FUNCTION_REG: target function
3374// CODE_REG: target Code
3375// ARGS_DESC_REG: arguments descriptor
3376void StubCodeCompiler::GenerateMegamorphicCallStub() {
3377 // Jump if receiver is a smi.
3378 Label smi_case;
3379 __ BranchIfSmi(R0, &smi_case);
3380
3381 // Loads the cid of the object.
3382 __ LoadClassId(R8, R0);
3383
3384 Label cid_loaded;
3385 __ Bind(&cid_loaded);
3386 __ ldr(R2,
3388 __ ldr(R1,
3390 // R2: cache buckets array.
3391 // R1: mask as a smi.
3392
3393 // Make the cid into a smi.
3394 __ SmiTag(R8);
3395 // R8: class ID of the receiver (smi).
3396
3397 // Compute the table index.
3399 // Use lsl and sub to multiply with 7 == 8 - 1.
3400 __ LslImmediate(R3, R8, 3);
3401 __ sub(R3, R3, Operand(R8));
3402 // R3: probe.
3403 Label loop;
3404 __ Bind(&loop);
3405 __ and_(R3, R3, Operand(R1));
3406
3407 const intptr_t base = target::Array::data_offset();
3408 // R3 is Smi-tagged; entries are 2 compressed words, so scale by kCompressedWordSizeLog2.
3409 __ add(TMP, R2, Operand(R3, LSL, kCompressedWordSizeLog2));
3410 __ LoadCompressedSmiFieldFromOffset(R6, TMP, base);
3411 Label probe_failed;
3412 __ CompareObjectRegisters(R6, R8);
3413 __ b(&probe_failed, NE);
3414
3415 Label load_target;
3416 __ Bind(&load_target);
3417 // Call the target found in the cache. For a class id match, this is a
3418 // proper target for the given name and arguments descriptor. If the
3419 // illegal class id was found, the target is a cache miss handler that can
3420 // be invoked as a normal Dart function.
3421 __ LoadCompressed(FUNCTION_REG,
3422 FieldAddress(TMP, base + target::kCompressedWordSize));
3423 __ ldr(R1,
3425 __ ldr(ARGS_DESC_REG,
3426 FieldAddress(IC_DATA_REG,
3428 if (!FLAG_precompiled_mode) {
3429 __ LoadCompressed(
3431 }
3432 __ br(R1);
3433
3434 // Probe failed, check if it is a miss.
3435 __ Bind(&probe_failed);
3436 ASSERT(kIllegalCid == 0);
3437 __ tst(R6, Operand(R6), kObjectBytes);
3438 Label miss;
3439 __ b(&miss, EQ); // branch if miss.
3440
3441 // Try the next entry in the table.
3442 __ AddImmediate(R3, target::ToRawSmi(1));
3443 __ b(&loop);
3444
3445 // Load cid for the Smi case.
3446 __ Bind(&smi_case);
3447 __ LoadImmediate(R8, kSmiCid);
3448 __ b(&cid_loaded);
3449
3450 __ Bind(&miss);
3451 GenerateSwitchableCallMissStub();
3452}
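
// Editor's note: an illustrative sketch of the megamorphic cache probe above.
// The table index starts at cid * 7 (computed as (cid << 3) - cid), is wrapped
// by the power-of-two mask, and linear probing continues until the class id
// matches or the illegal-cid (0) sentinel is hit. Bucket layout is simplified
// and the Smi tagging of the index and mask is ignored here.
static inline intptr_t ExampleMegamorphicProbe(const intptr_t* bucket_cids,
                                               intptr_t mask,
                                               intptr_t cid) {
  intptr_t probe = ((cid << 3) - cid) & mask;  // multiply by 7, then wrap
  while (true) {
    if (bucket_cids[probe] == cid) return probe;  // hit: call this target
    if (bucket_cids[probe] == 0) return -1;       // kIllegalCid == 0: miss
    probe = (probe + 1) & mask;                   // linear probing
  }
}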
3453
3454// Input:
3455// R0 - receiver
3456// IC_DATA_REG - icdata
3457void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3458 Label loop, found, miss;
3459 __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
3460 __ ldr(ARGS_DESC_REG,
3461 FieldAddress(IC_DATA_REG,
3464 // R8: first IC entry
3465 __ LoadTaggedClassIdMayBeSmi(R1, R0);
3466 // R1: receiver cid as Smi
3467
3468 __ Bind(&loop);
3469 __ LoadCompressedSmi(R2, Address(R8, 0));
3470 __ cmp(R1, Operand(R2), kObjectBytes);
3471 __ b(&found, EQ);
3472 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid), kObjectBytes);
3473 __ b(&miss, EQ);
3474
3475 const intptr_t entry_length =
3476 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3478 __ AddImmediate(R8, entry_length); // Next entry.
3479 __ b(&loop);
3480
3481 __ Bind(&found);
3482 if (FLAG_precompiled_mode) {
3483 const intptr_t entry_offset =
3485 __ LoadCompressed(R1, Address(R8, entry_offset));
3486 __ ldr(R1, FieldAddress(R1, target::Function::entry_point_offset()));
3487 } else {
3488 const intptr_t code_offset =
3490 __ LoadCompressed(CODE_REG, Address(R8, code_offset));
3491 __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3492 }
3493 __ br(R1);
3494
3495 __ Bind(&miss);
3497 __ br(R1);
3498}
3499
3500// Implement the monomorphic entry check for call-sites where the receiver
3501// might be a Smi.
3502//
3503// R0: receiver
3504// R5: MonomorphicSmiableCall object
3505//
3506// R1: clobbered
3507void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3508 Label miss;
3509 __ LoadClassIdMayBeSmi(IP0, R0);
3510
3511 // Note: this stub is only used in AOT mode, hence the direct (bare) call.
3512 __ LoadField(
3513 IP1,
3515 __ LoadField(
3516 R1,
3518 __ cmp(IP0, Operand(IP1));
3519 __ b(&miss, NE);
3520 __ br(R1);
3521
3522 __ Bind(&miss);
3523 __ ldr(IP0,
3525 __ br(IP0);
3526}
3527
3528// Called from switchable IC calls.
3529// R0: receiver
3530void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3531 __ ldr(CODE_REG,
3533 __ EnterStubFrame();
3534 __ Push(R0); // Preserve receiver.
3535
3536 __ Push(ZR); // Result slot.
3537 __ Push(ZR); // Arg0: stub out.
3538 __ Push(R0); // Arg1: Receiver
3539 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3540 __ Drop(1);
3541 __ Pop(CODE_REG); // result = stub
3542 __ Pop(R5); // result = IC
3543
3544 __ Pop(R0); // Restore receiver.
3545 __ LeaveStubFrame();
3546
3547 __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3548 CodeEntryKind::kNormal)));
3549 __ br(R1);
3550}
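
// Illustrative sketch (not part of this file): the stack slots the miss stub
// above sets up around kSwitchableCallMissRuntimeEntry, from oldest push to
// newest. After the call the stub drops arg1, pops arg0 into CODE_REG, pops
// the result slot into R5 and restores the receiver. Field names below are
// descriptive stand-ins, not VM names.
struct SwitchableCallMissFrame {
  void* preserved_receiver;  // Push(R0): restored into R0 after the call
  void* result_slot;         // Push(ZR): runtime writes new call-site data (-> R5)
  void* arg0_stub_out;       // Push(ZR): runtime writes stub to continue in (-> CODE_REG)
  void* arg1_receiver;       // Push(R0): argument to the runtime entry, dropped after
};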
3551
3552// Called from switchable IC calls.
3553// R0: receiver
3554// R5: SingleTargetCache
3555// Passed to target:
3556// CODE_REG: target Code object
3557void StubCodeCompiler::GenerateSingleTargetCallStub() {
3558 Label miss;
3559 __ LoadClassIdMayBeSmi(R1, R0);
3560 __ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
3561 kUnsignedTwoBytes);
3562 __ ldr(R3, FieldAddress(R5, target::SingleTargetCache::upper_limit_offset()),
3563 kUnsignedTwoBytes);
3564
3565 __ cmp(R1, Operand(R2));
3566 __ b(&miss, LT);
3567 __ cmp(R1, Operand(R3));
3568 __ b(&miss, GT);
3569
3570 __ ldr(R1, FieldAddress(R5, target::SingleTargetCache::entry_point_offset()));
3571 __ ldr(CODE_REG,
3572 FieldAddress(R5, target::SingleTargetCache::target_offset()));
3573 __ br(R1);
3574
3575 __ Bind(&miss);
3576 __ EnterStubFrame();
3577 __ Push(R0); // Preserve receiver.
3578
3579 __ Push(ZR); // Result slot.
3580 __ Push(ZR); // Arg0: Stub out.
3581 __ Push(R0); // Arg1: Receiver
3582 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3583 __ Drop(1);
3584 __ Pop(CODE_REG); // result = stub
3585 __ Pop(R5); // result = IC
3586
3587 __ Pop(R0); // Restore receiver.
3588 __ LeaveStubFrame();
3589
3590 __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3591 CodeEntryKind::kMonomorphic)));
3592 __ br(R1);
3593}
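
// Illustrative sketch (not part of this file): the class-id range test done by
// GenerateSingleTargetCallStub. The struct mirrors the SingleTargetCache
// fields the stub reads; names are hypothetical.
#include <cstdint>

struct SingleTargetCacheModel {
  intptr_t lower_limit;   // smallest class id that hits the cached target
  intptr_t upper_limit;   // largest class id that hits the cached target
  uintptr_t entry_point;  // where to jump on a hit
};

// A hit requires lower_limit <= cid <= upper_limit; anything else takes the
// miss path, which calls kSwitchableCallMissRuntimeEntry as shown above.
static bool HitsSingleTarget(intptr_t receiver_cid,
                             const SingleTargetCacheModel& cache) {
  return receiver_cid >= cache.lower_limit && receiver_cid <= cache.upper_limit;
}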
3594
3595static int GetScaleFactor(intptr_t size) {
3596 switch (size) {
3597 case 1:
3598 return 0;
3599 case 2:
3600 return 1;
3601 case 4:
3602 return 2;
3603 case 8:
3604 return 3;
3605 case 16:
3606 return 4;
3607 }
3608 UNREACHABLE();
3609 return -1;
3610}
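
// Illustrative sketch (not part of this file): the value returned above is
// simply log2 of the (power-of-two) element size, so an 8-byte element type
// gets shift 3 and a list of `length` elements needs `length << 3` payload
// bytes. A standalone equivalent of the mapping:
#include <cstdint>

static int ElementSizeToShift(intptr_t element_size) {
  int shift = 0;
  while ((intptr_t{1} << shift) < element_size) ++shift;  // log2 of a power of two
  return shift;  // 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4
}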
3611
3612 void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3613 const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3614 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3615 const intptr_t scale_shift = GetScaleFactor(element_size);
3616
3619
3620 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3621 Label call_runtime;
3622 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, R2));
3623 __ mov(R2, AllocateTypedDataArrayABI::kLengthReg);
3624 /* Check that length is a positive Smi. */
3625 /* R2: requested array length argument. */
3626 __ BranchIfNotSmi(R2, &call_runtime);
3627 __ SmiUntag(R2);
3628 /* Check for length >= 0 && length <= max_len. */
3629 /* R2: untagged array length. */
3630 __ CompareImmediate(R2, max_len, kObjectBytes);
3631 __ b(&call_runtime, HI);
3632 __ LslImmediate(R2, R2, scale_shift);
3633 const intptr_t fixed_size_plus_alignment_padding =
3634 target::TypedData::HeaderSize() +
3635 target::ObjectAlignment::kObjectAlignment - 1;
3636 __ AddImmediate(R2, fixed_size_plus_alignment_padding);
3637 __ andi(R2, R2,
3638 Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
3639 __ ldr(R0, Address(THR, target::Thread::top_offset()));
3640
3641 /* R2: allocation size. */
3642 __ adds(R1, R0, Operand(R2));
3643 __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
3644
3645 /* Check if the allocation fits into the remaining space. */
3646 /* R0: potential new object start. */
3647 /* R1: potential next object start. */
3648 /* R2: allocation size. */
3649 __ ldr(R6, Address(THR, target::Thread::end_offset()));
3650 __ cmp(R1, Operand(R6));
3651 __ b(&call_runtime, CS);
3652 __ CheckAllocationCanary(R0);
3653
3654 /* Successfully allocated the object(s), now update top to point to */
3655 /* next object start and initialize the object. */
3656 __ str(R1, Address(THR, target::Thread::top_offset()));
3657 __ AddImmediate(R0, kHeapObjectTag);
3658 /* Initialize the tags. */
3659 /* R0: new object start as a tagged pointer. */
3660 /* R1: new object end address. */
3661 /* R2: allocation size. */
3662 {
3663 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
3664 __ LslImmediate(R2, R2,
3665 target::UntaggedObject::kTagBitsSizeTagPos -
3666 target::ObjectAlignment::kObjectAlignmentLog2);
3667 __ csel(R2, ZR, R2, HI);
3668
3669 /* Get the class index and insert it into the tags. */
3670 uword tags =
3671 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3672 __ LoadImmediate(TMP, tags);
3673 __ orr(R2, R2, Operand(TMP));
3674 __ str(R2, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
3675 }
3676 /* Set the length field. */
3677 /* R0: new object start as a tagged pointer. */
3678 /* R1: new object end address. */
3679 __ mov(R2, AllocateTypedDataArrayABI::kLengthReg); /* Array length. */
3680 __ StoreCompressedIntoObjectNoBarrier(
3681 R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R2);
3682 /* Initialize all array elements to 0. */
3683 /* R0: new object start as a tagged pointer. */
3684 /* R1: new object end address. */
3685 /* R2: iterator which initially points to the start of the variable */
3686 /* data area to be initialized. */
3687 __ AddImmediate(R2, R0, target::TypedData::HeaderSize() - 1);
3688 __ StoreInternalPointer(
3689 R0, FieldAddress(R0, target::PointerBase::data_offset()), R2);
3690 Label loop;
3691 __ Bind(&loop);
3693 __ stp(ZR, ZR, Address(R2, 2 * target::kWordSize, Address::PairPostIndex));
3694 __ cmp(R2, Operand(R1));
3695 __ b(&loop, UNSIGNED_LESS);
3696 __ WriteAllocationCanary(R1); // Fix overshoot.
3697
3698 __ Ret();
3699
3700 __ Bind(&call_runtime);
3701 }
3702
3703 __ EnterStubFrame();
3704 __ Push(ZR); // Result slot.
3705 __ PushImmediate(target::ToRawSmi(cid)); // Cid
3706 __ Push(AllocateTypedDataArrayABI::kLengthReg); // Array length
3707 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3708 __ Drop(2); // Drop arguments.
3709 __ Pop(AllocateTypedDataArrayABI::kResultReg);
3710 __ LeaveStubFrame();
3711 __ Ret();
3712}
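
// Illustrative sketch (not part of this file): the allocation-size arithmetic
// performed by the fast path above, in plain C++. kAssumedHeaderSize and
// kAssumedObjectAlignment are placeholders for target::TypedData::HeaderSize()
// and target::ObjectAlignment::kObjectAlignment; the real values are
// target-dependent assumptions here.
#include <cstdint>

constexpr intptr_t kAssumedHeaderSize = 32;       // placeholder header size
constexpr intptr_t kAssumedObjectAlignment = 16;  // placeholder new-space alignment

// Payload bytes = length << scale_shift; add the header plus (alignment - 1),
// then mask the low bits to round the total up to the allocation alignment.
static intptr_t TypedDataAllocationSize(intptr_t untagged_length,
                                        intptr_t scale_shift) {
  intptr_t size = (untagged_length << scale_shift) + kAssumedHeaderSize +
                  kAssumedObjectAlignment - 1;
  return size & ~(kAssumedObjectAlignment - 1);
}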
3713
3714} // namespace compiler
3715
3716} // namespace dart
3717
3718#endif // defined(TARGET_ARCH_ARM64)