Flutter Engine
stub_code_compiler_arm.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/globals.h"
7
8// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
9// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
10#include "vm/compiler/backend/il.h"
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
14#include "vm/compiler/stub_code_compiler.h"
15
16#if defined(TARGET_ARCH_ARM)
17
18#include "vm/class_id.h"
19#include "vm/code_entry_kind.h"
23#include "vm/constants.h"
25#include "vm/instructions.h"
27#include "vm/tags.h"
28
29#define __ assembler->
30
31namespace dart {
32namespace compiler {
33
34// Ensures that [R0] is a new object; if it is not, it is added to the
35// remembered set via a leaf runtime call.
36//
37// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
38// The caller should simply call LeaveStubFrame() and return.
39void StubCodeCompiler::EnsureIsNewOrRemembered() {
40 // If the object is not in an active TLAB, we call a leaf-runtime to add it to
41 // the remembered set and/or deferred marking worklist. This test assumes a
42 // Page's TLAB use is always ascending.
43 Label done;
44 __ AndImmediate(TMP, R0, target::kPageMask);
45 __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
46 __ CompareRegisters(R0, TMP);
47 __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
48
49 {
50 LeafRuntimeScope rt(assembler,
51 /*frame_size=*/0,
52 /*preserve_registers=*/false);
53 // [R0] already contains first argument.
54 __ mov(R1, Operand(THR));
55 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
56 }
57
58 __ Bind(&done);
59}
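// Note: the fast path above relies on a Page's TLAB being used in ascending
// order: an address at or above Page::original_top was bump-allocated by this
// thread after the TLAB was handed out, so the object is new and needs no
// further work, while an address below it may need a remembered-set or
// deferred-marking entry. The allocation stubs below call this helper after
// their runtime fallback so that write-barrier elimination stays valid.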
60
61// Input parameters:
62// LR : return address.
63// SP : address of last argument in argument array.
64// SP + 4*R4 - 4 : address of first argument in argument array.
65// SP + 4*R4 : address of return value.
66// R9 : address of the runtime function to call.
67// R4 : number of arguments to the call.
68void StubCodeCompiler::GenerateCallToRuntimeStub() {
69 const intptr_t thread_offset = target::NativeArguments::thread_offset();
70 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
71 const intptr_t argv_offset = target::NativeArguments::argv_offset();
72 const intptr_t retval_offset = target::NativeArguments::retval_offset();
73
74 __ ldr(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
75 __ EnterStubFrame();
76
77 // Save exit frame information to enable stack walking as we are about
78 // to transition to Dart VM C++ code.
79 __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
80
81 // Mark that the thread exited generated code through a runtime call.
82 __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
83 __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
84
85#if defined(DEBUG)
86 {
87 Label ok;
88 // Check that we are always entering from Dart code.
89 __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
90 __ CompareImmediate(R8, VMTag::kDartTagId);
91 __ b(&ok, EQ);
92 __ Stop("Not coming from Dart code.");
93 __ Bind(&ok);
94 }
95#endif
96
97 // Mark that the thread is executing VM code.
98 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
99
100 // Reserve space for arguments and align frame before entering C++ world.
101 // target::NativeArguments are passed in registers.
102 ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
103 __ ReserveAlignedFrameSpace(0);
104
105 // Pass target::NativeArguments structure by value and call runtime.
106 // Registers R0, R1, R2, and R3 are used.
107
108 ASSERT(thread_offset == 0 * target::kWordSize);
109 // Set thread in NativeArgs.
110 __ mov(R0, Operand(THR));
111
112 ASSERT(argc_tag_offset == 1 * target::kWordSize);
113 __ mov(R1, Operand(R4)); // Set argc in target::NativeArguments.
114
115 ASSERT(argv_offset == 2 * target::kWordSize);
116 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
117 // Set argv in target::NativeArguments.
118 __ AddImmediate(R2,
119 target::frame_layout.param_end_from_fp * target::kWordSize);
120
121 ASSERT(retval_offset == 3 * target::kWordSize);
122 __ add(R3, R2,
123 Operand(target::kWordSize)); // Retval is next to 1st argument.
124
125 // Call runtime or redirection via simulator.
126 __ blx(R9);
127
128 // Mark that the thread is executing Dart code.
129 __ LoadImmediate(R2, VMTag::kDartTagId);
130 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
131
132 // Mark that the thread has not exited generated Dart code.
133 __ LoadImmediate(R2, 0);
134 __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset());
135
136 // Reset exit frame information in Isolate's mutator thread structure.
137 __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
138
139 // Restore the global object pool after returning from runtime (old space is
140 // moving, so the GOP could have been relocated).
141 if (FLAG_precompiled_mode) {
142 __ SetupGlobalPoolAndDispatchTable();
143 }
144
145 __ LeaveStubFrame();
146
147 // The following return can jump to a lazy-deopt stub, which assumes R0
148 // contains a return value and will save it in a GC-visible way. We therefore
149 // have to ensure R0 does not contain any garbage value left from the C
150 // function we called (which has return type "void").
151 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
152 __ LoadImmediate(R0, 0);
153 __ Ret();
154}
155
156void StubCodeCompiler::GenerateSharedStubGeneric(
157 bool save_fpu_registers,
158 intptr_t self_code_stub_offset_from_thread,
159 bool allow_return,
160 std::function<void()> perform_runtime_call) {
161 // We want the saved registers to appear like part of the caller's frame, so
162 // we push them before calling EnterStubFrame.
163 RegisterSet all_registers;
164 all_registers.AddAllNonReservedRegisters(save_fpu_registers);
165
166 // To make the stack map calculation architecture-independent, we do the
167 // same as on Intel.
168 READS_RETURN_ADDRESS_FROM_LR(__ Push(LR));
169 __ PushRegisters(all_registers);
170 __ ldr(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
171 __ EnterStubFrame();
172 perform_runtime_call();
173 if (!allow_return) {
174 __ Breakpoint();
175 return;
176 }
177 __ LeaveStubFrame();
178 __ PopRegisters(all_registers);
179 __ Drop(1); // We use the LR restored via LeaveStubFrame.
180 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR));
181}
182
183void StubCodeCompiler::GenerateSharedStub(
184 bool save_fpu_registers,
185 const RuntimeEntry* target,
186 intptr_t self_code_stub_offset_from_thread,
187 bool allow_return,
188 bool store_runtime_result_in_result_register) {
189 ASSERT(!store_runtime_result_in_result_register || allow_return);
190 auto perform_runtime_call = [&]() {
191 if (store_runtime_result_in_result_register) {
192 // Reserve space for the result on the stack. This needs to be a GC
193 // safe value.
194 __ PushImmediate(Smi::RawValue(0));
195 }
196 __ CallRuntime(*target, /*argument_count=*/0);
197 if (store_runtime_result_in_result_register) {
198 __ PopRegister(R0);
199 __ str(R0,
200 Address(FP, target::kWordSize *
203 }
204 };
205 GenerateSharedStubGeneric(save_fpu_registers,
206 self_code_stub_offset_from_thread, allow_return,
207 perform_runtime_call);
208}
209
210void StubCodeCompiler::GenerateEnterSafepointStub() {
211 RegisterSet all_registers;
212 all_registers.AddAllGeneralRegisters();
213 __ PushRegisters(all_registers);
214
215 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
216 __ ReserveAlignedFrameSpace(0);
217 __ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
218 __ blx(R0);
219 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR), 0));
220
221 __ PopRegisters(all_registers);
222 __ Ret();
223}
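// Note: every general-purpose register is saved around the safepoint
// transition so that callers (e.g. FFI transition code) do not have to treat
// any register as clobbered by entering or leaving a safepoint.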
224
225static void GenerateExitSafepointStubCommon(Assembler* assembler,
226 uword runtime_entry_offset) {
227 RegisterSet all_registers;
228 all_registers.AddAllGeneralRegisters();
229 __ PushRegisters(all_registers);
230
231 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
232 __ ReserveAlignedFrameSpace(0);
233
234 // Set the execution state to VM while waiting for the safepoint to end.
235 // This isn't strictly necessary but enables tests to check that we're not
236 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
237 __ LoadImmediate(R0, target::Thread::vm_execution_state());
238 __ str(R0, Address(THR, target::Thread::execution_state_offset()));
239
240 __ ldr(R0, Address(THR, runtime_entry_offset));
241 __ blx(R0);
242 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR), 0));
243
244 __ PopRegisters(all_registers);
245 __ Ret();
246}
247
248void StubCodeCompiler::GenerateExitSafepointStub() {
249 GenerateExitSafepointStubCommon(
250 assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
251}
252
253void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
254 GenerateExitSafepointStubCommon(
255 assembler,
256 kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
257}
258
259// Call a native function within a safepoint.
260//
261// On entry:
262// Stack: set up for call, incl. alignment
263// R8: target to call
264//
265// On exit:
266// Stack: preserved
267// NOTFP, R4: clobbered, although normally callee-saved
268void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
270
271 // TransitionGeneratedToNative might clobber LR if it takes the slow path.
272 SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R4, Operand(LR)));
273
274 __ LoadImmediate(R9, target::Thread::exit_through_ffi());
275 __ TransitionGeneratedToNative(R8, FPREG, R9 /*volatile*/, NOTFP,
276 /*enter_safepoint=*/true);
277
278 __ blx(R8);
279
280 __ TransitionNativeToGenerated(R9 /*volatile*/, NOTFP,
281 /*exit_safepoint=*/true);
282
283 __ bx(R4);
284}
285
286void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
287 Register dst,
288 Register tmp) {
289 compiler::Label skip_reloc;
290 __ b(&skip_reloc);
291 InsertBSSRelocation(relocation);
292 __ Bind(&skip_reloc);
293
294 // For historical reasons, the PC on ARM points 8 bytes (two instructions)
295 // past the current instruction.
296 __ sub(tmp, PC,
297 compiler::Operand(Instr::kPCReadOffset + compiler::target::kWordSize));
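  // Worked example: if the relocation word emitted above is at address A, this
  // sub instruction is at A + kWordSize; reading PC yields
  // (A + kWordSize) + kPCReadOffset, so subtracting
  // kPCReadOffset + kWordSize leaves exactly A in tmp.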
298
299 // tmp holds the address of the relocation.
300 __ ldr(dst, compiler::Address(tmp));
301
302 // dst holds the relocation itself: tmp - bss_start.
303 // tmp = tmp + (bss_start - tmp) = bss_start
304 __ add(tmp, tmp, compiler::Operand(dst));
305
306 // tmp holds the start of the BSS section.
307 // Load the "get-thread" routine: *bss_start.
308 __ ldr(dst, compiler::Address(tmp));
309}
310
311void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
312 uword function_index,
313 Register dst) {
314 // Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
315 // Note: If the stub was aligned, this could be a single PC relative load.
316
317 // Load a pointer to the beginning of the stub into dst.
318 const intptr_t code_size = __ CodeSize();
319 __ SubImmediate(dst, PC, Instr::kPCReadOffset + code_size);
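  // Note: code_size is the number of bytes already emitted in this stub, so
  // PC - kPCReadOffset is the address of the current instruction and
  // subtracting code_size as well yields the address of the stub's first
  // instruction.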
320
321 // Round dst down to the page size.
322 __ AndImmediate(dst, dst, FfiCallbackMetadata::kPageMask);
323
324 // Load the function from the function table.
325 __ LoadFromOffset(dst, dst,
327}
328
329void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
330#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
331 // TODO(37299): FFI is not supported in SIMARM.
332 __ Breakpoint();
333#else
334 Label body;
335
336 // TMP is volatile and not used for passing any arguments.
339 ++i) {
340 // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
341 // look up the current PC, then jump to the shared section. The PC is offset
342 // by Instr::kPCReadOffset, which is subtracted below.
343 __ mov(TMP, Operand(PC));
344 __ b(&body);
345 }
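  // Note: each trampoline is the same two-instruction pair (mov TMP, PC;
  // b body), so the PC captured in TMP, once corrected for kPCReadOffset
  // below, uniquely identifies which trampoline entry point was invoked and
  // therefore which callback to dispatch.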
346
347 ASSERT(__ CodeSize() ==
348 FfiCallbackMetadata::kNativeCallbackTrampolineSize *
350
351 __ Bind(&body);
352
353 const intptr_t shared_stub_start = __ CodeSize();
354
355 // Save THR (callee-saved), R4 & R5 (temporaries, callee-saved), and LR.
356 COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);
357 SPILLS_LR_TO_FRAME(
358 __ PushList((1 << LR) | (1 << THR) | (1 << R4) | (1 << R5)));
359
360 // The PC is in TMP, but is offset by kPCReadOffset. To get the actual
361 // trampoline entry point we need to subtract that.
362 __ sub(R4, TMP, Operand(Instr::kPCReadOffset));
363
366
367 RegisterSet argument_registers;
368 argument_registers.AddAllArgumentRegisters();
369 __ PushRegisters(argument_registers);
370
371 // Load the thread, verify the callback ID and exit the safepoint.
372 //
373 // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
374 // code size on this shared stub.
375 {
376 __ mov(R0, Operand(R4));
377
378 // We also need to look up the entry point for the trampoline. This is
379 // returned using a pointer passed to the second arg of the C function
380 // below. We aim that pointer at a reserved stack slot.
381 __ sub(SP, SP, Operand(compiler::target::kWordSize));
382 __ mov(R1, Operand(SP));
383
384 // We also need to know if this is a sync or async callback. This is also
385 // returned by pointer.
386 __ sub(SP, SP, Operand(compiler::target::kWordSize));
387 __ mov(R2, Operand(SP));
388
389 __ EnterFrame(1 << FP, 0);
390 __ ReserveAlignedFrameSpace(0);
391
392 GenerateLoadFfiCallbackMetadataRuntimeFunction(
394
395 __ blx(R4);
396 __ mov(THR, Operand(R0));
397
398 __ LeaveFrame(1 << FP);
399
400 // The trampoline type is at the top of the stack. Pop it into R4.
401 __ Pop(R4);
402
403 // Entry point is now at the top of the stack. Pop it into R5.
404 __ Pop(R5);
405 }
406
407 __ PopRegisters(argument_registers);
408
409 Label async_callback;
410 Label done;
411
412 // If GetFfiCallbackMetadata returned a null thread, it means that the async
413 // callback was invoked after it was deleted. In this case, do nothing.
414 __ cmp(THR, Operand(0));
415 __ b(&done, EQ);
416
417 // Check the trampoline type to see how the callback should be invoked.
418 __ cmp(
419 R4,
422
423 // Sync callback. The entry point contains the target function, so just call
424 // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
425 // re-enter it afterwards.
426
427 // On entry to the function, there will be four extra slots on the stack:
428 // saved THR, R4, R5 and the return address. The target will know to skip
429 // them.
430 __ blx(R5);
431
432 // Clobbers R4, R5 and TMP, all saved or volatile.
433 __ EnterFullSafepoint(R4, R5);
434
435 __ b(&done);
436 __ Bind(&async_callback);
437
438 // Async callback. The entrypoint marshals the arguments into a message and
439 // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
440 // entered a temporary isolate, so exit it afterwards.
441
442 // On entry to the function, there will be four extra slots on the stack:
443 // saved THR, R4, R5 and the return address. The target will know to skip
444 // them.
445 __ blx(R5);
446
447 // Exit the temporary isolate.
448 {
449 __ EnterFrame(1 << FP, 0);
450 __ ReserveAlignedFrameSpace(0);
451
452 GenerateLoadFfiCallbackMetadataRuntimeFunction(
454
455 __ blx(R4);
456
457 __ LeaveFrame(1 << FP);
458 }
459
460 __ Bind(&done);
461
462 // Returns.
463 __ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));
464
465 ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
466 FfiCallbackMetadata::kNativeCallbackSharedStubSize);
468
469#if defined(DEBUG)
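  // Pad the rest of the trampoline page with breakpoints so that a stray jump
  // into the padding traps immediately (DEBUG only).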
470 while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
471 __ Breakpoint();
472 }
473#endif
474#endif
475}
476
477void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
478 __ EnterStubFrame();
481 __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
482 // The NullError runtime entry does not return.
483 __ Breakpoint();
484}
485
486void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
487 auto perform_runtime_call = [&]() {
489 __ PushRegistersInOrder(
491 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
492 __ Breakpoint();
493 };
494
495 GenerateSharedStubGeneric(
496 /*save_fpu_registers=*/with_fpu_regs,
497 with_fpu_regs
498 ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
499 : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
500 /*allow_return=*/false, perform_runtime_call);
501}
502
503void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
504 auto perform_runtime_call = [&]() {
505 __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/2);
506 __ Breakpoint();
507 };
508
509 GenerateSharedStubGeneric(
510 /*save_fpu_registers=*/with_fpu_regs,
511 with_fpu_regs
512 ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
513 : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
514 /*allow_return=*/false, perform_runtime_call);
515}
516
517// Input parameters:
518// LR : return address.
519// SP : address of return value.
520// R9 : address of the native function to call.
521// R2 : address of first argument in argument array.
522// R1 : argc_tag including number of arguments and function kind.
523static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
524 Address wrapper) {
525 const intptr_t thread_offset = target::NativeArguments::thread_offset();
526 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
527 const intptr_t argv_offset = target::NativeArguments::argv_offset();
528 const intptr_t retval_offset = target::NativeArguments::retval_offset();
529
530 __ EnterStubFrame();
531
532 // Save exit frame information to enable stack walking as we are about
533 // to transition to native code.
534 __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());
535
536 // Mark that the thread exited generated code through a runtime call.
537 __ LoadImmediate(R8, target::Thread::exit_through_runtime_call());
538 __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
539
540#if defined(DEBUG)
541 {
542 Label ok;
543 // Check that we are always entering from Dart code.
544 __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
545 __ CompareImmediate(R8, VMTag::kDartTagId);
546 __ b(&ok, EQ);
547 __ Stop("Not coming from Dart code.");
548 __ Bind(&ok);
549 }
550#endif
551
552 // Mark that the thread is executing native code.
553 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
554
555 // Reserve space for the native arguments structure passed on the stack (the
556 // outgoing pointer parameter to the native arguments structure is passed in
557 // R0) and align frame before entering the C++ world.
558 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
559
560 // Initialize target::NativeArguments structure and call native function.
561 // Registers R0, R1, R2, and R3 are used.
562
563 ASSERT(thread_offset == 0 * target::kWordSize);
564 // Set thread in NativeArgs.
565 __ mov(R0, Operand(THR));
566
567 ASSERT(argc_tag_offset == 1 * target::kWordSize);
568 // Set argc in target::NativeArguments: R1 already contains argc.
569
570 ASSERT(argv_offset == 2 * target::kWordSize);
571 // Set argv in target::NativeArguments: R2 already contains argv.
572
573 // Set retval in NativeArgs.
574 ASSERT(retval_offset == 3 * target::kWordSize);
575 __ add(R3, FP,
576 Operand((target::frame_layout.param_end_from_fp + 1) *
577 target::kWordSize));
578
579 // Passing the structure by value as in runtime calls would require changing
580 // Dart API for native functions.
581 // For now, space is reserved on the stack and we pass a pointer to it.
582 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
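  // Note: stm IA stores R0..R3 to ascending addresses starting at SP, which
  // matches the thread/argc/argv/retval word offsets asserted above, so this
  // writes the NativeArguments structure into the reserved stack space.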
583 __ mov(R0, Operand(SP)); // Pass the pointer to the target::NativeArguments.
584
585 __ mov(R1, Operand(R9)); // Pass the function entrypoint to call.
586
587 // Call native function invocation wrapper or redirection via simulator.
588 __ Call(wrapper);
589
590 // Mark that the thread is executing Dart code.
591 __ LoadImmediate(R2, VMTag::kDartTagId);
592 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
593
594 // Mark that the thread has not exited generated Dart code.
595 __ LoadImmediate(R2, 0);
596 __ StoreToOffset(R2, THR, target::Thread::exit_through_ffi_offset());
597
598 // Reset exit frame information in Isolate's mutator thread structure.
599 __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
600
601 // Restore the global object pool after returning from runtime (old space is
602 // moving, so the GOP could have been relocated).
603 if (FLAG_precompiled_mode) {
604 __ SetupGlobalPoolAndDispatchTable();
605 }
606
607 __ LeaveStubFrame();
608 __ Ret();
609}
610
611void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
612 GenerateCallNativeWithWrapperStub(
613 assembler,
614 Address(THR,
615 target::Thread::no_scope_native_wrapper_entry_point_offset()));
616}
617
618void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
619 GenerateCallNativeWithWrapperStub(
620 assembler,
621 Address(THR,
622 target::Thread::auto_scope_native_wrapper_entry_point_offset()));
623}
624
625// Input parameters:
626// LR : return address.
627// SP : address of return value.
628// R9 : address of the native function to call.
629// R2 : address of first argument in argument array.
630// R1 : argc_tag including number of arguments and function kind.
631void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
632 GenerateCallNativeWithWrapperStub(
633 assembler,
634 Address(THR,
635 target::Thread::bootstrap_native_wrapper_entry_point_offset()));
636}
637
638// Input parameters:
639// ARGS_DESC_REG: arguments descriptor array.
640void StubCodeCompiler::GenerateCallStaticFunctionStub() {
641 // Create a stub frame as we are pushing some objects on the stack before
642 // calling into the runtime.
643 __ EnterStubFrame();
644 // Setup space on stack for return value and preserve arguments descriptor.
645 __ LoadImmediate(R0, 0);
646 __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
647 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
648 // Get Code object result and restore arguments descriptor array.
649 __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
650 // Remove the stub frame.
651 __ LeaveStubFrame();
652 // Jump to the dart function.
653 __ mov(CODE_REG, Operand(R0));
654 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
655}
656
657// Called from a static call only when invalid code has been entered
658// (invalid because its function was optimized or deoptimized).
659// ARGS_DESC_REG: arguments descriptor array.
660void StubCodeCompiler::GenerateFixCallersTargetStub() {
661 Label monomorphic;
662 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
663
664 // Load code pointer to this stub from the thread:
665 // The one that is passed in is not correct - it points to the code object
666 // that needs to be replaced.
667 __ ldr(CODE_REG,
668 Address(THR, target::Thread::fix_callers_target_code_offset()));
669 // Create a stub frame as we are pushing some objects on the stack before
670 // calling into the runtime.
671 __ EnterStubFrame();
672 // Setup space on stack for return value and preserve arguments descriptor.
673 __ LoadImmediate(R0, 0);
674 __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
675 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
676 // Get Code object result and restore arguments descriptor array.
677 __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
678 // Remove the stub frame.
679 __ LeaveStubFrame();
680 // Jump to the dart function.
681 __ mov(CODE_REG, Operand(R0));
682 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
683
684 __ Bind(&monomorphic);
685 // Load code pointer to this stub from the thread:
686 // The one that is passed in is not correct - it points to the code object
687 // that needs to be replaced.
688 __ ldr(CODE_REG,
689 Address(THR, target::Thread::fix_callers_target_code_offset()));
690 // Create a stub frame as we are pushing some objects on the stack before
691 // calling into the runtime.
692 __ EnterStubFrame();
693 __ LoadImmediate(R1, 0);
694 __ Push(R1); // Result slot.
695 __ Push(R0); // Preserve receiver.
696 __ Push(R9); // Old cache value (also 2nd return value).
697 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
698 __ Pop(R9); // Get target cache object.
699 __ Pop(R0); // Restore receiver.
700 __ Pop(CODE_REG); // Get target Code object.
701 // Remove the stub frame.
702 __ LeaveStubFrame();
703 // Jump to the dart function.
704 __ Branch(FieldAddress(
705 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
706}
707
708// Called from object allocate instruction when the allocation stub has been
709// disabled.
710void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
711 // Load code pointer to this stub from the thread:
712 // The one that is passed in is not correct - it points to the code object
713 // that needs to be replaced.
714 __ ldr(CODE_REG,
715 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
716 __ EnterStubFrame();
717 // Setup space on stack for return value.
718 __ LoadImmediate(R0, 0);
719 __ Push(R0);
720 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
721 // Get Code object result.
722 __ Pop(R0);
723 // Remove the stub frame.
724 __ LeaveStubFrame();
725 // Jump to the dart function.
726 __ mov(CODE_REG, Operand(R0));
727 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
728}
729
730// Called from object allocate instruction when the allocation stub for a
731// generic class has been disabled.
732void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
733 // Load code pointer to this stub from the thread:
734 // The one that is passed in is not correct - it points to the code object
735 // that needs to be replaced.
736 __ ldr(CODE_REG,
737 Address(THR, target::Thread::fix_allocation_stub_code_offset()));
738 __ EnterStubFrame();
739 // Preserve type arguments register.
740 __ Push(AllocateObjectABI::kTypeArgumentsReg);
741 // Setup space on stack for return value.
742 __ LoadImmediate(R0, 0);
743 __ Push(R0);
744 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
745 // Get Code object result.
746 __ Pop(R0);
747 // Restore type arguments register.
748 __ Pop(AllocateObjectABI::kTypeArgumentsReg);
749 // Remove the stub frame.
750 __ LeaveStubFrame();
751 // Jump to the dart function.
752 __ mov(CODE_REG, Operand(R0));
753 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
754}
755
756// Input parameters:
757// R2: smi-tagged argument count, may be zero.
758// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
759static void PushArrayOfArguments(Assembler* assembler) {
760 // Allocate array to store arguments of caller.
761 __ LoadObject(R1, NullObject());
762 // R1: null element type for raw Array.
763 // R2: smi-tagged argument count, may be zero.
764 __ BranchLink(StubCodeAllocateArray());
765 // R0: newly allocated array.
766 // R2: smi-tagged argument count, may be zero (was preserved by the stub).
767 __ Push(R0); // Array is in R0 and on top of stack.
768 __ AddImmediate(R1, FP,
769 target::frame_layout.param_end_from_fp * target::kWordSize);
770 __ AddImmediate(R3, R0, target::Array::data_offset() - kHeapObjectTag);
771 // Copy arguments from stack to array (starting at the end).
772 // R1: address just beyond last argument on stack.
773 // R3: address of first argument in array.
774 Label enter;
775 __ b(&enter);
776 Label loop;
777 __ Bind(&loop);
778 __ ldr(R8, Address(R1, target::kWordSize, Address::PreIndex));
779 // Generational barrier is needed, array is not necessarily in new space.
780 __ StoreIntoObject(R0, Address(R3, R2, LSL, 1), R8);
781 __ Bind(&enter);
782 __ subs(R2, R2, Operand(target::ToRawSmi(1))); // R2 is Smi.
783 __ b(&loop, PL);
784}
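// Note: the loop above counts the Smi-tagged length in R2 down past zero; the
// PL condition keeps iterating while the index is still non-negative, and
// Address(R3, R2, LSL, 1) turns the Smi index (value * 2) into a byte offset
// of index * kWordSize.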
785
786// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
787// This stub translates an optimized frame into an unoptimized frame. The
788// optimized frame can contain values in registers and on the stack; the
789// unoptimized frame contains all values on the stack.
790// Deoptimization occurs in the following steps:
791// - Push all registers that can contain values.
792// - Call C routine to copy the stack and saved registers into temporary buffer.
793// - Adjust caller's frame to correct unoptimized frame size.
794// - Fill the unoptimized frame.
795// - Materialize objects that require allocation (e.g. Double instances).
796// GC can occur only after frame is fully rewritten.
797// Stack after EnterFrame(...) below:
798// +------------------+
799// | Saved PP | <- TOS
800// +------------------+
801// | Saved FP | <- FP of stub
802// +------------------+
803// | Saved LR | (deoptimization point)
804// +------------------+
805// | pc marker |
806// +------------------+
807// | Saved CODE_REG |
808// +------------------+
809// | ... | <- SP of optimized frame
810//
811// Part of the code cannot trigger GC, and part of it can.
812static void GenerateDeoptimizationSequence(Assembler* assembler,
813 DeoptStubKind kind) {
814 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
815 // is no need to set the correct PC marker or load PP, since they get patched.
816 __ EnterDartFrame(0);
817 __ LoadPoolPointer();
818
819 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
820 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
821 const intptr_t saved_result_slot_from_fp =
822 target::frame_layout.first_local_from_fp + 1 -
823 (kNumberOfCpuRegisters - R0);
824 const intptr_t saved_exception_slot_from_fp =
825 target::frame_layout.first_local_from_fp + 1 -
826 (kNumberOfCpuRegisters - R0);
827 const intptr_t saved_stacktrace_slot_from_fp =
828 target::frame_layout.first_local_from_fp + 1 -
829 (kNumberOfCpuRegisters - R1);
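  // Note: the loop below pushes the registers in descending enumeration order
  // just below the stub frame's locals, so register i lands in frame slot
  // first_local_from_fp + 1 - (kNumberOfCpuRegisters - i); the constants above
  // select the slots that hold R0 (result / exception) and R1 (stacktrace).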
830 // Result in R0 is preserved as part of pushing all registers below.
831
832 // Push registers in their enumeration order: lowest register number at
833 // lowest address.
834 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
835 if (i == CODE_REG) {
836 // Save the original value of CODE_REG pushed before invoking this stub
837 // instead of the value used to call this stub.
838 __ ldr(IP, Address(FP, 2 * target::kWordSize));
839 __ Push(IP);
840 } else if (i == SP) {
841 // Push(SP) has unpredictable behavior.
842 __ mov(IP, Operand(SP));
843 __ Push(IP);
844 } else {
845 __ Push(static_cast<Register>(i));
846 }
847 }
848
849 ASSERT(kFpuRegisterSize == 4 * target::kWordSize);
850 if (kNumberOfDRegisters > 16) {
851 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
852 __ vstmd(DB_W, SP, D0, 16);
853 } else {
854 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
855 }
856
857 {
858 __ mov(R0, Operand(SP)); // Pass address of saved registers block.
859 LeafRuntimeScope rt(assembler,
860 /*frame_size=*/0,
861 /*preserve_registers=*/false);
862 bool is_lazy =
863 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
864 __ mov(R1, Operand(is_lazy ? 1 : 0));
865 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
866 // Result (R0) is stack-size (FP - SP) in bytes.
867 }
868
869 if (kind == kLazyDeoptFromReturn) {
870 // Restore result into R1 temporarily.
871 __ ldr(R1, Address(FP, saved_result_slot_from_fp * target::kWordSize));
872 } else if (kind == kLazyDeoptFromThrow) {
873 // Restore result into R1 temporarily.
874 __ ldr(R1, Address(FP, saved_exception_slot_from_fp * target::kWordSize));
875 __ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * target::kWordSize));
876 }
877
878 __ RestoreCodePointer();
879 __ LeaveDartFrame();
880 __ sub(SP, FP, Operand(R0));
881
882 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
883 // is no need to set the correct PC marker or load PP, since they get patched.
884 __ EnterStubFrame();
885 if (kind == kLazyDeoptFromReturn) {
886 __ Push(R1); // Preserve result as first local.
887 } else if (kind == kLazyDeoptFromThrow) {
888 __ Push(R1); // Preserve exception as first local.
889 __ Push(R2); // Preserve stacktrace as second local.
890 }
891 {
892 __ mov(R0, Operand(FP)); // Get last FP address.
893 LeafRuntimeScope rt(assembler,
894 /*frame_size=*/0,
895 /*preserve_registers=*/false);
896 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
897 }
898 if (kind == kLazyDeoptFromReturn) {
899 // Restore result into R1.
900 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
901 target::kWordSize));
902 } else if (kind == kLazyDeoptFromThrow) {
903 // Restore result into R1.
904 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
905 target::kWordSize));
906 __ ldr(R2, Address(FP, (target::frame_layout.first_local_from_fp - 1) *
907 target::kWordSize));
908 }
909 // Code above cannot cause GC.
910 __ RestoreCodePointer();
911 __ LeaveStubFrame();
912
913 // Frame is fully rewritten at this point and it is safe to perform a GC.
914 // Materialize any objects that were deferred by FillFrame because they
915 // require allocation.
916 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
917 __ EnterStubFrame();
918 if (kind == kLazyDeoptFromReturn) {
919 __ Push(R1); // Preserve result, it will be GC-d here.
920 } else if (kind == kLazyDeoptFromThrow) {
921 // Preserve CODE_REG for one more runtime call.
922 __ Push(CODE_REG);
923 __ Push(R1); // Preserve exception, it will be GC-d here.
924 __ Push(R2); // Preserve stacktrace, it will be GC-d here.
925 }
926 __ PushObject(NullObject()); // Space for the result.
927 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
928 // Result tells stub how many bytes to remove from the expression stack
929 // of the bottom-most frame. They were used as materialization arguments.
930 __ Pop(R2);
931 if (kind == kLazyDeoptFromReturn) {
932 __ Pop(R0); // Restore result.
933 } else if (kind == kLazyDeoptFromThrow) {
934 __ Pop(R1); // Restore stacktrace.
935 __ Pop(R0); // Restore exception.
936 __ Pop(CODE_REG);
937 }
938 __ LeaveStubFrame();
939 // Remove materialization arguments.
940 __ add(SP, SP, Operand(R2, ASR, kSmiTagSize));
941 // The caller is responsible for emitting the return instruction.
942
943 if (kind == kLazyDeoptFromThrow) {
944 // Unoptimized frame is now ready to accept the exception. Rethrow it to
945 // find the right handler. Ask the rethrow machinery to bypass the debugger,
946 // which was already notified about this exception.
947 __ EnterStubFrame();
948 __ PushImmediate(
949 target::ToRawSmi(0)); // Space for the return value (unused).
950 __ Push(R0); // Exception
951 __ Push(R1); // Stacktrace
952 __ PushImmediate(target::ToRawSmi(1)); // Bypass debugger
953 __ CallRuntime(kReThrowRuntimeEntry, 3);
954 __ LeaveStubFrame();
955 }
956}
957
958// R0: result, must be preserved
959void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
960 // Push zap value instead of CODE_REG for lazy deopt.
961 __ LoadImmediate(IP, kZapCodeReg);
962 __ Push(IP);
963 // Return address for "call" to deopt stub.
964 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
965 __ ldr(CODE_REG,
966 Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));
967 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
968 __ Ret();
969}
970
971// R0: exception, must be preserved
972// R1: stacktrace, must be preserved
973void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
974 // Push zap value instead of CODE_REG for lazy deopt.
975 __ LoadImmediate(IP, kZapCodeReg);
976 __ Push(IP);
977 // Return address for "call" to deopt stub.
978 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
979 __ ldr(CODE_REG,
980 Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));
981 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
982 __ Ret();
983}
984
985void StubCodeCompiler::GenerateDeoptimizeStub() {
986 __ Push(CODE_REG);
987 __ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
988 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
989 __ Ret();
990}
991
992// IC_DATA_REG: ICData/MegamorphicCache
993static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
994 __ EnterStubFrame();
995
996 __ ldr(ARGS_DESC_REG,
997 FieldAddress(IC_DATA_REG,
998 target::CallSiteData::arguments_descriptor_offset()));
999
1000 // Load the receiver.
1001 __ ldr(R2, FieldAddress(ARGS_DESC_REG,
1002 target::ArgumentsDescriptor::size_offset()));
1003 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
1004 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
1005 target::kWordSize));
1006 __ LoadImmediate(IP, 0);
1007 __ Push(IP); // Result slot.
1008 __ Push(R8); // Receiver.
1009 __ Push(IC_DATA_REG); // ICData/MegamorphicCache.
1010 __ Push(ARGS_DESC_REG); // Arguments descriptor.
1011
1012 // Adjust arguments count.
1013 __ ldr(R3, FieldAddress(ARGS_DESC_REG,
1014 target::ArgumentsDescriptor::type_args_len_offset()));
1015 __ cmp(R3, Operand(0));
1016 __ AddImmediate(R2, R2, target::ToRawSmi(1),
1017 NE); // Include the type arguments.
1018
1019 // R2: Smi-tagged arguments array length.
1020 PushArrayOfArguments(assembler);
1021 const intptr_t kNumArgs = 4;
1022 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1023 __ Drop(4);
1024 __ Pop(R0); // Return value.
1025 __ LeaveStubFrame();
1026 __ Ret();
1027}
1028
1029static void GenerateDispatcherCode(Assembler* assembler,
1030 Label* call_target_function) {
1031 __ Comment("NoSuchMethodDispatch");
1032 // When lazily generated invocation dispatchers are disabled, the
1033 // miss-handler may return null.
1034 __ CompareObject(R0, NullObject());
1035 __ b(call_target_function, NE);
1036
1037 GenerateNoSuchMethodDispatcherBody(assembler);
1038}
1039
1040// Input:
1041// ARGS_DESC_REG - arguments descriptor
1042// IC_DATA_REG - icdata/megamorphic_cache
1043void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
1044 GenerateNoSuchMethodDispatcherBody(assembler);
1045}
1046
1047// Called for inline allocation of arrays.
1048// Input registers (preserved):
1049// LR: return address.
1050// AllocateArrayABI::kLengthReg: array length as Smi.
1051// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1052// Output registers:
1053// AllocateArrayABI::kResultReg: newly allocated array.
1054// Clobbered:
1055// R3, R4, R8, R9
1056void StubCodeCompiler::GenerateAllocateArrayStub() {
1057 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1058 Label slow_case;
1059 // Compute the size to be allocated; it is based on the array length
1060 // and is computed as:
1061 // RoundedAllocationSize(
1062 // (array_length * kwordSize) + target::Array::header_size()).
1063 __ mov(R3, Operand(AllocateArrayABI::kLengthReg)); // Array length.
1064 // Check that length is a Smi.
1065 __ tst(R3, Operand(kSmiTagMask));
1066 __ b(&slow_case, NE);
1067
1068 // Check length >= 0 && length <= kMaxNewSpaceElements
1069 const intptr_t max_len =
1070 target::ToRawSmi(target::Array::kMaxNewSpaceElements);
1071 __ CompareImmediate(R3, max_len);
1072 __ b(&slow_case, HI);
1073
1074 const intptr_t cid = kArrayCid;
1075 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &slow_case, R4));
1076
1077 const intptr_t fixed_size_plus_alignment_padding =
1078 target::Array::header_size() +
1079 target::ObjectAlignment::kObjectAlignment - 1;
1080 __ LoadImmediate(R9, fixed_size_plus_alignment_padding);
1081 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
1082 ASSERT(kSmiTagShift == 1);
1083 __ bic(R9, R9, Operand(target::ObjectAlignment::kObjectAlignment - 1));
1084
1085 // R9: Allocation size.
1086 // Potential new object start.
1087 __ ldr(AllocateArrayABI::kResultReg,
1088 Address(THR, target::Thread::top_offset()));
1089 __ adds(R3, AllocateArrayABI::kResultReg,
1090 Operand(R9)); // Potential next object start.
1091 __ b(&slow_case, CS); // Branch if unsigned overflow.
1092
1093 // Check if the allocation fits into the remaining space.
1094 // AllocateArrayABI::kResultReg: potential new object start.
1095 // R3: potential next object start.
1096 // R9: allocation size.
1097 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
1098 __ cmp(R3, Operand(TMP));
1099 __ b(&slow_case, CS);
1100 __ CheckAllocationCanary(AllocateArrayABI::kResultReg);
1101
1102 // Successfully allocated the object(s), now update top to point to
1103 // next object start and initialize the object.
1104 __ str(R3, Address(THR, target::Thread::top_offset()));
1105 __ add(AllocateArrayABI::kResultReg, AllocateArrayABI::kResultReg,
1106 Operand(kHeapObjectTag));
1107
1108 // Initialize the tags.
1109 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1110 // R3: new object end address.
1111 // R9: allocation size.
1112 {
1113 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1114 target::ObjectAlignment::kObjectAlignmentLog2;
1115
1116 __ CompareImmediate(R9, target::UntaggedObject::kSizeTagMaxSizeTag);
1117 __ mov(R8, Operand(R9, LSL, shift), LS);
1118 __ mov(R8, Operand(0), HI);
1119
1120 // Get the class index and insert it into the tags.
1121 // R8: size and bit tags.
1122 const uword tags =
1123 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1124 __ LoadImmediate(TMP, tags);
1125 __ orr(R8, R8, Operand(TMP));
1126 __ str(R8, FieldAddress(AllocateArrayABI::kResultReg,
1127 target::Array::tags_offset())); // Store tags.
1128 }
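    // Note: if the allocation size exceeds kSizeTagMaxSizeTag, the size bits
    // in the header stay zero (HI case above) and the heap later derives the
    // size from the length field; otherwise the rounded allocation size is
    // encoded directly in the header next to the class-id bits supplied by
    // MakeTagWordForNewSpaceObject.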
1129
1130 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1131 // R3: new object end address.
1132 // Store the type argument field.
1133 __ StoreIntoObjectNoBarrier(
1134 AllocateArrayABI::kResultReg,
1135 FieldAddress(AllocateArrayABI::kResultReg,
1136 target::Array::type_arguments_offset()),
1137 AllocateArrayABI::kTypeArgumentsReg);
1138
1139 // Set the length field.
1140 __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
1141 FieldAddress(AllocateArrayABI::kResultReg,
1142 target::Array::length_offset()),
1143 AllocateArrayABI::kLengthReg);
1144
1145 // Initialize all array elements to raw_null.
1146 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1147 // R8, R9: null
1148 // R4: iterator which initially points to the start of the variable
1149 // data area to be initialized.
1150 // R3: new object end address.
1151 // R9: allocation size.
1152
1153 __ LoadObject(R8, NullObject());
1154 __ mov(R9, Operand(R8));
1155 __ AddImmediate(R4, AllocateArrayABI::kResultReg,
1156 target::Array::header_size() - kHeapObjectTag);
1157 __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R4, R3, R8, R9);
1158 __ Ret();
1159 // Unable to allocate the array using the fast inline code, just call
1160 // into the runtime.
1161 __ Bind(&slow_case);
1162 }
1163
1164 // Create a stub frame as we are pushing some objects on the stack before
1165 // calling into the runtime.
1166 __ EnterStubFrame();
1167 __ LoadImmediate(TMP, 0);
1168 // Setup space on stack for return value.
1169 // Push array length as Smi and element type.
1170 __ PushList((1 << AllocateArrayABI::kTypeArgumentsReg) |
1171 (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
1172 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1173
1174 // Write-barrier elimination might be enabled for this array (depending on the
1175 // array length). To be sure we will check if the allocated object is in old
1176 // space and if so call a leaf runtime to add it to the remembered set.
1177 __ ldr(AllocateArrayABI::kResultReg, Address(SP, 2 * target::kWordSize));
1178 EnsureIsNewOrRemembered();
1179
1180 // Pop arguments; result is popped in IP.
1181 __ PopList((1 << AllocateArrayABI::kTypeArgumentsReg) |
1182 (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
1183 __ mov(AllocateArrayABI::kResultReg, Operand(IP));
1184 __ LeaveStubFrame();
1185 __ Ret();
1186}
1187
1188// Called for allocation of Mint.
1189void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
1190 // For test purpose call allocation stub without inline allocation attempt.
1191 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1192 Label slow_case;
1193 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1195 __ Ret();
1196
1197 __ Bind(&slow_case);
1198 }
1201 GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
1202 target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
1203 /*allow_return=*/true,
1204 /*store_runtime_result_in_result_register=*/true);
1205}
1206
1207// Called for allocation of Mint.
1208void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
1209 // For test purpose call allocation stub without inline allocation attempt.
1210 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1211 Label slow_case;
1212 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1214 __ Ret();
1215
1216 __ Bind(&slow_case);
1217 }
1220 GenerateSharedStub(
1221 /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1222 target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
1223 /*allow_return=*/true,
1224 /*store_runtime_result_in_result_register=*/true);
1225}
1226
1227// Called when invoking Dart code from C++ (VM code).
1228// Input parameters:
1229// LR : points to return address.
1230// R0 : target code or entry point (in bare instructions mode).
1231// R1 : arguments descriptor array.
1232// R2 : arguments array.
1233// R3 : current thread.
1234void StubCodeCompiler::GenerateInvokeDartCodeStub() {
1235 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1236
1237 // Push code object to PC marker slot.
1238 __ ldr(IP, Address(R3, target::Thread::invoke_dart_code_stub_offset()));
1239 __ Push(IP);
1240
1241 __ PushNativeCalleeSavedRegisters();
1242
1243 // Set up THR, which caches the current thread in Dart code.
1244 if (THR != R3) {
1245 __ mov(THR, Operand(R3));
1246 }
1247
1248#if defined(USING_SHADOW_CALL_STACK)
1249#error Unimplemented
1250#endif
1251
1252 // Save the current VMTag on the stack.
1253 __ LoadFromOffset(R9, THR, target::Thread::vm_tag_offset());
1254 __ Push(R9);
1255
1256 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
1257 // StackFrameIterator reads the top exit frame info saved in this frame.
1258 __ LoadFromOffset(R4, THR, target::Thread::top_resource_offset());
1259 __ Push(R4);
1260 __ LoadImmediate(R8, 0);
1261 __ StoreToOffset(R8, THR, target::Thread::top_resource_offset());
1262
1263 __ LoadFromOffset(R8, THR, target::Thread::exit_through_ffi_offset());
1264 __ Push(R8);
1265 __ LoadImmediate(R8, 0);
1266 __ StoreToOffset(R8, THR, target::Thread::exit_through_ffi_offset());
1267
1268 __ LoadFromOffset(R9, THR, target::Thread::top_exit_frame_info_offset());
1269 __ StoreToOffset(R8, THR, target::Thread::top_exit_frame_info_offset());
1270
1271 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1272 // with the code below.
1273#if defined(DART_TARGET_OS_MACOS) || defined(DART_TARGET_OS_MACOS_IOS)
1274 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -27);
1275#else
1276 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -28);
1277#endif
1278 __ Push(R9);
1279
1280 __ EmitEntryFrameVerification(R9);
1281
1282 // Mark that the thread is executing Dart code. Do this after initializing the
1283 // exit link for the profiler.
1284 __ LoadImmediate(R9, VMTag::kDartTagId);
1285 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
1286
1287 // Load arguments descriptor array into R4, which is passed to Dart code.
1288 __ mov(R4, Operand(R1));
1289
1290 // Load number of arguments into R9 and adjust count for type arguments.
1291 __ ldr(R3,
1292 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
1293 __ ldr(R9, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
1294 __ cmp(R3, Operand(0));
1295 __ AddImmediate(R9, R9, target::ToRawSmi(1),
1296 NE); // Include the type arguments.
1297 __ SmiUntag(R9);
1298
1299 // Compute address of 'arguments array' data area into R2.
1300 __ AddImmediate(R2, R2, target::Array::data_offset() - kHeapObjectTag);
1301
1302 // Set up arguments for the Dart call.
1303 Label push_arguments;
1304 Label done_push_arguments;
1305 __ CompareImmediate(R9, 0); // check if there are arguments.
1306 __ b(&done_push_arguments, EQ);
1307 __ LoadImmediate(R1, 0);
1308 __ Bind(&push_arguments);
1309 __ ldr(R3, Address(R2));
1310 __ Push(R3);
1311 __ AddImmediate(R2, target::kWordSize);
1312 __ AddImmediate(R1, 1);
1313 __ cmp(R1, Operand(R9));
1314 __ b(&push_arguments, LT);
1315 __ Bind(&done_push_arguments);
1316
1317 // Call the Dart code entrypoint.
1318 if (FLAG_precompiled_mode) {
1319 __ SetupGlobalPoolAndDispatchTable();
1320 __ LoadImmediate(CODE_REG, 0); // GC safe value into CODE_REG.
1321 } else {
1322 __ LoadImmediate(PP, 0); // GC safe value into PP.
1323 __ mov(CODE_REG, Operand(R0));
1324 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1325 }
1326 __ blx(R0); // R4 is the arguments descriptor array.
1327
1328 // Get rid of arguments pushed on the stack.
1329 __ AddImmediate(
1330 SP, FP,
1331 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
1332
1333 // Restore the saved top exit frame info and top resource back into the
1334 // Isolate structure. Uses R9 as a temporary register for this.
1335 __ Pop(R9);
1336 __ StoreToOffset(R9, THR, target::Thread::top_exit_frame_info_offset());
1337 __ Pop(R9);
1338 __ StoreToOffset(R9, THR, target::Thread::exit_through_ffi_offset());
1339 __ Pop(R9);
1340 __ StoreToOffset(R9, THR, target::Thread::top_resource_offset());
1341
1342 // Restore the current VMTag from the stack.
1343 __ Pop(R4);
1344 __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
1345
1346#if defined(USING_SHADOW_CALL_STACK)
1347#error Unimplemented
1348#endif
1349
1350 __ PopNativeCalleeSavedRegisters();
1351
1352 __ set_constant_pool_allowed(false);
1353
1354 // Restore the frame pointer and return.
1355 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
1356 __ Ret();
1357}
1358
1359// Helper that generates the space allocation part of the Context allocation
1360// stubs. This does not initialise the fields of the context.
1361// Input:
1362// R1: number of context variables.
1363// Output:
1364// R0: new allocated Context object.
1365// Clobbered:
1366// R2, R3, R8, R9
1367static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
1368 // First compute the rounded instance size.
1369 // R1: number of context variables.
1370 const intptr_t fixed_size_plus_alignment_padding =
1371 target::Context::header_size() +
1372 target::ObjectAlignment::kObjectAlignment - 1;
1373 __ LoadImmediate(R2, fixed_size_plus_alignment_padding);
1374 __ add(R2, R2, Operand(R1, LSL, 2));
1375 ASSERT(kSmiTagShift == 1);
1376 __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
1377
1378 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, R8));
1379 // Now allocate the object.
1380 // R1: number of context variables.
1381 // R2: object size.
1382 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1383 __ add(R3, R2, Operand(R0));
1384 // Check if the allocation fits into the remaining space.
1385 // R0: potential new object.
1386 // R1: number of context variables.
1387 // R2: object size.
1388 // R3: potential next object start.
1389 __ ldr(IP, Address(THR, target::Thread::end_offset()));
1390 __ cmp(R3, Operand(IP));
1391 __ b(slow_case, CS); // Branch if unsigned higher or equal.
1392 __ CheckAllocationCanary(R0);
1393
1394 // Successfully allocated the object, now update top to point to
1395 // next object start and initialize the object.
1396 // R0: new object start (untagged).
1397 // R1: number of context variables.
1398 // R2: object size.
1399 // R3: next object start.
1400 __ str(R3, Address(THR, target::Thread::top_offset()));
1401 __ add(R0, R0, Operand(kHeapObjectTag));
1402
1403 // Calculate the size tag.
1404 // R0: new object (tagged).
1405 // R1: number of context variables.
1406 // R2: object size.
1407 // R3: next object start.
1408 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1409 target::ObjectAlignment::kObjectAlignmentLog2;
1410 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
1411 // If no size tag overflow, shift R2 left, else set R2 to zero.
1412 __ mov(R9, Operand(R2, LSL, shift), LS);
1413 __ mov(R9, Operand(0), HI);
1414
1415 // Get the class index and insert it into the tags.
1416 // R9: size and bit tags.
1417 const uword tags =
1418 target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
1419
1420 __ LoadImmediate(IP, tags);
1421 __ orr(R9, R9, Operand(IP));
1422 __ str(R9, FieldAddress(R0, target::Object::tags_offset()));
1423
1424 // Set up the number-of-context-variables field.
1425 // R0: new object.
1426 // R1: number of context variables as integer value (not object).
1427 // R2: object size.
1428 // R3: next object start.
1429 __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
1430}
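// Note: the size computed in R2 above is effectively
//   RoundUp(Context::header_size() + num_variables * kWordSize,
//           kObjectAlignment),
// mirroring the array size computation in GenerateAllocateArrayStub.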
1431
1432// Called for inline allocation of contexts.
1433// Input:
1434// R1: number of context variables.
1435// Output:
1436// R0: new allocated Context object.
1437// Clobbered:
1438// Potentially any, since it can go to the runtime.
1439void StubCodeCompiler::GenerateAllocateContextStub() {
1440 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1441 Label slow_case;
1442
1443 GenerateAllocateContext(assembler, &slow_case);
1444
1445 // Setup the parent field.
1446 // R0: new object.
1447 // R2: object size.
1448 // R3: next object start.
1449 __ LoadObject(R8, NullObject());
1450 __ MoveRegister(R9, R8); // Needed for InitializeFieldsNoBarrier.
1451 __ StoreIntoObjectNoBarrier(
1452 R0, FieldAddress(R0, target::Context::parent_offset()), R8);
1453
1454 // Initialize the context variables.
1455 // R0: new object.
1456 // R2: object size.
1457 // R3: next object start.
1458 // R8, R9: raw null.
1459 __ AddImmediate(R1, R0,
1460 target::Context::variable_offset(0) - kHeapObjectTag);
1461 __ InitializeFieldsNoBarrier(R0, R1, R3, R8, R9);
1462
1463 // Done allocating and initializing the context.
1464 // R0: new object.
1465 __ Ret();
1466
1467 __ Bind(&slow_case);
1468 }
1469
1470 // Create a stub frame as we are pushing some objects on the stack before
1471 // calling into the runtime.
1472 __ EnterStubFrame();
1473 // Setup space on stack for return value.
1474 __ LoadImmediate(R2, 0);
1475 __ SmiTag(R1);
1476 __ PushList((1 << R1) | (1 << R2));
1477 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1478 __ Drop(1); // Pop number of context variables argument.
1479 __ Pop(R0); // Pop the new context object.
1480
1481 // Write-barrier elimination might be enabled for this context (depending on
1482 // the size). To be sure we will check if the allocated object is in old
1483 // space and if so call a leaf runtime to add it to the remembered set.
1484 EnsureIsNewOrRemembered();
1485
1486 // R0: new object
1487 // Restore the frame pointer.
1488 __ LeaveStubFrame();
1489
1490 __ Ret();
1491}
1492
1493// Called for clone of contexts.
1494// Input:
1495// R4: context variable to clone.
1496// Output:
1497// R0: new allocated Context object.
1498// Clobbered:
1499// Potentially any, since it can go to the runtime.
1500void StubCodeCompiler::GenerateCloneContextStub() {
1501 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1502 Label slow_case;
1503
1504 // Load the number of variables in the existing context.
1505 __ ldr(R1, FieldAddress(R4, target::Context::num_variables_offset()));
1506
1507 GenerateAllocateContext(assembler, &slow_case);
1508
1509 // Load parent in the existing context.
1510 __ ldr(R2, FieldAddress(R4, target::Context::parent_offset()));
1511 // Setup the parent field.
1512 // R0: new object.
1513 __ StoreIntoObjectNoBarrier(
1514 R0, FieldAddress(R0, target::Context::parent_offset()), R2);
1515
1516 // Clone the context variables.
1517 // R0: new object.
1518 // R1: number of context variables.
1519 {
1520 Label loop, done;
1521 __ AddImmediate(R2, R0,
1522 target::Context::variable_offset(0) - kHeapObjectTag);
1523 __ AddImmediate(R3, R4,
1524 target::Context::variable_offset(0) - kHeapObjectTag);
1525
1526 __ Bind(&loop);
1527 __ subs(R1, R1, Operand(1));
1528 __ b(&done, MI);
1529
1530 __ ldr(R9, Address(R3, R1, LSL, target::kWordSizeLog2));
1531 __ str(R9, Address(R2, R1, LSL, target::kWordSizeLog2));
1532
1533 __ b(&loop, NE); // Loop if R1 not zero.
1534
1535 __ Bind(&done);
1536 }
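    // Note: plain str is sufficient for the copies above because the
    // destination context was just allocated in new space, so no generational
    // write barrier is required.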
1537
1538 // Done allocating and initializing the context.
1539 // R0: new object.
1540 __ Ret();
1541
1542 __ Bind(&slow_case);
1543 }
1544
1545 // Create a stub frame as we are pushing some objects on the stack before
1546 // calling into the runtime.
1547 __ EnterStubFrame();
1548 // Setup space on stack for return value.
1549 __ LoadImmediate(R0, 0);
1550 __ PushRegisterPair(R4, R0);
1551 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1552 // R4: Pop number of context variables argument.
1553 // R0: Pop the new context object.
1554 __ PopRegisterPair(R4, R0);
1555
1556 // Write-barrier elimination might be enabled for this context (depending on
1557 // the size). To be sure we will check if the allocated object is in old
1558 // space and if so call a leaf runtime to add it to the remembered set.
1559 EnsureIsNewOrRemembered();
1560
1561 // R0: new object
1562 // Restore the frame pointer.
1563 __ LeaveStubFrame();
1564 __ Ret();
1565}
1566
1567void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
1568 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1569 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1570
1571 Register reg = static_cast<Register>(i);
1572 intptr_t start = __ CodeSize();
1573 SPILLS_LR_TO_FRAME(__ PushList((1 << LR) | (1 << kWriteBarrierObjectReg)));
1574 __ mov(kWriteBarrierObjectReg, Operand(reg));
1575 __ Call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
1576 RESTORES_LR_FROM_FRAME(
1577 __ PopList((1 << LR) | (1 << kWriteBarrierObjectReg)));
1578 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR));
1579 intptr_t end = __ CodeSize();
1580
1582 }
1583}
1584
1585// Helper stub to implement Assembler::StoreIntoObject.
1586// Input parameters:
1587// R1: Object (old)
1588// R0: Value (old or new)
1589// R9: Slot
1590// If R0 is new, add R1 to the store buffer. Otherwise, if R0 is old and
1591// unmarked, mark R0 and push it onto the marking stack.
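// Rough flow of the helper below: first the incremental-marking barrier is
// applied to the value (R0) -- if it is still unmarked while marking is in
// progress, it is marked and pushed onto the thread's marking stack. Then the
// generational barrier is applied to the object (R1) -- for an old->new store
// it is added to the thread's store buffer, or, for arrays using card marking,
// the corresponding card is dirtied instead.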
1595static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
1596 Label skip_marking;
1597 __ Push(R2);
1598 __ ldr(TMP, FieldAddress(R0, target::Object::tags_offset()));
1599 __ ldr(R2, Address(THR, target::Thread::write_barrier_mask_offset()));
1600 __ and_(TMP, TMP, Operand(R2));
1601 __ Pop(R2);
1602 __ tst(TMP, Operand(target::UntaggedObject::kIncrementalBarrierMask));
1603 __ b(&skip_marking, ZERO);
1604
1605 {
1606 // Atomically clear kNotMarkedBit.
1607 Label retry, done;
1608 __ PushList((1 << R2) | (1 << R3) | (1 << R4)); // Spill.
1609 __ AddImmediate(R3, R0, target::Object::tags_offset() - kHeapObjectTag);
1610 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1611 __ Bind(&retry);
1612 __ ldrex(R2, R3);
1613 __ tst(R2, Operand(1 << target::UntaggedObject::kNotMarkedBit));
1614 __ b(&done, ZERO); // Marked by another thread.
1615 __ bic(R2, R2, Operand(1 << target::UntaggedObject::kNotMarkedBit));
1616 __ strex(R4, R2, R3);
1617 __ cmp(R4, Operand(1));
1618 __ b(&retry, EQ);
1619
1620 __ ldr(R4, Address(THR, target::Thread::marking_stack_block_offset()));
1622 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1624 __ add(R2, R2, Operand(1));
1626 __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
1627 __ b(&done, NE);
1628
1629 {
1630 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1631 /*preserve_registers=*/true);
1632 __ mov(R0, Operand(THR));
1633 rt.Call(kMarkingStackBlockProcessRuntimeEntry, 1);
1634 }
1635
1636 __ Bind(&done);
1637 __ clrex();
1638 __ PopList((1 << R2) | (1 << R3) | (1 << R4)); // Unspill.
1639 }
1640
1641 Label add_to_remembered_set, remember_card;
1642 __ Bind(&skip_marking);
1643 __ Push(R2);
1644 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1645 __ ldr(R2, FieldAddress(R0, target::Object::tags_offset()));
1646 __ and_(TMP, R2,
1647 Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
1648 __ Pop(R2);
1649 __ tst(TMP, Operand(target::UntaggedObject::kGenerationalBarrierMask));
1650 __ b(&add_to_remembered_set, NOT_ZERO);
1651 __ Ret();
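  // The AND above combines the object's tag bits (shifted right by
  // kBarrierOverlapShift) with the value's tag bits, so a single TST against
  // kGenerationalBarrierMask decides whether this store needs the
  // generational barrier; if not, we simply return.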
1652
1653 __ Bind(&add_to_remembered_set);
1654 if (cards) {
1655 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1656 __ tst(TMP, Operand(1 << target::UntaggedObject::kCardRememberedBit));
1657 __ b(&remember_card, NOT_ZERO);
1658 } else {
1659#if defined(DEBUG)
1660 Label ok;
1661 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1662 __ tst(TMP, Operand(1 << target::UntaggedObject::kCardRememberedBit));
1663 __ b(&ok, ZERO);
1664 __ Stop("Wrong barrier");
1665 __ Bind(&ok);
1666#endif
1667 }
1668
1669 {
1670 // Atomically clear kOldAndNotRememberedBit.
1671 Label retry, done;
1672 __ PushList((1 << R2) | (1 << R3) | (1 << R4));
1673 __ AddImmediate(R3, R1, target::Object::tags_offset() - kHeapObjectTag);
1674 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1675 __ Bind(&retry);
1676 __ ldrex(R2, R3);
1677 __ tst(R2, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1678 __ b(&done, ZERO); // Remembered by another thread.
1679 __ bic(R2, R2,
1680 Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
1681 __ strex(R4, R2, R3);
1682 __ cmp(R4, Operand(1));
1683 __ b(&retry, EQ);
1684
1685 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1686 // StoreBufferBlock and add the address to the pointers_.
1687 __ ldr(R4, Address(THR, target::Thread::store_buffer_block_offset()));
1688 __ ldr(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1689 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1691
1692 // Increment top_ and check for overflow.
1693 // R2: top_.
1694 // R4: StoreBufferBlock.
1695 __ add(R2, R2, Operand(1));
1696 __ str(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1697 __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
1698 __ b(&done, NE);
1699
1700 {
1701 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1702 /*preserve_registers=*/true);
1703 __ mov(R0, Operand(THR));
1704 rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
1705 }
1706
1707 __ Bind(&done);
1708 __ PopList((1 << R2) | (1 << R3) | (1 << R4));
1709 __ Ret();
1710 }
1711 if (cards) {
1712 Label remember_card_slow;
1713
1714 // Get card table.
1715 __ Bind(&remember_card);
1716 __ AndImmediate(TMP, R1, target::kPageMask); // Page.
1717 __ ldr(TMP,
1718 Address(TMP, target::Page::card_table_offset())); // Card table.
1719 __ cmp(TMP, Operand(0));
1720 __ b(&remember_card_slow, EQ);
1721
1722 // Dirty the card. Not atomic: we assume mutable arrays are not shared
1723 // between threads.
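  // In effect: card = (slot - page) >> kBytesPerCardLog2, followed by
  // card_table[card >> kBitsPerWordLog2] |= 1 << (card & (kBitsPerWord - 1)).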
1724 __ PushList((1 << R0) | (1 << R1));
1725 __ AndImmediate(TMP, R1, target::kPageMask); // Page.
1726 __ sub(R9, R9, Operand(TMP)); // Offset in page.
1727 __ Lsr(R9, R9, Operand(target::Page::kBytesPerCardLog2)); // Card index.
1728 __ AndImmediate(R1, R9, target::kBitsPerWord - 1); // Lsl is not mod 32.
1729 __ LoadImmediate(R0, 1); // Bit offset.
1730 __ Lsl(R0, R0, R1); // Bit mask.
1731 __ ldr(TMP,
1732 Address(TMP, target::Page::card_table_offset())); // Card table.
1733 __ Lsr(R9, R9, Operand(target::kBitsPerWordLog2)); // Word index.
1734 __ add(TMP, TMP, Operand(R9, LSL, target::kWordSizeLog2)); // Word address.
1735 __ ldr(R1, Address(TMP, 0));
1736 __ orr(R1, R1, Operand(R0));
1737 __ str(R1, Address(TMP, 0));
1738 __ PopList((1 << R0) | (1 << R1));
1739 __ Ret();
1740
1741 // Card table not yet allocated.
1742 __ Bind(&remember_card_slow);
1743 {
1744 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1745 /*preserve_registers=*/true);
1746 __ mov(R0, Operand(R1)); // Arg0 = Object
1747 __ mov(R1, Operand(R9)); // Arg1 = Slot
1748 rt.Call(kRememberCardRuntimeEntry, 2);
1749 }
1750 __ Ret();
1751 }
1752}
1753
1754void StubCodeCompiler::GenerateWriteBarrierStub() {
1755 GenerateWriteBarrierStubHelper(assembler, false);
1756}
1757
1758void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
1759 GenerateWriteBarrierStubHelper(assembler, true);
1760}
1761
1762static void GenerateAllocateObjectHelper(Assembler* assembler,
1763 bool is_cls_parameterized) {
1764 const Register kTagsReg = AllocateObjectABI::kTagsReg;
1765
1766 {
1767 Label slow_case;
1768
1769#if !defined(PRODUCT)
1770 {
1771 const Register kTraceAllocationTempReg = R8;
1772 const Register kCidRegister = R9;
1773 __ ExtractClassIdFromTags(kCidRegister, AllocateObjectABI::kTagsReg);
1774 __ MaybeTraceAllocation(kCidRegister, &slow_case,
1775 kTraceAllocationTempReg);
1776 }
1777#endif
1778
1779 const Register kNewTopReg = R8;
1780
1781 // Bump allocation.
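    // In effect: result = top; new_top = result + instance_size;
    // if (end <= new_top) goto slow_case; else top = new_top;
    // where `top` and `end` are the two words loaded from the thread below.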
1782 {
1783 const Register kEndReg = R1;
1784 const Register kInstanceSizeReg = R9;
1785
1786 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
1787
1788 // Load two words from Thread::top: top and end.
1789 // AllocateObjectABI::kResultReg: potential next object start.
1790 __ ldrd(AllocateObjectABI::kResultReg, kEndReg, THR,
1791 target::Thread::top_offset());
1792
1793 __ add(kNewTopReg, AllocateObjectABI::kResultReg,
1794 Operand(kInstanceSizeReg));
1795
1796 __ CompareRegisters(kEndReg, kNewTopReg);
1797 __ b(&slow_case, UNSIGNED_LESS_EQUAL);
1798
1799 // Successfully allocated the object: update top to point to the next
1800 // object start. The class is recorded via the tags word stored below.
1801 __ str(kNewTopReg, Address(THR, target::Thread::top_offset()));
1802 } // kEndReg = R1, kInstanceSizeReg = R9
1803
1804 // Tags.
1805 __ str(kTagsReg, Address(AllocateObjectABI::kResultReg,
1806 target::Object::tags_offset()));
1807
1808 // Initialize the remaining words of the object.
1809 {
1810 const Register kFieldReg = R1;
1811 const Register kNullReg = R9;
1812
1813 __ LoadObject(kNullReg, NullObject());
1814
1815 __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
1816 target::Instance::first_field_offset());
1817 Label done, init_loop;
1818 __ Bind(&init_loop);
1819 __ CompareRegisters(kFieldReg, kNewTopReg);
1821 __ str(kNullReg,
1822 Address(kFieldReg, target::kWordSize, Address::PostIndex));
1823 __ b(&init_loop);
1824
1825 __ Bind(&done);
1826 } // kFieldReg = R1, kNullReg = R9
1827
1828 // Store parameterized type.
1829 if (is_cls_parameterized) {
1830 Label not_parameterized_case;
1831
1832 const Register kClsIdReg = R2;
1833 const Register kTypeOffsetReg = R9;
1834
1835 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
1836
1837 // Load class' type_arguments_field offset in words.
1838 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
1839 __ ldr(
1840 kTypeOffsetReg,
1841 FieldAddress(kTypeOffsetReg,
1842 target::Class::
1843 host_type_arguments_field_offset_in_words_offset()));
1844
1845 // Set the type arguments in the new object.
1846 __ StoreIntoObjectNoBarrier(
1848 Address(AllocateObjectABI::kResultReg, kTypeOffsetReg, LSL,
1849 target::kWordSizeLog2),
1851
1852 __ Bind(&not_parameterized_case);
1853 } // kClsIdReg = R2, kTypeOffsetReg = R9
1854
1855 __ AddImmediate(AllocateObjectABI::kResultReg,
1857
1858 __ Ret();
1859
1860 __ Bind(&slow_case);
1861 } // kNewTopReg = R8
1862
1863 // Fall back on slow case:
1864 {
1865 const Register kStubReg = R8;
1866
1867 if (!is_cls_parameterized) {
1869 }
1870
1871 // Tail call to generic allocation stub.
1872 __ ldr(kStubReg,
1873 Address(THR,
1874 target::Thread::allocate_object_slow_entry_point_offset()));
1875 __ bx(kStubReg);
1876 } // kStubReg = R8
1877}
1878
1879// Called for inline allocation of objects (any class).
1880void StubCodeCompiler::GenerateAllocateObjectStub() {
1881 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
1882}
1883
1884void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
1885 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
1886}
1887
1888void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
1889 const Register kClsReg = R1;
1890
1891 if (!FLAG_precompiled_mode) {
1892 __ ldr(CODE_REG,
1893 Address(THR, target::Thread::call_to_runtime_stub_offset()));
1894 }
1895
1896 // Create a stub frame as we are pushing some objects on the stack before
1897 // calling into the runtime.
1898 __ EnterStubFrame();
1899
1900 __ ExtractClassIdFromTags(AllocateObjectABI::kResultReg,
1902 __ LoadClassById(kClsReg, AllocateObjectABI::kResultReg);
1903
1905
1906 // Push the result slot, then the class and the type arguments.
1907 // The type arguments are Object::null() if the class is not parameterized.
1908 __ PushRegistersInOrder({AllocateObjectABI::kResultReg, kClsReg,
1910
1911 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
1912
1913 // Load result off the stack into result register.
1914 __ ldr(AllocateObjectABI::kResultReg, Address(SP, 2 * target::kWordSize));
1915
1916 // Write-barrier elimination is enabled for [cls], so we need to ensure
1917 // that the object is in new space or has its remembered bit set.
1919
1920 __ LeaveDartFrameAndReturn();
1921}
1922
1923// Called for inline allocation of objects.
1925 UnresolvedPcRelativeCalls* unresolved_calls,
1926 const Class& cls,
1927 const Code& allocate_object,
1928 const Code& allocat_object_parametrized) {
1929 classid_t cls_id = target::Class::GetId(cls);
1930 ASSERT(cls_id != kIllegalCid);
1931
1932 // The generated code is different if the class is parameterized.
1933 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
1934 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
1935 cls) != target::Class::kNoTypeArguments);
1936
1937 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
1938 ASSERT(instance_size > 0);
1939
1940 const uword tags =
1941 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
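  // The tag word encodes, among other header bits, the class id and the
  // instance size; this is why the allocation fast path above needs only
  // kTagsReg and can recover both via ExtractClassIdFromTags and
  // ExtractInstanceSizeFromTags.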
1942
1943 const Register kTagsReg = AllocateObjectABI::kTagsReg;
1944
1945 __ LoadImmediate(kTagsReg, tags);
1946
1947 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
1948 !target::Class::TraceAllocation(cls) &&
1949 target::SizeFitsInSizeTag(instance_size)) {
1951 RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
1952
1953 if (is_cls_parameterized) {
1954 if (!IsSameObject(NullObject(),
1955 CastHandle<Object>(allocat_object_parametrized))) {
1956 __ GenerateUnRelocatedPcRelativeTailCall();
1957 unresolved_calls->Add(new UnresolvedPcRelativeCall(
1958 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
1959 } else {
1960 __ ldr(PC,
1961 Address(THR,
1962 target::Thread::
1963 allocate_object_parameterized_entry_point_offset()));
1964 }
1965 } else {
1966 if (!IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
1967 __ GenerateUnRelocatedPcRelativeTailCall();
1968 unresolved_calls->Add(new UnresolvedPcRelativeCall(
1969 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
1970 } else {
1971 __ ldr(
1972 PC,
1973 Address(THR, target::Thread::allocate_object_entry_point_offset()));
1974 }
1975 }
1976 } else {
1977 if (!is_cls_parameterized) {
1979 }
1980 __ ldr(PC,
1981 Address(THR,
1982 target::Thread::allocate_object_slow_entry_point_offset()));
1983 }
1984}
1985
1986// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
1987// from the entry code of a Dart function after an error in the passed
1988// argument names or count is detected.
1989// Input parameters:
1990// LR : return address.
1991// SP : address of last argument.
1992// R4: arguments descriptor array.
1993void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
1994 __ EnterStubFrame();
1995
1996 // Load the receiver.
1997 __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
1998 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
1999 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
2000 target::kWordSize));
2001
2002 // Load the function.
2003 __ ldr(R6, FieldAddress(R8, target::Closure::function_offset()));
2004
2005 // Push space for the return value.
2006 // Push the receiver.
2007 // Push arguments descriptor array.
2008 __ LoadImmediate(IP, 0);
2009 __ PushList((1 << R4) | (1 << R6) | (1 << R8) | (1 << IP));
2010
2011 // Adjust arguments count.
2012 __ ldr(R3,
2013 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
2014 __ cmp(R3, Operand(0));
2015 __ AddImmediate(R2, R2, target::ToRawSmi(1),
2016 NE); // Include the type arguments.
2017
2018 // R2: Smi-tagged arguments array length.
2019 PushArrayOfArguments(assembler);
2020
2021 const intptr_t kNumArgs = 4;
2022 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2023 // noSuchMethod on closures always throws an error, so it will never return.
2024 __ bkpt(0);
2025}
2026
2027// R8: function object.
2028// R9: inline cache data object.
2029// Cannot use function object from ICData as it may be the inlined
2030// function and not the top-scope function.
2032 Register ic_reg = R9;
2033 Register func_reg = R8;
2034 if (FLAG_precompiled_mode) {
2035 __ Breakpoint();
2036 return;
2037 }
2038 if (FLAG_trace_optimized_ic_calls) {
2039 __ EnterStubFrame();
2040 __ PushList((1 << R9) | (1 << R8)); // Preserve.
2041 __ Push(ic_reg); // Argument.
2042 __ Push(func_reg); // Argument.
2043 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
2044 __ Drop(2); // Discard arguments.
2045 __ PopList((1 << R9) | (1 << R8)); // Restore.
2046 __ LeaveStubFrame();
2047 }
2048 __ ldr(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2049 __ add(TMP, TMP, Operand(1));
2050 __ str(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2051}
2052
2053// Loads function into 'temp_reg'.
2055 if (FLAG_precompiled_mode) {
2056 __ Breakpoint();
2057 return;
2058 }
2059 if (FLAG_optimization_counter_threshold >= 0) {
2060 Register func_reg = temp_reg;
2061 ASSERT(temp_reg == R8);
2062 __ Comment("Increment function counter");
2063 __ ldr(func_reg, FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
2064 __ ldr(TMP,
2065 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2066 __ add(TMP, TMP, Operand(1));
2067 __ str(TMP,
2068 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2069 }
2070}
2071
2072// Note: R9 must be preserved.
2073// Attempt a quick Smi operation for known operations ('kind'). The ICData
2074// must have been primed with a Smi/Smi check that will be used for counting
2075// the invocations.
2076static void EmitFastSmiOp(Assembler* assembler,
2077 Token::Kind kind,
2078 intptr_t num_args,
2079 Label* not_smi_or_overflow) {
2080 __ Comment("Fast Smi op");
2081 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left.
2082 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Right.
2083 __ orr(TMP, R0, Operand(R1));
2084 __ tst(TMP, Operand(kSmiTagMask));
2085 __ b(not_smi_or_overflow, NE);
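  // A Smi has a clear tag bit, so OR-ing both operands and testing against
  // kSmiTagMask is non-zero iff at least one of them is not a Smi.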
2086 switch (kind) {
2087 case Token::kADD: {
2088 __ adds(R0, R1, Operand(R0)); // Add; sets the overflow flag for the check below.
2089 __ b(not_smi_or_overflow, VS); // Branch if overflow.
2090 break;
2091 }
2092 case Token::kLT: {
2093 __ cmp(R0, Operand(R1));
2094 __ LoadObject(R0, CastHandle<Object>(TrueObject()), LT);
2095 __ LoadObject(R0, CastHandle<Object>(FalseObject()), GE);
2096 break;
2097 }
2098 case Token::kEQ: {
2099 __ cmp(R0, Operand(R1));
2100 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
2101 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
2102 break;
2103 }
2104 default:
2105 UNIMPLEMENTED();
2106 }
2107 // R9: IC data object (preserved).
2108 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2109 // R8: ic_data_array with check entries: classes and target functions.
2110 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2111// R8: points directly to the first ic data array element.
2112#if defined(DEBUG)
2113 // Check that first entry is for Smi/Smi.
2114 Label error, ok;
2115 const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
2116 __ ldr(R1, Address(R8, 0));
2117 __ CompareImmediate(R1, imm_smi_cid);
2118 __ b(&error, NE);
2119 __ ldr(R1, Address(R8, target::kWordSize));
2120 __ CompareImmediate(R1, imm_smi_cid);
2121 __ b(&ok, EQ);
2122 __ Bind(&error);
2123 __ Stop("Incorrect IC data");
2124 __ Bind(&ok);
2125#endif
2126 if (FLAG_optimization_counter_threshold >= 0) {
2127 // Update counter, ignore overflow.
2128 const intptr_t count_offset =
2129 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2130 __ LoadFromOffset(R1, R8, count_offset);
2131 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2132 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2133 }
2134 __ Ret();
2135}
2136
2137// Saves the offset of the target entry-point (from the Function) into R3.
2138//
2139// Must be the first code generated, since any code before will be skipped in
2140// the unchecked entry-point.
2141static void GenerateRecordEntryPoint(Assembler* assembler) {
2142 Label done;
2143 __ mov(R3, Operand(target::Function::entry_point_offset() - kHeapObjectTag));
2144 __ b(&done);
2145 __ BindUncheckedEntryPoint();
2146 __ mov(
2147 R3,
2148 Operand(target::Function::entry_point_offset(CodeEntryKind::kUnchecked) -
2150 __ Bind(&done);
2151}
2152
2153// Generate inline cache check for 'num_args'.
2154// R0: receiver (if instance call)
2155// R9: ICData
2156// LR: return address
2157// Control flow:
2158// - If receiver is null -> jump to IC miss.
2159// - If receiver is Smi -> load Smi class.
2160// - If receiver is not-Smi -> load receiver's class.
2161// - Check if 'num_args' (including receiver) match any IC data group.
2162// - Match found -> jump to target.
2163// - Match not found -> jump to IC miss.
2165 intptr_t num_args,
2166 const RuntimeEntry& handle_ic_miss,
2167 Token::Kind kind,
2168 Optimized optimized,
2169 CallType type,
2170 Exactness exactness) {
2171 if (FLAG_precompiled_mode) {
2172 __ Breakpoint();
2173 return;
2174 }
2175
2176 const bool save_entry_point = kind == Token::kILLEGAL;
2177 if (save_entry_point) {
2178 GenerateRecordEntryPoint(assembler);
2179 }
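  // For the generic (Token::kILLEGAL) stubs, R3 records which entry point
  // (normal or unchecked) was used to enter this stub, so that the resolved
  // target is later invoked through the matching entry point via
  // `__ Branch(Address(FUNCTION_REG, R3))`.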
2180
2181 if (optimized == kOptimized) {
2183 } else {
2184 GenerateUsageCounterIncrement(/* scratch */ R8);
2185 }
2186
2187 __ CheckCodePointer();
2188 ASSERT(num_args == 1 || num_args == 2);
2189#if defined(DEBUG)
2190 {
2191 Label ok;
2192 // Check that the IC data array has NumArgsTested() == num_args.
2193 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2194 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2195 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2196 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2197 __ CompareImmediate(R8, num_args);
2198 __ b(&ok, EQ);
2199 __ Stop("Incorrect stub for IC data");
2200 __ Bind(&ok);
2201 }
2202#endif // DEBUG
2203
2204#if !defined(PRODUCT)
2205 Label stepping, done_stepping;
2206 if (optimized == kUnoptimized) {
2207 __ Comment("Check single stepping");
2208 __ LoadIsolate(R8);
2209 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2210 __ CompareImmediate(R8, 0);
2211 __ b(&stepping, NE);
2212 __ Bind(&done_stepping);
2213 }
2214#endif
2215
2216 Label not_smi_or_overflow;
2217 if (kind != Token::kILLEGAL) {
2218 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2219 }
2220 __ Bind(&not_smi_or_overflow);
2221
2222 __ Comment("Extract ICData initial values and receiver cid");
2223 // R9: IC data object (preserved).
2224 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2225 // R8: ic_data_array with check entries: classes and target functions.
2226 const int kIcDataOffset = target::Array::data_offset() - kHeapObjectTag;
2227 // R8: points at the IC data array.
2228
2229 if (type == kInstanceCall) {
2230 __ LoadTaggedClassIdMayBeSmi(NOTFP, R0);
2231 __ ldr(
2233 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
2234 if (num_args == 2) {
2235 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2236 target::ArgumentsDescriptor::count_offset()));
2237 __ sub(R1, R1, Operand(target::ToRawSmi(2)));
2238 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2239 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2240 }
2241 } else {
2242 // Load arguments descriptor into R4.
2243 __ ldr(
2245 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
2246
2247 // Get the receiver's class ID (first read number of arguments from
2248 // arguments descriptor array and then access the receiver from the stack).
2249 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2250 target::ArgumentsDescriptor::count_offset()));
2251 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2252 // R1: argument_count - 1 (smi).
2253
2254 __ ldr(R0, Address(SP, R1, LSL, 1)); // R1 (argument_count - 1) is Smi.
2255 __ LoadTaggedClassIdMayBeSmi(NOTFP, R0);
2256
2257 if (num_args == 2) {
2258 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2259 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2260 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2261 }
2262 }
2263 // NOTFP: first argument class ID as Smi.
2264 // R1: second argument class ID as Smi.
2265 // R4: args descriptor
2266
2267 // Loop that checks if there is an IC data match.
2268 Label loop, found, miss;
2269 __ Comment("ICData loop");
2270
2271 // We unroll the loop more for the generic stub, which is only generated once, than for the others.
2272 const bool optimize = kind == Token::kILLEGAL;
2273
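  // Each IC entry holds `num_args` receiver class ids (as Smis), followed by
  // the target function, a call count and, when exactness is tracked, an
  // exactness slot; the array is terminated by a kIllegalCid sentinel, which
  // is what the CompareImmediate below checks for.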
2274 __ Bind(&loop);
2275 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2276 Label update;
2277
2278 __ ldr(R2, Address(R8, kIcDataOffset));
2279 __ cmp(NOTFP, Operand(R2)); // Class id match?
2280 if (num_args == 2) {
2281 __ b(&update, NE); // Continue.
2282 __ ldr(R2, Address(R8, kIcDataOffset + target::kWordSize));
2283 __ cmp(R1, Operand(R2)); // Class id match?
2284 }
2285 __ b(&found, EQ); // Break.
2286
2287 __ Bind(&update);
2288
2289 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2290 num_args, exactness == kCheckExactness) *
2291 target::kWordSize;
2292 __ AddImmediate(R8, entry_size); // Next entry.
2293
2294 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
2295 if (unroll == 0) {
2296 __ b(&loop, NE);
2297 } else {
2298 __ b(&miss, EQ);
2299 }
2300 }
2301
2302 __ Bind(&miss);
2303 __ Comment("IC miss");
2304 // Compute address of arguments.
2305 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2306 target::ArgumentsDescriptor::count_offset()));
2307 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2308 // R1: argument_count - 1 (smi).
2309 __ add(R1, SP, Operand(R1, LSL, 1)); // R1 is Smi.
2310 // R1: address of receiver.
2311 // Create a stub frame as we are pushing some objects on the stack before
2312 // calling into the runtime.
2313 __ EnterStubFrame();
2314 __ LoadImmediate(R0, 0);
2315 // Preserve IC data object and arguments descriptor array and
2316 // setup space on stack for result (target code object).
2317 RegList regs = (1 << R0) | (1 << ARGS_DESC_REG) | (1 << R9);
2318 if (save_entry_point) {
2319 __ SmiTag(R3);
2320 regs |= 1 << R3;
2321 }
2322 __ PushList(regs);
2323 // Push call arguments.
2324 for (intptr_t i = 0; i < num_args; i++) {
2325 __ LoadFromOffset(TMP, R1, -i * target::kWordSize);
2326 __ Push(TMP);
2327 }
2328 // Pass IC data object.
2329 __ Push(R9);
2330 __ CallRuntime(handle_ic_miss, num_args + 1);
2331 // Remove the call arguments pushed earlier, including the IC data object.
2332 __ Drop(num_args + 1);
2333 // Pop returned function object into R0.
2334 // Restore arguments descriptor array and IC data array.
2336 __ PopList(regs);
2337 if (save_entry_point) {
2338 __ SmiUntag(R3);
2339 }
2340 __ RestoreCodePointer();
2341 __ LeaveStubFrame();
2342 Label call_target_function;
2343 if (!FLAG_lazy_dispatchers) {
2344 GenerateDispatcherCode(assembler, &call_target_function);
2345 } else {
2346 __ b(&call_target_function);
2347 }
2348
2349 __ Bind(&found);
2350 // R8: pointer to an IC data check group.
2351 const intptr_t target_offset =
2352 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2353 const intptr_t count_offset =
2354 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2355 const intptr_t exactness_offset =
2356 target::ICData::ExactnessIndexFor(num_args) * target::kWordSize;
2357
2358 Label call_target_function_through_unchecked_entry;
2359 if (exactness == kCheckExactness) {
2360 Label exactness_ok;
2361 ASSERT(num_args == 1);
2362 __ ldr(R1, Address(R8, kIcDataOffset + exactness_offset));
2363 __ CompareImmediate(
2366 __ BranchIf(LESS, &exactness_ok);
2367 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2368
2369 // Check trivial exactness.
2370 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2371 // because we only emit calls to this stub when it is not null.
2372 __ ldr(R2,
2373 FieldAddress(R9, target::ICData::receivers_static_type_offset()));
2374 __ ldr(R2, FieldAddress(R2, target::Type::arguments_offset()));
2375 // R1 contains the offset to the type arguments in words, as a Smi,
2376 // hence TIMES_2. R0 is guaranteed to be a non-Smi because it is expected
2377 // to have type arguments.
2378 __ LoadIndexedPayload(TMP, R0, 0, R1, TIMES_2);
2379 __ CompareObjectRegisters(R2, TMP);
2380 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2381
2382 // Update exactness state (not-exact anymore).
2383 __ LoadImmediate(
2385 __ str(R1, Address(R8, kIcDataOffset + exactness_offset));
2386 __ Bind(&exactness_ok);
2387 }
2388 __ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
2389
2390 if (FLAG_optimization_counter_threshold >= 0) {
2391 __ Comment("Update caller's counter");
2392 __ LoadFromOffset(R1, R8, kIcDataOffset + count_offset);
2393 __ add(R1, R1, Operand(target::ToRawSmi(1))); // Ignore overflow.
2394 __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
2395 }
2396
2397 __ Comment("Call target");
2398 __ Bind(&call_target_function);
2399 // R0: target function.
2400 __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2401
2402 if (save_entry_point) {
2403 __ Branch(Address(FUNCTION_REG, R3));
2404 } else {
2405 __ Branch(
2406 FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2407 }
2408
2409 if (exactness == kCheckExactness) {
2410 __ Bind(&call_target_function_through_unchecked_entry);
2411 if (FLAG_optimization_counter_threshold >= 0) {
2412 __ Comment("Update ICData counter");
2413 __ LoadFromOffset(R1, R8, kIcDataOffset + count_offset);
2414 __ add(R1, R1, Operand(target::ToRawSmi(1))); // Ignore overflow.
2415 __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
2416 }
2417 __ Comment("Call target (via unchecked entry point)");
2418 __ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
2419 __ ldr(CODE_REG,
2420 FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2421 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset(
2423 }
2424
2425#if !defined(PRODUCT)
2426 if (optimized == kUnoptimized) {
2427 __ Bind(&stepping);
2428 __ EnterStubFrame();
2429 if (type == kInstanceCall) {
2430 __ Push(R0); // Preserve receiver.
2431 }
2432 RegList regs = 1 << R9;
2433 if (save_entry_point) {
2434 regs |= 1 << R3;
2435 __ SmiTag(R3); // Entry-point is not Smi.
2436 }
2437 __ PushList(regs); // Preserve IC data and entry-point.
2438 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2439 __ PopList(regs); // Restore IC data and entry-point
2440 if (save_entry_point) {
2441 __ SmiUntag(R3);
2442 }
2443 if (type == kInstanceCall) {
2444 __ Pop(R0);
2445 }
2446 __ RestoreCodePointer();
2447 __ LeaveStubFrame();
2448 __ b(&done_stepping);
2449 }
2450#endif
2451}
2452
2453// R0: receiver
2454// R9: ICData
2455// LR: return address
2456void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2458 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2460}
2461
2462// R0: receiver
2463// R9: ICData
2464// LR: return address
2465void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2467 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2469}
2470
2471// R0: receiver
2472// R9: ICData
2473// LR: return address
2474void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2476 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2478}
2479
2480// R0: receiver
2481// R9: ICData
2482// LR: return address
2483void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2485 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2487}
2488
2489// R0: receiver
2490// R9: ICData
2491// LR: return address
2492void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2494 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2496}
2497
2498// R0: receiver
2499// R9: ICData
2500// LR: return address
2501void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2503 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2505}
2506
2507// R0: receiver
2508// R9: ICData
2509// R8: Function
2510// LR: return address
2511void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2513 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2515}
2516
2517// R0: receiver
2518// R9: ICData
2519// R8: Function
2520// LR: return address
2521void StubCodeCompiler::
2522 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2524 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2526}
2527
2528// R0: receiver
2529// R9: ICData
2530// R8: Function
2531// LR: return address
2532void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2534 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2536}
2537
2538// R9: ICData
2539// LR: return address
2540void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2541 GenerateRecordEntryPoint(assembler);
2542 GenerateUsageCounterIncrement(/* scratch */ R8);
2543#if defined(DEBUG)
2544 {
2545 Label ok;
2546 // Check that the IC data array has NumArgsTested() == 0.
2547 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2548 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2549 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2550 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2551 __ CompareImmediate(R8, 0);
2552 __ b(&ok, EQ);
2553 __ Stop("Incorrect IC data for unoptimized static call");
2554 __ Bind(&ok);
2555 }
2556#endif // DEBUG
2557
2558#if !defined(PRODUCT)
2559 // Check single stepping.
2560 Label stepping, done_stepping;
2561 __ LoadIsolate(R8);
2562 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2563 __ CompareImmediate(R8, 0);
2564 __ b(&stepping, NE);
2565 __ Bind(&done_stepping);
2566#endif
2567
2568 // R9: IC data object (preserved).
2569 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2570 // R8: ic_data_array with entries: target functions and count.
2571 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2572 // R8: points directly to the first ic data array element.
2573 const intptr_t target_offset =
2574 target::ICData::TargetIndexFor(0) * target::kWordSize;
2575 const intptr_t count_offset =
2576 target::ICData::CountIndexFor(0) * target::kWordSize;
2577
2578 if (FLAG_optimization_counter_threshold >= 0) {
2579 // Increment count for this call, ignore overflow.
2580 __ LoadFromOffset(R1, R8, count_offset);
2581 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2582 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2583 }
2584
2585 // Load arguments descriptor into R4.
2586 __ ldr(ARGS_DESC_REG,
2587 FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
2588
2589 // Get function and call it, if possible.
2590 __ LoadFromOffset(FUNCTION_REG, R8, target_offset);
2591 __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2592
2593 __ Branch(Address(FUNCTION_REG, R3));
2594
2595#if !defined(PRODUCT)
2596 __ Bind(&stepping);
2597 __ EnterStubFrame();
2598 __ SmiTag(R3); // Entry-point is not Smi.
2599 __ PushList((1 << R9) | (1 << R3)); // Preserve IC data and entry-point.
2600 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2601 __ PopList((1 << R9) | (1 << R3));
2602 __ SmiUntag(R3);
2603 __ RestoreCodePointer();
2604 __ LeaveStubFrame();
2605 __ b(&done_stepping);
2606#endif
2607}
2608
2609// R9: ICData
2610// LR: return address
2611void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2612 GenerateUsageCounterIncrement(/* scratch */ R8);
2613 GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
2614 Token::kILLEGAL, kUnoptimized, kStaticCall,
2616}
2617
2618// R9: ICData
2619// LR: return address
2620void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2621 GenerateUsageCounterIncrement(/* scratch */ R8);
2623 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2625}
2626
2627// Stub for compiling a function and jumping to the compiled code.
2628// ARGS_DESC_REG: Arguments descriptor.
2629// FUNCTION_REG: Function.
2630void StubCodeCompiler::GenerateLazyCompileStub() {
2631 __ EnterStubFrame();
2632 // Preserve arg desc, pass function.
2634 __ PushList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
2635 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2636 __ PopList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
2637 __ LeaveStubFrame();
2638
2639 __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2640 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2641}
2642
2643// R9: Contains an ICData.
2644void StubCodeCompiler::GenerateICCallBreakpointStub() {
2645#if defined(PRODUCT)
2646 __ Stop("No debugging in PRODUCT mode");
2647#else
2648 __ EnterStubFrame();
2649 __ Push(R0); // Preserve receiver.
2650 __ Push(R9); // Preserve IC data.
2651 __ PushImmediate(0); // Space for result.
2652 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2653 __ Pop(CODE_REG); // Original stub.
2654 __ Pop(R9); // Restore IC data.
2655 __ Pop(R0); // Restore receiver.
2656 __ LeaveStubFrame();
2657 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2658#endif // defined(PRODUCT)
2659}
2660
2661void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2662#if defined(PRODUCT)
2663 __ Stop("No debugging in PRODUCT mode");
2664#else
2665 __ EnterStubFrame();
2666 __ Push(R9); // Preserve IC data.
2667 __ PushImmediate(0); // Space for result.
2668 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2669 __ Pop(CODE_REG); // Original stub.
2670 __ Pop(R9); // Restore IC data.
2671 __ LeaveStubFrame();
2672 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2673#endif // defined(PRODUCT)
2674}
2675
2676void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2677#if defined(PRODUCT)
2678 __ Stop("No debugging in PRODUCT mode");
2679#else
2680 __ EnterStubFrame();
2681 __ LoadImmediate(R0, 0);
2682 // Make room for result.
2683 __ PushList((1 << R0));
2684 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2685 __ PopList((1 << CODE_REG));
2686 __ LeaveStubFrame();
2687 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2688#endif // defined(PRODUCT)
2689}
2690
2691// Called only from unoptimized code. All relevant registers have been saved.
2692void StubCodeCompiler::GenerateDebugStepCheckStub() {
2693#if defined(PRODUCT)
2694 __ Stop("No debugging in PRODUCT mode");
2695#else
2696 // Check single stepping.
2697 Label stepping, done_stepping;
2698 __ LoadIsolate(R1);
2699 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
2700 __ CompareImmediate(R1, 0);
2701 __ b(&stepping, NE);
2702 __ Bind(&done_stepping);
2703 __ Ret();
2704
2705 __ Bind(&stepping);
2706 __ EnterStubFrame();
2707 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2708 __ LeaveStubFrame();
2709 __ b(&done_stepping);
2710#endif // defined(PRODUCT)
2711}
2712
2713// Used to check class and type arguments. Arguments passed in registers:
2714//
2715// Inputs (all preserved, mostly from TypeTestABI struct):
2716// - kSubtypeTestCacheReg: SubtypeTestCacheLayout
2717// - kInstanceReg: instance to test against.
2718// - kDstTypeReg: destination type (for n>=7).
2719// - kInstantiatorTypeArgumentsReg: instantiator type arguments (for n>=3).
2720// - kFunctionTypeArgumentsReg: function type arguments (for n>=4).
2721// - LR: return address.
2722//
2723// Outputs (from TypeTestABI struct):
2724// - kSubtypeTestCacheResultReg: the cached result, or null if not found.
2725void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2726 int n) {
2727 ASSERT(n >= 1);
2729 // If we need the parent function type arguments for a closure, we also need
2730 // the delayed type arguments, so this case will never happen.
2731 ASSERT(n != 5);
2732 RegisterSet saved_registers;
2733
2734 // Safe as the original value of TypeTestABI::kSubtypeTestCacheReg is only
2735 // used to initialize this register.
2736 const Register kCacheArrayReg = TypeTestABI::kSubtypeTestCacheReg;
2737 saved_registers.AddRegister(kCacheArrayReg);
2738
2739 // CODE_REG is used only in JIT mode, and the dispatch table only exists in
2740 // AOT mode, so we can use the corresponding register for the mode we're not
2741 // in without having to preserve it.
2742 const Register kNullReg =
2743 FLAG_precompiled_mode ? CODE_REG : DISPATCH_TABLE_REG;
2744 __ LoadObject(kNullReg, NullObject());
2745
2746 // Free up additional registers needed for checks in the loop. Initially
2747 // define them as kNoRegister so any unexpected uses are caught.
2748 Register kInstanceInstantiatorTypeArgumentsReg = kNoRegister;
2749 if (n >= 2) {
2750 kInstanceInstantiatorTypeArgumentsReg = PP;
2751 saved_registers.AddRegister(kInstanceInstantiatorTypeArgumentsReg);
2752 }
2753 Register kInstanceParentFunctionTypeArgumentsReg = kNoRegister;
2754 if (n >= 5) {
2755 // For this, we look at the pair of Registers we considered for kNullReg
2756 // and use the one that must be preserved instead.
2757 kInstanceParentFunctionTypeArgumentsReg =
2758 FLAG_precompiled_mode ? DISPATCH_TABLE_REG : CODE_REG;
2759 saved_registers.AddRegister(kInstanceParentFunctionTypeArgumentsReg);
2760 }
2761 Register kInstanceDelayedFunctionTypeArgumentsReg = kNoRegister;
2762 if (n >= 6) {
2763 // We retrieve all the needed fields from the instance during loop
2764 // initialization and store them in registers, so we don't need the value
2765 // of kInstanceReg during the loop and just need to save and restore it.
2766 // Thus, use kInstanceReg for the last field that can possibly be retrieved
2767 // from the instance.
2768 kInstanceDelayedFunctionTypeArgumentsReg = TypeTestABI::kInstanceReg;
2769 saved_registers.AddRegister(kInstanceDelayedFunctionTypeArgumentsReg);
2770 }
2771
2772 // We'll replace these with actual registers if possible, but fall back to
2773 // the stack if register pressure is too great. The last two values are
2774 // used in every loop iteration, and so are more important to put in
2775 // registers if possible, whereas the first is used only when we go off
2776 // the end of the backing array (usually at most once per check).
2777 Register kCacheContentsSizeReg = kNoRegister;
2778 if (n < 5) {
2779 // Use the register we would have used for the parent function type args.
2780 kCacheContentsSizeReg =
2781 FLAG_precompiled_mode ? DISPATCH_TABLE_REG : CODE_REG;
2782 saved_registers.AddRegister(kCacheContentsSizeReg);
2783 }
2784 Register kProbeDistanceReg = kNoRegister;
2785 if (n < 6) {
2786 // Use the register we would have used for the delayed type args.
2787 kProbeDistanceReg = TypeTestABI::kInstanceReg;
2788 saved_registers.AddRegister(kProbeDistanceReg);
2789 }
2790 Register kCacheEntryEndReg = kNoRegister;
2791 if (n < 7) {
2792 // Use the destination type, as that is the last input that might be unused.
2793 kCacheEntryEndReg = TypeTestABI::kDstTypeReg;
2794 saved_registers.AddRegister(TypeTestABI::kDstTypeReg);
2795 }
2796
2797 __ PushRegisters(saved_registers);
2798
2799 Label not_found;
2800 GenerateSubtypeTestCacheSearch(
2801 assembler, n, kNullReg, kCacheArrayReg,
2803 kInstanceInstantiatorTypeArgumentsReg,
2804 kInstanceParentFunctionTypeArgumentsReg,
2805 kInstanceDelayedFunctionTypeArgumentsReg, kCacheEntryEndReg,
2806 kCacheContentsSizeReg, kProbeDistanceReg,
2807 [&](Assembler* assembler, int n) {
2808 __ LoadCompressed(
2810 Address(kCacheArrayReg, target::kCompressedWordSize *
2811 target::SubtypeTestCache::kTestResult));
2812 __ PopRegisters(saved_registers);
2813 __ Ret();
2814 },
2815 [&](Assembler* assembler, int n) {
2816 __ MoveRegister(TypeTestABI::kSubtypeTestCacheResultReg, kNullReg);
2817 __ PopRegisters(saved_registers);
2818 __ Ret();
2819 });
2820}
2821
2822// Return the current stack pointer address, used to do stack alignment checks.
2823void StubCodeCompiler::GenerateGetCStackPointerStub() {
2824 __ mov(R0, Operand(SP));
2825 __ Ret();
2826}
2827
2828// Jump to a frame on the call stack.
2829// LR: return address.
2830// R0: program_counter.
2831// R1: stack_pointer.
2832// R2: frame_pointer.
2833// R3: thread.
2834// Does not return.
2835//
2836// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
2837void StubCodeCompiler::GenerateJumpToFrameStub() {
2842 __ mov(IP, Operand(R1)); // Copy stack pointer into IP.
2843 // TransitionGeneratedToNative might clobber LR if it takes the slow path.
2844 __ mov(R4, Operand(R0)); // Program counter.
2845 __ mov(THR, Operand(R3)); // Thread.
2846 __ mov(FP, Operand(R2)); // Frame pointer.
2847 __ mov(SP, Operand(IP)); // Set stack pointer.
2848#if defined(USING_SHADOW_CALL_STACK)
2849#error Unimplemented
2850#endif
2851 Label exit_through_non_ffi;
2852 Register tmp1 = R0, tmp2 = R1;
2853 // Check if we exited generated code through an FFI call. If so, we have to
2854 // do the transition explicitly here: runtime calls normally transition back
2855 // to generated code via the destructor of TransitionGeneratedToVM/Native,
2856 // which is part of the runtime boilerplate (see DEFINE_RUNTIME_ENTRY_IMPL in
2857 // runtime_entry.h). FFI calls don't have this boilerplate or this stack
2858 // resource, so they have to transition explicitly.
2859 __ LoadFromOffset(tmp1, THR,
2860 compiler::target::Thread::exit_through_ffi_offset());
2861 __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
2862 __ cmp(tmp1, Operand(tmp2));
2863 __ b(&exit_through_non_ffi, NE);
2864 __ TransitionNativeToGenerated(tmp1, tmp2,
2865 /*leave_safepoint=*/true,
2866 /*ignore_unwind_in_progress=*/true);
2867 __ Bind(&exit_through_non_ffi);
2868
2869 // Set the tag.
2870 __ LoadImmediate(R2, VMTag::kDartTagId);
2871 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
2872 // Clear top exit frame.
2873 __ LoadImmediate(R2, 0);
2874 __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
2875 // Restore the pool pointer.
2876 __ RestoreCodePointer();
2877 if (FLAG_precompiled_mode) {
2878 __ SetupGlobalPoolAndDispatchTable();
2879 __ set_constant_pool_allowed(true);
2880 } else {
2881 __ LoadPoolPointer();
2882 }
2883 __ bx(R4); // Jump to continuation point.
2884}
2885
2886// Run an exception handler. Execution comes from JumpToFrame
2887// stub or from the simulator.
2888//
2889// The arguments are stored in the Thread object.
2890// Does not return.
2891void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
2892 WRITES_RETURN_ADDRESS_TO_LR(
2893 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
2894
2895 word offset_from_thread = 0;
2896 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
2897 ASSERT(ok);
2898 __ LoadFromOffset(R2, THR, offset_from_thread);
2899
2900 // Exception object.
2901 __ LoadFromOffset(R0, THR, target::Thread::active_exception_offset());
2902 __ StoreToOffset(R2, THR, target::Thread::active_exception_offset());
2903
2904 // StackTrace object.
2905 __ LoadFromOffset(R1, THR, target::Thread::active_stacktrace_offset());
2906 __ StoreToOffset(R2, THR, target::Thread::active_stacktrace_offset());
2907
2908 READS_RETURN_ADDRESS_FROM_LR(
2909 __ bx(LR)); // Jump to the exception handler code.
2910}
2911
2912// Deoptimize a frame on the call stack before rewinding.
2913// The arguments are stored in the Thread object.
2914// No result.
2915void StubCodeCompiler::GenerateDeoptForRewindStub() {
2916 // Push zap value instead of CODE_REG.
2917 __ LoadImmediate(IP, kZapCodeReg);
2918 __ Push(IP);
2919
2920 // Load the deopt pc into LR.
2921 WRITES_RETURN_ADDRESS_TO_LR(
2922 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
2923 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2924
2925 // After we have deoptimized, jump to the correct frame.
2926 __ EnterStubFrame();
2927 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2928 __ LeaveStubFrame();
2929 __ bkpt(0);
2930}
2931
2932// Calls to the runtime to optimize the given function.
2933// R8: function to be reoptimized.
2934// ARGS_DESC_REG: argument descriptor (preserved).
2935void StubCodeCompiler::GenerateOptimizeFunctionStub() {
2936 __ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
2937 __ EnterStubFrame();
2938 __ Push(ARGS_DESC_REG);
2939 __ LoadImmediate(IP, 0);
2940 __ Push(IP); // Setup space on stack for return value.
2941 __ Push(R8);
2942 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
2943 __ Pop(R0); // Discard argument.
2944 __ Pop(FUNCTION_REG); // Get Function object
2945 __ Pop(ARGS_DESC_REG); // Restore argument descriptor.
2946 __ LeaveStubFrame();
2947 __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2948 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2949 __ bkpt(0);
2950}
2951
2952// Does the identical check (are the two object references equal?) with
2953// special handling for boxed numbers.
2954// LR: return address.
2955// Return Zero condition flag set if equal.
2956// Note: A Mint cannot contain a value that would fit in Smi.
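// The 64-bit payloads of Doubles and Mints are compared as two 32-bit words
// (value_offset + 0 and + 1 * target::kWordSize), since this is a 32-bit
// target.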
2957static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2958 const Register left,
2959 const Register right,
2960 const Register temp) {
2961 Label reference_compare, done, check_mint;
2962 // If any of the arguments is Smi do reference compare.
2963 __ tst(left, Operand(kSmiTagMask));
2964 __ b(&reference_compare, EQ);
2965 __ tst(right, Operand(kSmiTagMask));
2966 __ b(&reference_compare, EQ);
2967
2968 // Value compare for two doubles.
2969 __ CompareClassId(left, kDoubleCid, temp);
2970 __ b(&check_mint, NE);
2971 __ CompareClassId(right, kDoubleCid, temp);
2972 __ b(&done, NE);
2973
2974 // Double values bitwise compare.
2975 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
2976 0 * target::kWordSize));
2977 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
2978 0 * target::kWordSize));
2979 __ cmp(temp, Operand(IP));
2980 __ b(&done, NE);
2981 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
2982 1 * target::kWordSize));
2983 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
2984 1 * target::kWordSize));
2985 __ cmp(temp, Operand(IP));
2986 __ b(&done);
2987
2988 __ Bind(&check_mint);
2989 __ CompareClassId(left, kMintCid, temp);
2990 __ b(&reference_compare, NE);
2991 __ CompareClassId(right, kMintCid, temp);
2992 __ b(&done, NE);
2993 __ ldr(temp, FieldAddress(
2994 left, target::Mint::value_offset() + 0 * target::kWordSize));
2995 __ ldr(IP, FieldAddress(
2996 right, target::Mint::value_offset() + 0 * target::kWordSize));
2997 __ cmp(temp, Operand(IP));
2998 __ b(&done, NE);
2999 __ ldr(temp, FieldAddress(
3000 left, target::Mint::value_offset() + 1 * target::kWordSize));
3001 __ ldr(IP, FieldAddress(
3002 right, target::Mint::value_offset() + 1 * target::kWordSize));
3003 __ cmp(temp, Operand(IP));
3004 __ b(&done);
3005
3006 __ Bind(&reference_compare);
3007 __ cmp(left, Operand(right));
3008 __ Bind(&done);
3009}
3010
3011// Called only from unoptimized code. All relevant registers have been saved.
3012// LR: return address.
3013// SP + 4: left operand.
3014// SP + 0: right operand.
3015// Return Zero condition flag set if equal.
3016void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
3017#if !defined(PRODUCT)
3018 // Check single stepping.
3019 Label stepping, done_stepping;
3020 __ LoadIsolate(R1);
3021 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
3022 __ CompareImmediate(R1, 0);
3023 __ b(&stepping, NE);
3024 __ Bind(&done_stepping);
3025#endif
3026
3027 const Register temp = R2;
3028 const Register left = R1;
3029 const Register right = R0;
3030 __ ldr(left, Address(SP, 1 * target::kWordSize));
3031 __ ldr(right, Address(SP, 0 * target::kWordSize));
3032 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3033 __ Ret();
3034
3035#if !defined(PRODUCT)
3036 __ Bind(&stepping);
3037 __ EnterStubFrame();
3038 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3039 __ RestoreCodePointer();
3040 __ LeaveStubFrame();
3041 __ b(&done_stepping);
3042#endif
3043}
3044
3045// Called from optimized code only.
3046// LR: return address.
3047// SP + 4: left operand.
3048// SP + 0: right operand.
3049// Return Zero condition flag set if equal.
3050void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
3051 const Register temp = R2;
3052 const Register left = R1;
3053 const Register right = R0;
3054 __ ldr(left, Address(SP, 1 * target::kWordSize));
3055 __ ldr(right, Address(SP, 0 * target::kWordSize));
3056 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3057 __ Ret();
3058}
3059
3060// Called from megamorphic calls.
3061// R0: receiver
3062// IC_DATA_REG: MegamorphicCache (preserved)
3063// Passed to target:
3064// FUNCTION_REG: target function
3065// ARGS_DESC_REG: arguments descriptor
3066// CODE_REG: target Code
3067void StubCodeCompiler::GenerateMegamorphicCallStub() {
3068 __ LoadTaggedClassIdMayBeSmi(R8, R0);
3069 // R8: receiver cid as Smi.
3070 __ ldr(R2,
3071 FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
3072 __ ldr(R1,
3073 FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
3074 // R2: cache buckets array.
3075 // R1: mask as a smi.
3076
3077 // Compute the table index.
3078 ASSERT(target::MegamorphicCache::kSpreadFactor == 7);
3079 // Use reverse subtract to multiply by 7 == 8 - 1.
3080 __ rsb(R3, R8, Operand(R8, LSL, 3));
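  // R3 = (R8 << 3) - R8 = R8 * 7; the Smi tag bit stays clear, so R3 remains
  // a Smi.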
3081 // R3: probe.
3082 Label loop;
3083 __ Bind(&loop);
3084 __ and_(R3, R3, Operand(R1));
3085
3086 const intptr_t base = target::Array::data_offset();
3087 // R3 is smi tagged, but table entries are two words, so LSL 2.
3088 Label probe_failed;
3089 __ add(IP, R2, Operand(R3, LSL, 2));
3090 __ ldr(R6, FieldAddress(IP, base));
3091 __ cmp(R6, Operand(R8));
3092 __ b(&probe_failed, NE);
3093
3094 Label load_target;
3095 __ Bind(&load_target);
3096 // Call the target found in the cache. For a class id match, this is a
3097 // proper target for the given name and arguments descriptor. If the
3098 // illegal class id was found, the target is a cache miss handler that can
3099 // be invoked as a normal Dart function.
3100 __ ldr(FUNCTION_REG, FieldAddress(IP, base + target::kWordSize));
3101 if (!FLAG_precompiled_mode) {
3102 __ ldr(CODE_REG,
3103 FieldAddress(FUNCTION_REG, target::Function::code_offset()));
3104 }
3105 __ ldr(ARGS_DESC_REG,
3106 FieldAddress(IC_DATA_REG,
3107 target::CallSiteData::arguments_descriptor_offset()));
3108 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
3109
3110 // Probe failed, check if it is a miss.
3111 __ Bind(&probe_failed);
3112 ASSERT(kIllegalCid == 0);
3113 __ tst(R6, Operand(R6));
3114 Label miss;
3115 __ b(&miss, EQ); // branch if miss.
3116
3117 // Try next entry in the table.
3118 __ AddImmediate(R3, target::ToRawSmi(1));
3119 __ b(&loop);
3120
3121 __ Bind(&miss);
3122 GenerateSwitchableCallMissStub();
3123}
3124
3125void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3126 Label loop, found, miss;
3127 __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
3128 __ ldr(R4, FieldAddress(IC_DATA_REG,
3129 target::CallSiteData::arguments_descriptor_offset()));
3130 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
3131 // R8: first IC entry
3132 __ LoadTaggedClassIdMayBeSmi(R1, R0);
3133 // R1: receiver cid as Smi
3134
3135 __ Bind(&loop);
3136 __ ldr(R2, Address(R8, 0));
3137 __ cmp(R1, Operand(R2));
3138 __ b(&found, EQ);
3139 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
3140 __ b(&miss, EQ);
3141
3142 const intptr_t entry_length =
3143 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3144 target::kWordSize;
3145 __ AddImmediate(R8, entry_length); // Next entry.
3146 __ b(&loop);
3147
3148 __ Bind(&found);
3149 if (FLAG_precompiled_mode) {
3150 const intptr_t entry_offset =
3151 target::ICData::EntryPointIndexFor(1) * target::kWordSize;
3152 __ LoadCompressed(R0, Address(R8, entry_offset));
3153 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
3154 } else {
3155 const intptr_t code_offset =
3156 target::ICData::CodeIndexFor(1) * target::kWordSize;
3157 __ LoadCompressed(CODE_REG, Address(R8, code_offset));
3158 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3159 }
3160
3161 __ Bind(&miss);
3162 __ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()));
3163}
3164
3165// Implement the monomorphic entry check for call-sites where the receiver
3166// might be a Smi.
3167//
3168// R0: receiver
3169// R9: MonomorphicSmiableCall object
3170//
3171// R2, R3: clobbered
3172void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3173 __ LoadClassIdMayBeSmi(IP, R0);
3174
3175 // entrypoint_ should come right after expected_cid_
3176 ASSERT(target::MonomorphicSmiableCall::entrypoint_offset() ==
3177 target::MonomorphicSmiableCall::expected_cid_offset() +
3178 target::kWordSize);
3179
3180 // Note: this stub is only used in AOT mode, hence the direct (bare) call.
3181 // Simultaneously load the expected cid into R2 and the entrypoint into R3.
3182 __ ldrd(
3183 R2, R3, R9,
3184 target::MonomorphicSmiableCall::expected_cid_offset() - kHeapObjectTag);
3185 __ cmp(R2, Operand(IP));
3186 __ Branch(Address(THR, target::Thread::switchable_call_miss_entry_offset()),
3187 NE);
3188 __ bx(R3);
3189}
3190
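// Calls the SwitchableCallMiss runtime entry for the given receiver.
//
// Pushes a result slot, an out-parameter slot for the replacement stub, and
// the receiver. After the runtime call the receiver is restored into R0, the
// replacement stub's Code is popped into CODE_REG, and the new call-site data
// object is popped into R9.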
3191static void CallSwitchableCallMissRuntimeEntry(Assembler* assembler,
3192 Register receiver_reg) {
3193 __ LoadImmediate(IP, 0);
3194 __ Push(IP); // Result slot
3195 __ Push(IP); // Arg0: stub out
3196 __ Push(receiver_reg); // Arg1: Receiver
3197 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3198 __ Pop(R0); // Get the receiver
3199 __ Pop(CODE_REG); // result = stub
3200 __ Pop(R9); // result = IC
3201}
3202
3203// Called from switchable IC calls.
3204// R0: receiver
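//
// The runtime miss handler returns a replacement stub (CODE_REG) and new
// call-site data (R9); execution then resumes at the replacement stub's
// normal entry point with the original receiver back in R0.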
3205void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3206 __ ldr(CODE_REG,
3207 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3208 __ EnterStubFrame();
3209 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3210 __ LeaveStubFrame();
3211
3212 __ Branch(FieldAddress(
3213 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kNormal)));
3214}
3215
3216// Called from switchable IC calls.
3217// R0: receiver
3218// R9: SingleTargetCache
3219// Passed to target:
3220// CODE_REG: target Code object
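//
// The receiver's class id is checked against the cache's
// [lower_limit, upper_limit] range. Within range, CODE_REG is loaded with the
// cached target Code and control transfers to the cached entry point; on a
// miss the switchable-call miss runtime is invoked and the call is retried
// through the returned stub's monomorphic entry point.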
3221void StubCodeCompiler::GenerateSingleTargetCallStub() {
3222 Label miss;
3223 __ LoadClassIdMayBeSmi(R1, R0);
3224 __ ldrh(R2,
3225 FieldAddress(R9, target::SingleTargetCache::lower_limit_offset()));
3226 __ ldrh(R3,
3227 FieldAddress(R9, target::SingleTargetCache::upper_limit_offset()));
3228
3229 __ cmp(R1, Operand(R2));
3230 __ b(&miss, LT);
3231 __ cmp(R1, Operand(R3));
3232 __ b(&miss, GT);
3233
3234 __ ldr(CODE_REG,
3235 FieldAddress(R9, target::SingleTargetCache::target_offset()));
3236 __ Branch(FieldAddress(R9, target::SingleTargetCache::entry_point_offset()));
3237
3238 __ Bind(&miss);
3239 __ EnterStubFrame();
3240 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3241 __ LeaveStubFrame();
3242
3243 __ Branch(FieldAddress(
3244 CODE_REG, target::Code::entry_point_offset(CodeEntryKind::kMonomorphic)));
3245}
3246
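// Maps a typed data element size in bytes to its power-of-two shift, used
// below to scale an element count into an allocation size.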
3247static int GetScaleFactor(intptr_t size) {
3248 switch (size) {
3249 case 1:
3250 return 0;
3251 case 2:
3252 return 1;
3253 case 4:
3254 return 2;
3255 case 8:
3256 return 3;
3257 case 16:
3258 return 4;
3259 }
3260 UNREACHABLE();
3261 return -1;
3262}
3263
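// Allocates a TypedData array of the given class id.
//
// The fast path bump-allocates from the thread's top/end pointers: the
// requested length is checked to be a Smi in [0, max_len], scaled into an
// allocation size (header plus aligned payload), and on success the object's
// tags, length field, and inner data pointer are initialized and the payload
// is zeroed. If inline allocation is disabled or the fast path fails, the
// stub falls back to the AllocateTypedData runtime entry. The length is
// passed in AllocateTypedDataArrayABI::kLengthReg and the new array is
// returned in AllocateTypedDataArrayABI::kResultReg.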
3264 void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3265   const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3266   const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3267   const intptr_t scale_shift = GetScaleFactor(element_size);
3268
3271
3272 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3273 Label call_runtime;
3274     NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, R2));
3275     __ mov(R2, Operand(AllocateTypedDataArrayABI::kLengthReg));
3276 /* Check that length is a positive Smi. */
3277 /* R2: requested array length argument. */
3278 __ tst(R2, Operand(kSmiTagMask));
3279 __ b(&call_runtime, NE);
3280 __ SmiUntag(R2);
3281 /* Check for length >= 0 && length <= max_len. */
3282 /* R2: untagged array length. */
3283 __ CompareImmediate(R2, max_len);
3284 __ b(&call_runtime, HI);
3285 __ mov(R2, Operand(R2, LSL, scale_shift));
3286     const intptr_t fixed_size_plus_alignment_padding =
3287         target::TypedData::HeaderSize() +
3288         target::ObjectAlignment::kObjectAlignment - 1;
3289     __ AddImmediate(R2, fixed_size_plus_alignment_padding);
3290     __ bic(R2, R2, Operand(target::ObjectAlignment::kObjectAlignment - 1));
3291 __ ldr(R0, Address(THR, target::Thread::top_offset()));
3292
3293 /* R2: allocation size. */
3294 __ adds(R1, R0, Operand(R2));
3295 __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
3296
3297 /* Check if the allocation fits into the remaining space. */
3298 /* R0: potential new object start. */
3299 /* R1: potential next object start. */
3300 /* R2: allocation size. */
3301 __ ldr(IP, Address(THR, target::Thread::end_offset()));
3302 __ cmp(R1, Operand(IP));
3303 __ b(&call_runtime, CS);
3304 __ CheckAllocationCanary(R0);
3305
3306 __ str(R1, Address(THR, target::Thread::top_offset()));
3307 __ AddImmediate(R0, kHeapObjectTag);
3308 /* Initialize the tags. */
3309 /* R0: new object start as a tagged pointer. */
3310 /* R1: new object end address. */
3311 /* R2: allocation size. */
3312 {
3313 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
3314       __ mov(R3,
3315              Operand(R2, LSL,
3316                      target::UntaggedObject::kTagBitsSizeTagPos -
3317                          target::ObjectAlignment::kObjectAlignmentLog2),
3318              LS);
3319 __ mov(R3, Operand(0), HI);
3320
3321 /* Get the class index and insert it into the tags. */
3322 uword tags =
3323 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3324 __ LoadImmediate(TMP, tags);
3325 __ orr(R3, R3, Operand(TMP));
3326 __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
3327 }
3328 /* Set the length field. */
3329 /* R0: new object start as a tagged pointer. */
3330 /* R1: new object end address. */
3331 /* R2: allocation size. */
3332 __ mov(R3,
3333 Operand(AllocateTypedDataArrayABI::kLengthReg)); /* Array length. */
3334 __ StoreIntoObjectNoBarrier(
3335 R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3);
3336 /* Initialize all array elements to 0. */
3337 /* R0: new object start as a tagged pointer. */
3338 /* R1: new object end address. */
3339 /* R2: allocation size. */
3340     /* R3: iterator which initially points to the start of the variable */
3341     /*     data area to be initialized. */
3342     /* R8, R9: zero. */
3343 __ LoadImmediate(R8, 0);
3344 __ mov(R9, Operand(R8));
3345 __ AddImmediate(R3, R0, target::TypedData::HeaderSize() - 1);
3346 __ StoreInternalPointer(
3347 R0, FieldAddress(R0, target::PointerBase::data_offset()), R3);
3348 Label init_loop;
3349 __ Bind(&init_loop);
3350 __ AddImmediate(R3, 2 * target::kWordSize);
3351 __ cmp(R3, Operand(R1));
3352 __ strd(R8, R9, R3, -2 * target::kWordSize, LS);
3353 __ b(&init_loop, CC);
3354 __ str(R8, Address(R3, -2 * target::kWordSize), HI);
3355 __ WriteAllocationCanary(R1); // Fix overshoot.
3356
3357 __ Ret();
3358
3359 __ Bind(&call_runtime);
3360 }
3361
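  // Slow path: allocate through the runtime. A slot for the result is pushed
  // along with the class id and length arguments.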
3362 __ EnterStubFrame();
3363 __ PushObject(Object::null_object()); // Make room for the result.
3364 __ PushImmediate(target::ToRawSmi(cid)); // Cid
3365 __ Push(AllocateTypedDataArrayABI::kLengthReg); // Array length
3366 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3367   __ Drop(2);  // Drop arguments.
3368   __ Pop(AllocateTypedDataArrayABI::kResultReg);  // Pop the result.
3369   __ LeaveStubFrame();
3370 __ Ret();
3371}
3372
3373} // namespace compiler
3374
3375} // namespace dart
3376
3377#endif // defined(TARGET_ARCH_ARM)