stub_code_compiler_arm.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/globals.h"
7
8// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
9// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
15
16#if defined(TARGET_ARCH_ARM)
17
18#include "vm/class_id.h"
19#include "vm/code_entry_kind.h"
23#include "vm/constants.h"
25#include "vm/instructions.h"
27#include "vm/tags.h"
28
29#define __ assembler->
30
31namespace dart {
32namespace compiler {
33
34// Ensures that [R0] is a new object; if it is not, it will be added to the
35// remembered set via a leaf runtime call.
36//
37// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
38// The caller should simply call LeaveStubFrame() and return.
40 // If the object is not in an active TLAB, we call a leaf-runtime to add it to
41 // the remembered set and/or deferred marking worklist. This test assumes a
42 // Page's TLAB use is always ascending.
43 Label done;
44 __ AndImmediate(TMP, R0, target::kPageMask);
45 __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
46 __ CompareRegisters(R0, TMP);
47 __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
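 // Note: ANDing R0 with kPageMask yields the address of its Page header,
 // where original_top is stored. An object at or above original_top must have
 // been bump-allocated in the currently active TLAB and is therefore new, so
 // the leaf runtime call below can be skipped.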
48
49 {
50 LeafRuntimeScope rt(assembler,
51 /*frame_size=*/0,
52 /*preserve_registers=*/false);
53 // [R0] already contains first argument.
54 __ mov(R1, Operand(THR));
55 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
56 }
57
58 __ Bind(&done);
59}
60
61// Input parameters:
62// LR : return address.
63// SP : address of last argument in argument array.
64// SP + 4*R4 - 4 : address of first argument in argument array.
65// SP + 4*R4 : address of return value.
66// R9 : address of the runtime function to call.
67// R4 : number of arguments to the call.
68void StubCodeCompiler::GenerateCallToRuntimeStub() {
69 const intptr_t thread_offset = target::NativeArguments::thread_offset();
70 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
71 const intptr_t argv_offset = target::NativeArguments::argv_offset();
72 const intptr_t retval_offset = target::NativeArguments::retval_offset();
73
75 __ EnterStubFrame();
76
77 // Save exit frame information to enable stack walking as we are about
78 // to transition to Dart VM C++ code.
80
81 // Mark that the thread exited generated code through a runtime call.
84
85#if defined(DEBUG)
86 {
87 Label ok;
88 // Check that we are always entering from Dart code.
89 __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
90 __ CompareImmediate(R8, VMTag::kDartTagId);
91 __ b(&ok, EQ);
92 __ Stop("Not coming from Dart code.");
93 __ Bind(&ok);
94 }
95#endif
96
97 // Mark that the thread is executing VM code.
98 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
99
100 // Reserve space for arguments and align frame before entering C++ world.
101 // target::NativeArguments are passed in registers.
103 __ ReserveAlignedFrameSpace(0);
104
105 // Pass target::NativeArguments structure by value and call runtime.
106 // Registers R0, R1, R2, and R3 are used.
107
108 ASSERT(thread_offset == 0 * target::kWordSize);
109 // Set thread in NativeArgs.
110 __ mov(R0, Operand(THR));
111
112 ASSERT(argc_tag_offset == 1 * target::kWordSize);
113 __ mov(R1, Operand(R4)); // Set argc in target::NativeArguments.
114
115 ASSERT(argv_offset == 2 * target::kWordSize);
116 __ add(R2, FP, Operand(R4, LSL, 2)); // Compute argv.
117 // Set argv in target::NativeArguments.
118 __ AddImmediate(R2,
119 target::frame_layout.param_end_from_fp * target::kWordSize);
120
121 ASSERT(retval_offset == 3 * target::kWordSize);
122 __ add(R3, R2,
123 Operand(target::kWordSize)); // Retval is next to 1st argument.
124
125 // Call runtime or redirection via simulator.
126 __ blx(R9);
127
128 // Mark that the thread is executing Dart code.
129 __ LoadImmediate(R2, VMTag::kDartTagId);
130 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
131
132 // Mark that the thread has not exited generated Dart code.
133 __ LoadImmediate(R2, 0);
135
136 // Reset exit frame information in Isolate's mutator thread structure.
138
139 // Restore the global object pool after returning from runtime (old space is
140 // moving, so the GOP could have been relocated).
141 if (FLAG_precompiled_mode) {
142 __ SetupGlobalPoolAndDispatchTable();
143 }
144
145 __ LeaveStubFrame();
146
147 // The following return can jump to a lazy-deopt stub, which assumes R0
148 // contains a return value and will save it in a GC-visible way. We therefore
149 // have to ensure R0 does not contain any garbage value left from the C
150 // function we called (which has return type "void").
151 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
152 __ LoadImmediate(R0, 0);
153 __ Ret();
154}
155
156void StubCodeCompiler::GenerateSharedStubGeneric(
157 bool save_fpu_registers,
158 intptr_t self_code_stub_offset_from_thread,
159 bool allow_return,
160 std::function<void()> perform_runtime_call) {
161 // We want the saved registers to appear like part of the caller's frame, so
162 // we push them before calling EnterStubFrame.
163 RegisterSet all_registers;
164 all_registers.AddAllNonReservedRegisters(save_fpu_registers);
165
166 // To make the stack map calculation architecture-independent, we do the same
167 // as on Intel.
168 READS_RETURN_ADDRESS_FROM_LR(__ Push(LR));
169 __ PushRegisters(all_registers);
170 __ ldr(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
171 __ EnterStubFrame();
172 perform_runtime_call();
173 if (!allow_return) {
174 __ Breakpoint();
175 return;
176 }
177 __ LeaveStubFrame();
178 __ PopRegisters(all_registers);
179 __ Drop(1); // We use the LR restored via LeaveStubFrame.
180 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR));
181}
182
183void StubCodeCompiler::GenerateSharedStub(
184 bool save_fpu_registers,
185 const RuntimeEntry* target,
186 intptr_t self_code_stub_offset_from_thread,
187 bool allow_return,
188 bool store_runtime_result_in_result_register) {
189 ASSERT(!store_runtime_result_in_result_register || allow_return);
190 auto perform_runtime_call = [&]() {
191 if (store_runtime_result_in_result_register) {
192 // Reserve space for the result on the stack. This needs to be a GC
193 // safe value.
194 __ PushImmediate(Smi::RawValue(0));
195 }
196 __ CallRuntime(*target, /*argument_count=*/0);
197 if (store_runtime_result_in_result_register) {
198 __ PopRegister(R0);
199 __ str(R0,
200 Address(FP, target::kWordSize *
203 }
204 };
205 GenerateSharedStubGeneric(save_fpu_registers,
206 self_code_stub_offset_from_thread, allow_return,
207 perform_runtime_call);
208}
209
210void StubCodeCompiler::GenerateEnterSafepointStub() {
211 RegisterSet all_registers;
212 all_registers.AddAllGeneralRegisters();
213 __ PushRegisters(all_registers);
214
215 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
216 __ ReserveAlignedFrameSpace(0);
217 __ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
218 __ blx(R0);
219 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR), 0));
220
221 __ PopRegisters(all_registers);
222 __ Ret();
223}
224
225static void GenerateExitSafepointStubCommon(Assembler* assembler,
226 uword runtime_entry_offset) {
227 RegisterSet all_registers;
228 all_registers.AddAllGeneralRegisters();
229 __ PushRegisters(all_registers);
230
231 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
232 __ ReserveAlignedFrameSpace(0);
233
234 // Set the execution state to VM while waiting for the safepoint to end.
235 // This isn't strictly necessary but enables tests to check that we're not
236 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
237 __ LoadImmediate(R0, target::Thread::vm_execution_state());
239
240 __ ldr(R0, Address(THR, runtime_entry_offset));
241 __ blx(R0);
242 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR), 0));
243
244 __ PopRegisters(all_registers);
245 __ Ret();
246}
247
248void StubCodeCompiler::GenerateExitSafepointStub() {
249 GenerateExitSafepointStubCommon(
250 assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
251}
252
253void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
254 GenerateExitSafepointStubCommon(
255 assembler,
256 kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
257}
258
259// Call a native function within a safepoint.
260//
261// On entry:
262// Stack: set up for call, incl. alignment
263// R8: target to call
264//
265// On exit:
266// Stack: preserved
267// NOTFP, R4: clobbered, although normally callee-saved
268void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
270
271 // TransitionGeneratedToNative might clobber LR if it takes the slow path.
272 SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R4, Operand(LR)));
273
274 __ LoadImmediate(R9, target::Thread::exit_through_ffi());
275 __ TransitionGeneratedToNative(R8, FPREG, R9 /*volatile*/, NOTFP,
276 /*enter_safepoint=*/true);
277
278 __ blx(R8);
279
280 __ TransitionNativeToGenerated(R9 /*volatile*/, NOTFP,
281 /*exit_safepoint=*/true);
282
283 __ bx(R4);
284}
285
286void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
288 Register tmp) {
289 compiler::Label skip_reloc;
290 __ b(&skip_reloc);
291 InsertBSSRelocation(relocation);
292 __ Bind(&skip_reloc);
293
294 // For historical reasons, the PC on ARM points 8 bytes (two instructions)
295 // past the current instruction.
296 __ sub(tmp, PC,
298
299 // tmp holds the address of the relocation.
300 __ ldr(dst, compiler::Address(tmp));
301
302 // dst holds the relocation itself: tmp - bss_start.
303 // tmp = tmp + (bss_start - tmp) = bss_start
304 __ add(tmp, tmp, compiler::Operand(dst));
305
306 // tmp holds the start of the BSS section.
307 // Load the "get-thread" routine: *bss_start.
308 __ ldr(dst, compiler::Address(tmp));
309}
310
311void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
312 uword function_index,
313 Register dst) {
314 // Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
315 // Note: If the stub was aligned, this could be a single PC relative load.
316
317 // Load a pointer to the beginning of the stub into dst.
318 const intptr_t code_size = __ CodeSize();
319 __ SubImmediate(dst, PC, Instr::kPCReadOffset + code_size);
320
321 // Round dst down to the page size.
323
324 // Load the function from the function table.
325 __ LoadFromOffset(dst, dst,
327}
328
329void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
330#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
331 // TODO(37299): FFI is not supported in SIMARM.
332 __ Breakpoint();
333#else
334 Label body;
335
336 // TMP is volatile and not used for passing any arguments.
339 ++i) {
340 // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
341 // look up the current PC, then jump to the shared section. The PC is offset
342 // by Instr::kPCReadOffset, which is subtracted below.
343 __ mov(TMP, Operand(PC));
344 __ b(&body);
345 }
346
347 ASSERT(__ CodeSize() ==
348 FfiCallbackMetadata::kNativeCallbackTrampolineSize *
350
351 __ Bind(&body);
352
353 const intptr_t shared_stub_start = __ CodeSize();
354
355 // Save THR (callee-saved), R4 & R5 (temporaries, callee-saved), and LR.
356 COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);
357 SPILLS_LR_TO_FRAME(
358 __ PushList((1 << LR) | (1 << THR) | (1 << R4) | (1 << R5)));
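 // Note: the four words pushed above (LR, THR, R4, R5) are exactly what the
 // kNativeCallbackTrampolineStackDelta == 4 assertion accounts for; the
 // callback targets invoked below are expected to skip these four slots.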
359
360 // The PC is in TMP, but is offset by kPCReadOffset. To get the actual
361 // trampoline entry point we need to subtract that.
362 __ sub(R4, TMP, Operand(Instr::kPCReadOffset));
363
366
367 RegisterSet argument_registers;
368 argument_registers.AddAllArgumentRegisters();
369 __ PushRegisters(argument_registers);
370
371 // Load the thread, verify the callback ID and exit the safepoint.
372 //
373 // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
374 // code size on this shared stub.
375 {
376 __ mov(R0, Operand(R4));
377
378 // We also need to look up the entry point for the trampoline. This is
379 // returned using a pointer passed to the second arg of the C function
380 // below. We aim that pointer at a reserved stack slot.
381 __ sub(SP, SP, Operand(compiler::target::kWordSize));
382 __ mov(R1, Operand(SP));
383
384 // We also need to know if this is a sync or async callback. This is also
385 // returned by pointer.
386 __ sub(SP, SP, Operand(compiler::target::kWordSize));
387 __ mov(R2, Operand(SP));
388
389 __ EnterFrame(1 << FP, 0);
390 __ ReserveAlignedFrameSpace(0);
391
392 GenerateLoadFfiCallbackMetadataRuntimeFunction(
394
395 __ blx(R4);
396 __ mov(THR, Operand(R0));
397
398 __ LeaveFrame(1 << FP);
399
400 // The trampoline type is at the top of the stack. Pop it into R4.
401 __ Pop(R4);
402
403 // Entry point is now at the top of the stack. Pop it into R5.
404 __ Pop(R5);
405 }
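 // At this point THR holds the Thread returned by GetFfiCallbackMetadata (or
 // 0 if the async callback was already deleted), R5 holds the callback entry
 // point, and R4 holds the trampoline type.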
406
407 __ PopRegisters(argument_registers);
408
409 Label async_callback;
410 Label done;
411
412 // If GetFfiCallbackMetadata returned a null thread, it means that the async
413 // callback was invoked after it was deleted. In this case, do nothing.
414 __ cmp(THR, Operand(0));
415 __ b(&done, EQ);
416
417 // Check the trampoline type to see how the callback should be invoked.
418 __ cmp(
419 R4,
421 __ b(&async_callback, EQ);
422
423 // Sync callback. The entry point contains the target function, so just call
424 // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
425 // re-enter it afterwards.
426
427 // On entry to the function, there will be four extra slots on the stack:
428 // saved THR, R4, R5 and the return address. The target will know to skip
429 // them.
430 __ blx(R5);
431
432 // Clobbers R4, R5 and TMP, all saved or volatile.
433 __ EnterFullSafepoint(R4, R5);
434
435 __ b(&done);
436 __ Bind(&async_callback);
437
438 // Async callback. The entrypoint marshals the arguments into a message and
439 // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
440 // entered a temporary isolate, so exit it afterwards.
441
442 // On entry to the function, there will be four extra slots on the stack:
443 // saved THR, R4, R5 and the return address. The target will know to skip
444 // them.
445 __ blx(R5);
446
447 // Exit the temporary isolate.
448 {
449 __ EnterFrame(1 << FP, 0);
450 __ ReserveAlignedFrameSpace(0);
451
452 GenerateLoadFfiCallbackMetadataRuntimeFunction(
454
455 __ blx(R4);
456
457 __ LeaveFrame(1 << FP);
458 }
459
460 __ Bind(&done);
461
462 // Returns.
463 __ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));
464
465 ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
466 FfiCallbackMetadata::kNativeCallbackSharedStubSize);
468
469#if defined(DEBUG)
470 while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
471 __ Breakpoint();
472 }
473#endif
474#endif
475}
476
477void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
478 __ EnterStubFrame();
481 __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
482 // The NullError runtime entry does not return.
483 __ Breakpoint();
484}
485
486void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
487 auto perform_runtime_call = [&]() {
489 __ PushRegistersInOrder(
491 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
492 __ Breakpoint();
493 };
494
495 GenerateSharedStubGeneric(
496 /*save_fpu_registers=*/with_fpu_regs,
497 with_fpu_regs
500 /*allow_return=*/false, perform_runtime_call);
501}
502
503void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
504 auto perform_runtime_call = [&]() {
505 __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/2);
506 __ Breakpoint();
507 };
508
509 GenerateSharedStubGeneric(
510 /*save_fpu_registers=*/with_fpu_regs,
511 with_fpu_regs
514 /*allow_return=*/false, perform_runtime_call);
515}
516
517// Input parameters:
518// LR : return address.
519// SP : address of return value.
520// R9 : address of the native function to call.
521// R2 : address of first argument in argument array.
522// R1 : argc_tag including number of arguments and function kind.
523static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
524 Address wrapper) {
525 const intptr_t thread_offset = target::NativeArguments::thread_offset();
526 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
527 const intptr_t argv_offset = target::NativeArguments::argv_offset();
528 const intptr_t retval_offset = target::NativeArguments::retval_offset();
529
530 __ EnterStubFrame();
531
532 // Save exit frame information to enable stack walking as we are about
533 // to transition to native code.
535
536 // Mark that the thread exited generated code through a runtime call.
539
540#if defined(DEBUG)
541 {
542 Label ok;
543 // Check that we are always entering from Dart code.
544 __ LoadFromOffset(R8, THR, target::Thread::vm_tag_offset());
545 __ CompareImmediate(R8, VMTag::kDartTagId);
546 __ b(&ok, EQ);
547 __ Stop("Not coming from Dart code.");
548 __ Bind(&ok);
549 }
550#endif
551
552 // Mark that the thread is executing native code.
553 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
554
555 // Reserve space for the native arguments structure passed on the stack (the
556 // outgoing pointer parameter to the native arguments structure is passed in
557 // R0) and align frame before entering the C++ world.
558 __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
559
560 // Initialize target::NativeArguments structure and call native function.
561 // Registers R0, R1, R2, and R3 are used.
562
563 ASSERT(thread_offset == 0 * target::kWordSize);
564 // Set thread in NativeArgs.
565 __ mov(R0, Operand(THR));
566
567 ASSERT(argc_tag_offset == 1 * target::kWordSize);
568 // Set argc in target::NativeArguments: R1 already contains argc.
569
570 ASSERT(argv_offset == 2 * target::kWordSize);
571 // Set argv in target::NativeArguments: R2 already contains argv.
572
573 // Set retval in NativeArgs.
574 ASSERT(retval_offset == 3 * target::kWordSize);
575 __ add(R3, FP,
576 Operand((target::frame_layout.param_end_from_fp + 1) *
578
579 // Passing the structure by value as in runtime calls would require changing
580 // Dart API for native functions.
581 // For now, space is reserved on the stack and we pass a pointer to it.
582 __ stm(IA, SP, (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3));
583 __ mov(R0, Operand(SP)); // Pass the pointer to the target::NativeArguments.
584
585 __ mov(R1, Operand(R9)); // Pass the function entrypoint to call.
586
587 // Call native function invocation wrapper or redirection via simulator.
588 __ Call(wrapper);
589
590 // Mark that the thread is executing Dart code.
591 __ LoadImmediate(R2, VMTag::kDartTagId);
592 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
593
594 // Mark that the thread has not exited generated Dart code.
595 __ LoadImmediate(R2, 0);
597
598 // Reset exit frame information in Isolate's mutator thread structure.
600
601 // Restore the global object pool after returning from runtime (old space is
602 // moving, so the GOP could have been relocated).
603 if (FLAG_precompiled_mode) {
604 __ SetupGlobalPoolAndDispatchTable();
605 }
606
607 __ LeaveStubFrame();
608 __ Ret();
609}
610
611void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
612 GenerateCallNativeWithWrapperStub(
613 assembler,
614 Address(THR,
616}
617
618void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
619 GenerateCallNativeWithWrapperStub(
620 assembler,
621 Address(THR,
623}
624
625// Input parameters:
626// LR : return address.
627// SP : address of return value.
628// R9 : address of the native function to call.
629// R2 : address of first argument in argument array.
630// R1 : argc_tag including number of arguments and function kind.
631void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
632 GenerateCallNativeWithWrapperStub(
633 assembler,
634 Address(THR,
636}
637
638// Input parameters:
639// ARGS_DESC_REG: arguments descriptor array.
640void StubCodeCompiler::GenerateCallStaticFunctionStub() {
641 // Create a stub frame as we are pushing some objects on the stack before
642 // calling into the runtime.
643 __ EnterStubFrame();
644 // Setup space on stack for return value and preserve arguments descriptor.
645 __ LoadImmediate(R0, 0);
646 __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
647 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
648 // Get Code object result and restore arguments descriptor array.
649 __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
650 // Remove the stub frame.
651 __ LeaveStubFrame();
652 // Jump to the dart function.
653 __ mov(CODE_REG, Operand(R0));
654 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
655}
656
657// Called from a static call only when invalid code has been entered
658// (invalid because its function was optimized or deoptimized).
659// ARGS_DESC_REG: arguments descriptor array.
660void StubCodeCompiler::GenerateFixCallersTargetStub() {
661 Label monomorphic;
662 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
663
664 // Load code pointer to this stub from the thread:
665 // The one that is passed in, is not correct - it points to the code object
666 // that needs to be replaced.
667 __ ldr(CODE_REG,
669 // Create a stub frame as we are pushing some objects on the stack before
670 // calling into the runtime.
671 __ EnterStubFrame();
672 // Setup space on stack for return value and preserve arguments descriptor.
673 __ LoadImmediate(R0, 0);
674 __ PushList((1 << R0) | (1 << ARGS_DESC_REG));
675 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
676 // Get Code object result and restore arguments descriptor array.
677 __ PopList((1 << R0) | (1 << ARGS_DESC_REG));
678 // Remove the stub frame.
679 __ LeaveStubFrame();
680 // Jump to the dart function.
681 __ mov(CODE_REG, Operand(R0));
682 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
683
684 __ Bind(&monomorphic);
685 // Load code pointer to this stub from the thread:
686 // The one that is passed in, is not correct - it points to the code object
687 // that needs to be replaced.
688 __ ldr(CODE_REG,
690 // Create a stub frame as we are pushing some objects on the stack before
691 // calling into the runtime.
692 __ EnterStubFrame();
693 __ LoadImmediate(R1, 0);
694 __ Push(R1); // Result slot.
695 __ Push(R0); // Preserve receiver.
696 __ Push(R9); // Old cache value (also 2nd return value).
697 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
698 __ Pop(R9); // Get target cache object.
699 __ Pop(R0); // Restore receiver.
700 __ Pop(CODE_REG); // Get target Code object.
701 // Remove the stub frame.
702 __ LeaveStubFrame();
703 // Jump to the dart function.
704 __ Branch(FieldAddress(
706}
707
708// Called from object allocate instruction when the allocation stub has been
709// disabled.
710void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
711 // Load code pointer to this stub from the thread:
712 // The one that is passed in, is not correct - it points to the code object
713 // that needs to be replaced.
714 __ ldr(CODE_REG,
716 __ EnterStubFrame();
717 // Setup space on stack for return value.
718 __ LoadImmediate(R0, 0);
719 __ Push(R0);
720 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
721 // Get Code object result.
722 __ Pop(R0);
723 // Remove the stub frame.
724 __ LeaveStubFrame();
725 // Jump to the dart function.
726 __ mov(CODE_REG, Operand(R0));
727 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
728}
729
730// Called from object allocate instruction when the allocation stub for a
731// generic class has been disabled.
732void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
733 // Load code pointer to this stub from the thread:
734 // The one that is passed in, is not correct - it points to the code object
735 // that needs to be replaced.
736 __ ldr(CODE_REG,
738 __ EnterStubFrame();
739 // Preserve type arguments register.
741 // Setup space on stack for return value.
742 __ LoadImmediate(R0, 0);
743 __ Push(R0);
744 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
745 // Get Code object result.
746 __ Pop(R0);
747 // Restore type arguments register.
749 // Remove the stub frame.
750 __ LeaveStubFrame();
751 // Jump to the dart function.
752 __ mov(CODE_REG, Operand(R0));
753 __ Branch(FieldAddress(R0, target::Code::entry_point_offset()));
754}
755
756// Input parameters:
757// R2: smi-tagged argument count, may be zero.
758// FP[target::frame_layout.param_end_from_fp + 1]: last argument.
759static void PushArrayOfArguments(Assembler* assembler) {
760 // Allocate array to store arguments of caller.
761 __ LoadObject(R1, NullObject());
762 // R1: null element type for raw Array.
763 // R2: smi-tagged argument count, may be zero.
764 __ BranchLink(StubCodeAllocateArray());
765 // R0: newly allocated array.
766 // R2: smi-tagged argument count, may be zero (was preserved by the stub).
767 __ Push(R0); // Array is in R0 and on top of stack.
768 __ AddImmediate(R1, FP,
769 target::frame_layout.param_end_from_fp * target::kWordSize);
771 // Copy arguments from stack to array (starting at the end).
772 // R1: address just beyond last argument on stack.
773 // R3: address of first argument in array.
774 Label enter;
775 __ b(&enter);
776 Label loop;
777 __ Bind(&loop);
778 __ ldr(R8, Address(R1, target::kWordSize, Address::PreIndex));
779 // Generational barrier is needed, array is not necessarily in new space.
780 __ StoreIntoObject(R0, Address(R3, R2, LSL, 1), R8);
781 __ Bind(&enter);
782 __ subs(R2, R2, Operand(target::ToRawSmi(1))); // R2 is Smi.
783 __ b(&loop, PL);
784}
785
786// Used by eager and lazy deoptimization. Preserve result in R0 if necessary.
787// This stub translates optimized frame into unoptimized frame. The optimized
788// frame can contain values in registers and on stack, the unoptimized
789// frame contains all values on stack.
790// Deoptimization occurs in the following steps:
791// - Push all registers that can contain values.
792// - Call C routine to copy the stack and saved registers into temporary buffer.
793// - Adjust caller's frame to correct unoptimized frame size.
794// - Fill the unoptimized frame.
795// - Materialize objects that require allocation (e.g. Double instances).
796// GC can occur only after frame is fully rewritten.
797// Stack after EnterFrame(...) below:
798// +------------------+
799// | Saved PP | <- TOS
800// +------------------+
801// | Saved FP | <- FP of stub
802// +------------------+
803// | Saved LR | (deoptimization point)
804// +------------------+
805// | pc marker |
806// +------------------+
807// | Saved CODE_REG |
808// +------------------+
809// | ... | <- SP of optimized frame
810//
811// Parts of the code below cannot trigger GC; other parts can.
812static void GenerateDeoptimizationSequence(Assembler* assembler,
813 DeoptStubKind kind) {
814 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
815 // is no need to set the correct PC marker or load PP, since they get patched.
816 __ EnterDartFrame(0);
817 __ LoadPoolPointer();
818
819 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
820 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
821 const intptr_t saved_result_slot_from_fp =
824 const intptr_t saved_exception_slot_from_fp =
827 const intptr_t saved_stacktrace_slot_from_fp =
830 // Result in R0 is preserved as part of pushing all registers below.
831
832 // Push registers in their enumeration order: lowest register number at
833 // lowest address.
834 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
835 if (i == CODE_REG) {
836 // Save the original value of CODE_REG pushed before invoking this stub
837 // instead of the value used to call this stub.
838 __ ldr(IP, Address(FP, 2 * target::kWordSize));
839 __ Push(IP);
840 } else if (i == SP) {
841 // Push(SP) has unpredictable behavior.
842 __ mov(IP, Operand(SP));
843 __ Push(IP);
844 } else {
845 __ Push(static_cast<Register>(i));
846 }
847 }
848
850 if (kNumberOfDRegisters > 16) {
851 __ vstmd(DB_W, SP, D16, kNumberOfDRegisters - 16);
852 __ vstmd(DB_W, SP, D0, 16);
853 } else {
854 __ vstmd(DB_W, SP, D0, kNumberOfDRegisters);
855 }
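 // All CPU registers and, below them, all FPU registers are now saved in one
 // contiguous block at SP; its address is passed to DeoptimizeCopyFrame below.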
856
857 {
858 __ mov(R0, Operand(SP)); // Pass address of saved registers block.
859 LeafRuntimeScope rt(assembler,
860 /*frame_size=*/0,
861 /*preserve_registers=*/false);
862 bool is_lazy =
863 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
864 __ mov(R1, Operand(is_lazy ? 1 : 0));
865 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
866 // Result (R0) is stack-size (FP - SP) in bytes.
867 }
868
869 if (kind == kLazyDeoptFromReturn) {
870 // Restore result into R1 temporarily.
871 __ ldr(R1, Address(FP, saved_result_slot_from_fp * target::kWordSize));
872 } else if (kind == kLazyDeoptFromThrow) {
873 // Restore result into R1 temporarily.
874 __ ldr(R1, Address(FP, saved_exception_slot_from_fp * target::kWordSize));
875 __ ldr(R2, Address(FP, saved_stacktrace_slot_from_fp * target::kWordSize));
876 }
877
878 __ RestoreCodePointer();
879 __ LeaveDartFrame();
880 __ sub(SP, FP, Operand(R0));
881
882 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
883 // is no need to set the correct PC marker or load PP, since they get patched.
884 __ EnterStubFrame();
885 if (kind == kLazyDeoptFromReturn) {
886 __ Push(R1); // Preserve result as first local.
887 } else if (kind == kLazyDeoptFromThrow) {
888 __ Push(R1); // Preserve exception as first local.
889 __ Push(R2); // Preserve stacktrace as second local.
890 }
891 {
892 __ mov(R0, Operand(FP)); // Get last FP address.
893 LeafRuntimeScope rt(assembler,
894 /*frame_size=*/0,
895 /*preserve_registers=*/false);
896 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
897 }
898 if (kind == kLazyDeoptFromReturn) {
899 // Restore result into R1.
900 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
902 } else if (kind == kLazyDeoptFromThrow) {
903 // Restore result into R1.
904 __ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
906 __ ldr(R2, Address(FP, (target::frame_layout.first_local_from_fp - 1) *
908 }
909 // Code above cannot cause GC.
910 __ RestoreCodePointer();
911 __ LeaveStubFrame();
912
913 // Frame is fully rewritten at this point and it is safe to perform a GC.
914 // Materialize any objects that were deferred by FillFrame because they
915 // require allocation.
916 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
917 __ EnterStubFrame();
918 if (kind == kLazyDeoptFromReturn) {
919 __ Push(R1); // Preserve result, it will be GC-d here.
920 } else if (kind == kLazyDeoptFromThrow) {
921 // Preserve CODE_REG for one more runtime call.
922 __ Push(CODE_REG);
923 __ Push(R1); // Preserve exception, it will be GC-d here.
924 __ Push(R2); // Preserve stacktrace, it will be GC-d here.
925 }
926 __ PushObject(NullObject()); // Space for the result.
927 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
928 // Result tells stub how many bytes to remove from the expression stack
929 // of the bottom-most frame. They were used as materialization arguments.
930 __ Pop(R2);
931 if (kind == kLazyDeoptFromReturn) {
932 __ Pop(R0); // Restore result.
933 } else if (kind == kLazyDeoptFromThrow) {
934 __ Pop(R1); // Restore stacktrace.
935 __ Pop(R0); // Restore exception.
936 __ Pop(CODE_REG);
937 }
938 __ LeaveStubFrame();
939 // Remove materialization arguments.
940 __ add(SP, SP, Operand(R2, ASR, kSmiTagSize));
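 // (The popped result is a Smi byte count; the arithmetic shift right by
 // kSmiTagSize untags it as part of the SP adjustment.)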
941 // The caller is responsible for emitting the return instruction.
942
943 if (kind == kLazyDeoptFromThrow) {
944 // Unoptimized frame is now ready to accept the exception. Rethrow it to
945 // find the right handler. Ask the rethrow machinery to bypass the
946 // debugger, which was already notified about this exception.
947 __ EnterStubFrame();
948 __ PushImmediate(
949 target::ToRawSmi(0)); // Space for the return value (unused).
950 __ Push(R0); // Exception
951 __ Push(R1); // Stacktrace
952 __ PushImmediate(target::ToRawSmi(1)); // Bypass debugger
953 __ CallRuntime(kReThrowRuntimeEntry, 3);
954 __ LeaveStubFrame();
955 }
956}
957
958// R0: result, must be preserved
959void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
960 // Push zap value instead of CODE_REG for lazy deopt.
961 __ LoadImmediate(IP, kZapCodeReg);
962 __ Push(IP);
963 // Return address for "call" to deopt stub.
964 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
965 __ ldr(CODE_REG,
967 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
968 __ Ret();
969}
970
971// R0: exception, must be preserved
972// R1: stacktrace, must be preserved
973void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
974 // Push zap value instead of CODE_REG for lazy deopt.
975 __ LoadImmediate(IP, kZapCodeReg);
976 __ Push(IP);
977 // Return address for "call" to deopt stub.
978 WRITES_RETURN_ADDRESS_TO_LR(__ LoadImmediate(LR, kZapReturnAddress));
979 __ ldr(CODE_REG,
981 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
982 __ Ret();
983}
984
985void StubCodeCompiler::GenerateDeoptimizeStub() {
986 __ Push(CODE_REG);
988 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
989 __ Ret();
990}
991
992// IC_DATA_REG: ICData/MegamorphicCache
993static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
994 __ EnterStubFrame();
995
996 __ ldr(ARGS_DESC_REG,
997 FieldAddress(IC_DATA_REG,
999
1000 // Load the receiver.
1001 __ ldr(R2, FieldAddress(ARGS_DESC_REG,
1003 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
1004 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
1006 __ LoadImmediate(IP, 0);
1007 __ Push(IP); // Result slot.
1008 __ Push(R8); // Receiver.
1009 __ Push(IC_DATA_REG); // ICData/MegamorphicCache.
1010 __ Push(ARGS_DESC_REG); // Arguments descriptor.
1011
1012 // Adjust arguments count.
1013 __ ldr(R3, FieldAddress(ARGS_DESC_REG,
1015 __ cmp(R3, Operand(0));
1016 __ AddImmediate(R2, R2, target::ToRawSmi(1),
1017 NE); // Include the type arguments.
1018
1019 // R2: Smi-tagged arguments array length.
1020 PushArrayOfArguments(assembler);
1021 const intptr_t kNumArgs = 4;
1022 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1023 __ Drop(4);
1024 __ Pop(R0); // Return value.
1025 __ LeaveStubFrame();
1026 __ Ret();
1027}
1028
1029static void GenerateDispatcherCode(Assembler* assembler,
1030 Label* call_target_function) {
1031 __ Comment("NoSuchMethodDispatch");
1032 // When lazily generated invocation dispatchers are disabled, the
1033 // miss-handler may return null.
1034 __ CompareObject(R0, NullObject());
1035 __ b(call_target_function, NE);
1036
1037 GenerateNoSuchMethodDispatcherBody(assembler);
1038}
1039
1040// Input:
1041// ARGS_DESC_REG - arguments descriptor
1042// IC_DATA_REG - icdata/megamorphic_cache
1043void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
1044 GenerateNoSuchMethodDispatcherBody(assembler);
1045}
1046
1047// Called for inline allocation of arrays.
1048// Input registers (preserved):
1049// LR: return address.
1050// AllocateArrayABI::kLengthReg: array length as Smi.
1051// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1052// Output registers:
1053// AllocateArrayABI::kResultReg: newly allocated array.
1054// Clobbered:
1055// R3, R4, R8, R9
1056void StubCodeCompiler::GenerateAllocateArrayStub() {
1057 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1058 Label slow_case;
1059 // Compute the size to be allocated; it is based on the array length
1060 // and is computed as:
1061 // RoundedAllocationSize(
1062 // (array_length * kWordSize) + target::Array::header_size()).
1063 __ mov(R3, Operand(AllocateArrayABI::kLengthReg)); // Array length.
1064 // Check that length is a Smi.
1065 __ tst(R3, Operand(kSmiTagMask));
1066 __ b(&slow_case, NE);
1067
1068 // Check length >= 0 && length <= kMaxNewSpaceElements
1069 const intptr_t max_len =
1071 __ CompareImmediate(R3, max_len);
1072 __ b(&slow_case, HI);
1073
1074 const intptr_t cid = kArrayCid;
1075 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &slow_case, R4));
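 // (In non-product modes, if allocation tracing is active for kArrayCid this
 // branches to the slow path so the allocation is recorded by the runtime.)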
1076
1077 const intptr_t fixed_size_plus_alignment_padding =
1080 __ LoadImmediate(R9, fixed_size_plus_alignment_padding);
1081 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
1082 ASSERT(kSmiTagShift == 1);
1084
1085 // R9: Allocation size.
1086 // Potential new object start.
1088 Address(THR, target::Thread::top_offset()));
1090 Operand(R9)); // Potential next object start.
1091 __ b(&slow_case, CS); // Branch if unsigned overflow.
1092
1093 // Check if the allocation fits into the remaining space.
1094 // AllocateArrayABI::kResultReg: potential new object start.
1095 // R3: potential next object start.
1096 // R9: allocation size.
1097 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
1098 __ cmp(R3, Operand(TMP));
1099 __ b(&slow_case, CS);
1100 __ CheckAllocationCanary(AllocateArrayABI::kResultReg);
1101
1102 // Successfully allocated the object(s), now update top to point to
1103 // next object start and initialize the object.
1104 __ str(R3, Address(THR, target::Thread::top_offset()));
1106 Operand(kHeapObjectTag));
1107
1108 // Initialize the tags.
1109 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1110 // R3: new object end address.
1111 // R9: allocation size.
1112 {
1113 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1115
1117 __ mov(R8, Operand(R9, LSL, shift), LS);
1118 __ mov(R8, Operand(0), HI);
1119
1120 // Get the class index and insert it into the tags.
1121 // R8: size and bit tags.
1122 const uword tags =
1123 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1124 __ LoadImmediate(TMP, tags);
1125 __ orr(R8, R8, Operand(TMP));
1126 __ str(R8, FieldAddress(AllocateArrayABI::kResultReg,
1127 target::Array::tags_offset())); // Store tags.
1128 }
1129
1130 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1131 // R3: new object end address.
1132 // Store the type argument field.
1133 __ StoreIntoObjectNoBarrier(
1135 FieldAddress(AllocateArrayABI::kResultReg,
1138
1139 // Set the length field.
1140 __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
1141 FieldAddress(AllocateArrayABI::kResultReg,
1144
1145 // Initialize all array elements to raw_null.
1146 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1147 // R8, R9: null
1148 // R4: iterator which initially points to the start of the variable
1149 // data area to be initialized.
1150 // R3: new object end address.
1151 // R9: allocation size.
1152
1153 __ LoadObject(R8, NullObject());
1154 __ mov(R9, Operand(R8));
1155 __ AddImmediate(R4, AllocateArrayABI::kResultReg,
1157 __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R4, R3, R8, R9);
1158 __ Ret();
1159 // Unable to allocate the array using the fast inline code, just call
1160 // into the runtime.
1161 __ Bind(&slow_case);
1162 }
1163
1164 // Create a stub frame as we are pushing some objects on the stack before
1165 // calling into the runtime.
1166 __ EnterStubFrame();
1167 __ LoadImmediate(TMP, 0);
1168 // Setup space on stack for return value.
1169 // Push array length as Smi and element type.
1170 __ PushList((1 << AllocateArrayABI::kTypeArgumentsReg) |
1171 (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
1172 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1173
1174 // Write-barrier elimination might be enabled for this array (depending on the
1175 // array length). To be sure we will check if the allocated object is in old
1176 // space and if so call a leaf runtime to add it to the remembered set.
1179
1180 // Pop arguments; result is popped in IP.
1182 (1 << AllocateArrayABI::kLengthReg) | (1 << IP));
1183 __ mov(AllocateArrayABI::kResultReg, Operand(IP));
1184 __ LeaveStubFrame();
1185 __ Ret();
1186}
1187
1188// Called for allocation of Mint.
1189void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
1190 // For test purposes, call the allocation stub without the inline allocation attempt.
1191 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1192 Label slow_case;
1193 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1195 __ Ret();
1196
1197 __ Bind(&slow_case);
1198 }
1201 GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
1203 /*allow_return=*/true,
1204 /*store_runtime_result_in_result_register=*/true);
1205}
1206
1207// Called for allocation of Mint.
1208void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
1210 // For test purposes, call the allocation stub without the inline allocation attempt.
1210 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1211 Label slow_case;
1212 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1214 __ Ret();
1215
1216 __ Bind(&slow_case);
1217 }
1220 GenerateSharedStub(
1221 /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1223 /*allow_return=*/true,
1224 /*store_runtime_result_in_result_register=*/true);
1225}
1226
1227// Called when invoking Dart code from C++ (VM code).
1228// Input parameters:
1229// LR : points to return address.
1230// R0 : target code or entry point (in bare instructions mode).
1231// R1 : arguments descriptor array.
1232// R2 : arguments array.
1233// R3 : current thread.
1234void StubCodeCompiler::GenerateInvokeDartCodeStub() {
1235 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1236
1237 // Push code object to PC marker slot.
1239 __ Push(IP);
1240
1241 __ PushNativeCalleeSavedRegisters();
1242
1243 // Set up THR, which caches the current thread in Dart code.
1244 if (THR != R3) {
1245 __ mov(THR, Operand(R3));
1246 }
1247
1248#if defined(USING_SHADOW_CALL_STACK)
1249#error Unimplemented
1250#endif
1251
1252 // Save the current VMTag on the stack.
1253 __ LoadFromOffset(R9, THR, target::Thread::vm_tag_offset());
1254 __ Push(R9);
1255
1256 // Save top resource and top exit frame info. Use R4-6 as temporary registers.
1257 // StackFrameIterator reads the top exit frame info saved in this frame.
1258 __ LoadFromOffset(R4, THR, target::Thread::top_resource_offset());
1259 __ Push(R4);
1260 __ LoadImmediate(R8, 0);
1261 __ StoreToOffset(R8, THR, target::Thread::top_resource_offset());
1262
1264 __ Push(R8);
1265 __ LoadImmediate(R8, 0);
1267
1270
1271 // target::frame_layout.exit_link_slot_from_entry_fp must be kept in sync
1272 // with the code below.
1273#if defined(DART_TARGET_OS_MACOS) || defined(DART_TARGET_OS_MACOS_IOS)
1274 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -27);
1275#else
1276 ASSERT(target::frame_layout.exit_link_slot_from_entry_fp == -28);
1277#endif
1278 __ Push(R9);
1279
1280 __ EmitEntryFrameVerification(R9);
1281
1282 // Mark that the thread is executing Dart code. Do this after initializing the
1283 // exit link for the profiler.
1284 __ LoadImmediate(R9, VMTag::kDartTagId);
1285 __ StoreToOffset(R9, THR, target::Thread::vm_tag_offset());
1286
1287 // Load arguments descriptor array into R4, which is passed to Dart code.
1288 __ mov(R4, Operand(R1));
1289
1290 // Load number of arguments into R9 and adjust count for type arguments.
1291 __ ldr(R3,
1293 __ ldr(R9, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
1294 __ cmp(R3, Operand(0));
1295 __ AddImmediate(R9, R9, target::ToRawSmi(1),
1296 NE); // Include the type arguments.
1297 __ SmiUntag(R9);
1298
1299 // Compute address of 'arguments array' data area into R2.
1300 __ AddImmediate(R2, R2, target::Array::data_offset() - kHeapObjectTag);
1301
1302 // Set up arguments for the Dart call.
1303 Label push_arguments;
1304 Label done_push_arguments;
1305 __ CompareImmediate(R9, 0); // check if there are arguments.
1306 __ b(&done_push_arguments, EQ);
1307 __ LoadImmediate(R1, 0);
1308 __ Bind(&push_arguments);
1309 __ ldr(R3, Address(R2));
1310 __ Push(R3);
1311 __ AddImmediate(R2, target::kWordSize);
1312 __ AddImmediate(R1, 1);
1313 __ cmp(R1, Operand(R9));
1314 __ b(&push_arguments, LT);
1315 __ Bind(&done_push_arguments);
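 // All R9 arguments have now been copied, in order, from the arguments array
 // data (R2) onto the stack.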
1316
1317 // Call the Dart code entrypoint.
1318 if (FLAG_precompiled_mode) {
1319 __ SetupGlobalPoolAndDispatchTable();
1320 __ LoadImmediate(CODE_REG, 0); // GC safe value into CODE_REG.
1321 } else {
1322 __ LoadImmediate(PP, 0); // GC safe value into PP.
1323 __ mov(CODE_REG, Operand(R0));
1324 __ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
1325 }
1326 __ blx(R0); // R4 is the arguments descriptor array.
1327
1328 // Get rid of arguments pushed on the stack.
1329 __ AddImmediate(
1330 SP, FP,
1331 target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
1332
1333 // Restore the saved top exit frame info and top resource back into the
1334 // Isolate structure. Uses R9 as a temporary register for this.
1335 __ Pop(R9);
1337 __ Pop(R9);
1339 __ Pop(R9);
1340 __ StoreToOffset(R9, THR, target::Thread::top_resource_offset());
1341
1342 // Restore the current VMTag from the stack.
1343 __ Pop(R4);
1344 __ StoreToOffset(R4, THR, target::Thread::vm_tag_offset());
1345
1346#if defined(USING_SHADOW_CALL_STACK)
1347#error Unimplemented
1348#endif
1349
1350 __ PopNativeCalleeSavedRegisters();
1351
1352 __ set_constant_pool_allowed(false);
1353
1354 // Restore the frame pointer and return.
1355 RESTORES_LR_FROM_FRAME(__ LeaveFrame((1 << FP) | (1 << LR)));
1356 __ Ret();
1357}
1358
1359// Helper that generates the space allocation for a Context object.
1360// This does not initialise the fields of the context.
1361// Input:
1362// R1: number of context variables.
1363// Output:
1364// R0: new allocated Context object.
1365// Clobbered:
1366// R2, R3, R8, R9
1367static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
1368 // First compute the rounded instance size.
1369 // R1: number of context variables.
1370 const intptr_t fixed_size_plus_alignment_padding =
1373 __ LoadImmediate(R2, fixed_size_plus_alignment_padding);
1374 __ add(R2, R2, Operand(R1, LSL, 2));
1375 ASSERT(kSmiTagShift == 1);
1377
1378 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case, R8));
1379 // Now allocate the object.
1380 // R1: number of context variables.
1381 // R2: object size.
1382 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1383 __ add(R3, R2, Operand(R0));
1384 // Check if the allocation fits into the remaining space.
1385 // R0: potential new object.
1386 // R1: number of context variables.
1387 // R2: object size.
1388 // R3: potential next object start.
1389 __ ldr(IP, Address(THR, target::Thread::end_offset()));
1390 __ cmp(R3, Operand(IP));
1391 __ b(slow_case, CS); // Branch if unsigned higher or equal.
1392 __ CheckAllocationCanary(R0);
1393
1394 // Successfully allocated the object, now update top to point to
1395 // next object start and initialize the object.
1396 // R0: new object start (untagged).
1397 // R1: number of context variables.
1398 // R2: object size.
1399 // R3: next object start.
1400 __ str(R3, Address(THR, target::Thread::top_offset()));
1401 __ add(R0, R0, Operand(kHeapObjectTag));
1402
1403 // Calculate the size tag.
1404 // R0: new object (tagged).
1405 // R1: number of context variables.
1406 // R2: object size.
1407 // R3: next object start.
1408 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1411 // If no size tag overflow, set R9 to R2 shifted left, else set R9 to zero.
1412 __ mov(R9, Operand(R2, LSL, shift), LS);
1413 __ mov(R9, Operand(0), HI);
1414
1415 // Get the class index and insert it into the tags.
1416 // R9: size and bit tags.
1417 const uword tags =
1418 target::MakeTagWordForNewSpaceObject(kContextCid, /*instance_size=*/0);
1419
1420 __ LoadImmediate(IP, tags);
1421 __ orr(R9, R9, Operand(IP));
1422 __ str(R9, FieldAddress(R0, target::Object::tags_offset()));
1423
1424 // Set up the number-of-context-variables field.
1425 // R0: new object.
1426 // R1: number of context variables as integer value (not object).
1427 // R2: object size.
1428 // R3: next object start.
1429 __ str(R1, FieldAddress(R0, target::Context::num_variables_offset()));
1430}
1431
1432// Called for inline allocation of contexts.
1433// Input:
1434// R1: number of context variables.
1435// Output:
1436// R0: new allocated Context object.
1437// Clobbered:
1438// Potentially any, since it can go to the runtime.
1439void StubCodeCompiler::GenerateAllocateContextStub() {
1440 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1441 Label slow_case;
1442
1443 GenerateAllocateContext(assembler, &slow_case);
1444
1445 // Setup the parent field.
1446 // R0: new object.
1447 // R2: object size.
1448 // R3: next object start.
1449 __ LoadObject(R8, NullObject());
1450 __ MoveRegister(R9, R8); // Needed for InitializeFieldsNoBarrier.
1451 __ StoreIntoObjectNoBarrier(
1452 R0, FieldAddress(R0, target::Context::parent_offset()), R8);
1453
1454 // Initialize the context variables.
1455 // R0: new object.
1456 // R2: object size.
1457 // R3: next object start.
1458 // R8, R9: raw null.
1459 __ AddImmediate(R1, R0,
1461 __ InitializeFieldsNoBarrier(R0, R1, R3, R8, R9);
1462
1463 // Done allocating and initializing the context.
1464 // R0: new object.
1465 __ Ret();
1466
1467 __ Bind(&slow_case);
1468 }
1469
1470 // Create a stub frame as we are pushing some objects on the stack before
1471 // calling into the runtime.
1472 __ EnterStubFrame();
1473 // Setup space on stack for return value.
1474 __ LoadImmediate(R2, 0);
1475 __ SmiTag(R1);
1476 __ PushList((1 << R1) | (1 << R2));
1477 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1478 __ Drop(1); // Pop number of context variables argument.
1479 __ Pop(R0); // Pop the new context object.
1480
1481 // Write-barrier elimination might be enabled for this context (depending on
1482 // the size). To be sure we will check if the allocated object is in old
1483 // space and if so call a leaf runtime to add it to the remembered set.
1485
1486 // R0: new object
1487 // Restore the frame pointer.
1488 __ LeaveStubFrame();
1489
1490 __ Ret();
1491}
1492
1493// Called for clone of contexts.
1494// Input:
1495// R4: context variable to clone.
1496// Output:
1497// R0: new allocated Context object.
1498// Clobbered:
1499// Potentially any, since it can go to the runtime.
1500void StubCodeCompiler::GenerateCloneContextStub() {
1501 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1502 Label slow_case;
1503
1504 // Load the number of variables from the existing context.
1505 __ ldr(R1, FieldAddress(R4, target::Context::num_variables_offset()));
1506
1507 GenerateAllocateContext(assembler, &slow_case);
1508
1509 // Load parent in the existing context.
1510 __ ldr(R2, FieldAddress(R4, target::Context::parent_offset()));
1511 // Setup the parent field.
1512 // R0: new object.
1513 __ StoreIntoObjectNoBarrier(
1514 R0, FieldAddress(R0, target::Context::parent_offset()), R2);
1515
1516 // Clone the context variables.
1517 // R0: new object.
1518 // R1: number of context variables.
1519 {
1520 Label loop, done;
1521 __ AddImmediate(R2, R0,
1523 __ AddImmediate(R3, R4,
1525
1526 __ Bind(&loop);
1527 __ subs(R1, R1, Operand(1));
1528 __ b(&done, MI);
1529
1530 __ ldr(R9, Address(R3, R1, LSL, target::kWordSizeLog2));
1531 __ str(R9, Address(R2, R1, LSL, target::kWordSizeLog2));
1532
1533 __ b(&loop, NE); // Loop if R1 not zero.
1534
1535 __ Bind(&done);
1536 }
1537
1538 // Done allocating and initializing the context.
1539 // R0: new object.
1540 __ Ret();
1541
1542 __ Bind(&slow_case);
1543 }
1544
1545 // Create a stub frame as we are pushing some objects on the stack before
1546 // calling into the runtime.
1547 __ EnterStubFrame();
1548 // Setup space on stack for return value.
1549 __ LoadImmediate(R0, 0);
1550 __ PushRegisterPair(R4, R0);
1551 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1552 // R4: Pop number of context variables argument.
1553 // R0: Pop the new context object.
1554 __ PopRegisterPair(R4, R0);
1555
1556 // Write-barrier elimination might be enabled for this context (depending on
1557 // the size). To be sure we will check if the allocated object is in old
1558 // space and if so call a leaf runtime to add it to the remembered set.
1560
1561 // R0: new object
1562 // Restore the frame pointer.
1563 __ LeaveStubFrame();
1564 __ Ret();
1565}
1566
1567void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
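 // Note: one short wrapper is emitted per allocatable register; it moves that
 // register into kWriteBarrierObjectReg and runs the common write-barrier
 // code before returning, so call sites may keep the object in any
 // allocatable register.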
1568 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1569 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1570
1571 Register reg = static_cast<Register>(i);
1572 intptr_t start = __ CodeSize();
1573 SPILLS_LR_TO_FRAME(__ PushList((1 << LR) | (1 << kWriteBarrierObjectReg)));
1574 __ mov(kWriteBarrierObjectReg, Operand(reg));
1576 RESTORES_LR_FROM_FRAME(
1577 __ PopList((1 << LR) | (1 << kWriteBarrierObjectReg)));
1578 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR));
1579 intptr_t end = __ CodeSize();
1580
1582 }
1583}
1584
1585// Helper stub to implement Assembler::StoreIntoObject.
1586// Input parameters:
1587// R1: Object (old)
1588// R0: Value (old or new)
1589// R9: Slot
1590// If R0 is new, add R1 to the store buffer. Otherwise R0 is old, mark R0
1591// and add it to the mark list.
1595static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
1596 Label skip_marking;
1597 __ Push(R2);
1598 __ ldr(TMP, FieldAddress(R0, target::Object::tags_offset()));
1600 __ and_(TMP, TMP, Operand(R2));
1601 __ Pop(R2);
1603 __ b(&skip_marking, ZERO);
1604
1605 {
1606 // Atomically clear kNotMarkedBit.
1607 Label retry, is_new, done;
1608 __ PushList((1 << R2) | (1 << R3) | (1 << R4)); // Spill.
1610 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1611 __ Bind(&retry);
1612 __ ldrex(R2, R3);
1613 __ tst(R2, Operand(1 << target::UntaggedObject::kNotMarkedBit));
1614 __ b(&done, ZERO); // Marked by another thread.
1615 __ bic(R2, R2, Operand(1 << target::UntaggedObject::kNotMarkedBit));
1616 __ strex(R4, R2, R3);
1617 __ cmp(R4, Operand(1));
1618 __ b(&retry, EQ);
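 // (strex writes 1 to R4 if the exclusive store failed, so the loop retries
 // until the header word has been updated atomically.)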
1619
1621 __ b(&is_new, NOT_ZERO);
1622
1623 auto mark_stack_push = [&](intptr_t offset, const RuntimeEntry& entry) {
1624 __ ldr(R4, Address(THR, offset));
1626 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1628 __ add(R2, R2, Operand(1));
1630 __ CompareImmediate(R2, target::MarkingStackBlock::kSize);
1631 __ b(&done, NE);
1632
1633 {
1634 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1635 /*preserve_registers=*/true);
1636 __ mov(R0, Operand(THR));
1637 rt.Call(entry, 1);
1638 }
1639 };
1640
1642 kOldMarkingStackBlockProcessRuntimeEntry);
1643 __ b(&done);
1644
1645 __ Bind(&is_new);
1647 kNewMarkingStackBlockProcessRuntimeEntry);
1648
1649 __ Bind(&done);
1650 __ clrex();
1651 __ PopList((1 << R2) | (1 << R3) | (1 << R4)); // Unspill.
1652 }
1653
1654 Label add_to_remembered_set, remember_card;
1655 __ Bind(&skip_marking);
1656 __ Push(R2);
1657 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1658 __ ldr(R2, FieldAddress(R0, target::Object::tags_offset()));
1659 __ and_(TMP, R2,
1661 __ Pop(R2);
1663 __ b(&add_to_remembered_set, NOT_ZERO);
1664 __ Ret();
1665
1666 __ Bind(&add_to_remembered_set);
1667 if (cards) {
1668 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1670 __ b(&remember_card, NOT_ZERO);
1671 } else {
1672#if defined(DEBUG)
1673 Label ok;
1674 __ ldr(TMP, FieldAddress(R1, target::Object::tags_offset()));
1676 __ b(&ok, ZERO);
1677 __ Stop("Wrong barrier");
1678 __ Bind(&ok);
1679#endif
1680 }
1681
1682 {
1683 // Atomically clear kOldAndNotRememberedBit.
1684 Label retry, done;
1685 __ PushList((1 << R2) | (1 << R3) | (1 << R4));
1687 // R3: Untagged address of header word (ldrex/strex do not support offsets).
1688 __ Bind(&retry);
1689 __ ldrex(R2, R3);
1691 __ b(&done, ZERO); // Remembered by another thread.
1692 __ bic(R2, R2,
1694 __ strex(R4, R2, R3);
1695 __ cmp(R4, Operand(1));
1696 __ b(&retry, EQ);
1697
1698 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1699 // StoreBufferBlock and add the address to the pointers_.
1701 __ ldr(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1702 __ add(R3, R4, Operand(R2, LSL, target::kWordSizeLog2));
1704
1705 // Increment top_ and check for overflow.
1706 // R2: top_.
1707 // R4: StoreBufferBlock.
1708 __ add(R2, R2, Operand(1));
1709 __ str(R2, Address(R4, target::StoreBufferBlock::top_offset()));
1710 __ CompareImmediate(R2, target::StoreBufferBlock::kSize);
1711 __ b(&done, NE);
1712
1713 {
1714 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1715 /*preserve_registers=*/true);
1716 __ mov(R0, Operand(THR));
1717 rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
1718 }
1719
1720 __ Bind(&done);
1721 __ PopList((1 << R2) | (1 << R3) | (1 << R4));
1722 __ Ret();
1723 }
1724 if (cards) {
1725 Label remember_card_slow, retry;
1726
1727 // Get card table.
1728 __ Bind(&remember_card);
1729 __ AndImmediate(TMP, R1, target::kPageMask); // Page.
1730 __ ldr(TMP,
1731 Address(TMP, target::Page::card_table_offset())); // Card table.
1732 __ cmp(TMP, Operand(0));
1733 __ b(&remember_card_slow, EQ);
1734
1735 // Atomically dirty the card.
1736 __ PushList((1 << R0) | (1 << R1) | (1 << R2));
1737 __ AndImmediate(TMP, R1, target::kPageMask); // Page.
1738 __ sub(R9, R9, Operand(TMP)); // Offset in page.
1739 __ Lsr(R9, R9, Operand(target::Page::kBytesPerCardLog2)); // Card index.
1740 __ AndImmediate(R1, R9, target::kBitsPerWord - 1); // Lsl is not mod 32.
1741 __ LoadImmediate(R0, 1); // Bit offset.
1742 __ Lsl(R0, R0, R1); // Bit mask.
1743 __ ldr(TMP,
1744 Address(TMP, target::Page::card_table_offset())); // Card table.
1745 __ Lsr(R9, R9, Operand(target::kBitsPerWordLog2)); // Word index.
1746 __ add(TMP, TMP, Operand(R9, LSL, target::kWordSizeLog2)); // Word address.
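 // Card math: R9 is the slot's byte offset within the page, so shifting it
 // right by kBytesPerCardLog2 yields the card index. The low bits of that
 // index (mod kBitsPerWord) pick the bit inside a card-table word (built in
 // R0), and the remaining bits select the word whose address is now in TMP.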
1747
1748 __ Bind(&retry);
1749 __ ldrex(R1, TMP);
1750 __ orr(R1, R1, Operand(R0));
1751 __ strex(R2, R1, TMP);
1752 __ cmp(R2, Operand(1));
1753 __ b(&retry, EQ);
1754 __ PopList((1 << R0) | (1 << R1) | (1 << R2));
1755 __ Ret();
1756
1757 // Card table not yet allocated.
1758 __ Bind(&remember_card_slow);
1759 {
1760 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
1761 /*preserve_registers=*/true);
1762 __ mov(R0, Operand(R1)); // Arg0 = Object
1763 __ mov(R1, Operand(R9)); // Arg1 = Slot
1764 rt.Call(kRememberCardRuntimeEntry, 2);
1765 }
1766 __ Ret();
1767 }
1768}
1769
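// The helper above is emitted in two flavors: GenerateWriteBarrierStub passes
// cards == false and always records R1 in the store buffer, while
// GenerateArrayWriteBarrierStub passes cards == true so stores into
// card-remembered (large) objects can instead dirty the owning page's card
// table.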
1770void StubCodeCompiler::GenerateWriteBarrierStub() {
1771 GenerateWriteBarrierStubHelper(assembler, false);
1772}
1773
1774void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
1775 GenerateWriteBarrierStubHelper(assembler, true);
1776}
1777
1778static void GenerateAllocateObjectHelper(Assembler* assembler,
1779 bool is_cls_parameterized) {
1780 const Register kTagsReg = AllocateObjectABI::kTagsReg;
1781
1782 {
1783 Label slow_case;
1784
1785#if !defined(PRODUCT)
1786 {
1787 const Register kTraceAllocationTempReg = R8;
1788 const Register kCidRegister = R9;
1789 __ ExtractClassIdFromTags(kCidRegister, AllocateObjectABI::kTagsReg);
1790 __ MaybeTraceAllocation(kCidRegister, &slow_case,
1791 kTraceAllocationTempReg);
1792 }
1793#endif
1794
1795 const Register kNewTopReg = R8;
1796
1797 // Bump allocation.
1798 {
1799 const Register kEndReg = R1;
1800 const Register kInstanceSizeReg = R9;
1801
1802 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
1803
1804 // Load two words from Thread::top: top and end.
1805 // AllocateObjectABI::kResultReg: potential next object start.
1806 __ ldrd(AllocateObjectABI::kResultReg, kEndReg, THR,
1807 target::Thread::top_offset());
1808
1809 __ add(kNewTopReg, AllocateObjectABI::kResultReg,
1810 Operand(kInstanceSizeReg));
1811
1812 __ CompareRegisters(kEndReg, kNewTopReg);
1813 __ b(&slow_case, UNSIGNED_LESS_EQUAL);
1814
1815 // Successfully allocated the object, now update top to point to
1816 // next object start and store the class in the class field of object.
1817 __ str(kNewTopReg, Address(THR, target::Thread::top_offset()));
1818 } // kEndReg = R1, kInstanceSizeReg = R9
1819
1820 // Tags.
1821 __ str(kTagsReg, Address(AllocateObjectABI::kResultReg,
1822 target::Object::tags_offset()));
1823
1824 // Initialize the remaining words of the object.
1825 {
1826 const Register kFieldReg = R1;
1827 const Register kNullReg = R9;
1828
1829 __ LoadObject(kNullReg, NullObject());
1830
1831 __ AddImmediate(kFieldReg, AllocateObjectABI::kResultReg,
1833 Label done, init_loop;
1834 __ Bind(&init_loop);
1835 __ CompareRegisters(kFieldReg, kNewTopReg);
1837 __ str(kNullReg,
1838 Address(kFieldReg, target::kWordSize, Address::PostIndex));
1839 __ b(&init_loop);
1840
1841 __ Bind(&done);
1842 } // kFieldReg = R1, kNullReg = R9
1843
1844 __ AddImmediate(AllocateObjectABI::kResultReg,
1846
1847 // Store parameterized type.
1848 if (is_cls_parameterized) {
1849 Label not_parameterized_case;
1850
1851 const Register kClsIdReg = R2;
1852 const Register kTypeOffsetReg = R9;
1853
1854 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
1855
1856 // Load class' type_arguments_field offset in words.
1857 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
1858 __ ldr(
1859 kTypeOffsetReg,
1860 FieldAddress(kTypeOffsetReg,
1861 target::Class::
1862 host_type_arguments_field_offset_in_words_offset()));
1863
1864 // Set the type arguments in the new object.
1865 __ add(kTypeOffsetReg, AllocateObjectABI::kResultReg,
1866 Operand(kTypeOffsetReg, LSL, target::kWordSizeLog2));
1867 __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
1868 FieldAddress(kTypeOffsetReg, 0),
1869 AllocateObjectABI::kTypeArgumentsReg);
1870
1871 __ Bind(&not_parameterized_case);
1872 } // kClsIdReg = R1, kTypeOffsetReg = R9
1873
1874 __ Ret();
1875
1876 __ Bind(&slow_case);
1877 } // kNewTopReg = R8
1878
1879 // Fall back on slow case:
1880 {
1881 const Register kStubReg = R8;
1882
1883 if (!is_cls_parameterized) {
1885 }
1886
1887 // Tail call to generic allocation stub.
1888 __ ldr(kStubReg,
1889 Address(THR,
1890 target::Thread::allocate_object_slow_entry_point_offset()));
1891 __ bx(kStubReg);
1892 } // kStubReg = R8
1893}
1894
1895// Called for inline allocation of objects (any class).
1896void StubCodeCompiler::GenerateAllocateObjectStub() {
1897 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
1898}
1899
1900void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
1901 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
1902}
1903
1904void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
1905 const Register kClsReg = R1;
1906
1907 if (!FLAG_precompiled_mode) {
1908 __ ldr(CODE_REG,
1910 }
1911
1912 // Create a stub frame as we are pushing some objects on the stack before
1913 // calling into the runtime.
1914 __ EnterStubFrame();
1915
1916 __ ExtractClassIdFromTags(AllocateObjectABI::kResultReg,
1917 AllocateObjectABI::kTagsReg);
1918 __ LoadClassById(kClsReg, AllocateObjectABI::kResultReg);
1919
1921
1922 // Pushes result slot, then parameter class and type arguments.
1923 // Type arguments should be Object::null() if class is non-parameterized.
1924 __ PushRegistersInOrder({AllocateObjectABI::kResultReg, kClsReg,
1925 AllocateObjectABI::kTypeArgumentsReg});
1926
1927 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
1928
1929 // Load result off the stack into result register.
1931
1932 // Write-barrier elimination is enabled for [cls] and we therefore need to
1933 // ensure that the object is in new-space or has remembered bit set.
1935
1936 __ LeaveDartFrameAndReturn();
1937}
1938
1939// Called for inline allocation of objects.
1940void StubCodeCompiler::GenerateAllocationStubForClass(
1941 UnresolvedPcRelativeCalls* unresolved_calls,
1942 const Class& cls,
1943 const Code& allocate_object,
1944 const Code& allocat_object_parametrized) {
1945 classid_t cls_id = target::Class::GetId(cls);
1946 ASSERT(cls_id != kIllegalCid);
1947
1948 // The generated code is different if the class is parameterized.
1949 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
1950 ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
1951 cls) != target::Class::kNoTypeArguments);
1952
1953 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
1954 ASSERT(instance_size > 0);
1955
1956 const uword tags =
1957 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
1958
1959 const Register kTagsReg = AllocateObjectABI::kTagsReg;
1960
1961 __ LoadImmediate(kTagsReg, tags);
1962
1963 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
1964 !target::Class::TraceAllocation(cls) &&
1965 target::SizeFitsInSizeTag(instance_size)) {
1968
1969 if (is_cls_parameterized) {
1970 if (!IsSameObject(NullObject(),
1971 CastHandle<Object>(allocat_object_parametrized))) {
1972 __ GenerateUnRelocatedPcRelativeTailCall();
1973 unresolved_calls->Add(new UnresolvedPcRelativeCall(
1974 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
1975 } else {
1976 __ ldr(PC,
1977 Address(THR,
1978 target::Thread::
1979 allocate_object_parameterized_entry_point_offset()));
1980 }
1981 } else {
1982 if (!IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
1983 __ GenerateUnRelocatedPcRelativeTailCall();
1984 unresolved_calls->Add(new UnresolvedPcRelativeCall(
1985 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
1986 } else {
1987 __ ldr(
1988 PC,
1989 Address(THR, target::Thread::allocate_object_entry_point_offset()));
1990 }
1991 }
1992 } else {
1993 if (!is_cls_parameterized) {
1995 }
1996 __ ldr(PC,
1997 Address(THR,
1998 target::Thread::allocate_object_slow_entry_point_offset()));
1999 }
2000}
2001
2002// Called to invoke the "dynamic noSuchMethod(Invocation invocation)" function
2003// from the entry code of a Dart function when an error in the passed argument
2004// names or count is detected.
2005// Input parameters:
2006// LR : return address.
2007// SP : address of last argument.
2008// R4: arguments descriptor array.
2009void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
2010 __ EnterStubFrame();
2011
2012 // Load the receiver.
2013 __ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
2014 __ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
2015 __ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
2016 target::kWordSize));
2017
2018 // Load the function.
2019 __ ldr(R6, FieldAddress(R8, target::Closure::function_offset()));
2020
2021 // Push space for the return value.
2022 // Push the receiver.
2023 // Push arguments descriptor array.
2024 __ LoadImmediate(IP, 0);
2025 __ PushList((1 << R4) | (1 << R6) | (1 << R8) | (1 << IP));
2026
2027 // Adjust arguments count.
2028 __ ldr(R3,
2029 FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
2030 __ cmp(R3, Operand(0));
2031 __ AddImmediate(R2, R2, target::ToRawSmi(1),
2032 NE); // Include the type arguments.
2033
2034 // R2: Smi-tagged arguments array length.
2035 PushArrayOfArguments(assembler);
2036
2037 const intptr_t kNumArgs = 4;
2038 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2039 // noSuchMethod on closures always throws an error, so it will never return.
2040 __ bkpt(0);
2041}
2042
2043// R8: function object.
2044// R9: inline cache data object.
2045// Cannot use function object from ICData as it may be the inlined
2046// function and not the top-scope function.
2047void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
2048 Register ic_reg = R9;
2049 Register func_reg = R8;
2050 if (FLAG_precompiled_mode) {
2051 __ Breakpoint();
2052 return;
2053 }
2054 if (FLAG_trace_optimized_ic_calls) {
2055 __ EnterStubFrame();
2056 __ PushList((1 << R9) | (1 << R8)); // Preserve.
2057 __ Push(ic_reg); // Argument.
2058 __ Push(func_reg); // Argument.
2059 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
2060 __ Drop(2); // Discard argument;
2061 __ PopList((1 << R9) | (1 << R8)); // Restore.
2062 __ LeaveStubFrame();
2063 }
2064 __ ldr(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2065 __ add(TMP, TMP, Operand(1));
2066 __ str(TMP, FieldAddress(func_reg, target::Function::usage_counter_offset()));
2067}
2068
2069// Loads function into 'temp_reg'.
2070void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
2071 if (FLAG_precompiled_mode) {
2072 __ Breakpoint();
2073 return;
2074 }
2075 if (FLAG_optimization_counter_threshold >= 0) {
2076 Register func_reg = temp_reg;
2077 ASSERT(temp_reg == R8);
2078 __ Comment("Increment function counter");
2079 __ ldr(func_reg, FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
2080 __ ldr(TMP,
2081 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2082 __ add(TMP, TMP, Operand(1));
2083 __ str(TMP,
2084 FieldAddress(func_reg, target::Function::usage_counter_offset()));
2085 }
2086}
2087
2088// Note: R9 must be preserved.
2089// Attempt a quick Smi operation for known operations ('kind'). The ICData
2090// must have been primed with a Smi/Smi check that will be used for counting
2091// the invocations.
2092static void EmitFastSmiOp(Assembler* assembler,
2093 Token::Kind kind,
2094 intptr_t num_args,
2095 Label* not_smi_or_overflow) {
2096 __ Comment("Fast Smi op");
2097 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left.
2098 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Right.
2099 __ orr(TMP, R0, Operand(R1));
2100 __ tst(TMP, Operand(kSmiTagMask));
2101 __ b(not_smi_or_overflow, NE);
2102 switch (kind) {
2103 case Token::kADD: {
2104 __ adds(R0, R1, Operand(R0)); // Adds.
2105 __ b(not_smi_or_overflow, VS); // Branch if overflow.
2106 break;
2107 }
2108 case Token::kLT: {
2109 __ cmp(R0, Operand(R1));
2110 __ LoadObject(R0, CastHandle<Object>(TrueObject()), LT);
2111 __ LoadObject(R0, CastHandle<Object>(FalseObject()), GE);
2112 break;
2113 }
2114 case Token::kEQ: {
2115 __ cmp(R0, Operand(R1));
2116 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
2117 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
2118 break;
2119 }
2120 default:
2121 UNIMPLEMENTED();
2122 }
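 // kADD uses adds so the overflow (VS) check rejects results that do not fit
 // in a Smi and falls back to the IC path; kLT and kEQ materialize true/false
 // via conditional LoadObject moves instead of branches.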
2123 // R9: IC data object (preserved).
2124 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2125 // R8: ic_data_array with check entries: classes and target functions.
2126 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2127 // R8: points directly to the first ic data array element.
2128#if defined(DEBUG)
2129 // Check that first entry is for Smi/Smi.
2130 Label error, ok;
2131 const intptr_t imm_smi_cid = target::ToRawSmi(kSmiCid);
2132 __ ldr(R1, Address(R8, 0));
2133 __ CompareImmediate(R1, imm_smi_cid);
2134 __ b(&error, NE);
2135 __ ldr(R1, Address(R8, target::kWordSize));
2136 __ CompareImmediate(R1, imm_smi_cid);
2137 __ b(&ok, EQ);
2138 __ Bind(&error);
2139 __ Stop("Incorrect IC data");
2140 __ Bind(&ok);
2141#endif
2142 if (FLAG_optimization_counter_threshold >= 0) {
2143 // Update counter, ignore overflow.
2144 const intptr_t count_offset =
2145 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2146 __ LoadFromOffset(R1, R8, count_offset);
2147 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2148 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2149 }
2150 __ Ret();
2151}
2152
2153// Saves the offset of the target entry-point (from the Function) into R3.
2154//
2155// Must be the first code generated, since any code before will be skipped in
2156// the unchecked entry-point.
2157static void GenerateRecordEntryPoint(Assembler* assembler) {
2158 Label done;
2160 __ b(&done);
2161 __ BindUncheckedEntryPoint();
2162 __ mov(
2163 R3,
2166 __ Bind(&done);
2167}
2168
2169// Generate inline cache check for 'num_args'.
2170// R0: receiver (if instance call)
2171// R9: ICData
2172// LR: return address
2173// Control flow:
2174// - If receiver is null -> jump to IC miss.
2175// - If receiver is Smi -> load Smi class.
2176// - If receiver is not-Smi -> load receiver's class.
2177// - Check if 'num_args' (including receiver) match any IC data group.
2178// - Match found -> jump to target.
2179// - Match not found -> jump to IC miss.
2180void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
2181 intptr_t num_args,
2182 const RuntimeEntry& handle_ic_miss,
2183 Token::Kind kind,
2184 Optimized optimized,
2185 CallType type,
2186 Exactness exactness) {
2187 if (FLAG_precompiled_mode) {
2188 __ Breakpoint();
2189 return;
2190 }
2191
2192 const bool save_entry_point = kind == Token::kILLEGAL;
2193 if (save_entry_point) {
2194 GenerateRecordEntryPoint(assembler);
2195 }
2196
2197 if (optimized == kOptimized) {
2198 GenerateOptimizedUsageCounterIncrement();
2199 } else {
2200 GenerateUsageCounterIncrement(/* scratch */ R8);
2201 }
2202
2203 __ CheckCodePointer();
2204 ASSERT(num_args == 1 || num_args == 2);
2205#if defined(DEBUG)
2206 {
2207 Label ok;
2208 // Check that the IC data array has NumArgsTested() == num_args.
2209 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2210 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2211 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2212 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2213 __ CompareImmediate(R8, num_args);
2214 __ b(&ok, EQ);
2215 __ Stop("Incorrect stub for IC data");
2216 __ Bind(&ok);
2217 }
2218#endif // DEBUG
2219
2220#if !defined(PRODUCT)
2221 Label stepping, done_stepping;
2222 if (optimized == kUnoptimized) {
2223 __ Comment("Check single stepping");
2224 __ LoadIsolate(R8);
2225 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2226 __ CompareImmediate(R8, 0);
2227 __ b(&stepping, NE);
2228 __ Bind(&done_stepping);
2229 }
2230#endif
2231
2232 Label not_smi_or_overflow;
2233 if (kind != Token::kILLEGAL) {
2234 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2235 }
2236 __ Bind(&not_smi_or_overflow);
2237
2238 __ Comment("Extract ICData initial values and receiver cid");
2239 // R9: IC data object (preserved).
2240 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2241 // R8: ic_data_array with check entries: classes and target functions.
2242 const int kIcDataOffset = target::Array::data_offset() - kHeapObjectTag;
2243 // R8: points at the IC data array.
2244
2245 if (type == kInstanceCall) {
2246 __ LoadTaggedClassIdMayBeSmi(NOTFP, R0);
2247 __ ldr(
2250 if (num_args == 2) {
2251 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2252 target::ArgumentsDescriptor::count_offset()));
2253 __ sub(R1, R1, Operand(target::ToRawSmi(2)));
2254 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2255 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2256 }
2257 } else {
2258 // Load arguments descriptor into R4.
2259 __ ldr(
2262
2263 // Get the receiver's class ID (first read number of arguments from
2264 // arguments descriptor array and then access the receiver from the stack).
2265 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2266 target::ArgumentsDescriptor::count_offset()));
2267 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2268 // R1: argument_count - 1 (smi).
2269
2270 __ ldr(R0, Address(SP, R1, LSL, 1)); // R1 (argument_count - 1) is Smi.
2271 __ LoadTaggedClassIdMayBeSmi(NOTFP, R0);
2272
2273 if (num_args == 2) {
2274 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2275 __ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
2276 __ LoadTaggedClassIdMayBeSmi(R1, R1);
2277 }
2278 }
2279 // NOTFP: first argument class ID as Smi.
2280 // R1: second argument class ID as Smi.
2281 // R4: args descriptor
2282
2283 // Loop that checks if there is an IC data match.
2284 Label loop, found, miss;
2285 __ Comment("ICData loop");
2286
2287 // We unroll the generic one that is generated once more than the others.
2288 const bool optimize = kind == Token::kILLEGAL;
2289
2290 __ Bind(&loop);
2291 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2292 Label update;
2293
2294 __ ldr(R2, Address(R8, kIcDataOffset));
2295 __ cmp(NOTFP, Operand(R2)); // Class id match?
2296 if (num_args == 2) {
2297 __ b(&update, NE); // Continue.
2298 __ ldr(R2, Address(R8, kIcDataOffset + target::kWordSize));
2299 __ cmp(R1, Operand(R2)); // Class id match?
2300 }
2301 __ b(&found, EQ); // Break.
2302
2303 __ Bind(&update);
2304
2305 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2306 num_args, exactness == kCheckExactness) *
2307 target::kWordSize;
2308 __ AddImmediate(R8, entry_size); // Next entry.
2309
2310 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid)); // Done?
2311 if (unroll == 0) {
2312 __ b(&loop, NE);
2313 } else {
2314 __ b(&miss, EQ);
2315 }
2316 }
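 // Each unrolled copy above compares the receiver cid Smi (and, when
 // num_args == 2, the second argument cid) against the current entry, then
 // advances R8 by one entry. The entries array ends with a kIllegalCid
 // sentinel: only the last copy branches back to &loop, the earlier copies
 // jump straight to &miss when they hit the sentinel.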
2317
2318 __ Bind(&miss);
2319 __ Comment("IC miss");
2320 // Compute address of arguments.
2321 __ ldr(R1, FieldAddress(ARGS_DESC_REG,
2322 target::ArgumentsDescriptor::count_offset()));
2323 __ sub(R1, R1, Operand(target::ToRawSmi(1)));
2324 // R1: argument_count - 1 (smi).
2325 __ add(R1, SP, Operand(R1, LSL, 1)); // R1 is Smi.
2326 // R1: address of receiver.
2327 // Create a stub frame as we are pushing some objects on the stack before
2328 // calling into the runtime.
2329 __ EnterStubFrame();
2330 __ LoadImmediate(R0, 0);
2331 // Preserve IC data object and arguments descriptor array and
2332 // setup space on stack for result (target code object).
2333 RegList regs = (1 << R0) | (1 << ARGS_DESC_REG) | (1 << R9);
2334 if (save_entry_point) {
2335 __ SmiTag(R3);
2336 regs |= 1 << R3;
2337 }
2338 __ PushList(regs);
2339 // Push call arguments.
2340 for (intptr_t i = 0; i < num_args; i++) {
2341 __ LoadFromOffset(TMP, R1, -i * target::kWordSize);
2342 __ Push(TMP);
2343 }
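 // R1 holds the address of the receiver, so each iteration above re-pushes
 // the i-th call argument from R1 - i * kWordSize for the miss handler.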
2344 // Pass IC data object.
2345 __ Push(R9);
2346 __ CallRuntime(handle_ic_miss, num_args + 1);
2347 // Remove the call arguments pushed earlier, including the IC data object.
2348 __ Drop(num_args + 1);
2349 // Pop returned function object into R0.
2350 // Restore arguments descriptor array and IC data array.
2352 __ PopList(regs);
2353 if (save_entry_point) {
2354 __ SmiUntag(R3);
2355 }
2356 __ RestoreCodePointer();
2357 __ LeaveStubFrame();
2358 Label call_target_function;
2359 if (FLAG_precompiled_mode) {
2360 GenerateDispatcherCode(assembler, &call_target_function);
2361 } else {
2362 __ b(&call_target_function);
2363 }
2364
2365 __ Bind(&found);
2366 // R8: pointer to an IC data check group.
2367 const intptr_t target_offset =
2368 target::ICData::TargetIndexFor(num_args) * target::kWordSize;
2369 const intptr_t count_offset =
2370 target::ICData::CountIndexFor(num_args) * target::kWordSize;
2371 const intptr_t exactness_offset =
2372 target::ICData::ExactnessIndexFor(num_args) * target::kWordSize;
2373
2374 Label call_target_function_through_unchecked_entry;
2375 if (exactness == kCheckExactness) {
2376 Label exactness_ok;
2377 ASSERT(num_args == 1);
2378 __ ldr(R1, Address(R8, kIcDataOffset + exactness_offset));
2379 __ CompareImmediate(
2382 __ BranchIf(LESS, &exactness_ok);
2383 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2384
2385 // Check trivial exactness.
2386 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2387 // because we only emit calls to this stub when it is not null.
2388 __ ldr(R2,
2389 FieldAddress(R9, target::ICData::receivers_static_type_offset()));
2390 __ ldr(R2, FieldAddress(R2, target::Type::arguments_offset()));
2391 // R1 contains an offset to type arguments in words as a smi,
2392 // hence TIMES_2. R0 is guaranteed to be non-smi because it is expected
2393 // to have type argument.
2394 __ LoadIndexedPayload(TMP, R0, 0, R1, TIMES_2);
2395 __ CompareObjectRegisters(R2, TMP);
2396 __ BranchIf(EQUAL, &call_target_function_through_unchecked_entry);
2397
2398 // Update exactness state (not-exact anymore).
2399 __ LoadImmediate(
2401 __ str(R1, Address(R8, kIcDataOffset + exactness_offset));
2402 __ Bind(&exactness_ok);
2403 }
2404 __ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
2405
2406 if (FLAG_optimization_counter_threshold >= 0) {
2407 __ Comment("Update caller's counter");
2408 __ LoadFromOffset(R1, R8, kIcDataOffset + count_offset);
2409 __ add(R1, R1, Operand(target::ToRawSmi(1))); // Ignore overflow.
2410 __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
2411 }
2412
2413 __ Comment("Call target");
2414 __ Bind(&call_target_function);
2415 // R0: target function.
2417
2418 if (save_entry_point) {
2419 __ Branch(Address(FUNCTION_REG, R3));
2420 } else {
2421 __ Branch(
2423 }
2424
2425 if (exactness == kCheckExactness) {
2426 __ Bind(&call_target_function_through_unchecked_entry);
2427 if (FLAG_optimization_counter_threshold >= 0) {
2428 __ Comment("Update ICData counter");
2429 __ LoadFromOffset(R1, R8, kIcDataOffset + count_offset);
2430 __ add(R1, R1, Operand(target::ToRawSmi(1))); // Ignore overflow.
2431 __ StoreIntoSmiField(Address(R8, kIcDataOffset + count_offset), R1);
2432 }
2433 __ Comment("Call target (via unchecked entry point)");
2434 __ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
2435 __ ldr(CODE_REG,
2439 }
2440
2441#if !defined(PRODUCT)
2442 if (optimized == kUnoptimized) {
2443 __ Bind(&stepping);
2444 __ EnterStubFrame();
2445 if (type == kInstanceCall) {
2446 __ Push(R0); // Preserve receiver.
2447 }
2448 RegList regs = 1 << R9;
2449 if (save_entry_point) {
2450 regs |= 1 << R3;
2451 __ SmiTag(R3); // Entry-point is not Smi.
2452 }
2453 __ PushList(regs); // Preserve IC data and entry-point.
2454 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2455 __ PopList(regs); // Restore IC data and entry-point
2456 if (save_entry_point) {
2457 __ SmiUntag(R3);
2458 }
2459 if (type == kInstanceCall) {
2460 __ Pop(R0);
2461 }
2462 __ RestoreCodePointer();
2463 __ LeaveStubFrame();
2464 __ b(&done_stepping);
2465 }
2466#endif
2467}
2468
2469// R0: receiver
2470// R9: ICData
2471// LR: return address
2472void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2473 GenerateNArgsCheckInlineCacheStub(
2474 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2475 kUnoptimized, kInstanceCall, kIgnoreExactness);
2476}
2477
2478// R0: receiver
2479// R9: ICData
2480// LR: return address
2481void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2482 GenerateNArgsCheckInlineCacheStub(
2483 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2484 kUnoptimized, kInstanceCall, kCheckExactness);
2485}
2486
2487// R0: receiver
2488// R9: ICData
2489// LR: return address
2490void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2491 GenerateNArgsCheckInlineCacheStub(
2492 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2493 kUnoptimized, kInstanceCall, kIgnoreExactness);
2494}
2495
2496// R0: receiver
2497// R9: ICData
2498// LR: return address
2499void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2500 GenerateNArgsCheckInlineCacheStub(
2501 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2502 kInstanceCall, kIgnoreExactness);
2503}
2504
2505// R0: receiver
2506// R9: ICData
2507// LR: return address
2508void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2509 GenerateNArgsCheckInlineCacheStub(
2510 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2511 kInstanceCall, kIgnoreExactness);
2512}
2513
2514// R0: receiver
2515// R9: ICData
2516// LR: return address
2517void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2518 GenerateNArgsCheckInlineCacheStub(
2519 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2520 kInstanceCall, kIgnoreExactness);
2521}
2522
2523// R0: receiver
2524// R9: ICData
2525// R8: Function
2526// LR: return address
2527void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2528 GenerateNArgsCheckInlineCacheStub(
2529 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2530 kInstanceCall, kIgnoreExactness);
2531}
2532
2533// R0: receiver
2534// R9: ICData
2535// R8: Function
2536// LR: return address
2537void StubCodeCompiler::
2538 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2539 GenerateNArgsCheckInlineCacheStub(
2540 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2541 kInstanceCall, kCheckExactness);
2542}
2543
2544// R0: receiver
2545// R9: ICData
2546// R8: Function
2547// LR: return address
2548void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2549 GenerateNArgsCheckInlineCacheStub(
2550 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2551 kOptimized, kInstanceCall, kIgnoreExactness);
2552}
2553
2554// R9: ICData
2555// LR: return address
2556void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2557 GenerateRecordEntryPoint(assembler);
2558 GenerateUsageCounterIncrement(/* scratch */ R8);
2559#if defined(DEBUG)
2560 {
2561 Label ok;
2562 // Check that the IC data array has NumArgsTested() == 0.
2563 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2564 __ ldr(R8, FieldAddress(R9, target::ICData::state_bits_offset()));
2565 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2566 __ and_(R8, R8, Operand(target::ICData::NumArgsTestedMask()));
2567 __ CompareImmediate(R8, 0);
2568 __ b(&ok, EQ);
2569 __ Stop("Incorrect IC data for unoptimized static call");
2570 __ Bind(&ok);
2571 }
2572#endif // DEBUG
2573
2574#if !defined(PRODUCT)
2575 // Check single stepping.
2576 Label stepping, done_stepping;
2577 __ LoadIsolate(R8);
2578 __ ldrb(R8, Address(R8, target::Isolate::single_step_offset()));
2579 __ CompareImmediate(R8, 0);
2580 __ b(&stepping, NE);
2581 __ Bind(&done_stepping);
2582#endif
2583
2584 // R9: IC data object (preserved).
2585 __ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
2586 // R8: ic_data_array with entries: target functions and count.
2587 __ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
2588 // R8: points directly to the first ic data array element.
2589 const intptr_t target_offset =
2590 target::ICData::TargetIndexFor(0) * target::kWordSize;
2591 const intptr_t count_offset =
2592 target::ICData::CountIndexFor(0) * target::kWordSize;
2593
2594 if (FLAG_optimization_counter_threshold >= 0) {
2595 // Increment count for this call, ignore overflow.
2596 __ LoadFromOffset(R1, R8, count_offset);
2597 __ adds(R1, R1, Operand(target::ToRawSmi(1)));
2598 __ StoreIntoSmiField(Address(R8, count_offset), R1);
2599 }
2600
2601 // Load arguments descriptor into R4.
2602 __ ldr(ARGS_DESC_REG,
2604
2605 // Get function and call it, if possible.
2606 __ LoadFromOffset(FUNCTION_REG, R8, target_offset);
2608
2609 __ Branch(Address(FUNCTION_REG, R3));
2610
2611#if !defined(PRODUCT)
2612 __ Bind(&stepping);
2613 __ EnterStubFrame();
2614 __ SmiTag(R3); // Entry-point is not Smi.
2615 __ PushList((1 << R9) | (1 << R3)); // Preserve IC data and entry-point.
2616 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2617 __ PopList((1 << R9) | (1 << R3));
2618 __ SmiUntag(R3);
2619 __ RestoreCodePointer();
2620 __ LeaveStubFrame();
2621 __ b(&done_stepping);
2622#endif
2623}
2624
2625// R9: ICData
2626// LR: return address
2627void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2628 GenerateUsageCounterIncrement(/* scratch */ R8);
2629 GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
2630 Token::kILLEGAL, kUnoptimized, kStaticCall,
2631 kIgnoreExactness);
2632}
2633
2634// R9: ICData
2635// LR: return address
2636void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2637 GenerateUsageCounterIncrement(/* scratch */ R8);
2638 GenerateNArgsCheckInlineCacheStub(
2639 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2640 kUnoptimized, kStaticCall, kIgnoreExactness);
2641}
2642
2643// Stub for compiling a function and jumping to the compiled code.
2644// ARGS_DESC_REG: Arguments descriptor.
2645// FUNCTION_REG: Function.
2646void StubCodeCompiler::GenerateLazyCompileStub() {
2647 __ EnterStubFrame();
2648 // Preserve arg desc, pass function.
2650 __ PushList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
2651 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2652 __ PopList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
2653 __ LeaveStubFrame();
2654
2656 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2657}
2658
2659// R9: Contains an ICData.
2660void StubCodeCompiler::GenerateICCallBreakpointStub() {
2661#if defined(PRODUCT)
2662 __ Stop("No debugging in PRODUCT mode");
2663#else
2664 __ EnterStubFrame();
2665 __ Push(R0); // Preserve receiver.
2666 __ Push(R9); // Preserve IC data.
2667 __ PushImmediate(0); // Space for result.
2668 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2669 __ Pop(CODE_REG); // Original stub.
2670 __ Pop(R9); // Restore IC data.
2671 __ Pop(R0); // Restore receiver.
2672 __ LeaveStubFrame();
2673 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2674#endif // defined(PRODUCT)
2675}
2676
2677void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2678#if defined(PRODUCT)
2679 __ Stop("No debugging in PRODUCT mode");
2680#else
2681 __ EnterStubFrame();
2682 __ Push(R9); // Preserve IC data.
2683 __ PushImmediate(0); // Space for result.
2684 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2685 __ Pop(CODE_REG); // Original stub.
2686 __ Pop(R9); // Restore IC data.
2687 __ LeaveStubFrame();
2688 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2689#endif // defined(PRODUCT)
2690}
2691
2692void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2693#if defined(PRODUCT)
2694 __ Stop("No debugging in PRODUCT mode");
2695#else
2696 __ EnterStubFrame();
2697 __ LoadImmediate(R0, 0);
2698 // Make room for result.
2699 __ PushList((1 << R0));
2700 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2701 __ PopList((1 << CODE_REG));
2702 __ LeaveStubFrame();
2703 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2704#endif // defined(PRODUCT)
2705}
2706
2707// Called only from unoptimized code. All relevant registers have been saved.
2708void StubCodeCompiler::GenerateDebugStepCheckStub() {
2709#if defined(PRODUCT)
2710 __ Stop("No debugging in PRODUCT mode");
2711#else
2712 // Check single stepping.
2713 Label stepping, done_stepping;
2714 __ LoadIsolate(R1);
2715 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
2716 __ CompareImmediate(R1, 0);
2717 __ b(&stepping, NE);
2718 __ Bind(&done_stepping);
2719 __ Ret();
2720
2721 __ Bind(&stepping);
2722 __ EnterStubFrame();
2723 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2724 __ LeaveStubFrame();
2725 __ b(&done_stepping);
2726#endif // defined(PRODUCT)
2727}
2728
2729// Used to check class and type arguments. Arguments passed in registers:
2730//
2731// Inputs (all preserved, mostly from TypeTestABI struct):
2732// - kSubtypeTestCacheReg: SubtypeTestCacheLayout
2733// - kInstanceReg: instance to test against.
2734// - kDstTypeReg: destination type (for n>=7).
2735// - kInstantiatorTypeArgumentsReg: instantiator type arguments (for n>=3).
2736// - kFunctionTypeArgumentsReg: function type arguments (for n>=4).
2737// - LR: return address.
2738//
2739// Outputs (from TypeTestABI struct):
2740// - kSubtypeTestCacheResultReg: the cached result, or null if not found.
2741void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2742 int n) {
2743 ASSERT(n >= 1);
2745 // If we need the parent function type arguments for a closure, we also need
2746 // the delayed type arguments, so this case will never happen.
2747 ASSERT(n != 5);
2748 RegisterSet saved_registers;
2749
2750 // Safe as the original value of TypeTestABI::kSubtypeTestCacheReg is only
2751 // used to initialize this register.
2752 const Register kCacheArrayReg = TypeTestABI::kSubtypeTestCacheReg;
2753 saved_registers.AddRegister(kCacheArrayReg);
2754
2755 // CODE_REG is used only in JIT mode, and the dispatch table only exists in
2756 // AOT mode, so we can use the corresponding register for the mode we're not
2757 // in without having to preserve it.
2758 const Register kNullReg =
2759 FLAG_precompiled_mode ? CODE_REG : DISPATCH_TABLE_REG;
2760 __ LoadObject(kNullReg, NullObject());
2761
2762 // Free up additional registers needed for checks in the loop. Initially
2763 // define them as kNoRegister so any unexpected uses are caught.
2764 Register kInstanceInstantiatorTypeArgumentsReg = kNoRegister;
2765 if (n >= 2) {
2766 kInstanceInstantiatorTypeArgumentsReg = PP;
2767 saved_registers.AddRegister(kInstanceInstantiatorTypeArgumentsReg);
2768 }
2769 Register kInstanceParentFunctionTypeArgumentsReg = kNoRegister;
2770 if (n >= 5) {
2771 // For this, we look at the pair of Registers we considered for kNullReg
2772 // and use the one that must be preserved instead.
2773 kInstanceParentFunctionTypeArgumentsReg =
2774 FLAG_precompiled_mode ? DISPATCH_TABLE_REG : CODE_REG;
2775 saved_registers.AddRegister(kInstanceParentFunctionTypeArgumentsReg);
2776 }
2777 Register kInstanceDelayedFunctionTypeArgumentsReg = kNoRegister;
2778 if (n >= 6) {
2779 // We retrieve all the needed fields from the instance during loop
2780 // initialization and store them in registers, so we don't need the value
2781 // of kInstanceReg during the loop and just need to save and restore it.
2782 // Thus, use kInstanceReg for the last field that can possibly be retrieved
2783 // from the instance.
2784 kInstanceDelayedFunctionTypeArgumentsReg = TypeTestABI::kInstanceReg;
2785 saved_registers.AddRegister(kInstanceDelayedFunctionTypeArgumentsReg);
2786 }
2787
2788 // We'll replace these with actual registers if possible, but fall back to
2789 // the stack if register pressure is too great. The last two values are
2790 // used in every loop iteration, and so are more important to put in
2791 // registers if possible, whereas the first is used only when we go off
2792 // the end of the backing array (usually at most once per check).
2793 Register kCacheContentsSizeReg = kNoRegister;
2794 if (n < 5) {
2795 // Use the register we would have used for the parent function type args.
2796 kCacheContentsSizeReg =
2797 FLAG_precompiled_mode ? DISPATCH_TABLE_REG : CODE_REG;
2798 saved_registers.AddRegister(kCacheContentsSizeReg);
2799 }
2800 Register kProbeDistanceReg = kNoRegister;
2801 if (n < 6) {
2802 // Use the register we would have used for the delayed type args.
2803 kProbeDistanceReg = TypeTestABI::kInstanceReg;
2804 saved_registers.AddRegister(kProbeDistanceReg);
2805 }
2806 Register kCacheEntryEndReg = kNoRegister;
2807 if (n < 7) {
2808 // Use the destination type, as that is the last input that might be unused.
2809 kCacheEntryEndReg = TypeTestABI::kDstTypeReg;
2810 saved_registers.AddRegister(TypeTestABI::kDstTypeReg);
2811 }
2812
2813 __ PushRegisters(saved_registers);
2814
2815 Label not_found;
2816 GenerateSubtypeTestCacheSearch(
2817 assembler, n, kNullReg, kCacheArrayReg,
2819 kInstanceInstantiatorTypeArgumentsReg,
2820 kInstanceParentFunctionTypeArgumentsReg,
2821 kInstanceDelayedFunctionTypeArgumentsReg, kCacheEntryEndReg,
2822 kCacheContentsSizeReg, kProbeDistanceReg,
2823 [&](Assembler* assembler, int n) {
2824 __ LoadCompressed(
2826 Address(kCacheArrayReg, target::kCompressedWordSize *
2828 __ PopRegisters(saved_registers);
2829 __ Ret();
2830 },
2831 [&](Assembler* assembler, int n) {
2832 __ MoveRegister(TypeTestABI::kSubtypeTestCacheResultReg, kNullReg);
2833 __ PopRegisters(saved_registers);
2834 __ Ret();
2835 });
2836}
2837
2838// Return the current stack pointer address, used to do stack alignment checks.
2839void StubCodeCompiler::GenerateGetCStackPointerStub() {
2840 __ mov(R0, Operand(SP));
2841 __ Ret();
2842}
2843
2844// Jump to a frame on the call stack.
2845// LR: return address.
2846// R0: program_counter.
2847// R1: stack_pointer.
2848// R2: frame_pointer.
2849// R3: thread.
2850// Does not return.
2851//
2852// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
2853void StubCodeCompiler::GenerateJumpToFrameStub() {
2858 __ mov(IP, Operand(R1)); // Copy Stack pointer into IP.
2859 // TransitionGeneratedToNative might clobber LR if it takes the slow path.
2860 __ mov(R4, Operand(R0)); // Program counter.
2861 __ mov(THR, Operand(R3)); // Thread.
2862 __ mov(FP, Operand(R2)); // Frame_pointer.
2863 __ mov(SP, Operand(IP)); // Set Stack pointer.
2864#if defined(USING_SHADOW_CALL_STACK)
2865#error Unimplemented
2866#endif
2867 Label exit_through_non_ffi;
2868 Register tmp1 = R0, tmp2 = R1;
2869 // Check if we exited generated code through an FFI call. If so, do the
2870 // transition here: runtime calls normally transition back to generated code
2871 // via the destructor of TransitionGeneratedToVM/Native, which is part of the
2872 // runtime boilerplate (see DEFINE_RUNTIME_ENTRY_IMPL in runtime_entry.h).
2873 // FFI calls don't have this boilerplate or stack resource, so they have to
2874 // transition explicitly.
2875 __ LoadFromOffset(tmp1, THR,
2876 target::Thread::exit_through_ffi_offset());
2877 __ LoadImmediate(tmp2, target::Thread::exit_through_ffi());
2878 __ cmp(tmp1, Operand(tmp2));
2879 __ b(&exit_through_non_ffi, NE);
2880 __ TransitionNativeToGenerated(tmp1, tmp2,
2881 /*leave_safepoint=*/true,
2882 /*ignore_unwind_in_progress=*/true);
2883 __ Bind(&exit_through_non_ffi);
2884
2885 // Set the tag.
2886 __ LoadImmediate(R2, VMTag::kDartTagId);
2887 __ StoreToOffset(R2, THR, target::Thread::vm_tag_offset());
2888 // Clear top exit frame.
2889 __ LoadImmediate(R2, 0);
2890 __ StoreToOffset(R2, THR, target::Thread::top_exit_frame_info_offset());
2891 // Restore the pool pointer.
2892 __ RestoreCodePointer();
2893 if (FLAG_precompiled_mode) {
2894 __ SetupGlobalPoolAndDispatchTable();
2895 __ set_constant_pool_allowed(true);
2896 } else {
2897 __ LoadPoolPointer();
2898 }
2899 __ bx(R4); // Jump to continuation point.
2900}
2901
2902// Run an exception handler. Execution comes from JumpToFrame
2903// stub or from the simulator.
2904//
2905// The arguments are stored in the Thread object.
2906// Does not return.
2907void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
2908 WRITES_RETURN_ADDRESS_TO_LR(
2909 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
2910
2911 word offset_from_thread = 0;
2912 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
2913 ASSERT(ok);
2914 __ LoadFromOffset(R2, THR, offset_from_thread);
2915
2916 // Exception object.
2917 __ LoadFromOffset(R0, THR, target::Thread::active_exception_offset());
2918 __ StoreToOffset(R2, THR, target::Thread::active_exception_offset());
2919
2920 // StackTrace object.
2921 __ LoadFromOffset(R1, THR, target::Thread::active_stacktrace_offset());
2922 __ StoreToOffset(R2, THR, target::Thread::active_stacktrace_offset());
2923
2924 READS_RETURN_ADDRESS_FROM_LR(
2925 __ bx(LR)); // Jump to the exception handler code.
2926}
2927
2928// Deoptimize a frame on the call stack before rewinding.
2929// The arguments are stored in the Thread object.
2930// No result.
2931void StubCodeCompiler::GenerateDeoptForRewindStub() {
2932 // Push zap value instead of CODE_REG.
2933 __ LoadImmediate(IP, kZapCodeReg);
2934 __ Push(IP);
2935
2936 // Load the deopt pc into LR.
2937 WRITES_RETURN_ADDRESS_TO_LR(
2938 __ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
2939 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
2940
2941 // After we have deoptimized, jump to the correct frame.
2942 __ EnterStubFrame();
2943 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
2944 __ LeaveStubFrame();
2945 __ bkpt(0);
2946}
2947
2948// Calls to the runtime to optimize the given function.
2949// R8: function to be reoptimized.
2950// ARGS_DESC_REG: argument descriptor (preserved).
2951void StubCodeCompiler::GenerateOptimizeFunctionStub() {
2953 __ EnterStubFrame();
2954 __ Push(ARGS_DESC_REG);
2955 __ LoadImmediate(IP, 0);
2956 __ Push(IP); // Setup space on stack for return value.
2957 __ Push(R8);
2958 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
2959 __ Pop(R0); // Discard argument.
2960 __ Pop(FUNCTION_REG); // Get Function object
2961 __ Pop(ARGS_DESC_REG); // Restore argument descriptor.
2962 __ LeaveStubFrame();
2964 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2965 __ bkpt(0);
2966}
2967
2968// Does identical check (object references are equal or not equal) with special
2969// checks for boxed numbers.
2970// LR: return address.
2971// Return Zero condition flag set if equal.
2972// Note: A Mint cannot contain a value that would fit in Smi.
2973static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
2974 const Register left,
2975 const Register right,
2976 const Register temp) {
2977 Label reference_compare, done, check_mint;
2978 // If any of the arguments is Smi do reference compare.
2979 __ tst(left, Operand(kSmiTagMask));
2980 __ b(&reference_compare, EQ);
2981 __ tst(right, Operand(kSmiTagMask));
2982 __ b(&reference_compare, EQ);
2983
2984 // Value compare for two doubles.
2985 __ CompareClassId(left, kDoubleCid, temp);
2986 __ b(&check_mint, NE);
2987 __ CompareClassId(right, kDoubleCid, temp);
2988 __ b(&done, NE);
2989
2990 // Double values bitwise compare.
2991 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
2992 0 * target::kWordSize));
2993 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
2994 0 * target::kWordSize));
2995 __ cmp(temp, Operand(IP));
2996 __ b(&done, NE);
2997 __ ldr(temp, FieldAddress(left, target::Double::value_offset() +
2998 1 * target::kWordSize));
2999 __ ldr(IP, FieldAddress(right, target::Double::value_offset() +
3000 1 * target::kWordSize));
3001 __ cmp(temp, Operand(IP));
3002 __ b(&done);
3003
3004 __ Bind(&check_mint);
3005 __ CompareClassId(left, kMintCid, temp);
3006 __ b(&reference_compare, NE);
3007 __ CompareClassId(right, kMintCid, temp);
3008 __ b(&done, NE);
3009 __ ldr(temp, FieldAddress(
3010 left, target::Mint::value_offset() + 0 * target::kWordSize));
3011 __ ldr(IP, FieldAddress(
3012 right, target::Mint::value_offset() + 0 * target::kWordSize));
3013 __ cmp(temp, Operand(IP));
3014 __ b(&done, NE);
3015 __ ldr(temp, FieldAddress(
3016 left, target::Mint::value_offset() + 1 * target::kWordSize));
3017 __ ldr(IP, FieldAddress(
3018 right, target::Mint::value_offset() + 1 * target::kWordSize));
3019 __ cmp(temp, Operand(IP));
3020 __ b(&done);
3021
3022 __ Bind(&reference_compare);
3023 __ cmp(left, Operand(right));
3024 __ Bind(&done);
3025}
3026
3027// Called only from unoptimized code. All relevant registers have been saved.
3028// LR: return address.
3029// SP + 4: left operand.
3030// SP + 0: right operand.
3031// Return Zero condition flag set if equal.
3032void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
3033#if !defined(PRODUCT)
3034 // Check single stepping.
3035 Label stepping, done_stepping;
3036 __ LoadIsolate(R1);
3037 __ ldrb(R1, Address(R1, target::Isolate::single_step_offset()));
3038 __ CompareImmediate(R1, 0);
3039 __ b(&stepping, NE);
3040 __ Bind(&done_stepping);
3041#endif
3042
3043 const Register temp = R2;
3044 const Register left = R1;
3045 const Register right = R0;
3046 __ ldr(left, Address(SP, 1 * target::kWordSize));
3047 __ ldr(right, Address(SP, 0 * target::kWordSize));
3048 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3049 __ Ret();
3050
3051#if !defined(PRODUCT)
3052 __ Bind(&stepping);
3053 __ EnterStubFrame();
3054 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3055 __ RestoreCodePointer();
3056 __ LeaveStubFrame();
3057 __ b(&done_stepping);
3058#endif
3059}
3060
3061// Called from optimized code only.
3062// LR: return address.
3063// SP + 4: left operand.
3064// SP + 0: right operand.
3065// Return Zero condition flag set if equal.
3066void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
3067 const Register temp = R2;
3068 const Register left = R1;
3069 const Register right = R0;
3070 __ ldr(left, Address(SP, 1 * target::kWordSize));
3071 __ ldr(right, Address(SP, 0 * target::kWordSize));
3072 GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp);
3073 __ Ret();
3074}
3075
3076// Called from megamorphic calls.
3077// R0: receiver
3078// IC_DATA_REG: MegamorphicCache (preserved)
3079// Passed to target:
3080// FUNCTION_REG: target function
3081// ARGS_DESC_REG: arguments descriptor
3082// CODE_REG: target Code
3083void StubCodeCompiler::GenerateMegamorphicCallStub() {
3084 __ LoadTaggedClassIdMayBeSmi(R8, R0);
3085 // R8: receiver cid as Smi.
3086 __ ldr(R2,
3087 FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
3088 __ ldr(R1,
3089 FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
3090 // R2: cache buckets array.
3091 // R1: mask as a smi.
3092
3093 // Compute the table index.
3095 // Use reverse subtract to multiply with 7 == 8 - 1.
3096 __ rsb(R3, R8, Operand(R8, LSL, 3));
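 // (R8 << 3) - R8 == cid * 7, the megamorphic hash. The probe below is masked
 // with the table mask on every iteration and advanced by one (Smi-tagged)
 // entry on a class-id mismatch, i.e. linear probing over the buckets array.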
3097 // R3: probe.
3098 Label loop;
3099 __ Bind(&loop);
3100 __ and_(R3, R3, Operand(R1));
3101
3102 const intptr_t base = target::Array::data_offset();
3103 // R3 is smi tagged, but table entries are two words, so LSL 2.
3104 Label probe_failed;
3105 __ add(IP, R2, Operand(R3, LSL, 2));
3106 __ ldr(R6, FieldAddress(IP, base));
3107 __ cmp(R6, Operand(R8));
3108 __ b(&probe_failed, NE);
3109
3110 Label load_target;
3111 __ Bind(&load_target);
3112 // Call the target found in the cache. For a class id match, this is a
3113 // proper target for the given name and arguments descriptor. If the
3114 // illegal class id was found, the target is a cache miss handler that can
3115 // be invoked as a normal Dart function.
3116 __ ldr(FUNCTION_REG, FieldAddress(IP, base + target::kWordSize));
3117 if (!FLAG_precompiled_mode) {
3118 __ ldr(CODE_REG,
3119 FieldAddress(FUNCTION_REG, target::Function::code_offset()));
3120 }
3121 __ ldr(ARGS_DESC_REG,
3122 FieldAddress(IC_DATA_REG,
3124 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
3125
3126 // Probe failed, check if it is a miss.
3127 __ Bind(&probe_failed);
3128 ASSERT(kIllegalCid == 0);
3129 __ tst(R6, Operand(R6));
3130 Label miss;
3131 __ b(&miss, EQ); // branch if miss.
3132
3133 // Try next entry in the table.
3134 __ AddImmediate(R3, target::ToRawSmi(1));
3135 __ b(&loop);
3136
3137 __ Bind(&miss);
3138 GenerateSwitchableCallMissStub();
3139}
3140
3141void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3142 Label loop, found, miss;
3143 __ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
3144 __ ldr(R4, FieldAddress(IC_DATA_REG,
3147 // R8: first IC entry
3148 __ LoadTaggedClassIdMayBeSmi(R1, R0);
3149 // R1: receiver cid as Smi
3150
3151 __ Bind(&loop);
3152 __ ldr(R2, Address(R8, 0));
3153 __ cmp(R1, Operand(R2));
3154 __ b(&found, EQ);
3155 __ CompareImmediate(R2, target::ToRawSmi(kIllegalCid));
3156 __ b(&miss, EQ);
3157
3158 const intptr_t entry_length =
3159 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3161 __ AddImmediate(R8, entry_length); // Next entry.
3162 __ b(&loop);
3163
3164 __ Bind(&found);
3165 if (FLAG_precompiled_mode) {
3166 const intptr_t entry_offset =
3167 target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
3168 __ LoadCompressed(R0, Address(R8, entry_offset));
3169 __ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
3170 } else {
3171 const intptr_t code_offset =
3173 __ LoadCompressed(CODE_REG, Address(R8, code_offset));
3174 __ Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3175 }
3176
3177 __ Bind(&miss);
3179}
3180
3181// Implement the monomorphic entry check for call-sites where the receiver
3182// might be a Smi.
3183//
3184// R0: receiver
3185// R9: MonomorphicSmiableCall object
3186//
3187// R2, R3: clobbered
3188void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3189 __ LoadClassIdMayBeSmi(IP, R0);
3190
3191 // entrypoint_ should come right after expected_cid_
3195
3196 // Note: this stub is only used in AOT mode, hence the direct (bare) call.
3197 // Simultaneously load the expected cid into R2 and the entrypoint into R3.
3198 __ ldrd(
3199 R2, R3, R9,
3201 __ cmp(R2, Operand(IP));
3203 NE);
3204 __ bx(R3);
3205}
3206
3207static void CallSwitchableCallMissRuntimeEntry(Assembler* assembler,
3208 Register receiver_reg) {
3209 __ LoadImmediate(IP, 0);
3210 __ Push(IP); // Result slot
3211 __ Push(IP); // Arg0: stub out
3212 __ Push(receiver_reg); // Arg1: Receiver
3213 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3214 __ Pop(R0); // Get the receiver
3215 __ Pop(CODE_REG); // result = stub
3216 __ Pop(R9); // result = IC
3217}
3218
3219// Called from switchable IC calls.
3220// R0: receiver
3221void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3222 __ ldr(CODE_REG,
3223 Address(THR, target::Thread::switchable_call_miss_stub_offset()));
3224 __ EnterStubFrame();
3225 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3226 __ LeaveStubFrame();
3227
3228 __ Branch(FieldAddress(
3230}
3231
3232// Called from switchable IC calls.
3233// R0: receiver
3234// R9: SingleTargetCache
3235// Passed to target:
3236// CODE_REG: target Code object
3237void StubCodeCompiler::GenerateSingleTargetCallStub() {
3238 Label miss;
3239 __ LoadClassIdMayBeSmi(R1, R0);
3240 __ ldrh(R2,
3241 FieldAddress(R9, target::SingleTargetCache::lower_limit_offset()));
3242 __ ldrh(R3,
3243 FieldAddress(R9, target::SingleTargetCache::upper_limit_offset()));
3244
3245 __ cmp(R1, Operand(R2));
3246 __ b(&miss, LT);
3247 __ cmp(R1, Operand(R3));
3248 __ b(&miss, GT);
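 // The single-target cache covers a contiguous class-id range: receivers with
 // a cid below the lower bound in R2 or above the upper bound in R3 fall back
 // to the switchable-call miss path.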
3249
3250 __ ldr(CODE_REG,
3251 FieldAddress(R9, target::SingleTargetCache::target_offset()));
3252 __ Branch(FieldAddress(R9, target::SingleTargetCache::entry_point_offset()));
3253
3254 __ Bind(&miss);
3255 __ EnterStubFrame();
3256 CallSwitchableCallMissRuntimeEntry(assembler, /*receiver_reg=*/R0);
3257 __ LeaveStubFrame();
3258
3259 __ Branch(FieldAddress(
3261}
3262
3263static int GetScaleFactor(intptr_t size) {
3264 switch (size) {
3265 case 1:
3266 return 0;
3267 case 2:
3268 return 1;
3269 case 4:
3270 return 2;
3271 case 8:
3272 return 3;
3273 case 16:
3274 return 4;
3275 }
3276 UNREACHABLE();
3277 return -1;
3278}
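// GetScaleFactor maps a typed-data element size in bytes to log2 of that size
// (1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4); the allocation stub below shifts
// the untagged length left by this amount to turn an element count into a
// payload size in bytes.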
3279
3280void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3281 const intptr_t element_size = TypedDataElementSizeInBytes(cid);
3282 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3283 const intptr_t scale_shift = GetScaleFactor(element_size);
3284
3287
3288 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3289 Label call_runtime;
3290 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime, R2));
3291 __ mov(R2, Operand(AllocateTypedDataArrayABI::kLengthReg));
3292 /* Check that length is a positive Smi. */
3293 /* R2: requested array length argument. */
3294 __ tst(R2, Operand(kSmiTagMask));
3295 __ b(&call_runtime, NE);
3296 __ SmiUntag(R2);
3297 /* Check for length >= 0 && length <= max_len. */
3298 /* R2: untagged array length. */
3299 __ CompareImmediate(R2, max_len);
3300 __ b(&call_runtime, HI);
3301 __ mov(R2, Operand(R2, LSL, scale_shift));
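 /* R2 now holds the payload size in bytes (untagged length << scale_shift); */
 /* the header size and alignment padding are added next. */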
3302 const intptr_t fixed_size_plus_alignment_padding =
3305 __ AddImmediate(R2, fixed_size_plus_alignment_padding);
3307 __ ldr(R0, Address(THR, target::Thread::top_offset()));
3308
3309 /* R2: allocation size. */
3310 __ adds(R1, R0, Operand(R2));
3311 __ b(&call_runtime, CS); /* Fail on unsigned overflow. */
3312
3313 /* Check if the allocation fits into the remaining space. */
3314 /* R0: potential new object start. */
3315 /* R1: potential next object start. */
3316 /* R2: allocation size. */
3317 __ ldr(IP, Address(THR, target::Thread::end_offset()));
3318 __ cmp(R1, Operand(IP));
3319 __ b(&call_runtime, CS);
3320 __ CheckAllocationCanary(R0);
3321
3322 __ str(R1, Address(THR, target::Thread::top_offset()));
3323 __ AddImmediate(R0, kHeapObjectTag);
3324 /* Initialize the tags. */
3325 /* R0: new object start as a tagged pointer. */
3326 /* R1: new object end address. */
3327 /* R2: allocation size. */
3328 {
3330 __ mov(R3,
3331 Operand(R2, LSL,
3334 LS);
3335 __ mov(R3, Operand(0), HI);
3336
3337 /* Get the class index and insert it into the tags. */
3338 uword tags =
3339 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3340 __ LoadImmediate(TMP, tags);
3341 __ orr(R3, R3, Operand(TMP));
3342 __ str(R3, FieldAddress(R0, target::Object::tags_offset())); /* Tags. */
3343 }
3344 /* Set the length field. */
3345 /* R0: new object start as a tagged pointer. */
3346 /* R1: new object end address. */
3347 /* R2: allocation size. */
3348 __ mov(R3,
3349 Operand(AllocateTypedDataArrayABI::kLengthReg)); /* Array length. */
3350 __ StoreIntoObjectNoBarrier(
3351 R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3);
3352 /* Initialize all array elements to 0. */
3353 /* R0: new object start as a tagged pointer. */
3354 /* R1: new object end address. */
3355 /* R2: allocation size. */
3356 /* R3: iterator which initially points to the start of the variable */
3357 /* data area to be initialized. */
3358 /* R8, R9: zero. */
3359 __ LoadImmediate(R8, 0);
3360 __ mov(R9, Operand(R8));
3361 __ AddImmediate(R3, R0, target::TypedData::HeaderSize() - 1);
3362 __ StoreInternalPointer(
3363 R0, FieldAddress(R0, target::PointerBase::data_offset()), R3);
3364 Label init_loop;
3365 __ Bind(&init_loop);
3366 __ AddImmediate(R3, 2 * target::kWordSize);
3367 __ cmp(R3, Operand(R1));
3368 __ strd(R8, R9, R3, -2 * target::kWordSize, LS);
3369 __ b(&init_loop, CC);
3370 __ str(R8, Address(R3, -2 * target::kWordSize), HI);
3371 __ WriteAllocationCanary(R1); // Fix overshoot.
3372
3373 __ Ret();
3374
3375 __ Bind(&call_runtime);
3376 }
3377
3378 __ EnterStubFrame();
3379 __ PushObject(Object::null_object()); // Make room for the result.
3380 __ PushImmediate(target::ToRawSmi(cid)); // Cid
3381 __ Push(AllocateTypedDataArrayABI::kLengthReg); // Array length
3382 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3383 __ Drop(2); // Drop arguments.
3385 __ LeaveStubFrame();
3386 __ Ret();
3387}
3388
3389} // namespace compiler
3390
3391} // namespace dart
3392
3393#endif // defined(TARGET_ARCH_ARM)
const Register ARGS_DESC_REG
const Register DISPATCH_TABLE_REG
@ kNumberOfCpuRegisters
Definition: constants_arm.h:98
@ kNoRegister
Definition: constants_arm.h:99
static constexpr bool IsArgumentRegister(Register reg)
Definition: constants.h:77
constexpr RegList kDartAvailableCpuRegs
const Register TMP
const Register FPREG
const intptr_t cid
const intptr_t kStoreBufferWrapperSize
const Register FUNCTION_REG
const Register IC_DATA_REG
const Register PP
static constexpr uword kZapCodeReg
@ kNumberOfDRegisters
const Register kStackTraceObjectReg
NOT_IN_PRODUCT(LibraryPtr ReloadTestScript(const char *script))
@ kSmiTagSize
@ kHeapObjectTag
@ kSmiTagMask
@ kSmiTagShift
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
const int kFpuRegisterSize
@ kLazyDeoptFromThrow
@ kLazyDeoptFromReturn
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
dst
Definition: cp.py:12
static bool Bind(PassBindingsCacheMTL &pass, ShaderStage stage, size_t bind_index, const BufferView &view)
Definition: update.py:1
SeparatedVector2 offset
static constexpr Register kResultReg
static constexpr Register kLengthReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kTempReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kTagsReg
static constexpr Register kLengthReg
static constexpr Register kResultReg
static constexpr Register kClassIdReg
intptr_t first_local_from_fp
Definition: frame_layout.h:37
static constexpr intptr_t kNewObjectBitPosition
static constexpr intptr_t kObjectAlignmentLog2
static constexpr intptr_t kObjectAlignment
static constexpr Register kLengthReg
static constexpr Register kIndexReg
static constexpr Register kInstanceCidOrSignatureReg
static constexpr Register kResultReg
static constexpr Register kSubtypeTestCacheReg
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kSubtypeTestCacheResultReg