stub_code_compiler_x64.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <setjmp.h>
6
8#include "vm/globals.h"
9
10// For `AllocateObjectInstr::WillAllocateNewOrRemembered`
11// For `GenericCheckBoundInstr::UseUnboxedRepresentation`
13
14#define SHOULD_NOT_INCLUDE_RUNTIME
15
18
19#if defined(TARGET_ARCH_X64)
20
21#include "vm/class_id.h"
22#include "vm/code_entry_kind.h"
25#include "vm/constants.h"
27#include "vm/instructions.h"
29#include "vm/tags.h"
30
31#define __ assembler->
32
33namespace dart {
34namespace compiler {
35
36// Ensures that [RAX] is a new object; if not, it will be added to the remembered
37// set via a leaf runtime call.
38//
39// WARNING: This might clobber all registers except for [RAX], [THR] and [FP].
40// The caller should simply call LeaveStubFrame() and return.
42 // If the object is not in an active TLAB, we call a leaf-runtime to add it to
43 // the remembered set and/or deferred marking worklist. This test assumes a
44 // Page's TLAB use is always ascending.
45 Label done;
46 __ AndImmediate(TMP, RAX, target::kPageMask);
47 __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());
48 __ CompareRegisters(RAX, TMP);
49 __ BranchIf(UNSIGNED_GREATER_EQUAL, &done);
50
51 {
52 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
53 /*preserve_registers=*/false);
56 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
57 }
58
59 __ Bind(&done);
60}
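// For illustration, the check above corresponds roughly to the following
// C-level sketch (field and helper names are shorthand, not the actual
// declarations):
//
//   Page* page = (Page*)((uword)obj & kPageMask);
//   if ((uword)obj >= page->original_top) {
//     // Object sits in the TLAB above the last scavenge point: it is new.
//   } else {
//     EnsureRememberedAndMarkingDeferred(obj, thread);  // leaf runtime call
//   }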
61
62// In TSAN mode the runtime will throw an exception using an intermediary
63// longjmp() call to unwind the C frames in a way that TSAN can understand.
64//
65// This wrapper will set up a [jmp_buf] on the stack and initialize it to be a
66// target for a possible longjmp(). In the exceptional case we'll forward
67// control of execution to the usual JumpToFrame stub.
68//
69// In non-TSAN mode this will do nothing and the runtime will call the
70// JumpToFrame stub directly.
71//
72// The callback [fun] may be invoked with a modified [RSP] due to allocating
73// a [jmp_buf] structure on the stack (as well as the saved old
74// [Thread::tsan_utils_->setjmp_buffer_]).
75static void WithExceptionCatchingTrampoline(Assembler* assembler,
76 std::function<void()> fun) {
77#if !defined(USING_SIMULATOR)
78 const Register kTsanUtilsReg = RAX;
79
80 // Reserve space for arguments and align frame before entering C++ world.
81 const intptr_t kJumpBufferSize = sizeof(jmp_buf);
82 // Save & Restore the volatile CPU registers across the setjmp() call.
83 const RegisterSet volatile_registers(
85 /*fpu_registers=*/0);
86
87 const Register kSavedRspReg = R12;
89 // We rely on THR being preserved across the setjmp() call.
91
92 if (FLAG_target_thread_sanitizer) {
93 Label do_native_call;
94
95 // Save old jmp_buf.
96 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
97 __ pushq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
98
99 // Allocate a jmp_buf struct on the stack and remember the pointer to it in
100 // [Thread::tsan_utils_->setjmp_buffer] (which exceptions.cc will longjmp()
101 // to).
102 __ AddImmediate(RSP, Immediate(-kJumpBufferSize));
103 __ movq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()),
104 RSP);
105
106 // Call setjmp() with a pointer to the allocated jmp_buf struct.
107 __ MoveRegister(CallingConventions::kArg1Reg, RSP);
108 __ PushRegisters(volatile_registers);
110 __ MoveRegister(kSavedRspReg, RSP);
111 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
112 }
113 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
114 __ CallCFunction(
115 Address(kTsanUtilsReg, target::TsanUtils::setjmp_function_offset()),
116 /*restore_rsp=*/true);
118 __ MoveRegister(RSP, kSavedRspReg);
119 }
120 __ PopRegisters(volatile_registers);
121
122 // We are the target of a longjmp() iff setjmp() returns non-0.
123 __ CompareImmediate(RAX, 0);
124 __ BranchIf(EQUAL, &do_native_call);
125
126 // We are the target of a longjmp: clean up the stack and tail-call the
127 // JumpToFrame stub, which will take care of unwinding the stack and handing
128 // execution to the catch entry.
129 __ AddImmediate(RSP, Immediate(kJumpBufferSize));
130 __ movq(kTsanUtilsReg, Address(THR, target::Thread::tsan_utils_offset()));
131 __ popq(Address(kTsanUtilsReg, target::TsanUtils::setjmp_buffer_offset()));
132
134 Address(kTsanUtilsReg, target::TsanUtils::exception_pc_offset()));
136 Address(kTsanUtilsReg, target::TsanUtils::exception_sp_offset()));
138 Address(kTsanUtilsReg, target::TsanUtils::exception_fp_offset()));
139 __ MoveRegister(CallingConventions::kArg4Reg, THR);
141
142 // We leave the created [jmp_buf] structure on the stack as well as the
143 // pushed old [Thread::tsan_utils_->setjmp_buffer_].
144 __ Bind(&do_native_call);
145 __ MoveRegister(kSavedRspReg, RSP);
146 }
147#endif // !defined(USING_SIMULATOR)
148
149 fun();
150
151#if !defined(USING_SIMULATOR)
152 if (FLAG_target_thread_sanitizer) {
153 __ MoveRegister(RSP, kSavedRspReg);
154 __ AddImmediate(RSP, Immediate(kJumpBufferSize));
155 const Register kTsanUtilsReg2 = kSavedRspReg;
156 __ movq(kTsanUtilsReg2, Address(THR, target::Thread::tsan_utils_offset()));
157 __ popq(Address(kTsanUtilsReg2, target::TsanUtils::setjmp_buffer_offset()));
158 }
159#endif // !defined(USING_SIMULATOR)
160}
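// For illustration, the TSAN path above corresponds roughly to this C-level
// sketch (names like `tsan_utils` are shorthand for the Thread::tsan_utils_
// fields used above):
//
//   jmp_buf buffer;
//   jmp_buf* saved = tsan_utils->setjmp_buffer;  // pushed on the stack above
//   tsan_utils->setjmp_buffer = &buffer;
//   if (setjmp(buffer) == 0) {
//     fun();                                     // normal path
//   } else {
//     // longjmp() from the runtime: load the exception pc/sp/fp from
//     // tsan_utils and tail-call the JumpToFrame stub.
//   }
//   tsan_utils->setjmp_buffer = saved;           // restored after fun() returns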
161
162// Input parameters:
163// RSP : points to return address.
164// RSP + 8 : address of last argument in argument array.
165// RSP + 8*R10 : address of first argument in argument array.
166// RSP + 8*R10 + 8 : address of return value.
167// RBX : address of the runtime function to call.
168// R10 : number of arguments to the call.
169// Must preserve callee saved registers R12 and R13.
170void StubCodeCompiler::GenerateCallToRuntimeStub() {
171 const intptr_t thread_offset = target::NativeArguments::thread_offset();
172 const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
173 const intptr_t argv_offset = target::NativeArguments::argv_offset();
174 const intptr_t retval_offset = target::NativeArguments::retval_offset();
175
176 __ movq(CODE_REG,
178 __ EnterStubFrame();
179
180 // Save exit frame information to enable stack walking as we are about
181 // to transition to Dart VM C++ code.
183
184 // Mark that the thread exited generated code through a runtime call.
187
188#if defined(DEBUG)
189 {
190 Label ok;
191 // Check that we are always entering from Dart code.
192 __ movq(RAX, Immediate(VMTag::kDartTagId));
195 __ Stop("Not coming from Dart code.");
196 __ Bind(&ok);
197 }
198#endif
199
200 // Mark that the thread is executing VM code.
202
203 WithExceptionCatchingTrampoline(assembler, [&]() {
204 // Reserve space for arguments and align frame before entering C++ world.
205 __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
207 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
208 }
209
210 // Pass target::NativeArguments structure by value and call runtime.
211 __ movq(Address(RSP, thread_offset), THR); // Set thread in NativeArgs.
212 __ movq(Address(RSP, argc_tag_offset),
213 R10); // Set argc in target::NativeArguments.
214 // Compute argv.
215 __ leaq(RAX, Address(RBP, R10, TIMES_8,
216 target::frame_layout.param_end_from_fp *
218 __ movq(Address(RSP, argv_offset),
219 RAX); // Set argv in target::NativeArguments.
220 __ addq(
221 RAX,
222 Immediate(1 * target::kWordSize)); // Retval is next to 1st argument.
223 __ movq(Address(RSP, retval_offset),
224 RAX); // Set retval in target::NativeArguments.
225#if defined(DART_TARGET_OS_WINDOWS)
227 CallingConventions::kRegisterTransferLimit);
229#endif
230 __ CallCFunction(RBX);
231
232 // Mark that the thread is executing Dart code.
233 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
234
235 // Mark that the thread has not exited generated Dart code.
237 Immediate(0));
238
239 // Reset exit frame information in Isolate's mutator thread structure.
241 Immediate(0));
242
243 // Restore the global object pool after returning from runtime (old space is
244 // moving, so the GOP could have been relocated).
245 if (FLAG_precompiled_mode) {
247 }
248 });
249
250 __ LeaveStubFrame();
251
252 // The following return can jump to a lazy-deopt stub, which assumes RAX
253 // contains a return value and will save it in a GC-visible way. We therefore
254 // have to ensure RAX does not contain any garbage value left from the C
255 // function we called (which has return type "void").
256 // (See GenerateDeoptimizationSequence::saved_result_slot_from_fp.)
257 __ xorq(RAX, RAX);
258 __ ret();
259}
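// For illustration, the stack block reserved above is filled out like a
// by-value target::NativeArguments record (a sketch of the slots written,
// not the actual struct declaration):
//
//   [RSP + thread_offset]   = THR              // current thread
//   [RSP + argc_tag_offset] = R10              // argument count / tag
//   [RSP + argv_offset]     = address of the first argument in the caller
//   [RSP + retval_offset]   = argv + one word  // slot for the return value
//
// The runtime function in RBX receives this record by value and writes its
// result through the retval slot.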
260
261void StubCodeCompiler::GenerateSharedStubGeneric(
262 bool save_fpu_registers,
263 intptr_t self_code_stub_offset_from_thread,
264 bool allow_return,
265 std::function<void()> perform_runtime_call) {
266 // We want the saved registers to appear like part of the caller's frame, so
267 // we push them before calling EnterStubFrame.
268 const RegisterSet saved_registers(
269 kDartAvailableCpuRegs, save_fpu_registers ? kAllFpuRegistersList : 0);
270 __ PushRegisters(saved_registers);
271
272 const intptr_t kSavedCpuRegisterSlots =
274 const intptr_t kSavedFpuRegisterSlots =
275 save_fpu_registers
277 : 0;
278 const intptr_t kAllSavedRegistersSlots =
279 kSavedCpuRegisterSlots + kSavedFpuRegisterSlots;
280
281 // Copy down the return address so the stack layout is correct.
282 __ pushq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize));
283 __ movq(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
284 __ EnterStubFrame();
285 perform_runtime_call();
286 if (!allow_return) {
287 __ Breakpoint();
288 return;
289 }
290 __ LeaveStubFrame();
291 // Copy up the return address (in case it was changed).
292 __ popq(TMP);
293 __ movq(Address(RSP, kAllSavedRegistersSlots * target::kWordSize), TMP);
294 __ PopRegisters(saved_registers);
295 __ ret();
296}
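// For illustration, while perform_runtime_call() runs, the stack set up above
// looks roughly like this (growing downwards):
//
//   | caller's frame ...            |
//   | original return address       |  <- overwritten on exit with the copy below
//   | saved CPU (and FPU) registers |  (appear to be part of the caller's frame)
//   | copy of the return address    |  <- pushed so the stub frame has a call site
//   | stub frame ...                |
//
// On the way out, the copy (which the runtime may have changed) is written
// back over the original slot before the registers are popped.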
297
298void StubCodeCompiler::GenerateSharedStub(
299 bool save_fpu_registers,
300 const RuntimeEntry* target,
301 intptr_t self_code_stub_offset_from_thread,
302 bool allow_return,
303 bool store_runtime_result_in_result_register) {
304 auto perform_runtime_call = [&]() {
305 if (store_runtime_result_in_result_register) {
306 __ PushImmediate(Immediate(0));
307 }
308 __ CallRuntime(*target, /*argument_count=*/0);
309 if (store_runtime_result_in_result_register) {
310 __ PopRegister(RAX);
311 __ movq(Address(RBP, target::kWordSize *
314 RAX);
315 }
316 };
317 GenerateSharedStubGeneric(save_fpu_registers,
318 self_code_stub_offset_from_thread, allow_return,
319 perform_runtime_call);
320}
321
322void StubCodeCompiler::GenerateEnterSafepointStub() {
323 RegisterSet all_registers;
324 all_registers.AddAllGeneralRegisters();
325 __ PushRegisters(all_registers);
326
327 __ EnterFrame(0);
328 __ ReserveAlignedFrameSpace(0);
329 __ movq(RAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
330 __ CallCFunction(RAX);
331 __ LeaveFrame();
332
333 __ PopRegisters(all_registers);
334 __ ret();
335}
336
337static void GenerateExitSafepointStubCommon(Assembler* assembler,
338 uword runtime_entry_offset) {
339 RegisterSet all_registers;
340 all_registers.AddAllGeneralRegisters();
341 __ PushRegisters(all_registers);
342
343 __ EnterFrame(0);
344 __ ReserveAlignedFrameSpace(0);
345
346 // Set the execution state to VM while waiting for the safepoint to end.
347 // This isn't strictly necessary but enables tests to check that we're not
348 // in native code anymore. See tests/ffi/function_gc_test.dart for example.
351
352 __ movq(RAX, Address(THR, runtime_entry_offset));
353 __ CallCFunction(RAX);
354 __ LeaveFrame();
355
356 __ PopRegisters(all_registers);
357 __ ret();
358}
359
360void StubCodeCompiler::GenerateExitSafepointStub() {
361 GenerateExitSafepointStubCommon(
362 assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
363}
364
365void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
366 GenerateExitSafepointStubCommon(
367 assembler,
368 kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
369}
370
371// Calls native code within a safepoint.
372//
373// On entry:
374// Stack: arguments set up and aligned for native call, excl. shadow space
375// RBX = target address to call
376//
377// On exit:
378// Stack pointer lowered by shadow space
379// RBX, R12 clobbered
380void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
381 __ movq(R12, compiler::Immediate(target::Thread::exit_through_ffi()));
382 __ TransitionGeneratedToNative(RBX, FPREG, R12,
383 /*enter_safepoint=*/true);
384
385 __ popq(R12);
386 __ CallCFunction(RBX, /*restore_rsp=*/true);
387
388 __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
389
390 // Faster than jmp because it doesn't confuse the branch predictor.
391 __ pushq(R12);
392 __ ret();
393}
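// Note on the popq/pushq pair above: the return address is popped into R12
// (preserved across the C call) so that RSP points at the outgoing arguments
// during the native call, and pushed back afterwards so the stub can return
// with `ret` instead of an indirect jmp, as noted above.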
394
395void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
397 Register tmp) {
398 compiler::Label skip_reloc;
399 __ jmp(&skip_reloc);
400 InsertBSSRelocation(relocation);
401 const intptr_t reloc_end = __ CodeSize();
402 __ Bind(&skip_reloc);
403
404 const intptr_t kLeaqLength = 7;
406 -kLeaqLength - compiler::target::kWordSize));
407 ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);
408
409 // dst holds the address of the relocation.
410 __ movq(tmp, compiler::Address(dst, 0));
411
412 // tmp holds the relocation itself: bss_start - dst.
413 // dst = dst + (bss_start - dst) = bss_start
414 __ addq(dst, tmp);
415
416 // dst holds the start of the BSS section.
417 // Load the routine.
418 __ movq(dst, compiler::Address(dst, 0));
419}
420
421void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
422 uword function_index,
423 Register dst) {
424 // Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
425 // Note: If the stub was aligned, this could be a single PC relative load.
426
427 // Load a pointer to the beginning of the stub into dst.
428 const intptr_t kLeaqLength = 7;
429 const intptr_t code_size = __ CodeSize();
430 __ leaq(dst, Address::AddressRIPRelative(-kLeaqLength - code_size));
431
432 // Round dst down to the page size.
433 __ andq(dst, Immediate(FfiCallbackMetadata::kPageMask));
434
435 // Load the function from the function table.
436 __ LoadFromOffset(dst, dst,
438}
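// For illustration, the lookup above does roughly the following (a sketch;
// the exact table layout is defined by FfiCallbackMetadata):
//
//   uword here = <address of this stub's first instruction>;  // RIP-relative leaq
//   uword page = here & FfiCallbackMetadata::kPageMask;       // trampoline page start
//   dst = *(uword*)(page + <offset of function_index in the function table>);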
439
440static const RegisterSet kArgumentRegisterSet(
443
444void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
445 // RAX is volatile and not used for passing any arguments.
447
448 Label body;
450 ++i) {
451 // The FfiCallbackMetadata table is keyed by the trampoline entry point. So
452 // look up the current PC, then jump to the shared section. RIP gives us the
453 // address of the next instruction, so to get the true entry point, we have
454 // to subtract the size of the leaq instruction.
455 const intptr_t kLeaqLength = 7;
456 const intptr_t size_before = __ CodeSize();
457 __ leaq(RAX, Address::AddressRIPRelative(-kLeaqLength));
458 const intptr_t size_after = __ CodeSize();
459 ASSERT_EQUAL(size_after - size_before, kLeaqLength);
460 __ jmp(&body);
461 }
462
463 ASSERT_EQUAL(__ CodeSize(),
464 FfiCallbackMetadata::kNativeCallbackTrampolineSize *
466
467 __ Bind(&body);
468
469 const intptr_t shared_stub_start = __ CodeSize();
470
471 // Save THR which is callee-saved.
472 __ pushq(THR);
473
474 // 2 = THR & return address
475 COMPILE_ASSERT(2 == FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta);
476
477 // Save all registers which might hold arguments.
478 __ PushRegisters(kArgumentRegisterSet);
479
480 // Load the thread, verify the callback ID and exit the safepoint.
481 //
482 // We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
483 // code size on this shared stub.
484 {
487
488 // We also need to look up the entry point for the trampoline. This is
489 // returned using a pointer passed to the second arg of the C function
490 // below. We aim that pointer at a reserved stack slot.
492 __ pushq(Immediate(0)); // Reserve a stack slot for the entry point.
494
495 // We also need to know if this is a sync or async callback. This is also
496 // returned by pointer.
498 __ pushq(Immediate(0)); // Reserve a stack slot for the trampoline type.
500
501#if defined(DART_TARGET_OS_FUCHSIA)
502 // TODO(https://dartbug.com/52579): Remove.
503 if (FLAG_precompiled_mode) {
505 TMP);
506 } else {
507 __ movq(RAX, Immediate(
508 reinterpret_cast<int64_t>(DLRT_GetFfiCallbackMetadata)));
509 }
510#else
511 GenerateLoadFfiCallbackMetadataRuntimeFunction(
513#endif // defined(DART_TARGET_OS_FUCHSIA)
514
515 __ EnterFrame(0);
516 __ ReserveAlignedFrameSpace(0);
517
518 __ CallCFunction(RAX);
519 __ movq(THR, RAX);
520
521 __ LeaveFrame();
522
523 // The trampoline type is at the top of the stack. Pop it into RAX.
524 __ popq(RAX);
525
526 // Entry point is now at the top of the stack. Pop it into TMP.
527 __ popq(TMP);
528 }
529
530 // Restore the arguments.
531 __ PopRegisters(kArgumentRegisterSet);
532
533 // Current state:
534 //
535 // Stack:
536 // <old stack (arguments)>
537 // <return address>
538 // <saved THR>
539 //
540 // Registers: Like entry, except TMP == target, RAX == abi, and THR == thread
541 // All argument registers are untouched.
542
543 Label async_callback;
544 Label done;
545
546 // If GetFfiCallbackMetadata returned a null thread, it means that the
547 // callback was invoked after it was deleted. In this case, do nothing.
548 __ cmpq(THR, Immediate(0));
550
551 // Check the trampoline type to see how the callback should be invoked.
552 __ cmpq(RAX, Immediate(static_cast<uword>(
555
556 // Sync callback. The entry point contains the target function, so just call
557 // it. DLRT_GetThreadForNativeCallbackTrampoline exited the safepoint, so
558 // re-enter it afterwards.
559
560 // On entry to the function, there will be two extra slots on the stack:
561 // the saved THR and the return address. The target will know to skip them.
562 __ call(TMP);
563
564 // Takes care to not clobber *any* registers (besides TMP).
565 __ EnterFullSafepoint();
566
569
570 // Async callback. The entrypoint marshals the arguments into a message and
571 // sends it over the send port. DLRT_GetThreadForNativeCallbackTrampoline
572 // entered a temporary isolate, so exit it afterwards.
573
574 // On entry to the function, there will be two extra slots on the stack:
575 // the saved THR and the return address. The target will know to skip them.
576 __ call(TMP);
577
578 // Exit the temporary isolate.
579 {
580#if defined(DART_TARGET_OS_FUCHSIA)
581 // TODO(https://dartbug.com/52579): Remove.
582 if (FLAG_precompiled_mode) {
583 GenerateLoadBSSEntry(BSS::Relocation::DRT_ExitTemporaryIsolate, RAX, TMP);
584 } else {
585 __ movq(RAX,
586 Immediate(reinterpret_cast<int64_t>(DLRT_ExitTemporaryIsolate)));
587 }
588#else
589 GenerateLoadFfiCallbackMetadataRuntimeFunction(
591#endif // defined(DART_TARGET_OS_FUCHSIA)
592
593 __ EnterFrame(0);
594 __ ReserveAlignedFrameSpace(0);
595
596 __ CallCFunction(RAX);
597
598 __ LeaveFrame();
599 }
600
601 __ Bind(&done);
602
603 // Restore THR (callee-saved).
604 __ popq(THR);
605
606 __ ret();
607
608 // 'kNativeCallbackSharedStubSize' is an upper bound because the exact
609 // instruction size can vary slightly based on OS calling conventions.
610 ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
611 FfiCallbackMetadata::kNativeCallbackSharedStubSize);
613
614#if defined(DEBUG)
615 while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
616 __ Breakpoint();
617 }
618#endif
619}
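// For illustration, the code emitted above lays the trampoline page out
// roughly as follows (a sketch of the structure, not exact sizes):
//
//   +---------------------------------------------+ <- page start
//   | trampoline 0: leaq RAX, [RIP - 7]; jmp body  |  one per callback slot,
//   | trampoline 1: ...                            |  each exactly
//   | ...                                          |  kNativeCallbackTrampolineSize
//   +---------------------------------------------+
//   | shared body (&body): save THR, look up the   |
//   | metadata keyed by the entry point in RAX,    |
//   | then dispatch to the sync or async path      |
//   +---------------------------------------------+ <- padded to kPageSize in DEBUG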
620
621void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
622 __ EnterStubFrame();
625 __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, /*argument_count=*/1);
626 // The NullError runtime entry does not return.
627 __ Breakpoint();
628}
629
630void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
631 auto perform_runtime_call = [&]() {
632 // If the generated code has unboxed index/length we need to box them before
633 // calling the runtime entry.
635 Label length, smi_case;
636
637 // The user-controlled index might not fit into a Smi.
638#if !defined(DART_COMPRESSED_POINTERS)
640 __ BranchIf(NO_OVERFLOW, &length);
641#else
644 __ sarq(TMP, Immediate(30));
645 __ addq(TMP, Immediate(1));
646 __ cmpq(TMP, Immediate(2));
647 __ j(BELOW, &length);
648#endif
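// Note on the compressed-pointers check above: after the arithmetic shift,
// TMP is -1 or 0 exactly when the index fits in a signed 31-bit range (i.e.
// it can still be tagged as a compressed Smi); adding 1 and comparing
// unsigned against 2 folds both cases into a single BELOW branch.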
649 {
650 // Allocate a mint, reload the two registers and populate the mint.
651 __ PushImmediate(Immediate(0));
652 __ CallRuntime(kAllocateMintRuntimeEntry, /*argument_count=*/0);
653 __ PopRegister(RangeErrorABI::kIndexReg);
654 __ movq(
655 TMP,
656 Address(RBP, target::kWordSize *
659 __ movq(FieldAddress(RangeErrorABI::kIndexReg,
661 TMP);
662 __ movq(
664 Address(RBP, target::kWordSize *
667 }
668
669 // Length is guaranteed to be in positive Smi range (it comes from a load
670 // of a VM-recognized array).
671 __ Bind(&length);
673 }
674 __ PushRegistersInOrder(
676 __ CallRuntime(kRangeErrorRuntimeEntry, /*argument_count=*/2);
677 __ Breakpoint();
678 };
679
680 GenerateSharedStubGeneric(
681 /*save_fpu_registers=*/with_fpu_regs,
682 with_fpu_regs
685 /*allow_return=*/false, perform_runtime_call);
686}
687
688void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
689 auto perform_runtime_call = [&]() {
690 __ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/2);
691 __ Breakpoint();
692 };
693
694 GenerateSharedStubGeneric(
695 /*save_fpu_registers=*/with_fpu_regs,
696 with_fpu_regs
699 /*allow_return=*/false, perform_runtime_call);
700}
701
702// Input parameters:
703// RSP : points to return address.
704// RSP + 8 : address of return value.
705// R13 : address of first argument in argument array.
706// RBX : address of the native function to call.
707// R10 : argc_tag including number of arguments and function kind.
708static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
709 Address wrapper_address) {
710 const intptr_t native_args_struct_offset = 0;
711 const intptr_t thread_offset =
712 target::NativeArguments::thread_offset() + native_args_struct_offset;
713 const intptr_t argc_tag_offset =
714 target::NativeArguments::argc_tag_offset() + native_args_struct_offset;
715 const intptr_t argv_offset =
716 target::NativeArguments::argv_offset() + native_args_struct_offset;
717 const intptr_t retval_offset =
718 target::NativeArguments::retval_offset() + native_args_struct_offset;
719
720 __ EnterStubFrame();
721
722 // Save exit frame information to enable stack walking as we are about
723 // to transition to native code.
725
726 // Mark that the thread exited generated code through a runtime call.
729
730#if defined(DEBUG)
731 {
732 Label ok;
733 // Check that we are always entering from Dart code.
734 __ movq(R8, Immediate(VMTag::kDartTagId));
737 __ Stop("Not coming from Dart code.");
738 __ Bind(&ok);
739 }
740#endif
741
742 // Mark that the thread is executing native code.
744
745 WithExceptionCatchingTrampoline(assembler, [&]() {
746 // Reserve space for the native arguments structure passed on the stack (the
747 // outgoing pointer parameter to the native arguments structure is passed in
748 // RDI) and align frame before entering the C++ world.
749 __ subq(RSP, Immediate(target::NativeArguments::StructSize()));
751 __ andq(RSP, Immediate(~(OS::ActivationFrameAlignment() - 1)));
752 }
753
754 // Pass target::NativeArguments structure by value and call native function.
755 // Set thread in NativeArgs.
756 __ movq(Address(RSP, thread_offset), THR);
757 // Set argc in target::NativeArguments.
758 __ movq(Address(RSP, argc_tag_offset), R10);
759 // Set argv in target::NativeArguments.
760 __ movq(Address(RSP, argv_offset), R13);
761 // Compute return value addr.
762 __ leaq(RAX, Address(RBP, (target::frame_layout.param_end_from_fp + 1) *
764 // Set retval in target::NativeArguments.
765 __ movq(Address(RSP, retval_offset), RAX);
766
767 // Pass the pointer to the target::NativeArguments.
769 // Pass pointer to function entrypoint.
771
772 __ movq(RAX, wrapper_address);
773 __ CallCFunction(RAX);
774
775 // Mark that the thread is executing Dart code.
776 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
777
778 // Mark that the thread has not exited generated Dart code.
780 Immediate(0));
781
782 // Reset exit frame information in Isolate's mutator thread structure.
784 Immediate(0));
785
786 // Restore the global object pool after returning from runtime (old space is
787 // moving, so the GOP could have been relocated).
788 if (FLAG_precompiled_mode) {
790 }
791 });
792
793 __ LeaveStubFrame();
794 __ ret();
795}
796
797void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
798 GenerateCallNativeWithWrapperStub(
799 assembler,
800 Address(THR,
802}
803
804void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
805 GenerateCallNativeWithWrapperStub(
806 assembler,
807 Address(THR,
809}
810
811// Input parameters:
812// RSP : points to return address.
813// RSP + 8 : address of return value.
814// RAX : address of first argument in argument array.
815// RBX : address of the native function to call.
816// R10 : argc_tag including number of arguments and function kind.
817void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
818 GenerateCallNativeWithWrapperStub(
819 assembler,
820 Address(THR,
822}
823
824// Input parameters:
825// ARGS_DESC_REG: arguments descriptor array.
826void StubCodeCompiler::GenerateCallStaticFunctionStub() {
827 __ EnterStubFrame();
828 __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
829 // Setup space on stack for return value.
830 __ pushq(Immediate(0));
831 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
832 __ popq(CODE_REG); // Get Code object result.
833 __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
834 // Remove the stub frame as we are about to jump to the dart function.
835 __ LeaveStubFrame();
836
837 __ movq(RBX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
838 __ jmp(RBX);
839}
840
841// Called from a static call only when invalid code has been entered
842// (invalid because its function was optimized or deoptimized).
843// ARGS_DESC_REG: arguments descriptor array.
844void StubCodeCompiler::GenerateFixCallersTargetStub() {
845 Label monomorphic;
846 __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
847
848 // This was a static call.
849 // Load code pointer to this stub from the thread:
850 // The one that is passed in is not correct: it points to the code object
851 // that needs to be replaced.
852 __ movq(CODE_REG,
854 __ EnterStubFrame();
855 __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
856 // Setup space on stack for return value.
857 __ pushq(Immediate(0));
858 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
859 __ popq(CODE_REG); // Get Code object.
860 __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
861 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
862 __ LeaveStubFrame();
863 __ jmp(RAX);
864 __ int3();
865
866 __ Bind(&monomorphic);
867 // This was a switchable call.
868 // Load code pointer to this stub from the thread:
869 // The one that is passed in is not correct: it points to the code object
870 // that needs to be replaced.
871 __ movq(CODE_REG,
873 __ EnterStubFrame();
874 __ pushq(Immediate(0)); // Result slot.
875 __ pushq(RDX); // Preserve receiver.
876 __ pushq(RBX); // Old cache value (also 2nd return value).
877 __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
878 __ popq(RBX); // Get target cache object.
879 __ popq(RDX); // Restore receiver.
880 __ popq(CODE_REG); // Get target Code object.
883 __ LeaveStubFrame();
884 __ jmp(RAX);
885 __ int3();
886}
887
888// Called from object allocate instruction when the allocation stub has been
889// disabled.
890void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
891 // Load code pointer to this stub from the thread:
892 // The one that is passed in is not correct: it points to the code object
893 // that needs to be replaced.
894 __ movq(CODE_REG,
896 __ EnterStubFrame();
897 // Setup space on stack for return value.
898 __ pushq(Immediate(0));
899 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
900 __ popq(CODE_REG); // Get Code object.
901 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
902 __ LeaveStubFrame();
903 __ jmp(RAX);
904 __ int3();
905}
906
907// Called from object allocate instruction when the allocation stub for a
908// generic class has been disabled.
909void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
910 // Load code pointer to this stub from the thread:
911 // The one that is passed in is not correct: it points to the code object
912 // that needs to be replaced.
913 __ movq(CODE_REG,
915 __ EnterStubFrame();
916 // Setup space on stack for return value.
918 __ pushq(Immediate(0));
919 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
920 __ popq(CODE_REG); // Get Code object.
922 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
923 __ LeaveStubFrame();
924 __ jmp(RAX);
925 __ int3();
926}
927
928// Input parameters:
929// R10: smi-tagged argument count, may be zero.
930// RBP[target::frame_layout.param_end_from_fp + 1]: last argument.
931static void PushArrayOfArguments(Assembler* assembler) {
932 __ LoadObject(R12, NullObject());
933 // Allocate array to store arguments of caller.
934 __ movq(RBX, R12); // Null element type for raw Array.
936 __ SmiUntag(R10);
937 // RAX: newly allocated array.
938 // R10: length of the array (was preserved by the stub).
939 __ pushq(RAX); // Array is in RAX and on top of stack.
940 __ leaq(R12,
941 Address(RBP, R10, TIMES_8,
942 target::frame_layout.param_end_from_fp * target::kWordSize));
943 __ leaq(RBX, FieldAddress(RAX, target::Array::data_offset()));
944 // R12: address of first argument on stack.
945 // RBX: address of first argument in array.
946 Label loop, loop_condition;
947#if defined(DEBUG)
948 static auto const kJumpLength = Assembler::kFarJump;
949#else
950 static auto const kJumpLength = Assembler::kNearJump;
951#endif // DEBUG
952 __ jmp(&loop_condition, kJumpLength);
953 __ Bind(&loop);
954 __ movq(RDI, Address(R12, 0));
955 // Generational barrier is needed, array is not necessarily in new space.
956 __ StoreCompressedIntoObject(RAX, Address(RBX, 0), RDI);
957 __ addq(RBX, Immediate(target::kCompressedWordSize));
958 __ subq(R12, Immediate(target::kWordSize));
959 __ Bind(&loop_condition);
960 __ decq(R10);
962}
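// Note on the copy loop above: R12 walks down the caller's stack starting at
// the first argument while RBX walks up the new array's data area, so the
// array holds the arguments in their original order; R10 counts the remaining
// elements down to zero.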
963
964// Used by eager and lazy deoptimization. Preserve result in RAX if necessary.
965// This stub translates optimized frame into unoptimized frame. The optimized
966// frame can contain values in registers and on stack, the unoptimized
967// frame contains all values on stack.
968// Deoptimization occurs in the following steps:
969// - Push all registers that can contain values.
970// - Call C routine to copy the stack and saved registers into temporary buffer.
971// - Adjust caller's frame to correct unoptimized frame size.
972// - Fill the unoptimized frame.
973// - Materialize objects that require allocation (e.g. Double instances).
974// GC can occur only after frame is fully rewritten.
975// Stack after EnterDartFrame(0, PP, kNoRegister) below:
976// +------------------+
977// | Saved PP | <- PP
978// +------------------+
979// | PC marker | <- TOS
980// +------------------+
981// | Saved FP | <- FP of stub
982// +------------------+
983// | return-address | (deoptimization point)
984// +------------------+
985// | Saved CODE_REG |
986// +------------------+
987// | ... | <- SP of optimized frame
988//
989// Parts of the code cannot cause GC; other parts can.
990static void GenerateDeoptimizationSequence(Assembler* assembler,
991 DeoptStubKind kind) {
992 // DeoptimizeCopyFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
993 // is no need to set the correct PC marker or load PP, since they get patched.
994 __ EnterStubFrame();
995
996 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
997 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
998 const intptr_t saved_result_slot_from_fp =
1001 const intptr_t saved_exception_slot_from_fp =
1004 const intptr_t saved_stacktrace_slot_from_fp =
1007 // Result in RAX is preserved as part of pushing all registers below.
1008
1009 // Push registers in their enumeration order: lowest register number at
1010 // lowest address.
1011 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
1012 if (i == CODE_REG) {
1013 // Save the original value of CODE_REG pushed before invoking this stub
1014 // instead of the value used to call this stub.
1015 __ pushq(Address(RBP, 2 * target::kWordSize));
1016 } else {
1017 __ pushq(static_cast<Register>(i));
1018 }
1019 }
1020 __ subq(RSP, Immediate(kNumberOfXmmRegisters * kFpuRegisterSize));
1021 intptr_t offset = 0;
1022 for (intptr_t reg_idx = 0; reg_idx < kNumberOfXmmRegisters; ++reg_idx) {
1023 XmmRegister xmm_reg = static_cast<XmmRegister>(reg_idx);
1024 __ movups(Address(RSP, offset), xmm_reg);
1026 }
1027
1028 {
1029 // Pass address of saved registers block.
1031 LeafRuntimeScope rt(assembler,
1032 /*frame_size=*/0,
1033 /*preserve_registers=*/false);
1034 bool is_lazy =
1035 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow);
1036 __ movq(CallingConventions::kArg2Reg, Immediate(is_lazy ? 1 : 0));
1037 rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);
1038 // Result (RAX) is stack-size (FP - SP) in bytes.
1039 }
1040
1041 if (kind == kLazyDeoptFromReturn) {
1042 // Restore result into RBX temporarily.
1043 __ movq(RBX, Address(RBP, saved_result_slot_from_fp * target::kWordSize));
1044 } else if (kind == kLazyDeoptFromThrow) {
1045 // Restore result into RBX temporarily.
1046 __ movq(RBX,
1047 Address(RBP, saved_exception_slot_from_fp * target::kWordSize));
1048 __ movq(RDX,
1049 Address(RBP, saved_stacktrace_slot_from_fp * target::kWordSize));
1050 }
1051
1052 // There is a Dart Frame on the stack. We must restore PP and leave frame.
1053 __ RestoreCodePointer();
1054 __ LeaveStubFrame();
1055
1056 __ popq(RCX); // Preserve return address.
1057 __ movq(RSP, RBP); // Discard optimized frame.
1058 __ subq(RSP, RAX); // Reserve space for deoptimized frame.
1059 __ pushq(RCX); // Restore return address.
1060
1061 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there
1062 // is no need to set the correct PC marker or load PP, since they get patched.
1063 __ EnterStubFrame();
1064
1065 if (kind == kLazyDeoptFromReturn) {
1066 __ pushq(RBX); // Preserve result as first local.
1067 } else if (kind == kLazyDeoptFromThrow) {
1068 __ pushq(RBX); // Preserve exception as first local.
1069 __ pushq(RDX); // Preserve stacktrace as second local.
1070 }
1071 {
1072 __ movq(CallingConventions::kArg1Reg, RBP); // Pass last FP as a parameter.
1073 LeafRuntimeScope rt(assembler,
1074 /*frame_size=*/0,
1075 /*preserve_registers=*/false);
1076 rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
1077 }
1078 if (kind == kLazyDeoptFromReturn) {
1079 // Restore result into RBX.
1080 __ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
1082 } else if (kind == kLazyDeoptFromThrow) {
1083 // Restore exception into RBX.
1084 __ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
1086 // Restore stacktrace into RDX.
1087 __ movq(RDX, Address(RBP, (target::frame_layout.first_local_from_fp - 1) *
1089 }
1090 // Code above cannot cause GC.
1091 // There is a Dart Frame on the stack. We must restore PP and leave frame.
1092 __ RestoreCodePointer();
1093 __ LeaveStubFrame();
1094
1095 // Frame is fully rewritten at this point and it is safe to perform a GC.
1096 // Materialize any objects that were deferred by FillFrame because they
1097 // require allocation.
1098 // Enter stub frame with loading PP. The caller's PP is not materialized yet.
1099 __ EnterStubFrame();
1100 if (kind == kLazyDeoptFromReturn) {
1101 __ pushq(RBX); // Preserve result, it will be GC-d here.
1102 } else if (kind == kLazyDeoptFromThrow) {
1103 // Preserve CODE_REG for one more runtime call.
1104 __ pushq(CODE_REG);
1105 __ pushq(RBX); // Preserve exception.
1106 __ pushq(RDX); // Preserve stacktrace.
1107 }
1108 __ pushq(Immediate(target::ToRawSmi(0))); // Space for the result.
1109 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);
1110 // Result tells stub how many bytes to remove from the expression stack
1111 // of the bottom-most frame. They were used as materialization arguments.
1112 __ popq(RBX);
1113 __ SmiUntag(RBX);
1114 if (kind == kLazyDeoptFromReturn) {
1115 __ popq(RAX); // Restore result.
1116 } else if (kind == kLazyDeoptFromThrow) {
1117 __ popq(RDX); // Restore stacktrace.
1118 __ popq(RAX); // Restore exception.
1119 __ popq(CODE_REG);
1120 }
1121 __ LeaveStubFrame();
1122
1123 __ popq(RCX); // Pop return address.
1124 __ addq(RSP, RBX); // Remove materialization arguments.
1125 __ pushq(RCX); // Push return address.
1126 // The caller is responsible for emitting the return instruction.
1127
1128 if (kind == kLazyDeoptFromThrow) {
1129 // Unoptimized frame is now ready to accept the exception. Rethrow it to
1130 // find the right handler.
1131 __ EnterStubFrame();
1132 __ pushq(Immediate(target::ToRawSmi(0))); // Space for the result.
1133 __ pushq(RAX); // Exception
1134 __ pushq(RDX); // Stacktrace
1135 __ pushq(Immediate(target::ToRawSmi(1))); // Bypass debugger.
1136 __ CallRuntime(kReThrowRuntimeEntry, 3);
1137 __ LeaveStubFrame();
1138 }
1139}
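// In summary, the sequence above uses three separate stub frames:
//   1. DeoptimizeCopyFrame: push every CPU/XMM register, copy the optimized
//      frame into a buffer, and learn the unoptimized frame size;
//   2. DeoptimizeFillFrame: replace the optimized frame with the unoptimized
//      layout (no GC is allowed until this is done);
//   3. DeoptimizeMaterialize: allocate any deferred objects and drop their
//      materialization arguments from the expression stack.
// For kLazyDeoptFromThrow the exception is then rethrown so the handler in
// the now-unoptimized frame can be found.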
1140
1141// RAX: result, must be preserved
1142void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
1143 // Push zap value instead of CODE_REG for lazy deopt.
1144 __ pushq(Immediate(kZapCodeReg));
1145 // Return address for "call" to deopt stub.
1146 __ pushq(Immediate(kZapReturnAddress));
1147 __ movq(CODE_REG,
1149 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
1150 __ ret();
1151}
1152
1153// RAX: exception, must be preserved
1154// RDX: stacktrace, must be preserved
1155void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
1156 // Push zap value instead of CODE_REG for lazy deopt.
1157 __ pushq(Immediate(kZapCodeReg));
1158 // Return address for "call" to deopt stub.
1159 __ pushq(Immediate(kZapReturnAddress));
1160 __ movq(CODE_REG,
1162 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
1163 __ ret();
1164}
1165
1166void StubCodeCompiler::GenerateDeoptimizeStub() {
1167 __ popq(TMP);
1168 __ pushq(CODE_REG);
1169 __ pushq(TMP);
1171 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
1172 __ ret();
1173}
1174
1175// Input:
1176// IC_DATA_REG - icdata/megamorphic_cache
1177// RDI - arguments descriptor size
1178static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler,
1179 Register receiver_reg) {
1180 __ pushq(Immediate(0)); // Setup space on stack for result.
1181 __ pushq(receiver_reg); // Receiver.
1182 __ pushq(IC_DATA_REG); // ICData/MegamorphicCache.
1183 __ pushq(ARGS_DESC_REG); // Arguments descriptor array.
1184
1185 // Adjust arguments count.
1186 __ OBJ(cmp)(FieldAddress(ARGS_DESC_REG,
1188 Immediate(0));
1189 __ OBJ(mov)(R10, RDI);
1190 Label args_count_ok;
1191 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1192 // Include the type arguments.
1193 __ OBJ(add)(R10, Immediate(target::ToRawSmi(1)));
1194 __ Bind(&args_count_ok);
1195
1196 // R10: Smi-tagged arguments array length.
1197 PushArrayOfArguments(assembler);
1198 const intptr_t kNumArgs = 4;
1199 __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
1200 __ Drop(4);
1201 __ popq(RAX); // Return value.
1202 __ LeaveStubFrame();
1203 __ ret();
1204}
1205
1206// Input:
1207// IC_DATA_REG - icdata/megamorphic_cache
1208// ARGS_DESC_REG - argument descriptor
1209static void GenerateDispatcherCode(Assembler* assembler,
1210 Label* call_target_function) {
1211 __ Comment("NoSuchMethodDispatch");
1212 // When lazily generated invocation dispatchers are disabled, the
1213 // miss-handler may return null.
1214 __ CompareObject(RAX, NullObject());
1215 __ j(NOT_EQUAL, call_target_function);
1216
1217 __ EnterStubFrame();
1218 // Load the receiver.
1219 __ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
1221 __ movq(RAX,
1222 Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
1223 target::frame_layout.param_end_from_fp * target::kWordSize));
1224
1225 GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RAX);
1226}
1227
1228// Input:
1229// IC_DATA_REG - icdata/megamorphic_cache
1230// RDX - receiver
1231void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
1232 __ EnterStubFrame();
1233
1234 __ movq(ARGS_DESC_REG,
1235 FieldAddress(IC_DATA_REG,
1237 __ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
1239
1240 GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RDX);
1241}
1242
1243// Called for inline allocation of arrays.
1244// Input registers (preserved):
1245// AllocateArrayABI::kLengthReg: array length as Smi.
1246// AllocateArrayABI::kTypeArgumentsReg: type arguments of array.
1247// Output registers:
1248// AllocateArrayABI::kResultReg: newly allocated array.
1249// Clobbered:
1250// RCX, RDI, R12
1251void StubCodeCompiler::GenerateAllocateArrayStub() {
1252 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1253 Label slow_case;
1254 // Compute the size to be allocated, it is based on the array length
1255 // and is computed as:
1256 // RoundedAllocationSize(
1257 // (array_length * target::kCompressedWordSize) +
1258 // target::Array::header_size()).
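// Worked example of the size computation above (illustrative constants,
// assuming compressed pointers with kCompressedWordSize == 4, a 16-byte
// Array header and kObjectAlignment == 16):
//   length == 3  =>  16 + 3 * 4 = 28  =>  rounded up to 32 bytes.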
1259 __ movq(RDI, AllocateArrayABI::kLengthReg); // Array Length.
1260 // Check that length is Smi.
1261 __ testq(RDI, Immediate(kSmiTagMask));
1262 __ j(NOT_ZERO, &slow_case);
1263
1264 // Check length >= 0 && length <= kMaxNewSpaceElements
1265 const Immediate& max_len =
1267 __ OBJ(cmp)(RDI, max_len);
1268 __ j(ABOVE, &slow_case);
1269
1270 // Check for allocation tracing.
1271 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, &slow_case));
1272
1273 const intptr_t fixed_size_plus_alignment_padding =
1276 // RDI is a Smi.
1278 fixed_size_plus_alignment_padding));
1279 ASSERT(kSmiTagShift == 1);
1281
1282 const intptr_t cid = kArrayCid;
1284 Address(THR, target::Thread::top_offset()));
1285
1286 // RDI: allocation size.
1288 __ addq(RCX, RDI);
1289 __ j(CARRY, &slow_case);
1290
1291 // Check if the allocation fits into the remaining space.
1292 // AllocateArrayABI::kResultReg: potential new object start.
1293 // RCX: potential next object start.
1294 // RDI: allocation size.
1295 __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
1296 __ j(ABOVE_EQUAL, &slow_case);
1297 __ CheckAllocationCanary(AllocateArrayABI::kResultReg);
1298
1299 // Successfully allocated the object(s), now update top to point to
1300 // next object start and initialize the object.
1301 __ movq(Address(THR, target::Thread::top_offset()), RCX);
1303
1304 // Initialize the tags.
1305 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1306 // RDI: allocation size.
1307 {
1308 Label size_tag_overflow, done;
1310 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
1314
1315 __ Bind(&size_tag_overflow);
1316 __ LoadImmediate(RDI, Immediate(0));
1317 __ Bind(&done);
1318
1319 // Get the class index and insert it into the tags.
1321 __ orq(RDI, Immediate(tags));
1322 __ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
1323 }
1324
1325 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1326 // Store the type argument field.
1327 // No generational barrier needed, since we store into a new object.
1328 __ StoreCompressedIntoObjectNoBarrier(
1330 FieldAddress(AllocateArrayABI::kResultReg,
1333
1334 // Set the length field.
1335 __ StoreCompressedIntoObjectNoBarrier(
1337 FieldAddress(AllocateArrayABI::kResultReg,
1340
1341 // Initialize all array elements to raw_null.
1342 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
1343 // RCX: new object end address.
1344 // RDI: iterator which initially points to the start of the variable
1345 // data area to be initialized.
1346 __ LoadObject(R12, NullObject());
1347 __ leaq(RDI, FieldAddress(AllocateArrayABI::kResultReg,
1349 Label loop;
1350 __ Bind(&loop);
1351 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1353 // No generational barrier needed, since we are storing null.
1354 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
1355 Address(RDI, offset), R12);
1356 }
1357 // Safe to only check every kObjectAlignment bytes instead of each word.
1359 __ addq(RDI, Immediate(target::kObjectAlignment));
1360 __ cmpq(RDI, RCX);
1361 __ j(UNSIGNED_LESS, &loop);
1362 __ WriteAllocationCanary(RCX);
1363 __ ret();
1364
1365 // Unable to allocate the array using the fast inline code, just call
1366 // into the runtime.
1367 __ Bind(&slow_case);
1368 }
1369 // Create a stub frame as we are pushing some objects on the stack before
1370 // calling into the runtime.
1371 __ EnterStubFrame();
1372 __ pushq(Immediate(0)); // Space for return value.
1373 __ pushq(AllocateArrayABI::kLengthReg); // Array length as Smi.
1374 __ pushq(AllocateArrayABI::kTypeArgumentsReg); // Element type.
1375 __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
1376
1377 // Write-barrier elimination might be enabled for this array (depending on the
1378 // array length). To be sure we will check if the allocated object is in old
1379 // space and if so call a leaf runtime to add it to the remembered set.
1382
1383 __ popq(AllocateArrayABI::kTypeArgumentsReg); // Pop element type argument.
1384 __ popq(AllocateArrayABI::kLengthReg); // Pop array length argument.
1385 __ popq(AllocateArrayABI::kResultReg); // Pop allocated object.
1386 __ LeaveStubFrame();
1387 __ ret();
1388}
1389
1390void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
1391 // For test purposes, call the allocation stub without the inline allocation attempt.
1392 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1393 Label slow_case;
1394 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1396 __ Ret();
1397
1398 __ Bind(&slow_case);
1399 }
1402 GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
1404 /*allow_return=*/true,
1405 /*store_runtime_result_in_result_register=*/true);
1406}
1407
1408void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
1409 // For test purposes, call the allocation stub without the inline allocation attempt.
1410 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1411 Label slow_case;
1412 __ TryAllocate(compiler::MintClass(), &slow_case, Assembler::kNearJump,
1414 __ Ret();
1415
1416 __ Bind(&slow_case);
1417 }
1420 GenerateSharedStub(
1421 /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
1423 /*allow_return=*/true,
1424 /*store_runtime_result_in_result_register=*/true);
1425}
1426
1427static const RegisterSet kCalleeSavedRegisterSet(
1430
1431// Called when invoking Dart code from C++ (VM code).
1432// Input parameters:
1433// RSP : points to return address.
1434// RDI : target code or entry point (in bare instructions mode).
1435// RSI : arguments descriptor array.
1436// RDX : arguments array.
1437// RCX : current thread.
1438void StubCodeCompiler::GenerateInvokeDartCodeStub() {
1439 __ EnterFrame(0);
1440
1441 const Register kTargetReg = CallingConventions::kArg1Reg;
1442 const Register kArgDescReg = CallingConventions::kArg2Reg;
1443 const Register kArgsReg = CallingConventions::kArg3Reg;
1444 const Register kThreadReg = CallingConventions::kArg4Reg;
1445
1446 // Push code object to PC marker slot.
1447 __ pushq(Address(kThreadReg, target::Thread::invoke_dart_code_stub_offset()));
1448
1449 // At this point, the stack looks like:
1450 // | stub code object
1451 // | saved RBP | <-- RBP
1452 // | saved PC (return to DartEntry::InvokeFunction) |
1453
1454 const intptr_t kInitialOffset = 2;
1455 // Save arguments descriptor array, later replaced by Smi argument count.
1456 const intptr_t kArgumentsDescOffset = -(kInitialOffset)*target::kWordSize;
1457 __ pushq(kArgDescReg);
1458
1459 // Save C++ ABI callee-saved registers.
1460 __ PushRegisters(kCalleeSavedRegisterSet);
1461
1462 // If any additional (or fewer) values are pushed, the offsets in
1463 // target::frame_layout.exit_link_slot_from_entry_fp will need to be changed.
1464
1465 // Set up THR, which caches the current thread in Dart code.
1466 if (THR != kThreadReg) {
1467 __ movq(THR, kThreadReg);
1468 }
1469
1470#if defined(USING_SHADOW_CALL_STACK)
1471#error Unimplemented
1472#endif
1473
1474 // Save the current VMTag on the stack.
1476 __ pushq(RAX);
1477
1478 // Save top resource and top exit frame info. Use RAX as a temporary register.
1479 // StackFrameIterator reads the top exit frame info saved in this frame.
1480 __ movq(RAX, Address(THR, target::Thread::top_resource_offset()));
1481 __ pushq(RAX);
1482 __ movq(Address(THR, target::Thread::top_resource_offset()), Immediate(0));
1483
1485 __ pushq(RAX);
1487 Immediate(0));
1488
1490 __ pushq(RAX);
1491
1492 // The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
1493 // in sync with the code above.
1494 __ EmitEntryFrameVerification();
1495
1497 Immediate(0));
1498
1499 // Mark that the thread is executing Dart code. Do this after initializing the
1500 // exit link for the profiler.
1501 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
1502
1503 // Load arguments descriptor array into R10, which is passed to Dart code.
1504 __ movq(R10, kArgDescReg);
1505
1506 // Push arguments. At this point we only need to preserve kTargetReg.
1507 ASSERT(kTargetReg != RDX);
1508
1509 // Load number of arguments into RBX and adjust count for type arguments.
1510 __ OBJ(mov)(RBX,
1512 __ OBJ(cmp)(
1514 Immediate(0));
1515 Label args_count_ok;
1516 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
1517 __ addq(RBX, Immediate(target::ToRawSmi(1))); // Include the type arguments.
1518 __ Bind(&args_count_ok);
1519 // Save number of arguments as Smi on stack, replacing saved ArgumentsDesc.
1520 __ movq(Address(RBP, kArgumentsDescOffset), RBX);
1521 __ SmiUntag(RBX);
1522
1523 // Compute address of 'arguments array' data area into RDX.
1524 __ leaq(RDX, FieldAddress(kArgsReg, target::Array::data_offset()));
1525
1526 // Set up arguments for the Dart call.
1527 Label push_arguments;
1528 Label done_push_arguments;
1529 __ j(ZERO, &done_push_arguments, Assembler::kNearJump);
1530 __ LoadImmediate(RAX, Immediate(0));
1531 __ Bind(&push_arguments);
1532#if defined(DART_COMPRESSED_POINTERS)
1533 __ LoadCompressed(TMP, Address(RDX, RAX, TIMES_COMPRESSED_WORD_SIZE, 0));
1534 __ pushq(TMP);
1535#else
1536 __ pushq(Address(RDX, RAX, TIMES_8, 0));
1537#endif
1538 __ incq(RAX);
1539 __ cmpq(RAX, RBX);
1540 __ j(LESS, &push_arguments, Assembler::kNearJump);
1541 __ Bind(&done_push_arguments);
1542
1543 // Call the Dart code entrypoint.
1544 if (FLAG_precompiled_mode) {
1546 __ xorq(CODE_REG, CODE_REG); // GC-safe value into CODE_REG.
1547 } else {
1548 __ xorq(PP, PP); // GC-safe value into PP.
1549 __ movq(CODE_REG, kTargetReg);
1550 __ movq(kTargetReg,
1552 }
1553 __ call(kTargetReg); // R10 is the arguments descriptor array.
1554
1555 // Read the saved number of passed arguments as Smi.
1556 __ movq(RDX, Address(RBP, kArgumentsDescOffset));
1557
1558 // Get rid of arguments pushed on the stack.
1559 __ leaq(RSP, Address(RSP, RDX, TIMES_4, 0)); // RDX is a Smi.
1560
1561 // Restore the saved top exit frame info and top resource back into the
1562 // Isolate structure.
1565 __ popq(Address(THR, target::Thread::top_resource_offset()));
1566
1567 // Restore the current VMTag from the stack.
1569
1570#if defined(USING_SHADOW_CALL_STACK)
1571#error Unimplemented
1572#endif
1573
1574 // Restore C++ ABI callee-saved registers.
1575 __ PopRegisters(kCalleeSavedRegisterSet);
1576 __ set_constant_pool_allowed(false);
1577
1578 // Restore the frame pointer.
1579 __ LeaveFrame();
1580
1581 __ ret();
1582}
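// For illustration, the entry frame built above holds (from RBP downwards):
// the InvokeDartCode stub as PC marker, the arguments descriptor (later
// replaced by the Smi argument count), the C++ callee-saved registers, the
// saved VM tag, and the saved top-resource / exit-frame bookkeeping; the
// constant target::frame_layout.exit_link_slot_from_entry_fp must stay in
// sync with this layout, which EmitEntryFrameVerification checks.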
1583
1584// Helper that generates the space allocation part of the context allocation stub.
1585// This does not initialize the fields of the context.
1586// Input:
1587// R10: number of context variables.
1588// Output:
1589// RAX: new, uninitialized allocated Context object.
1590// Clobbered:
1591// R13
1592static void GenerateAllocateContextSpaceStub(Assembler* assembler,
1593 Label* slow_case) {
1594 // First compute the rounded instance size.
1595 // R10: number of context variables.
1596 intptr_t fixed_size_plus_alignment_padding =
1599 __ leaq(R13, Address(R10, TIMES_COMPRESSED_WORD_SIZE,
1600 fixed_size_plus_alignment_padding));
1602
1603 // Check for allocation tracing.
1604 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, slow_case));
1605
1606 // Now allocate the object.
1607 // R10: number of context variables.
1608 __ movq(RAX, Address(THR, target::Thread::top_offset()));
1609 __ addq(R13, RAX);
1610 // Check if the allocation fits into the remaining space.
1611 // RAX: potential new object.
1612 // R13: potential next object start.
1613 // R10: number of context variables.
1614 __ cmpq(R13, Address(THR, target::Thread::end_offset()));
1615 __ j(ABOVE_EQUAL, slow_case);
1616 __ CheckAllocationCanary(RAX);
1617
1618 // Successfully allocated the object, now update top to point to
1619 // next object start and initialize the object.
1620 // RAX: new object.
1621 // R13: next object start.
1622 // R10: number of context variables.
1623 __ movq(Address(THR, target::Thread::top_offset()), R13);
1624 // R13: Size of allocation in bytes.
1625 __ subq(R13, RAX);
1626 __ addq(RAX, Immediate(kHeapObjectTag));
1627 // Generate isolate-independent code to allow sharing between isolates.
1628
1629 // Calculate the size tag.
1630 // RAX: new object.
1631 // R10: number of context variables.
1632 {
1633 Label size_tag_overflow, done;
1634 __ leaq(R13, Address(R10, TIMES_COMPRESSED_WORD_SIZE,
1635 fixed_size_plus_alignment_padding));
1638 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
1641 __ jmp(&done);
1642
1643 __ Bind(&size_tag_overflow);
1644 // Set overflow size tag value.
1645 __ LoadImmediate(R13, Immediate(0));
1646
1647 __ Bind(&done);
1648 // RAX: new object.
1649 // R10: number of context variables.
1650 // R13: size and bit tags.
1651 uword tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
1652 __ orq(R13, Immediate(tags));
1653 __ movq(FieldAddress(RAX, target::Object::tags_offset()), R13); // Tags.
1654 }
1655
1656 // Set up the number-of-context-variables field.
1657 // RAX: new object.
1658 // R10: number of context variables as integer value (not object).
1659 __ movl(FieldAddress(RAX, target::Context::num_variables_offset()), R10);
1660}
1661
1662// Called for inline allocation of contexts.
1663// Input:
1664// R10: number of context variables.
1665// Output:
1666// RAX: new allocated Context object.
1667// Clobbered:
1668// R9, R13
1669void StubCodeCompiler::GenerateAllocateContextStub() {
1670 __ LoadObject(R9, NullObject());
1671 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1672 Label slow_case;
1673
1674 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1675
1676 // Setup the parent field.
1677 // RAX: new object.
1678 // R9: Parent object, initialized to null.
1679 // No generational barrier needed, since we are storing null.
1680 __ StoreCompressedIntoObjectNoBarrier(
1681 RAX, FieldAddress(RAX, target::Context::parent_offset()), R9);
1682
1683 // Initialize the context variables.
1684 // RAX: new object.
1685 // R10: number of context variables.
1686 {
1687 Label loop, entry;
1688 __ leaq(R13, FieldAddress(RAX, target::Context::variable_offset(0)));
1689#if defined(DEBUG)
1690 static auto const kJumpLength = Assembler::kFarJump;
1691#else
1692 static auto const kJumpLength = Assembler::kNearJump;
1693#endif // DEBUG
1694 __ jmp(&entry, kJumpLength);
1695 __ Bind(&loop);
1696 __ decq(R10);
1697 // No generational barrier needed, since we are storing null.
1698 __ StoreCompressedIntoObjectNoBarrier(
1699 RAX, Address(R13, R10, TIMES_COMPRESSED_WORD_SIZE, 0), R9);
1700 __ Bind(&entry);
1701 __ cmpq(R10, Immediate(0));
1703 }
1704
1705 // Done allocating and initializing the context.
1706 // RAX: new object.
1707 __ ret();
1708
1709 __ Bind(&slow_case);
1710 }
1711 // Create a stub frame.
1712 __ EnterStubFrame();
1713 __ pushq(R9); // Setup space on stack for the return value.
1714 __ SmiTag(R10);
1715 __ pushq(R10); // Push number of context variables.
1716 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context.
1717 __ popq(RAX); // Pop number of context variables argument.
1718 __ popq(RAX); // Pop the new context object.
1719 // Write-barrier elimination might be enabled for this context (depending on
1720 // the size). To be safe, check whether the allocated object is in old space
1721 // and, if so, call a leaf runtime to add it to the remembered set.
1723
1724 // RAX: new object
1725 // Restore the frame pointer.
1726 __ LeaveStubFrame();
1727
1728 __ ret();
1729}
1730
1731// Called for inline clone of contexts.
1732// Input:
1733// R9: context to clone.
1734// Output:
1735// RAX: new allocated Context object.
1736// Clobbered:
1737// R10, R13
1738void StubCodeCompiler::GenerateCloneContextStub() {
1739 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1740 Label slow_case;
1741
1742     // Load the number of context variables (int32_t) from the existing context.
1743 __ movsxd(R10, FieldAddress(R9, target::Context::num_variables_offset()));
1744
1745 // Allocate new context of same size.
1746 GenerateAllocateContextSpaceStub(assembler, &slow_case);
1747
1748 // Load parent in the existing context.
1749 __ LoadCompressed(R13, FieldAddress(R9, target::Context::parent_offset()));
1750 // Setup the parent field.
1751 // RAX: new object.
1752     // R13: parent of the old context (loaded above).
1753 __ StoreCompressedIntoObjectNoBarrier(
1754 RAX, FieldAddress(RAX, target::Context::parent_offset()), R13);
1755
1756 // Clone the context variables.
1757 // RAX: new context clone.
1758 // R10: number of context variables.
1759 {
1760 Label loop, entry;
1761 __ jmp(&entry, Assembler::kNearJump);
1762 __ Bind(&loop);
1763 __ decq(R10);
1764 __ LoadCompressed(R13, FieldAddress(R9, R10, TIMES_COMPRESSED_WORD_SIZE,
1766 __ StoreCompressedIntoObjectNoBarrier(
1767 RAX,
1768 FieldAddress(RAX, R10, TIMES_COMPRESSED_WORD_SIZE,
1770 R13);
1771 __ Bind(&entry);
1772 __ cmpq(R10, Immediate(0));
1773       __ j(NOT_ZERO, &loop, Assembler::kNearJump);
1774     }
1775
1776 // Done allocating and initializing the context.
1777 // RAX: new object.
1778 __ ret();
1779
1780 __ Bind(&slow_case);
1781 }
1782
1783 // Create a stub frame.
1784 __ EnterStubFrame();
1785
1786 __ PushObject(NullObject()); // Make space on stack for the return value.
1787 __ pushq(R9); // Push context.
1788 __ CallRuntime(kCloneContextRuntimeEntry, 1); // Clone context.
1789 __ popq(RAX); // Pop context argument.
1790 __ popq(RAX); // Pop the new context object.
1791
1792 // Write-barrier elimination might be enabled for this context (depending on
1793 // the size). To be safe, check whether the allocated object is in old space
1794 // and, if so, call a leaf runtime to add it to the remembered set.
1796
1797 // RAX: new object
1798 // Restore the frame pointer.
1799 __ LeaveStubFrame();
1800
1801 __ ret();
1802}
1803
1804void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
1805 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1806 if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
1807
1808 Register reg = static_cast<Register>(i);
1809 intptr_t start = __ CodeSize();
1810     __ pushq(kWriteBarrierObjectReg);
1811     __ movq(kWriteBarrierObjectReg, reg);
1812     __ call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
1813     __ popq(kWriteBarrierObjectReg);
1814     __ ret();
1815 intptr_t end = __ CodeSize();
1816
1818 }
1819}
1820
1821// Helper stub to implement Assembler::StoreIntoObject/Array.
1822// Input parameters:
1823// RDX: Object (old)
1824// RAX: Value (old or new)
1825// R13: Slot
1826// If RAX is new, add RDX to the store buffer. Otherwise RAX is old, mark RAX
1827// and add it to the mark list.
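// When [cards] is true (the array write barrier), objects with the
// kCardRememberedBit set are remembered by dirtying an entry in their page's
// card table instead of being added to the store buffer.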
1831static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
1832 Label skip_marking;
1833 __ movq(TMP, FieldAddress(RAX, target::Object::tags_offset()));
1836 __ j(ZERO, &skip_marking);
1837
1838 {
1839 // Atomically clear kNotMarkedBit.
1840 Label retry, is_new, done;
1841 __ pushq(RAX); // Spill.
1842 __ pushq(RCX); // Spill.
1843 __ movq(TMP, RAX); // RAX is fixed implicit operand of CAS.
1844 __ movq(RAX, FieldAddress(TMP, target::Object::tags_offset()));
1845
1846 __ Bind(&retry);
1847 __ movq(RCX, RAX);
1848 __ testq(RCX, Immediate(1 << target::UntaggedObject::kNotMarkedBit));
1849 __ j(ZERO, &done); // Marked by another thread.
1850
1851 __ andq(RCX, Immediate(~(1 << target::UntaggedObject::kNotMarkedBit)));
1852 // Cmpxchgq: compare value = implicit operand RAX, new value = RCX.
1853 // On failure, RAX is updated with the current value.
1854 __ LockCmpxchgq(FieldAddress(TMP, target::Object::tags_offset()), RCX);
1855 __ j(NOT_EQUAL, &retry, Assembler::kNearJump);
1856
1857 __ testq(TMP,
1859 __ j(NOT_ZERO, &is_new);
1860
1861 auto mark_stack_push = [&](intptr_t offset, const RuntimeEntry& entry) {
1862 __ movq(RAX, Address(THR, offset));
1864 __ movq(Address(RAX, RCX, TIMES_8,
1866 TMP);
1867 __ incq(RCX);
1869 __ cmpl(RCX, Immediate(target::MarkingStackBlock::kSize));
1870 __ j(NOT_EQUAL, &done);
1871
1872 {
1873 LeafRuntimeScope rt(assembler,
1874 /*frame_size=*/0,
1875 /*preserve_registers=*/true);
1877 rt.Call(entry, 1);
1878 }
1879 };
1880
1882 kOldMarkingStackBlockProcessRuntimeEntry);
1883 __ jmp(&done);
1884
1885 __ Bind(&is_new);
1887 kNewMarkingStackBlockProcessRuntimeEntry);
1888
1889 __ Bind(&done);
1890 __ popq(RCX); // Unspill.
1891 __ popq(RAX); // Unspill.
1892 }
1893
1894 Label add_to_remembered_set, remember_card;
1895 __ Bind(&skip_marking);
1896 __ movq(TMP, FieldAddress(RDX, target::Object::tags_offset()));
1898 __ andq(TMP, FieldAddress(RAX, target::Object::tags_offset()));
1900 __ j(NOT_ZERO, &add_to_remembered_set, Assembler::kNearJump);
1901 __ ret();
1902
1903 __ Bind(&add_to_remembered_set);
1904 if (cards) {
1905 __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
1906 __ testl(TMP, Immediate(1 << target::UntaggedObject::kCardRememberedBit));
1907 __ j(NOT_ZERO, &remember_card, Assembler::kFarJump);
1908 } else {
1909#if defined(DEBUG)
1910 Label ok;
1911 __ movl(TMP, FieldAddress(RDX, target::Object::tags_offset()));
1912 __ testl(TMP, Immediate(1 << target::UntaggedObject::kCardRememberedBit));
1913     __ j(ZERO, &ok, Assembler::kNearJump);
1914     __ Stop("Wrong barrier");
1915 __ Bind(&ok);
1916#endif
1917 }
1918 {
1919 // Atomically clear kOldAndNotRemembered.
1920 Label retry, done;
1921 __ pushq(RAX); // Spill.
1922 __ pushq(RCX); // Spill.
1923 __ movq(RAX, FieldAddress(RDX, target::Object::tags_offset()));
1924
1925 __ Bind(&retry);
1926 __ movq(RCX, RAX);
1927 __ testq(RCX,
1929 __ j(ZERO, &done); // Remembered by another thread.
1930 __ andq(RCX,
1932 // Cmpxchgq: compare value = implicit operand RAX, new value = RCX.
1933 // On failure, RAX is updated with the current value.
1934 __ LockCmpxchgq(FieldAddress(RDX, target::Object::tags_offset()), RCX);
1935 __ j(NOT_EQUAL, &retry, Assembler::kNearJump);
1936
1937 // Load the StoreBuffer block out of the thread. Then load top_ out of the
1938 // StoreBufferBlock and add the address to the pointers_.
1939 // RDX: Address being stored
1942 __ movq(
1944 RDX);
1945
1946 // Increment top_ and check for overflow.
1947 // RCX: top_
1948 // RAX: StoreBufferBlock
1949 __ incq(RCX);
1951 __ cmpl(RCX, Immediate(target::StoreBufferBlock::kSize));
1952 __ j(NOT_EQUAL, &done);
1953
1954 {
1955 LeafRuntimeScope rt(assembler,
1956 /*frame_size=*/0,
1957 /*preserve_registers=*/true);
1959 rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);
1960 }
1961
1962 __ Bind(&done);
1963 __ popq(RCX); // Unspill.
1964 __ popq(RAX); // Unspill.
1965 __ ret();
1966 }
1967
1968 if (cards) {
1969 Label remember_card_slow;
1970
1971 // Get card table.
1972 __ Bind(&remember_card);
1973 __ movq(TMP, RDX); // Object.
1974 __ andq(TMP, Immediate(target::kPageMask)); // Page.
1975 __ cmpq(Address(TMP, target::Page::card_table_offset()), Immediate(0));
1976 __ j(EQUAL, &remember_card_slow, Assembler::kNearJump);
1977
1978 // Atomically dirty the card.
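    // The slot's offset within the page is converted to a card index, and that
    // index into a (word, bit) position in the card table bitmap.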
1979 __ pushq(RAX);
1980 __ pushq(RCX);
1981 __ subq(R13, TMP); // Offset in page.
1982 __ movq(TMP,
1983 Address(TMP, target::Page::card_table_offset())); // Card table.
1984 __ shrq(R13, Immediate(target::Page::kBytesPerCardLog2)); // Card index.
1985 __ movq(RCX, R13);
1986 __ shrq(R13, Immediate(target::kBitsPerWordLog2)); // Word offset.
1987 __ movq(RAX, Immediate(1));
1988     __ shlq(RAX, RCX);  // Bit mask. (Shift amount is masked to 6 bits.)
1989 __ lock();
1990 __ orq(Address(TMP, R13, TIMES_8, 0), RAX);
1991 __ popq(RCX);
1992 __ popq(RAX);
1993 __ ret();
1994
1995 // Card table not yet allocated.
1996 __ Bind(&remember_card_slow);
1997 {
1998 LeafRuntimeScope rt(assembler,
1999 /*frame_size=*/0,
2000 /*preserve_registers=*/true);
2003 rt.Call(kRememberCardRuntimeEntry, 2);
2004 }
2005 __ ret();
2006 }
2007}
2008
2009void StubCodeCompiler::GenerateWriteBarrierStub() {
2010 GenerateWriteBarrierStubHelper(assembler, false);
2011}
2012
2013void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
2014 GenerateWriteBarrierStubHelper(assembler, true);
2015}
2016
2017static void GenerateAllocateObjectHelper(Assembler* assembler,
2018 bool is_cls_parameterized) {
2019 // Note: Keep in sync with calling function.
2020 const Register kTagsReg = AllocateObjectABI::kTagsReg;
2021
2022 {
2023 Label slow_case;
2024 const Register kNewTopReg = R9;
2025
2026#if !defined(PRODUCT)
2027 {
2028 const Register kCidRegister = RSI;
2029 __ ExtractClassIdFromTags(kCidRegister, AllocateObjectABI::kTagsReg);
2030 __ MaybeTraceAllocation(kCidRegister, &slow_case, TMP);
2031 }
2032#endif
2033 // Allocate the object and update top to point to
2034 // next object start and initialize the allocated object.
2035 {
2036 const Register kInstanceSizeReg = RSI;
2037
2038 __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);
2039
2040       __ movq(AllocateObjectABI::kResultReg,
2041               Address(THR, target::Thread::top_offset()));
2042 __ leaq(kNewTopReg, Address(AllocateObjectABI::kResultReg,
2043 kInstanceSizeReg, TIMES_1, 0));
2044 // Check if the allocation fits into the remaining space.
2045 __ cmpq(kNewTopReg, Address(THR, target::Thread::end_offset()));
2046 __ j(ABOVE_EQUAL, &slow_case);
2047 __ CheckAllocationCanary(AllocateObjectABI::kResultReg);
2048
2049 __ movq(Address(THR, target::Thread::top_offset()), kNewTopReg);
2050 } // kInstanceSizeReg = RSI
2051
2052 // Set the tags.
2053 // 64 bit store also zeros the identity hash field.
2054 __ movq(
2056 kTagsReg);
2057
2059
2060 // Initialize the remaining words of the object.
2061 {
2062 const Register kNextFieldReg = RDI;
2063 __ leaq(kNextFieldReg,
2064 FieldAddress(AllocateObjectABI::kResultReg,
2066
2067 const Register kNullReg = R10;
2068 __ LoadObject(kNullReg, NullObject());
2069
2070 // Loop until the whole object is initialized.
2071 Label loop;
2072 __ Bind(&loop);
2073 for (intptr_t offset = 0; offset < target::kObjectAlignment;
2075 __ StoreCompressedIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
2076 Address(kNextFieldReg, offset),
2077 kNullReg);
2078 }
2079 // Safe to only check every kObjectAlignment bytes instead of each word.
2081 __ addq(kNextFieldReg, Immediate(target::kObjectAlignment));
2082 __ cmpq(kNextFieldReg, kNewTopReg);
2083 __ j(UNSIGNED_LESS, &loop);
2084 } // kNextFieldReg = RDI, kNullReg = R10
2085
2086 __ WriteAllocationCanary(kNewTopReg); // Fix overshoot.
2087
2088 if (is_cls_parameterized) {
2089 Label not_parameterized_case;
2090
2091 const Register kClsIdReg = R9;
2092 const Register kTypeOffsetReg = RDI;
2093
2094 __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
2095
2096 // Load class' type_arguments_field offset in words.
2097 __ LoadClassById(kTypeOffsetReg, kClsIdReg);
2098 __ movl(
2099 kTypeOffsetReg,
2100 FieldAddress(kTypeOffsetReg,
2101 target::Class::
2102 host_type_arguments_field_offset_in_words_offset()));
2103
2104 // Set the type arguments in the new object.
2105 __ StoreCompressedIntoObject(
2107 FieldAddress(AllocateObjectABI::kResultReg, kTypeOffsetReg,
2110
2111 __ Bind(&not_parameterized_case);
2112 } // kTypeOffsetReg = RDI;
2113
2114 __ ret();
2115
2116 __ Bind(&slow_case);
2117 } // kNewTopReg = R9;
2118
2119 // Fall back on slow case:
2120 if (!is_cls_parameterized) {
2122 }
2123 // Tail call to generic allocation stub.
2124 __ jmp(
2126}
2127
2128// Called for inline allocation of objects (any class).
2129void StubCodeCompiler::GenerateAllocateObjectStub() {
2130 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
2131}
2132
2133void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
2134 GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
2135}
2136
2137void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
2138 if (!FLAG_precompiled_mode) {
2139 __ movq(CODE_REG,
2141 }
2142
2143 __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg,
2145
2146 // Create a stub frame.
2147 // Ensure constant pool is allowed so we can e.g. load class object.
2148 __ EnterStubFrame();
2149
2150 // Setup space on stack for return value.
2153
2154 // Push class of object to be allocated.
2157
2158 // Must be Object::null() if non-parameterized class.
2160
2161 __ CallRuntime(kAllocateObjectRuntimeEntry, 2);
2162
2163 __ popq(AllocateObjectABI::kResultReg); // Drop type arguments.
2164 __ popq(AllocateObjectABI::kResultReg); // Drop class.
2165 __ popq(AllocateObjectABI::kResultReg); // Pop newly allocated object.
2166
2167 // Write-barrier elimination is enabled for [cls] and we therefore need to
2168 // ensure that the object is in new-space or has its remembered bit set.
2170
2171 // AllocateObjectABI::kResultReg: new object
2172 // Restore the frame pointer.
2173 __ LeaveStubFrame();
2174
2175 __ ret();
2176}
2177
2178// Called for inline allocation of objects.
2180 UnresolvedPcRelativeCalls* unresolved_calls,
2181 const Class& cls,
2182 const Code& allocate_object,
2183 const Code& allocat_object_parametrized) {
2184 classid_t cls_id = target::Class::GetId(cls);
2185 ASSERT(cls_id != kIllegalCid);
2186
2187 const intptr_t cls_type_arg_field_offset =
2189
2190 // The generated code is different if the class is parameterized.
2191 const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
2192 ASSERT(!is_cls_parameterized ||
2193 cls_type_arg_field_offset != target::Class::kNoTypeArguments);
2194
2195 const intptr_t instance_size = target::Class::GetInstanceSize(cls);
2196 ASSERT(instance_size > 0);
2197 const uword tags =
2198 target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
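  // Both the class id and the instance size are known statically here, so the
  // complete tag word can be precomputed and materialized as an immediate.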
2199
2200 const Register kTagsReg = AllocateObjectABI::kTagsReg;
2201
2202 __ movq(kTagsReg, Immediate(tags));
2203
2204 // Load the appropriate generic alloc. stub.
2205 if (!FLAG_use_slow_path && FLAG_inline_alloc &&
2207 target::SizeFitsInSizeTag(instance_size)) {
2210
2211 if (is_cls_parameterized) {
2212 if (!IsSameObject(NullObject(),
2213 CastHandle<Object>(allocat_object_parametrized))) {
2214 __ GenerateUnRelocatedPcRelativeTailCall();
2215 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2216 __ CodeSize(), allocat_object_parametrized, /*is_tail_call=*/true));
2217 } else {
2218 __ jmp(Address(THR,
2219 target::Thread::
2220 allocate_object_parameterized_entry_point_offset()));
2221 }
2222 } else {
2223 if (!IsSameObject(NullObject(), CastHandle<Object>(allocate_object))) {
2224 __ GenerateUnRelocatedPcRelativeTailCall();
2225 unresolved_calls->Add(new UnresolvedPcRelativeCall(
2226 __ CodeSize(), allocate_object, /*is_tail_call=*/true));
2227 } else {
2228 __ jmp(
2230 }
2231 }
2232 } else {
2233 if (!is_cls_parameterized) {
2235 }
2236 __ jmp(Address(THR,
2238 }
2239}
2240
2241// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
2242 // from the entry code of a Dart function after an error in the passed
2243 // argument names or argument count is detected.
2244// Input parameters:
2245// RSP : points to return address.
2246// RSP + 8 : address of last argument.
2247// R10 : arguments descriptor array.
2248void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
2249 __ EnterStubFrame();
2250
2251 // Load the receiver.
2252 // Note: In compressed pointer mode LoadCompressedSmi zero extends R13,
2253 // rather than sign extending it. This is ok since it's an unsigned value.
2254 __ LoadCompressedSmi(
2256 __ movq(RAX,
2257 Address(RBP, R13, TIMES_4,
2258 target::frame_layout.param_end_from_fp * target::kWordSize));
2259
2260 // Load the function.
2261 __ LoadCompressed(RBX, FieldAddress(RAX, target::Closure::function_offset()));
2262
2263 __ pushq(Immediate(0)); // Result slot.
2264 __ pushq(RAX); // Receiver.
2265 __ pushq(RBX); // Function.
2266 __ pushq(R10); // Arguments descriptor array.
2267
2268 // Adjust arguments count.
2269 __ OBJ(cmp)(
2271 Immediate(0));
2272 __ movq(R10, R13);
2273 Label args_count_ok;
2274 __ j(EQUAL, &args_count_ok, Assembler::kNearJump);
2275 __ addq(R10, Immediate(target::ToRawSmi(1))); // Include the type arguments.
2276 __ Bind(&args_count_ok);
2277
2278 // R10: Smi-tagged arguments array length.
2279 PushArrayOfArguments(assembler);
2280
2281 const intptr_t kNumArgs = 4;
2282 __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
2283 // noSuchMethod on closures always throws an error, so it will never return.
2284 __ int3();
2285}
2286
2287// Cannot use function object from ICData as it may be the inlined
2288// function and not the top-scope function.
2290 if (FLAG_precompiled_mode) {
2291 __ Breakpoint();
2292 return;
2293 }
2294 Register ic_reg = RBX;
2295 Register func_reg = RDI;
2296 if (FLAG_trace_optimized_ic_calls) {
2297 __ EnterStubFrame();
2298 __ pushq(func_reg); // Preserve
2299 __ pushq(ic_reg); // Preserve.
2300 __ pushq(ic_reg); // Argument.
2301 __ pushq(func_reg); // Argument.
2302 __ CallRuntime(kTraceICCallRuntimeEntry, 2);
2303 __ popq(RAX); // Discard argument;
2304 __ popq(RAX); // Discard argument;
2305 __ popq(ic_reg); // Restore.
2306 __ popq(func_reg); // Restore.
2307 __ LeaveStubFrame();
2308 }
2309 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
2310}
2311
2312// Loads function into 'temp_reg', preserves IC_DATA_REG.
2314 if (FLAG_precompiled_mode) {
2315 __ Breakpoint();
2316 return;
2317 }
2318 if (FLAG_optimization_counter_threshold >= 0) {
2319 Register func_reg = temp_reg;
2320 ASSERT(func_reg != IC_DATA_REG);
2321 __ Comment("Increment function counter");
2322 __ movq(func_reg,
2323 FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
2324 __ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
2325 }
2326}
2327
2328// Note: RBX must be preserved.
2329// Attempt a quick Smi operation for known operations ('kind'). The ICData
2330// must have been primed with a Smi/Smi check that will be used for counting
2331// the invocations.
2332static void EmitFastSmiOp(Assembler* assembler,
2333 Token::Kind kind,
2334 intptr_t num_args,
2335 Label* not_smi_or_overflow) {
2336 __ Comment("Fast Smi op");
2337 ASSERT(num_args == 2);
2338 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Left.
2339 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Right
2340 __ movq(R13, RCX);
2341 __ orq(R13, RAX);
2342 __ testq(R13, Immediate(kSmiTagMask));
2343 __ j(NOT_ZERO, not_smi_or_overflow);
2344 switch (kind) {
2345 case Token::kADD: {
2346 __ OBJ(add)(RAX, RCX);
2347 __ j(OVERFLOW, not_smi_or_overflow);
2348 break;
2349 }
2350 case Token::kLT: {
2351 __ OBJ(cmp)(RAX, RCX);
2352       __ setcc(GREATER_EQUAL, ByteRegisterOf(RAX));
2353       __ movzxb(RAX, RAX);  // RAX := RAX < RCX ? 0 : 1
2354 __ movq(RAX,
2358 break;
2359 }
2360 case Token::kEQ: {
2361 __ OBJ(cmp)(RAX, RCX);
2362 __ setcc(NOT_EQUAL, ByteRegisterOf(RAX));
2363 __ movzxb(RAX, RAX); // RAX := RAX == RCX ? 0 : 1
2364 __ movq(RAX,
2368 break;
2369 }
2370 default:
2371 UNIMPLEMENTED();
2372 }
2373
2374 // RBX: IC data object (preserved).
2375 __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
2376 // R13: ic_data_array with check entries: classes and target functions.
2377 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
2378// R13: points directly to the first ic data array element.
2379#if defined(DEBUG)
2380 // Check that first entry is for Smi/Smi.
2381 Label error, ok;
2382 const Immediate& imm_smi_cid = Immediate(target::ToRawSmi(kSmiCid));
2383 __ OBJ(cmp)(Address(R13, 0 * target::kCompressedWordSize), imm_smi_cid);
2384   __ j(NOT_EQUAL, &error, Assembler::kNearJump);
2385   __ OBJ(cmp)(Address(R13, 1 * target::kCompressedWordSize), imm_smi_cid);
2386   __ j(EQUAL, &ok, Assembler::kNearJump);
2387 __ Bind(&error);
2388 __ Stop("Incorrect IC data");
2389 __ Bind(&ok);
2390#endif
2391
2392 if (FLAG_optimization_counter_threshold >= 0) {
2393 const intptr_t count_offset =
2395 // Update counter, ignore overflow.
2396 __ OBJ(add)(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2397 }
2398
2399 __ ret();
2400}
2401
2402// Saves the offset of the target entry-point (from the Function) into R8.
2403//
2404// Must be the first code generated, since any code before will be skipped in
2405// the unchecked entry-point.
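// The saved offset is later added to the target Function's address to form the
// address of its (checked or unchecked) entry-point field, which the call then
// jumps through.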
2406static void GenerateRecordEntryPoint(Assembler* assembler) {
2407 Label done;
2408 __ movq(R8,
2410 __ jmp(&done);
2411 __ BindUncheckedEntryPoint();
2415 __ Bind(&done);
2416}
2417
2418// Generate inline cache check for 'num_args'.
2419// RDX: receiver (if instance call)
2420// RBX: ICData
2421// RSP[0]: return address
2422// Control flow:
2423// - If receiver is null -> jump to IC miss.
2424// - If receiver is Smi -> load Smi class.
2425// - If receiver is not-Smi -> load receiver's class.
2426// - Check if 'num_args' (including receiver) match any IC data group.
2427// - Match found -> jump to target.
2428// - Match not found -> jump to IC miss.
2430 intptr_t num_args,
2431 const RuntimeEntry& handle_ic_miss,
2432 Token::Kind kind,
2433 Optimized optimized,
2434 CallType type,
2435 Exactness exactness) {
2436 if (FLAG_precompiled_mode) {
2437 __ Breakpoint();
2438 return;
2439 }
2440
2441 const bool save_entry_point = kind == Token::kILLEGAL;
2442 if (save_entry_point) {
2443 GenerateRecordEntryPoint(assembler);
2444 }
2445
2446 if (optimized == kOptimized) {
2448 } else {
2449 GenerateUsageCounterIncrement(/* scratch */ RCX);
2450 }
2451
2452 ASSERT(num_args == 1 || num_args == 2);
2453#if defined(DEBUG)
2454 {
2455 Label ok;
2456 // Check that the IC data array has NumArgsTested() == num_args.
2457 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2458 __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
2459 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2460 __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
2461 __ cmpq(RCX, Immediate(num_args));
2462     __ j(EQUAL, &ok, Assembler::kNearJump);
2463     __ Stop("Incorrect stub for IC data");
2464 __ Bind(&ok);
2465 }
2466#endif // DEBUG
2467
2468#if !defined(PRODUCT)
2469 Label stepping, done_stepping;
2470 if (optimized == kUnoptimized) {
2471 __ Comment("Check single stepping");
2472 __ LoadIsolate(RAX);
2473 __ cmpb(Address(RAX, target::Isolate::single_step_offset()), Immediate(0));
2474 __ j(NOT_EQUAL, &stepping);
2475 __ Bind(&done_stepping);
2476 }
2477#endif
2478
2479 Label not_smi_or_overflow;
2480 if (kind != Token::kILLEGAL) {
2481 EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
2482 }
2483 __ Bind(&not_smi_or_overflow);
2484
2485 __ Comment("Extract ICData initial values and receiver cid");
2486 // RBX: IC data object (preserved).
2487 __ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
2488 // R13: ic_data_array with check entries: classes and target functions.
2489 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
2490 // R13: points directly to the first ic data array element.
2491
2492 if (type == kInstanceCall) {
2493 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
2494 __ movq(
2497 if (num_args == 2) {
2498 __ OBJ(mov)(RCX,
2499 FieldAddress(ARGS_DESC_REG,
2501 __ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
2502 __ LoadTaggedClassIdMayBeSmi(RCX, R9);
2503 }
2504 } else {
2505 __ movq(
2508 __ OBJ(mov)(RCX, FieldAddress(ARGS_DESC_REG,
2510 __ movq(RDX, Address(RSP, RCX, TIMES_4, 0));
2511 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
2512 if (num_args == 2) {
2513 __ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
2514 __ LoadTaggedClassIdMayBeSmi(RCX, R9);
2515 }
2516 }
2517 // RAX: first argument class ID as Smi.
2518 // RCX: second argument class ID as Smi.
2519 // R10: args descriptor
2520
2521 // Loop that checks if there is an IC data match.
2522 Label loop, found, miss;
2523 __ Comment("ICData loop");
2524
2525   // The generic (kILLEGAL) variant is generated only once, so it is unrolled
2525   // more than the others.
2526 const bool optimize = kind == Token::kILLEGAL;
2527 const intptr_t target_offset =
2529 const intptr_t count_offset =
2531 const intptr_t exactness_offset =
2533
2534 __ Bind(&loop);
2535 for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
2536 Label update;
2537 __ OBJ(mov)(R9, Address(R13, 0));
2538 __ cmpq(RAX, R9); // Class id match?
2539 if (num_args == 2) {
2540 __ j(NOT_EQUAL, &update); // Continue.
2541 __ OBJ(mov)(R9, Address(R13, target::kCompressedWordSize));
2542 // R9: next class ID to check (smi).
2543 __ cmpq(RCX, R9); // Class id match?
2544 }
2545 __ j(EQUAL, &found); // Break.
2546
2547 __ Bind(&update);
2548
2549 const intptr_t entry_size = target::ICData::TestEntryLengthFor(
2550 num_args, exactness == kCheckExactness) *
2551         target::kCompressedWordSize;
2552     __ addq(R13, Immediate(entry_size));  // Next entry.
2553
2554 __ cmpq(R9, Immediate(target::ToRawSmi(kIllegalCid))); // Done?
2555 if (unroll == 0) {
2556 __ j(NOT_EQUAL, &loop);
2557 } else {
2558 __ j(EQUAL, &miss);
2559 }
2560 }
2561
2562 __ Bind(&miss);
2563 __ Comment("IC miss");
2564 // Compute address of arguments (first read number of arguments from
2565 // arguments descriptor array and then compute address on the stack).
2566 __ OBJ(mov)(RAX, FieldAddress(ARGS_DESC_REG,
2568 __ leaq(RAX, Address(RSP, RAX, TIMES_4, 0)); // RAX is Smi.
2569 __ EnterStubFrame();
2570 if (save_entry_point) {
2571 __ SmiTag(R8); // Entry-point offset is not Smi.
2572 __ pushq(R8); // Preserve entry point.
2573 }
2574 __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
2575 __ pushq(RBX); // Preserve IC data object.
2576 __ pushq(Immediate(0)); // Result slot.
2577 // Push call arguments.
2578 for (intptr_t i = 0; i < num_args; i++) {
2579 __ movq(RCX, Address(RAX, -target::kWordSize * i));
2580 __ pushq(RCX);
2581 }
2582 __ pushq(RBX); // Pass IC data object.
2583 __ CallRuntime(handle_ic_miss, num_args + 1);
2584 // Remove the call arguments pushed earlier, including the IC data object.
2585 for (intptr_t i = 0; i < num_args + 1; i++) {
2586 __ popq(RAX);
2587 }
2588 __ popq(FUNCTION_REG); // Pop returned function object into RAX.
2589 __ popq(RBX); // Restore IC data array.
2590 __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
2591 if (save_entry_point) {
2592 __ popq(R8); // Restore entry point.
2593 __ SmiUntag(R8); // Entry-point offset is not Smi.
2594 }
2595 __ RestoreCodePointer();
2596 __ LeaveStubFrame();
2597 Label call_target_function;
2598 if (FLAG_precompiled_mode) {
2599 GenerateDispatcherCode(assembler, &call_target_function);
2600 } else {
2601 __ jmp(&call_target_function);
2602 }
2603
2604 __ Bind(&found);
2605 // R13: Pointer to an IC data check group.
2606 Label call_target_function_through_unchecked_entry;
2607 if (exactness == kCheckExactness) {
2608 Label exactness_ok;
2609 ASSERT(num_args == 1);
2610 __ OBJ(mov)(RAX, Address(R13, exactness_offset));
2611 __ OBJ(cmp)(RAX,
2612 Immediate(target::ToRawSmi(
2614 __ j(LESS, &exactness_ok);
2615 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2616
2617 // Check trivial exactness.
2618 // Note: UntaggedICData::receivers_static_type_ is guaranteed to be not null
2619 // because we only emit calls to this stub when it is not null.
2620 __ movq(RCX,
2622 __ LoadCompressed(RCX, FieldAddress(RCX, target::Type::arguments_offset()));
2623 // RAX contains an offset to type arguments in words as a smi,
2624 // hence TIMES_4. RDX is guaranteed to be non-smi because it is expected
2625 // to have type arguments.
2626#if defined(DART_COMPRESSED_POINTERS)
2627 __ movsxd(RAX, RAX);
2628#endif
2629 __ OBJ(cmp)(RCX,
2630 FieldAddress(RDX, RAX, TIMES_COMPRESSED_HALF_WORD_SIZE, 0));
2631 __ j(EQUAL, &call_target_function_through_unchecked_entry);
2632
2633 // Update exactness state (not-exact anymore).
2634 __ OBJ(mov)(Address(R13, exactness_offset),
2635 Immediate(target::ToRawSmi(
2637 __ Bind(&exactness_ok);
2638 }
2639 __ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
2640
2641 if (FLAG_optimization_counter_threshold >= 0) {
2642 __ Comment("Update ICData counter");
2643 // Ignore overflow.
2644 __ OBJ(add)(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2645 }
2646
2647 __ Comment("Call target (via specified entry point)");
2648 __ Bind(&call_target_function);
2649 // RAX: Target function.
2650 __ LoadCompressed(
2652 if (save_entry_point) {
2653 __ addq(R8, RAX);
2654 __ jmp(Address(R8, 0));
2655 } else {
2657 }
2658
2659 if (exactness == kCheckExactness) {
2660 __ Bind(&call_target_function_through_unchecked_entry);
2661 if (FLAG_optimization_counter_threshold >= 0) {
2662 __ Comment("Update ICData counter");
2663 // Ignore overflow.
2664 __ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
2665 }
2666 __ Comment("Call target (via unchecked entry point)");
2667 __ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
2668 __ LoadCompressed(
2672 }
2673
2674#if !defined(PRODUCT)
2675 if (optimized == kUnoptimized) {
2676 __ Bind(&stepping);
2677 __ EnterStubFrame();
2678 if (type == kInstanceCall) {
2679 __ pushq(RDX); // Preserve receiver.
2680 }
2681 __ pushq(RBX); // Preserve ICData.
2682 if (save_entry_point) {
2683 __ SmiTag(R8); // Entry-point offset is not Smi.
2684 __ pushq(R8); // Preserve entry point.
2685 }
2686 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2687 if (save_entry_point) {
2688 __ popq(R8); // Restore entry point.
2689 __ SmiUntag(R8);
2690 }
2691 __ popq(RBX); // Restore ICData.
2692 if (type == kInstanceCall) {
2693 __ popq(RDX); // Restore receiver.
2694 }
2695 __ RestoreCodePointer();
2696 __ LeaveStubFrame();
2697 __ jmp(&done_stepping);
2698 }
2699#endif
2700}
2701
2702// RDX: receiver
2703// RBX: ICData
2704// RSP[0]: return address
2705void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
2707 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2709}
2710
2711// RDX: receiver
2712// RBX: ICData
2713// RSP[0]: return address
2714void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
2716 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
2718}
2719
2720// RDX: receiver
2721// RBX: ICData
2722// RSP[0]: return address
2723void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
2725 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2727}
2728
2729// RDX: receiver
2730// RBX: ICData
2731// RSP[0]: return address
2732void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
2734 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
2736}
2737
2738// RDX: receiver
2739// RBX: ICData
2740// RSP[0]: return address
2741void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
2743 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
2745}
2746
2747// RDX: receiver
2748// RBX: ICData
2749// RSP[0]: return address
2750void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
2752 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
2754}
2755
2756// RDX: receiver
2757// RBX: ICData
2758// RDI: Function
2759// RSP[0]: return address
2760void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
2762 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2764}
2765
2766// RDX: receiver
2767// RBX: ICData
2768// RDI: Function
2769// RSP[0]: return address
2770void StubCodeCompiler::
2771 GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
2773 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
2775}
2776
2777// RDX: receiver
2778// RBX: ICData
2779// RDI: Function
2780// RSP[0]: return address
2781void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
2783 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2785}
2786
2787// RBX: ICData
2788// RSP[0]: return address
2789void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
2790 GenerateRecordEntryPoint(assembler);
2791 GenerateUsageCounterIncrement(/* scratch */ RCX);
2792#if defined(DEBUG)
2793 {
2794 Label ok;
2795 // Check that the IC data array has NumArgsTested() == 0.
2796 // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
2797 __ movl(RCX, FieldAddress(RBX, target::ICData::state_bits_offset()));
2798 ASSERT(target::ICData::NumArgsTestedShift() == 0); // No shift needed.
2799 __ andq(RCX, Immediate(target::ICData::NumArgsTestedMask()));
2800 __ cmpq(RCX, Immediate(0));
2801     __ j(EQUAL, &ok, Assembler::kNearJump);
2802     __ Stop("Incorrect IC data for unoptimized static call");
2803 __ Bind(&ok);
2804 }
2805#endif // DEBUG
2806
2807#if !defined(PRODUCT)
2808 // Check single stepping.
2809 Label stepping, done_stepping;
2810 __ LoadIsolate(RAX);
2811 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
2812 __ cmpq(RAX, Immediate(0));
2813#if defined(DEBUG)
2814 static auto const kJumpLength = Assembler::kFarJump;
2815#else
2816 static auto const kJumpLength = Assembler::kNearJump;
2817#endif // DEBUG
2818 __ j(NOT_EQUAL, &stepping, kJumpLength);
2819 __ Bind(&done_stepping);
2820#endif
2821
2822 // RBX: IC data object (preserved).
2823 __ movq(R12, FieldAddress(RBX, target::ICData::entries_offset()));
2824 // R12: ic_data_array with entries: target functions and count.
2825 __ leaq(R12, FieldAddress(R12, target::Array::data_offset()));
2826 // R12: points directly to the first ic data array element.
2827 const intptr_t target_offset =
2829 const intptr_t count_offset =
2831
2832 if (FLAG_optimization_counter_threshold >= 0) {
2833 // Increment count for this call, ignore overflow.
2834 __ OBJ(add)(Address(R12, count_offset), Immediate(target::ToRawSmi(1)));
2835 }
2836
2837 // Load arguments descriptor into R10.
2838 __ movq(
2841
2842 // Get function and call it, if possible.
2843 __ LoadCompressed(FUNCTION_REG, Address(R12, target_offset));
2844 __ LoadCompressed(
2846
2847 __ addq(R8, FUNCTION_REG);
2848 __ jmp(Address(R8, 0));
2849
2850#if !defined(PRODUCT)
2851 __ Bind(&stepping);
2852 __ EnterStubFrame();
2853 __ pushq(RBX); // Preserve IC data object.
2854 __ SmiTag(R8); // Entry-point is not Smi.
2855 __ pushq(R8); // Preserve entry-point.
2856 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2857 __ popq(R8); // Restore entry-point.
2858 __ SmiUntag(R8);
2859 __ popq(RBX);
2860 __ RestoreCodePointer();
2861 __ LeaveStubFrame();
2862 __ jmp(&done_stepping, Assembler::kNearJump);
2863#endif
2864}
2865
2866// RBX: ICData
2867// RSP[0]: return address
2868void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
2869 GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
2870 Token::kILLEGAL, kUnoptimized, kStaticCall,
2872}
2873
2874// RBX: ICData
2875// RSP[0]: return address
2876void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
2878 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2880}
2881
2882// Stub for compiling a function and jumping to the compiled code.
2883// ARGS_DESC_REG: Arguments descriptor.
2884// FUNCTION_REG: Function.
2885void StubCodeCompiler::GenerateLazyCompileStub() {
2886 __ EnterStubFrame();
2887 __ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
2888 __ pushq(FUNCTION_REG); // Pass function.
2889 __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
2890 __ popq(FUNCTION_REG); // Restore function.
2891 __ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
2892 __ LeaveStubFrame();
2893
2894 __ LoadCompressed(
2896 __ movq(RCX,
2898 __ jmp(RCX);
2899}
2900
2901// RBX: Contains an ICData.
2902// TOS(0): return address (Dart code).
2903void StubCodeCompiler::GenerateICCallBreakpointStub() {
2904#if defined(PRODUCT)
2905 __ Stop("No debugging in PRODUCT mode");
2906#else
2907 __ EnterStubFrame();
2908 __ pushq(RDX); // Preserve receiver.
2909 __ pushq(RBX); // Preserve IC data.
2910 __ pushq(Immediate(0)); // Result slot.
2911 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2912 __ popq(CODE_REG); // Original stub.
2913 __ popq(RBX); // Restore IC data.
2914 __ popq(RDX); // Restore receiver.
2915 __ LeaveStubFrame();
2916
2917 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2918 __ jmp(RAX); // Jump to original stub.
2919#endif // defined(PRODUCT)
2920}
2921
2922void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
2923#if defined(PRODUCT)
2924 __ Stop("No debugging in PRODUCT mode");
2925#else
2926 __ EnterStubFrame();
2927 __ pushq(RBX); // Preserve IC data.
2928 __ pushq(Immediate(0)); // Result slot.
2929 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2930 __ popq(CODE_REG); // Original stub.
2931 __ popq(RBX); // Restore IC data.
2932 __ LeaveStubFrame();
2933
2934 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2935 __ jmp(RAX); // Jump to original stub.
2936#endif // defined(PRODUCT)
2937}
2938
2939// TOS(0): return address (Dart code).
2940void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
2941#if defined(PRODUCT)
2942 __ Stop("No debugging in PRODUCT mode");
2943#else
2944 __ EnterStubFrame();
2945 __ pushq(Immediate(0)); // Result slot.
2946 __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
2947 __ popq(CODE_REG); // Original stub.
2948 __ LeaveStubFrame();
2949
2950 __ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
2951 __ jmp(RAX); // Jump to original stub.
2952#endif // defined(PRODUCT)
2953}
2954
2955// Called only from unoptimized code.
2956void StubCodeCompiler::GenerateDebugStepCheckStub() {
2957#if defined(PRODUCT)
2958 __ Stop("No debugging in PRODUCT mode");
2959#else
2960 // Check single stepping.
2961 Label stepping, done_stepping;
2962 __ LoadIsolate(RAX);
2963 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
2964 __ cmpq(RAX, Immediate(0));
2965 __ j(NOT_EQUAL, &stepping, Assembler::kNearJump);
2966 __ Bind(&done_stepping);
2967 __ ret();
2968
2969 __ Bind(&stepping);
2970 __ EnterStubFrame();
2971 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
2972 __ LeaveStubFrame();
2973 __ jmp(&done_stepping, Assembler::kNearJump);
2974#endif // defined(PRODUCT)
2975}
2976
2977// Used to check class and type arguments. Arguments passed in registers:
2978//
2979// Input registers (all preserved, from TypeTestABI struct):
2980// - kSubtypeTestCacheReg: UntaggedSubtypeTestCache
2981// - kInstanceReg: instance to test against (must be preserved).
2982// - kDstTypeReg: destination type (for n>=7).
2983// - kInstantiatorTypeArgumentsReg : instantiator type arguments (for n>=3).
2984// - kFunctionTypeArgumentsReg : function type arguments (for n>=4).
2985// Inputs from stack:
2986// - TOS + 0: return address.
2987//
2988// Outputs (from TypeTestABI struct):
2989// - kSubtypeTestCacheResultReg: the cached result, or null if not found.
2990void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
2991 int n) {
2992 ASSERT(n >= 1);
2994 // If we need the parent function type arguments for a closure, we also need
2995 // the delayed type arguments, so this case will never happen.
2996 ASSERT(n != 5);
2997 RegisterSet saved_registers;
2998
2999 // Until we have the result, we use the result register to store the null
3000 // value for quick access. This has the side benefit of initializing the
3001 // result to null, so it only needs to be changed if found.
3003 __ LoadObject(kNullReg, NullObject());
3004
3005 // Free up additional registers needed for checks in the loop. Initially
3006 // define them as kNoRegister so any unexpected uses are caught.
3007 Register kInstanceParentFunctionTypeArgumentsReg = kNoRegister;
3008 if (n >= 5) {
3009 kInstanceParentFunctionTypeArgumentsReg = PP;
3010 saved_registers.AddRegister(kInstanceParentFunctionTypeArgumentsReg);
3011 }
3012 Register kInstanceDelayedFunctionTypeArgumentsReg = kNoRegister;
3013 if (n >= 6) {
3014 kInstanceDelayedFunctionTypeArgumentsReg = CODE_REG;
3015 saved_registers.AddRegister(kInstanceDelayedFunctionTypeArgumentsReg);
3016 }
3017
3018 // We'll replace these with actual registers if possible, but fall back to
3019 // the stack if register pressure is too great. The last two values are
3020 // used in every loop iteration, and so are more important to put in
3021 // registers if possible, whereas the first is used only when we go off
3022 // the end of the backing array (usually at most once per check).
3023 Register kCacheContentsSizeReg = kNoRegister;
3024 if (n < 5) {
3025 // Use the register we would have used for the parent function type args.
3026 kCacheContentsSizeReg = PP;
3027 saved_registers.AddRegister(kCacheContentsSizeReg);
3028 }
3029 Register kProbeDistanceReg = kNoRegister;
3030 if (n < 6) {
3031 // Use the register we would have used for the delayed type args.
3032 kProbeDistanceReg = CODE_REG;
3033 saved_registers.AddRegister(kProbeDistanceReg);
3034 }
3035 Register kCacheEntryEndReg = kNoRegister;
3036 if (n < 2) {
3037 // This register isn't in use and doesn't require saving/restoring.
3039 } else if (n < 7) {
3040 // Use the destination type, as that is the last input that might be unused.
3041 kCacheEntryEndReg = TypeTestABI::kDstTypeReg;
3042 saved_registers.AddRegister(TypeTestABI::kDstTypeReg);
3043 }
3044
3045 __ PushRegisters(saved_registers);
3046
3047 Label done;
3048 GenerateSubtypeTestCacheSearch(
3052 kInstanceParentFunctionTypeArgumentsReg,
3053 kInstanceDelayedFunctionTypeArgumentsReg, kCacheEntryEndReg,
3054 kCacheContentsSizeReg, kProbeDistanceReg,
3055 [&](Assembler* assembler, int n) {
3060 __ PopRegisters(saved_registers);
3061 __ Ret();
3062 },
3063 [&](Assembler* assembler, int n) {
3064 // We initialize kSubtypeTestCacheResultReg to null so it can be used
3065 // for null checks, so the result value is already set.
3066 __ PopRegisters(saved_registers);
3067 __ Ret();
3068 });
3069}
3070
3071 // Returns the current C stack pointer address, used for stack alignment
3072 // checks.
3073// TOS + 0: return address
3074// Result in RAX.
3075void StubCodeCompiler::GenerateGetCStackPointerStub() {
3076 __ leaq(RAX, Address(RSP, target::kWordSize));
3077 __ ret();
3078}
3079
3080// Jump to a frame on the call stack.
3081// TOS + 0: return address
3082// Arg1: program counter
3083// Arg2: stack pointer
3084// Arg3: frame_pointer
3085// Arg4: thread
3086// No Result.
3087void StubCodeCompiler::GenerateJumpToFrameStub() {
3091#if defined(USING_SHADOW_CALL_STACK)
3092#error Unimplemented
3093#endif
3094 Label exit_through_non_ffi;
3095 // Check if we exited generated code through an FFI call. If so, perform the
3096 // transition here: runtime calls normally transition back to generated code
3097 // via the destructor of TransitionGeneratedToVM/Native, which is part of the
3098 // runtime boilerplate (see DEFINE_RUNTIME_ENTRY_IMPL in runtime_entry.h).
3099 // FFI calls don't have this boilerplate or stack resource, so they have to
3100 // transition explicitly.
3101 __ cmpq(compiler::Address(
3103 compiler::Immediate(target::Thread::exit_through_ffi()));
3104 __ j(NOT_EQUAL, &exit_through_non_ffi, compiler::Assembler::kNearJump);
3105 __ TransitionNativeToGenerated(/*leave_safepoint=*/true,
3106 /*ignore_unwind_in_progress=*/true);
3107 __ Bind(&exit_through_non_ffi);
3108
3109 // Set the tag.
3110 __ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartTagId));
3111 // Clear top exit frame.
3113 Immediate(0));
3114 // Restore the pool pointer.
3115 __ RestoreCodePointer();
3116 if (FLAG_precompiled_mode) {
3118 } else {
3119 __ LoadPoolPointer(PP);
3120 }
3121 __ jmp(CallingConventions::kArg1Reg); // Jump to program counter.
3122}
3123
3124// Run an exception handler. Execution comes from JumpToFrame stub.
3125//
3126// The arguments are stored in the Thread object.
3127// No result.
3128void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
3133
3134 word offset_from_thread = 0;
3135 bool ok = target::CanLoadFromThread(NullObject(), &offset_from_thread);
3136 ASSERT(ok);
3137 __ movq(TMP, Address(THR, offset_from_thread));
3138
3139 // Load the exception from the current thread.
3140 Address exception_addr(THR, target::Thread::active_exception_offset());
3141 __ movq(kExceptionObjectReg, exception_addr);
3142 __ movq(exception_addr, TMP);
3143
3144 // Load the stacktrace from the current thread.
3145 Address stacktrace_addr(THR, target::Thread::active_stacktrace_offset());
3146 __ movq(kStackTraceObjectReg, stacktrace_addr);
3147 __ movq(stacktrace_addr, TMP);
3148
3149 __ jmp(CallingConventions::kArg1Reg); // Jump to continuation point.
3150}
3151
3152// Deoptimize a frame on the call stack before rewinding.
3153// The arguments are stored in the Thread object.
3154// No result.
3155void StubCodeCompiler::GenerateDeoptForRewindStub() {
3156 // Push zap value instead of CODE_REG.
3157 __ pushq(Immediate(kZapCodeReg));
3158
3159 // Push the deopt pc.
3160 __ pushq(Address(THR, target::Thread::resume_pc_offset()));
3161#if defined(USING_SHADOW_CALL_STACK)
3162#error Unimplemented
3163#endif
3164 GenerateDeoptimizationSequence(assembler, kEagerDeopt);
3165
3166 // After we have deoptimized, jump to the correct frame.
3167 __ EnterStubFrame();
3168 __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
3169 __ LeaveStubFrame();
3170 __ int3();
3171}
3172
3173// Calls to the runtime to optimize the given function.
3174// RDI: function to be reoptimized.
3175// ARGS_DESC_REG: argument descriptor (preserved).
3176void StubCodeCompiler::GenerateOptimizeFunctionStub() {
3178 __ EnterStubFrame();
3179 __ pushq(ARGS_DESC_REG); // Preserve args descriptor.
3180 __ pushq(Immediate(0)); // Result slot.
3181 __ pushq(RDI); // Arg0: function to optimize
3182 __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
3183 __ popq(RAX); // Discard argument.
3184 __ popq(FUNCTION_REG); // Get Function object.
3185 __ popq(ARGS_DESC_REG); // Restore argument descriptor.
3186 __ LeaveStubFrame();
3187 __ LoadCompressed(
3189 __ movq(RCX,
3191 __ jmp(RCX);
3192 __ int3();
3193}
3194
3195 // Performs an identity check (whether two object references are identical),
3196 // with special handling for boxed numbers.
3197 // Left and right are pushed on the stack.
3198 // Returns with ZF set if the operands are identical.
3199// Note: A Mint cannot contain a value that would fit in Smi.
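// Because of that, a Smi and a Mint can never be numerically equal, so Smi
// arguments can safely fall through to the plain reference compare.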
3200static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
3201 const Register left,
3202 const Register right) {
3203 Label reference_compare, done, check_mint;
3204 // If any of the arguments is Smi do reference compare.
3205 __ testq(left, Immediate(kSmiTagMask));
3206 __ j(ZERO, &reference_compare);
3207 __ testq(right, Immediate(kSmiTagMask));
3208 __ j(ZERO, &reference_compare);
3209
3210 // Value compare for two doubles.
3211 __ CompareClassId(left, kDoubleCid);
3212 __ j(NOT_EQUAL, &check_mint, Assembler::kNearJump);
3213 __ CompareClassId(right, kDoubleCid);
3214   __ j(NOT_EQUAL, &done);
3215
3216 // Double values bitwise compare.
3217 __ movq(left, FieldAddress(left, target::Double::value_offset()));
3218 __ cmpq(left, FieldAddress(right, target::Double::value_offset()));
3219   __ jmp(&done);
3220
3221 __ Bind(&check_mint);
3222 __ CompareClassId(left, kMintCid);
3223 __ j(NOT_EQUAL, &reference_compare, Assembler::kNearJump);
3224 __ CompareClassId(right, kMintCid);
3225   __ j(NOT_EQUAL, &done);
3226   __ movq(left, FieldAddress(left, target::Mint::value_offset()));
3227 __ cmpq(left, FieldAddress(right, target::Mint::value_offset()));
3228   __ jmp(&done);
3229
3230 __ Bind(&reference_compare);
3231 __ CompareObjectRegisters(left, right);
3232 __ Bind(&done);
3233}
3234
3235// Called only from unoptimized code. All relevant registers have been saved.
3236// TOS + 0: return address
3237// TOS + 1: right argument.
3238// TOS + 2: left argument.
3239// Returns ZF set.
3240void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
3241#if !defined(PRODUCT)
3242 // Check single stepping.
3243 Label stepping, done_stepping;
3244 __ LoadIsolate(RAX);
3245 __ movzxb(RAX, Address(RAX, target::Isolate::single_step_offset()));
3246 __ cmpq(RAX, Immediate(0));
3247 __ j(NOT_EQUAL, &stepping);
3248 __ Bind(&done_stepping);
3249#endif
3250
3251 const Register left = RAX;
3252 const Register right = RDX;
3253
3254 __ movq(left, Address(RSP, 2 * target::kWordSize));
3255 __ movq(right, Address(RSP, 1 * target::kWordSize));
3256 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3257 __ ret();
3258
3259#if !defined(PRODUCT)
3260 __ Bind(&stepping);
3261 __ EnterStubFrame();
3262 __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
3263 __ RestoreCodePointer();
3264 __ LeaveStubFrame();
3265 __ jmp(&done_stepping);
3266#endif
3267}
3268
3269// Called from optimized code only.
3270// TOS + 0: return address
3271// TOS + 1: right argument.
3272// TOS + 2: left argument.
3273// Returns ZF set.
3274void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
3275 const Register left = RAX;
3276 const Register right = RDX;
3277
3278 __ movq(left, Address(RSP, 2 * target::kWordSize));
3279 __ movq(right, Address(RSP, 1 * target::kWordSize));
3280 GenerateIdenticalWithNumberCheckStub(assembler, left, right);
3281 __ ret();
3282}
3283
3284// Called from megamorphic calls.
3285// RDX: receiver (passed to target)
3286// IC_DATA_REG: target::MegamorphicCache (preserved)
3287// Passed to target:
3288// FUNCTION_REG: target function
3289// CODE_REG: target Code
3290// ARGS_DESC_REG: arguments descriptor
3291void StubCodeCompiler::GenerateMegamorphicCallStub() {
3292 // Jump if receiver is a smi.
3293 Label smi_case;
3294 __ testq(RDX, Immediate(kSmiTagMask));
3295 // Jump out of line for smi case.
3296 __ j(ZERO, &smi_case, Assembler::kNearJump);
3297
3298 // Loads the cid of the object.
3299 __ LoadClassId(RAX, RDX);
3300
3301 Label cid_loaded;
3302 __ Bind(&cid_loaded);
3303 __ movq(R9,
3305 __ movq(RDI, FieldAddress(IC_DATA_REG,
3307 // R9: mask as a smi.
3308 // RDI: cache buckets array.
3309
3310 // Tag cid as a smi.
3311 __ addq(RAX, RAX);
3312
3313 // Compute the table index.
3315   // Multiply by 7 (== 8 - 1) using leaq and subq.
3316 __ leaq(RCX, Address(RAX, TIMES_8, 0));
3317 __ subq(RCX, RAX);
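  // RCX now holds cid * 7 (still Smi-tagged); it is masked below to form the
  // probe index into the cache table.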
3318
3319 Label loop;
3320 __ Bind(&loop);
3321 __ andq(RCX, R9);
3322
3323 const intptr_t base = target::Array::data_offset();
3324 // RCX is smi tagged, but table entries are two words, so TIMES_8.
3325 Label probe_failed;
3326 __ OBJ(cmp)(RAX, FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE, base));
3327 __ j(NOT_EQUAL, &probe_failed, Assembler::kNearJump);
3328
3329 Label load_target;
3330 __ Bind(&load_target);
3331 // Call the target found in the cache. For a class id match, this is a
3332 // proper target for the given name and arguments descriptor. If the
3333 // illegal class id was found, the target is a cache miss handler that can
3334 // be invoked as a normal Dart function.
3335 __ LoadCompressed(FUNCTION_REG,
3336 FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE,
3338 __ movq(ARGS_DESC_REG,
3339 FieldAddress(IC_DATA_REG,
3341 __ movq(RCX,
3343 if (!FLAG_precompiled_mode) {
3344 __ LoadCompressed(
3346 }
3347 __ jmp(RCX);
3348
3349 // Probe failed, check if it is a miss.
3350 __ Bind(&probe_failed);
3351 __ OBJ(cmp)(FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE, base),
3352 Immediate(target::ToRawSmi(kIllegalCid)));
3353 Label miss;
3354 __ j(ZERO, &miss, Assembler::kNearJump);
3355
3356 // Try next entry in the table.
3357 __ AddImmediate(RCX, Immediate(target::ToRawSmi(1)));
3358 __ jmp(&loop);
3359
3360 // Load cid for the Smi case.
3361 __ Bind(&smi_case);
3362 __ movq(RAX, Immediate(kSmiCid));
3363 __ jmp(&cid_loaded);
3364
3365 __ Bind(&miss);
3366 GenerateSwitchableCallMissStub();
3367}
3368
3369// Input:
3370// IC_DATA_REG - icdata
3371// RDX - receiver object
3372void StubCodeCompiler::GenerateICCallThroughCodeStub() {
3373 Label loop, found, miss;
3374 __ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
3375 __ movq(ARGS_DESC_REG,
3376 FieldAddress(IC_DATA_REG,
3378 __ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
3379 // R13: first IC entry
3380 __ LoadTaggedClassIdMayBeSmi(RAX, RDX);
3381 // RAX: receiver cid as Smi
3382
3383 __ Bind(&loop);
3384 __ OBJ(mov)(R9, Address(R13, 0));
3385 __ OBJ(cmp)(RAX, R9);
3386 __ j(EQUAL, &found, Assembler::kNearJump);
3387
3389 __ OBJ(test)(R9, R9);
3390 __ j(ZERO, &miss, Assembler::kNearJump);
3391
3392 const intptr_t entry_length =
3393 target::ICData::TestEntryLengthFor(1, /*tracking_exactness=*/false) *
3394       target::kCompressedWordSize;
3395   __ addq(R13, Immediate(entry_length));  // Next entry.
3396 __ jmp(&loop);
3397
3398 __ Bind(&found);
3399 if (FLAG_precompiled_mode) {
3400 const intptr_t entry_offset =
3402 __ LoadCompressed(RCX, Address(R13, entry_offset));
3403 __ jmp(FieldAddress(RCX, target::Function::entry_point_offset()));
3404 } else {
3405 const intptr_t code_offset =
3407 __ LoadCompressed(CODE_REG, Address(R13, code_offset));
3408 __ jmp(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
3409 }
3410
3411 __ Bind(&miss);
3413}
3414
3415void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
3416 Label have_cid, miss;
3417
3418 __ movq(RAX, Immediate(kSmiCid));
3419 __ movzxw(
3420 RCX,
3422 __ testq(RDX, Immediate(kSmiTagMask));
3423 __ j(ZERO, &have_cid, Assembler::kNearJump);
3424 __ LoadClassId(RAX, RDX);
3425 __ Bind(&have_cid);
3426 __ cmpq(RAX, RCX);
3428 // Note: this stub is only used in AOT mode, hence the direct (bare) call.
3429 __ jmp(
3431
3432 __ Bind(&miss);
3434}
3435
3436// Called from switchable IC calls.
3437// RDX: receiver
3438void StubCodeCompiler::GenerateSwitchableCallMissStub() {
3439 __ movq(CODE_REG,
3441 __ EnterStubFrame();
3442 __ pushq(RDX); // Preserve receiver.
3443
3444 __ pushq(Immediate(0)); // Result slot.
3445 __ pushq(Immediate(0)); // Arg0: stub out.
3446 __ pushq(RDX); // Arg1: Receiver
3447 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3448   __ popq(RBX);  // Discard the pushed receiver (Arg1).
3449 __ popq(CODE_REG); // result = stub
3450 __ popq(RBX); // result = IC
3451
3452 __ popq(RDX); // Restore receiver.
3453 __ LeaveStubFrame();
3454
3455 __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3457 __ jmp(RCX);
3458}
3459
3460// Called from switchable IC calls.
3461// RDX: receiver
3462// RBX: SingleTargetCache
3463 // Passed to target:
3464// CODE_REG: target Code object
3465void StubCodeCompiler::GenerateSingleTargetCallStub() {
3466 Label miss;
3467 __ LoadClassIdMayBeSmi(RAX, RDX);
3468 __ movzxw(R9,
3470 __ movzxw(R10,
3472 __ cmpq(RAX, R9);
3473 __ j(LESS, &miss, Assembler::kNearJump);
3474 __ cmpq(RAX, R10);
3475 __ j(GREATER, &miss, Assembler::kNearJump);
3476 __ movq(RCX,
3478 __ movq(CODE_REG,
3480 __ jmp(RCX);
3481
3482 __ Bind(&miss);
3483 __ EnterStubFrame();
3484 __ pushq(RDX); // Preserve receiver.
3485
3486 __ pushq(Immediate(0)); // Result slot.
3487 __ pushq(Immediate(0)); // Arg0: stub out
3488 __ pushq(RDX); // Arg1: Receiver
3489 __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
3490   __ popq(RBX);  // Discard the pushed receiver (Arg1).
3491 __ popq(CODE_REG); // result = stub
3492 __ popq(RBX); // result = IC
3493
3494 __ popq(RDX); // Restore receiver.
3495 __ LeaveStubFrame();
3496
3497 __ movq(RCX, FieldAddress(CODE_REG, target::Code::entry_point_offset(
3499 __ jmp(RCX);
3500}
3501
3502static ScaleFactor GetScaleFactor(intptr_t size) {
3503 switch (size) {
3504 case 1:
3505 return TIMES_1;
3506 case 2:
3507 return TIMES_2;
3508 case 4:
3509 return TIMES_4;
3510 case 8:
3511 return TIMES_8;
3512 case 16:
3513 return TIMES_16;
3514 }
3515 UNREACHABLE();
3516 return static_cast<ScaleFactor>(0);
3517}
3518
3519void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
3521 const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
3522 ScaleFactor scale_factor = GetScaleFactor(element_size);
3523
3526
3527 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3528 // Save length argument for possible runtime call, as
3529 // RAX is clobbered.
3530 Label call_runtime;
3532
3533 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, &call_runtime));
3535 /* Check that length is a positive Smi. */
3536 /* RDI: requested array length argument. */
3537 __ testq(RDI, Immediate(kSmiTagMask));
3538 __ j(NOT_ZERO, &call_runtime);
3539 __ SmiUntag(RDI);
3540 /* Check for length >= 0 && length <= max_len. */
3541 /* RDI: untagged array length. */
3542 __ cmpq(RDI, Immediate(max_len));
3543 __ j(ABOVE, &call_runtime);
3544 /* Special case for scaling by 16. */
3545 if (scale_factor == TIMES_16) {
3546 /* double length of array. */
3547 __ addq(RDI, RDI);
3548 /* only scale by 8. */
3549 scale_factor = TIMES_8;
3550 }
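    // Hardware addressing modes scale by at most 8, hence the doubling and
    // halving above.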
3551 const intptr_t fixed_size_plus_alignment_padding =
3554 __ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding));
3556 __ movq(RAX, Address(THR, target::Thread::top_offset()));
3557 __ movq(RCX, RAX);
3558
3559 /* RDI: allocation size. */
3560 __ addq(RCX, RDI);
3561 __ j(CARRY, &call_runtime);
3562
3563 /* Check if the allocation fits into the remaining space. */
3564 /* RAX: potential new object start. */
3565 /* RCX: potential next object start. */
3566 /* RDI: allocation size. */
3567 __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
3568 __ j(ABOVE_EQUAL, &call_runtime);
3569 __ CheckAllocationCanary(RAX);
3570
3571 /* Successfully allocated the object(s), now update top to point to */
3572 /* next object start and initialize the object. */
3573 __ movq(Address(THR, target::Thread::top_offset()), RCX);
3574 __ addq(RAX, Immediate(kHeapObjectTag));
3575 /* Initialize the tags. */
3576 /* RAX: new object start as a tagged pointer. */
3577 /* RCX: new object end address. */
3578 /* RDI: allocation size. */
3579 /* R13: scratch register. */
3580 {
3581 Label size_tag_overflow, done;
3582 __ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
3583 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
3584 __ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
3585 target::ObjectAlignment::kObjectAlignmentLog2));
3586 __ jmp(&done, Assembler::kNearJump);
3587
3588 __ Bind(&size_tag_overflow);
3589 __ LoadImmediate(RDI, Immediate(0));
3590 __ Bind(&done);
3591
3592 /* Get the class index and insert it into the tags. */
3593 uword tags =
3594 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
3595 __ orq(RDI, Immediate(tags));
3596 __ movq(FieldAddress(RAX, target::Object::tags_offset()),
3597 RDI); /* Tags. */
3598 }
3599 /* Set the length field. */
3600 /* RAX: new object start as a tagged pointer. */
3601 /* RCX: new object end address. */
3602 __ popq(RDI); /* Array length. */
3603 __ StoreCompressedIntoObjectNoBarrier(
3604 RAX, FieldAddress(RAX, target::TypedDataBase::length_offset()), RDI);
3605 /* Initialize all array elements to 0. */
3606 /* RAX: new object start as a tagged pointer. */
3607 /* RCX: new object end address. */
3608 /* RDI: iterator which initially points to the start of the variable */
3609 /* data area to be initialized. */
3610 /* RBX: scratch register. */
3611 __ pxor(XMM0, XMM0); /* Zero. */
3612 __ leaq(RDI, FieldAddress(RAX, target::TypedData::HeaderSize()));
3613 __ StoreInternalPointer(
3614 RAX, FieldAddress(RAX, target::PointerBase::data_offset()), RDI);
3615 Label loop;
3616 __ Bind(&loop);
3617 ASSERT(target::kObjectAlignment == kFpuRegisterSize);
3618 __ movups(Address(RDI, 0), XMM0);
3619 // Safe to only check every kObjectAlignment bytes instead of each word.
3620 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
3621 __ addq(RDI, Immediate(target::kObjectAlignment));
3622 __ cmpq(RDI, RCX);
3623 __ j(UNSIGNED_LESS, &loop);
3624
3625 __ WriteAllocationCanary(RCX); // Fix overshoot.
3626 __ ret();
3627
3628 __ Bind(&call_runtime);
3629 __ popq(AllocateTypedDataArrayABI::kLengthReg);
3630 }
3631
3632 __ EnterStubFrame();
3633 __ PushObject(Object::null_object()); // Make room for the result.
3634 __ PushImmediate(Immediate(target::ToRawSmi(cid)));
3635 __ pushq(AllocateTypedDataArrayABI::kLengthReg);
3636 __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
3637 __ Drop(2); // Drop arguments.
3638 __ popq(AllocateTypedDataArrayABI::kResultReg);
3639 __ LeaveStubFrame();
3640 __ ret();
3641}
3642
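// A minimal, compilable sketch (not part of the original listing) of the size
// arithmetic the fast path above encodes with leaq/andq; the parameter names
// are illustrative stand-ins for the target:: constants used by the stub.
namespace sketch {
inline intptr_t TypedDataAllocationSize(intptr_t untagged_length,
                                        intptr_t element_size,
                                        intptr_t header_size,
                                        intptr_t alignment) {
  // leaq RDI, [length * scale + header + alignment - 1]
  const intptr_t unaligned = untagged_length * element_size + header_size;
  // andq RDI, -alignment rounds down to the previous alignment boundary,
  // which together with the "+ alignment - 1" above rounds the size up.
  return (unaligned + alignment - 1) & -alignment;
}
}  // namespace sketch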
3643} // namespace compiler
3644
3645} // namespace dart
3646
3647#endif // defined(TARGET_ARCH_X64)