#define SHOULD_NOT_INCLUDE_RUNTIME

#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

  __ LoadFromOffset(TMP, TMP, target::Page::original_top_offset());

  rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
void StubCodeCompiler::GenerateCallToRuntimeStub() {
  const intptr_t thread_offset = target::NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = target::NativeArguments::argv_offset();
  const intptr_t retval_offset = target::NativeArguments::retval_offset();

  __ Comment("CallToRuntimeStub");
  __ lx(CODE_REG, Address(THR, target::Thread::call_to_runtime_stub_offset()));
  __ SetPrologueOffset();
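  // The stores below publish FP as the thread's top exit frame and tag the
  // transition as a runtime-call exit, so the GC and stack walker can locate
  // the last Dart frame while VM code is running.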
  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());

  __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
  __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());
  __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
  __ CompareImmediate(TMP, VMTag::kDartTagId);
  __ Stop("Not coming from Dart code.");

  __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
  __ Comment("align stack");
  ASSERT(target::NativeArguments::StructSize() == 4 * target::kWordSize);
  __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());
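  // Illustrative sketch (an assumption for this excerpt, not original code):
  // the four words populated below correspond to the C++ view
  //
  //   struct NativeArguments {
  //     Thread* thread;     // [SP + 0 * kWordSize]
  //     intptr_t argc_tag;  // [SP + 1 * kWordSize]
  //     ObjectPtr* argv;    // [SP + 2 * kWordSize]
  //     ObjectPtr* retval;  // [SP + 3 * kWordSize]
  //   };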
  ASSERT(thread_offset == 0 * target::kWordSize);
  ASSERT(argc_tag_offset == 1 * target::kWordSize);
  ASSERT(argv_offset == 2 * target::kWordSize);
  // Compute argv from the argument count in T4.
  __ slli(T2, T4, target::kWordSizeLog2);
  __ AddImmediate(T2,
                  target::frame_layout.param_end_from_fp * target::kWordSize);

  ASSERT(retval_offset == 3 * target::kWordSize);
  __ AddImmediate(T3, T2, target::kWordSize);
  __ StoreToOffset(THR, SP, thread_offset);
  __ StoreToOffset(T4, SP, argc_tag_offset);
  __ StoreToOffset(T2, SP, argv_offset);
  __ StoreToOffset(T3, SP, retval_offset);

  __ Comment("CallToRuntimeStub return");
  __ RestorePinnedRegisters();

  __ LoadImmediate(TMP, VMTag::kDartTagId);
  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());

  __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());

  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();

  __ LoadImmediate(A0, 0);
void StubCodeCompiler::GenerateSharedStubGeneric(
    bool save_fpu_registers,
    intptr_t self_code_stub_offset_from_thread,
    std::function<void()> perform_runtime_call) {
  RegisterSet all_registers;
  all_registers.AddAllNonReservedRegisters(save_fpu_registers);

  __ PushRegisters(all_registers);
  __ lx(CODE_REG, Address(THR, self_code_stub_offset_from_thread));
  perform_runtime_call();
  __ PopRegisters(all_registers);
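// GenerateSharedStub (below) wraps GenerateSharedStubGeneric with a call to a
// fixed runtime entry, optionally writing the runtime result into A0's spill
// slot so it appears in the result register once the registers are restored.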
void StubCodeCompiler::GenerateSharedStub(
    bool save_fpu_registers,
    const RuntimeEntry* target,
    intptr_t self_code_stub_offset_from_thread,
    bool store_runtime_result_in_result_register) {
  ASSERT(!store_runtime_result_in_result_register || allow_return);
  auto perform_runtime_call = [&]() {
    if (store_runtime_result_in_result_register) {
    if (store_runtime_result_in_result_register) {
      __ sx(A0, Address(FP, target::kWordSize *
                                WordOffsetFromFpToCpuRegister(A0)));
  GenerateSharedStubGeneric(save_fpu_registers,
                            self_code_stub_offset_from_thread, allow_return,
                            perform_runtime_call);
void StubCodeCompiler::GenerateEnterSafepointStub() {
  RegisterSet all_registers;
  all_registers.AddAllGeneralRegisters();

  __ PushRegisters(all_registers);
  __ ReserveAlignedFrameSpace(0);
  __ lx(TMP, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
  __ PopRegisters(all_registers);
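// Leaving a safepoint mirrors entering one, except the thread's execution
// state must first be set back to the VM state before the leave-safepoint
// runtime entry is looked up and called.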
static void GenerateExitSafepointStubCommon(Assembler* assembler,
                                            uword runtime_entry_offset) {
  RegisterSet all_registers;
  all_registers.AddAllGeneralRegisters();

  __ PushRegisters(all_registers);
  __ ReserveAlignedFrameSpace(0);

  __ LoadImmediate(TMP, target::Thread::vm_execution_state());
  __ sx(TMP, Address(THR, target::Thread::execution_state_offset()));

  __ lx(TMP, Address(THR, runtime_entry_offset));
  __ PopRegisters(all_registers);
void StubCodeCompiler::GenerateExitSafepointStub() {
  GenerateExitSafepointStubCommon(
      assembler, kExitSafepointRuntimeEntry.OffsetFromThread());

void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
  GenerateExitSafepointStubCommon(
      assembler,
      kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
  __ LoadImmediate(T1, target::Thread::exit_through_ffi());

  __ TransitionNativeToGenerated(T1, true);
  compiler::Label skip_reloc;
  InsertBSSRelocation(relocation);
  __ Bind(&skip_reloc);

  __ addi(tmp, tmp, -compiler::target::kWordSize);

  __ lx(dst, compiler::Address(tmp));

  __ add(tmp, tmp, dst);

  __ lx(dst, compiler::Address(tmp));
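  // The two dependent loads above follow the BSS indirection: the first load
  // reads the patched relocation slot to get the BSS offset, adding it to the
  // text address locates the BSS entry, and the second load yields the
  // runtime function's absolute address.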
void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
    uword function_index,
  const intptr_t code_size = __ CodeSize();
  __ AddImmediate(dst, -code_size);

  __ LoadFromOffset(dst, dst,
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)

      FfiCallbackMetadata::kNativeCallbackTrampolineSize *

  const intptr_t shared_stub_start = __ CodeSize();

  COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 2);
  __ subi(SP, SP, 9 * target::kWordSize);
  __ sx(T1, Address(SP, 8 * target::kWordSize));
  __ sx(A7, Address(SP, 7 * target::kWordSize));
  __ sx(A6, Address(SP, 6 * target::kWordSize));
  __ sx(A5, Address(SP, 5 * target::kWordSize));
  __ sx(A4, Address(SP, 4 * target::kWordSize));
  __ sx(A3, Address(SP, 3 * target::kWordSize));
  __ sx(A2, Address(SP, 2 * target::kWordSize));
  __ sx(A1, Address(SP, 1 * target::kWordSize));
  __ sx(A0, Address(SP, 0 * target::kWordSize));
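  // All integer argument registers (A0..A7) plus T1 are spilled above so the
  // callback's native arguments survive the metadata lookup that follows.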
  __ ReserveAlignedFrameSpace(2 * target::kWordSize);

#if defined(DART_TARGET_OS_FUCHSIA)
  if (FLAG_precompiled_mode) {
    const intptr_t kPCRelativeLoadOffset = 12;
    intptr_t start = __ CodeSize();
    __ lx(T1, Address(T1, kPCRelativeLoadOffset));
  GenerateLoadFfiCallbackMetadataRuntimeFunction(

  __ lx(T3, Address(SPREG, target::kWordSize));
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ lx(A1, Address(SP, 1 * target::kWordSize));
  __ lx(A2, Address(SP, 2 * target::kWordSize));
  __ lx(A3, Address(SP, 3 * target::kWordSize));
  __ lx(A4, Address(SP, 4 * target::kWordSize));
  __ lx(A5, Address(SP, 5 * target::kWordSize));
  __ lx(A6, Address(SP, 6 * target::kWordSize));
  __ lx(A7, Address(SP, 7 * target::kWordSize));
  __ lx(T1, Address(SP, 8 * target::kWordSize));
  __ addi(SP, SP, 9 * target::kWordSize);
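  // Argument registers restored in reverse of the spill above; the trampoline
  // can now continue with the original native arguments intact.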
  __ EnterFullSafepoint(T1);

  __ ReserveAlignedFrameSpace(0);

#if defined(DART_TARGET_OS_FUCHSIA)
  if (FLAG_precompiled_mode) {
    const intptr_t kPCRelativeLoadOffset = 12;
    intptr_t start = __ CodeSize();
    __ lx(T1, Address(T1, kPCRelativeLoadOffset));

  GenerateLoadFfiCallbackMetadataRuntimeFunction(

      FfiCallbackMetadata::kNativeCallbackSharedStubSize);
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
  __ CallRuntime(kDispatchTableNullErrorRuntimeEntry, 1);
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
  auto perform_runtime_call = [&]() {

    __ CallRuntime(kAllocateMintRuntimeEntry, 0);
              Address(FP, target::kWordSize *
        target::Mint::value_offset()));
              Address(FP, target::kWordSize *

    __ PushRegistersInOrder(
    __ CallRuntime(kRangeErrorRuntimeEntry, 2);

  GenerateSharedStubGeneric(
          ? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
          : target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
      false, perform_runtime_call);
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
  auto perform_runtime_call = [&]() {
    __ CallRuntime(kWriteErrorRuntimeEntry, 2);

  GenerateSharedStubGeneric(
          ? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
          : target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
      false, perform_runtime_call);
static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
  const intptr_t thread_offset = target::NativeArguments::thread_offset();
  const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
  const intptr_t argv_offset = target::NativeArguments::argv_offset();
  const intptr_t retval_offset = target::NativeArguments::retval_offset();
  __ StoreToOffset(FP, THR, target::Thread::top_exit_frame_info_offset());

  __ LoadImmediate(TMP, target::Thread::exit_through_runtime_call());
  __ StoreToOffset(TMP, THR, target::Thread::exit_through_ffi_offset());

  __ LoadFromOffset(TMP, THR, target::Thread::vm_tag_offset());
  __ CompareImmediate(TMP, VMTag::kDartTagId);
  __ Stop("Not coming from Dart code.");

  __ StoreToOffset(T5, THR, target::Thread::vm_tag_offset());
  __ ReserveAlignedFrameSpace(target::NativeArguments::StructSize());

  ASSERT(thread_offset == 0 * target::kWordSize);
  ASSERT(argc_tag_offset == 1 * target::kWordSize);
  ASSERT(argv_offset == 2 * target::kWordSize);
  ASSERT(retval_offset == 3 * target::kWordSize);
  __ AddImmediate(
      T3, FP, (target::frame_layout.param_end_from_fp + 1) * target::kWordSize);
  __ StoreToOffset(THR, SP, thread_offset);
  __ StoreToOffset(T1, SP, argc_tag_offset);
  __ StoreToOffset(T2, SP, argv_offset);
  __ StoreToOffset(T3, SP, retval_offset);
  __ RestorePinnedRegisters();

  __ LoadImmediate(TMP, VMTag::kDartTagId);
  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());

  __ StoreToOffset(ZR, THR, target::Thread::exit_through_ffi_offset());

  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
          target::Thread::no_scope_native_wrapper_entry_point_offset()));

void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
  GenerateCallNativeWithWrapperStub(
          target::Thread::auto_scope_native_wrapper_entry_point_offset()));

void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
  GenerateCallNativeWithWrapperStub(
          target::Thread::bootstrap_native_wrapper_entry_point_offset()));
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
  __ subi(SP, SP, 2 * target::kWordSize);
        Address(SP, 1 * target::kWordSize));
  __ sx(ZR, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
        Address(SP, 1 * target::kWordSize));
  __ addi(SP, SP, 2 * target::kWordSize);

  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
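  // At this point CODE_REG holds the freshly patched Code object; jumping to
  // its entry point re-executes the original call against the resolved target.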
void StubCodeCompiler::GenerateFixCallersTargetStub() {
  __ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
        Address(THR, target::Thread::fix_callers_target_code_offset()));
  __ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());

  __ Bind(&monomorphic);
        Address(THR, target::Thread::fix_callers_target_code_offset()));
  __ PushRegistersInOrder({ZR, A0, S5});
  __ CallRuntime(kFixCallersTargetMonomorphicRuntimeEntry, 2);
  __ LoadFieldFromOffset(
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
        Address(THR, target::Thread::fix_allocation_stub_code_offset()));
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());

void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
        Address(THR, target::Thread::fix_allocation_stub_code_offset()));
  __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
static void PushArrayOfArguments(Assembler* assembler) {
  __ slli(T1, T2, target::kWordSizeLog2);
      target::frame_layout.param_end_from_fp * target::kWordSize);

  Label loop, loop_exit;
  __ beqz(T2, &loop_exit);
  __ lx(T6, Address(T1, 0));
  __ addi(T1, T1, -target::kWordSize);
  __ StoreCompressedIntoObject(A0, Address(T3, 0), T6);
  __ addi(T3, T3, target::kCompressedWordSize);
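  // The loop above copies arguments out of the caller's frame (walking down
  // through T1) into the freshly allocated array (walking up through T3),
  // using barrier-aware compressed stores.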
static void GenerateDeoptimizationSequence(Assembler* assembler,
  const intptr_t saved_result_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
  const intptr_t saved_exception_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -
  const intptr_t saved_stacktrace_slot_from_fp =
      target::frame_layout.first_local_from_fp + 1 -

  __ lx(TMP, Address(FP, 0 * target::kWordSize));
  __ sx(TMP, Address(SP, i * target::kWordSize));
  __ sx(r, Address(SP, i * target::kWordSize));
  LeafRuntimeScope rt(assembler,
  __ li(A1, is_lazy ? 1 : 0);
  rt.Call(kDeoptimizeCopyFrameRuntimeEntry, 2);

  __ LoadFromOffset(T1, FP, saved_result_slot_from_fp * target::kWordSize);

  __ LoadFromOffset(T1, FP, saved_exception_slot_from_fp * target::kWordSize);
                    saved_stacktrace_slot_from_fp * target::kWordSize);
  __ RestoreCodePointer();

  __ PushRegistersInOrder({T1, T2});

  LeafRuntimeScope rt(assembler,
  rt.Call(kDeoptimizeFillFrameRuntimeEntry, 1);
      T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
      T1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
      (target::frame_layout.first_local_from_fp - 1) * target::kWordSize);

  __ RestoreCodePointer();
  __ LeaveStubFrame();
  __ EnterStubFrame();

  __ PushRegister(T1);

  __ PushRegistersInOrder({T1, T2});

  __ PushRegister(ZR);
  __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0);

  __ LeaveStubFrame();
  __ EnterStubFrame();
  __ PushRegister(ZR);
  __ PushRegister(A0);
  __ PushRegister(A1);
  __ CallRuntime(kReThrowRuntimeEntry, 3);
  __ LeaveStubFrame();
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
  __ PushRegister(TMP);
        Address(THR, target::Thread::lazy_deopt_from_return_stub_offset()));

void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
  __ PushRegister(TMP);
        Address(THR, target::Thread::lazy_deopt_from_throw_stub_offset()));

void StubCodeCompiler::GenerateDeoptimizeStub() {
  __ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
  __ EnterStubFrame();
              target::CallSiteData::arguments_descriptor_offset()));
  __ LoadCompressedSmiFieldFromOffset(
  __ AddShifted(TMP, FP, T2, target::kWordSizeLog2 - 1);
      target::frame_layout.param_end_from_fp * target::kWordSize);

  __ LoadCompressedSmiFieldFromOffset(
      T3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
  Label args_count_ok;
  __ Bind(&args_count_ok);

  PushArrayOfArguments(assembler);
  const intptr_t kNumArgs = 4;
  __ CallRuntime(kNoSuchMethodFromCallStubRuntimeEntry, kNumArgs);
  __ LeaveStubFrame();
static void GenerateDispatcherCode(Assembler* assembler,
                                   Label* call_target_function) {
  __ Comment("NoSuchMethodDispatch");
  GenerateNoSuchMethodDispatcherBody(assembler);

void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
  GenerateNoSuchMethodDispatcherBody(assembler);
void StubCodeCompiler::GenerateAllocateArrayStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    const intptr_t max_len =
    __ BranchIf(HI, &slow_case);

    const intptr_t cid = kArrayCid;
          Address(THR, target::Thread::top_offset()));
    intptr_t fixed_size_plus_alignment_padding =
        target::Array::header_size() +
    __ AddImmediate(T3, fixed_size_plus_alignment_padding);

    __ LoadFromOffset(TMP, THR, target::Thread::end_offset());

    __ sx(T4, Address(THR, target::Thread::top_offset()));

    __ StoreCompressedIntoObjectOffsetNoBarrier(
        target::Array::length_offset(),

    const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
    __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
    compiler::Label zero_tag;
    __ OrImmediate(T5, T5, tags);
                   target::Array::tags_offset());

    for (intptr_t offset = 0; offset < target::kObjectAlignment;
         offset += target::kCompressedWordSize) {
    __ addi(T3, T3, target::kObjectAlignment);
    __ WriteAllocationCanary(T4);
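  // Fast-path allocation failed or was skipped; fall through to the runtime
  // allocator, which receives the length and type arguments on the stack.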
  __ Bind(&slow_case);

  __ EnterStubFrame();
  __ subi(SP, SP, 3 * target::kWordSize);
  __ sx(ZR, Address(SP, 2 * target::kWordSize));
        Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kAllocateArrayRuntimeEntry, 2);
        Address(SP, 0 * target::kWordSize));
  __ addi(SP, SP, 3 * target::kWordSize);
  __ LeaveStubFrame();
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ Bind(&slow_case);

  GenerateSharedStub(
      true, &kAllocateMintRuntimeEntry,
      target::Thread::allocate_mint_with_fpu_regs_stub_offset(),

void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ Bind(&slow_case);
      false, &kAllocateMintRuntimeEntry,
      target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
  __ Comment("InvokeDartCodeStub");

  __ EnterFrame(1 * target::kWordSize);

  __ lx(TMP2, Address(A3, target::Thread::invoke_dart_code_stub_offset()));
  __ sx(TMP2, Address(SP, 0 * target::kWordSize));

#if defined(DART_TARGET_OS_FUCHSIA) || defined(DART_TARGET_OS_ANDROID)
  __ sx(GP, Address(A3, target::Thread::saved_shadow_call_stack_offset()));
#elif defined(USING_SHADOW_CALL_STACK)

  __ PushNativeCalleeSavedRegisters();

  __ RestorePinnedRegisters();

  __ subi(SP, SP, 4 * target::kWordSize);
  __ lx(TMP, Address(THR, target::Thread::vm_tag_offset()));
  __ sx(TMP, Address(SP, 3 * target::kWordSize));
  __ lx(TMP, Address(THR, target::Thread::top_resource_offset()));
  __ sx(ZR, Address(THR, target::Thread::top_resource_offset()));
  __ sx(TMP, Address(SP, 2 * target::kWordSize));
  __ lx(TMP, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ sx(TMP, Address(SP, 1 * target::kWordSize));
  __ lx(TMP, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ sx(TMP, Address(SP, 0 * target::kWordSize));

  ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -42);
  ASSERT_EQUAL(target::frame_layout.exit_link_slot_from_entry_fp, -30);
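  // The four words pushed above snapshot the thread's vm_tag, top_resource,
  // exit_through_ffi, and top_exit_frame_info (each cleared or retagged for
  // the Dart code about to run) so they can be restored on the way out.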
  __ EmitEntryFrameVerification();

  __ LoadImmediate(TMP, VMTag::kDartTagId);
  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());

                  target::ArgumentsDescriptor::count_offset());
                  target::ArgumentsDescriptor::type_args_len_offset());

  Label push_arguments;
  Label done_push_arguments;
  __ beqz(T5, &done_push_arguments);
  __ LoadImmediate(T2, 0);
  __ Bind(&push_arguments);
  __ lx(T3, Address(A2, 0));
  __ PushRegister(T3);
  __ addi(A2, A2, target::kWordSize);
  __ Bind(&done_push_arguments);

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();

  __ lx(A0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));

  __ Comment("InvokeDartCodeStub return");
      target::frame_layout.exit_link_slot_from_entry_fp * target::kWordSize);
  __ lx(TMP, Address(SP, 0 * target::kWordSize));
  __ sx(TMP, Address(THR, target::Thread::top_exit_frame_info_offset()));
  __ lx(TMP, Address(SP, 1 * target::kWordSize));
  __ sx(TMP, Address(THR, target::Thread::exit_through_ffi_offset()));
  __ lx(TMP, Address(SP, 2 * target::kWordSize));
  __ sx(TMP, Address(THR, target::Thread::top_resource_offset()));
  __ lx(TMP, Address(SP, 3 * target::kWordSize));
  __ sx(TMP, Address(THR, target::Thread::vm_tag_offset()));
  __ addi(SP, SP, 4 * target::kWordSize);

  __ PopNativeCalleeSavedRegisters();
static void GenerateAllocateContextSpaceStub(Assembler* assembler,
  intptr_t fixed_size_plus_alignment_padding =
      target::Context::header_size() +
  __ AddImmediate(T2, fixed_size_plus_alignment_padding);

  __ lx(A0, Address(THR, target::Thread::top_offset()));

  __ lx(TMP, Address(THR, target::Thread::end_offset()));
  __ BranchIf(CS, slow_case);
  __ CheckAllocationCanary(A0);

  __ sx(T3, Address(THR, target::Thread::top_offset()));

  const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
  __ CompareImmediate(T2, target::UntaggedObject::kSizeTagMaxSizeTag);
  compiler::Label zero_tag;
  __ BranchIf(HI, &zero_tag);

  __ OrImmediate(T3, T3, tags);
  __ StoreFieldToOffset(T3, A0, target::Object::tags_offset());

  __ StoreFieldToOffset(T1, A0, target::Context::num_variables_offset(),
void StubCodeCompiler::GenerateAllocateContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    __ StoreCompressedIntoObjectOffset(A0, target::Context::parent_offset(),

    __ addi(T3, T3, target::kCompressedWordSize);

    __ Bind(&slow_case);

  __ EnterStubFrame();

  __ PushRegister(T1);
  __ CallRuntime(kAllocateContextRuntimeEntry, 1);

  __ LeaveStubFrame();
void StubCodeCompiler::GenerateCloneContextStub() {
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ lw(T1, FieldAddress(T5, target::Context::num_variables_offset()));
    GenerateAllocateContextSpaceStub(assembler, &slow_case);

    __ LoadCompressed(T3, FieldAddress(T5, target::Context::parent_offset()));
    __ StoreCompressedIntoObjectNoBarrier(
        A0, FieldAddress(A0, target::Context::parent_offset()), T3);

    __ lx(T5, Address(T4, 0));
    __ addi(T4, T4, target::kCompressedWordSize);
    __ sx(T5, Address(T3, 0));
    __ addi(T3, T3, target::kCompressedWordSize);

    __ Bind(&slow_case);

  __ EnterStubFrame();

  __ subi(SP, SP, 2 * target::kWordSize);
  __ sx(T5, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kCloneContextRuntimeEntry, 1);
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ addi(SP, SP, 2 * target::kWordSize);

  __ LeaveStubFrame();
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
    intptr_t start = __ CodeSize();
    __ addi(SP, SP, -3 * target::kWordSize);
    __ sx(RA, Address(SP, 2 * target::kWordSize));
    __ sx(TMP, Address(SP, 1 * target::kWordSize));
    __ Call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
    __ lx(TMP, Address(SP, 1 * target::kWordSize));
    __ lx(RA, Address(SP, 2 * target::kWordSize));
    __ addi(SP, SP, 3 * target::kWordSize);
    intptr_t end = __ CodeSize();
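    // Each generated wrapper spans [start, end): it spills RA and TMP, calls
    // the shared write-barrier entry point from the thread object, and
    // restores the spilled registers, so compiled code can invoke the barrier
    // regardless of which register holds the target object.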
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
  RegisterSet spill_set((1 << T2) | (1 << T3) | (1 << T4), 0);

  __ lbu(TMP, FieldAddress(A1, target::Object::tags_offset()));
  __ lbu(TMP2, Address(THR, target::Thread::write_barrier_mask_offset()));
  __ andi(TMP, TMP, target::UntaggedObject::kIncrementalBarrierMask);
  __ beqz(TMP, &skip_marking);

  __ PushRegisters(spill_set);

  __ li(TMP2, ~(1 << target::UntaggedObject::kNotMarkedBit));

  __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kNotMarkedBit);

  __ lx(T4, Address(THR, target::Thread::marking_stack_block_offset()));
  __ slli(T3, T2, target::kWordSizeLog2);

  LeafRuntimeScope rt(assembler, 0,
  rt.Call(kMarkingStackBlockProcessRuntimeEntry, 1);

  __ PopRegisters(spill_set);
  Label add_to_remembered_set, remember_card;
  __ Bind(&skip_marking);
  __ lbu(TMP, FieldAddress(A0, target::Object::tags_offset()));
  __ lbu(TMP2, FieldAddress(A1, target::Object::tags_offset()));
  __ srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
  __ andi(TMP, TMP, target::UntaggedObject::kGenerationalBarrierMask);
  __ bnez(TMP, &add_to_remembered_set);

  __ Bind(&add_to_remembered_set);
  __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
  __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
  __ bnez(TMP2, &remember_card);

  __ lbu(TMP2, FieldAddress(A0, target::Object::tags_offset()));
  __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kCardRememberedBit);
  __ Stop("Wrong barrier!");
  __ PushRegisters(spill_set);

  __ li(TMP2, ~(1 << target::UntaggedObject::kOldAndNotRememberedBit));

  __ andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);

  __ lx(T4, Address(THR, target::Thread::store_buffer_block_offset()));
  __ slli(T3, T2, target::kWordSizeLog2);

  LeafRuntimeScope rt(assembler, 0,
  rt.Call(kStoreBufferBlockProcessRuntimeEntry, 1);

  __ PopRegisters(spill_set);
  Label remember_card_slow;

  __ Bind(&remember_card);
        Address(TMP, target::Page::card_table_offset()));
  __ beqz(TMP2, &remember_card_slow);

  __ srli(A6, A6, target::Page::kBytesPerCardLog2);
  __ srli(A6, A6, target::kBitsPerWordLog2);
  __ slli(A6, A6, target::kWordSizeLog2);

  __ Bind(&remember_card_slow);
  LeafRuntimeScope rt(assembler, 0,
  rt.Call(kRememberCardRuntimeEntry, 2);
void StubCodeCompiler::GenerateWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, /*cards=*/false);

void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
  GenerateWriteBarrierStubHelper(assembler, /*cards=*/true);
static void GenerateAllocateObjectHelper(Assembler* assembler,
                                         bool is_cls_parameterized) {
#if !defined(PRODUCT)
    __ MaybeTraceAllocation(kCidRegister, &slow_case, TMP);

    __ ExtractInstanceSizeFromTags(kInstanceSizeReg, kTagsReg);

          Address(THR, target::Thread::top_offset()));
    __ lx(kEndReg, Address(THR, target::Thread::end_offset()));

    __ CompareRegisters(kEndReg, kNewTopReg);

    __ sx(kNewTopReg, Address(THR, target::Thread::top_offset()));

                          target::Object::tags_offset()));

                     target::Instance::first_field_offset());

    for (intptr_t offset = 0; offset < target::kObjectAlignment;
         offset += target::kCompressedWordSize) {
          Address(kFieldReg, offset),
    __ addi(kFieldReg, kFieldReg, target::kObjectAlignment);
    __ bltu(kFieldReg, kNewTopReg, &loop);
    __ WriteAllocationCanary(kNewTopReg);

  if (is_cls_parameterized) {
    Label not_parameterized_case;

    __ ExtractClassIdFromTags(kClsIdReg, kTagsReg);
    __ LoadClassById(kTypeOffsetReg, kClsIdReg);
        FieldAddress(kTypeOffsetReg,
                         host_type_arguments_field_offset_in_words_offset()));
    __ slli(kTypeOffsetReg, kTypeOffsetReg, target::kWordSizeLog2);

    __ Bind(&not_parameterized_case);

  __ Bind(&slow_case);

  if (!is_cls_parameterized) {
        Address(THR,
                target::Thread::allocate_object_slow_entry_point_offset()));
void StubCodeCompiler::GenerateAllocateObjectStub() {
  GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);

void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
  GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
  if (!FLAG_precompiled_mode) {
          Address(THR, target::Thread::call_to_runtime_stub_offset()));

  __ EnterStubFrame();

  __ subi(SP, SP, 3 * target::kWordSize);
  __ sx(ZR, Address(SP, 2 * target::kWordSize));
  __ sx(A0, Address(SP, 1 * target::kWordSize));
        Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kAllocateObjectRuntimeEntry, 2);

  __ addi(SP, SP, 3 * target::kWordSize);

  __ LeaveStubFrame();
    const Code& allocate_object,
    const Code& allocat_object_parametrized) {
  classid_t cls_id = target::Class::GetId(cls);

  const bool is_cls_parameterized = target::Class::NumTypeArguments(cls) > 0;
  ASSERT(!is_cls_parameterized || target::Class::TypeArgumentsFieldOffset(
                                      cls) != target::Class::kNoTypeArguments);

  const intptr_t instance_size = target::Class::GetInstanceSize(cls);
  ASSERT(instance_size > 0);

  __ LoadImmediate(kTagsReg, tags);

  if (!FLAG_use_slow_path && FLAG_inline_alloc &&
      !target::Class::TraceAllocation(cls) &&
    RELEASE_ASSERT(target::Heap::IsAllocatableInNewSpace(instance_size));
    if (is_cls_parameterized) {
            CastHandle<Object>(allocat_object_parametrized))) {
        __ GenerateUnRelocatedPcRelativeTailCall();
        unresolved_calls->Add(new UnresolvedPcRelativeCall(
            __ CodeSize(), allocat_object_parametrized, true));
                    allocate_object_parameterized_entry_point_offset()));
        __ GenerateUnRelocatedPcRelativeTailCall();
        unresolved_calls->Add(new UnresolvedPcRelativeCall(
            __ CodeSize(), allocate_object, true));
            Address(THR, target::Thread::allocate_object_entry_point_offset()));

  if (!is_cls_parameterized) {
            target::Thread::allocate_object_slow_entry_point_offset()));
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
  __ EnterStubFrame();

  __ LoadCompressedSmiFieldFromOffset(
      T2, S4, target::ArgumentsDescriptor::size_offset());
  __ AddShifted(TMP, FP, T2, target::kWordSizeLog2 - 1);
      target::frame_layout.param_end_from_fp * target::kWordSize);

  __ LoadCompressedFieldFromOffset(TMP, A0, target::Closure::function_offset());

  __ LoadCompressedSmiFieldFromOffset(
      T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
  Label args_count_ok;
  __ Bind(&args_count_ok);

  const intptr_t kNumArgs = 4;
  __ CallRuntime(kNoSuchMethodFromPrologueRuntimeEntry, kNumArgs);
  if (FLAG_precompiled_mode) {
  if (FLAG_trace_optimized_ic_calls) {
    __ Stop("Unimplemented");
  __ LoadFieldFromOffset(TMP, A6, target::Function::usage_counter_offset(),
  __ StoreFieldToOffset(TMP, A6, target::Function::usage_counter_offset(),

  if (FLAG_precompiled_mode) {
  if (FLAG_optimization_counter_threshold >= 0) {
    __ Comment("Increment function counter");
                           target::ICData::owner_offset());
    __ LoadFieldFromOffset(
        A1, func_reg, target::Function::usage_counter_offset(), kFourBytes);
    __ AddImmediate(A1, 1);
    __ StoreFieldToOffset(A1, func_reg,
                          target::Function::usage_counter_offset(), kFourBytes);
static void EmitFastSmiOp(Assembler* assembler,
                          Label* not_smi_or_overflow) {
  __ Comment("Fast Smi op");
  __ lx(A0, Address(SP, +1 * target::kWordSize));
  __ lx(A1, Address(SP, +0 * target::kWordSize));
  __ bnez(TMP2, not_smi_or_overflow);
  __ AddBranchOverflow(A0, A0, A1, not_smi_or_overflow);

  Label load_true, done;
  __ Bind(&load_true);

  Label load_true, done;
  __ Bind(&load_true);
  __ LoadFieldFromOffset(A6, IC_DATA_REG, target::ICData::entries_offset());

  __ LoadCompressedSmiFromOffset(TMP, A6, 0);
  __ CompareImmediate(TMP, imm_smi_cid);
  __ LoadCompressedSmiFromOffset(TMP, A6, target::kCompressedWordSize);
  __ CompareImmediate(TMP, imm_smi_cid);
  __ Stop("Incorrect IC data");

  if (FLAG_optimization_counter_threshold >= 0) {
    const intptr_t count_offset =
        target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
    __ LoadCompressedSmiFromOffset(A1, A6, count_offset);
    __ StoreToOffset(A1, A6, count_offset);
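    // The load/store pair above bumps the per-entry hit count that feeds the
    // optimizer's recompilation heuristics.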
static void GenerateRecordEntryPoint(Assembler* assembler) {
  __ BindUncheckedEntryPoint();
    const RuntimeEntry& handle_ic_miss,
    Optimized optimized,
    Exactness exactness) {
  const bool save_entry_point = kind == Token::kILLEGAL;
  if (FLAG_precompiled_mode) {
  if (save_entry_point) {
  ASSERT(num_args == 1 || num_args == 2);

  ASSERT(target::ICData::NumArgsTestedShift() == 0);
  __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
  __ CompareImmediate(TMP2, num_args);
  __ Stop("Incorrect stub for IC data");
#if !defined(PRODUCT)
  Label stepping, done_stepping;
  __ Comment("Check single stepping");
  __ LoadIsolate(TMP);
  __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
  __ bnez(TMP, &stepping);
  __ Bind(&done_stepping);

  Label not_smi_or_overflow;
  if (kind != Token::kILLEGAL) {
    EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
  __ Bind(&not_smi_or_overflow);
  __ Comment("Extract ICData initial values and receiver cid");
  __ LoadFieldFromOffset(A1, IC_DATA_REG, target::ICData::entries_offset());
  __ LoadTaggedClassIdMayBeSmi(T1, A0);
              target::CallSiteData::arguments_descriptor_offset());
  if (num_args == 2) {
    __ LoadCompressedSmiFieldFromOffset(
    __ lx(A6, Address(A7, -2 * target::kWordSize));
    __ LoadTaggedClassIdMayBeSmi(T2, A6);
              target::CallSiteData::arguments_descriptor_offset());
  __ LoadCompressedSmiFieldFromOffset(
  __ lx(A6, Address(A7, -1 * target::kWordSize));
  __ LoadTaggedClassIdMayBeSmi(T1, A6);
  if (num_args == 2) {
    __ lx(A6, Address(A7, -2 * target::kWordSize));
    __ LoadTaggedClassIdMayBeSmi(T2, A6);
  const bool optimize = kind == Token::kILLEGAL;

  Label loop, found, miss;
  __ Comment("ICData loop");

  for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
    __ LoadCompressedSmiFromOffset(A7, A1, 0);
    if (num_args == 1) {
      __ LoadCompressedSmiFromOffset(A7, A1, target::kCompressedWordSize);

    const intptr_t entry_size = target::ICData::TestEntryLengthFor(
                                target::kCompressedWordSize;
    __ AddImmediate(A1, entry_size);

    __ BranchIf(NE, &loop);
    __ BranchIf(EQ, &miss);
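    // The probe above is unrolled (4x when optimizing, 2x otherwise) and
    // walks the ICData entry array linearly until a matching receiver cid is
    // found or control falls through to the miss path.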
  __ Comment("IC miss");

  __ LoadCompressedSmiFieldFromOffset(
  __ subi(A7, A7, 1 * target::kWordSize);

  __ EnterStubFrame();

  if (save_entry_point) {
    __ PushRegister(T6);

  __ PushRegister(ZR);

  for (intptr_t i = 0; i < num_args; i++) {
    __ LoadFromOffset(TMP, A7, -target::kWordSize * i);
    __ PushRegister(TMP);

  __ CallRuntime(handle_ic_miss, num_args + 1);

  __ Drop(num_args + 1);

  if (save_entry_point) {

  __ RestoreCodePointer();
  __ LeaveStubFrame();
  Label call_target_function;
  if (!FLAG_lazy_dispatchers) {
    GenerateDispatcherCode(assembler, &call_target_function);
    __ j(&call_target_function);
  const intptr_t target_offset =
      target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
  const intptr_t count_offset =
      target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
  const intptr_t exactness_offset =
      target::ICData::ExactnessIndexFor(num_args) * target::kCompressedWordSize;

  Label call_target_function_through_unchecked_entry;

  __ LoadCompressedSmi(T1, Address(A1, exactness_offset));
  __ blt(T1, TMP, &exactness_ok);
  __ beq(T1, TMP, &call_target_function_through_unchecked_entry);

      T2, FieldAddress(S5, target::ICData::receivers_static_type_offset()));
  __ LoadCompressed(T2, FieldAddress(T2, target::Type::arguments_offset()));

  __ beq(T2, TMP, &call_target_function_through_unchecked_entry);

  __ Bind(&exactness_ok);
  if (FLAG_optimization_counter_threshold >= 0) {
    __ Comment("Update caller's counter");
    __ LoadCompressedSmiFromOffset(TMP, A1, count_offset);

  __ Comment("Call target");
  __ Bind(&call_target_function);
                         target::Function::code_offset());
  if (save_entry_point) {
    __ lx(A7, Address(A7, 0));
                      target::Function::entry_point_offset());
  __ Bind(&call_target_function_through_unchecked_entry);
  if (FLAG_optimization_counter_threshold >= 0) {
    __ Comment("Update ICData counter");
    __ LoadCompressedSmiFromOffset(TMP, A1, count_offset);

  __ Comment("Call target (via unchecked entry point)");
                         target::Function::code_offset());
  __ LoadFieldFromOffset(
#if !defined(PRODUCT)
  __ EnterStubFrame();
  __ PushRegister(A0);
  if (save_entry_point) {
    __ PushRegister(T6);
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  if (save_entry_point) {
  __ RestoreCodePointer();
  __ LeaveStubFrame();
  __ j(&done_stepping);
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,

void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,

void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,

void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,

void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,

void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,

void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,

void StubCodeCompiler::
    GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
      1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,

void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
      2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,

void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
  ASSERT(target::ICData::NumArgsTestedShift() == 0);
  __ andi(TMP, TMP, target::ICData::NumArgsTestedMask());
  __ CompareImmediate(TMP, 0);
  __ Stop("Incorrect IC data for unoptimized static call");

#if !defined(PRODUCT)
  Label stepping, done_stepping;
  __ LoadIsolate(TMP);
  __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
  __ Bind(&done_stepping);

  __ LoadFieldFromOffset(A0, IC_DATA_REG, target::ICData::entries_offset());

  const intptr_t target_offset =
      target::ICData::TargetIndexFor(0) * target::kCompressedWordSize;
  const intptr_t count_offset =
      target::ICData::CountIndexFor(0) * target::kCompressedWordSize;

  if (FLAG_optimization_counter_threshold >= 0) {
    __ LoadCompressedSmiFromOffset(TMP, A0, count_offset);
    __ StoreToOffset(TMP, A0, count_offset);

              target::CallSiteData::arguments_descriptor_offset());
                       target::Function::code_offset());

#if !defined(PRODUCT)
  __ EnterStubFrame();
  __ PushRegister(T6);
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ RestoreCodePointer();
  __ LeaveStubFrame();
  __ j(&done_stepping);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {

void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
      2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,

void StubCodeCompiler::GenerateLazyCompileStub() {
  __ EnterStubFrame();
  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
  __ LeaveStubFrame();
                       target::Function::code_offset());
                    target::Function::entry_point_offset());
void StubCodeCompiler::GenerateICCallBreakpointStub() {
  __ Stop("No debugging in PRODUCT mode");
  __ EnterStubFrame();
  __ subi(SP, SP, 3 * target::kWordSize);
  __ sx(A0, Address(SP, 2 * target::kWordSize));
  __ sx(S5, Address(SP, 1 * target::kWordSize));
  __ sx(ZR, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
  __ lx(S5, Address(SP, 1 * target::kWordSize));
  __ lx(A0, Address(SP, 2 * target::kWordSize));
  __ LeaveStubFrame();
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
  __ Stop("No debugging in PRODUCT mode");
  __ EnterStubFrame();
  __ subi(SP, SP, 2 * target::kWordSize);
  __ sx(S5, Address(SP, 1 * target::kWordSize));
  __ sx(ZR, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
  __ lx(S5, Address(SP, 1 * target::kWordSize));
  __ LeaveStubFrame();
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
  __ Stop("No debugging in PRODUCT mode");
  __ EnterStubFrame();
  __ subi(SP, SP, 1 * target::kWordSize);
  __ sx(ZR, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);
  __ LeaveStubFrame();
  __ LoadFieldFromOffset(TMP, CODE_REG, target::Code::entry_point_offset());
void StubCodeCompiler::GenerateDebugStepCheckStub() {
  __ Stop("No debugging in PRODUCT mode");

  Label stepping, done_stepping;
  __ LoadFromOffset(A1, A1, target::Isolate::single_step_offset(),
  __ Bind(&done_stepping);

  __ EnterStubFrame();
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ LeaveStubFrame();
  __ j(&done_stepping);
void StubCodeCompiler::GenerateSubtypeNTestCacheStub(Assembler* assembler,
  GenerateSubtypeTestCacheSearch(
          Address(kCacheArrayReg, target::kCompressedWordSize *
                                      target::SubtypeTestCache::kTestResult));
void StubCodeCompiler::GenerateGetCStackPointerStub() {

void StubCodeCompiler::GenerateJumpToFrameStub() {
#if defined(DART_TARGET_OS_FUCHSIA) || defined(DART_TARGET_OS_ANDROID)
  __ lx(GP, Address(THR, target::Thread::saved_shadow_call_stack_offset()));
#elif defined(USING_SHADOW_CALL_STACK)

  Label exit_through_non_ffi;
                    compiler::target::Thread::exit_through_ffi_offset());
  __ LoadImmediate(TMP2, target::Thread::exit_through_ffi());
  __ bne(TMP, TMP2, &exit_through_non_ffi);
  __ TransitionNativeToGenerated(TMP, true,
  __ Bind(&exit_through_non_ffi);

  __ RestorePinnedRegisters();

  __ LoadImmediate(TMP, VMTag::kDartTagId);
  __ StoreToOffset(TMP, THR, target::Thread::vm_tag_offset());

  __ StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());

  __ RestoreCodePointer();
  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
    __ LoadPoolPointer();
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
  __ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
  __ StoreToOffset(NULL_REG, THR, target::Thread::active_exception_offset());

  __ LoadFromOffset(A1, THR, target::Thread::active_stacktrace_offset());
  __ StoreToOffset(NULL_REG, THR, target::Thread::active_stacktrace_offset());

  __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());
void StubCodeCompiler::GenerateDeoptForRewindStub() {
  __ PushRegister(TMP);
  __ LoadFromOffset(RA, THR, target::Thread::resume_pc_offset());

  __ EnterStubFrame();
  __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
  __ LeaveStubFrame();
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
  __ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
  __ EnterStubFrame();
  __ subi(SP, SP, 3 * target::kWordSize);
        Address(SP, 2 * target::kWordSize));
  __ sx(ZR, Address(SP, 1 * target::kWordSize));
  __ sx(A0, Address(SP, 0 * target::kWordSize));
  __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
        Address(SP, 2 * target::kWordSize));
  __ addi(SP, SP, 3 * target::kWordSize);
                       target::Function::code_offset());
                    target::Function::entry_point_offset());
  __ LeaveStubFrame();
static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
  Label reference_compare, check_mint, done;

  __ CompareClassId(left, kDoubleCid, TMP);

  __ lw(T0, FieldAddress(left, target::Double::value_offset()));
  __ lw(T1, FieldAddress(right, target::Double::value_offset()));
  __ lw(T0, FieldAddress(left, target::Double::value_offset() + 4));
  __ lw(T1, FieldAddress(right, target::Double::value_offset() + 4));
  __ ld(T0, FieldAddress(left, target::Double::value_offset()));
  __ ld(T1, FieldAddress(right, target::Double::value_offset()));

  __ Bind(&check_mint);

  __ lw(T0, FieldAddress(left, target::Mint::value_offset()));
  __ lw(T1, FieldAddress(right, target::Mint::value_offset()));
  __ lw(T0, FieldAddress(left, target::Mint::value_offset() + 4));
  __ lw(T1, FieldAddress(right, target::Mint::value_offset() + 4));
  __ ld(T0, FieldAddress(left, target::Mint::value_offset()));
  __ ld(T1, FieldAddress(right, target::Mint::value_offset()));

  __ Bind(&reference_compare);
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
  Label stepping, done_stepping;
  __ LoadIsolate(TMP);
  __ LoadFromOffset(TMP, TMP, target::Isolate::single_step_offset(),
  __ bnez(TMP, &stepping);
  __ Bind(&done_stepping);

  __ LoadFromOffset(left, SP, 1 * target::kWordSize);
  __ LoadFromOffset(right, SP, 0 * target::kWordSize);

#if !defined(PRODUCT)
  __ EnterStubFrame();
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ RestoreCodePointer();
  __ LeaveStubFrame();
  __ j(&done_stepping);
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
  __ LoadFromOffset(left, SP, 1 * target::kWordSize);
  __ LoadFromOffset(right, SP, 0 * target::kWordSize);
void StubCodeCompiler::GenerateMegamorphicCallStub() {
  __ BranchIfSmi(A0, &smi_case);

  __ Bind(&cid_loaded);
        FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
  __ lx(T1, FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));

  ASSERT(target::MegamorphicCache::kSpreadFactor == 7);

  const intptr_t base = target::Array::data_offset();

  __ LoadCompressedSmiFieldFromOffset(T4, TMP, base);
  __ CompareObjectRegisters(T4, T5);
  __ BranchIf(NE, &probe_failed);

  __ Bind(&load_target);
      FieldAddress(TMP, base + target::kCompressedWordSize));
  __ lx(A1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
          target::CallSiteData::arguments_descriptor_offset()));
  if (!FLAG_precompiled_mode) {

  __ Bind(&probe_failed);

  __ LoadImmediate(T5, kSmiCid);

  GenerateSwitchableCallMissStub();
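// The megamorphic cache is probed with open addressing (kSpreadFactor == 7);
// when the probe misses, control reaches the switchable-call miss stub, which
// repairs the call site through the runtime.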
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
  Label loop, found, miss;
  __ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
              target::CallSiteData::arguments_descriptor_offset()));
  __ LoadTaggedClassIdMayBeSmi(A1, A0);

  __ LoadCompressedSmi(T2, Address(T1, 0));
  __ BranchIf(EQ, &miss);

  const intptr_t entry_length =
      target::ICData::TestEntryLengthFor(1, false) *
      target::kCompressedWordSize;
  __ AddImmediate(T1, entry_length);

  if (FLAG_precompiled_mode) {
    const intptr_t entry_offset =
        target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
    __ LoadCompressed(A1, Address(T1, entry_offset));
    __ lx(A1, FieldAddress(A1, target::Function::entry_point_offset()));
    const intptr_t code_offset =
        target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
    __ lx(A1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));

  __ lx(A1, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
  __ LoadClassIdMayBeSmi(T1, A0);
        FieldAddress(S5, target::MonomorphicSmiableCall::expected_cid_offset()));
        FieldAddress(S5, target::MonomorphicSmiableCall::entrypoint_offset()));

  __ lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
        Address(THR, target::Thread::switchable_call_miss_stub_offset()));
  __ EnterStubFrame();
  __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
  __ LeaveStubFrame();

  __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
void StubCodeCompiler::GenerateSingleTargetCallStub() {
  __ LoadClassIdMayBeSmi(A1, A0);
  __ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
  __ lhu(T3, FieldAddress(S5, target::SingleTargetCache::upper_limit_offset()));

  __ lx(TMP, FieldAddress(S5, target::SingleTargetCache::entry_point_offset()));
  __ lx(CODE_REG, FieldAddress(S5, target::SingleTargetCache::target_offset()));

  __ EnterStubFrame();
  __ CallRuntime(kSwitchableCallMissRuntimeEntry, 2);
  __ LeaveStubFrame();

  __ lx(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset(
static int GetScaleFactor(intptr_t size) {

void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
  const intptr_t scale_shift = GetScaleFactor(element_size);
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ BranchIfNotSmi(T3, &call_runtime);

    if (scale_shift != 0) {
      __ slli(T3, T3, scale_shift);
    const intptr_t fixed_size_plus_alignment_padding =
        target::TypedData::HeaderSize() +
    __ AddImmediate(T3, fixed_size_plus_alignment_padding);

    __ lx(A0, Address(THR, target::Thread::top_offset()));
    __ bltu(T4, A0, &call_runtime);

    __ lx(TMP, Address(THR, target::Thread::end_offset()));
    __ bgeu(T4, TMP, &call_runtime);
    __ CheckAllocationCanary(A0);

    __ sx(T4, Address(THR, target::Thread::top_offset()));

    __ CompareImmediate(T3, target::UntaggedObject::kSizeTagMaxSizeTag);
    compiler::Label zero_tags;
    __ BranchIf(HI, &zero_tags);
        target::UntaggedObject::kTagBitsSizeTagPos -
    __ Bind(&zero_tags);

    __ OrImmediate(T5, T5, tags);
    __ sx(T5, FieldAddress(A0, target::Object::tags_offset()));

    __ StoreCompressedIntoObjectNoBarrier(
        A0, FieldAddress(A0, target::TypedDataBase::length_offset()), T3);

    __ AddImmediate(T3, A0, target::TypedData::HeaderSize() - 1);
    __ StoreInternalPointer(
        A0, FieldAddress(A0, target::PointerBase::data_offset()), T3);

    for (intptr_t offset = 0; offset < target::kObjectAlignment;
         offset += target::kWordSize) {
    __ addi(T3, T3, target::kObjectAlignment);
    __ WriteAllocationCanary(T4);

  __ Bind(&call_runtime);

  __ EnterStubFrame();
  __ PushRegister(ZR);
  __ CallRuntime(kAllocateTypedDataRuntimeEntry, 2);
  __ LeaveStubFrame();