stub_code_compiler.cc
1// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/flags.h"
7#include "vm/globals.h"
8
9// For `StubCodeCompiler::GenerateAllocateUnhandledExceptionStub`
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
15
16#include "vm/code_descriptors.h"
20#include "vm/stack_frame.h"
21
22#define __ assembler->
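// Throughout this file `__ Foo(...)` expands to `assembler->Foo(...)`, so
// every stub generator below emits its machine code through the current
// Assembler instance.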
23
24namespace dart {
25namespace compiler {
26
28 Register cpu_register) {
30
31 intptr_t slots_from_fp = target::frame_layout.param_end_from_fp + 1;
32 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
33 Register reg = static_cast<Register>(i);
34 if (reg == cpu_register) break;
35 if (RegisterSet::Contains(kDartAvailableCpuRegs, reg)) {
36 slots_from_fp++;
37 }
38 }
39 return slots_from_fp;
40}
41
42void StubCodeCompiler::GenerateInitStaticFieldStub() {
43 __ EnterStubFrame();
44 __ PushObject(NullObject()); // Make room for result.
46 __ CallRuntime(kInitStaticFieldRuntimeEntry, /*argument_count=*/1);
47 __ Drop(1);
49 __ LeaveStubFrame();
50 __ Ret();
51}
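// Runtime-call stubs in this file share a common shape: enter a stub frame,
// push a null object as a slot for the result, push the arguments, call the
// runtime entry, drop the arguments (and pop the result where it is used),
// then leave the frame and return.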
52
53void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final) {
58
59 __ EnterStubFrame();
60
61 __ Comment("Calling initializer function");
62 __ PushRegister(kFieldReg);
63 __ LoadCompressedFieldFromOffset(
64 FUNCTION_REG, kFieldReg, target::Field::initializer_function_offset());
65 if (!FLAG_precompiled_mode) {
66 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
67 target::Function::code_offset());
68 // Load a GC-safe value for the arguments descriptor (unused but tagged).
69 __ LoadImmediate(ARGS_DESC_REG, 0);
70 }
71 __ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
72 __ MoveRegister(kResultReg, CallingConventions::kReturnReg);
73 __ PopRegister(kFieldReg);
74 __ LoadStaticFieldAddress(kAddressReg, kFieldReg, kScratchReg);
75
76 Label throw_exception;
77 if (is_final) {
78 __ Comment("Checking that initializer did not set late final field");
79 __ LoadFromOffset(kScratchReg, kAddressReg, 0);
80 __ CompareObject(kScratchReg, SentinelObject());
81 __ BranchIf(NOT_EQUAL, &throw_exception);
82 }
83
84 __ StoreToOffset(kResultReg, kAddressReg, 0);
85 __ LeaveStubFrame();
86 __ Ret();
87
88 if (is_final) {
89#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
90 // We are jumping over LeaveStubFrame, so restore the LR state to match
91 // the one at the jump point.
92 __ set_lr_state(compiler::LRState::OnEntry().EnterFrame());
93#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
94 __ Bind(&throw_exception);
95 __ PushObject(NullObject()); // Make room for (unused) result.
96 __ PushRegister(kFieldReg);
97 __ CallRuntime(kLateFieldAssignedDuringInitializationErrorRuntimeEntry,
98 /*argument_count=*/1);
99 __ Breakpoint();
100 }
101}
102
103void StubCodeCompiler::GenerateInitLateStaticFieldStub() {
104 GenerateInitLateStaticFieldStub(/*is_final=*/false);
105}
106
107void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub() {
108 GenerateInitLateStaticFieldStub(/*is_final=*/true);
109}
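// For the is_final variant above, the sentinel check catches an initializer
// that assigns to the very field it is initializing: if the field no longer
// holds the sentinel once the initializer returns, the stub calls
// kLateFieldAssignedDuringInitializationErrorRuntimeEntry (which throws)
// instead of storing the result. Roughly, at the Dart level (illustrative
// example only):
//
//   late final int x = (() { x = 1; return 2; })();  // throws on first read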
110
111void StubCodeCompiler::GenerateInitInstanceFieldStub() {
112 __ EnterStubFrame();
113 __ PushObject(NullObject()); // Make room for result.
114 __ PushRegistersInOrder(
116 __ CallRuntime(kInitInstanceFieldRuntimeEntry, /*argument_count=*/2);
117 __ Drop(2);
119 __ LeaveStubFrame();
120 __ Ret();
121}
122
123void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
124 const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
128
129 __ EnterStubFrame();
130 // Save kFieldReg and kInstanceReg for later.
131 // Call initializer function.
132 __ PushRegistersInOrder({kFieldReg, kInstanceReg, kInstanceReg});
133
134 static_assert(
136 "Result is a return value from initializer");
137
138 __ LoadCompressedFieldFromOffset(
140 target::Field::initializer_function_offset());
141 if (!FLAG_precompiled_mode) {
142 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
143 target::Function::code_offset());
144 // Load a GC-safe value for the arguments descriptor (unused but tagged).
145 __ LoadImmediate(ARGS_DESC_REG, 0);
146 }
147 __ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
148 __ Drop(1); // Drop argument.
149
150 __ PopRegisterPair(kInstanceReg, kFieldReg);
151 __ LoadCompressedFieldFromOffset(
152 kScratchReg, kFieldReg, target::Field::host_offset_or_field_id_offset());
153#if defined(DART_COMPRESSED_POINTERS)
154 // TODO(compressed-pointers): Variant of LoadFieldAddressForRegOffset that
155 // ignores upper bits?
156 __ SmiUntag(kScratchReg);
157 __ SmiTag(kScratchReg);
158#endif
159 __ LoadCompressedFieldAddressForRegOffset(kAddressReg, kInstanceReg,
160 kScratchReg);
161
162 Label throw_exception;
163 if (is_final) {
164 __ LoadCompressed(kScratchReg, Address(kAddressReg, 0));
165 __ CompareObject(kScratchReg, SentinelObject());
166 __ BranchIf(NOT_EQUAL, &throw_exception);
167 }
168
169#if defined(TARGET_ARCH_IA32)
170 // On IA32, StoreIntoObject clobbers the value register, so a scratch
171 // register is passed to StoreIntoObject to preserve kResultReg.
172 __ MoveRegister(kScratchReg, InitInstanceFieldABI::kResultReg);
173 __ StoreIntoObject(kInstanceReg, Address(kAddressReg, 0), kScratchReg);
174#else
175 __ StoreCompressedIntoObject(kInstanceReg, Address(kAddressReg, 0),
177#endif // defined(TARGET_ARCH_IA32)
178
179 __ LeaveStubFrame();
180 __ Ret();
181
182 if (is_final) {
183#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
184 // We are jumping over LeaveStubFrame, so restore the LR state to match
185 // the one at the jump point.
186 __ set_lr_state(compiler::LRState::OnEntry().EnterFrame());
187#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
188 __ Bind(&throw_exception);
189 __ PushObject(NullObject()); // Make room for (unused) result.
190 __ PushRegister(kFieldReg);
191 __ CallRuntime(kLateFieldAssignedDuringInitializationErrorRuntimeEntry,
192 /*argument_count=*/1);
193 __ Breakpoint();
194 }
195}
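// The instance-field variant mirrors the static one, with two extra steps:
// the field's offset inside the instance is recovered from its
// host_offset_or_field_id Smi, and the result is stored via StoreIntoObject /
// StoreCompressedIntoObject so the write barrier is applied when needed.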
196
197void StubCodeCompiler::GenerateInitLateInstanceFieldStub() {
198 GenerateInitLateInstanceFieldStub(/*is_final=*/false);
199}
200
201void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub() {
202 GenerateInitLateInstanceFieldStub(/*is_final=*/true);
203}
204
205void StubCodeCompiler::GenerateThrowStub() {
206 __ EnterStubFrame();
207 __ PushObject(NullObject()); // Make room for (unused) result.
208 __ PushRegister(ThrowABI::kExceptionReg);
209 __ CallRuntime(kThrowRuntimeEntry, /*argument_count=*/1);
210 __ Breakpoint();
211}
212
213void StubCodeCompiler::GenerateReThrowStub() {
214 __ EnterStubFrame();
215 __ PushObject(NullObject()); // Make room for (unused) result.
216 __ PushRegistersInOrder(
218 __ PushImmediate(Smi::RawValue(0)); // Do not bypass debugger.
219 __ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/3);
220 __ Breakpoint();
221}
222
223void StubCodeCompiler::GenerateAssertBooleanStub() {
224 __ EnterStubFrame();
225 __ PushObject(NullObject()); // Make room for (unused) result.
226 __ PushRegister(AssertBooleanABI::kObjectReg);
227 __ CallRuntime(kNonBoolTypeErrorRuntimeEntry, /*argument_count=*/1);
228 __ Breakpoint();
229}
230
231void StubCodeCompiler::GenerateAssertSubtypeStub() {
232 __ EnterStubFrame();
238 __ CallRuntime(kSubtypeCheckRuntimeEntry, /*argument_count=*/5);
239 __ Drop(5); // Drop unused result as well as arguments.
240 __ LeaveStubFrame();
241 __ Ret();
242}
243
244void StubCodeCompiler::GenerateAssertAssignableStub() {
245#if !defined(TARGET_ARCH_IA32)
246 __ Breakpoint();
247#else
248 __ EnterStubFrame();
249 __ PushObject(Object::null_object()); // Make room for the result.
250 __ pushl(Address(
252 __ pushl(Address(
254 __ pushl(Address(
255 EBP,
257 __ pushl(Address(EBP, target::kWordSize *
259 __ PushRegistersInOrder({AssertAssignableStubABI::kDstNameReg,
262 __ CallRuntime(kTypeCheckRuntimeEntry, /*argument_count=*/7);
263 __ Drop(8);
264 __ LeaveStubFrame();
265 __ Ret();
266#endif
267}
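// On non-IA32 targets this stub is just a Breakpoint: those targets handle
// assignability through the type-testing stubs further down. On IA32 it
// forwards seven arguments (partly reloaded from the caller's frame via EBP,
// partly from the AssertAssignableStubABI registers) to kTypeCheckRuntimeEntry;
// the Drop(8) removes those arguments plus the result slot.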
268
269// Instantiate type arguments from instantiator and function type args.
270// Inputs:
271// - InstantiationABI::kUninstantiatedTypeArgumentsReg: tav to instantiate
272// - InstantiationABI::kInstantiatorTypeArgumentsReg: instantiator tav
273// - InstantiationABI::kFunctionTypeArgumentsReg: function tav
274// Outputs:
275// - InstantiationABI::kResultTypeArgumentsReg: instantiated tav
276// Clobbers:
277// - InstantiationABI::kScratchReg
278void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
279 // We only need the offset of the current entry until we either call the
280 // runtime or retrieve the instantiated type arguments out of the entry to
281 // put in the result register, so we use the result register to store it.
282 const Register kEntryReg = InstantiationABI::kResultTypeArgumentsReg;
283
284 // The registers that need spilling prior to traversing a hash-based cache.
285 const RegisterSet saved_registers(InstantiateTAVInternalRegs::kSavedRegisters,
286 /*fpu_register_mask=*/0);
287
290 "Must handle possibility of inst tav reg being spilled");
291 static_assert(((1 << InstantiationABI::kFunctionTypeArgumentsReg) &
293 "Must handle possibility of function tav reg being spilled");
294
295 // Takes labels for the cache hit/miss cases (to allow for restoring spilled
296 // registers).
297 auto check_entry = [&](compiler::Label* found, compiler::Label* not_found) {
298 __ Comment("Check cache entry");
299 // Use load-acquire to get the entry.
302 "sentinel is not same index as instantiator type args");
303 __ LoadAcquireCompressedFromOffset(
306 target::kCompressedWordSize);
307 // Test for an unoccupied entry by checking for the Smi sentinel.
308 __ BranchIfSmi(InstantiationABI::kScratchReg, not_found);
309 // Otherwise it must be occupied and contain TypeArguments objects.
310 compiler::Label next;
311 __ CompareRegisters(InstantiationABI::kScratchReg,
314 __ LoadCompressed(
316 compiler::Address(kEntryReg,
318 target::kCompressedWordSize));
319 __ CompareRegisters(InstantiationABI::kScratchReg,
321 __ BranchIf(EQUAL, found);
322 __ Bind(&next);
323 };
324
325 // Look up the cache before calling the runtime.
326 __ LoadAcquireCompressedFromOffset(
329 target::TypeArguments::instantiations_offset() - kHeapObjectTag);
330 // Go ahead and load the backing array data address into kEntryReg.
331 __ LoadFieldAddressForOffset(kEntryReg, InstantiationABI::kScratchReg,
332 target::Array::data_offset());
333
334 compiler::Label linear_cache_loop, hash_cache_search, cache_hit, call_runtime;
335
336 // There is a maximum size for linear caches that is smaller than the size
337 // of any hash-based cache, so we check the size of the backing array to
338 // determine if this is a linear or hash-based cache.
340 Slot::Array_length());
341 __ CompareImmediate(
344#if defined(TARGET_ARCH_IA32)
345 // We just don't have enough registers to do hash-based cache searching in a
346 // way that doesn't overly complicate the generation code, so just go to
347 // runtime.
348 __ BranchIf(GREATER, &call_runtime);
349#else
350 __ BranchIf(GREATER, &hash_cache_search);
351#endif
352
353 __ Comment("Check linear cache");
354 // Move kEntryReg to the start of the first entry.
355 __ AddImmediate(kEntryReg, TypeArguments::Cache::kHeaderSize *
356 target::kCompressedWordSize);
357 __ Bind(&linear_cache_loop);
358 check_entry(&cache_hit, &call_runtime);
359 __ AddImmediate(kEntryReg, TypeArguments::Cache::kEntrySize *
360 target::kCompressedWordSize);
361 __ Jump(&linear_cache_loop, compiler::Assembler::kNearJump);
362
363#if !defined(TARGET_ARCH_IA32)
364 __ Bind(&hash_cache_search);
365 __ Comment("Check hash-based cache");
366
367 compiler::Label pop_before_success, pop_before_failure;
368 if (!saved_registers.IsEmpty()) {
369 __ Comment("Spills due to register pressure");
370 __ PushRegisters(saved_registers);
371 }
372
373 __ Comment("Calculate address of first entry");
374 __ AddImmediate(
376 TypeArguments::Cache::kHeaderSize * target::kCompressedWordSize);
377
378 __ Comment("Calculate probe mask");
379 __ LoadAcquireCompressedFromOffset(
381 TypeArguments::Cache::kMetadataIndex * target::kCompressedWordSize);
382 __ LsrImmediate(
389 // Can use kEntryReg as scratch now until we're entering the loop.
390
391 // Retrieve the hash from the TAV. If the retrieved hash is 0, jumps to
392 // not_found, otherwise falls through.
393 auto retrieve_hash = [&](Register dst, Register src) {
394 Label is_not_null, done;
395 __ CompareObject(src, NullObject());
396 __ BranchIf(NOT_EQUAL, &is_not_null, compiler::Assembler::kNearJump);
397 __ LoadImmediate(dst, TypeArguments::kAllDynamicHash);
399 __ Bind(&is_not_null);
400 __ LoadFromSlot(dst, src, Slot::TypeArguments_hash());
401 __ SmiUntag(dst);
402 // If the retrieved hash is 0, then it hasn't been computed yet.
403 __ BranchIfZero(dst, &pop_before_failure);
404 __ Bind(&done);
405 };
406
407 __ Comment("Calculate initial probe from type argument vector hashes");
410 retrieve_hash(InstantiationABI::kScratchReg,
416 // Use the probe mask to get a valid entry index.
419
420 // Start off the probing distance at zero (will increment prior to use).
422
423 compiler::Label loop;
424 __ Bind(&loop);
425 __ Comment("Loop over hash cache entries");
426 // Convert the current entry index into the entry address.
428 __ MulImmediate(kEntryReg, TypeArguments::Cache::kEntrySize *
429 target::kCompressedWordSize);
430 __ AddRegisters(kEntryReg, InstantiateTAVInternalRegs::kEntryStartReg);
431 check_entry(&pop_before_success, &pop_before_failure);
432 // Increment the probing distance and then add it to the current entry
433 // index, then mask the result with the probe mask.
439 __ Jump(&loop);
440
441 __ Bind(&pop_before_failure);
442 if (!saved_registers.IsEmpty()) {
443 __ Comment("Restore spilled registers on cache miss");
444 __ PopRegisters(saved_registers);
445 }
446#endif
447
448 // Instantiate non-null type arguments.
449 // A runtime call to instantiate the type arguments is required.
450 __ Bind(&call_runtime);
451 __ Comment("Cache miss");
452 __ EnterStubFrame();
453#if !defined(DART_ASSEMBLER_HAS_NULL_REG)
454 __ PushObject(Object::null_object()); // Make room for the result.
455#endif
456#if defined(TARGET_ARCH_ARM)
461 "Should be ordered to push arguments with one instruction");
462#endif
463 __ PushRegistersInOrder({
464#if defined(DART_ASSEMBLER_HAS_NULL_REG)
465 NULL_REG,
466#endif
470 });
471 __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
472 __ Drop(3); // Drop 2 type vectors, and uninstantiated type.
474 __ LeaveStubFrame();
475 __ Ret();
476
477#if !defined(TARGET_ARCH_IA32)
478 __ Bind(&pop_before_success);
479 if (!saved_registers.IsEmpty()) {
480 __ Comment("Restore spilled registers on cache hit");
481 __ PopRegisters(saved_registers);
482 }
483#endif
484
485 __ Bind(&cache_hit);
486 __ Comment("Cache hit");
487 __ LoadCompressed(
489 compiler::Address(kEntryReg,
491 target::kCompressedWordSize));
492 __ Ret();
493}
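// The cache probed above is the instantiations array hanging off the
// uninstantiated TypeArguments object: a small header followed by fixed-size
// entries holding (instantiator tav, function tav, instantiated tav), where
// an unoccupied entry holds a Smi sentinel in the instantiator slot. A rough
// sketch of the linear probe (illustrative only, not the runtime's actual
// types):
//
//   for (Entry* e = first_entry; !IsSmi(e->instantiator); e++) {
//     if (e->instantiator == instantiator_tav && e->function == function_tav)
//       return e->instantiated;  // cache hit
//   }
//   // sentinel reached: cache miss, instantiate in the runtime
//
// Backing arrays larger than the maximum linear-cache size use open-addressed
// hashing instead: the initial index is derived from the two tav hashes,
// masked with the probe mask, and advanced by an increasing probe distance
// until a hit or an unoccupied (Smi) entry is found.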
494
495void StubCodeCompiler::
496 GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub() {
498 const Register kScratch2Reg = InstantiationABI::kScratchReg;
499 // Return the instantiator type arguments if their nullability is compatible
500 // for sharing; otherwise proceed to the instantiation cache lookup.
501 compiler::Label cache_lookup;
502 __ LoadCompressedSmi(
503 kScratch1Reg,
505 target::TypeArguments::nullability_offset()));
506 __ LoadCompressedSmi(
507 kScratch2Reg,
509 target::TypeArguments::nullability_offset()));
510 __ AndRegisters(kScratch2Reg, kScratch1Reg);
511 __ CompareRegisters(kScratch2Reg, kScratch1Reg);
512 __ BranchIf(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
515 __ Ret();
516
517 __ Bind(&cache_lookup);
518 GenerateInstantiateTypeArgumentsStub();
519}
520
521void StubCodeCompiler::
522 GenerateInstantiateTypeArgumentsMayShareFunctionTAStub() {
524 const Register kScratch2Reg = InstantiationABI::kScratchReg;
525 // Return the function type arguments if their nullability is compatible
526 // for sharing; otherwise proceed to the instantiation cache lookup.
527 compiler::Label cache_lookup;
528 __ LoadCompressedSmi(
529 kScratch1Reg,
531 target::TypeArguments::nullability_offset()));
532 __ LoadCompressedSmi(
533 kScratch2Reg,
534 compiler::FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
535 target::TypeArguments::nullability_offset()));
536 __ AndRegisters(kScratch2Reg, kScratch1Reg);
537 __ CompareRegisters(kScratch2Reg, kScratch1Reg);
538 __ BranchIf(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
541 __ Ret();
542
543 __ Bind(&cache_lookup);
544 GenerateInstantiateTypeArgumentsStub();
545}
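// Both MayShare* variants above implement the same fast path: compare the
// nullability bit vectors of the uninstantiated type arguments and of the
// candidate (instantiator or function) type arguments, and if they are
// compatible return the candidate vector unchanged, skipping the cache lookup
// and the possible runtime call entirely.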
546
548 __ EnterStubFrame();
549 __ PushObject(Object::null_object());
550 __ PushRegistersInOrder({InstantiateTypeABI::kTypeReg,
553 __ CallRuntime(kInstantiateTypeRuntimeEntry, /*argument_count=*/3);
554 __ Drop(3);
556 __ LeaveStubFrame();
557 __ Ret();
558}
559
561 Nullability nullability,
562 bool is_function_parameter) {
563 Label runtime_call, return_dynamic, type_parameter_value_is_not_type;
564
565 if (is_function_parameter) {
567 TypeArguments::null_object());
568 __ BranchIf(EQUAL, &return_dynamic);
569 __ LoadFieldFromOffset(
571 target::TypeParameter::index_offset(), kUnsignedTwoBytes);
572 __ LoadIndexedCompressed(InstantiateTypeABI::kResultTypeReg,
574 target::TypeArguments::types_offset(),
576 } else {
578 TypeArguments::null_object());
579 __ BranchIf(EQUAL, &return_dynamic);
580 __ LoadFieldFromOffset(
582 target::TypeParameter::index_offset(), kUnsignedTwoBytes);
583 __ LoadIndexedCompressed(InstantiateTypeABI::kResultTypeReg,
585 target::TypeArguments::types_offset(),
587 }
588
591
592 switch (nullability) {
594 __ Ret();
595 break;
597 __ CompareAbstractTypeNullabilityWith(
599 static_cast<int8_t>(Nullability::kNullable),
601 __ BranchIf(NOT_EQUAL, &runtime_call);
602 __ Ret();
603 break;
605 __ CompareAbstractTypeNullabilityWith(
607 static_cast<int8_t>(Nullability::kNonNullable),
609 __ BranchIf(EQUAL, &runtime_call);
610 __ Ret();
611 }
612
613 // The TAV was null, so the value of the type parameter is "dynamic".
614 __ Bind(&return_dynamic);
615 __ LoadObject(InstantiateTypeABI::kResultTypeReg, Type::dynamic_type());
616 __ Ret();
617
618 __ Bind(&runtime_call);
620}
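// The nullability switch above keeps the fast path only when the type looked
// up in the vector can be returned as-is for the parameter's declared
// nullability; otherwise it defers to the runtime, which produces a copy of
// the type with the correct nullability. A null type argument vector means
// the parameter resolves to `dynamic`.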
621
622void StubCodeCompiler::
623 GenerateInstantiateTypeNonNullableClassTypeParameterStub() {
625 /*is_function_parameter=*/false);
626}
627
628void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub() {
630 /*is_function_parameter=*/false);
631}
632
633void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub() {
635 /*is_function_parameter=*/false);
636}
637
638void StubCodeCompiler::
639 GenerateInstantiateTypeNonNullableFunctionTypeParameterStub() {
641 /*is_function_parameter=*/true);
642}
643
644void StubCodeCompiler::
645 GenerateInstantiateTypeNullableFunctionTypeParameterStub() {
647 /*is_function_parameter=*/true);
648}
649
650void StubCodeCompiler::
651 GenerateInstantiateTypeLegacyFunctionTypeParameterStub() {
653 /*is_function_parameter=*/true);
654}
655
656void StubCodeCompiler::GenerateInstantiateTypeStub() {
658}
659
660void StubCodeCompiler::GenerateInstanceOfStub() {
661 __ EnterStubFrame();
662 __ PushObject(NullObject()); // Make room for the result.
667 __ CallRuntime(kInstanceofRuntimeEntry, /*argument_count=*/5);
668 __ Drop(5);
670 __ LeaveStubFrame();
671 __ Ret();
672}
673
674// For use in GenerateTypeIsTopTypeForSubtyping and
675// GenerateNullIsAssignableToType.
677 Register type_reg,
678 Register scratch_reg) {
679#if defined(DEBUG)
680 compiler::Label is_type_param_or_type_or_function_type;
681 __ LoadClassIdMayBeSmi(scratch_reg, type_reg);
682 __ CompareImmediate(scratch_reg, kTypeParameterCid);
683 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
685 __ CompareImmediate(scratch_reg, kTypeCid);
686 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
688 __ CompareImmediate(scratch_reg, kFunctionTypeCid);
689 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
691 __ Stop("not a type or function type or type parameter");
692 __ Bind(&is_type_param_or_type_or_function_type);
693#endif
694}
695
696// Version of AbstractType::IsTopTypeForSubtyping() used when the type is not
697// known at compile time. Must be kept in sync.
698//
699// Inputs:
700// - TypeTestABI::kDstTypeReg: Destination type.
701//
702// Non-preserved scratch registers:
703// - TypeTestABI::kScratchReg (only on non-IA32 architectures)
704//
705// Outputs:
706// - TypeTestABI::kSubtypeTestCacheReg: 0 if the value is guaranteed assignable,
707// non-zero otherwise.
708//
709// All registers other than outputs and non-preserved scratches are preserved.
710void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub() {
711 // The only case where the original value of kSubtypeTestCacheReg is needed
712 // after the stub call is on IA32, where it's currently preserved on the stack
713 // before calling the stub (as it's also CODE_REG on that architecture), so we
714 // both use it as a scratch and clobber it for the return value.
715 const Register scratch1_reg = TypeTestABI::kSubtypeTestCacheReg;
716 // We reuse the first scratch register as the output register because we're
717 // always guaranteed to have a type in it (starting with kDstType), and all
718 // non-Smi ObjectPtrs are non-zero values.
719 const Register output_reg = scratch1_reg;
720#if defined(TARGET_ARCH_IA32)
721 // The remaining scratch registers are preserved and restored before exit on
722 // IA32. Because we have few registers to choose from (which are all used in
723 // TypeTestABI), use specific TypeTestABI registers.
725 // Preserve non-output scratch registers.
726 __ PushRegister(scratch2_reg);
727#else
728 const Register scratch2_reg = TypeTestABI::kScratchReg;
729#endif
730 static_assert(scratch1_reg != scratch2_reg,
731 "both scratch registers are the same");
732
733 compiler::Label check_top_type, is_top_type, done;
734 // Initialize scratch1_reg with the type to check (which also sets the
735 // output register to a non-zero value). scratch1_reg (and thus the output
736 // register) will always have a type in it from here on out.
737 __ MoveRegister(scratch1_reg, TypeTestABI::kDstTypeReg);
738 __ Bind(&check_top_type);
739 // scratch1_reg: Current type to check.
741 scratch2_reg);
742 compiler::Label is_type_ref;
743 __ CompareClassId(scratch1_reg, kTypeCid, scratch2_reg);
744 // Type parameters can't be top types themselves, though a particular
745 // instantiation may result in a top type.
746 // Function types cannot be top types.
747 __ BranchIf(NOT_EQUAL, &done);
748 __ LoadTypeClassId(scratch2_reg, scratch1_reg);
749 __ CompareImmediate(scratch2_reg, kDynamicCid);
750 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
751 __ CompareImmediate(scratch2_reg, kVoidCid);
752 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
753 compiler::Label unwrap_future_or;
754 __ CompareImmediate(scratch2_reg, kFutureOrCid);
755 __ BranchIf(EQUAL, &unwrap_future_or, compiler::Assembler::kNearJump);
756 __ CompareImmediate(scratch2_reg, kInstanceCid);
758 // Instance type isn't a top type if non-nullable.
759 __ CompareAbstractTypeNullabilityWith(
760 scratch1_reg, static_cast<int8_t>(Nullability::kNonNullable),
761 scratch2_reg);
763 __ Bind(&is_top_type);
764 __ LoadImmediate(output_reg, 0);
765 __ Bind(&done);
766#if defined(TARGET_ARCH_IA32)
767 // Restore preserved scratch registers.
768 __ PopRegister(scratch2_reg);
769#endif
770 __ Ret();
771 // An uncommon case, so off the main trunk of the function.
772 __ Bind(&unwrap_future_or);
773 __ LoadCompressedField(
774 scratch2_reg,
775 compiler::FieldAddress(scratch1_reg,
776 compiler::target::Type::arguments_offset()));
777 __ CompareObject(scratch2_reg, Object::null_object());
778 // If the arguments are null, then unwrapping gives dynamic, a top type.
779 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
780 __ LoadCompressedField(
781 scratch1_reg,
783 scratch2_reg, compiler::target::TypeArguments::type_at_offset(0)));
784 __ Jump(&check_top_type, compiler::Assembler::kNearJump);
785}
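// The loop above mirrors AbstractType::IsTopTypeForSubtyping(): only Type
// instances can be top types (type parameters and function types never are);
// `dynamic` and `void` are top types; FutureOr is unwrapped to its type
// argument (null arguments meaning `dynamic`) and re-checked; and an Object
// type counts as a top type only when it is not non-nullable.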
786
787// Version of Instance::NullIsAssignableTo(other, inst_tav, fun_tav) used when
788// the destination type was not known at compile time. Must be kept in sync.
789//
790// Inputs:
791// - TypeTestABI::kInstanceReg: Object to check for assignability.
792// - TypeTestABI::kDstTypeReg: Destination type.
793// - TypeTestABI::kInstantiatorTypeArgumentsReg: Instantiator TAV.
794// - TypeTestABI::kFunctionTypeArgumentsReg: Function TAV.
795//
796// Non-preserved non-output scratch registers:
797// - TypeTestABI::kScratchReg (only on non-IA32 architectures)
798//
799// Outputs:
800// - TypeTestABI::kSubtypeTestCacheReg: 0 if the value is guaranteed assignable,
801// non-zero otherwise.
802//
803// All registers other than outputs and non-preserved scratches are preserved.
804void StubCodeCompiler::GenerateNullIsAssignableToTypeStub() {
805 // The only case where the original value of kSubtypeTestCacheReg is needed
806 // after the stub call is on IA32, where it's currently preserved on the stack
807 // before calling the stub (as it's also CODE_REG on that architecture), so we
808 // both use it as a scratch to hold the current type to inspect and also
809 // clobber it for the return value.
810 const Register kCurrentTypeReg = TypeTestABI::kSubtypeTestCacheReg;
811 // We reuse the first scratch register as the output register because we're
812 // always guaranteed to have a type in it (starting with the contents of
813 // kDstTypeReg), and all non-Smi ObjectPtrs are non-zero values.
814 const Register kOutputReg = kCurrentTypeReg;
815#if defined(TARGET_ARCH_IA32)
816 // The remaining scratch registers are preserved and restored before exit on
817 // IA32. Because we have few registers to choose from (which are all used in
818 // TypeTestABI), use specific TypeTestABI registers.
820 // Preserve non-output scratch registers.
821 __ PushRegister(kScratchReg);
822#else
823 const Register kScratchReg = TypeTestABI::kScratchReg;
824#endif
825 static_assert(kCurrentTypeReg != kScratchReg,
826 "code assumes distinct scratch registers");
827
828 compiler::Label is_assignable, done;
829 // Initialize the first scratch register (and thus the output register) with
830 // the destination type. We do this before the check to ensure the output
831 // register has a non-zero value if kInstanceReg is not null.
832 __ MoveRegister(kCurrentTypeReg, TypeTestABI::kDstTypeReg);
833 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
834
835 compiler::Label check_null_assignable;
836 // Skip checking the type if not null.
837 __ BranchIf(NOT_EQUAL, &done);
838 __ Bind(&check_null_assignable);
839 // scratch1_reg: Current type to check.
841 kScratchReg);
842 compiler::Label is_not_type;
843 __ CompareClassId(kCurrentTypeReg, kTypeCid, kScratchReg);
844 __ BranchIf(NOT_EQUAL, &is_not_type, compiler::Assembler::kNearJump);
845 __ CompareAbstractTypeNullabilityWith(
846 kCurrentTypeReg, static_cast<int8_t>(Nullability::kNonNullable),
847 kScratchReg);
848 __ BranchIf(NOT_EQUAL, &is_assignable);
849 // FutureOr is a special case because it may have the non-nullable bit set,
850 // but FutureOr<T> functions as the union of T and Future<T>, so it must be
851 // unwrapped to see if T is nullable.
852 __ LoadTypeClassId(kScratchReg, kCurrentTypeReg);
853 __ CompareImmediate(kScratchReg, kFutureOrCid);
854 __ BranchIf(NOT_EQUAL, &done);
855 __ LoadCompressedField(
856 kScratchReg,
857 compiler::FieldAddress(kCurrentTypeReg,
858 compiler::target::Type::arguments_offset()));
859 __ CompareObject(kScratchReg, Object::null_object());
860 // If the arguments are null, then unwrapping gives the dynamic type,
861 // which can take null.
862 __ BranchIf(EQUAL, &is_assignable);
863 __ LoadCompressedField(
864 kCurrentTypeReg,
865 compiler::FieldAddress(
866 kScratchReg, compiler::target::TypeArguments::type_at_offset(0)));
867 __ Jump(&check_null_assignable, compiler::Assembler::kNearJump);
868 __ Bind(&is_not_type);
869 // Null is assignable to a type parameter only if it is nullable or if the
870 // instantiation is nullable.
871 __ CompareAbstractTypeNullabilityWith(
872 kCurrentTypeReg, static_cast<int8_t>(Nullability::kNonNullable),
873 kScratchReg);
874 __ BranchIf(NOT_EQUAL, &is_assignable);
875
876 // Don't set kScratchReg in here as on IA32, that's the function TAV reg.
877 auto handle_case = [&](Register tav) {
878 // We can reuse kCurrentTypeReg to hold the index because we no longer
879 // need the type parameter afterwards.
880 auto const kIndexReg = kCurrentTypeReg;
881 // If the TAV is null, resolving gives the (nullable) dynamic type.
882 __ CompareObject(tav, NullObject());
883 __ BranchIf(EQUAL, &is_assignable, Assembler::kNearJump);
884 // Resolve the type parameter to its instantiated type and loop.
885 __ LoadFieldFromOffset(kIndexReg, kCurrentTypeReg,
886 target::TypeParameter::index_offset(),
888 __ LoadIndexedCompressed(kCurrentTypeReg, tav,
889 target::TypeArguments::types_offset(), kIndexReg);
890 __ Jump(&check_null_assignable);
891 };
892
893 Label function_type_param;
894 __ LoadFromSlot(kScratchReg, TypeTestABI::kDstTypeReg,
895 Slot::AbstractType_flags());
896 __ BranchIfBit(kScratchReg,
897 target::UntaggedTypeParameter::kIsFunctionTypeParameterBit,
898 NOT_ZERO, &function_type_param, Assembler::kNearJump);
900 __ Bind(&function_type_param);
901#if defined(TARGET_ARCH_IA32)
902 // Function TAV is on top of stack because we're using that register as
903 // kScratchReg.
905#endif
907
908 __ Bind(&is_assignable);
909 __ LoadImmediate(kOutputReg, 0);
910 __ Bind(&done);
911#if defined(TARGET_ARCH_IA32)
912 // Restore preserved scratch registers.
913 __ PopRegister(kScratchReg);
914#endif
915 __ Ret();
916}
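// Mirroring Instance::NullIsAssignableTo(), null is assignable exactly when
// the destination type, after unwrapping FutureOr and resolving type
// parameters through the given type argument vectors (a null vector standing
// in for `dynamic`), is not non-nullable.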
917
918#if !defined(TARGET_ARCH_IA32)
919// The <X>TypeTestStubs are used to test whether a given value is of a given
920// type. All variants have the same calling convention:
921//
922// Inputs (from TypeTestABI struct):
923// - kSubtypeTestCacheReg: RawSubtypeTestCache
924// - kInstanceReg: instance to test against.
925// - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
926// - kFunctionTypeArgumentsReg : function type arguments (if needed).
927//
928// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
929// caller.
930//
931// Output (from TypeTestABI struct):
932// - kResultReg: checked instance.
933//
934// Throws if the check is unsuccessful.
935//
936// Note of warning: the caller will not populate CODE_REG, so we have no
937// access to the object pool.
938void StubCodeCompiler::GenerateDefaultTypeTestStub() {
939 __ LoadFromOffset(CODE_REG, THR,
940 target::Thread::slow_type_test_stub_offset());
941 __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
942}
943
944// Used instead of DefaultTypeTestStub when null is assignable.
945void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
946 Label done;
947
948 // Fast case for 'null'.
949 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
950 __ BranchIf(EQUAL, &done);
951
952 __ LoadFromOffset(CODE_REG, THR,
953 target::Thread::slow_type_test_stub_offset());
954 __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
955
956 __ Bind(&done);
957 __ Ret();
958}
959
960void StubCodeCompiler::GenerateTopTypeTypeTestStub() {
961 __ Ret();
962}
963
964void StubCodeCompiler::GenerateUnreachableTypeTestStub() {
965 __ Breakpoint();
966}
967
968static void BuildTypeParameterTypeTestStub(Assembler* assembler,
969 bool allow_null) {
970 Label done;
971
972 if (allow_null) {
973 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
974 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
975 }
976
977 auto handle_case = [&](Register tav) {
978 // If the TAV is null, then resolving the type parameter gives the dynamic
979 // type, which is a top type.
980 __ CompareObject(tav, NullObject());
981 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
982 // Resolve the type parameter to its instantiated type and tail call the
983 // instantiated type's TTS.
985 target::TypeParameter::index_offset(),
987 __ LoadIndexedCompressed(TypeTestABI::kScratchReg, tav,
988 target::TypeArguments::types_offset(),
990 __ Jump(FieldAddress(
992 target::AbstractType::type_test_stub_entry_point_offset()));
993 };
994
995 Label function_type_param;
997 Slot::AbstractType_flags());
998 __ BranchIfBit(TypeTestABI::kScratchReg,
999 target::UntaggedTypeParameter::kIsFunctionTypeParameterBit,
1000 NOT_ZERO, &function_type_param, Assembler::kNearJump);
1002 __ Bind(&function_type_param);
1004 __ Bind(&done);
1005 __ Ret();
1006}
1007
1008void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub() {
1009 BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
1010}
1011
1012void StubCodeCompiler::GenerateTypeParameterTypeTestStub() {
1013 BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
1014}
1015
1017 TypeCheckMode mode) {
1018 __ PushObject(NullObject()); // Make room for result.
1022 __ PushObject(NullObject());
1024 __ PushImmediate(target::ToRawSmi(mode));
1025 __ CallRuntime(kTypeCheckRuntimeEntry, 7);
1026 __ Drop(1); // mode
1028 __ Drop(1); // dst_name
1031 __ PopRegister(TypeTestABI::kDstTypeReg);
1032 __ PopRegister(TypeTestABI::kInstanceReg);
1033 __ Drop(1); // Discard return value.
1034}
1035
1036void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
1037 __ LoadFromOffset(CODE_REG, THR,
1038 target::Thread::lazy_specialize_type_test_stub_offset());
1039 __ EnterStubFrame();
1041 __ LeaveStubFrame();
1042 __ Ret();
1043}
1044
1045// Used instead of LazySpecializeTypeTestStub when null is assignable.
1046void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
1047 Label done;
1048
1049 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
1050 __ BranchIf(EQUAL, &done);
1051
1052 __ LoadFromOffset(CODE_REG, THR,
1053 target::Thread::lazy_specialize_type_test_stub_offset());
1054 __ EnterStubFrame();
1056 __ LeaveStubFrame();
1057
1058 __ Bind(&done);
1059 __ Ret();
1060}
1061
1062void StubCodeCompiler::GenerateSlowTypeTestStub() {
1063 Label done, call_runtime;
1064
1065 if (!FLAG_precompiled_mode) {
1066 __ LoadFromOffset(CODE_REG, THR,
1067 target::Thread::slow_type_test_stub_offset());
1068 }
1069 __ EnterStubFrame();
1070
1071 // If the subtype-cache is null, it needs to be lazily-created by the runtime.
1073 __ BranchIf(EQUAL, &call_runtime);
1074
1075 // Use the number of inputs used by the STC to determine which stub to call.
1076 Label call_2, call_3, call_4, call_6;
1077 __ Comment("Check number of STC inputs");
1079 Slot::SubtypeTestCache_num_inputs());
1080 __ CompareImmediate(TypeTestABI::kScratchReg, 2);
1081 __ BranchIf(EQUAL, &call_2, Assembler::kNearJump);
1082 __ CompareImmediate(TypeTestABI::kScratchReg, 3);
1083 __ BranchIf(EQUAL, &call_3, Assembler::kNearJump);
1084 __ CompareImmediate(TypeTestABI::kScratchReg, 4);
1085 __ BranchIf(EQUAL, &call_4, Assembler::kNearJump);
1086 __ CompareImmediate(TypeTestABI::kScratchReg, 6);
1087 __ BranchIf(EQUAL, &call_6, Assembler::kNearJump);
1088 // Fall through to the all inputs case.
1089
1090 {
1091 __ Comment("Call 7 input STC check");
1094 CastHandle<Object>(TrueObject()));
1095 __ BranchIf(EQUAL, &done); // Cache said: yes.
1096 __ Jump(&call_runtime, Assembler::kNearJump);
1097 }
1098
1099 __ Bind(&call_6);
1100 {
1101 __ Comment("Call 6 input STC check");
1104 CastHandle<Object>(TrueObject()));
1105 __ BranchIf(EQUAL, &done); // Cache said: yes.
1106 __ Jump(&call_runtime, Assembler::kNearJump);
1107 }
1108
1109 __ Bind(&call_4);
1110 {
1111 __ Comment("Call 4 input STC check");
1114 CastHandle<Object>(TrueObject()));
1115 __ BranchIf(EQUAL, &done); // Cache said: yes.
1116 __ Jump(&call_runtime, Assembler::kNearJump);
1117 }
1118
1119 __ Bind(&call_3);
1120 {
1121 __ Comment("Call 3 input STC check");
1124 CastHandle<Object>(TrueObject()));
1125 __ BranchIf(EQUAL, &done); // Cache said: yes.
1126 __ Jump(&call_runtime, Assembler::kNearJump);
1127 }
1128
1129 __ Bind(&call_2);
1130 {
1131 __ Comment("Call 2 input STC check");
1134 CastHandle<Object>(TrueObject()));
1135 __ BranchIf(EQUAL, &done); // Cache said: yes.
1136 // Fall through to call_runtime.
1137 }
1138
1139 __ Bind(&call_runtime);
1140 __ Comment("Call runtime");
1141
1143
1144 __ Bind(&done);
1145 __ Comment("Done");
1146 __ LeaveStubFrame();
1147 __ Ret();
1148}
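// The slow type-test stub first makes sure a SubtypeTestCache exists (a null
// cache goes to the runtime so it can be created lazily), then dispatches on
// SubtypeTestCache_num_inputs to the cheapest matching cache probe (2, 3, 4,
// 6 or all 7 inputs); only a cache miss falls through to the runtime call.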
1149#else
1150// Type testing stubs are not implemented on IA32.
1151#define GENERATE_BREAKPOINT_STUB(Name) \
1152 void StubCodeCompiler::Generate##Name##Stub() { \
1153 __ Breakpoint(); \
1154 }
1155
1156VM_TYPE_TESTING_STUB_CODE_LIST(GENERATE_BREAKPOINT_STUB)
1157
1158#undef GENERATE_BREAKPOINT_STUB
1159#endif // !defined(TARGET_ARCH_IA32)
1160
1161// Called for inline allocation of a closure.
1162// Input (preserved):
1163// AllocateClosureABI::kFunctionReg: closure function.
1164// AllocateClosureABI::kContextReg: closure context.
1165// AllocateClosureABI::kInstantiatorTypeArgs: instantiator type arguments.
1166// Output:
1167// AllocateClosureABI::kResultReg: newly allocated Closure object.
1168// Clobbered:
1169// AllocateClosureABI::kScratchReg
1170void StubCodeCompiler::GenerateAllocateClosureStub(
1171 bool has_instantiator_type_args,
1172 bool is_generic) {
1173 const intptr_t instance_size =
1174 target::RoundedAllocationSize(target::Closure::InstanceSize());
1175 __ EnsureHasClassIdInDEBUG(kFunctionCid, AllocateClosureABI::kFunctionReg,
1177 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1178 Label slow_case;
1179 __ Comment("Inline allocation of uninitialized closure");
1180#if defined(DEBUG)
1181 // Need to account for the debug checks added by StoreToSlotNoBarrier.
1182 const auto distance = Assembler::kFarJump;
1183#else
1184 const auto distance = Assembler::kNearJump;
1185#endif
1186 __ TryAllocateObject(kClosureCid, instance_size, &slow_case, distance,
1189
1190 __ Comment("Inline initialization of allocated closure");
1191 // Put null in the scratch register for initializing most boxed fields.
1192 // We initialize the fields in offset order below.
1193 // Since the TryAllocateObject above did not go to the slow path, we're
1194 // guaranteed an object in new space here, and thus no barriers are needed.
1196 if (has_instantiator_type_args) {
1199 Slot::Closure_instantiator_type_arguments());
1200 } else {
1201 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1203 Slot::Closure_instantiator_type_arguments());
1204 }
1205 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1207 Slot::Closure_function_type_arguments());
1208 if (!is_generic) {
1209 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1211 Slot::Closure_delayed_type_arguments());
1212 }
1213 __ StoreToSlotNoBarrier(AllocateClosureABI::kFunctionReg,
1215 Slot::Closure_function());
1216 __ StoreToSlotNoBarrier(AllocateClosureABI::kContextReg,
1218 Slot::Closure_context());
1219 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1221 Slot::Closure_hash());
1222 if (is_generic) {
1224 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1226 Slot::Closure_delayed_type_arguments());
1227 }
1228#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
1229 if (FLAG_precompiled_mode) {
1230 // Set the closure entry point in precompiled mode, either to the function
1231 // entry point in bare instructions mode or to 0 otherwise (to catch
1232 // misuse). This overwrites the scratch register, but there are no more
1233 // boxed fields.
1236 Slot::Function_entry_point());
1237 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1239 Slot::Closure_entry_point());
1240 }
1241#endif
1242
1243 // AllocateClosureABI::kResultReg: new object.
1244 __ Ret();
1245
1246 __ Bind(&slow_case);
1247 }
1248
1249 __ Comment("Closure allocation via runtime");
1250 __ EnterStubFrame();
1251 __ PushObject(NullObject()); // Space on the stack for the return value.
1252 __ PushRegistersInOrder(
1254 if (has_instantiator_type_args) {
1256 } else {
1257 __ PushObject(NullObject());
1258 }
1259 if (is_generic) {
1260 __ PushObject(EmptyTypeArguments());
1261 } else {
1262 __ PushObject(NullObject());
1263 }
1264 __ CallRuntime(kAllocateClosureRuntimeEntry, 4);
1265 if (has_instantiator_type_args) {
1266 __ Drop(1);
1268 } else {
1269 __ Drop(2);
1270 }
1276 __ LeaveStubFrame();
1277
1278 // AllocateClosureABI::kResultReg: new object
1279 __ Ret();
1280}
1281
1282void StubCodeCompiler::GenerateAllocateClosureStub() {
1283 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/false,
1284 /*is_generic=*/false);
1285}
1286
1287void StubCodeCompiler::GenerateAllocateClosureGenericStub() {
1288 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/false,
1289 /*is_generic=*/true);
1290}
1291
1292void StubCodeCompiler::GenerateAllocateClosureTAStub() {
1293 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/true,
1294 /*is_generic=*/false);
1295}
1296
1297void StubCodeCompiler::GenerateAllocateClosureTAGenericStub() {
1298 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/true,
1299 /*is_generic=*/true);
1300}
1301
1302// Generates the allocation stub for the _GrowableList class.
1303// This stub exists solely for performance reasons: the default allocation
1304// stub is slower as it doesn't use specialized inline allocation.
1305void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
1306#if defined(TARGET_ARCH_IA32)
1307 // This stub is not used on IA32 because the IA32 version of
1308 // StubCodeCompiler::GenerateAllocationStubForClass uses inline
1309 // allocation. Also, the AllocateObjectSlow stub is not generated on IA32.
1310 __ Breakpoint();
1311#else
1312 const intptr_t instance_size = target::RoundedAllocationSize(
1313 target::GrowableObjectArray::InstanceSize());
1314
1315 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1316 Label slow_case;
1317 __ Comment("Inline allocation of GrowableList");
1318 __ TryAllocateObject(kGrowableObjectArrayCid, instance_size, &slow_case,
1320 /*temp_reg=*/AllocateObjectABI::kTagsReg);
1321 __ StoreIntoObjectNoBarrier(
1323 FieldAddress(AllocateObjectABI::kResultReg,
1324 target::GrowableObjectArray::type_arguments_offset()),
1326
1327 __ Ret();
1328 __ Bind(&slow_case);
1329 }
1330
1332 kGrowableObjectArrayCid, instance_size);
1333 __ LoadImmediate(AllocateObjectABI::kTagsReg, tags);
1334 __ Jump(
1335 Address(THR, target::Thread::allocate_object_slow_entry_point_offset()));
1336#endif // defined(TARGET_ARCH_IA32)
1337}
1338
1339void StubCodeCompiler::GenerateAllocateRecordStub() {
1340 const Register result_reg = AllocateRecordABI::kResultReg;
1341 const Register shape_reg = AllocateRecordABI::kShapeReg;
1342 const Register temp_reg = AllocateRecordABI::kTemp1Reg;
1343 const Register new_top_reg = AllocateRecordABI::kTemp2Reg;
1344
1345 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1346 Label slow_case;
1347
1348 // Check for allocation tracing.
1349 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kRecordCid, &slow_case, temp_reg));
1350
1351 // Extract number of fields from the shape.
1352 __ AndImmediate(
1353 temp_reg, shape_reg,
1354 compiler::target::RecordShape::kNumFieldsMask << kSmiTagShift);
1355
1356 // Compute the rounded instance size.
1357 const intptr_t fixed_size_plus_alignment_padding =
1358 (target::Record::field_offset(0) +
1360 __ AddScaled(temp_reg, temp_reg, TIMES_COMPRESSED_HALF_WORD_SIZE,
1361 fixed_size_plus_alignment_padding);
1362 __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment);
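// temp_reg now holds the rounded instance size: the record's fixed header
// size plus kCompressedWordSize per field, rounded up to kObjectAlignment
// (adding the alignment padding up front and masking with -kObjectAlignment
// is the usual round-up idiom).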
1363
1364 // Now allocate the object.
1365 __ LoadFromOffset(result_reg, THR, target::Thread::top_offset());
1366 __ MoveRegister(new_top_reg, temp_reg);
1367 __ AddRegisters(new_top_reg, result_reg);
1368 // Check if the allocation fits into the remaining space.
1369 __ CompareWithMemoryValue(new_top_reg,
1370 Address(THR, target::Thread::end_offset()));
1371 __ BranchIf(UNSIGNED_GREATER_EQUAL, &slow_case);
1372 __ CheckAllocationCanary(result_reg);
1373
1374 // Successfully allocated the object, now update top to point to
1375 // next object start and initialize the object.
1376 __ StoreToOffset(new_top_reg, THR, target::Thread::top_offset());
1377 __ AddImmediate(result_reg, kHeapObjectTag);
1378
1379 // Calculate the size tag.
1380 {
1381 Label size_tag_overflow, done;
1382 __ CompareImmediate(temp_reg, target::UntaggedObject::kSizeTagMaxSizeTag);
1383 __ BranchIf(UNSIGNED_GREATER, &size_tag_overflow, Assembler::kNearJump);
1384 __ LslImmediate(temp_reg,
1385 target::UntaggedObject::kTagBitsSizeTagPos -
1388
1389 __ Bind(&size_tag_overflow);
1390 // Set overflow size tag value.
1391 __ LoadImmediate(temp_reg, 0);
1392
1393 __ Bind(&done);
1394 uword tags = target::MakeTagWordForNewSpaceObject(kRecordCid, 0);
1395 __ OrImmediate(temp_reg, tags);
1396 __ StoreFieldToOffset(temp_reg, result_reg,
1397 target::Object::tags_offset()); // Tags.
1398 }
1399
1400 __ StoreCompressedIntoObjectNoBarrier(
1401 result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
1402 shape_reg);
1403
1404 // Initialize the remaining words of the object.
1405 {
1406 const Register field_reg = shape_reg;
1407#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
1408 defined(TARGET_ARCH_RISCV64)
1409 const Register null_reg = NULL_REG;
1410#else
1411 const Register null_reg = temp_reg;
1412 __ LoadObject(null_reg, NullObject());
1413#endif
1414
1415 Label loop, done;
1416 __ AddImmediate(field_reg, result_reg, target::Record::field_offset(0));
1417 __ CompareRegisters(field_reg, new_top_reg);
1419
1420 __ Bind(&loop);
1421 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1422 offset += target::kCompressedWordSize) {
1423 __ StoreCompressedIntoObjectNoBarrier(
1424 result_reg, FieldAddress(field_reg, offset), null_reg);
1425 }
1426 // Safe to only check every kObjectAlignment bytes instead of each word.
1427 ASSERT(kAllocationRedZoneSize >= target::kObjectAlignment);
1428 __ AddImmediate(field_reg, target::kObjectAlignment);
1429 __ CompareRegisters(field_reg, new_top_reg);
1430 __ BranchIf(UNSIGNED_LESS, &loop, Assembler::kNearJump);
1431 __ Bind(&done);
1432 }
1433
1434 __ WriteAllocationCanary(new_top_reg); // Fix overshoot.
1435 __ Ret();
1436
1437 __ Bind(&slow_case);
1438 }
1439
1440 __ EnterStubFrame();
1441 __ PushObject(NullObject()); // Space on the stack for the return value.
1442 __ PushRegister(shape_reg);
1443 __ CallRuntime(kAllocateRecordRuntimeEntry, 1);
1444 __ Drop(1);
1445 __ PopRegister(AllocateRecordABI::kResultReg);
1446
1448 __ LeaveStubFrame();
1449 __ Ret();
1450}
1451
1452void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
1453 bool has_named_fields) {
1454 ASSERT(num_fields == 2 || num_fields == 3);
1461 Label slow_case;
1462
1463 if ((num_fields > 2) && (value2_reg == kNoRegister)) {
1464 // Not implemented.
1465 __ Breakpoint();
1466 return;
1467 }
1468
1469#if defined(DEBUG)
1470 // Need to account for the debug checks added by
1471 // StoreCompressedIntoObjectNoBarrier.
1472 const auto distance = Assembler::kFarJump;
1473#else
1474 const auto distance = Assembler::kNearJump;
1475#endif
1476 __ TryAllocateObject(kRecordCid, target::Record::InstanceSize(num_fields),
1477 &slow_case, distance, result_reg, temp_reg);
1478
1479 if (!has_named_fields) {
1480 __ LoadImmediate(
1481 shape_reg, Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
1482 }
1483 __ StoreCompressedIntoObjectNoBarrier(
1484 result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
1485 shape_reg);
1486
1487 __ StoreCompressedIntoObjectNoBarrier(
1488 result_reg, FieldAddress(result_reg, target::Record::field_offset(0)),
1489 value0_reg);
1490
1491 __ StoreCompressedIntoObjectNoBarrier(
1492 result_reg, FieldAddress(result_reg, target::Record::field_offset(1)),
1493 value1_reg);
1494
1495 if (num_fields > 2) {
1496 __ StoreCompressedIntoObjectNoBarrier(
1497 result_reg, FieldAddress(result_reg, target::Record::field_offset(2)),
1498 value2_reg);
1499 }
1500
1501 __ Ret();
1502
1503 __ Bind(&slow_case);
1504
1505 __ EnterStubFrame();
1506 __ PushObject(NullObject()); // Space on the stack for the return value.
1507 if (has_named_fields) {
1508 __ PushRegister(shape_reg);
1509 } else {
1510 __ PushImmediate(
1511 Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
1512 }
1513 __ PushRegistersInOrder({value0_reg, value1_reg});
1514 if (num_fields > 2) {
1515 __ PushRegister(value2_reg);
1516 } else {
1517 __ PushObject(NullObject());
1518 }
1519 __ CallRuntime(kAllocateSmallRecordRuntimeEntry, 4);
1520 __ Drop(4);
1521 __ PopRegister(result_reg);
1522
1524 __ LeaveStubFrame();
1525 __ Ret();
1526}
1527
1528void StubCodeCompiler::GenerateAllocateRecord2Stub() {
1529 GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/false);
1530}
1531
1532void StubCodeCompiler::GenerateAllocateRecord2NamedStub() {
1533 GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/true);
1534}
1535
1536void StubCodeCompiler::GenerateAllocateRecord3Stub() {
1537 GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/false);
1538}
1539
1540void StubCodeCompiler::GenerateAllocateRecord3NamedStub() {
1541 GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/true);
1542}
1543
1544// The UnhandledException class lives in the VM isolate, so it cannot cache
1545// an allocation stub for itself. Instead, we cache it in the stub code list.
1546void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
1547 Thread* thread = Thread::Current();
1548 auto class_table = thread->isolate_group()->class_table();
1549 ASSERT(class_table->HasValidClassAt(kUnhandledExceptionCid));
1550 const auto& cls = Class::ZoneHandle(thread->zone(),
1551 class_table->At(kUnhandledExceptionCid));
1552 ASSERT(!cls.IsNull());
1553
1556}
1557
1558#define TYPED_DATA_ALLOCATION_STUB(clazz) \
1559 void StubCodeCompiler::GenerateAllocate##clazz##Stub() { \
1560 GenerateAllocateTypedDataArrayStub(kTypedData##clazz##Cid); \
1561 }
1563#undef TYPED_DATA_ALLOCATION_STUB
1564
1565void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
1566 auto perform_runtime_call = [&]() {
1568 __ CallRuntime(kLateFieldNotInitializedErrorRuntimeEntry,
1569 /*argument_count=*/1);
1570 };
1571 GenerateSharedStubGeneric(
1572 /*save_fpu_registers=*/with_fpu_regs,
1573 with_fpu_regs
1574 ? target::Thread::
1575 late_initialization_error_shared_with_fpu_regs_stub_offset()
1576 : target::Thread::
1577 late_initialization_error_shared_without_fpu_regs_stub_offset(),
1578 /*allow_return=*/false, perform_runtime_call);
1579}
1580
1581void StubCodeCompiler::
1582 GenerateLateInitializationErrorSharedWithoutFPURegsStub() {
1583 GenerateLateInitializationError(/*with_fpu_regs=*/false);
1584}
1585
1586void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub() {
1587 GenerateLateInitializationError(/*with_fpu_regs=*/true);
1588}
1589
1590void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub() {
1591 GenerateSharedStub(
1592 /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
1593 target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
1594 /*allow_return=*/false);
1595}
1596
1597void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub() {
1598 GenerateSharedStub(
1599 /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
1600 target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
1601 /*allow_return=*/false);
1602}
1603
1604void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub() {
1605 GenerateSharedStub(
1606 /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
1607 target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
1608 /*allow_return=*/false);
1609}
1610
1611void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub() {
1612 GenerateSharedStub(
1613 /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
1614 target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
1615 /*allow_return=*/false);
1616}
1617
1618void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub() {
1619 GenerateSharedStub(
1620 /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
1621 target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
1622 /*allow_return=*/false);
1623}
1624
1625void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub() {
1626 GenerateSharedStub(
1627 /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
1628 target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
1629 /*allow_return=*/false);
1630}
1631
1632void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub() {
1633 GenerateSharedStub(
1634 /*save_fpu_registers=*/false, &kInterruptOrStackOverflowRuntimeEntry,
1635 target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
1636 /*allow_return=*/true);
1637}
1638
1639void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub() {
1640 GenerateSharedStub(
1641 /*save_fpu_registers=*/true, &kInterruptOrStackOverflowRuntimeEntry,
1642 target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
1643 /*allow_return=*/true);
1644}
1645
1646void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub() {
1647 GenerateRangeError(/*with_fpu_regs=*/false);
1648}
1649
1650void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub() {
1651 GenerateRangeError(/*with_fpu_regs=*/true);
1652}
1653
1654void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub() {
1655 GenerateWriteError(/*with_fpu_regs=*/false);
1656}
1657
1658void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub() {
1659 GenerateWriteError(/*with_fpu_regs=*/true);
1660}
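// Each of the shared error stubs above comes in a WithFPURegs/WithoutFPURegs
// pair: callers pick the variant matching whether any FPU registers are live
// at the call site, and the "with" variant additionally saves and restores
// the FPU register state around the runtime call.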
1661
1662void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub() {
1663 __ Breakpoint(); // Marker stub.
1664}
1665
1666void StubCodeCompiler::GenerateAsynchronousGapMarkerStub() {
1667 __ Breakpoint(); // Marker stub.
1668}
1669
1670void StubCodeCompiler::GenerateUnknownDartCodeStub() {
1671 // Enter frame to include caller into the backtrace.
1672 __ EnterStubFrame();
1673 __ Breakpoint(); // Marker stub.
1674}
1675
1676void StubCodeCompiler::GenerateNotLoadedStub() {
1677 __ EnterStubFrame();
1678 __ CallRuntime(kNotLoadedRuntimeEntry, 0);
1679 __ Breakpoint();
1680}
1681
1682#define EMIT_BOX_ALLOCATION(Name) \
1683 void StubCodeCompiler::GenerateAllocate##Name##Stub() { \
1684 Label call_runtime; \
1685 if (!FLAG_use_slow_path && FLAG_inline_alloc) { \
1686 __ TryAllocate(compiler::Name##Class(), &call_runtime, \
1687 Assembler::kNearJump, AllocateBoxABI::kResultReg, \
1688 AllocateBoxABI::kTempReg); \
1689 __ Ret(); \
1690 } \
1691 __ Bind(&call_runtime); \
1692 __ EnterStubFrame(); \
1693 __ PushObject(NullObject()); /* Make room for result. */ \
1694 __ CallRuntime(kAllocate##Name##RuntimeEntry, 0); \
1695 __ PopRegister(AllocateBoxABI::kResultReg); \
1696 __ LeaveStubFrame(); \
1697 __ Ret(); \
1698 }
1699
1701EMIT_BOX_ALLOCATION(Double)
1702EMIT_BOX_ALLOCATION(Float32x4)
1703EMIT_BOX_ALLOCATION(Float64x2)
1704EMIT_BOX_ALLOCATION(Int32x4)
1705
1706#undef EMIT_BOX_ALLOCATION
1707
1708static void GenerateBoxFpuValueStub(Assembler* assembler,
1709 const dart::Class& cls,
1710 const RuntimeEntry& runtime_entry,
1711 void (Assembler::* store_value)(FpuRegister,
1712 Register,
1713 int32_t)) {
1714 Label call_runtime;
1715 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1716 __ TryAllocate(cls, &call_runtime, compiler::Assembler::kFarJump,
1718 (assembler->*store_value)(
1720 compiler::target::Double::value_offset() - kHeapObjectTag);
1721 __ Ret();
1722 }
1723 __ Bind(&call_runtime);
1724 __ EnterStubFrame();
1725 __ PushObject(NullObject()); /* Make room for result. */
1726 (assembler->*store_value)(BoxDoubleStubABI::kValueReg, THR,
1727 target::Thread::unboxed_runtime_arg_offset());
1728 __ CallRuntime(runtime_entry, 0);
1729 __ PopRegister(BoxDoubleStubABI::kResultReg);
1730 __ LeaveStubFrame();
1731 __ Ret();
1732}
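// GenerateBoxFpuValueStub tries to inline-allocate the box object and store
// the unboxed FPU value into it through the store_value member-function
// pointer; on the slow path the value is spilled to the thread's
// unboxed_runtime_arg slot so the runtime entry can box it, and the boxed
// result is popped into BoxDoubleStubABI::kResultReg.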
1733
1734void StubCodeCompiler::GenerateBoxDoubleStub() {
1736 kBoxDoubleRuntimeEntry,
1738}
1739
1740void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
1741#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
1743 kBoxFloat32x4RuntimeEntry,
1745#else
1746 __ Stop("Not supported on RISC-V.");
1747#endif
1748}
1749
1750void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
1751#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
1753 kBoxFloat64x2RuntimeEntry,
1755#else
1756 __ Stop("Not supported on RISC-V.");
1757#endif
1758}
1759
1760void StubCodeCompiler::GenerateDoubleToIntegerStub() {
1761 __ EnterStubFrame();
1762 __ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
1763 target::Thread::unboxed_runtime_arg_offset());
1764 __ PushObject(NullObject()); /* Make room for result. */
1765  __ PushRegister(DoubleToIntegerStubABI::kRecognizedKindReg);
1766 __ CallRuntime(kDoubleToIntegerRuntimeEntry, 1);
1767 __ Drop(1);
1768  __ PopRegister(DoubleToIntegerStubABI::kResultReg);
1769 __ LeaveStubFrame();
1770 __ Ret();
1771}
1772
1773static intptr_t SuspendStateFpOffset() {
1774 return compiler::target::frame_layout.FrameSlotForVariableIndex(
1775             SuspendState::kSuspendStateVarIndex) *
1776 compiler::target::kWordSize;
1777}
1778
1779static void CallDartCoreLibraryFunction(
1780 Assembler* assembler,
1781 intptr_t entry_point_offset_in_thread,
1782 intptr_t function_offset_in_object_store,
1783 bool uses_args_desc = false) {
1784 if (FLAG_precompiled_mode) {
1785 __ Call(Address(THR, entry_point_offset_in_thread));
1786 } else {
1787 __ LoadIsolateGroup(FUNCTION_REG);
1788 __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG,
1789 target::IsolateGroup::object_store_offset());
1790 __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG,
1791 function_offset_in_object_store);
1792 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
1793 target::Function::code_offset());
1794 if (!uses_args_desc) {
1795 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1796 __ LoadImmediate(ARGS_DESC_REG, 0);
1797 }
1798 __ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1799 }
1800}
1801
1802// Helper to generate allocation of _SuspendState instance.
1803// Initializes tags, frame_capacity and frame_size.
1804// Other fields are not initialized.
1805//
1806// Input:
1807// frame_size_reg: size of the frame payload in bytes.
1808// Output:
1809// result_reg: allocated instance.
1810// Clobbers:
1811// result_reg, temp_reg.
1812static void GenerateAllocateSuspendState(Assembler* assembler,
1813 Label* slow_case,
1814 Register result_reg,
1815 Register frame_size_reg,
1816 Register temp_reg) {
1817 if (FLAG_use_slow_path || !FLAG_inline_alloc) {
1818 __ Jump(slow_case);
1819 return;
1820 }
1821
1822 // Check for allocation tracing.
1823  NOT_IN_PRODUCT(
1824 __ MaybeTraceAllocation(kSuspendStateCid, slow_case, temp_reg));
1825
1826 // Compute the rounded instance size.
1827 const intptr_t fixed_size_plus_alignment_padding =
1828 (target::SuspendState::HeaderSize() +
1829 target::SuspendState::FrameSizeGrowthGap() * target::kWordSize +
1830       target::ObjectAlignment::kObjectAlignment - 1);
1831 __ AddImmediate(temp_reg, frame_size_reg, fixed_size_plus_alignment_padding);
1832 __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment);
1833
1834 // Now allocate the object.
1835 __ LoadFromOffset(result_reg, THR, target::Thread::top_offset());
1836 __ AddRegisters(temp_reg, result_reg);
1837 // Check if the allocation fits into the remaining space.
1838 __ CompareWithMemoryValue(temp_reg,
1839 Address(THR, target::Thread::end_offset()));
1840 __ BranchIf(UNSIGNED_GREATER_EQUAL, slow_case);
1841 __ CheckAllocationCanary(result_reg);
1842
1843 // Successfully allocated the object, now update top to point to
1844 // next object start and initialize the object.
1845 __ StoreToOffset(temp_reg, THR, target::Thread::top_offset());
1846 __ SubRegisters(temp_reg, result_reg);
1847 __ AddImmediate(result_reg, kHeapObjectTag);
1848
1849 if (!FLAG_precompiled_mode) {
1850 // Use rounded object size to calculate and save frame capacity.
1851 __ AddImmediate(temp_reg, temp_reg,
1852 -target::SuspendState::payload_offset());
1853 __ StoreFieldToOffset(temp_reg, result_reg,
1854 target::SuspendState::frame_capacity_offset());
1855 // Restore rounded object size.
1856 __ AddImmediate(temp_reg, temp_reg, target::SuspendState::payload_offset());
1857 }
1858
1859 // Calculate the size tag.
1860 {
1861 Label size_tag_overflow, done;
1862 __ CompareImmediate(temp_reg, target::UntaggedObject::kSizeTagMaxSizeTag);
1863 __ BranchIf(UNSIGNED_GREATER, &size_tag_overflow, Assembler::kNearJump);
1864 __ LslImmediate(temp_reg,
1865 target::UntaggedObject::kTagBitsSizeTagPos -
1866                        target::ObjectAlignment::kObjectAlignmentLog2);
1867    __ Jump(&done, Assembler::kNearJump);
1868
1869 __ Bind(&size_tag_overflow);
1870 // Set overflow size tag value.
1871 __ LoadImmediate(temp_reg, 0);
1872
1873 __ Bind(&done);
1874 uword tags = target::MakeTagWordForNewSpaceObject(kSuspendStateCid, 0);
1875 __ OrImmediate(temp_reg, tags);
1876 __ StoreFieldToOffset(temp_reg, result_reg,
1877 target::Object::tags_offset()); // Tags.
1878 }
1879
1880 __ StoreFieldToOffset(frame_size_reg, result_reg,
1881 target::SuspendState::frame_size_offset());
1882}
1883
1884void StubCodeCompiler::GenerateSuspendStub(
1885 bool call_suspend_function,
1886 bool pass_type_arguments,
1887 intptr_t suspend_entry_point_offset_in_thread,
1888 intptr_t suspend_function_offset_in_object_store) {
1889 const Register kArgument = SuspendStubABI::kArgumentReg;
1890 const Register kTypeArgs = SuspendStubABI::kTypeArgsReg;
1891 const Register kTemp = SuspendStubABI::kTempReg;
1892 const Register kFrameSize = SuspendStubABI::kFrameSizeReg;
1893 const Register kSuspendState = SuspendStubABI::kSuspendStateReg;
1894 const Register kFunctionData = SuspendStubABI::kFunctionDataReg;
1895 const Register kSrcFrame = SuspendStubABI::kSrcFrameReg;
1896 const Register kDstFrame = SuspendStubABI::kDstFrameReg;
1897 Label alloc_slow_case, alloc_done, init_done, resize_suspend_state,
1898 remember_object, call_dart;
1899
1900#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
1901 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
1902#endif
1903
1904 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
1905
1906 __ AddImmediate(
1907 kFrameSize, FPREG,
1908 -target::frame_layout.last_param_from_entry_sp * target::kWordSize);
1909 __ SubRegisters(kFrameSize, SPREG);
1910
1911 __ EnterStubFrame();
1912
1913 if (pass_type_arguments) {
1914 __ PushRegister(kTypeArgs);
1915 }
1916
1917 __ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
1918
1919 if (FLAG_precompiled_mode) {
1920 __ BranchIf(EQUAL, &init_done);
1921 } else {
1922 Label alloc_suspend_state;
1923 __ BranchIf(NOT_EQUAL, &alloc_suspend_state);
1924
1925 __ CompareWithMemoryValue(
1926 kFrameSize,
1927 FieldAddress(kSuspendState,
1928 target::SuspendState::frame_capacity_offset()));
1929 __ BranchIf(UNSIGNED_GREATER, &resize_suspend_state);
1930
1931 __ StoreFieldToOffset(kFrameSize, kSuspendState,
1932 target::SuspendState::frame_size_offset());
1933 __ Jump(&init_done);
1934
1935 __ Bind(&alloc_suspend_state);
1936 }
1937
1938 __ Comment("Allocate SuspendState");
1939 __ MoveRegister(kFunctionData, kSuspendState);
1940
1941 GenerateAllocateSuspendState(assembler, &alloc_slow_case, kSuspendState,
1942 kFrameSize, kTemp);
1943
1944 __ StoreCompressedIntoObjectNoBarrier(
1945 kSuspendState,
1946 FieldAddress(kSuspendState, target::SuspendState::function_data_offset()),
1947 kFunctionData);
1948
1949 {
1950#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
1951 defined(TARGET_ARCH_RISCV64)
1952 const Register kNullReg = NULL_REG;
1953#else
1954 const Register kNullReg = kTemp;
1955 __ LoadObject(kNullReg, NullObject());
1956#endif
1957 __ StoreCompressedIntoObjectNoBarrier(
1958 kSuspendState,
1959 FieldAddress(kSuspendState,
1960 target::SuspendState::then_callback_offset()),
1961 kNullReg);
1962 __ StoreCompressedIntoObjectNoBarrier(
1963 kSuspendState,
1964 FieldAddress(kSuspendState,
1965 target::SuspendState::error_callback_offset()),
1966 kNullReg);
1967 }
1968
1969 __ Bind(&alloc_done);
1970
1971 __ Comment("Save SuspendState to frame");
1972 __ LoadFromOffset(kTemp, FPREG, kSavedCallerFpSlotFromFp * target::kWordSize);
1973 __ StoreToOffset(kSuspendState, kTemp, SuspendStateFpOffset());
1974
1975 __ Bind(&init_done);
1976 __ Comment("Copy frame to SuspendState");
1977
1978#ifdef DEBUG
1979 {
1980 // Verify that SuspendState.frame_size == kFrameSize.
1981 Label okay;
1982 __ LoadFieldFromOffset(kTemp, kSuspendState,
1983 target::SuspendState::frame_size_offset());
1984 __ CompareRegisters(kTemp, kFrameSize);
1985 __ BranchIf(EQUAL, &okay);
1986 __ Breakpoint();
1987 __ Bind(&okay);
1988 }
1989#endif
1990
1991 if (kSrcFrame == THR) {
1992 __ PushRegister(THR);
1993 }
1994 __ AddImmediate(kSrcFrame, FPREG, kCallerSpSlotFromFp * target::kWordSize);
1995 __ AddImmediate(kDstFrame, kSuspendState,
1996 target::SuspendState::payload_offset() - kHeapObjectTag);
1997 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
1998 if (kSrcFrame == THR) {
1999 __ PopRegister(THR);
2000 }
2001
2002 __ LoadFromOffset(kTemp, FPREG, kSavedCallerPcSlotFromFp * target::kWordSize);
2003 __ StoreFieldToOffset(kTemp, kSuspendState,
2004 target::SuspendState::pc_offset());
2005
2006#ifdef DEBUG
2007 {
2008 // Verify that kSuspendState matches :suspend_state in the copied stack
2009 // frame.
2010 Label okay;
2011 __ LoadFieldFromOffset(kTemp, kSuspendState,
2012 target::SuspendState::frame_size_offset());
2013 __ AddRegisters(kTemp, kSuspendState);
2014 __ LoadFieldFromOffset(
2015 kTemp, kTemp,
2016 target::SuspendState::payload_offset() + SuspendStateFpOffset());
2017 __ CompareRegisters(kTemp, kSuspendState);
2018 __ BranchIf(EQUAL, &okay);
2019 __ Breakpoint();
2020 __ Bind(&okay);
2021 }
2022#endif
2023
2024 if (call_suspend_function) {
2025    // Push arguments for the suspend Dart function early to preserve them
2026    // across the write barrier.
2027 __ PushRegistersInOrder({kSuspendState, kArgument});
2028 }
2029
2030 // Write barrier.
2031 __ AndImmediate(kTemp, kSuspendState, target::kPageMask);
2032 __ LoadFromOffset(kTemp, kTemp, target::Page::original_top_offset());
2033 __ CompareRegisters(kSuspendState, kTemp);
2034 __ BranchIf(UNSIGNED_LESS, &remember_object);
2035 // Assumption: SuspendStates are always on non-image pages.
2036 // TODO(rmacnak): Also check original_end if we bound TLABs to smaller than a
2037 // heap page.
2038
2039 __ Bind(&call_dart);
2040 if (call_suspend_function) {
2041 __ Comment("Call suspend Dart function");
2042 if (pass_type_arguments) {
2043 __ LoadObject(ARGS_DESC_REG,
2044 ArgumentsDescriptorBoxed(/*type_args_len=*/1,
2045 /*num_arguments=*/2));
2046 }
2047 CallDartCoreLibraryFunction(assembler, suspend_entry_point_offset_in_thread,
2048 suspend_function_offset_in_object_store,
2049 /*uses_args_desc=*/pass_type_arguments);
2050 } else {
2051    // SuspendStub returns either the result of the Dart callback,
2052    // or the SuspendStub argument (if the Dart callback is not used).
2053 // The latter is used by yield/yield* in sync* functions
2054 // to indicate that iteration should be continued.
2055 __ MoveRegister(CallingConventions::kReturnReg, kArgument);
2056 }
2057
2058 __ LeaveStubFrame();
2059
2060#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_IA32)
2061 // Drop caller frame on all architectures except x86 (X64/IA32) which
2062 // needs to maintain call/return balance to avoid performance regressions.
2063 __ LeaveDartFrame();
2064#elif defined(TARGET_ARCH_X64)
2065  // Restore PP in JIT mode on x64, as the epilogue following the SuspendStub
2066  // call will only unwind the frame and return.
2067 if (!FLAG_precompiled_mode) {
2068 __ LoadFromOffset(
2069 PP, FPREG,
2070 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
2071 }
2072#endif
2073 __ Ret();
2074
2075#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2076 // Slow path is executed with Dart and stub frames still on the stack.
2077 SPILLS_LR_TO_FRAME({});
2078 SPILLS_LR_TO_FRAME({});
2079#endif
2080 __ Bind(&alloc_slow_case);
2081 __ Comment("SuspendState Allocation slow case");
2082 // Save argument and frame size.
2083 __ PushRegistersInOrder({kArgument, kFrameSize});
2084 __ PushObject(NullObject()); // Make space on stack for the return value.
2085 __ SmiTag(kFrameSize);
2086 // Pass frame size and function data to runtime entry.
2087 __ PushRegistersInOrder({kFrameSize, kFunctionData});
2088 __ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
2089 __ Drop(2); // Drop arguments
2090 __ PopRegister(kSuspendState); // Get result.
2091 __ PopRegister(kFrameSize); // Restore frame size.
2092 __ PopRegister(kArgument); // Restore argument.
2093 __ Jump(&alloc_done);
2094
2095 __ Bind(&resize_suspend_state);
2096 __ Comment("Resize SuspendState");
2097 // Save argument and frame size.
2098 __ PushRegistersInOrder({kArgument, kFrameSize});
2099 __ PushObject(NullObject()); // Make space on stack for the return value.
2100 __ SmiTag(kFrameSize);
2101 // Pass frame size and old suspend state to runtime entry.
2102 __ PushRegistersInOrder({kFrameSize, kSuspendState});
2103  // It's okay to call the runtime to resize SuspendState objects,
2104  // as this can only happen in unoptimized code if the expression
2105  // stack grows between suspends, or once after an OSR transition.
2106 __ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
2107 __ Drop(2); // Drop arguments
2108 __ PopRegister(kSuspendState); // Get result.
2109 __ PopRegister(kFrameSize); // Restore frame size.
2110 __ PopRegister(kArgument); // Restore argument.
2111 __ Jump(&alloc_done);
2112
2113 __ Bind(&remember_object);
2114 __ Comment("Old gen SuspendState slow case");
2115 if (!call_suspend_function) {
2116 // Save kArgument which contains the return value
2117 // if suspend function is not called.
2118 __ PushRegister(kArgument);
2119 }
2120 {
2121#if defined(TARGET_ARCH_IA32)
2122 LeafRuntimeScope rt(assembler, /*frame_size=*/2 * target::kWordSize,
2123 /*preserve_registers=*/false);
2124 __ movl(Address(ESP, 1 * target::kWordSize), THR);
2125 __ movl(Address(ESP, 0 * target::kWordSize), kSuspendState);
2126#else
2127 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
2128 /*preserve_registers=*/false);
2129 __ MoveRegister(CallingConventions::ArgumentRegisters[0], kSuspendState);
2130    __ MoveRegister(CallingConventions::ArgumentRegisters[1], THR);
2131#endif
2132 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
2133 }
2134 if (!call_suspend_function) {
2135 __ PopRegister(kArgument);
2136 }
2137 __ Jump(&call_dart);
2138}
2139
2140void StubCodeCompiler::GenerateAwaitStub() {
2141 GenerateSuspendStub(
2142 /*call_suspend_function=*/true,
2143 /*pass_type_arguments=*/false,
2144 target::Thread::suspend_state_await_entry_point_offset(),
2145 target::ObjectStore::suspend_state_await_offset());
2146}
2147
2148void StubCodeCompiler::GenerateAwaitWithTypeCheckStub() {
2149 GenerateSuspendStub(
2150
2151 /*call_suspend_function=*/true,
2152 /*pass_type_arguments=*/true,
2153 target::Thread::suspend_state_await_with_type_check_entry_point_offset(),
2154 target::ObjectStore::suspend_state_await_with_type_check_offset());
2155}
2156
2157void StubCodeCompiler::GenerateYieldAsyncStarStub() {
2158 GenerateSuspendStub(
2159
2160 /*call_suspend_function=*/true,
2161 /*pass_type_arguments=*/false,
2162 target::Thread::suspend_state_yield_async_star_entry_point_offset(),
2163 target::ObjectStore::suspend_state_yield_async_star_offset());
2164}
2165
2166void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
2167 GenerateSuspendStub(
2168
2169 /*call_suspend_function=*/true,
2170 /*pass_type_arguments=*/false,
2171 target::Thread::
2172 suspend_state_suspend_sync_star_at_start_entry_point_offset(),
2173 target::ObjectStore::suspend_state_suspend_sync_star_at_start_offset());
2174}
2175
2176void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub() {
2177 GenerateSuspendStub(
2178 /*call_suspend_function=*/false,
2179 /*pass_type_arguments=*/false, -1, -1);
2180}
2181
2182void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
2183 intptr_t init_entry_point_offset_in_thread,
2184 intptr_t init_function_offset_in_object_store) {
2185  const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
2186
2187 __ EnterStubFrame();
2188 __ LoadObject(ARGS_DESC_REG, ArgumentsDescriptorBoxed(/*type_args_len=*/1,
2189 /*num_arguments=*/0));
2190 __ PushRegister(kTypeArgs);
2191 CallDartCoreLibraryFunction(assembler, init_entry_point_offset_in_thread,
2192 init_function_offset_in_object_store,
2193 /*uses_args_desc=*/true);
2194 __ LeaveStubFrame();
2195
2196 // Set :suspend_state in the caller frame.
2197  __ StoreToOffset(CallingConventions::kReturnReg, FPREG,
2198                   SuspendStateFpOffset());
2199 __ Ret();
2200}
2201
2202void StubCodeCompiler::GenerateInitAsyncStub() {
2203 GenerateInitSuspendableFunctionStub(
2204 target::Thread::suspend_state_init_async_entry_point_offset(),
2205 target::ObjectStore::suspend_state_init_async_offset());
2206}
2207
2208void StubCodeCompiler::GenerateInitAsyncStarStub() {
2209 GenerateInitSuspendableFunctionStub(
2210 target::Thread::suspend_state_init_async_star_entry_point_offset(),
2211 target::ObjectStore::suspend_state_init_async_star_offset());
2212}
2213
2214void StubCodeCompiler::GenerateInitSyncStarStub() {
2215 GenerateInitSuspendableFunctionStub(
2216 target::Thread::suspend_state_init_sync_star_entry_point_offset(),
2217 target::ObjectStore::suspend_state_init_sync_star_offset());
2218}
2219
2220void StubCodeCompiler::GenerateResumeStub() {
2221 const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
2222 const Register kTemp = ResumeStubABI::kTempReg;
2223 const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
2224 const Register kSrcFrame = ResumeStubABI::kSrcFrameReg;
2225 const Register kDstFrame = ResumeStubABI::kDstFrameReg;
2226 const Register kResumePc = ResumeStubABI::kResumePcReg;
2227 const Register kException = ResumeStubABI::kExceptionReg;
2228 const Register kStackTrace = ResumeStubABI::kStackTraceReg;
2229 Label call_runtime;
2230
2231 // Top of the stack on entry:
2232 // ... [SuspendState] [value] [exception] [stackTrace] [ReturnAddress]
2233
2234 __ EnterDartFrame(0);
2235
2236 const intptr_t param_offset =
2237 target::frame_layout.param_end_from_fp * target::kWordSize;
2238 __ LoadFromOffset(kSuspendState, FPREG, param_offset + 4 * target::kWordSize);
2239#ifdef DEBUG
2240 {
2241 Label okay;
2242 __ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
2243 __ BranchIf(EQUAL, &okay);
2244 __ Breakpoint();
2245 __ Bind(&okay);
2246 }
2247 {
2248 Label okay;
2249 __ LoadFieldFromOffset(kTemp, kSuspendState,
2250 target::SuspendState::pc_offset());
2251 __ CompareImmediate(kTemp, 0);
2252 __ BranchIf(NOT_EQUAL, &okay);
2253 __ Breakpoint();
2254 __ Bind(&okay);
2255 }
2256#endif
2257
2258 __ LoadFieldFromOffset(kFrameSize, kSuspendState,
2259 target::SuspendState::frame_size_offset());
2260#ifdef DEBUG
2261 {
2262 Label okay;
2263 __ MoveRegister(kTemp, kFrameSize);
2264 __ AddRegisters(kTemp, kSuspendState);
2265 __ LoadFieldFromOffset(
2266 kTemp, kTemp,
2267 target::SuspendState::payload_offset() + SuspendStateFpOffset());
2268 __ CompareRegisters(kTemp, kSuspendState);
2269 __ BranchIf(EQUAL, &okay);
2270 __ Breakpoint();
2271 __ Bind(&okay);
2272 }
2273#endif
2274 if (!FLAG_precompiled_mode) {
2275 // Copy Code object (part of the fixed frame which is not copied below)
2276 // and restore pool pointer.
2277 __ MoveRegister(kTemp, kSuspendState);
2278 __ AddRegisters(kTemp, kFrameSize);
2279 __ LoadFromOffset(
2280 CODE_REG, kTemp,
2281 target::SuspendState::payload_offset() - kHeapObjectTag +
2282 target::frame_layout.code_from_fp * target::kWordSize);
2283 __ StoreToOffset(CODE_REG, FPREG,
2284 target::frame_layout.code_from_fp * target::kWordSize);
2285#if !defined(TARGET_ARCH_IA32)
2286 __ LoadPoolPointer(PP);
2287#endif
2288 }
2289 // Do not copy fixed frame between the first local and FP.
2290 __ AddImmediate(kFrameSize, (target::frame_layout.first_local_from_fp + 1) *
2291 target::kWordSize);
2292 __ SubRegisters(SPREG, kFrameSize);
2293
2294 __ Comment("Copy frame from SuspendState");
2295 intptr_t num_saved_regs = 0;
2296 if (kSrcFrame == THR) {
2297 __ PushRegister(THR);
2298 ++num_saved_regs;
2299 }
2300 if (kDstFrame == CODE_REG) {
2301 __ PushRegister(CODE_REG);
2302 ++num_saved_regs;
2303 }
2304 __ AddImmediate(kSrcFrame, kSuspendState,
2305 target::SuspendState::payload_offset() - kHeapObjectTag);
2306 __ AddImmediate(kDstFrame, SPREG, num_saved_regs * target::kWordSize);
2307 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
2308 if (kDstFrame == CODE_REG) {
2309 __ PopRegister(CODE_REG);
2310 }
2311 if (kSrcFrame == THR) {
2312 __ PopRegister(THR);
2313 }
2314
2315 __ Comment("Transfer control");
2316
2317 __ LoadFieldFromOffset(kResumePc, kSuspendState,
2318 target::SuspendState::pc_offset());
2319 __ StoreZero(FieldAddress(kSuspendState, target::SuspendState::pc_offset()),
2320 kTemp);
2321
2322#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
2323 // Adjust resume PC to skip extra epilogue generated on x86
2324 // right after the call to suspend stub in order to maintain
2325 // call/return balance.
2326 __ AddImmediate(kResumePc, SuspendStubABI::kResumePcDistance);
2327#endif
2328
2329 static_assert((kException != CODE_REG) && (kException != PP),
2330 "should not interfere");
2331 __ LoadFromOffset(kException, FPREG, param_offset + 2 * target::kWordSize);
2332 __ CompareObject(kException, NullObject());
2333 __ BranchIf(NOT_EQUAL, &call_runtime);
2334
2335 if (!FLAG_precompiled_mode) {
2336 // Check if Code is disabled.
2337 __ LoadFieldFromOffset(kTemp, CODE_REG,
2338 target::Code::instructions_offset());
2339 __ CompareWithMemoryValue(
2340 kTemp,
2341 FieldAddress(CODE_REG, target::Code::active_instructions_offset()));
2342 __ BranchIf(NOT_EQUAL, &call_runtime);
2343
2344#if !defined(PRODUCT)
2345 // Check if there is a breakpoint at resumption.
2346 __ LoadIsolate(kTemp);
2347 __ LoadFromOffset(kTemp, kTemp,
2348 target::Isolate::has_resumption_breakpoints_offset(),
2349                      kUnsignedByte);
2350 __ CompareImmediate(kTemp, 0);
2351 __ BranchIf(NOT_EQUAL, &call_runtime);
2352#endif
2353 }
2354
2355 __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
2356 param_offset + 3 * target::kWordSize);
2357
2358 __ Jump(kResumePc);
2359
2360 __ Comment("Call runtime to throw exception or deopt");
2361 __ Bind(&call_runtime);
2362
2363 __ LoadFromOffset(kStackTrace, FPREG, param_offset + 1 * target::kWordSize);
2364 static_assert((kStackTrace != CODE_REG) && (kStackTrace != PP),
2365 "should not interfere");
2366
2367  // Set the return address as if the suspended Dart function had called
2368  // this stub with kResumePc as the return address.
2369 __ SetReturnAddress(kResumePc);
2370
2371 if (!FLAG_precompiled_mode) {
2372 __ LoadFromOffset(CODE_REG, THR, target::Thread::resume_stub_offset());
2373 }
2374#if !defined(TARGET_ARCH_IA32)
2375 __ set_constant_pool_allowed(false);
2376#endif
2377 __ EnterStubFrame();
2378 __ PushObject(NullObject()); // Make room for (unused) result.
2379 __ PushRegistersInOrder({kException, kStackTrace});
2380 __ CallRuntime(kResumeFrameRuntimeEntry, /*argument_count=*/2);
2381
2382 if (FLAG_precompiled_mode) {
2383 __ Breakpoint();
2384 } else {
2385 __ LeaveStubFrame();
2386 __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
2387 param_offset + 3 * target::kWordSize);
2388 // Lazy deoptimize.
2389 __ Ret();
2390 }
2391}
2392
2393void StubCodeCompiler::GenerateReturnStub(
2394 intptr_t return_entry_point_offset_in_thread,
2395 intptr_t return_function_offset_in_object_store,
2396 intptr_t return_stub_offset_in_thread) {
2397 const Register kSuspendState = ReturnStubABI::kSuspendStateReg;
2398
2399#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2400 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
2401#endif
2402
2403 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
2404#ifdef DEBUG
2405 {
2406 Label okay;
2407 __ CompareObject(kSuspendState, NullObject());
2408 __ BranchIf(NOT_EQUAL, &okay);
2409 __ Breakpoint();
2410 __ Bind(&okay);
2411 }
2412#endif
2413 __ LeaveDartFrame();
2414 if (!FLAG_precompiled_mode) {
2415 __ LoadFromOffset(CODE_REG, THR, return_stub_offset_in_thread);
2416 }
2417 __ EnterStubFrame();
2418 __ PushRegistersInOrder({kSuspendState, CallingConventions::kReturnReg});
2419 CallDartCoreLibraryFunction(assembler, return_entry_point_offset_in_thread,
2420 return_function_offset_in_object_store);
2421 __ LeaveStubFrame();
2422 __ Ret();
2423}
2424
2425void StubCodeCompiler::GenerateReturnAsyncStub() {
2426 GenerateReturnStub(
2427 target::Thread::suspend_state_return_async_entry_point_offset(),
2428 target::ObjectStore::suspend_state_return_async_offset(),
2429 target::Thread::return_async_stub_offset());
2430}
2431
2432void StubCodeCompiler::GenerateReturnAsyncNotFutureStub() {
2433 GenerateReturnStub(
2434 target::Thread::
2435 suspend_state_return_async_not_future_entry_point_offset(),
2436 target::ObjectStore::suspend_state_return_async_not_future_offset(),
2437 target::Thread::return_async_not_future_stub_offset());
2438}
2439
2440void StubCodeCompiler::GenerateReturnAsyncStarStub() {
2441 GenerateReturnStub(
2442 target::Thread::suspend_state_return_async_star_entry_point_offset(),
2443 target::ObjectStore::suspend_state_return_async_star_offset(),
2444 target::Thread::return_async_star_stub_offset());
2445}
2446
2447void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
2449 ASSERT(kSuspendState != kExceptionObjectReg);
2450 ASSERT(kSuspendState != kStackTraceObjectReg);
2451 Label rethrow_exception;
2452
2453#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2454 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
2455#endif
2456
2457 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
2458
2459  // Check if suspend_state is initialized. Otherwise the
2460  // exception was thrown from the prologue code and
2461  // should be propagated synchronously.
2462 __ CompareObject(kSuspendState, NullObject());
2463 __ BranchIf(EQUAL, &rethrow_exception);
2464
2465 __ LeaveDartFrame();
2466 if (!FLAG_precompiled_mode) {
2467 __ LoadFromOffset(CODE_REG, THR,
2468 target::Thread::async_exception_handler_stub_offset());
2469 }
2470 __ EnterStubFrame();
2471 __ PushRegistersInOrder(
2472 {kSuspendState, kExceptionObjectReg, kStackTraceObjectReg});
2473  CallDartCoreLibraryFunction(
2474 assembler,
2475 target::Thread::suspend_state_handle_exception_entry_point_offset(),
2476 target::ObjectStore::suspend_state_handle_exception_offset());
2477 __ LeaveStubFrame();
2478 __ Ret();
2479
2480#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2481 // Rethrow case is used when Dart frame is still on the stack.
2482 SPILLS_LR_TO_FRAME({});
2483#endif
2484 __ Comment("Rethrow exception");
2485 __ Bind(&rethrow_exception);
2486 __ LeaveDartFrame();
2487 if (!FLAG_precompiled_mode) {
2488 __ LoadFromOffset(CODE_REG, THR,
2489 target::Thread::async_exception_handler_stub_offset());
2490 }
2491 __ EnterStubFrame();
2492 __ PushObject(NullObject()); // Make room for (unused) result.
2493 __ PushRegistersInOrder({kExceptionObjectReg, kStackTraceObjectReg});
2494 __ PushImmediate(Smi::RawValue(0)); // Do not bypass debugger.
2495 __ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/3);
2496 __ Breakpoint();
2497}
2498
2499void StubCodeCompiler::GenerateCloneSuspendStateStub() {
2500  const Register kSource = CloneSuspendStateStubABI::kSourceReg;
2501  const Register kDestination = CloneSuspendStateStubABI::kDestinationReg;
2502  const Register kTemp = CloneSuspendStateStubABI::kTempReg;
2503  const Register kFrameSize = CloneSuspendStateStubABI::kFrameSizeReg;
2504  const Register kSrcFrame = CloneSuspendStateStubABI::kSrcFrameReg;
2505  const Register kDstFrame = CloneSuspendStateStubABI::kDstFrameReg;
2506 Label alloc_slow_case;
2507
2508#ifdef DEBUG
2509 {
2510 // Can only clone _SuspendState objects with copied frames.
2511 Label okay;
2512 __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
2513 __ CompareImmediate(kTemp, 0);
2514 __ BranchIf(NOT_EQUAL, &okay);
2515 __ Breakpoint();
2516 __ Bind(&okay);
2517 }
2518#endif
2519
2520 __ LoadFieldFromOffset(kFrameSize, kSource,
2521 target::SuspendState::frame_size_offset());
2522
2523 GenerateAllocateSuspendState(assembler, &alloc_slow_case, kDestination,
2524 kFrameSize, kTemp);
2525
2526 // Copy pc.
2527 __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
2528 __ StoreFieldToOffset(kTemp, kDestination, target::SuspendState::pc_offset());
2529
2530 // Copy function_data.
2531 __ LoadCompressedFieldFromOffset(
2532 kTemp, kSource, target::SuspendState::function_data_offset());
2533 __ StoreCompressedIntoObjectNoBarrier(
2534 kDestination,
2535 FieldAddress(kDestination, target::SuspendState::function_data_offset()),
2536 kTemp);
2537
2538 // Copy then_callback.
2539 __ LoadCompressedFieldFromOffset(
2540 kTemp, kSource, target::SuspendState::then_callback_offset());
2541 __ StoreCompressedIntoObjectNoBarrier(
2542 kDestination,
2543 FieldAddress(kDestination, target::SuspendState::then_callback_offset()),
2544 kTemp);
2545
2546 // Copy error_callback.
2547 __ LoadCompressedFieldFromOffset(
2548 kTemp, kSource, target::SuspendState::error_callback_offset());
2549 __ StoreCompressedIntoObjectNoBarrier(
2550 kDestination,
2551 FieldAddress(kDestination, target::SuspendState::error_callback_offset()),
2552 kTemp);
2553
2554 // Copy payload frame.
2555 if (kSrcFrame == THR) {
2556 __ PushRegister(THR);
2557 }
2558 const uword offset = target::SuspendState::payload_offset() - kHeapObjectTag;
2559 __ AddImmediate(kSrcFrame, kSource, offset);
2560 __ AddImmediate(kDstFrame, kDestination, offset);
2561 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
2562 if (kSrcFrame == THR) {
2563 __ PopRegister(THR);
2564 }
2565
2566 // Update value of :suspend_state variable in the copied frame
2567 // for the new SuspendState.
2568 __ LoadFieldFromOffset(kTemp, kDestination,
2569 target::SuspendState::frame_size_offset());
2570 __ AddRegisters(kTemp, kDestination);
2571 __ StoreFieldToOffset(
2572 kDestination, kTemp,
2573 target::SuspendState::payload_offset() + SuspendStateFpOffset());
2574
2575 __ MoveRegister(CallingConventions::kReturnReg, kDestination);
2577 __ Ret();
2578
2579 __ Bind(&alloc_slow_case);
2580 __ Comment("CloneSuspendState slow case");
2581 __ EnterStubFrame();
2582 __ PushObject(NullObject()); // Make space on stack for the return value.
2583 __ PushRegister(kSource);
2584 __ CallRuntime(kCloneSuspendStateRuntimeEntry, 1);
2585 __ Drop(1); // Drop argument
2586 __ PopRegister(CallingConventions::kReturnReg); // Get result.
2587 __ LeaveStubFrame();
2588 __ Ret();
2589}
2590
2591void StubCodeCompiler::GenerateFfiAsyncCallbackSendStub() {
2592 __ EnterStubFrame();
2593 __ PushObject(NullObject()); // Make space on stack for the return value.
2595 __ CallRuntime(kFfiAsyncCallbackSendRuntimeEntry, 1);
2596 __ Drop(1); // Drop argument.
2597 __ PopRegister(CallingConventions::kReturnReg); // Get result.
2598 __ LeaveStubFrame();
2599 __ Ret();
2600}
2601
2602void StubCodeCompiler::InsertBSSRelocation(BSS::Relocation reloc) {
2603 ASSERT(pc_descriptors_list_ != nullptr);
2604 const intptr_t pc_offset = assembler->InsertAlignedRelocation(reloc);
2605 pc_descriptors_list_->AddDescriptor(
2606 UntaggedPcDescriptors::kBSSRelocation, pc_offset,
2607 /*deopt_id=*/DeoptId::kNone,
2608 /*root_pos=*/TokenPosition::kNoSource,
2609 /*try_index=*/-1,
2610      /*yield_index=*/UntaggedPcDescriptors::kInvalidYieldIndex);
2611}
2612
2613#if !defined(TARGET_ARCH_IA32)
2614static void GenerateSubtypeTestCacheLoopBody(Assembler* assembler,
2615 int n,
2616 Register null_reg,
2617 Register cache_entry_reg,
2618 Register instance_cid_or_sig_reg,
2619 Register instance_type_args_reg,
2620 Register parent_fun_type_args_reg,
2621 Register delayed_type_args_reg,
2622 Label* found,
2623 Label* not_found,
2624 Label* next_iteration) {
2625 __ Comment("Loop");
2626 // LoadAcquireCompressed assumes the loaded value is a heap object and
2627 // extends it with the heap bits if compressed. However, the entry may be
2628 // a Smi.
2629 //
2630 // Instead, just use LoadAcquire to load the lower bits when compressed and
2631 // only compare the low bits of the loaded value using CompareObjectRegisters.
2632 __ LoadAcquireFromOffset(
2633 TypeTestABI::kScratchReg, cache_entry_reg,
2634 target::kCompressedWordSize *
2635 target::SubtypeTestCache::kInstanceCidOrSignature,
2636 kObjectBytes);
2637 __ CompareObjectRegisters(TypeTestABI::kScratchReg, null_reg);
2638 __ BranchIf(EQUAL, not_found, Assembler::kNearJump);
2639 __ CompareObjectRegisters(TypeTestABI::kScratchReg, instance_cid_or_sig_reg);
2640 if (n == 1) {
2641 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2642 return;
2643 }
2644
2645 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2646 __ CompareWithMemoryValue(
2647 instance_type_args_reg,
2648 Address(cache_entry_reg,
2649 target::kCompressedWordSize *
2650 target::SubtypeTestCache::kInstanceTypeArguments),
2651 kObjectBytes);
2652 if (n == 2) {
2653 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2654 return;
2655 }
2656
2657 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2658 __ CompareWithMemoryValue(
2659      TypeTestABI::kInstantiatorTypeArgumentsReg,
2660 Address(cache_entry_reg,
2661 target::kCompressedWordSize *
2662 target::SubtypeTestCache::kInstantiatorTypeArguments),
2663 kObjectBytes);
2664 if (n == 3) {
2665 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2666 return;
2667 }
2668
2669 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2670 __ CompareWithMemoryValue(
2671      TypeTestABI::kFunctionTypeArgumentsReg,
2672 Address(cache_entry_reg,
2673 target::kCompressedWordSize *
2674 target::SubtypeTestCache::kFunctionTypeArguments),
2675 kObjectBytes);
2676 if (n == 4) {
2677 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2678 return;
2679 }
2680
2681 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2682 __ CompareWithMemoryValue(
2683 parent_fun_type_args_reg,
2684 Address(
2685 cache_entry_reg,
2686 target::kCompressedWordSize *
2687 target::SubtypeTestCache::kInstanceParentFunctionTypeArguments),
2688 kObjectBytes);
2689 if (n == 5) {
2690 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2691 return;
2692 }
2693
2694 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2695 __ CompareWithMemoryValue(
2696 delayed_type_args_reg,
2697 Address(
2698 cache_entry_reg,
2699 target::kCompressedWordSize *
2700 target::SubtypeTestCache::kInstanceDelayedFunctionTypeArguments),
2701 kObjectBytes);
2702 if (n == 6) {
2703 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2704 return;
2705 }
2706
2707 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2708 __ CompareWithMemoryValue(
2709      TypeTestABI::kDstTypeReg,
2710 Address(cache_entry_reg, target::kCompressedWordSize *
2711 target::SubtypeTestCache::kDestinationType),
2712 kObjectBytes);
2713 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2714}
2715
2716// An object that uses RAII to load from and store to the stack when
2717// appropriate, allowing the code within that scope to act as if the given
2718// register is always provided. Either the Register value stored at [reg] must
2719// be a valid register (not kNoRegister) or [depth] must be a valid stack depth
2720// (not StackRegisterScope::kNoDepth).
2721//
2722// When the Register value stored at [reg] is a valid register, this scope
2723// generates no assembly and does not change the value stored at [reg].
2724//
2725// When [depth] is a valid stack depth, this scope object performs the
2726// following actions:
2727//
2728// On construction:
2729// * Generates assembly to load the value on the stack at [depth] into [alt].
2730// * Sets the Register value pointed to by [reg] to [alt].
2731//
2732// On destruction:
2733// * Generates assembly to store the value of [alt] into the stack at [depth].
2734// * Resets the Register value pointed to by [reg] to kNoRegister.
2735class StackRegisterScope : public ValueObject {
2736 public:
2737  StackRegisterScope(Assembler* assembler,
2738 Register* reg,
2739 intptr_t depth,
2740 Register alt = TMP)
2741 : assembler(assembler), reg_(reg), depth_(depth), alt_(alt) {
2742 if (depth_ != kNoDepth) {
2743 ASSERT(depth_ >= 0);
2744 ASSERT(*reg_ == kNoRegister);
2745 ASSERT(alt_ != kNoRegister);
2746 __ LoadFromStack(alt_, depth_);
2747 *reg_ = alt_;
2748 } else {
2749 ASSERT(*reg_ != kNoRegister);
2750 }
2751 }
2752
2753  ~StackRegisterScope() {
2754 if (depth_ != kNoDepth) {
2755 __ StoreToStack(alt_, depth_);
2756 *reg_ = kNoRegister;
2757 }
2758 }
2759
2760 static constexpr intptr_t kNoDepth = kIntptrMin;
2761
2762 private:
2763 Assembler* const assembler;
2764 Register* const reg_;
2765 const intptr_t depth_;
2766 const Register alt_;
2767};
2768
2769// Same inputs as StubCodeCompiler::GenerateSubtypeTestCacheSearch with
2770// the following additional requirements:
2771// - cache_entry_reg: the address of the backing array for the cache.
2772// - TypeTestABI::kScratchReg: the Smi value of the length field for the
2773// backing array in cache_entry_reg
2774//
2775// Also expects that all the STC entry input registers have been filled.
2776static void GenerateSubtypeTestCacheHashSearch(
2777 Assembler* assembler,
2778 int n,
2779 Register null_reg,
2780 Register cache_entry_reg,
2781 Register instance_cid_or_sig_reg,
2782 Register instance_type_args_reg,
2783 Register parent_fun_type_args_reg,
2784 Register delayed_type_args_reg,
2785 Register cache_entry_end_reg,
2786 Register cache_contents_size_reg,
2787 Register probe_distance_reg,
2788    const StubCodeCompiler::STCSearchExitGenerator& gen_found,
2789 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
2790 // Since the test entry size is a power of 2, we can use shr to divide.
2791 const intptr_t kTestEntryLengthLog2 =
2792 Utils::ShiftForPowerOfTwo(target::SubtypeTestCache::kTestEntryLength);
2793
2794 // Before we finish calculating the initial probe entry, we'll need the
2795 // starting cache entry and the number of entries. We'll store these in
2796 // [cache_contents_size_reg] and [probe_distance_reg] (or their equivalent
2797 // stack slots), respectively.
2798 __ Comment("Hash cache traversal");
2799 __ Comment("Calculating number of entries");
2800 // The array length is a Smi so it needs to be untagged.
2801 __ SmiUntag(TypeTestABI::kScratchReg);
2802 __ LsrImmediate(TypeTestABI::kScratchReg, kTestEntryLengthLog2);
2803 if (probe_distance_reg != kNoRegister) {
2804 __ MoveRegister(probe_distance_reg, TypeTestABI::kScratchReg);
2805 } else {
2806 __ PushRegister(TypeTestABI::kScratchReg);
2807 }
2808
2809 __ Comment("Calculating starting entry address");
2810 __ AddImmediate(cache_entry_reg,
2811 target::Array::data_offset() - kHeapObjectTag);
2812 if (cache_contents_size_reg != kNoRegister) {
2813 __ MoveRegister(cache_contents_size_reg, cache_entry_reg);
2814 } else {
2815 __ PushRegister(cache_entry_reg);
2816 }
2817
2818 __ Comment("Calculating end of entries address");
2819 __ LslImmediate(TypeTestABI::kScratchReg,
2820 kTestEntryLengthLog2 + target::kCompressedWordSizeLog2);
2821 __ AddRegisters(TypeTestABI::kScratchReg, cache_entry_reg);
2822 if (cache_entry_end_reg != kNoRegister) {
2823 __ MoveRegister(cache_entry_end_reg, TypeTestABI::kScratchReg);
2824 } else {
2825 __ PushRegister(TypeTestABI::kScratchReg);
2826 }
2827
2828 // At this point, the stack is in the following order, if the corresponding
2829 // value doesn't have a register assignment:
2830 // <number of total entries in cache array>
2831 // <cache array entries start>
2832 // <cache array entries end>
2833 // --------- top of stack
2834 //
2835 // and after calculating the initial entry, we'll replace them as follows:
2836 // <probe distance>
2837 // <-cache array contents size> (note this is _negative_)
2838 // <cache array entries end>
2839 // ---------- top of stack
2840 //
2841 // So name them according to their later use.
2842 intptr_t kProbeDistanceDepth = StackRegisterScope::kNoDepth;
2843 intptr_t kHashStackElements = 0;
2844 if (probe_distance_reg == kNoRegister) {
2845 kProbeDistanceDepth = 0;
2846 kHashStackElements++;
2847 }
2848 intptr_t kCacheContentsSizeDepth = StackRegisterScope::kNoDepth;
2849 if (cache_contents_size_reg == kNoRegister) {
2850 kProbeDistanceDepth++;
2851 kHashStackElements++;
2852 kCacheContentsSizeDepth = 0;
2853 }
2854 intptr_t kCacheArrayEndDepth = StackRegisterScope::kNoDepth;
2855 if (cache_entry_end_reg == kNoRegister) {
2856 kProbeDistanceDepth++;
2857 kCacheContentsSizeDepth++;
2858 kHashStackElements++;
2859 kCacheArrayEndDepth = 0;
2860 }
2861
2862 // After this point, any exits should go through one of these two labels,
2863 // which will pop the extra stack elements pushed above.
2864 Label found, not_found;
2865
2866 // When retrieving hashes from objects below, note that a hash of 0 means
2867 // the hash hasn't been computed yet and we need to go to runtime.
2868 auto get_abstract_type_hash = [&](Register dst, Register src,
2869 const char* name) {
2870 ASSERT(dst != kNoRegister);
2871 ASSERT(src != kNoRegister);
2872 __ Comment("Loading %s type hash", name);
2873 __ LoadFromSlot(dst, src, Slot::AbstractType_hash());
2874 __ SmiUntag(dst);
2875 __ CompareImmediate(dst, 0);
2876 __ BranchIf(EQUAL, &not_found);
2877 };
2878 auto get_type_arguments_hash = [&](Register dst, Register src,
2879 const char* name) {
2880 ASSERT(dst != kNoRegister);
2881 ASSERT(src != kNoRegister);
2882 Label done;
2883 __ Comment("Loading %s type arguments hash", name);
2884 // Preload the hash value for TypeArguments::null() so control can jump
2885 // to done if null.
2886 __ LoadImmediate(dst, TypeArguments::kAllDynamicHash);
2887 __ CompareRegisters(src, null_reg);
2888 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
2889 __ LoadFromSlot(dst, src, Slot::TypeArguments_hash());
2890 __ SmiUntag(dst);
2891 __ CompareImmediate(dst, 0);
2892 __ BranchIf(EQUAL, &not_found);
2893 __ Bind(&done);
2894 };
2895
2896 __ Comment("Hash the entry inputs");
2897 {
2898 Label done;
2899 // Assume a Smi tagged instance cid to avoid a branch in the common case.
2900 __ MoveRegister(cache_entry_reg, instance_cid_or_sig_reg);
2901 __ SmiUntag(cache_entry_reg);
2902 __ BranchIfSmi(instance_cid_or_sig_reg, &done, Assembler::kNearJump);
2903 get_abstract_type_hash(cache_entry_reg, instance_cid_or_sig_reg,
2904 "closure signature");
2905 __ Bind(&done);
2906 }
2907 if (n >= 7) {
2908 get_abstract_type_hash(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
2909 "destination");
2910 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2911 }
2912 if (n >= 6) {
2913 get_type_arguments_hash(TypeTestABI::kScratchReg, delayed_type_args_reg,
2914 "delayed");
2915 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2916 }
2917 if (n >= 5) {
2918 get_type_arguments_hash(TypeTestABI::kScratchReg, parent_fun_type_args_reg,
2919 "parent function");
2920 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2921 }
2922 if (n >= 4) {
2923 get_type_arguments_hash(TypeTestABI::kScratchReg,
2925 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2926 }
2927 if (n >= 3) {
2928 get_type_arguments_hash(TypeTestABI::kScratchReg,
2930 "instantiator");
2931 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2932 }
2933 if (n >= 2) {
2934 get_type_arguments_hash(TypeTestABI::kScratchReg, instance_type_args_reg,
2935 "instance");
2936 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2937 }
2938 __ FinalizeHash(cache_entry_reg);
2939
2940 // This requires the number of entries in a hash cache to be a power of 2.
2941 __ Comment("Converting hash to probe entry index");
2942 {
2943 StackRegisterScope scope(assembler, &probe_distance_reg,
2944 kProbeDistanceDepth, TypeTestABI::kScratchReg);
2945 // The entry count is not needed after this point; create the mask in place.
2946 __ AddImmediate(probe_distance_reg, -1);
2947 __ AndRegisters(cache_entry_reg, probe_distance_reg);
2948 // Now set the register to the initial probe distance in words.
2949 __ Comment("Set initial probe distance");
2950 __ LoadImmediate(probe_distance_reg,
2951 target::kCompressedWordSize *
2952 target::SubtypeTestCache::kTestEntryLength);
2953 }
2954
2955 // Now cache_entry_reg is the starting probe entry index.
2956 __ Comment("Converting probe entry index to probe entry address");
2957 {
2958 StackRegisterScope scope(assembler, &cache_contents_size_reg,
2959 kCacheContentsSizeDepth, TypeTestABI::kScratchReg);
2960 __ LslImmediate(cache_entry_reg,
2961 kTestEntryLengthLog2 + target::kCompressedWordSizeLog2);
2962 __ AddRegisters(cache_entry_reg, cache_contents_size_reg);
2963 // Now set the register to the negated size of the cache contents in words.
2964 __ Comment("Set negated cache contents size");
2965 if (cache_entry_end_reg != kNoRegister) {
2966 __ SubRegisters(cache_contents_size_reg, cache_entry_end_reg);
2967 } else {
2968 __ LoadFromStack(TMP, kCacheArrayEndDepth);
2969 __ SubRegisters(cache_contents_size_reg, TMP);
2970 }
2971 }
2972
2973 Label loop, next_iteration;
2974 __ Bind(&loop);
2975  GenerateSubtypeTestCacheLoopBody(
2976 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
2977 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
2978 &found, &not_found, &next_iteration);
2979 __ Bind(&next_iteration);
2980 __ Comment("Move to next entry");
2981 {
2982 StackRegisterScope scope(assembler, &probe_distance_reg,
2983 kProbeDistanceDepth, TypeTestABI::kScratchReg);
2984 __ AddRegisters(cache_entry_reg, probe_distance_reg);
2985 __ Comment("Adjust probe distance");
2986 __ AddImmediate(probe_distance_reg,
2987 target::kCompressedWordSize *
2988 target::SubtypeTestCache::kTestEntryLength);
2989 }
2990 __ Comment("Check for leaving array");
2991 // Make sure we haven't run off the array.
2992 if (cache_entry_end_reg != kNoRegister) {
2993 __ CompareRegisters(cache_entry_reg, cache_entry_end_reg);
2994 } else {
2995 __ CompareToStack(cache_entry_reg, kCacheArrayEndDepth);
2996 }
2997 __ BranchIf(LESS, &loop, Assembler::kNearJump);
2998 __ Comment("Wrap around to start of entries");
2999 // Add the negated size of the cache contents.
3000 if (cache_contents_size_reg != kNoRegister) {
3001 __ AddRegisters(cache_entry_reg, cache_contents_size_reg);
3002 } else {
3003 __ LoadFromStack(TypeTestABI::kScratchReg, kCacheContentsSizeDepth);
3004 __ AddRegisters(cache_entry_reg, TypeTestABI::kScratchReg);
3005 }
3006 __ Jump(&loop, Assembler::kNearJump);
3007
3008 __ Bind(&found);
3009 __ Comment("Hash found");
3010 __ Drop(kHashStackElements);
3011 gen_found(assembler, n);
3012 __ Bind(&not_found);
3013 __ Comment("Hash not found");
3014 __ Drop(kHashStackElements);
3015 gen_not_found(assembler, n);
3016}
3017
3018// Same inputs as StubCodeCompiler::GenerateSubtypeTestCacheSearch with
3019// the following additional requirement:
3020// - cache_entry_reg: the address of the backing array for the cache.
3021//
3022// Also expects that all the STC entry input registers have been filled.
3023static void GenerateSubtypeTestCacheLinearSearch(
3024 Assembler* assembler,
3025 int n,
3026 Register null_reg,
3027 Register cache_entry_reg,
3028 Register instance_cid_or_sig_reg,
3029 Register instance_type_args_reg,
3030 Register parent_fun_type_args_reg,
3031 Register delayed_type_args_reg,
3032    const StubCodeCompiler::STCSearchExitGenerator& gen_found,
3033 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
3034 __ Comment("Linear cache traversal");
3035 __ AddImmediate(cache_entry_reg,
3036 target::Array::data_offset() - kHeapObjectTag);
3037
3038 Label found, not_found, loop, next_iteration;
3039 __ Bind(&loop);
3040  GenerateSubtypeTestCacheLoopBody(
3041 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3042 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3043 &found, &not_found, &next_iteration);
3044 __ Bind(&next_iteration);
3045 __ Comment("Next iteration");
3046 __ AddImmediate(
3047 cache_entry_reg,
3048 target::kCompressedWordSize * target::SubtypeTestCache::kTestEntryLength);
3049 __ Jump(&loop, Assembler::kNearJump);
3050
3051 __ Bind(&found);
3052 __ Comment("Linear found");
3053 gen_found(assembler, n);
3054 __ Bind(&not_found);
3055 __ Comment("Linear not found");
3056 gen_not_found(assembler, n);
3057}
3058
3059void StubCodeCompiler::GenerateSubtypeTestCacheSearch(
3060 Assembler* assembler,
3061 int n,
3062 Register null_reg,
3063 Register cache_entry_reg,
3064 Register instance_cid_or_sig_reg,
3065 Register instance_type_args_reg,
3066 Register parent_fun_type_args_reg,
3067 Register delayed_type_args_reg,
3068 Register cache_entry_end_reg,
3069 Register cache_contents_size_reg,
3070 Register probe_distance_reg,
3071    const StubCodeCompiler::STCSearchExitGenerator& gen_found,
3072 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
3073#if defined(DEBUG)
3074 RegisterSet input_regs;
3075 ASSERT(null_reg != kNoRegister);
3076 input_regs.AddRegister(null_reg);
3077 ASSERT(cache_entry_reg != kNoRegister);
3078 ASSERT(!input_regs.ContainsRegister(cache_entry_reg));
3079 input_regs.AddRegister(cache_entry_reg);
3080 ASSERT(instance_cid_or_sig_reg != kNoRegister);
3081 ASSERT(!input_regs.ContainsRegister(instance_cid_or_sig_reg));
3082 input_regs.AddRegister(instance_cid_or_sig_reg);
3083 if (n >= 2) {
3084 ASSERT(instance_type_args_reg != kNoRegister);
3085 ASSERT(!input_regs.ContainsRegister(instance_type_args_reg));
3086 input_regs.AddRegister(instance_type_args_reg);
3087 }
3088 if (n >= 5) {
3089 ASSERT(parent_fun_type_args_reg != kNoRegister);
3090 ASSERT(!input_regs.ContainsRegister(parent_fun_type_args_reg));
3091 input_regs.AddRegister(parent_fun_type_args_reg);
3092 }
3094 if (n >= 6) {
3095 ASSERT(delayed_type_args_reg != kNoRegister);
3096 ASSERT(!input_regs.ContainsRegister(delayed_type_args_reg));
3097 input_regs.AddRegister(delayed_type_args_reg);
3098 }
3099 if (cache_entry_end_reg != kNoRegister) {
3100 ASSERT(!input_regs.ContainsRegister(cache_entry_end_reg));
3101 input_regs.AddRegister(cache_entry_end_reg);
3102 }
3103 if (cache_contents_size_reg != kNoRegister) {
3104 ASSERT(!input_regs.ContainsRegister(cache_contents_size_reg));
3105 input_regs.AddRegister(cache_contents_size_reg);
3106 }
3107 if (probe_distance_reg != kNoRegister) {
3108 ASSERT(!input_regs.ContainsRegister(probe_distance_reg));
3109 input_regs.AddRegister(probe_distance_reg);
3110 }
3111 // We can allow the use of the registers below only if we're not expecting
3112 // them as an inspected input.
3113 if (n >= 3) {
3114 ASSERT(!input_regs.ContainsRegister(
3115        TypeTestABI::kInstantiatorTypeArgumentsReg));
3116 }
3117 if (n >= 4) {
3118 ASSERT(
3119        !input_regs.ContainsRegister(TypeTestABI::kFunctionTypeArgumentsReg));
3120 }
3121 if (n >= 7) {
3122    ASSERT(!input_regs.ContainsRegister(TypeTestABI::kDstTypeReg));
3123 }
3124 // We use this as a scratch, so it has to be distinct from the others.
3125  ASSERT(!input_regs.ContainsRegister(TypeTestABI::kScratchReg));
3126
3127 // Verify the STC we received has exactly as many inputs as this stub expects.
3128 Label search_stc;
3129  __ LoadFromSlot(TypeTestABI::kScratchReg, TypeTestABI::kSubtypeTestCacheReg,
3130 Slot::SubtypeTestCache_num_inputs());
3131 __ CompareImmediate(TypeTestABI::kScratchReg, n);
3132 __ BranchIf(EQUAL, &search_stc, Assembler::kNearJump);
3133 __ Breakpoint();
3134 __ Bind(&search_stc);
3135#endif
3136
3137 __ LoadAcquireCompressedFromOffset(
3138 cache_entry_reg, TypeTestABI::kSubtypeTestCacheReg,
3139 target::SubtypeTestCache::cache_offset() - kHeapObjectTag);
3140
3141 // Fill in all the STC input registers.
3142 Label initialized, not_closure;
3143 if (n >= 3) {
3144 __ LoadClassIdMayBeSmi(instance_cid_or_sig_reg, TypeTestABI::kInstanceReg);
3145 } else {
3146 // If the type is fully instantiated, then it can be determined at compile
3147 // time whether Smi is a subtype of the type or not. Thus, this code should
3148 // never be called with a Smi instance.
3149 __ LoadClassId(instance_cid_or_sig_reg, TypeTestABI::kInstanceReg);
3150 }
3151 __ CompareImmediate(instance_cid_or_sig_reg, kClosureCid);
3152 __ BranchIf(NOT_EQUAL, &not_closure, Assembler::kNearJump);
3153
3154 // Closure handling.
3155 {
3156 __ Comment("Closure");
3157 __ LoadCompressed(instance_cid_or_sig_reg,
3158 FieldAddress(TypeTestABI::kInstanceReg,
3159 target::Closure::function_offset()));
3160 __ LoadCompressed(instance_cid_or_sig_reg,
3161 FieldAddress(instance_cid_or_sig_reg,
3162 target::Function::signature_offset()));
3163 if (n >= 2) {
3164 __ LoadCompressed(
3165 instance_type_args_reg,
3166 FieldAddress(TypeTestABI::kInstanceReg,
3167 target::Closure::instantiator_type_arguments_offset()));
3168 }
3169 if (n >= 5) {
3170 __ LoadCompressed(
3171 parent_fun_type_args_reg,
3172 FieldAddress(TypeTestABI::kInstanceReg,
3173 target::Closure::function_type_arguments_offset()));
3174 }
3175 if (n >= 6) {
3176 __ LoadCompressed(
3177 delayed_type_args_reg,
3178 FieldAddress(TypeTestABI::kInstanceReg,
3179 target::Closure::delayed_type_arguments_offset()));
3180 }
3181
3182 __ Jump(&initialized, Assembler::kNearJump);
3183 }
3184
3185 // Non-Closure handling.
3186 {
3187 __ Comment("Non-Closure");
3188 __ Bind(&not_closure);
3189 if (n >= 2) {
3190 Label has_no_type_arguments;
3191 __ LoadClassById(TypeTestABI::kScratchReg, instance_cid_or_sig_reg);
3192 __ MoveRegister(instance_type_args_reg, null_reg);
3193 __ LoadFieldFromOffset(
3194          TypeTestABI::kScratchReg, TypeTestABI::kScratchReg,
3195 target::Class::host_type_arguments_field_offset_in_words_offset(),
3196 kFourBytes);
3197 __ CompareImmediate(TypeTestABI::kScratchReg,
3198 target::Class::kNoTypeArguments, kFourBytes);
3199 __ BranchIf(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
3200 __ LoadIndexedCompressed(instance_type_args_reg,
3201                               TypeTestABI::kInstanceReg, 0,
3202                               TypeTestABI::kScratchReg);
3203 __ Bind(&has_no_type_arguments);
3204 __ Comment("No type arguments");
3205 }
3206 __ SmiTag(instance_cid_or_sig_reg);
3207 if (n >= 5) {
3208 __ MoveRegister(parent_fun_type_args_reg, null_reg);
3209 }
3210 if (n >= 6) {
3211 __ MoveRegister(delayed_type_args_reg, null_reg);
3212 }
3213 }
3214
3215 __ Bind(&initialized);
3216 // There is a maximum size for linear caches that is smaller than the size
3217 // of any hash-based cache, so we check the size of the backing array to
3218 // determine if this is a linear or hash-based cache.
3219 //
3220 // We load it into TypeTestABI::kScratchReg as the hash search code expects
3221 // it there.
3222 Label is_hash;
3223 __ LoadFromSlot(TypeTestABI::kScratchReg, cache_entry_reg,
3224 Slot::Array_length());
3225 __ CompareImmediate(TypeTestABI::kScratchReg,
3227 __ BranchIf(GREATER, &is_hash);
3228
3229  GenerateSubtypeTestCacheLinearSearch(
3230 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3231 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3232 gen_found, gen_not_found);
3233
3234 __ Bind(&is_hash);
3235  GenerateSubtypeTestCacheHashSearch(
3236 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3237 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3238 cache_entry_end_reg, cache_contents_size_reg, probe_distance_reg,
3239 gen_found, gen_not_found);
3240}
3241#endif
3242
3243// See comment on [GenerateSubtypeNTestCacheStub].
3244void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
3245 GenerateSubtypeNTestCacheStub(assembler, 1);
3246}
3247
3248// See comment on [GenerateSubtypeNTestCacheStub].
3249void StubCodeCompiler::GenerateSubtype2TestCacheStub() {
3250 GenerateSubtypeNTestCacheStub(assembler, 2);
3251}
3252
3253// See comment on [GenerateSubtypeNTestCacheStub].
3254void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
3255 GenerateSubtypeNTestCacheStub(assembler, 3);
3256}
3257
3258// See comment on [GenerateSubtypeNTestCacheStub].
3259void StubCodeCompiler::GenerateSubtype4TestCacheStub() {
3260 GenerateSubtypeNTestCacheStub(assembler, 4);
3261}
3262
3263// See comment on [GenerateSubtypeNTestCacheStub].
3264void StubCodeCompiler::GenerateSubtype6TestCacheStub() {
3265 GenerateSubtypeNTestCacheStub(assembler, 6);
3266}
3267
3268// See comment on [GenerateSubtypeNTestCacheStub].
3269void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
3270 GenerateSubtypeNTestCacheStub(assembler, 7);
3271}
3272
3273} // namespace compiler
3274
3275} // namespace dart
static constexpr Register kDstNameReg
static constexpr intptr_t kFunctionTAVSlotFromFp
static constexpr intptr_t kDstTypeSlotFromFp
static constexpr Register kSubtypeTestReg
static constexpr intptr_t kInstanceSlotFromFp
static constexpr intptr_t kInstantiatorTAVSlotFromFp
static constexpr Register kObjectReg
static constexpr Register kSubTypeReg
static constexpr Register kSuperTypeReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kDstNameReg
static constexpr Register kSuspendStateReg
static constexpr Register kTempReg
static constexpr Register kResultReg
static constexpr FpuRegister kValueReg
static constexpr Register kDestinationReg
static constexpr Register kSrcFrameReg
static constexpr Register kFrameSizeReg
static constexpr Register kSourceReg
static constexpr Register kTempReg
static constexpr Register kDstFrameReg
static constexpr Register kResultReg
static constexpr Register kRecognizedKindReg
static constexpr FpuRegister kInputReg
static constexpr Register kArgsReg
static constexpr Register kFieldReg
static constexpr Register kResultReg
static constexpr Register kInstanceReg
static constexpr Register kAddressReg
static constexpr Register kScratchReg
static constexpr Register kAddressReg
static constexpr Register kScratchReg
static constexpr Register kResultReg
static constexpr Register kFieldReg
static constexpr Register kTypeArgsReg
static constexpr Register kEntryStartReg
static constexpr intptr_t kSavedRegisters
static constexpr Register kCurrentEntryIndexReg
static constexpr Register kProbeMaskReg
static constexpr Register kProbeDistanceReg
static constexpr Register kFunctionTypeArgumentsReg
Definition constants.h:38
static constexpr Register kTypeReg
Definition constants.h:34
static constexpr Register kScratchReg
Definition constants.h:41
static constexpr Register kInstantiatorTypeArgumentsReg
Definition constants.h:36
static constexpr Register kResultTypeReg
Definition constants.h:40
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kScratchReg
static constexpr Register kUninstantiatedTypeArgumentsReg
static constexpr Register kResultTypeArgumentsReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kFieldReg
static constexpr intptr_t kObjectAlignmentLog2
static constexpr intptr_t kObjectAlignment
static constexpr Register kStackTraceReg
static constexpr Register kExceptionReg
static constexpr Register kSrcFrameReg
static constexpr Register kDstFrameReg
static constexpr Register kFrameSizeReg
static constexpr Register kSuspendStateReg
static constexpr Register kExceptionReg
static constexpr Register kTempReg
static constexpr Register kResumePcReg
static constexpr Register kStackTraceReg
static constexpr Register kSuspendStateReg
static constexpr Register kSrcFrameReg
static constexpr Register kFunctionDataReg
static constexpr Register kSuspendStateReg
static constexpr intptr_t kResumePcDistance
static constexpr Register kTempReg
static constexpr Register kArgumentReg
static constexpr Register kDstFrameReg
static constexpr Register kTypeArgsReg
static constexpr Register kFrameSizeReg
static constexpr Register kExceptionReg
static constexpr Register kSubtypeTestCacheReg
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
static constexpr Register kSubtypeTestCacheResultReg
static constexpr Register kScratchReg
static constexpr Register kInstanceOfResultReg
#define TYPED_DATA_ALLOCATION_STUB(clazz)
#define EMIT_BOX_ALLOCATION(Name)
#define VM_TYPE_TESTING_STUB_CODE_LIST(V)
#define NOT_IN_PRODUCT(code)
Definition globals.h:84