stub_code_compiler.cc
1// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
6#include "vm/flags.h"
7#include "vm/globals.h"
8
9// For `StubCodeCompiler::GenerateAllocateUnhandledExceptionStub`
11
12#define SHOULD_NOT_INCLUDE_RUNTIME
13
15
16#include "vm/code_descriptors.h"
20#include "vm/stack_frame.h"
21
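// Note: in the stub generators below, `__` expands to `assembler->` (see the
// #define that follows), so the stub bodies read like assembly listings while
// remaining ordinary C++ calls on the Assembler.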
22#define __ assembler->
23
24namespace dart {
25namespace compiler {
26
28 Register cpu_register) {
30
31 intptr_t slots_from_fp = target::frame_layout.param_end_from_fp + 1;
32 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
33 Register reg = static_cast<Register>(i);
34 if (reg == cpu_register) break;
36 slots_from_fp++;
37 }
38 }
39 return slots_from_fp;
40}
41
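// Most of the simple runtime-call stubs that follow share one pattern: enter a
// stub frame, push a null object to reserve a slot for the result, push the
// arguments, call the runtime entry, drop the arguments, pop the result (when
// it is used), leave the frame, and return.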
42void StubCodeCompiler::GenerateInitStaticFieldStub() {
43 __ EnterStubFrame();
44 __ PushObject(NullObject()); // Make room for result.
46 __ CallRuntime(kInitStaticFieldRuntimeEntry, /*argument_count=*/1);
47 __ Drop(1);
49 __ LeaveStubFrame();
50 __ Ret();
51}
52
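// Initializes a late static field by calling the field's initializer function.
// For a late final field, the stored value is re-checked against the sentinel
// after the initializer returns; if the field was assigned while the
// initializer was running, the stub calls
// kLateFieldAssignedDuringInitializationErrorRuntimeEntry and does not return.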
53void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final,
54 bool is_shared) {
59
60 __ EnterStubFrame();
61
62 __ Comment("Calling initializer function");
63 __ PushRegister(kFieldReg);
64 __ LoadCompressedFieldFromOffset(
66 if (!FLAG_precompiled_mode) {
67 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
69 // Load a GC-safe value for the arguments descriptor (unused but tagged).
70 __ LoadImmediate(ARGS_DESC_REG, 0);
71 }
73 __ MoveRegister(kResultReg, CallingConventions::kReturnReg);
74 __ PopRegister(kFieldReg);
75 __ LoadStaticFieldAddress(kAddressReg, kFieldReg, kScratchReg, is_shared);
76
77 Label throw_exception;
78 if (is_final) {
79 __ Comment("Checking that initializer did not set late final field");
80 __ LoadFromOffset(kScratchReg, kAddressReg, 0);
81 __ CompareObject(kScratchReg, SentinelObject());
82 __ BranchIf(NOT_EQUAL, &throw_exception);
83 }
84
85 __ StoreToOffset(kResultReg, kAddressReg, 0);
86 __ LeaveStubFrame();
87 __ Ret();
88
89 if (is_final) {
90#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
91 // We are jumping over LeaveStubFrame, so restore the LR state to match
92 // the state at the jump point.
93 __ set_lr_state(compiler::LRState::OnEntry().EnterFrame());
94#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
95 __ Bind(&throw_exception);
96 __ PushObject(NullObject()); // Make room for (unused) result.
97 __ PushRegister(kFieldReg);
98 __ CallRuntime(kLateFieldAssignedDuringInitializationErrorRuntimeEntry,
99 /*argument_count=*/1);
100 __ Breakpoint();
101 }
102}
103
104void StubCodeCompiler::GenerateInitLateStaticFieldStub() {
105 GenerateInitLateStaticFieldStub(/*is_final=*/false, /*is_shared=*/false);
106}
107
108void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub() {
109 GenerateInitLateStaticFieldStub(/*is_final=*/true, /*is_shared=*/false);
110}
111
112void StubCodeCompiler::GenerateInitSharedLateStaticFieldStub() {
113 GenerateInitLateStaticFieldStub(/*is_final=*/false, /*is_shared=*/true);
114}
115
116void StubCodeCompiler::GenerateInitSharedLateFinalStaticFieldStub() {
117 GenerateInitLateStaticFieldStub(/*is_final=*/true, /*is_shared=*/true);
118}
119
120void StubCodeCompiler::GenerateInitInstanceFieldStub() {
121 __ EnterStubFrame();
122 __ PushObject(NullObject()); // Make room for result.
123 __ PushRegistersInOrder(
125 __ CallRuntime(kInitInstanceFieldRuntimeEntry, /*argument_count=*/2);
126 __ Drop(2);
128 __ LeaveStubFrame();
129 __ Ret();
130}
131
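// Instance counterpart of the late static field initializer above: the
// initializer function is called with the instance as its argument, the field
// offset is loaded from the Field object, and the result is stored into the
// instance with a write barrier. Late final fields get the same sentinel
// re-check as in the static case.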
132void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
133 const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
137
138 __ EnterStubFrame();
139 // Save kFieldReg and kInstanceReg for later.
140 // Call initializer function.
141 __ PushRegistersInOrder({kFieldReg, kInstanceReg, kInstanceReg});
142
143 static_assert(
145 "Result is a return value from initializer");
146
147 __ LoadCompressedFieldFromOffset(
150 if (!FLAG_precompiled_mode) {
151 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
153 // Load a GC-safe value for the arguments descriptor (unused but tagged).
154 __ LoadImmediate(ARGS_DESC_REG, 0);
155 }
157 __ Drop(1); // Drop argument.
158
159 __ PopRegisterPair(kInstanceReg, kFieldReg);
160 __ LoadCompressedFieldFromOffset(
161 kScratchReg, kFieldReg, target::Field::host_offset_or_field_id_offset());
162#if defined(DART_COMPRESSED_POINTERS)
163 // TODO(compressed-pointers): Variant of LoadFieldAddressForRegOffset that
164 // ignores upper bits?
165 __ SmiUntag(kScratchReg);
166 __ SmiTag(kScratchReg);
167#endif
168 __ LoadCompressedFieldAddressForRegOffset(kAddressReg, kInstanceReg,
169 kScratchReg);
170
171 Label throw_exception;
172 if (is_final) {
173 __ LoadCompressed(kScratchReg, Address(kAddressReg, 0));
174 __ CompareObject(kScratchReg, SentinelObject());
175 __ BranchIf(NOT_EQUAL, &throw_exception);
176 }
177
178#if defined(TARGET_ARCH_IA32)
179 // On IA32 StoreIntoObject clobbers value register, so scratch
180 // register is used in StoreIntoObject to preserve kResultReg.
181 __ MoveRegister(kScratchReg, InitInstanceFieldABI::kResultReg);
182 __ StoreIntoObject(kInstanceReg, Address(kAddressReg, 0), kScratchReg);
183#else
184 __ StoreCompressedIntoObject(kInstanceReg, Address(kAddressReg, 0),
186#endif // defined(TARGET_ARCH_IA32)
187
188 __ LeaveStubFrame();
189 __ Ret();
190
191 if (is_final) {
192#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
193 // We are jumping over LeaveStubFrame, so restore the LR state to match
194 // the state at the jump point.
195 __ set_lr_state(compiler::LRState::OnEntry().EnterFrame());
196#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
197 __ Bind(&throw_exception);
198 __ PushObject(NullObject()); // Make room for (unused) result.
199 __ PushRegister(kFieldReg);
200 __ CallRuntime(kLateFieldAssignedDuringInitializationErrorRuntimeEntry,
201 /*argument_count=*/1);
202 __ Breakpoint();
203 }
204}
205
206void StubCodeCompiler::GenerateInitLateInstanceFieldStub() {
207 GenerateInitLateInstanceFieldStub(/*is_final=*/false);
208}
209
210void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub() {
211 GenerateInitLateInstanceFieldStub(/*is_final=*/true);
212}
213
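// The throw/re-throw and error-reporting stubs below do not return: the
// runtime entry transfers control to an exception handler, so the trailing
// Breakpoint() only marks code that should be unreachable.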
214void StubCodeCompiler::GenerateThrowStub() {
215 __ EnterStubFrame();
216 __ PushObject(NullObject()); // Make room for (unused) result.
217 __ PushRegister(ThrowABI::kExceptionReg);
218 __ CallRuntime(kThrowRuntimeEntry, /*argument_count=*/1);
219 __ Breakpoint();
220}
221
222void StubCodeCompiler::GenerateReThrowStub() {
223 __ EnterStubFrame();
224 __ PushObject(NullObject()); // Make room for (unused) result.
225 __ PushRegistersInOrder(
227 __ PushImmediate(Smi::RawValue(0)); // Do not bypass debugger.
228 __ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/3);
229 __ Breakpoint();
230}
231
232void StubCodeCompiler::GenerateAssertBooleanStub() {
233 __ EnterStubFrame();
234 __ PushObject(NullObject()); // Make room for (unused) result.
235 __ PushRegister(AssertBooleanABI::kObjectReg);
236 __ CallRuntime(kNonBoolTypeErrorRuntimeEntry, /*argument_count=*/1);
237 __ Breakpoint();
238}
239
240void StubCodeCompiler::GenerateAssertSubtypeStub() {
241 __ EnterStubFrame();
247 __ CallRuntime(kSubtypeCheckRuntimeEntry, /*argument_count=*/5);
248 __ Drop(5); // Drop unused result as well as arguments.
249 __ LeaveStubFrame();
250 __ Ret();
251}
252
253void StubCodeCompiler::GenerateAssertAssignableStub() {
254#if !defined(TARGET_ARCH_IA32)
255 __ Breakpoint();
256#else
257 __ EnterStubFrame();
258 __ PushObject(Object::null_object()); // Make room for the result.
259 __ pushl(Address(
261 __ pushl(Address(
263 __ pushl(Address(
264 EBP,
266 __ pushl(Address(EBP, target::kWordSize *
268 __ PushRegistersInOrder({AssertAssignableStubABI::kDstNameReg,
271 __ CallRuntime(kTypeCheckRuntimeEntry, /*argument_count=*/7);
272 __ Drop(8);
273 __ LeaveStubFrame();
274 __ Ret();
275#endif
276}
277
278// Instantiate type arguments from instantiator and function type args.
279// Inputs:
280// - InstantiationABI::kUninstantiatedTypeArgumentsReg: tav to instantiate
281// - InstantiationABI::kInstantiatorTypeArgumentsReg: instantiator tav
282// - InstantiationABI::kFunctionTypeArgumentsReg: function tav
283// Outputs:
284// - InstantiationABI::kResultTypeArgumentsReg: instantiated tav
285// Clobbers:
286// - InstantiationABI::kScratchReg
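// In outline: the stub first searches the instantiation cache attached to the
// uninstantiated type arguments. Small caches are laid out as a linear array
// of entries; larger ones as a hash table probed with the hashes of the
// instantiator and function TAVs (on IA32 the hash-based search is skipped and
// the runtime is called instead). Only on a cache miss does the stub call
// kInstantiateTypeArgumentsRuntimeEntry.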
287void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
288 // We only need the offset of the current entry until we either call the
289 // runtime or retrieve the instantiated type arguments from the entry to put
290 // in the result register, so we use the result register to store that offset.
292
293 // The registers that need spilling prior to traversing a hash-based cache.
294 const RegisterSet saved_registers(InstantiateTAVInternalRegs::kSavedRegisters,
295 /*fpu_register_mask=*/0);
296
299 "Must handle possibility of inst tav reg being spilled");
300 static_assert(((1 << InstantiationABI::kFunctionTypeArgumentsReg) &
302 "Must handle possibility of function tav reg being spilled");
303
304 // Takes labels for the cache hit/miss cases (to allow for restoring spilled
305 // registers).
306 auto check_entry = [&](compiler::Label* found, compiler::Label* not_found) {
307 __ Comment("Check cache entry");
308 // Use load-acquire to get the entry.
311 "sentinel is not same index as instantiator type args");
312 __ LoadAcquireCompressedFromOffset(
316 // Test for an unoccupied entry by checking for the Smi sentinel.
317 __ BranchIfSmi(InstantiationABI::kScratchReg, not_found);
318 // Otherwise it must be occupied and contain TypeArguments objects.
319 compiler::Label next;
320 __ CompareRegisters(InstantiationABI::kScratchReg,
323 __ LoadCompressed(
325 compiler::Address(kEntryReg,
328 __ CompareRegisters(InstantiationABI::kScratchReg,
330 __ BranchIf(EQUAL, found);
331 __ Bind(&next);
332 };
333
334 // Lookup cache before calling runtime.
335 __ LoadAcquireCompressedFromOffset(
339 // Go ahead and load the backing array data address into kEntryReg.
340 __ LoadFieldAddressForOffset(kEntryReg, InstantiationABI::kScratchReg,
342
343 compiler::Label linear_cache_loop, hash_cache_search, cache_hit, call_runtime;
344
345 // There is a maximum size for linear caches that is smaller than the size
346 // of any hash-based cache, so we check the size of the backing array to
347 // determine if this is a linear or hash-based cache.
349 Slot::Array_length());
350 __ CompareImmediate(
353#if defined(TARGET_ARCH_IA32)
354 // We just don't have enough registers to do hash-based cache searching in a
355 // way that doesn't overly complicate the generation code, so just go to
356 // runtime.
357 __ BranchIf(GREATER, &call_runtime);
358#else
359 __ BranchIf(GREATER, &hash_cache_search);
360#endif
361
362 __ Comment("Check linear cache");
363 // Move kEntryReg to the start of the first entry.
364 __ AddImmediate(kEntryReg, TypeArguments::Cache::kHeaderSize *
366 __ Bind(&linear_cache_loop);
367 check_entry(&cache_hit, &call_runtime);
368 __ AddImmediate(kEntryReg, TypeArguments::Cache::kEntrySize *
370 __ Jump(&linear_cache_loop, compiler::Assembler::kNearJump);
371
372#if !defined(TARGET_ARCH_IA32)
373 __ Bind(&hash_cache_search);
374 __ Comment("Check hash-based cache");
375
376 compiler::Label pop_before_success, pop_before_failure;
377 if (!saved_registers.IsEmpty()) {
378 __ Comment("Spills due to register pressure");
379 __ PushRegisters(saved_registers);
380 }
381
382 __ Comment("Calculate address of first entry");
383 __ AddImmediate(
386
387 __ Comment("Calculate probe mask");
388 __ LoadAcquireCompressedFromOffset(
391 __ LsrImmediate(
398 // Can use kEntryReg as scratch now until we're entering the loop.
399
400 // Retrieve the hash from the TAV. If the retrieved hash is 0, jumps to
401 // pop_before_failure; otherwise falls through.
402 auto retrieve_hash = [&](Register dst, Register src) {
403 Label is_not_null, done;
404 __ CompareObject(src, NullObject());
405 __ BranchIf(NOT_EQUAL, &is_not_null, compiler::Assembler::kNearJump);
406 __ LoadImmediate(dst, TypeArguments::kAllDynamicHash);
408 __ Bind(&is_not_null);
409 __ LoadFromSlot(dst, src, Slot::TypeArguments_hash());
410 __ SmiUntag(dst);
411 // If the retrieved hash is 0, then it hasn't been computed yet.
412 __ BranchIfZero(dst, &pop_before_failure);
413 __ Bind(&done);
414 };
415
416 __ Comment("Calculate initial probe from type argument vector hashes");
419 retrieve_hash(InstantiationABI::kScratchReg,
425 // Use the probe mask to get a valid entry index.
428
429 // Start off the probing distance at zero (will increment prior to use).
431
432 compiler::Label loop;
433 __ Bind(&loop);
434 __ Comment("Loop over hash cache entries");
435 // Convert the current entry index into the entry address.
437 __ MulImmediate(kEntryReg, TypeArguments::Cache::kEntrySize *
439 __ AddRegisters(kEntryReg, InstantiateTAVInternalRegs::kEntryStartReg);
440 check_entry(&pop_before_success, &pop_before_failure);
441 // Increment the probing distance and then add it to the current entry
442 // index, then mask the result with the probe mask.
448 __ Jump(&loop);
449
450 __ Bind(&pop_before_failure);
451 if (!saved_registers.IsEmpty()) {
452 __ Comment("Restore spilled registers on cache miss");
453 __ PopRegisters(saved_registers);
454 }
455#endif
456
457 // Instantiate non-null type arguments.
458 // A runtime call to instantiate the type arguments is required.
459 __ Bind(&call_runtime);
460 __ Comment("Cache miss");
461 __ EnterStubFrame();
462#if !defined(DART_ASSEMBLER_HAS_NULL_REG)
463 __ PushObject(Object::null_object()); // Make room for the result.
464#endif
465#if defined(TARGET_ARCH_ARM)
470 "Should be ordered to push arguments with one instruction");
471#endif
472 __ PushRegistersInOrder({
473#if defined(DART_ASSEMBLER_HAS_NULL_REG)
474 NULL_REG,
475#endif
479 });
480 __ CallRuntime(kInstantiateTypeArgumentsRuntimeEntry, 3);
481 __ Drop(3); // Drop 2 type vectors, and uninstantiated type.
483 __ LeaveStubFrame();
484 __ Ret();
485
486#if !defined(TARGET_ARCH_IA32)
487 __ Bind(&pop_before_success);
488 if (!saved_registers.IsEmpty()) {
489 __ Comment("Restore spilled registers on cache hit");
490 __ PopRegisters(saved_registers);
491 }
492#endif
493
494 __ Bind(&cache_hit);
495 __ Comment("Cache hit");
496 __ LoadCompressed(
498 compiler::Address(kEntryReg,
501 __ Ret();
502}
503
504void StubCodeCompiler::
505 GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub() {
507 const Register kScratch2Reg = InstantiationABI::kScratchReg;
508 // Return the instantiator type arguments if their nullability is compatible
509 // for sharing; otherwise proceed to the instantiation cache lookup.
510 compiler::Label cache_lookup;
511 __ LoadCompressedSmi(
512 kScratch1Reg,
515 __ LoadCompressedSmi(
516 kScratch2Reg,
519 __ AndRegisters(kScratch2Reg, kScratch1Reg);
520 __ CompareRegisters(kScratch2Reg, kScratch1Reg);
521 __ BranchIf(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
524 __ Ret();
525
526 __ Bind(&cache_lookup);
527 GenerateInstantiateTypeArgumentsStub();
528}
529
530void StubCodeCompiler::
531 GenerateInstantiateTypeArgumentsMayShareFunctionTAStub() {
533 const Register kScratch2Reg = InstantiationABI::kScratchReg;
534 // Return the function type arguments if their nullability is compatible
535 // for sharing; otherwise proceed to the instantiation cache lookup.
536 compiler::Label cache_lookup;
537 __ LoadCompressedSmi(
538 kScratch1Reg,
541 __ LoadCompressedSmi(
542 kScratch2Reg,
543 compiler::FieldAddress(InstantiationABI::kFunctionTypeArgumentsReg,
545 __ AndRegisters(kScratch2Reg, kScratch1Reg);
546 __ CompareRegisters(kScratch2Reg, kScratch1Reg);
547 __ BranchIf(NOT_EQUAL, &cache_lookup, compiler::Assembler::kNearJump);
550 __ Ret();
551
552 __ Bind(&cache_lookup);
553 GenerateInstantiateTypeArgumentsStub();
554}
555
557 __ EnterStubFrame();
558 __ PushObject(Object::null_object());
559 __ PushRegistersInOrder({InstantiateTypeABI::kTypeReg,
562 __ CallRuntime(kInstantiateTypeRuntimeEntry, /*argument_count=*/3);
563 __ Drop(3);
565 __ LeaveStubFrame();
566 __ Ret();
567}
568
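// Shared helper for the InstantiateType*TypeParameter stubs below: if the
// relevant TAV is null, the type parameter instantiates to `dynamic`;
// otherwise the parameter's index is loaded and used to fetch the instantiated
// type from the TAV, followed by a nullability check that may fall back to the
// runtime call at the end.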
570 Nullability nullability,
571 bool is_function_parameter) {
572 Label runtime_call, return_dynamic, type_parameter_value_is_not_type;
573
574 if (is_function_parameter) {
576 TypeArguments::null_object());
577 __ BranchIf(EQUAL, &return_dynamic);
578 __ LoadFieldFromOffset(
581 __ LoadIndexedCompressed(InstantiateTypeABI::kResultTypeReg,
585 } else {
587 TypeArguments::null_object());
588 __ BranchIf(EQUAL, &return_dynamic);
589 __ LoadFieldFromOffset(
592 __ LoadIndexedCompressed(InstantiateTypeABI::kResultTypeReg,
596 }
597
600
601 switch (nullability) {
603 __ Ret();
604 break;
606 __ CompareAbstractTypeNullabilityWith(
608 static_cast<int8_t>(Nullability::kNullable),
610 __ BranchIf(NOT_EQUAL, &runtime_call);
611 __ Ret();
612 break;
613 }
614
615 // The TAV was null, so the value of the type parameter is "dynamic".
616 __ Bind(&return_dynamic);
617 __ LoadObject(InstantiateTypeABI::kResultTypeReg, Type::dynamic_type());
618 __ Ret();
619
620 __ Bind(&runtime_call);
622}
623
624void StubCodeCompiler::
625 GenerateInstantiateTypeNonNullableClassTypeParameterStub() {
627 /*is_function_parameter=*/false);
628}
629
630void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub() {
632 /*is_function_parameter=*/false);
633}
634
635void StubCodeCompiler::
636 GenerateInstantiateTypeNonNullableFunctionTypeParameterStub() {
638 /*is_function_parameter=*/true);
639}
640
641void StubCodeCompiler::
642 GenerateInstantiateTypeNullableFunctionTypeParameterStub() {
644 /*is_function_parameter=*/true);
645}
646
647void StubCodeCompiler::GenerateInstantiateTypeStub() {
649}
650
651void StubCodeCompiler::GenerateInstanceOfStub() {
652 __ EnterStubFrame();
653 __ PushObject(NullObject()); // Make room for the result.
658 __ CallRuntime(kInstanceofRuntimeEntry, /*argument_count=*/5);
659 __ Drop(5);
661 __ LeaveStubFrame();
662 __ Ret();
663}
664
665// For use in GenerateTypeIsTopTypeForSubtyping and
666// GenerateNullIsAssignableToType.
668 Register type_reg,
669 Register scratch_reg) {
670#if defined(DEBUG)
671 compiler::Label is_type_param_or_type_or_function_type;
672 __ LoadClassIdMayBeSmi(scratch_reg, type_reg);
673 __ CompareImmediate(scratch_reg, kTypeParameterCid);
674 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
676 __ CompareImmediate(scratch_reg, kTypeCid);
677 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
679 __ CompareImmediate(scratch_reg, kFunctionTypeCid);
680 __ BranchIf(EQUAL, &is_type_param_or_type_or_function_type,
682 __ Stop("not a type or function type or type parameter");
683 __ Bind(&is_type_param_or_type_or_function_type);
684#endif
685}
686
687// Version of AbstractType::IsTopTypeForSubtyping() used when the type is not
688// known at compile time. Must be kept in sync.
689//
690// Inputs:
691// - TypeTestABI::kDstTypeReg: Destination type.
692//
693// Non-preserved scratch registers:
694// - TypeTestABI::kScratchReg (only on non-IA32 architectures)
695//
696// Outputs:
697// - TypeTestABI::kSubtypeTestCacheReg: 0 if the value is guaranteed assignable,
698// non-zero otherwise.
699//
700// All registers other than outputs and non-preserved scratches are preserved.
701void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub() {
702 // The only case where the original value of kSubtypeTestCacheReg is needed
703 // after the stub call is on IA32, where it's currently preserved on the stack
704 // before calling the stub (as it's also CODE_REG on that architecture), so we
705 // both use it as a scratch and clobber it for the return value.
706 const Register scratch1_reg = TypeTestABI::kSubtypeTestCacheReg;
707 // We reuse the first scratch register as the output register because we're
708 // always guaranteed to have a type in it (starting with kDstType), and all
709 // non-Smi ObjectPtrs are non-zero values.
710 const Register output_reg = scratch1_reg;
711#if defined(TARGET_ARCH_IA32)
712 // The remaining scratch registers are preserved and restored before exit on
713 // IA32. Because we have few registers to choose from (which are all used in
714 // TypeTestABI), use specific TypeTestABI registers.
716 // Preserve non-output scratch registers.
717 __ PushRegister(scratch2_reg);
718#else
719 const Register scratch2_reg = TypeTestABI::kScratchReg;
720#endif
721 static_assert(scratch1_reg != scratch2_reg,
722 "both scratch registers are the same");
723
724 compiler::Label check_top_type, is_top_type, done;
725 // Initialize scratch1_reg with the type to check (which also sets the
726 // output register to a non-zero value). scratch1_reg (and thus the output
727 // register) will always have a type in it from here on out.
728 __ MoveRegister(scratch1_reg, TypeTestABI::kDstTypeReg);
729 __ Bind(&check_top_type);
730 // scratch1_reg: Current type to check.
732 scratch2_reg);
733 compiler::Label is_type_ref;
734 __ CompareClassId(scratch1_reg, kTypeCid, scratch2_reg);
735 // Type parameters can't be top types themselves, though a particular
736 // instantiation may result in a top type.
737 // Function types cannot be top types.
738 __ BranchIf(NOT_EQUAL, &done);
739 __ LoadTypeClassId(scratch2_reg, scratch1_reg);
740 __ CompareImmediate(scratch2_reg, kDynamicCid);
741 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
742 __ CompareImmediate(scratch2_reg, kVoidCid);
743 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
744 compiler::Label unwrap_future_or;
745 __ CompareImmediate(scratch2_reg, kFutureOrCid);
746 __ BranchIf(EQUAL, &unwrap_future_or, compiler::Assembler::kNearJump);
747 __ CompareImmediate(scratch2_reg, kInstanceCid);
749 // Instance type isn't a top type if non-nullable.
750 __ CompareAbstractTypeNullabilityWith(
751 scratch1_reg, static_cast<int8_t>(Nullability::kNonNullable),
752 scratch2_reg);
754 __ Bind(&is_top_type);
755 __ LoadImmediate(output_reg, 0);
756 __ Bind(&done);
757#if defined(TARGET_ARCH_IA32)
758 // Restore preserved scratch registers.
759 __ PopRegister(scratch2_reg);
760#endif
761 __ Ret();
762 // An uncommon case, so off the main trunk of the function.
763 __ Bind(&unwrap_future_or);
764 __ LoadCompressedField(
765 scratch2_reg,
766 compiler::FieldAddress(scratch1_reg,
768 __ CompareObject(scratch2_reg, Object::null_object());
769 // If the arguments are null, then unwrapping gives dynamic, a top type.
770 __ BranchIf(EQUAL, &is_top_type, compiler::Assembler::kNearJump);
771 __ LoadCompressedField(
772 scratch1_reg,
775 __ Jump(&check_top_type, compiler::Assembler::kNearJump);
776}
777
778// Version of Instance::NullIsAssignableTo(other, inst_tav, fun_tav) used when
779// the destination type was not known at compile time. Must be kept in sync.
780//
781// Inputs:
782// - TypeTestABI::kInstanceReg: Object to check for assignability.
783// - TypeTestABI::kDstTypeReg: Destination type.
784// - TypeTestABI::kInstantiatorTypeArgumentsReg: Instantiator TAV.
785// - TypeTestABI::kFunctionTypeArgumentsReg: Function TAV.
786//
787// Non-preserved non-output scratch registers:
788// - TypeTestABI::kScratchReg (only on non-IA32 architectures)
789//
790// Outputs:
791// - TypeTestABI::kSubtypeTestCacheReg: 0 if the value is guaranteed assignable,
792// non-zero otherwise.
793//
794// All registers other than outputs and non-preserved scratches are preserved.
795void StubCodeCompiler::GenerateNullIsAssignableToTypeStub() {
796 // The only case where the original value of kSubtypeTestCacheReg is needed
797 // after the stub call is on IA32, where it's currently preserved on the stack
798 // before calling the stub (as it's also CODE_REG on that architecture), so we
799 // both use it as a scratch to hold the current type to inspect and also
800 // clobber it for the return value.
801 const Register kCurrentTypeReg = TypeTestABI::kSubtypeTestCacheReg;
802 // We reuse the first scratch register as the output register because we're
803 // always guaranteed to have a type in it (starting with the contents of
804 // kDstTypeReg), and all non-Smi ObjectPtrs are non-zero values.
805 const Register kOutputReg = kCurrentTypeReg;
806#if defined(TARGET_ARCH_IA32)
807 // The remaining scratch registers are preserved and restored before exit on
808 // IA32. Because we have few registers to choose from (which are all used in
809 // TypeTestABI), use specific TypeTestABI registers.
811 // Preserve non-output scratch registers.
812 __ PushRegister(kScratchReg);
813#else
814 const Register kScratchReg = TypeTestABI::kScratchReg;
815#endif
816 static_assert(kCurrentTypeReg != kScratchReg,
817 "code assumes distinct scratch registers");
818
819 compiler::Label is_assignable, done;
820 // Initialize the first scratch register (and thus the output register) with
821 // the destination type. We do this before the check to ensure the output
822 // register has a non-zero value if kInstanceReg is not null.
823 __ MoveRegister(kCurrentTypeReg, TypeTestABI::kDstTypeReg);
824 __ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
825
826 compiler::Label check_null_assignable;
827 // Skip checking the type if not null.
828 __ BranchIf(NOT_EQUAL, &done);
829 __ Bind(&check_null_assignable);
830 // kCurrentTypeReg: Current type to check.
832 kScratchReg);
833 compiler::Label is_not_type;
834 __ CompareClassId(kCurrentTypeReg, kTypeCid, kScratchReg);
835 __ BranchIf(NOT_EQUAL, &is_not_type, compiler::Assembler::kNearJump);
836 __ CompareAbstractTypeNullabilityWith(
837 kCurrentTypeReg, static_cast<int8_t>(Nullability::kNonNullable),
838 kScratchReg);
839 __ BranchIf(NOT_EQUAL, &is_assignable);
840 // FutureOr is a special case because it may have the non-nullable bit set,
841 // but FutureOr<T> functions as the union of T and Future<T>, so it must be
842 // unwrapped to see if T is nullable.
843 __ LoadTypeClassId(kScratchReg, kCurrentTypeReg);
844 __ CompareImmediate(kScratchReg, kFutureOrCid);
845 __ BranchIf(NOT_EQUAL, &done);
846 __ LoadCompressedField(
847 kScratchReg,
848 compiler::FieldAddress(kCurrentTypeReg,
850 __ CompareObject(kScratchReg, Object::null_object());
851 // If the arguments are null, then unwrapping gives the dynamic type,
852 // which can take null.
853 __ BranchIf(EQUAL, &is_assignable);
854 __ LoadCompressedField(
855 kCurrentTypeReg,
856 compiler::FieldAddress(
858 __ Jump(&check_null_assignable, compiler::Assembler::kNearJump);
859 __ Bind(&is_not_type);
860 // Null is assignable to a type parameter only if it is nullable or if the
861 // instantiation is nullable.
862 __ CompareAbstractTypeNullabilityWith(
863 kCurrentTypeReg, static_cast<int8_t>(Nullability::kNonNullable),
864 kScratchReg);
865 __ BranchIf(NOT_EQUAL, &is_assignable);
866
867 // Don't set kScratchReg in here, as on IA32 that's the function TAV reg.
868 auto handle_case = [&](Register tav) {
869 // We can reuse kCurrentTypeReg to hold the index because we no longer
870 // need the type parameter afterwards.
871 auto const kIndexReg = kCurrentTypeReg;
872 // If the TAV is null, resolving gives the (nullable) dynamic type.
873 __ CompareObject(tav, NullObject());
874 __ BranchIf(EQUAL, &is_assignable, Assembler::kNearJump);
875 // Resolve the type parameter to its instantiated type and loop.
876 __ LoadFieldFromOffset(kIndexReg, kCurrentTypeReg,
879 __ LoadIndexedCompressed(kCurrentTypeReg, tav,
881 __ Jump(&check_null_assignable);
882 };
883
884 Label function_type_param;
885 __ LoadFromSlot(kScratchReg, TypeTestABI::kDstTypeReg,
886 Slot::AbstractType_flags());
887 __ BranchIfBit(kScratchReg,
889 NOT_ZERO, &function_type_param, Assembler::kNearJump);
891 __ Bind(&function_type_param);
892#if defined(TARGET_ARCH_IA32)
893 // Function TAV is on top of stack because we're using that register as
894 // kScratchReg.
896#endif
898
899 __ Bind(&is_assignable);
900 __ LoadImmediate(kOutputReg, 0);
901 __ Bind(&done);
902#if defined(TARGET_ARCH_IA32)
903 // Restore preserved scratch registers.
904 __ PopRegister(kScratchReg);
905#endif
906 __ Ret();
907}
908
909#if !defined(TARGET_ARCH_IA32)
910// The <X>TypeTestStubs are used to test whether a given value is of a given
911// type. All variants have the same calling convention:
912//
913// Inputs (from TypeTestABI struct):
914// - kSubtypeTestCacheReg: RawSubtypeTestCache
915// - kInstanceReg: instance to test against.
916// - kInstantiatorTypeArgumentsReg : instantiator type arguments (if needed).
917// - kFunctionTypeArgumentsReg : function type arguments (if needed).
918//
919// See GenerateSubtypeNTestCacheStub for registers that may need saving by the
920// caller.
921//
922// Output (from TypeTestABI struct):
923// - kResultReg: checked instance.
924//
925// Throws if the check is unsuccessful.
926//
927// Note of warning: the caller will not populate CODE_REG, and we therefore
928// have no access to the object pool.
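// The default variants below simply tail-call a shared type-testing Code
// object loaded from the Thread rather than specializing the check.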
929void StubCodeCompiler::GenerateDefaultTypeTestStub() {
930 __ LoadFromOffset(CODE_REG, THR,
932 __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
933}
934
935// Used instead of DefaultTypeTestStub when null is assignable.
936void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
937 Label done;
938
939 // Fast case for 'null'.
940 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
941 __ BranchIf(EQUAL, &done);
942
943 __ LoadFromOffset(CODE_REG, THR,
945 __ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
946
947 __ Bind(&done);
948 __ Ret();
949}
950
951void StubCodeCompiler::GenerateTopTypeTypeTestStub() {
952 __ Ret();
953}
954
955void StubCodeCompiler::GenerateUnreachableTypeTestStub() {
956 __ Breakpoint();
957}
958
960 bool allow_null) {
961 Label done;
962
963 if (allow_null) {
964 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
965 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
966 }
967
968 auto handle_case = [&](Register tav) {
969 // If the TAV is null, then resolving the type parameter gives the dynamic
970 // type, which is a top type.
971 __ CompareObject(tav, NullObject());
972 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
973 // Resolve the type parameter to its instantiated type and tail call the
974 // instantiated type's TTS.
978 __ LoadIndexedCompressed(TypeTestABI::kScratchReg, tav,
981 __ Jump(FieldAddress(
984 };
985
986 Label function_type_param;
988 Slot::AbstractType_flags());
989 __ BranchIfBit(TypeTestABI::kScratchReg,
991 NOT_ZERO, &function_type_param, Assembler::kNearJump);
993 __ Bind(&function_type_param);
995 __ Bind(&done);
996 __ Ret();
997}
998
999void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub() {
1000 BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
1001}
1002
1003void StubCodeCompiler::GenerateTypeParameterTypeTestStub() {
1004 BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
1005}
1006
1009 __ PushObject(NullObject()); // Make room for result.
1013 __ PushObject(NullObject());
1015 __ PushImmediate(target::ToRawSmi(mode));
1016 __ CallRuntime(kTypeCheckRuntimeEntry, 7);
1017 __ Drop(1); // mode
1019 __ Drop(1); // dst_name
1022 __ PopRegister(TypeTestABI::kDstTypeReg);
1023 __ PopRegister(TypeTestABI::kInstanceReg);
1024 __ Drop(1); // Discard return value.
1025}
1026
1027void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
1028 __ LoadFromOffset(CODE_REG, THR,
1030 __ EnterStubFrame();
1032 __ LeaveStubFrame();
1033 __ Ret();
1034}
1035
1036// Used instead of LazySpecializeTypeTestStub when null is assignable.
1037void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
1038 Label done;
1039
1040 __ CompareObject(TypeTestABI::kInstanceReg, NullObject());
1041 __ BranchIf(EQUAL, &done);
1042
1043 __ LoadFromOffset(CODE_REG, THR,
1045 __ EnterStubFrame();
1047 __ LeaveStubFrame();
1048
1049 __ Bind(&done);
1050 __ Ret();
1051}
1052
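// The slow path first consults the SubtypeTestCache, if one has already been
// created, dispatching on the number of inputs recorded in the cache (2, 3, 4,
// 6, or all 7). Only on a cache miss, or when no cache exists yet, does it
// fall through to the runtime call.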
1053void StubCodeCompiler::GenerateSlowTypeTestStub() {
1054 Label done, call_runtime;
1055
1056 if (!FLAG_precompiled_mode) {
1057 __ LoadFromOffset(CODE_REG, THR,
1059 }
1060 __ EnterStubFrame();
1061
1062 // If the subtype-cache is null, it needs to be lazily-created by the runtime.
1064 __ BranchIf(EQUAL, &call_runtime);
1065
1066 // Use the number of inputs used by the STC to determine which stub to call.
1067 Label call_2, call_3, call_4, call_6;
1068 __ Comment("Check number of STC inputs");
1070 Slot::SubtypeTestCache_num_inputs());
1071 __ CompareImmediate(TypeTestABI::kScratchReg, 2);
1072 __ BranchIf(EQUAL, &call_2, Assembler::kNearJump);
1073 __ CompareImmediate(TypeTestABI::kScratchReg, 3);
1074 __ BranchIf(EQUAL, &call_3, Assembler::kNearJump);
1075 __ CompareImmediate(TypeTestABI::kScratchReg, 4);
1076 __ BranchIf(EQUAL, &call_4, Assembler::kNearJump);
1077 __ CompareImmediate(TypeTestABI::kScratchReg, 6);
1078 __ BranchIf(EQUAL, &call_6, Assembler::kNearJump);
1079 // Fall through to the all inputs case.
1080
1081 {
1082 __ Comment("Call 7 input STC check");
1085 CastHandle<Object>(TrueObject()));
1086 __ BranchIf(EQUAL, &done); // Cache said: yes.
1087 __ Jump(&call_runtime, Assembler::kNearJump);
1088 }
1089
1090 __ Bind(&call_6);
1091 {
1092 __ Comment("Call 6 input STC check");
1095 CastHandle<Object>(TrueObject()));
1096 __ BranchIf(EQUAL, &done); // Cache said: yes.
1097 __ Jump(&call_runtime, Assembler::kNearJump);
1098 }
1099
1100 __ Bind(&call_4);
1101 {
1102 __ Comment("Call 4 input STC check");
1105 CastHandle<Object>(TrueObject()));
1106 __ BranchIf(EQUAL, &done); // Cache said: yes.
1107 __ Jump(&call_runtime, Assembler::kNearJump);
1108 }
1109
1110 __ Bind(&call_3);
1111 {
1112 __ Comment("Call 3 input STC check");
1115 CastHandle<Object>(TrueObject()));
1116 __ BranchIf(EQUAL, &done); // Cache said: yes.
1117 __ Jump(&call_runtime, Assembler::kNearJump);
1118 }
1119
1120 __ Bind(&call_2);
1121 {
1122 __ Comment("Call 2 input STC check");
1125 CastHandle<Object>(TrueObject()));
1126 __ BranchIf(EQUAL, &done); // Cache said: yes.
1127 // Fall through to call_runtime.
1128 }
1129
1130 __ Bind(&call_runtime);
1131 __ Comment("Call runtime");
1132
1134
1135 __ Bind(&done);
1136 __ Comment("Done");
1137 __ LeaveStubFrame();
1138 __ Ret();
1139}
1140#else
1141// Type testing stubs are not implemented on IA32.
1142#define GENERATE_BREAKPOINT_STUB(Name) \
1143 void StubCodeCompiler::Generate##Name##Stub() { \
1144 __ Breakpoint(); \
1145 }
1146
1147VM_TYPE_TESTING_STUB_CODE_LIST(GENERATE_BREAKPOINT_STUB)
1148
1149#undef GENERATE_BREAKPOINT_STUB
1150#endif // !defined(TARGET_ARCH_IA32)
1151
1152// Called for inline allocation of closure.
1153// Input (preserved):
1154// AllocateClosureABI::kFunctionReg: closure function.
1155// AllocateClosureABI::kContextReg: closure context.
1156// AllocateClosureABI::kInstantiatorTypeArgs: instantiator type arguments.
1157// Output:
1158// AllocateClosureABI::kResultReg: newly allocated Closure object.
1159// Clobbered:
1160// AllocateClosureABI::kScratchReg
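// The fast path allocates the closure in new space with TryAllocateObject and
// initializes its fields without write barriers (a new-space object needs
// none); if inline allocation is disabled or fails, the stub falls back to
// kAllocateClosureRuntimeEntry.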
1161void StubCodeCompiler::GenerateAllocateClosureStub(
1162 bool has_instantiator_type_args,
1163 bool is_generic) {
1164 const intptr_t instance_size =
1166 __ EnsureHasClassIdInDEBUG(kFunctionCid, AllocateClosureABI::kFunctionReg,
1168 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1169 Label slow_case;
1170 __ Comment("Inline allocation of uninitialized closure");
1171#if defined(DEBUG)
1172 // Need to account for the debug checks added by StoreToSlotNoBarrier.
1173 const auto distance = Assembler::kFarJump;
1174#else
1175 const auto distance = Assembler::kNearJump;
1176#endif
1177 __ TryAllocateObject(kClosureCid, instance_size, &slow_case, distance,
1180
1181 __ Comment("Inline initialization of allocated closure");
1182 // Put null in the scratch register for initializing most boxed fields.
1183 // We initialize the fields in offset order below.
1184 // Since the TryAllocateObject above did not go to the slow path, we're
1185 // guaranteed an object in new space here, and thus no barriers are needed.
1187 if (has_instantiator_type_args) {
1190 Slot::Closure_instantiator_type_arguments());
1191 } else {
1192 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1194 Slot::Closure_instantiator_type_arguments());
1195 }
1196 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1198 Slot::Closure_function_type_arguments());
1199 if (!is_generic) {
1200 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1202 Slot::Closure_delayed_type_arguments());
1203 }
1204 __ StoreToSlotNoBarrier(AllocateClosureABI::kFunctionReg,
1206 Slot::Closure_function());
1207 __ StoreToSlotNoBarrier(AllocateClosureABI::kContextReg,
1209 Slot::Closure_context());
1210 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1212 Slot::Closure_hash());
1213 if (is_generic) {
1215 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1217 Slot::Closure_delayed_type_arguments());
1218 }
1219#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
1220 if (FLAG_precompiled_mode) {
1221 // Set the closure entry point in precompiled mode, either to the function
1222 // entry point in bare instructions mode or to 0 otherwise (to catch
1223 // misuse). This overwrites the scratch register, but there are no more
1224 // boxed fields.
1227 Slot::Function_entry_point());
1228 __ StoreToSlotNoBarrier(AllocateClosureABI::kScratchReg,
1230 Slot::Closure_entry_point());
1231 }
1232#endif
1233
1234 // AllocateClosureABI::kResultReg: new object.
1235 __ Ret();
1236
1237 __ Bind(&slow_case);
1238 }
1239
1240 __ Comment("Closure allocation via runtime");
1241 __ EnterStubFrame();
1242 __ PushObject(NullObject()); // Space on the stack for the return value.
1243 __ PushRegistersInOrder(
1245 if (has_instantiator_type_args) {
1247 } else {
1248 __ PushObject(NullObject());
1249 }
1250 if (is_generic) {
1251 __ PushObject(EmptyTypeArguments());
1252 } else {
1253 __ PushObject(NullObject());
1254 }
1255 __ CallRuntime(kAllocateClosureRuntimeEntry, 4);
1256 if (has_instantiator_type_args) {
1257 __ Drop(1);
1259 } else {
1260 __ Drop(2);
1261 }
1267 __ LeaveStubFrame();
1268
1269 // AllocateClosureABI::kResultReg: new object
1270 __ Ret();
1271}
1272
1273void StubCodeCompiler::GenerateAllocateClosureStub() {
1274 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/false,
1275 /*is_generic=*/false);
1276}
1277
1278void StubCodeCompiler::GenerateAllocateClosureGenericStub() {
1279 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/false,
1280 /*is_generic=*/true);
1281}
1282
1283void StubCodeCompiler::GenerateAllocateClosureTAStub() {
1284 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/true,
1285 /*is_generic=*/false);
1286}
1287
1288void StubCodeCompiler::GenerateAllocateClosureTAGenericStub() {
1289 GenerateAllocateClosureStub(/*has_instantiator_type_args=*/true,
1290 /*is_generic=*/true);
1291}
1292
1293// Generates the allocation stub for the _GrowableList class.
1294// This stub exists solely for performance reasons: the default allocation
1295// stub is slower because it doesn't use specialized inline allocation.
1296void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
1297#if defined(TARGET_ARCH_IA32)
1298 // This stub is not used on IA32 because the IA32 version of
1299 // StubCodeCompiler::GenerateAllocationStubForClass uses inline
1300 // allocation. Also, the AllocateObjectSlow stub is not generated on IA32.
1301 __ Breakpoint();
1302#else
1303 const intptr_t instance_size = target::RoundedAllocationSize(
1305
1306 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1307 Label slow_case;
1308 __ Comment("Inline allocation of GrowableList");
1309 __ TryAllocateObject(kGrowableObjectArrayCid, instance_size, &slow_case,
1311 /*temp_reg=*/AllocateObjectABI::kTagsReg);
1312 __ StoreIntoObjectNoBarrier(
1314 FieldAddress(AllocateObjectABI::kResultReg,
1317
1318 __ Ret();
1319 __ Bind(&slow_case);
1320 }
1321
1323 kGrowableObjectArrayCid, instance_size);
1324 __ LoadImmediate(AllocateObjectABI::kTagsReg, tags);
1325 __ Jump(
1327#endif // defined(TARGET_ARCH_IA32)
1328}
1329
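// Allocates a Record whose field count is only known at run time: the number
// of fields is extracted from the shape, the rounded instance size is
// computed, the object is carved off the top of new space, its tags and shape
// are written, and the fields are initialized to null in a loop. The slow path
// passes the shape to kAllocateRecordRuntimeEntry.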
1330void StubCodeCompiler::GenerateAllocateRecordStub() {
1331 const Register result_reg = AllocateRecordABI::kResultReg;
1332 const Register shape_reg = AllocateRecordABI::kShapeReg;
1333 const Register temp_reg = AllocateRecordABI::kTemp1Reg;
1334 const Register new_top_reg = AllocateRecordABI::kTemp2Reg;
1335
1336 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1337 Label slow_case;
1338
1339 // Check for allocation tracing.
1340 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kRecordCid, &slow_case, temp_reg));
1341
1342 // Extract number of fields from the shape.
1343 __ AndImmediate(
1344 temp_reg, shape_reg,
1346
1347 // Compute the rounded instance size.
1348 const intptr_t fixed_size_plus_alignment_padding =
1351 __ AddScaled(temp_reg, kNoRegister, temp_reg,
1353 fixed_size_plus_alignment_padding);
1354 __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment);
1355
1356 // Now allocate the object.
1357 __ LoadFromOffset(result_reg, THR, target::Thread::top_offset());
1358 __ MoveRegister(new_top_reg, temp_reg);
1359 __ AddRegisters(new_top_reg, result_reg);
1360 // Check if the allocation fits into the remaining space.
1361 __ CompareWithMemoryValue(new_top_reg,
1362 Address(THR, target::Thread::end_offset()));
1363 __ BranchIf(UNSIGNED_GREATER_EQUAL, &slow_case);
1364 __ CheckAllocationCanary(result_reg);
1365
1366 // Successfully allocated the object, now update top to point to
1367 // next object start and initialize the object.
1368 __ StoreToOffset(new_top_reg, THR, target::Thread::top_offset());
1369 __ AddImmediate(result_reg, kHeapObjectTag);
1370
1371 // Calculate the size tag.
1372 {
1373 Label size_tag_overflow, done;
1374 __ CompareImmediate(temp_reg, target::UntaggedObject::kSizeTagMaxSizeTag);
1375 __ BranchIf(UNSIGNED_GREATER, &size_tag_overflow, Assembler::kNearJump);
1376 __ LslImmediate(temp_reg,
1380
1381 __ Bind(&size_tag_overflow);
1382 // Set overflow size tag value.
1383 __ LoadImmediate(temp_reg, 0);
1384
1385 __ Bind(&done);
1386 uword tags = target::MakeTagWordForNewSpaceObject(kRecordCid, 0);
1387 __ OrImmediate(temp_reg, tags);
1388 __ StoreFieldToOffset(temp_reg, result_reg,
1389 target::Object::tags_offset()); // Tags.
1390 }
1391
1392 __ StoreCompressedIntoObjectNoBarrier(
1393 result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
1394 shape_reg);
1395
1396 // Initialize the remaining words of the object.
1397 {
1398 const Register field_reg = shape_reg;
1399#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
1400 defined(TARGET_ARCH_RISCV64)
1401 const Register null_reg = NULL_REG;
1402#else
1403 const Register null_reg = temp_reg;
1404 __ LoadObject(null_reg, NullObject());
1405#endif
1406
1407 Label loop, done;
1408 __ AddImmediate(field_reg, result_reg, target::Record::field_offset(0));
1409 __ CompareRegisters(field_reg, new_top_reg);
1411
1412 __ Bind(&loop);
1413 for (intptr_t offset = 0; offset < target::kObjectAlignment;
1415 __ StoreCompressedIntoObjectNoBarrier(
1416 result_reg, FieldAddress(field_reg, offset), null_reg);
1417 }
1418 // Safe to only check every kObjectAlignment bytes instead of each word.
1420 __ AddImmediate(field_reg, target::kObjectAlignment);
1421 __ CompareRegisters(field_reg, new_top_reg);
1422 __ BranchIf(UNSIGNED_LESS, &loop, Assembler::kNearJump);
1423 __ Bind(&done);
1424 }
1425
1426 __ WriteAllocationCanary(new_top_reg); // Fix overshoot.
1427 __ Ret();
1428
1429 __ Bind(&slow_case);
1430 }
1431
1432 __ EnterStubFrame();
1433 __ PushObject(NullObject()); // Space on the stack for the return value.
1434 __ PushRegister(shape_reg);
1435 __ CallRuntime(kAllocateRecordRuntimeEntry, 1);
1436 __ Drop(1);
1437 __ PopRegister(AllocateRecordABI::kResultReg);
1438
1440 __ LeaveStubFrame();
1441 __ Ret();
1442}
1443
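// Specialized allocation for records with exactly 2 or 3 fields whose values
// are already held in registers; for unnamed shapes the shape Smi is
// materialized as an immediate. Falls back to kAllocateSmallRecordRuntimeEntry
// if inline allocation fails.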
1444void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
1445 bool has_named_fields) {
1446 ASSERT(num_fields == 2 || num_fields == 3);
1453 Label slow_case;
1454
1455 if ((num_fields > 2) && (value2_reg == kNoRegister)) {
1456 // Not implemented.
1457 __ Breakpoint();
1458 return;
1459 }
1460
1461#if defined(DEBUG)
1462 // Need to account for the debug checks added by
1463 // StoreCompressedIntoObjectNoBarrier.
1464 const auto distance = Assembler::kFarJump;
1465#else
1466 const auto distance = Assembler::kNearJump;
1467#endif
1468 __ TryAllocateObject(kRecordCid, target::Record::InstanceSize(num_fields),
1469 &slow_case, distance, result_reg, temp_reg);
1470
1471 if (!has_named_fields) {
1472 __ LoadImmediate(
1473 shape_reg, Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
1474 }
1475 __ StoreCompressedIntoObjectNoBarrier(
1476 result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
1477 shape_reg);
1478
1479 __ StoreCompressedIntoObjectNoBarrier(
1480 result_reg, FieldAddress(result_reg, target::Record::field_offset(0)),
1481 value0_reg);
1482
1483 __ StoreCompressedIntoObjectNoBarrier(
1484 result_reg, FieldAddress(result_reg, target::Record::field_offset(1)),
1485 value1_reg);
1486
1487 if (num_fields > 2) {
1488 __ StoreCompressedIntoObjectNoBarrier(
1489 result_reg, FieldAddress(result_reg, target::Record::field_offset(2)),
1490 value2_reg);
1491 }
1492
1493 __ Ret();
1494
1495 __ Bind(&slow_case);
1496
1497 __ EnterStubFrame();
1498 __ PushObject(NullObject()); // Space on the stack for the return value.
1499 if (has_named_fields) {
1500 __ PushRegister(shape_reg);
1501 } else {
1502 __ PushImmediate(
1503 Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
1504 }
1505 __ PushRegistersInOrder({value0_reg, value1_reg});
1506 if (num_fields > 2) {
1507 __ PushRegister(value2_reg);
1508 } else {
1509 __ PushObject(NullObject());
1510 }
1511 __ CallRuntime(kAllocateSmallRecordRuntimeEntry, 4);
1512 __ Drop(4);
1513 __ PopRegister(result_reg);
1514
1516 __ LeaveStubFrame();
1517 __ Ret();
1518}
1519
1520void StubCodeCompiler::GenerateAllocateRecord2Stub() {
1521 GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/false);
1522}
1523
1524void StubCodeCompiler::GenerateAllocateRecord2NamedStub() {
1525 GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/true);
1526}
1527
1528void StubCodeCompiler::GenerateAllocateRecord3Stub() {
1529 GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/false);
1530}
1531
1532void StubCodeCompiler::GenerateAllocateRecord3NamedStub() {
1533 GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/true);
1534}
1535
1536// The UnhandledException class lives in the VM isolate, so it cannot cache
1537// an allocation stub for itself. Instead, we cache it in the stub code list.
1538void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
1539 Thread* thread = Thread::Current();
1540 auto class_table = thread->isolate_group()->class_table();
1541 ASSERT(class_table->HasValidClassAt(kUnhandledExceptionCid));
1542 const auto& cls = Class::ZoneHandle(thread->zone(),
1543 class_table->At(kUnhandledExceptionCid));
1544 ASSERT(!cls.IsNull());
1545
1548}
1549
1550#define TYPED_DATA_ALLOCATION_STUB(clazz) \
1551 void StubCodeCompiler::GenerateAllocate##clazz##Stub() { \
1552 GenerateAllocateTypedDataArrayStub(kTypedData##clazz##Cid); \
1553 }
1555#undef TYPED_DATA_ALLOCATION_STUB
1556
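// Each of the shared error stubs below comes in two flavors: one that saves
// FPU registers and one that does not, selected via the corresponding stub
// offset on the Thread.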
1557void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
1558 auto perform_runtime_call = [&]() {
1560 __ CallRuntime(kLateFieldNotInitializedErrorRuntimeEntry,
1561 /*argument_count=*/1);
1562 };
1563 GenerateSharedStubGeneric(
1564 /*save_fpu_registers=*/with_fpu_regs,
1565 with_fpu_regs
1566 ? target::Thread::
1567 late_initialization_error_shared_with_fpu_regs_stub_offset()
1568 : target::Thread::
1569 late_initialization_error_shared_without_fpu_regs_stub_offset(),
1570 /*allow_return=*/false, perform_runtime_call);
1571}
1572
1573void StubCodeCompiler::
1574 GenerateLateInitializationErrorSharedWithoutFPURegsStub() {
1575 GenerateLateInitializationError(/*with_fpu_regs=*/false);
1576}
1577
1578void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub() {
1579 GenerateLateInitializationError(/*with_fpu_regs=*/true);
1580}
1581
1582void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub() {
1583 GenerateSharedStub(
1584 /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
1586 /*allow_return=*/false);
1587}
1588
1589void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub() {
1590 GenerateSharedStub(
1591 /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
1593 /*allow_return=*/false);
1594}
1595
1596void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub() {
1597 GenerateSharedStub(
1598 /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
1600 /*allow_return=*/false);
1601}
1602
1603void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub() {
1604 GenerateSharedStub(
1605 /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
1607 /*allow_return=*/false);
1608}
1609
1610void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub() {
1611 GenerateSharedStub(
1612 /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
1614 /*allow_return=*/false);
1615}
1616
1617void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub() {
1618 GenerateSharedStub(
1619 /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
1621 /*allow_return=*/false);
1622}
1623
1624void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub() {
1625 GenerateSharedStub(
1626 /*save_fpu_registers=*/false, &kInterruptOrStackOverflowRuntimeEntry,
1628 /*allow_return=*/true);
1629}
1630
1631void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub() {
1632 GenerateSharedStub(
1633 /*save_fpu_registers=*/true, &kInterruptOrStackOverflowRuntimeEntry,
1635 /*allow_return=*/true);
1636}
1637
1638void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub() {
1639 GenerateRangeError(/*with_fpu_regs=*/false);
1640}
1641
1642void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub() {
1643 GenerateRangeError(/*with_fpu_regs=*/true);
1644}
1645
1646void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub() {
1647 GenerateWriteError(/*with_fpu_regs=*/false);
1648}
1649
1650void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub() {
1651 GenerateWriteError(/*with_fpu_regs=*/true);
1652}
1653
1654void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub() {
1655 __ Breakpoint(); // Marker stub.
1656}
1657
1658void StubCodeCompiler::GenerateAsynchronousGapMarkerStub() {
1659 __ Breakpoint(); // Marker stub.
1660}
1661
1662void StubCodeCompiler::GenerateUnknownDartCodeStub() {
1663 // Enter a frame to include the caller in the backtrace.
1664 __ EnterStubFrame();
1665 __ Breakpoint(); // Marker stub.
1666}
1667
1668void StubCodeCompiler::GenerateNotLoadedStub() {
1669 __ EnterStubFrame();
1670 __ CallRuntime(kNotLoadedRuntimeEntry, 0);
1671 __ Breakpoint();
1672}
1673
1674#define EMIT_BOX_ALLOCATION(Name) \
1675 void StubCodeCompiler::GenerateAllocate##Name##Stub() { \
1676 Label call_runtime; \
1677 if (!FLAG_use_slow_path && FLAG_inline_alloc) { \
1678 __ TryAllocate(compiler::Name##Class(), &call_runtime, \
1679 Assembler::kNearJump, AllocateBoxABI::kResultReg, \
1680 AllocateBoxABI::kTempReg); \
1681 __ Ret(); \
1682 } \
1683 __ Bind(&call_runtime); \
1684 __ EnterStubFrame(); \
1685 __ PushObject(NullObject()); /* Make room for result. */ \
1686 __ CallRuntime(kAllocate##Name##RuntimeEntry, 0); \
1687 __ PopRegister(AllocateBoxABI::kResultReg); \
1688 __ LeaveStubFrame(); \
1689 __ Ret(); \
1690 }
1691
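// Each expansion below produces an Allocate<Name>Stub with the inline
// TryAllocate fast path and the runtime fallback shown in the macro above.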
1693EMIT_BOX_ALLOCATION(Double)
1694EMIT_BOX_ALLOCATION(Float32x4)
1695EMIT_BOX_ALLOCATION(Float64x2)
1696EMIT_BOX_ALLOCATION(Int32x4)
1697
1698#undef EMIT_BOX_ALLOCATION
1699
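// Shared helper for the box-allocation stubs that carry an unboxed FPU value:
// it tries to allocate the box inline and store the value with the supplied
// Assembler store member function; on the slow path the value is first spilled
// to the Thread so the runtime entry can box it.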
1700static void GenerateBoxFpuValueStub(Assembler* assembler,
1701 const dart::Class& cls,
1702 const RuntimeEntry& runtime_entry,
1703 void (Assembler::* store_value)(FpuRegister,
1704 Register,
1705 int32_t)) {
1706 Label call_runtime;
1707 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
1708 __ TryAllocate(cls, &call_runtime, compiler::Assembler::kFarJump,
1710 (assembler->*store_value)(
1713 __ Ret();
1714 }
1715 __ Bind(&call_runtime);
1716 __ EnterStubFrame();
1717 __ PushObject(NullObject()); /* Make room for result. */
1718 (assembler->*store_value)(BoxDoubleStubABI::kValueReg, THR,
1720 __ CallRuntime(runtime_entry, 0);
1721 __ PopRegister(BoxDoubleStubABI::kResultReg);
1722 __ LeaveStubFrame();
1723 __ Ret();
1724}
1725
1726void StubCodeCompiler::GenerateBoxDoubleStub() {
1728 kBoxDoubleRuntimeEntry,
1730}
1731
1732void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
1733#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
1735 kBoxFloat32x4RuntimeEntry,
1737#else
1738 __ Stop("Not supported on RISC-V.");
1739#endif
1740}
1741
1742void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
1743#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
1745 kBoxFloat64x2RuntimeEntry,
1747#else
1748 __ Stop("Not supported on RISC-V.");
1749#endif
1750}
1751
1752void StubCodeCompiler::GenerateDoubleToIntegerStub() {
1753 __ EnterStubFrame();
1754 __ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
1756 __ PushObject(NullObject()); /* Make room for result. */
1758 __ CallRuntime(kDoubleToIntegerRuntimeEntry, 1);
1759 __ Drop(1);
1761 __ LeaveStubFrame();
1762 __ Ret();
1763}
1764
1765static intptr_t SuspendStateFpOffset() {
1769}
1770
1772 Assembler* assembler,
1773 intptr_t entry_point_offset_in_thread,
1774 intptr_t function_offset_in_object_store,
1775 bool uses_args_desc = false) {
1776 if (FLAG_precompiled_mode) {
1777 __ Call(Address(THR, entry_point_offset_in_thread));
1778 } else {
1779 __ LoadIsolateGroup(FUNCTION_REG);
1780 __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG,
1782 __ LoadFromOffset(FUNCTION_REG, FUNCTION_REG,
1783 function_offset_in_object_store);
1784 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
1786 if (!uses_args_desc) {
1787 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1788 __ LoadImmediate(ARGS_DESC_REG, 0);
1789 }
1791 }
1792}
1793
1794// Helper to generate allocation of _SuspendState instance.
1795// Initializes tags, frame_capacity and frame_size.
1796// Other fields are not initialized.
1797//
1798// Input:
1799// frame_size_reg: size of the frame payload in bytes.
1800// Output:
1801// result_reg: allocated instance.
1802// Clobbers:
1803// result_reg, temp_reg.
1805 Label* slow_case,
1806 Register result_reg,
1807 Register frame_size_reg,
1808 Register temp_reg) {
1809 if (FLAG_use_slow_path || !FLAG_inline_alloc) {
1810 __ Jump(slow_case);
1811 return;
1812 }
1813
1814 // Check for allocation tracing.
1816 __ MaybeTraceAllocation(kSuspendStateCid, slow_case, temp_reg));
1817
1818 // Compute the rounded instance size.
1819 const intptr_t fixed_size_plus_alignment_padding =
1823 __ AddImmediate(temp_reg, frame_size_reg, fixed_size_plus_alignment_padding);
1824 __ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment);
1825
1826 // Now allocate the object.
1827 __ LoadFromOffset(result_reg, THR, target::Thread::top_offset());
1828 __ AddRegisters(temp_reg, result_reg);
1829 // Check if the allocation fits into the remaining space.
1830 __ CompareWithMemoryValue(temp_reg,
1832 __ BranchIf(UNSIGNED_GREATER_EQUAL, slow_case);
1833 __ CheckAllocationCanary(result_reg);
1834
1835 // Successfully allocated the object, now update top to point to
1836 // next object start and initialize the object.
1837 __ StoreToOffset(temp_reg, THR, target::Thread::top_offset());
1838 __ SubRegisters(temp_reg, result_reg);
1839 __ AddImmediate(result_reg, kHeapObjectTag);
1840
1841 if (!FLAG_precompiled_mode) {
1842 // Use rounded object size to calculate and save frame capacity.
1843 __ AddImmediate(temp_reg, temp_reg,
1845 __ StoreFieldToOffset(temp_reg, result_reg,
1847 // Restore rounded object size.
1848 __ AddImmediate(temp_reg, temp_reg, target::SuspendState::payload_offset());
1849 }
1850
1851 // Calculate the size tag.
1852 {
1853 Label size_tag_overflow, done;
1854 __ CompareImmediate(temp_reg, target::UntaggedObject::kSizeTagMaxSizeTag);
1855 __ BranchIf(UNSIGNED_GREATER, &size_tag_overflow, Assembler::kNearJump);
1856 __ LslImmediate(temp_reg,
1860
1861 __ Bind(&size_tag_overflow);
1862 // Set overflow size tag value.
1863 __ LoadImmediate(temp_reg, 0);
1864
1865 __ Bind(&done);
1866 uword tags = target::MakeTagWordForNewSpaceObject(kSuspendStateCid, 0);
1867 __ OrImmediate(temp_reg, tags);
1868 __ StoreFieldToOffset(temp_reg, result_reg,
1869 target::Object::tags_offset()); // Tags.
1870 }
1871
1872 __ StoreFieldToOffset(frame_size_reg, result_reg,
1874}
1875
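// In outline: reuses the _SuspendState already stored in the caller's frame,
// taking a resize path if its frame capacity is too small, or allocates a
// fresh one; the caller's frame payload is then copied into it before the
// suspend function selected by the given entry-point / object-store offsets is
// invoked.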
1876void StubCodeCompiler::GenerateSuspendStub(
1877 bool call_suspend_function,
1878 bool pass_type_arguments,
1879 intptr_t suspend_entry_point_offset_in_thread,
1880 intptr_t suspend_function_offset_in_object_store) {
1881 const Register kArgument = SuspendStubABI::kArgumentReg;
1882 const Register kTypeArgs = SuspendStubABI::kTypeArgsReg;
1883 const Register kTemp = SuspendStubABI::kTempReg;
1884 const Register kFrameSize = SuspendStubABI::kFrameSizeReg;
1885 const Register kSuspendState = SuspendStubABI::kSuspendStateReg;
1886 const Register kFunctionData = SuspendStubABI::kFunctionDataReg;
1887 const Register kSrcFrame = SuspendStubABI::kSrcFrameReg;
1888 const Register kDstFrame = SuspendStubABI::kDstFrameReg;
1889 Label alloc_slow_case, alloc_done, init_done, resize_suspend_state,
1890 remember_object, call_dart;
1891
1892#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
1893 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
1894#endif
1895
1896 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
1897
1898 __ AddImmediate(
1899 kFrameSize, FPREG,
1900 -target::frame_layout.last_param_from_entry_sp * target::kWordSize);
1901 __ SubRegisters(kFrameSize, SPREG);
1902
1903 __ EnterStubFrame();
1904
1905 if (pass_type_arguments) {
1906 __ PushRegister(kTypeArgs);
1907 }
1908
1909 __ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
1910
1911 if (FLAG_precompiled_mode) {
1912 __ BranchIf(EQUAL, &init_done);
1913 } else {
1914 Label alloc_suspend_state;
1915 __ BranchIf(NOT_EQUAL, &alloc_suspend_state);
1916
1917 __ CompareWithMemoryValue(
1918 kFrameSize,
1919 FieldAddress(kSuspendState,
1921 __ BranchIf(UNSIGNED_GREATER, &resize_suspend_state);
1922
1923 __ StoreFieldToOffset(kFrameSize, kSuspendState,
1925 __ Jump(&init_done);
1926
1927 __ Bind(&alloc_suspend_state);
1928 }
1929
1930 __ Comment("Allocate SuspendState");
1931 __ MoveRegister(kFunctionData, kSuspendState);
1932
1933 GenerateAllocateSuspendState(assembler, &alloc_slow_case, kSuspendState,
1934 kFrameSize, kTemp);
1935
1936 __ StoreCompressedIntoObjectNoBarrier(
1937 kSuspendState,
1938 FieldAddress(kSuspendState, target::SuspendState::function_data_offset()),
1939 kFunctionData);
1940
1941 {
1942#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
1943 defined(TARGET_ARCH_RISCV64)
1944 const Register kNullReg = NULL_REG;
1945#else
1946 const Register kNullReg = kTemp;
1947 __ LoadObject(kNullReg, NullObject());
1948#endif
1949 __ StoreCompressedIntoObjectNoBarrier(
1950 kSuspendState,
1951 FieldAddress(kSuspendState,
1953 kNullReg);
1954 __ StoreCompressedIntoObjectNoBarrier(
1955 kSuspendState,
1956 FieldAddress(kSuspendState,
1958 kNullReg);
1959 }
1960
1961 __ Bind(&alloc_done);
1962
1963 __ Comment("Save SuspendState to frame");
1964 __ LoadFromOffset(kTemp, FPREG, kSavedCallerFpSlotFromFp * target::kWordSize);
1965 __ StoreToOffset(kSuspendState, kTemp, SuspendStateFpOffset());
1966
1967 __ Bind(&init_done);
1968 __ Comment("Copy frame to SuspendState");
1969
1970#ifdef DEBUG
1971 {
1972 // Verify that SuspendState.frame_size == kFrameSize.
1973 Label okay;
1974 __ LoadFieldFromOffset(kTemp, kSuspendState,
1976 __ CompareRegisters(kTemp, kFrameSize);
1977 __ BranchIf(EQUAL, &okay);
1978 __ Breakpoint();
1979 __ Bind(&okay);
1980 }
1981#endif
1982
1983 if (kSrcFrame == THR) {
1984 __ PushRegister(THR);
1985 }
1986 __ AddImmediate(kSrcFrame, FPREG, kCallerSpSlotFromFp * target::kWordSize);
1987 __ AddImmediate(kDstFrame, kSuspendState,
1989 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
1990 if (kSrcFrame == THR) {
1991 __ PopRegister(THR);
1992 }
1993
1994 __ LoadFromOffset(kTemp, FPREG, kSavedCallerPcSlotFromFp * target::kWordSize);
1995 __ StoreFieldToOffset(kTemp, kSuspendState,
1997
1998#ifdef DEBUG
1999 {
2000 // Verify that kSuspendState matches :suspend_state in the copied stack
2001 // frame.
2002 Label okay;
2003 __ LoadFieldFromOffset(kTemp, kSuspendState,
2005 __ AddRegisters(kTemp, kSuspendState);
2006 __ LoadFieldFromOffset(
2007 kTemp, kTemp,
2009 __ CompareRegisters(kTemp, kSuspendState);
2010 __ BranchIf(EQUAL, &okay);
2011 __ Breakpoint();
2012 __ Bind(&okay);
2013 }
2014#endif
2015
2016 if (call_suspend_function) {
2017 // Push arguments for suspend Dart function early to preserve them
2018 // across write barrier.
2019 __ PushRegistersInOrder({kSuspendState, kArgument});
2020 }
2021
2022 // Write barrier.
2023 __ AndImmediate(kTemp, kSuspendState, target::kPageMask);
2024 __ LoadFromOffset(kTemp, kTemp, target::Page::original_top_offset());
2025 __ CompareRegisters(kSuspendState, kTemp);
2026 __ BranchIf(UNSIGNED_LESS, &remember_object);
2027 // Assumption: SuspendStates are always on non-image pages.
2028 // TODO(rmacnak): Also check original_end if we bound TLABs to smaller than a
2029 // heap page.
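 // Note: the frame copy above was performed without a write barrier, so if
 // the SuspendState is not a fresh new-space allocation (its address is
 // below the page's original_top), the remember_object slow path below calls
 // the EnsureRememberedAndMarkingDeferred runtime entry before continuing.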
2030
2031 __ Bind(&call_dart);
2032 if (call_suspend_function) {
2033 __ Comment("Call suspend Dart function");
2034 if (pass_type_arguments) {
2035 __ LoadObject(ARGS_DESC_REG,
2036 ArgumentsDescriptorBoxed(/*type_args_len=*/1,
2037 /*num_arguments=*/2));
2038 }
2039 CallDartCoreLibraryFunction(assembler, suspend_entry_point_offset_in_thread,
2040 suspend_function_offset_in_object_store,
2041 /*uses_args_desc=*/pass_type_arguments);
2042 } else {
2043 // SuspendStub returns either the result of the Dart callback,
2044 // or the SuspendStub argument (if the Dart callback is not used).
2045 // The latter is used by yield/yield* in sync* functions
2046 // to indicate that iteration should continue.
2047 __ MoveRegister(CallingConventions::kReturnReg, kArgument);
2048 }
2049
2050 __ LeaveStubFrame();
2051
2052#if !defined(TARGET_ARCH_X64) && !defined(TARGET_ARCH_IA32)
2053 // Drop the caller frame on all architectures except x86 (X64/IA32), which
2054 // needs to maintain call/return balance to avoid performance regressions.
2055 __ LeaveDartFrame();
2056#elif defined(TARGET_ARCH_X64)
2057 // Restore PP in JIT mode on x64, as the epilogue following the SuspendStub
2058 // call will only unwind the frame and return.
2059 if (!FLAG_precompiled_mode) {
2060 __ LoadFromOffset(
2061 PP, FPREG,
2062 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
2063 }
2064#endif
2065 __ Ret();
2066
2067#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2068 // Slow path is executed with Dart and stub frames still on the stack.
2069 SPILLS_LR_TO_FRAME({});
2070 SPILLS_LR_TO_FRAME({});
2071#endif
2072 __ Bind(&alloc_slow_case);
2073 __ Comment("SuspendState Allocation slow case");
2074 // Save argument and frame size.
2075 __ PushRegistersInOrder({kArgument, kFrameSize});
2076 __ PushObject(NullObject()); // Make space on stack for the return value.
2077 __ SmiTag(kFrameSize);
2078 // Pass frame size and function data to runtime entry.
2079 __ PushRegistersInOrder({kFrameSize, kFunctionData});
2080 __ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
2081 __ Drop(2); // Drop arguments
2082 __ PopRegister(kSuspendState); // Get result.
2083 __ PopRegister(kFrameSize); // Restore frame size.
2084 __ PopRegister(kArgument); // Restore argument.
2085 __ Jump(&alloc_done);
2086
2087 __ Bind(&resize_suspend_state);
2088 __ Comment("Resize SuspendState");
2089 // Save argument and frame size.
2090 __ PushRegistersInOrder({kArgument, kFrameSize});
2091 __ PushObject(NullObject()); // Make space on stack for the return value.
2092 __ SmiTag(kFrameSize);
2093 // Pass frame size and old suspend state to runtime entry.
2094 __ PushRegistersInOrder({kFrameSize, kSuspendState});
2095 // It's okay to call the runtime for resizing SuspendState objects,
2096 // as it can only happen in unoptimized code if the expression
2097 // stack grows between suspends, or once after an OSR transition.
2098 __ CallRuntime(kAllocateSuspendStateRuntimeEntry, 2);
2099 __ Drop(2); // Drop arguments
2100 __ PopRegister(kSuspendState); // Get result.
2101 __ PopRegister(kFrameSize); // Restore frame size.
2102 __ PopRegister(kArgument); // Restore argument.
2103 __ Jump(&alloc_done);
2104
2105 __ Bind(&remember_object);
2106 __ Comment("Old gen SuspendState slow case");
2107 if (!call_suspend_function) {
2108 // Save kArgument, which contains the return value
2109 // if the suspend function is not called.
2110 __ PushRegister(kArgument);
2111 }
2112 {
2113#if defined(TARGET_ARCH_IA32)
2114 LeafRuntimeScope rt(assembler, /*frame_size=*/2 * target::kWordSize,
2115 /*preserve_registers=*/false);
2116 __ movl(Address(ESP, 1 * target::kWordSize), THR);
2117 __ movl(Address(ESP, 0 * target::kWordSize), kSuspendState);
2118#else
2119 LeafRuntimeScope rt(assembler, /*frame_size=*/0,
2120 /*preserve_registers=*/false);
2121 __ MoveRegister(CallingConventions::ArgumentRegisters[0], kSuspendState);
2123#endif
2124 rt.Call(kEnsureRememberedAndMarkingDeferredRuntimeEntry, 2);
2125 }
2126 if (!call_suspend_function) {
2127 __ PopRegister(kArgument);
2128 }
2129 __ Jump(&call_dart);
2130}
2131
2132void StubCodeCompiler::GenerateAwaitStub() {
2133 GenerateSuspendStub(
2134 /*call_suspend_function=*/true,
2135 /*pass_type_arguments=*/false,
2138}
2139
2140void StubCodeCompiler::GenerateAwaitWithTypeCheckStub() {
2141 GenerateSuspendStub(
2142
2143 /*call_suspend_function=*/true,
2144 /*pass_type_arguments=*/true,
2147}
2148
2149void StubCodeCompiler::GenerateYieldAsyncStarStub() {
2150 GenerateSuspendStub(
2151
2152 /*call_suspend_function=*/true,
2153 /*pass_type_arguments=*/false,
2156}
2157
2158void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
2159 GenerateSuspendStub(
2160
2161 /*call_suspend_function=*/true,
2162 /*pass_type_arguments=*/false,
2163 target::Thread::
2164 suspend_state_suspend_sync_star_at_start_entry_point_offset(),
2166}
2167
2168void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub() {
2169 GenerateSuspendStub(
2170 /*call_suspend_function=*/false,
2171 /*pass_type_arguments=*/false, -1, -1);
2172}
2173
2174void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
2175 intptr_t init_entry_point_offset_in_thread,
2176 intptr_t init_function_offset_in_object_store) {
2178
2179 __ EnterStubFrame();
2180 __ LoadObject(ARGS_DESC_REG, ArgumentsDescriptorBoxed(/*type_args_len=*/1,
2181 /*num_arguments=*/0));
2182 __ PushRegister(kTypeArgs);
2183 CallDartCoreLibraryFunction(assembler, init_entry_point_offset_in_thread,
2184 init_function_offset_in_object_store,
2185 /*uses_args_desc=*/true);
2186 __ LeaveStubFrame();
2187
2188 // Set :suspend_state in the caller frame.
2191 __ Ret();
2192}
2193
2194void StubCodeCompiler::GenerateInitAsyncStub() {
2195 GenerateInitSuspendableFunctionStub(
2198}
2199
2200void StubCodeCompiler::GenerateInitAsyncStarStub() {
2201 GenerateInitSuspendableFunctionStub(
2204}
2205
2206void StubCodeCompiler::GenerateInitSyncStarStub() {
2207 GenerateInitSuspendableFunctionStub(
2210}
2211
2212void StubCodeCompiler::GenerateResumeStub() {
2213 const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
2214 const Register kTemp = ResumeStubABI::kTempReg;
2215 const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
2216 const Register kSrcFrame = ResumeStubABI::kSrcFrameReg;
2217 const Register kDstFrame = ResumeStubABI::kDstFrameReg;
2218 const Register kResumePc = ResumeStubABI::kResumePcReg;
2219 const Register kException = ResumeStubABI::kExceptionReg;
2220 const Register kStackTrace = ResumeStubABI::kStackTraceReg;
2221 Label call_runtime;
2222
2223 // Top of the stack on entry:
2224 // ... [SuspendState] [value] [exception] [stackTrace] [ReturnAddress]
2225
2226 __ EnterDartFrame(0);
2227
2228 const intptr_t param_offset =
2230 __ LoadFromOffset(kSuspendState, FPREG, param_offset + 4 * target::kWordSize);
2231#ifdef DEBUG
2232 {
2233 Label okay;
2234 __ CompareClassId(kSuspendState, kSuspendStateCid, kTemp);
2235 __ BranchIf(EQUAL, &okay);
2236 __ Breakpoint();
2237 __ Bind(&okay);
2238 }
2239 {
2240 Label okay;
2241 __ LoadFieldFromOffset(kTemp, kSuspendState,
2243 __ CompareImmediate(kTemp, 0);
2244 __ BranchIf(NOT_EQUAL, &okay);
2245 __ Breakpoint();
2246 __ Bind(&okay);
2247 }
2248#endif
2249
2250 __ LoadFieldFromOffset(kFrameSize, kSuspendState,
2252#ifdef DEBUG
2253 {
2254 Label okay;
2255 __ MoveRegister(kTemp, kFrameSize);
2256 __ AddRegisters(kTemp, kSuspendState);
2257 __ LoadFieldFromOffset(
2258 kTemp, kTemp,
2260 __ CompareRegisters(kTemp, kSuspendState);
2261 __ BranchIf(EQUAL, &okay);
2262 __ Breakpoint();
2263 __ Bind(&okay);
2264 }
2265#endif
2266 if (!FLAG_precompiled_mode) {
2267 // Copy Code object (part of the fixed frame which is not copied below)
2268 // and restore pool pointer.
2269 __ MoveRegister(kTemp, kSuspendState);
2270 __ AddRegisters(kTemp, kFrameSize);
2271 __ LoadFromOffset(
2272 CODE_REG, kTemp,
2275 __ StoreToOffset(CODE_REG, FPREG,
2277#if !defined(TARGET_ARCH_IA32)
2278 __ LoadPoolPointer(PP);
2279#endif
2280 }
2281 // Do not copy fixed frame between the first local and FP.
2282 __ AddImmediate(kFrameSize, (target::frame_layout.first_local_from_fp + 1) *
2284 __ SubRegisters(SPREG, kFrameSize);
2285
2286 __ Comment("Copy frame from SuspendState");
2287 intptr_t num_saved_regs = 0;
2288 if (kSrcFrame == THR) {
2289 __ PushRegister(THR);
2290 ++num_saved_regs;
2291 }
2292 if (kDstFrame == CODE_REG) {
2293 __ PushRegister(CODE_REG);
2294 ++num_saved_regs;
2295 }
2296 __ AddImmediate(kSrcFrame, kSuspendState,
2298 __ AddImmediate(kDstFrame, SPREG, num_saved_regs * target::kWordSize);
2299 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
2300 if (kDstFrame == CODE_REG) {
2301 __ PopRegister(CODE_REG);
2302 }
2303 if (kSrcFrame == THR) {
2304 __ PopRegister(THR);
2305 }
2306
2307 __ Comment("Transfer control");
2308
2309 __ LoadFieldFromOffset(kResumePc, kSuspendState,
2311 __ StoreZero(FieldAddress(kSuspendState, target::SuspendState::pc_offset()),
2312 kTemp);
2313
2314#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
2315 // Adjust resume PC to skip extra epilogue generated on x86
2316 // right after the call to suspend stub in order to maintain
2317 // call/return balance.
2318 __ AddImmediate(kResumePc, SuspendStubABI::kResumePcDistance);
2319#endif
2320
2321 static_assert((kException != CODE_REG) && (kException != PP),
2322 "should not interfere");
2323 __ LoadFromOffset(kException, FPREG, param_offset + 2 * target::kWordSize);
2324 __ CompareObject(kException, NullObject());
2325 __ BranchIf(NOT_EQUAL, &call_runtime);
2326
2327 if (!FLAG_precompiled_mode) {
2328 // Check if Code is disabled.
2329 __ LoadFieldFromOffset(kTemp, CODE_REG,
2331 __ CompareWithMemoryValue(
2332 kTemp,
2334 __ BranchIf(NOT_EQUAL, &call_runtime);
2335
2336#if !defined(PRODUCT)
2337 // Check if there is a breakpoint at resumption.
2338 __ LoadIsolate(kTemp);
2339 __ LoadFromOffset(kTemp, kTemp,
2342 __ CompareImmediate(kTemp, 0);
2343 __ BranchIf(NOT_EQUAL, &call_runtime);
2344#endif
2345 }
2346
2347 __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
2348 param_offset + 3 * target::kWordSize);
2349
2350 __ Jump(kResumePc);
2351
2352 __ Comment("Call runtime to throw exception or deopt");
2353 __ Bind(&call_runtime);
2354
2355 __ LoadFromOffset(kStackTrace, FPREG, param_offset + 1 * target::kWordSize);
2356 static_assert((kStackTrace != CODE_REG) && (kStackTrace != PP),
2357 "should not interfere");
2358
2359 // Set the return address as if the suspended Dart function had called
2360 // this stub with kResumePc as its return address.
2361 __ SetReturnAddress(kResumePc);
2362
2363 if (!FLAG_precompiled_mode) {
2365 }
2366#if !defined(TARGET_ARCH_IA32)
2367 __ set_constant_pool_allowed(false);
2368#endif
2369 __ EnterStubFrame();
2370 __ PushObject(NullObject()); // Make room for (unused) result.
2371 __ PushRegistersInOrder({kException, kStackTrace});
2372 __ CallRuntime(kResumeFrameRuntimeEntry, /*argument_count=*/2);
2373
2374 if (FLAG_precompiled_mode) {
2375 __ Breakpoint();
2376 } else {
2377 __ LeaveStubFrame();
2378 __ LoadFromOffset(CallingConventions::kReturnReg, FPREG,
2379 param_offset + 3 * target::kWordSize);
2380 // Lazy deoptimize.
2381 __ Ret();
2382 }
2383}
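// Note: GenerateResumeStub (above) restores a suspended frame: it grows the
// stack by the saved frame size (excluding the fixed frame), copies the
// payload back out of the SuspendState, clears the stored pc, and jumps to
// the resume PC with the value argument in the return register. If an
// exception was passed in, or (in JIT mode) the target Code is disabled or a
// resumption breakpoint is set, it calls the ResumeFrame runtime entry
// instead.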
2384
2385void StubCodeCompiler::GenerateReturnStub(
2386 intptr_t return_entry_point_offset_in_thread,
2387 intptr_t return_function_offset_in_object_store,
2388 intptr_t return_stub_offset_in_thread) {
2389 const Register kSuspendState = ReturnStubABI::kSuspendStateReg;
2390
2391#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2392 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
2393#endif
2394
2395 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
2396#ifdef DEBUG
2397 {
2398 Label okay;
2399 __ CompareObject(kSuspendState, NullObject());
2400 __ BranchIf(NOT_EQUAL, &okay);
2401 __ Breakpoint();
2402 __ Bind(&okay);
2403 }
2404#endif
2405 __ LeaveDartFrame();
2406 if (!FLAG_precompiled_mode) {
2407 __ LoadFromOffset(CODE_REG, THR, return_stub_offset_in_thread);
2408 }
2409 __ EnterStubFrame();
2410 __ PushRegistersInOrder({kSuspendState, CallingConventions::kReturnReg});
2411 CallDartCoreLibraryFunction(assembler, return_entry_point_offset_in_thread,
2412 return_function_offset_in_object_store);
2413 __ LeaveStubFrame();
2414 __ Ret();
2415}
2416
2417void StubCodeCompiler::GenerateReturnAsyncStub() {
2418 GenerateReturnStub(
2422}
2423
2424void StubCodeCompiler::GenerateReturnAsyncNotFutureStub() {
2425 GenerateReturnStub(
2426 target::Thread::
2427 suspend_state_return_async_not_future_entry_point_offset(),
2430}
2431
2432void StubCodeCompiler::GenerateReturnAsyncStarStub() {
2433 GenerateReturnStub(
2437}
2438
2439void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
2441 ASSERT(kSuspendState != kExceptionObjectReg);
2442 ASSERT(kSuspendState != kStackTraceObjectReg);
2443 Label rethrow_exception;
2444
2445#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2446 SPILLS_LR_TO_FRAME({}); // Simulate entering the caller (Dart) frame.
2447#endif
2448
2449 __ LoadFromOffset(kSuspendState, FPREG, SuspendStateFpOffset());
2450
2451 // Check if suspend_state is initialized. Otherwise, the
2452 // exception was thrown from the prologue code and
2453 // should be propagated synchronously.
2454 __ CompareObject(kSuspendState, NullObject());
2455 __ BranchIf(EQUAL, &rethrow_exception);
2456
2457 __ LeaveDartFrame();
2458 if (!FLAG_precompiled_mode) {
2459 __ LoadFromOffset(CODE_REG, THR,
2461 }
2462 __ EnterStubFrame();
2463 __ PushRegistersInOrder(
2464 {kSuspendState, kExceptionObjectReg, kStackTraceObjectReg});
2466 assembler,
2469 __ LeaveStubFrame();
2470 __ Ret();
2471
2472#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
2473 // The rethrow case is used when the Dart frame is still on the stack.
2474 SPILLS_LR_TO_FRAME({});
2475#endif
2476 __ Comment("Rethrow exception");
2477 __ Bind(&rethrow_exception);
2478 __ LeaveDartFrame();
2479 if (!FLAG_precompiled_mode) {
2480 __ LoadFromOffset(CODE_REG, THR,
2482 }
2483 __ EnterStubFrame();
2484 __ PushObject(NullObject()); // Make room for (unused) result.
2485 __ PushRegistersInOrder({kExceptionObjectReg, kStackTraceObjectReg});
2486 __ PushImmediate(Smi::RawValue(0)); // Do not bypass debugger.
2487 __ CallRuntime(kReThrowRuntimeEntry, /*argument_count=*/3);
2488 __ Breakpoint();
2489}
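// Note: the stub above forwards (exception, stackTrace) to the core
// library's handle-exception helper once :suspend_state has been
// initialized; if the exception originated in the function's prologue
// (suspend_state still null), it is rethrown synchronously through the
// ReThrow runtime entry.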
2490
2491void StubCodeCompiler::GenerateCloneSuspendStateStub() {
2498 Label alloc_slow_case;
2499
2500#ifdef DEBUG
2501 {
2502 // Can only clone _SuspendState objects with copied frames.
2503 Label okay;
2504 __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
2505 __ CompareImmediate(kTemp, 0);
2506 __ BranchIf(NOT_EQUAL, &okay);
2507 __ Breakpoint();
2508 __ Bind(&okay);
2509 }
2510#endif
2511
2512 __ LoadFieldFromOffset(kFrameSize, kSource,
2514
2515 GenerateAllocateSuspendState(assembler, &alloc_slow_case, kDestination,
2516 kFrameSize, kTemp);
2517
2518 // Copy pc.
2519 __ LoadFieldFromOffset(kTemp, kSource, target::SuspendState::pc_offset());
2520 __ StoreFieldToOffset(kTemp, kDestination, target::SuspendState::pc_offset());
2521
2522 // Copy function_data.
2523 __ LoadCompressedFieldFromOffset(
2525 __ StoreCompressedIntoObjectNoBarrier(
2526 kDestination,
2527 FieldAddress(kDestination, target::SuspendState::function_data_offset()),
2528 kTemp);
2529
2530 // Copy then_callback.
2531 __ LoadCompressedFieldFromOffset(
2533 __ StoreCompressedIntoObjectNoBarrier(
2534 kDestination,
2535 FieldAddress(kDestination, target::SuspendState::then_callback_offset()),
2536 kTemp);
2537
2538 // Copy error_callback.
2539 __ LoadCompressedFieldFromOffset(
2541 __ StoreCompressedIntoObjectNoBarrier(
2542 kDestination,
2543 FieldAddress(kDestination, target::SuspendState::error_callback_offset()),
2544 kTemp);
2545
2546 // Copy payload frame.
2547 if (kSrcFrame == THR) {
2548 __ PushRegister(THR);
2549 }
2551 __ AddImmediate(kSrcFrame, kSource, offset);
2552 __ AddImmediate(kDstFrame, kDestination, offset);
2553 __ CopyMemoryWords(kSrcFrame, kDstFrame, kFrameSize, kTemp);
2554 if (kSrcFrame == THR) {
2555 __ PopRegister(THR);
2556 }
2557
2558 // Update value of :suspend_state variable in the copied frame
2559 // for the new SuspendState.
2560 __ LoadFieldFromOffset(kTemp, kDestination,
2562 __ AddRegisters(kTemp, kDestination);
2563 __ StoreFieldToOffset(
2564 kDestination, kTemp,
2566
2567 __ MoveRegister(CallingConventions::kReturnReg, kDestination);
2569 __ Ret();
2570
2571 __ Bind(&alloc_slow_case);
2572 __ Comment("CloneSuspendState slow case");
2573 __ EnterStubFrame();
2574 __ PushObject(NullObject()); // Make space on stack for the return value.
2575 __ PushRegister(kSource);
2576 __ CallRuntime(kCloneSuspendStateRuntimeEntry, 1);
2577 __ Drop(1); // Drop argument
2578 __ PopRegister(CallingConventions::kReturnReg); // Get result.
2579 __ LeaveStubFrame();
2580 __ Ret();
2581}
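// Note: cloning allocates a fresh SuspendState with the same frame size,
// copies pc, function_data, then_callback, error_callback and the payload
// frame, and finally rewrites the :suspend_state slot inside the copied
// frame so that it points at the clone rather than the original.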
2582
2583void StubCodeCompiler::GenerateFfiAsyncCallbackSendStub() {
2584 __ EnterStubFrame();
2585 __ PushObject(NullObject()); // Make space on stack for the return value.
2587 __ CallRuntime(kFfiAsyncCallbackSendRuntimeEntry, 1);
2588 __ Drop(1); // Drop argument.
2589 __ PopRegister(CallingConventions::kReturnReg); // Get result.
2590 __ LeaveStubFrame();
2591 __ Ret();
2592}
2593
2594void StubCodeCompiler::InsertBSSRelocation(BSS::Relocation reloc) {
2595 ASSERT(pc_descriptors_list_ != nullptr);
2596 const intptr_t pc_offset = assembler->InsertAlignedRelocation(reloc);
2597 pc_descriptors_list_->AddDescriptor(
2598 UntaggedPcDescriptors::kBSSRelocation, pc_offset,
2599 /*deopt_id=*/DeoptId::kNone,
2600 /*root_pos=*/TokenPosition::kNoSource,
2601 /*try_index=*/-1,
2603}
2604
2605#if !defined(TARGET_ARCH_IA32)
2606static void GenerateSubtypeTestCacheLoopBody(Assembler* assembler,
2607 int n,
2608 Register null_reg,
2609 Register cache_entry_reg,
2610 Register instance_cid_or_sig_reg,
2611 Register instance_type_args_reg,
2612 Register parent_fun_type_args_reg,
2613 Register delayed_type_args_reg,
2614 Label* found,
2615 Label* not_found,
2616 Label* next_iteration) {
2617 __ Comment("Loop");
2618 // LoadAcquireCompressed assumes the loaded value is a heap object and
2619 // extends it with the heap bits if compressed. However, the entry may be
2620 // a Smi.
2621 //
2622 // Instead, just use LoadAcquire to load the lower bits when compressed and
2623 // only compare the low bits of the loaded value using CompareObjectRegisters.
2624 __ LoadAcquireFromOffset(
2625 TypeTestABI::kScratchReg, cache_entry_reg,
2628 kObjectBytes);
2629 __ CompareObjectRegisters(TypeTestABI::kScratchReg, null_reg);
2630 __ BranchIf(EQUAL, not_found, Assembler::kNearJump);
2631 __ CompareObjectRegisters(TypeTestABI::kScratchReg, instance_cid_or_sig_reg);
2632 if (n == 1) {
2633 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2634 return;
2635 }
2636
2637 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2638 __ CompareWithMemoryValue(
2639 instance_type_args_reg,
2640 Address(cache_entry_reg,
2643 kObjectBytes);
2644 if (n == 2) {
2645 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2646 return;
2647 }
2648
2649 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2650 __ CompareWithMemoryValue(
2652 Address(cache_entry_reg,
2655 kObjectBytes);
2656 if (n == 3) {
2657 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2658 return;
2659 }
2660
2661 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2662 __ CompareWithMemoryValue(
2664 Address(cache_entry_reg,
2667 kObjectBytes);
2668 if (n == 4) {
2669 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2670 return;
2671 }
2672
2673 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2674 __ CompareWithMemoryValue(
2675 parent_fun_type_args_reg,
2676 Address(
2677 cache_entry_reg,
2680 kObjectBytes);
2681 if (n == 5) {
2682 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2683 return;
2684 }
2685
2686 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2687 __ CompareWithMemoryValue(
2688 delayed_type_args_reg,
2689 Address(
2690 cache_entry_reg,
2693 kObjectBytes);
2694 if (n == 6) {
2695 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2696 return;
2697 }
2698
2699 __ BranchIf(NOT_EQUAL, next_iteration, Assembler::kNearJump);
2700 __ CompareWithMemoryValue(
2702 Address(cache_entry_reg, target::kCompressedWordSize *
2704 kObjectBytes);
2705 __ BranchIf(EQUAL, found, Assembler::kNearJump);
2706}
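// Note: the loop body above inspects a single SubtypeTestCache entry: a null
// instance class-id/signature means the entry is unoccupied and the search
// fails (not_found); otherwise up to n inputs are compared in order,
// reaching `found` only when all n match and `next_iteration` on the first
// mismatch.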
2707
2708// An object that uses RAII to load from and store to the stack when
2709// appropriate, allowing the code within that scope to act as if the given
2710// register is always provided. Either the Register value stored at [reg] must
2711// be a valid register (not kNoRegister) or [depth] must be a valid stack depth
2712// (not StackRegisterScope::kNoDepth).
2713//
2714// When the Register value stored at [reg] is a valid register, this scope
2715// generates no assembly and does not change the value stored at [reg].
2716//
2717// When [depth] is a valid stack depth, this scope object performs the
2718// following actions:
2719//
2720// On construction:
2721// * Generates assembly to load the value on the stack at [depth] into [alt].
2722// * Sets the Register value pointed to by [reg] to [alt].
2723//
2724// On destruction:
2725// * Generates assembly to store the value of [alt] into the stack at [depth].
2726// * Resets the Register value pointed to by [reg] to kNoRegister.
2727class StackRegisterScope : public ValueObject {
2728 public:
2729 StackRegisterScope(Assembler* assembler,
2730 Register* reg,
2731 intptr_t depth,
2732 Register alt = TMP)
2733 : assembler(assembler), reg_(reg), depth_(depth), alt_(alt) {
2734 if (depth_ != kNoDepth) {
2735 ASSERT(depth_ >= 0);
2736 ASSERT(*reg_ == kNoRegister);
2737 ASSERT(alt_ != kNoRegister);
2738 __ LoadFromStack(alt_, depth_);
2739 *reg_ = alt_;
2740 } else {
2741 ASSERT(*reg_ != kNoRegister);
2742 }
2743 }
2744
2745 ~StackRegisterScope() {
2746 if (depth_ != kNoDepth) {
2747 __ StoreToStack(alt_, depth_);
2748 *reg_ = kNoRegister;
2749 }
2750 }
2751
2752 static constexpr intptr_t kNoDepth = kIntptrMin;
2753
2754 private:
2755 Assembler* const assembler;
2756 Register* const reg_;
2757 const intptr_t depth_;
2758 const Register alt_;
2759};
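// Note: a representative use appears later in this file, e.g.
//
//   StackRegisterScope scope(assembler, &probe_distance_reg,
//                            kProbeDistanceDepth, TypeTestABI::kScratchReg);
//   __ AddRegisters(cache_entry_reg, probe_distance_reg);
//
// When probe_distance_reg is kNoRegister, the scope temporarily materializes
// the value from its stack slot in kScratchReg and writes it back on
// destruction; when a real register was provided, it generates no code.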
2760
2761// Same inputs as StubCodeCompiler::GenerateSubtypeTestCacheSearch with
2762// the following additional requirements:
2763// - cache_entry_reg: the address of the backing array for the cache.
2764// - TypeTestABI::kScratchReg: the Smi value of the length field for the
2765// backing array in cache_entry_reg
2766//
2767// Also expects that all the STC entry input registers have been filled.
2768static void GenerateSubtypeTestCacheHashSearch(
2769 Assembler* assembler,
2770 int n,
2771 Register null_reg,
2772 Register cache_entry_reg,
2773 Register instance_cid_or_sig_reg,
2774 Register instance_type_args_reg,
2775 Register parent_fun_type_args_reg,
2776 Register delayed_type_args_reg,
2777 Register cache_entry_end_reg,
2778 Register cache_contents_size_reg,
2779 Register probe_distance_reg,
2781 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
2782 // Since the test entry size is a power of 2, we can use shr to divide.
2783 const intptr_t kTestEntryLengthLog2 =
2785
2786 // Before we finish calculating the initial probe entry, we'll need the
2787 // starting cache entry and the number of entries. We'll store these in
2788 // [cache_contents_size_reg] and [probe_distance_reg] (or their equivalent
2789 // stack slots), respectively.
2790 __ Comment("Hash cache traversal");
2791 __ Comment("Calculating number of entries");
2792 // The array length is a Smi so it needs to be untagged.
2793 __ SmiUntag(TypeTestABI::kScratchReg);
2794 __ LsrImmediate(TypeTestABI::kScratchReg, kTestEntryLengthLog2);
2795 if (probe_distance_reg != kNoRegister) {
2796 __ MoveRegister(probe_distance_reg, TypeTestABI::kScratchReg);
2797 } else {
2798 __ PushRegister(TypeTestABI::kScratchReg);
2799 }
2800
2801 __ Comment("Calculating starting entry address");
2802 __ AddImmediate(cache_entry_reg,
2804 if (cache_contents_size_reg != kNoRegister) {
2805 __ MoveRegister(cache_contents_size_reg, cache_entry_reg);
2806 } else {
2807 __ PushRegister(cache_entry_reg);
2808 }
2809
2810 __ Comment("Calculating end of entries address");
2811 __ LslImmediate(TypeTestABI::kScratchReg,
2812 kTestEntryLengthLog2 + target::kCompressedWordSizeLog2);
2813 __ AddRegisters(TypeTestABI::kScratchReg, cache_entry_reg);
2814 if (cache_entry_end_reg != kNoRegister) {
2815 __ MoveRegister(cache_entry_end_reg, TypeTestABI::kScratchReg);
2816 } else {
2817 __ PushRegister(TypeTestABI::kScratchReg);
2818 }
2819
2820 // At this point, the stack is in the following order, if the corresponding
2821 // value doesn't have a register assignment:
2822 // <number of total entries in cache array>
2823 // <cache array entries start>
2824 // <cache array entries end>
2825 // --------- top of stack
2826 //
2827 // and after calculating the initial entry, we'll replace them as follows:
2828 // <probe distance>
2829 // <-cache array contents size> (note this is _negative_)
2830 // <cache array entries end>
2831 // ---------- top of stack
2832 //
2833 // So name them according to their later use.
2834 intptr_t kProbeDistanceDepth = StackRegisterScope::kNoDepth;
2835 intptr_t kHashStackElements = 0;
2836 if (probe_distance_reg == kNoRegister) {
2837 kProbeDistanceDepth = 0;
2838 kHashStackElements++;
2839 }
2840 intptr_t kCacheContentsSizeDepth = StackRegisterScope::kNoDepth;
2841 if (cache_contents_size_reg == kNoRegister) {
2842 kProbeDistanceDepth++;
2843 kHashStackElements++;
2844 kCacheContentsSizeDepth = 0;
2845 }
2846 intptr_t kCacheArrayEndDepth = StackRegisterScope::kNoDepth;
2847 if (cache_entry_end_reg == kNoRegister) {
2848 kProbeDistanceDepth++;
2849 kCacheContentsSizeDepth++;
2850 kHashStackElements++;
2851 kCacheArrayEndDepth = 0;
2852 }
2853
2854 // After this point, any exits should go through one of these two labels,
2855 // which will pop the extra stack elements pushed above.
2856 Label found, not_found;
2857
2858 // When retrieving hashes from objects below, note that a hash of 0 means
2859 // the hash hasn't been computed yet and we need to go to runtime.
2860 auto get_abstract_type_hash = [&](Register dst, Register src,
2861 const char* name) {
2864 __ Comment("Loading %s type hash", name);
2865 __ LoadFromSlot(dst, src, Slot::AbstractType_hash());
2866 __ SmiUntag(dst);
2867 __ CompareImmediate(dst, 0);
2868 __ BranchIf(EQUAL, &not_found);
2869 };
2870 auto get_type_arguments_hash = [&](Register dst, Register src,
2871 const char* name) {
2874 Label done;
2875 __ Comment("Loading %s type arguments hash", name);
2876 // Preload the hash value for TypeArguments::null() so control can jump
2877 // to done if null.
2878 __ LoadImmediate(dst, TypeArguments::kAllDynamicHash);
2879 __ CompareRegisters(src, null_reg);
2880 __ BranchIf(EQUAL, &done, Assembler::kNearJump);
2881 __ LoadFromSlot(dst, src, Slot::TypeArguments_hash());
2882 __ SmiUntag(dst);
2883 __ CompareImmediate(dst, 0);
2884 __ BranchIf(EQUAL, &not_found);
2885 __ Bind(&done);
2886 };
2887
2888 __ Comment("Hash the entry inputs");
2889 {
2890 Label done;
2891 // Assume a Smi tagged instance cid to avoid a branch in the common case.
2892 __ MoveRegister(cache_entry_reg, instance_cid_or_sig_reg);
2893 __ SmiUntag(cache_entry_reg);
2894 __ BranchIfSmi(instance_cid_or_sig_reg, &done, Assembler::kNearJump);
2895 get_abstract_type_hash(cache_entry_reg, instance_cid_or_sig_reg,
2896 "closure signature");
2897 __ Bind(&done);
2898 }
2899 if (n >= 7) {
2900 get_abstract_type_hash(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
2901 "destination");
2902 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2903 }
2904 if (n >= 6) {
2905 get_type_arguments_hash(TypeTestABI::kScratchReg, delayed_type_args_reg,
2906 "delayed");
2907 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2908 }
2909 if (n >= 5) {
2910 get_type_arguments_hash(TypeTestABI::kScratchReg, parent_fun_type_args_reg,
2911 "parent function");
2912 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2913 }
2914 if (n >= 4) {
2915 get_type_arguments_hash(TypeTestABI::kScratchReg,
2917 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2918 }
2919 if (n >= 3) {
2920 get_type_arguments_hash(TypeTestABI::kScratchReg,
2922 "instantiator");
2923 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2924 }
2925 if (n >= 2) {
2926 get_type_arguments_hash(TypeTestABI::kScratchReg, instance_type_args_reg,
2927 "instance");
2928 __ CombineHashes(cache_entry_reg, TypeTestABI::kScratchReg);
2929 }
2930 __ FinalizeHash(cache_entry_reg);
2931
2932 // This requires the number of entries in a hash cache to be a power of 2.
2933 __ Comment("Converting hash to probe entry index");
2934 {
2935 StackRegisterScope scope(assembler, &probe_distance_reg,
2936 kProbeDistanceDepth, TypeTestABI::kScratchReg);
2937 // The entry count is not needed after this point; create the mask in place.
2938 __ AddImmediate(probe_distance_reg, -1);
2939 __ AndRegisters(cache_entry_reg, probe_distance_reg);
2940 // Now set the register to the initial probe distance in words.
2941 __ Comment("Set initial probe distance");
2942 __ LoadImmediate(probe_distance_reg,
2945 }
2946
2947 // Now cache_entry_reg is the starting probe entry index.
2948 __ Comment("Converting probe entry index to probe entry address");
2949 {
2950 StackRegisterScope scope(assembler, &cache_contents_size_reg,
2951 kCacheContentsSizeDepth, TypeTestABI::kScratchReg);
2952 __ LslImmediate(cache_entry_reg,
2953 kTestEntryLengthLog2 + target::kCompressedWordSizeLog2);
2954 __ AddRegisters(cache_entry_reg, cache_contents_size_reg);
2955 // Now set the register to the negated size of the cache contents in words.
2956 __ Comment("Set negated cache contents size");
2957 if (cache_entry_end_reg != kNoRegister) {
2958 __ SubRegisters(cache_contents_size_reg, cache_entry_end_reg);
2959 } else {
2960 __ LoadFromStack(TMP, kCacheArrayEndDepth);
2961 __ SubRegisters(cache_contents_size_reg, TMP);
2962 }
2963 }
2964
2965 Label loop, next_iteration;
2966 __ Bind(&loop);
2968 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
2969 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
2970 &found, &not_found, &next_iteration);
2971 __ Bind(&next_iteration);
2972 __ Comment("Move to next entry");
2973 {
2974 StackRegisterScope scope(assembler, &probe_distance_reg,
2975 kProbeDistanceDepth, TypeTestABI::kScratchReg);
2976 __ AddRegisters(cache_entry_reg, probe_distance_reg);
2977 __ Comment("Adjust probe distance");
2978 __ AddImmediate(probe_distance_reg,
2981 }
2982 __ Comment("Check for leaving array");
2983 // Make sure we haven't run off the array.
2984 if (cache_entry_end_reg != kNoRegister) {
2985 __ CompareRegisters(cache_entry_reg, cache_entry_end_reg);
2986 } else {
2987 __ CompareToStack(cache_entry_reg, kCacheArrayEndDepth);
2988 }
2989 __ BranchIf(LESS, &loop, Assembler::kNearJump);
2990 __ Comment("Wrap around to start of entries");
2991 // Add the negated size of the cache contents.
2992 if (cache_contents_size_reg != kNoRegister) {
2993 __ AddRegisters(cache_entry_reg, cache_contents_size_reg);
2994 } else {
2995 __ LoadFromStack(TypeTestABI::kScratchReg, kCacheContentsSizeDepth);
2996 __ AddRegisters(cache_entry_reg, TypeTestABI::kScratchReg);
2997 }
2998 __ Jump(&loop, Assembler::kNearJump);
2999
3000 __ Bind(&found);
3001 __ Comment("Hash found");
3002 __ Drop(kHashStackElements);
3003 gen_found(assembler, n);
3004 __ Bind(&not_found);
3005 __ Comment("Hash not found");
3006 __ Drop(kHashStackElements);
3007 gen_not_found(assembler, n);
3008}
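// Note: since a hash-based cache always has a power-of-two number of
// entries, the initial bucket above is computed as hash & (num_entries - 1)
// (scaled to a byte offset into the backing array); each miss then advances
// by a probe distance that is adjusted every iteration, wrapping back to the
// start of the entries once the end of the array is passed.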
3009
3010// Same inputs as StubCodeCompiler::GenerateSubtypeTestCacheSearch with
3011// the following additional requirement:
3012// - cache_entry_reg: the address of the backing array for the cache.
3013//
3014// Also expects that all the STC entry input registers have been filled.
3015static void GenerateSubtypeTestCacheLinearSearch(
3016 Assembler* assembler,
3017 int n,
3018 Register null_reg,
3019 Register cache_entry_reg,
3020 Register instance_cid_or_sig_reg,
3021 Register instance_type_args_reg,
3022 Register parent_fun_type_args_reg,
3023 Register delayed_type_args_reg,
3025 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
3026 __ Comment("Linear cache traversal");
3027 __ AddImmediate(cache_entry_reg,
3029
3030 Label found, not_found, loop, next_iteration;
3031 __ Bind(&loop);
3033 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3034 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3035 &found, &not_found, &next_iteration);
3036 __ Bind(&next_iteration);
3037 __ Comment("Next iteration");
3038 __ AddImmediate(
3039 cache_entry_reg,
3041 __ Jump(&loop, Assembler::kNearJump);
3042
3043 __ Bind(&found);
3044 __ Comment("Linear found");
3045 gen_found(assembler, n);
3046 __ Bind(&not_found);
3047 __ Comment("Linear not found");
3048 gen_not_found(assembler, n);
3049}
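// Note: linear caches are simply scanned entry by entry from the start of
// the backing array; the null sentinel checked in the loop body terminates
// the search as "not found".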
3050
3051void StubCodeCompiler::GenerateSubtypeTestCacheSearch(
3052 Assembler* assembler,
3053 int n,
3054 Register null_reg,
3055 Register cache_entry_reg,
3056 Register instance_cid_or_sig_reg,
3057 Register instance_type_args_reg,
3058 Register parent_fun_type_args_reg,
3059 Register delayed_type_args_reg,
3060 Register cache_entry_end_reg,
3061 Register cache_contents_size_reg,
3062 Register probe_distance_reg,
3064 const StubCodeCompiler::STCSearchExitGenerator& gen_not_found) {
3065#if defined(DEBUG)
3066 RegisterSet input_regs;
3067 ASSERT(null_reg != kNoRegister);
3068 input_regs.AddRegister(null_reg);
3069 ASSERT(cache_entry_reg != kNoRegister);
3070 ASSERT(!input_regs.ContainsRegister(cache_entry_reg));
3071 input_regs.AddRegister(cache_entry_reg);
3072 ASSERT(instance_cid_or_sig_reg != kNoRegister);
3073 ASSERT(!input_regs.ContainsRegister(instance_cid_or_sig_reg));
3074 input_regs.AddRegister(instance_cid_or_sig_reg);
3075 if (n >= 2) {
3076 ASSERT(instance_type_args_reg != kNoRegister);
3077 ASSERT(!input_regs.ContainsRegister(instance_type_args_reg));
3078 input_regs.AddRegister(instance_type_args_reg);
3079 }
3080 if (n >= 5) {
3081 ASSERT(parent_fun_type_args_reg != kNoRegister);
3082 ASSERT(!input_regs.ContainsRegister(parent_fun_type_args_reg));
3083 input_regs.AddRegister(parent_fun_type_args_reg);
3084 }
3086 if (n >= 6) {
3087 ASSERT(delayed_type_args_reg != kNoRegister);
3088 ASSERT(!input_regs.ContainsRegister(delayed_type_args_reg));
3089 input_regs.AddRegister(delayed_type_args_reg);
3090 }
3091 if (cache_entry_end_reg != kNoRegister) {
3092 ASSERT(!input_regs.ContainsRegister(cache_entry_end_reg));
3093 input_regs.AddRegister(cache_entry_end_reg);
3094 }
3095 if (cache_contents_size_reg != kNoRegister) {
3096 ASSERT(!input_regs.ContainsRegister(cache_contents_size_reg));
3097 input_regs.AddRegister(cache_contents_size_reg);
3098 }
3099 if (probe_distance_reg != kNoRegister) {
3100 ASSERT(!input_regs.ContainsRegister(probe_distance_reg));
3101 input_regs.AddRegister(probe_distance_reg);
3102 }
3103 // We can allow the use of the registers below only if we're not expecting
3104 // them as an inspected input.
3105 if (n >= 3) {
3106 ASSERT(!input_regs.ContainsRegister(
3108 }
3109 if (n >= 4) {
3110 ASSERT(
3112 }
3113 if (n >= 7) {
3115 }
3116 // We use this as a scratch, so it has to be distinct from the others.
3118
3119 // Verify the STC we received has exactly as many inputs as this stub expects.
3120 Label search_stc;
3122 Slot::SubtypeTestCache_num_inputs());
3123 __ CompareImmediate(TypeTestABI::kScratchReg, n);
3124 __ BranchIf(EQUAL, &search_stc, Assembler::kNearJump);
3125 __ Breakpoint();
3126 __ Bind(&search_stc);
3127#endif
3128
3129 __ LoadAcquireCompressedFromOffset(
3130 cache_entry_reg, TypeTestABI::kSubtypeTestCacheReg,
3132
3133 // Fill in all the STC input registers.
3134 Label initialized, not_closure;
3135 if (n >= 3) {
3136 __ LoadClassIdMayBeSmi(instance_cid_or_sig_reg, TypeTestABI::kInstanceReg);
3137 } else {
3138 // If the type is fully instantiated, then it can be determined at compile
3139 // time whether Smi is a subtype of the type or not. Thus, this code should
3140 // never be called with a Smi instance.
3141 __ LoadClassId(instance_cid_or_sig_reg, TypeTestABI::kInstanceReg);
3142 }
3143 __ CompareImmediate(instance_cid_or_sig_reg, kClosureCid);
3144 __ BranchIf(NOT_EQUAL, &not_closure, Assembler::kNearJump);
3145
3146 // Closure handling.
3147 {
3148 __ Comment("Closure");
3149 __ LoadCompressed(instance_cid_or_sig_reg,
3150 FieldAddress(TypeTestABI::kInstanceReg,
3152 __ LoadCompressed(instance_cid_or_sig_reg,
3153 FieldAddress(instance_cid_or_sig_reg,
3155 if (n >= 2) {
3156 __ LoadCompressed(
3157 instance_type_args_reg,
3158 FieldAddress(TypeTestABI::kInstanceReg,
3160 }
3161 if (n >= 5) {
3162 __ LoadCompressed(
3163 parent_fun_type_args_reg,
3164 FieldAddress(TypeTestABI::kInstanceReg,
3166 }
3167 if (n >= 6) {
3168 __ LoadCompressed(
3169 delayed_type_args_reg,
3170 FieldAddress(TypeTestABI::kInstanceReg,
3172 }
3173
3174 __ Jump(&initialized, Assembler::kNearJump);
3175 }
3176
3177 // Non-Closure handling.
3178 {
3179 __ Comment("Non-Closure");
3180 __ Bind(&not_closure);
3181 if (n >= 2) {
3182 Label has_no_type_arguments;
3183 __ LoadClassById(TypeTestABI::kScratchReg, instance_cid_or_sig_reg);
3184 __ MoveRegister(instance_type_args_reg, null_reg);
3185 __ LoadFieldFromOffset(
3188 kFourBytes);
3189 __ CompareImmediate(TypeTestABI::kScratchReg,
3191 __ BranchIf(EQUAL, &has_no_type_arguments, Assembler::kNearJump);
3192 __ LoadIndexedCompressed(instance_type_args_reg,
3195 __ Bind(&has_no_type_arguments);
3196 __ Comment("No type arguments");
3197 }
3198 __ SmiTag(instance_cid_or_sig_reg);
3199 if (n >= 5) {
3200 __ MoveRegister(parent_fun_type_args_reg, null_reg);
3201 }
3202 if (n >= 6) {
3203 __ MoveRegister(delayed_type_args_reg, null_reg);
3204 }
3205 }
3206
3207 __ Bind(&initialized);
3208 // There is a maximum size for linear caches that is smaller than the size
3209 // of any hash-based cache, so we check the size of the backing array to
3210 // determine if this is a linear or hash-based cache.
3211 //
3212 // We load it into TypeTestABI::kScratchReg as the hash search code expects
3213 // it there.
3214 Label is_hash;
3215 __ LoadFromSlot(TypeTestABI::kScratchReg, cache_entry_reg,
3216 Slot::Array_length());
3217 __ CompareImmediate(TypeTestABI::kScratchReg,
3219 __ BranchIf(GREATER, &is_hash);
3220
3222 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3223 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3224 gen_found, gen_not_found);
3225
3226 __ Bind(&is_hash);
3228 assembler, n, null_reg, cache_entry_reg, instance_cid_or_sig_reg,
3229 instance_type_args_reg, parent_fun_type_args_reg, delayed_type_args_reg,
3230 cache_entry_end_reg, cache_contents_size_reg, probe_distance_reg,
3231 gen_found, gen_not_found);
3232}
3233#endif
3234
3235// See comment on [GenerateSubtypeNTestCacheStub].
3236void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
3237 GenerateSubtypeNTestCacheStub(assembler, 1);
3238}
3239
3240// See comment on [GenerateSubtypeNTestCacheStub].
3241void StubCodeCompiler::GenerateSubtype2TestCacheStub() {
3242 GenerateSubtypeNTestCacheStub(assembler, 2);
3243}
3244
3245// See comment on [GenerateSubtypeNTestCacheStub].
3246void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
3247 GenerateSubtypeNTestCacheStub(assembler, 3);
3248}
3249
3250// See comment on [GenerateSubtypeNTestCacheStub].
3251void StubCodeCompiler::GenerateSubtype4TestCacheStub() {
3252 GenerateSubtypeNTestCacheStub(assembler, 4);
3253}
3254
3255// See comment on [GenerateSubtypeNTestCacheStub].
3256void StubCodeCompiler::GenerateSubtype6TestCacheStub() {
3257 GenerateSubtypeNTestCacheStub(assembler, 6);
3258}
3259
3260// See comment on [GenerateSubtypeNTestCacheStub].
3261void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
3262 GenerateSubtypeNTestCacheStub(assembler, 7);
3263}
3264
3265} // namespace compiler
3266
3267} // namespace dart