#if !defined(FFI_UNIT_TESTS)
#include "vm/cpu.h"
#endif
#if !defined(FFI_UNIT_TESTS)
// In Soft FP and vararg calls, floats and doubles get passed in integer
// registers.
static bool SoftFpAbi(bool has_varargs, bool is_result) {
#if defined(TARGET_ARCH_ARM)
  return !TargetCPUFeatures::hardfp_supported();
#elif defined(TARGET_ARCH_ARM64) && defined(DART_TARGET_OS_WINDOWS)
  return has_varargs && !is_result;
#else
  return false;
#endif
}
#else  // !defined(FFI_UNIT_TESTS)
static bool SoftFpAbi(bool has_varargs, bool is_result) {
#if defined(TARGET_ARCH_ARM) && defined(DART_TARGET_OS_ANDROID)
  return true;
#elif defined(TARGET_ARCH_ARM)
  return has_varargs;
#elif defined(TARGET_ARCH_ARM64) && defined(DART_TARGET_OS_WINDOWS)
  return has_varargs && !is_result;
#else
  return false;
#endif
}
#endif  // !defined(FFI_UNIT_TESTS)
// In Soft FP, floats are passed as 4-byte ints and doubles as 8-byte ints.
static const NativeType& ConvertFloatToInt(Zone* zone, const NativeType& type) {
  if (type.SizeInBytes() == 4) {
    return *new (zone) NativePrimitiveType(kInt32);
  }
  return *new (zone) NativePrimitiveType(kInt64);
}
static const NativeType& ConvertIfSoftFp(Zone* zone,
                                         const NativeType& type,
                                         bool has_varargs,
                                         bool is_result = false) {
  if (SoftFpAbi(has_varargs, is_result) && type.IsFloat()) {
    return ConvertFloatToInt(zone, type);
  }
  return type;
}
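// A minimal usage sketch (illustrative only; `zone` stands for any
// allocation zone):
//
//   const auto& double_type = *new (zone) NativePrimitiveType(kDouble);
//   const auto& converted =
//       ConvertIfSoftFp(zone, double_type, /*has_varargs=*/true);
//   // On a soft-float or varargs ABI this yields kInt64, so the value
//   // travels in integer registers instead of FPU registers.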
class ArgumentAllocator : public ValueObject {
 public:
  ArgumentAllocator(Zone* zone, bool has_varargs)
      : has_varargs_(has_varargs), zone_(zone) {}
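  // Assigns a location to one argument; must be called once per argument,
  // left to right, because register and stack bookkeeping carries over from
  // one call to the next.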
  const NativeLocation& AllocateArgumentVariadic(
      const NativeType& payload_type,
      bool is_first_vararg = false,
      bool is_vararg = false) {
#if defined(TARGET_ARCH_ARM64) &&                                              \
    (defined(DART_TARGET_OS_MACOS_IOS) || defined(DART_TARGET_OS_MACOS))
    if (is_first_vararg) {
      // Apple arm64 passes all variadic arguments on the stack.
      BlockAllFpuRegisters();
      cpu_regs_used = CallingConventions::kNumArgRegs;
    }
#endif
#if defined(TARGET_ARCH_RISCV64) || defined(TARGET_ARCH_RISCV32)
    if (is_first_vararg) {
      // Variadic arguments are never passed in FPU registers.
      BlockAllFpuRegisters();
    }
#endif
    const auto& result = AllocateArgument(payload_type, is_vararg);
#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
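    // Windows x64: the four argument slots are shared between integer and
    // xmm registers by position, so consuming a register in one class also
    // consumes the same-indexed register in the other class.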
    if (has_varargs_) {
      if (result.IsRegisters()) {
        // An integer register was consumed; block the xmm register with the
        // same index.
        AllocateFpuRegisterAtIndex(kQuadFpuReg, cpu_regs_used - 1);
      } else if (result.IsFpuRegisters()) {
        // An xmm register was consumed; a variadic double is also passed in
        // the matching integer register.
        const auto& fpu_reg_location = result.AsFpuRegisters();
        const FpuRegisterKind kind = kQuadFpuReg;
        ASSERT(fpu_reg_location.fpu_reg_kind() == kind);
        FpuRegister fpu_register = fpu_reg_location.fpu_reg();
        const intptr_t reg_index = fpu_register;
        ASSERT(cpu_regs_used == reg_index + 1);
        const Register cpu_register =
            CallingConventions::ArgumentRegisters[reg_index];
        const auto& container_type = ConvertFloatToInt(zone_, payload_type);
        return *new (zone_) NativeRegistersLocation(
            zone_, payload_type, container_type, cpu_register);
      }
    }
#endif
    return result;
  }
  const NativeLocation& AllocateArgument(const NativeType& payload_type,
                                         bool is_vararg = false) {
    const auto& payload_type_converted =
        ConvertIfSoftFp(zone_, payload_type, has_varargs_);
    if (payload_type_converted.IsFloat()) {
      return AllocateFloat(payload_type, is_vararg);
    }
    if (payload_type_converted.IsInt()) {
      return AllocateInt(payload_type, is_vararg);
    }
    // Compounds are laid out differently per ABI, so they are allocated
    // per architecture below.
    const NativeCompoundType& compound_type = payload_type.AsCompound();
    return AllocateCompound(compound_type, is_vararg, /*is_result=*/false);
  }
  const NativeLocation& AllocateFloat(const NativeType& payload_type,
                                      bool is_vararg) {
    const auto kind = FpuRegKind(payload_type);
    const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
    if (reg_index != kNoFpuRegister) {
      AllocateFpuRegisterAtIndex(kind, reg_index);
      if (CallingConventions::kArgumentIntRegXorFpuReg) {
        cpu_regs_used++;
      }
#if defined(TARGET_ARCH_ARM)
      return *new (zone_)
          NativeFpuRegistersLocation(payload_type, payload_type, kind,
                                     reg_index);
#endif
      const FpuRegister reg =
          CallingConventions::FpuArgumentRegisters[reg_index];
      return *new (zone_)
          NativeFpuRegistersLocation(payload_type, payload_type, reg);
    }
    // Fallback after the FPU argument registers are exhausted.
#if defined(TARGET_ARCH_RISCV64)
    // The hard-float ABI bitcasts floats to integer registers when available.
    if (HasAvailableCpuRegisters(1)) {
      const Register reg = AllocateCpuRegister();
      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
      return *new (zone_)
          NativeRegistersLocation(zone_, payload_type, container_type, reg);
    }
#elif defined(TARGET_ARCH_RISCV32)
    if (((payload_type.SizeInBytes() == 4) && HasAvailableCpuRegisters(1)) ||
        ((payload_type.SizeInBytes() == 8) && HasAvailableCpuRegisters(2))) {
      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
      return AllocateInt(payload_type, container_type, is_vararg);
    }
#endif
    BlockAllFpuRegisters();
    return AllocateStack(payload_type);
  }
  const NativeLocation& AllocateInt(const NativeType& payload_type,
                                    const NativeType& container_type,
                                    bool is_vararg) {
    if (compiler::target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
      // Pass in a register pair, aligning the pair index if required.
      if (CallingConventions::kArgumentRegisterAlignment ==
          kAlignedToWordSizeAndValueSize) {
        cpu_regs_used += cpu_regs_used % 2;
      }
      if (HasAvailableCpuRegisters(2)) {
        const Register register_1 = AllocateCpuRegister();
        const Register register_2 = AllocateCpuRegister();
        return *new (zone_) NativeRegistersLocation(
            zone_, payload_type, container_type, register_1, register_2);
      }
    } else if (HasAvailableCpuRegisters(1)) {
      return *new (zone_) NativeRegistersLocation(
          zone_, payload_type, container_type, AllocateCpuRegister());
    }
    return AllocateStack(payload_type, is_vararg);
  }
  const NativeLocation& AllocateInt(const NativeType& payload_type,
                                    bool is_vararg) {
    const auto& payload_type_converted =
        ConvertIfSoftFp(zone_, payload_type, has_varargs_);
    // Some calling conventions require integers to be sign- or zero-extended
    // to the full register size.
    const auto& container_type = payload_type_converted.Extend(
        zone_, CallingConventions::kArgumentRegisterExtension);
    return AllocateInt(payload_type, container_type, is_vararg);
  }
#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
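  // System V AMD64: a compound of at most 16 bytes is split into 8-byte
  // chunks; a chunk containing only floats goes in an SSE register, any
  // other chunk in an integer register. For example,
  // `struct {double x; int64_t y;}` takes one xmm and one cpu register,
  // while `struct {double x; double y;}` takes two xmm registers.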
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const intptr_t size = payload_type.SizeInBytes();
    if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
      intptr_t required_regs =
          payload_type.NumberOfWordSizeChunksNotOnlyFloat();
      intptr_t required_xmm_regs =
          payload_type.NumberOfWordSizeChunksOnlyFloat();
      const bool regs_available =
          cpu_regs_used + required_regs <= CallingConventions::kNumArgRegs;
      const bool fpu_regs_available =
          FirstFreeFpuRegisterIndex(kQuadFpuReg) + required_xmm_regs <=
          CallingConventions::kNumFpuArgRegs;
      if (regs_available && fpu_regs_available) {
        // Transfer in registers.
        NativeLocations& multiple_locations = *new (zone_) NativeLocations(
            zone_, required_regs + required_xmm_regs);
        for (intptr_t offset = 0; offset < size;
             offset += compiler::target::kWordSize) {
          if (payload_type.ContainsOnlyFloats(Range::StartAndEnd(
                  offset, Utils::Minimum<intptr_t>(offset + 8, size)))) {
            const intptr_t reg_index = FirstFreeFpuRegisterIndex(kQuadFpuReg);
            AllocateFpuRegisterAtIndex(kQuadFpuReg, reg_index);
            const auto& type = *new (zone_) NativePrimitiveType(kDouble);
            multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
                type, type, kQuadFpuReg, reg_index));
          } else {
            const auto& payload_type = *new (zone_)
                NativePrimitiveType(TypeForSize(chunk_size(size - offset)));
            const auto& container_type = *new (zone_) NativePrimitiveType(
                TypeForSize(compiler::target::kWordSize));
            multiple_locations.Add(new (zone_) NativeRegistersLocation(
                zone_, payload_type, container_type, AllocateCpuRegister()));
          }
        }
        return *new (zone_)
            MultipleNativeLocations(payload_type, multiple_locations);
      }
    }
    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
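  // Windows x64: a compound of size 1, 2, 4, or 8 bytes is passed as if it
  // were an integer of that size; all other compounds are passed by pointer
  // to a caller-made copy.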
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const NativeCompoundType& compound_type = payload_type.AsCompound();
    const intptr_t size = compound_type.SizeInBytes();
    if (size <= 8 && Utils::IsPowerOfTwo(size)) {
      if (HasAvailableCpuRegisters(1)) {
        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, 1);
        const auto& type = *new (zone_)
            NativePrimitiveType(PrimitiveTypeFromSizeInBytes(size));
        multiple_locations.Add(new (zone_) NativeRegistersLocation(
            zone_, type, type, AllocateCpuRegister()));
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }
    } else if (size > 0) {
      // Pass in a pointer to a copy.
      const auto& pointer_type = *new (zone_) NativePrimitiveType(kAddress);
      const auto& pointer_location = AllocateArgument(pointer_type);
      return *new (zone_)
          PointerToMemoryLocation(pointer_location, compound_type);
    }
    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_IA32)
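  // ia32 passes all compounds on the stack, regardless of size.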
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_IA32)
#if defined(TARGET_ARCH_ARM)
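  // ARM hardfp: a homogeneous float aggregate (all members the same float
  // type, at most 4 of them) goes in consecutive S/D registers, e.g.
  // `struct {float x; float y;}` takes two S registers. Other compounds are
  // passed in 4- or 8-byte integer chunks.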
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();
    if (compound_type.ContainsHomogeneousFloats() &&
        !SoftFpAbi(has_varargs_, is_result) &&
        compound_type.NumPrimitiveMembersRecursive() <= 4) {
      const auto& elem_type = compound_type.FirstPrimitiveMember();
      const intptr_t size = compound_type.SizeInBytes();
      const intptr_t elem_size = elem_type.SizeInBytes();
      const auto reg_kind = FpuRegisterKindFromSize(elem_size);
      ASSERT(size % elem_size == 0);
      const intptr_t num_registers = size / elem_size;
      const intptr_t first_reg =
          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
      if (first_reg != kNoFpuRegister) {
        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, num_registers);
        for (int i = 0; i < num_registers; i++) {
          const intptr_t reg_index = first_reg + i;
          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
              elem_type, elem_type, reg_kind, reg_index));
        }
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }
      BlockAllFpuRegisters();
      return AllocateStack(payload_type);
    } else if (payload_type.AlignmentInBytesStack() == 8) {
      const intptr_t chunk_size = payload_type.AlignmentInBytesStack();
      const intptr_t size_rounded =
          Utils::RoundUp(payload_type.SizeInBytes(), chunk_size);
      const intptr_t num_chunks = size_rounded / chunk_size;
      const auto& chunk_type =
          *new (zone_) NativePrimitiveType(chunk_size == 4 ? kInt32 : kInt64);
      NativeLocations& multiple_locations =
          *new (zone_) NativeLocations(zone_, num_chunks);
      for (int i = 0; i < num_chunks; i++) {
        const auto& allocated_chunk = &AllocateArgument(chunk_type);
        // The last chunk is only 4 bytes when the compound has just 4
        // remaining bytes to be allocated.
        if (i == num_chunks - 1 && chunk_size == 8 &&
            Utils::RoundUp(payload_type.SizeInBytes(), 4) % 8 == 4) {
          const auto& small_chunk_type =
              *new (zone_) NativePrimitiveType(kInt32);
          multiple_locations.Add(&allocated_chunk->WithOtherNativeType(
              zone_, small_chunk_type, small_chunk_type));
        } else {
          multiple_locations.Add(allocated_chunk);
        }
      }
      return *new (zone_)
          MultipleNativeLocations(compound_type, multiple_locations);
    }
    return AllocateCompoundAsMultiple(compound_type);
  }
#endif  // defined(TARGET_ARCH_ARM)
#if defined(TARGET_ARCH_ARM64)
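  // ARM64: homogeneous float aggregates of at most 4 members go in
  // consecutive V registers; other compounds of at most 16 bytes go in one
  // or two X registers; larger compounds are passed by pointer to a copy.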
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();
    const intptr_t size = compound_type.SizeInBytes();
    if (compound_type.ContainsHomogeneousFloats() &&
        !SoftFpAbi(has_varargs_, is_result) &&
        compound_type.NumPrimitiveMembersRecursive() <= 4) {
      const auto& elem_type = compound_type.FirstPrimitiveMember();
      const intptr_t elem_size = elem_type.SizeInBytes();
      const auto reg_kind = kQuadFpuReg;
      ASSERT(size % elem_size == 0);
      const intptr_t num_registers = size / elem_size;
      const intptr_t first_reg =
          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
      if (first_reg != kNoFpuRegister) {
        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, num_registers);
        for (int i = 0; i < num_registers; i++) {
          const intptr_t reg_index = first_reg + i;
          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
              elem_type, elem_type, reg_kind, reg_index));
        }
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }
      BlockAllFpuRegisters();
      return AllocateStack(payload_type, is_vararg);
    }
    if (size <= 16) {
      const intptr_t size_rounded = Utils::RoundUp(size, 8);
      const intptr_t num_chunks = size_rounded / 8;
      ASSERT((num_chunks == 1) || (num_chunks == 2));
      // Block the remaining argument registers if the chunks do not all fit,
      // so registers and stack are not used for the same compound.
#if defined(DART_TARGET_OS_WINDOWS)
      if (!HasAvailableCpuRegisters(num_chunks) && !is_vararg) {
        cpu_regs_used = CallingConventions::kNumArgRegs;
      }
#else
      if (!HasAvailableCpuRegisters(num_chunks)) {
        cpu_regs_used = CallingConventions::kNumArgRegs;
      }
#endif
      return AllocateCompoundAsMultiple(payload_type);
    }
    // Pass in a pointer to a copy.
    const auto& pointer_location =
        AllocateArgument(*new (zone_) NativePrimitiveType(kInt64));
    return *new (zone_)
        PointerToMemoryLocation(pointer_location, compound_type);
  }
#endif  // defined(TARGET_ARCH_ARM64)
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
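  // RISC-V hard-float: a compound with one or two scalar members, at least
  // one of which is a float, may be passed in F/X registers per the FP
  // calling convention; other compounds up to two machine words travel in
  // X registers or on the stack, and larger ones by pointer to a copy.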
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();

    const NativePrimitiveType* first = nullptr;
    const NativePrimitiveType* second = nullptr;
    const intptr_t num_primitive_members =
        compound_type.PrimitivePairMembers(&first, &second);

    if ((num_primitive_members == 1) && first->IsFloat()) {
      if (HasAvailableFpuRegisters(1)) {
        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, 1);
        multiple_locations.Add(&AllocateArgument(*first));
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }
    }
    if (num_primitive_members == 2) {
      if (first->IsFloat() && second->IsFloat()) {
        if (HasAvailableFpuRegisters(2)) {
          NativeLocations& multiple_locations =
              *new (zone_) NativeLocations(zone_, 2);
          multiple_locations.Add(&AllocateArgument(*first));
          multiple_locations.Add(&AllocateArgument(*second));
          return *new (zone_)
              MultipleNativeLocations(compound_type, multiple_locations);
        }
      } else if (first->IsFloat() || second->IsFloat()) {
        if (HasAvailableFpuRegisters(1) && HasAvailableCpuRegisters(1)) {
          NativeLocations& multiple_locations =
              *new (zone_) NativeLocations(zone_, 2);
          multiple_locations.Add(&AllocateArgument(*first));
          multiple_locations.Add(&AllocateArgument(*second));
          return *new (zone_)
              MultipleNativeLocations(compound_type, multiple_locations);
        }
      }
    }

    if (compound_type.SizeInBytes() <= 2 * compiler::target::kWordSize) {
      return AllocateCompoundAsMultiple(compound_type);
    }
    // Pass in a pointer to a copy.
    const auto& pointer_type = *new (zone_) NativePrimitiveType(kAddress);
    const auto& pointer_location = AllocateArgument(pointer_type);
    return *new (zone_)
        PointerToMemoryLocation(pointer_location, compound_type);
  }
#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
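  // Shared fallback used by several ABIs: spread the compound over
  // consecutive word-size argument locations, registers first, then stack.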
  const NativeLocation& AllocateCompoundAsMultiple(
      const NativeCompoundType& compound_type) {
    const intptr_t chunk_size = compiler::target::kWordSize;
    const intptr_t num_chunks =
        Utils::RoundUp(compound_type.SizeInBytes(), chunk_size) / chunk_size;
    const NativeType& container_type =
        *new (zone_) NativePrimitiveType(TypeForSize(chunk_size));
    NativeLocations& locations =
        *new (zone_) NativeLocations(zone_, num_chunks);
    intptr_t size_remaining = compound_type.SizeInBytes();
    while (size_remaining > 0) {
      const auto& chunk = AllocateArgument(container_type);
      const intptr_t size = Utils::Minimum(size_remaining, chunk_size);
      const NativeType& payload_type =
          *new (zone_) NativePrimitiveType(TypeForSize(size));
      locations.Add(
          &chunk.WithOtherNativeType(zone_, payload_type, container_type));
      size_remaining -= size;
    }
    return *new (zone_) MultipleNativeLocations(compound_type, locations);
  }
#if defined(TARGET_ARCH_ARM)
    AllocateFpuRegisterAtIndex(kQuadFpuReg, cpu_regs_used);
#endif
  const NativeLocation& AllocateStack(const NativeType& payload_type,
                                      bool is_vararg = false) {
    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
    const intptr_t size = payload_type.SizeInBytes();
    // If the stack arguments are not packed, the 32 lowest bits should not
    // contain garbage.
    const auto& container_type = payload_type.Extend(
        zone_, CallingConventions::kArgumentStackExtension);
    const auto& result = *new (zone_) NativeStackLocation(
        payload_type, container_type,
        CallingConventions::kStackPointerRegister, stack_height_in_bytes);
    stack_height_in_bytes += size;
    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
    return result;
  }
  void align_stack(intptr_t alignment) {
    stack_height_in_bytes = Utils::RoundUp(stack_height_in_bytes, alignment);
  }
  int NumFpuRegisters(FpuRegisterKind kind) const {
#if defined(TARGET_ARCH_ARM)
    if (has_varargs_) return 0;
    if (kind == kSingleFpuReg) return CallingConventions::kNumSFpuArgRegs;
    if (kind == kDoubleFpuReg) return CallingConventions::kNumDFpuArgRegs;
#endif
    return CallingConventions::kNumFpuArgRegs;
  }
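  // FPU register bookkeeping uses a bitmask in which each bit covers 32 bits
  // of register state: an S register occupies 1 bit, a D register 2 bits,
  // and a Q register 4 bits, so aliasing registers block one another.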
  int FirstFreeFpuRegisterIndex(FpuRegisterKind kind, int amount = 1) const {
    const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
    const intptr_t mask = (1 << (size * amount)) - 1;
    intptr_t index = 0;
    while (index + amount <= NumFpuRegisters(kind)) {
      const intptr_t mask_shifted = mask << (index * size);
      if ((fpu_reg_parts_used & mask_shifted) == 0) {
        return index;
      }
      index += amount;
    }
    return kNoFpuRegister;
  }
  void AllocateFpuRegisterAtIndex(FpuRegisterKind kind,
                                  int index,
                                  int amount = 1) {
    const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
    const intptr_t mask = (1 << (size * amount)) - 1;
    const intptr_t mask_shifted = mask << (index * size);
    ASSERT((mask_shifted & fpu_reg_parts_used) == 0);
    fpu_reg_parts_used |= mask_shifted;
  }
  void BlockAllFpuRegisters() {
    // Set all bits to occupied.
    fpu_reg_parts_used = -1;
  }
  bool HasAvailableCpuRegisters(intptr_t count) const {
    return cpu_regs_used + count <= CallingConventions::kNumArgRegs;
  }
  bool HasAvailableFpuRegisters(intptr_t count) const {
    return FirstFreeFpuRegisterIndex(kQuadFpuReg, count) != kNoFpuRegister;
  }
  intptr_t cpu_regs_used = 0;
  // Every bit denotes 32 bits of FPU registers.
  intptr_t fpu_reg_parts_used = 0;
  intptr_t stack_height_in_bytes = 0;
  const bool has_varargs_;
  Zone* const zone_;
};
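// Assigns a location to each argument of a C signature, left to right.
// Where the ABI passes compound results via a hidden pointer argument, that
// pointer is allocated before the regular arguments.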
static NativeLocations& ArgumentLocations(
    Zone* zone,
    const ZoneGrowableArray<const NativeType*>& arg_reps,
    const NativeLocation& return_location,
    intptr_t var_args_index) {
  intptr_t num_arguments = arg_reps.length();
  auto& result = *new (zone) NativeLocations(zone, num_arguments);
  const bool has_varargs =
      var_args_index != NativeFunctionType::kNoVariadicArguments;
  ArgumentAllocator frame_state(zone, has_varargs);
#if !defined(TARGET_ARCH_ARM64)
  // Arm64 returns compounds via a pointer in R8, which is not an argument
  // register, so it is not allocated here.
  if (return_location.IsPointerToMemory()) {
    const auto& pointer_location =
        return_location.AsPointerToMemory().pointer_location();
    const auto& pointer_location_allocated =
        frame_state.AllocateArgumentVariadic(pointer_location.payload_type());
    ASSERT(pointer_location.Equals(pointer_location_allocated));
  }
#endif
  for (intptr_t i = 0; i < num_arguments; i++) {
    const NativeType& rep = *arg_reps[i];
    const bool is_first_vararg = has_varargs && i == var_args_index;
    const bool is_vararg = has_varargs && i >= var_args_index;
    result.Add(&frame_state.AllocateArgumentVariadic(rep, is_first_vararg,
                                                     is_vararg));
  }
  return result;
}
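// Compound results that cannot travel in registers are written to memory
// allocated by the caller; the callee receives a pointer to that memory as
// a hidden argument and returns the same pointer in the result register.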
#if !defined(TARGET_ARCH_IA32)
static const NativeLocation& PointerToMemoryResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type) {
  const auto& pointer_type = *new (zone) NativePrimitiveType(kAddress);
  const auto& pointer_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterCall);
  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterReturn);
  return *new (zone) PointerToMemoryLocation(
      pointer_location, pointer_return_location, payload_type);
}
#endif  // !defined(TARGET_ARCH_IA32)
#if defined(TARGET_ARCH_IA32)
// ia32 passes the pointer to the result location on the stack.
static const NativeLocation& PointerToMemoryResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type) {
  const auto& pointer_type = *new (zone) NativePrimitiveType(kAddress);
  const auto& pointer_location = *new (zone) NativeStackLocation(
      pointer_type, pointer_type, CallingConventions::kStackPointerRegister,
      0);
  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterReturn);
  return *new (zone) PointerToMemoryLocation(
      pointer_location, pointer_return_location, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32)
#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
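// System V AMD64 returns compounds of at most 16 bytes in up to two
// registers: each 8-byte chunk goes in an SSE register if it contains only
// floats, otherwise in an integer register. Larger compounds are returned
// via caller-allocated memory.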
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
    // Allocate the same way as an argument, but use the return registers
    // instead of the argument registers.
    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 2);
    intptr_t used_regs = 0;
    intptr_t used_xmm_regs = 0;

    const auto& double_type = *new (zone) NativePrimitiveType(kDouble);

    const bool first_half_in_xmm = payload_type.ContainsOnlyFloats(
        Range::StartAndEnd(0, Utils::Minimum<intptr_t>(size, 8)));
    if (first_half_in_xmm) {
      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
          double_type, double_type, kQuadFpuReg,
          CallingConventions::kReturnFpuReg));
      used_xmm_regs++;
    } else {
      const auto& payload_type = *new (zone) NativePrimitiveType(
          TypeForSize(Utils::Minimum<intptr_t>(size, 8)));
      const auto& container_type = *new (zone) NativePrimitiveType(
          TypeForSize(8));
      multiple_locations.Add(new (zone) NativeRegistersLocation(
          zone, payload_type, container_type, CallingConventions::kReturnReg));
      used_regs++;
    }
    if (size > 8) {
      const bool second_half_in_xmm = payload_type.ContainsOnlyFloats(
          Range::StartAndEnd(8, size));
      if (second_half_in_xmm) {
        const FpuRegister reg = used_xmm_regs == 0
                                    ? CallingConventions::kReturnFpuReg
                                    : CallingConventions::kSecondReturnFpuReg;
        multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
            double_type, double_type, kQuadFpuReg, reg));
        used_xmm_regs++;
      } else {
        const Register reg = used_regs == 0
                                 ? CallingConventions::kReturnReg
                                 : CallingConventions::kSecondReturnReg;
        const auto& payload_type =
            *new (zone) NativePrimitiveType(TypeForSize(size - 8));
        const auto& container_type =
            *new (zone) NativePrimitiveType(TypeForSize(8));
        multiple_locations.Add(new (zone) NativeRegistersLocation(
            zone, payload_type, container_type, reg));
        used_regs++;
      }
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
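// Windows x64 returns compounds of power-of-two size up to 8 bytes in RAX;
// everything else is returned via caller-allocated memory.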
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 8 && size > 0 && Utils::IsPowerOfTwo(size)) {
    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
    const auto& type =
        *new (zone) NativePrimitiveType(PrimitiveTypeFromSizeInBytes(size));
    multiple_locations.Add(new (zone) NativeRegistersLocation(
        zone, type, type, CallingConventions::kReturnReg));
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_IA32) && defined(DART_TARGET_OS_WINDOWS)
// Windows ia32 returns compounds of size 1, 2, 4, and 8 in EAX(:EDX).
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 8 && size > 0 && Utils::IsPowerOfTwo(size)) {
    NativeLocations& multiple_locations =
        *new (zone) NativeLocations(zone, size == 8 ? 2 : 1);
    const auto& type = *new (zone) NativePrimitiveType(kUint32);
    multiple_locations.Add(new (zone) NativeRegistersLocation(
        zone, type, type, CallingConventions::kReturnReg));
    if (size == 8) {
      multiple_locations.Add(new (zone) NativeRegistersLocation(
          zone, type, type, CallingConventions::kSecondReturnReg));
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32) && defined(DART_TARGET_OS_WINDOWS)
#if defined(TARGET_ARCH_ARM)
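// ARM hardfp returns homogeneous float aggregates of at most 4 members in
// consecutive FPU registers; other compounds of at most 4 bytes come back
// in R0, and the rest via caller-allocated memory.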
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t num_members = payload_type.NumPrimitiveMembersRecursive();
  if (payload_type.ContainsHomogeneousFloats() &&
      !SoftFpAbi(has_varargs, /*is_result=*/true) && num_members <= 4) {
    NativeLocations& multiple_locations =
        *new (zone) NativeLocations(zone, num_members);
    for (int i = 0; i < num_members; i++) {
      const auto& member = payload_type.FirstPrimitiveMember();
      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
          member, member, FpuRegisterKindFromSize(member.SizeInBytes()), i));
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 4) {
    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
    const auto& type = *new (zone) NativePrimitiveType(kUint32);
    multiple_locations.Add(new (zone)
                               NativeRegistersLocation(zone, type, type, R0));
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_ARM)
#if defined(TARGET_ARCH_ARM64)
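// ARM64 reuses the argument allocator for results: if the compound would be
// passed in registers as a first argument, it is returned in those same
// registers; otherwise it is returned via caller-allocated memory.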
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  ArgumentAllocator frame_state(zone, has_varargs);
  const auto& location_as_argument =
      frame_state.AllocateArgumentVariadic(payload_type);
  if (!location_as_argument.IsStack() &&
      !location_as_argument.IsPointerToMemory()) {
    return location_as_argument;
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_ARM64)
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  // Use the same allocation as for a first argument.
  ArgumentAllocator frame_state(zone, has_varargs);
  const auto& location_as_argument =
      frame_state.AllocateArgumentVariadic(payload_type);
  if (!location_as_argument.IsStack() &&
      !location_as_argument.IsPointerToMemory()) {
    return location_as_argument;
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
// Location for the result of a C signature function.
static const NativeLocation& ResultLocation(Zone* zone,
                                            const NativeType& payload_type,
                                            bool has_varargs) {
  const auto& payload_type_converted =
      ConvertIfSoftFp(zone, payload_type, has_varargs, /*is_result=*/true);
  const auto& container_type = payload_type_converted.Extend(
      zone, CallingConventions::kReturnRegisterExtension);
  if (container_type.IsFloat()) {
    return *new (zone) NativeFpuRegistersLocation(
        payload_type, container_type, CallingConventions::kReturnFpuReg);
  }
  if (container_type.IsInt() || container_type.IsVoid()) {
    if (container_type.SizeInBytes() == 8 &&
        compiler::target::kWordSize == 4) {
      return *new (zone) NativeRegistersLocation(
          zone, payload_type, container_type, CallingConventions::kReturnReg,
          CallingConventions::kSecondReturnReg);
    }
    return *new (zone) NativeRegistersLocation(
        zone, payload_type, container_type, CallingConventions::kReturnReg);
  }
  // Compounds are laid out differently per ABI.
  const auto& compound_type = payload_type.AsCompound();
  return CompoundResultLocation(zone, compound_type, has_varargs);
}
intptr_t NativeCallingConvention::StackTopInBytes() const {
  const intptr_t num_arguments = argument_locations_.length();
  intptr_t max_height_in_bytes = 0;
  for (intptr_t i = 0; i < num_arguments; i++) {
    max_height_in_bytes = Utils::Maximum(
        max_height_in_bytes, argument_locations_[i]->StackTopInBytes());
  }
  return Utils::RoundUp(max_height_in_bytes, compiler::target::kWordSize);
}
void NativeCallingConvention::PrintTo(BaseTextBuffer* f,
                                      bool multi_line) const {
  if (!multi_line) {
    f->AddString("(");
  }
  for (intptr_t i = 0; i < argument_locations_.length(); i++) {
    if (i > 0) {
      f->AddString(multi_line ? "\n" : ", ");
    }
    argument_locations_[i]->PrintTo(f);
  }
  if (multi_line) {
    f->AddString("\n=>\n");
  } else {
    f->AddString(") => ");
  }
  return_location_.PrintTo(f);
}
const char* NativeCallingConvention::ToCString(Zone* zone,
                                               bool multi_line) const {
  ZoneTextBuffer textBuffer(zone);
  PrintTo(&textBuffer, multi_line);
  return textBuffer.buffer();
}
#if !defined(FFI_UNIT_TESTS)