101 : has_varargs_(has_varargs), zone_(zone) {}
// Allocates a native location for one argument, with special handling for
// varargs. NOTE(review): this excerpt is missing interior lines; comments
// describe only what is visible here.
104 bool is_first_vararg =
false,
105 bool is_vararg =
false) {
// On Apple arm64 targets, seeing the first vararg blocks all FPU registers
// from further use — presumably because Apple's arm64 ABI passes varargs on
// the stack; confirm against the full source.
106#if defined(TARGET_ARCH_ARM64) && \
107 (defined(DART_TARGET_OS_MACOS_IOS) || defined(DART_TARGET_OS_MACOS))
108 if (is_first_vararg) {
110 BlockAllFpuRegisters();
// RISC-V likewise blocks all FPU registers once varargs begin.
114#if defined(TARGET_ARCH_RISCV64) || defined(TARGET_ARCH_RISCV32)
115 if (is_first_vararg) {
117 BlockAllFpuRegisters();
// Delegate the actual allocation to the non-variadic entry point.
120 const auto&
result = AllocateArgument(payload_type, is_vararg);
// Windows x64 allocates integer and FPU argument registers in lockstep
// (one shared position counter): after an FPU register is handed out, the
// same-index CPU register must also be consumed.
121#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
123 if (
result.IsRegisters()) {
126 }
else if (
result.IsFpuRegisters()) {
129 const auto& fpu_reg_location =
result.AsFpuRegisters();
131 ASSERT(fpu_reg_location.fpu_reg_kind() == kind);
132 FpuRegister fpu_register = fpu_reg_location.fpu_reg();
133 const intptr_t reg_index = fpu_register;
// The CPU register with the same index must have been marked used too.
134 ASSERT(cpu_regs_used == reg_index + 1);
139 zone_, payload_type, container_type, cpu_register);
// Dispatches on the (converted) payload type: floats, integers, and
// compounds each have a dedicated allocator. NOTE(review): interior lines
// are missing from this excerpt.
151 bool is_vararg =
false) {
152 const auto& payload_type_converted =
154 if (payload_type_converted.IsFloat()) {
155 return AllocateFloat(payload_type, is_vararg);
157 if (payload_type_converted.IsInt()) {
158 return AllocateInt(payload_type, is_vararg);
// Neither float nor int: must be a compound (struct/union) type.
167 const NativeCompoundType& compound_type = payload_type.
AsCompound();
168 return AllocateCompound(compound_type, is_vararg,
false);
// Allocates a floating-point argument: tries a free FPU register of the
// right kind first, with per-architecture fallbacks. NOTE(review): interior
// lines are missing from this excerpt.
171 const NativeLocation& AllocateFloat(
const NativeType& payload_type,
173 const auto kind = FpuRegKind(payload_type);
174 const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
176 AllocateFpuRegisterAtIndex(kind, reg_index);
// ARM distinguishes S/D/Q register kinds, so the location records `kind`.
180#if defined(TARGET_ARCH_ARM)
183 NativeFpuRegistersLocation(payload_type, payload_type, kind,
188 NativeFpuRegistersLocation(payload_type, payload_type, kind,
195 NativeFpuRegistersLocation(payload_type, payload_type, reg);
// RISC-V fallback: when FPU registers are exhausted, floats may travel in
// CPU registers (one on RV64; one or two on RV32 depending on size).
198#if defined(TARGET_ARCH_RISCV64)
200 if (HasAvailableCpuRegisters(1)) {
201 const Register reg = AllocateCpuRegister();
204 NativeRegistersLocation(zone_, payload_type, container_type, reg);
206#elif defined(TARGET_ARCH_RISCV32)
208 if (((payload_type.SizeInBytes() == 4) && HasAvailableCpuRegisters(1)) ||
209 ((payload_type.SizeInBytes() == 8) && HasAvailableCpuRegisters(2))) {
211 return AllocateInt(payload_type, container_type, is_vararg);
// Once a float goes to the stack, block all FPU registers — presumably so
// later FP args do not jump back into registers out of ABI order; confirm.
215 BlockAllFpuRegisters();
219 return AllocateStack(payload_type);
// Allocates an integer argument into CPU register(s), falling back to the
// stack. NOTE(review): interior lines are missing from this excerpt.
222 const NativeLocation& AllocateInt(
const NativeType& payload_type,
223 const NativeType& container_type,
// On 32-bit targets an 8-byte integer occupies a register pair.
225 if (target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
// Round up to an even register index so the pair is aligned.
230 cpu_regs_used += cpu_regs_used % 2;
233 const Register register_1 = AllocateCpuRegister();
234 const Register register_2 = AllocateCpuRegister();
235 return *
new (zone_) NativeRegistersLocation(
236 zone_, payload_type, container_type, register_1, register_2);
// Single-register case: payload must fit in one machine word.
239 ASSERT(payload_type.SizeInBytes() <= target::kWordSize);
241 return *
new (zone_) NativeRegistersLocation(
242 zone_, payload_type, container_type, AllocateCpuRegister());
// No registers left: spill to the stack.
245 return AllocateStack(payload_type, is_vararg);
// Two-argument overload: derives a word-sized container type by extending
// the (converted) payload type, then delegates to the three-argument
// overload. NOTE(review): interior lines are missing from this excerpt.
249 const NativeLocation& AllocateInt(
const NativeType& payload_type,
251 const auto& payload_type_converted =
256 const auto& container_type = payload_type_converted.Extend(
259 return AllocateInt(payload_type, container_type, is_vararg);
// System V AMD64: compounds up to 16 bytes (two eightbytes) without
// unaligned members may be split across CPU and SSE registers, one location
// per word-size chunk; chunks that are all-float go to XMM, others to CPU
// registers. Otherwise the compound is passed on the stack.
// NOTE(review): interior lines are missing from this excerpt.
262#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
265 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
268 const intptr_t
size = payload_type.SizeInBytes();
269 if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
// Classify each word-size chunk: mixed/int chunks need CPU registers,
// all-float chunks need XMM registers.
270 intptr_t required_regs =
271 payload_type.NumberOfWordSizeChunksNotOnlyFloat();
272 intptr_t required_xmm_regs =
273 payload_type.NumberOfWordSizeChunksOnlyFloat();
274 const bool regs_available =
276 const bool fpu_regs_available =
278 FirstFreeFpuRegisterIndex(
kQuadFpuReg) + required_xmm_regs <=
// Registers are used only if ALL chunks fit; no partial splits.
280 if (regs_available && fpu_regs_available) {
283 zone_, required_regs + required_xmm_regs);
285 offset += compiler::target::kWordSize) {
288 const intptr_t reg_index = FirstFreeFpuRegisterIndex(
kQuadFpuReg);
289 AllocateFpuRegisterAtIndex(
kQuadFpuReg, reg_index);
// All-float chunk travels as a double in an XMM register.
290 const auto&
type = *
new (zone_) NativePrimitiveType(
kDouble);
291 multiple_locations.Add(
new (zone_) NativeFpuRegistersLocation(
// Non-float chunk: payload is the remaining bytes (last chunk may be
// smaller than a word), container is a full word in a CPU register.
294 const auto& payload_type =
296 size -
offset, compiler::target::kWordSize)));
297 const auto& container_type = *
new (zone_) NativePrimitiveType(
299 multiple_locations.Add(
new (zone_) NativeRegistersLocation(
300 zone_, payload_type, container_type, AllocateCpuRegister()));
304 MultipleNativeLocations(payload_type, multiple_locations);
// Too large, empty, or unaligned members: pass on the stack.
307 return AllocateStack(payload_type);
// Windows x64: a small compound travels in a single CPU register chunk;
// anything larger is passed indirectly — memory is allocated for the value
// and a pointer to it is passed as an ordinary argument.
// NOTE(review): interior lines are missing from this excerpt.
311#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
315 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
318 const NativeCompoundType& compound_type = payload_type.AsCompound();
319 const intptr_t
size = compound_type.SizeInBytes();
// Register case: wrap the compound in a primitive chunk type and put it
// in one CPU register.
324 const auto&
type = *
new (zone_) NativePrimitiveType(
326 multiple_locations.Add(
new (zone_) NativeRegistersLocation(
327 zone_,
type,
type, AllocateCpuRegister()));
329 MultipleNativeLocations(compound_type, multiple_locations);
332 }
else if (size > 0) {
// Indirect case: allocate a pointer argument referring to memory that
// holds the compound.
334 const auto& pointer_type = *
new (zone_) NativePrimitiveType(
kAddress);
335 const auto& pointer_location = AllocateArgument(pointer_type);
337 PointerToMemoryLocation(pointer_location, compound_type);
// Fallback (visible here for size == 0 path): stack allocation.
340 return AllocateStack(payload_type);
// IA32: compounds are always passed on the stack — no register allocation.
344#if defined(TARGET_ARCH_IA32)
345 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
348 return AllocateStack(payload_type);
// ARM (AAPCS): homogeneous floating-point aggregates of up to 4 members go
// into consecutive FPU registers; otherwise the compound is split into
// stack-alignment-sized chunks, each allocated as an ordinary argument.
// NOTE(review): interior lines are missing from this excerpt.
352#if defined(TARGET_ARCH_ARM)
355 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
358 const auto& compound_type = payload_type.AsCompound();
359 if (compound_type.ContainsHomogeneousFloats() &&
361 compound_type.NumPrimitiveMembersRecursive() <= 4) {
// All members share one primitive type; one FPU register per member.
362 const auto& elem_type = compound_type.FirstPrimitiveMember();
363 const intptr_t
size = compound_type.SizeInBytes();
364 const intptr_t elem_size = elem_type.SizeInBytes();
366 ASSERT(size % elem_size == 0);
367 const intptr_t num_registers =
size / elem_size;
// The registers must be contiguous, so find a free run first.
368 const intptr_t first_reg =
369 FirstFreeFpuRegisterIndex(reg_kind, num_registers);
371 AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
375 for (
int i = 0; i < num_registers; i++) {
376 const intptr_t reg_index = first_reg + i;
377 multiple_locations.Add(
new (zone_) NativeFpuRegistersLocation(
378 elem_type, elem_type, reg_kind, reg_index));
381 MultipleNativeLocations(compound_type, multiple_locations);
// No contiguous run available: block all FPU registers and use the
// stack — presumably to preserve ABI argument ordering; confirm.
384 BlockAllFpuRegisters();
385 return AllocateStack(payload_type);
387 }
else if (payload_type.AlignmentInBytesStack() == 8) {
// 8-byte-aligned compound: split into chunks of the stack alignment.
// (sic: the original spells the local "chuck_type".)
388 const intptr_t chunk_size = payload_type.AlignmentInBytesStack();
389 ASSERT(chunk_size == 4 || chunk_size == 8);
390 const intptr_t size_rounded =
392 const intptr_t num_chunks = size_rounded / chunk_size;
393 const auto& chuck_type =
394 *
new (zone_) NativePrimitiveType(chunk_size == 4 ?
kInt32 :
kInt64);
398 for (
int i = 0; i < num_chunks; i++) {
399 const auto& allocated_chunk = &AllocateArgument(chuck_type);
// The last chunk may cover fewer bytes than chunk_size; retype it so
// the payload size matches the actual remaining bytes.
402 if (i == num_chunks - 1 && chunk_size == 8 &&
404 const auto& small_chuck_type = *
new (zone_) NativePrimitiveType(
406 multiple_locations.Add(&allocated_chunk->WithOtherNativeType(
407 zone_, small_chuck_type, small_chuck_type));
409 multiple_locations.Add(allocated_chunk);
413 MultipleNativeLocations(compound_type, multiple_locations);
// Default path: word-size chunking shared with other architectures.
415 return AllocateCompoundAsMultiple(compound_type);
// ARM64 (AAPCS64): homogeneous floating-point aggregates of up to 4 members
// use consecutive FPU registers; compounds fitting in 1–2 eight-byte chunks
// use CPU registers; larger compounds are passed indirectly via a pointer.
// NOTE(review): interior lines are missing from this excerpt.
420#if defined(TARGET_ARCH_ARM64)
424 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
427 const auto& compound_type = payload_type.AsCompound();
428 const intptr_t
size = compound_type.SizeInBytes();
429 if (compound_type.ContainsHomogeneousFloats() &&
431 compound_type.NumPrimitiveMembersRecursive() <= 4) {
432 const auto& elem_type = compound_type.FirstPrimitiveMember();
433 const intptr_t elem_size = elem_type.SizeInBytes();
435 ASSERT(size % elem_size == 0);
436 const intptr_t num_registers =
size / elem_size;
// Requires a contiguous run of free FPU registers.
437 const intptr_t first_reg =
438 FirstFreeFpuRegisterIndex(reg_kind, num_registers);
440 AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);
444 for (
int i = 0; i < num_registers; i++) {
445 const intptr_t reg_index = first_reg + i;
446 multiple_locations.Add(
new (zone_) NativeFpuRegistersLocation(
447 elem_type, elem_type, reg_kind, reg_index));
450 MultipleNativeLocations(compound_type, multiple_locations);
// No run available: block FPU registers and spill to the stack.
452 BlockAllFpuRegisters();
453 return AllocateStack(payload_type, is_vararg);
// Non-HFA small compound: one or two 8-byte chunks.
458 const intptr_t num_chunks = size_rounded / 8;
459 ASSERT((num_chunks == 1) || (num_chunks == 2));
// Windows arm64 treats varargs differently here — confirm exact rule
// against the full source; the guard below is all that is visible.
462#if defined(DART_TARGET_OS_WINDOWS)
463 if (!HasAvailableCpuRegisters(num_chunks) && !is_vararg) {
467 if (!HasAvailableCpuRegisters(num_chunks)) {
472 return AllocateCompoundAsMultiple(payload_type);
// Large compound: pass a pointer to memory holding the value.
475 const auto& pointer_location =
476 AllocateArgument(*
new (zone_) NativePrimitiveType(
kInt64));
478 PointerToMemoryLocation(pointer_location, compound_type);
// RISC-V (hardware FP calling convention): a compound flattening to a
// single float, two floats, or a float+int pair may travel in registers
// (allocated member-by-member); otherwise compounds up to two machine words
// are chunked, and larger ones are passed indirectly via a pointer.
// NOTE(review): interior lines are missing from this excerpt.
482#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
485 const NativeLocation& AllocateCompound(
const NativeCompoundType& payload_type,
488 const auto& compound_type = payload_type.AsCompound();
// Flatten to at most two primitive members, if possible.
491 const NativePrimitiveType* first =
nullptr;
492 const NativePrimitiveType* second =
nullptr;
493 const intptr_t num_primitive_members =
494 compound_type.PrimitivePairMembers(&first, &second);
// Single float member: allocate it as a bare float argument.
497 if ((num_primitive_members == 1) && first->IsFloat()) {
500 multiple_locations.Add(&AllocateArgument(*first));
502 MultipleNativeLocations(compound_type, multiple_locations);
505 if (num_primitive_members == 2) {
// Two floats: needs two free FPU registers, allocated independently.
506 if (first->IsFloat() && second->IsFloat()) {
509 if (HasAvailableFpuRegisters(2)) {
512 multiple_locations.Add(&AllocateArgument(*first));
513 multiple_locations.Add(&AllocateArgument(*second));
515 MultipleNativeLocations(compound_type, multiple_locations);
517 }
// Float + int (either order): one FPU and one CPU register.
else if (first->IsFloat() || second->IsFloat()) {
521 if (HasAvailableFpuRegisters(1) && HasAvailableCpuRegisters(1)) {
524 multiple_locations.Add(&AllocateArgument(*first));
525 multiple_locations.Add(&AllocateArgument(*second));
527 MultipleNativeLocations(compound_type, multiple_locations);
// Integer calling convention: up to two words as chunks…
539 if (compound_type.SizeInBytes() <= 2 * target::kWordSize) {
540 return AllocateCompoundAsMultiple(compound_type);
// …anything larger is passed by pointer to memory.
544 const auto& pointer_type = *
new (zone_) NativePrimitiveType(
kAddress);
545 const auto& pointer_location = AllocateArgument(pointer_type);
547 PointerToMemoryLocation(pointer_location, compound_type);
// Splits a compound into machine-word-sized chunks and allocates each chunk
// as an independent argument (register or stack, whatever is next in the
// convention). The final chunk's payload is narrowed to the bytes actually
// remaining. NOTE(review): interior lines are missing from this excerpt.
563 const NativeLocation& AllocateCompoundAsMultiple(
564 const NativeCompoundType& compound_type) {
565 const intptr_t chunk_size = compiler::target::kWordSize;
566 const intptr_t num_chunks =
567 Utils::RoundUp(compound_type.SizeInBytes(), chunk_size) / chunk_size;
// Each chunk travels in a word-sized integer container.
568 const NativeType& container_type =
569 *
new (zone_) NativePrimitiveType(
TypeForSize(chunk_size));
572 intptr_t size_remaining = compound_type.SizeInBytes();
573 while (size_remaining > 0) {
574 const auto& chunk = AllocateArgument(container_type);
// Payload type matches the bytes this chunk really covers (the last
// chunk may be smaller than a full word).
577 const NativeType& payload_type =
578 *
new (zone_) NativePrimitiveType(
TypeForSize(size));
580 &chunk.WithOtherNativeType(zone_, payload_type, container_type));
581 size_remaining -=
size;
583 return *
new (zone_) MultipleNativeLocations(compound_type, locations);
// NOTE(review): fragment of an ARM-only helper — the enclosing function's
// signature is not visible in this excerpt. It marks the quad-FPU-register
// slot at index `cpu_regs_used` as used, presumably to keep CPU and FPU
// register accounting in lockstep on ARM; confirm against the full source.
587#if defined(TARGET_ARCH_ARM)
600 AllocateFpuRegisterAtIndex(
kQuadFpuReg, cpu_regs_used);
// Allocates a stack location for an argument: aligns the running stack
// height, reserves `size` bytes, and re-aligns afterwards.
// NOTE(review): interior lines are missing from this excerpt.
606 const NativeLocation& AllocateStack(
const NativeType& payload_type,
607 bool is_vararg =
false) {
// Alignment may differ for varargs (architecture-dependent).
608 align_stack(payload_type.AlignmentInBytesStack(is_vararg));
609 const intptr_t
size = payload_type.SizeInBytes();
612 const auto& container_type =
614 const auto&
result = *
new (zone_) NativeStackLocation(
616 stack_height_in_bytes);
// Advance the stack cursor past this argument.
617 stack_height_in_bytes +=
size;
618 align_stack(payload_type.AlignmentInBytesStack(is_vararg));
// Rounds the running stack height up to the given alignment.
622 void align_stack(intptr_t alignment) {
623 stack_height_in_bytes =
Utils::RoundUp(stack_height_in_bytes, alignment);
// NOTE(review): fragment — the enclosing function's signature is not
// visible here. On ARM it reports 0 (FPU registers, presumably) when the
// call has varargs; confirm the enclosing function against the full source.
627#if defined(TARGET_ARCH_ARM)
628 if (has_varargs_)
return 0;
// Finds the lowest index at which `amount` consecutive FPU registers of
// `kind` are free, scanning the `fpu_reg_parts_used` bitmask. Each register
// kind covers `size` parts (1, 2, or 4) of the mask.
// NOTE(review): interior lines are missing from this excerpt.
637 int FirstFreeFpuRegisterIndex(
FpuRegisterKind kind,
int amount = 1)
const {
639 ASSERT(size == 1 || size == 2 || size == 4);
// Bitmask covering `amount` registers' worth of parts.
641 const intptr_t mask = (1 << (
size * amount)) - 1;
643 while (index + amount <= NumFpuRegisters(kind)) {
644 const intptr_t mask_shifted = mask << (index *
size);
// A zero intersection means the whole run is free.
645 if ((fpu_reg_parts_used & mask_shifted) == 0) {
// NOTE(review): fragment — the enclosing function's signature is not
// visible in this excerpt. Marks `amount` registers starting at `index` as
// used in the `fpu_reg_parts_used` bitmask; asserts they were free before.
657 ASSERT(size == 1 || size == 2 || size == 4);
658 const intptr_t mask = (1 <<
size * amount) - 1;
659 const intptr_t mask_shifted = (mask << (index *
size));
// Double-allocation would be a bug in the allocator.
660 ASSERT((mask_shifted & fpu_reg_parts_used) == 0);
661 fpu_reg_parts_used |= mask_shifted;
// Marks every FPU register as used by setting all bits of the mask, so no
// later argument can be allocated to an FPU register.
675 void BlockAllFpuRegisters() {
677 fpu_reg_parts_used = -1;
// Returns whether `count` more CPU argument registers are still free.
// NOTE(review): body not visible in this excerpt.
680 bool HasAvailableCpuRegisters(intptr_t
count)
const {
// Returns whether `count` more FPU argument registers are still free.
// NOTE(review): body not visible in this excerpt.
683 bool HasAvailableFpuRegisters(intptr_t
count)
const {
// Number of CPU argument registers handed out so far.
687 intptr_t cpu_regs_used = 0;
// Bitmask of FPU register parts in use (-1 == all blocked).
689 intptr_t fpu_reg_parts_used = 0;
// Running height of the stack-passed argument area, in bytes.
690 intptr_t stack_height_in_bytes = 0;
// Whether the call being laid out has variadic arguments.
691 const bool has_varargs_;