native_calling_convention.cc
// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/ffi/native_calling_convention.h"

#include "vm/compiler/ffi/native_location.h"
#include "vm/compiler/ffi/native_type.h"

#if !defined(FFI_UNIT_TESTS)
#include "vm/cpu.h"
#endif
namespace dart {

namespace compiler {

namespace ffi {

const intptr_t kNoFpuRegister = -1;

#if !defined(FFI_UNIT_TESTS)
// In Soft FP and vararg calls, floats and doubles get passed in integer
// registers.
static bool SoftFpAbi(bool has_varargs, bool is_result) {
#if defined(TARGET_ARCH_ARM)
  if (has_varargs) {
    return true;
  }
  return !TargetCPUFeatures::hardfp_supported();
#elif defined(TARGET_ARCH_ARM64) && defined(DART_TARGET_OS_WINDOWS)
  return has_varargs && !is_result;
#else
  return false;
#endif
}
#else  // !defined(FFI_UNIT_TESTS)
static bool SoftFpAbi(bool has_varargs, bool is_result) {
#if defined(TARGET_ARCH_ARM) && defined(DART_TARGET_OS_ANDROID)
  return true;
#elif defined(TARGET_ARCH_ARM)
  return has_varargs;
#elif defined(TARGET_ARCH_ARM64) && defined(DART_TARGET_OS_WINDOWS)
  return has_varargs && !is_result;
#else
  return false;
#endif
}
#endif  // !defined(FFI_UNIT_TESTS)
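
// For example, under the soft-float convention a C function such as
// `double scale(double x, int i)` receives `x` in core (integer) registers,
// a register pair on 32-bit ARM, rather than in a VFP register.
// ConvertIfSoftFp below models this by rewriting float types as same-sized
// integer types.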

static const NativeType& ConvertFloatToInt(Zone* zone, const NativeType& type) {
  ASSERT(type.IsFloat());
  if (type.SizeInBytes() == 4) {
    return *new (zone) NativePrimitiveType(kInt32);
  }
  ASSERT(type.SizeInBytes() == 8);
  return *new (zone) NativePrimitiveType(kInt64);
}

// In Soft FP, floats are treated as 4 byte ints, and doubles as 8 byte ints.
static const NativeType& ConvertIfSoftFp(Zone* zone,
                                         const NativeType& type,
                                         bool has_varargs,
                                         bool is_result = false) {
  if (SoftFpAbi(has_varargs, is_result) && type.IsFloat()) {
    return ConvertFloatToInt(zone, type);
  }
  return type;
}

static PrimitiveType TypeForSize(intptr_t size) {
  switch (size) {
    case 8:
      return kUint64;
    case 7:
      return kUint56;
    case 6:
      return kUint48;
    case 5:
      return kUint40;
    case 4:
      return kUint32;
    case 3:
      return kUint24;
    case 2:
      return kUint16;
    case 1:
      return kUint8;
    default:
      UNREACHABLE();
      return kVoid;
  }
}
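
// For example, TypeForSize(3) yields kUint24; AllocateCompoundAsMultiple below
// uses this to describe the payload of a trailing chunk that covers only the
// last 3 bytes of a compound.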

// Represents the state of a stack frame going into a call, between allocations
// of argument locations.
class ArgumentAllocator : public ValueObject {
 public:
  explicit ArgumentAllocator(Zone* zone, bool has_varargs)
      : has_varargs_(has_varargs), zone_(zone) {}

  const NativeLocation& AllocateArgumentVariadic(const NativeType& payload_type,
                                                 bool is_first_vararg = false,
                                                 bool is_vararg = false) {
#if defined(TARGET_ARCH_ARM64) && \
    (defined(DART_TARGET_OS_MACOS_IOS) || defined(DART_TARGET_OS_MACOS))
    if (is_first_vararg) {
      // Block all registers.
      BlockAllFpuRegisters();
      cpu_regs_used = CallingConventions::kNumArgRegs;
    }
#endif
#if defined(TARGET_ARCH_RISCV64) || defined(TARGET_ARCH_RISCV32)
    if (is_first_vararg) {
      // Block all FPU registers.
      BlockAllFpuRegisters();
    }
#endif
    const auto& result = AllocateArgument(payload_type, is_vararg);
#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
    if (has_varargs_) {
      if (result.IsRegisters()) {
        // If an integer register is used, block the corresponding xmm register.
        ASSERT(CallingConventions::kArgumentIntRegXorFpuReg);
      } else if (result.IsFpuRegisters()) {
        // If an xmm register is used, also block the corresponding integer
        // register.
        ASSERT(CallingConventions::kArgumentIntRegXorFpuReg);
        const auto& fpu_reg_location = result.AsFpuRegisters();
        const FpuRegisterKind kind = kQuadFpuReg;
        ASSERT(fpu_reg_location.fpu_reg_kind() == kind);
        FpuRegister fpu_register = fpu_reg_location.fpu_reg();
        const intptr_t reg_index = fpu_register;
        ASSERT(cpu_regs_used == reg_index + 1);
        Register cpu_register =
            CallingConventions::ArgumentRegisters[reg_index];
        const auto& container_type = ConvertFloatToInt(zone_, payload_type);
        const auto& cpu_reg_location = *new (zone_) NativeRegistersLocation(
            zone_, payload_type, container_type, cpu_register);
        return *new (zone_)
            BothNativeLocations(fpu_reg_location, cpu_reg_location);
      }
    }
#endif
    return result;
  }

 private:
  const NativeLocation& AllocateArgument(const NativeType& payload_type,
                                         bool is_vararg = false) {
    const auto& payload_type_converted =
        ConvertIfSoftFp(zone_, payload_type, has_varargs_);
    if (payload_type_converted.IsFloat()) {
      return AllocateFloat(payload_type, is_vararg);
    }
    if (payload_type_converted.IsInt()) {
      return AllocateInt(payload_type, is_vararg);
    }

    // Compounds are laid out differently per ABI, so they are implemented
    // per ABI.
    //
    // Compounds always have a PointerToMemory, Stack, or Multiple location;
    // even if the parts of a compound fit in one CPU or FPU register, they
    // will be nested in a MultipleNativeLocations.
    const NativeCompoundType& compound_type = payload_type.AsCompound();
    return AllocateCompound(compound_type, is_vararg, /*is_result*/ false);
  }

  const NativeLocation& AllocateFloat(const NativeType& payload_type,
                                      bool is_vararg) {
    const auto kind = FpuRegKind(payload_type);
    const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
    if (reg_index != kNoFpuRegister) {
      AllocateFpuRegisterAtIndex(kind, reg_index);
      if (CallingConventions::kArgumentIntRegXorFpuReg) {
        cpu_regs_used++;
      }
#if defined(TARGET_ARCH_ARM)
      if (kind == kSingleFpuReg) {
        return *new (zone_)
            NativeFpuRegistersLocation(payload_type, payload_type, kind,
                                       static_cast<SRegister>(reg_index));
      }
      if (kind == kDoubleFpuReg) {
        return *new (zone_)
            NativeFpuRegistersLocation(payload_type, payload_type, kind,
                                       static_cast<DRegister>(reg_index));
      }
#endif
      ASSERT(kind == kQuadFpuReg);
      const FpuRegister reg = static_cast<FpuRegister>(reg_index);
      return *new (zone_)
          NativeFpuRegistersLocation(payload_type, payload_type, reg);
    }

#if defined(TARGET_ARCH_RISCV64)
    // After using up F registers, start bitcasting to X registers.
    if (HasAvailableCpuRegisters(1)) {
      const Register reg = AllocateCpuRegister();
      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
      return *new (zone_)
          NativeRegistersLocation(zone_, payload_type, container_type, reg);
    }
#elif defined(TARGET_ARCH_RISCV32)
    // After using up F registers, start bitcasting to X register (pairs).
    if (((payload_type.SizeInBytes() == 4) && HasAvailableCpuRegisters(1)) ||
        ((payload_type.SizeInBytes() == 8) && HasAvailableCpuRegisters(2))) {
      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
      return AllocateInt(payload_type, container_type, is_vararg);
    }
#endif

    BlockAllFpuRegisters();
    if (CallingConventions::kArgumentIntRegXorFpuReg) {
      ASSERT(cpu_regs_used == CallingConventions::kNumArgRegs);
    }
    return AllocateStack(payload_type);
  }

  const NativeLocation& AllocateInt(const NativeType& payload_type,
                                    const NativeType& container_type,
                                    bool is_vararg) {
    if (target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
      // Some ABIs require 8-byte values to start at an even register.
      if (CallingConventions::kArgumentRegisterAlignment ==
              kAlignedToWordSizeAndValueSize ||
          (is_vararg && CallingConventions::kArgumentRegisterAlignmentVarArgs ==
                            kAlignedToWordSizeAndValueSize)) {
        cpu_regs_used += cpu_regs_used % 2;
      }
      if (cpu_regs_used + 2 <= CallingConventions::kNumArgRegs) {
        const Register register_1 = AllocateCpuRegister();
        const Register register_2 = AllocateCpuRegister();
        return *new (zone_) NativeRegistersLocation(
            zone_, payload_type, container_type, register_1, register_2);
      }
    } else {
      ASSERT(payload_type.SizeInBytes() <= target::kWordSize);
      if (cpu_regs_used + 1 <= CallingConventions::kNumArgRegs) {
        return *new (zone_) NativeRegistersLocation(
            zone_, payload_type, container_type, AllocateCpuRegister());
      }
    }
    return AllocateStack(payload_type, is_vararg);
  }

  // Constructs a container type.
  const NativeLocation& AllocateInt(const NativeType& payload_type,
                                    bool is_vararg) {
    const auto& payload_type_converted =
        ConvertIfSoftFp(zone_, payload_type, has_varargs_);

    // Some calling conventions require the callee to make the lowest 32 bits
    // in registers non-garbage.
    const auto& container_type = payload_type_converted.Extend(
        zone_, CallingConventions::kArgumentRegisterExtension);

    return AllocateInt(payload_type, container_type, is_vararg);
  }

#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
  // If it fits in two FPU and/or CPU registers, transfer in those; otherwise,
  // transfer on the stack.
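  //
  // For example, on System V AMD64 a 16-byte `struct { double x; int64_t i; }`
  // is split into one SSE-register chunk (for `x`) and one general-purpose
  // register chunk (for `i`), while a 24-byte struct falls through to the
  // stack path below.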
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const intptr_t size = payload_type.SizeInBytes();
    if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
      intptr_t required_regs =
          payload_type.NumberOfWordSizeChunksNotOnlyFloat();
      intptr_t required_xmm_regs =
          payload_type.NumberOfWordSizeChunksOnlyFloat();
      const bool regs_available =
          cpu_regs_used + required_regs <= CallingConventions::kNumArgRegs;
      const bool fpu_regs_available =
          FirstFreeFpuRegisterIndex(kQuadFpuReg) != kNoFpuRegister &&
          FirstFreeFpuRegisterIndex(kQuadFpuReg) + required_xmm_regs <=
              CallingConventions::kNumFpuArgRegs;
      if (regs_available && fpu_regs_available) {
        // Transfer in registers.
        NativeLocations& multiple_locations = *new (zone_) NativeLocations(
            zone_, required_regs + required_xmm_regs);
        for (intptr_t offset = 0; offset < size;
             offset += compiler::target::kWordSize) {
          if (payload_type.ContainsOnlyFloats(Range::StartAndEnd(
                  offset, Utils::Minimum<intptr_t>(size, offset + 8)))) {
            const intptr_t reg_index = FirstFreeFpuRegisterIndex(kQuadFpuReg);
            AllocateFpuRegisterAtIndex(kQuadFpuReg, reg_index);
            const auto& type = *new (zone_) NativePrimitiveType(kDouble);
            multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
                type, type, kQuadFpuReg, reg_index));
          } else {
            const auto& payload_type =
                *new (zone_) NativePrimitiveType(TypeForSize(Utils::Minimum(
                    size - offset, compiler::target::kWordSize)));
            const auto& container_type = *new (zone_) NativePrimitiveType(
                TypeForSize(compiler::target::kWordSize));
            multiple_locations.Add(new (zone_) NativeRegistersLocation(
                zone_, payload_type, container_type, AllocateCpuRegister()));
          }
        }
        return *new (zone_)
            MultipleNativeLocations(payload_type, multiple_locations);
      }
    }
    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
  // If the struct fits in a single register and its size is a power of two,
  // use a single register and sign extend.
  // Otherwise, pass a pointer to a copy.
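  //
  // For example, an 8-byte `struct { int32_t a; int32_t b; }` travels in one
  // integer register, whereas a 12-byte struct (not a power-of-two size) is
  // copied to memory and passed by pointer.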
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const NativeCompoundType& compound_type = payload_type.AsCompound();
    const intptr_t size = compound_type.SizeInBytes();
    if (size <= 8 && Utils::IsPowerOfTwo(size)) {
      if (cpu_regs_used < CallingConventions::kNumArgRegs) {
        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, 1);
        const auto& type = *new (zone_) NativePrimitiveType(
            PrimitiveTypeFromSizeInBytes(size));
        multiple_locations.Add(new (zone_) NativeRegistersLocation(
            zone_, type, type, AllocateCpuRegister()));
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }

    } else if (size > 0) {
      // Pointer in register if available, else pointer on stack.
      const auto& pointer_type = *new (zone_) NativePrimitiveType(kAddress);
      const auto& pointer_location = AllocateArgument(pointer_type);
      return *new (zone_)
          PointerToMemoryLocation(pointer_location, compound_type);
    }

    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_IA32)
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    return AllocateStack(payload_type);
  }
#endif  // defined(TARGET_ARCH_IA32)

#if defined(TARGET_ARCH_ARM)
  // Transfer homogeneous floats in FPU registers, and allocate the rest
  // in 4- or 8-byte chunks in registers and on the stack.
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();
    if (compound_type.ContainsHomogeneousFloats() &&
        !SoftFpAbi(has_varargs_, is_result) &&
        compound_type.NumPrimitiveMembersRecursive() <= 4) {
      const auto& elem_type = compound_type.FirstPrimitiveMember();
      const intptr_t size = compound_type.SizeInBytes();
      const intptr_t elem_size = elem_type.SizeInBytes();
      const auto reg_kind = FpuRegisterKindFromSize(elem_size);
      ASSERT(size % elem_size == 0);
      const intptr_t num_registers = size / elem_size;
      const intptr_t first_reg =
          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
      if (first_reg != kNoFpuRegister) {
        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);

        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, num_registers);
        for (int i = 0; i < num_registers; i++) {
          const intptr_t reg_index = first_reg + i;
          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
              elem_type, elem_type, reg_kind, reg_index));
        }
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);

      } else {
        BlockAllFpuRegisters();
        return AllocateStack(payload_type);
      }
    } else if (payload_type.AlignmentInBytesStack() == 8) {
      const intptr_t chunk_size = payload_type.AlignmentInBytesStack();
      ASSERT(chunk_size == 4 || chunk_size == 8);
      const intptr_t size_rounded =
          Utils::RoundUp(payload_type.SizeInBytes(), chunk_size);
      const intptr_t num_chunks = size_rounded / chunk_size;
      const auto& chunk_type =
          *new (zone_) NativePrimitiveType(chunk_size == 4 ? kInt32 : kInt64);

      NativeLocations& multiple_locations =
          *new (zone_) NativeLocations(zone_, num_chunks);
      for (int i = 0; i < num_chunks; i++) {
        const auto& allocated_chunk = &AllocateArgument(chunk_type);
        // The last chunk should not be 8 bytes if the struct only has 4
        // remaining bytes to be allocated.
        if (i == num_chunks - 1 && chunk_size == 8 &&
            Utils::RoundUp(payload_type.SizeInBytes(), 4) % 8 == 4) {
          const auto& small_chunk_type =
              *new (zone_) NativePrimitiveType(kInt32);
          multiple_locations.Add(&allocated_chunk->WithOtherNativeType(
              zone_, small_chunk_type, small_chunk_type));
        } else {
          multiple_locations.Add(allocated_chunk);
        }
      }
      return *new (zone_)
          MultipleNativeLocations(compound_type, multiple_locations);
    } else {
      return AllocateCompoundAsMultiple(compound_type);
    }
  }
#endif  // defined(TARGET_ARCH_ARM)

#if defined(TARGET_ARCH_ARM64)
  // Slightly different from Arm32. FPU registers don't alias the same way,
  // structs up to 16 bytes block remaining registers if they do not fit in
  // registers, and larger structs always go on the stack.
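  //
  // For example, `struct { double x; double y; }` is a homogeneous float
  // aggregate and is passed in two FPU registers, while a 24-byte
  // `struct { int64_t a, b, c; }` is copied to memory and passed by pointer.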
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();
    const intptr_t size = compound_type.SizeInBytes();
    if (compound_type.ContainsHomogeneousFloats() &&
        !SoftFpAbi(has_varargs_, is_result) &&
        compound_type.NumPrimitiveMembersRecursive() <= 4) {
      const auto& elem_type = compound_type.FirstPrimitiveMember();
      const intptr_t elem_size = elem_type.SizeInBytes();
      const auto reg_kind = kQuadFpuReg;
      ASSERT(size % elem_size == 0);
      const intptr_t num_registers = size / elem_size;
      const intptr_t first_reg =
          FirstFreeFpuRegisterIndex(reg_kind, num_registers);
      if (first_reg != kNoFpuRegister) {
        AllocateFpuRegisterAtIndex(reg_kind, first_reg, num_registers);

        NativeLocations& multiple_locations =
            *new (zone_) NativeLocations(zone_, num_registers);
        for (int i = 0; i < num_registers; i++) {
          const intptr_t reg_index = first_reg + i;
          multiple_locations.Add(new (zone_) NativeFpuRegistersLocation(
              elem_type, elem_type, reg_kind, reg_index));
        }
        return *new (zone_)
            MultipleNativeLocations(compound_type, multiple_locations);
      }
      BlockAllFpuRegisters();
      return AllocateStack(payload_type, is_vararg);
    }

    if (size <= 16) {
      const intptr_t size_rounded = Utils::RoundUp(size, 8);
      const intptr_t num_chunks = size_rounded / 8;
      ASSERT((num_chunks == 1) || (num_chunks == 2));

      // All-or-none: block any leftover registers.
#if defined(DART_TARGET_OS_WINDOWS)
      if (!HasAvailableCpuRegisters(num_chunks) && !is_vararg) {
        cpu_regs_used = CallingConventions::kNumArgRegs;
      }
#else
      if (!HasAvailableCpuRegisters(num_chunks)) {
        cpu_regs_used = CallingConventions::kNumArgRegs;
      }
#endif

      return AllocateCompoundAsMultiple(payload_type);
    }

    const auto& pointer_location =
        AllocateArgument(*new (zone_) NativePrimitiveType(kInt64));
    return *new (zone_)
        PointerToMemoryLocation(pointer_location, compound_type);
  }
#endif  // defined(TARGET_ARCH_ARM64)

#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
  // See RISC-V ABIs Specification
  // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases
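  //
  // For example, `struct { float f; int32_t i; }` is passed with `f` in an
  // F register and `i` in an X register when one of each is available;
  // anything larger than 2*XLEN is passed by reference.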
  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
                                         bool is_vararg,
                                         bool is_result) {
    const auto& compound_type = payload_type.AsCompound();

    // 2.2. Hardware Floating-point Calling Convention.
    const NativePrimitiveType* first = nullptr;
    const NativePrimitiveType* second = nullptr;
    const intptr_t num_primitive_members =
        compound_type.PrimitivePairMembers(&first, &second);

    // If exactly one floating-point member, pass like a scalar.
    if ((num_primitive_members == 1) && first->IsFloat()) {
      NativeLocations& multiple_locations =
          *new (zone_) NativeLocations(zone_, 1);
      multiple_locations.Add(&AllocateArgument(*first));
      return *new (zone_)
          MultipleNativeLocations(compound_type, multiple_locations);
    }

    if (num_primitive_members == 2) {
      if (first->IsFloat() && second->IsFloat()) {
        // If exactly two floating-point members, pass like two scalars if two
        // F registers are available.
        if (HasAvailableFpuRegisters(2)) {
          NativeLocations& multiple_locations =
              *new (zone_) NativeLocations(zone_, 2);
          multiple_locations.Add(&AllocateArgument(*first));
          multiple_locations.Add(&AllocateArgument(*second));
          return *new (zone_)
              MultipleNativeLocations(compound_type, multiple_locations);
        }
      } else if (first->IsFloat() || second->IsFloat()) {
        // If exactly two members, one is integer and one is float in either
        // order, pass like two scalars if both an X and F register are
        // available.
        if (HasAvailableFpuRegisters(1) && HasAvailableCpuRegisters(1)) {
          NativeLocations& multiple_locations =
              *new (zone_) NativeLocations(zone_, 2);
          multiple_locations.Add(&AllocateArgument(*first));
          multiple_locations.Add(&AllocateArgument(*second));
          return *new (zone_)
              MultipleNativeLocations(compound_type, multiple_locations);
        }
      }
    }

    // 2.1. Integer Calling Convention.
    // If total size is <= XLEN, passed like an XLEN scalar: use a register if
    // available or pass by value on the stack.
    // If total size is <= 2*XLEN, passed like two XLEN scalars: use registers
    // if available or pass by value on the stack. If only one register is
    // available, pass the low part by register and the high part on the
    // stack.
    if (compound_type.SizeInBytes() <= 2 * target::kWordSize) {
      return AllocateCompoundAsMultiple(compound_type);
    }

    // Otherwise, passed by reference.
    const auto& pointer_type = *new (zone_) NativePrimitiveType(kAddress);
    const auto& pointer_location = AllocateArgument(pointer_type);
    return *new (zone_)
        PointerToMemoryLocation(pointer_location, compound_type);
  }
#endif

  // Allocate in word-sized chunks, with the container as a full word-sized
  // register or stack slot and the payload constrained to the struct's size.
  //
  // Note this describes the location at the call. Consumers of this location,
  // such as FfiCallConvertCompoundArgumentToNative or EmitReturnMoves, often
  // assume the location of the source compound in the heap corresponds to this
  // location with just a change in base register. This is often true, except
  // some ABIs assume zero extension of the last chunk, so the stack location
  // at the call is bigger than the location in the heap. Here we set the
  // container size to reflect that zero-extended stack slot and rely on loads
  // during moves opting to use the payload size instead of the container size
  // to stay in-bounds.
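  //
  // For example, on a 64-bit target a 12-byte struct becomes two chunks: a
  // full 8-byte chunk, followed by a chunk whose container is 8 bytes but
  // whose payload is only the remaining 4 bytes (kUint32).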
  const NativeLocation& AllocateCompoundAsMultiple(
      const NativeCompoundType& compound_type) {
    const intptr_t chunk_size = compiler::target::kWordSize;
    const intptr_t num_chunks =
        Utils::RoundUp(compound_type.SizeInBytes(), chunk_size) / chunk_size;
    const NativeType& container_type =
        *new (zone_) NativePrimitiveType(TypeForSize(chunk_size));
    NativeLocations& locations =
        *new (zone_) NativeLocations(zone_, num_chunks);
    intptr_t size_remaining = compound_type.SizeInBytes();
    while (size_remaining > 0) {
      const auto& chunk = AllocateArgument(container_type);

      const intptr_t size = Utils::Minimum(size_remaining, chunk_size);
      const NativeType& payload_type =
          *new (zone_) NativePrimitiveType(TypeForSize(size));
      locations.Add(
          &chunk.WithOtherNativeType(zone_, payload_type, container_type));
      size_remaining -= size;
    }
    return *new (zone_) MultipleNativeLocations(compound_type, locations);
  }

  static FpuRegisterKind FpuRegKind(const NativeType& payload_type) {
#if defined(TARGET_ARCH_ARM)
    return FpuRegisterKindFromSize(payload_type.SizeInBytes());
#else
    return kQuadFpuReg;
#endif
  }

  Register AllocateCpuRegister() {
    RELEASE_ASSERT(cpu_regs_used >= 0);  // Avoids -Werror=array-bounds in GCC.
    ASSERT(cpu_regs_used < CallingConventions::kNumArgRegs);

    const auto result = CallingConventions::ArgumentRegisters[cpu_regs_used];
    if (CallingConventions::kArgumentIntRegXorFpuReg) {
      AllocateFpuRegisterAtIndex(kQuadFpuReg, cpu_regs_used);
    }
    cpu_regs_used++;
    return result;
  }

  const NativeLocation& AllocateStack(const NativeType& payload_type,
                                      bool is_vararg = false) {
    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
    const intptr_t size = payload_type.SizeInBytes();
    // If the stack arguments are not packed, the 32 lowest bits should not
    // contain garbage.
    const auto& container_type =
        payload_type.Extend(zone_, CallingConventions::kArgumentStackExtension);
    const auto& result = *new (zone_) NativeStackLocation(
        payload_type, container_type, CallingConventions::kStackPointerRegister,
        stack_height_in_bytes);
    stack_height_in_bytes += size;
    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
    return result;
  }

  void align_stack(intptr_t alignment) {
    stack_height_in_bytes = Utils::RoundUp(stack_height_in_bytes, alignment);
  }

  int NumFpuRegisters(FpuRegisterKind kind) const {
#if defined(TARGET_ARCH_ARM)
    if (has_varargs_) return 0;
    if (kind == kSingleFpuReg) return CallingConventions::kNumSFpuArgRegs;
    if (kind == kDoubleFpuReg) return CallingConventions::kNumDFpuArgRegs;
#endif  // defined(TARGET_ARCH_ARM)
    if (kind == kQuadFpuReg) return CallingConventions::kNumFpuArgRegs;
    UNREACHABLE();
  }

  // If no register is free, returns -1.
  int FirstFreeFpuRegisterIndex(FpuRegisterKind kind, int amount = 1) const {
    const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
    ASSERT(size == 1 || size == 2 || size == 4);
    if (fpu_reg_parts_used == -1) return kNoFpuRegister;
    const intptr_t mask = (1 << (size * amount)) - 1;
    intptr_t index = 0;
    while (index + amount <= NumFpuRegisters(kind)) {
      const intptr_t mask_shifted = mask << (index * size);
      if ((fpu_reg_parts_used & mask_shifted) == 0) {
        return index;
      }
      index++;
    }
    return kNoFpuRegister;
  }

  void AllocateFpuRegisterAtIndex(FpuRegisterKind kind,
                                  int index,
                                  int amount = 1) {
    const intptr_t size = SizeFromFpuRegisterKind(kind) / 4;
    ASSERT(size == 1 || size == 2 || size == 4);
    const intptr_t mask = (1 << size * amount) - 1;
    const intptr_t mask_shifted = (mask << (index * size));
    ASSERT((mask_shifted & fpu_reg_parts_used) == 0);
    fpu_reg_parts_used |= mask_shifted;
  }

  // > The back-filling continues only so long as no VFP CPRC has been
  // > allocated to a slot on the stack.
  // Procedure Call Standard for the Arm Architecture, Release 2019Q1.1
  // Chapter 7.1 page 28. https://developer.arm.com/docs/ihi0042/h
  //
  // Irrelevant on Android and iOS, as those are both SoftFP.
  // > For floating-point arguments, the Base Standard variant of the
  // > Procedure Call Standard is used. In this variant, floating-point
  // > (and vector) arguments are passed in general purpose registers
  // > (GPRs) instead of in VFP registers.
  // https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html#//apple_ref/doc/uid/TP40009022-SW1
  void BlockAllFpuRegisters() {
    // Set all bits to 1.
    fpu_reg_parts_used = -1;
  }

  bool HasAvailableCpuRegisters(intptr_t count) const {
    return cpu_regs_used + count <= CallingConventions::kNumArgRegs;
  }
  bool HasAvailableFpuRegisters(intptr_t count) const {
    return FirstFreeFpuRegisterIndex(kQuadFpuReg, count) != kNoFpuRegister;
  }

  intptr_t cpu_regs_used = 0;
  // Every bit denotes 32 bits of FPU registers.
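  // For example, on ARM a D register spans two adjacent bits, so blocking D2
  // sets bits 4 and 5; a value of -1 means every register is blocked.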
  intptr_t fpu_reg_parts_used = 0;
  intptr_t stack_height_in_bytes = 0;
  const bool has_varargs_;
  Zone* zone_;
};

// Location for the arguments of a C signature function.
static NativeLocations& ArgumentLocations(
    Zone* zone,
    const ZoneGrowableArray<const NativeType*>& arg_reps,
    const NativeLocation& return_location,
    intptr_t var_args_index) {
  intptr_t num_arguments = arg_reps.length();
  auto& result = *new (zone) NativeLocations(zone, num_arguments);

  // Loop through all arguments and assign a register or a stack location.
  // Allocate result pointer for composite returns first.
  const bool has_varargs =
      var_args_index != NativeFunctionType::kNoVariadicArguments;
  ArgumentAllocator frame_state(zone, has_varargs);
#if !defined(TARGET_ARCH_ARM64)
  // Arm64 allocates the pointer in R8, which is not an argument location.
  if (return_location.IsPointerToMemory()) {
    const auto& pointer_location =
        return_location.AsPointerToMemory().pointer_location();
    const auto& pointer_location_allocated =
        frame_state.AllocateArgumentVariadic(pointer_location.payload_type());
    ASSERT(pointer_location.Equals(pointer_location_allocated));
  }
#endif

  for (intptr_t i = 0; i < num_arguments; i++) {
    const NativeType& rep = *arg_reps[i];
    const bool is_first_vararg = has_varargs && i == var_args_index;
    const bool is_vararg = has_varargs && i >= var_args_index;
    result.Add(
        &frame_state.AllocateArgumentVariadic(rep, is_first_vararg, is_vararg));
  }
  return result;
}

#if !defined(TARGET_ARCH_IA32)
static const NativeLocation& PointerToMemoryResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type) {
  const auto& pointer_type = *new (zone) NativePrimitiveType(kAddress);
  const auto& pointer_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterCall);
  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterReturn);
  return *new (zone) PointerToMemoryLocation(
      pointer_location, pointer_return_location, payload_type);
}
#endif  // !defined(TARGET_ARCH_IA32)

#if defined(TARGET_ARCH_IA32)
// ia32 passes pointers to result locations on the stack.
static const NativeLocation& PointerToMemoryResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type) {
  const auto& pointer_type = *new (zone) NativePrimitiveType(kAddress);
  const auto& pointer_location = *new (zone) NativeStackLocation(
      pointer_type, pointer_type, CallingConventions::kStackPointerRegister, 0);
  const auto& pointer_return_location = *new (zone) NativeRegistersLocation(
      zone, pointer_type, pointer_type,
      CallingConventions::kPointerToReturnStructRegisterReturn);
  return *new (zone) PointerToMemoryLocation(
      pointer_location, pointer_return_location, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32)

#if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
    // Allocate the same as argument, but use return registers instead of
    // argument registers.
    NativeLocations& multiple_locations =
        *new (zone) NativeLocations(zone, size > 8 ? 2 : 1);
    intptr_t used_regs = 0;
    intptr_t used_xmm_regs = 0;

    const auto& double_type = *new (zone) NativePrimitiveType(kDouble);

    const bool first_half_in_xmm = payload_type.ContainsOnlyFloats(
        Range::StartAndEnd(0, Utils::Minimum<intptr_t>(size, 8)));
    if (first_half_in_xmm) {
      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
          double_type, double_type, kQuadFpuReg,
          CallingConventions::kReturnFpuReg));
      used_xmm_regs++;
    } else {
      const auto& payload_type = *new (zone) NativePrimitiveType(
          TypeForSize(Utils::Minimum(size, compiler::target::kWordSize)));
      const auto& container_type = *new (zone) NativePrimitiveType(
          TypeForSize(compiler::target::kWordSize));
      multiple_locations.Add(new (zone) NativeRegistersLocation(
          zone, payload_type, container_type, CallingConventions::kReturnReg));
      used_regs++;
    }
    if (size > 8) {
      const bool second_half_in_xmm = payload_type.ContainsOnlyFloats(
          Range::StartAndEnd(8, Utils::Minimum<intptr_t>(size, 16)));
      if (second_half_in_xmm) {
        const FpuRegister reg = used_xmm_regs == 0
                                    ? CallingConventions::kReturnFpuReg
                                    : CallingConventions::kSecondReturnFpuReg;
        multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
            double_type, double_type, kQuadFpuReg, reg));
        used_xmm_regs++;
      } else {
        const Register reg = used_regs == 0
                                 ? CallingConventions::kReturnReg
                                 : CallingConventions::kSecondReturnReg;
        const auto& payload_type = *new (zone) NativePrimitiveType(
            TypeForSize(Utils::Minimum(size - 8, compiler::target::kWordSize)));
        const auto& container_type = *new (zone) NativePrimitiveType(
            TypeForSize(compiler::target::kWordSize));
        multiple_locations.Add(new (zone) NativeRegistersLocation(
            zone, payload_type, container_type, reg));
        used_regs++;
      }
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
// If the struct fits in a single register, use it and sign extend.
// Otherwise, pass a pointer to memory.
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 8 && size > 0 && Utils::IsPowerOfTwo(size)) {
    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
    const auto& type =
        *new (zone) NativePrimitiveType(PrimitiveTypeFromSizeInBytes(size));
    multiple_locations.Add(new (zone) NativeRegistersLocation(
        zone, type, type, CallingConventions::kReturnReg));
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_IA32) && defined(DART_TARGET_OS_WINDOWS)
// Windows uses up to two return registers, while Linux does not.
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 8 && Utils::IsPowerOfTwo(size)) {
    NativeLocations& multiple_locations =
        *new (zone) NativeLocations(zone, size > 4 ? 2 : 1);
    const auto& type = *new (zone) NativePrimitiveType(kUint32);
    multiple_locations.Add(new (zone) NativeRegistersLocation(
        zone, type, type, CallingConventions::kReturnReg));
    if (size > 4) {
      multiple_locations.Add(new (zone) NativeRegistersLocation(
          zone, type, type, CallingConventions::kSecondReturnReg));
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_IA32) && defined(DART_TARGET_OS_WINDOWS)

#if defined(TARGET_ARCH_ARM)
// Arm passes homogeneous float return values in FPU registers and small
// composites in a single integer register. The rest is stored into the
// location passed in by pointer.
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  const intptr_t num_members = payload_type.NumPrimitiveMembersRecursive();
  if (payload_type.ContainsHomogeneousFloats() &&
      !SoftFpAbi(has_varargs, /*is_result*/ true) && num_members <= 4) {
    NativeLocations& multiple_locations =
        *new (zone) NativeLocations(zone, num_members);
    for (int i = 0; i < num_members; i++) {
      const auto& member = payload_type.FirstPrimitiveMember();
      multiple_locations.Add(new (zone) NativeFpuRegistersLocation(
          member, member, FpuRegisterKindFromSize(member.SizeInBytes()), i));
    }
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  const intptr_t size = payload_type.SizeInBytes();
  if (size <= 4) {
    NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
    const auto& type = *new (zone) NativePrimitiveType(kUint32);
    multiple_locations.Add(new (zone)
                               NativeRegistersLocation(zone, type, type, R0));
    return *new (zone)
        MultipleNativeLocations(payload_type, multiple_locations);
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_ARM)

#if defined(TARGET_ARCH_ARM64)
// If allocated to integer or fpu registers as argument, same for return,
// otherwise a pointer to the result location is passed in.
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  ArgumentAllocator frame_state(zone, has_varargs);
  const auto& location_as_argument =
      frame_state.AllocateArgumentVariadic(payload_type);
  if (!location_as_argument.IsStack() &&
      !location_as_argument.IsPointerToMemory()) {
    return location_as_argument;
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_ARM64)

#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
static const NativeLocation& CompoundResultLocation(
    Zone* zone,
    const NativeCompoundType& payload_type,
    bool has_varargs) {
  // First or first and second argument registers if it fits, otherwise a
  // pointer to the result location is passed in.
  ArgumentAllocator frame_state(zone, has_varargs);
  const auto& location_as_argument =
      frame_state.AllocateArgumentVariadic(payload_type);
  if (!location_as_argument.IsStack() &&
      !location_as_argument.IsPointerToMemory()) {
    return location_as_argument;
  }
  return PointerToMemoryResultLocation(zone, payload_type);
}
#endif  // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

// Location for the result of a C signature function.
static const NativeLocation& ResultLocation(Zone* zone,
                                            const NativeType& payload_type,
                                            bool has_varargs) {
  const auto& payload_type_converted =
      ConvertIfSoftFp(zone, payload_type, has_varargs, /*is_result*/ true);
  const auto& container_type = payload_type_converted.Extend(
      zone, CallingConventions::kReturnRegisterExtension);

  if (container_type.IsFloat()) {
    return *new (zone) NativeFpuRegistersLocation(
        payload_type, container_type, CallingConventions::kReturnFpuReg);
  }

  if (container_type.IsInt() || container_type.IsVoid()) {
    if (container_type.SizeInBytes() == 8 && target::kWordSize == 4) {
      return *new (zone) NativeRegistersLocation(
          zone, payload_type, container_type, CallingConventions::kReturnReg,
          CallingConventions::kSecondReturnReg);
    }

    ASSERT(container_type.SizeInBytes() <= target::kWordSize);
    return *new (zone) NativeRegistersLocation(
        zone, payload_type, container_type, CallingConventions::kReturnReg);
  }

  // Compounds are laid out differently per ABI, so they are implemented
  // per ABI.
  const auto& compound_type = payload_type.AsCompound();
  return CompoundResultLocation(zone, compound_type, has_varargs);
}

const NativeCallingConvention& NativeCallingConvention::FromSignature(
    Zone* zone,
    const NativeFunctionType& signature) {
  const bool contains_varargs = signature.variadic_arguments_index() !=
                                NativeFunctionType::kNoVariadicArguments;
  // With struct return values, a possible pointer to a return value can
  // occupy an argument position. Hence, allocate return value first.
  const auto& return_location =
      ResultLocation(zone, signature.return_type(), contains_varargs);
  const auto& argument_locations =
      ArgumentLocations(zone, signature.argument_types(), return_location,
                        signature.variadic_arguments_index());
  return *new (zone)
      NativeCallingConvention(argument_locations, return_location);
}

intptr_t NativeCallingConvention::StackTopInBytes() const {
  const intptr_t num_arguments = argument_locations_.length();
  intptr_t max_height_in_bytes = 0;
  for (intptr_t i = 0; i < num_arguments; i++) {
    max_height_in_bytes = Utils::Maximum(
        max_height_in_bytes, argument_locations_[i]->StackTopInBytes());
  }
  if (return_location_.IsPointerToMemory()) {
    const auto& ret_loc = return_location_.AsPointerToMemory();
    max_height_in_bytes =
        Utils::Maximum(max_height_in_bytes, ret_loc.StackTopInBytes());
  }
  return Utils::RoundUp(max_height_in_bytes, compiler::target::kWordSize);
}

void NativeCallingConvention::PrintTo(BaseTextBuffer* f,
                                      bool multi_line) const {
  if (!multi_line) {
    f->AddString("(");
  }
  for (intptr_t i = 0; i < argument_locations_.length(); i++) {
    if (i > 0) {
      if (multi_line) {
        f->AddString("\n");
      } else {
        f->AddString(", ");
      }
    }
    argument_locations_[i]->PrintTo(f);
  }
  if (multi_line) {
    f->AddString("\n=>\n");
  } else {
    f->AddString(") => ");
  }
  return_location_.PrintTo(f);
  if (multi_line) {
    f->AddString("\n");
  }
}

const char* NativeCallingConvention::ToCString(Zone* zone,
                                               bool multi_line) const {
  ZoneTextBuffer textBuffer(zone);
  PrintTo(&textBuffer, multi_line);
  return textBuffer.buffer();
}

#if !defined(FFI_UNIT_TESTS)
const char* NativeCallingConvention::ToCString(bool multi_line) const {
  return ToCString(Thread::Current()->zone(), multi_line);
}
#endif

}  // namespace ffi

}  // namespace compiler

}  // namespace dart