Flutter Engine
asm_intrinsifier_arm64.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6#if defined(TARGET_ARCH_ARM64)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/class_id.h"
11#include "vm/compiler/asm_intrinsifier.h"
12#include "vm/compiler/assembler/assembler.h"
13
14namespace dart {
15namespace compiler {
16
17// When entering intrinsics code:
18// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
19// CODE_REG: Callee's Code in JIT / not passed in AOT
20// R4: Arguments descriptor
21// LR: Return address
22// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
23// i.e. if the intrinsified method always executes a return.
24// The FP register should not be modified, because it is used by the profiler.
25// The PP and THR registers (see constants_arm64.h) must be preserved.
26
27#define __ assembler->
28
29// Loads args from stack into R0 and R1
30// Tests if they are smis, jumps to label not_smi if not.
31static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
32 __ ldr(R0, Address(SP, +0 * target::kWordSize));
33 __ ldr(R1, Address(SP, +1 * target::kWordSize));
34 __ orr(TMP, R0, Operand(R1));
35 __ BranchIfNotSmi(TMP, not_smi);
36}
37
38void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
39 ASSERT(kSmiTagShift == 1);
40 ASSERT(kSmiTag == 0);
41 const Register right = R0;
42 const Register left = R1;
43 const Register temp = R2;
44 const Register result = R0;
45
46 TestBothArgumentsSmis(assembler, normal_ir_body);
47 __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
48 kObjectBytes);
49 __ b(normal_ir_body, CS);
50
51 // Left is not a constant.
52 // Check if count too large for handling it inlined.
53 __ SmiUntag(TMP, right); // SmiUntag right into TMP.
54 // Overflow test (preserve left, right, and TMP);
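// If shifting left and then arithmetically shifting back does not reproduce
// 'left', high bits were lost and the result does not fit in a Smi, so take
// the slow path.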
55 __ lslv(temp, left, TMP, kObjectBytes);
56 __ asrv(TMP2, temp, TMP, kObjectBytes);
57 __ cmp(left, Operand(TMP2), kObjectBytes);
58 __ b(normal_ir_body, NE); // Overflow.
59 // Shift for result now we know there is no overflow.
60 __ lslv(result, left, TMP, kObjectBytes);
61 __ ret();
62 __ Bind(normal_ir_body);
63}
64
65static void CompareIntegers(Assembler* assembler,
66 Label* normal_ir_body,
67 Condition true_condition) {
68 Label true_label;
69 TestBothArgumentsSmis(assembler, normal_ir_body);
70 // R0 contains the right argument, R1 the left.
71 __ CompareObjectRegisters(R1, R0);
72 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
73 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
74 __ csel(R0, TMP, R0, true_condition);
75 __ ret();
76 __ Bind(normal_ir_body);
77}
78
79void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
80 Label* normal_ir_body) {
81 CompareIntegers(assembler, normal_ir_body, LT);
82}
83
84void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
85 Label* normal_ir_body) {
86 CompareIntegers(assembler, normal_ir_body, GT);
87}
88
89void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
90 Label* normal_ir_body) {
91 CompareIntegers(assembler, normal_ir_body, LE);
92}
93
94void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
95 Label* normal_ir_body) {
96 CompareIntegers(assembler, normal_ir_body, GE);
97}
98
99// This is called for Smi and Mint receivers. The right argument
100// can be Smi, Mint or double.
101void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
102 Label* normal_ir_body) {
103 Label true_label, check_for_mint;
104 // For integer receiver '===' check first.
105 __ ldr(R0, Address(SP, 0 * target::kWordSize));
106 __ ldr(R1, Address(SP, 1 * target::kWordSize));
107 __ CompareObjectRegisters(R0, R1);
108 __ b(&true_label, EQ);
109
110 __ orr(R2, R0, Operand(R1));
111 __ BranchIfNotSmi(R2, &check_for_mint);
112 // If R0 or R1 is not a smi do Mint checks.
113
114 // Both arguments are smi, '===' is good enough.
115 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
116 __ ret();
117 __ Bind(&true_label);
118 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
119 __ ret();
120
121 // At least one of the arguments was not Smi.
122 Label receiver_not_smi;
123 __ Bind(&check_for_mint);
124
125 __ BranchIfNotSmi(R1, &receiver_not_smi); // Check receiver.
126
127 // Left (receiver) is Smi, return false if right is not Double.
128 // Note that an instance of Mint never contains a value that can be
129 // represented by Smi.
130
131 __ CompareClassId(R0, kDoubleCid);
132 __ b(normal_ir_body, EQ);
133 __ LoadObject(R0,
134 CastHandle<Object>(FalseObject())); // Smi == Mint -> false.
135 __ ret();
136
137 __ Bind(&receiver_not_smi);
138 // R1: receiver.
139
140 __ CompareClassId(R1, kMintCid);
141 __ b(normal_ir_body, NE);
142 // Receiver is Mint, return false if right is Smi.
143 __ BranchIfNotSmi(R0, normal_ir_body);
144 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
145 __ ret();
146 // TODO(srdjan): Implement Mint == Mint comparison.
147
148 __ Bind(normal_ir_body);
149}
150
151void AsmIntrinsifier::Integer_equal(Assembler* assembler,
152 Label* normal_ir_body) {
153 Integer_equalToInteger(assembler, normal_ir_body);
154}
155
156void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
157 Label* normal_ir_body) {
158 __ ldr(R0, Address(SP, 0 * target::kWordSize));
159 __ SmiUntag(R0);
160 // XOR with sign bit to complement bits if value is negative.
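// For a non-negative value, bitLength == (register width) - clz(value); the
// XOR above makes the same formula work for negative values.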
161#if !defined(DART_COMPRESSED_POINTERS)
162 __ eor(R0, R0, Operand(R0, ASR, 63));
163 __ clz(R0, R0);
164 __ LoadImmediate(R1, 64);
165#else
166 __ eorw(R0, R0, Operand(R0, ASR, 31));
167 __ clzw(R0, R0);
168 __ LoadImmediate(R1, 32);
169#endif
170 __ sub(R0, R1, Operand(R0));
171 __ SmiTag(R0);
172 __ ret();
173}
174
175void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
176 // static void _lsh(Uint32List x_digits, int x_used, int n,
177 // Uint32List r_digits)
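// The 32-bit digits are processed in 64-bit pairs: the shift amount n (in
// bits) is split into a whole-pair part (n ~/ 64, used below to position the
// result pointer) and a bit part (n % 64, applied with lslv/lsrv).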
178
179 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
180 __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
181#if defined(DART_COMPRESSED_POINTERS)
182 __ sxtw(R2, R2);
183#endif
184 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up.
185 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read.
186 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
187 __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
188#if defined(DART_COMPRESSED_POINTERS)
189 __ sxtw(R5, R5);
190#endif
191 __ SmiUntag(R5);
192 // R0 = n ~/ (2*_DIGIT_BITS)
193 __ AsrImmediate(R0, R5, 6);
194 // R6 = &x_digits[0]
195 __ add(R6, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
196 // R7 = &x_digits[2*R2]
197 __ add(R7, R6, Operand(R2, LSL, 3));
198 // R8 = &r_digits[2*1]
199 __ add(R8, R4,
200 Operand(target::TypedData::payload_offset() - kHeapObjectTag +
201 2 * kBytesPerBigIntDigit));
202 // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)]
203 __ add(R0, R0, Operand(R2));
204 __ add(R8, R8, Operand(R0, LSL, 3));
205 // R3 = n % (2 * _DIGIT_BITS)
206 __ AndImmediate(R3, R5, 63);
207 // R2 = 64 - R3
208 __ LoadImmediate(R2, 64);
209 __ sub(R2, R2, Operand(R3));
210 __ mov(R1, ZR);
211 Label loop;
212 __ Bind(&loop);
213 __ ldr(R0, Address(R7, -2 * kBytesPerBigIntDigit, Address::PreIndex));
214 __ lsrv(R4, R0, R2);
215 __ orr(R1, R1, Operand(R4));
216 __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
217 __ lslv(R1, R0, R3);
218 __ cmp(R7, Operand(R6));
219 __ b(&loop, NE);
220 __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
221 __ LoadObject(R0, NullObject());
222 __ ret();
223}
224
225void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
226 // static void _rsh(Uint32List x_digits, int x_used, int n,
227 // Uint32List r_digits)
228
229 // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
230 __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
231#if defined(DART_COMPRESSED_POINTERS)
232 __ sxtw(R2, R2);
233#endif
234 __ add(R2, R2, Operand(2)); // x_used > 0, Smi. R2 = x_used + 1, round up.
235 __ AsrImmediate(R2, R2, 2); // R2 = num of digit pairs to read.
236 // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
237 __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
238#if defined(DART_COMPRESSED_POINTERS)
239 __ sxtw(R5, R5);
240#endif
241 __ SmiUntag(R5);
242 // R0 = n ~/ (2*_DIGIT_BITS)
243 __ AsrImmediate(R0, R5, 6);
244 // R8 = &r_digits[0]
245 __ add(R8, R4, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
246 // R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))]
247 __ add(R7, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
248 __ add(R7, R7, Operand(R0, LSL, 3));
249 // R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)]
250 __ add(R0, R0, Operand(1));
251 __ sub(R0, R2, Operand(R0));
252 __ add(R6, R8, Operand(R0, LSL, 3));
253 // R3 = n % (2*_DIGIT_BITS)
254 __ AndImmediate(R3, R5, 63);
255 // R2 = 64 - R3
256 __ LoadImmediate(R2, 64);
257 __ sub(R2, R2, Operand(R3));
258 // R1 = x_digits[n ~/ (2*_DIGIT_BITS)] >> (n % (2*_DIGIT_BITS))
259 __ ldr(R1, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
260 __ lsrv(R1, R1, R3);
261 Label loop_entry;
262 __ b(&loop_entry);
263 Label loop;
264 __ Bind(&loop);
265 __ ldr(R0, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
266 __ lslv(R4, R0, R2);
267 __ orr(R1, R1, Operand(R4));
268 __ str(R1, Address(R8, 2 * kBytesPerBigIntDigit, Address::PostIndex));
269 __ lsrv(R1, R0, R3);
270 __ Bind(&loop_entry);
271 __ cmp(R8, Operand(R6));
272 __ b(&loop, NE);
273 __ str(R1, Address(R8, 0));
274 __ LoadObject(R0, NullObject());
275 __ ret();
276}
277
278void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
279 Label* normal_ir_body) {
280 // static void _absAdd(Uint32List digits, int used,
281 // Uint32List a_digits, int a_used,
282 // Uint32List r_digits)
283
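// Digits are added as 64-bit pairs; the carry is kept in the processor's C
// flag across loop iterations, so the instructions inside the loops are
// chosen not to clobber the flags.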
284 // R2 = used, R3 = digits
285 __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
286#if defined(DART_COMPRESSED_POINTERS)
287 __ sxtw(R2, R2);
288#endif
289 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up.
290 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process.
291 // R3 = &digits[0]
292 __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
293
294 // R4 = a_used, R5 = a_digits
295 __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
296#if defined(DART_COMPRESSED_POINTERS)
297 __ sxtw(R4, R4);
298#endif
299 __ add(R4, R4, Operand(2)); // a_used > 0, Smi. R4 = a_used + 1, round up.
300 __ add(R4, ZR, Operand(R4, ASR, 2)); // R4 = num of digit pairs to process.
301 // R5 = &a_digits[0]
302 __ add(R5, R5, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
303
304 // R6 = r_digits
305 __ ldr(R6, Address(SP, 0 * target::kWordSize));
306 // R6 = &r_digits[0]
307 __ add(R6, R6, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
308
309 // R7 = &digits[a_used rounded up to even number].
310 __ add(R7, R3, Operand(R4, LSL, 3));
311
312 // R8 = &digits[used rounded up to even number].
313 __ add(R8, R3, Operand(R2, LSL, 3));
314
315 __ adds(R0, R0, Operand(0)); // carry flag = 0
316 Label add_loop;
317 __ Bind(&add_loop);
318 // Loop (a_used+1)/2 times, a_used > 0.
319 __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
320 __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
321 __ adcs(R0, R0, R1);
322 __ sub(R9, R3, Operand(R7)); // Does not affect carry flag.
323 __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
324 __ cbnz(&add_loop, R9); // Does not affect carry flag.
325
326 Label last_carry;
327 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag.
328 __ cbz(&last_carry, R9); // If used - a_used == 0.
329
330 Label carry_loop;
331 __ Bind(&carry_loop);
332 // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
333 __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
334 __ adcs(R0, R0, ZR);
335 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag.
336 __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
337 __ cbnz(&carry_loop, R9);
338
339 __ Bind(&last_carry);
340 Label done;
341 __ b(&done, CC);
342 __ LoadImmediate(R0, 1);
343 __ str(R0, Address(R6, 0));
344
345 __ Bind(&done);
346 __ LoadObject(R0, NullObject());
347 __ ret();
348}
349
350void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
351 Label* normal_ir_body) {
352 // static void _absSub(Uint32List digits, int used,
353 // Uint32List a_digits, int a_used,
354 // Uint32List r_digits)
355
356 // R2 = used, R3 = digits
357 __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
358#if defined(DART_COMPRESSED_POINTERS)
359 __ sxtw(R2, R2);
360#endif
361 __ add(R2, R2, Operand(2)); // used > 0, Smi. R2 = used + 1, round up.
362 __ add(R2, ZR, Operand(R2, ASR, 2)); // R2 = num of digit pairs to process.
363 // R3 = &digits[0]
364 __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
365
366 // R4 = a_used, R5 = a_digits
367 __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
368#if defined(DART_COMPRESSED_POINTERS)
369 __ sxtw(R4, R4);
370#endif
371 __ add(R4, R4, Operand(2)); // a_used > 0, Smi. R4 = a_used + 1, round up.
372 __ add(R4, ZR, Operand(R4, ASR, 2)); // R4 = num of digit pairs to process.
373 // R5 = &a_digits[0]
374 __ add(R5, R5, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
375
376 // R6 = r_digits
377 __ ldr(R6, Address(SP, 0 * target::kWordSize));
378 // R6 = &r_digits[0]
379 __ add(R6, R6, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
380
381 // R7 = &digits[a_used rounded up to even number].
382 __ add(R7, R3, Operand(R4, LSL, 3));
383
384 // R8 = &digits[used rounded up to even number].
385 __ add(R8, R3, Operand(R2, LSL, 3));
386
387 __ subs(R0, R0, Operand(0)); // carry flag = 1
388 Label sub_loop;
389 __ Bind(&sub_loop);
390 // Loop (a_used+1)/2 times, a_used > 0.
391 __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
392 __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
393 __ sbcs(R0, R0, R1);
394 __ sub(R9, R3, Operand(R7)); // Does not affect carry flag.
395 __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
396 __ cbnz(&sub_loop, R9); // Does not affect carry flag.
397
398 Label done;
399 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag.
400 __ cbz(&done, R9); // If used - a_used == 0.
401
402 Label carry_loop;
403 __ Bind(&carry_loop);
404 // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
405 __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
406 __ sbcs(R0, R0, ZR);
407 __ sub(R9, R3, Operand(R8)); // Does not affect carry flag.
408 __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
409 __ cbnz(&carry_loop, R9);
410
411 __ Bind(&done);
412 __ LoadObject(R0, NullObject());
413 __ ret();
414}
415
416void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
417 Label* normal_ir_body) {
418 // Pseudo code:
419 // static int _mulAdd(Uint32List x_digits, int xi,
420 // Uint32List m_digits, int i,
421 // Uint32List a_digits, int j, int n) {
422 // uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1]; // xi is Smi and even.
423 // if (x == 0 || n == 0) {
424 // return 2;
425 // }
426 // uint64_t* mip = &m_digits[i >> 1]; // i is Smi and even.
427 // uint64_t* ajp = &a_digits[j >> 1]; // j is Smi and even.
428 // uint64_t c = 0;
429 // SmiUntag(n); // n is Smi and even.
430 // n = (n + 1)/2; // Number of pairs to process.
431 // do {
432 // uint64_t mi = *mip++;
433 // uint64_t aj = *ajp;
434 // uint128_t t = x*mi + aj + c; // 64-bit * 64-bit -> 128-bit.
435 // *ajp++ = low64(t);
436 // c = high64(t);
437 // } while (--n > 0);
438 // while (c != 0) {
439 // uint128_t t = *ajp + c;
440 // *ajp++ = low64(t);
441 // c = high64(t); // c == 0 or 1.
442 // }
443 // return 2;
444 // }
445
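// The 64x64->128-bit products in the loop below are formed with a mul/umulh
// pair, since AArch64 has no single instruction that produces a 128-bit
// result.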
446 Label done;
447 // R3 = x, no_op if x == 0
448 // R0 = xi as Smi, R1 = x_digits.
449 __ ldp(R0, R1, Address(SP, 5 * target::kWordSize, Address::PairOffset));
450#if defined(DART_COMPRESSED_POINTERS)
451 __ sxtw(R0, R0);
452#endif
453 __ add(R1, R1, Operand(R0, LSL, 1));
454 __ ldr(R3, FieldAddress(R1, target::TypedData::payload_offset()));
455 __ tst(R3, Operand(R3));
456 __ b(&done, EQ);
457
458 // R6 = (SmiUntag(n) + 1)/2, no_op if n == 0
459 __ ldr(R6, Address(SP, 0 * target::kWordSize));
460#if defined(DART_COMPRESSED_POINTERS)
461 __ sxtw(R6, R6);
462#endif
463 __ add(R6, R6, Operand(2));
464 __ adds(R6, ZR, Operand(R6, ASR, 2)); // SmiUntag(R6) and set cc.
465 __ b(&done, EQ);
466
467 // R4 = mip = &m_digits[i >> 1]
468 // R0 = i as Smi, R1 = m_digits.
469 __ ldp(R0, R1, Address(SP, 3 * target::kWordSize, Address::PairOffset));
470#if defined(DART_COMPRESSED_POINTERS)
471 __ sxtw(R0, R0);
472#endif
473 __ add(R1, R1, Operand(R0, LSL, 1));
474 __ add(R4, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
475
476 // R5 = ajp = &a_digits[j >> 1]
477 // R0 = j as Smi, R1 = a_digits.
478 __ ldp(R0, R1, Address(SP, 1 * target::kWordSize, Address::PairOffset));
479#if defined(DART_COMPRESSED_POINTERS)
480 __ sxtw(R0, R0);
481#endif
482 __ add(R1, R1, Operand(R0, LSL, 1));
483 __ add(R5, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
484
485 // R1 = c = 0
486 __ mov(R1, ZR);
487
488 Label muladd_loop;
489 __ Bind(&muladd_loop);
490 // x: R3
491 // mip: R4
492 // ajp: R5
493 // c: R1
494 // n: R6
495 // t: R7:R8 (not live at loop entry)
496
497 // uint64_t mi = *mip++
498 __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));
499
500 // uint64_t aj = *ajp
501 __ ldr(R0, Address(R5, 0));
502
503 // uint128_t t = x*mi + aj + c
504 __ mul(R7, R2, R3); // R7 = low64(R2*R3).
505 __ umulh(R8, R2, R3); // R8 = high64(R2*R3), t = R8:R7 = x*mi.
506 __ adds(R7, R7, Operand(R0));
507 __ adc(R8, R8, ZR); // t += aj.
508 __ adds(R0, R7, Operand(R1)); // t += c, R0 = low64(t).
509 __ adc(R1, R8, ZR); // c = R1 = high64(t).
510
511 // *ajp++ = low64(t) = R0
512 __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
513
514 // while (--n > 0)
515 __ subs(R6, R6, Operand(1)); // --n
516 __ b(&muladd_loop, NE);
517
518 __ tst(R1, Operand(R1));
519 __ b(&done, EQ);
520
521 // *ajp++ += c
522 __ ldr(R0, Address(R5, 0));
523 __ adds(R0, R0, Operand(R1));
524 __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
525 __ b(&done, CC);
526
527 Label propagate_carry_loop;
528 __ Bind(&propagate_carry_loop);
529 __ ldr(R0, Address(R5, 0));
530 __ adds(R0, R0, Operand(1));
531 __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
532 __ b(&propagate_carry_loop, CS);
533
534 __ Bind(&done);
535 __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
536 __ ret();
537}
538
539void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
540 Label* normal_ir_body) {
541 // Pseudo code:
542 // static int _sqrAdd(Uint32List x_digits, int i,
543 // Uint32List a_digits, int used) {
544 // uint64_t* xip = &x_digits[i >> 1]; // i is Smi and even.
545 // uint64_t x = *xip++;
546 // if (x == 0) return 2;
547 // uint64_t* ajp = &a_digits[i]; // j == 2*i, i is Smi.
548 // uint64_t aj = *ajp;
549 // uint128_t t = x*x + aj;
550 // *ajp++ = low64(t);
551 // uint128_t c = high64(t);
552 // int n = ((used - i + 2) >> 2) - 1; // used and i are Smi. n: num pairs.
553 // while (--n >= 0) {
554 // uint64_t xi = *xip++;
555 // uint64_t aj = *ajp;
556 // uint192_t t = 2*x*xi + aj + c; // 2-bit * 64-bit * 64-bit -> 129-bit.
557 // *ajp++ = low64(t);
558 // c = high128(t); // 65-bit.
559 // }
560 // uint64_t aj = *ajp;
561 // uint128_t t = aj + c; // 64-bit + 65-bit -> 66-bit.
562 // *ajp++ = low64(t);
563 // *ajp = high64(t);
564 // return 2;
565 // }
566
567 // R4 = xip = &x_digits[i >> 1]
568 // R2 = i as Smi, R3 = x_digits
569 __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
570#if defined(DART_COMPRESSED_POINTERS)
571 __ sxtw(R2, R2);
572#endif
573 __ add(R3, R3, Operand(R2, LSL, 1));
574 __ add(R4, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
575
576 // R3 = x = *xip++, return if x == 0
577 Label x_zero;
578 __ ldr(R3, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));
579 __ tst(R3, Operand(R3));
580 __ b(&x_zero, EQ);
581
582 // R5 = ajp = &a_digits[i]
583 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // a_digits
584 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
585 __ add(R5, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
586
587 // R6:R1 = t = x*x + *ajp
588 __ ldr(R0, Address(R5, 0));
589 __ mul(R1, R3, R3); // R1 = low64(R3*R3).
590 __ umulh(R6, R3, R3); // R6 = high64(R3*R3).
591 __ adds(R1, R1, Operand(R0)); // R6:R1 += *ajp.
592 __ adc(R6, R6, ZR); // R6 = low64(c) = high64(t).
593 __ mov(R7, ZR); // R7 = high64(c) = 0.
594
595 // *ajp++ = low64(t) = R1
596 __ str(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
597
598 // int n = (used - i + 1)/2 - 1
599 __ ldr(R0, Address(SP, 0 * target::kWordSize)); // used is Smi
600#if defined(DART_COMPRESSED_POINTERS)
601 __ sxtw(R0, R0);
602#endif
603 __ sub(R8, R0, Operand(R2));
604 __ add(R8, R8, Operand(2));
605 __ movn(R0, Immediate(1), 0); // R0 = ~1 = -2.
606 __ adds(R8, R0, Operand(R8, ASR, 2)); // while (--n >= 0)
607
608 Label loop, done;
609 __ b(&done, MI);
610
611 __ Bind(&loop);
612 // x: R3
613 // xip: R4
614 // ajp: R5
615 // c: R7:R6
616 // t: R2:R1:R0 (not live at loop entry)
617 // n: R8
618
619 // uint64_t xi = *xip++
620 __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));
621
622 // uint192_t t = R2:R1:R0 = 2*x*xi + aj + c
623 __ mul(R0, R2, R3); // R0 = low64(R2*R3) = low64(x*xi).
624 __ umulh(R1, R2, R3); // R1 = high64(R2*R3) = high64(x*xi).
625 __ adds(R0, R0, Operand(R0));
626 __ adcs(R1, R1, R1);
627 __ adc(R2, ZR, ZR); // R2:R1:R0 = R1:R0 + R1:R0 = 2*x*xi.
628 __ adds(R0, R0, Operand(R6));
629 __ adcs(R1, R1, R7);
630 __ adc(R2, R2, ZR); // R2:R1:R0 += c.
631 __ ldr(R7, Address(R5, 0)); // R7 = aj = *ajp.
632 __ adds(R0, R0, Operand(R7));
633 __ adcs(R6, R1, ZR);
634 __ adc(R7, R2, ZR); // R7:R6:R0 = 2*x*xi + aj + c.
635
636 // *ajp++ = low64(t) = R0
637 __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
638
639 // while (--n >= 0)
640 __ subs(R8, R8, Operand(1)); // --n
641 __ b(&loop, PL);
642
643 __ Bind(&done);
644 // uint64_t aj = *ajp
645 __ ldr(R0, Address(R5, 0));
646
647 // uint128_t t = aj + c
648 __ adds(R6, R6, Operand(R0));
649 __ adc(R7, R7, ZR);
650
651 // *ajp = low64(t) = R6
652 // *(ajp + 1) = high64(t) = R7
653 __ stp(R6, R7, Address(R5, 0, Address::PairOffset));
654
655 __ Bind(&x_zero);
656 __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
657 __ ret();
658}
659
660void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
661 Label* normal_ir_body) {
662 // There is no 128-bit by 64-bit division instruction on arm64, so we use two
663 // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to
664 // adjust the two 32-bit digits of the estimated quotient.
665 //
666 // Pseudo code:
667 // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
668 // uint64_t yt = args[_YT_LO .. _YT]; // _YT_LO == 0, _YT == 1.
669 // uint64_t* dp = &digits[(i >> 1) - 1]; // i is Smi.
670 // uint64_t dh = dp[0]; // dh == digits[(i >> 1) - 1 .. i >> 1].
671 // uint64_t qd;
672 // if (dh == yt) {
673 // qd = (DIGIT_MASK << 32) | DIGIT_MASK;
674 // } else {
675 // dl = dp[-1]; // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
676 // // We cannot calculate qd = dh:dl / yt, so ...
677 // uint64_t yth = yt >> 32;
678 // uint64_t qh = dh / yth;
679 // uint128_t ph:pl = yt*qh;
680 // uint64_t tl = (dh << 32)|(dl >> 32);
681 // uint64_t th = dh >> 32;
682 // while ((ph > th) || ((ph == th) && (pl > tl))) {
683 // if (pl < yt) --ph;
684 // pl -= yt;
685 // --qh;
686 // }
687 // qd = qh << 32;
688 // tl = (pl << 32);
689 // th = (ph << 32)|(pl >> 32);
690 // if (tl > dl) ++th;
691 // dl -= tl;
692 // dh -= th;
693 // uint64_t ql = ((dh << 32)|(dl >> 32)) / yth;
694 // ph:pl = yt*ql;
695 // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
696 // if (pl < yt) --ph;
697 // pl -= yt;
698 // --ql;
699 // }
700 // qd |= ql;
701 // }
702 // args[_QD .. _QD_HI] = qd; // _QD == 2, _QD_HI == 3.
703 // return 2;
704 // }
705
706 // R4 = args
707 __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
708
709 // R3 = yt = args[0..1]
710 __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset()));
711
712 // R2 = dh = digits[(i >> 1) - 1 .. i >> 1]
713 // R0 = i as Smi, R1 = digits
714 __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
715#if defined(DART_COMPRESSED_POINTERS)
716 __ sxtw(R0, R0);
717#endif
718 __ add(R1, R1, Operand(R0, LSL, 1));
719 __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset() -
720 kBytesPerBigIntDigit));
721
722 // R0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
723 __ movn(R0, Immediate(0), 0);
724
725 // Return qd if dh == yt
726 Label return_qd;
727 __ cmp(R2, Operand(R3));
728 __ b(&return_qd, EQ);
729
730 // R1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
731 __ ldr(R1, FieldAddress(R1, target::TypedData::payload_offset() -
732 3 * kBytesPerBigIntDigit));
733
734 // R5 = yth = yt >> 32
735 __ orr(R5, ZR, Operand(R3, LSR, 32));
736
737 // R6 = qh = dh / yth
738 __ udiv(R6, R2, R5);
739
740 // R8:R7 = ph:pl = yt*qh
741 __ mul(R7, R3, R6);
742 __ umulh(R8, R3, R6);
743
744 // R9 = tl = (dh << 32)|(dl >> 32)
745 __ orr(R9, ZR, Operand(R2, LSL, 32));
746 __ orr(R9, R9, Operand(R1, LSR, 32));
747
748 // R10 = th = dh >> 32
749 __ orr(R10, ZR, Operand(R2, LSR, 32));
750
751 // while ((ph > th) || ((ph == th) && (pl > tl)))
752 Label qh_adj_loop, qh_adj, qh_ok;
753 __ Bind(&qh_adj_loop);
754 __ cmp(R8, Operand(R10));
755 __ b(&qh_adj, HI);
756 __ b(&qh_ok, NE);
757 __ cmp(R7, Operand(R9));
758 __ b(&qh_ok, LS);
759
760 __ Bind(&qh_adj);
761 // if (pl < yt) --ph
762 __ sub(TMP, R8, Operand(1)); // TMP = ph - 1
763 __ cmp(R7, Operand(R3));
764 __ csel(R8, TMP, R8, CC); // R8 = R7 < R3 ? TMP : R8
765
766 // pl -= yt
767 __ sub(R7, R7, Operand(R3));
768
769 // --qh
770 __ sub(R6, R6, Operand(1));
771
772 // Continue while loop.
773 __ b(&qh_adj_loop);
774
775 __ Bind(&qh_ok);
776 // R0 = qd = qh << 32
777 __ orr(R0, ZR, Operand(R6, LSL, 32));
778
779 // tl = (pl << 32)
780 __ orr(R9, ZR, Operand(R7, LSL, 32));
781
782 // th = (ph << 32)|(pl >> 32);
783 __ orr(R10, ZR, Operand(R8, LSL, 32));
784 __ orr(R10, R10, Operand(R7, LSR, 32));
785
786 // if (tl > dl) ++th
787 __ add(TMP, R10, Operand(1)); // TMP = th + 1
788 __ cmp(R9, Operand(R1));
789 __ csel(R10, TMP, R10, HI); // R10 = R9 > R1 ? TMP : R10
790
791 // dl -= tl
792 __ sub(R1, R1, Operand(R9));
793
794 // dh -= th
795 __ sub(R2, R2, Operand(R10));
796
797 // R6 = ql = ((dh << 32)|(dl >> 32)) / yth
798 __ orr(R6, ZR, Operand(R2, LSL, 32));
799 __ orr(R6, R6, Operand(R1, LSR, 32));
800 __ udiv(R6, R6, R5);
801
802 // R8:R7 = ph:pl = yt*ql
803 __ mul(R7, R3, R6);
804 __ umulh(R8, R3, R6);
805
806 // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
807 Label ql_adj_loop, ql_adj, ql_ok;
808 __ Bind(&ql_adj_loop);
809 __ cmp(R8, Operand(R2));
810 __ b(&ql_adj, HI);
811 __ b(&ql_ok, NE);
812 __ cmp(R7, Operand(R1));
813 __ b(&ql_ok, LS);
814
815 __ Bind(&ql_adj);
816 // if (pl < yt) --ph
817 __ sub(TMP, R8, Operand(1)); // TMP = ph - 1
818 __ cmp(R7, Operand(R3));
819 __ csel(R8, TMP, R8, CC); // R8 = R7 < R3 ? TMP : R8
820
821 // pl -= yt
822 __ sub(R7, R7, Operand(R3));
823
824 // --ql
825 __ sub(R6, R6, Operand(1));
826
827 // Continue while loop.
828 __ b(&ql_adj_loop);
829
830 __ Bind(&ql_ok);
831 // qd |= ql;
832 __ orr(R0, R0, Operand(R6));
833
834 __ Bind(&return_qd);
835 // args[2..3] = qd
836 __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
837 2 * kBytesPerBigIntDigit));
838
839 __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
840 __ ret();
841}
842
843void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
844 Label* normal_ir_body) {
845 // Pseudo code:
846 // static int _mulMod(Uint32List args, Uint32List digits, int i) {
847 // uint64_t rho = args[_RHO .. _RHO_HI]; // _RHO == 2, _RHO_HI == 3.
848 // uint64_t d = digits[i >> 1 .. (i >> 1) + 1]; // i is Smi and even.
849 // uint128_t t = rho*d;
850 // args[_MU .. _MU_HI] = t mod DIGIT_BASE^2; // _MU == 4, _MU_HI == 5.
851 // return 2;
852 // }
853
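// The result is reduced mod DIGIT_BASE^2 == 2^64, so only the low 64 bits of
// rho*d are needed and a single mul suffices.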
854 // R4 = args
855 __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
856
857 // R3 = rho = args[2..3]
858 __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset() +
859 2 * kBytesPerBigIntDigit));
860
861 // R2 = digits[i >> 1 .. (i >> 1) + 1]
862 // R0 = i as Smi, R1 = digits
863 __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
864#if defined(DART_COMPRESSED_POINTERS)
865 __ sxtw(R0, R0);
866#endif
867 __ add(R1, R1, Operand(R0, LSL, 1));
868 __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset()));
869
870 // R0 = rho*d mod DIGIT_BASE
871 __ mul(R0, R2, R3); // R0 = low64(R2*R3).
872
873 // args[4 .. 5] = R0
874 __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
875 4 * kBytesPerBigIntDigit));
876
877 __ LoadImmediate(R0, target::ToRawSmi(2)); // Two digits processed.
878 __ ret();
879}
880
881// Checks if the last argument is a double; jumps to label 'is_smi' if smi
882// (easy to convert to double), otherwise jumps to label 'not_double_smi'.
883// Returns the last argument in R0.
884static void TestLastArgumentIsDouble(Assembler* assembler,
885 Label* is_smi,
886 Label* not_double_smi) {
887 __ ldr(R0, Address(SP, 0 * target::kWordSize));
888 __ BranchIfSmi(R0, is_smi);
889 __ CompareClassId(R0, kDoubleCid);
890 __ b(not_double_smi, NE);
891 // Fall through with Double in R0.
892}
893
894// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
895// type. Return true or false object in the register R0. Any NaN argument
896// returns false. Any non-double arg1 causes control flow to fall through to the
897// slow case (compiled method body).
898static void CompareDoubles(Assembler* assembler,
899 Label* normal_ir_body,
900 Condition true_condition) {
901 Label is_smi, double_op, not_nan;
902
903 TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
904 // Both arguments are double, right operand is in R0.
905
906 __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
907 __ Bind(&double_op);
908 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
909 __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
910
911 __ fcmpd(V0, V1);
912 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
913 // Return false if D0 or D1 was NaN before checking true condition.
914 __ b(&not_nan, VC);
915 __ ret();
916 __ Bind(&not_nan);
917 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
918 __ csel(R0, TMP, R0, true_condition);
919 __ ret();
920
921 __ Bind(&is_smi); // Convert R0 to a double.
922 __ SmiUntag(R0);
923 __ scvtfdx(V1, R0);
924 __ b(&double_op); // Then do the comparison.
925 __ Bind(normal_ir_body);
926}
927
928void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
929 Label* normal_ir_body) {
930 CompareDoubles(assembler, normal_ir_body, HI);
931}
932
933void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
934 Label* normal_ir_body) {
935 CompareDoubles(assembler, normal_ir_body, CS);
936}
937
938void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
939 Label* normal_ir_body) {
940 CompareDoubles(assembler, normal_ir_body, CC);
941}
942
943void AsmIntrinsifier::Double_equal(Assembler* assembler,
944 Label* normal_ir_body) {
945 CompareDoubles(assembler, normal_ir_body, EQ);
946}
947
948void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
949 Label* normal_ir_body) {
950 CompareDoubles(assembler, normal_ir_body, LS);
951}
952
953// Expects left argument to be double (receiver). Right argument is unknown.
954// Both arguments are on stack.
955static void DoubleArithmeticOperations(Assembler* assembler,
956 Label* normal_ir_body,
957 Token::Kind kind) {
958 Label is_smi, double_op;
959
960 TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
961 // Both arguments are double, right operand is in R0.
962 __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
963 __ Bind(&double_op);
964 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
965 __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
966 switch (kind) {
967 case Token::kADD:
968 __ faddd(V0, V0, V1);
969 break;
970 case Token::kSUB:
971 __ fsubd(V0, V0, V1);
972 break;
973 case Token::kMUL:
974 __ fmuld(V0, V0, V1);
975 break;
976 case Token::kDIV:
977 __ fdivd(V0, V0, V1);
978 break;
979 default:
980 UNREACHABLE();
981 }
982 const Class& double_class = DoubleClass();
983 __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
984 __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
985 __ ret();
986
987 __ Bind(&is_smi); // Convert R0 to a double.
988 __ SmiUntag(R0);
989 __ scvtfdx(V1, R0);
990 __ b(&double_op);
991
992 __ Bind(normal_ir_body);
993}
994
995void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
996 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
997}
998
999void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
1000 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
1001}
1002
1003void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
1004 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
1005}
1006
1007void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
1008 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
1009}
1010
1011// Left is double, right is integer (Mint or Smi)
1012void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
1013 Label* normal_ir_body) {
1014 // Only smis allowed.
1015 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1016 __ BranchIfNotSmi(R0, normal_ir_body);
1017 // Is Smi.
1018 __ SmiUntag(R0);
1019 __ scvtfdx(V1, R0);
1020 __ ldr(R0, Address(SP, 1 * target::kWordSize));
1021 __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
1022 __ fmuld(V0, V0, V1);
1023 const Class& double_class = DoubleClass();
1024 __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, R0, R1);
1025 __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
1026 __ ret();
1027 __ Bind(normal_ir_body);
1028}
1029
1030void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
1031 Label* normal_ir_body) {
1032 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1033 __ BranchIfNotSmi(R0, normal_ir_body);
1034 // Is Smi.
1035 __ SmiUntag(R0);
1036#if !defined(DART_COMPRESSED_POINTERS)
1037 __ scvtfdx(V0, R0);
1038#else
1039 __ scvtfdw(V0, R0);
1040#endif
1041 const Class& double_class = DoubleClass();
1042 __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, R0, R1);
1043 __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
1044 __ ret();
1045 __ Bind(normal_ir_body);
1046}
1047
1048void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
1049 Label* normal_ir_body) {
1050 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1051 __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
1052 __ fcmpd(V0, V0);
1053 __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
1054 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1055 __ csel(R0, TMP, R0, VC);
1056 __ ret();
1057}
1058
1059void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
1060 Label* normal_ir_body) {
1061 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1062 __ LoadFieldFromOffset(R0, R0, target::Double::value_offset());
1063 // Mask off the sign.
1064 __ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL);
1065 // Compare with +infinity.
1066 __ CompareImmediate(R0, 0x7FF0000000000000LL);
1067 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1068 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
1069 __ csel(R0, TMP, R0, EQ);
1070 __ ret();
1071}
1072
1073void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
1074 Label* normal_ir_body) {
1075 const Register false_reg = R0;
1076 const Register true_reg = R2;
1077 Label is_false, is_true, is_zero;
1078
1079 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1080 __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
1081 __ fcmpdz(V0);
1082 __ LoadObject(true_reg, CastHandle<Object>(TrueObject()));
1083 __ LoadObject(false_reg, CastHandle<Object>(FalseObject()));
1084 __ b(&is_false, VS); // NaN -> false.
1085 __ b(&is_zero, EQ); // Check for negative zero.
1086 __ b(&is_false, CS); // >= 0 -> false.
1087
1088 __ Bind(&is_true);
1089 __ mov(R0, true_reg);
1090
1091 __ Bind(&is_false);
1092 __ ret();
1093
1094 __ Bind(&is_zero);
1095 // Check for negative zero by looking at the sign bit.
1096 __ fmovrd(R1, V0);
1097 __ LsrImmediate(R1, R1, 63);
1098 __ tsti(R1, Immediate(1));
1099 __ csel(R0, true_reg, false_reg, NE); // Sign bit set.
1100 __ ret();
1101}
1102
1103void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
1104 Label* normal_ir_body) {
1105 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1106 __ ldr(R1, Address(SP, 1 * target::kWordSize));
1107 __ CompareObjectRegisters(R0, R1);
1108 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1109 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
1110 __ csel(R0, TMP, R0, EQ);
1111 __ ret();
1112}
1113
1114static void JumpIfInteger(Assembler* assembler,
1115 Register cid,
1116 Register tmp,
1117 Label* target) {
1118 assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
1119 target);
1120}
1121
1122static void JumpIfNotInteger(Assembler* assembler,
1123 Register cid,
1124 Register tmp,
1125 Label* target) {
1126 assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
1127 target);
1128}
1129
1130static void JumpIfString(Assembler* assembler,
1131 Register cid,
1132 Register tmp,
1133 Label* target) {
1134 assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
1135 Assembler::kIfInRange, target);
1136}
1137
1138static void JumpIfNotString(Assembler* assembler,
1139 Register cid,
1140 Register tmp,
1141 Label* target) {
1142 assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
1143 Assembler::kIfNotInRange, target);
1144}
1145
1146static void JumpIfNotList(Assembler* assembler,
1147 Register cid,
1148 Register tmp,
1149 Label* target) {
1150 assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
1151 Assembler::kIfNotInRange, target);
1152}
1153
1154static void JumpIfType(Assembler* assembler,
1155 Register cid,
1156 Register tmp,
1157 Label* target) {
1158 COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
1159 (kRecordTypeCid == kTypeCid + 2));
1160 assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
1161 Assembler::kIfInRange, target);
1162}
1163
1164static void JumpIfNotType(Assembler* assembler,
1165 Register cid,
1166 Register tmp,
1167 Label* target) {
1168 COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
1169 (kRecordTypeCid == kTypeCid + 2));
1170 assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
1171 Assembler::kIfNotInRange, target);
1172}
1173
1174// Return type quickly for simple types (not parameterized and not signature).
1175void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
1176 Label* normal_ir_body) {
1177 Label use_declaration_type, not_double, not_integer, not_string;
1178 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1179 __ LoadClassIdMayBeSmi(R1, R0);
1180
1181 __ CompareImmediate(R1, kClosureCid);
1182 __ b(normal_ir_body, EQ); // Instance is a closure.
1183
1184 __ CompareImmediate(R1, kRecordCid);
1185 __ b(normal_ir_body, EQ); // Instance is a record.
1186
1187 __ CompareImmediate(R1, kNumPredefinedCids);
1188 __ b(&use_declaration_type, HI);
1189
1190 __ LoadIsolateGroup(R2);
1191 __ LoadFromOffset(R2, R2, target::IsolateGroup::object_store_offset());
1192
1193 __ CompareImmediate(R1, kDoubleCid);
1194 __ b(&not_double, NE);
1195 __ LoadFromOffset(R0, R2, target::ObjectStore::double_type_offset());
1196 __ ret();
1197
1198 __ Bind(&not_double);
1199 JumpIfNotInteger(assembler, R1, R0, &not_integer);
1200 __ LoadFromOffset(R0, R2, target::ObjectStore::int_type_offset());
1201 __ ret();
1202
1203 __ Bind(&not_integer);
1204 JumpIfNotString(assembler, R1, R0, &not_string);
1205 __ LoadFromOffset(R0, R2, target::ObjectStore::string_type_offset());
1206 __ ret();
1207
1208 __ Bind(&not_string);
1209 JumpIfNotType(assembler, R1, R0, &use_declaration_type);
1210 __ LoadFromOffset(R0, R2, target::ObjectStore::type_type_offset());
1211 __ ret();
1212
1213 __ Bind(&use_declaration_type);
1214 __ LoadClassById(R2, R1);
1215 __ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
1216 kTwoBytes);
1217 __ cbnz(normal_ir_body, R3);
1218
1219 __ LoadCompressed(R0,
1220 FieldAddress(R2, target::Class::declaration_type_offset()));
1221 __ CompareObject(R0, NullObject());
1222 __ b(normal_ir_body, EQ);
1223 __ ret();
1224
1225 __ Bind(normal_ir_body);
1226}
1227
1228// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
1229// can be determined by this fast path, it jumps to either equal_* or not_equal.
1230// If classes are equivalent but may be generic, then jumps to
1231// equal_may_be_generic. Clobbers scratch.
1232static void EquivalentClassIds(Assembler* assembler,
1233 Label* normal_ir_body,
1234 Label* equal_may_be_generic,
1235 Label* equal_not_generic,
1236 Label* not_equal,
1237 Register cid1,
1238 Register cid2,
1239 Register scratch,
1240 bool testing_instance_cids) {
1241 Label not_integer, not_integer_or_string, not_integer_or_string_or_list;
1242
1243 // Check if left hand side is a closure. Closures are handled in the runtime.
1244 __ CompareImmediate(cid1, kClosureCid);
1245 __ b(normal_ir_body, EQ);
1246
1247 // Check if left hand side is a record. Records are handled in the runtime.
1248 __ CompareImmediate(cid1, kRecordCid);
1249 __ b(normal_ir_body, EQ);
1250
1251 // Check whether class ids match. If class ids don't match types may still be
1252 // considered equivalent (e.g. multiple string implementation classes map to a
1253 // single String type).
1254 __ cmp(cid1, Operand(cid2));
1255 __ b(equal_may_be_generic, EQ);
1256
1257 // Class ids are different. Check if we are comparing two string types (with
1258 // different representations), two integer types, two list types or two type
1259 // types.
1260 __ CompareImmediate(cid1, kNumPredefinedCids);
1261 __ b(not_equal, HI);
1262
1263 // Check if both are integer types.
1264 JumpIfNotInteger(assembler, cid1, scratch, &not_integer);
1265
1266 // First type is an integer. Check if the second is an integer too.
1267 JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
1268 // Integer types are only equivalent to other integer types.
1269 __ b(not_equal);
1270
1271 __ Bind(&not_integer);
1272 // Check if both are String types.
1273 JumpIfNotString(assembler, cid1, scratch,
1274 testing_instance_cids ? &not_integer_or_string : not_equal);
1275
1276 // First type is String. Check if the second is a string too.
1277 JumpIfString(assembler, cid2, scratch, equal_not_generic);
1278 // String types are only equivalent to other String types.
1279 __ b(not_equal);
1280
1281 if (testing_instance_cids) {
1282 __ Bind(&not_integer_or_string);
1283 // Check if both are List types.
1284 JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);
1285
1286 // First type is a List. Check if the second is a List too.
1287 JumpIfNotList(assembler, cid2, scratch, not_equal);
1290 __ b(equal_may_be_generic);
1291
1292 __ Bind(&not_integer_or_string_or_list);
1293 // Check if the first type is a Type. If it is not then types are not
1294 // equivalent because they have different class ids and they are not String
1295 // or integer or List or Type.
1296 JumpIfNotType(assembler, cid1, scratch, not_equal);
1297
1298 // First type is a Type. Check if the second is a Type too.
1299 JumpIfType(assembler, cid2, scratch, equal_not_generic);
1300 // Type types are only equivalent to other Type types.
1301 __ b(not_equal);
1302 }
1303}
1304
1305void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
1306 Label* normal_ir_body) {
1307 __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
1308 __ LoadClassIdMayBeSmi(R2, R1);
1309 __ LoadClassIdMayBeSmi(R1, R0);
1310
1311 Label equal_may_be_generic, equal, not_equal;
1312 EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
1313 &not_equal, R1, R2, R0,
1314 /* testing_instance_cids = */ true);
1315
1316 __ Bind(&equal_may_be_generic);
1317 // Classes are equivalent and neither is a closure class.
1318 // Check if there are no type arguments. In this case we can return true.
1319 // Otherwise fall through into the runtime to handle comparison.
1320 __ LoadClassById(R0, R1);
1321 __ ldr(R0,
1322 FieldAddress(
1323 R0,
1324 target::Class::host_type_arguments_field_offset_in_words_offset()),
1325 kFourBytes);
1326 __ CompareImmediate(R0, target::Class::kNoTypeArguments);
1327 __ b(&equal, EQ);
1328
1329 // Compare type arguments, host_type_arguments_field_offset_in_words in R0.
1330 __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
1331 __ AddImmediate(R1, -kHeapObjectTag);
1332 __ ldr(R1, Address(R1, R0, UXTX, Address::Scaled), kObjectBytes);
1333 __ AddImmediate(R2, -kHeapObjectTag);
1334 __ ldr(R2, Address(R2, R0, UXTX, Address::Scaled), kObjectBytes);
1335 __ CompareObjectRegisters(R1, R2);
1336 __ b(normal_ir_body, NE);
1337 // Fall through to equal case if type arguments are equal.
1338
1339 __ Bind(&equal);
1340 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1341 __ Ret();
1342
1343 __ Bind(&not_equal);
1344 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1345 __ ret();
1346
1347 __ Bind(normal_ir_body);
1348}
1349
1350void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
1351 Label* normal_ir_body) {
1352 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1353 __ ldr(R0, FieldAddress(R0, target::String::hash_offset()),
1354 kUnsignedFourBytes);
1355 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag.
1356 __ b(normal_ir_body, EQ);
1357 __ ret();
1358 // Hash not yet computed.
1359 __ Bind(normal_ir_body);
1360}
1361
1362void AsmIntrinsifier::Type_equality(Assembler* assembler,
1363 Label* normal_ir_body) {
1364 Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids;
1365
1366 __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
1367 __ CompareObjectRegisters(R1, R2);
1368 __ b(&equal, EQ);
1369
1370 // R1 might not be a Type object, so check that first (R2 should be though,
1371 // since this is a method on the Type class).
1372 __ LoadClassIdMayBeSmi(R0, R1);
1373 __ CompareImmediate(R0, kTypeCid);
1374 __ b(normal_ir_body, NE);
1375
1376 // Check if types are syntactically equal.
1377 __ LoadTypeClassId(R3, R1);
1378 __ LoadTypeClassId(R4, R2);
1379 // We are not testing instance cids, but type class cids of Type instances.
1380 EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
1381 &equiv_cids, &not_equal, R3, R4, R0,
1382 /* testing_instance_cids = */ false);
1383
1384 __ Bind(&equiv_cids_may_be_generic);
1385 // Compare type arguments in Type instances.
1386 __ LoadCompressed(R3, FieldAddress(R1, target::Type::arguments_offset()));
1387 __ LoadCompressed(R4, FieldAddress(R2, target::Type::arguments_offset()));
1388 __ CompareObjectRegisters(R3, R4);
1389 __ b(normal_ir_body, NE);
1390 // Fall through to check nullability if type arguments are equal.
1391
1392 // Check nullability.
1393 __ Bind(&equiv_cids);
1394 __ LoadAbstractTypeNullability(R1, R1);
1395 __ LoadAbstractTypeNullability(R2, R2);
1396 __ cmp(R1, Operand(R2));
1397 __ b(&not_equal, NE);
1398 // Fall through to equal case if nullability is equal.
1399
1400 __ Bind(&equal);
1401 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1402 __ ret();
1403
1404 __ Bind(&not_equal);
1405 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1406 __ ret();
1407
1408 __ Bind(normal_ir_body);
1409}
1410
1411void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
1412 Label* normal_ir_body) {
1413 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1414 __ LoadCompressedSmi(R0,
1415 FieldAddress(R0, target::AbstractType::hash_offset()));
1416 __ cbz(normal_ir_body, R0, kObjectBytes);
1417 __ ret();
1418 // Hash not yet computed.
1419 __ Bind(normal_ir_body);
1420}
1421
1422void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
1423 Label* normal_ir_body) {
1424 __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
1425 __ CompareObjectRegisters(R1, R2);
1426 __ b(normal_ir_body, NE);
1427
1428 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1429 __ ret();
1430
1431 __ Bind(normal_ir_body);
1432}
1433
1434// Keep in sync with Instance::IdentityHashCode.
1435// Note int and double never reach here because they override _identityHashCode.
1436// Special cases are also not needed for null or bool because they were pre-set
1437// during VM isolate finalization.
1438void AsmIntrinsifier::Object_getHash(Assembler* assembler,
1439 Label* normal_ir_body) {
1440 Label not_yet_computed;
1441 __ ldr(R0, Address(SP, 0 * target::kWordSize)); // Object.
1442 __ ldr(
1443 R0,
1444 FieldAddress(R0, target::Object::tags_offset() +
1445 target::UntaggedObject::kHashTagPos / kBitsPerByte),
1446 kUnsignedFourBytes);
1447 __ cbz(&not_yet_computed, R0);
1448 __ SmiTag(R0);
1449 __ ret();
1450
1451 __ Bind(&not_yet_computed);
1452 __ LoadFromOffset(R1, THR, target::Thread::random_offset());
1453 __ AndImmediate(R2, R1, 0xffffffff); // state_lo
1454 __ LsrImmediate(R3, R1, 32); // state_hi
1455 __ LoadImmediate(R1, 0xffffda61); // A
1456 __ mul(R1, R1, R2);
1457 __ add(R1, R1, Operand(R3)); // new_state = (A * state_lo) + state_hi
1458 __ StoreToOffset(R1, THR, target::Thread::random_offset());
1459 __ AndImmediate(R1, R1, 0x3fffffff);
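// A hash of zero is reserved to mean "not yet computed", so retry if the
// generator happened to produce zero.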
1460 __ cbz(&not_yet_computed, R1);
1461
1462 __ ldr(R0, Address(SP, 0 * target::kWordSize)); // Object.
1463 __ sub(R0, R0, Operand(kHeapObjectTag));
1464 __ LslImmediate(R3, R1, target::UntaggedObject::kHashTagPos);
1465
1466 Label retry, already_set_in_r4;
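// Install the hash into the header word with a load-exclusive/store-exclusive
// retry loop; if another thread installed a hash first, return that one.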
1467 __ Bind(&retry);
1468 __ ldxr(R2, R0, kEightBytes);
1469 __ LsrImmediate(R4, R2, target::UntaggedObject::kHashTagPos);
1470 __ cbnz(&already_set_in_r4, R4);
1471 __ orr(R2, R2, Operand(R3));
1472 __ stxr(R4, R2, R0, kEightBytes);
1473 __ cbnz(&retry, R4);
1474 // Fall-through with R1 containing new hash value (untagged).
1475 __ SmiTag(R0, R1);
1476 __ ret();
1477 __ Bind(&already_set_in_r4);
1478 __ clrex();
1479 __ SmiTag(R0, R4);
1480 __ ret();
1481}
1482
1483void GenerateSubstringMatchesSpecialization(Assembler* assembler,
1484 intptr_t receiver_cid,
1485 intptr_t other_cid,
1486 Label* return_true,
1487 Label* return_false) {
1488 __ SmiUntag(R1);
1489 __ LoadCompressedSmi(
1490 R8, FieldAddress(R0, target::String::length_offset())); // this.length
1491 __ SmiUntag(R8);
1492 __ LoadCompressedSmi(
1493 R9, FieldAddress(R2, target::String::length_offset())); // other.length
1494 __ SmiUntag(R9);
1495
1496 // if (other.length == 0) return true;
1497 __ cmp(R9, Operand(0));
1498 __ b(return_true, EQ);
1499
1500 // if (start < 0) return false;
1501 __ cmp(R1, Operand(0));
1502 __ b(return_false, LT);
1503
1504 // if (start + other.length > this.length) return false;
1505 __ add(R3, R1, Operand(R9));
1506 __ cmp(R3, Operand(R8));
1507 __ b(return_false, GT);
1508
1509 if (receiver_cid == kOneByteStringCid) {
1510 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1511 __ add(R0, R0, Operand(R1));
1512 } else {
1513 ASSERT(receiver_cid == kTwoByteStringCid);
1514 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1515 __ add(R0, R0, Operand(R1));
1516 __ add(R0, R0, Operand(R1));
1517 }
1518 if (other_cid == kOneByteStringCid) {
1519 __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
1520 } else {
1521 ASSERT(other_cid == kTwoByteStringCid);
1522 __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
1523 }
1524
1525 // i = 0
1526 __ LoadImmediate(R3, 0);
1527
1528 // do
1529 Label loop;
1530 __ Bind(&loop);
1531
1532 // this.codeUnitAt(i + start)
1533 __ ldr(R10, Address(R0, 0),
1534 receiver_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
1535 // other.codeUnitAt(i)
1536 __ ldr(R11, Address(R2, 0),
1537 other_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
1538 __ cmp(R10, Operand(R11));
1539 __ b(return_false, NE);
1540
1541 // i++, while (i < len)
1542 __ add(R3, R3, Operand(1));
1543 __ add(R0, R0, Operand(receiver_cid == kOneByteStringCid ? 1 : 2));
1544 __ add(R2, R2, Operand(other_cid == kOneByteStringCid ? 1 : 2));
1545 __ cmp(R3, Operand(R9));
1546 __ b(&loop, LT);
1547
1548 __ b(return_true);
1549}
1550
1551// bool _substringMatches(int start, String other)
1552// This intrinsic handles a OneByteString or TwoByteString receiver with a
1553// OneByteString other.
1554void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
1555 Label* normal_ir_body) {
1556 Label return_true, return_false, try_two_byte;
1557 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // this
1558 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // start
1559 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // other
1560
1561 __ BranchIfNotSmi(R1, normal_ir_body);
1562
1563 __ CompareClassId(R2, kOneByteStringCid);
1564 __ b(normal_ir_body, NE);
1565
1566 __ CompareClassId(R0, kOneByteStringCid);
1567 __ b(&try_two_byte, NE);
1568
1569 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
1570 kOneByteStringCid, &return_true,
1571 &return_false);
1572
1573 __ Bind(&try_two_byte);
1574 __ CompareClassId(R0, kTwoByteStringCid);
1575 __ b(normal_ir_body, NE);
1576
1577 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
1578 kOneByteStringCid, &return_true,
1579 &return_false);
1580
1581 __ Bind(&return_true);
1582 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1583 __ ret();
1584
1585 __ Bind(&return_false);
1586 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1587 __ ret();
1588
1589 __ Bind(normal_ir_body);
1590}
1591
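// Returns the 1-character symbol for the code unit at the given index;
// indexes out of range and code units without a predefined symbol fall back
// to the runtime.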
1592void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
1593 Label* normal_ir_body) {
1594 Label try_two_byte_string;
1595
1596 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Index.
1597 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // String.
1598 __ BranchIfNotSmi(R1, normal_ir_body); // Index is not a Smi.
1599 // Range check.
1600 __ LoadCompressedSmi(R2, FieldAddress(R0, target::String::length_offset()));
1601 __ cmp(R1, Operand(R2));
1602 __ b(normal_ir_body, CS); // Runtime throws exception.
1603
1604 __ CompareClassId(R0, kOneByteStringCid);
1605 __ b(&try_two_byte_string, NE);
1606 __ SmiUntag(R1);
1607 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1608 __ ldr(R1, Address(R0, R1), kUnsignedByte);
1609 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1610 __ b(normal_ir_body, GE);
1611 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1612 __ AddImmediate(
1613 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1614 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1615 __ ret();
1616
1617 __ Bind(&try_two_byte_string);
1618 __ CompareClassId(R0, kTwoByteStringCid);
1619 __ b(normal_ir_body, NE);
1620 ASSERT(kSmiTagShift == 1);
1621 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1622#if !defined(DART_COMPRESSED_POINTERS)
1623 __ ldr(R1, Address(R0, R1), kUnsignedTwoBytes);
1624#else
1625 // Upper half of a compressed Smi is garbage.
1626 __ ldr(R1, Address(R0, R1, SXTW, Address::Unscaled), kUnsignedTwoBytes);
1627#endif
1628 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1629 __ b(normal_ir_body, GE);
1630 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1631 __ AddImmediate(
1632 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1633 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1634 __ ret();
1635
1636 __ Bind(normal_ir_body);
1637}
1638
1639void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
1640 Label* normal_ir_body) {
1641 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1642 __ LoadCompressedSmi(R0, FieldAddress(R0, target::String::length_offset()));
1643 __ cmp(R0, Operand(target::ToRawSmi(0)), kObjectBytes);
1644 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1645 __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
1646 __ csel(R0, TMP, R0, NE);
1647 __ ret();
1648}
1649
1650void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1651 Label* normal_ir_body) {
1652 Label compute_hash;
1653 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // OneByteString object.
1654 __ ldr(R0, FieldAddress(R1, target::String::hash_offset()),
1655 kUnsignedFourBytes);
1656 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag.
1657 __ b(&compute_hash, EQ);
1658 __ ret(); // Return if already computed.
1659
1660 __ Bind(&compute_hash);
1661 __ LoadCompressedSmi(R2, FieldAddress(R1, target::String::length_offset()));
1662 __ SmiUntag(R2);
1663
1664 __ mov(R3, ZR);
1665 __ AddImmediate(R6, R1,
1666 target::OneByteString::data_offset() - kHeapObjectTag);
1667 // R1: Instance of OneByteString.
1668 // R2: String length, untagged integer.
1669 // R3: Loop counter, untagged integer.
1670 // R6: String data.
1671 // R0: Hash code, untagged integer.
1672
1673 Label loop, done;
1674 __ Bind(&loop);
1675 __ cmp(R3, Operand(R2));
1676 __ b(&done, EQ);
1677 // Add to hash code: (hash_ is uint32)
1678 // Get one characters (ch).
1679 __ ldr(R7, Address(R6, R3), kUnsignedByte);
1680 // R7: ch.
1681 __ add(R3, R3, Operand(1));
1682 __ CombineHashes(R0, R7);
1683 __ cmp(R3, Operand(R2));
1684 __ b(&loop);
1685
1686 __ Bind(&done);
1687 // Finalize. Allow a zero result to combine checks from empty string branch.
1688 __ FinalizeHashForSize(target::String::kHashBits, R0);
1689
1690 // R1: Untagged address of header word (ldxr/stxr do not support offsets).
1691 __ sub(R1, R1, Operand(kHeapObjectTag));
1692 __ LslImmediate(R0, R0, target::UntaggedObject::kHashTagPos);
1693 Label retry;
1694 __ Bind(&retry);
1695 __ ldxr(R2, R1, kEightBytes);
1696 __ orr(R2, R2, Operand(R0));
1697 __ stxr(R4, R2, R1, kEightBytes);
1698 __ cbnz(&retry, R4);
1699
1701 __ SmiTag(R0);
1702 __ ret();
1703}
1704
1705// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1706// 'length-reg' (R2) contains the desired length as a _Smi or _Mint.
1707// Returns new string as tagged pointer in R0.
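// Allocation is a simple bump of the thread's new-space top pointer; a non-Smi
// length, a length above max_elements, or insufficient space all branch to
// 'failure' (the runtime allocation path).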
1708static void TryAllocateString(Assembler* assembler,
1709 classid_t cid,
1710 intptr_t max_elements,
1711 Label* ok,
1712 Label* failure) {
1713 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1714 const Register length_reg = R2;
1715 // _Mint length: call to runtime to produce error.
1716 __ BranchIfNotSmi(length_reg, failure);
1717 // negative length: call to runtime to produce error.
1718 // Too big: call to runtime to allocate old.
1719 __ CompareImmediate(length_reg, target::ToRawSmi(max_elements), kObjectBytes);
1720 __ b(failure, HI);
1721
1722 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
1723 __ mov(R6, length_reg); // Save the length register.
1724 if (cid == kOneByteStringCid) {
1725 // Untag length.
1726 __ SmiUntag(length_reg, length_reg);
1727 } else {
1728 // Untag length and multiply by element size -> no-op.
1729 ASSERT(kSmiTagSize == 1);
1730 }
1731 const intptr_t fixed_size_plus_alignment_padding =
1732 target::String::InstanceSize() +
1733 target::ObjectAlignment::kObjectAlignment - 1;
1734 __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
1735 __ andi(length_reg, length_reg,
1736 Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
1737
1738 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1739
1740 // length_reg: allocation size.
1741 __ adds(R1, R0, Operand(length_reg));
1742 __ b(failure, CS); // Fail on unsigned overflow.
1743
1744 // Check if the allocation fits into the remaining space.
1745 // R0: potential new object start.
1746 // R1: potential next object start.
1747 // R2: allocation size.
1748 __ ldr(R7, Address(THR, target::Thread::end_offset()));
1749 __ cmp(R1, Operand(R7));
1750 __ b(failure, CS);
1751 __ CheckAllocationCanary(R0);
1752
1753 // Successfully allocated the object(s), now update top to point to
1754 // next object start and initialize the object.
1755 __ str(R1, Address(THR, target::Thread::top_offset()));
1756 __ AddImmediate(R0, kHeapObjectTag);
1757 // Clear the last double word so that string comparison does not need to
1758 // specially handle strings whose lengths are not a multiple of a double
1759 // word.
1760 __ stp(ZR, ZR, Address(R1, -2 * target::kWordSize, Address::PairOffset));
1761
1762 // Initialize the tags.
1763 // R0: new object start as a tagged pointer.
1764 // R1: new object end address.
1765 // R2: allocation size.
1766 {
1767 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1768 target::ObjectAlignment::kObjectAlignmentLog2;
1769
1770 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
1771 __ LslImmediate(R2, R2, shift);
1772 __ csel(R2, R2, ZR, LS);
1773
1774 // Get the class index and insert it into the tags.
1775 // R2: size and bit tags.
1776 // This also clears the hash, which is in the high word of the tags.
1777 const uword tags =
1778 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1779 __ LoadImmediate(TMP, tags);
1780 __ orr(R2, R2, Operand(TMP));
1781 __ str(R2, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
1782 }
1783
1784#if DART_COMPRESSED_POINTERS
1785 // Clear out padding caused by alignment gap between length and data.
1786 __ str(ZR, FieldAddress(R0, target::String::length_offset()));
1787#endif
1788 // Set the length field using the saved length (R6).
1789 __ StoreCompressedIntoObjectNoBarrier(
1790 R0, FieldAddress(R0, target::String::length_offset()), R6);
1791 __ b(ok);
1792}
1793
1794// Arg0: OneByteString (receiver).
1795// Arg1: Start index as Smi.
1796// Arg2: End index as Smi.
1797// The indexes must be valid.
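// Allocates the result with TryAllocateString and then copies the requested
// range one byte at a time.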
1798void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1799 Label* normal_ir_body) {
1800 const intptr_t kStringOffset = 2 * target::kWordSize;
1801 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
1802 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
1803 Label ok;
1804
1805 __ ldr(R2, Address(SP, kEndIndexOffset));
1806 __ ldr(TMP, Address(SP, kStartIndexOffset));
1807 __ orr(R3, R2, Operand(TMP));
1808 __ BranchIfNotSmi(R3, normal_ir_body); // 'start', 'end' not Smi.
1809
1810 __ sub(R2, R2, Operand(TMP));
1811 TryAllocateString(assembler, kOneByteStringCid,
1812 target::OneByteString::kMaxNewSpaceElements, &ok,
1813 normal_ir_body);
1814 __ Bind(&ok);
1815 // R0: new string as tagged pointer.
1816 // Copy string.
1817 __ ldr(R3, Address(SP, kStringOffset));
1818 __ ldr(R1, Address(SP, kStartIndexOffset));
1819 __ SmiUntag(R1);
1820 __ add(R3, R3, Operand(R1));
1821 // Calculate the untagged start address; the - 1 removes the heap object tag.
1822 __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
1823
1824 // R3: Start address to copy from (untagged).
1825 // R1: Untagged start index.
1826 __ ldr(R2, Address(SP, kEndIndexOffset));
1827 __ SmiUntag(R2);
1828 __ sub(R2, R2, Operand(R1));
1829
1830 // R3: Start address to copy from (untagged).
1831 // R2: Untagged number of bytes to copy.
1832 // R0: Tagged result string.
1833 // R6: Pointer into R3.
1834 // R7: Pointer into R0.
1835 // R1: Scratch register.
1836 Label loop, done;
1837 __ cmp(R2, Operand(0));
1838 __ b(&done, LE);
1839 __ mov(R6, R3);
1840 __ mov(R7, R0);
1841 __ Bind(&loop);
1842 __ ldr(R1, Address(R6), kUnsignedByte);
1843 __ AddImmediate(R6, 1);
1844 __ sub(R2, R2, Operand(1));
1845 __ cmp(R2, Operand(0));
1846 __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
1847 kUnsignedByte);
1848 __ AddImmediate(R7, 1);
1849 __ b(&loop, GT);
1850
1851 __ Bind(&done);
1852 __ ret();
1853 __ Bind(normal_ir_body);
1854}
1855
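// Stores the given char code as a single byte at the given index of the
// receiver. No range checks are performed here.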
1856void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1857 Label* normal_ir_body) {
1858 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1859 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1860 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
1861 __ SmiUntag(R1);
1862 __ SmiUntag(R2);
1863 __ AddImmediate(R3, R0,
1864 target::OneByteString::data_offset() - kHeapObjectTag);
1865 __ str(R2, Address(R3, R1), kUnsignedByte);
1866 __ ret();
1867}
1868
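// Two-byte variant of the intrinsic above. The index stays Smi-tagged because
// a Smi already equals the index times two, i.e. the byte offset of a two-byte
// element.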
1869void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1870 Label* normal_ir_body) {
1871 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1872 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1873 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
1874 // Untag index and multiply by element size -> no-op.
1875 __ SmiUntag(R2);
1876 __ AddImmediate(R3, R0,
1877 target::TwoByteString::data_offset() - kHeapObjectTag);
1878#if !defined(DART_COMPRESSED_POINTERS)
1879 __ str(R2, Address(R3, R1), kUnsignedTwoBytes);
1880#else
1881 // Upper half of a compressed Smi is garbage.
1882 __ str(R2, Address(R3, R1, SXTW, Address::Unscaled), kUnsignedTwoBytes);
1883#endif
1884 __ ret();
1885}
1886
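// Allocates an uninitialized _OneByteString of the requested length in new
// space, falling back to the slow path when TryAllocateString fails.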
1887void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1888 Label* normal_ir_body) {
1889 Label ok;
1890
1891 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1892#if defined(DART_COMPRESSED_POINTERS)
1893 __ sxtw(R2, R2);
1894#endif
1895 TryAllocateString(assembler, kOneByteStringCid,
1896 target::OneByteString::kMaxNewSpaceElements, &ok,
1897 normal_ir_body);
1898
1899 __ Bind(&ok);
1900 __ ret();
1901
1902 __ Bind(normal_ir_body);
1903}
1904
1905void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1906 Label* normal_ir_body) {
1907 Label ok;
1908
1909 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1910#if defined(DART_COMPRESSED_POINTERS)
1911 __ sxtw(R2, R2);
1912#endif
1913 TryAllocateString(assembler, kTwoByteStringCid,
1914 target::TwoByteString::kMaxNewSpaceElements, &ok,
1915 normal_ir_body);
1916
1917 __ Bind(&ok);
1918 __ ret();
1919
1920 __ Bind(normal_ir_body);
1921}
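// The one-byte and two-byte equality intrinsics both load the receiver and the
// argument from the stack and defer to the shared StringEquality helper.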
1922
1923void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1924 Label* normal_ir_body) {
1925 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1926 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1927
1928 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1929 kOneByteStringCid);
1930}
1931
1932void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1933 Label* normal_ir_body) {
1934 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1935 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1936
1937 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1938 kTwoByteStringCid);
1939}
1940
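// Tail-calls the matcher function that the RegExp object has stored for the
// subject string's class id; the per-CID function slots are laid out in the
// same order as the string class ids, which the address arithmetic below
// relies on.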
1941void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1942 Label* normal_ir_body,
1943 bool sticky) {
1944 if (FLAG_interpret_irregexp) return;
1945
1946 const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
1947 const intptr_t kStringParamOffset = 1 * target::kWordSize;
1948 // start_index smi is located at offset 0.
1949
1950 // Incoming registers:
1951 // R0: Function. (Will be reloaded with the specialized matcher function.)
1952 // R4: Arguments descriptor. (Will be preserved.)
1953 // R5: Unknown. (Must be GC safe on tail call.)
1954
1955 // Load the specialized function pointer into R0. Leverage the fact that the
1956 // string CIDs, as well as the stored function pointers, are in sequence.
1957 __ ldr(R2, Address(SP, kRegExpParamOffset));
1958 __ ldr(R1, Address(SP, kStringParamOffset));
1959 __ LoadClassId(R1, R1);
1960 __ AddImmediate(R1, -kOneByteStringCid);
1961#if !defined(DART_COMPRESSED_POINTERS)
1962 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
1963#else
1964 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2 - 1));
1965#endif
1966 __ LoadCompressed(FUNCTION_REG,
1967 FieldAddress(R1, target::RegExp::function_offset(
1968 kOneByteStringCid, sticky)));
1969
1970 // Registers are now set up for the lazy compile stub. It expects the function
1971 // in R0, the argument descriptor in R4, and IC-Data in R5.
1972 __ eor(R5, R5, Operand(R5));
1973
1974 // Tail-call the function.
1975 __ LoadCompressed(
1976 CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
1977 __ ldr(R1,
1978 FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1979 __ br(R1);
1980}
1981
1982void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1983 Label* normal_ir_body) {
1984 __ LoadIsolate(R0);
1985 __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
1986 __ ret();
1987}
1988
1989void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
1990 Label* normal_ir_body) {
1991 __ LoadIsolate(R0);
1992 __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
1993 __ ret();
1994}
1995
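// Returns whether the Dart timeline stream is currently recording, by reading
// the stream's enabled word through the TimelineStream* cached on the current
// thread.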
1996void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
1997 Label* normal_ir_body) {
1998#if !defined(SUPPORT_TIMELINE)
1999 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2000 __ ret();
2001#else
2002 // Load TimelineStream*.
2003 __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
2004 // Load uintptr_t from TimelineStream*.
2005 __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
2006 __ cmp(R0, Operand(0));
2007 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2008 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
2009 __ csel(R0, TMP, R0, NE);
2010 __ ret();
2011#endif
2012}
2013
2014void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
2015 Label* normal_ir_body) {
2016#if !defined(SUPPORT_TIMELINE)
2017 __ LoadImmediate(R0, target::ToRawSmi(0));
2018 __ ret();
2019#else
2020 __ ldr(R0, Address(THR, target::Thread::next_task_id_offset()));
2021 __ add(R1, R0, Operand(1));
2022 __ str(R1, Address(THR, target::Thread::next_task_id_offset()));
2023 __ SmiTag(R0); // Ignore loss of precision.
2024 __ ret();
2025#endif
2026}
2027
2028#undef __
2029
2030} // namespace compiler
2031} // namespace dart
2032
2033#endif // defined(TARGET_ARCH_ARM64)