asm_intrinsifier_arm64.cc
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM64.
#if defined(TARGET_ARCH_ARM64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// R4: Arguments descriptor
// LR: Return address
// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
// i.e. if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_arm64.h) must be preserved.
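//
// Every intrinsic below follows the same shape (illustrative sketch, not
// generated code):
//
//   void AsmIntrinsifier::Foo(Assembler* assembler, Label* normal_ir_body) {
//     ... fast path that ends in __ ret() ...
//     __ Bind(normal_ir_body);  // Failed checks fall through to compiled code.
//   }
//
// The "__" macro defined below makes "__ op(...)" expand to
// "assembler->op(...)".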

#define __ assembler->

// Loads args from stack into R0 and R1.
// Tests if they are smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));
  __ ldr(R1, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ BranchIfNotSmi(TMP, not_smi);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
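  // Reminder: with kSmiTagShift == 1 and kSmiTag == 0, a Smi is its value
  // shifted left by one with a zero tag bit, e.g. 3 is stored as 0b110, so a
  // tagged Smi is exactly twice its untagged value.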
  const Register right = R0;
  const Register left = R1;
  const Register temp = R2;
  const Register result = R0;

  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
                      kObjectBytes);
  __ b(normal_ir_body, CS);

  // Left is not a constant.
  // Check if the count is too large to handle inlined.
  __ SmiUntag(TMP, right);  // SmiUntag right into TMP.
  // Overflow test (preserve left, right, and TMP);
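  // The test shifts left and then arithmetically shifts back: if the round
  // trip reproduces 'left', no significant bits were lost. For example, with
  // 8-bit values, 0x12 << 3 = 0x90, and 0x90 >> 3 = 0xF2 != 0x12, so shifting
  // 0x12 left by 3 would overflow.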
  __ lslv(temp, left, TMP, kObjectBytes);
  __ asrv(TMP2, temp, TMP, kObjectBytes);
  __ cmp(left, Operand(TMP2), kObjectBytes);
  __ b(normal_ir_body, NE);  // Overflow.
  // Shift for result now we know there is no overflow.
  __ lslv(result, left, TMP, kObjectBytes);
  __ ret();
  __ Bind(normal_ir_body);
}

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // R0 contains the right argument, R1 the left.
  __ CompareObjectRegisters(R1, R0);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, true_condition);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LT);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GT);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LE);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GE);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ CompareObjectRegisters(R0, R1);
  __ b(&true_label, EQ);

  __ orr(R2, R0, Operand(R1));
  __ BranchIfNotSmi(R2, &check_for_mint);  // If R0 or R1 is not a smi do Mint checks.

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ BranchIfNotSmi(R1, &receiver_not_smi);  // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(R0, kDoubleCid);
  __ b(normal_ir_body, EQ);
  __ LoadObject(R0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ ret();

  __ Bind(&receiver_not_smi);
  // R1: receiver.

  __ CompareClassId(R1, kMintCid);
  __ b(normal_ir_body, NE);
  // Receiver is Mint, return false if right is Smi.
  __ BranchIfNotSmi(R0, normal_ir_body);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(R0);
  // XOR with sign bit to complement bits if value is negative.
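  // After this XOR a negative value looks like its bitwise complement, so a
  // single clz serves both signs: bit length = word size - leading zeros.
  // E.g. 5 (0b101) gives 64 - 61 = 3, and -1 XORs to 0, giving bit length 0.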
#if !defined(DART_COMPRESSED_POINTERS)
  __ eor(R0, R0, Operand(R0, ASR, 63));
  __ clz(R0, R0);
  __ LoadImmediate(R1, 64);
#else
  __ eorw(R0, R0, Operand(R0, ASR, 31));
  __ clzw(R0, R0);
  __ LoadImmediate(R1, 32);
#endif
  __ sub(R0, R1, Operand(R0));
  __ SmiTag(R0);
  __ ret();
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R2, R2);
#endif
  __ add(R2, R2, Operand(2));  // x_used > 0, Smi. R2 = x_used + 1, round up.
  __ AsrImmediate(R2, R2, 2);  // R2 = num of digit pairs to read.
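  // Worked example: x_used == 5 is the Smi 10; (10 + 2) >> 2 == 3 == ceil(5/2)
  // digit pairs, i.e. the +2 / ASR 2 sequence untags and rounds up to whole
  // 64-bit pairs in one step.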
  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R5, R5);
#endif
  __ SmiUntag(R5);
  // R0 = n ~/ (2*_DIGIT_BITS)
  __ AsrImmediate(R0, R5, 6);
  // R6 = &x_digits[0]
  __ add(R6, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  // R7 = &x_digits[2*R2]
  __ add(R7, R6, Operand(R2, LSL, 3));
  // R8 = &r_digits[2*1]
  __ add(R8, R4,
         Operand(target::TypedData::payload_offset() - kHeapObjectTag +
                 2 * kBytesPerBigIntDigit));
  // R8 = &r_digits[2*(R2 + n ~/ (2*_DIGIT_BITS) + 1)]
  __ add(R0, R0, Operand(R2));
  __ add(R8, R8, Operand(R0, LSL, 3));
  // R3 = n % (2 * _DIGIT_BITS)
  __ AndImmediate(R3, R5, 63);
  // R2 = 64 - R3
  __ LoadImmediate(R2, 64);
  __ sub(R2, R2, Operand(R3));
  __ mov(R1, ZR);
  Label loop;
  __ Bind(&loop);
  __ ldr(R0, Address(R7, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ lsrv(R4, R0, R2);
  __ orr(R1, R1, Operand(R4));
  __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ lslv(R1, R0, R3);
  __ cmp(R7, Operand(R6));
  __ b(&loop, NE);
  __ str(R1, Address(R8, -2 * kBytesPerBigIntDigit, Address::PreIndex));
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R2 = x_used, R3 = x_digits, x_used > 0, x_used is Smi.
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R2, R2);
#endif
  __ add(R2, R2, Operand(2));  // x_used > 0, Smi. R2 = x_used + 1, round up.
  __ AsrImmediate(R2, R2, 2);  // R2 = num of digit pairs to read.
  // R4 = r_digits, R5 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldp(R4, R5, Address(SP, 0 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R5, R5);
#endif
  __ SmiUntag(R5);
  // R0 = n ~/ (2*_DIGIT_BITS)
  __ AsrImmediate(R0, R5, 6);
  // R8 = &r_digits[0]
  __ add(R8, R4, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  // R7 = &x_digits[2*(n ~/ (2*_DIGIT_BITS))]
  __ add(R7, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  __ add(R7, R7, Operand(R0, LSL, 3));
  // R6 = &r_digits[2*(R2 - n ~/ (2*_DIGIT_BITS) - 1)]
  __ add(R0, R0, Operand(1));
  __ sub(R0, R2, Operand(R0));
  __ add(R6, R8, Operand(R0, LSL, 3));
  // R3 = n % (2*_DIGIT_BITS)
  __ AndImmediate(R3, R5, 63);
  // R2 = 64 - R3
  __ LoadImmediate(R2, 64);
  __ sub(R2, R2, Operand(R3));
  // R1 = x_digits[n ~/ (2*_DIGIT_BITS)] >> (n % (2*_DIGIT_BITS))
  __ ldr(R1, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lsrv(R1, R1, R3);
  Label loop_entry;
  __ b(&loop_entry);
  Label loop;
  __ Bind(&loop);
  __ ldr(R0, Address(R7, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lslv(R4, R0, R2);
  __ orr(R1, R1, Operand(R4));
  __ str(R1, Address(R8, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ lsrv(R1, R0, R3);
  __ Bind(&loop_entry);
  __ cmp(R8, Operand(R6));
  __ b(&loop, NE);
  __ str(R1, Address(R8, 0));
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R2 = used, R3 = digits
  __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R2, R2);
#endif
  __ add(R2, R2, Operand(2));  // used > 0, Smi. R2 = used + 1, round up.
  __ add(R2, ZR, Operand(R2, ASR, 2));  // R2 = num of digit pairs to process.
  // R3 = &digits[0]
  __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R4 = a_used, R5 = a_digits
  __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R4, R4);
#endif
  __ add(R4, R4, Operand(2));  // a_used > 0, Smi. R4 = a_used + 1, round up.
  __ add(R4, ZR, Operand(R4, ASR, 2));  // R4 = num of digit pairs to process.
  // R5 = &a_digits[0]
  __ add(R5, R5, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R6 = r_digits
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
  // R6 = &r_digits[0]
  __ add(R6, R6, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R7 = &digits[a_used rounded up to even number].
  __ add(R7, R3, Operand(R4, LSL, 3));

  // R8 = &digits[used rounded up to even number].
  __ add(R8, R3, Operand(R2, LSL, 3));

  __ adds(R0, R0, Operand(0));  // carry flag = 0
  Label add_loop;
  __ Bind(&add_loop);
  // Loop (a_used+1)/2 times, a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R0, R0, R1);
  __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&add_loop, R9);  // Does not affect carry flag.

  Label last_carry;
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ cbz(&last_carry, R9);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R0, R0, ZR);
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&carry_loop, R9);

  __ Bind(&last_carry);
  Label done;
  __ b(&done, CC);
  __ LoadImmediate(R0, 1);
  __ str(R0, Address(R6, 0));

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R2 = used, R3 = digits
  __ ldp(R2, R3, Address(SP, 3 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R2, R2);
#endif
  __ add(R2, R2, Operand(2));  // used > 0, Smi. R2 = used + 1, round up.
  __ add(R2, ZR, Operand(R2, ASR, 2));  // R2 = num of digit pairs to process.
  // R3 = &digits[0]
  __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R4 = a_used, R5 = a_digits
  __ ldp(R4, R5, Address(SP, 1 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R4, R4);
#endif
  __ add(R4, R4, Operand(2));  // a_used > 0, Smi. R4 = a_used + 1, round up.
  __ add(R4, ZR, Operand(R4, ASR, 2));  // R4 = num of digit pairs to process.
  // R5 = &a_digits[0]
  __ add(R5, R5, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R6 = r_digits
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
  // R6 = &r_digits[0]
  __ add(R6, R6, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R7 = &digits[a_used rounded up to even number].
  __ add(R7, R3, Operand(R4, LSL, 3));

  // R8 = &digits[used rounded up to even number].
  __ add(R8, R3, Operand(R2, LSL, 3));

  __ subs(R0, R0, Operand(0));  // carry flag = 1
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop (a_used+1)/2 times, a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R0, R0, R1);
  __ sub(R9, R3, Operand(R7));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&sub_loop, R9);  // Does not affect carry flag.

  Label done;
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ cbz(&done, R9);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, used - a_used > 0.
  __ ldr(R0, Address(R3, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R0, R0, ZR);
  __ sub(R9, R3, Operand(R8));  // Does not affect carry flag.
  __ str(R0, Address(R6, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ cbnz(&carry_loop, R9);

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  // R3 = x, no_op if x == 0
  // R0 = xi as Smi, R1 = x_digits.
  __ ldp(R0, R1, Address(SP, 5 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R3, FieldAddress(R1, target::TypedData::payload_offset()));
  __ tst(R3, Operand(R3));
  __ b(&done, EQ);

  // R6 = (SmiUntag(n) + 1)/2, no_op if n == 0
  __ ldr(R6, Address(SP, 0 * target::kWordSize));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R6, R6);
#endif
  __ add(R6, R6, Operand(2));
  __ adds(R6, ZR, Operand(R6, ASR, 2));  // SmiUntag(R6) and set cc.
  __ b(&done, EQ);

  // R4 = mip = &m_digits[i >> 1]
  // R0 = i as Smi, R1 = m_digits.
  __ ldp(R0, R1, Address(SP, 3 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R4, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R5 = ajp = &a_digits[j >> 1]
  // R0 = j as Smi, R1 = a_digits.
  __ ldp(R0, R1, Address(SP, 1 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R5, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R1 = c = 0
  __ mov(R1, ZR);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x: R3
  // mip: R4
  // ajp: R5
  // c: R1
  // n: R6
  // t: R7:R8 (not live at loop entry)

  // uint64_t mi = *mip++
  __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // uint64_t aj = *ajp
  __ ldr(R0, Address(R5, 0));

  // uint128_t t = x*mi + aj + c
  __ mul(R7, R2, R3);    // R7 = low64(R2*R3).
  __ umulh(R8, R2, R3);  // R8 = high64(R2*R3), t = R8:R7 = x*mi.
  __ adds(R7, R7, Operand(R0));
  __ adc(R8, R8, ZR);  // t += aj.
  __ adds(R0, R7, Operand(R1));  // t += c, R0 = low64(t).
  __ adc(R1, R8, ZR);  // c = R1 = high64(t).

  // *ajp++ = low64(t) = R0
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n > 0)
  __ subs(R6, R6, Operand(1));  // --n
  __ b(&muladd_loop, NE);

  __ tst(R1, Operand(R1));
  __ b(&done, EQ);

  // *ajp++ += c
  __ ldr(R0, Address(R5, 0));
  __ adds(R0, R0, Operand(R1));
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&done, CC);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ ldr(R0, Address(R5, 0));
  __ adds(R0, R0, Operand(1));
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // R4 = xip = &x_digits[i >> 1]
  // R2 = i as Smi, R3 = x_digits
  __ ldp(R2, R3, Address(SP, 2 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R2, R2);
#endif
  __ add(R3, R3, Operand(R2, LSL, 1));
  __ add(R4, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R3 = x = *xip++, return if x == 0
  Label x_zero;
  __ ldr(R3, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));
  __ tst(R3, Operand(R3));
  __ b(&x_zero, EQ);

  // R5 = ajp = &a_digits[i]
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ add(R1, R1, Operand(R2, LSL, 2));  // j == 2*i, i is Smi.
  __ add(R5, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R6:R1 = t = x*x + *ajp
  __ ldr(R0, Address(R5, 0));
  __ mul(R1, R3, R3);    // R1 = low64(R3*R3).
  __ umulh(R6, R3, R3);  // R6 = high64(R3*R3).
  __ adds(R1, R1, Operand(R0));  // R6:R1 += *ajp.
  __ adc(R6, R6, ZR);  // R6 = low64(c) = high64(t).
  __ mov(R7, ZR);  // R7 = high64(c) = 0.

  // *ajp++ = low64(t) = R1
  __ str(R1, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // int n = (used - i + 1)/2 - 1
  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ sub(R8, R0, Operand(R2));
  __ add(R8, R8, Operand(2));
  __ movn(R0, Immediate(1), 0);  // R0 = ~1 = -2.
  __ adds(R8, R0, Operand(R8, ASR, 2));  // while (--n >= 0)

  Label loop, done;
  __ b(&done, MI);

  __ Bind(&loop);
  // x: R3
  // xip: R4
  // ajp: R5
  // c: R7:R6
  // t: R2:R1:R0 (not live at loop entry)
  // n: R8

  // uint64_t xi = *xip++
  __ ldr(R2, Address(R4, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // uint192_t t = R2:R1:R0 = 2*x*xi + aj + c
  __ mul(R0, R2, R3);    // R0 = low64(R2*R3) = low64(x*xi).
  __ umulh(R1, R2, R3);  // R1 = high64(R2*R3) = high64(x*xi).
  __ adds(R0, R0, Operand(R0));
  __ adcs(R1, R1, R1);
  __ adc(R2, ZR, ZR);  // R2:R1:R0 = R1:R0 + R1:R0 = 2*x*xi.
  __ adds(R0, R0, Operand(R6));
  __ adcs(R1, R1, R7);
  __ adc(R2, R2, ZR);  // R2:R1:R0 += c.
  __ ldr(R7, Address(R5, 0));  // R7 = aj = *ajp.
  __ adds(R0, R0, Operand(R7));
  __ adcs(R6, R1, ZR);
  __ adc(R7, R2, ZR);  // R7:R6:R0 = 2*x*xi + aj + c.

  // *ajp++ = low64(t) = R0
  __ str(R0, Address(R5, 2 * kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n >= 0)
  __ subs(R8, R8, Operand(1));  // --n
  __ b(&loop, PL);

  __ Bind(&done);
  // uint64_t aj = *ajp
  __ ldr(R0, Address(R5, 0));

  // uint128_t t = aj + c
  __ adds(R6, R6, Operand(R0));
  __ adc(R7, R7, ZR);

  // *ajp = low64(t) = R6
  // *(ajp + 1) = high64(t) = R7
  __ stp(R6, R7, Address(R5, 0, Address::PairOffset));

  __ Bind(&x_zero);
  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // There is no 128-bit by 64-bit division instruction on arm64, so we use two
  // 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications to
  // adjust the two 32-bit digits of the estimated quotient.
  //
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     // We cannot calculate qd = dh:dl / yt, so ...
  //     uint64_t yth = yt >> 32;
  //     uint64_t qh = dh / yth;
  //     uint128_t ph:pl = yt*qh;
  //     uint64_t tl = (dh << 32)|(dl >> 32);
  //     uint64_t th = dh >> 32;
  //     while ((ph > th) || ((ph == th) && (pl > tl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --qh;
  //     }
  //     qd = qh << 32;
  //     tl = (pl << 32);
  //     th = (ph << 32)|(pl >> 32);
  //     if (tl > dl) ++th;
  //     dl -= tl;
  //     dh -= th;
  //     uint64_t ql = ((dh << 32)|(dl >> 32)) / yth;
  //     ph:pl = yt*ql;
  //     while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --ql;
  //     }
  //     qd |= ql;
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = yt = args[0..1]
  __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset()));

  // R2 = dh = digits[(i >> 1) - 1 .. i >> 1]
  // R0 = i as Smi, R1 = digits
  __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset() -
                                  kBytesPerBigIntDigit));

  // R0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ movn(R0, Immediate(0), 0);

  // Return qd if dh == yt
  Label return_qd;
  __ cmp(R2, Operand(R3));
  __ b(&return_qd, EQ);

  // R1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
  __ ldr(R1, FieldAddress(R1, target::TypedData::payload_offset() -
                                  3 * kBytesPerBigIntDigit));

  // R5 = yth = yt >> 32
  __ orr(R5, ZR, Operand(R3, LSR, 32));

  // R6 = qh = dh / yth
  __ udiv(R6, R2, R5);

  // R8:R7 = ph:pl = yt*qh
  __ mul(R7, R3, R6);
  __ umulh(R8, R3, R6);

  // R9 = tl = (dh << 32)|(dl >> 32)
  __ orr(R9, ZR, Operand(R2, LSL, 32));
  __ orr(R9, R9, Operand(R1, LSR, 32));

  // R10 = th = dh >> 32
  __ orr(R10, ZR, Operand(R2, LSR, 32));

  // while ((ph > th) || ((ph == th) && (pl > tl)))
  Label qh_adj_loop, qh_adj, qh_ok;
  __ Bind(&qh_adj_loop);
  __ cmp(R8, Operand(R10));
  __ b(&qh_adj, HI);
  __ b(&qh_ok, NE);
  __ cmp(R7, Operand(R9));
  __ b(&qh_ok, LS);

  __ Bind(&qh_adj);
  // if (pl < yt) --ph
  __ sub(TMP, R8, Operand(1));  // TMP = ph - 1
  __ cmp(R7, Operand(R3));
  __ csel(R8, TMP, R8, CC);  // R8 = R7 < R3 ? TMP : R8

  // pl -= yt
  __ sub(R7, R7, Operand(R3));

  // --qh
  __ sub(R6, R6, Operand(1));

  // Continue while loop.
  __ b(&qh_adj_loop);

  __ Bind(&qh_ok);
  // R0 = qd = qh << 32
  __ orr(R0, ZR, Operand(R6, LSL, 32));

  // tl = (pl << 32)
  __ orr(R9, ZR, Operand(R7, LSL, 32));

  // th = (ph << 32)|(pl >> 32);
  __ orr(R10, ZR, Operand(R8, LSL, 32));
  __ orr(R10, R10, Operand(R7, LSR, 32));

  // if (tl > dl) ++th
  __ add(TMP, R10, Operand(1));  // TMP = th + 1
  __ cmp(R9, Operand(R1));
  __ csel(R10, TMP, R10, HI);  // R10 = R9 > R1 ? TMP : R10

  // dl -= tl
  __ sub(R1, R1, Operand(R9));

  // dh -= th
  __ sub(R2, R2, Operand(R10));

  // R6 = ql = ((dh << 32)|(dl >> 32)) / yth
  __ orr(R6, ZR, Operand(R2, LSL, 32));
  __ orr(R6, R6, Operand(R1, LSR, 32));
  __ udiv(R6, R6, R5);

  // R8:R7 = ph:pl = yt*ql
  __ mul(R7, R3, R6);
  __ umulh(R8, R3, R6);

  // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  Label ql_adj_loop, ql_adj, ql_ok;
  __ Bind(&ql_adj_loop);
  __ cmp(R8, Operand(R2));
  __ b(&ql_adj, HI);
  __ b(&ql_ok, NE);
  __ cmp(R7, Operand(R1));
  __ b(&ql_ok, LS);

  __ Bind(&ql_adj);
  // if (pl < yt) --ph
  __ sub(TMP, R8, Operand(1));  // TMP = ph - 1
  __ cmp(R7, Operand(R3));
  __ csel(R8, TMP, R8, CC);  // R8 = R7 < R3 ? TMP : R8

  // pl -= yt
  __ sub(R7, R7, Operand(R3));

  // --ql
  __ sub(R6, R6, Operand(1));

  // Continue while loop.
  __ b(&ql_adj_loop);

  __ Bind(&ql_ok);
  // qd |= ql;
  __ orr(R0, R0, Operand(R6));

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
                                  2 * kBytesPerBigIntDigit));

  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }
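  //
  // Note: DIGIT_BASE == 2^32, so t mod DIGIT_BASE^2 is just the low 64 bits
  // of the 128-bit product; a single mul (which produces exactly those bits)
  // suffices and the high half is never computed.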

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = rho = args[2..3]
  __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset() +
                                  2 * kBytesPerBigIntDigit));

  // R2 = digits[i >> 1 .. (i >> 1) + 1]
  // R0 = i as Smi, R1 = digits
  __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
#if defined(DART_COMPRESSED_POINTERS)
  __ sxtw(R0, R0);
#endif
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset()));

  // R0 = rho*d mod DIGIT_BASE^2
  __ mul(R0, R2, R3);  // R0 = low64(R2*R3).

  // args[4 .. 5] = R0
  __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
                                  4 * kBytesPerBigIntDigit));

  __ LoadImmediate(R0, target::ToRawSmi(2));  // Two digits processed.
  __ ret();
}

// Check if the last argument is a double, jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi'.
// Returns the last argument in R0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfSmi(R0, is_smi);
  __ CompareClassId(R0, kDoubleCid);
  __ b(not_double_smi, NE);
  // Fall through with Double in R0.
}

// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
// type. Return true or false object in the register R0. Any NaN argument
// returns false. Any non-double arg1 causes control flow to fall through to the
// slow case (compiled method body).
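// After fcmpd, "greater" sets C and clears Z, "equal" sets Z and C, "less"
// clears C, and an unordered (NaN) result sets C and V. Hence the unsigned
// conditions HI/CS/CC/LS used by the callers map to >, >=, <, <= once the
// "b(&not_nan, VC)" guard below has filtered out NaNs.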
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_smi, double_op, not_nan;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.

  __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());

  __ fcmpd(V0, V1);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  // Return false if D0 or D1 was NaN before checking true condition.
  __ b(&not_nan, VC);
  __ ret();
  __ Bind(&not_nan);
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, true_condition);
  __ ret();

  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ b(&double_op);  // Then do the comparison.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, HI);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CS);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CC);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQ);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, LS);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.
  __ LoadDFieldFromOffset(V1, R0, target::Double::value_offset());
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  switch (kind) {
    case Token::kADD:
      __ faddd(V0, V0, V1);
      break;
    case Token::kSUB:
      __ fsubd(V0, V0, V1);
      break;
    case Token::kMUL:
      __ fmuld(V0, V0, V1);
      break;
    case Token::kDIV:
      __ fdivd(V0, V0, V1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();

  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ b(&double_op);

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi)
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(R0, normal_ir_body);
  // Is Smi.
  __ SmiUntag(R0);
  __ scvtfdx(V1, R0);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fmuld(V0, V0, V1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(R0, normal_ir_body);
  // Is Smi.
  __ SmiUntag(R0);
#if !defined(DART_COMPRESSED_POINTERS)
  __ scvtfdx(V0, R0);
#else
  __ scvtfdw(V0, R0);
#endif
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, R0, R1);
  __ StoreDFieldToOffset(V0, R0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fcmpd(V0, V0);
  __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, VC);
  __ ret();
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadFieldFromOffset(R0, R0, target::Double::value_offset());
  // Mask off the sign.
  __ AndImmediate(R0, R0, 0x7FFFFFFFFFFFFFFFLL);
  // Compare with +infinity.
  __ CompareImmediate(R0, 0x7FF0000000000000LL);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, EQ);
  __ ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  const Register false_reg = R0;
  const Register true_reg = R2;
  Label is_false, is_true, is_zero;

  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(V0, R0, target::Double::value_offset());
  __ fcmpdz(V0);
  __ LoadObject(true_reg, CastHandle<Object>(TrueObject()));
  __ LoadObject(false_reg, CastHandle<Object>(FalseObject()));
  __ b(&is_false, VS);  // NaN -> false.
  __ b(&is_zero, EQ);   // Check for negative zero.
  __ b(&is_false, CS);  // >= 0 -> false.

  __ Bind(&is_true);
  __ mov(R0, true_reg);

  __ Bind(&is_false);
  __ ret();

  __ Bind(&is_zero);
  // Check for negative zero by looking at the sign bit.
  __ fmovrd(R1, V0);
  __ LsrImmediate(R1, R1, 63);
  __ tsti(R1, Immediate(1));
  __ csel(R0, true_reg, false_reg, NE);  // Sign bit set.
  __ ret();
}

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ CompareObjectRegisters(R0, R1);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
  __ csel(R0, TMP, R0, EQ);
  __ ret();
}

static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
                        target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
                        target);
}

static void JumpIfString(Assembler* assembler,
                         Register cid,
                         Register tmp,
                         Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler,
                            Register cid,
                            Register tmp,
                            Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler,
                       Register cid,
                       Register tmp,
                       Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer, not_string;
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(R1, R0);

  __ CompareImmediate(R1, kClosureCid);
  __ b(normal_ir_body, EQ);  // Instance is a closure.

  __ CompareImmediate(R1, kRecordCid);
  __ b(normal_ir_body, EQ);  // Instance is a record.

  __ CompareImmediate(R1, kNumPredefinedCids);
  __ b(&use_declaration_type, HI);

  __ LoadIsolateGroup(R2);
  __ LoadFromOffset(R2, R2, target::IsolateGroup::object_store_offset());

  __ CompareImmediate(R1, kDoubleCid);
  __ b(&not_double, NE);
  __ LoadFromOffset(R0, R2, target::ObjectStore::double_type_offset());
  __ ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, R1, R0, &not_integer);
  __ LoadFromOffset(R0, R2, target::ObjectStore::int_type_offset());
  __ ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, R1, R0, &not_string);
  __ LoadFromOffset(R0, R2, target::ObjectStore::string_type_offset());
  __ ret();

  __ Bind(&not_string);
  JumpIfNotType(assembler, R1, R0, &use_declaration_type);
  __ LoadFromOffset(R0, R2, target::ObjectStore::type_type_offset());
  __ ret();

  __ Bind(&use_declaration_type);
  __ LoadClassById(R2, R1);
  __ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
         kTwoBytes);
  __ cbnz(normal_ir_body, R3);

  __ LoadCompressed(R0,
                    FieldAddress(R2, target::Class::declaration_type_offset()));
  __ CompareObject(R0, NullObject());
  __ b(normal_ir_body, EQ);
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or not_equal.
// If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ CompareImmediate(cid1, kClosureCid);
  __ b(normal_ir_body, EQ);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ CompareImmediate(cid1, kRecordCid);
  __ b(normal_ir_body, EQ);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to
  // a single String type).
  __ cmp(cid1, Operand(cid2));
  __ b(equal_may_be_generic, EQ);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ CompareImmediate(cid1, kNumPredefinedCids);
  __ b(not_equal, HI);

  // Check if both are integer types.
  JumpIfNotInteger(assembler, cid1, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ b(not_equal);

  __ Bind(&not_integer);
  // Check if both are String types.
  JumpIfNotString(assembler, cid1, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ b(not_equal);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    JumpIfNotList(assembler, cid2, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ b(equal_may_be_generic);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not then types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    JumpIfNotType(assembler, cid1, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    JumpIfType(assembler, cid2, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ b(not_equal);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ ldp(R0, R1, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ LoadClassIdMayBeSmi(R2, R1);
  __ LoadClassIdMayBeSmi(R1, R0);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, R1, R2, R0,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(R0, R1);
  __ ldr(R0,
         FieldAddress(
             R0,
             target::Class::host_type_arguments_field_offset_in_words_offset()),
         kFourBytes);
  __ CompareImmediate(R0, target::Class::kNoTypeArguments);
  __ b(&equal, EQ);

  // Compare type arguments, host_type_arguments_field_offset_in_words in R0.
  __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ AddImmediate(R1, -kHeapObjectTag);
  __ ldr(R1, Address(R1, R0, UXTX, Address::Scaled), kObjectBytes);
  __ AddImmediate(R2, -kHeapObjectTag);
  __ ldr(R2, Address(R2, R0, UXTX, Address::Scaled), kObjectBytes);
  __ CompareObjectRegisters(R1, R2);
  __ b(normal_ir_body, NE);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()),
         kUnsignedFourBytes);
  __ adds(R0, R0, Operand(R0));  // Smi tag the hash code, setting Z flag.
  __ b(normal_ir_body, EQ);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids, check_legacy;

  __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ CompareObjectRegisters(R1, R2);
  __ b(&equal, EQ);

  // R1 might not be a Type object, so check that first (R2 should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(R0, R1);
  __ CompareImmediate(R0, kTypeCid);
  __ b(normal_ir_body, NE);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(R3, R1);
  __ LoadTypeClassId(R4, R2);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, R3, R4, R0,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ LoadCompressed(R3, FieldAddress(R1, target::Type::arguments_offset()));
  __ LoadCompressed(R4, FieldAddress(R2, target::Type::arguments_offset()));
  __ CompareObjectRegisters(R3, R4);
  __ b(normal_ir_body, NE);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(R1, R1);
  __ LoadAbstractTypeNullability(R2, R2);
  __ cmp(R1, Operand(R2));
  __ b(&check_legacy, NE);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ CompareImmediate(R1, target::Nullability::kNonNullable);
  __ b(&not_equal, LT);
  __ CompareImmediate(R2, target::Nullability::kNonNullable);
  __ b(&equal, GE);

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadCompressedSmi(R0,
                       FieldAddress(R0, target::AbstractType::hash_offset()));
  __ cbz(normal_ir_body, R0, kObjectBytes);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ ldp(R1, R2, Address(SP, 0 * target::kWordSize, Address::PairOffset));
  __ CompareObjectRegisters(R1, R2);
  __ b(normal_ir_body, NE);

  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Keep in sync with Instance::IdentityHashCode.
// Note int and double never reach here because they override
// _identityHashCode.
// Special cases are also not needed for null or bool because they were pre-set
// during VM isolate finalization.
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  Label not_yet_computed;
  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // Object.
  __ ldr(R0,
         FieldAddress(R0, target::Object::tags_offset() +
                          target::UntaggedObject::kHashTagPos / kBitsPerByte),
         kUnsignedFourBytes);
  __ cbz(&not_yet_computed, R0);
  __ SmiTag(R0);
  __ ret();

  __ Bind(&not_yet_computed);
  __ LoadFromOffset(R1, THR, target::Thread::random_offset());
  __ AndImmediate(R2, R1, 0xffffffff);  // state_lo
  __ LsrImmediate(R3, R1, 32);          // state_hi
  __ LoadImmediate(R1, 0xffffda61);     // A
  __ mul(R1, R1, R2);
  __ add(R1, R1, Operand(R3));  // new_state = (A * state_lo) + state_hi
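  // This is one step of a 32-bit multiply-with-carry generator: the low half
  // of new_state becomes the next state_lo and the bits above it the next
  // state_hi, both packed into the single 64-bit word stored in the Thread.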
  __ StoreToOffset(R1, THR, target::Thread::random_offset());
  __ AndImmediate(R1, R1, 0x3fffffff);
  __ cbz(&not_yet_computed, R1);

  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // Object.
  __ sub(R0, R0, Operand(kHeapObjectTag));
  __ LslImmediate(R3, R1, target::UntaggedObject::kHashTagPos);

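  // The ldxr/stxr pair below forms a standard load-exclusive/store-exclusive
  // retry loop: stxr only writes the updated header if no other core touched
  // it since the ldxr, and sets R4 to nonzero on failure. If another thread
  // installed a hash first, that hash is returned instead.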
  Label retry, already_set_in_r4;
  __ Bind(&retry);
  __ ldxr(R2, R0, kEightBytes);
  __ LsrImmediate(R4, R2, target::UntaggedObject::kHashTagPos);
  __ cbnz(&already_set_in_r4, R4);
  __ orr(R2, R2, Operand(R3));
  __ stxr(R4, R2, R0, kEightBytes);
  __ cbnz(&retry, R4);
  // Fall-through with R1 containing new hash value (untagged).
  __ SmiTag(R0, R1);
  __ ret();
  __ Bind(&already_set_in_r4);
  __ clrex();
  __ SmiTag(R0, R4);
  __ ret();
}

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(R1);
  __ LoadCompressedSmi(
      R8, FieldAddress(R0, target::String::length_offset()));  // this.length
  __ SmiUntag(R8);
  __ LoadCompressedSmi(
      R9, FieldAddress(R2, target::String::length_offset()));  // other.length
  __ SmiUntag(R9);

  // if (other.length == 0) return true;
  __ cmp(R9, Operand(0));
  __ b(return_true, EQ);

  // if (start < 0) return false;
  __ cmp(R1, Operand(0));
  __ b(return_false, LT);

  // if (start + other.length > this.length) return false;
  __ add(R3, R1, Operand(R9));
  __ cmp(R3, Operand(R8));
  __ b(return_false, GT);

  if (receiver_cid == kOneByteStringCid) {
    __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R1));
  }
  if (other_cid == kOneByteStringCid) {
    __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
  }

  // i = 0
  __ LoadImmediate(R3, 0);

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  __ ldr(R10, Address(R0, 0),
         receiver_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
  // other.codeUnitAt(i)
  __ ldr(R11, Address(R2, 0),
         other_cid == kOneByteStringCid ? kUnsignedByte : kUnsignedTwoBytes);
  __ cmp(R10, Operand(R11));
  __ b(return_false, NE);

  // i++, while (i < len)
  __ add(R3, R3, Operand(1));
  __ add(R0, R0, Operand(receiver_cid == kOneByteStringCid ? 1 : 2));
  __ add(R2, R2, Operand(other_cid == kOneByteStringCid ? 1 : 2));
  __ cmp(R3, Operand(R9));
  __ b(&loop, LT);

  __ b(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // this
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // start
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // other

  __ BranchIfNotSmi(R1, normal_ir_body);

  __ CompareClassId(R2, kOneByteStringCid);
  __ b(normal_ir_body, NE);

  __ CompareClassId(R0, kOneByteStringCid);
  __ b(&try_two_byte, NE);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(R0, kTwoByteStringCid);
  __ b(normal_ir_body, NE);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;

  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // String.
  __ BranchIfNotSmi(R1, normal_ir_body);  // Index is not a Smi.
  // Range check.
  __ LoadCompressedSmi(R2, FieldAddress(R0, target::String::length_offset()));
  __ cmp(R1, Operand(R2));
  __ b(normal_ir_body, CS);  // Runtime throws exception.

  __ CompareClassId(R0, kOneByteStringCid);
  __ b(&try_two_byte_string, NE);
  __ SmiUntag(R1);
  __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
  __ ldr(R1, Address(R0, R1), kUnsignedByte);
  __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ b(normal_ir_body, GE);
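  // One-character strings for code units below kNumberOfOneCharCodeSymbols
  // are canonical symbols, so charAt is answered with a load from the VM's
  // predefined symbols table instead of allocating a new string; anything
  // larger takes the slow path.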
1622 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1623 __ AddImmediate(
1624 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1625 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1626 __ ret();
1627
1628 __ Bind(&try_two_byte_string);
1629 __ CompareClassId(R0, kTwoByteStringCid);
1630 __ b(normal_ir_body, NE);
1631 ASSERT(kSmiTagShift == 1);
1632 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1633#if !defined(DART_COMPRESSED_POINTERS)
1634 __ ldr(R1, Address(R0, R1), kUnsignedTwoBytes);
1635#else
1636 // Upper half of a compressed Smi is garbage.
1637 __ ldr(R1, Address(R0, R1, SXTW, Address::Unscaled), kUnsignedTwoBytes);
1638#endif
1639 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1640 __ b(normal_ir_body, GE);
1641 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1642 __ AddImmediate(
1643 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1644 __ ldr(R0, Address(R0, R1, UXTX, Address::Scaled));
1645 __ ret();
1646
1647 __ Bind(normal_ir_body);
1648}
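
StringBaseCharAt only succeeds for code units that have preallocated one-character symbol objects; anything at or above kNumberOfOneCharCodeSymbols falls back to the runtime. A rough C++ rendering of that fast path, where the table layout and the constant values are assumptions for illustration only:

#include <cstdint>

struct PredefinedSymbols {
  void** table;  // Canonical one-character symbols, indexed by code unit.
};

constexpr int kNumberOfOneCharCodeSymbols = 256;  // placeholder value
constexpr int kNullCharCodeSymbolOffset = 0;      // placeholder value

void* OneCharSymbolOrNull(const PredefinedSymbols& symbols,
                          uint16_t code_unit) {
  if (code_unit >= kNumberOfOneCharCodeSymbols) return nullptr;  // slow path
  // Mirrors the AddImmediate plus word-scaled load above.
  return symbols.table[kNullCharCodeSymbolOffset + code_unit];
}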
1649
1650void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
1651 Label* normal_ir_body) {
1652 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1653 __ LoadCompressedSmi(R0, FieldAddress(R0, target::String::length_offset()));
1654 __ cmp(R0, Operand(target::ToRawSmi(0)), kObjectBytes);
1655 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1656 __ LoadObject(TMP, CastHandle<Object>(FalseObject()));
1657 __ csel(R0, TMP, R0, NE);
1658 __ ret();
1659}
1660
1661void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1662 Label* normal_ir_body) {
1663 Label compute_hash;
1664 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // OneByteString object.
 1665 __ ldr(R0, FieldAddress(R1, target::String::hash_offset()),
 1666 kUnsignedFourBytes);
 1667 __ adds(R0, R0, Operand(R0)); // Smi tag the hash code, setting Z flag.
1668 __ b(&compute_hash, EQ);
1669 __ ret(); // Return if already computed.
1670
1671 __ Bind(&compute_hash);
1672 __ LoadCompressedSmi(R2, FieldAddress(R1, target::String::length_offset()));
1673 __ SmiUntag(R2);
1674
1675 __ mov(R3, ZR);
1676 __ AddImmediate(R6, R1,
1677 target::OneByteString::data_offset() - kHeapObjectTag);
1678 // R1: Instance of OneByteString.
1679 // R2: String length, untagged integer.
1680 // R3: Loop counter, untagged integer.
1681 // R6: String data.
1682 // R0: Hash code, untagged integer.
1683
1684 Label loop, done;
1685 __ Bind(&loop);
1686 __ cmp(R3, Operand(R2));
1687 __ b(&done, EQ);
1688 // Add to hash code: (hash_ is uint32)
 1689 // Get one character (ch).
1690 __ ldr(R7, Address(R6, R3), kUnsignedByte);
1691 // R7: ch.
 1692 __ add(R3, R3, Operand(1));
 1693 __ CombineHashes(R0, R7);
 1694 __ cmp(R3, Operand(R2));
 1695 __ b(&loop, NE);
1696
1697 __ Bind(&done);
1698 // Finalize. Allow a zero result to combine checks from empty string branch.
1699 __ FinalizeHashForSize(target::String::kHashBits, R0);
1700
1701 // R1: Untagged address of header word (ldxr/stxr do not support offsets).
1702 __ sub(R1, R1, Operand(kHeapObjectTag));
1703 __ LslImmediate(R0, R0, target::UntaggedObject::kHashTagPos);
1704 Label retry;
1705 __ Bind(&retry);
1706 __ ldxr(R2, R1, kEightBytes);
1707 __ orr(R2, R2, Operand(R0));
1708 __ stxr(R4, R2, R1, kEightBytes);
1709 __ cbnz(&retry, R4);
1710
1711 __ LsrImmediate(R0, R0, target::UntaggedObject::kHashTagPos);
1712 __ SmiTag(R0);
1713 __ ret();
1714}
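
The hash loop folds each byte into R0 via CombineHashes, truncates with FinalizeHashForSize, and publishes the result by OR-ing it into the object's header word under a load-exclusive/store-exclusive retry loop. A C++ sketch of the same flow, assuming the Jenkins-style mixing used by the VM's hash utilities and modeling the header as a plain atomic (both are assumptions here):

#include <atomic>
#include <cstdint>

uint32_t Combine(uint32_t hash, uint32_t ch) {
  hash += ch;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t Finalize(uint32_t hash, int bits) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  // Truncate to String::kHashBits; as the comment above notes, a zero
  // result is allowed here.
  return hash & ((1u << bits) - 1);
}

// Equivalent of the ldxr/orr/stxr/cbnz sequence on the tags word.
void PublishHash(std::atomic<uint64_t>* tags, uint32_t hash, int hash_pos) {
  tags->fetch_or(static_cast<uint64_t>(hash) << hash_pos,
                 std::memory_order_relaxed);
}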
1715
1716// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1717// 'length-reg' (R2) contains the desired length as a _Smi or _Mint.
1718// Returns new string as tagged pointer in R0.
1719static void TryAllocateString(Assembler* assembler,
1720 classid_t cid,
1721 intptr_t max_elements,
1722 Label* ok,
1723 Label* failure) {
1724 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1725 const Register length_reg = R2;
1726 // _Mint length: call to runtime to produce error.
1727 __ BranchIfNotSmi(length_reg, failure);
1728 // negative length: call to runtime to produce error.
1729 // Too big: call to runtime to allocate old.
1730 __ CompareImmediate(length_reg, target::ToRawSmi(max_elements), kObjectBytes);
1731 __ b(failure, HI);
1732
1733 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
1734 __ mov(R6, length_reg); // Save the length register.
1735 if (cid == kOneByteStringCid) {
1736 // Untag length.
1737 __ SmiUntag(length_reg, length_reg);
1738 } else {
1739 // Untag length and multiply by element size -> no-op.
1740 ASSERT(kSmiTagSize == 1);
1741 }
 1742 const intptr_t fixed_size_plus_alignment_padding =
 1743 target::String::InstanceSize() +
 1744 target::ObjectAlignment::kObjectAlignment - 1;
 1745 __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
 1746 __ andi(length_reg, length_reg,
 1747 Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
 1748
1749 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1750
1751 // length_reg: allocation size.
1752 __ adds(R1, R0, Operand(length_reg));
1753 __ b(failure, CS); // Fail on unsigned overflow.
1754
1755 // Check if the allocation fits into the remaining space.
1756 // R0: potential new object start.
1757 // R1: potential next object start.
1758 // R2: allocation size.
1759 __ ldr(R7, Address(THR, target::Thread::end_offset()));
1760 __ cmp(R1, Operand(R7));
1761 __ b(failure, CS);
1762 __ CheckAllocationCanary(R0);
1763
1764 // Successfully allocated the object(s), now update top to point to
1765 // next object start and initialize the object.
1766 __ str(R1, Address(THR, target::Thread::top_offset()));
1767 __ AddImmediate(R0, kHeapObjectTag);
 1768 // Clear the last double word to ensure string comparison doesn't need to
 1769 // specially handle the remainder of strings whose lengths are not
 1770 // multiples of the double-word size.
1771 __ stp(ZR, ZR, Address(R1, -2 * target::kWordSize, Address::PairOffset));
1772
1773 // Initialize the tags.
1774 // R0: new object start as a tagged pointer.
1775 // R1: new object end address.
1776 // R2: allocation size.
1777 {
 1778 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
 1779 target::ObjectAlignment::kObjectAlignmentLog2;
 1780
1781 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
1782 __ LslImmediate(R2, R2, shift);
1783 __ csel(R2, R2, ZR, LS);
1784
1785 // Get the class index and insert it into the tags.
1786 // R2: size and bit tags.
1787 // This also clears the hash, which is in the high word of the tags.
1788 const uword tags =
1789 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1790 __ LoadImmediate(TMP, tags);
1791 __ orr(R2, R2, Operand(TMP));
1792 __ str(R2, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
1793 }
1794
1795#if defined(DART_COMPRESSED_POINTERS)
1796 // Clear out padding caused by alignment gap between length and data.
1797 __ str(ZR, FieldAddress(R0, target::String::length_offset()));
1798#endif
1799 // Set the length field using the saved length (R6).
1800 __ StoreCompressedIntoObjectNoBarrier(
1801 R0, FieldAddress(R0, target::String::length_offset()), R6);
1802 __ b(ok);
1803}
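
TryAllocateString rounds the payload up to the heap's object alignment, bump-allocates against Thread::top/end, and encodes small allocation sizes directly in the header's size-tag field (larger ones store zero, and the size is derived from the length field instead). The arithmetic in plain C++, with placeholder constants rather than the VM's actual values:

#include <cstdint>

constexpr intptr_t kObjectAlignment = 16;     // placeholder
constexpr intptr_t kObjectAlignmentLog2 = 4;  // placeholder
constexpr intptr_t kSizeTagPos = 8;           // placeholder
constexpr intptr_t kSizeTagMaxSize = 255 << kObjectAlignmentLog2;  // placeholder

intptr_t AllocationSize(intptr_t instance_size, intptr_t payload_bytes) {
  // AddImmediate(len, fixed + align - 1) followed by andi(len, ~(align - 1)).
  return (instance_size + payload_bytes + kObjectAlignment - 1) &
         ~(kObjectAlignment - 1);
}

uint64_t SizeTagBits(intptr_t alloc_size) {
  // The csel stores ZR when the size exceeds the encodable maximum.
  if (alloc_size > kSizeTagMaxSize) return 0;
  return static_cast<uint64_t>(alloc_size)
         << (kSizeTagPos - kObjectAlignmentLog2);
}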
1804
1805// Arg0: OneByteString (receiver).
1806// Arg1: Start index as Smi.
1807// Arg2: End index as Smi.
1808// The indexes must be valid.
1809void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1810 Label* normal_ir_body) {
1811 const intptr_t kStringOffset = 2 * target::kWordSize;
1812 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
1813 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
1814 Label ok;
1815
1816 __ ldr(R2, Address(SP, kEndIndexOffset));
1817 __ ldr(TMP, Address(SP, kStartIndexOffset));
1818 __ orr(R3, R2, Operand(TMP));
1819 __ BranchIfNotSmi(R3, normal_ir_body); // 'start', 'end' not Smi.
1820
1821 __ sub(R2, R2, Operand(TMP));
1822 TryAllocateString(assembler, kOneByteStringCid,
1823 target::OneByteString::kMaxNewSpaceElements, &ok,
1824 normal_ir_body);
1825 __ Bind(&ok);
1826 // R0: new string as tagged pointer.
1827 // Copy string.
1828 __ ldr(R3, Address(SP, kStringOffset));
1829 __ ldr(R1, Address(SP, kStartIndexOffset));
1830 __ SmiUntag(R1);
1831 __ add(R3, R3, Operand(R1));
1832 // Calculate start address and untag (- 1).
1833 __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
1834
1835 // R3: Start address to copy from (untagged).
1836 // R1: Untagged start index.
1837 __ ldr(R2, Address(SP, kEndIndexOffset));
1838 __ SmiUntag(R2);
1839 __ sub(R2, R2, Operand(R1));
1840
1841 // R3: Start address to copy from (untagged).
1842 // R2: Untagged number of bytes to copy.
1843 // R0: Tagged result string.
1844 // R6: Pointer into R3.
1845 // R7: Pointer into R0.
1846 // R1: Scratch register.
1847 Label loop, done;
1848 __ cmp(R2, Operand(0));
1849 __ b(&done, LE);
1850 __ mov(R6, R3);
1851 __ mov(R7, R0);
1852 __ Bind(&loop);
1853 __ ldr(R1, Address(R6), kUnsignedByte);
1854 __ AddImmediate(R6, 1);
1855 __ sub(R2, R2, Operand(1));
1856 __ cmp(R2, Operand(0));
 1857 __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
 1858 kUnsignedByte);
 1859 __ AddImmediate(R7, 1);
1860 __ b(&loop, GT);
1861
1862 __ Bind(&done);
1863 __ ret();
1864 __ Bind(normal_ir_body);
1865}
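
After allocation, the substring copy is a straightforward byte loop from the source's start offset into the new payload. Equivalently in C++, with raw pointers standing in for the tagged objects:

#include <cstddef>
#include <cstdint>

void CopyRange(uint8_t* dst, const uint8_t* src,
               std::size_t start, std::size_t end) {
  if (end <= start) return;  // cmp R2, #0 / b(&done, LE)
  for (std::size_t i = 0; i < end - start; i++) {
    dst[i] = src[start + i];
  }
}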
1866
1867void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1868 Label* normal_ir_body) {
1869 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1870 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1871 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
1872 __ SmiUntag(R1);
1873 __ SmiUntag(R2);
1874 __ AddImmediate(R3, R0,
1875 target::OneByteString::data_offset() - kHeapObjectTag);
1876 __ str(R2, Address(R3, R1), kUnsignedByte);
1877 __ ret();
1878}
1879
1880void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1881 Label* normal_ir_body) {
1882 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1883 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1884 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
1885 // Untag index and multiply by element size -> no-op.
1886 __ SmiUntag(R2);
1887 __ AddImmediate(R3, R0,
1888 target::TwoByteString::data_offset() - kHeapObjectTag);
1889#if !defined(DART_COMPRESSED_POINTERS)
1890 __ str(R2, Address(R3, R1), kUnsignedTwoBytes);
1891#else
1892 // Upper half of a compressed Smi is garbage.
1893 __ str(R2, Address(R3, R1, SXTW, Address::Unscaled), kUnsignedTwoBytes);
1894#endif
1895 __ ret();
1896}
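
With compressed pointers only the low 32 bits of the Smi index are meaningful, so the addressing mode sign-extends the register (SXTW) before adding it to the base. A sketch of that extension (hypothetical helper):

#include <cstdint>

int64_t SignExtendCompressedSmi(uint64_t reg) {
  // The upper half of the register may be garbage; keep only the low word.
  return static_cast<int64_t>(static_cast<int32_t>(reg));  // SXTW
}

Note that the index stays Smi-tagged: for a two-byte string the x2 from the tag doubles as the element-size scaling, which is why the code above untags only the value, not the index.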
1897
1898void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1899 Label* normal_ir_body) {
1900 Label ok;
1901
1902 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1903#if defined(DART_COMPRESSED_POINTERS)
1904 __ sxtw(R2, R2);
1905#endif
1906 TryAllocateString(assembler, kOneByteStringCid,
1907 target::OneByteString::kMaxNewSpaceElements, &ok,
1908 normal_ir_body);
1909
1910 __ Bind(&ok);
1911 __ ret();
1912
1913 __ Bind(normal_ir_body);
1914}
1915
1916void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1917 Label* normal_ir_body) {
1918 Label ok;
1919
1920 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1921#if defined(DART_COMPRESSED_POINTERS)
1922 __ sxtw(R2, R2);
1923#endif
1924 TryAllocateString(assembler, kTwoByteStringCid,
1925 target::TwoByteString::kMaxNewSpaceElements, &ok,
1926 normal_ir_body);
1927
1928 __ Bind(&ok);
1929 __ ret();
1930
1931 __ Bind(normal_ir_body);
1932}
1933
1934void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1935 Label* normal_ir_body) {
1936 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1937 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1938
1939 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1940 kOneByteStringCid);
1941}
1942
1943void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1944 Label* normal_ir_body) {
1945 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1946 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1947
1948 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1949 kTwoByteStringCid);
1950}
1951
1952void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1953 Label* normal_ir_body,
1954 bool sticky) {
1955 if (FLAG_interpret_irregexp) return;
1956
1957 const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
1958 const intptr_t kStringParamOffset = 1 * target::kWordSize;
1959 // start_index smi is located at offset 0.
1960
1961 // Incoming registers:
1962 // R0: Function. (Will be reloaded with the specialized matcher function.)
1963 // R4: Arguments descriptor. (Will be preserved.)
1964 // R5: Unknown. (Must be GC safe on tail call.)
1965
 1966 // Load the specialized function pointer into R0. Leverage the fact that
 1967 // the string CIDs, as well as the stored function pointers, are in sequence.
1968 __ ldr(R2, Address(SP, kRegExpParamOffset));
1969 __ ldr(R1, Address(SP, kStringParamOffset));
1970 __ LoadClassId(R1, R1);
1971 __ AddImmediate(R1, -kOneByteStringCid);
1972#if !defined(DART_COMPRESSED_POINTERS)
1973 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
1974#else
1975 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2 - 1));
1976#endif
1977 __ LoadCompressed(FUNCTION_REG,
1978 FieldAddress(R1, target::RegExp::function_offset(
1979 kOneByteStringCid, sticky)));
1980
1981 // Registers are now set up for the lazy compile stub. It expects the function
1982 // in R0, the argument descriptor in R4, and IC-Data in R5.
1983 __ eor(R5, R5, Operand(R5));
1984
1985 // Tail-call the function.
1986 __ LoadCompressed(
1987 CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
1988 __ ldr(R1,
1989 FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1990 __ br(R1);
1991}
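
The matcher selection relies on the one- and two-byte string class ids being consecutive, with the RegExp object storing its specialized functions in the same order, so one subtraction plus a scaled load picks the right function. Schematically, in C++ with an illustrative layout:

#include <cstdint>

struct RegExpFunctions {
  void* matcher[2];  // [0] one-byte subject, [1] two-byte subject (per stickiness)
};

void* SelectMatcher(const RegExpFunctions& re, int32_t subject_cid,
                    int32_t one_byte_string_cid) {
  // AddImmediate(R1, -kOneByteStringCid), then a word-scaled add into the load.
  return re.matcher[subject_cid - one_byte_string_cid];
}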
1992
1993void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1994 Label* normal_ir_body) {
1995 __ LoadIsolate(R0);
1996 __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
1997 __ ret();
1998}
1999
2000void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
2001 Label* normal_ir_body) {
2002 __ LoadIsolate(R0);
2003 __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
2004 __ ret();
2005}
2006
2007void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
2008 Label* normal_ir_body) {
2009#if !defined(SUPPORT_TIMELINE)
2010 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2011 __ ret();
2012#else
2013 // Load TimelineStream*.
2014 __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
2015 // Load uintptr_t from TimelineStream*.
2016 __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
2017 __ cmp(R0, Operand(0));
2018 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
2019 __ LoadObject(TMP, CastHandle<Object>(TrueObject()));
2020 __ csel(R0, TMP, R0, NE);
2021 __ ret();
2022#endif
2023}
2024
2025void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
2026 Label* normal_ir_body) {
2027#if !defined(SUPPORT_TIMELINE)
2028 __ LoadImmediate(R0, target::ToRawSmi(0));
2029 __ ret();
2030#else
2031 __ ldr(R0, Address(THR, target::Thread::next_task_id_offset()));
2032 __ add(R1, R0, Operand(1));
2033 __ str(R1, Address(THR, target::Thread::next_task_id_offset()));
2034 __ SmiTag(R0); // Ignore loss of precision.
2035 __ ret();
2036#endif
2037}
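
Timeline_getNextTaskId is a read-increment-store on a thread-local counter that returns the previous value as a Smi; tagging shifts the value left by one, which is the precision loss the comment accepts. In C++ terms (sketch only):

#include <cstdint>

intptr_t NextTaskIdAsSmi(int64_t* next_task_id) {
  int64_t id = (*next_task_id)++;
  return static_cast<intptr_t>(id << 1);  // SmiTag; the top bits may be lost
}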
2038
2039#undef __
2040
2041} // namespace compiler
2042} // namespace dart
2043
2044#endif // defined(TARGET_ARCH_ARM64)