asm_intrinsifier_arm.cc
1// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/class_id.h"
11#include "vm/compiler/asm_intrinsifier.h"
12#include "vm/compiler/assembler/assembler.h"
13
14namespace dart {
15namespace compiler {
16
17// When entering intrinsics code:
18// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
19// CODE_REG: Callee's Code in JIT / not passed in AOT
20// R4: Arguments descriptor
21// LR: Return address
22// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
23// i.e. if the intrinsified method always executes a return.
24// The FP register should not be modified, because it is used by the profiler.
25// The PP and THR registers (see constants_arm.h) must be preserved.
26
27#define __ assembler->
28
29// Loads the arguments from the stack into R0 and R1, and jumps to the label
30// not_smi if either of them is not a Smi.
31static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
32 __ ldr(R0, Address(SP, +0 * target::kWordSize));
33 __ ldr(R1, Address(SP, +1 * target::kWordSize));
34 __ orr(TMP, R0, Operand(R1));
35 __ tst(TMP, Operand(kSmiTagMask));
36 __ b(not_smi, NE);
37}
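
// Editorial note (not part of the upstream source): since kSmiTag == 0 and the
// tag lives in the low bit, OR-ing the two words and testing the tag mask
// checks both arguments at once, roughly:
//
//   bool BothSmis(uintptr_t a, uintptr_t b) {
//     return ((a | b) & kSmiTagMask) == 0;  // low tag bit clear in both words
//   }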
38
39void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
40 ASSERT(kSmiTagShift == 1);
41 ASSERT(kSmiTag == 0);
42 TestBothArgumentsSmis(assembler, normal_ir_body);
43 __ CompareImmediate(R0, target::ToRawSmi(target::kSmiBits));
44 __ b(normal_ir_body, HI);
45
46 __ SmiUntag(R0);
47
48 // Check for overflow by shifting left and shifting back arithmetically.
49 // If the result is different from the original, there was overflow.
50 __ mov(IP, Operand(R1, LSL, R0));
51 __ cmp(R1, Operand(IP, ASR, R0));
52
53 // No overflow, result in R0.
54 __ mov(R0, Operand(R1, LSL, R0), EQ);
55 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, EQ));
56 // Arguments are Smi but the shift produced an overflow to Mint.
57 __ CompareImmediate(R1, 0);
58 __ b(normal_ir_body, LT);
59 __ SmiUntag(R1);
60
61 // Pull off high bits that will be shifted off of R1 by making a mask
62 // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
63 // high bits = (((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
64 // lo bits = R1 << R0
65 __ LoadImmediate(R8, 1);
66 __ mov(R8, Operand(R8, LSL, R0)); // R8 <- 1 << R0
67 __ sub(R8, R8, Operand(1)); // R8 <- R8 - 1
68 __ rsb(R3, R0, Operand(32)); // R3 <- 32 - R0
69 __ mov(R8, Operand(R8, LSL, R3)); // R8 <- R8 << R3
70 __ and_(R8, R1, Operand(R8)); // R8 <- R8 & R1
71 __ mov(R8, Operand(R8, LSR, R3)); // R8 <- R8 >> R3
72 // Now R8 has the bits that fall off of R1 on a left shift.
73 __ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
74
75 const Class& mint_class = MintClass();
76 __ TryAllocate(mint_class, normal_ir_body, Assembler::kFarJump, R0, R2);
77
78 __ str(R1, FieldAddress(R0, target::Mint::value_offset()));
79 __ str(R8,
80 FieldAddress(R0, target::Mint::value_offset() + target::kWordSize));
81 __ Ret();
82 __ Bind(normal_ir_body);
83}
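
// Editorial sketch (not part of the upstream source) of the overflow check
// used above: shift left, shift back arithmetically, and compare against the
// original value.
//
//   bool ShlOverflows(int32_t x, int32_t n) {  // 0 <= n < 32
//     int32_t shifted = x << n;    // may drop significant high bits
//     return (shifted >> n) != x;  // round trip differs => overflow
//   }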
84
85static void Get64SmiOrMint(Assembler* assembler,
86 Register res_hi,
87 Register res_lo,
88 Register reg,
89 Label* not_smi_or_mint) {
90 Label not_smi, done;
91 __ tst(reg, Operand(kSmiTagMask));
92 __ b(&not_smi, NE);
93 __ SmiUntag(reg);
94
95 // Sign extend to 64 bit
96 __ mov(res_lo, Operand(reg));
97 __ mov(res_hi, Operand(res_lo, ASR, 31));
98 __ b(&done);
99
100 __ Bind(&not_smi);
101 __ CompareClassId(reg, kMintCid, res_lo);
102 __ b(not_smi_or_mint, NE);
103
104 // Mint.
105 __ ldr(res_lo, FieldAddress(reg, target::Mint::value_offset()));
106 __ ldr(res_hi,
107 FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
108 __ Bind(&done);
109}
110
111static void CompareIntegers(Assembler* assembler,
112 Label* normal_ir_body,
113 Condition true_condition) {
114 Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
115 TestBothArgumentsSmis(assembler, &try_mint_smi);
116 // R0 contains the right argument, R1 contains the left argument.
117
118 __ cmp(R1, Operand(R0));
119 __ b(&is_true, true_condition);
120 __ Bind(&is_false);
121 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
122 __ Ret();
123 __ Bind(&is_true);
124 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
125 __ Ret();
126
127 // 64-bit comparison
128 Condition hi_true_cond, hi_false_cond, lo_false_cond;
129 switch (true_condition) {
130 case LT:
131 case LE:
132 hi_true_cond = LT;
133 hi_false_cond = GT;
134 lo_false_cond = (true_condition == LT) ? CS : HI;
135 break;
136 case GT:
137 case GE:
138 hi_true_cond = GT;
139 hi_false_cond = LT;
140 lo_false_cond = (true_condition == GT) ? LS : CC;
141 break;
142 default:
143 UNREACHABLE();
144 hi_true_cond = hi_false_cond = lo_false_cond = VS;
145 }
146
147 __ Bind(&try_mint_smi);
148 // Get left as 64 bit integer.
149 Get64SmiOrMint(assembler, R3, R2, R1, normal_ir_body);
150 // Get right as 64 bit integer.
151 Get64SmiOrMint(assembler, R1, R8, R0, normal_ir_body);
152 // R3: left high.
153 // R2: left low.
154 // R1: right high.
155 // R8: right low.
156
157 __ cmp(R3, Operand(R1)); // Compare left hi, right high.
158 __ b(&is_false, hi_false_cond);
159 __ b(&is_true, hi_true_cond);
160 __ cmp(R2, Operand(R8)); // Compare left lo, right lo.
161 __ b(&is_false, lo_false_cond);
162 // Else is true.
163 __ b(&is_true);
164
165 __ Bind(normal_ir_body);
166}
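
// Editorial sketch (not part of the upstream source): the Mint path above is
// the usual word-wise 64-bit comparison, signed on the high words and unsigned
// on the low words:
//
//   bool Less(int64_t a, int64_t b) {
//     int32_t a_hi = (int32_t)(a >> 32), b_hi = (int32_t)(b >> 32);
//     if (a_hi != b_hi) return a_hi < b_hi;  // signed high words decide
//     return (uint32_t)a < (uint32_t)b;      // low words compare unsigned
//   }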
167
168void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
169 Label* normal_ir_body) {
170 CompareIntegers(assembler, normal_ir_body, LT);
171}
172
173void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
174 Label* normal_ir_body) {
175 CompareIntegers(assembler, normal_ir_body, GT);
176}
177
178void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
179 Label* normal_ir_body) {
180 CompareIntegers(assembler, normal_ir_body, LE);
181}
182
183void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
184 Label* normal_ir_body) {
185 CompareIntegers(assembler, normal_ir_body, GE);
186}
187
188// This is called for Smi and Mint receivers. The right argument
189// can be Smi, Mint or double.
190void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
191 Label* normal_ir_body) {
192 Label true_label, check_for_mint;
193 // For integer receiver '===' check first.
194 __ ldr(R0, Address(SP, 0 * target::kWordSize));
195 __ ldr(R1, Address(SP, 1 * target::kWordSize));
196 __ cmp(R0, Operand(R1));
197 __ b(&true_label, EQ);
198
199 __ orr(R2, R0, Operand(R1));
200 __ tst(R2, Operand(kSmiTagMask));
201 __ b(&check_for_mint, NE); // If R0 or R1 is not a smi do Mint checks.
202
203 // Both arguments are smi, '===' is good enough.
204 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
205 __ Ret();
206 __ Bind(&true_label);
207 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
208 __ Ret();
209
210 // At least one of the arguments was not Smi.
211 Label receiver_not_smi;
212 __ Bind(&check_for_mint);
213
214 __ tst(R1, Operand(kSmiTagMask)); // Check receiver.
215 __ b(&receiver_not_smi, NE);
216
217 // Left (receiver) is Smi, return false if right is not Double.
218 // Note that an instance of Mint never contains a value that can be
219 // represented by Smi.
220
221 __ CompareClassId(R0, kDoubleCid, R2);
222 __ b(normal_ir_body, EQ);
223 __ LoadObject(R0,
224 CastHandle<Object>(FalseObject())); // Smi == Mint -> false.
225 __ Ret();
226
227 __ Bind(&receiver_not_smi);
228 // R1: receiver.
229
230 __ CompareClassId(R1, kMintCid, R2);
231 __ b(normal_ir_body, NE);
232 // Receiver is Mint, return false if right is Smi.
233 __ tst(R0, Operand(kSmiTagMask));
234 __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
235 READS_RETURN_ADDRESS_FROM_LR(
236 __ bx(LR, EQ)); // TODO(srdjan): Implement Mint == Mint comparison.
237
238 __ Bind(normal_ir_body);
239}
240
241void AsmIntrinsifier::Integer_equal(Assembler* assembler,
242 Label* normal_ir_body) {
243 Integer_equalToInteger(assembler, normal_ir_body);
244}
245
246void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
247 Label* normal_ir_body) {
248 __ ldr(R0, Address(SP, 0 * target::kWordSize));
249 __ SmiUntag(R0);
250 // XOR with sign bit to complement bits if value is negative.
251 __ eor(R0, R0, Operand(R0, ASR, 31));
252 __ clz(R0, R0);
253 __ rsb(R0, R0, Operand(32));
254 __ SmiTag(R0);
255 __ Ret();
256}
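
// Editorial note (not part of the upstream source): the eor/clz/rsb sequence
// above computes Smi.bitLength branch-free; XOR-ing with the sign folds
// negative values onto their one's complement, so one count works for both
// signs:
//
//   int SmiBitLength(int32_t x) {
//     uint32_t v = (uint32_t)(x ^ (x >> 31));     // complement if negative
//     return v == 0 ? 0 : 32 - __builtin_clz(v);  // ARM clz(0) yields 32
//   }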
257
258void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
259 // static void _lsh(Uint32List x_digits, int x_used, int n,
260 // Uint32List r_digits)
261
262 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
263 __ ldrd(R0, R1, SP, 2 * target::kWordSize);
264 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
265 __ ldrd(R2, R3, SP, 0 * target::kWordSize);
266 __ SmiUntag(R3);
267 // R4 = n ~/ _DIGIT_BITS
268 __ Asr(R4, R3, Operand(5));
269 // R8 = &x_digits[0]
270 __ add(R8, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
271 // R6 = &r_digits[1]
272 __ add(R6, R2,
273 Operand(target::TypedData::payload_offset() - kHeapObjectTag +
274 kBytesPerBigIntDigit));
275 // R2 = &x_digits[x_used]
276 __ add(R2, R8, Operand(R0, LSL, 1));
277 // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
278 __ add(R4, R4, Operand(R0, ASR, 1));
279 __ add(R6, R6, Operand(R4, LSL, 2));
280 // R1 = n % _DIGIT_BITS
281 __ and_(R1, R3, Operand(31));
282 // R0 = 32 - R1
283 __ rsb(R0, R1, Operand(32));
284 __ mov(R9, Operand(0));
285 Label loop;
286 __ Bind(&loop);
287 __ ldr(R4, Address(R2, -kBytesPerBigIntDigit, Address::PreIndex));
288 __ orr(R9, R9, Operand(R4, LSR, R0));
289 __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
290 __ mov(R9, Operand(R4, LSL, R1));
291 __ teq(R2, Operand(R8));
292 __ b(&loop, NE);
293 __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
294 __ LoadObject(R0, NullObject());
295 __ Ret();
296}
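
// Editorial note (not part of the upstream source): a sketch of _lsh in the
// style of the pseudo code comments used elsewhere in this file. The loop
// above walks the digits from most to least significant, carrying the bits
// shifted out of each digit into the next result digit:
//
//   static void _lsh(Uint32List x_digits, int x_used, int n,
//                    Uint32List r_digits) {
//     int digit_shift = n ~/ 32, bit_shift = n % 32;
//     uint32_t carry = 0;
//     for (int i = x_used - 1; i >= 0; i--) {
//       uint32_t d = x_digits[i];
//       r_digits[i + digit_shift + 1] = (d >> (32 - bit_shift)) | carry;
//       carry = d << bit_shift;
//     }
//     r_digits[digit_shift] = carry;
//   }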
297
298void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
299 // static void _rsh(Uint32List x_digits, int x_used, int n,
300 // Uint32List r_digits)
301
302 // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
303 __ ldrd(R0, R1, SP, 2 * target::kWordSize);
304 // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
305 __ ldrd(R2, R3, SP, 0 * target::kWordSize);
306 __ SmiUntag(R3);
307 // R4 = n ~/ _DIGIT_BITS
308 __ Asr(R4, R3, Operand(5));
309 // R6 = &r_digits[0]
310 __ add(R6, R2, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
311 // R2 = &x_digits[n ~/ _DIGIT_BITS]
312 __ add(R2, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
313 __ add(R2, R2, Operand(R4, LSL, 2));
314 // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
315 __ add(R4, R4, Operand(1));
316 __ rsb(R4, R4, Operand(R0, ASR, 1));
317 __ add(R8, R6, Operand(R4, LSL, 2));
318 // R1 = n % _DIGIT_BITS
319 __ and_(R1, R3, Operand(31));
320 // R0 = 32 - R1
321 __ rsb(R0, R1, Operand(32));
322 // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
323 __ ldr(R9, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
324 __ mov(R9, Operand(R9, LSR, R1));
325 Label loop_entry;
326 __ b(&loop_entry);
327 Label loop;
328 __ Bind(&loop);
329 __ ldr(R4, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
330 __ orr(R9, R9, Operand(R4, LSL, R0));
331 __ str(R9, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));
332 __ mov(R9, Operand(R4, LSR, R1));
333 __ Bind(&loop_entry);
334 __ teq(R6, Operand(R8));
335 __ b(&loop, NE);
336 __ str(R9, Address(R6, 0));
337 __ LoadObject(R0, NullObject());
338 __ Ret();
339}
340
341void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
342 Label* normal_ir_body) {
343 // static void _absAdd(Uint32List digits, int used,
344 // Uint32List a_digits, int a_used,
345 // Uint32List r_digits)
346
347 // R0 = used, R1 = digits
348 __ ldrd(R0, R1, SP, 3 * target::kWordSize);
349 // R1 = &digits[0]
350 __ add(R1, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
351
352 // R2 = a_used, R3 = a_digits
353 __ ldrd(R2, R3, SP, 1 * target::kWordSize);
354 // R3 = &a_digits[0]
355 __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
356
357 // R8 = r_digits
358 __ ldr(R8, Address(SP, 0 * target::kWordSize));
359 // R8 = &r_digits[0]
360 __ add(R8, R8, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
361
362 // R2 = &digits[a_used >> 1], a_used is Smi.
363 __ add(R2, R1, Operand(R2, LSL, 1));
364
365 // R6 = &digits[used >> 1], used is Smi.
366 __ add(R6, R1, Operand(R0, LSL, 1));
367
368 __ adds(R4, R4, Operand(0)); // carry flag = 0
369 Label add_loop;
370 __ Bind(&add_loop);
371 // Loop a_used times, a_used > 0.
372 __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
373 __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
374 __ adcs(R4, R4, Operand(R9));
375 __ teq(R1, Operand(R2)); // Does not affect carry flag.
376 __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
377 __ b(&add_loop, NE);
378
379 Label last_carry;
380 __ teq(R1, Operand(R6)); // Does not affect carry flag.
381 __ b(&last_carry, EQ); // If used - a_used == 0.
382
383 Label carry_loop;
384 __ Bind(&carry_loop);
385 // Loop used - a_used times, used - a_used > 0.
386 __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
387 __ adcs(R4, R4, Operand(0));
388 __ teq(R1, Operand(R6)); // Does not affect carry flag.
389 __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
390 __ b(&carry_loop, NE);
391
392 __ Bind(&last_carry);
393 __ mov(R4, Operand(0));
394 __ adc(R4, R4, Operand(0));
395 __ str(R4, Address(R8, 0));
396
397 __ LoadObject(R0, NullObject());
398 __ Ret();
399}
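
// Editorial sketch (not part of the upstream source): _absAdd is a classic
// ripple-carry loop over 32-bit digits; adcs folds the carry flag into each
// addition, which in portable code looks roughly like:
//
//   uint32_t carry = 0;
//   for (int i = 0; i < a_used; i++) {     // digit-wise add
//     uint64_t t = (uint64_t)digits[i] + a_digits[i] + carry;
//     r_digits[i] = (uint32_t)t;
//     carry = (uint32_t)(t >> 32);         // 0 or 1
//   }
//   for (int i = a_used; i < used; i++) {  // propagate the remaining carry
//     uint64_t t = (uint64_t)digits[i] + carry;
//     r_digits[i] = (uint32_t)t;
//     carry = (uint32_t)(t >> 32);
//   }
//   r_digits[used] = carry;                // final carry digit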
400
401void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
402 Label* normal_ir_body) {
403 // static void _absSub(Uint32List digits, int used,
404 // Uint32List a_digits, int a_used,
405 // Uint32List r_digits)
406
407 // R0 = used, R1 = digits
408 __ ldrd(R0, R1, SP, 3 * target::kWordSize);
409 // R1 = &digits[0]
410 __ add(R1, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
411
412 // R2 = a_used, R3 = a_digits
413 __ ldrd(R2, R3, SP, 1 * target::kWordSize);
414 // R3 = &a_digits[0]
415 __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
416
417 // R8 = r_digits
418 __ ldr(R8, Address(SP, 0 * target::kWordSize));
419 // R8 = &r_digits[0]
420 __ add(R8, R8, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
421
422 // R2 = &digits[a_used >> 1], a_used is Smi.
423 __ add(R2, R1, Operand(R2, LSL, 1));
424
425 // R6 = &digits[used >> 1], used is Smi.
426 __ add(R6, R1, Operand(R0, LSL, 1));
427
428 __ subs(R4, R4, Operand(0)); // carry flag = 1
429 Label sub_loop;
430 __ Bind(&sub_loop);
431 // Loop a_used times, a_used > 0.
432 __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
433 __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
434 __ sbcs(R4, R4, Operand(R9));
435 __ teq(R1, Operand(R2)); // Does not affect carry flag.
436 __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
437 __ b(&sub_loop, NE);
438
439 Label done;
440 __ teq(R1, Operand(R6)); // Does not affect carry flag.
441 __ b(&done, EQ); // If used - a_used == 0.
442
443 Label carry_loop;
444 __ Bind(&carry_loop);
445 // Loop used - a_used times, used - a_used > 0.
446 __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
447 __ sbcs(R4, R4, Operand(0));
448 __ teq(R1, Operand(R6)); // Does not affect carry flag.
449 __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
450 __ b(&carry_loop, NE);
451
452 __ Bind(&done);
453 __ LoadObject(R0, NullObject());
454 __ Ret();
455}
456
457void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
458 Label* normal_ir_body) {
459 // Pseudo code:
460 // static int _mulAdd(Uint32List x_digits, int xi,
461 // Uint32List m_digits, int i,
462 // Uint32List a_digits, int j, int n) {
463 // uint32_t x = x_digits[xi >> 1]; // xi is Smi.
464 // if (x == 0 || n == 0) {
465 // return 1;
466 // }
467 // uint32_t* mip = &m_digits[i >> 1]; // i is Smi.
468 // uint32_t* ajp = &a_digits[j >> 1]; // j is Smi.
469 // uint32_t c = 0;
470 // SmiUntag(n);
471 // do {
472 // uint32_t mi = *mip++;
473 // uint32_t aj = *ajp;
474 // uint64_t t = x*mi + aj + c; // 32-bit * 32-bit -> 64-bit.
475 // *ajp++ = low32(t);
476 // c = high32(t);
477 // } while (--n > 0);
478 // while (c != 0) {
479 // uint64_t t = *ajp + c;
480 // *ajp++ = low32(t);
481 // c = high32(t); // c == 0 or 1.
482 // }
483 // return 1;
484 // }
485
486 Label done;
487 // R3 = x, no_op if x == 0
488 __ ldrd(R0, R1, SP, 5 * target::kWordSize); // R0 = xi as Smi, R1 = x_digits.
489 __ add(R1, R1, Operand(R0, LSL, 1));
490 __ ldr(R3, FieldAddress(R1, target::TypedData::payload_offset()));
491 __ tst(R3, Operand(R3));
492 __ b(&done, EQ);
493
494 // R8 = SmiUntag(n), no_op if n == 0
495 __ ldr(R8, Address(SP, 0 * target::kWordSize));
496 __ Asrs(R8, R8, Operand(kSmiTagSize));
497 __ b(&done, EQ);
498
499 // R4 = mip = &m_digits[i >> 1]
500 __ ldrd(R0, R1, SP, 3 * target::kWordSize); // R0 = i as Smi, R1 = m_digits.
501 __ add(R1, R1, Operand(R0, LSL, 1));
502 __ add(R4, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
503
504 // R9 = ajp = &a_digits[j >> 1]
505 __ ldrd(R0, R1, SP, 1 * target::kWordSize); // R0 = j as Smi, R1 = a_digits.
506 __ add(R1, R1, Operand(R0, LSL, 1));
507 __ add(R9, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
508
509 // R1 = c = 0
510 __ mov(R1, Operand(0));
511
512 Label muladd_loop;
513 __ Bind(&muladd_loop);
514 // x: R3
515 // mip: R4
516 // ajp: R9
517 // c: R1
518 // n: R8
519
520 // uint32_t mi = *mip++
521 __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));
522
523 // uint32_t aj = *ajp
524 __ ldr(R0, Address(R9, 0));
525
526 // uint64_t t = x*mi + aj + c
527 __ umaal(R0, R1, R2, R3); // R1:R0 = R2*R3 + R1 + R0.
528
529 // *ajp++ = low32(t) = R0
530 __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
531
532 // c = high32(t) = R1
533
534 // while (--n > 0)
535 __ subs(R8, R8, Operand(1)); // --n
536 __ b(&muladd_loop, NE);
537
538 __ tst(R1, Operand(R1));
539 __ b(&done, EQ);
540
541 // *ajp++ += c
542 __ ldr(R0, Address(R9, 0));
543 __ adds(R0, R0, Operand(R1));
544 __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
545 __ b(&done, CC);
546
547 Label propagate_carry_loop;
548 __ Bind(&propagate_carry_loop);
549 __ ldr(R0, Address(R9, 0));
550 __ adds(R0, R0, Operand(1));
551 __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
552 __ b(&propagate_carry_loop, CS);
553
554 __ Bind(&done);
555 __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
556 __ Ret();
557}
558
559void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
560 Label* normal_ir_body) {
561 // Pseudo code:
562 // static int _sqrAdd(Uint32List x_digits, int i,
563 // Uint32List a_digits, int used) {
564 // uint32_t* xip = &x_digits[i >> 1]; // i is Smi.
565 // uint32_t x = *xip++;
566 // if (x == 0) return 1;
567 // uint32_t* ajp = &a_digits[i]; // j == 2*i, i is Smi.
568 // uint32_t aj = *ajp;
569 // uint64_t t = x*x + aj;
570 // *ajp++ = low32(t);
571 // uint64_t c = high32(t);
572 // int n = ((used - i) >> 1) - 1; // used and i are Smi.
573 // while (--n >= 0) {
574 // uint32_t xi = *xip++;
575 // uint32_t aj = *ajp;
576 // uint96_t t = 2*x*xi + aj + c; // 2-bit * 32-bit * 32-bit -> 65-bit.
577 // *ajp++ = low32(t);
578 // c = high64(t); // 33-bit.
579 // }
580 // uint32_t aj = *ajp;
581 // uint64_t t = aj + c; // 32-bit + 33-bit -> 34-bit.
582 // *ajp++ = low32(t);
583 // *ajp = high32(t);
584 // return 1;
585 // }
586
587 // The code has no bailout path, so we can use R6 (CODE_REG) freely.
588
589 // R4 = xip = &x_digits[i >> 1]
590 __ ldrd(R2, R3, SP, 2 * target::kWordSize); // R2 = i as Smi, R3 = x_digits
591 __ add(R3, R3, Operand(R2, LSL, 1));
592 __ add(R4, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
593
594 // R3 = x = *xip++, return if x == 0
595 Label x_zero;
596 __ ldr(R3, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));
597 __ tst(R3, Operand(R3));
598 __ b(&x_zero, EQ);
599
600 // R6 = ajp = &a_digits[i]
601 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // a_digits
602 __ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
603 __ add(R6, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
604
605 // R8:R0 = t = x*x + *ajp
606 __ ldr(R0, Address(R6, 0));
607 __ mov(R8, Operand(0));
608 __ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0.
609
610 // *ajp++ = low32(t) = R0
611 __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));
612
613 // R8 = low32(c) = high32(t)
614 // R9 = high32(c) = 0
615 __ mov(R9, Operand(0));
616
617 // int n = used - i - 1; while (--n >= 0) ...
618 __ ldr(R0, Address(SP, 0 * target::kWordSize)); // used is Smi
619 __ sub(TMP, R0, Operand(R2));
620 __ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
621 __ rsbs(TMP, R0, Operand(TMP, ASR, kSmiTagSize));
622
623 Label loop, done;
624 __ b(&done, MI);
625
626 __ Bind(&loop);
627 // x: R3
628 // xip: R4
629 // ajp: R6
630 // c: R9:R8
631 // t: R2:R1:R0 (not live at loop entry)
632 // n: TMP
633
634 // uint32_t xi = *xip++
635 __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));
636
637 // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c
638 __ umull(R0, R1, R2, R3); // R1:R0 = R2*R3.
639 __ adds(R0, R0, Operand(R0));
640 __ adcs(R1, R1, Operand(R1));
641 __ mov(R2, Operand(0));
642 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi.
643 __ adds(R0, R0, Operand(R8));
644 __ adcs(R1, R1, Operand(R9));
645 __ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c.
646 __ ldr(R8, Address(R6, 0)); // R8 = aj = *ajp.
647 __ adds(R0, R0, Operand(R8));
648 __ adcs(R8, R1, Operand(0));
649 __ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj.
650
651 // *ajp++ = low32(t) = R0
652 __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));
653
654 // while (--n >= 0)
655 __ subs(TMP, TMP, Operand(1)); // --n
656 __ b(&loop, PL);
657
658 __ Bind(&done);
659 // uint32_t aj = *ajp
660 __ ldr(R0, Address(R6, 0));
661
662 // uint64_t t = aj + c
663 __ adds(R8, R8, Operand(R0));
664 __ adc(R9, R9, Operand(0));
665
666 // *ajp = low32(t) = R8
667 // *(ajp + 1) = high32(t) = R9
668 __ strd(R8, R9, R6, 0);
669
670 __ Bind(&x_zero);
671 __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
672 __ Ret();
673}
674
675void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
676 Label* normal_ir_body) {
677 // No unsigned 64-bit / 32-bit divide instruction.
678}
679
680void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
681 Label* normal_ir_body) {
682 // Pseudo code:
683 // static int _mulMod(Uint32List args, Uint32List digits, int i) {
684 // uint32_t rho = args[_RHO]; // _RHO == 2.
685 // uint32_t d = digits[i >> 1]; // i is Smi.
686 // uint64_t t = rho*d;
687 // args[_MU] = t mod DIGIT_BASE; // _MU == 4.
688 // return 1;
689 // }
690
691 // R4 = args
692 __ ldr(R4, Address(SP, 2 * target::kWordSize)); // args
693
694 // R3 = rho = args[2]
695 __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset() +
696 2 * kBytesPerBigIntDigit));
697
698 // R2 = digits[i >> 1]
699 __ ldrd(R0, R1, SP, 0 * target::kWordSize); // R0 = i as Smi, R1 = digits
700 __ add(R1, R1, Operand(R0, LSL, 1));
701 __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset()));
702
703 // R1:R0 = t = rho*d
704 __ umull(R0, R1, R2, R3);
705
706 // args[4] = t mod DIGIT_BASE = low32(t)
707 __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
708 4 * kBytesPerBigIntDigit));
709
710 __ mov(R0, Operand(target::ToRawSmi(1))); // One digit processed.
711 __ Ret();
712}
713
714// Check if the last argument is a double, jump to label 'is_smi' if smi
715// (easy to convert to double), otherwise jump to label 'not_double_smi'.
716// Returns the last argument in R0.
717static void TestLastArgumentIsDouble(Assembler* assembler,
718 Label* is_smi,
719 Label* not_double_smi) {
720 __ ldr(R0, Address(SP, 0 * target::kWordSize));
721 __ tst(R0, Operand(kSmiTagMask));
722 __ b(is_smi, EQ);
723 __ CompareClassId(R0, kDoubleCid, R1);
724 __ b(not_double_smi, NE);
725 // Fall through with Double in R0.
726}
727
728// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of unknown
729// type. Return true or false object in the register R0. Any NaN argument
730// returns false. Any non-double arg1 causes control flow to fall through to the
731// slow case (compiled method body).
732static void CompareDoubles(Assembler* assembler,
733 Label* normal_ir_body,
734 Condition true_condition) {
735 Label is_smi, double_op;
736
737 TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
738 // Both arguments are double, right operand is in R0.
739
740 __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
741 __ Bind(&double_op);
742 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
743 __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
744
745 __ vcmpd(D0, D1);
746 __ vmstat();
747 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
748 // Return false if D0 or D1 was NaN before checking true condition.
749 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, VS));
750 __ LoadObject(R0, CastHandle<Object>(TrueObject()), true_condition);
751 __ Ret();
752
753 __ Bind(&is_smi); // Convert R0 to a double.
754 __ SmiUntag(R0);
755 __ vmovsr(S0, R0);
756 __ vcvtdi(D1, S0);
757 __ b(&double_op); // Then do the comparison.
758 __ Bind(normal_ir_body);
759}
760
761void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
762 Label* normal_ir_body) {
763 CompareDoubles(assembler, normal_ir_body, HI);
764}
765
766void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
767 Label* normal_ir_body) {
768 CompareDoubles(assembler, normal_ir_body, CS);
769}
770
771void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
772 Label* normal_ir_body) {
773 CompareDoubles(assembler, normal_ir_body, CC);
774}
775
776void AsmIntrinsifier::Double_equal(Assembler* assembler,
777 Label* normal_ir_body) {
778 CompareDoubles(assembler, normal_ir_body, EQ);
779}
780
781void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
782 Label* normal_ir_body) {
783 CompareDoubles(assembler, normal_ir_body, LS);
784}
785
786// Expects left argument to be double (receiver). Right argument is unknown.
787// Both arguments are on stack.
788static void DoubleArithmeticOperations(Assembler* assembler,
789 Label* normal_ir_body,
790 Token::Kind kind) {
791 Label is_smi, double_op;
792
793 TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
794 // Both arguments are double, right operand is in R0.
795 __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
796 __ Bind(&double_op);
797 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // Left argument.
798 __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
799 switch (kind) {
800 case Token::kADD:
801 __ vaddd(D0, D0, D1);
802 break;
803 case Token::kSUB:
804 __ vsubd(D0, D0, D1);
805 break;
806 case Token::kMUL:
807 __ vmuld(D0, D0, D1);
808 break;
809 case Token::kDIV:
810 __ vdivd(D0, D0, D1);
811 break;
812 default:
813 UNREACHABLE();
814 }
815 const Class& double_class = DoubleClass();
816 __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
817 R1); // Result register.
818 __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
819 __ Ret();
820 __ Bind(&is_smi); // Convert R0 to a double.
821 __ SmiUntag(R0);
822 __ vmovsr(S0, R0);
823 __ vcvtdi(D1, S0);
824 __ b(&double_op);
825 __ Bind(normal_ir_body);
826}
827
828void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
829 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
830}
831
832void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
833 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
834}
835
836void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
837 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
838}
839
840void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
841 DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
842}
843
844// Left is double, right is integer (Mint or Smi)
845void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
846 Label* normal_ir_body) {
847 Label fall_through;
848 // Only smis allowed.
849 __ ldr(R0, Address(SP, 0 * target::kWordSize));
850 __ tst(R0, Operand(kSmiTagMask));
851 __ b(normal_ir_body, NE);
852 // Is Smi.
853 __ SmiUntag(R0);
854 __ vmovsr(S0, R0);
855 __ vcvtdi(D1, S0);
856 __ ldr(R0, Address(SP, 1 * target::kWordSize));
857 __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
858 __ vmuld(D0, D0, D1);
859 const Class& double_class = DoubleClass();
860 __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
861 R1); // Result register.
862 __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
863 __ Ret();
864 __ Bind(normal_ir_body);
865}
866
867void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
868 Label* normal_ir_body) {
869 Label fall_through;
870
871 __ ldr(R0, Address(SP, 0 * target::kWordSize));
872 __ tst(R0, Operand(kSmiTagMask));
873 __ b(normal_ir_body, NE);
874 // Is Smi.
875 __ SmiUntag(R0);
876 __ vmovsr(S0, R0);
877 __ vcvtdi(D0, S0);
878 const Class& double_class = DoubleClass();
879 __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
880 R1); // Result register.
881 __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
882 __ Ret();
883 __ Bind(normal_ir_body);
884}
885
886void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
887 Label* normal_ir_body) {
888 __ ldr(R0, Address(SP, 0 * target::kWordSize));
889 __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
890 __ vcmpd(D0, D0);
891 __ vmstat();
892 __ LoadObject(R0, CastHandle<Object>(FalseObject()), VC);
893 __ LoadObject(R0, CastHandle<Object>(TrueObject()), VS);
894 __ Ret();
895}
896
897void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
898 Label* normal_ir_body) {
899 __ ldr(R0, Address(SP, 0 * target::kWordSize));
900 // R1 <- value[0:31], R2 <- value[32:63]
901 __ LoadFieldFromOffset(R1, R0, target::Double::value_offset());
902 __ LoadFieldFromOffset(R2, R0,
903 target::Double::value_offset() + target::kWordSize);
904
905 // If the low word isn't 0, then it isn't infinity.
906 __ cmp(R1, Operand(0));
907 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
908 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE)); // Return if NE.
909
910 // Mask off the sign bit.
911 __ AndImmediate(R2, R2, 0x7FFFFFFF);
912 // Compare with +infinity.
913 __ CompareImmediate(R2, 0x7FF00000);
914 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
915 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
916 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
917 __ Ret();
918}
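
// Editorial note (not part of the upstream source): the two word loads above
// implement the standard IEEE-754 bit test for infinity,
//
//   bool IsInfinite(uint64_t bits) {
//     return (bits & 0x7FFFFFFFFFFFFFFFull) == 0x7FF0000000000000ull;
//   }
//
// which on a 32-bit core becomes "low word == 0" plus "high word, with the
// sign bit masked off, == 0x7FF00000".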
919
920void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
921 Label* normal_ir_body) {
922 Label is_false, is_true, is_zero;
923 __ ldr(R0, Address(SP, 0 * target::kWordSize));
924 __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
925 __ vcmpdz(D0);
926 __ vmstat();
927 __ b(&is_false, VS); // NaN -> false.
928 __ b(&is_zero, EQ); // Check for negative zero.
929 __ b(&is_false, CS); // >= 0 -> false.
930
931 __ Bind(&is_true);
932 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
933 __ Ret();
934
935 __ Bind(&is_false);
936 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
937 __ Ret();
938
939 __ Bind(&is_zero);
940 // Check for negative zero by looking at the sign bit.
941 __ vmovrrd(R0, R1, D0); // R1:R0 <- D0, so sign bit is in bit 31 of R1.
942 __ mov(R1, Operand(R1, LSR, 31));
943 __ tst(R1, Operand(1));
944 __ b(&is_true, NE); // Sign bit set.
945 __ b(&is_false);
946}
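
// Editorial note (not part of the upstream source): vcmpdz cannot tell -0.0
// from +0.0 (they compare equal), so the zero case above inspects the IEEE-754
// sign bit directly, roughly:
//
//   bool IsNegative(double d) {
//     if (d != d) return false;         // NaN
//     if (d != 0.0) return d < 0.0;
//     uint64_t bits;
//     memcpy(&bits, &d, sizeof(bits));  // assumes <string.h>
//     return (bits >> 63) != 0;         // sign bit set => -0.0
//   }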
947
948void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
949 Label* normal_ir_body) {
950 __ ldr(R0, Address(SP, 0 * target::kWordSize));
951 __ ldr(R1, Address(SP, 1 * target::kWordSize));
952 __ cmp(R0, Operand(R1));
953 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
954 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
955 __ Ret();
956}
957
958static void JumpIfInteger(Assembler* assembler,
959 Register cid,
960 Register tmp,
961 Label* target) {
962 assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
963 target);
964}
965
966static void JumpIfNotInteger(Assembler* assembler,
967 Register cid,
968 Register tmp,
969 Label* target) {
970 assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
971 target);
972}
973
974static void JumpIfString(Assembler* assembler,
975 Register cid,
976 Register tmp,
977 Label* target) {
978 assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
979 Assembler::kIfInRange, target);
980}
981
982static void JumpIfNotString(Assembler* assembler,
983 Register cid,
984 Register tmp,
985 Label* target) {
986 assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
987 Assembler::kIfNotInRange, target);
988}
989
990static void JumpIfNotList(Assembler* assembler,
991 Register cid,
992 Register tmp,
993 Label* target) {
994 assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
995 Assembler::kIfNotInRange, target);
996}
997
998static void JumpIfType(Assembler* assembler,
999 Register cid,
1000 Register tmp,
1001 Label* target) {
1002 COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
1003 (kRecordTypeCid == kTypeCid + 2));
1004 assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
1005 Assembler::kIfInRange, target);
1006}
1007
1008static void JumpIfNotType(Assembler* assembler,
1009 Register cid,
1010 Register tmp,
1011 Label* target) {
1012 COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
1013 (kRecordTypeCid == kTypeCid + 2));
1014 assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
1015 Assembler::kIfNotInRange, target);
1016}
1017
1018// Return type quickly for simple types (not parameterized and not signature).
1019void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
1020 Label* normal_ir_body) {
1021 Label use_declaration_type, not_double, not_integer, not_string;
1022 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1023 __ LoadClassIdMayBeSmi(R1, R0);
1024
1025 __ CompareImmediate(R1, kClosureCid);
1026 __ b(normal_ir_body, EQ); // Instance is a closure.
1027
1028 __ CompareImmediate(R1, kRecordCid);
1029 __ b(normal_ir_body, EQ); // Instance is a record.
1030
1031 __ CompareImmediate(R1, kNumPredefinedCids);
1032 __ b(&use_declaration_type, HI);
1033
1034 __ LoadIsolateGroup(R2);
1035 __ LoadFromOffset(R2, R2, target::IsolateGroup::object_store_offset());
1036
1037 __ CompareImmediate(R1, kDoubleCid);
1038 __ b(&not_double, NE);
1039 __ LoadFromOffset(R0, R2, target::ObjectStore::double_type_offset());
1040 __ Ret();
1041
1042 __ Bind(&not_double);
1043 JumpIfNotInteger(assembler, R1, R0, &not_integer);
1044 __ LoadFromOffset(R0, R2, target::ObjectStore::int_type_offset());
1045 __ Ret();
1046
1047 __ Bind(&not_integer);
1048 JumpIfNotString(assembler, R1, R0, &not_string);
1049 __ LoadFromOffset(R0, R2, target::ObjectStore::string_type_offset());
1050 __ Ret();
1051
1052 __ Bind(&not_string);
1053 JumpIfNotType(assembler, R1, R0, &use_declaration_type);
1054 __ LoadFromOffset(R0, R2, target::ObjectStore::type_type_offset());
1055 __ Ret();
1056
1057 __ Bind(&use_declaration_type);
1058 __ LoadClassById(R2, R1);
1059 __ ldrh(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()));
1060 __ CompareImmediate(R3, 0);
1061 __ b(normal_ir_body, NE);
1062
1063 __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
1064 __ CompareObject(R0, NullObject());
1065 __ b(normal_ir_body, EQ);
1066 __ Ret();
1067
1068 __ Bind(normal_ir_body);
1069}
1070
1071// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
1072// can be determined by this fast path, it jumps to either equal_* or not_equal.
1073// If classes are equivalent but may be generic, then jumps to
1074// equal_may_be_generic. Clobbers scratch.
1075static void EquivalentClassIds(Assembler* assembler,
1076 Label* normal_ir_body,
1077 Label* equal_may_be_generic,
1078 Label* equal_not_generic,
1079 Label* not_equal,
1080 Register cid1,
1081 Register cid2,
1082 Register scratch,
1083 bool testing_instance_cids) {
1084 Label not_integer, not_integer_or_string, not_integer_or_string_or_list;
1085
1086 // Check if left hand side is a closure. Closures are handled in the runtime.
1087 __ CompareImmediate(cid1, kClosureCid);
1088 __ b(normal_ir_body, EQ);
1089
1090 // Check if left hand side is a record. Records are handled in the runtime.
1091 __ CompareImmediate(cid1, kRecordCid);
1092 __ b(normal_ir_body, EQ);
1093
1094 // Check whether class ids match. If class ids don't match types may still be
1095 // considered equivalent (e.g. multiple string implementation classes map to a
1096 // single String type).
1097 __ cmp(cid1, Operand(cid2));
1098 __ b(equal_may_be_generic, EQ);
1099
1100 // Class ids are different. Check if we are comparing two string types (with
1101 // different representations), two integer types, two list types or two type
1102 // types.
1103 __ CompareImmediate(cid1, kNumPredefinedCids);
1104 __ b(not_equal, HI);
1105
1106 // Check if both are integer types.
1107 JumpIfNotInteger(assembler, cid1, scratch, &not_integer);
1108
1109 // First type is an integer. Check if the second is an integer too.
1110 JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
1111 // Integer types are only equivalent to other integer types.
1112 __ b(not_equal);
1113
1114 __ Bind(&not_integer);
1115 // Check if both are String types.
1116 JumpIfNotString(assembler, cid1, scratch,
1117 testing_instance_cids ? &not_integer_or_string : not_equal);
1118
1119 // First type is String. Check if the second is a string too.
1120 JumpIfString(assembler, cid2, scratch, equal_not_generic);
1121 // String types are only equivalent to other String types.
1122 __ b(not_equal);
1123
1124 if (testing_instance_cids) {
1125 __ Bind(&not_integer_or_string);
1126 // Check if both are List types.
1127 JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);
1128
1129 // First type is a List. Check if the second is a List too.
1130 JumpIfNotList(assembler, cid2, scratch, not_equal);
1131 ASSERT(compiler::target::Array::type_arguments_offset() ==
1132 compiler::target::GrowableObjectArray::type_arguments_offset());
1133 __ b(equal_may_be_generic);
1134
1135 __ Bind(&not_integer_or_string_or_list);
1136 // Check if the first type is a Type. If it is not then types are not
1137 // equivalent because they have different class ids and they are not String
1138 // or integer or List or Type.
1139 JumpIfNotType(assembler, cid1, scratch, not_equal);
1140
1141 // First type is a Type. Check if the second is a Type too.
1142 JumpIfType(assembler, cid2, scratch, equal_not_generic);
1143 // Type types are only equivalent to other Type types.
1144 __ b(not_equal);
1145 }
1146}
1147
1148void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
1149 Label* normal_ir_body) {
1150 __ ldm(IA, SP, (1 << R1 | 1 << R2));
1151 __ LoadClassIdMayBeSmi(R1, R1);
1152 __ LoadClassIdMayBeSmi(R2, R2);
1153
1154 Label equal_may_be_generic, equal, not_equal;
1155 EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
1156 &not_equal, R1, R2, R0,
1157 /* testing_instance_cids = */ true);
1158
1159 __ Bind(&equal_may_be_generic);
1160 // Classes are equivalent and neither is a closure class.
1161 // Check if there are no type arguments. In this case we can return true.
1162 // Otherwise fall through into the runtime to handle comparison.
1163 __ LoadClassById(R0, R1);
1164 __ ldr(
1165 R0,
1166 FieldAddress(
1167 R0,
1168 target::Class::host_type_arguments_field_offset_in_words_offset()));
1169 __ CompareImmediate(R0, target::Class::kNoTypeArguments);
1170 __ b(&equal, EQ);
1171
1172 // Compare type arguments, host_type_arguments_field_offset_in_words in R0.
1173 __ ldm(IA, SP, (1 << R1 | 1 << R2));
1174 __ AddImmediate(R1, -kHeapObjectTag);
1175 __ ldr(R1, Address(R1, R0, LSL, target::kWordSizeLog2));
1176 __ AddImmediate(R2, -kHeapObjectTag);
1177 __ ldr(R2, Address(R2, R0, LSL, target::kWordSizeLog2));
1178 __ cmp(R1, Operand(R2));
1179 __ b(normal_ir_body, NE);
1180 // Fall through to equal case if type arguments are equal.
1181
1182 __ Bind(&equal);
1183 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1184 __ Ret();
1185
1186 __ Bind(&not_equal);
1187 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1188 __ Ret();
1189
1190 __ Bind(normal_ir_body);
1191}
1192
1193void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
1194 Label* normal_ir_body) {
1195 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1196 __ ldr(R0, FieldAddress(R0, target::String::hash_offset()));
1197 __ cmp(R0, Operand(0));
1198 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
1199 __ Bind(normal_ir_body); // Hash not yet computed.
1200}
1201
1202void AsmIntrinsifier::Type_equality(Assembler* assembler,
1203 Label* normal_ir_body) {
1204 Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids;
1205
1206 __ ldm(IA, SP, (1 << R1 | 1 << R2));
1207 __ cmp(R1, Operand(R2));
1208 __ b(&equal, EQ);
1209
1210 // R1 might not be a Type object, so check that first (R2 should be though,
1211 // since this is a method on the Type class).
1212 __ LoadClassIdMayBeSmi(R0, R1);
1213 __ CompareImmediate(R0, kTypeCid);
1214 __ b(normal_ir_body, NE);
1215
1216 // Check if types are syntactically equal.
1217 __ LoadTypeClassId(R3, R1);
1218 __ LoadTypeClassId(R4, R2);
1219 // We are not testing instance cids, but type class cids of Type instances.
1220 EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
1221 &equiv_cids, &not_equal, R3, R4, R0,
1222 /* testing_instance_cids = */ false);
1223
1224 __ Bind(&equiv_cids_may_be_generic);
1225 // Compare type arguments in Type instances.
1226 __ ldr(R3, FieldAddress(R1, target::Type::arguments_offset()));
1227 __ ldr(R4, FieldAddress(R2, target::Type::arguments_offset()));
1228 __ cmp(R3, Operand(R4));
1229 __ b(normal_ir_body, NE);
1230 // Fall through to check nullability if type arguments are equal.
1231
1232 // Check nullability.
1233 __ Bind(&equiv_cids);
1234 __ LoadAbstractTypeNullability(R1, R1);
1235 __ LoadAbstractTypeNullability(R2, R2);
1236 __ cmp(R1, Operand(R2));
1237 __ b(&not_equal, NE);
1238 // Fall through to equal case if nullability is equal.
1239
1240 __ Bind(&equal);
1241 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1242 __ Ret();
1243
1244 __ Bind(&not_equal);
1245 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1246 __ Ret();
1247
1248 __ Bind(normal_ir_body);
1249}
1250
1251void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
1252 Label* normal_ir_body) {
1253 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1254 __ ldr(R0, FieldAddress(R0, target::AbstractType::hash_offset()));
1255 __ cmp(R0, Operand(0));
1256 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
1257 __ Bind(normal_ir_body); // Hash not yet computed.
1258}
1259
1260void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
1261 Label* normal_ir_body) {
1262 __ ldm(IA, SP, (1 << R1 | 1 << R2));
1263 __ cmp(R1, Operand(R2));
1264 __ b(normal_ir_body, NE);
1265
1266 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1267 __ Ret();
1268
1269 __ Bind(normal_ir_body);
1270}
1271
1272void GenerateSubstringMatchesSpecialization(Assembler* assembler,
1273 intptr_t receiver_cid,
1274 intptr_t other_cid,
1275 Label* return_true,
1276 Label* return_false) {
1277 __ SmiUntag(R1);
1278 __ ldr(R8, FieldAddress(R0, target::String::length_offset())); // this.length
1279 __ SmiUntag(R8);
1280 __ ldr(R9,
1281 FieldAddress(R2, target::String::length_offset())); // other.length
1282 __ SmiUntag(R9);
1283
1284 // if (other.length == 0) return true;
1285 __ cmp(R9, Operand(0));
1286 __ b(return_true, EQ);
1287
1288 // if (start < 0) return false;
1289 __ cmp(R1, Operand(0));
1290 __ b(return_false, LT);
1291
1292 // if (start + other.length > this.length) return false;
1293 __ add(R3, R1, Operand(R9));
1294 __ cmp(R3, Operand(R8));
1295 __ b(return_false, GT);
1296
1297 if (receiver_cid == kOneByteStringCid) {
1298 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1299 __ add(R0, R0, Operand(R1));
1300 } else {
1301 ASSERT(receiver_cid == kTwoByteStringCid);
1302 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1303 __ add(R0, R0, Operand(R1));
1304 __ add(R0, R0, Operand(R1));
1305 }
1306 if (other_cid == kOneByteStringCid) {
1307 __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
1308 } else {
1309 ASSERT(other_cid == kTwoByteStringCid);
1310 __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
1311 }
1312
1313 // i = 0
1314 __ LoadImmediate(R3, 0);
1315
1316 // do
1317 Label loop;
1318 __ Bind(&loop);
1319
1320 if (receiver_cid == kOneByteStringCid) {
1321 __ ldrb(R4, Address(R0, 0)); // this.codeUnitAt(i + start)
1322 } else {
1323 __ ldrh(R4, Address(R0, 0)); // this.codeUnitAt(i + start)
1324 }
1325 if (other_cid == kOneByteStringCid) {
1326 __ ldrb(TMP, Address(R2, 0)); // other.codeUnitAt(i)
1327 } else {
1328 __ ldrh(TMP, Address(R2, 0)); // other.codeUnitAt(i)
1329 }
1330 __ cmp(R4, Operand(TMP));
1331 __ b(return_false, NE);
1332
1333 // i++, while (i < len)
1334 __ AddImmediate(R3, 1);
1335 __ AddImmediate(R0, receiver_cid == kOneByteStringCid ? 1 : 2);
1336 __ AddImmediate(R2, other_cid == kOneByteStringCid ? 1 : 2);
1337 __ cmp(R3, Operand(R9));
1338 __ b(&loop, LT);
1339
1340 __ b(return_true);
1341}
1342
1343// bool _substringMatches(int start, String other)
1344// This intrinsic handles a OneByteString or TwoByteString receiver with a
1345// OneByteString other.
1346void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
1347 Label* normal_ir_body) {
1348 Label return_true, return_false, try_two_byte;
1349 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // this
1350 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // start
1351 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // other
1352 __ Push(R4); // Make ARGS_DESC_REG available.
1353
1354 __ tst(R1, Operand(kSmiTagMask));
1355 __ b(normal_ir_body, NE); // 'start' is not a Smi.
1356
1357 __ CompareClassId(R2, kOneByteStringCid, R3);
1358 __ b(normal_ir_body, NE);
1359
1360 __ CompareClassId(R0, kOneByteStringCid, R3);
1361 __ b(&try_two_byte, NE);
1362
1363 GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
1364 kOneByteStringCid, &return_true,
1365 &return_false);
1366
1367 __ Bind(&try_two_byte);
1368 __ CompareClassId(R0, kTwoByteStringCid, R3);
1369 __ b(normal_ir_body, NE);
1370
1371 GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
1372 kOneByteStringCid, &return_true,
1373 &return_false);
1374
1375 __ Bind(&return_true);
1376 __ Pop(R4);
1377 __ LoadObject(R0, CastHandle<Object>(TrueObject()));
1378 __ Ret();
1379
1380 __ Bind(&return_false);
1381 __ Pop(R4);
1382 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1383 __ Ret();
1384
1385 __ Bind(normal_ir_body);
1386 __ Pop(R4);
1387}
1388
1389void AsmIntrinsifier::Object_getHash(Assembler* assembler,
1390 Label* normal_ir_body) {
1391 UNREACHABLE();
1392}
1393
1394void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
1395 Label* normal_ir_body) {
1396 Label try_two_byte_string;
1397
1398 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Index.
1399 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // String.
1400 __ tst(R1, Operand(kSmiTagMask));
1401 __ b(normal_ir_body, NE); // Index is not a Smi.
1402 // Range check.
1403 __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
1404 __ cmp(R1, Operand(R2));
1405 __ b(normal_ir_body, CS); // Runtime throws exception.
1406
1407 __ CompareClassId(R0, kOneByteStringCid, R3);
1408 __ b(&try_two_byte_string, NE);
1409 __ SmiUntag(R1);
1410 __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
1411 __ ldrb(R1, Address(R0, R1));
1412 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1413 __ b(normal_ir_body, GE);
1414 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1415 __ AddImmediate(
1416 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1417 __ ldr(R0, Address(R0, R1, LSL, 2));
1418 __ Ret();
1419
1420 __ Bind(&try_two_byte_string);
1421 __ CompareClassId(R0, kTwoByteStringCid, R3);
1422 __ b(normal_ir_body, NE);
1423 ASSERT(kSmiTagShift == 1);
1424 __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
1425 __ ldrh(R1, Address(R0, R1));
1426 __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
1427 __ b(normal_ir_body, GE);
1428 __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
1429 __ AddImmediate(
1430 R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
1431 __ ldr(R0, Address(R0, R1, LSL, 2));
1432 __ Ret();
1433
1434 __ Bind(normal_ir_body);
1435}
1436
1437void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
1438 Label* normal_ir_body) {
1439 __ ldr(R0, Address(SP, 0 * target::kWordSize));
1440 __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
1441 __ cmp(R0, Operand(target::ToRawSmi(0)));
1442 __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
1443 __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
1444 __ Ret();
1445}
1446
1447void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1448 Label* normal_ir_body) {
1449 __ ldr(R1, Address(SP, 0 * target::kWordSize));
1450 __ ldr(R0, FieldAddress(R1, target::String::hash_offset()));
1451 __ cmp(R0, Operand(0));
1452 READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE)); // Return if already computed.
1453 __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
1454 __ SmiUntag(R2);
1455 __ mov(R3, Operand(0));
1456 __ AddImmediate(R8, R1,
1457 target::OneByteString::data_offset() - kHeapObjectTag);
1458
1459 // R1: Instance of OneByteString.
1460 // R2: String length, untagged integer.
1461 // R3: Loop counter, untagged integer.
1462 // R8: String data.
1463 // R0: Hash code, untagged integer.
1464
1465 Label loop, done;
1466 __ Bind(&loop);
1467 __ cmp(R3, Operand(R2));
1468 __ b(&done, EQ);
1469 // Add to hash code: (hash_ is uint32)
1470 // Get one character (ch).
1471 __ ldrb(TMP, Address(R8, 0));
1472 // TMP: ch.
1473 __ add(R3, R3, Operand(1));
1474 __ add(R8, R8, Operand(1));
1475 __ CombineHashes(R0, TMP);
1476 __ b(&loop);
1477
1478 __ Bind(&done);
1479 // Finalize. Allow a zero result to combine checks from empty string branch.
1480 __ FinalizeHashForSize(target::String::kHashBits, R0);
1481 __ SmiTag(R0);
1482 __ StoreIntoSmiField(FieldAddress(R1, target::String::hash_offset()), R0);
1483 __ Ret();
1484}
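
// Editorial sketch (not part of the upstream source): the loop above is the
// usual combine/finalize string hash, roughly
//
//   uint32_t hash = 0;
//   for (intptr_t i = 0; i < length; i++) {
//     hash = CombineHashes(hash, data[i]);         // see hash.h
//   }
//   hash = FinalizeHash(hash, String::kHashBits);  // nonzero, kHashBits wide
//
// with the result Smi-tagged and cached in the string's hash field.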
1485
1486// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1487// 'length-reg' (R2) contains the desired length as a _Smi or _Mint.
1488// Returns new string as tagged pointer in R0.
1489static void TryAllocateString(Assembler* assembler,
1490 classid_t cid,
1491 intptr_t max_elements,
1492 Label* ok,
1493 Label* failure) {
1494 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1495 const Register length_reg = R2;
1496 // _Mint length: call to runtime to produce error.
1497 __ BranchIfNotSmi(length_reg, failure);
1498 // Negative length: call to runtime to produce error.
1499 // Too big: call to runtime to allocate old.
1500 __ CompareImmediate(length_reg, target::ToRawSmi(max_elements));
1501 __ b(failure, HI);
1502
1503 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
1504 __ mov(R8, Operand(length_reg)); // Save the length register.
1505 if (cid == kOneByteStringCid) {
1506 __ SmiUntag(length_reg);
1507 } else {
1508 // Untag length and multiply by element size -> no-op.
1509 }
1510 const intptr_t fixed_size_plus_alignment_padding =
1511 target::String::InstanceSize() +
1512 target::ObjectAlignment::kObjectAlignment - 1;
1513 __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
1514 __ bic(length_reg, length_reg,
1515 Operand(target::ObjectAlignment::kObjectAlignment - 1));
1516
1517 __ ldr(R0, Address(THR, target::Thread::top_offset()));
1518
1519 // length_reg: allocation size.
1520 __ adds(R1, R0, Operand(length_reg));
1521 __ b(failure, CS); // Fail on unsigned overflow.
1522
1523 // Check if the allocation fits into the remaining space.
1524 // R0: potential new object start.
1525 // R1: potential next object start.
1526 // R2: allocation size.
1527 __ ldr(TMP, Address(THR, target::Thread::end_offset()));
1528 __ cmp(R1, Operand(TMP));
1529 __ b(failure, CS);
1530 __ CheckAllocationCanary(R0);
1531
1532 // Successfully allocated the object(s), now update top to point to
1533 // next object start and initialize the object.
1534 __ str(R1, Address(THR, target::Thread::top_offset()));
1535 __ AddImmediate(R0, kHeapObjectTag);
1536 // Clear last double word to ensure string comparison doesn't need to
1537 // specially handle remainder of strings with lengths not factors of double
1538 // offsets.
1539 __ LoadImmediate(TMP, 0);
1540 __ str(TMP, Address(R1, -1 * target::kWordSize));
1541 __ str(TMP, Address(R1, -2 * target::kWordSize));
1542
1543 // Initialize the tags.
1544 // R0: new object start as a tagged pointer.
1545 // R1: new object end address.
1546 // R2: allocation size.
1547 {
1548 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1549 target::ObjectAlignment::kObjectAlignmentLog2;
1550
1551 __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
1552 __ mov(R3, Operand(R2, LSL, shift), LS);
1553 __ mov(R3, Operand(0), HI);
1554
1555 // Get the class index and insert it into the tags.
1556 // R3: size and bit tags.
1557 const uword tags =
1558 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1559 __ LoadImmediate(TMP, tags);
1560 __ orr(R3, R3, Operand(TMP));
1561 __ str(R3, FieldAddress(R0, target::Object::tags_offset())); // Store tags.
1562 }
1563
1564 // Set the length field using the saved length (R8).
1565 __ StoreIntoObjectNoBarrier(
1566 R0, FieldAddress(R0, target::String::length_offset()), R8);
1567 // Clear hash.
1568 __ LoadImmediate(TMP, 0);
1569 __ StoreIntoObjectNoBarrier(
1570 R0, FieldAddress(R0, target::String::hash_offset()), TMP);
1571
1572 __ b(ok);
1573}
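
// Editorial note (not part of the upstream source): the size computation in
// TryAllocateString rounds the unpadded size up to the object alignment with
// an add-and-mask, i.e.
//
//   size = (fixed_size + length * element_size + kObjectAlignment - 1)
//          & ~(kObjectAlignment - 1);
//
// and the bump allocation bails out to the runtime when top + size overflows
// or reaches the thread's end pointer.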
1574
1575// Arg0: OneByteString (receiver).
1576// Arg1: Start index as Smi.
1577// Arg2: End index as Smi.
1578// The indexes must be valid.
1579void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1580 Label* normal_ir_body) {
1581 const intptr_t kStringOffset = 2 * target::kWordSize;
1582 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
1583 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
1584 Label ok;
1585
1586 __ ldr(R2, Address(SP, kEndIndexOffset));
1587 __ ldr(TMP, Address(SP, kStartIndexOffset));
1588 __ orr(R3, R2, Operand(TMP));
1589 __ tst(R3, Operand(kSmiTagMask));
1590 __ b(normal_ir_body, NE); // 'start', 'end' not Smi.
1591
1592 __ sub(R2, R2, Operand(TMP));
1593 TryAllocateString(assembler, kOneByteStringCid,
1594 target::OneByteString::kMaxNewSpaceElements, &ok,
1595 normal_ir_body);
1596 __ Bind(&ok);
1597 // R0: new string as tagged pointer.
1598 // Copy string.
1599 __ ldr(R3, Address(SP, kStringOffset));
1600 __ ldr(R1, Address(SP, kStartIndexOffset));
1601 __ SmiUntag(R1);
1602 __ add(R3, R3, Operand(R1));
1603 // Calculate start address and untag (- 1).
1604 __ AddImmediate(R3, target::OneByteString::data_offset() - 1);
1605
1606 // R3: Start address to copy from (untagged).
1607 // R1: Untagged start index.
1608 __ ldr(R2, Address(SP, kEndIndexOffset));
1609 __ SmiUntag(R2);
1610 __ sub(R2, R2, Operand(R1));
1611
1612 // R3: Start address to copy from (untagged).
1613 // R2: Untagged number of bytes to copy.
1614 // R0: Tagged result string.
1615 // R8: Pointer into R3.
1616 // R1: Pointer into R0.
1617 // TMP: Scratch register.
1618 Label loop, done;
1619 __ cmp(R2, Operand(0));
1620 __ b(&done, LE);
1621 __ mov(R8, Operand(R3));
1622 __ mov(R1, Operand(R0));
1623 __ Bind(&loop);
1624 __ ldrb(TMP, Address(R8, 1, Address::PostIndex));
1625 __ sub(R2, R2, Operand(1));
1626 __ cmp(R2, Operand(0));
1627 __ strb(TMP, FieldAddress(R1, target::OneByteString::data_offset()));
1628 __ add(R1, R1, Operand(1));
1629 __ b(&loop, GT);
1630
1631 __ Bind(&done);
1632 __ Ret();
1633 __ Bind(normal_ir_body);
1634}
1635
1636void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1637 Label* normal_ir_body) {
1638 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1639 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1640 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // OneByteString.
1641 __ SmiUntag(R1);
1642 __ SmiUntag(R2);
1643 __ AddImmediate(R3, R0,
1644 target::OneByteString::data_offset() - kHeapObjectTag);
1645 __ strb(R2, Address(R3, R1));
1646 __ Ret();
1647}
1648
1649void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1650 Label* normal_ir_body) {
1651 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Value.
1652 __ ldr(R1, Address(SP, 1 * target::kWordSize)); // Index.
1653 __ ldr(R0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
1654 // Untag index and multiply by element size -> no-op.
1655 __ SmiUntag(R2);
1656 __ AddImmediate(R3, R0,
1657 target::TwoByteString::data_offset() - kHeapObjectTag);
1658 __ strh(R2, Address(R3, R1));
1659 __ Ret();
1660}
1661
1662void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1663 Label* normal_ir_body) {
1664 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1665 Label ok;
1666 TryAllocateString(assembler, kOneByteStringCid,
1667 target::OneByteString::kMaxNewSpaceElements, &ok,
1668 normal_ir_body);
1669
1670 __ Bind(&ok);
1671 __ Ret();
1672
1673 __ Bind(normal_ir_body);
1674}
1675
1676void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1677 Label* normal_ir_body) {
1678 __ ldr(R2, Address(SP, 0 * target::kWordSize)); // Length.
1679 Label ok;
1680 TryAllocateString(assembler, kTwoByteStringCid,
1681 target::TwoByteString::kMaxNewSpaceElements, &ok,
1682 normal_ir_body);
1683
1684 __ Bind(&ok);
1685 __ Ret();
1686
1687 __ Bind(normal_ir_body);
1688}
1689
1690void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1691 Label* normal_ir_body) {
1692 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1693 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1694
1695 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1696 kOneByteStringCid);
1697}
1698
1699void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1700 Label* normal_ir_body) {
1701 __ ldr(R0, Address(SP, 1 * target::kWordSize)); // This.
1702 __ ldr(R1, Address(SP, 0 * target::kWordSize)); // Other.
1703
1704 StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
1705 kTwoByteStringCid);
1706}
1707
1708void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1709 Label* normal_ir_body,
1710 bool sticky) {
1711 if (FLAG_interpret_irregexp) return;
1712
1713 const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
1714 const intptr_t kStringParamOffset = 1 * target::kWordSize;
1715 // start_index smi is located at offset 0.
1716
1717 // Incoming registers:
1718 // R0: Function. (Will be reloaded with the specialized matcher function.)
1719 // R4: Arguments descriptor. (Will be preserved.)
1720 // R9: Unknown. (Must be GC safe on tail call.)
1721
1722 // Load the specialized function pointer into R0. Leverage the fact that the
1723 // string CIDs as well as the stored function pointers are in sequence.
1724 __ ldr(R2, Address(SP, kRegExpParamOffset));
1725 __ ldr(R1, Address(SP, kStringParamOffset));
1726 __ LoadClassId(R1, R1);
1727 __ AddImmediate(R1, -kOneByteStringCid);
1728 __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
1729 __ ldr(FUNCTION_REG, FieldAddress(R1, target::RegExp::function_offset(
1730 kOneByteStringCid, sticky)));
1731
1732 // Registers are now set up for the lazy compile stub. It expects the function
1733 // in R0, the argument descriptor in R4, and IC-Data in R9.
1734 __ eor(R9, R9, Operand(R9));
1735
1736 // Tail-call the function.
1738 __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1739}
1740
1741void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1742 Label* normal_ir_body) {
1743 __ LoadIsolate(R0);
1744 __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
1745 __ Ret();
1746}
1747
1748void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
1749 Label* normal_ir_body) {
1750 __ LoadIsolate(R0);
1751 __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
1752 __ Ret();
1753}
1754
1755void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
1756 Label* normal_ir_body) {
1757#if !defined(SUPPORT_TIMELINE)
1758 __ LoadObject(R0, CastHandle<Object>(FalseObject()));
1759 __ Ret();
1760#else
1761 // Load TimelineStream*.
1762 __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
1763 // Load uintptr_t from TimelineStream*.
1764 __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
1765 __ cmp(R0, Operand(0));
1766 __ LoadObject(R0, CastHandle<Object>(TrueObject()), NE);
1767 __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
1768 __ Ret();
1769#endif
1770}
1771
1772void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
1773 Label* normal_ir_body) {
1774#if !defined(SUPPORT_TIMELINE)
1775 __ LoadImmediate(R0, target::ToRawSmi(0));
1776 __ Ret();
1777#else
1778 __ ldr(R1, Address(THR, target::Thread::next_task_id_offset()));
1779 __ ldr(R2, Address(THR, target::Thread::next_task_id_offset() + 4));
1780 __ SmiTag(R0, R1); // Ignore loss of precision.
1781 __ adds(R1, R1, Operand(1));
1782 __ adcs(R2, R2, Operand(0));
1783 __ str(R1, Address(THR, target::Thread::next_task_id_offset()));
1784 __ str(R2, Address(THR, target::Thread::next_task_id_offset() + 4));
1785 __ Ret();
1786#endif
1787}
1788
1789#undef __
1790
1791} // namespace compiler
1792} // namespace dart
1793
1794#endif // defined(TARGET_ARCH_ARM)