asm_intrinsifier_arm.cc
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
#if defined(TARGET_ARCH_ARM)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// R4: Arguments descriptor
// LR: Return address
// The R4 and CODE_REG registers can be destroyed only if there is no slow-path,
// i.e. if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_arm.h) must be preserved.

#define __ assembler->

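// Throughout this file, `__ foo(...)` expands to `assembler->foo(...)`: each
// call appends one ARM instruction (or assembler macro) to the code buffer of
// the intrinsic being built.
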
// Loads args from the stack into R0 and R1.
// Tests if they are Smis; jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ ldr(R0, Address(SP, +0 * target::kWordSize));
  __ ldr(R1, Address(SP, +1 * target::kWordSize));
  __ orr(TMP, R0, Operand(R1));
  __ tst(TMP, Operand(kSmiTagMask));
  __ b(not_smi, NE);
}

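// Both arguments are checked with a single TST: a Smi has tag bit 0, so the
// OR of the two words has bit 0 set iff at least one of them is a heap
// object. Illustrative C sketch of the same predicate:
//   bool both_smis(uword a, uword b) { return ((a | b) & kSmiTagMask) == 0; }
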
void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(R0, target::ToRawSmi(target::kSmiBits));
  __ b(normal_ir_body, HI);

  __ SmiUntag(R0);

  // Check for overflow by shifting left and shifting back arithmetically.
  // If the result is different from the original, there was overflow.
  __ mov(IP, Operand(R1, LSL, R0));
  __ cmp(R1, Operand(IP, ASR, R0));

  // No overflow, result in R0.
  __ mov(R0, Operand(R1, LSL, R0), EQ);
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, EQ));
  // Arguments are Smi but the shift produced an overflow to Mint.
  __ CompareImmediate(R1, 0);
  __ b(normal_ir_body, LT);
  __ SmiUntag(R1);

  // Pull off high bits that will be shifted off of R1 by making a mask
  // ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
  // high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
  // lo bits = R1 << R0
  __ LoadImmediate(R8, 1);
  __ mov(R8, Operand(R8, LSL, R0));  // R8 <- 1 << R0
  __ sub(R8, R8, Operand(1));        // R8 <- R8 - 1
  __ rsb(R3, R0, Operand(32));       // R3 <- 32 - R0
  __ mov(R8, Operand(R8, LSL, R3));  // R8 <- R8 << R3
  __ and_(R8, R1, Operand(R8));      // R8 <- R8 & R1
  __ mov(R8, Operand(R8, LSR, R3));  // R8 <- R8 >> R3
  // Now R8 has the bits that fall off of R1 on a left shift.
  __ mov(R1, Operand(R1, LSL, R0));  // R1 gets the low bits.

  const Class& mint_class = MintClass();
  __ TryAllocate(mint_class, normal_ir_body, Assembler::kFarJump, R0, R2);

  __ str(R1, FieldAddress(R0, target::Mint::value_offset()));
  __ str(R8,
         FieldAddress(R0, target::Mint::value_offset() + target::kWordSize));
  __ Ret();
  __ Bind(normal_ir_body);
}

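// The overflow check relies on LSL followed by ASR being lossless exactly
// when no significant bits (including the sign) were shifted out. In
// pseudo-C (ignoring C's undefined behavior on signed overflow):
//   bool shl_overflows(int32_t v, int n) { return ((v << n) >> n) != v; }
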
static void Get64SmiOrMint(Assembler* assembler,
                           Register res_hi,
                           Register res_lo,
                           Register reg,
                           Label* not_smi_or_mint) {
  Label not_smi, done;
  __ tst(reg, Operand(kSmiTagMask));
  __ b(&not_smi, NE);
  __ SmiUntag(reg);

  // Sign extend to 64 bits.
  __ mov(res_lo, Operand(reg));
  __ mov(res_hi, Operand(res_lo, ASR, 31));
  __ b(&done);

  __ Bind(&not_smi);
  __ CompareClassId(reg, kMintCid, res_lo);
  __ b(not_smi_or_mint, NE);

  // Mint.
  __ ldr(res_lo, FieldAddress(reg, target::Mint::value_offset()));
  __ ldr(res_hi,
         FieldAddress(reg, target::Mint::value_offset() + target::kWordSize));
  __ Bind(&done);
}

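// ASR #31 replicates the sign bit across the whole word, so res_hi:res_lo
// together hold the 64-bit sign extension of the untagged Smi, matching
// (int64_t)(int32_t)v in C.
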
static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label try_mint_smi, is_true, is_false, drop_two_fall_through, fall_through;
  TestBothArgumentsSmis(assembler, &try_mint_smi);
  // R0 contains the right argument. R1 contains the left argument.

  __ cmp(R1, Operand(R0));
  __ b(&is_true, true_condition);
  __ Bind(&is_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();
  __ Bind(&is_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  // 64-bit comparison.
  Condition hi_true_cond, hi_false_cond, lo_false_cond;
  switch (true_condition) {
    case LT:
    case LE:
      hi_true_cond = LT;
      hi_false_cond = GT;
      lo_false_cond = (true_condition == LT) ? CS : HI;
      break;
    case GT:
    case GE:
      hi_true_cond = GT;
      hi_false_cond = LT;
      lo_false_cond = (true_condition == GT) ? LS : CC;
      break;
    default:
      UNREACHABLE();
      hi_true_cond = hi_false_cond = lo_false_cond = VS;
  }

  __ Bind(&try_mint_smi);
  // Get left as a 64-bit integer.
  Get64SmiOrMint(assembler, R3, R2, R1, normal_ir_body);
  // Get right as a 64-bit integer.
  Get64SmiOrMint(assembler, R1, R8, R0, normal_ir_body);
  // R3: left high.
  // R2: left low.
  // R1: right high.
  // R8: right low.

  __ cmp(R3, Operand(R1));  // Compare left hi, right hi.
  __ b(&is_false, hi_false_cond);
  __ b(&is_true, hi_true_cond);
  __ cmp(R2, Operand(R8));  // Compare left lo, right lo.
  __ b(&is_false, lo_false_cond);
  // Else is true.
  __ b(&is_true);

  __ Bind(normal_ir_body);
}

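// For the 64-bit path, the (signed) high words decide the result unless they
// are equal, in which case the low words are compared *unsigned* - hence the
// CS/HI/LS/CC choices for lo_false_cond above. In C:
//   bool lt64(int32_t ah, uint32_t al, int32_t bh, uint32_t bl) {
//     return ah != bh ? ah < bh : al < bl;
//   }
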
void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LT);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GT);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LE);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GE);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ b(&true_label, EQ);

  __ orr(R2, R0, Operand(R1));
  __ tst(R2, Operand(kSmiTagMask));
  __ b(&check_for_mint, NE);  // If R0 or R1 is not a Smi do Mint checks.

  // Both arguments are Smi, '===' is good enough.
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();
  __ Bind(&true_label);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ tst(R1, Operand(kSmiTagMask));  // Check receiver.
  __ b(&receiver_not_smi, NE);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(R0, kDoubleCid, R2);
  __ b(normal_ir_body, EQ);
  __ LoadObject(R0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ Ret();

  __ Bind(&receiver_not_smi);
  // R1: receiver.

  __ CompareClassId(R1, kMintCid, R2);
  __ b(normal_ir_body, NE);
  // Receiver is Mint, return false if right is Smi.
  __ tst(R0, Operand(kSmiTagMask));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
  READS_RETURN_ADDRESS_FROM_LR(
      __ bx(LR, EQ));  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(R0);
  // XOR with sign bit to complement bits if value is negative.
  __ eor(R0, R0, Operand(R0, ASR, 31));
  __ clz(R0, R0);
  __ rsb(R0, R0, Operand(32));
  __ SmiTag(R0);
  __ Ret();
}

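// bitLength is computed branch-free: XOR-ing with the broadcast sign folds
// negative values onto their one's complement, after which 32 - clz gives
// the number of significant bits. Equivalent C sketch:
//   int bit_length(int32_t v) {
//     uint32_t u = (uint32_t)(v ^ (v >> 31));  // fold negatives
//     int n = 0;
//     while (u != 0) { u >>= 1; n++; }
//     return n;  // equals 32 - clz(u); ARM CLZ(0) == 32, so 0 maps to 0
//   }
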
void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * target::kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * target::kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
  __ Asr(R4, R3, Operand(5));
  // R8 = &x_digits[0]
  __ add(R8, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  // R6 = &r_digits[1]
  __ add(R6, R2,
         Operand(target::TypedData::payload_offset() - kHeapObjectTag +
                 kBytesPerBigIntDigit));
  // R2 = &x_digits[x_used]
  __ add(R2, R8, Operand(R0, LSL, 1));
  // R6 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
  __ add(R4, R4, Operand(R0, ASR, 1));
  __ add(R6, R6, Operand(R4, LSL, 2));
  // R1 = n % _DIGIT_BITS
  __ and_(R1, R3, Operand(31));
  // R0 = 32 - R1
  __ rsb(R0, R1, Operand(32));
  __ mov(R9, Operand(0));
  Label loop;
  __ Bind(&loop);
  __ ldr(R4, Address(R2, -kBytesPerBigIntDigit, Address::PreIndex));
  __ orr(R9, R9, Operand(R4, LSR, R0));
  __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
  __ mov(R9, Operand(R4, LSL, R1));
  __ teq(R2, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, -kBytesPerBigIntDigit, Address::PreIndex));
  __ LoadObject(R0, NullObject());
  __ Ret();
}

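// The loop walks x_digits from most- to least-significant (pre-decrement
// addressing). With s = n % 32, each result digit combines the low 32 - s
// bits of one source digit (shifted up by s) with the top s bits of the
// digit below it, so the whole-array shift needs no inter-digit fixups.
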
void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  // R0 = x_used, R1 = x_digits, x_used > 0, x_used is Smi.
  __ ldrd(R0, R1, SP, 2 * target::kWordSize);
  // R2 = r_digits, R3 = n, n is Smi, n % _DIGIT_BITS != 0.
  __ ldrd(R2, R3, SP, 0 * target::kWordSize);
  __ SmiUntag(R3);
  // R4 = n ~/ _DIGIT_BITS
  __ Asr(R4, R3, Operand(5));
  // R6 = &r_digits[0]
  __ add(R6, R2, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  // R2 = &x_digits[n ~/ _DIGIT_BITS]
  __ add(R2, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));
  __ add(R2, R2, Operand(R4, LSL, 2));
  // R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
  __ add(R4, R4, Operand(1));
  __ rsb(R4, R4, Operand(R0, ASR, 1));
  __ add(R8, R6, Operand(R4, LSL, 2));
  // R1 = n % _DIGIT_BITS
  __ and_(R1, R3, Operand(31));
  // R0 = 32 - R1
  __ rsb(R0, R1, Operand(32));
  // R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
  __ ldr(R9, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
  __ mov(R9, Operand(R9, LSR, R1));
  Label loop_entry;
  __ b(&loop_entry);
  Label loop;
  __ Bind(&loop);
  __ ldr(R4, Address(R2, kBytesPerBigIntDigit, Address::PostIndex));
  __ orr(R9, R9, Operand(R4, LSL, R0));
  __ str(R9, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));
  __ mov(R9, Operand(R4, LSR, R1));
  __ Bind(&loop_entry);
  __ teq(R6, Operand(R8));
  __ b(&loop, NE);
  __ str(R9, Address(R6, 0));
  __ LoadObject(R0, NullObject());
  __ Ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R2 = a_used, R3 = a_digits
  __ ldrd(R2, R3, SP, 1 * target::kWordSize);
  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R8 = r_digits
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  // R8 = &r_digits[0]
  __ add(R8, R8, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R2 = &digits[a_used >> 1], a_used is Smi.
  __ add(R2, R1, Operand(R2, LSL, 1));

  // R6 = &digits[used >> 1], used is Smi.
  __ add(R6, R1, Operand(R0, LSL, 1));

  __ adds(R4, R4, Operand(0));  // carry flag = 0
  Label add_loop;
  __ Bind(&add_loop);
  // Loop a_used times, a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R4, R4, Operand(R9));
  __ teq(R1, Operand(R2));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&add_loop, NE);

  Label last_carry;
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&last_carry, EQ);    // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ adcs(R4, R4, Operand(0));
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&last_carry);
  __ mov(R4, Operand(0));
  __ adc(R4, R4, Operand(0));
  __ str(R4, Address(R8, 0));

  __ LoadObject(R0, NullObject());
  __ Ret();
}

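// ADCS/TEQ form a classic multi-precision add: TEQ updates only N and Z, so
// the carry produced by each 32-bit ADCS survives into the next iteration,
// and the final ADC materializes the last carry into an extra result digit.
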
void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  // R0 = used, R1 = digits
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);
  // R1 = &digits[0]
  __ add(R1, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R2 = a_used, R3 = a_digits
  __ ldrd(R2, R3, SP, 1 * target::kWordSize);
  // R3 = &a_digits[0]
  __ add(R3, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R8 = r_digits
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  // R8 = &r_digits[0]
  __ add(R8, R8, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R2 = &digits[a_used >> 1], a_used is Smi.
  __ add(R2, R1, Operand(R2, LSL, 1));

  // R6 = &digits[used >> 1], used is Smi.
  __ add(R6, R1, Operand(R0, LSL, 1));

  __ subs(R4, R4, Operand(0));  // carry flag = 1
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop a_used times, a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ ldr(R9, Address(R3, kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R4, R4, Operand(R9));
  __ teq(R1, Operand(R2));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&sub_loop, NE);

  Label done;
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ b(&done, EQ);          // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop used - a_used times, used - a_used > 0.
  __ ldr(R4, Address(R1, kBytesPerBigIntDigit, Address::PostIndex));
  __ sbcs(R4, R4, Operand(0));
  __ teq(R1, Operand(R6));  // Does not affect carry flag.
  __ str(R4, Address(R8, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&carry_loop, NE);

  __ Bind(&done);
  __ LoadObject(R0, NullObject());
  __ Ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint32_t x = x_digits[xi >> 1];  // xi is Smi.
  //   if (x == 0 || n == 0) {
  //     return 1;
  //   }
  //   uint32_t* mip = &m_digits[i >> 1];  // i is Smi.
  //   uint32_t* ajp = &a_digits[j >> 1];  // j is Smi.
  //   uint32_t c = 0;
  //   SmiUntag(n);
  //   do {
  //     uint32_t mi = *mip++;
  //     uint32_t aj = *ajp;
  //     uint64_t t = x*mi + aj + c;  // 32-bit * 32-bit -> 64-bit.
  //     *ajp++ = low32(t);
  //     c = high32(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint64_t t = *ajp + c;
  //     *ajp++ = low32(t);
  //     c = high32(t);  // c == 0 or 1.
  //   }
  //   return 1;
  // }

  Label done;
  // R3 = x, no_op if x == 0
  __ ldrd(R0, R1, SP, 5 * target::kWordSize);  // R0 = xi as Smi, R1 = x_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R3, FieldAddress(R1, target::TypedData::payload_offset()));
  __ tst(R3, Operand(R3));
  __ b(&done, EQ);

  // R8 = SmiUntag(n), no_op if n == 0
  __ ldr(R8, Address(SP, 0 * target::kWordSize));
  __ Asrs(R8, R8, Operand(kSmiTagSize));
  __ b(&done, EQ);

  // R4 = mip = &m_digits[i >> 1]
  __ ldrd(R0, R1, SP, 3 * target::kWordSize);  // R0 = i as Smi, R1 = m_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R4, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R9 = ajp = &a_digits[j >> 1]
  __ ldrd(R0, R1, SP, 1 * target::kWordSize);  // R0 = j as Smi, R1 = a_digits.
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ add(R9, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R1 = c = 0
  __ mov(R1, Operand(0));

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   R3
  // mip: R4
  // ajp: R9
  // c:   R1
  // n:   R8

  // uint32_t mi = *mip++
  __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));

  // uint32_t aj = *ajp
  __ ldr(R0, Address(R9, 0));

  // uint64_t t = x*mi + aj + c
  __ umaal(R0, R1, R2, R3);  // R1:R0 = R2*R3 + R1 + R0.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));

  // c = high32(t) = R1

  // while (--n > 0)
  __ subs(R8, R8, Operand(1));  // --n
  __ b(&muladd_loop, NE);

  __ tst(R1, Operand(R1));
  __ b(&done, EQ);

  // *ajp++ += c
  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(R1));
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&done, CC);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ ldr(R0, Address(R9, 0));
  __ adds(R0, R0, Operand(1));
  __ str(R0, Address(R9, kBytesPerBigIntDigit, Address::PostIndex));
  __ b(&propagate_carry_loop, CS);

  __ Bind(&done);
  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}

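// UMAAL is the workhorse here: UMAAL Rlo, Rhi, Rn, Rm computes
// Rhi:Rlo = Rn*Rm + Rhi + Rlo. The sum cannot overflow 64 bits, since
// (2^32 - 1)^2 + 2*(2^32 - 1) == 2^64 - 1, so a single instruction performs
// the whole multiply-accumulate-with-carry step of the inner loop.
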
void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint32_t* xip = &x_digits[i >> 1];  // i is Smi.
  //   uint32_t x = *xip++;
  //   if (x == 0) return 1;
  //   uint32_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint32_t aj = *ajp;
  //   uint64_t t = x*x + aj;
  //   *ajp++ = low32(t);
  //   uint64_t c = high32(t);
  //   int n = ((used - i) >> 1) - 1;  // used and i are Smi.
  //   while (--n >= 0) {
  //     uint32_t xi = *xip++;
  //     uint32_t aj = *ajp;
  //     uint96_t t = 2*x*xi + aj + c;  // 2-bit * 32-bit * 32-bit -> 65-bit.
  //     *ajp++ = low32(t);
  //     c = high64(t);  // 33-bit.
  //   }
  //   uint32_t aj = *ajp;
  //   uint64_t t = aj + c;  // 32-bit + 33-bit -> 34-bit.
  //   *ajp++ = low32(t);
  //   *ajp = high32(t);
  //   return 1;
  // }

  // The code has no bailout path, so we can use R6 (CODE_REG) freely.

  // R4 = xip = &x_digits[i >> 1]
  __ ldrd(R2, R3, SP, 2 * target::kWordSize);  // R2 = i as Smi, R3 = x_digits
  __ add(R3, R3, Operand(R2, LSL, 1));
  __ add(R4, R3, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R3 = x = *xip++, return if x == 0
  Label x_zero;
  __ ldr(R3, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));
  __ tst(R3, Operand(R3));
  __ b(&x_zero, EQ);

  // R6 = ajp = &a_digits[i]
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ add(R1, R1, Operand(R2, LSL, 2));             // j == 2*i, i is Smi.
  __ add(R6, R1, Operand(target::TypedData::payload_offset() - kHeapObjectTag));

  // R8:R0 = t = x*x + *ajp
  __ ldr(R0, Address(R6, 0));
  __ mov(R8, Operand(0));
  __ umaal(R0, R8, R3, R3);  // R8:R0 = R3*R3 + R8 + R0.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));

  // R8 = low32(c) = high32(t)
  // R9 = high32(c) = 0
  __ mov(R9, Operand(0));

  // int n = used - i - 1; while (--n >= 0) ...
  __ ldr(R0, Address(SP, 0 * target::kWordSize));  // used is Smi
  __ sub(TMP, R0, Operand(R2));
  __ mov(R0, Operand(2));  // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
  __ rsbs(TMP, R0, Operand(TMP, ASR, kSmiTagSize));

  Label loop, done;
  __ b(&done, MI);

  __ Bind(&loop);
  // x:   R3
  // xip: R4
  // ajp: R6
  // c:   R9:R8
  // t:   R2:R1:R0 (not live at loop entry)
  // n:   TMP

  // uint32_t xi = *xip++
  __ ldr(R2, Address(R4, kBytesPerBigIntDigit, Address::PostIndex));

  // uint96_t t = R9:R8:R0 = 2*x*xi + aj + c
  __ umull(R0, R1, R2, R3);  // R1:R0 = R2*R3.
  __ adds(R0, R0, Operand(R0));
  __ adcs(R1, R1, Operand(R1));
  __ mov(R2, Operand(0));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi.
  __ adds(R0, R0, Operand(R8));
  __ adcs(R1, R1, Operand(R9));
  __ adc(R2, R2, Operand(0));  // R2:R1:R0 = 2*x*xi + c.
  __ ldr(R8, Address(R6, 0));  // R8 = aj = *ajp.
  __ adds(R0, R0, Operand(R8));
  __ adcs(R8, R1, Operand(0));
  __ adc(R9, R2, Operand(0));  // R9:R8:R0 = 2*x*xi + c + aj.

  // *ajp++ = low32(t) = R0
  __ str(R0, Address(R6, kBytesPerBigIntDigit, Address::PostIndex));

  // while (--n >= 0)
  __ subs(TMP, TMP, Operand(1));  // --n
  __ b(&loop, PL);

  __ Bind(&done);
  // uint32_t aj = *ajp
  __ ldr(R0, Address(R6, 0));

  // uint64_t t = aj + c
  __ adds(R8, R8, Operand(R0));
  __ adc(R9, R9, Operand(0));

  // *ajp = low32(t) = R8
  // *(ajp + 1) = high32(t) = R9
  __ strd(R8, R9, R6, 0);

  __ Bind(&x_zero);
  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // No unsigned 64-bit / 32-bit divide instruction.
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint32_t rho = args[_RHO];  // _RHO == 2.
  //   uint32_t d = digits[i >> 1];  // i is Smi.
  //   uint64_t t = rho*d;
  //   args[_MU] = t mod DIGIT_BASE;  // _MU == 4.
  //   return 1;
  // }

  // R4 = args
  __ ldr(R4, Address(SP, 2 * target::kWordSize));  // args

  // R3 = rho = args[2]
  __ ldr(R3, FieldAddress(R4, target::TypedData::payload_offset() +
                                  2 * kBytesPerBigIntDigit));

  // R2 = digits[i >> 1]
  __ ldrd(R0, R1, SP, 0 * target::kWordSize);  // R0 = i as Smi, R1 = digits
  __ add(R1, R1, Operand(R0, LSL, 1));
  __ ldr(R2, FieldAddress(R1, target::TypedData::payload_offset()));

  // R1:R0 = t = rho*d
  __ umull(R0, R1, R2, R3);

  // args[4] = t mod DIGIT_BASE = low32(t)
  __ str(R0, FieldAddress(R4, target::TypedData::payload_offset() +
                                  4 * kBytesPerBigIntDigit));

  __ mov(R0, Operand(target::ToRawSmi(1)));  // One digit processed.
  __ Ret();
}

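// Only the low 32 bits of rho*d are stored: with DIGIT_BASE == 2^32 the
// reduction "t mod DIGIT_BASE" is simply low32(t), so UMULL plus a store of
// the low word implements the Montgomery mu step without any division.
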
// Checks if the last argument is a double; jumps to label 'is_smi' if it is a
// Smi (easy to convert to double), otherwise jumps to label 'not_double_smi'.
// Returns the last argument in R0.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ tst(R0, Operand(kSmiTagMask));
  __ b(is_smi, EQ);
  __ CompareClassId(R0, kDoubleCid, R1);
  __ b(not_double_smi, NE);
  // Fall through with Double in R0.
}

// Both arguments on stack, arg0 (left) is a double, arg1 (right) is of
// unknown type. Returns the true or false object in register R0. Any NaN
// argument returns false. Any non-double arg1 causes control flow to fall
// through to the slow case (compiled method body).
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_smi, double_op;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.

  __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);

  __ vcmpd(D0, D1);
  __ vmstat();
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  // Return false if D0 or D1 was NaN before checking true condition.
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, VS));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), true_condition);
  __ Ret();

  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ vmovsr(S0, R0);
  __ vcvtdi(D1, S0);
  __ b(&double_op);  // Then do the comparison.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, HI);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CS);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, CC);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQ);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, LS);
}

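// Note the *unsigned* condition codes (HI/CS/CC/LS) rather than the signed
// ones: after vcmpd/vmstat the FPSCR flags encode ordered results as
// greater -> C=1,Z=0; equal -> C=1,Z=1; less -> C=0, which is exactly the
// unsigned integer flag pattern. NaN (unordered) sets V and is filtered by
// the early `bx(LR, VS)` return in CompareDoubles above.
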
// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;

  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in R0.
  __ LoadDFromOffset(D1, R0, target::Double::value_offset() - kHeapObjectTag);
  __ Bind(&double_op);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // Left argument.
  __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  switch (kind) {
    case Token::kADD:
      __ vaddd(D0, D0, D1);
      break;
    case Token::kSUB:
      __ vsubd(D0, D0, D1);
      break;
    case Token::kMUL:
      __ vmuld(D0, D0, D1);
      break;
    case Token::kDIV:
      __ vdivd(D0, D0, D1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
                 R1);  // Result register.
  __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ Ret();
  __ Bind(&is_smi);  // Convert R0 to a double.
  __ SmiUntag(R0);
  __ vmovsr(S0, R0);
  __ vcvtdi(D1, S0);
  __ b(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi).
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  Label fall_through;
  // Only Smis allowed.
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ tst(R0, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);
  // Is Smi.
  __ SmiUntag(R0);
  __ vmovsr(S0, R0);
  __ vcvtdi(D1, S0);
  __ ldr(R0, Address(SP, 1 * target::kWordSize));
  __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ vmuld(D0, D0, D1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
                 R1);  // Result register.
  __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label fall_through;

  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ tst(R0, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);
  // Is Smi.
  __ SmiUntag(R0);
  __ vmovsr(S0, R0);
  __ vcvtdi(D0, S0);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, R0,
                 R1);  // Result register.
  __ StoreDToOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ vcmpd(D0, D0);
  __ vmstat();
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), VC);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), VS);
  __ Ret();
}

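// IEEE 754 guarantees that a NaN compares unordered with everything,
// including itself, so `vcmpd(D0, D0)` sets the V flag exactly when the
// value is NaN; no bit-pattern inspection is needed.
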
void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  // R1 <- value[0:31], R2 <- value[32:63]
  __ LoadFieldFromOffset(R1, R0, target::Double::value_offset());
  __ LoadFieldFromOffset(R2, R0,
                         target::Double::value_offset() + target::kWordSize);

  // If the low word isn't 0, then it isn't infinity.
  __ cmp(R1, Operand(0));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));  // Return if NE.

  // Mask off the sign bit.
  __ AndImmediate(R2, R2, 0x7FFFFFFF);
  // Compare with +infinity.
  __ CompareImmediate(R2, 0x7FF00000);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_false, is_true, is_zero;
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadDFromOffset(D0, R0, target::Double::value_offset() - kHeapObjectTag);
  __ vcmpdz(D0);
  __ vmstat();
  __ b(&is_false, VS);  // NaN -> false.
  __ b(&is_zero, EQ);   // Check for negative zero.
  __ b(&is_false, CS);  // >= 0 -> false.

  __ Bind(&is_true);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(&is_false);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();

  __ Bind(&is_zero);
  // Check for negative zero by looking at the sign bit.
  __ vmovrrd(R0, R1, D0);  // R1:R0 <- D0, so sign bit is in bit 31 of R1.
  __ mov(R1, Operand(R1, LSR, 31));
  __ tst(R1, Operand(1));
  __ b(&is_true, NE);  // Sign bit set.
  __ b(&is_false);
}

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R1, Address(SP, 1 * target::kWordSize));
  __ cmp(R0, Operand(R1));
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
  __ Ret();
}

static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
                        target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
                        target);
}

static void JumpIfString(Assembler* assembler,
                         Register cid,
                         Register tmp,
                         Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler,
                            Register cid,
                            Register tmp,
                            Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler,
                       Register cid,
                       Register tmp,
                       Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}

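// These helpers lean on class-id ordering in class_id.h: the integer cids
// (kSmiCid..kMintCid), the string cids, the list cids and the type cids each
// form contiguous ranges, so a single RangeCheck classifies an object's cid
// without a chain of per-class equality tests.
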
// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer, not_string;
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(R1, R0);

  __ CompareImmediate(R1, kClosureCid);
  __ b(normal_ir_body, EQ);  // Instance is a closure.

  __ CompareImmediate(R1, kRecordCid);
  __ b(normal_ir_body, EQ);  // Instance is a record.

  __ CompareImmediate(R1, kNumPredefinedCids);
  __ b(&use_declaration_type, HI);

  __ LoadIsolateGroup(R2);
  __ LoadFromOffset(R2, R2, target::IsolateGroup::object_store_offset());

  __ CompareImmediate(R1, kDoubleCid);
  __ b(&not_double, NE);
  __ LoadFromOffset(R0, R2, target::ObjectStore::double_type_offset());
  __ Ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, R1, R0, &not_integer);
  __ LoadFromOffset(R0, R2, target::ObjectStore::int_type_offset());
  __ Ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, R1, R0, &not_string);
  __ LoadFromOffset(R0, R2, target::ObjectStore::string_type_offset());
  __ Ret();

  __ Bind(&not_string);
  JumpIfNotType(assembler, R1, R0, &use_declaration_type);
  __ LoadFromOffset(R0, R2, target::ObjectStore::type_type_offset());
  __ Ret();

  __ Bind(&use_declaration_type);
  __ LoadClassById(R2, R1);
  __ ldrh(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()));
  __ CompareImmediate(R3, 0);
  __ b(normal_ir_body, NE);

  __ ldr(R0, FieldAddress(R2, target::Class::declaration_type_offset()));
  __ CompareObject(R0, NullObject());
  __ b(normal_ir_body, EQ);
  __ Ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or
// not_equal. If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ CompareImmediate(cid1, kClosureCid);
  __ b(normal_ir_body, EQ);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ CompareImmediate(cid1, kRecordCid);
  __ b(normal_ir_body, EQ);

  // Check whether class ids match. If class ids don't match, types may still
  // be considered equivalent (e.g. multiple string implementation classes map
  // to a single String type).
  __ cmp(cid1, Operand(cid2));
  __ b(equal_may_be_generic, EQ);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ CompareImmediate(cid1, kNumPredefinedCids);
  __ b(not_equal, HI);

  // Check if both are integer types.
  JumpIfNotInteger(assembler, cid1, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ b(not_equal);

  __ Bind(&not_integer);
  // Check if both are String types.
  JumpIfNotString(assembler, cid1, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ b(not_equal);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    JumpIfNotList(assembler, cid2, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ b(equal_may_be_generic);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not, then the types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    JumpIfNotType(assembler, cid1, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    JumpIfType(assembler, cid2, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ b(not_equal);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ ldm(IA, SP, (1 << R1 | 1 << R2));
  __ LoadClassIdMayBeSmi(R1, R1);
  __ LoadClassIdMayBeSmi(R2, R2);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, R1, R2, R0,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(R0, R1);
  __ ldr(
      R0,
      FieldAddress(
          R0,
          target::Class::host_type_arguments_field_offset_in_words_offset()));
  __ CompareImmediate(R0, target::Class::kNoTypeArguments);
  __ b(&equal, EQ);

  // Compare type arguments, host_type_arguments_field_offset_in_words in R0.
  __ ldm(IA, SP, (1 << R1 | 1 << R2));
  __ AddImmediate(R1, -kHeapObjectTag);
  __ ldr(R1, Address(R1, R0, LSL, target::kWordSizeLog2));
  __ AddImmediate(R2, -kHeapObjectTag);
  __ ldr(R2, Address(R2, R0, LSL, target::kWordSizeLog2));
  __ cmp(R1, Operand(R2));
  __ b(normal_ir_body, NE);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::hash_offset()));
  __ cmp(R0, Operand(0));
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
  __ Bind(normal_ir_body);  // Hash not yet computed.
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids, check_legacy;

  __ ldm(IA, SP, (1 << R1 | 1 << R2));
  __ cmp(R1, Operand(R2));
  __ b(&equal, EQ);

  // R1 might not be a Type object, so check that first (R2 should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(R0, R1);
  __ CompareImmediate(R0, kTypeCid);
  __ b(normal_ir_body, NE);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(R3, R1);
  __ LoadTypeClassId(R4, R2);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, R3, R4, R0,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ ldr(R3, FieldAddress(R1, target::Type::arguments_offset()));
  __ ldr(R4, FieldAddress(R2, target::Type::arguments_offset()));
  __ cmp(R3, Operand(R4));
  __ b(normal_ir_body, NE);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(R1, R1);
  __ LoadAbstractTypeNullability(R2, R2);
  __ cmp(R1, Operand(R2));
  __ b(&check_legacy, NE);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ CompareImmediate(R1, target::Nullability::kNonNullable);
  __ b(&not_equal, LT);
  __ CompareImmediate(R2, target::Nullability::kNonNullable);
  __ b(&equal, GE);

  __ Bind(&not_equal);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::AbstractType::hash_offset()));
  __ cmp(R0, Operand(0));
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));
  __ Bind(normal_ir_body);  // Hash not yet computed.
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ ldm(IA, SP, (1 << R1 | 1 << R2));
  __ cmp(R1, Operand(R2));
  __ b(normal_ir_body, NE);

  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(normal_ir_body);
}

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(R1);
  __ ldr(R8, FieldAddress(R0, target::String::length_offset()));  // this.length
  __ SmiUntag(R8);
  __ ldr(R9,
         FieldAddress(R2, target::String::length_offset()));  // other.length
  __ SmiUntag(R9);

  // if (other.length == 0) return true;
  __ cmp(R9, Operand(0));
  __ b(return_true, EQ);

  // if (start < 0) return false;
  __ cmp(R1, Operand(0));
  __ b(return_false, LT);

  // if (start + other.length > this.length) return false;
  __ add(R3, R1, Operand(R9));
  __ cmp(R3, Operand(R8));
  __ b(return_false, GT);

  if (receiver_cid == kOneByteStringCid) {
    __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
    __ add(R0, R0, Operand(R1));
    __ add(R0, R0, Operand(R1));
  }
  if (other_cid == kOneByteStringCid) {
    __ AddImmediate(R2, target::OneByteString::data_offset() - kHeapObjectTag);
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ AddImmediate(R2, target::TwoByteString::data_offset() - kHeapObjectTag);
  }

  // i = 0
  __ LoadImmediate(R3, 0);

  // do
  Label loop;
  __ Bind(&loop);

  if (receiver_cid == kOneByteStringCid) {
    __ ldrb(R4, Address(R0, 0));  // this.codeUnitAt(i + start)
  } else {
    __ ldrh(R4, Address(R0, 0));  // this.codeUnitAt(i + start)
  }
  if (other_cid == kOneByteStringCid) {
    __ ldrb(TMP, Address(R2, 0));  // other.codeUnitAt(i)
  } else {
    __ ldrh(TMP, Address(R2, 0));  // other.codeUnitAt(i)
  }
  __ cmp(R4, Operand(TMP));
  __ b(return_false, NE);

  // i++, while (i < len)
  __ AddImmediate(R3, 1);
  __ AddImmediate(R0, receiver_cid == kOneByteStringCid ? 1 : 2);
  __ AddImmediate(R2, other_cid == kOneByteStringCid ? 1 : 2);
  __ cmp(R3, Operand(R9));
  __ b(&loop, LT);

  __ b(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // this
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // start
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // other
  __ Push(R4);  // Make ARGS_DESC_REG available.

  __ tst(R1, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);  // 'start' is not a Smi.

  __ CompareClassId(R2, kOneByteStringCid, R3);
  __ b(normal_ir_body, NE);

  __ CompareClassId(R0, kOneByteStringCid, R3);
  __ b(&try_two_byte, NE);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(R0, kTwoByteStringCid, R3);
  __ b(normal_ir_body, NE);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ Pop(R4);
  __ LoadObject(R0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(&return_false);
  __ Pop(R4);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();

  __ Bind(normal_ir_body);
  __ Pop(R4);
}

void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  UNREACHABLE();
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;

  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // String.
  __ tst(R1, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);  // Index is not a Smi.
  // Range check.
  __ ldr(R2, FieldAddress(R0, target::String::length_offset()));
  __ cmp(R1, Operand(R2));
  __ b(normal_ir_body, CS);  // Runtime throws exception.

  __ CompareClassId(R0, kOneByteStringCid, R3);
  __ b(&try_two_byte_string, NE);
  __ SmiUntag(R1);
  __ AddImmediate(R0, target::OneByteString::data_offset() - kHeapObjectTag);
  __ ldrb(R1, Address(R0, R1));
  __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ b(normal_ir_body, GE);
  __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ AddImmediate(
      R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
  __ ldr(R0, Address(R0, R1, LSL, 2));
  __ Ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(R0, kTwoByteStringCid, R3);
  __ b(normal_ir_body, NE);
  ASSERT(kSmiTagShift == 1);
  __ AddImmediate(R0, target::TwoByteString::data_offset() - kHeapObjectTag);
  __ ldrh(R1, Address(R0, R1));
  __ CompareImmediate(R1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ b(normal_ir_body, GE);
  __ ldr(R0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ AddImmediate(
      R0, target::Symbols::kNullCharCodeSymbolOffset * target::kWordSize);
  __ ldr(R0, Address(R0, R1, LSL, 2));
  __ Ret();

  __ Bind(normal_ir_body);
}

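// Single-character strings with code units below kNumberOfOneCharCodeSymbols
// exist as canonical symbols preallocated by the VM, so charAt can return a
// pre-existing object instead of allocating; larger code units bail out to
// the runtime.
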
void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R0, target::String::length_offset()));
  __ cmp(R0, Operand(target::ToRawSmi(0)));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), EQ);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), NE);
  __ Ret();
}

void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ ldr(R1, Address(SP, 0 * target::kWordSize));
  __ ldr(R0, FieldAddress(R1, target::String::hash_offset()));
  __ cmp(R0, Operand(0));
  READS_RETURN_ADDRESS_FROM_LR(__ bx(LR, NE));  // Return if already computed.
  __ ldr(R2, FieldAddress(R1, target::String::length_offset()));
  __ SmiUntag(R2);
  __ mov(R3, Operand(0));
  __ AddImmediate(R8, R1,
                  target::OneByteString::data_offset() - kHeapObjectTag);

  // R1: Instance of OneByteString.
  // R2: String length, untagged integer.
  // R3: Loop counter, untagged integer.
  // R8: String data.
  // R0: Hash code, untagged integer.

  Label loop, done;
  __ Bind(&loop);
  __ cmp(R3, Operand(R2));
  __ b(&done, EQ);
  // Add to hash code: (hash_ is uint32)
  // Get one character (ch).
  __ ldrb(TMP, Address(R8, 0));
  // TMP: ch.
  __ add(R3, R3, Operand(1));
  __ add(R8, R8, Operand(1));
  __ CombineHashes(R0, TMP);
  __ b(&loop);

  __ Bind(&done);
  // Finalize. Allow a zero result to combine checks from empty string branch.
  __ FinalizeHashForSize(target::String::kHashBits, R0);
  __ SmiTag(R0);
  __ StoreIntoSmiField(FieldAddress(R1, target::String::hash_offset()), R0);
  __ Ret();
}

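// CombineHashes/FinalizeHashForSize implement the VM's Jenkins-style
// one-at-a-time string hash (see hash.h): each byte is added and mixed with
// shifts and XORs, and the finalize step reduces the result to
// target::String::kHashBits before it is cached in the header as a Smi.
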
// Allocates a _OneByteString or _TwoByteString. The content is not
// initialized. 'length_reg' (R2) contains the desired length as a _Smi or
// _Mint. Returns the new string as a tagged pointer in R0.
static void TryAllocateString(Assembler* assembler,
                              classid_t cid,
                              intptr_t max_elements,
                              Label* ok,
                              Label* failure) {
  ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
  const Register length_reg = R2;
  // _Mint length: call to runtime to produce error.
  __ BranchIfNotSmi(length_reg, failure);
  // Negative length: call to runtime to produce error.
  // Too big: call to runtime to allocate old.
  __ CompareImmediate(length_reg, target::ToRawSmi(max_elements));
  __ b(failure, HI);

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, R0));
  __ mov(R8, Operand(length_reg));  // Save the length register.
  if (cid == kOneByteStringCid) {
    __ SmiUntag(length_reg);
  } else {
    // Untag length and multiply by element size -> no-op.
  }
  const intptr_t fixed_size_plus_alignment_padding =
      target::String::InstanceSize() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
  __ bic(length_reg, length_reg,
         Operand(target::ObjectAlignment::kObjectAlignment - 1));

  __ ldr(R0, Address(THR, target::Thread::top_offset()));

  // length_reg: allocation size.
  __ adds(R1, R0, Operand(length_reg));
  __ b(failure, CS);  // Fail on unsigned overflow.

  // Check if the allocation fits into the remaining space.
  // R0: potential new object start.
  // R1: potential next object start.
  // R2: allocation size.
  __ ldr(TMP, Address(THR, target::Thread::end_offset()));
  __ cmp(R1, Operand(TMP));
  __ b(failure, CS);
  __ CheckAllocationCanary(R0);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ str(R1, Address(THR, target::Thread::top_offset()));
  __ AddImmediate(R0, kHeapObjectTag);
  // Clear last double word to ensure string comparison doesn't need to
  // specially handle remainder of strings with lengths not factors of double
  // offsets.
  __ LoadImmediate(TMP, 0);
  __ str(TMP, Address(R1, -1 * target::kWordSize));
  __ str(TMP, Address(R1, -2 * target::kWordSize));

  // Initialize the tags.
  // R0: new object start as a tagged pointer.
  // R1: new object end address.
  // R2: allocation size.
  {
    const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2;

    __ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
    __ mov(R3, Operand(R2, LSL, shift), LS);
    __ mov(R3, Operand(0), HI);

    // Get the class index and insert it into the tags.
    // R3: size and bit tags.
    const uword tags =
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
    __ LoadImmediate(TMP, tags);
    __ orr(R3, R3, Operand(TMP));
    __ str(R3, FieldAddress(R0, target::Object::tags_offset()));  // Store tags.
  }

  // Set the length field using the saved length (R8).
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::String::length_offset()), R8);
  // Clear hash.
  __ LoadImmediate(TMP, 0);
  __ StoreIntoObjectNoBarrier(
      R0, FieldAddress(R0, target::String::hash_offset()), TMP);

  __ b(ok);
}

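// This is the classic inlined new-space bump allocation: the size is rounded
// up to the object alignment by adding (InstanceSize() + alignment - 1) and
// clearing the low bits with BIC, i.e.
//   size = (nbytes + InstanceSize() + kObjectAlignment - 1)
//          & ~(uword)(kObjectAlignment - 1);
// and the allocation succeeds only if thread->top() + size does not pass
// thread->end(); otherwise the intrinsic falls back to the runtime.
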
// Arg0: OneByteString (receiver).
// Arg1: Start index as Smi.
// Arg2: End index as Smi.
// The indexes must be valid.
void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
                                                       Label* normal_ir_body) {
  const intptr_t kStringOffset = 2 * target::kWordSize;
  const intptr_t kStartIndexOffset = 1 * target::kWordSize;
  const intptr_t kEndIndexOffset = 0 * target::kWordSize;
  Label ok;

  __ ldr(R2, Address(SP, kEndIndexOffset));
  __ ldr(TMP, Address(SP, kStartIndexOffset));
  __ orr(R3, R2, Operand(TMP));
  __ tst(R3, Operand(kSmiTagMask));
  __ b(normal_ir_body, NE);  // 'start', 'end' not Smi.

  __ sub(R2, R2, Operand(TMP));
  TryAllocateString(assembler, kOneByteStringCid,
                    target::OneByteString::kMaxNewSpaceElements, &ok,
                    normal_ir_body);
  __ Bind(&ok);
  // R0: new string as tagged pointer.
  // Copy string.
  __ ldr(R3, Address(SP, kStringOffset));
  __ ldr(R1, Address(SP, kStartIndexOffset));
  __ SmiUntag(R1);
  __ add(R3, R3, Operand(R1));
  // Calculate start address and untag (- 1).
  __ AddImmediate(R3, target::OneByteString::data_offset() - 1);

  // R3: Start address to copy from (untagged).
  // R1: Untagged start index.
  __ ldr(R2, Address(SP, kEndIndexOffset));
  __ SmiUntag(R2);
  __ sub(R2, R2, Operand(R1));

  // R3: Start address to copy from (untagged).
  // R2: Untagged number of bytes to copy.
  // R0: Tagged result string.
  // R8: Pointer into R3.
  // R1: Pointer into R0.
  // TMP: Scratch register.
  Label loop, done;
  __ cmp(R2, Operand(0));
  __ b(&done, LE);
  __ mov(R8, Operand(R3));
  __ mov(R1, Operand(R0));
  __ Bind(&loop);
  __ ldrb(TMP, Address(R8, 1, Address::PostIndex));
  __ sub(R2, R2, Operand(1));
  __ cmp(R2, Operand(0));
  __ strb(TMP, FieldAddress(R1, target::OneByteString::data_offset()));
  __ add(R1, R1, Operand(1));
  __ b(&loop, GT);

  __ Bind(&done);
  __ Ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Value.
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // OneByteString.
  __ SmiUntag(R1);
  __ SmiUntag(R2);
  __ AddImmediate(R3, R0,
                  target::OneByteString::data_offset() - kHeapObjectTag);
  __ strb(R2, Address(R3, R1));
  __ Ret();
}

void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Value.
  __ ldr(R1, Address(SP, 1 * target::kWordSize));  // Index.
  __ ldr(R0, Address(SP, 2 * target::kWordSize));  // TwoByteString.
  // Untag index and multiply by element size -> no-op.
  __ SmiUntag(R2);
  __ AddImmediate(R3, R0,
                  target::TwoByteString::data_offset() - kHeapObjectTag);
  __ strh(R2, Address(R3, R1));
  __ Ret();
}

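// In the two-byte case the index is deliberately left tagged: a Smi is the
// value shifted left by one, which is exactly the index scaled by the 2-byte
// element size, so untagging and then rescaling would be a no-op round trip.
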
void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kOneByteStringCid,
                    target::OneByteString::kMaxNewSpaceElements, &ok,
                    normal_ir_body);

  __ Bind(&ok);
  __ Ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ ldr(R2, Address(SP, 0 * target::kWordSize));  // Length.
  Label ok;
  TryAllocateString(assembler, kTwoByteStringCid,
                    target::TwoByteString::kMaxNewSpaceElements, &ok,
                    normal_ir_body);

  __ Bind(&ok);
  __ Ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // This.
  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Other.

  StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
                 kOneByteStringCid);
}

void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ ldr(R0, Address(SP, 1 * target::kWordSize));  // This.
  __ ldr(R1, Address(SP, 0 * target::kWordSize));  // Other.

  StringEquality(assembler, R0, R1, R2, R3, R0, normal_ir_body,
                 kTwoByteStringCid);
}

void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
                                                   Label* normal_ir_body,
                                                   bool sticky) {
  if (FLAG_interpret_irregexp) return;

  const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
  const intptr_t kStringParamOffset = 1 * target::kWordSize;
  // start_index smi is located at offset 0.

  // Incoming registers:
  // R0: Function. (Will be reloaded with the specialized matcher function.)
  // R4: Arguments descriptor. (Will be preserved.)
  // R9: Unknown. (Must be GC safe on tail call.)

  // Load the specialized function pointer into R0. Leverage the fact that the
  // string CIDs as well as stored function pointers are in sequence.
  __ ldr(R2, Address(SP, kRegExpParamOffset));
  __ ldr(R1, Address(SP, kStringParamOffset));
  __ LoadClassId(R1, R1);
  __ AddImmediate(R1, -kOneByteStringCid);
  __ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
  __ ldr(FUNCTION_REG, FieldAddress(R1, target::RegExp::function_offset(
                                            kOneByteStringCid, sticky)));

  // Registers are now set up for the lazy compile stub. It expects the
  // function in R0, the argument descriptor in R4, and IC-Data in R9.
  __ eor(R9, R9, Operand(R9));

  // Tail-call the function.
  __ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
  __ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}

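// Because the string class ids are consecutive starting at kOneByteStringCid
// and the RegExp object stores one specialized matcher function per string
// cid (and stickiness), the target function is selected with pure address
// arithmetic: base_offset + (cid - kOneByteStringCid) * kWordSize.
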
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, target::Isolate::default_tag_offset()));
  __ Ret();
}

void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
                                             Label* normal_ir_body) {
  __ LoadIsolate(R0);
  __ ldr(R0, Address(R0, target::Isolate::current_tag_offset()));
  __ Ret();
}

void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
                                                   Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
  __ LoadObject(R0, CastHandle<Object>(FalseObject()));
  __ Ret();
#else
  // Load TimelineStream*.
  __ ldr(R0, Address(THR, target::Thread::dart_stream_offset()));
  // Load uintptr_t from TimelineStream*.
  __ ldr(R0, Address(R0, target::TimelineStream::enabled_offset()));
  __ cmp(R0, Operand(0));
  __ LoadObject(R0, CastHandle<Object>(TrueObject()), NE);
  __ LoadObject(R0, CastHandle<Object>(FalseObject()), EQ);
  __ Ret();
#endif
}

void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
                                             Label* normal_ir_body) {
#if !defined(SUPPORT_TIMELINE)
  __ LoadImmediate(R0, target::ToRawSmi(0));
  __ Ret();
#else
  __ ldr(R1, Address(THR, target::Thread::next_task_id_offset()));
  __ ldr(R2, Address(THR, target::Thread::next_task_id_offset() + 4));
  __ SmiTag(R0, R1);  // Ignore loss of precision.
  __ adds(R1, R1, Operand(1));
  __ adcs(R2, R2, Operand(0));
  __ str(R1, Address(THR, target::Thread::next_task_id_offset()));
  __ str(R2, Address(THR, target::Thread::next_task_id_offset() + 4));
  __ Ret();
#endif
}

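// On 32-bit ARM the 64-bit task id is incremented as a register pair: ADDS
// on the low word followed by ADCS on the high word propagates the carry,
// the moral equivalent of `id = id + 1` on a uint64_t.
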
#undef __

}  // namespace compiler
}  // namespace dart

#endif  // defined(TARGET_ARCH_ARM)