asm_intrinsifier_riscv.cc

// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// S4: Arguments descriptor
// RA: Return address
// The S4 and CODE_REG registers can be destroyed only if there is no slow-path,
// i.e. if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_riscv.h) must be preserved.

#define __ assembler->

// Loads args from the stack into A0 and A1.
// Tests if they are Smis, jumps to not_smi if either is not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ lx(A0, Address(SP, +1 * target::kWordSize));
  __ lx(A1, Address(SP, +0 * target::kWordSize));
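  // Smi values carry a 0 tag in the low bit, so the OR below has its low bit
  // set iff at least one argument is a heap object; a single tag test then
  // accepts the pair only when both are Smis.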
  __ or_(TMP, A0, A1);
  __ BranchIfNotSmi(TMP, not_smi, Assembler::kNearJump);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  const Register left = A0;
  const Register right = A1;
  const Register result = A0;

  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
                      kObjectBytes);
  __ BranchIf(CS, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(right);
  __ sll(TMP, left, right);
  __ sra(TMP2, TMP, right);
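  // Overflow check: if no bits (including the sign bit) were lost, shifting
  // back arithmetically recovers the original value. E.g. with bit pattern
  // left == 3 << 61 and right == 2, TMP == 1 << 63 and TMP2 == 7 << 61, so
  // TMP2 != left and the intrinsic falls through to the slow path.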
  __ bne(TMP2, left, normal_ir_body, Assembler::kNearJump);
  __ mv(result, TMP);
  __ ret();

  __ Bind(normal_ir_body);
}

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareObjectRegisters(A0, A1);
  __ BranchIf(true_condition, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LT);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GT);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LE);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GE);
}

// This is called for Smi and Mint receivers. The right argument
// can be a Smi, a Mint, or a double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ CompareObjectRegisters(A0, A1);
  __ BranchIf(EQ, &true_label, Assembler::kNearJump);

  __ or_(TMP, A0, A1);
  // If A0 or A1 is not a Smi do Mint checks.
  __ BranchIfNotSmi(TMP, &check_for_mint, Assembler::kNearJump);

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ BranchIfNotSmi(A0, &receiver_not_smi,
                    Assembler::kNearJump);  // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(A1, kDoubleCid, TMP);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);
  __ LoadObject(A0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ ret();

  __ Bind(&receiver_not_smi);
  // A0: receiver.

  __ CompareClassId(A0, kMintCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  // Receiver is Mint, return false if right is Smi.
  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(A0);

  // XOR with sign bit to complement bits if value is negative.
  __ srai(A1, A0, XLEN - 1);
  __ xor_(A0, A0, A1);
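  // For negative values this yields ~value, which by Dart's definition has
  // the same bitLength: x.bitLength == (~x).bitLength for x < 0.
  // Example: value == 5 (0b101): the sign mask is 0, the XOR is a no-op,
  // CountLeadingZeroes returns XLEN - 3, and the result below is 3.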

  __ CountLeadingZeroes(A0, A0);

  __ li(TMP, XLEN);
  __ sub(A0, TMP, A0);
  __ SmiTag(A0);
  __ ret();
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List src_digits, int src_used,
  //                  int shift_amount,
  //                  Uint32List result_digits)

  Label loop, done;
  __ lx(T0, Address(SP, 3 * target::kWordSize));  // src_digits
  __ lx(T1, Address(SP, 2 * target::kWordSize));  // src_used
  __ lx(T2, Address(SP, 1 * target::kWordSize));  // shift_amount
  __ lx(T3, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
#endif
  __ SmiUntag(T2);

  __ srai(T4, T2, target::kBitsPerWordLog2);  // T4 = word shift
  __ andi(T5, T2, target::kBitsPerWord - 1);  // T5 = bit shift
  __ li(T6, target::kBitsPerWord);
  __ sub(T6, T6, T5);  // T6 = carry bit shift
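  // Example: on XLEN == 64, shift_amount == 70 splits into word shift
  // T4 == 1 and bit shift T5 == 6; each source word then contributes its
  // high 6 bits (shifted right by T6 == 58) to the next destination word.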

  __ slli(TMP, T1, target::kWordSizeLog2);
  __ add(T0, T0, TMP);
  __ subi(T0, T0, target::kWordSize);  // T0 = &src_digits[src_used - 1]

  __ add(TMP, T1, T4);
  __ slli(TMP, TMP, target::kWordSizeLog2);
  __ add(T3, T3, TMP);  // T3 = &dst_digits[src_used + word_shift]

  __ li(T2, 0);  // carry

  __ Bind(&loop);
  __ beqz(T1, &done);
  __ lx(TMP, FieldAddress(T0, target::TypedData::payload_offset()));
  __ srl(TMP2, TMP, T6);
  __ or_(TMP2, TMP2, T2);
  __ sx(TMP2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ sll(T2, TMP, T5);
  __ subi(T0, T0, target::kWordSize);
  __ subi(T3, T3, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&loop);

  __ Bind(&done);
  __ sx(T2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ LoadObject(A0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List src_digits, int src_used,
  //                  int shift_amount,
  //                  Uint32List result_digits)

  Label loop, done;
  __ lx(T0, Address(SP, 3 * target::kWordSize));  // src_digits
  __ lx(T1, Address(SP, 2 * target::kWordSize));  // src_used
  __ lx(T2, Address(SP, 1 * target::kWordSize));  // shift_amount
  __ lx(T3, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
#endif
  __ SmiUntag(T2);

  __ srai(T4, T2, target::kBitsPerWordLog2);  // T4 = word shift
  __ andi(T5, T2, target::kBitsPerWord - 1);  // T5 = bit shift
  __ li(T6, target::kBitsPerWord);
  __ sub(T6, T6, T5);  // T6 = carry bit shift
  __ sub(T1, T1, T4);  // T1 = words to process

  __ slli(TMP, T4, target::kWordSizeLog2);
  __ add(T0, T0, TMP);  // T0 = &src_digits[word_shift]

  // T2 = carry
  __ lx(T2, FieldAddress(T0, target::TypedData::payload_offset()));
  __ srl(T2, T2, T5);
  __ addi(T0, T0, target::kWordSize);
  __ subi(T1, T1, 1);

  __ Bind(&loop);
  __ beqz(T1, &done);
  __ lx(TMP, FieldAddress(T0, target::TypedData::payload_offset()));
  __ sll(TMP2, TMP, T6);
  __ or_(TMP2, TMP2, T2);
  __ sx(TMP2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ srl(T2, TMP, T5);
  __ addi(T0, T0, target::kWordSize);
  __ addi(T3, T3, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&loop);

  __ Bind(&done);
  __ sx(T2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ LoadObject(A0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List longer_digits, int longer_used,
  //                     Uint32List shorter_digits, int shorter_used,
  //                     Uint32List result_digits)

  Label first_loop, second_loop, last_carry, done;
  __ lx(T0, Address(SP, 4 * target::kWordSize));  // longer_digits
  __ lx(T1, Address(SP, 3 * target::kWordSize));  // longer_used
  __ lx(T2, Address(SP, 2 * target::kWordSize));  // shorter_digits
  __ lx(T3, Address(SP, 1 * target::kWordSize));  // shorter_used
  __ lx(T4, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
  __ SmiUntag(T3);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
  __ addi(T3, T3, target::ToRawSmi(1));  // Round up to even
  __ srai(T3, T3, kSmiTagSize + 1);
#endif
  __ li(T5, 0);  // Carry

  __ Bind(&first_loop);
  __ beqz(T3, &second_loop);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ lx(A1, FieldAddress(T2, target::TypedData::payload_offset()));
  __ add(A0, A0, A1);
  __ sltu(TMP, A0, A1);  // Carry
  __ add(A0, A0, T5);
  __ sltu(TMP2, A0, T5);  // Carry
  __ add(T5, TMP, TMP2);
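  // The two carries cannot both be 1: if A0 + A1 wrapped, the sum is at most
  // 2^XLEN - 2 and adding a carry of 1 cannot wrap again, so T5 stays 0 or 1.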
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T2, T2, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ subi(T3, T3, 1);
  __ j(&first_loop);

  __ Bind(&second_loop);
  __ beqz(T1, &last_carry);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ add(TMP, A0, T5);
  __ sltu(T5, TMP, A0);  // Carry
  __ sx(TMP, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&second_loop);

  __ Bind(&last_carry);
  __ beqz(T5, &done);
  __ sx(T5, FieldAddress(T4, target::TypedData::payload_offset()));

  __ Bind(&done);
  __ LoadObject(A0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List longer_digits, int longer_used,
  //                     Uint32List shorter_digits, int shorter_used,
  //                     Uint32List result_digits)
  Label first_loop, second_loop, last_borrow, done;
  __ lx(T0, Address(SP, 4 * target::kWordSize));  // longer_digits
  __ lx(T1, Address(SP, 3 * target::kWordSize));  // longer_used
  __ lx(T2, Address(SP, 2 * target::kWordSize));  // shorter_digits
  __ lx(T3, Address(SP, 1 * target::kWordSize));  // shorter_used
  __ lx(T4, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
  __ SmiUntag(T3);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
  __ addi(T3, T3, target::ToRawSmi(1));  // Round up to even
  __ srai(T3, T3, kSmiTagSize + 1);
#endif
  __ li(T5, 0);  // Borrow

  __ Bind(&first_loop);
  __ beqz(T3, &second_loop);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ lx(A1, FieldAddress(T2, target::TypedData::payload_offset()));
  __ sltu(TMP, A0, A1);  // Borrow
  __ sub(A0, A0, A1);
  __ sltu(TMP2, A0, T5);  // Borrow
  __ sub(A0, A0, T5);
  __ add(T5, TMP, TMP2);
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T2, T2, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ subi(T3, T3, 1);
  __ j(&first_loop);

  __ Bind(&second_loop);
  __ beqz(T1, &last_borrow);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ sltu(TMP, A0, T5);  // Borrow
  __ sub(A0, A0, T5);
  __ mv(T5, TMP);
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&second_loop);

  __ Bind(&last_borrow);
  __ beqz(T5, &done);
  __ neg(T5, T5);
  __ sx(T5, FieldAddress(T4, target::TypedData::payload_offset()));

  __ Bind(&done);
  __ LoadObject(A0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  __ lx(T0, Address(SP, 6 * target::kWordSize));  // x_digits
  __ lx(T1, Address(SP, 5 * target::kWordSize));  // xi
  __ lx(T2, Address(SP, 4 * target::kWordSize));  // m_digits
  __ lx(T3, Address(SP, 3 * target::kWordSize));  // i
  __ lx(T4, Address(SP, 2 * target::kWordSize));  // a_digits
  __ lx(T5, Address(SP, 1 * target::kWordSize));  // j
  __ lx(T6, Address(SP, 0 * target::kWordSize));  // n

  // T0 = x, no_op if x == 0
  // T1 = xi as Smi, T0 = x_digits.
  __ slli(T1, T1, 1);
  __ add(T0, T0, T1);
  __ lx(T0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ beqz(T0, &done);

  // T6 = (SmiUntag(n) + 1)/2, no_op if n == 0
#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T6);
#else
  // 1 word = 2 digits
  __ addi(T6, T6, target::ToRawSmi(1));
  __ srai(T6, T6, 2);
#endif
  __ beqz(T6, &done);

  // T2 = mip = &m_digits[i >> 1]
  // T3 = i as Smi, T2 = m_digits.
  __ slli(T3, T3, 1);
  __ add(T2, T2, T3);

  // T4 = ajp = &a_digits[j >> 1]
  // T5 = j as Smi, T4 = a_digits.
  __ slli(T5, T5, 1);
  __ add(T4, T4, T5);

  // T1 = c = 0
  __ li(T1, 0);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   T0
  // mip: T2
  // ajp: T4
  // c:   T1
  // n:   T6
  // t:   A7:A6 (not live at loop entry)

  // uint64_t mi = *mip++
  __ lx(A0, FieldAddress(T2, target::TypedData::payload_offset()));
  __ addi(T2, T2, target::kWordSize);

  // uint64_t aj = *ajp
  __ lx(A1, FieldAddress(T4, target::TypedData::payload_offset()));

  // uint128_t t = x*mi + aj + c
  // Macro-op fusion: when both halves of the product are required, the
  // recommended sequence is mulhu first.
  __ mulhu(A7, A0, T0);  // A7 = high64(A0*T0), t = A7:A6 = x*mi.
  __ mul(A6, A0, T0);    // A6 = low64(A0*T0).

  __ add(A6, A6, A1);
  __ sltu(TMP, A6, A1);  // Carry
  __ add(A7, A7, TMP);   // t += aj

  __ add(A6, A6, T1);
  __ sltu(TMP, A6, T1);  // Carry
  __ add(A7, A7, TMP);   // t += c

  __ mv(T1, A7);  // c = high64(t)

  // *ajp++ = low64(t) = A6
  __ sx(A6, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);

  // while (--n > 0)
  __ subi(T6, T6, 1);  // --n
  __ bnez(T6, &muladd_loop);

  __ beqz(T1, &done);

  // *ajp++ += c
  __ lx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ add(A0, A0, T1);
  __ sltu(T1, A0, T1);  // Carry
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);
  __ beqz(T1, &done);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ lx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ add(A0, A0, T1);
  __ sltu(T1, A0, T1);  // Carry
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);
  __ bnez(T1, &propagate_carry_loop);

  __ Bind(&done);
  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // T2 = xip = &x_digits[i >> 1]
  // T0 = i as Smi, T1 = x_digits
  __ lx(T0, Address(SP, 2 * target::kWordSize));
  __ lx(T1, Address(SP, 3 * target::kWordSize));
  __ slli(TMP, T0, 1);
  __ add(T1, T1, TMP);
  __ addi(T2, T1, target::TypedData::payload_offset() - kHeapObjectTag);

  // T1 = x = *xip++, return if x == 0
  Label x_zero;
  __ lx(T1, Address(T2, 0));
  __ addi(T2, T2, target::kWordSize);
  __ beqz(T1, &x_zero);

  // T3 = ajp = &a_digits[i]
  __ lx(A1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ slli(TMP, T0, 2);
  __ add(A1, A1, TMP);  // j == 2*i, i is Smi.
  __ addi(T3, A1, target::TypedData::payload_offset() - kHeapObjectTag);

  // T4:A1 = t = x*x + *ajp
  __ lx(A0, Address(T3, 0));
  __ mul(A1, T1, T1);    // A1 = low64(T1*T1).
  __ mulhu(T4, T1, T1);  // T4 = high64(T1*T1).
  __ add(A1, A1, A0);    // T4:A1 += *ajp.
  __ sltu(TMP, A1, A0);
  __ add(T4, T4, TMP);  // T4 = low64(c) = high64(t).
  __ li(T5, 0);         // T5 = high64(c) = 0.

  // *ajp++ = low64(t) = A1
  __ sx(A1, Address(T3, 0));
  __ addi(T3, T3, target::kWordSize);

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // used is Smi
#if XLEN == 32
  // int n = used - i - 2;
  __ sub(T6, A0, T0);
  __ SmiUntag(T6);
  __ subi(T6, T6, 2);
#else
  // int n = (used - i + 1)/2 - 1
  __ sub(T6, A0, T0);
  __ addi(T6, T6, 2);
  __ srai(T6, T6, 2);
  __ subi(T6, T6, 1);
#endif

  Label loop, done;
  __ bltz(T6, &done);  // while (--n >= 0)

  __ Bind(&loop);
  // x:   T1
  // xip: T2
  // ajp: T3
  // c:   T5:T4
  // t:   T0:A1:A0 (not live at loop entry)
  // n:   T6

  // uint64_t xi = *xip++
  __ lx(T0, Address(T2, 0));
  __ addi(T2, T2, target::kWordSize);

  // uint192_t t = T0:A1:A0 = 2*x*xi + aj + c
  __ mul(A0, T0, T1);    // A0 = low64(T0*T1) = low64(x*xi).
  __ mulhu(A1, T0, T1);  // A1 = high64(T0*T1) = high64(x*xi).

  __ mv(TMP, A0);
  __ add(A0, A0, A0);
  __ sltu(TMP, A0, TMP);
  __ mv(TMP2, A1);
  __ add(A1, A1, A1);
  __ sltu(TMP2, A1, TMP2);
  __ add(A1, A1, TMP);
  __ sltu(TMP, A1, TMP);
  __ add(T0, TMP, TMP2);  // T0:A1:A0 = A1:A0 + A1:A0 = 2*x*xi.

  __ add(A0, A0, T4);
  __ sltu(TMP, A0, T4);
  __ add(A1, A1, T5);
  __ sltu(TMP2, A1, T5);
  __ add(A1, A1, TMP);
  __ sltu(TMP, A1, TMP);
  __ add(T0, T0, TMP);
  __ add(T0, T0, TMP2);  // T0:A1:A0 += c.

  __ lx(T5, Address(T3, 0));  // T5 = aj = *ajp.
  __ add(A0, A0, T5);
  __ sltu(TMP, A0, T5);
  __ add(T4, A1, TMP);
  __ sltu(TMP, T4, A1);
  __ add(T5, T0, TMP);  // T5:T4:A0 = 2*x*xi + aj + c.

  // *ajp++ = low64(t) = A0
  __ sx(A0, Address(T3, 0));
  __ addi(T3, T3, target::kWordSize);

  // while (--n >= 0)
  __ subi(T6, T6, 1);  // --n
  __ bgez(T6, &loop);

  __ Bind(&done);
  // uint64_t aj = *ajp
  __ lx(A0, Address(T3, 0));

  // uint128_t t = aj + c
  __ add(T4, T4, A0);
  __ sltu(TMP, T4, A0);
  __ add(T5, T5, TMP);

  // *ajp = low64(t) = T4
  // *(ajp + 1) = high64(t) = T5
  __ sx(T4, Address(T3, 0));
  __ sx(T5, Address(T3, target::kWordSize));

  __ Bind(&x_zero);
  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // There is no 128-bit by 64-bit division instruction on RISC-V, so we use
  // two 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications
  // to adjust the two 32-bit digits of the estimated quotient.
  //
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     // We cannot calculate qd = dh:dl / yt, so ...
  //     uint64_t yth = yt >> 32;
  //     uint64_t qh = dh / yth;
  //     uint128_t ph:pl = yt*qh;
  //     uint64_t tl = (dh << 32)|(dl >> 32);
  //     uint64_t th = dh >> 32;
  //     while ((ph > th) || ((ph == th) && (pl > tl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --qh;
  //     }
  //     qd = qh << 32;
  //     tl = (pl << 32);
  //     th = (ph << 32)|(pl >> 32);
  //     if (tl > dl) ++th;
  //     dl -= tl;
  //     dh -= th;
  //     uint64_t ql = ((dh << 32)|(dl >> 32)) / yth;
  //     ph:pl = yt*ql;
  //     while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --ql;
  //     }
  //     qd |= ql;
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  __ lx(T4, Address(SP, 2 * target::kWordSize));  // args

#if XLEN == 32
  // T3 = yt = args[1]
  __ lx(T3, FieldAddress(T4, target::TypedData::payload_offset() +
                                 kBytesPerBigIntDigit));
#else
  // T3 = yt = args[0..1]
  __ lx(T3, FieldAddress(T4, target::TypedData::payload_offset()));
#endif

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // A0 = i as Smi
  __ lx(T1, Address(SP, 1 * target::kWordSize));  // T1 = digits
  __ slli(TMP, A0, 1);
  __ add(T1, T1, TMP);
#if XLEN == 32
  // T2 = dh = digits[i >> 1]
  __ lx(T2, FieldAddress(T1, target::TypedData::payload_offset()));
#else
  // T2 = dh = digits[(i >> 1) - 1 .. i >> 1]
  __ lx(T2, FieldAddress(T1, target::TypedData::payload_offset() -
                                 kBytesPerBigIntDigit));
#endif

  // A0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ li(A0, -1);

  // Return qd if dh == yt
  Label return_qd;
  __ beq(T2, T3, &return_qd);

#if XLEN == 32
  // T1 = dl = digits[(i >> 1) - 1]
  __ lx(T1, FieldAddress(T1, target::TypedData::payload_offset() -
                                 kBytesPerBigIntDigit));
#else
  // T1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
  __ lx(T1, FieldAddress(T1, target::TypedData::payload_offset() -
                                 3 * kBytesPerBigIntDigit));
#endif

  // T5 = yth = yt >> 32
  __ srli(T5, T3, target::kWordSize * 4);

  // T6 = qh = dh / yth
  __ divu(T6, T2, T5);

  // A6:A1 = ph:pl = yt*qh
  __ mulhu(A6, T3, T6);
  __ mul(A1, T3, T6);

  // A7 = tl = (dh << 32)|(dl >> 32)
  __ slli(A7, T2, target::kWordSize * 4);
  __ srli(TMP, T1, target::kWordSize * 4);
  __ or_(A7, A7, TMP);

  // S3 = th = dh >> 32
  __ srli(S3, T2, target::kWordSize * 4);

  // while ((ph > th) || ((ph == th) && (pl > tl)))
  Label qh_adj_loop, qh_adj, qh_ok;
  __ Bind(&qh_adj_loop);
  __ bgtu(A6, S3, &qh_adj);
  __ bne(A6, S3, &qh_ok);
  __ bleu(A1, A7, &qh_ok);

  __ Bind(&qh_adj);
  // if (pl < yt) --ph
  __ sltu(TMP, A1, T3);
  __ sub(A6, A6, TMP);

  // pl -= yt
  __ sub(A1, A1, T3);

  // --qh
  __ subi(T6, T6, 1);

  // Continue while loop.
  __ j(&qh_adj_loop);

  __ Bind(&qh_ok);
  // A0 = qd = qh << 32
  __ slli(A0, T6, target::kWordSize * 4);

  // tl = (pl << 32)
  __ slli(A7, A1, target::kWordSize * 4);

  // th = (ph << 32)|(pl >> 32);
  __ slli(S3, A6, target::kWordSize * 4);
  __ srli(TMP, A1, target::kWordSize * 4);
  __ or_(S3, S3, TMP);

  // if (tl > dl) ++th
  __ sltu(TMP, T1, A7);
  __ add(S3, S3, TMP);

  // dl -= tl
  __ sub(T1, T1, A7);

  // dh -= th
  __ sub(T2, T2, S3);

  // T6 = ql = ((dh << 32)|(dl >> 32)) / yth
  __ slli(T6, T2, target::kWordSize * 4);
  __ srli(TMP, T1, target::kWordSize * 4);
  __ or_(T6, T6, TMP);
  __ divu(T6, T6, T5);

  // A6:A1 = ph:pl = yt*ql
  __ mulhu(A6, T3, T6);
  __ mul(A1, T3, T6);

  // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  Label ql_adj_loop, ql_adj, ql_ok;
  __ Bind(&ql_adj_loop);
  __ bgtu(A6, T2, &ql_adj);
  __ bne(A6, T2, &ql_ok);
  __ bleu(A1, T1, &ql_ok);

  __ Bind(&ql_adj);
  // if (pl < yt) --ph
  __ sltu(TMP, A1, T3);
  __ sub(A6, A6, TMP);

  // pl -= yt
  __ sub(A1, A1, T3);

  // --ql
  __ subi(T6, T6, 1);

  // Continue while loop.
  __ j(&ql_adj_loop);

  __ Bind(&ql_ok);
  // qd |= ql;
  __ or_(A0, A0, T6);

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset() +
                                 2 * kBytesPerBigIntDigit));

  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  __ lx(T0, Address(SP, 2 * target::kWordSize));  // args
  __ lx(T1, Address(SP, 1 * target::kWordSize));  // digits
  __ lx(T2, Address(SP, 0 * target::kWordSize));  // i as Smi

  // T3 = rho = args[2..3]
  __ lx(T3, FieldAddress(T0, target::TypedData::payload_offset() +
                                 2 * kBytesPerBigIntDigit));

  // T4 = digits[i >> 1 .. (i >> 1) + 1]
  __ slli(T2, T2, 1);
  __ add(T1, T1, T2);
  __ lx(T4, FieldAddress(T1, target::TypedData::payload_offset()));

  // T5 = rho*d mod DIGIT_BASE
  __ mul(T5, T4, T3);  // T5 = low64(T4*T3).

  // args[4 .. 5] = T5
  __ sx(T5, FieldAddress(T0, target::TypedData::payload_offset() +
                                 4 * kBytesPerBigIntDigit));

  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

// FA0: left
// FA1: right
static void PrepareDoubleOp(Assembler* assembler, Label* normal_ir_body) {
  Label double_op;
  __ lx(A0, Address(SP, 1 * target::kWordSize));  // Left
  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Right

  __ fld(FA0, FieldAddress(A0, target::Double::value_offset()));

  __ SmiUntag(TMP, A1);
#if XLEN == 32
  __ fcvtdw(FA1, TMP);
#else
  __ fcvtdl(FA1, TMP);
#endif
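  // The conversion above is speculative: if A1 turns out not to be a Smi,
  // FA1 is simply overwritten below with the double's value, or we bail out
  // to the slow path for anything that is neither a Smi nor a double.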
  __ BranchIfSmi(A1, &double_op, Assembler::kNearJump);
  __ CompareClassId(A1, kDoubleCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  __ fld(FA1, FieldAddress(A1, target::Double::value_offset()));

  __ Bind(&double_op);
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fltd(TMP, FA1, FA0);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fled(TMP, FA1, FA0);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fltd(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ feqd(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fled(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  PrepareDoubleOp(assembler, normal_ir_body);
  switch (kind) {
    case Token::kADD:
      __ faddd(FA0, FA0, FA1);
      break;
    case Token::kSUB:
      __ fsubd(FA0, FA0, FA1);
      break;
    case Token::kMUL:
      __ fmuld(FA0, FA0, FA1);
      break;
    case Token::kDIV:
      __ fdivd(FA0, FA0, FA1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi).
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only Smis are handled here; Mints take the slow path.
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(A1);
#if XLEN == 32
  __ fcvtdw(FA1, A1);
#else
  __ fcvtdl(FA1, A1);
#endif
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
  __ fmuld(FA0, FA0, FA1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, A0, A1);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(A0, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(A0);
#if XLEN == 32
  __ fcvtdw(FA0, A0);
#else
  __ fcvtdl(FA0, A0);
#endif
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, A0, TMP);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

static void DoubleIsClass(Assembler* assembler, intx_t fclass) {
  Label true_label;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
  __ fclassd(TMP, FA0);
  __ andi(TMP, TMP, fclass);
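  // fclass.d sets exactly one of ten mutually exclusive class bits (negative
  // infinity, negative normal, ..., quiet NaN), so the AND is nonzero iff
  // the value falls into one of the classes in the fclass mask.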
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassSignallingNan | kFClassQuietNan);
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassNegInfinity | kFClassPosInfinity);
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassNegInfinity | kFClassNegNormal |
                               kFClassNegSubnormal | kFClassNegZero);
}

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label true_label;
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ beq(A0, A1, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}

static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
                        target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
                        target);
}

static void JumpIfString(Assembler* assembler,
                         Register cid,
                         Register tmp,
                         Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler,
                            Register cid,
                            Register tmp,
                            Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler,
                       Register cid,
                       Register tmp,
                       Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer, not_string;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(A1, A0);

  __ CompareImmediate(A1, kClosureCid);
  __ BranchIf(EQ, normal_ir_body,
              Assembler::kNearJump);  // Instance is a closure.

  __ CompareImmediate(A1, kRecordCid);
  __ BranchIf(EQ, normal_ir_body,
              Assembler::kNearJump);  // Instance is a record.

  __ CompareImmediate(A1, kNumPredefinedCids);
  __ BranchIf(HI, &use_declaration_type, Assembler::kNearJump);

  __ LoadIsolateGroup(A0);
  __ LoadFromOffset(A0, A0, target::IsolateGroup::object_store_offset());

  __ CompareImmediate(A1, kDoubleCid);
  __ BranchIf(NE, &not_double, Assembler::kNearJump);
  __ LoadFromOffset(A0, A0, target::ObjectStore::double_type_offset());
  __ ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, A1, TMP, &not_integer);
  __ LoadFromOffset(A0, A0, target::ObjectStore::int_type_offset());
  __ ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, A1, TMP, &not_string);
  __ LoadFromOffset(A0, A0, target::ObjectStore::string_type_offset());
  __ ret();

  __ Bind(&not_string);
  JumpIfNotType(assembler, A1, TMP, &use_declaration_type);
  __ LoadFromOffset(A0, A0, target::ObjectStore::type_type_offset());
  __ ret();

  __ Bind(&use_declaration_type);
  __ LoadClassById(T2, A1);
  __ lh(T3, FieldAddress(T2, target::Class::num_type_arguments_offset()));
  __ bnez(T3, normal_ir_body, Assembler::kNearJump);

  __ LoadCompressed(A0,
                    FieldAddress(T2, target::Class::declaration_type_offset()));
  __ beq(A0, NULL_REG, normal_ir_body, Assembler::kNearJump);
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or not_equal.
// If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ CompareImmediate(cid1, kClosureCid);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ CompareImmediate(cid1, kRecordCid);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to a
  // single String type).
  __ beq(cid1, cid2, equal_may_be_generic);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ CompareImmediate(cid1, kNumPredefinedCids);
  __ BranchIf(HI, not_equal);

  // Check if both are integer types.
  JumpIfNotInteger(assembler, cid1, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ j(not_equal, Assembler::kNearJump);

  __ Bind(&not_integer);
  // Check if both are String types.
  JumpIfNotString(assembler, cid1, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ j(not_equal, Assembler::kNearJump);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    JumpIfNotList(assembler, cid2, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ j(equal_may_be_generic, Assembler::kNearJump);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not then types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    JumpIfNotType(assembler, cid1, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    JumpIfType(assembler, cid2, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ j(not_equal, Assembler::kNearJump);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(T2, A1);
  __ LoadClassIdMayBeSmi(A1, A0);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, A1, T2, TMP,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(A0, A1);
  __ lw(T0,
        FieldAddress(
            A0,
            target::Class::host_type_arguments_field_offset_in_words_offset()));
  __ CompareImmediate(T0, target::Class::kNoTypeArguments);
  __ BranchIf(EQ, &equal, Assembler::kNearJump);

  // Compare type arguments, host_type_arguments_field_offset_in_words in T0.
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ slli(T0, T0, target::kCompressedWordSizeLog2);
  __ add(A0, A0, T0);
  __ add(A1, A1, T0);
  __ lx(A0, FieldAddress(A0, 0));
  __ lx(A1, FieldAddress(A1, 0));
  __ bne(A0, A1, normal_ir_body, Assembler::kNearJump);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));

#if defined(HASH_IN_OBJECT_HEADER)
  // uint32_t field in header.
  __ lwu(A0, FieldAddress(A0, target::String::hash_offset()));
  __ SmiTag(A0);
#else
  // Smi field.
  __ lx(A0, FieldAddress(A0, target::String::hash_offset()));
#endif
  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
  __ ret();

  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids, check_legacy;

  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ beq(A1, A0, &equal);

  // A1 might not be a Type object, so check that first (A0 should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(T3, A1);
  __ CompareImmediate(T3, kTypeCid);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(T3, A1);
  __ LoadTypeClassId(T4, A0);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, T3, T4, TMP,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ LoadCompressed(T3, FieldAddress(A1, target::Type::arguments_offset()));
  __ LoadCompressed(T4, FieldAddress(A0, target::Type::arguments_offset()));
  __ CompareObjectRegisters(T3, T4);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(A0, A0);
  __ LoadAbstractTypeNullability(A1, A1);
  __ bne(A0, A1, &check_legacy);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ CompareImmediate(A1, target::Nullability::kNonNullable);
  __ BranchIf(LT, &not_equal);
  __ CompareImmediate(A0, target::Nullability::kNonNullable);
  __ BranchIf(GE, &equal);

  __ Bind(&not_equal);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadCompressed(A0, FieldAddress(A0, target::AbstractType::hash_offset()));
  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ bne(A0, A1, normal_ir_body, Assembler::kNearJump);

  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Keep in sync with Instance::IdentityHashCode.
// Note int and double never reach here because they override _identityHashCode.
// Special cases are also not needed for null or bool because they were pre-set
// during VM isolate finalization.
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
#if XLEN == 32
  UNREACHABLE();
#else
  Label not_yet_computed;
  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object.
  __ lwu(A0, FieldAddress(
                 A0, target::Object::tags_offset() +
                         target::UntaggedObject::kHashTagPos / kBitsPerByte));
  __ beqz(A0, &not_yet_computed);
  __ SmiTag(A0);
  __ ret();

  __ Bind(&not_yet_computed);
  __ LoadFromOffset(A1, THR, target::Thread::random_offset());
  __ AndImmediate(T2, A1, 0xffffffff);  // state_lo
  __ srli(T3, A1, 32);                  // state_hi
  __ LoadImmediate(A1, 0xffffda61);     // A
  __ mul(A1, A1, T2);
  __ add(A1, A1, T3);  // new_state = (A * state_lo) + state_hi
  __ StoreToOffset(A1, THR, target::Thread::random_offset());
  __ AndImmediate(A1, A1, 0x3fffffff);
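  // A hash of 0 is reserved to mean "not yet computed" in the header, so if
  // the masked random value came out as 0, draw a new one.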
  __ beqz(A1, &not_yet_computed);

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object
  __ subi(A0, A0, kHeapObjectTag);
  __ slli(T3, A1, target::UntaggedObject::kHashTagPos);

  Label retry, already_set_in_t4;
  __ Bind(&retry);
  __ lr(T2, Address(A0, 0));
  __ srli(T4, T2, target::UntaggedObject::kHashTagPos);
  __ bnez(T4, &already_set_in_t4);
  __ or_(T2, T2, T3);
  __ sc(T4, T2, Address(A0, 0));
  __ bnez(T4, &retry);
  // Fall-through with A1 containing new hash value (untagged).
  __ SmiTag(A0, A1);
  __ ret();
  __ Bind(&already_set_in_t4);
  __ SmiTag(A0, T4);
  __ ret();
#endif
}

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(T0);
  __ LoadCompressedSmi(
      T1, FieldAddress(A0, target::String::length_offset()));  // this.length
  __ SmiUntag(T1);
  __ LoadCompressedSmi(
      T2, FieldAddress(A1, target::String::length_offset()));  // other.length
  __ SmiUntag(T2);

  // if (other.length == 0) return true;
  __ beqz(T2, return_true);

  // if (start < 0) return false;
  __ bltz(T0, return_false);

  // if (start + other.length > this.length) return false;
  __ add(T3, T0, T2);
  __ bgt(T3, T1, return_false);

  if (receiver_cid == kOneByteStringCid) {
    __ add(A0, A0, T0);
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ add(A0, A0, T0);
    __ add(A0, A0, T0);
  }
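  // The untagged start index was added once for a one-byte receiver and
  // twice for a two-byte receiver, scaling it by the element size in bytes.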

  // i = 0
  __ li(T3, 0);

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  if (receiver_cid == kOneByteStringCid) {
    __ lbu(TMP, FieldAddress(A0, target::OneByteString::data_offset()));
  } else {
    __ lhu(TMP, FieldAddress(A0, target::TwoByteString::data_offset()));
  }
  // other.codeUnitAt(i)
  if (other_cid == kOneByteStringCid) {
    __ lbu(TMP2, FieldAddress(A1, target::OneByteString::data_offset()));
  } else {
    __ lhu(TMP2, FieldAddress(A1, target::TwoByteString::data_offset()));
  }
  __ bne(TMP, TMP2, return_false);

  // i++, while (i < len)
  __ addi(T3, T3, 1);
  __ addi(A0, A0, receiver_cid == kOneByteStringCid ? 1 : 2);
  __ addi(A1, A1, other_cid == kOneByteStringCid ? 1 : 2);
  __ blt(T3, T2, &loop);

  __ j(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ lx(A0, Address(SP, 2 * target::kWordSize));  // this
  __ lx(T0, Address(SP, 1 * target::kWordSize));  // start
  __ lx(A1, Address(SP, 0 * target::kWordSize));  // other

  __ BranchIfNotSmi(T0, normal_ir_body);

  __ CompareClassId(A1, kOneByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  __ CompareClassId(A0, kOneByteStringCid, TMP);
  __ BranchIf(NE, &try_two_byte, Assembler::kNearJump);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(A0, kTwoByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;

  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Index.
  __ lx(A0, Address(SP, 1 * target::kWordSize));  // String.
  __ BranchIfNotSmi(A1, normal_ir_body,
                    Assembler::kNearJump);  // Index is not a Smi.
  // Range check.
  __ lx(TMP, FieldAddress(A0, target::String::length_offset()));
  __ bgeu(A1, TMP, normal_ir_body);  // Runtime throws exception.

  __ CompareClassId(A0, kOneByteStringCid, TMP);
  __ BranchIf(NE, &try_two_byte_string);
  __ SmiUntag(A1);
  __ add(A0, A0, A1);
  __ lbu(A1, FieldAddress(A0, target::OneByteString::data_offset()));
  __ CompareImmediate(A1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ BranchIf(GE, normal_ir_body, Assembler::kNearJump);
  __ lx(A0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ slli(A1, A1, target::kWordSizeLog2);
  __ add(A0, A0, A1);
  __ lx(A0, Address(A0, target::Symbols::kNullCharCodeSymbolOffset *
                            target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(A0, kTwoByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  ASSERT(kSmiTagShift == 1);
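  // A1 still holds the index as a Smi (index << 1), which is exactly the
  // byte offset of a two-byte character, so it is added without untagging.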
  __ add(A0, A0, A1);
  __ lhu(A1, FieldAddress(A0, target::TwoByteString::data_offset()));
  __ CompareImmediate(A1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ BranchIf(GE, normal_ir_body, Assembler::kNearJump);
  __ lx(A0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ slli(A1, A1, target::kWordSizeLog2);
  __ add(A0, A0, A1);
  __ lx(A0, Address(A0, target::Symbols::kNullCharCodeSymbolOffset *
                            target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label is_true;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ lx(A0, FieldAddress(A0, target::String::length_offset()));
  __ beqz(A0, &is_true, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}
1688
1689void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1690 Label* normal_ir_body) {
1691 Label compute_hash;
1692 __ lx(A1, Address(SP, 0 * target::kWordSize)); // OneByteString object.
1693#if defined(HASH_IN_OBJECT_HEADER)
1694 // uint32_t field in header.
1695 __ lwu(A0, FieldAddress(A1, target::String::hash_offset()));
1696 __ SmiTag(A0);
1697#else
1698 // Smi field.
1699 __ lx(A0, FieldAddress(A1, target::String::hash_offset()));
1700#endif
1701 __ beqz(A0, &compute_hash);
1702 __ ret(); // Return if already computed.
1703
1704 __ Bind(&compute_hash);
1705 __ lx(T0, FieldAddress(A1, target::String::length_offset()));
1706 __ SmiUntag(T0);
1707
1708 __ mv(T1, ZR);
1709 __ addi(T2, A1, target::OneByteString::data_offset() - kHeapObjectTag);
1710
1711 // A1: Instance of OneByteString.
1712 // T0: String length, untagged integer.
1713 // T1: Loop counter, untagged integer.
1714 // T2: String data.
1715 // A0: Hash code, untagged integer.
1716
1717 Label loop, done;
1718 __ Bind(&loop);
1719 __ beq(T1, T0, &done);
1720 // Add to hash code: (hash_ is uint32)
1721 // Get one character (ch).
1722 __ lbu(T3, Address(T2, 0));
1723 __ addi(T2, T2, 1);
1724 // T3: ch.
1725 __ addi(T1, T1, 1);
1726 __ CombineHashes(A0, T3);
1727 __ j(&loop);
1728
1729 __ Bind(&done);
1730 // Finalize. Allow a zero result to combine checks from empty string branch.
1731 __ FinalizeHashForSize(target::String::kHashBits, A0);
1732#if defined(HASH_IN_OBJECT_HEADER)
1733 // A1: Untagged address of header word (lr/sc do not support offsets).
1734 __ subi(A1, A1, kHeapObjectTag);
1735 __ slli(A0, A0, target::UntaggedObject::kHashTagPos);
1736 Label retry;
1737 __ Bind(&retry);
1738 __ lr(T0, Address(A1, 0));
1739 __ or_(T0, T0, A0);
1740 __ sc(TMP, T0, Address(A1, 0));
1741 __ bnez(TMP, &retry);
1742
1743 __ srli(A0, A0, target::UntaggedObject::kHashTagPos);
1744 __ SmiTag(A0);
1745#else
1746 __ SmiTag(A0);
1747 __ sx(A0, FieldAddress(A1, target::String::hash_offset()));
1748#endif
1749 __ ret();
1750}
1751
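// The loop above accumulates a Jenkins-style running hash (CombineHashes),
// FinalizeHashForSize clamps it to kHashBits and remaps 0 to 1, and on ports
// with HASH_IN_OBJECT_HEADER the result is OR'ed into the header word via an
// lr/sc retry loop. A plain C++ sketch of the same steps; the shift constants
// are assumed to match the VM's hash.h, and the names are illustrative only.

#include <atomic>
#include <cstdint>

inline uint32_t CombineHashesSketch(uint32_t hash, uint32_t ch) {
  hash += ch;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

inline uint32_t FinalizeHashSketch(uint32_t hash, int bits /* kHashBits */) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (uint32_t{1} << bits) - 1;  // size to fit the hash field
  return hash == 0 ? 1 : hash;        // 0 stays reserved for "not computed"
}

// The lr/sc retry loop behaves like an atomic OR into the header word:
inline void PublishHashSketch(std::atomic<uintptr_t>& header, uint32_t hash,
                              int hash_tag_pos /* kHashTagPos */) {
  header.fetch_or(uintptr_t{hash} << hash_tag_pos);
}
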
1752// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1753// 'length-reg' (A1) contains the desired length as a _Smi or _Mint.
1754// Returns new string as tagged pointer in A0.
1755static void TryAllocateString(Assembler* assembler,
1756 classid_t cid,
1757 intptr_t max_elements,
1758 Label* ok,
1759 Label* failure) {
1760 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1761 const Register length_reg = A1;
1762 // _Mint length: call to runtime to produce error.
1763 __ BranchIfNotSmi(length_reg, failure);
1764 // negative length: call to runtime to produce error.
1765 // Too big: call to runtime to allocate old.
1766 __ CompareImmediate(length_reg, target::ToRawSmi(max_elements));
1767 __ BranchIf(UNSIGNED_GREATER, failure);
1768
1769 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, TMP));
1770 __ mv(T0, length_reg); // Save the length register.
1771 if (cid == kOneByteStringCid) {
1772 // Untag length.
1773 __ SmiUntag(length_reg);
1774 } else {
1775 // Untag length and multiply by element size -> no-op.
1776 ASSERT(kSmiTagSize == 1);
1777 }
1778 const intptr_t fixed_size_plus_alignment_padding =
1779 target::String::InstanceSize() +
1780 target::ObjectAlignment::kObjectAlignment - 1;
1781 __ addi(length_reg, length_reg, fixed_size_plus_alignment_padding);
1782 __ andi(length_reg, length_reg,
1783 ~(target::ObjectAlignment::kObjectAlignment - 1));
1784
1785 __ lx(A0, Address(THR, target::Thread::top_offset()));
1786
1787 // length_reg: allocation size.
1788 __ add(T1, A0, length_reg);
1789 __ bltu(T1, A0, failure); // Fail on unsigned overflow.
1790
1791 // Check if the allocation fits into the remaining space.
1792 // A0: potential new object start.
1793 // T1: potential next object start.
1794 // A1: allocation size.
1795 __ lx(TMP, Address(THR, target::Thread::end_offset()));
1796 __ bgtu(T1, TMP, failure);
1797 __ CheckAllocationCanary(A0);
1798
1799 // Successfully allocated the object(s), now update top to point to
1800 // next object start and initialize the object.
1801 __ sx(T1, Address(THR, target::Thread::top_offset()));
1802 __ AddImmediate(A0, kHeapObjectTag);
1803 // Clear the last double word to ensure string comparison doesn't need to
1804 // specially handle the remainder of strings whose lengths are not multiples
1805 // of the double-word size.
1806 __ sx(ZR, Address(T1, -1 * target::kWordSize));
1807 __ sx(ZR, Address(T1, -2 * target::kWordSize));
1808
1809 // Initialize the tags.
1810 // A0: new object start as a tagged pointer.
1811 // T1: new object end address.
1812 // A1: allocation size.
1813 {
1814 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1815 target::ObjectAlignment::kObjectAlignmentLog2;
1816
1817 __ CompareImmediate(A1, target::UntaggedObject::kSizeTagMaxSizeTag);
1818 Label dont_zero_tag;
1819 __ BranchIf(UNSIGNED_LESS_EQUAL, &dont_zero_tag);
1820 __ li(A1, 0);
1821 __ Bind(&dont_zero_tag);
1822 __ slli(A1, A1, shift);
1823
1824 // Get the class index and insert it into the tags.
1825 // A1: size and bit tags.
1826 // This also clears the hash, which is in the high word of the tags.
1827 const uword tags =
1828 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1829 __ OrImmediate(A1, A1, tags);
1830 __ sx(A1, FieldAddress(A0, target::Object::tags_offset())); // Store tags.
1831 }
1832
1833 // Set the length field using the saved length (T0).
1834 __ StoreIntoObjectNoBarrier(
1835 A0, FieldAddress(A0, target::String::length_offset()), T0);
1836#if !defined(HASH_IN_OBJECT_HEADER)
1837 // Clear hash.
1838 __ StoreIntoObjectNoBarrier(
1839 A0, FieldAddress(A0, target::String::hash_offset()), ZR);
1840#endif
1841 __ j(ok);
1842}
1843
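// TryAllocateString above rounds the byte size up to the object alignment,
// bump-allocates from the thread's top pointer, and encodes the rounded size
// into the header's size-tag field (or 0 when it exceeds kSizeTagMaxSizeTag,
// in which case the GC falls back to the length field). A C++ sketch of the
// size arithmetic; the concrete constant values are assumptions chosen only
// to make the sketch self-contained.

#include <cstdint>

constexpr intptr_t kAlignSketch = 16;              // kObjectAlignment (assumed)
constexpr intptr_t kHeaderSketch = 16;             // String::InstanceSize() (assumed)
constexpr intptr_t kSizeTagMaxSketch = 64 * 1024;  // kSizeTagMaxSizeTag (assumed)

// The addi + andi pair above: round header + payload up to the alignment.
inline intptr_t AllocationSizeSketch(intptr_t payload_bytes) {
  return (kHeaderSketch + payload_bytes + kAlignSketch - 1) &
         ~(kAlignSketch - 1);
}

// The size tag stores size >> kObjectAlignmentLog2 at kTagBitsSizeTagPos,
// which is why the shift above is the difference of the two constants.
inline uintptr_t SizeTagBitsSketch(intptr_t alloc_size, int tag_pos,
                                   int align_log2) {
  if (alloc_size > kSizeTagMaxSketch) return 0;  // "too big for the tag"
  return static_cast<uintptr_t>(alloc_size) << (tag_pos - align_log2);
}
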
1844// Arg0: OneByteString (receiver).
1845// Arg1: Start index as Smi.
1846// Arg2: End index as Smi.
1847// The indexes must be valid.
1848void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1849 Label* normal_ir_body) {
1850 const intptr_t kStringOffset = 2 * target::kWordSize;
1851 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
1852 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
1853 Label ok;
1854
1855 __ lx(T0, Address(SP, kEndIndexOffset));
1856 __ lx(TMP, Address(SP, kStartIndexOffset));
1857 __ or_(T1, T0, TMP);
1858 __ BranchIfNotSmi(T1, normal_ir_body); // 'start', 'end' not Smi.
1859
1860 __ sub(A1, T0, TMP);
1861 TryAllocateString(assembler, kOneByteStringCid,
1862 target::OneByteString::kMaxNewSpaceElements, &ok,
1863 normal_ir_body);
1864 __ Bind(&ok);
1865 // A0: new string as tagged pointer.
1866 // Copy string.
1867 __ lx(T1, Address(SP, kStringOffset));
1868 __ lx(T2, Address(SP, kStartIndexOffset));
1869 __ SmiUntag(T2);
1870 // Calculate start address.
1871 __ add(T1, T1, T2);
1872
1873 // T1: Start address to copy from.
1874 // T2: Untagged start index.
1875 __ lx(T0, Address(SP, kEndIndexOffset));
1876 __ SmiUntag(T0);
1877 __ sub(T0, T0, T2);
1878
1879 // T1: Start address to copy from (untagged).
1880 // T0: Untagged number of bytes to copy.
1881 // A0: Tagged result string.
1882 // T3: Pointer into T1.
1883 // T4: Pointer into A0.
1884 // T2: Scratch register.
1885 Label loop, done;
1886 __ blez(T0, &done, Assembler::kNearJump);
1887 __ mv(T3, T1);
1888 __ mv(T4, A0);
1889 __ Bind(&loop);
1890 __ subi(T0, T0, 1);
1891 __ lbu(T2, FieldAddress(T3, target::OneByteString::data_offset()));
1892 __ addi(T3, T3, 1);
1893 __ sb(T2, FieldAddress(T4, target::OneByteString::data_offset()));
1894 __ addi(T4, T4, 1);
1895 __ bgtz(T0, &loop);
1896
1897 __ Bind(&done);
1898 __ ret();
1899 __ Bind(normal_ir_body);
1900}
1901
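// Note that `sub(A1, T0, TMP)` above subtracts the two still-tagged Smi
// indices: a Smi is the value shifted left by one, so the difference of two
// Smis is the Smi of the difference, and the length lands in A1 pre-tagged,
// exactly as TryAllocateString expects. A tiny self-contained check:

#include <cassert>
#include <cstdint>

// Smi tagging on this port: value << 1 with a low tag bit of 0.
inline intptr_t SmiTagSketch(intptr_t v) { return v << 1; }

int main() {
  intptr_t start = SmiTagSketch(3), end = SmiTagSketch(10);
  // 2a - 2b == 2(a - b): subtracting tagged values yields the tagged length.
  assert(end - start == SmiTagSketch(7));
  return 0;
}
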
1902void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1903 Label* normal_ir_body) {
1904 __ lx(A0, Address(SP, 2 * target::kWordSize)); // OneByteString.
1905 __ lx(A1, Address(SP, 1 * target::kWordSize)); // Index.
1906 __ lx(A2, Address(SP, 0 * target::kWordSize)); // Value.
1907 __ SmiUntag(A1);
1908 __ SmiUntag(A2);
1909 __ add(A1, A1, A0);
1910 __ sb(A2, FieldAddress(A1, target::OneByteString::data_offset()));
1911 __ ret();
1912}
1913
1914void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1915 Label* normal_ir_body) {
1916 __ lx(A0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
1917 __ lx(A1, Address(SP, 1 * target::kWordSize)); // Index.
1918 __ lx(A2, Address(SP, 0 * target::kWordSize)); // Value.
1919 // Untag index and multiply by element size -> no-op.
1920 __ SmiUntag(A2);
1921 __ add(A1, A1, A0);
1922 __ sh(A2, FieldAddress(A1, target::TwoByteString::data_offset()));
1923 __ ret();
1924}
1925
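// The "no-op" comment in WriteIntoTwoByteString is the Smi trick in the other
// direction: a tagged index is already index * 2, which for a two-byte string
// is exactly the byte offset of the element, so the tagged index is added to
// the base directly and only the value is untagged. One-byte strings (see
// WriteIntoOneByteString above) must untag the index first. Sketch:

#include <cstdint>

// Byte offset of element i in a two-byte string equals the tagged index.
inline intptr_t TwoByteOffsetSketch(intptr_t tagged_index) {
  return tagged_index;  // == i * sizeof(uint16_t)
}

// One-byte strings have element size 1, so the tag must be removed.
inline intptr_t OneByteOffsetSketch(intptr_t tagged_index) {
  return tagged_index >> 1;  // == i * sizeof(uint8_t)
}
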
1926void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1927 Label* normal_ir_body) {
1928 Label ok;
1929
1930 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Length.
1931 TryAllocateString(assembler, kOneByteStringCid,
1932 target::OneByteString::kMaxNewSpaceElements, &ok,
1933 normal_ir_body);
1934
1935 __ Bind(&ok);
1936 __ ret();
1937
1938 __ Bind(normal_ir_body);
1939}
1940
1941void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1942 Label* normal_ir_body) {
1943 Label ok;
1944
1945 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Length.
1946 TryAllocateString(assembler, kTwoByteStringCid,
1947 target::TwoByteString::kMaxNewSpaceElements, &ok,
1948 normal_ir_body);
1949
1950 __ Bind(&ok);
1951 __ ret();
1952
1953 __ Bind(normal_ir_body);
1954}
1955
1956void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1957 Label* normal_ir_body) {
1958 __ lx(A0, Address(SP, 1 * target::kWordSize)); // This.
1959 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Other.
1960
1961 StringEquality(assembler, A0, A1, T2, TMP2, A0, normal_ir_body,
1962 kOneByteStringCid);
1963}
1964
1965void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1966 Label* normal_ir_body) {
1967 __ lx(A0, Address(SP, 1 * target::kWordSize)); // This.
1968 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Other.
1969
1970 StringEquality(assembler, A0, A1, T2, TMP2, A0, normal_ir_body,
1971 kTwoByteStringCid);
1972}
1973
1974void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1975 Label* normal_ir_body,
1976 bool sticky) {
1977 if (FLAG_interpret_irregexp) return;
1978
1979 const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
1980 const intptr_t kStringParamOffset = 1 * target::kWordSize;
1981 // start_index smi is located at offset 0.
1982
1983 // Incoming registers:
1984 // T0: Function. (Will be reloaded with the specialized matcher function.)
1985 // S4: Arguments descriptor. (Will be preserved.)
1986 // S5: Unknown. (Must be GC safe on tail call.)
1987
1988 // Load the specialized function pointer into T0. Leverage the fact that the
1989 // string CIDs as well as the stored function pointers are in sequence.
1990 __ lx(T2, Address(SP, kRegExpParamOffset));
1991 __ lx(T1, Address(SP, kStringParamOffset));
1992 __ LoadClassId(T1, T1);
1993 __ AddImmediate(T1, -kOneByteStringCid);
1994 __ slli(T1, T1, target::kWordSizeLog2);
1995 __ add(T1, T1, T2);
1996 __ lx(FUNCTION_REG, FieldAddress(T1, target::RegExp::function_offset(
1997 kOneByteStringCid, sticky)));
1998
1999 // Registers are now set up for the lazy compile stub. It expects the function
2000 // in T0, the argument descriptor in S4, and IC-Data in S5.
2001 __ li(S5, 0);
2002
2003 // Tail-call the function.
2004 __ lx(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
2005 __ lx(T1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
2006 __ jr(T1);
2007}
2008
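// The dispatch above relies on the string class ids being consecutive and the
// RegExp object storing one specialized matcher per CID in the same order, so
// the LoadClassId + AddImmediate + slli + add + lx sequence reduces to a
// single scaled indexed load. A hypothetical C++ mirror of that layout (the
// struct, names, and entry count are assumptions, not the VM's object layout):

#include <cstdint>

using MatcherFn = void (*)();

struct RegExpSketch {
  MatcherFn functions[4];  // one entry per string CID, in CID order (assumed)
};

inline MatcherFn SelectMatcherSketch(const RegExpSketch* re, intptr_t cid,
                                     intptr_t one_byte_string_cid) {
  return re->functions[cid - one_byte_string_cid];
}
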
2009void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
2010 Label* normal_ir_body) {
2011 __ LoadIsolate(A0);
2012 __ lx(A0, Address(A0, target::Isolate::default_tag_offset()));
2013 __ ret();
2014}
2015
2016void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
2017 Label* normal_ir_body) {
2018 __ LoadIsolate(A0);
2019 __ lx(A0, Address(A0, target::Isolate::current_tag_offset()));
2020 __ ret();
2021}
2022
2023void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
2024 Label* normal_ir_body) {
2025#if !defined(SUPPORT_TIMELINE)
2026 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2027 __ ret();
2028#else
2029 Label true_label;
2030 // Load TimelineStream*.
2031 __ lx(A0, Address(THR, target::Thread::dart_stream_offset()));
2032 // Load uintptr_t from TimelineStream*.
2033 __ lx(A0, Address(A0, target::TimelineStream::enabled_offset()));
2034 __ bnez(A0, &true_label, Assembler::kNearJump);
2035 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2036 __ ret();
2037 __ Bind(&true_label);
2038 __ LoadObject(A0, CastHandle<Object>(TrueObject()));
2039 __ ret();
2040#endif
2041}
2042
2043void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
2044 Label* normal_ir_body) {
2045#if !defined(SUPPORT_TIMELINE)
2046 __ LoadImmediate(A0, target::ToRawSmi(0));
2047 __ ret();
2048#elif XLEN == 64
2049 __ ld(A0, Address(THR, target::Thread::next_task_id_offset()));
2050 __ addi(A1, A0, 1);
2051 __ sd(A1, Address(THR, target::Thread::next_task_id_offset()));
2052 __ SmiTag(A0); // Ignore loss of precision.
2053 __ ret();
2054#else
2055 __ lw(T0, Address(THR, target::Thread::next_task_id_offset()));
2056 __ lw(T1, Address(THR, target::Thread::next_task_id_offset() + 4));
2057 __ SmiTag(A0, T0); // Ignore loss of precision.
2058 __ addi(T2, T0, 1);
2059 __ sltu(T3, T2, T0); // Carry.
2060 __ add(T1, T1, T3);
2061 __ sw(T2, Address(THR, target::Thread::next_task_id_offset()));
2062 __ sw(T1, Address(THR, target::Thread::next_task_id_offset() + 4));
2063 __ ret();
2064#endif
2065}
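// On RV32 (XLEN == 32) the 64-bit task id lives in two words; the carry out
// of the low-word increment is recovered with sltu (the sum is below an
// addend iff the add wrapped) and folded into the high word. Equivalent C++,
// as a sketch of the XLEN == 32 path above:

#include <cstdint>

// 64-bit post-increment assembled from 32-bit halves.
inline uint64_t NextTaskIdSketch(uint32_t* lo, uint32_t* hi) {
  uint64_t old_id = (uint64_t{*hi} << 32) | *lo;
  uint32_t new_lo = *lo + 1;
  uint32_t carry = new_lo < *lo ? 1u : 0u;  // sltu: wrapped iff new < old
  *hi += carry;
  *lo = new_lo;
  return old_id;
}
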
2066
2067#undef __
2068
2069} // namespace compiler
2070} // namespace dart
2071
2072 #endif // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)