asm_intrinsifier_riscv.cc

// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_RISCV.
#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// S4: Arguments descriptor
// RA: Return address
// The S4 and CODE_REG registers can be destroyed only if there is no slow-path,
// i.e. if the intrinsified method always executes a return.
// The FP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_riscv.h) must be preserved.

#define __ assembler->

// Loads args from stack into A0 and A1.
// Tests if they are smis, jumps to label not_smi if not.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ lx(A0, Address(SP, +1 * target::kWordSize));
  __ lx(A1, Address(SP, +0 * target::kWordSize));
  __ or_(TMP, A0, A1);
  __ BranchIfNotSmi(TMP, not_smi, Assembler::kNearJump);
}
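
// Smis carry a 0 tag bit in the LSB, so OR-ing both values and testing the
// combined low bit checks both arguments with one branch. Roughly (a sketch,
// using the VM's kSmiTagMask == 1):
//
//   bool both_smis(uword a, uword b) {
//     return ((a | b) & kSmiTagMask) == 0;
//   }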

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  const Register left = A0;
  const Register right = A1;
  const Register result = A0;

  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareImmediate(right, target::ToRawSmi(target::kSmiBits),
                      kObjectBytes);
  __ BranchIf(CS, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(right);
  __ sll(TMP, left, right);
  __ sra(TMP2, TMP, right);
  __ bne(TMP2, left, normal_ir_body, Assembler::kNearJump);
  __ mv(result, TMP);
  __ ret();

  __ Bind(normal_ir_body);
}
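
// The overflow check above shifts back and compares: if the arithmetic right
// shift does not restore the original value, significant bits were lost and
// the slow path is taken. In effect (a sketch, ignoring Smi tags):
//
//   intptr_t shifted = left << right;
//   if ((shifted >> right) != left) goto slow_path;  // Overflowed.
//   return shifted;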

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  __ CompareObjectRegisters(A0, A1);
  __ BranchIf(true_condition, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}
73
74void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
75 Label* normal_ir_body) {
76 CompareIntegers(assembler, normal_ir_body, LT);
77}
78
79void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
80 Label* normal_ir_body) {
81 CompareIntegers(assembler, normal_ir_body, GT);
82}
83
84void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
85 Label* normal_ir_body) {
86 CompareIntegers(assembler, normal_ir_body, LE);
87}
88
89void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
90 Label* normal_ir_body) {
91 CompareIntegers(assembler, normal_ir_body, GE);
92}
93
// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  // For integer receiver '===' check first.
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ CompareObjectRegisters(A0, A1);
  __ BranchIf(EQ, &true_label, Assembler::kNearJump);

  __ or_(TMP, A0, A1);
  __ BranchIfNotSmi(TMP, &check_for_mint, Assembler::kNearJump);
  // If A0 or A1 is not a smi do Mint checks.

  // Both arguments are smi, '===' is good enough.
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);

  __ BranchIfNotSmi(A0, &receiver_not_smi,
                    Assembler::kNearJump);  // Check receiver.

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.

  __ CompareClassId(A1, kDoubleCid, TMP);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);
  __ LoadObject(A0,
                CastHandle<Object>(FalseObject()));  // Smi == Mint -> false.
  __ ret();

  __ Bind(&receiver_not_smi);
  // A0: receiver.

  __ CompareClassId(A0, kMintCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  // Receiver is Mint, return false if right is Smi.
  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ SmiUntag(A0);

  // XOR with sign bit to complement bits if value is negative.
  __ srai(A1, A0, XLEN - 1);
  __ xor_(A0, A0, A1);

  __ CountLeadingZeroes(A0, A0);

  __ li(TMP, XLEN);
  __ sub(A0, TMP, A0);
  __ SmiTag(A0);
  __ ret();
}
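
// In effect (a sketch): for a word-sized value x,
//
//   int bit_length(intptr_t x) {
//     if (x < 0) x = ~x;     // The srai/xor pair above.
//     return XLEN - clz(x);  // e.g. bit_length(5) == 3, bit_length(-1) == 0.
//   }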

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List src_digits, int src_used,
  //                  int shift_amount,
  //                  Uint32List result_digits)

  Label loop, done;
  __ lx(T0, Address(SP, 3 * target::kWordSize));  // src_digits
  __ lx(T1, Address(SP, 2 * target::kWordSize));  // src_used
  __ lx(T2, Address(SP, 1 * target::kWordSize));  // shift_amount
  __ lx(T3, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
#endif
  __ SmiUntag(T2);

  __ srai(T4, T2, target::kBitsPerWordLog2);  // T4 = word shift
  __ andi(T5, T2, target::kBitsPerWord - 1);  // T5 = bit shift
  __ li(T6, target::kBitsPerWord);
  __ sub(T6, T6, T5);  // T6 = carry bit shift

  __ slli(TMP, T1, target::kWordSizeLog2);
  __ add(T0, T0, TMP);
  __ subi(T0, T0, target::kWordSize);  // T0 = &src_digits[src_used - 1]

  __ add(TMP, T1, T4);
  __ slli(TMP, TMP, target::kWordSizeLog2);
  __ add(T3, T3, TMP);  // T3 = &dst_digits[src_used + word_shift]

  __ li(T2, 0);  // carry

  __ Bind(&loop);
  __ beqz(T1, &done, Assembler::kNearJump);
  __ lx(TMP, FieldAddress(T0, target::TypedData::payload_offset()));
  __ srl(TMP2, TMP, T6);
  __ or_(TMP2, TMP2, T2);
  __ sx(TMP2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ sll(T2, TMP, T5);
  __ subi(T0, T0, target::kWordSize);
  __ subi(T3, T3, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&loop);

  __ Bind(&done);
  __ sx(T2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ LoadObject(A0, NullObject());
  __ ret();
}
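
// Per-word sketch of the loop above (it walks src_digits from the top word
// down, carrying the bits shifted out of each word into the next store):
//
//   carry = 0;
//   for (i = src_used - 1; i >= 0; i--) {
//     w = src[i];
//     dst[i + word_shift + 1] = (w >> carry_shift) | carry;
//     carry = w << bit_shift;
//   }
//   dst[word_shift] = carry;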

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List src_digits, int src_used,
  //                  int shift_amount,
  //                  Uint32List result_digits)

  Label loop, done;
  __ lx(T0, Address(SP, 3 * target::kWordSize));  // src_digits
  __ lx(T1, Address(SP, 2 * target::kWordSize));  // src_used
  __ lx(T2, Address(SP, 1 * target::kWordSize));  // shift_amount
  __ lx(T3, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
#endif
  __ SmiUntag(T2);

  __ srai(T4, T2, target::kBitsPerWordLog2);  // T4 = word shift
  __ andi(T5, T2, target::kBitsPerWord - 1);  // T5 = bit shift
  __ li(T6, target::kBitsPerWord);
  __ sub(T6, T6, T5);  // T6 = carry bit shift
  __ sub(T1, T1, T4);  // T1 = words to process

  __ slli(TMP, T4, target::kWordSizeLog2);
  __ add(T0, T0, TMP);  // T0 = &src_digits[word_shift]

  // T2 = carry
  __ lx(T2, FieldAddress(T0, target::TypedData::payload_offset()));
  __ srl(T2, T2, T5);
  __ addi(T0, T0, target::kWordSize);
  __ subi(T1, T1, 1);

  __ Bind(&loop);
  __ beqz(T1, &done, Assembler::kNearJump);
  __ lx(TMP, FieldAddress(T0, target::TypedData::payload_offset()));
  __ sll(TMP2, TMP, T6);
  __ or_(TMP2, TMP2, T2);
  __ sx(TMP2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ srl(T2, TMP, T5);
  __ addi(T0, T0, target::kWordSize);
  __ addi(T3, T3, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&loop);

  __ Bind(&done);
  __ sx(T2, FieldAddress(T3, target::TypedData::payload_offset()));
  __ LoadObject(A0, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List longer_digits, int longer_used,
  //                     Uint32List shorter_digits, int shorter_used,
  //                     Uint32List result_digits)

  Label first_loop, second_loop, last_carry, done;
  __ lx(T0, Address(SP, 4 * target::kWordSize));  // longer_digits
  __ lx(T1, Address(SP, 3 * target::kWordSize));  // longer_used
  __ lx(T2, Address(SP, 2 * target::kWordSize));  // shorter_digits
  __ lx(T3, Address(SP, 1 * target::kWordSize));  // shorter_used
  __ lx(T4, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
  __ SmiUntag(T3);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
  __ addi(T3, T3, target::ToRawSmi(1));  // Round up to even
  __ srai(T3, T3, kSmiTagSize + 1);
#endif
  __ li(T5, 0);  // Carry

  __ Bind(&first_loop);
  __ beqz(T3, &second_loop);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ lx(A1, FieldAddress(T2, target::TypedData::payload_offset()));
  __ add(A0, A0, A1);
  __ sltu(TMP, A0, A1);  // Carry
  __ add(A0, A0, T5);
  __ sltu(TMP2, A0, T5);  // Carry
  __ add(T5, TMP, TMP2);
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T2, T2, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ subi(T3, T3, 1);
  __ j(&first_loop);

  __ Bind(&second_loop);
  __ beqz(T1, &last_carry);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ add(TMP, A0, T5);
  __ sltu(T5, TMP, A0);  // Carry
  __ sx(TMP, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&second_loop);

  __ Bind(&last_carry);
  __ beqz(T5, &done);
  __ sx(T5, FieldAddress(T4, target::TypedData::payload_offset()));

  __ Bind(&done);
  __ LoadObject(A0, NullObject());
  __ ret();
}
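
// RISC-V has no carry flag, so the loops above recover each carry with sltu:
// an unsigned sum is smaller than an addend exactly when it wrapped. In
// effect, for one word:
//
//   sum = a + b;
//   carry = (sum < b);          // Overflow of a + b.
//   sum += carry_in;
//   carry += (sum < carry_in);  // Overflow of adding the carry.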

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List longer_digits, int longer_used,
  //                     Uint32List shorter_digits, int shorter_used,
  //                     Uint32List result_digits)
  Label first_loop, second_loop, last_borrow, done;
  __ lx(T0, Address(SP, 4 * target::kWordSize));  // longer_digits
  __ lx(T1, Address(SP, 3 * target::kWordSize));  // longer_used
  __ lx(T2, Address(SP, 2 * target::kWordSize));  // shorter_digits
  __ lx(T3, Address(SP, 1 * target::kWordSize));  // shorter_used
  __ lx(T4, Address(SP, 0 * target::kWordSize));  // result_digits

#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T1);
  __ SmiUntag(T3);
#else
  // 1 word = 2 digits
  __ addi(T1, T1, target::ToRawSmi(1));  // Round up to even
  __ srai(T1, T1, kSmiTagSize + 1);
  __ addi(T3, T3, target::ToRawSmi(1));  // Round up to even
  __ srai(T3, T3, kSmiTagSize + 1);
#endif
  __ li(T5, 0);  // Borrow

  __ Bind(&first_loop);
  __ beqz(T3, &second_loop);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ lx(A1, FieldAddress(T2, target::TypedData::payload_offset()));
  __ sltu(TMP, A0, A1);  // Borrow
  __ sub(A0, A0, A1);
  __ sltu(TMP2, A0, T5);  // Borrow
  __ sub(A0, A0, T5);
  __ add(T5, TMP, TMP2);
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T2, T2, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ subi(T3, T3, 1);
  __ j(&first_loop);

  __ Bind(&second_loop);
  __ beqz(T1, &last_borrow);
  __ lx(A0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ sltu(TMP, A0, T5);  // Borrow
  __ sub(A0, A0, T5);
  __ mv(T5, TMP);
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T0, T0, target::kWordSize);
  __ addi(T4, T4, target::kWordSize);
  __ subi(T1, T1, 1);
  __ j(&second_loop);

  __ Bind(&last_borrow);
  __ beqz(T5, &done);
  __ neg(T5, T5);
  __ sx(T5, FieldAddress(T4, target::TypedData::payload_offset()));

  __ Bind(&done);
  __ LoadObject(A0, NullObject());
  __ ret();
}
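
// Borrows mirror the carries in _absAdd: sltu before each subtract records
// whether the subtrahend exceeds the minuend. In effect, for one word:
//
//   borrow = (a < b);
//   diff = a - b;
//   borrow += (diff < borrow_in);
//   diff -= borrow_in;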

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  __ lx(T0, Address(SP, 6 * target::kWordSize));  // x_digits
  __ lx(T1, Address(SP, 5 * target::kWordSize));  // xi
  __ lx(T2, Address(SP, 4 * target::kWordSize));  // m_digits
  __ lx(T3, Address(SP, 3 * target::kWordSize));  // i
  __ lx(T4, Address(SP, 2 * target::kWordSize));  // a_digits
  __ lx(T5, Address(SP, 1 * target::kWordSize));  // j
  __ lx(T6, Address(SP, 0 * target::kWordSize));  // n

  // T0 = x, no_op if x == 0
  // T1 = xi as Smi, T0 = x_digits.
  __ slli(T1, T1, 1);
  __ add(T0, T0, T1);
  __ lx(T0, FieldAddress(T0, target::TypedData::payload_offset()));
  __ beqz(T0, &done);

  // T6 = (SmiUntag(n) + 1)/2, no_op if n == 0
#if XLEN == 32
  // 1 word = 1 digit
  __ SmiUntag(T6);
#else
  // 1 word = 2 digits
  __ addi(T6, T6, target::ToRawSmi(1));
  __ srai(T6, T6, 2);
#endif
  __ beqz(T6, &done);

  // T2 = mip = &m_digits[i >> 1]
  // T3 = i as Smi, T2 = m_digits.
  __ slli(T3, T3, 1);
  __ add(T2, T2, T3);

  // T4 = ajp = &a_digits[j >> 1]
  // T5 = j as Smi, T4 = a_digits.
  __ slli(T5, T5, 1);
  __ add(T4, T4, T5);

  // T1 = c = 0
  __ li(T1, 0);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x: T0
  // mip: T2
  // ajp: T4
  // c: T1
  // n: T6
  // t: A7:A6 (not live at loop entry)

  // uint64_t mi = *mip++
  __ lx(A0, FieldAddress(T2, target::TypedData::payload_offset()));
  __ addi(T2, T2, target::kWordSize);

  // uint64_t aj = *ajp
  __ lx(A1, FieldAddress(T4, target::TypedData::payload_offset()));

  // uint128_t t = x*mi + aj + c
  // Macro-op fusion: when both products are required, the recommended sequence
  // is high first.
  __ mulhu(A7, A0, T0);  // A7 = high64(A0*T0), t = A7:A6 = x*mi.
  __ mul(A6, A0, T0);    // A6 = low64(A0*T0).

  __ add(A6, A6, A1);
  __ sltu(TMP, A6, A1);  // Carry
  __ add(A7, A7, TMP);   // t += aj

  __ add(A6, A6, T1);
  __ sltu(TMP, A6, T1);  // Carry
  __ add(A7, A7, TMP);   // t += c

  __ mv(T1, A7);  // c = high64(t)

  // *ajp++ = low64(t) = A6
  __ sx(A6, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);

  // while (--n > 0)
  __ subi(T6, T6, 1);  // --n
  __ bnez(T6, &muladd_loop);

  __ beqz(T1, &done);

  // *ajp++ += c
  __ lx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ add(A0, A0, T1);
  __ sltu(T1, A0, T1);  // Carry
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);
  __ beqz(T1, &done);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ lx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ add(A0, A0, T1);
  __ sltu(T1, A0, T1);  // Carry
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset()));
  __ addi(T4, T4, target::kWordSize);
  __ bnez(T1, &propagate_carry_loop);

  __ Bind(&done);
  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // T2 = xip = &x_digits[i >> 1]
  // T0 = i as Smi, T1 = x_digits
  __ lx(T0, Address(SP, 2 * target::kWordSize));
  __ lx(T1, Address(SP, 3 * target::kWordSize));
  __ slli(TMP, T0, 1);
  __ add(T1, T1, TMP);
  __ addi(T2, T1, target::TypedData::payload_offset() - kHeapObjectTag);

  // T1 = x = *xip++, return if x == 0
  Label x_zero;
  __ lx(T1, Address(T2, 0));
  __ addi(T2, T2, target::kWordSize);
  __ beqz(T1, &x_zero);

  // T3 = ajp = &a_digits[i]
  __ lx(A1, Address(SP, 1 * target::kWordSize));  // a_digits
  __ slli(TMP, T0, 2);
  __ add(A1, A1, TMP);  // j == 2*i, i is Smi.
  __ addi(T3, A1, target::TypedData::payload_offset() - kHeapObjectTag);

  // T4:A1 = t = x*x + *ajp
  __ lx(A0, Address(T3, 0));
  __ mul(A1, T1, T1);    // A1 = low64(T1*T1).
  __ mulhu(T4, T1, T1);  // T4 = high64(T1*T1).
  __ add(A1, A1, A0);    // T4:A1 += *ajp.
  __ sltu(TMP, A1, A0);
  __ add(T4, T4, TMP);  // T4 = low64(c) = high64(t).
  __ li(T5, 0);         // T5 = high64(c) = 0.

  // *ajp++ = low64(t) = A1
  __ sx(A1, Address(T3, 0));
  __ addi(T3, T3, target::kWordSize);

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // used is Smi
#if XLEN == 32
  // int n = used - i - 2;
  __ sub(T6, A0, T0);
  __ SmiUntag(T6);
  __ subi(T6, T6, 2);
#else
  // int n = (used - i + 1)/2 - 1
  __ sub(T6, A0, T0);
  __ addi(T6, T6, 2);
  __ srai(T6, T6, 2);
  __ subi(T6, T6, 2);
#endif

  Label loop, done;
  __ bltz(T6, &done);  // while (--n >= 0)

  __ Bind(&loop);
  // x: T1
  // xip: T2
  // ajp: T3
  // c: T5:T4
  // t: T0:A1:A0 (not live at loop entry)
  // n: T6

  // uint64_t xi = *xip++
  __ lx(T0, Address(T2, 0));
  __ addi(T2, T2, target::kWordSize);

  // uint192_t t = T0:A1:A0 = 2*x*xi + aj + c
  __ mul(A0, T0, T1);    // A0 = low64(T0*T1) = low64(x*xi).
  __ mulhu(A1, T0, T1);  // A1 = high64(T0*T1) = high64(x*xi).

  __ mv(TMP, A0);
  __ add(A0, A0, A0);
  __ sltu(TMP, A0, TMP);
  __ mv(TMP2, A1);
  __ add(A1, A1, A1);
  __ sltu(TMP2, A1, TMP2);
  __ add(A1, A1, TMP);
  __ sltu(TMP, A1, TMP);
  __ add(T0, TMP, TMP2);  // T0:A1:A0 = A1:A0 + A1:A0 = 2*x*xi.

  __ add(A0, A0, T4);
  __ sltu(TMP, A0, T4);
  __ add(A1, A1, T5);
  __ sltu(TMP2, A1, T5);
  __ add(A1, A1, TMP);
  __ sltu(TMP, A1, TMP);
  __ add(T0, T0, TMP);
  __ add(T0, T0, TMP2);  // T0:A1:A0 += c.

  __ lx(T5, Address(T3, 0));  // T5 = aj = *ajp.
  __ add(A0, A0, T5);
  __ sltu(TMP, A0, T5);
  __ add(T4, A1, TMP);
  __ sltu(TMP, T4, A1);
  __ add(T5, T0, TMP);  // T5:T4:A0 = 2*x*xi + aj + c.

  // *ajp++ = low64(t) = A0
  __ sx(A0, Address(T3, 0));
  __ addi(T3, T3, target::kWordSize);

  // while (--n >= 0)
  __ subi(T6, T6, 1);  // --n
  __ bgez(T6, &loop);

  __ Bind(&done);
  // uint64_t aj = *ajp
  __ lx(A0, Address(T3, 0));

  // uint128_t t = aj + c
  __ add(T4, T4, A0);
  __ sltu(TMP, T4, A0);
  __ add(T5, T5, TMP);

  // *ajp = low64(t) = T4
  // *(ajp + 1) = high64(t) = T5
  __ sx(T4, Address(T3, 0));
  __ sx(T5, Address(T3, target::kWordSize));

  __ Bind(&x_zero);
  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // There is no 128-bit by 64-bit division instruction on RISC-V, so we use
  // two 64-bit by 32-bit divisions and two 64-bit by 64-bit multiplications
  // to adjust the two 32-bit digits of the estimated quotient.
  //
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     // We cannot calculate qd = dh:dl / yt, so ...
  //     uint64_t yth = yt >> 32;
  //     uint64_t qh = dh / yth;
  //     uint128_t ph:pl = yt*qh;
  //     uint64_t tl = (dh << 32)|(dl >> 32);
  //     uint64_t th = dh >> 32;
  //     while ((ph > th) || ((ph == th) && (pl > tl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --qh;
  //     }
  //     qd = qh << 32;
  //     tl = (pl << 32);
  //     th = (ph << 32)|(pl >> 32);
  //     if (tl > dl) ++th;
  //     dl -= tl;
  //     dh -= th;
  //     uint64_t ql = ((dh << 32)|(dl >> 32)) / yth;
  //     ph:pl = yt*ql;
  //     while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  //       if (pl < yt) --ph;
  //       pl -= yt;
  //       --ql;
  //     }
  //     qd |= ql;
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  __ lx(T4, Address(SP, 2 * target::kWordSize));  // args

#if XLEN == 32
  // T3 = yt = args[1]
  __ lx(T3, FieldAddress(T4, target::TypedData::payload_offset() +
                                 1 * kBytesPerBigIntDigit));
#else
  // T3 = yt = args[0..1]
  __ lx(T3, FieldAddress(T4, target::TypedData::payload_offset()));
#endif

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // A0 = i as Smi
  __ lx(T1, Address(SP, 1 * target::kWordSize));  // T1 = digits
  __ slli(TMP, A0, 1);
  __ add(T1, T1, TMP);
#if XLEN == 32
  // T2 = dh = digits[i >> 1]
  __ lx(T2, FieldAddress(T1, target::TypedData::payload_offset()));
#else
  // T2 = dh = digits[(i >> 1) - 1 .. i >> 1]
  __ lx(T2, FieldAddress(T1, target::TypedData::payload_offset() -
                                 2 * kBytesPerBigIntDigit));
#endif

  // A0 = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ li(A0, -1);

  // Return qd if dh == yt
  Label return_qd;
  __ beq(T2, T3, &return_qd);

#if XLEN == 32
  // T1 = dl = digits[(i >> 1) - 1]
  __ lx(T1, FieldAddress(T1, target::TypedData::payload_offset() -
                                 1 * kBytesPerBigIntDigit));
#else
  // T1 = dl = digits[(i >> 1) - 3 .. (i >> 1) - 2]
  __ lx(T1, FieldAddress(T1, target::TypedData::payload_offset() -
                                 4 * kBytesPerBigIntDigit));
#endif

  // T5 = yth = yt >> 32
  __ srli(T5, T3, target::kWordSize * 4);

  // T6 = qh = dh / yth
  __ divu(T6, T2, T5);

  // A6:A1 = ph:pl = yt*qh
  __ mulhu(A6, T3, T6);
  __ mul(A1, T3, T6);

  // A7 = tl = (dh << 32)|(dl >> 32)
  __ slli(A7, T2, target::kWordSize * 4);
  __ srli(TMP, T1, target::kWordSize * 4);
  __ or_(A7, A7, TMP);

  // S3 = th = dh >> 32
  __ srli(S3, T2, target::kWordSize * 4);

  // while ((ph > th) || ((ph == th) && (pl > tl)))
  Label qh_adj_loop, qh_adj, qh_ok;
  __ Bind(&qh_adj_loop);
  __ bgtu(A6, S3, &qh_adj);
  __ bne(A6, S3, &qh_ok);
  __ bleu(A1, A7, &qh_ok);

  __ Bind(&qh_adj);
  // if (pl < yt) --ph
  __ sltu(TMP, A1, T3);
  __ sub(A6, A6, TMP);

  // pl -= yt
  __ sub(A1, A1, T3);

  // --qh
  __ subi(T6, T6, 1);

  // Continue while loop.
  __ j(&qh_adj_loop);

  __ Bind(&qh_ok);
  // A0 = qd = qh << 32
  __ slli(A0, T6, target::kWordSize * 4);

  // tl = (pl << 32)
  __ slli(A7, A1, target::kWordSize * 4);

  // th = (ph << 32)|(pl >> 32);
  __ slli(S3, A6, target::kWordSize * 4);
  __ srli(TMP, A1, target::kWordSize * 4);
  __ or_(S3, S3, TMP);

  // if (tl > dl) ++th
  __ sltu(TMP, T1, A7);
  __ add(S3, S3, TMP);

  // dl -= tl
  __ sub(T1, T1, A7);

  // dh -= th
  __ sub(T2, T2, S3);

  // T6 = ql = ((dh << 32)|(dl >> 32)) / yth
  __ slli(T6, T2, target::kWordSize * 4);
  __ srli(TMP, T1, target::kWordSize * 4);
  __ or_(T6, T6, TMP);
  __ divu(T6, T6, T5);

  // A6:A1 = ph:pl = yt*ql
  __ mulhu(A6, T3, T6);
  __ mul(A1, T3, T6);

  // while ((ph > dh) || ((ph == dh) && (pl > dl))) {
  Label ql_adj_loop, ql_adj, ql_ok;
  __ Bind(&ql_adj_loop);
  __ bgtu(A6, T2, &ql_adj);
  __ bne(A6, T2, &ql_ok);
  __ bleu(A1, T1, &ql_ok);

  __ Bind(&ql_adj);
  // if (pl < yt) --ph
  __ sltu(TMP, A1, T3);
  __ sub(A6, A6, TMP);

  // pl -= yt
  __ sub(A1, A1, T3);

  // --ql
  __ subi(T6, T6, 1);

  // Continue while loop.
  __ j(&ql_adj_loop);

  __ Bind(&ql_ok);
  // qd |= ql;
  __ or_(A0, A0, T6);

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ sx(A0, FieldAddress(T4, target::TypedData::payload_offset() +
                                 2 * kBytesPerBigIntDigit));

  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  __ lx(T0, Address(SP, 2 * target::kWordSize));  // args
  __ lx(T1, Address(SP, 1 * target::kWordSize));  // digits
  __ lx(T2, Address(SP, 0 * target::kWordSize));  // i as Smi

  // T3 = rho = args[2..3]
  __ lx(T3, FieldAddress(T0, target::TypedData::payload_offset() +
                                 2 * kBytesPerBigIntDigit));

  // T4 = digits[i >> 1 .. (i >> 1) + 1]
  __ slli(T2, T2, 1);
  __ add(T1, T1, T2);
  __ lx(T4, FieldAddress(T1, target::TypedData::payload_offset()));

  // T5 = rho*d mod DIGIT_BASE
  __ mul(T5, T4, T3);  // T5 = low64(T4*T3).

  // args[4 .. 5] = T5
  __ sx(T5, FieldAddress(T0, target::TypedData::payload_offset() +
                                 4 * kBytesPerBigIntDigit));

  // Result = One or two digits processed.
  __ li(A0, target::ToRawSmi(target::kWordSize / kBytesPerBigIntDigit));
  __ ret();
}

// FA0: left
// FA1: right
static void PrepareDoubleOp(Assembler* assembler, Label* normal_ir_body) {
  Label double_op;
  __ lx(A0, Address(SP, 1 * target::kWordSize));  // Left
  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Right

  __ fld(FA0, FieldAddress(A0, target::Double::value_offset()));

  __ SmiUntag(TMP, A1);
#if XLEN == 32
  __ fcvtdw(FA1, TMP);
#else
  __ fcvtdl(FA1, TMP);
#endif
  __ BranchIfSmi(A1, &double_op, Assembler::kNearJump);
  __ CompareClassId(A1, kDoubleCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  __ fld(FA1, FieldAddress(A1, target::Double::value_offset()));

  __ Bind(&double_op);
}
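
// The right operand is speculatively converted as if it were a Smi; when it
// turns out to be a heap Double the converted value is simply overwritten,
// and any other class id falls back to normal IR. In effect (a sketch):
//
//   double rhs = (double)untagged(a1);  // Harmless garbage if not a Smi.
//   if (!is_smi(a1)) {
//     if (class_id(a1) != kDoubleCid) goto slow_path;
//     rhs = unbox_double(a1);
//   }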

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fltd(TMP, FA1, FA0);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fled(TMP, FA1, FA0);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fltd(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ feqd(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label true_label;
  PrepareDoubleOp(assembler, normal_ir_body);
  __ fled(TMP, FA0, FA1);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  PrepareDoubleOp(assembler, normal_ir_body);
  switch (kind) {
    case Token::kADD:
      __ faddd(FA0, FA0, FA1);
      break;
    case Token::kSUB:
      __ fsubd(FA0, FA0, FA1);
      break;
    case Token::kMUL:
      __ fmuld(FA0, FA0, FA1);
      break;
    case Token::kDIV:
      __ fdivd(FA0, FA0, FA1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump, A0, TMP);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

// Left is double, right is integer (Mint or Smi)
void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(A1, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(A1);
#if XLEN == 32
  __ fcvtdw(FA1, A1);
#else
  __ fcvtdl(FA1, A1);
#endif
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
  __ fmuld(FA0, FA0, FA1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, A0, A1);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ BranchIfNotSmi(A0, normal_ir_body, Assembler::kNearJump);
  // Is Smi.
  __ SmiUntag(A0);
#if XLEN == 32
  __ fcvtdw(FA0, A0);
#else
  __ fcvtdl(FA0, A0);
#endif
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kNearJump, A0, TMP);
  __ StoreDFieldToOffset(FA0, A0, target::Double::value_offset());
  __ ret();
  __ Bind(normal_ir_body);
}

static void DoubleIsClass(Assembler* assembler, intx_t fclass) {
  Label true_label;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadDFieldFromOffset(FA0, A0, target::Double::value_offset());
  __ fclassd(TMP, FA0);
  __ andi(TMP, TMP, fclass);
  __ bnez(TMP, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}
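
// fclass.d sets exactly one bit describing its operand, so each predicate is
// a single AND against a mask of acceptable classes. In effect:
//
//   bool double_is_class(double d, int mask) {
//     return (fclassd(d) & mask) != 0;  // e.g. NegInfinity | PosInfinity.
//   }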

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassSignallingNan | kFClassQuietNan);
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassNegInfinity | kFClassPosInfinity);
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  DoubleIsClass(assembler, kFClassNegInfinity | kFClassNegNormal |
                               kFClassNegSubnormal | kFClassNegZero);
}

void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label true_label;
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ beq(A0, A1, &true_label, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}

static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfInRange,
                        target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Register tmp,
                             Label* target) {
  assembler->RangeCheck(cid, tmp, kSmiCid, kMintCid, Assembler::kIfNotInRange,
                        target);
}

static void JumpIfString(Assembler* assembler,
                         Register cid,
                         Register tmp,
                         Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler,
                            Register cid,
                            Register tmp,
                            Label* target) {
  assembler->RangeCheck(cid, tmp, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  assembler->RangeCheck(cid, tmp, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler,
                       Register cid,
                       Register tmp,
                       Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler,
                          Register cid,
                          Register tmp,
                          Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, tmp, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_double, not_integer, not_string;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(A1, A0);

  __ CompareImmediate(A1, kClosureCid);
  __ BranchIf(EQ, normal_ir_body,
              Assembler::kNearJump);  // Instance is a closure.

  __ CompareImmediate(A1, kRecordCid);
  __ BranchIf(EQ, normal_ir_body,
              Assembler::kNearJump);  // Instance is a record.

  __ CompareImmediate(A1, kNumPredefinedCids);
  __ BranchIf(HI, &use_declaration_type, Assembler::kNearJump);

  __ LoadIsolateGroup(A0);
  __ LoadFromOffset(A0, A0, target::IsolateGroup::object_store_offset());

  __ CompareImmediate(A1, kDoubleCid);
  __ BranchIf(NE, &not_double, Assembler::kNearJump);
  __ LoadFromOffset(A0, A0, target::ObjectStore::double_type_offset());
  __ ret();

  __ Bind(&not_double);
  JumpIfNotInteger(assembler, A1, TMP, &not_integer);
  __ LoadFromOffset(A0, A0, target::ObjectStore::int_type_offset());
  __ ret();

  __ Bind(&not_integer);
  JumpIfNotString(assembler, A1, TMP, &not_string);
  __ LoadFromOffset(A0, A0, target::ObjectStore::string_type_offset());
  __ ret();

  __ Bind(&not_string);
  JumpIfNotType(assembler, A1, TMP, &use_declaration_type);
  __ LoadFromOffset(A0, A0, target::ObjectStore::type_type_offset());
  __ ret();

  __ Bind(&use_declaration_type);
  __ LoadClassById(T2, A1);
  __ lh(T3, FieldAddress(T2, target::Class::num_type_arguments_offset()));
  __ bnez(T3, normal_ir_body, Assembler::kNearJump);

  __ LoadCompressed(
      A0, FieldAddress(T2, target::Class::declaration_type_offset()));
  __ beq(A0, NULL_REG, normal_ir_body, Assembler::kNearJump);
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or not_equal.
// If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ CompareImmediate(cid1, kClosureCid);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ CompareImmediate(cid1, kRecordCid);
  __ BranchIf(EQ, normal_ir_body, Assembler::kNearJump);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to a
  // single String type).
  __ beq(cid1, cid2, equal_may_be_generic);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ CompareImmediate(cid1, kNumPredefinedCids);
  __ BranchIf(HI, not_equal);

  // Check if both are integer types.
  JumpIfNotInteger(assembler, cid1, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  JumpIfInteger(assembler, cid2, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ j(not_equal, Assembler::kNearJump);

  __ Bind(&not_integer);
  // Check if both are String types.
  JumpIfNotString(assembler, cid1, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is String. Check if the second is a string too.
  JumpIfString(assembler, cid2, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ j(not_equal, Assembler::kNearJump);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    JumpIfNotList(assembler, cid1, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    JumpIfNotList(assembler, cid2, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ j(equal_may_be_generic, Assembler::kNearJump);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not then types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    JumpIfNotType(assembler, cid1, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    JumpIfType(assembler, cid2, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ j(not_equal, Assembler::kNearJump);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ LoadClassIdMayBeSmi(T2, A1);
  __ LoadClassIdMayBeSmi(A1, A0);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, A1, T2, TMP,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(A0, A1);
  __ lw(T0,
        FieldAddress(
            A0,
            target::Class::host_type_arguments_field_offset_in_words_offset()));
  __ CompareImmediate(T0, target::Class::kNoTypeArguments);
  __ BranchIf(EQ, &equal, Assembler::kNearJump);

  // Compare type arguments, host_type_arguments_field_offset_in_words in T0.
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ slli(T0, T0, target::kWordSizeLog2);
  __ add(A0, A0, T0);
  __ add(A1, A1, T0);
  __ lx(A0, FieldAddress(A0, 0));
  __ lx(A1, FieldAddress(A1, 0));
  __ bne(A0, A1, normal_ir_body, Assembler::kNearJump);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ Ret();

  __ Bind(&not_equal);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));

#if defined(HASH_IN_OBJECT_HEADER)
  // uint32_t field in header.
  __ lwu(A0, FieldAddress(A0, target::String::hash_offset()));
  __ SmiTag(A0);
#else
  // Smi field.
  __ lx(A0, FieldAddress(A0, target::String::hash_offset()));
#endif
  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
  __ ret();

  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids;

  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ beq(A1, A0, &equal);

  // A1 might not be a Type object, so check that first (A0 should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(T3, A1);
  __ CompareImmediate(T3, kTypeCid);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(T3, A1);
  __ LoadTypeClassId(T4, A0);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, T3, T4, TMP,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ LoadCompressed(T3, FieldAddress(A1, target::Type::arguments_offset()));
  __ LoadCompressed(T4, FieldAddress(A0, target::Type::arguments_offset()));
  __ CompareObjectRegisters(T3, T4);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(A0, A0);
  __ LoadAbstractTypeNullability(A1, A1);
  __ bne(A0, A1, &not_equal);
  // Fall through to equal case if nullability is equal.

  __ Bind(&equal);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ LoadCompressed(A0, FieldAddress(A0, target::AbstractType::hash_offset()));
  __ beqz(A0, normal_ir_body, Assembler::kNearJump);
  __ ret();
  // Hash not yet computed.
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ lx(A0, Address(SP, 1 * target::kWordSize));
  __ lx(A1, Address(SP, 0 * target::kWordSize));
  __ bne(A0, A1, normal_ir_body, Assembler::kNearJump);

  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Keep in sync with Instance::IdentityHashCode.
// Note that int and double never reach here because they override
// _identityHashCode. Special cases are also not needed for null or bool
// because they were pre-set during VM isolate finalization.
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
#if XLEN == 32
  UNREACHABLE();
#else
  Label not_yet_computed;
  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object.
  __ lwu(A0, FieldAddress(
                 A0, target::Object::tags_offset() +
                         target::UntaggedObject::kHashTagPos / kBitsPerByte));
  __ beqz(A0, &not_yet_computed);
  __ SmiTag(A0);
  __ ret();

  __ Bind(&not_yet_computed);
  __ LoadFromOffset(A1, THR, target::Thread::random_offset());
  __ AndImmediate(T2, A1, 0xffffffff);  // state_lo
  __ srli(T3, A1, 32);                  // state_hi
  __ LoadImmediate(A1, 0xffffda61);     // A
  __ mul(A1, A1, T2);
  __ add(A1, A1, T3);  // new_state = (A * state_lo) + state_hi
  __ StoreToOffset(A1, THR, target::Thread::random_offset());
  __ AndImmediate(A1, A1, 0x3fffffff);
  __ beqz(A1, &not_yet_computed);

  __ lx(A0, Address(SP, 0 * target::kWordSize));  // Object
  __ subi(A0, A0, kHeapObjectTag);
  __ slli(T3, A1, target::UntaggedObject::kHashTagPos);

  Label retry, already_set_in_r4;
  __ Bind(&retry);
  __ lr(T2, Address(A0, 0));
  __ srli(T4, T2, target::UntaggedObject::kHashTagPos);
  __ bnez(T4, &already_set_in_r4);
  __ or_(T2, T2, T3);
  __ sc(T4, T2, Address(A0, 0));
  __ bnez(T4, &retry);
  // Fall-through with A1 containing new hash value (untagged).
  __ SmiTag(A0, A1);
  __ ret();
  __ Bind(&already_set_in_r4);
  __ SmiTag(A0, T4);
  __ ret();
#endif
}
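
// The header update above is a lr/sc retry loop: a candidate hash comes from
// the thread-local RNG and is installed only if no other thread won the race.
// In effect (a sketch):
//
//   hash = next_random() & 0x3fffffff;  // Retried until non-zero.
//   retry:
//     tags = load_reserved(&obj->tags);
//     if (hash_bits(tags) != 0) return hash_bits(tags);  // Lost the race.
//     if (!store_conditional(&obj->tags, tags | (hash << kHashTagPos)))
//       goto retry;
//   return hash;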

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(T0);
  __ LoadCompressedSmi(
      T1, FieldAddress(A0, target::String::length_offset()));  // this.length
  __ SmiUntag(T1);
  __ LoadCompressedSmi(
      T2, FieldAddress(A1, target::String::length_offset()));  // other.length
  __ SmiUntag(T2);

  // if (other.length == 0) return true;
  __ beqz(T2, return_true);

  // if (start < 0) return false;
  __ bltz(T0, return_false);

  // if (start + other.length > this.length) return false;
  __ add(T3, T0, T2);
  __ bgt(T3, T1, return_false);

  if (receiver_cid == kOneByteStringCid) {
    __ add(A0, A0, T0);
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ add(A0, A0, T0);
    __ add(A0, A0, T0);
  }

  // i = 0
  __ li(T3, 0);

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  if (receiver_cid == kOneByteStringCid) {
    __ lbu(TMP, FieldAddress(A0, target::OneByteString::data_offset()));
  } else {
    __ lhu(TMP, FieldAddress(A0, target::TwoByteString::data_offset()));
  }
  // other.codeUnitAt(i)
  if (other_cid == kOneByteStringCid) {
    __ lbu(TMP2, FieldAddress(A1, target::OneByteString::data_offset()));
  } else {
    __ lhu(TMP2, FieldAddress(A1, target::TwoByteString::data_offset()));
  }
  __ bne(TMP, TMP2, return_false);

  // i++, while (i < len)
  __ addi(T3, T3, 1);
  __ addi(A0, A0, receiver_cid == kOneByteStringCid ? 1 : 2);
  __ addi(A1, A1, other_cid == kOneByteStringCid ? 1 : 2);
  __ blt(T3, T2, &loop);

  __ j(return_true);
}
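
// In effect (a sketch; the code advances A0/A1 instead of indexing):
//
//   if (other.length == 0) return true;
//   if (start < 0 || start + other.length > this.length) return false;
//   for (i = 0; i < other.length; i++) {
//     if (this.codeUnitAt(start + i) != other.codeUnitAt(i)) return false;
//   }
//   return true;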

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ lx(A0, Address(SP, 2 * target::kWordSize));  // this
  __ lx(T0, Address(SP, 1 * target::kWordSize));  // start
  __ lx(A1, Address(SP, 0 * target::kWordSize));  // other

  __ BranchIfNotSmi(T0, normal_ir_body);

  __ CompareClassId(A1, kOneByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  __ CompareClassId(A0, kOneByteStringCid, TMP);
  __ BranchIf(NE, &try_two_byte, Assembler::kNearJump);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(A0, kTwoByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;

  __ lx(A1, Address(SP, 0 * target::kWordSize));  // Index.
  __ lx(A0, Address(SP, 1 * target::kWordSize));  // String.
  __ BranchIfNotSmi(A1, normal_ir_body,
                    Assembler::kNearJump);  // Index is not a Smi.
  // Range check.
  __ lx(TMP, FieldAddress(A0, target::String::length_offset()));
  __ bgeu(A1, TMP, normal_ir_body);  // Runtime throws exception.

  __ CompareClassId(A0, kOneByteStringCid, TMP);
  __ BranchIf(NE, &try_two_byte_string);
  __ SmiUntag(A1);
  __ add(A0, A0, A1);
  __ lbu(A1, FieldAddress(A0, target::OneByteString::data_offset()));
  __ CompareImmediate(A1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ BranchIf(GE, normal_ir_body, Assembler::kNearJump);
  __ lx(A0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ slli(A1, A1, target::kWordSizeLog2);
  __ add(A0, A0, A1);
  __ lx(A0, Address(A0, target::Symbols::kNullCharCodeSymbolOffset *
                            target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(A0, kTwoByteStringCid, TMP);
  __ BranchIf(NE, normal_ir_body, Assembler::kNearJump);
  ASSERT(kSmiTagShift == 1);
  __ add(A0, A0, A1);
  __ lhu(A1, FieldAddress(A0, target::TwoByteString::data_offset()));
  __ CompareImmediate(A1, target::Symbols::kNumberOfOneCharCodeSymbols);
  __ BranchIf(GE, normal_ir_body, Assembler::kNearJump);
  __ lx(A0, Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ slli(A1, A1, target::kWordSizeLog2);
  __ add(A0, A0, A1);
  __ lx(A0, Address(A0, target::Symbols::kNullCharCodeSymbolOffset *
                            target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label is_true;
  __ lx(A0, Address(SP, 0 * target::kWordSize));
  __ lx(A0, FieldAddress(A0, target::String::length_offset()));
  __ beqz(A0, &is_true, Assembler::kNearJump);
  __ LoadObject(A0, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(A0, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label compute_hash;
  __ lx(A1, Address(SP, 0 * target::kWordSize));  // OneByteString object.
#if defined(HASH_IN_OBJECT_HEADER)
  // uint32_t field in header.
  __ lwu(A0, FieldAddress(A1, target::String::hash_offset()));
  __ SmiTag(A0);
#else
  // Smi field.
  __ lx(A0, FieldAddress(A1, target::String::hash_offset()));
#endif
  __ beqz(A0, &compute_hash);
  __ ret();  // Return if already computed.

  __ Bind(&compute_hash);
  __ lx(T0, FieldAddress(A1, target::String::length_offset()));
  __ SmiUntag(T0);

  __ mv(T1, ZR);
  __ addi(T2, A1, target::OneByteString::data_offset() - kHeapObjectTag);

  // A1: Instance of OneByteString.
  // T0: String length, untagged integer.
  // T1: Loop counter, untagged integer.
  // T2: String data.
  // A0: Hash code, untagged integer.

  Label loop, done;
  __ Bind(&loop);
  __ beq(T1, T0, &done);
  // Add to hash code: (hash_ is uint32)
  // Get one character (ch).
  __ lbu(T3, Address(T2, 0));
  __ addi(T2, T2, 1);
  // T3: ch.
  __ addi(T1, T1, 1);
  __ CombineHashes(A0, T3);
  __ j(&loop);

  __ Bind(&done);
  // Finalize. Allow a zero result to combine checks from empty string branch.
  __ FinalizeHashForSize(target::String::kHashBits, A0);
#if defined(HASH_IN_OBJECT_HEADER)
  // A1: Untagged address of header word (lr/sc do not support offsets).
  __ subi(A1, A1, kHeapObjectTag);
  __ slli(A0, A0, target::UntaggedObject::kHashTagPos);
  Label retry;
  __ Bind(&retry);
  __ lr(T0, Address(A1, 0));
  __ or_(T0, T0, A0);
  __ sc(TMP, T0, Address(A1, 0));
  __ bnez(TMP, &retry);

  __ srli(A0, A0, target::UntaggedObject::kHashTagPos);
  __ SmiTag(A0);
#else
  __ SmiTag(A0);
  __ sx(A0, FieldAddress(A1, target::String::hash_offset()));
#endif
  __ ret();
}
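
// A sketch of the loop above; CombineHashes and FinalizeHashForSize are the
// VM's standard string-hash mixing and finalization steps:
//
//   uint32_t hash = 0;
//   for (intptr_t i = 0; i < length; i++) {
//     hash = CombineHashes(hash, data[i]);
//   }
//   hash = FinalizeHash(hash, String::kHashBits);  // Fits in a Smi.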
1740
1741// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1742// 'length-reg' (A1) contains the desired length as a _Smi or _Mint.
1743// Returns new string as tagged pointer in A0.
1744static void TryAllocateString(Assembler* assembler,
1745 classid_t cid,
1746 intptr_t max_elements,
1747 Label* ok,
1748 Label* failure) {
1749 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1750 const Register length_reg = A1;
1751 // _Mint length: call to runtime to produce error.
1752 __ BranchIfNotSmi(length_reg, failure);
1753 // negative length: call to runtime to produce error.
1754 // Too big: call to runtime to allocate old.
1755 __ CompareImmediate(length_reg, target::ToRawSmi(max_elements));
1756 __ BranchIf(UNSIGNED_GREATER, failure);
1757
1758 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure, TMP));
1759 __ mv(T0, length_reg); // Save the length register.
1760 if (cid == kOneByteStringCid) {
1761 // Untag length.
1762 __ SmiUntag(length_reg);
1763 } else {
1764 // Untag length and multiply by element size -> no-op.
1765 ASSERT(kSmiTagSize == 1);
1766 }
1767 const intptr_t fixed_size_plus_alignment_padding =
1770 __ addi(length_reg, length_reg, fixed_size_plus_alignment_padding);
1771 __ andi(length_reg, length_reg,
1773
1774 __ lx(A0, Address(THR, target::Thread::top_offset()));
1775
1776 // length_reg: allocation size.
1777 __ add(T1, A0, length_reg);
1778 __ bltu(T1, A0, failure); // Fail on unsigned overflow.
1779
1780 // Check if the allocation fits into the remaining space.
1781 // A0: potential new object start.
1782 // T1: potential next object start.
1783 // A1: allocation size.
1784 __ lx(TMP, Address(THR, target::Thread::end_offset()));
1785 __ bgtu(T1, TMP, failure);
1786 __ CheckAllocationCanary(A0);
1787
1788 // Successfully allocated the object(s), now update top to point to
1789 // next object start and initialize the object.
1790 __ sx(T1, Address(THR, target::Thread::top_offset()));
1791 __ AddImmediate(A0, kHeapObjectTag);
1792 // Clear the last double word to ensure string comparison doesn't need to
1793 // specially handle the remainder of strings whose lengths are not a
1794 // multiple of the double-word size.
1795 __ sx(ZR, Address(T1, -1 * target::kWordSize));
1796 __ sx(ZR, Address(T1, -2 * target::kWordSize));
1797
1798 // Initialize the tags.
1799 // A0: new object start as a tagged pointer.
1800 // T1: new object end address.
1801 // A1: allocation size.
1802 {
1803 const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
1804 target::ObjectAlignment::kObjectAlignmentLog2;
1805
1806 __ CompareImmediate(A1, target::UntaggedObject::kSizeTagMaxSizeTag);
1807 Label dont_zero_tag;
1808 __ BranchIf(UNSIGNED_LESS_EQUAL, &dont_zero_tag);
1809 __ li(A1, 0);
1810 __ Bind(&dont_zero_tag);
1811 __ slli(A1, A1, shift);
1812
1813 // Get the class index and insert it into the tags.
1814 // A1: size and bit tags.
1815 // This also clears the hash, which is in the high word of the tags.
1816 const uword tags =
1817 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1818 __ OrImmediate(A1, A1, tags);
1819 __ sx(A1, FieldAddress(A0, target::Object::tags_offset())); // Store tags.
1820 }
1821
1822 // Set the length field using the saved length (T0).
1823 __ StoreIntoObjectNoBarrier(
1824 A0, FieldAddress(A0, target::String::length_offset()), T0);
1825#if !defined(HASH_IN_OBJECT_HEADER)
1826 // Clear hash.
1827 __ StoreIntoObjectNoBarrier(
1828 A0, FieldAddress(A0, target::String::hash_offset()), ZR);
1829#endif
1830 __ j(ok);
1831}
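
// A minimal sketch of the size computation above, assuming the alignment is a
// power of two (target::ObjectAlignment::kObjectAlignment). The addi/andi
// pair rounds the header-plus-payload size up to an alignment boundary; the
// helper name and parameters are placeholders, not part of the original file.
static intptr_t AllocationSizeSketch(intptr_t payload_bytes,
                                     intptr_t instance_size,
                                     intptr_t alignment) {
  // addi: add the fixed size plus worst-case padding (alignment - 1).
  // andi: clear the low bits to snap down to an alignment boundary.
  return (payload_bytes + instance_size + alignment - 1) & ~(alignment - 1);
}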
1832
1833// Arg0: OneByteString (receiver).
1834// Arg1: Start index as Smi.
1835// Arg2: End index as Smi.
1836// The indexes must be valid.
1837void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1838 Label* normal_ir_body) {
1839 const intptr_t kStringOffset = 2 * target::kWordSize;
1840 const intptr_t kStartIndexOffset = 1 * target::kWordSize;
1841 const intptr_t kEndIndexOffset = 0 * target::kWordSize;
1842 Label ok;
1843
1844 __ lx(T0, Address(SP, kEndIndexOffset));
1845 __ lx(TMP, Address(SP, kStartIndexOffset));
1846 __ or_(T1, T0, TMP);
1847 __ BranchIfNotSmi(T1, normal_ir_body); // 'start', 'end' not Smi.
1848
1849 __ sub(A1, T0, TMP);
1850 TryAllocateString(assembler, kOneByteStringCid,
1851 target::OneByteString::kMaxNewSpaceElements, &ok,
1852 normal_ir_body);
1853 __ Bind(&ok);
1854 // A0: new string as tagged pointer.
1855 // Copy string.
1856 __ lx(T1, Address(SP, kStringOffset));
1857 __ lx(T2, Address(SP, kStartIndexOffset));
1858 __ SmiUntag(T2);
1859 // Calculate start address.
1860 __ add(T1, T1, T2);
1861
1862 // T1: Start address to copy from.
1863 // T2: Untagged start index.
1864 __ lx(T0, Address(SP, kEndIndexOffset));
1865 __ SmiUntag(T0);
1866 __ sub(T0, T0, T2);
1867
1868 // T1: Start address to copy from (untagged).
1869 // T0: Untagged number of bytes to copy.
1870 // A0: Tagged result string.
1871 // T3: Pointer into T1.
1872 // T4: Pointer into A0.
1873 // T2: Scratch register.
1874 Label loop, done;
1875 __ blez(T0, &done, Assembler::kNearJump);
1876 __ mv(T3, T1);
1877 __ mv(T4, A0);
1878 __ Bind(&loop);
1879 __ subi(T0, T0, 1);
1880 __ lbu(T2, FieldAddress(T3, target::OneByteString::data_offset()));
1881 __ addi(T3, T3, 1);
1882 __ sb(T2, FieldAddress(T4, target::OneByteString::data_offset()));
1883 __ addi(T4, T4, 1);
1884 __ bgtz(T0, &loop);
1885
1886 __ Bind(&done);
1887 __ ret();
1888 __ Bind(normal_ir_body);
1889}
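
// The copy loop above is a plain forward byte copy; FieldAddress folds the
// data_offset() - kHeapObjectTag adjustment into each access. A sketch of the
// equivalent C++, with dst/src already pointing at the string payloads
// (hypothetical helper, not part of the original file):
static void CopyOneByteDataSketch(uint8_t* dst, const uint8_t* src,
                                  intptr_t count) {
  while (count-- > 0) {
    *dst++ = *src++;  // The lbu/sb pair, with post-increment of both pointers.
  }
}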
1890
1891void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1892 Label* normal_ir_body) {
1893 __ lx(A0, Address(SP, 2 * target::kWordSize)); // OneByteString.
1894 __ lx(A1, Address(SP, 1 * target::kWordSize)); // Index.
1895 __ lx(A2, Address(SP, 0 * target::kWordSize)); // Value.
1896 __ SmiUntag(A1);
1897 __ SmiUntag(A2);
1898 __ add(A1, A1, A0);
1899 __ sb(A2, FieldAddress(A1, target::OneByteString::data_offset()));
1900 __ ret();
1901}
1902
1903void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1904 Label* normal_ir_body) {
1905 __ lx(A0, Address(SP, 2 * target::kWordSize)); // TwoByteString.
1906 __ lx(A1, Address(SP, 1 * target::kWordSize)); // Index.
1907 __ lx(A2, Address(SP, 0 * target::kWordSize)); // Value.
1908 // Untag index and scale by element size (2) -> no-op; untag only the value.
1909 __ SmiUntag(A2);
1910 __ add(A1, A1, A0);
1911 __ sh(A2, FieldAddress(A1, target::TwoByteString::data_offset()));
1912 __ ret();
1913}
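
// Why the index needs no SmiUntag above: a Smi stores value << kSmiTagSize
// (value << 1 here), and a two-byte element lives at byte offset index * 2,
// so the tagged index already equals the byte offset. A sketch under that
// assumption (hypothetical helper, not part of the original file):
static uint16_t* TwoByteElementAddressSketch(uint8_t* untagged_data,
                                             intptr_t smi_index) {
  // (smi_index >> 1) * sizeof(uint16_t) == smi_index when kSmiTagSize == 1.
  return reinterpret_cast<uint16_t*>(untagged_data + smi_index);
}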
1914
1915void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1916 Label* normal_ir_body) {
1917 Label ok;
1918
1919 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Length.
1920 TryAllocateString(assembler, kOneByteStringCid,
1921 target::OneByteString::kMaxNewSpaceElements, &ok,
1922 normal_ir_body);
1923
1924 __ Bind(&ok);
1925 __ ret();
1926
1927 __ Bind(normal_ir_body);
1928}
1929
1930void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1931 Label* normal_ir_body) {
1932 Label ok;
1933
1934 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Length.
1935 TryAllocateString(assembler, kTwoByteStringCid,
1936 target::TwoByteString::kMaxNewSpaceElements, &ok,
1937 normal_ir_body);
1938
1939 __ Bind(&ok);
1940 __ ret();
1941
1942 __ Bind(normal_ir_body);
1943}
1944
1945void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1946 Label* normal_ir_body) {
1947 __ lx(A0, Address(SP, 1 * target::kWordSize)); // This.
1948 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Other.
1949
1950 StringEquality(assembler, A0, A1, T2, TMP2, A0, normal_ir_body,
1951 kOneByteStringCid);
1952}
1953
1954void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1955 Label* normal_ir_body) {
1956 __ lx(A0, Address(SP, 1 * target::kWordSize)); // This.
1957 __ lx(A1, Address(SP, 0 * target::kWordSize)); // Other.
1958
1959 StringEquality(assembler, A0, A1, T2, TMP2, A0, normal_ir_body,
1960 kTwoByteStringCid);
1961}
1962
1963void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1964 Label* normal_ir_body,
1965 bool sticky) {
1966 if (FLAG_interpret_irregexp) return;
1967
1968 const intptr_t kRegExpParamOffset = 2 * target::kWordSize;
1969 const intptr_t kStringParamOffset = 1 * target::kWordSize;
1970 // start_index smi is located at offset 0.
1971
1972 // Incoming registers:
1973 // T0: Function. (Will be reloaded with the specialized matcher function.)
1974 // S4: Arguments descriptor. (Will be preserved.)
1975 // S5: Unknown. (Must be GC safe on tail call.)
1976
1977 // Load the specialized function pointer into T0. Leverage the fact that
1978 // the string CIDs, as well as the stored function pointers, are in sequence.
1979 __ lx(T2, Address(SP, kRegExpParamOffset));
1980 __ lx(T1, Address(SP, kStringParamOffset));
1981 __ LoadClassId(T1, T1);
1982 __ AddImmediate(T1, -kOneByteStringCid);
1983 __ slli(T1, T1, target::kWordSizeLog2);
1984 __ add(T1, T1, T2);
1985 __ lx(FUNCTION_REG, FieldAddress(T1, target::RegExp::function_offset(
1986 kOneByteStringCid, sticky)));
1987
1988 // Registers are now set up for the lazy compile stub. It expects the
1989 // function in T0, the arguments descriptor in S4, and the IC-Data in S5.
1990 __ li(S5, 0);
1991
1992 // Tail-call the function.
1993 __ lx(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
1994 __ lx(T1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1995 __ jr(T1);
1996}
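
// A sketch of the lookup above: the RegExp object stores one specialized
// matcher per string CID in consecutive word-sized slots, so the entry is
// indexed by (string_cid - kOneByteStringCid). Illustrative only;
// function_table stands in for the slot at the kOneByteStringCid offset and
// is not part of the original file.
static uword SpecializedMatcherSketch(const uword* function_table,
                                      intptr_t string_cid) {
  // Consecutive string CIDs map to consecutive word-sized table slots.
  return function_table[string_cid - kOneByteStringCid];
}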
1997
1998void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1999 Label* normal_ir_body) {
2000 __ LoadIsolate(A0);
2001 __ lx(A0, Address(A0, target::Isolate::default_tag_offset()));
2002 __ ret();
2003}
2004
2005void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
2006 Label* normal_ir_body) {
2007 __ LoadIsolate(A0);
2008 __ lx(A0, Address(A0, target::Isolate::current_tag_offset()));
2009 __ ret();
2010}
2011
2012void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
2013 Label* normal_ir_body) {
2014#if !defined(SUPPORT_TIMELINE)
2015 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2016 __ ret();
2017#else
2018 Label true_label;
2019 // Load TimelineStream*.
2020 __ lx(A0, Address(THR, target::Thread::dart_stream_offset()));
2021 // Load uintptr_t from TimelineStream*.
2022 __ lx(A0, Address(A0, target::TimelineStream::enabled_offset()));
2023 __ bnez(A0, &true_label, Assembler::kNearJump);
2024 __ LoadObject(A0, CastHandle<Object>(FalseObject()));
2025 __ ret();
2026 __ Bind(&true_label);
2027 __ LoadObject(A0, CastHandle<Object>(TrueObject()));
2028 __ ret();
2029#endif
2030}
2031
2032void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
2033 Label* normal_ir_body) {
2034#if !defined(SUPPORT_TIMELINE)
2035 __ LoadImmediate(A0, target::ToRawSmi(0));
2036 __ ret();
2037#elif XLEN == 64
2038 __ lx(A0, Address(THR, target::Thread::next_task_id_offset()));
2039 __ addi(A1, A0, 1);
2040 __ sx(A1, Address(THR, target::Thread::next_task_id_offset()));
2041 __ SmiTag(A0); // Ignore loss of precision.
2042 __ ret();
2043#else
2044 __ lw(T0, Address(THR, target::Thread::next_task_id_offset()));
2045 __ lw(T1, Address(THR, target::Thread::next_task_id_offset() + 4));
2046 __ SmiTag(A0, T0); // Ignore loss of precision.
2047 __ addi(T2, T0, 1);
2048 __ sltu(T3, T2, T0); // Carry.
2049 __ add(T1, T1, T3);
2050 __ sw(T2, Address(THR, target::Thread::next_task_id_offset()));
2051 __ sw(T1, Address(THR, target::Thread::next_task_id_offset() + 4));
2052 __ ret();
2053#endif
2054}
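
// A sketch of the RV32 path above: the 64-bit counter is incremented in two
// 32-bit halves, with sltu materializing the carry out of the low word. The
// helper name is a placeholder, not part of the original file.
static void Increment64On32Sketch(uint32_t* lo, uint32_t* hi) {
  uint32_t new_lo = *lo + 1;                // addi T2, T0, 1
  uint32_t carry = (new_lo < *lo) ? 1 : 0;  // sltu T3, T2, T0 (unsigned wrap)
  *hi += carry;                             // add T1, T1, T3
  *lo = new_lo;                             // sw of both halves stores result
}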
2055
2056#undef __
2057
2058} // namespace compiler
2059} // namespace dart
2060
2061#endif // defined(TARGET_ARCH_RISCV)