asm_intrinsifier_x64.cc
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {
// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// R10: Arguments descriptor
// TOS: Return address
// The R10 and CODE_REG registers can be destroyed only if there is no
// slow-path, i.e. if the intrinsified method always executes a return.
// The RBP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_x64.h) must be preserved.
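//
// For example, with two arguments the stack layout on entry is:
//   [RSP + 0 * kWordSize]  return address (TOS)
//   [RSP + 1 * kWordSize]  rightmost (last) argument
//   [RSP + 2 * kWordSize]  leftmost argument / receiver
// which is why the helpers below read arguments at the +1 and +2 word offsets.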

#define __ assembler->

// Tests if the two topmost arguments are Smis; jumps to label not_smi if not.
// Topmost argument is in RAX.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RCX, Address(RSP, +2 * target::kWordSize));
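  // Since kSmiTag == 0, OR-ing the two values and testing the tag bit below
  // checks both Smi tags with a single branch.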
  __ orq(RCX, RAX);
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, not_smi);
}

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label overflow;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift value is in RAX. Compare with tagged Smi.
  __ OBJ(cmp)(RAX, Immediate(target::ToRawSmi(target::kSmiBits)));
  __ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(RAX);
  __ movq(RCX, RAX);  // Shift amount must be in RCX.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Value.

  // Overflow test - all the shifted-out bits must be the same as the sign bit.
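  // E.g. in 8 bits, 0x50 << 2 == 0x40 and 0x40 >> 2 == 0x10 != 0x50, so the
  // compare below catches shifted-out bits that were not copies of the sign.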
  __ movq(RDI, RAX);
  __ OBJ(shl)(RAX, RCX);
  __ OBJ(sar)(RAX, RCX);
  __ OBJ(cmp)(RAX, RDI);
  __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);

  __ OBJ(shl)(RAX, RCX);  // Shift for result now that we know there is no overflow.

  // RAX is a correctly tagged Smi.
  __ ret();

  __ Bind(&overflow);
  // Mint is rarely used on x64 (only for integers requiring 64 bits instead of
  // 63 or 31 bits as represented by Smi).
  __ Bind(normal_ir_body);
}

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains the right argument.
  __ OBJ(cmp)(Address(RSP, +2 * target::kWordSize), RAX);
  __ j(true_condition, &true_label, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  // For integer receiver '===' check first.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ movq(RCX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ OBJ(cmp)(RAX, RCX);
  __ j(EQUAL, &true_label, Assembler::kNearJump);
  __ orq(RAX, RCX);
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
  // Both arguments are smi, '===' is good enough.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);
  __ movq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &receiver_not_smi);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ CompareClassId(RAX, kDoubleCid);
  __ j(EQUAL, normal_ir_body);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(&receiver_not_smi);
  // RAX: receiver.
  __ CompareClassId(RAX, kMintCid);
  __ j(NOT_EQUAL, normal_ir_body);
  // Receiver is Mint, return false if right is Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Smi == Mint -> false.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Receiver (Smi).
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  // XOR with sign bit to complement bits if value is negative.
  __ movq(RCX, RAX);
  __ sarq(RCX, Immediate(63));  // All 0 or all 1.
  __ OBJ(xor)(RAX, RCX);
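  // A negative value is mapped to its bitwise complement, which has the same
  // bit length: a receiver of -3 is the tagged value -6, the XOR yields the
  // tagged value 5 (0b101), and BSR below then produces bitLength(-3) == 2.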
  // BSR does not write the destination register if source is zero. Put a 1 in
  // the Smi tag bit to ensure BSR writes to destination register.
  __ orq(RAX, Immediate(kSmiTagMask));
  __ bsrq(RAX, RAX);
  __ SmiTag(RAX);
  __ ret();
}

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(R8, Address(RSP, 3 * target::kWordSize));   // x_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ subq(R8, Immediate(2));  // x_used > 0, Smi. R8 = x_used - 1, round up.
  __ sarq(R8, Immediate(2));  // R8 + 1 = number of digit pairs to read.
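  // E.g. x_used == 5 arrives as the tagged Smi 10: (10 - 2) >> 2 == 2, and
  // 2 + 1 == 3 == ceil(5 / 2) 64-bit digit pairs.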
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RSI, RCX);
  __ sarq(RSI, Immediate(6));  // RSI = n ~/ (2*_DIGIT_BITS).
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ xorq(RAX, RAX);  // RAX = 0.
  __ movq(RDX,
          FieldAddress(RDI, R8, TIMES_8, target::TypedData::payload_offset()));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 2 * kBytesPerBigIntDigit), RAX);
  Label last;
  __ cmpq(R8, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, FieldAddress(RDI, R8, TIMES_8,
                            target::TypedData::payload_offset() -
                                2 * kBytesPerBigIntDigit));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 0), RAX);
  __ decq(R8);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shldq(RDX, R8, RCX);  // R8 == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RDX, RCX);
  __ sarq(RDX, Immediate(6));  // RDX = n ~/ (2*_DIGIT_BITS).
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // x_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RSI, RSI);
#endif
  __ subq(RSI, Immediate(2));  // x_used > 0, Smi. RSI = x_used - 1, round up.
  __ sarq(RSI, Immediate(2));
  __ leaq(RDI,
          FieldAddress(RDI, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ subq(RSI, RDX);  // RSI + 1 = number of digit pairs to read.
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ negq(RSI);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 0));
  Label last;
  __ cmpq(RSI, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 2 * kBytesPerBigIntDigit));
  __ shrdq(RAX, RDX, RCX);
  __ movq(Address(RBX, RSI, TIMES_8, 0), RAX);
  __ incq(RSI);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shrdq(RDX, RSI, RCX);  // RSI == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));   // used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label add_loop;
  __ Bind(&add_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ adcq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &add_loop, Assembler::kNearJump);

  Label last_carry;
  __ decq(R8);  // Does not affect carry flag.
  __ j(ZERO, &last_carry, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ adcq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);   // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&last_carry);
  Label done;
  __ j(NOT_CARRY, &done);
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          Immediate(1));

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));   // used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ sbbq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);

  Label done;
  __ decq(R8);  // Does not affect carry flag.
  __ j(ZERO, &done, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ sbbq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);   // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  // RBX = x, done if x == 0
  __ movq(RCX, Address(RSP, 7 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 6 * target::kWordSize));  // xi is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ movq(RBX,
          FieldAddress(RCX, RAX, TIMES_2, target::TypedData::payload_offset()));
  __ testq(RBX, RBX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // R8 = (SmiUntag(n) + 1)/2, no_op if n == 0
  __ movq(R8, Address(RSP, 1 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ j(ZERO, &done, Assembler::kNearJump);

  // RDI = mip = &m_digits[i >> 1]
  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // m_digits
  __ movq(RAX, Address(RSP, 4 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RSI = ajp = &a_digits[j >> 1]
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RAX, Address(RSP, 2 * target::kWordSize));  // j is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RCX = c = 0
  __ xorq(RCX, RCX);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x:   RBX
  // mip: RDI
  // ajp: RSI
  // c:   RCX
  // t:   RDX:RAX (not live at loop entry)
  // n:   R8

  // uint64_t mi = *mip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint128_t t = x*mi
  __ mulq(RBX);       // t = RDX:RAX = RAX * RBX, 64-bit * 64-bit -> 128-bit.
  __ addq(RAX, RCX);  // t += c
  __ adcq(RDX, Immediate(0));

  // uint64_t aj = *ajp; t += aj
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));
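  // Each adcq above folds the carry of the preceding 64-bit addq into
  // high64(t), so RDX:RAX now holds the full 128-bit x*mi + c + aj.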

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high64(t)
  __ movq(RCX, RDX);

  // while (--n > 0)
  __ decq(R8);  // --n
  __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump);

  __ testq(RCX, RCX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // *ajp += c
  __ addq(Address(RSI, 0), RCX);
  __ j(NOT_CARRY, &done, Assembler::kNearJump);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));
  __ incq(Address(RSI, 0));  // c == 0 or 1
  __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // RDI = xip = &x_digits[i >> 1]
  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 3 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RBX = x = *xip++, return if x == 0
  Label x_zero;
  __ movq(RBX, Address(RDI, 0));
  __ cmpq(RBX, Immediate(0));
  __ j(EQUAL, &x_zero);
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // RSI = ajp = &a_digits[i]
  __ movq(RSI, Address(RSP, 2 * target::kWordSize));  // a_digits
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_4, target::TypedData::payload_offset()));

  // RDX:RAX = t = x*x + *ajp
  __ movq(RAX, RBX);
  __ mulq(RBX);
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // int n = (used - i + 1)/2 - 1
  __ OBJ(mov)(R8, Address(RSP, 1 * target::kWordSize));  // used is Smi
  __ OBJ(sub)(R8, Address(RSP, 3 * target::kWordSize));  // i is Smi
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));
  __ decq(R8);  // R8 = number of digit pairs to process.

  // uint128_t c = high64(t)
  __ xorq(R13, R13);  // R13 = high64(c) == 0
  __ movq(R12, RDX);  // R12 = low64(c) == high64(t)

  Label loop, done;
  __ Bind(&loop);
  // x:   RBX
  // xip: RDI
  // ajp: RSI
  // c:   R13:R12
  // t:   RCX:RDX:RAX (not live at loop entry)
  // n:   R8

  // while (--n >= 0)
  __ decq(R8);  // --n
  __ j(NEGATIVE, &done, Assembler::kNearJump);

  // uint64_t xi = *xip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint192_t t = RCX:RDX:RAX = 2*x*xi + aj + c
  __ mulq(RBX);       // RDX:RAX = RAX * RBX
  __ xorq(RCX, RCX);  // RCX = 0
  __ shldq(RCX, RDX, Immediate(1));
  __ shldq(RDX, RAX, Immediate(1));
  __ shlq(RAX, Immediate(1));  // RCX:RDX:RAX <<= 1
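  // The shldq/shlq sequence shifts RCX:RDX:RAX left by one as a single
  // 192-bit quantity, doubling x*xi without losing its top bit.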
  __ addq(RAX, Address(RSI, 0));  // t += aj
  __ adcq(RDX, Immediate(0));
  __ adcq(RCX, Immediate(0));
  __ addq(RAX, R12);  // t += low64(c)
  __ adcq(RDX, R13);  // t += high64(c) << 64
  __ adcq(RCX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high128(t)
  __ movq(R12, RDX);
  __ movq(R13, RCX);

  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // uint128_t t = aj + c
  __ addq(R12, Address(RSI, 0));  // t = c, t += *ajp
  __ adcq(R13, Immediate(0));

  // *ajp++ = low64(t)
  // *ajp = high64(t)
  __ movq(Address(RSI, 0), R12);
  __ movq(Address(RSI, 2 * kBytesPerBigIntDigit), R13);

  __ Bind(&x_zero);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     qd = dh:dl / yt;  // No overflow possible, because dh < yt.
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = yt = args[0..1]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::payload_offset()));

  // RBX = dp = &digits[(i >> 1) - 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi and odd.
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RBX, FieldAddress(
                   RBX, RAX, TIMES_2,
                   target::TypedData::payload_offset() - kBytesPerBigIntDigit));

  // RDX = dh = dp[0]
  __ movq(RDX, Address(RBX, 0));

  // RAX = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ movq(RAX, Immediate(-1));

  // Return qd if dh == yt
  Label return_qd;
  __ cmpq(RDX, RCX);
  __ j(EQUAL, &return_qd, Assembler::kNearJump);

  // RAX = dl = dp[-1]
  __ movq(RAX, Address(RBX, -2 * kBytesPerBigIntDigit));

  // RAX = qd = dh:dl / yt = RDX:RAX / RCX
  __ divq(RCX);
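  // divq divides the 128-bit RDX:RAX by RCX, leaving the quotient in RAX and
  // the remainder in RDX; dh < yt guarantees the quotient fits in 64 bits,
  // so the division cannot fault.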

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ movq(FieldAddress(RDI, target::TypedData::payload_offset() +
                                2 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = rho = args[2 .. 3]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::payload_offset() +
                                     2 * kBytesPerBigIntDigit));

  // RAX = digits[i >> 1 .. (i >> 1) + 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ movq(RAX,
          FieldAddress(RBX, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RDX:RAX = t = rho*d
  __ mulq(RCX);

  // args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t)
  __ movq(FieldAddress(RDI, target::TypedData::payload_offset() +
                                4 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}

// Checks if the last argument is a double; jumps to label 'is_smi' if it is a
// Smi (easy to convert to double), otherwise jumps to label 'not_double_smi'.
// Returns the last argument in RAX.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(ZERO, is_smi);  // Jump if Smi.
  __ CompareClassId(RAX, kDoubleCid);
  __ j(NOT_EQUAL, not_double_smi);
  // Fall through if double.
}

// Both arguments on stack, left argument is a double, right argument is of
// unknown type. Return true or false object in RAX. Any NaN argument
// returns false. Any non-double argument causes control flow to fall through
// to the slow case (compiled method body).
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_false, is_true, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
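  // comisd sets the parity flag when either operand is NaN, so PARITY_EVEN is
  // the standard x86 idiom for detecting an unordered comparison.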
  __ j(true_condition, &is_true, Assembler::kNearJump);
  // Fall through to false.
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQUAL);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  switch (kind) {
    case Token::kADD:
      __ addsd(XMM0, XMM1);
      break;
    case Token::kSUB:
      __ subsd(XMM0, XMM1);
      break;
    case Token::kMUL:
      __ mulsd(XMM0, XMM1);
      break;
    case Token::kDIV:
      __ divsd(XMM0, XMM1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ mulsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

// Converts the integer argument to a double; only a Smi argument is handled
// here, a Mint falls through to the compiled body.
void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM0, RAX);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label is_true;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM0);
  __ j(PARITY_EVEN, &is_true, Assembler::kNearJump);  // NaN -> true.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_inf, done;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RAX, FieldAddress(RAX, target::Double::value_offset()));
  // Mask off the sign.
  __ AndImmediate(RAX, Immediate(0x7FFFFFFFFFFFFFFFLL));
  // Compare with +infinity.
  __ CompareImmediate(RAX, Immediate(0x7FF0000000000000LL));
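  // 0x7FF0000000000000 is the IEEE 754 bit pattern of +infinity: maximum
  // exponent with an all-zero mantissa.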
  __ j(EQUAL, &is_inf, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ jmp(&done);

  __ Bind(&is_inf);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));

  __ Bind(&done);
  __ ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_false, is_true, is_zero;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ xorpd(XMM1, XMM1);  // 0.0 -> XMM1.
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
  __ j(EQUAL, &is_zero, Assembler::kNearJump);  // Check for negative zero.
  __ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump);  // >= 0 -> false.
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_zero);
  // Check for negative zero (get the sign bit).
  __ movmskpd(RAX, XMM0);
  __ testq(RAX, Immediate(1));
  __ j(NOT_ZERO, &is_true, Assembler::kNearJump);
  __ jmp(&is_false, Assembler::kNearJump);
}

// Identity comparison.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label is_true;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ OBJ(cmp)(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

static void JumpIfInteger(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kSmiCid, kMintCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kSmiCid, kMintCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfString(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler, Register cid, Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, kNoRegister, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler, Register cid, Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, kNoRegister, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_integer, not_double, not_string;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  // RCX: untagged cid of instance (RAX).
  __ cmpq(RCX, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a closure.

  __ cmpq(RCX, Immediate(kRecordCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a record.

  __ cmpl(RCX, Immediate(kNumPredefinedCids));
  __ j(ABOVE, &use_declaration_type);

  // If object is an instance of _Double return double type.
  __ cmpl(RCX, Immediate(kDoubleCid));
  __ j(NOT_EQUAL, &not_double);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::double_type_offset()));
  __ ret();

  __ Bind(&not_double);
  // If object is an integer (smi, mint or bigint) return int type.
  __ movl(RAX, RCX);
  JumpIfNotInteger(assembler, RAX, &not_integer);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::int_type_offset()));
  __ ret();

  __ Bind(&not_integer);
  // If object is a string (one byte, two byte or external variants) return
  // string type.
  __ movq(RAX, RCX);
  JumpIfNotString(assembler, RAX, &not_string);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::string_type_offset()));
  __ ret();

  __ Bind(&not_string);
  // If object is a type or function type, return Dart type.
  __ movq(RAX, RCX);
  JumpIfNotType(assembler, RAX, &use_declaration_type);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::type_type_offset()));
  __ ret();

  // Object is neither double, nor integer, nor string, nor type.
  __ Bind(&use_declaration_type);
  __ LoadClassById(RDI, RCX);
  __ movzxw(RCX, FieldAddress(RDI, target::Class::num_type_arguments_offset()));
  __ cmpq(RCX, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ LoadCompressed(
      RAX, FieldAddress(RDI, target::Class::declaration_type_offset()));
  __ CompareObject(RAX, NullObject());
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);  // Not yet set.
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or
// not_equal. If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ cmpq(cid1, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ cmpq(cid1, Immediate(kRecordCid));
  __ j(EQUAL, normal_ir_body);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to
  // a single String type).
  __ cmpq(cid1, cid2);
  __ j(EQUAL, equal_may_be_generic);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ cmpq(cid1, Immediate(kNumPredefinedCids));
  __ j(ABOVE_EQUAL, not_equal);

  // Check if both are integer types.
  __ movq(scratch, cid1);
  JumpIfNotInteger(assembler, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  __ movq(scratch, cid2);
  JumpIfInteger(assembler, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ jmp(not_equal);

  __ Bind(&not_integer);
  // Check if both are String types.
  __ movq(scratch, cid1);
  JumpIfNotString(assembler, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is a String. Check if the second is a String too.
  __ movq(scratch, cid2);
  JumpIfString(assembler, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ jmp(not_equal);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    __ movq(scratch, cid1);
    JumpIfNotList(assembler, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    __ movq(scratch, cid2);
    JumpIfNotList(assembler, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ jmp(equal_may_be_generic);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not then types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    __ movq(scratch, cid1);
    JumpIfNotType(assembler, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    __ movq(scratch, cid2);
    JumpIfType(assembler, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ jmp(not_equal);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RDX, RAX);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, RCX, RDX, RAX,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(RAX, RCX);
  __ movl(
      RAX,
      FieldAddress(
          RAX,
          target::Class::host_type_arguments_field_offset_in_words_offset()));
  __ cmpl(RAX, Immediate(target::Class::kNoTypeArguments));
  __ j(EQUAL, &equal);

  // Compare type arguments, host_type_arguments_field_offset_in_words in RAX.
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(mov)(RCX, FieldAddress(RCX, RAX, TIMES_COMPRESSED_WORD_SIZE, 0));
  __ OBJ(mov)(RDX, FieldAddress(RDX, RAX, TIMES_COMPRESSED_WORD_SIZE, 0));
  __ OBJ(cmp)(RCX, RDX);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ addq(RAX, RAX);  // Smi tag RAX, setting Z flag.
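  // Doubling the raw hash is exactly Smi tagging (kSmiTagShift == 1, tag bit
  // 0), and it sets the Z flag when the stored hash is 0, i.e. not computed.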
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids, check_legacy;

  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(cmp)(RCX, RDX);
  __ j(EQUAL, &equal);

  // RCX might not be a Type object, so check that first (RDX should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(RAX, RCX);
  __ cmpq(RAX, Immediate(kTypeCid));
  __ j(NOT_EQUAL, normal_ir_body);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(RDI, RCX);
  __ LoadTypeClassId(RSI, RDX);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, RDI, RSI, RAX,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ LoadCompressed(RDI, FieldAddress(RCX, target::Type::arguments_offset()));
  __ LoadCompressed(RSI, FieldAddress(RDX, target::Type::arguments_offset()));
  __ cmpq(RDI, RSI);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(RCX, RCX);
  __ LoadAbstractTypeNullability(RDX, RDX);
  __ cmpq(RCX, RDX);
  __ j(NOT_EQUAL, &not_equal, Assembler::kNearJump);
  // Fall through to equal case if nullability is equal.

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // AbstractType object.
  __ LoadCompressedSmi(RAX,
                       FieldAddress(RAX, target::AbstractType::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ OBJ(test)(RAX, RAX);
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(cmp)(RCX, RDX);
  __ j(NOT_EQUAL, normal_ir_body);

  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Keep in sync with Instance::IdentityHashCode.
// Note int and double never reach here because they override _identityHashCode.
// Special cases are also not needed for null or bool because they were pre-set
// during VM isolate finalization.
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  Label not_yet_computed;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Object.
  __ movl(RAX, FieldAddress(RAX, target::Object::tags_offset() +
                                     target::UntaggedObject::kHashTagPos /
                                         kBitsPerByte));
  __ cmpl(RAX, Immediate(0));
  __ j(EQUAL, &not_yet_computed, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&not_yet_computed);
  __ movq(RCX, Address(THR, target::Thread::random_offset()));
  __ movq(RBX, RCX);
  __ andq(RCX, Immediate(0xffffffff));   // state_lo
  __ shrq(RBX, Immediate(32));           // state_hi
  __ imulq(RCX, Immediate(0xffffda61));  // A
  __ addq(RCX, RBX);  // new_state = (A * state_lo) + state_hi
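  // One step of a multiply-with-carry generator: the low 32 bits of the new
  // state are the next output and the high 32 bits act as the carry.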
  __ movq(Address(THR, target::Thread::random_offset()), RCX);
  __ andq(RCX, Immediate(0x3fffffff));
  __ cmpl(RCX, Immediate(0));
  __ j(EQUAL, &not_yet_computed);

  __ movq(RBX, Address(RSP, +1 * target::kWordSize));  // Object.
  __ MoveRegister(RDX, RCX);
  __ shlq(RDX, Immediate(32));

  Label retry, success, already_in_rax;
  __ Bind(&retry);
  // RAX is used by "cmpxchgq" as comparison value (if comparison succeeds the
  // store is performed).
  __ movq(RAX, FieldAddress(RBX, 0));
  __ TestImmediate(RAX, Immediate(0xffffffff00000000));
  __ BranchIf(NOT_ZERO, &already_in_rax);
  __ MoveRegister(RSI, RAX);
  __ orq(RSI, RDX);
  __ LockCmpxchgq(FieldAddress(RBX, 0), RSI);
  __ BranchIf(NOT_ZERO, &retry);
  // Fall-through with RCX containing new hash value (untagged).
  __ Bind(&success);
  __ SmiTag(RCX);
  __ MoveRegister(RAX, RCX);
  __ Ret();

  __ Bind(&already_in_rax);
  __ shrq(RAX, Immediate(32));
  __ SmiTag(RAX);
  __ Ret();
}

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(RBX);
  __ LoadCompressedSmi(R8, FieldAddress(RAX, target::String::length_offset()));
  __ SmiUntag(R8);
  __ LoadCompressedSmi(R9, FieldAddress(RCX, target::String::length_offset()));
  __ SmiUntag(R9);

  // if (other.length == 0) return true;
  __ testq(R9, R9);
  __ j(ZERO, return_true);

  // if (start < 0) return false;
  __ testq(RBX, RBX);
  __ j(SIGN, return_false);

  // if (start + other.length > this.length) return false;
  __ movq(R11, RBX);
  __ addq(R11, R9);
  __ cmpq(R11, R8);
  __ j(GREATER, return_false);

  __ LoadImmediate(R11, Immediate(0));  // i = 0

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  // clobbering this.length
  __ movq(R8, R11);
  __ addq(R8, RBX);
  if (receiver_cid == kOneByteStringCid) {
    __ movzxb(R12, FieldAddress(RAX, R8, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ movzxw(R12, FieldAddress(RAX, R8, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  // other.codeUnitAt(i)
  if (other_cid == kOneByteStringCid) {
    __ movzxb(R13, FieldAddress(RCX, R11, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ movzxw(R13, FieldAddress(RCX, R11, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  __ cmpq(R12, R13);
  __ j(NOT_EQUAL, return_false);

  // i++, while (i < len)
  __ addq(R11, Immediate(1));
  __ cmpq(R11, R9);
  __ j(LESS, &loop, Assembler::kNearJump);

  __ jmp(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ movq(RAX, Address(RSP, +3 * target::kWordSize));  // receiver
  __ movq(RBX, Address(RSP, +2 * target::kWordSize));  // start
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // other

  __ testq(RBX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // 'start' is not Smi.

  __ CompareClassId(RCX, kOneByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Index.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // String.
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // Non-smi index.
  // Range check.
  __ OBJ(cmp)(RCX, FieldAddress(RAX, target::String::length_offset()));
  // Runtime throws exception.
  __ j(ABOVE_EQUAL, normal_ir_body);
  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
  __ SmiUntag(RCX);
  __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::OneByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);
  ASSERT(kSmiTagShift == 1);
#if defined(DART_COMPRESSED_POINTERS)
  // The upper half of a compressed Smi contains undefined bits, but no x64
  // addressing mode will ignore these bits. We have already checked the index
  // is positive, so we just clear the upper bits, which is shorter than
  // movsxd.
  __ orl(RCX, RCX);
#endif
  __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::TwoByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label is_true;
  // Get length.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ LoadCompressedSmi(RAX, FieldAddress(RAX, target::String::length_offset()));
  __ OBJ(cmp)(RAX, Immediate(target::ToRawSmi(0)));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
                                                Label* normal_ir_body) {
  Label compute_hash;
  __ movq(
      RBX,
      Address(RSP, +1 * target::kWordSize));  // target::OneByteString object.
  __ movl(RAX, FieldAddress(RBX, target::String::hash_offset()));
  __ cmpq(RAX, Immediate(0));
  __ j(EQUAL, &compute_hash, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&compute_hash);
  // Hash not yet computed, use algorithm of class StringHasher.
  __ LoadCompressedSmi(RCX, FieldAddress(RBX, target::String::length_offset()));
  __ SmiUntag(RCX);
  __ xorq(RAX, RAX);
  __ xorq(RDI, RDI);
  // RBX: Instance of target::OneByteString.
  // RCX: String length, untagged integer.
  // RDI: Loop counter, untagged integer.
  // RAX: Hash code, untagged integer.
  Label loop, done;
  __ Bind(&loop);
  __ cmpq(RDI, RCX);
  __ j(EQUAL, &done, Assembler::kNearJump);
  // Add to hash code: (hash_ is uint32)
  // Get one character (ch).
  __ movzxb(RDX, FieldAddress(RBX, RDI, TIMES_1,
                              target::OneByteString::data_offset()));
  // RDX: ch and temporary.
  __ CombineHashes(RAX, RDX);

  __ incq(RDI);
  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // Finalize and fit to size kHashBits. Ensures hash is non-zero.
  __ FinalizeHashForSize(target::String::kHashBits, RAX);
  __ shlq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
  // lock+orq is an atomic read-modify-write.
  __ lock();
  __ orq(FieldAddress(RBX, target::Object::tags_offset()), RAX);
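  // Racing threads hash the same immutable string contents, so competing
  // lock+orq stores write identical hash bits and the result stays correct.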
  __ sarq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
  __ SmiTag(RAX);
  __ ret();
}

// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
// 'length_reg' contains the desired length as a _Smi or _Mint.
// Returns new string as tagged pointer in RAX.
static void TryAllocateString(Assembler* assembler,
                              classid_t cid,
                              intptr_t max_elements,
                              Label* ok,
                              Label* failure,
                              Register length_reg) {
  ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
  // _Mint length: call to runtime to produce error.
  __ BranchIfNotSmi(length_reg, failure);
  // Negative length: call to runtime to produce error.
  // Too big: call to runtime to allocate old.
  __ OBJ(cmp)(length_reg, Immediate(target::ToRawSmi(max_elements)));
  __ j(ABOVE, failure);
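  // The single unsigned ABOVE check also rejects negative lengths: a negative
  // tagged Smi has its sign bit set and compares above any valid max_elements.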

  NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure));
  if (length_reg != RDI) {
    __ movq(RDI, length_reg);
  }
  Label pop_and_fail, not_zero_length;
  __ pushq(RDI);  // Preserve length.
  if (cid == kOneByteStringCid) {
    // Untag length.
    __ SmiUntag(RDI);
  } else {
    // Untag length and multiply by element size -> no-op.
    ASSERT(kSmiTagSize == 1);
  }
  const intptr_t fixed_size_plus_alignment_padding =
      target::String::InstanceSize() +
      target::ObjectAlignment::kObjectAlignment - 1;
  __ addq(RDI, Immediate(fixed_size_plus_alignment_padding));
  __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
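  // Classic round-up: adding (kObjectAlignment - 1) and masking with
  // -kObjectAlignment rounds the allocation size up to the next alignment
  // boundary.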

  __ movq(RAX, Address(THR, target::Thread::top_offset()));

  // RDI: allocation size.
  __ movq(RCX, RAX);
  __ addq(RCX, RDI);
  __ j(CARRY, &pop_and_fail);

  // Check if the allocation fits into the remaining space.
  // RAX: potential new object start.
  // RCX: potential next object start.
  // RDI: allocation size.
  __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
  __ j(ABOVE_EQUAL, &pop_and_fail);
  __ CheckAllocationCanary(RAX);

  // Successfully allocated the object(s), now update top to point to
  // next object start and initialize the object.
  __ movq(Address(THR, target::Thread::top_offset()), RCX);
  __ addq(RAX, Immediate(kHeapObjectTag));
  // Clear last double word to ensure string comparison doesn't need to
  // specially handle remainder of strings with lengths not factors of double
  // offsets.
  ASSERT(target::kWordSize == 8);
  __ movq(Address(RCX, -1 * target::kWordSize), Immediate(0));
  __ movq(Address(RCX, -2 * target::kWordSize), Immediate(0));

  // Initialize the tags.
  // RAX: new object start as a tagged pointer.
  // RDI: allocation size.
  {
    Label size_tag_overflow, done;
    __ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
    __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
    __ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
                           target::ObjectAlignment::kObjectAlignmentLog2));
    __ jmp(&done, Assembler::kNearJump);

    __ Bind(&size_tag_overflow);
    __ xorq(RDI, RDI);
    __ Bind(&done);

    // Get the class index and insert it into the tags.
    // This also clears the hash, which is in the high bits of the tags.
    const uword tags =
        target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
    __ orq(RDI, Immediate(tags));
    __ movq(FieldAddress(RAX, target::Object::tags_offset()), RDI);  // Tags.
  }
1682
1683 // Set the length field.
1684 __ popq(RDI);
1685#if defined(DART_COMPRESSED_POINTERS)
1686 // Clear out padding caused by alignment gap between length and data.
1687 __ movq(FieldAddress(RAX, target::String::length_offset()),
1688 compiler::Immediate(0));
1689#endif
1690 __ StoreCompressedIntoObjectNoBarrier(
1691 RAX, FieldAddress(RAX, target::String::length_offset()), RDI);
1692 __ jmp(ok);
1693
1694 __ Bind(&pop_and_fail);
1695 __ popq(RDI);
1696 __ jmp(failure);
1697}
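// The fast path above is ordinary new-space bump allocation against the
// thread-local top/end pointers. A hedged C++ sketch of that protocol
// follows; SketchThread and SketchTryBumpAllocate are illustrative names,
// and the real fields are reached through Thread::top_offset()/end_offset().
#include <cstdint>

struct SketchThread {
  uintptr_t top;  // Next free address in the nursery.
  uintptr_t end;  // End of the current allocation buffer.
};

// Returns the untagged start address on success, or 0 to signal the slow
// path, mirroring the CARRY and ABOVE_EQUAL checks above.
static uintptr_t SketchTryBumpAllocate(SketchThread* thread, uintptr_t size) {
  uintptr_t start = thread->top;
  uintptr_t next = start + size;
  if (next < start) return 0;         // Size addition overflowed (CARRY).
  if (next >= thread->end) return 0;  // Does not fit (ABOVE_EQUAL).
  thread->top = next;                 // Publish the new top.
  return start;                       // Caller adds kHeapObjectTag.
}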
1698
1699// Arg0: target::OneByteString (receiver).
1700// Arg1: Start index as Smi.
1701// Arg2: End index as Smi.
1702// The indexes must be valid.
1703void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1704 Label* normal_ir_body) {
1705 const intptr_t kStringOffset = 3 * target::kWordSize;
1706 const intptr_t kStartIndexOffset = 2 * target::kWordSize;
1707 const intptr_t kEndIndexOffset = 1 * target::kWordSize;
1708 Label ok;
1709 __ movq(RSI, Address(RSP, +kStartIndexOffset));
1710 __ movq(RDI, Address(RSP, +kEndIndexOffset));
1711 __ orq(RSI, RDI);
1712 __ testq(RSI, Immediate(kSmiTagMask));
1713 __ j(NOT_ZERO, normal_ir_body); // 'start', 'end' not Smi.
1714
1715 __ subq(RDI, Address(RSP, +kStartIndexOffset));
1716 TryAllocateString(assembler, kOneByteStringCid,
1717 target::OneByteString::kMaxNewSpaceElements, &ok,
1718 normal_ir_body, RDI);
1719 __ Bind(&ok);
1720 // RAX: new string as tagged pointer.
1721 // Copy string.
1722 __ movq(RSI, Address(RSP, +kStringOffset));
1723 __ movq(RBX, Address(RSP, +kStartIndexOffset));
1724 __ SmiUntag(RBX);
1725 __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1,
1726 target::OneByteString::data_offset()));
1727 // RSI: Start address to copy from (untagged).
1728 // RBX: Untagged start index.
1729 __ movq(RCX, Address(RSP, +kEndIndexOffset));
1730 __ SmiUntag(RCX);
1731 __ subq(RCX, RBX);
1732 __ xorq(RDX, RDX);
1733 // RSI: Start address to copy from (untagged).
1734 // RCX: Untagged number of bytes to copy.
1735 // RAX: Tagged result string
1736 // RDX: Loop counter.
1737 // RBX: Scratch register.
1738 Label loop, check;
1739 __ jmp(&check, Assembler::kNearJump);
1740 __ Bind(&loop);
1741 __ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0));
1742 __ movb(FieldAddress(RAX, RDX, TIMES_1, target::OneByteString::data_offset()),
1743 ByteRegisterOf(RBX));
1744 __ incq(RDX);
1745 __ Bind(&check);
1746 __ cmpq(RDX, RCX);
1747 __ j(LESS, &loop, Assembler::kNearJump);
1748 __ ret();
1749 __ Bind(normal_ir_body);
1750}
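// The jmp-to-check loop above is a plain forward byte copy; testing the
// condition before the first iteration makes a zero-length substring safe.
// A C++ sketch of the same copy (indices already untagged and validated,
// as the intrinsic's contract requires):
#include <cstdint>

static void SketchCopySubstring(const uint8_t* src, uint8_t* dst,
                                intptr_t length) {
  for (intptr_t i = 0; i < length; i++) {  // Check runs before the body.
    dst[i] = src[i];
  }
}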
1751
1752void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1753 Label* normal_ir_body) {
1754 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Value.
1755 __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // Index.
1756 __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // target::OneByteString.
1757 __ SmiUntag(RBX);
1758 __ SmiUntag(RCX);
1759 __ movb(FieldAddress(RAX, RBX, TIMES_1, target::OneByteString::data_offset()),
1760 ByteRegisterOf(RCX));
1761 __ ret();
1762}
1763
1764void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1765 Label* normal_ir_body) {
1766 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Value.
1767 __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // Index.
1768 __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // target::TwoByteString.
1769 // Untag index and multiply by element size -> no-op.
1770 __ SmiUntag(RCX);
1771#if defined(DART_COMPRESSED_POINTERS)
1772 // The upper half of a compressed Smi contains undefined bits, but no x64
1773 // addressing mode will ignore these bits. We know the index is positive, so
1774 // we just clear the upper bits, which is shorter than movsxd.
1775 __ orl(RBX, RBX);
1776#endif
1777 __ movw(FieldAddress(RAX, RBX, TIMES_1, target::TwoByteString::data_offset()),
1778 RCX);
1779 __ ret();
1780}
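// The orl above relies on x64 zero-extending every write to a 32-bit
// register into the full 64-bit register. A one-line C++ equivalent of the
// effect (a sketch; the index is known to be non-negative):
#include <cstdint>

static uint64_t SketchClearUpperHalf(uint64_t index) {
  return static_cast<uint32_t>(index);  // Same effect as `orl reg, reg`.
}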
1781
1782void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1783 Label* normal_ir_body) {
1784 __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.
1785#if defined(DART_COMPRESSED_POINTERS)
1786 __ movsxd(RDI, RDI);
1787#endif
1788 Label ok;
1789 TryAllocateString(assembler, kOneByteStringCid,
1790 target::OneByteString::kMaxNewSpaceElements, &ok,
1791 normal_ir_body, RDI);
1793
1794 __ Bind(&ok);
1795 __ ret();
1796
1797 __ Bind(normal_ir_body);
1798}
1799
1800void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1801 Label* normal_ir_body) {
1802 __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.
1803#if defined(DART_COMPRESSED_POINTERS)
1804 __ movsxd(RDI, RDI);
1805#endif
1806 Label ok;
1807 TryAllocateString(assembler, kTwoByteStringCid,
1808 target::TwoByteString::kMaxNewSpaceElements, &ok,
1809 normal_ir_body, RDI);
1811
1812 __ Bind(&ok);
1813 __ ret();
1814
1815 __ Bind(normal_ir_body);
1816}
1817
1818void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1819 Label* normal_ir_body) {
1820 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // This.
1821 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Other.
1822
1823 StringEquality(assembler, RAX, RCX, RDI, RBX, RAX, normal_ir_body,
1824 kOneByteStringCid);
1825}
1826
1827void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1828 Label* normal_ir_body) {
1829 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // This.
1830 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Other.
1831
1832 StringEquality(assembler, RAX, RCX, RDI, RBX, RAX, normal_ir_body,
1833 kTwoByteStringCid);
1834}
1835
1836void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1837 Label* normal_ir_body,
1838 bool sticky) {
1839 if (FLAG_interpret_irregexp) return;
1840
1841 const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
1842 const intptr_t kStringParamOffset = 2 * target::kWordSize;
1843 // start_index smi is located at offset 1.
1844
1845 // Incoming registers:
1846 // RAX: Function. (Will be loaded with the specialized matcher function.)
1847 // RCX: Unknown. (Must be GC safe on tail call.)
1848 // R10: Arguments descriptor. (Will be preserved.)
1849
1850 // Load the specialized function pointer into RAX. Leverage the fact that
1851 // the string CIDs and the stored function pointers are in sequence.
1852 __ movq(RBX, Address(RSP, kRegExpParamOffset));
1853 __ movq(RDI, Address(RSP, kStringParamOffset));
1854 __ LoadClassId(RDI, RDI);
1855 __ SubImmediate(RDI, Immediate(kOneByteStringCid));
1856#if !defined(DART_COMPRESSED_POINTERS)
1857 __ movq(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_8,
1858 target::RegExp::function_offset(
1859 kOneByteStringCid, sticky)));
1860#else
1861 __ LoadCompressed(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_4,
1862 target::RegExp::function_offset(
1863 kOneByteStringCid, sticky)));
1864#endif
1865
1866 // Registers are now set up for the lazy compile stub. It expects the function
1867 // in RAX, the argument descriptor in R10, and IC-Data in RCX.
1868 __ xorq(RCX, RCX);
1869
1870 // Tail-call the function.
1871 __ LoadCompressed(
1872 CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
1873 __ movq(RDI,
1874 FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1875 __ jmp(RDI);
1876}
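// The scaled FieldAddress load above works because a RegExp object stores
// its specialized matcher functions contiguously, in the same order as the
// string class ids starting at kOneByteStringCid. A C++ sketch of that
// lookup (SketchLookupMatcher and the slot array are illustrative names):
#include <cstdint>

static void* SketchLookupMatcher(void* const* matcher_slots,
                                 intptr_t string_cid,
                                 intptr_t one_byte_string_cid) {
  // Index = (cid - kOneByteStringCid); the assembly folds the base slot
  // offset into function_offset(kOneByteStringCid, sticky).
  return matcher_slots[string_cid - one_byte_string_cid];
}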
1877
1878void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1879 Label* normal_ir_body) {
1880 __ LoadIsolate(RAX);
1881 __ movq(RAX, Address(RAX, target::Isolate::default_tag_offset()));
1882 __ ret();
1883}
1884
1885void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
1886 Label* normal_ir_body) {
1887 __ LoadIsolate(RAX);
1888 __ movq(RAX, Address(RAX, target::Isolate::current_tag_offset()));
1889 __ ret();
1890}
1891
1892void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
1893 Label* normal_ir_body) {
1894#if !defined(SUPPORT_TIMELINE)
1895 __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
1896 __ ret();
1897#else
1898 Label true_label;
1899 // Load TimelineStream*.
1900 __ movq(RAX, Address(THR, target::Thread::dart_stream_offset()));
1901 // Load uintptr_t from TimelineStream*.
1902 __ movq(RAX, Address(RAX, target::TimelineStream::enabled_offset()));
1903 __ cmpq(RAX, Immediate(0));
1904 __ j(NOT_ZERO, &true_label, Assembler::kNearJump);
1905 // Not enabled.
1906 __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
1907 __ ret();
1908 // Enabled.
1909 __ Bind(&true_label);
1910 __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
1911 __ ret();
1912#endif
1913}
1914
1915void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
1916 Label* normal_ir_body) {
1917#if !defined(SUPPORT_TIMELINE)
1918 __ xorq(RAX, RAX); // Return Smi 0.
1919 __ ret();
1920#else
1921 __ movq(RAX, Address(THR, target::Thread::next_task_id_offset()));
1922 __ movq(RBX, RAX);
1923 __ incq(RBX);
1924 __ movq(Address(THR, target::Thread::next_task_id_offset()), RBX);
1925 __ SmiTag(RAX); // Ignore loss of precision.
1926 __ ret();
1927#endif
1928}
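// With SUPPORT_TIMELINE, the code above is a post-increment of a 64-bit
// per-thread counter, returned as a Smi. A C++ sketch (SketchNextTaskId is
// an illustrative name; the SmiTag shift is why the "loss of precision"
// note can be ignored):
#include <cstdint>

static int64_t SketchNextTaskId(int64_t* next_task_id) {
  int64_t id = *next_task_id;  // Value to return (before SmiTag).
  *next_task_id = id + 1;      // Store the bumped counter back.
  return id;
}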
1929
1930#undef __
1931
1932} // namespace compiler
1933} // namespace dart
1934
1935#endif // defined(TARGET_ARCH_X64)