asm_intrinsifier_x64.cc

// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"  // Needed here to get TARGET_ARCH_X64.
#if defined(TARGET_ARCH_X64)

#define SHOULD_NOT_INCLUDE_RUNTIME

#include "vm/class_id.h"
#include "vm/compiler/asm_intrinsifier.h"
#include "vm/compiler/assembler/assembler.h"

namespace dart {
namespace compiler {

// When entering intrinsics code:
// PP: Caller's ObjectPool in JIT / global ObjectPool in AOT
// CODE_REG: Callee's Code in JIT / not passed in AOT
// R10: Arguments descriptor
// TOS: Return address
// The R10 and CODE_REG registers can be destroyed only if there is no
// slow-path, i.e. if the intrinsified method always executes a return.
// The RBP register should not be modified, because it is used by the profiler.
// The PP and THR registers (see constants_x64.h) must be preserved.

#define __ assembler->

// Tests if the two topmost arguments are Smis; jumps to label not_smi if not.
// Topmost argument is in RAX.
static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RCX, Address(RSP, +2 * target::kWordSize));
  __ orq(RCX, RAX);
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, not_smi);
}
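
// Illustrative sketch (not part of the original file): a plain C++ model of
// the combined Smi check above. Smis carry a zero low tag bit, so OR-ing both
// values and testing the tag mask checks both operands with a single branch.
// The helper name and the 1-bit tag layout are assumptions of the sketch;
// uintptr_t comes from the headers already included above.
static bool BothAreSmis_Model(uintptr_t a, uintptr_t b) {
  const uintptr_t kTagMask = 1;  // assumed Smi tag mask (kSmiTagMask)
  return ((a | b) & kTagMask) == 0;
}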

void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  ASSERT(kSmiTag == 0);
  Label overflow;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // Shift value is in RAX. Compare with tagged Smi.
  __ OBJ(cmp)(RAX, Immediate(target::ToRawSmi(target::kSmiBits)));
  __ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);

  __ SmiUntag(RAX);
  __ movq(RCX, RAX);  // Shift amount must be in RCX.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Value.

  // Overflow test - all the shifted-out bits must be same as the sign bit.
  __ movq(RDI, RAX);
  __ OBJ(shl)(RAX, RCX);
  __ OBJ(sar)(RAX, RCX);
  __ OBJ(cmp)(RAX, RDI);
  __ j(NOT_EQUAL, &overflow, Assembler::kNearJump);

  __ OBJ(shl)(RAX, RCX);  // Shift for result now we know there is no overflow.

  // RAX is a correctly tagged Smi.
  __ ret();

  __ Bind(&overflow);
  // Mint is rarely used on x64 (only for integers requiring 64 bit instead of
  // 63 or 31 bits as represented by Smi).
  __ Bind(normal_ir_body);
}
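
// Illustrative sketch (not part of the original file): the shift-overflow test
// above in plain C++. A left shift overflows the signed word exactly when
// arithmetically shifting back right does not recover the original value,
// i.e. when some shifted-out bit differs from the sign bit. Assumes
// 0 <= shift < 64, which the kSmiBits guard above establishes.
static bool ShlOverflows_Model(int64_t value, int64_t shift) {
  int64_t shifted = (int64_t)((uint64_t)value << shift);  // avoid signed-shift UB
  return (shifted >> shift) != value;
}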

static void CompareIntegers(Assembler* assembler,
                            Label* normal_ir_body,
                            Condition true_condition) {
  Label true_label;
  TestBothArgumentsSmis(assembler, normal_ir_body);
  // RAX contains the right argument.
  __ OBJ(cmp)(Address(RSP, +2 * target::kWordSize), RAX);
  __ j(true_condition, &true_label, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_lessThan(Assembler* assembler,
                                       Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS);
}

void AsmIntrinsifier::Integer_greaterThan(Assembler* assembler,
                                          Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER);
}

void AsmIntrinsifier::Integer_lessEqualThan(Assembler* assembler,
                                            Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, LESS_EQUAL);
}

void AsmIntrinsifier::Integer_greaterEqualThan(Assembler* assembler,
                                               Label* normal_ir_body) {
  CompareIntegers(assembler, normal_ir_body, GREATER_EQUAL);
}

// This is called for Smi and Mint receivers. The right argument
// can be Smi, Mint or double.
void AsmIntrinsifier::Integer_equalToInteger(Assembler* assembler,
                                             Label* normal_ir_body) {
  Label true_label, check_for_mint;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  // For integer receiver '===' check first.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ movq(RCX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ OBJ(cmp)(RAX, RCX);
  __ j(EQUAL, &true_label, Assembler::kNearJump);
  __ orq(RAX, RCX);
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &check_for_mint, Assembler::kNearJump);
  // Both arguments are smi, '===' is good enough.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&true_label);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At least one of the arguments was not Smi.
  Label receiver_not_smi;
  __ Bind(&check_for_mint);
  __ movq(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, &receiver_not_smi);

  // Left (receiver) is Smi, return false if right is not Double.
  // Note that an instance of Mint never contains a value that can be
  // represented by Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ CompareClassId(RAX, kDoubleCid);
  __ j(EQUAL, normal_ir_body);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(&receiver_not_smi);
  // RAX: receiver.
  __ CompareClassId(RAX, kMintCid);
  __ j(NOT_EQUAL, normal_ir_body);
  // Receiver is Mint, return false if right is Smi.
  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Smi == Mint -> false.
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  // TODO(srdjan): Implement Mint == Mint comparison.

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Integer_equal(Assembler* assembler,
                                    Label* normal_ir_body) {
  Integer_equalToInteger(assembler, normal_ir_body);
}

void AsmIntrinsifier::Smi_bitLength(Assembler* assembler,
                                    Label* normal_ir_body) {
  ASSERT(kSmiTagShift == 1);
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Index.
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  // XOR with sign bit to complement bits if value is negative.
  __ movq(RCX, RAX);
  __ sarq(RCX, Immediate(63));  // All 0 or all 1.
  __ OBJ(xor)(RAX, RCX);
  // BSR does not write the destination register if source is zero. Put a 1 in
  // the Smi tag bit to ensure BSR writes to destination register.
  __ orq(RAX, Immediate(kSmiTagMask));
  __ bsrq(RAX, RAX);
  __ SmiTag(RAX);
  __ ret();
}
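
// Illustrative sketch (not part of the original file): the bit-length
// computation above in plain C++, operating on the tagged representation just
// like the assembly. Negative values are complemented via XOR with their sign
// extension, and a 1 is OR-ed into the tag bit so the scan always finds a set
// bit (BSR leaves its destination undefined for a zero source).
static int SmiBitLength_Model(int64_t value) {
  int64_t tagged = (int64_t)((uint64_t)value << 1);  // Smi tag: shift by 1
  uint64_t u = (uint64_t)(tagged ^ (tagged >> 63));  // complement if negative
  u |= 1;                                            // mimic OR-ing the tag bit
  int msb = 63;
  while (((u >> msb) & 1) == 0) msb--;               // BSR: highest set bit
  return msb;                                        // == value.bitLength
}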

void AsmIntrinsifier::Bigint_lsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _lsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(R8, Address(RSP, 3 * target::kWordSize));   // x_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ subq(R8, Immediate(2));  // x_used > 0, Smi. R8 = x_used - 1, round up.
  __ sarq(R8, Immediate(2));  // R8 + 1 = number of digit pairs to read.
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RSI, RCX);
  __ sarq(RSI, Immediate(6));  // RSI = n ~/ (2*_DIGIT_BITS).
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ xorq(RAX, RAX);  // RAX = 0.
  __ movq(RDX,
          FieldAddress(RDI, R8, TIMES_8, target::TypedData::payload_offset()));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 2 * kBytesPerBigIntDigit), RAX);
  Label last;
  __ cmpq(R8, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, FieldAddress(RDI, R8, TIMES_8,
                            target::TypedData::payload_offset() -
                                2 * kBytesPerBigIntDigit));
  __ shldq(RAX, RDX, RCX);
  __ movq(Address(RBX, R8, TIMES_8, 0), RAX);
  __ decq(R8);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shldq(RDX, R8, RCX);  // R8 == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}
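
// Illustrative sketch (not part of the original file): the SHLD-based left
// shift above, modeled in plain C++ over 64-bit digit pairs.
// shld(dst, src, c) computes (dst << c) | (src >> (64 - c)) for 0 < c < 64.
// The assumed precondition, as for the intrinsic, is that the low words of
// the result list are already zero (Dart typed data is zero-initialized).
static void BigintLsh_Model(const uint64_t* x, int words, int n, uint64_t* r) {
  const int word_shift = n / 64;
  const int bit_shift = n % 64;
  // Top result word receives only the bits shifted out of the top input word.
  r[words + word_shift] =
      (bit_shift == 0) ? 0 : x[words - 1] >> (64 - bit_shift);
  for (int i = words - 1; i > 0; --i) {
    r[i + word_shift] =
        (bit_shift == 0) ? x[i]
                         : (x[i] << bit_shift) | (x[i - 1] >> (64 - bit_shift));
  }
  r[word_shift] = x[0] << bit_shift;
}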

void AsmIntrinsifier::Bigint_rsh(Assembler* assembler, Label* normal_ir_body) {
  // static void _rsh(Uint32List x_digits, int x_used, int n,
  //                  Uint32List r_digits)

  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ SmiUntag(RCX);
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits
  __ movq(RDX, RCX);
  __ sarq(RDX, Immediate(6));  // RDX = n ~/ (2*_DIGIT_BITS).
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // x_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RSI, RSI);
#endif
  __ subq(RSI, Immediate(2));  // x_used > 0, Smi. RSI = x_used - 1, round up.
  __ sarq(RSI, Immediate(2));
  __ leaq(RDI,
          FieldAddress(RDI, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ subq(RSI, RDX);  // RSI + 1 = number of digit pairs to read.
  __ leaq(RBX,
          FieldAddress(RBX, RSI, TIMES_8, target::TypedData::payload_offset()));
  __ negq(RSI);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 0));
  Label last;
  __ cmpq(RSI, Immediate(0));
  __ j(EQUAL, &last, Assembler::kNearJump);
  Label loop;
  __ Bind(&loop);
  __ movq(RAX, RDX);
  __ movq(RDX, Address(RDI, RSI, TIMES_8, 2 * kBytesPerBigIntDigit));
  __ shrdq(RAX, RDX, RCX);
  __ movq(Address(RBX, RSI, TIMES_8, 0), RAX);
  __ incq(RSI);
  __ j(NOT_ZERO, &loop, Assembler::kNearJump);
  __ Bind(&last);
  __ shrdq(RDX, RSI, RCX);  // RSI == 0.
  __ movq(Address(RBX, 0), RDX);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_absAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absAdd(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));   // used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label add_loop;
  __ Bind(&add_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ adcq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &add_loop, Assembler::kNearJump);

  Label last_carry;
  __ decq(R8);                                    // Does not affect carry flag.
  __ j(ZERO, &last_carry, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ adcq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);   // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&last_carry);
  Label done;
  __ j(NOT_CARRY, &done);
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          Immediate(1));

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}
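
// Illustrative sketch (not part of the original file): the ADC carry chain
// above in plain C++. unsigned __int128 (a GCC/Clang extension on x64) stands
// in for the hardware carry flag; r_digits needs room for one extra pair.
static void BigintAbsAdd_Model(const uint64_t* digits, int used_pairs,
                               const uint64_t* a_digits, int a_used_pairs,
                               uint64_t* r_digits) {
  unsigned __int128 carry = 0;
  int i = 0;
  for (; i < a_used_pairs; ++i) {  // add the overlapping digit pairs
    unsigned __int128 t = (unsigned __int128)digits[i] + a_digits[i] + carry;
    r_digits[i] = (uint64_t)t;
    carry = t >> 64;
  }
  for (; i < used_pairs; ++i) {  // propagate the carry through the rest
    unsigned __int128 t = (unsigned __int128)digits[i] + carry;
    r_digits[i] = (uint64_t)t;
    carry = t >> 64;
  }
  if (carry != 0) r_digits[i] = 1;  // final carry digit
}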

void AsmIntrinsifier::Bigint_absSub(Assembler* assembler,
                                    Label* normal_ir_body) {
  // static void _absSub(Uint32List digits, int used,
  //                     Uint32List a_digits, int a_used,
  //                     Uint32List r_digits)

  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // digits
  __ movq(R8, Address(RSP, 4 * target::kWordSize));   // used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));  // used > 0, Smi. R8 = used + 1, round up.
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RCX, Address(RSP, 2 * target::kWordSize));  // a_used is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RCX, RCX);
#endif
  __ addq(RCX, Immediate(2));  // a_used > 0, Smi. RCX = a_used + 1, round up.
  __ sarq(RCX, Immediate(2));  // RCX = number of digit pairs to process.
  __ movq(RBX, Address(RSP, 1 * target::kWordSize));  // r_digits

  // Precompute 'used - a_used' now so that carry flag is not lost later.
  __ subq(R8, RCX);
  __ incq(R8);  // To account for the extra test between loops.

  __ xorq(RDX, RDX);  // RDX = 0, carry flag = 0.
  Label sub_loop;
  __ Bind(&sub_loop);
  // Loop (a_used+1)/2 times, RCX > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ sbbq(RAX,
          FieldAddress(RSI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(RCX);  // Does not affect carry flag.
  __ j(NOT_ZERO, &sub_loop, Assembler::kNearJump);

  Label done;
  __ decq(R8);                              // Does not affect carry flag.
  __ j(ZERO, &done, Assembler::kNearJump);  // If used - a_used == 0.

  Label carry_loop;
  __ Bind(&carry_loop);
  // Loop (used+1)/2 - (a_used+1)/2 times, R8 > 0.
  __ movq(RAX,
          FieldAddress(RDI, RDX, TIMES_8, target::TypedData::payload_offset()));
  __ sbbq(RAX, Immediate(0));
  __ movq(FieldAddress(RBX, RDX, TIMES_8, target::TypedData::payload_offset()),
          RAX);
  __ incq(RDX);  // Does not affect carry flag.
  __ decq(R8);   // Does not affect carry flag.
  __ j(NOT_ZERO, &carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ LoadObject(RAX, NullObject());
  __ ret();
}

void AsmIntrinsifier::Bigint_mulAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulAdd(Uint32List x_digits, int xi,
  //                    Uint32List m_digits, int i,
  //                    Uint32List a_digits, int j, int n) {
  //   uint64_t x = x_digits[xi >> 1 .. (xi >> 1) + 1];  // xi is Smi and even.
  //   if (x == 0 || n == 0) {
  //     return 2;
  //   }
  //   uint64_t* mip = &m_digits[i >> 1];  // i is Smi and even.
  //   uint64_t* ajp = &a_digits[j >> 1];  // j is Smi and even.
  //   uint64_t c = 0;
  //   SmiUntag(n);  // n is Smi and even.
  //   n = (n + 1)/2;  // Number of pairs to process.
  //   do {
  //     uint64_t mi = *mip++;
  //     uint64_t aj = *ajp;
  //     uint128_t t = x*mi + aj + c;  // 64-bit * 64-bit -> 128-bit.
  //     *ajp++ = low64(t);
  //     c = high64(t);
  //   } while (--n > 0);
  //   while (c != 0) {
  //     uint128_t t = *ajp + c;
  //     *ajp++ = low64(t);
  //     c = high64(t);  // c == 0 or 1.
  //   }
  //   return 2;
  // }

  Label done;
  // RBX = x, done if x == 0
  __ movq(RCX, Address(RSP, 7 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 6 * target::kWordSize));  // xi is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ movq(RBX,
          FieldAddress(RCX, RAX, TIMES_2, target::TypedData::payload_offset()));
  __ testq(RBX, RBX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // R8 = (SmiUntag(n) + 1)/2, no_op if n == 0
  __ movq(R8, Address(RSP, 1 * target::kWordSize));  // n is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(R8, R8);
#endif
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));  // R8 = number of digit pairs to process.
  __ j(ZERO, &done, Assembler::kNearJump);

  // RDI = mip = &m_digits[i >> 1]
  __ movq(RDI, Address(RSP, 5 * target::kWordSize));  // m_digits
  __ movq(RAX, Address(RSP, 4 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RSI = ajp = &a_digits[j >> 1]
  __ movq(RSI, Address(RSP, 3 * target::kWordSize));  // a_digits
  __ movq(RAX, Address(RSP, 2 * target::kWordSize));  // j is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RCX = c = 0
  __ xorq(RCX, RCX);

  Label muladd_loop;
  __ Bind(&muladd_loop);
  // x: RBX
  // mip: RDI
  // ajp: RSI
  // c: RCX
  // t: RDX:RAX (not live at loop entry)
  // n: R8

  // uint64_t mi = *mip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint128_t t = x*mi
  __ mulq(RBX);       // t = RDX:RAX = RAX * RBX, 64-bit * 64-bit -> 128-bit
  __ addq(RAX, RCX);  // t += c
  __ adcq(RDX, Immediate(0));

  // uint64_t aj = *ajp; t += aj
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high64(t)
  __ movq(RCX, RDX);

  // while (--n > 0)
  __ decq(R8);  // --n
  __ j(NOT_ZERO, &muladd_loop, Assembler::kNearJump);

  __ testq(RCX, RCX);
  __ j(ZERO, &done, Assembler::kNearJump);

  // *ajp += c
  __ addq(Address(RSI, 0), RCX);
  __ j(NOT_CARRY, &done, Assembler::kNearJump);

  Label propagate_carry_loop;
  __ Bind(&propagate_carry_loop);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));
  __ incq(Address(RSI, 0));  // c == 0 or 1
  __ j(CARRY, &propagate_carry_loop, Assembler::kNearJump);

  __ Bind(&done);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}
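
// Illustrative sketch (not part of the original file): the pseudo code above
// as compilable C++ over 64-bit digit pairs. unsigned __int128 (a GCC/Clang
// extension on x64) models the MUL instruction's RDX:RAX double-width result.
static int BigintMulAdd_Model(uint64_t x, const uint64_t* mip, uint64_t* ajp,
                              int n_pairs) {
  if (x == 0 || n_pairs == 0) return 2;
  uint64_t c = 0;
  do {
    unsigned __int128 t = (unsigned __int128)x * *mip++ + *ajp + c;
    *ajp++ = (uint64_t)t;     // low64(t)
    c = (uint64_t)(t >> 64);  // high64(t)
  } while (--n_pairs > 0);
  while (c != 0) {  // propagate the final carry
    unsigned __int128 t = (unsigned __int128)*ajp + c;
    *ajp++ = (uint64_t)t;
    c = (uint64_t)(t >> 64);
  }
  return 2;
}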

void AsmIntrinsifier::Bigint_sqrAdd(Assembler* assembler,
                                    Label* normal_ir_body) {
  // Pseudo code:
  // static int _sqrAdd(Uint32List x_digits, int i,
  //                    Uint32List a_digits, int used) {
  //   uint64_t* xip = &x_digits[i >> 1];  // i is Smi and even.
  //   uint64_t x = *xip++;
  //   if (x == 0) return 2;
  //   uint64_t* ajp = &a_digits[i];  // j == 2*i, i is Smi.
  //   uint64_t aj = *ajp;
  //   uint128_t t = x*x + aj;
  //   *ajp++ = low64(t);
  //   uint128_t c = high64(t);
  //   int n = ((used - i + 2) >> 2) - 1;  // used and i are Smi. n: num pairs.
  //   while (--n >= 0) {
  //     uint64_t xi = *xip++;
  //     uint64_t aj = *ajp;
  //     uint192_t t = 2*x*xi + aj + c;  // 2-bit * 64-bit * 64-bit -> 129-bit.
  //     *ajp++ = low64(t);
  //     c = high128(t);  // 65-bit.
  //   }
  //   uint64_t aj = *ajp;
  //   uint128_t t = aj + c;  // 64-bit + 65-bit -> 66-bit.
  //   *ajp++ = low64(t);
  //   *ajp = high64(t);
  //   return 2;
  // }

  // RDI = xip = &x_digits[i >> 1]
  __ movq(RDI, Address(RSP, 4 * target::kWordSize));  // x_digits
  __ movq(RAX, Address(RSP, 3 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RDI,
          FieldAddress(RDI, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RBX = x = *xip++, return if x == 0
  Label x_zero;
  __ movq(RBX, Address(RDI, 0));
  __ cmpq(RBX, Immediate(0));
  __ j(EQUAL, &x_zero);
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // RSI = ajp = &a_digits[i]
  __ movq(RSI, Address(RSP, 2 * target::kWordSize));  // a_digits
  __ leaq(RSI,
          FieldAddress(RSI, RAX, TIMES_4, target::TypedData::payload_offset()));

  // RDX:RAX = t = x*x + *ajp
  __ movq(RAX, RBX);
  __ mulq(RBX);
  __ addq(RAX, Address(RSI, 0));
  __ adcq(RDX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // int n = (used - i + 1)/2 - 1
  __ OBJ(mov)(R8, Address(RSP, 1 * target::kWordSize));  // used is Smi
  __ OBJ(sub)(R8, Address(RSP, 3 * target::kWordSize));  // i is Smi
  __ addq(R8, Immediate(2));
  __ sarq(R8, Immediate(2));
  __ decq(R8);  // R8 = number of digit pairs to process.

  // uint128_t c = high64(t)
  __ xorq(R13, R13);  // R13 = high64(c) == 0
  __ movq(R12, RDX);  // R12 = low64(c) == high64(t)

  Label loop, done;
  __ Bind(&loop);
  // x: RBX
  // xip: RDI
  // ajp: RSI
  // c: R13:R12
  // t: RCX:RDX:RAX (not live at loop entry)
  // n: R8

  // while (--n >= 0)
  __ decq(R8);  // --n
  __ j(NEGATIVE, &done, Assembler::kNearJump);

  // uint64_t xi = *xip++
  __ movq(RAX, Address(RDI, 0));
  __ addq(RDI, Immediate(2 * kBytesPerBigIntDigit));

  // uint192_t t = RCX:RDX:RAX = 2*x*xi + aj + c
  __ mulq(RBX);       // RDX:RAX = RAX * RBX
  __ xorq(RCX, RCX);  // RCX = 0
  __ shldq(RCX, RDX, Immediate(1));
  __ shldq(RDX, RAX, Immediate(1));
  __ shlq(RAX, Immediate(1));     // RCX:RDX:RAX <<= 1
  __ addq(RAX, Address(RSI, 0));  // t += aj
  __ adcq(RDX, Immediate(0));
  __ adcq(RCX, Immediate(0));
  __ addq(RAX, R12);  // t += low64(c)
  __ adcq(RDX, R13);  // t += high64(c) << 64
  __ adcq(RCX, Immediate(0));

  // *ajp++ = low64(t)
  __ movq(Address(RSI, 0), RAX);
  __ addq(RSI, Immediate(2 * kBytesPerBigIntDigit));

  // c = high128(t)
  __ movq(R12, RDX);
  __ movq(R13, RCX);

  __ jmp(&loop, Assembler::kNearJump);

  __ Bind(&done);
  // uint128_t t = aj + c
  __ addq(R12, Address(RSI, 0));  // t = c, t += *ajp
  __ adcq(R13, Immediate(0));

  // *ajp++ = low64(t)
  // *ajp = high64(t)
  __ movq(Address(RSI, 0), R12);
  __ movq(Address(RSI, 2 * kBytesPerBigIntDigit), R13);

  __ Bind(&x_zero);
  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}
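
// Illustrative sketch (not part of the original file): the 129-bit doubling
// step above in plain C++. The assembly doubles the 128-bit product with a
// SHLD chain into RCX:RDX:RAX; here the bit shifted out of the top is kept
// separately, so hi:mid:lo together hold 2*x*xi.
static void DoubleProduct_Model(uint64_t x, uint64_t xi, uint64_t* lo,
                                uint64_t* mid, uint64_t* hi) {
  unsigned __int128 p = (unsigned __int128)x * xi;  // 128-bit product
  *hi = (uint64_t)(p >> 127);                       // bit pushed out by <<= 1
  p <<= 1;                                          // low 128 bits of 2*x*xi
  *lo = (uint64_t)p;
  *mid = (uint64_t)(p >> 64);
}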

void AsmIntrinsifier::Bigint_estimateQuotientDigit(Assembler* assembler,
                                                   Label* normal_ir_body) {
  // Pseudo code:
  // static int _estQuotientDigit(Uint32List args, Uint32List digits, int i) {
  //   uint64_t yt = args[_YT_LO .. _YT];  // _YT_LO == 0, _YT == 1.
  //   uint64_t* dp = &digits[(i >> 1) - 1];  // i is Smi.
  //   uint64_t dh = dp[0];  // dh == digits[(i >> 1) - 1 .. i >> 1].
  //   uint64_t qd;
  //   if (dh == yt) {
  //     qd = (DIGIT_MASK << 32) | DIGIT_MASK;
  //   } else {
  //     dl = dp[-1];  // dl == digits[(i >> 1) - 3 .. (i >> 1) - 2].
  //     qd = dh:dl / yt;  // No overflow possible, because dh < yt.
  //   }
  //   args[_QD .. _QD_HI] = qd;  // _QD == 2, _QD_HI == 3.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = yt = args[0..1]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::payload_offset()));

  // RBX = dp = &digits[(i >> 1) - 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi and odd.
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ leaq(RBX, FieldAddress(
                   RBX, RAX, TIMES_2,
                   target::TypedData::payload_offset() - kBytesPerBigIntDigit));

  // RDX = dh = dp[0]
  __ movq(RDX, Address(RBX, 0));

  // RAX = qd = (DIGIT_MASK << 32) | DIGIT_MASK = -1
  __ movq(RAX, Immediate(-1));

  // Return qd if dh == yt
  Label return_qd;
  __ cmpq(RDX, RCX);
  __ j(EQUAL, &return_qd, Assembler::kNearJump);

  // RAX = dl = dp[-1]
  __ movq(RAX, Address(RBX, -2 * kBytesPerBigIntDigit));

  // RAX = qd = dh:dl / yt = RDX:RAX / RCX
  __ divq(RCX);

  __ Bind(&return_qd);
  // args[2..3] = qd
  __ movq(FieldAddress(RDI, target::TypedData::payload_offset() +
                                2 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}
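
// Illustrative sketch (not part of the original file): the quotient estimate
// in plain C++. DIV with a 128-bit dividend in RDX:RAX is modeled with
// unsigned __int128; the dh < yt precondition keeps the quotient in 64 bits.
static uint64_t EstQuotientDigit_Model(uint64_t yt, uint64_t dh, uint64_t dl) {
  if (dh == yt) return ~(uint64_t)0;  // saturate: qd = all ones
  unsigned __int128 dividend = ((unsigned __int128)dh << 64) | dl;
  return (uint64_t)(dividend / yt);  // dh < yt implies quotient < 2^64
}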

void AsmIntrinsifier::Montgomery_mulMod(Assembler* assembler,
                                        Label* normal_ir_body) {
  // Pseudo code:
  // static int _mulMod(Uint32List args, Uint32List digits, int i) {
  //   uint64_t rho = args[_RHO .. _RHO_HI];  // _RHO == 2, _RHO_HI == 3.
  //   uint64_t d = digits[i >> 1 .. (i >> 1) + 1];  // i is Smi and even.
  //   uint128_t t = rho*d;
  //   args[_MU .. _MU_HI] = t mod DIGIT_BASE^2;  // _MU == 4, _MU_HI == 5.
  //   return 2;
  // }

  // RDI = args
  __ movq(RDI, Address(RSP, 3 * target::kWordSize));  // args

  // RCX = rho = args[2 .. 3]
  __ movq(RCX, FieldAddress(RDI, target::TypedData::payload_offset() +
                                     2 * kBytesPerBigIntDigit));

  // RAX = digits[i >> 1 .. (i >> 1) + 1]
  __ movq(RBX, Address(RSP, 2 * target::kWordSize));  // digits
  __ movq(RAX, Address(RSP, 1 * target::kWordSize));  // i is Smi
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(RAX, RAX);
#endif
  __ movq(RAX,
          FieldAddress(RBX, RAX, TIMES_2, target::TypedData::payload_offset()));

  // RDX:RAX = t = rho*d
  __ mulq(RCX);

  // args[4 .. 5] = t mod DIGIT_BASE^2 = low64(t)
  __ movq(FieldAddress(RDI, target::TypedData::payload_offset() +
                                4 * kBytesPerBigIntDigit),
          RAX);

  __ movq(RAX, Immediate(target::ToRawSmi(2)));  // Two digits processed.
  __ ret();
}
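
// Illustrative sketch (not part of the original file): the Montgomery step in
// plain C++. Reducing t modulo DIGIT_BASE^2 (== 2^64 for a 64-bit digit pair)
// just keeps the low 64 bits of the product, i.e. a wrapping multiply.
static uint64_t MontgomeryMulMod_Model(uint64_t rho, uint64_t d) {
  return rho * d;  // unsigned multiply wraps mod 2^64 == low64(rho*d)
}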

// Check if the last argument is a double, jump to label 'is_smi' if smi
// (easy to convert to double), otherwise jump to label 'not_double_smi'.
// Returns the last argument in RAX.
static void TestLastArgumentIsDouble(Assembler* assembler,
                                     Label* is_smi,
                                     Label* not_double_smi) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(ZERO, is_smi);  // Jump if Smi.
  __ CompareClassId(RAX, kDoubleCid);
  __ j(NOT_EQUAL, not_double_smi);
  // Fall through if double.
}

// Both arguments on stack, left argument is a double, right argument is of
// unknown type. Return true or false object in RAX. Any NaN argument
// returns false. Any non-double argument causes control flow to fall through
// to the slow case (compiled method body).
static void CompareDoubles(Assembler* assembler,
                           Label* normal_ir_body,
                           Condition true_condition) {
  Label is_false, is_true, is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false;
  __ j(true_condition, &is_true, Assembler::kNearJump);
  // Fall through false.
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}
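
// Illustrative sketch (not part of the original file): COMISD raises the
// parity flag for unordered (NaN) operands, which the code above turns into
// 'false'. The same policy in plain C++, specialized to BELOW (operator <):
static bool DoubleLess_Model(double left, double right) {
  if (left != left || right != right) return false;  // NaN: unordered -> false
  return left < right;
}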

void AsmIntrinsifier::Double_greaterThan(Assembler* assembler,
                                         Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE);
}

void AsmIntrinsifier::Double_greaterEqualThan(Assembler* assembler,
                                              Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, ABOVE_EQUAL);
}

void AsmIntrinsifier::Double_lessThan(Assembler* assembler,
                                      Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW);
}

void AsmIntrinsifier::Double_equal(Assembler* assembler,
                                   Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, EQUAL);
}

void AsmIntrinsifier::Double_lessEqualThan(Assembler* assembler,
                                           Label* normal_ir_body) {
  CompareDoubles(assembler, normal_ir_body, BELOW_EQUAL);
}

// Expects left argument to be double (receiver). Right argument is unknown.
// Both arguments are on stack.
static void DoubleArithmeticOperations(Assembler* assembler,
                                       Label* normal_ir_body,
                                       Token::Kind kind) {
  Label is_smi, double_op;
  TestLastArgumentIsDouble(assembler, &is_smi, normal_ir_body);
  // Both arguments are double, right operand is in RAX.
  __ movsd(XMM1, FieldAddress(RAX, target::Double::value_offset()));
  __ Bind(&double_op);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // Left argument.
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  switch (kind) {
    case Token::kADD:
      __ addsd(XMM0, XMM1);
      break;
    case Token::kSUB:
      __ subsd(XMM0, XMM1);
      break;
    case Token::kMUL:
      __ mulsd(XMM0, XMM1);
      break;
    case Token::kDIV:
      __ divsd(XMM0, XMM1);
      break;
    default:
      UNREACHABLE();
  }
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(&is_smi);
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ jmp(&double_op);
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_add(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kADD);
}

void AsmIntrinsifier::Double_mul(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kMUL);
}

void AsmIntrinsifier::Double_sub(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kSUB);
}

void AsmIntrinsifier::Double_div(Assembler* assembler, Label* normal_ir_body) {
  DoubleArithmeticOperations(assembler, normal_ir_body, Token::kDIV);
}

void AsmIntrinsifier::Double_mulFromInteger(Assembler* assembler,
                                            Label* normal_ir_body) {
  // Only smis allowed.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM1, RAX);
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ mulsd(XMM0, XMM1);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

// Argument is an integer (Mint or Smi); only the Smi case is intrinsified.
void AsmIntrinsifier::DoubleFromInteger(Assembler* assembler,
                                        Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ testq(RAX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);
  // Is Smi.
  __ SmiUntag(RAX);
  __ OBJ(cvtsi2sd)(XMM0, RAX);
  const Class& double_class = DoubleClass();
  __ TryAllocate(double_class, normal_ir_body, Assembler::kFarJump,
                 RAX,  // Result register.
                 R13);
  __ movsd(FieldAddress(RAX, target::Double::value_offset()), XMM0);
  __ ret();
  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::Double_getIsNaN(Assembler* assembler,
                                      Label* normal_ir_body) {
  Label is_true;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ comisd(XMM0, XMM0);
  __ j(PARITY_EVEN, &is_true, Assembler::kNearJump);  // NaN -> true;
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

void AsmIntrinsifier::Double_getIsInfinite(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_inf, done;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movq(RAX, FieldAddress(RAX, target::Double::value_offset()));
  // Mask off the sign.
  __ AndImmediate(RAX, Immediate(0x7FFFFFFFFFFFFFFFLL));
  // Compare with +infinity.
  __ CompareImmediate(RAX, Immediate(0x7FF0000000000000LL));
  __ j(EQUAL, &is_inf, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ jmp(&done);

  __ Bind(&is_inf);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));

  __ Bind(&done);
  __ ret();
}

void AsmIntrinsifier::Double_getIsNegative(Assembler* assembler,
                                           Label* normal_ir_body) {
  Label is_false, is_true, is_zero;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ movsd(XMM0, FieldAddress(RAX, target::Double::value_offset()));
  __ xorpd(XMM1, XMM1);  // 0.0 -> XMM1.
  __ comisd(XMM0, XMM1);
  __ j(PARITY_EVEN, &is_false, Assembler::kNearJump);  // NaN -> false.
  __ j(EQUAL, &is_zero, Assembler::kNearJump);  // Check for negative zero.
  __ j(ABOVE_EQUAL, &is_false, Assembler::kNearJump);  // >= 0 -> false.
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
  __ Bind(&is_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_zero);
  // Check for negative zero (get the sign bit).
  __ movmskpd(RAX, XMM0);
  __ testq(RAX, Immediate(1));
  __ j(NOT_ZERO, &is_true, Assembler::kNearJump);
  __ jmp(&is_false, Assembler::kNearJump);
}

// Identity comparison.
void AsmIntrinsifier::ObjectEquals(Assembler* assembler,
                                   Label* normal_ir_body) {
  Label is_true;
  const intptr_t kReceiverOffset = 2;
  const intptr_t kArgumentOffset = 1;

  __ movq(RAX, Address(RSP, +kArgumentOffset * target::kWordSize));
  __ OBJ(cmp)(RAX, Address(RSP, +kReceiverOffset * target::kWordSize));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}

static void JumpIfInteger(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kSmiCid, kMintCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kSmiCid, kMintCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfString(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kOneByteStringCid, kTwoByteStringCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfNotList(Assembler* assembler, Register cid, Label* target) {
  assembler->RangeCheck(cid, kNoRegister, kArrayCid, kGrowableObjectArrayCid,
                        Assembler::kIfNotInRange, target);
}

static void JumpIfType(Assembler* assembler, Register cid, Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, kNoRegister, kTypeCid, kRecordTypeCid,
                        Assembler::kIfInRange, target);
}

static void JumpIfNotType(Assembler* assembler, Register cid, Label* target) {
  COMPILE_ASSERT((kFunctionTypeCid == kTypeCid + 1) &&
                 (kRecordTypeCid == kTypeCid + 2));
  assembler->RangeCheck(cid, kNoRegister, kTypeCid, kRecordTypeCid,
                        Assembler::kIfNotInRange, target);
}
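
// Illustrative sketch (not part of the original file): class-id range checks
// like the ones above are commonly compiled to a single unsigned comparison,
// since (cid - lo) wraps around for cid < lo. Whether RangeCheck emits exactly
// this encoding is an assumption of the sketch.
static bool CidInRange_Model(intptr_t cid, intptr_t lo, intptr_t hi) {
  return (uintptr_t)(cid - lo) <= (uintptr_t)(hi - lo);
}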

// Return type quickly for simple types (not parameterized and not signature).
void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label use_declaration_type, not_integer, not_double, not_string;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  // RCX: untagged cid of instance (RAX).
  __ cmpq(RCX, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a closure.

  __ cmpq(RCX, Immediate(kRecordCid));
  __ j(EQUAL, normal_ir_body);  // Instance is a record.

  __ cmpl(RCX, Immediate(kNumPredefinedCids));
  __ j(ABOVE, &use_declaration_type);

  // If object is an instance of _Double return double type.
  __ cmpl(RCX, Immediate(kDoubleCid));
  __ j(NOT_EQUAL, &not_double);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::double_type_offset()));
  __ ret();

  __ Bind(&not_double);
  // If object is an integer (smi, mint or bigint) return int type.
  __ movl(RAX, RCX);
  JumpIfNotInteger(assembler, RAX, &not_integer);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::int_type_offset()));
  __ ret();

  __ Bind(&not_integer);
  // If object is a string (one byte, two byte or external variants) return
  // string type.
  __ movq(RAX, RCX);
  JumpIfNotString(assembler, RAX, &not_string);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::string_type_offset()));
  __ ret();

  __ Bind(&not_string);
  // If object is a type or function type, return Dart type.
  __ movq(RAX, RCX);
  JumpIfNotType(assembler, RAX, &use_declaration_type);

  __ LoadIsolateGroup(RAX);
  __ movq(RAX, Address(RAX, target::IsolateGroup::object_store_offset()));
  __ movq(RAX, Address(RAX, target::ObjectStore::type_type_offset()));
  __ ret();

  // Object is neither double, nor integer, nor string, nor type.
  __ Bind(&use_declaration_type);
  __ LoadClassById(RDI, RCX);
  __ movzxw(RCX, FieldAddress(RDI, target::Class::num_type_arguments_offset()));
  __ cmpq(RCX, Immediate(0));
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  __ LoadCompressed(
      RAX, FieldAddress(RDI, target::Class::declaration_type_offset()));
  __ CompareObject(RAX, NullObject());
  __ j(EQUAL, normal_ir_body, Assembler::kNearJump);  // Not yet set.
  __ ret();

  __ Bind(normal_ir_body);
}

// Compares cid1 and cid2 to see if they're syntactically equivalent. If this
// can be determined by this fast path, it jumps to either equal_* or not_equal.
// If classes are equivalent but may be generic, then jumps to
// equal_may_be_generic. Clobbers scratch.
static void EquivalentClassIds(Assembler* assembler,
                               Label* normal_ir_body,
                               Label* equal_may_be_generic,
                               Label* equal_not_generic,
                               Label* not_equal,
                               Register cid1,
                               Register cid2,
                               Register scratch,
                               bool testing_instance_cids) {
  Label not_integer, not_integer_or_string, not_integer_or_string_or_list;

  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ cmpq(cid1, Immediate(kClosureCid));
  __ j(EQUAL, normal_ir_body);

  // Check if left hand side is a record. Records are handled in the runtime.
  __ cmpq(cid1, Immediate(kRecordCid));
  __ j(EQUAL, normal_ir_body);

  // Check whether class ids match. If class ids don't match types may still be
  // considered equivalent (e.g. multiple string implementation classes map to a
  // single String type).
  __ cmpq(cid1, cid2);
  __ j(EQUAL, equal_may_be_generic);

  // Class ids are different. Check if we are comparing two string types (with
  // different representations), two integer types, two list types or two type
  // types.
  __ cmpq(cid1, Immediate(kNumPredefinedCids));
  __ j(ABOVE_EQUAL, not_equal);

  // Check if both are integer types.
  __ movq(scratch, cid1);
  JumpIfNotInteger(assembler, scratch, &not_integer);

  // First type is an integer. Check if the second is an integer too.
  __ movq(scratch, cid2);
  JumpIfInteger(assembler, scratch, equal_not_generic);
  // Integer types are only equivalent to other integer types.
  __ jmp(not_equal);

  __ Bind(&not_integer);
  // Check if both are String types.
  __ movq(scratch, cid1);
  JumpIfNotString(assembler, scratch,
                  testing_instance_cids ? &not_integer_or_string : not_equal);

  // First type is a String. Check if the second is a String too.
  __ movq(scratch, cid2);
  JumpIfString(assembler, scratch, equal_not_generic);
  // String types are only equivalent to other String types.
  __ jmp(not_equal);

  if (testing_instance_cids) {
    __ Bind(&not_integer_or_string);
    // Check if both are List types.
    __ movq(scratch, cid1);
    JumpIfNotList(assembler, scratch, &not_integer_or_string_or_list);

    // First type is a List. Check if the second is a List too.
    __ movq(scratch, cid2);
    JumpIfNotList(assembler, scratch, not_equal);
    ASSERT(compiler::target::Array::type_arguments_offset() ==
           compiler::target::GrowableObjectArray::type_arguments_offset());
    __ jmp(equal_may_be_generic);

    __ Bind(&not_integer_or_string_or_list);
    // Check if the first type is a Type. If it is not then types are not
    // equivalent because they have different class ids and they are not String
    // or integer or List or Type.
    __ movq(scratch, cid1);
    JumpIfNotType(assembler, scratch, not_equal);

    // First type is a Type. Check if the second is a Type too.
    __ movq(scratch, cid2);
    JumpIfType(assembler, scratch, equal_not_generic);
    // Type types are only equivalent to other Type types.
    __ jmp(not_equal);
  }
}

void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
                                                Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);

  __ movq(RAX, Address(RSP, +2 * target::kWordSize));
  __ LoadClassIdMayBeSmi(RDX, RAX);

  Label equal_may_be_generic, equal, not_equal;
  EquivalentClassIds(assembler, normal_ir_body, &equal_may_be_generic, &equal,
                     &not_equal, RCX, RDX, RAX,
                     /* testing_instance_cids = */ true);

  __ Bind(&equal_may_be_generic);
  // Classes are equivalent and neither is a closure class.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(RAX, RCX);
  __ movl(
      RAX,
      FieldAddress(
          RAX,
          target::Class::host_type_arguments_field_offset_in_words_offset()));
  __ cmpl(RAX, Immediate(target::Class::kNoTypeArguments));
  __ j(EQUAL, &equal);

  // Compare type arguments, host_type_arguments_field_offset_in_words in RAX.
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(mov)(RCX, FieldAddress(RCX, RAX, TIMES_COMPRESSED_WORD_SIZE, 0));
  __ OBJ(mov)(RDX, FieldAddress(RDX, RAX, TIMES_COMPRESSED_WORD_SIZE, 0));
  __ OBJ(cmp)(RCX, RDX);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  // Fall through to equal case if type arguments are equal.

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::String_getHashCode(Assembler* assembler,
                                         Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ movl(RAX, FieldAddress(RAX, target::String::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ addq(RAX, RAX);  // Smi tag RAX, setting Z flag.
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}
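
// Illustrative sketch (not part of the original file): the tag-and-test trick
// above in plain C++. Doubling the raw hash both applies the Smi tag (a zero
// low bit with the value shifted up by one) and sets the zero flag when the
// hash has not been computed yet, so one ADD serves two purposes.
static bool TagStringHash_Model(uint32_t raw_hash, intptr_t* tagged_out) {
  *tagged_out = (intptr_t)raw_hash << 1;  // Smi-tag, assumed 1-bit tag
  return *tagged_out != 0;  // false: fall through and compute the hash
}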

void AsmIntrinsifier::Type_equality(Assembler* assembler,
                                    Label* normal_ir_body) {
  Label equal, not_equal, equiv_cids_may_be_generic, equiv_cids, check_legacy;

  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(cmp)(RCX, RDX);
  __ j(EQUAL, &equal);

  // RCX might not be a Type object, so check that first (RDX should be though,
  // since this is a method on the Type class).
  __ LoadClassIdMayBeSmi(RAX, RCX);
  __ cmpq(RAX, Immediate(kTypeCid));
  __ j(NOT_EQUAL, normal_ir_body);

  // Check if types are syntactically equal.
  __ LoadTypeClassId(RDI, RCX);
  __ LoadTypeClassId(RSI, RDX);
  // We are not testing instance cids, but type class cids of Type instances.
  EquivalentClassIds(assembler, normal_ir_body, &equiv_cids_may_be_generic,
                     &equiv_cids, &not_equal, RDI, RSI, RAX,
                     /* testing_instance_cids = */ false);

  __ Bind(&equiv_cids_may_be_generic);
  // Compare type arguments in Type instances.
  __ LoadCompressed(RDI, FieldAddress(RCX, target::Type::arguments_offset()));
  __ LoadCompressed(RSI, FieldAddress(RDX, target::Type::arguments_offset()));
  __ cmpq(RDI, RSI);
  __ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
  // Fall through to check nullability if type arguments are equal.

  // Check nullability.
  __ Bind(&equiv_cids);
  __ LoadAbstractTypeNullability(RCX, RCX);
  __ LoadAbstractTypeNullability(RDX, RDX);
  __ cmpq(RCX, RDX);
  __ j(NOT_EQUAL, &check_legacy, Assembler::kNearJump);
  // Fall through to equal case if nullability is strictly equal.

  __ Bind(&equal);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  // At this point the nullabilities are different, so they can only be
  // syntactically equivalent if they're both either kNonNullable or kLegacy.
  // These are the two largest values of the enum, so we can just do a < check.
  ASSERT(target::Nullability::kNullable < target::Nullability::kNonNullable &&
         target::Nullability::kNonNullable < target::Nullability::kLegacy);
  __ Bind(&check_legacy);
  __ cmpq(RCX, Immediate(target::Nullability::kNonNullable));
  __ j(LESS, &not_equal, Assembler::kNearJump);
  __ cmpq(RDX, Immediate(target::Nullability::kNonNullable));
  __ j(GREATER_EQUAL, &equal, Assembler::kNearJump);

  __ Bind(&not_equal);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::AbstractType_getHashCode(Assembler* assembler,
                                               Label* normal_ir_body) {
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // AbstractType object.
  __ LoadCompressedSmi(RAX,
                       FieldAddress(RAX, target::AbstractType::hash_offset()));
  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagShift == 1);
  __ OBJ(test)(RAX, RAX);
  __ j(ZERO, normal_ir_body, Assembler::kNearJump);
  __ ret();
  __ Bind(normal_ir_body);
  // Hash not yet computed.
}

void AsmIntrinsifier::AbstractType_equality(Assembler* assembler,
                                            Label* normal_ir_body) {
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));
  __ movq(RDX, Address(RSP, +2 * target::kWordSize));
  __ OBJ(cmp)(RCX, RDX);
  __ j(NOT_EQUAL, normal_ir_body);

  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

// Keep in sync with Instance::IdentityHashCode.
// Note int and double never reach here because they override _identityHashCode.
// Special cases are also not needed for null or bool because they were pre-set
// during VM isolate finalization.
void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                     Label* normal_ir_body) {
  Label not_yet_computed;
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // Object.
  __ movl(RAX, FieldAddress(RAX, target::Object::tags_offset() +
                                     target::UntaggedObject::kHashTagPos /
                                         kBitsPerByte));
  __ cmpl(RAX, Immediate(0));
  __ j(EQUAL, &not_yet_computed, Assembler::kNearJump);
  __ SmiTag(RAX);
  __ ret();

  __ Bind(&not_yet_computed);
  __ movq(RCX, Address(THR, target::Thread::random_offset()));
  __ movq(RBX, RCX);
  __ andq(RCX, Immediate(0xffffffff));   // state_lo
  __ shrq(RBX, Immediate(32));           // state_hi
  __ imulq(RCX, Immediate(0xffffda61));  // A
  __ addq(RCX, RBX);  // new_state = (A * state_lo) + state_hi
  __ movq(Address(THR, target::Thread::random_offset()), RCX);
  __ andq(RCX, Immediate(0x3fffffff));
  __ cmpl(RCX, Immediate(0));
  __ j(EQUAL, &not_yet_computed);

  __ movq(RBX, Address(RSP, +1 * target::kWordSize));  // Object.
  __ MoveRegister(RDX, RCX);
  __ shlq(RDX, Immediate(32));

  Label retry, success, already_in_rax;
  __ Bind(&retry);
  // RAX is used by "cmpxchgq" as comparison value (if comparison succeeds the
  // store is performed).
  __ movq(RAX, FieldAddress(RBX, 0));
  __ TestImmediate(RAX, Immediate(0xffffffff00000000));
  __ BranchIf(NOT_ZERO, &already_in_rax);
  __ MoveRegister(RSI, RAX);
  __ orq(RSI, RDX);
  __ LockCmpxchgq(FieldAddress(RBX, 0), RSI);
  __ BranchIf(NOT_ZERO, &retry);
  // Fall-through with RCX containing new hash value (untagged).
  __ Bind(&success);
  __ SmiTag(RCX);
  __ MoveRegister(RAX, RCX);
  __ Ret();

  __ Bind(&already_in_rax);
  __ shrq(RAX, Immediate(32));
  __ SmiTag(RAX);
  __ Ret();
}
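
// Illustrative sketch (not part of the original file): the thread-local random
// step above in plain C++. It is a multiply-with-carry style update: the low
// 32 bits of the state are multiplied by the constant A (0xffffda61) and the
// old high half is added in; the result is masked to 30 bits for the hash.
static uint32_t NextIdentityHash_Model(uint64_t* thread_random_state) {
  uint64_t state = *thread_random_state;
  uint64_t state_lo = state & 0xffffffffu;
  uint64_t state_hi = state >> 32;
  uint64_t new_state = (uint64_t)0xffffda61 * state_lo + state_hi;
  *thread_random_state = new_state;
  return (uint32_t)(new_state & 0x3fffffff);  // may be 0; the caller retries
}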

void GenerateSubstringMatchesSpecialization(Assembler* assembler,
                                            intptr_t receiver_cid,
                                            intptr_t other_cid,
                                            Label* return_true,
                                            Label* return_false) {
  __ SmiUntag(RBX);
  __ LoadCompressedSmi(R8, FieldAddress(RAX, target::String::length_offset()));
  __ SmiUntag(R8);
  __ LoadCompressedSmi(R9, FieldAddress(RCX, target::String::length_offset()));
  __ SmiUntag(R9);

  // if (other.length == 0) return true;
  __ testq(R9, R9);
  __ j(ZERO, return_true);

  // if (start < 0) return false;
  __ testq(RBX, RBX);
  __ j(SIGN, return_false);

  // if (start + other.length > this.length) return false;
  __ movq(R11, RBX);
  __ addq(R11, R9);
  __ cmpq(R11, R8);
  __ j(GREATER, return_false);

  __ LoadImmediate(R11, Immediate(0));  // i = 0

  // do
  Label loop;
  __ Bind(&loop);

  // this.codeUnitAt(i + start)
  // clobbering this.length
  __ movq(R8, R11);
  __ addq(R8, RBX);
  if (receiver_cid == kOneByteStringCid) {
    __ movzxb(R12, FieldAddress(RAX, R8, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(receiver_cid == kTwoByteStringCid);
    __ movzxw(R12, FieldAddress(RAX, R8, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  // other.codeUnitAt(i)
  if (other_cid == kOneByteStringCid) {
    __ movzxb(R13, FieldAddress(RCX, R11, TIMES_1,
                                target::OneByteString::data_offset()));
  } else {
    ASSERT(other_cid == kTwoByteStringCid);
    __ movzxw(R13, FieldAddress(RCX, R11, TIMES_2,
                                target::TwoByteString::data_offset()));
  }
  __ cmpq(R12, R13);
  __ j(NOT_EQUAL, return_false);

  // i++, while (i < len)
  __ addq(R11, Immediate(1));
  __ cmpq(R11, R9);
  __ j(LESS, &loop, Assembler::kNearJump);

  __ jmp(return_true);
}

// bool _substringMatches(int start, String other)
// This intrinsic handles a OneByteString or TwoByteString receiver with a
// OneByteString other.
void AsmIntrinsifier::StringBaseSubstringMatches(Assembler* assembler,
                                                 Label* normal_ir_body) {
  Label return_true, return_false, try_two_byte;
  __ movq(RAX, Address(RSP, +3 * target::kWordSize));  // receiver
  __ movq(RBX, Address(RSP, +2 * target::kWordSize));  // start
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // other

  __ testq(RBX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // 'start' is not Smi.

  __ CompareClassId(RCX, kOneByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte);

  GenerateSubstringMatchesSpecialization(assembler, kOneByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&try_two_byte);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);

  GenerateSubstringMatchesSpecialization(assembler, kTwoByteStringCid,
                                         kOneByteStringCid, &return_true,
                                         &return_false);

  __ Bind(&return_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();

  __ Bind(&return_false);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseCharAt(Assembler* assembler,
                                       Label* normal_ir_body) {
  Label try_two_byte_string;
  __ movq(RCX, Address(RSP, +1 * target::kWordSize));  // Index.
  __ movq(RAX, Address(RSP, +2 * target::kWordSize));  // String.
  __ testq(RCX, Immediate(kSmiTagMask));
  __ j(NOT_ZERO, normal_ir_body);  // Non-smi index.
  // Range check.
  __ OBJ(cmp)(RCX, FieldAddress(RAX, target::String::length_offset()));
  // Runtime throws exception.
  __ j(ABOVE_EQUAL, normal_ir_body);
  __ CompareClassId(RAX, kOneByteStringCid);
  __ j(NOT_EQUAL, &try_two_byte_string, Assembler::kNearJump);
  __ SmiUntag(RCX);
  __ movzxb(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::OneByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(&try_two_byte_string);
  __ CompareClassId(RAX, kTwoByteStringCid);
  __ j(NOT_EQUAL, normal_ir_body);
  ASSERT(kSmiTagShift == 1);
#if defined(DART_COMPRESSED_POINTERS)
  // The upper half of a compressed Smi contains undefined bits, but no x64
  // addressing mode will ignore these bits. We have already checked the index
  // is positive, so we just clear the upper bits, which is shorter than movsxd.
  __ orl(RCX, RCX);
#endif
  __ movzxw(RCX, FieldAddress(RAX, RCX, TIMES_1,
                              target::TwoByteString::data_offset()));
  __ cmpq(RCX, Immediate(target::Symbols::kNumberOfOneCharCodeSymbols));
  __ j(GREATER_EQUAL, normal_ir_body);
  __ movq(RAX,
          Address(THR, target::Thread::predefined_symbols_address_offset()));
  __ movq(RAX, Address(RAX, RCX, TIMES_8,
                       target::Symbols::kNullCharCodeSymbolOffset *
                           target::kWordSize));
  __ ret();

  __ Bind(normal_ir_body);
}

void AsmIntrinsifier::StringBaseIsEmpty(Assembler* assembler,
                                        Label* normal_ir_body) {
  Label is_true;
  // Get length.
  __ movq(RAX, Address(RSP, +1 * target::kWordSize));  // String object.
  __ LoadCompressedSmi(RAX, FieldAddress(RAX, target::String::length_offset()));
  __ OBJ(cmp)(RAX, Immediate(target::ToRawSmi(0)));
  __ j(EQUAL, &is_true, Assembler::kNearJump);
  __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
  __ ret();
  __ Bind(&is_true);
  __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
  __ ret();
}
1560
1561void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
1562 Label* normal_ir_body) {
1563 Label compute_hash;
1564 __ movq(
1565 RBX,
1566 Address(RSP, +1 * target::kWordSize)); // target::OneByteString object.
1567 __ movl(RAX, FieldAddress(RBX, target::String::hash_offset()));
1568 __ cmpq(RAX, Immediate(0));
1569 __ j(EQUAL, &compute_hash, Assembler::kNearJump);
1570 __ SmiTag(RAX);
1571 __ ret();
1572
1573 __ Bind(&compute_hash);
1574 // Hash not yet computed, use algorithm of class StringHasher.
1575 __ LoadCompressedSmi(RCX, FieldAddress(RBX, target::String::length_offset()));
1576 __ SmiUntag(RCX);
1577 __ xorq(RAX, RAX);
1578 __ xorq(RDI, RDI);
1579 // RBX: Instance of target::OneByteString.
1580 // RCX: String length, untagged integer.
1581 // RDI: Loop counter, untagged integer.
1582 // RAX: Hash code, untagged integer.
1583 Label loop, done;
1584 __ Bind(&loop);
1585 __ cmpq(RDI, RCX);
1586 __ j(EQUAL, &done, Assembler::kNearJump);
1587 // Add to hash code (hash_ is a uint32).
1588 // Get one character (ch).
1589 __ movzxb(RDX, FieldAddress(RBX, RDI, TIMES_1,
1590 target::OneByteString::data_offset()));
1591 // RDX: ch and temporary.
1592 __ CombineHashes(RAX, RDX);
1593
1594 __ incq(RDI);
1595 __ jmp(&loop, Assembler::kNearJump);
1596
1597 __ Bind(&done);
1598 // Finalize and fit to size kHashBits. Ensures hash is non-zero.
1599 __ FinalizeHashForSize(target::String::kHashBits, RAX);
1600 __ shlq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
1601 // lock+orq is an atomic read-modify-write.
1602 __ lock();
1603 __ orq(FieldAddress(RBX, target::Object::tags_offset()), RAX);
1604 __ sarq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
1605 __ SmiTag(RAX);
1606 __ ret();
1607}
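
The loop above folds each byte into a running 32-bit hash, and FinalizeHashForSize clamps the result to target::String::kHashBits while forcing it to be non-zero, since a zero hash field in the header means "not yet computed". That is also why the lock-prefixed orq can publish the hash idempotently: every racing thread computes and stores the same bits. A sketch of the combine/finalize steps in the style of the VM's Jenkins one-at-a-time hash helpers (shift constants mirror vm/hash.h and should be checked against the current source):

#include <cstdint>

static uint32_t CombineHashesSketch(uint32_t hash, uint32_t ch) {
  hash += ch;          // Fold in the next character.
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

// Assumes hash_bits < 32 (target::String::kHashBits satisfies this).
static uint32_t FinalizeHashSketch(uint32_t hash, int hash_bits) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << hash_bits) - 1;
  return (hash == 0) ? 1 : hash;  // Zero is reserved for "not computed".
}
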
1608
1609// Allocates a _OneByteString or _TwoByteString. The content is not initialized.
1610// 'length_reg' contains the desired length as a _Smi or _Mint.
1611// Returns new string as tagged pointer in RAX.
1612static void TryAllocateString(Assembler* assembler,
1613 classid_t cid,
1614 intptr_t max_elements,
1615 Label* ok,
1616 Label* failure,
1617 Register length_reg) {
1618 ASSERT(cid == kOneByteStringCid || cid == kTwoByteStringCid);
1619 // _Mint length: call to runtime to produce error.
1620 __ BranchIfNotSmi(length_reg, failure);
1621 // negative length: call to runtime to produce error.
1622 // Too big: call to runtime to allocate old.
1623 __ OBJ(cmp)(length_reg, Immediate(target::ToRawSmi(max_elements)));
1624 __ j(ABOVE, failure);
1625
1626 NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, failure));
1627 if (length_reg != RDI) {
1628 __ movq(RDI, length_reg);
1629 }
1630 Label pop_and_fail;
1631 __ pushq(RDI); // Preserve length.
1632 if (cid == kOneByteStringCid) {
1633 // Untag length.
1634 __ SmiUntag(RDI);
1635 } else {
1636 // Untag length and multiply by element size -> no-op.
1637 ASSERT(kSmiTagSize == 1);
1638 }
1639 const intptr_t fixed_size_plus_alignment_padding =
1640 target::String::InstanceSize() +
1641 target::ObjectAlignment::kObjectAlignment - 1;
1642 __ addq(RDI, Immediate(fixed_size_plus_alignment_padding));
1643 __ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
1644
1645 __ movq(RAX, Address(THR, target::Thread::top_offset()));
1646
1647 // RDI: allocation size.
1648 __ movq(RCX, RAX);
1649 __ addq(RCX, RDI);
1650 __ j(CARRY, &pop_and_fail);
1651
1652 // Check if the allocation fits into the remaining space.
1653 // RAX: potential new object start.
1654 // RCX: potential next object start.
1655 // RDI: allocation size.
1656 __ cmpq(RCX, Address(THR, target::Thread::end_offset()));
1657 __ j(ABOVE_EQUAL, &pop_and_fail);
1658 __ CheckAllocationCanary(RAX);
1659
1660 // Successfully allocated the object(s), now update top to point to
1661 // next object start and initialize the object.
1662 __ movq(Address(THR, target::Thread::top_offset()), RCX);
1663 __ addq(RAX, Immediate(kHeapObjectTag));
1664 // Clear the last double word so that string comparison doesn't have to
1665 // special-case the tail of strings whose lengths are not multiples of the
1666 // double-word size.
1667 ASSERT(target::kWordSize == 8);
1668 __ movq(Address(RCX, -1 * target::kWordSize), Immediate(0));
1669 __ movq(Address(RCX, -2 * target::kWordSize), Immediate(0));
1670
1671 // Initialize the tags.
1672 // RAX: new object start as a tagged pointer.
1673 // RDI: allocation size.
1674 {
1675 Label size_tag_overflow, done;
1676 __ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
1677 __ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
1678 __ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
1679 target::ObjectAlignment::kObjectAlignmentLog2));
1680 __ jmp(&done, Assembler::kNearJump);
1681
1682 __ Bind(&size_tag_overflow);
1683 __ xorq(RDI, RDI);
1684 __ Bind(&done);
1685
1686 // Get the class index and insert it into the tags.
1687 // This also clears the hash, which is in the high bits of the tags.
1688 const uword tags =
1689 target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
1690 __ orq(RDI, Immediate(tags));
1691 __ movq(FieldAddress(RAX, target::Object::tags_offset()), RDI); // Tags.
1692 }
1693
1694 // Set the length field.
1695 __ popq(RDI);
1696#if defined(DART_COMPRESSED_POINTERS)
1697 // Clear out padding caused by alignment gap between length and data.
1698 __ movq(FieldAddress(RAX, target::String::length_offset()),
1699 compiler::Immediate(0));
1700#endif
1701 __ StoreCompressedIntoObjectNoBarrier(
1702 RAX, FieldAddress(RAX, target::String::length_offset()), RDI);
1703 __ jmp(ok);
1704
1705 __ Bind(&pop_and_fail);
1706 __ popq(RDI);
1707 __ jmp(failure);
1708}
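
TryAllocateString is a standard new-space bump allocation: the addq/andq pair rounds the payload up to the object alignment, the top pointer is advanced, and the code bails to the runtime when the addition carries or the new top crosses the space's end. A sketch under those assumptions; ThreadSketch is a hypothetical mirror of the two THR fields used above:

#include <cstddef>
#include <cstdint>

struct ThreadSketch {
  uintptr_t top;  // target::Thread::top_offset()
  uintptr_t end;  // target::Thread::end_offset()
};

constexpr uintptr_t kHeapObjectTagSketch = 1;

// instance_size must already be rounded up to the object alignment.
// Returns 0 where the assembly jumps to the failure label.
uintptr_t TryBumpAllocate(ThreadSketch* thr, size_t instance_size) {
  uintptr_t top = thr->top;
  uintptr_t next = top + instance_size;
  if (next < top) return 0;            // Addition overflowed (j(CARRY, ...)).
  if (next >= thr->end) return 0;      // Space is full (j(ABOVE_EQUAL, ...)).
  thr->top = next;                     // Reserve [top, next).
  return top + kHeapObjectTagSketch;   // Tagged pointer, as returned in RAX.
}
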
1709
1710// Arg0: target::OneByteString (receiver).
1711// Arg1: Start index as Smi.
1712// Arg2: End index as Smi.
1713// The indexes must be valid.
1714void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
1715 Label* normal_ir_body) {
1716 const intptr_t kStringOffset = 3 * target::kWordSize;
1717 const intptr_t kStartIndexOffset = 2 * target::kWordSize;
1718 const intptr_t kEndIndexOffset = 1 * target::kWordSize;
1719 Label ok;
1720 __ movq(RSI, Address(RSP, +kStartIndexOffset));
1721 __ movq(RDI, Address(RSP, +kEndIndexOffset));
1722 __ orq(RSI, RDI);
1723 __ testq(RSI, Immediate(kSmiTagMask));
1724 __ j(NOT_ZERO, normal_ir_body); // 'start', 'end' not Smi.
1725
1726 __ subq(RDI, Address(RSP, +kStartIndexOffset));
1727 TryAllocateString(assembler, kOneByteStringCid,
1728 target::OneByteString::kMaxNewSpaceElements, &ok,
1729 normal_ir_body, RDI);
1730 __ Bind(&ok);
1731 // RAX: new string as tagged pointer.
1732 // Copy string.
1733 __ movq(RSI, Address(RSP, +kStringOffset));
1734 __ movq(RBX, Address(RSP, +kStartIndexOffset));
1735 __ SmiUntag(RBX);
1736 __ leaq(RSI, FieldAddress(RSI, RBX, TIMES_1,
1737 target::OneByteString::data_offset()));
1738 // RSI: Start address to copy from (untagged).
1739 // RBX: Untagged start index.
1740 __ movq(RCX, Address(RSP, +kEndIndexOffset));
1741 __ SmiUntag(RCX);
1742 __ subq(RCX, RBX);
1743 __ xorq(RDX, RDX);
1744 // RSI: Start address to copy from (untagged).
1745 // RCX: Untagged number of bytes to copy.
1746 // RAX: Tagged result string.
1747 // RDX: Loop counter.
1748 // RBX: Scratch register.
1749 Label loop, check;
1750 __ jmp(&check, Assembler::kNearJump);
1751 __ Bind(&loop);
1752 __ movzxb(RBX, Address(RSI, RDX, TIMES_1, 0));
1753 __ movb(FieldAddress(RAX, RDX, TIMES_1, target::OneByteString::data_offset()),
1754 ByteRegisterOf(RBX));
1755 __ incq(RDX);
1756 __ Bind(&check);
1757 __ cmpq(RDX, RCX);
1758 __ j(LESS, &loop, Assembler::kNearJump);
1759 __ ret();
1760 __ Bind(normal_ir_body);
1761}
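
The copy loop is bottom-tested: it jumps straight to the check, so a zero-length substring copies nothing. In plain C++ the loop body is just a byte copy (the function's contract says the indexes are already valid):

#include <cstdint>

// RDX plays the role of i, RCX of count, RBX of the scratch byte.
void CopyOneByteChars(uint8_t* dst, const uint8_t* src, intptr_t count) {
  for (intptr_t i = 0; i < count; ++i) {
    dst[i] = src[i];
  }
}
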
1762
1763void AsmIntrinsifier::WriteIntoOneByteString(Assembler* assembler,
1764 Label* normal_ir_body) {
1765 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Value.
1766 __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // Index.
1767 __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // target::OneByteString.
1768 __ SmiUntag(RBX);
1769 __ SmiUntag(RCX);
1770 __ movb(FieldAddress(RAX, RBX, TIMES_1, target::OneByteString::data_offset()),
1771 ByteRegisterOf(RCX));
1772 __ ret();
1773}
1774
1775void AsmIntrinsifier::WriteIntoTwoByteString(Assembler* assembler,
1776 Label* normal_ir_body) {
1777 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Value.
1778 __ movq(RBX, Address(RSP, +2 * target::kWordSize)); // Index.
1779 __ movq(RAX, Address(RSP, +3 * target::kWordSize)); // target::TwoByteString.
1780 // Untag index and multiply by element size -> no-op (tagged Smi == index * 2).
1781 __ SmiUntag(RCX); // Untag the value to store.
1782#if defined(DART_COMPRESSED_POINTERS)
1783 // The upper half of a compressed Smi contains undefined bits, but no x64
1784 // addressing mode will ignore these bits. We know the index is positive, so
1785 // we just clear the upper bits, which is shorter than movsxd.
1786 __ orl(RBX, RBX);
1787#endif
1788 __ movw(FieldAddress(RAX, RBX, TIMES_1, target::TwoByteString::data_offset()),
1789 RCX);
1790 __ ret();
1791}
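
Both write intrinsics lean on Smi-tag arithmetic: with kSmiTagShift == 1, a tagged Smi holds value << 1, which for a two-byte string is exactly the byte offset of element `value`, so the still-tagged index feeds TIMES_1 addressing directly; a one-byte string must untag first. A worked example of the two cases:

#include <cstdint>

// Tagged Smi layout assumed: value << 1 with tag bit 0 (kSmiTag == 0).
intptr_t TwoByteElementByteOffset(intptr_t tagged_index) {
  // index * sizeof(uint16_t) == index * 2 == tagged_index: no untag needed.
  return tagged_index;
}

intptr_t OneByteElementByteOffset(intptr_t tagged_index) {
  return tagged_index >> 1;  // SmiUntag: index * sizeof(uint8_t) == index.
}
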
1792
1793void AsmIntrinsifier::AllocateOneByteString(Assembler* assembler,
1794 Label* normal_ir_body) {
1795 __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.
1796#if defined(DART_COMPRESSED_POINTERS)
1797 __ movsxd(RDI, RDI);
1798#endif
1799 Label ok;
1800 TryAllocateString(assembler, kOneByteStringCid,
1801 target::OneByteString::kMaxNewSpaceElements, &ok,
1802 normal_ir_body, RDI);
1803 // RAX: new string as tagged pointer.
1804
1805 __ Bind(&ok);
1806 __ ret();
1807
1808 __ Bind(normal_ir_body);
1809}
1810
1811void AsmIntrinsifier::AllocateTwoByteString(Assembler* assembler,
1812 Label* normal_ir_body) {
1813 __ movq(RDI, Address(RSP, +1 * target::kWordSize)); // Length.
1814#if defined(DART_COMPRESSED_POINTERS)
1815 __ movsxd(RDI, RDI);
1816#endif
1817 Label ok;
1818 TryAllocateString(assembler, kTwoByteStringCid,
1819 target::TwoByteString::kMaxNewSpaceElements, &ok,
1820 normal_ir_body, RDI);
1821 // RAX: new string as tagged pointer.
1822
1823 __ Bind(&ok);
1824 __ ret();
1825
1826 __ Bind(normal_ir_body);
1827}
1828
1829void AsmIntrinsifier::OneByteString_equality(Assembler* assembler,
1830 Label* normal_ir_body) {
1831 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // This.
1832 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Other.
1833
1834 StringEquality(assembler, RAX, RCX, RDI, RBX, RAX, normal_ir_body,
1835 kOneByteStringCid);
1836}
1837
1838void AsmIntrinsifier::TwoByteString_equality(Assembler* assembler,
1839 Label* normal_ir_body) {
1840 __ movq(RAX, Address(RSP, +2 * target::kWordSize)); // This.
1841 __ movq(RCX, Address(RSP, +1 * target::kWordSize)); // Other.
1842
1843 StringEquality(assembler, RAX, RCX, RDI, RBX, RAX, normal_ir_body,
1844 kTwoByteStringCid);
1845}
1846
1847void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
1848 Label* normal_ir_body,
1849 bool sticky) {
1850 if (FLAG_interpret_irregexp) return;
1851
1852 const intptr_t kRegExpParamOffset = 3 * target::kWordSize;
1853 const intptr_t kStringParamOffset = 2 * target::kWordSize;
1854 // The start_index Smi is at stack offset 1 * target::kWordSize.
1855
1856 // Incoming registers:
1857 // RAX: Function. (Will be loaded with the specialized matcher function.)
1858 // RCX: Unknown. (Must be GC safe on tail call.)
1859 // R10: Arguments descriptor. (Will be preserved.)
1860
1861 // Load the specialized function pointer into RAX. This leverages the fact
1862 // that the string CIDs and the stored function pointers are laid out in sequence.
1863 __ movq(RBX, Address(RSP, kRegExpParamOffset));
1864 __ movq(RDI, Address(RSP, kStringParamOffset));
1865 __ LoadClassId(RDI, RDI);
1866 __ SubImmediate(RDI, Immediate(kOneByteStringCid));
1867#if !defined(DART_COMPRESSED_POINTERS)
1868 __ movq(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_8,
1869 target::RegExp::function_offset(
1870 kOneByteStringCid, sticky)));
1871#else
1872 __ LoadCompressed(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_4,
1873 target::RegExp::function_offset(
1874 kOneByteStringCid, sticky)));
1875#endif
1876
1877 // Registers are now set up for the lazy compile stub. It expects the function
1878 // in RAX, the argument descriptor in R10, and IC-Data in RCX.
1879 __ xorq(RCX, RCX);
1880
1881 // Tail-call the function.
1882 __ LoadCompressed(
1883 CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
1884 __ movq(RDI,
1885 FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
1886 __ jmp(RDI);
1887}
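
The dispatch works because the one-byte and two-byte string class ids are consecutive and RegExp stores its specialized matcher functions in fields laid out in the same order, so cid - kOneByteStringCid scaled by the slot size indexes straight into the right field. A sketch of that indexing; the struct layout and names are illustrative stand-ins for target::RegExp::function_offset():

#include <cstdint>

constexpr intptr_t kOneByteStringCidSketch = 100;  // Illustrative CID base.

struct RegExpSketch {
  void* fn[2];  // Slot 0: one-byte matcher, slot 1: two-byte matcher.
};

void* SpecializedMatcher(const RegExpSketch* re, intptr_t string_cid) {
  // The assembly performs the same subtraction (SubImmediate) and lets the
  // TIMES_8 / TIMES_4 addressing mode do the scaling.
  return re->fn[string_cid - kOneByteStringCidSketch];
}
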
1888
1889void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,
1890 Label* normal_ir_body) {
1891 __ LoadIsolate(RAX);
1892 __ movq(RAX, Address(RAX, target::Isolate::default_tag_offset()));
1893 __ ret();
1894}
1895
1896void AsmIntrinsifier::Profiler_getCurrentTag(Assembler* assembler,
1897 Label* normal_ir_body) {
1898 __ LoadIsolate(RAX);
1899 __ movq(RAX, Address(RAX, target::Isolate::current_tag_offset()));
1900 __ ret();
1901}
1902
1903void AsmIntrinsifier::Timeline_isDartStreamEnabled(Assembler* assembler,
1904 Label* normal_ir_body) {
1905#if !defined(SUPPORT_TIMELINE)
1906 __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
1907 __ ret();
1908#else
1909 Label true_label;
1910 // Load TimelineStream*.
1911 __ movq(RAX, Address(THR, target::Thread::dart_stream_offset()));
1912 // Load uintptr_t from TimelineStream*.
1913 __ movq(RAX, Address(RAX, target::TimelineStream::enabled_offset()));
1914 __ cmpq(RAX, Immediate(0));
1915 __ j(NOT_ZERO, &true_label, Assembler::kNearJump);
1916 // Not enabled.
1917 __ LoadObject(RAX, CastHandle<Object>(FalseObject()));
1918 __ ret();
1919 // Enabled.
1920 __ Bind(&true_label);
1921 __ LoadObject(RAX, CastHandle<Object>(TrueObject()));
1922 __ ret();
1923#endif
1924}
1925
1926void AsmIntrinsifier::Timeline_getNextTaskId(Assembler* assembler,
1927 Label* normal_ir_body) {
1928#if !defined(SUPPORT_TIMELINE)
1929 __ xorq(RAX, RAX); // Return Smi 0.
1930 __ ret();
1931#else
1932 __ movq(RAX, Address(THR, target::Thread::next_task_id_offset()));
1933 __ movq(RBX, RAX);
1934 __ incq(RBX);
1935 __ movq(Address(THR, target::Thread::next_task_id_offset()), RBX);
1936 __ SmiTag(RAX); // Ignore loss of precision.
1937 __ ret();
1938#endif
1939}
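
Timeline_getNextTaskId is a plain read-increment-write on a thread-local 64-bit counter, with the old value returned as a Smi; tagging shifts out the top bit, hence the "loss of precision" comment. A sketch:

#include <cstdint>

// next_task_id mirrors the field behind Thread::next_task_id_offset().
int64_t NextTaskIdAsSmi(int64_t* next_task_id) {
  int64_t id = *next_task_id;
  *next_task_id = id + 1;
  // SmiTag: value * 2 with tag bit 0; high bits beyond Smi range are lost.
  return static_cast<int64_t>(static_cast<uint64_t>(id) << 1);
}
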
1940
1941#undef __
1942
1943} // namespace compiler
1944} // namespace dart
1945
1946#endif // defined(TARGET_ARCH_X64)