il_riscv.cc
1// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_RISCV.
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
9
18#include "vm/dart_entry.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/simulator.h"
23#include "vm/stack_frame.h"
24#include "vm/stub_code.h"
25#include "vm/symbols.h"
27
28#define __ (compiler->assembler())->
29#define Z (compiler->zone())
30
31namespace dart {
32
33// Generic summary for call instructions that have all arguments pushed
34// on the stack and return the result in a fixed register A0 (or FA0 if
35// the return type is double).
36LocationSummary* Instruction::MakeCallSummary(Zone* zone,
37 const Instruction* instr,
38 LocationSummary* locs) {
39 ASSERT(locs == nullptr || locs->always_calls());
40 LocationSummary* result =
41 ((locs == nullptr)
42 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
43 : locs);
44 const auto representation = instr->representation();
45 switch (representation) {
46 case kTagged:
47 case kUntagged:
48 case kUnboxedUint32:
49 case kUnboxedInt32:
50 result->set_out(
51 0, Location::RegisterLocation(CallingConventions::kReturnReg));
52 break;
53 case kPairOfTagged:
54 result->set_out(
59 break;
60 case kUnboxedInt64:
61#if XLEN == 32
62 result->set_out(
67#else
68 result->set_out(
70#endif
71 break;
72 case kUnboxedDouble:
73 result->set_out(
75 break;
76 default:
77 UNREACHABLE();
78 break;
79 }
80 return result;
81}
82
83LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
84 bool opt) const {
85 const intptr_t kNumInputs = 1;
86 const intptr_t kNumTemps = 0;
87 LocationSummary* locs = new (zone)
88 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
89
90 locs->set_in(0, Location::RequiresRegister());
91 switch (representation()) {
92 case kTagged:
93 locs->set_out(0, Location::RequiresRegister());
94 break;
95 case kUnboxedInt64:
96#if XLEN == 32
99#else
100 locs->set_out(0, Location::RequiresRegister());
101#endif
102 break;
103 case kUnboxedDouble:
104 locs->set_out(0, Location::RequiresFpuRegister());
105 break;
106 default:
107 UNREACHABLE();
108 break;
109 }
110 return locs;
111}
112
113void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
114 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
115 ASSERT(kSmiTag == 0);
116 ASSERT(kSmiTagSize == 1);
117
118 const Register index = locs()->in(0).reg();
119
120 switch (representation()) {
121 case kTagged: {
122 const auto out = locs()->out(0).reg();
123 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
124 __ LoadFromOffset(out, TMP, offset());
125 break;
126 }
127 case kUnboxedInt64: {
128#if XLEN == 32
129 const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
130 const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
131 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
132 __ LoadFromOffset(out_lo, TMP, offset());
133 __ LoadFromOffset(out_hi, TMP, offset() + compiler::target::kWordSize);
134#else
135 const auto out = locs()->out(0).reg();
136 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
137 __ LoadFromOffset(out, TMP, offset());
138#endif
139 break;
140 }
141 case kUnboxedDouble: {
142 const auto out = locs()->out(0).fpu_reg();
143 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
144 __ LoadDFromOffset(out, TMP, offset());
145 break;
146 }
147 default:
148 UNREACHABLE();
149 break;
150 }
151}
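
// Illustrative note (not part of the original file): because the index input
// is a tagged Smi (a value v is stored as v << kSmiTagSize, with
// kSmiTagSize == 1), scaling it to a byte offset only needs a shift of
// kWordSizeLog2 - kSmiTagSize. A minimal sketch, assuming a 64-bit target
// (kWordSizeLog2 == 3):
//
//   intptr_t tagged_index = 5 << 1;                   // Smi 5 is stored as 10
//   intptr_t byte_offset  = tagged_index << (3 - 1);  // 10 << 2 == 40
//   // 40 == 5 * kWordSize, the same offset an untagged index would produce,
//   // and offset() is then added on top of base_reg + byte_offset.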
152
153DEFINE_BACKEND(StoreIndexedUnsafe,
154 (NoLocation, Register index, Register value)) {
155 ASSERT(instr->RequiredInputRepresentation(
156 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
157 __ AddShifted(TMP, instr->base_reg(), index,
158 compiler::target::kWordSizeLog2 - kSmiTagSize);
159 __ sx(value, compiler::Address(TMP, instr->offset()));
160
161 ASSERT(kSmiTag == 0);
162}
163
164DEFINE_BACKEND(TailCall,
165 (NoLocation,
166 Fixed<Register, ARGS_DESC_REG>,
167 Temp<Register> temp)) {
168 compiler->EmitTailCallToStub(instr->code());
169
170 // Even though the TailCallInstr will be the last instruction in a basic
171 // block, the flow graph compiler will emit native code for other blocks after
172 // the one containing this instruction and needs to be able to use the pool.
173 // (The `LeaveDartFrame` above disables usages of the pool.)
174 __ set_constant_pool_allowed(true);
175}
176
177LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
178 bool opt) const {
179 // The compiler must optimize any function that includes a MemoryCopy
180 // instruction that uses typed data cids, since extracting the payload address
181 // from views is done in a compiler pass after all code motion has happened.
182 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
183 !IsTypedDataBaseClassId(dest_cid_)) ||
184 opt);
185 const intptr_t kNumInputs = 5;
186 const intptr_t kNumTemps = 2;
187 LocationSummary* locs = new (zone)
188 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
189 locs->set_in(kSrcPos, Location::RequiresRegister());
190 locs->set_in(kDestPos, Location::RequiresRegister());
193 locs->set_in(kLengthPos,
195 locs->set_temp(0, Location::RequiresRegister());
196 locs->set_temp(1, Location::RequiresRegister());
197 return locs;
198}
199
201 Register length_reg,
202 compiler::Label* done) {
203 __ BranchIfZero(length_reg, done);
204}
205
206static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
208 switch (bytes) {
209 case 1:
210 return compiler::kByte;
211 case 2:
212 return compiler::kTwoBytes;
213 case 4:
214 return compiler::kFourBytes;
215 case 8:
216 return compiler::kEightBytes;
217 default:
218 UNREACHABLE();
220 }
221}
222
223// Copies [count] bytes from the memory region pointed to by [src_reg] to the
224// memory region pointed to by [dest_reg]. If [reversed] is true, then [dest_reg]
225// and [src_reg] are assumed to point at the end of the respective region.
226static void CopyBytes(FlowGraphCompiler* compiler,
227 Register dest_reg,
228 Register src_reg,
229 intptr_t count,
230 bool reversed) {
231 COMPILE_ASSERT(XLEN <= 128);
233
234#if XLEN >= 128
235 // Handled specially because there is no kSixteenBytes OperandSize.
236 if (count == 16) {
237 const intptr_t offset = (reversed ? -1 : 1) * count;
238 const intptr_t initial = reversed ? offset : 0;
239 __ lq(TMP, compiler::Address(src_reg, initial));
240 __ addi(src_reg, src_reg, offset);
241 __ sq(TMP, compiler::Address(dest_reg, initial));
242 __ addi(dest_reg, dest_reg, offset);
243 return;
244 }
245#endif
246
247#if XLEN <= 32
248 if (count == 4 * (XLEN / 8)) {
249 auto const sz = OperandSizeFor(XLEN / 8);
250 const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
251 const intptr_t initial = reversed ? offset : 0;
252 __ LoadFromOffset(TMP, src_reg, initial, sz);
253 __ LoadFromOffset(TMP2, src_reg, initial + offset, sz);
254 __ StoreToOffset(TMP, dest_reg, initial, sz);
255 __ StoreToOffset(TMP2, dest_reg, initial + offset, sz);
256 __ LoadFromOffset(TMP, src_reg, initial + 2 * offset, sz);
257 __ LoadFromOffset(TMP2, src_reg, initial + 3 * offset, sz);
258 __ addi(src_reg, src_reg, 4 * offset);
259 __ StoreToOffset(TMP, dest_reg, initial + 2 * offset, sz);
260 __ StoreToOffset(TMP2, dest_reg, initial + 3 * offset, sz);
261 __ addi(dest_reg, dest_reg, 4 * offset);
262 return;
263 }
264#endif
265
266#if XLEN <= 64
267 if (count == 2 * (XLEN / 8)) {
268 auto const sz = OperandSizeFor(XLEN / 8);
269 const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
270 const intptr_t initial = reversed ? offset : 0;
271 __ LoadFromOffset(TMP, src_reg, initial, sz);
272 __ LoadFromOffset(TMP2, src_reg, initial + offset, sz);
273 __ addi(src_reg, src_reg, 2 * offset);
274 __ StoreToOffset(TMP, dest_reg, initial, sz);
275 __ StoreToOffset(TMP2, dest_reg, initial + offset, sz);
276 __ addi(dest_reg, dest_reg, 2 * offset);
277 return;
278 }
279#endif
280
281 ASSERT(count <= (XLEN / 8));
282 auto const sz = OperandSizeFor(count);
283 const intptr_t offset = (reversed ? -1 : 1) * count;
284 const intptr_t initial = reversed ? offset : 0;
285 __ LoadFromOffset(TMP, src_reg, initial, sz);
286 __ addi(src_reg, src_reg, offset);
287 __ StoreToOffset(TMP, dest_reg, initial, sz);
288 __ addi(dest_reg, dest_reg, offset);
289}
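
// Illustrative sketch (not part of the original file): ignoring register
// details, one forward CopyBytes(..., count, /*reversed=*/false) call behaves
// like the plain-C chunk copy below, where count is a power of two of at most
// 16 bytes and the memcpy is emitted as a few word-sized (or 128-bit) loads
// and stores:
//
//   void CopyChunkForward(uint8_t** dest, uint8_t** src, size_t count) {
//     memcpy(*dest, *src, count);  // the TMP/TMP2 loads and stores above
//     *src += count;               // both pointers advance past the chunk,
//     *dest += count;              // mirroring the addi on src_reg/dest_reg
//   }
//
// In the reversed case both pointers point one past the chunk to copy, the
// chunk just below them is copied, and both are moved down by count, so the
// region is walked from high to low addresses.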
290
291static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,
292 Register dest_reg,
293 Register src_reg,
294 Register length_reg,
295 intptr_t element_size,
296 bool unboxed_inputs,
297 bool reversed,
298 compiler::Label* done) {
300 if (element_size >= compiler::target::kWordSize) return;
301
302 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
303 const intptr_t base_shift =
304 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
305 intptr_t tested_bits = 0;
306
307 __ Comment("Copying until region is a multiple of word size");
308
309 COMPILE_ASSERT(XLEN <= 128);
310
311 for (intptr_t bit = compiler::target::kWordSizeLog2 - 1; bit >= element_shift;
312 bit--) {
313 const intptr_t bytes = 1 << bit;
314 const intptr_t tested_bit = bit + base_shift;
315 tested_bits |= 1 << tested_bit;
316 compiler::Label skip_copy;
317 __ andi(TMP, length_reg, 1 << tested_bit);
318 __ beqz(TMP, &skip_copy);
319 CopyBytes(compiler, dest_reg, src_reg, bytes, reversed);
320 __ Bind(&skip_copy);
321 }
322
323 ASSERT(tested_bits != 0);
324 __ andi(length_reg, length_reg, ~tested_bits);
325 __ beqz(length_reg, done);
326}
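
// Worked example (illustrative, not part of the original file): with
// element_size == 1, unboxed inputs and a 64-bit word (kWordSizeLog2 == 3),
// the loop tests length bits 2, 1 and 0 and copies 4, 2 and 1 bytes when the
// corresponding bit is set. For length == 13 (0b1101) it copies 4 + 1 = 5
// bytes, then clears the tested bits so length becomes 8, a word multiple
// that the word-sized loop in EmitLoopCopy finishes. For tagged (Smi) lengths
// or wider elements the same bits are tested at a position adjusted by
// base_shift.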
327
328void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
329 Register dest_reg,
330 Register src_reg,
331 Register length_reg,
332 compiler::Label* done,
333 compiler::Label* copy_forwards) {
334 const bool reversed = copy_forwards != nullptr;
335 if (reversed) {
336 // Verify that the overlap actually exists by checking to see if the start
337 // of the destination region is after the end of the source region.
338 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
340 if (shift == 0) {
341 __ add(TMP, src_reg, length_reg);
342 } else if (shift < 0) {
343 __ srai(TMP, length_reg, -shift);
344 __ add(TMP, src_reg, TMP);
345 } else {
346 __ slli(TMP, length_reg, shift);
347 __ add(TMP, src_reg, TMP);
348 }
349 __ CompareRegisters(dest_reg, TMP);
350 __ BranchIf(UNSIGNED_GREATER_EQUAL, copy_forwards);
351 // Adjust dest_reg and src_reg to point at the end (i.e. one past the
352 // last element) of their respective region.
353 __ add(dest_reg, dest_reg, TMP);
354 __ sub(dest_reg, dest_reg, src_reg);
355 __ MoveRegister(src_reg, TMP);
356 }
357 CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
358 unboxed_inputs_, reversed, done);
359 // The size of the uncopied region is a multiple of the word size, so now we
360 // copy the rest by word.
361 const intptr_t loop_subtract =
362 Utils::Maximum<intptr_t>(1, (XLEN / 8) / element_size_)
363 << (unboxed_inputs_ ? 0 : kSmiTagShift);
364 __ Comment("Copying by multiples of word size");
365 compiler::Label loop;
366 __ Bind(&loop);
367 switch (element_size_) {
368 case 1:
369 case 2:
370 case 4:
371#if XLEN <= 32
372 CopyBytes(compiler, dest_reg, src_reg, 4, reversed);
373 break;
374#endif
375 case 8:
376#if XLEN <= 64
377 CopyBytes(compiler, dest_reg, src_reg, 8, reversed);
378 break;
379#endif
380 case 16:
381 COMPILE_ASSERT(XLEN <= 128);
382 CopyBytes(compiler, dest_reg, src_reg, 16, reversed);
383 break;
384 default:
385 UNREACHABLE();
386 break;
387 }
388 __ subi(length_reg, length_reg, loop_subtract);
389 __ bnez(length_reg, &loop);
390}
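
// Illustrative note (not part of the original file): when copy_forwards is
// provided, the code first computes TMP = src + length-in-bytes (the shift
// converts the possibly Smi-tagged element count into bytes). If dest >= TMP,
// the destination starts at or past the end of the source, a forward copy
// cannot clobber unread data, and control branches to copy_forwards.
// Otherwise the regions overlap: dest becomes dest + length-in-bytes and src
// becomes TMP, so both point one past their regions and the copy runs from
// high to low addresses. For example, with src == 100, dest == 104 and
// 8 one-byte elements, TMP == 108 > 104, so the backward path is taken with
// src == 108 and dest == 112.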
391
392void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
393 classid_t array_cid,
394 Register array_reg,
395 Register payload_reg,
396 Representation array_rep,
397 Location start_loc) {
398 intptr_t offset = 0;
399 if (array_rep != kTagged) {
400 // Do nothing, array_reg already contains the payload address.
401 } else if (IsTypedDataBaseClassId(array_cid)) {
402 // The incoming array must have been proven to be an internal typed data
403 // object, where the payload is in the object and we can just offset.
404 ASSERT_EQUAL(array_rep, kTagged);
405 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
406 } else {
407 ASSERT_EQUAL(array_rep, kTagged);
408 ASSERT(!IsExternalPayloadClassId(array_cid));
409 switch (array_cid) {
410 case kOneByteStringCid:
411 offset =
412 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
413 break;
414 case kTwoByteStringCid:
415 offset =
416 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
417 break;
418 default:
419 UNREACHABLE();
420 break;
421 }
422 }
423 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
424 if (start_loc.IsConstant()) {
425 const auto& constant = start_loc.constant();
426 ASSERT(constant.IsInteger());
427 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
428 const intx_t add_value = Utils::AddWithWrapAround<intx_t>(
429 Utils::MulWithWrapAround<intx_t>(start_value, element_size_), offset);
430 __ AddImmediate(payload_reg, array_reg, add_value);
431 return;
432 }
433 const Register start_reg = start_loc.reg();
434 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
436 __ AddShifted(payload_reg, array_reg, start_reg, shift);
437 __ AddImmediate(payload_reg, offset);
438}
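
// Worked example (illustrative, not part of the original file): for an
// internal typed data array with 4-byte elements and a Smi start index held
// in a register, offset is TypedData::payload_offset() - kHeapObjectTag and
// the shift is log2(4) - kSmiTagShift == 1. Since a Smi start holds
// start << 1, AddShifted adds (start << 1) << 1 == start * 4 to the tagged
// array pointer, and the final AddImmediate adds offset so payload_reg points
// at the first element to copy.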
439
440LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
441 Zone* zone,
442 bool opt) const {
443 const intptr_t kNumInputs = 3;
444 const intptr_t kNumTemps = 0;
445 auto* const summary = new (zone)
446 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
447
448 summary->set_in(kBasePos, Location::RequiresRegister());
449 summary->set_in(kIndexPos, Location::RequiresRegister());
450 // Only use a Smi constant for the index if multiplying it by the index
451 // scale would be an intx constant.
452 const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
453 summary->set_in(
454 kIndexPos, LocationRegisterOrSmiConstant(index(), kMinIntX >> scale_shift,
455 kMaxIntX >> scale_shift));
456#if XLEN == 32
458#else
459 summary->set_in(kOffsetPos, LocationRegisterOrConstant(offset()));
460#endif
461 summary->set_out(0, Location::RequiresRegister());
462
463 return summary;
464}
465
466void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
467 const Register base_reg = locs()->in(kBasePos).reg();
468 const Location& index_loc = locs()->in(kIndexPos);
469 const Location& offset_loc = locs()->in(kOffsetPos);
470 const Register result_reg = locs()->out(0).reg();
471
472 if (index_loc.IsConstant()) {
473 if (offset_loc.IsConstant()) {
474 ASSERT_EQUAL(Smi::Cast(index_loc.constant()).Value(), 0);
475 ASSERT(Integer::Cast(offset_loc.constant()).AsInt64Value() != 0);
476 // No index involved at all.
477 const intx_t offset_value =
478 Integer::Cast(offset_loc.constant()).AsInt64Value();
479 __ AddImmediate(result_reg, base_reg, offset_value);
480 } else {
481 __ add(result_reg, base_reg, offset_loc.reg());
482 // Don't need wrap-around as the index is constant only if multiplying
483 // it by the scale is an intx.
484 const intx_t scaled_index =
485 Smi::Cast(index_loc.constant()).Value() * index_scale();
486 __ AddImmediate(result_reg, scaled_index);
487 }
488 } else {
489 __ AddShifted(result_reg, base_reg, index_loc.reg(),
491 if (offset_loc.IsConstant()) {
492 const intx_t offset_value =
493 Integer::Cast(offset_loc.constant()).AsInt64Value();
494 __ AddImmediate(result_reg, offset_value);
495 } else {
496 __ AddRegisters(result_reg, offset_loc.reg());
497 }
498 }
499}
500
501LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
502 bool opt) const {
503 const intptr_t kNumInputs = 1;
504 const intptr_t kNumTemps = 0;
505 LocationSummary* locs = new (zone)
506 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
507 ConstantInstr* constant = value()->definition()->AsConstant();
508 if (constant != nullptr && constant->HasZeroRepresentation()) {
509 locs->set_in(0, Location::Constant(constant));
510 } else if (representation() == kUnboxedDouble) {
511 locs->set_in(0, Location::RequiresFpuRegister());
512 } else if (representation() == kUnboxedInt64) {
513#if XLEN == 32
516#else
517 locs->set_in(0, Location::RequiresRegister());
518#endif
519 } else {
520 ASSERT(representation() == kTagged);
521 locs->set_in(0, LocationAnyOrConstant(value()));
522 }
523 return locs;
524}
525
526void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
527 ASSERT(compiler->is_optimizing());
528
529 const Location value = compiler->RebaseIfImprovesAddressing(locs()->in(0));
530 if (value.IsRegister()) {
531 __ StoreToOffset(value.reg(), SP,
532 location().stack_index() * compiler::target::kWordSize);
533#if XLEN == 32
534 } else if (value.IsPairLocation()) {
535 __ StoreToOffset(value.AsPairLocation()->At(1).reg(), SP,
536 location().AsPairLocation()->At(1).stack_index() *
537 compiler::target::kWordSize);
538 __ StoreToOffset(value.AsPairLocation()->At(0).reg(), SP,
539 location().AsPairLocation()->At(0).stack_index() *
540 compiler::target::kWordSize);
541#endif
542 } else if (value.IsConstant()) {
543 if (representation() == kUnboxedDouble) {
544 ASSERT(value.constant_instruction()->HasZeroRepresentation());
545 intptr_t offset = location().stack_index() * compiler::target::kWordSize;
546#if XLEN == 32
547 __ StoreToOffset(ZR, SP, offset + compiler::target::kWordSize);
548 __ StoreToOffset(ZR, SP, offset);
549#else
550 __ StoreToOffset(ZR, SP, offset);
551#endif
552 } else if (representation() == kUnboxedInt64) {
553 ASSERT(value.constant_instruction()->HasZeroRepresentation());
554#if XLEN == 32
555 __ StoreToOffset(ZR, SP,
556 location().AsPairLocation()->At(1).stack_index() *
557 compiler::target::kWordSize);
558 __ StoreToOffset(ZR, SP,
559 location().AsPairLocation()->At(0).stack_index() *
560 compiler::target::kWordSize);
561#else
562 __ StoreToOffset(ZR, SP,
563 location().stack_index() * compiler::target::kWordSize);
564#endif
565 } else {
566 ASSERT(representation() == kTagged);
567 const Object& constant = value.constant();
568 Register reg;
569 if (constant.IsNull()) {
570 reg = NULL_REG;
571 } else if (constant.IsSmi() && Smi::Cast(constant).Value() == 0) {
572 reg = ZR;
573 } else {
574 reg = TMP;
575 __ LoadObject(TMP, constant);
576 }
577 __ StoreToOffset(reg, SP,
578 location().stack_index() * compiler::target::kWordSize);
579 }
580 } else if (value.IsFpuRegister()) {
581 __ StoreDToOffset(value.fpu_reg(), SP,
582 location().stack_index() * compiler::target::kWordSize);
583 } else if (value.IsStackSlot()) {
584 const intptr_t value_offset = value.ToStackSlotOffset();
585 __ LoadFromOffset(TMP, value.base_reg(), value_offset);
586 __ StoreToOffset(TMP, SP,
587 location().stack_index() * compiler::target::kWordSize);
588 } else {
589 UNREACHABLE();
590 }
591}
592
593LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
594 bool opt) const {
595 const intptr_t kNumInputs = 1;
596 const intptr_t kNumTemps = 0;
597 LocationSummary* locs = new (zone)
598 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
599 switch (representation()) {
600 case kTagged:
601 locs->set_in(0,
603 break;
604 case kPairOfTagged:
605 locs->set_in(
610 break;
611 case kUnboxedInt64:
612#if XLEN == 32
613 locs->set_in(
618#else
619 locs->set_in(0,
621#endif
622 break;
623 case kUnboxedDouble:
624 locs->set_in(
626 break;
627 default:
628 UNREACHABLE();
629 break;
630 }
631 return locs;
632}
633
634// Attempt optimized compilation at return instruction instead of at the entry.
635// The entry needs to be patchable, so no inlined objects are allowed in the area
636// that will be overwritten by the patch instructions: a branch macro sequence.
637void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
638 if (locs()->in(0).IsRegister()) {
639 const Register result = locs()->in(0).reg();
641 } else if (locs()->in(0).IsPairLocation()) {
642 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
643 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
646 } else {
647 ASSERT(locs()->in(0).IsFpuRegister());
648 const FpuRegister result = locs()->in(0).fpu_reg();
650 }
651
652 if (compiler->parsed_function().function().IsAsyncFunction() ||
653 compiler->parsed_function().function().IsAsyncGenerator()) {
654 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
655 const Code& stub = GetReturnStub(compiler);
656 compiler->EmitJumpToStub(stub);
657 return;
658 }
659
660 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
661 __ ret();
662 return;
663 }
664
665 const intptr_t fp_sp_dist =
666 (compiler::target::frame_layout.first_local_from_fp + 1 -
667 compiler->StackSize()) *
668 kWordSize;
669 __ CheckFpSpDist(fp_sp_dist);
670 ASSERT(__ constant_pool_allowed());
671 __ LeaveDartFrame(fp_sp_dist); // Disallows constant pool use.
672 __ ret();
673 // This DartReturnInstr may be emitted out of order by the optimizer. The next
674 // block may be a target expecting a properly set constant pool pointer.
675 __ set_constant_pool_allowed(true);
676}
677
678// Detect pattern when one value is zero and another is a power of 2.
679static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
680 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
681 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
682}
683
684LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
685 bool opt) const {
687 return comparison()->locs();
688}
689
690void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
691 const Register result = locs()->out(0).reg();
692
693 Location left = locs()->in(0);
694 Location right = locs()->in(1);
695 ASSERT(!left.IsConstant() || !right.IsConstant());
696
697 // Emit comparison code. This must not overwrite the result register.
698 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
699 // the labels or returning an invalid condition.
700 BranchLabels labels = {nullptr, nullptr, nullptr};
701 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
702 ASSERT(true_condition != kInvalidCondition);
703
704 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
705
706 intptr_t true_value = if_true_;
707 intptr_t false_value = if_false_;
708
709 if (is_power_of_two_kind) {
710 if (true_value == 0) {
711 // We need to have zero in result on true_condition.
712 true_condition = InvertCondition(true_condition);
713 }
714 } else {
715 if (true_value == 0) {
716 // Swap values so that false_value is zero.
717 intptr_t temp = true_value;
718 true_value = false_value;
719 false_value = temp;
720 } else {
721 true_condition = InvertCondition(true_condition);
722 }
723 }
724
725 __ SetIf(true_condition, result);
726
727 if (is_power_of_two_kind) {
728 const intptr_t shift =
729 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
730 __ slli(result, result, shift + kSmiTagSize);
731 } else {
732 __ subi(result, result, 1);
733 const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
734 __ AndImmediate(result, result, val);
735 if (false_value != 0) {
736 __ AddImmediate(result, Smi::RawValue(false_value));
737 }
738 }
739}
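
// Worked example (illustrative, not part of the original file): for
// if_true_ == 4 and if_false_ == 0 (the power-of-two kind), SetIf leaves 0 or
// 1 in result, and shifting left by log2(4) + kSmiTagSize == 3 yields 0 or 8,
// i.e. the tagged Smis 0 and 4, with no branch. In the general case, e.g.
// if_true_ == 7 and if_false_ == 3, the condition is inverted so result is 1
// exactly when the false value is wanted; subi maps that to 0 (false wanted)
// or -1 (true wanted), the AND with Smi::RawValue(7) - Smi::RawValue(3) == 8
// keeps 0 or 8, and adding Smi::RawValue(3) == 6 produces 6 or 14, the tagged
// Smis 3 and 7.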
740
741LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
742 bool opt) const {
743 const intptr_t kNumInputs = 1;
744 const intptr_t kNumTemps = 0;
745 LocationSummary* summary = new (zone)
746 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
747 summary->set_in(
748 0, Location::RegisterLocation(FLAG_precompiled_mode ? T0 : FUNCTION_REG));
749 return MakeCallSummary(zone, this, summary);
750}
751
752void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
753 // Load arguments descriptor in ARGS_DESC_REG.
754 const intptr_t argument_count = ArgumentCount(); // Includes type args.
755 const Array& arguments_descriptor =
757 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
758
759 if (FLAG_precompiled_mode) {
760 ASSERT(locs()->in(0).reg() == T0);
761 // T0: Closure with a cached entry point.
762 __ LoadFieldFromOffset(A1, T0,
763 compiler::target::Closure::entry_point_offset());
764 } else {
765 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
766 // FUNCTION_REG: Function.
767 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
768 compiler::target::Function::code_offset());
769 // Closure functions only have one entry point.
770 __ LoadFieldFromOffset(A1, FUNCTION_REG,
771 compiler::target::Function::entry_point_offset());
772 }
773
774 // FUNCTION_REG: Function (argument to lazy compile stub)
775 // ARGS_DESC_REG: Arguments descriptor array.
776 // A1: instructions entry point.
777 if (!FLAG_precompiled_mode) {
778 // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
779 __ LoadImmediate(IC_DATA_REG, 0);
780 }
781 __ jalr(A1);
782 compiler->EmitCallsiteMetadata(source(), deopt_id(),
783 UntaggedPcDescriptors::kOther, locs(), env());
784 compiler->EmitDropArguments(argument_count);
785}
786
787LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
788 bool opt) const {
791}
792
793void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
794 const Register result = locs()->out(0).reg();
795 __ LoadFromOffset(result, FP,
796 compiler::target::FrameOffsetInBytesForVariable(&local()));
797 // TODO(riscv): Using an SP-relative address instead of an FP-relative
798 // address would allow for compressed instructions.
799}
800
801LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
802 bool opt) const {
805}
806
807void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
808 const Register value = locs()->in(0).reg();
809 const Register result = locs()->out(0).reg();
810 ASSERT(result == value); // Assert that register assignment is correct.
811 __ StoreToOffset(value, FP,
812 compiler::target::FrameOffsetInBytesForVariable(&local()));
813 // TODO(riscv): Using an SP-relative address instead of an FP-relative
814 // address would allow for compressed instructions.
815}
816
817LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
818 bool opt) const {
821}
822
823void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
824 // The register allocator drops constant definitions that have no uses.
825 if (!locs()->out(0).IsInvalid()) {
826 const Register result = locs()->out(0).reg();
827 __ LoadObject(result, value());
828 }
829}
830
831void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
832 const Location& destination,
833 Register tmp,
834 intptr_t pair_index) {
835 if (destination.IsRegister()) {
836 if (RepresentationUtils::IsUnboxedInteger(representation())) {
837 int64_t v;
838 const bool ok = compiler::HasIntegerValue(value_, &v);
840 if (value_.IsSmi() &&
841 RepresentationUtils::IsUnsignedInteger(representation())) {
842 // If the value is negative, then the sign bit was preserved during
843 // Smi untagging, which means the resulting value may be unexpected.
844 ASSERT(v >= 0);
845 }
846#if XLEN == 32
847 __ LoadImmediate(destination.reg(), pair_index == 0
849 : Utils::High32Bits(v));
850#else
851 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
852 __ LoadImmediate(destination.reg(), v);
853#endif
854 } else {
855 ASSERT(representation() == kTagged);
856 __ LoadObject(destination.reg(), value_);
857 }
858 } else if (destination.IsFpuRegister()) {
859 const FRegister dst = destination.fpu_reg();
860 if (representation() == kUnboxedFloat) {
861 __ LoadSImmediate(dst, Double::Cast(value_).value());
862 } else {
863 ASSERT(representation() == kUnboxedDouble);
864 __ LoadDImmediate(dst, Double::Cast(value_).value());
865 }
866 } else if (destination.IsDoubleStackSlot()) {
867 const intptr_t dest_offset = destination.ToStackSlotOffset();
868#if XLEN == 32
869 if (false) {
870#else
871 if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) {
872#endif
873 __ StoreToOffset(ZR, destination.base_reg(), dest_offset);
874 } else {
875 __ LoadDImmediate(FTMP, Double::Cast(value_).value());
876 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
877 }
878 } else {
879 ASSERT(destination.IsStackSlot());
880 ASSERT(tmp != kNoRegister);
881 const intptr_t dest_offset = destination.ToStackSlotOffset();
883 if (RepresentationUtils::IsUnboxedInteger(representation())) {
884 int64_t val = Integer::Cast(value_).AsInt64Value();
885#if XLEN == 32
886 val = pair_index == 0 ? Utils::Low32Bits(val) : Utils::High32Bits(val);
887#else
888 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
889#endif
890 if (val == 0) {
891 tmp = ZR;
892 } else {
893 __ LoadImmediate(tmp, val);
894 }
895 } else if (representation() == kUnboxedFloat) {
896 int32_t float_bits =
897 bit_cast<int32_t, float>(Double::Cast(value_).value());
898 __ LoadImmediate(tmp, float_bits);
899 operand_size = compiler::kFourBytes;
900 } else {
901 ASSERT(representation() == kTagged);
902 if (value_.IsNull()) {
903 tmp = NULL_REG;
904 } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
905 tmp = ZR;
906 } else {
907 __ LoadObject(tmp, value_);
908 }
909 }
910 __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
911 }
912}
913
914LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
915 bool opt) const {
916 const bool is_unboxed_int =
918 ASSERT(!is_unboxed_int || RepresentationUtils::ValueSize(representation()) <=
919 compiler::target::kWordSize);
920 const intptr_t kNumInputs = 0;
921 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
922 LocationSummary* locs = new (zone)
923 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
924 if (is_unboxed_int) {
925 locs->set_out(0, Location::RequiresRegister());
926 } else {
927 switch (representation()) {
928 case kUnboxedDouble:
929 locs->set_out(0, Location::RequiresFpuRegister());
930 locs->set_temp(0, Location::RequiresRegister());
931 break;
932 default:
933 UNREACHABLE();
934 break;
935 }
936 }
937 return locs;
938}
939
940void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
941 if (!locs()->out(0).IsInvalid()) {
942 const Register scratch =
945 : locs()->temp(0).reg();
946 EmitMoveToLocation(compiler, locs()->out(0), scratch);
947 }
948}
949
950LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
951 bool opt) const {
952 auto const dst_type_loc =
954
955 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
956 // since TTS preserves them. So we make this a `kNoCall` summary,
957 // even though most other registers can be modified by the stub. To tell the
958 // register allocator about it, we reserve all the other registers as
959 // temporary registers.
960 // TODO(http://dartbug.com/32788): Simplify this.
961
962 const intptr_t kNonChangeableInputRegs =
964 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
965 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
966 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
967
968 const intptr_t kNumInputs = 4;
969
970 // We invoke a stub that can potentially clobber any CPU register
971 // but can only clobber FPU registers on the slow path when
 972 // entering runtime. The ARM64 ABI only guarantees that the lower
 973 // 64 bits of the V registers are preserved, so we block all
974 // of them except for FpuTMP.
975 const intptr_t kCpuRegistersToPreserve =
976 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
977 const intptr_t kFpuRegistersToPreserve =
978 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) & ~(1l << FpuTMP);
979
980 const intptr_t kNumTemps = (Utils::CountOneBits32(kCpuRegistersToPreserve) +
981 Utils::CountOneBits32(kFpuRegistersToPreserve));
982
983 LocationSummary* summary = new (zone) LocationSummary(
984 zone, kNumInputs, kNumTemps, LocationSummary::kCallCalleeSafe);
985 summary->set_in(kInstancePos,
987 summary->set_in(kDstTypePos, dst_type_loc);
988 summary->set_in(
989 kInstantiatorTAVPos,
991 summary->set_in(kFunctionTAVPos, Location::RegisterLocation(
993 summary->set_out(0, Location::SameAsFirstInput());
994
995 // Let's reserve all registers except for the input ones.
996 intptr_t next_temp = 0;
997 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
998 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
999 if (should_preserve) {
1000 summary->set_temp(next_temp++,
1001 Location::RegisterLocation(static_cast<Register>(i)));
1002 }
1003 }
1004
1005 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1006 const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
1007 if (should_preserve) {
1008 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
1009 static_cast<FpuRegister>(i)));
1010 }
1011 }
1012
1013 return summary;
1014}
1015
1016void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1017 ASSERT(locs()->always_calls());
1018
1019 auto object_store = compiler->isolate_group()->object_store();
1020 const auto& assert_boolean_stub =
1021 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
1022
1023 compiler::Label done;
1026 compiler->GenerateStubCall(source(), assert_boolean_stub,
1027 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
1028 deopt_id(), env());
1029 __ Bind(&done);
1030}
1031
1032static Condition TokenKindToIntCondition(Token::Kind kind) {
1033 switch (kind) {
1034 case Token::kEQ:
1035 return EQ;
1036 case Token::kNE:
1037 return NE;
1038 case Token::kLT:
1039 return LT;
1040 case Token::kGT:
1041 return GT;
1042 case Token::kLTE:
1043 return LE;
1044 case Token::kGTE:
1045 return GE;
1046 default:
1047 UNREACHABLE();
1048 return VS;
1049 }
1050}
1051
1052static Condition FlipCondition(Condition condition) {
1053 switch (condition) {
1054 case EQ:
1055 return EQ;
1056 case NE:
1057 return NE;
1058 case LT:
1059 return GT;
1060 case LE:
1061 return GE;
1062 case GT:
1063 return LT;
1064 case GE:
1065 return LE;
1066 case CC:
1067 return HI;
1068 case LS:
1069 return CS;
1070 case HI:
1071 return CC;
1072 case CS:
1073 return LS;
1074 default:
1075 UNREACHABLE();
1076 return EQ;
1077 }
1078}
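
// Note (illustrative, not part of the original file): FlipCondition is used
// when the comparison operands are swapped so that a constant ends up on the
// right; it rewrites the condition so the overall predicate is preserved
// (a < b is the same as b > a, hence LT <-> GT and CC <-> HI for the unsigned
// forms), whereas InvertCondition negates the predicate (LT would become GE).
// EQ and NE are unaffected by swapping the operands.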
1079
1080static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
1081 Condition true_condition,
1082 BranchLabels labels) {
1083 if (labels.fall_through == labels.false_label) {
1084 // If the next block is the false successor we will fall through to it.
1085 __ BranchIf(true_condition, labels.true_label);
1086 } else {
1087 // If the next block is not the false successor we will branch to it.
1088 Condition false_condition = InvertCondition(true_condition);
1089 __ BranchIf(false_condition, labels.false_label);
1090
1091 // Fall through or jump to the true successor.
1092 if (labels.fall_through != labels.true_label) {
1093 __ j(labels.true_label);
1094 }
1095 }
1096}
1097
1098static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1099 LocationSummary* locs,
1100 Token::Kind kind,
1101 BranchLabels labels) {
1102 Location left = locs->in(0);
1103 Location right = locs->in(1);
1104 ASSERT(!left.IsConstant() || !right.IsConstant());
1105
1106 Condition true_condition = TokenKindToIntCondition(kind);
1107 if (left.IsConstant() || right.IsConstant()) {
1108 // Ensure constant is on the right.
1109 if (left.IsConstant()) {
1110 Location tmp = right;
1111 right = left;
1112 left = tmp;
1113 true_condition = FlipCondition(true_condition);
1114 }
1115 __ CompareObject(left.reg(), right.constant());
1116 } else {
1117 __ CompareObjectRegisters(left.reg(), right.reg());
1118 }
1119 return true_condition;
1120}
1121
1122static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
1123 LocationSummary* locs,
1124 Token::Kind kind,
1125 BranchLabels labels) {
1126 Location left = locs->in(0);
1127 Location right = locs->in(1);
1128 ASSERT(!left.IsConstant() || !right.IsConstant());
1129
1130 Condition true_condition = TokenKindToIntCondition(kind);
1131 if (left.IsConstant() || right.IsConstant()) {
1132 // Ensure constant is on the right.
1133 if (left.IsConstant()) {
1134 Location tmp = right;
1135 right = left;
1136 left = tmp;
1137 true_condition = FlipCondition(true_condition);
1138 }
1139 __ CompareImmediate(
1140 left.reg(),
1141 static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
1142 } else {
1143 __ CompareRegisters(left.reg(), right.reg());
1144 }
1145 return true_condition;
1146}
1147
1148#if XLEN == 32
1149static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
1150 LocationSummary* locs,
1151 Token::Kind kind) {
1153 PairLocation* left_pair = locs->in(0).AsPairLocation();
1154 Register left_lo = left_pair->At(0).reg();
1155 Register left_hi = left_pair->At(1).reg();
1156 PairLocation* right_pair = locs->in(1).AsPairLocation();
1157 Register right_lo = right_pair->At(0).reg();
1158 Register right_hi = right_pair->At(1).reg();
1159
1160 __ xor_(TMP, left_lo, right_lo);
1161 __ xor_(TMP2, left_hi, right_hi);
1162 __ or_(TMP, TMP, TMP2);
1163 __ CompareImmediate(TMP, 0);
1164 if (kind == Token::kEQ) {
1165 return EQUAL;
1166 } else if (kind == Token::kNE) {
1167 return NOT_EQUAL;
1168 }
1169 UNREACHABLE();
1170}
1171
1172static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
1173 LocationSummary* locs,
1174 Token::Kind kind,
1175 BranchLabels labels) {
1176 PairLocation* left_pair = locs->in(0).AsPairLocation();
1177 Register left_lo = left_pair->At(0).reg();
1178 Register left_hi = left_pair->At(1).reg();
1179 PairLocation* right_pair = locs->in(1).AsPairLocation();
1180 Register right_lo = right_pair->At(0).reg();
1181 Register right_hi = right_pair->At(1).reg();
1182
1183 switch (kind) {
1184 case Token::kEQ:
1185 __ bne(left_lo, right_lo, labels.false_label);
1186 __ CompareRegisters(left_hi, right_hi);
1187 return EQUAL;
1188 case Token::kNE:
1189 __ bne(left_lo, right_lo, labels.true_label);
1190 __ CompareRegisters(left_hi, right_hi);
1191 return NOT_EQUAL;
1192 case Token::kLT:
1193 __ blt(left_hi, right_hi, labels.true_label);
1194 __ bgt(left_hi, right_hi, labels.false_label);
1195 __ CompareRegisters(left_lo, right_lo);
1196 return UNSIGNED_LESS;
1197 case Token::kGT:
1198 __ bgt(left_hi, right_hi, labels.true_label);
1199 __ blt(left_hi, right_hi, labels.false_label);
1200 __ CompareRegisters(left_lo, right_lo);
1201 return UNSIGNED_GREATER;
1202 case Token::kLTE:
1203 __ blt(left_hi, right_hi, labels.true_label);
1204 __ bgt(left_hi, right_hi, labels.false_label);
1205 __ CompareRegisters(left_lo, right_lo);
1206 return UNSIGNED_LESS_EQUAL;
1207 case Token::kGTE:
1208 __ bgt(left_hi, right_hi, labels.true_label);
1209 __ blt(left_hi, right_hi, labels.false_label);
1210 __ CompareRegisters(left_lo, right_lo);
1211 return UNSIGNED_GREATER_EQUAL;
1212 default:
1213 UNREACHABLE();
1214 }
1215}
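
// Illustrative sketch (not part of the original file): the lexicographic
// comparison above matches the plain-C logic below for signed 64-bit values
// split into 32-bit (hi, lo) halves; the signed high words decide unless they
// are equal, in which case the low words are compared unsigned:
//
//   bool Int64LessThan(int32_t lhi, uint32_t llo, int32_t rhi, uint32_t rlo) {
//     if (lhi < rhi) return true;   // blt on the high words
//     if (lhi > rhi) return false;  // bgt on the high words
//     return llo < rlo;             // unsigned compare of the low words
//   }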
1216#else
1217// Similar to ComparisonInstr::EmitComparisonCode, may either:
1218// - emit comparison code and return a valid condition in which case the
1219// caller is expected to emit a branch to the true label based on that
1220// condition (or a branch to the false label on the opposite condition).
1221// - emit comparison code with a branch directly to the labels and return
1222// kInvalidCondition.
1223static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
1224 LocationSummary* locs,
1225 Token::Kind kind,
1226 BranchLabels labels) {
1227 Location left = locs->in(0);
1228 Location right = locs->in(1);
1229 ASSERT(!left.IsConstant() || !right.IsConstant());
1230
1231 Condition true_condition = TokenKindToIntCondition(kind);
1232 if (left.IsConstant() || right.IsConstant()) {
1233 // Ensure constant is on the right.
1234 ConstantInstr* constant = nullptr;
1235 if (left.IsConstant()) {
1236 constant = left.constant_instruction();
1237 Location tmp = right;
1238 right = left;
1239 left = tmp;
1240 true_condition = FlipCondition(true_condition);
1241 } else {
1242 constant = right.constant_instruction();
1243 }
1244
1245 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1246 int64_t value;
1247 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1249 __ CompareImmediate(left.reg(), value);
1250 } else {
1251 UNREACHABLE();
1252 }
1253 } else {
1254 __ CompareRegisters(left.reg(), right.reg());
1255 }
1256 return true_condition;
1257}
1258#endif
1259
1260static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1261 LocationSummary* locs,
1262 Token::Kind kind,
1263 BranchLabels labels) {
1264 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1265 const Register left = locs->in(0).reg();
1266 const Register right = locs->in(1).reg();
1267 const Condition true_condition = TokenKindToIntCondition(kind);
1268 compiler::Label* equal_result =
1269 (true_condition == EQ) ? labels.true_label : labels.false_label;
1270 compiler::Label* not_equal_result =
1271 (true_condition == EQ) ? labels.false_label : labels.true_label;
1272
1273 // Check if operands have the same value. If they don't, then they could
1274 // be equal only if both of them are Mints with the same value.
1275 __ CompareObjectRegisters(left, right);
1276 __ BranchIf(EQ, equal_result);
1277 __ and_(TMP, left, right);
1278 __ BranchIfSmi(TMP, not_equal_result);
1279 __ CompareClassId(left, kMintCid, TMP);
1280 __ BranchIf(NE, not_equal_result);
1281 __ CompareClassId(right, kMintCid, TMP);
1282 __ BranchIf(NE, not_equal_result);
1283#if XLEN == 32
1284 __ LoadFieldFromOffset(TMP, left, compiler::target::Mint::value_offset());
1285 __ LoadFieldFromOffset(TMP2, right, compiler::target::Mint::value_offset());
1286 __ bne(TMP, TMP2, not_equal_result);
1287 __ LoadFieldFromOffset(
1288 TMP, left,
1289 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
1290 __ LoadFieldFromOffset(
1291 TMP2, right,
1292 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
1293#else
1294 __ LoadFieldFromOffset(TMP, left, Mint::value_offset());
1295 __ LoadFieldFromOffset(TMP2, right, Mint::value_offset());
1296#endif
1297 __ CompareRegisters(TMP, TMP2);
1298 return true_condition;
1299}
1300
1301LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1302 bool opt) const {
1303 const intptr_t kNumInputs = 2;
1304 const intptr_t kNumTemps = 0;
1305 if (is_null_aware()) {
1306 LocationSummary* locs = new (zone)
1307 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1308 locs->set_in(0, Location::RequiresRegister());
1309 locs->set_in(1, Location::RequiresRegister());
1310 locs->set_out(0, Location::RequiresRegister());
1311 return locs;
1312 }
1313#if XLEN == 32
1314 if (operation_cid() == kMintCid) {
1315 LocationSummary* locs = new (zone)
1316 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1321 locs->set_out(0, Location::RequiresRegister());
1322 return locs;
1323 }
1324#endif
1325 if (operation_cid() == kDoubleCid) {
1326 LocationSummary* locs = new (zone)
1327 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1328 locs->set_in(0, Location::RequiresFpuRegister());
1329 locs->set_in(1, Location::RequiresFpuRegister());
1330 locs->set_out(0, Location::RequiresRegister());
1331 return locs;
1332 }
1333 if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
1334 operation_cid() == kIntegerCid) {
1335 LocationSummary* locs = new (zone)
1336 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1337 if (is_null_aware()) {
1338 locs->set_in(0, Location::RequiresRegister());
1339 locs->set_in(1, Location::RequiresRegister());
1340 } else {
1341 locs->set_in(0, LocationRegisterOrConstant(left()));
1342 // Only one input can be a constant operand. The case of two constant
1343 // operands should be handled by constant propagation.
1344 // Only right can be a stack slot.
1345 locs->set_in(1, locs->in(0).IsConstant()
1348 }
1349 locs->set_out(0, Location::RequiresRegister());
1350 return locs;
1351 }
1352 UNREACHABLE();
1353 return nullptr;
1354}
1355
1356static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1357 LocationSummary* locs,
1358 BranchLabels labels,
1359 Token::Kind kind) {
1360 const FRegister left = locs->in(0).fpu_reg();
1361 const FRegister right = locs->in(1).fpu_reg();
1362
1363 switch (kind) {
1364 case Token::kEQ:
1365 __ feqd(TMP, left, right);
1366 __ CompareImmediate(TMP, 0);
1367 return NE;
1368 case Token::kNE:
1369 __ feqd(TMP, left, right);
1370 __ CompareImmediate(TMP, 0);
1371 return EQ;
1372 case Token::kLT:
1373 __ fltd(TMP, left, right);
1374 __ CompareImmediate(TMP, 0);
1375 return NE;
1376 case Token::kGT:
1377 __ fltd(TMP, right, left);
1378 __ CompareImmediate(TMP, 0);
1379 return NE;
1380 case Token::kLTE:
1381 __ fled(TMP, left, right);
1382 __ CompareImmediate(TMP, 0);
1383 return NE;
1384 case Token::kGTE:
1385 __ fled(TMP, right, left);
1386 __ CompareImmediate(TMP, 0);
1387 return NE;
1388 default:
1389 UNREACHABLE();
1390 }
1391}
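
// Note (illustrative, not part of the original file): feq.d, flt.d and fle.d
// write 0 when either operand is NaN, so after the CompareImmediate against 0
// the returned NE/EQ conditions make every ordered comparison (<, <=, >, >=,
// ==) evaluate to false for NaN operands while != evaluates to true, matching
// Dart's double comparison semantics. Token::kGT and Token::kGTE simply reuse
// flt.d/fle.d with the operands swapped.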
1392
1393Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1394 BranchLabels labels) {
1395 if (is_null_aware()) {
1396 ASSERT(operation_cid() == kMintCid);
1397 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1398 }
1399 if (operation_cid() == kSmiCid) {
1400 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1401 } else if (operation_cid() == kIntegerCid) {
1402 return EmitWordComparisonOp(compiler, locs(), kind(), labels);
1403 } else if (operation_cid() == kMintCid) {
1404#if XLEN == 32
1405 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
1406#else
1407 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1408#endif
1409 } else {
1410 ASSERT(operation_cid() == kDoubleCid);
1411 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1412 }
1413}
1414
1415LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1416 const intptr_t kNumInputs = 2;
1417 const intptr_t kNumTemps = 0;
1418 LocationSummary* locs = new (zone)
1419 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1420 locs->set_in(0, Location::RequiresRegister());
1421 // Only one input can be a constant operand. The case of two constant
1422 // operands should be handled by constant propagation.
1423 locs->set_in(1, LocationRegisterOrConstant(right()));
1424 return locs;
1425}
1426
1427Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1428 BranchLabels labels) {
1429 const Register left = locs()->in(0).reg();
1430 Location right = locs()->in(1);
1431 if (right.IsConstant()) {
1432 ASSERT(right.constant().IsSmi());
1433 const intx_t imm = static_cast<intx_t>(right.constant().ptr());
1434 __ TestImmediate(left, imm);
1435 } else {
1436 __ TestRegisters(left, right.reg());
1437 }
1438 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1439 return true_condition;
1440}
1441
1442LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1443 bool opt) const {
1444 const intptr_t kNumInputs = 1;
1445 const intptr_t kNumTemps = 1;
1446 LocationSummary* locs = new (zone)
1447 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1448 locs->set_in(0, Location::RequiresRegister());
1449 locs->set_temp(0, Location::RequiresRegister());
1450 locs->set_out(0, Location::RequiresRegister());
1451 return locs;
1452}
1453
1454Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1455 BranchLabels labels) {
1456 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1457 const Register val_reg = locs()->in(0).reg();
1458 const Register cid_reg = locs()->temp(0).reg();
1459
1460 compiler::Label* deopt =
1461 CanDeoptimize()
1462 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1463 : nullptr;
1464
1465 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1466 const ZoneGrowableArray<intptr_t>& data = cid_results();
1467 ASSERT(data[0] == kSmiCid);
1468 bool result = data[1] == true_result;
1469 __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
1470 __ LoadClassId(cid_reg, val_reg);
1471
1472 for (intptr_t i = 2; i < data.length(); i += 2) {
1473 const intptr_t test_cid = data[i];
1474 ASSERT(test_cid != kSmiCid);
1475 result = data[i + 1] == true_result;
1476 __ CompareImmediate(cid_reg, test_cid);
1477 __ BranchIf(EQ, result ? labels.true_label : labels.false_label);
1478 }
1479 // No match found, deoptimize or default action.
1480 if (deopt == nullptr) {
1481 // If the cid is not in the list, jump to the opposite label from the cids
1482 // that are in the list. These must be all the same (see asserts in the
1483 // constructor).
1484 compiler::Label* target = result ? labels.false_label : labels.true_label;
1485 if (target != labels.fall_through) {
1486 __ j(target);
1487 }
1488 } else {
1489 __ j(deopt);
1490 }
 1491 // Dummy result as this method already did the jump; there's no need
1492 // for the caller to branch on a condition.
1493 return kInvalidCondition;
1494}
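
// Illustrative note (not part of the original file): cid_results() is laid
// out as [cid0, result0, cid1, result1, ...] and always starts with kSmiCid,
// which is why the code above handles Smi with BranchIfSmi before loading the
// class id and then walks the remaining pairs two entries at a time. As a
// hypothetical example, a test like `x is num` could be encoded as
// [kSmiCid, 1, kMintCid, 1, kDoubleCid, 1].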
1495
1496LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1497 bool opt) const {
1498 const intptr_t kNumInputs = 2;
1499 const intptr_t kNumTemps = 0;
1500#if XLEN == 32
1501 if (operation_cid() == kMintCid) {
1502 LocationSummary* locs = new (zone)
1503 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1508 locs->set_out(0, Location::RequiresRegister());
1509 return locs;
1510 }
1511#endif
1512 if (operation_cid() == kDoubleCid) {
1513 LocationSummary* summary = new (zone)
1514 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1515 summary->set_in(0, Location::RequiresFpuRegister());
1516 summary->set_in(1, Location::RequiresFpuRegister());
1517 summary->set_out(0, Location::RequiresRegister());
1518 return summary;
1519 }
1520 if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
1521 LocationSummary* summary = new (zone)
1522 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1523 summary->set_in(0, LocationRegisterOrConstant(left()));
1524 // Only one input can be a constant operand. The case of two constant
1525 // operands should be handled by constant propagation.
1526 summary->set_in(1, summary->in(0).IsConstant()
1529 summary->set_out(0, Location::RequiresRegister());
1530 return summary;
1531 }
1532
1533 UNREACHABLE();
1534 return nullptr;
1535}
1536
1537Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1538 BranchLabels labels) {
1539 if (operation_cid() == kSmiCid) {
1540 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1541 } else if (operation_cid() == kMintCid) {
1542#if XLEN == 32
1543 return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
1544#else
1545 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1546#endif
1547 } else {
1548 ASSERT(operation_cid() == kDoubleCid);
1549 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1550 }
1551}
1552
1553void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1554 SetupNative();
1555 const Register result = locs()->out(0).reg();
1556
 1557 // Pass a pointer to the first argument in T2.
1558 __ AddImmediate(T2, SP, (ArgumentCount() - 1) * kWordSize);
1559
1560 // Compute the effective address. When running under the simulator,
1561 // this is a redirection address that forces the simulator to call
1562 // into the runtime system.
1563 uword entry;
1564 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1565 const Code* stub;
1566 if (link_lazily()) {
1567 stub = &StubCode::CallBootstrapNative();
1569 } else {
1570 entry = reinterpret_cast<uword>(native_c_function());
1571 if (is_bootstrap_native()) {
1572 stub = &StubCode::CallBootstrapNative();
1573 } else if (is_auto_scope()) {
1574 stub = &StubCode::CallAutoScopeNative();
1575 } else {
1576 stub = &StubCode::CallNoScopeNative();
1577 }
1578 }
1579 __ LoadImmediate(T1, argc_tag);
1580 compiler::ExternalLabel label(entry);
1581 __ LoadNativeEntry(T5, &label,
1582 link_lazily() ? ObjectPool::Patchability::kPatchable
1583 : ObjectPool::Patchability::kNotPatchable);
1584 if (link_lazily()) {
1585 compiler->GeneratePatchableCall(
1586 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1588 } else {
1589 // We can never lazy-deopt here because natives are never optimized.
1590 ASSERT(!compiler->is_optimizing());
1591 compiler->GenerateNonLazyDeoptableStubCall(
1592 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1594 }
1595 __ lx(result, compiler::Address(SP, 0));
1596 compiler->EmitDropArguments(ArgumentCount());
1597}
1598
1599#define R(r) (1 << r)
1600
1601LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1602 bool is_optimizing) const {
1603 return MakeLocationSummaryInternal(
1604 zone, is_optimizing,
1607}
1608
1609#undef R
1610
1611void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1612 const Register target = locs()->in(TargetAddressIndex()).reg();
1613
1614 // The temps are indexed according to their register number.
1615 const Register temp1 = locs()->temp(0).reg();
1616 // For regular calls, this holds the FP for rebasing the original locations
1617 // during EmitParamMoves.
1618 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1619 // the call.
1620 const Register saved_fp_or_sp = locs()->temp(1).reg();
1621 const Register temp2 = locs()->temp(2).reg();
1622
1623 ASSERT(temp1 != target);
1624 ASSERT(temp2 != target);
1625 ASSERT(temp1 != saved_fp_or_sp);
1626 ASSERT(temp2 != saved_fp_or_sp);
1627 ASSERT(saved_fp_or_sp != target);
1628
 1629 // Ensure this is a callee-saved register and is preserved across the call.
1630 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1631 // Other temps don't need to be preserved.
1632
1633 __ mv(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
1634
1635 if (!is_leaf_) {
1636 // We need to create a dummy "exit frame".
1637 // This is EnterDartFrame without accessing A2=CODE_REG or A5=PP.
1638 if (FLAG_precompiled_mode) {
1639 __ subi(SP, SP, 2 * compiler::target::kWordSize);
1640 __ sx(RA, compiler::Address(SP, 1 * compiler::target::kWordSize));
1641 __ sx(FP, compiler::Address(SP, 0 * compiler::target::kWordSize));
1642 __ addi(FP, SP, 2 * compiler::target::kWordSize);
1643 } else {
1644 __ subi(SP, SP, 4 * compiler::target::kWordSize);
1645 __ sx(RA, compiler::Address(SP, 3 * compiler::target::kWordSize));
1646 __ sx(FP, compiler::Address(SP, 2 * compiler::target::kWordSize));
1647 __ sx(NULL_REG, compiler::Address(SP, 1 * compiler::target::kWordSize));
1648 __ sx(NULL_REG, compiler::Address(SP, 0 * compiler::target::kWordSize));
1649 __ addi(FP, SP, 4 * compiler::target::kWordSize);
1650 }
1651 }
1652
1653 // Reserve space for the arguments that go on the stack (if any), then align.
1654 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
1655 __ ReserveAlignedFrameSpace(stack_space);
1656#if defined(USING_MEMORY_SANITIZER)
1657 {
1658 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs, kAbiVolatileFpuRegs);
1659 __ mv(temp1, SP);
1660 __ PushRegisters(kVolatileRegisterSet);
1661
1662 // Outgoing arguments passed on the stack to the foreign function.
1663 __ mv(A0, temp1);
1664 __ LoadImmediate(A1, stack_space);
1665 __ CallCFunction(
1666 compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
1667
1668 // Incoming Dart arguments to this trampoline are potentially used as local
1669 // handles.
1670 __ mv(A0, is_leaf_ ? FPREG : saved_fp_or_sp);
1671 __ LoadImmediate(A1, (kParamEndSlotFromFp + InputCount()) * kWordSize);
1672 __ CallCFunction(
1673 compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
1674
1675 // Outgoing arguments passed by register to the foreign function.
1676 __ LoadImmediate(A0, InputCount());
1677 __ CallCFunction(compiler::Address(
1678 THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
1679
1680 __ PopRegisters(kVolatileRegisterSet);
1681 }
1682#endif
1683
1684 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
1685
1687 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1688 }
1689
1690 if (is_leaf_) {
1691#if !defined(PRODUCT)
1692 // Set the thread object's top_exit_frame_info and VMTag to enable the
1693 // profiler to determine that thread is no longer executing Dart code.
1694 __ StoreToOffset(FPREG, THR,
1695 compiler::target::Thread::top_exit_frame_info_offset());
1696 __ StoreToOffset(target, THR, compiler::target::Thread::vm_tag_offset());
1697#endif
1698
1699 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1700 __ mv(A4, T4);
1701 __ mv(A5, T5);
1702 __ jalr(target);
1703
1704#if !defined(PRODUCT)
1705 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1706 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1707 __ StoreToOffset(ZR, THR,
1708 compiler::target::Thread::top_exit_frame_info_offset());
1709#endif
1710 } else {
1711 // We need to copy a dummy return address up into the dummy stack frame so
1712 // the stack walker will know which safepoint to use.
1713 //
1714 // AUIPC loads relative to itself.
1715 compiler->EmitCallsiteMetadata(source(), deopt_id(),
1716 UntaggedPcDescriptors::Kind::kOther, locs(),
1717 env());
1718 __ auipc(temp1, 0);
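// Note: auipc with a zero immediate materializes the address of the auipc
// instruction itself; storing it as the saved caller PC gives the stack walker
// an in-range PC for the call-site metadata emitted above.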
1719 __ StoreToOffset(temp1, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
1720
1721 if (CanExecuteGeneratedCodeInSafepoint()) {
1722 // Update information in the thread object and enter a safepoint.
1723 __ LoadImmediate(temp1, compiler::target::Thread::exit_through_ffi());
1724 __ TransitionGeneratedToNative(target, FPREG, temp1,
1725 /*enter_safepoint=*/true);
1726
1727 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1728 __ mv(A4, T4);
1729 __ mv(A5, T5);
1730 __ jalr(target);
1731
1732 // Update information in the thread object and leave the safepoint.
1733 __ TransitionNativeToGenerated(temp1, /*leave_safepoint=*/true);
1734 } else {
1735 // We cannot trust that this code will be executable within a safepoint.
1736 // Therefore we delegate the responsibility of entering/exiting the
1737 // safepoint to a stub in the VM isolate's heap, which will never
1738 // lose execute permission.
1739 __ lx(temp1,
1740 compiler::Address(
1741 THR, compiler::target::Thread::
1742 call_native_through_safepoint_entry_point_offset()));
1743
1744 // Calls T0 and clobbers R19 (along with volatile registers).
1745 ASSERT(target == T0);
1746 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1747 __ mv(A4, T4);
1748 __ mv(A5, T5);
1749 __ jalr(temp1);
1750 }
1751
1752 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1753 __ Comment("Check Dart_Handle for Error.");
1756 compiler::Label not_error;
1757 __ LoadFromOffset(temp1, CallingConventions::kReturnReg,
1758 compiler::target::LocalHandle::ptr_offset());
1759 __ BranchIfSmi(temp1, &not_error);
1760 __ LoadClassId(temp1, temp1);
1761 __ RangeCheck(temp1, temp2, kFirstErrorCid, kLastErrorCid,
1762 compiler::AssemblerBase::kIfNotInRange, &not_error);
1763
1764 // Slow path, use the stub to propagate error, to save on code-size.
1765 __ Comment("Slow path: call Dart_PropagateError through stub.");
1768 __ lx(temp1,
1769 compiler::Address(
1770 THR, compiler::target::Thread::
1771 call_native_through_safepoint_entry_point_offset()));
1772 __ lx(target, compiler::Address(
1773 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1774 __ jalr(temp1);
1775#if defined(DEBUG)
1776 // We should never return with normal control flow from this.
1777 __ ebreak();
1778#endif
1779
1780 __ Bind(&not_error);
1781 }
1782
1783 // Refresh pinned register values (incl. write barrier mask and null
1784 // object).
1785 __ RestorePinnedRegisters();
1786 }
1787
1788 EmitReturnMoves(compiler, temp1, temp2);
1789
1790 if (is_leaf_) {
1791 // Restore the pre-aligned SP.
1792 __ mv(SPREG, saved_fp_or_sp);
1793 } else {
1794 __ LeaveDartFrame();
1795
1796 // Restore the global object pool after returning from runtime (old space is
1797 // moving, so the GOP could have been relocated).
1798 if (FLAG_precompiled_mode) {
1799 __ SetupGlobalPoolAndDispatchTable();
1800 }
1801 }
1802
1803 // PP is a volatile register, so it must be restored even for leaf FFI calls.
1804 __ RestorePoolPointer();
1805 __ set_constant_pool_allowed(true);
1806}
1807
1808// Keep in sync with NativeEntryInstr::EmitNativeCode.
1809void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1810 EmitReturnMoves(compiler);
1811
1812 __ LeaveDartFrame();
1813
1814 // The dummy return address is in RA; no need to pop it as on Intel.
1815
1816 // These can be anything besides the return registers (A0, A1) and THR (S1).
1817 const Register vm_tag_reg = T2;
1818 const Register old_exit_frame_reg = T3;
1819 const Register old_exit_through_ffi_reg = T4;
1820 const Register tmp = T5;
1821
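// Note: these pops mirror the pushes in NativeEntryInstr::EmitNativeCode: top
// exit frame info and exit-through-ffi first, then top resource and VM tag.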
1822 __ PopRegisterPair(old_exit_frame_reg, old_exit_through_ffi_reg);
1823
1824 // Restore top_resource.
1825 __ PopRegisterPair(tmp, vm_tag_reg);
1826 __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
1827
1828 // Reset the exit frame info to old_exit_frame_reg *before* entering the
1829 // safepoint. The trampoline that called us will enter the safepoint on our
1830 // behalf.
1831 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1832 old_exit_through_ffi_reg,
1833 /*enter_safepoint=*/false);
1834
1835 __ PopNativeCalleeSavedRegisters();
1836
1837 // Leave the entry frame.
1838 __ LeaveFrame();
1839
1840 // Leave the dummy frame holding the pushed arguments.
1841 __ LeaveFrame();
1842
1843 __ Ret();
1844
1845 // For following blocks.
1846 __ set_constant_pool_allowed(true);
1847}
1848
1849// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1850void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1851 // Constant pool cannot be used until we enter the actual Dart frame.
1852 __ set_constant_pool_allowed(false);
1853
1854 __ Bind(compiler->GetJumpLabel(this));
1855
1856 // Create a dummy frame holding the pushed arguments. This simplifies
1857 // NativeReturnInstr::EmitNativeCode.
1858 __ EnterFrame(0);
1859
1860 // Save the argument registers, in reverse order.
1861 __ mv(T3, A3); // TODO(rmacnak): Only when needed.
1862 __ mv(T4, A4);
1863 __ mv(T5, A5);
1864 SaveArguments(compiler);
1865
1866 // Enter the entry frame. NativeParameterInstr expects this frame to have size
1867 // -exit_link_slot_from_entry_fp, verified below.
1868 __ EnterFrame(0);
1869
1870 // Save a space for the code object.
1871 __ PushImmediate(0);
1872
1873 __ PushNativeCalleeSavedRegisters();
1874
1875#if defined(USING_SHADOW_CALL_STACK)
1876#error Unimplemented
1877#endif
1878
1879 // Refresh pinned register values (incl. write barrier mask and null object).
1880 __ RestorePinnedRegisters();
1881
1882 // Save the current VMTag on the stack.
1883 __ LoadFromOffset(TMP, THR, compiler::target::Thread::vm_tag_offset());
1884 // Save the top resource.
1885 __ LoadFromOffset(A0, THR, compiler::target::Thread::top_resource_offset());
1886 __ PushRegisterPair(A0, TMP);
1887
1888 __ StoreToOffset(ZR, THR, compiler::target::Thread::top_resource_offset());
1889
1890 __ LoadFromOffset(A0, THR,
1891 compiler::target::Thread::exit_through_ffi_offset());
1892 __ PushRegister(A0);
1893
1894 // Save the top exit frame info. We don't set it to 0 yet:
1895 // TransitionNativeToGenerated will handle that.
1896 __ LoadFromOffset(A0, THR,
1897 compiler::target::Thread::top_exit_frame_info_offset());
1898 __ PushRegister(A0);
1899
1900 // In debug mode, verify that we've pushed the top exit frame info at the
1901 // correct offset from FP.
1902 __ EmitEntryFrameVerification();
1903
1904 // The callback trampoline (caller) has already left the safepoint for us.
1905 __ TransitionNativeToGenerated(A0, /*exit_safepoint=*/false);
1906
1907 // Now that the safepoint has ended, we can touch Dart objects without
1908 // handles.
1909
1910 // Load the code object.
1911 const Function& target_function = marshaller_.dart_signature();
1912 const intptr_t callback_id = target_function.FfiCallbackId();
1913 __ LoadFromOffset(A0, THR, compiler::target::Thread::isolate_group_offset());
1914 __ LoadFromOffset(A0, A0,
1915 compiler::target::IsolateGroup::object_store_offset());
1916 __ LoadFromOffset(A0, A0,
1917 compiler::target::ObjectStore::ffi_callback_code_offset());
1918 __ LoadCompressedFieldFromOffset(
1919 A0, A0, compiler::target::GrowableObjectArray::data_offset());
1920 __ LoadCompressedFieldFromOffset(
1921 CODE_REG, A0,
1922 compiler::target::Array::data_offset() +
1923 callback_id * compiler::target::kCompressedWordSize);
1924
1925 // Put the code object in the reserved slot.
1926 __ StoreToOffset(CODE_REG, FPREG,
1927 kPcMarkerSlotFromFp * compiler::target::kWordSize);
1928 if (FLAG_precompiled_mode) {
1929 __ SetupGlobalPoolAndDispatchTable();
1930 } else {
1931 // We now load the pool pointer (PP) with a GC-safe value as we are about to
1932 // invoke Dart code. We don't need a real object pool here.
1933 // Smi zero does not work because the generated code assumes PP to be untagged.
1934 __ LoadObject(PP, compiler::NullObject());
1935 }
1936
1937 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1938 __ mv(ARGS_DESC_REG, ZR);
1939
1940 // Load a dummy return address which suggests that we are inside of
1941 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1942 __ LoadFromOffset(RA, THR,
1943 compiler::target::Thread::invoke_dart_code_stub_offset());
1944 __ LoadFieldFromOffset(RA, RA, compiler::target::Code::entry_point_offset());
1945
1946 FunctionEntryInstr::EmitNativeCode(compiler);
1947}
1948
1949#define R(r) (1 << r)
1950
1951 LocationSummary* LeafRuntimeCallInstr::MakeLocationSummary(
1952 Zone* zone,
1953 bool is_optimizing) const {
1956 static_assert(saved_fp < temp0, "Unexpected ordering of registers in set.");
1957 LocationSummary* summary =
1958 MakeLocationSummaryInternal(zone, (R(saved_fp) | R(temp0)));
1959 return summary;
1960}
1961
1962#undef R
1963
1964void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1965 const Register saved_fp = locs()->temp(0).reg();
1966 const Register temp0 = locs()->temp(1).reg();
1967
1968 __ MoveRegister(saved_fp, FPREG);
1969
1970 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1971 __ EnterCFrame(frame_space);
1972
1973 EmitParamMoves(compiler, saved_fp, temp0);
1974
1975 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1976 // I.e., no use of A3/A4/A5.
1977 RELEASE_ASSERT(native_calling_convention_.argument_locations().length() < 4);
1978 __ sx(target_address,
1979 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1980 __ CallCFunction(target_address);
1981 __ li(temp0, VMTag::kDartTagId);
1982 __ sx(temp0,
1983 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1984
1985 __ LeaveCFrame(); // Also restores PP=A5.
1986}
1987
1988LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
1989 Zone* zone,
1990 bool opt) const {
1991 const intptr_t kNumInputs = 1;
1992 // TODO(fschneider): Allow immediate operands for the char code.
1993 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1994 LocationSummary::kNoCall);
1995 }
1996
1997void OneByteStringFromCharCodeInstr::EmitNativeCode(
1998 FlowGraphCompiler* compiler) {
1999 ASSERT(compiler->is_optimizing());
2000 const Register char_code = locs()->in(0).reg();
2001 const Register result = locs()->out(0).reg();
2002 __ lx(result,
2003 compiler::Address(THR, Thread::predefined_symbols_address_offset()));
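// Note: char_code is a tagged Smi, so scaling it by kWordSizeLog2 - kSmiTagSize
// yields a word offset into the predefined symbols table loaded above.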
2004 __ AddShifted(TMP, result, char_code, kWordSizeLog2 - kSmiTagSize);
2005 __ lx(result,
2006 compiler::Address(TMP, Symbols::kNullCharCodeSymbolOffset * kWordSize));
2007}
2008
2009LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
2010 bool opt) const {
2011 const intptr_t kNumInputs = 1;
2012 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
2013 LocationSummary::kNoCall);
2014 }
2015
2016void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2017 ASSERT(cid_ == kOneByteStringCid);
2018 Register str = locs()->in(0).reg();
2019 Register result = locs()->out(0).reg();
2020 compiler::Label is_one, done;
2021 __ LoadCompressedSmi(result,
2022 compiler::FieldAddress(str, String::length_offset()));
2023 __ CompareImmediate(result, Smi::RawValue(1));
2024 __ BranchIf(EQUAL, &is_one, compiler::Assembler::kNearJump);
2025 __ li(result, Smi::RawValue(-1));
2026 __ j(&done, compiler::Assembler::kNearJump);
2027 __ Bind(&is_one);
2028 __ lbu(result, compiler::FieldAddress(str, OneByteString::data_offset()));
2029 __ SmiTag(result);
2030 __ Bind(&done);
2031}
2032
2033LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
2034 bool opt) const {
2035 const intptr_t kNumInputs = 5;
2036 const intptr_t kNumTemps = 0;
2037 LocationSummary* summary = new (zone)
2038 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2039 summary->set_in(0, Location::Any()); // decoder
2040 summary->set_in(1, Location::WritableRegister()); // bytes
2041 summary->set_in(2, Location::WritableRegister()); // start
2042 summary->set_in(3, Location::WritableRegister()); // end
2043 summary->set_in(4, Location::WritableRegister()); // table
2044 summary->set_out(0, Location::RequiresRegister());
2045 return summary;
2046}
2047
2048void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2049 const Register bytes_reg = locs()->in(1).reg();
2050 const Register start_reg = locs()->in(2).reg();
2051 const Register end_reg = locs()->in(3).reg();
2052 const Register table_reg = locs()->in(4).reg();
2053 const Register size_reg = locs()->out(0).reg();
2054
2055 const Register bytes_ptr_reg = start_reg;
2056 const Register bytes_end_reg = end_reg;
2057 const Register flags_reg = bytes_reg;
2058 const Register temp_reg = TMP;
2059 const Register decoder_temp_reg = start_reg;
2060 const Register flags_temp_reg = end_reg;
2061
2062 const intptr_t kSizeMask = 0x03;
2063 const intptr_t kFlagsMask = 0x3C;
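// Note: each table entry packs the byte's size contribution in its low two
// bits (kSizeMask) and decoder flag bits in the range covered by kFlagsMask;
// the loop below ORs the flags together and sums the sizes.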
2064
2065 compiler::Label loop, loop_in;
2066
2067 // Address of input bytes.
2068 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
2069
2070 // Table.
2071 __ AddImmediate(
2072 table_reg, table_reg,
2073 compiler::target::OneByteString::data_offset() - kHeapObjectTag);
2074
2075 // Pointers to start and end.
2076 __ add(bytes_ptr_reg, bytes_reg, start_reg);
2077 __ add(bytes_end_reg, bytes_reg, end_reg);
2078
2079 // Initialize size and flags.
2080 __ li(size_reg, 0);
2081 __ li(flags_reg, 0);
2082
2082
2083 __ j(&loop_in, compiler::Assembler::kNearJump);
2084 __ Bind(&loop);
2085
2086 // Read byte and increment pointer.
2087 __ lbu(temp_reg, compiler::Address(bytes_ptr_reg, 0));
2088 __ addi(bytes_ptr_reg, bytes_ptr_reg, 1);
2089
2090 // Update size and flags based on byte value.
2091 __ add(temp_reg, table_reg, temp_reg);
2092 __ lbu(temp_reg, compiler::Address(temp_reg));
2093 __ or_(flags_reg, flags_reg, temp_reg);
2094 __ andi(temp_reg, temp_reg, kSizeMask);
2095 __ add(size_reg, size_reg, temp_reg);
2096
2097 // Stop if end is reached.
2098 __ Bind(&loop_in);
2099 __ bltu(bytes_ptr_reg, bytes_end_reg, &loop, compiler::Assembler::kNearJump);
2100
2101 // Write flags to field.
2102 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
2103 if (!IsScanFlagsUnboxed()) {
2104 __ SmiTag(flags_reg);
2105 }
2106 Register decoder_reg;
2107 const Location decoder_location = locs()->in(0);
2108 if (decoder_location.IsStackSlot()) {
2109 __ lx(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
2110 decoder_reg = decoder_temp_reg;
2111 } else {
2112 decoder_reg = decoder_location.reg();
2113 }
2114 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
2115 if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
2116 UNIMPLEMENTED();
2117 } else {
2118 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
2119 scan_flags_field_offset);
2120 __ or_(flags_temp_reg, flags_temp_reg, flags_reg);
2121 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2122 }
2123}
2124
2125LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
2126 bool opt) const {
2127 // The compiler must optimize any function that includes a LoadIndexed
2128 // instruction that uses typed data cids, since extracting the payload address
2129 // from views is done in a compiler pass after all code motion has happened.
2130 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
2131
2132 const intptr_t kNumInputs = 2;
2133 const intptr_t kNumTemps = 0;
2134 LocationSummary* locs = new (zone)
2135 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2136 locs->set_in(kArrayPos, Location::RequiresRegister());
2137 const bool can_be_constant =
2138 index()->BindsToConstant() &&
2139 compiler::Assembler::AddressCanHoldConstantIndex(
2140 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2141 locs->set_in(kIndexPos,
2142 can_be_constant
2143 ? Location::Constant(index()->definition()->AsConstant())
2144 : Location::RequiresRegister());
2145 auto const rep =
2146 RepresentationUtils::RepresentationOfArrayElement(class_id());
2147 if (RepresentationUtils::IsUnboxedInteger(rep)) {
2148 locs->set_out(0, Location::RequiresRegister());
2149#if XLEN == 32
2150 if (rep == kUnboxedInt64) {
2151 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
2152 Location::RequiresRegister()));
2153 }
2154#endif
2155 } else if (RepresentationUtils::IsUnboxed(rep)) {
2156 locs->set_out(0, Location::RequiresFpuRegister());
2157 } else {
2158 locs->set_out(0, Location::RequiresRegister());
2159 }
2160 return locs;
2161}
2162
2163void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2164 // The array register points to the backing store for external arrays.
2165 const Register array = locs()->in(kArrayPos).reg();
2166 const Location index = locs()->in(kIndexPos);
2167
2168 compiler::Address element_address(TMP); // Bad address.
2169 element_address = index.IsRegister()
2170 ? __ ElementAddressForRegIndex(
2171 IsUntagged(), class_id(), index_scale(),
2172 index_unboxed_, array, index.reg(), TMP)
2173 : __ ElementAddressForIntIndex(
2174 IsUntagged(), class_id(), index_scale(), array,
2175 Smi::Cast(index.constant()).Value());
2176
2177 auto const rep =
2178 RepresentationUtils::RepresentationOfArrayElement(class_id());
2179 ASSERT(representation() == Boxing::NativeRepresentation(rep));
2180 if (RepresentationUtils::IsUnboxedInteger(rep)) {
2181#if XLEN == 32
2182 if (rep == kUnboxedInt64) {
2183 ASSERT(locs()->out(0).IsPairLocation());
2184 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2185 const Register result_lo = result_pair->At(0).reg();
2186 const Register result_hi = result_pair->At(1).reg();
2187 __ lw(result_lo, element_address);
2188 __ lw(result_hi, compiler::Address(element_address.base(),
2189 element_address.offset() + 4));
2190 } else {
2191 const Register result = locs()->out(0).reg();
2192 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2193 }
2194#else
2195 const Register result = locs()->out(0).reg();
2196 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2197#endif
2198 } else if (RepresentationUtils::IsUnboxed(rep)) {
2199 const FRegister result = locs()->out(0).fpu_reg();
2200 if (rep == kUnboxedFloat) {
2201 // Load single precision float.
2202 __ flw(result, element_address);
2203 } else if (rep == kUnboxedDouble) {
2204 // Load double precision float.
2205 __ fld(result, element_address);
2206 } else {
2207 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2208 rep == kUnboxedFloat64x2);
2209 UNIMPLEMENTED();
2210 }
2211 } else {
2212 ASSERT(rep == kTagged);
2213 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2214 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2215 const Register result = locs()->out(0).reg();
2216 __ lx(result, element_address);
2217 }
2218}
2219
2220LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2221 bool opt) const {
2222 const intptr_t kNumInputs = 2;
2223 const intptr_t kNumTemps = 0;
2224 LocationSummary* summary = new (zone)
2225 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2226 summary->set_in(0, Location::RequiresRegister());
2227 summary->set_in(1, Location::RequiresRegister());
2228#if XLEN == 32
2229 if (representation() == kUnboxedInt64) {
2230 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
2231 Location::RequiresRegister()));
2232 } else {
2233 ASSERT(representation() == kTagged);
2234 summary->set_out(0, Location::RequiresRegister());
2235 }
2236#else
2237 summary->set_out(0, Location::RequiresRegister());
2238#endif
2239 return summary;
2240}
2241
2242void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2243 // The string register points to the backing store for external strings.
2244 const Register str = locs()->in(0).reg();
2245 const Location index = locs()->in(1);
2246 compiler::OperandSize sz = compiler::kByte;
2247
2248#if XLEN == 32
2249 if (representation() == kUnboxedInt64) {
2250 ASSERT(compiler->is_optimizing());
2251 ASSERT(locs()->out(0).IsPairLocation());
2252 UNIMPLEMENTED();
2253 }
2254#endif
2255
2256 Register result = locs()->out(0).reg();
2257 switch (class_id()) {
2258 case kOneByteStringCid:
2259 switch (element_count()) {
2260 case 1:
2261 sz = compiler::kUnsignedByte;
2262 break;
2263 case 2:
2264 sz = compiler::kUnsignedTwoBytes;
2265 break;
2266 case 4:
2267 sz = compiler::kUnsignedFourBytes;
2268 break;
2269 default:
2270 UNREACHABLE();
2271 }
2272 break;
2273 case kTwoByteStringCid:
2274 switch (element_count()) {
2275 case 1:
2276 sz = compiler::kUnsignedTwoBytes;
2277 break;
2278 case 2:
2279 sz = compiler::kUnsignedFourBytes;
2280 break;
2281 default:
2282 UNREACHABLE();
2283 }
2284 break;
2285 default:
2286 UNREACHABLE();
2287 break;
2288 }
2289 // Warning: element_address may use register TMP as base.
2290 compiler::Address element_address = __ ElementAddressForRegIndex(
2291 IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false, str,
2292 index.reg(), TMP);
2293 switch (sz) {
2294 case compiler::kUnsignedByte:
2295 __ lbu(result, element_address);
2296 break;
2297 case compiler::kUnsignedTwoBytes:
2298 __ lhu(result, element_address);
2299 break;
2300 case compiler::kUnsignedFourBytes:
2301#if XLEN == 32
2302 __ lw(result, element_address);
2303#else
2304 __ lwu(result, element_address);
2305#endif
2306 break;
2307 default:
2308 UNREACHABLE();
2309 }
2310
2311 ASSERT(can_pack_into_smi());
2312 __ SmiTag(result);
2313}
2314
2315LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2316 bool opt) const {
2317 // The compiler must optimize any function that includes a StoreIndexed
2318 // instruction that uses typed data cids, since extracting the payload address
2319 // from views is done in a compiler pass after all code motion has happened.
2320 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
2321
2322 const intptr_t kNumInputs = 3;
2323 const intptr_t kNumTemps = 1;
2324 LocationSummary* locs = new (zone)
2325 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2326 locs->set_in(0, Location::RequiresRegister());
2327 const bool can_be_constant =
2328 index()->BindsToConstant() &&
2329 compiler::Assembler::AddressCanHoldConstantIndex(
2330 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2331 locs->set_in(1, can_be_constant
2332 ? Location::Constant(index()->definition()->AsConstant())
2333 : Location::RequiresRegister());
2334 locs->set_temp(0, Location::RequiresRegister());
2335
2336 auto const rep =
2337 RepresentationUtils::RepresentationOfArrayElement(class_id());
2338 if (IsClampedTypedDataBaseClassId(class_id())) {
2339 ASSERT(rep == kUnboxedUint8);
2340 locs->set_in(2, LocationRegisterOrConstant(value()));
2341 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2342 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2343 ConstantInstr* constant = value()->definition()->AsConstant();
2344 if (constant != nullptr && constant->HasZeroRepresentation()) {
2345 locs->set_in(2, Location::Constant(constant));
2346 } else {
2347 locs->set_in(2, Location::RequiresRegister());
2348 }
2349 } else if (rep == kUnboxedInt64) {
2350#if XLEN == 32
2351 locs->set_in(2, Location::Pair(Location::RequiresRegister(),
2352 Location::RequiresRegister()));
2353#else
2354 ConstantInstr* constant = value()->definition()->AsConstant();
2355 if (constant != nullptr && constant->HasZeroRepresentation()) {
2356 locs->set_in(2, Location::Constant(constant));
2357 } else {
2358 locs->set_in(2, Location::RequiresRegister());
2359 }
2360#endif
2361 } else {
2362 ConstantInstr* constant = value()->definition()->AsConstant();
2363 if (constant != nullptr && constant->HasZeroRepresentation()) {
2364 locs->set_in(2, Location::Constant(constant));
2365 } else {
2366 locs->set_in(2, Location::RequiresRegister());
2367 }
2368 }
2369 } else if (RepresentationUtils::IsUnboxed(rep)) {
2370 if (rep == kUnboxedFloat) {
2371 ConstantInstr* constant = value()->definition()->AsConstant();
2372 if (constant != nullptr && constant->HasZeroRepresentation()) {
2373 locs->set_in(2, Location::Constant(constant));
2374 } else {
2375 locs->set_in(2, Location::RequiresFpuRegister());
2376 }
2377 } else if (rep == kUnboxedDouble) {
2378#if XLEN == 32
2379 locs->set_in(2, Location::RequiresFpuRegister());
2380#else
2381 ConstantInstr* constant = value()->definition()->AsConstant();
2382 if (constant != nullptr && constant->HasZeroRepresentation()) {
2383 locs->set_in(2, Location::Constant(constant));
2384 } else {
2385 locs->set_in(2, Location::RequiresFpuRegister());
2386 }
2387#endif
2388 } else {
2389 locs->set_in(2, Location::RequiresFpuRegister());
2390 }
2391 } else if (class_id() == kArrayCid) {
2392 locs->set_in(2, ShouldEmitStoreBarrier()
2393 ? Location::RegisterLocation(kWriteBarrierValueReg)
2394 : LocationRegisterOrConstant(value()));
2395 if (ShouldEmitStoreBarrier()) {
2396 locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
2397 locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
2398 }
2399 } else {
2400 UNREACHABLE();
2401 }
2402 return locs;
2403}
2404
2405void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2406 // The array register points to the backing store for external arrays.
2407 const Register array = locs()->in(0).reg();
2408 const Location index = locs()->in(1);
2409 const Register temp = locs()->temp(0).reg();
2410 compiler::Address element_address(TMP); // Bad address.
2411
2412 // Deal with a special case separately.
2413 if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
2414 if (index.IsRegister()) {
2415 __ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
2416 index_scale(), index_unboxed_, array,
2417 index.reg());
2418 } else {
2419 __ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
2420 index_scale(), array,
2421 Smi::Cast(index.constant()).Value());
2422 }
2423 const Register value = locs()->in(2).reg();
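// Note: StoreIntoArray performs the store together with the write barrier,
// using the element address computed into temp above.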
2424 __ StoreIntoArray(array, temp, value, CanValueBeSmi());
2425 return;
2426 }
2427
2428 element_address = index.IsRegister()
2429 ? __ ElementAddressForRegIndex(
2430 IsUntagged(), class_id(), index_scale(),
2431 index_unboxed_, array, index.reg(), temp)
2432 : __ ElementAddressForIntIndex(
2433 IsUntagged(), class_id(), index_scale(), array,
2434 Smi::Cast(index.constant()).Value());
2435
2436 auto const rep =
2437 RepresentationUtils::RepresentationOfArrayElement(class_id());
2438 ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
2439 if (IsClampedTypedDataBaseClassId(class_id())) {
2440 if (locs()->in(2).IsConstant()) {
2441 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2442 intptr_t value = constant.Value();
2443 // Clamp to 0x0 or 0xFF respectively.
2444 if (value > 0xFF) {
2445 value = 0xFF;
2446 } else if (value < 0) {
2447 value = 0;
2448 }
2449 if (value == 0) {
2450 __ sb(ZR, element_address);
2451 } else {
2452 __ LoadImmediate(TMP, static_cast<int8_t>(value));
2453 __ sb(TMP, element_address);
2454 }
2455 } else {
2456 const Register value = locs()->in(2).reg();
2457
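// Note: negative values branch to store_zero, which loads TMP with zero and
// falls through into store_ff's byte store; values above 0xFF branch to
// store_ff with TMP already holding 0xFF; in-range values store directly and
// jump to done.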
2458 compiler::Label store_zero, store_ff, done;
2459 __ blt(value, ZR, &store_zero, compiler::Assembler::kNearJump);
2460
2461 __ li(TMP, 0xFF);
2462 __ bgt(value, TMP, &store_ff, compiler::Assembler::kNearJump);
2463
2464 __ sb(value, element_address);
2465 __ j(&done, compiler::Assembler::kNearJump);
2466
2467 __ Bind(&store_zero);
2468 __ mv(TMP, ZR);
2469
2470 __ Bind(&store_ff);
2471 __ sb(TMP, element_address);
2472
2473 __ Bind(&done);
2474 }
2475 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2476 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2477 if (locs()->in(2).IsConstant()) {
2478 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2479 __ sb(ZR, element_address);
2480 } else {
2481 const Register value = locs()->in(2).reg();
2482 __ sb(value, element_address);
2483 }
2484 } else if (rep == kUnboxedInt64) {
2485#if XLEN >= 64
2486 if (locs()->in(2).IsConstant()) {
2487 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2488 __ sd(ZR, element_address);
2489 } else {
2490 __ sd(locs()->in(2).reg(), element_address);
2491 }
2492#else
2493 PairLocation* value_pair = locs()->in(2).AsPairLocation();
2494 Register value_lo = value_pair->At(0).reg();
2495 Register value_hi = value_pair->At(1).reg();
2496 __ sw(value_lo, element_address);
2497 __ sw(value_hi, compiler::Address(element_address.base(),
2498 element_address.offset() + 4));
2499#endif
2500 } else {
2501 if (locs()->in(2).IsConstant()) {
2502 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2503 __ Store(ZR, element_address, RepresentationUtils::OperandSize(rep));
2504 } else {
2505 __ Store(locs()->in(2).reg(), element_address,
2506 RepresentationUtils::OperandSize(rep));
2507 }
2508 }
2509 } else if (RepresentationUtils::IsUnboxed(rep)) {
2510 if (rep == kUnboxedFloat) {
2511 if (locs()->in(2).IsConstant()) {
2512 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2513 __ sw(ZR, element_address);
2514 } else {
2515 __ fsw(locs()->in(2).fpu_reg(), element_address);
2516 }
2517 } else if (rep == kUnboxedDouble) {
2518#if XLEN >= 64
2519 if (locs()->in(2).IsConstant()) {
2520 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2521 __ sd(ZR, element_address);
2522 } else {
2523 __ fsd(locs()->in(2).fpu_reg(), element_address);
2524 }
2525#else
2526 __ fsd(locs()->in(2).fpu_reg(), element_address);
2527#endif
2528 } else {
2529 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2530 rep == kUnboxedFloat64x2);
2531 UNIMPLEMENTED();
2532 }
2533 } else if (class_id() == kArrayCid) {
2534 ASSERT(rep == kTagged);
2535 ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
2536 if (locs()->in(2).IsConstant()) {
2537 const Object& constant = locs()->in(2).constant();
2538 __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
2539 } else {
2540 const Register value = locs()->in(2).reg();
2541 __ StoreIntoObjectNoBarrier(array, element_address, value);
2542 }
2543 } else {
2544 UNREACHABLE();
2545 }
2546
2547#if defined(USING_MEMORY_SANITIZER)
2548 UNIMPLEMENTED();
2549#endif
2550}
2551
2552static void LoadValueCid(FlowGraphCompiler* compiler,
2553 Register value_cid_reg,
2554 Register value_reg,
2555 compiler::Label* value_is_smi = nullptr) {
2556 compiler::Label done;
2557 if (value_is_smi == nullptr) {
2558 __ LoadImmediate(value_cid_reg, kSmiCid);
2559 }
2560 __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi,
2561 compiler::Assembler::kNearJump);
2562 __ LoadClassId(value_cid_reg, value_reg);
2563 __ Bind(&done);
2564}
2565
2566DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2567
2568LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2569 bool opt) const {
2570 const intptr_t kNumInputs = 1;
2571
2572 const intptr_t value_cid = value()->Type()->ToCid();
2573 const intptr_t field_cid = field().guarded_cid();
2574
2575 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2576
2577 const bool needs_value_cid_temp_reg =
2578 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2579
2580 const bool needs_field_temp_reg = emit_full_guard;
2581
2582 intptr_t num_temps = 0;
2583 if (needs_value_cid_temp_reg) {
2584 num_temps++;
2585 }
2586 if (needs_field_temp_reg) {
2587 num_temps++;
2588 }
2589
2590 LocationSummary* summary = new (zone)
2591 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2592 summary->set_in(0, Location::RequiresRegister());
2593
2594 for (intptr_t i = 0; i < num_temps; i++) {
2595 summary->set_temp(i, Location::RequiresRegister());
2596 }
2597
2598 return summary;
2599}
2600
2601void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2602 ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
2603 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2604 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2605
2606 const intptr_t value_cid = value()->Type()->ToCid();
2607 const intptr_t field_cid = field().guarded_cid();
2608 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2609
2610 if (field_cid == kDynamicCid) {
2611 return; // Nothing to emit.
2612 }
2613
2614 const bool emit_full_guard =
2615 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2616
2617 const bool needs_value_cid_temp_reg =
2618 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2619
2620 const bool needs_field_temp_reg = emit_full_guard;
2621
2622 const Register value_reg = locs()->in(0).reg();
2623
2624 const Register value_cid_reg =
2625 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2626
2627 const Register field_reg = needs_field_temp_reg
2628 ? locs()->temp(locs()->temp_count() - 1).reg()
2629 : kNoRegister;
2630
2631 compiler::Label ok, fail_label;
2632
2633 compiler::Label* deopt =
2634 compiler->is_optimizing()
2635 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2636 : nullptr;
2637
2638 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2639
2640 if (emit_full_guard) {
2641 __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
2642
2643 compiler::FieldAddress field_cid_operand(field_reg,
2644 Field::guarded_cid_offset());
2645 compiler::FieldAddress field_nullability_operand(
2646 field_reg, Field::is_nullable_offset());
2647
2648 if (value_cid == kDynamicCid) {
2649 LoadValueCid(compiler, value_cid_reg, value_reg);
2650 compiler::Label skip_length_check;
2651 __ lw(TMP, field_cid_operand);
2652 __ CompareRegisters(value_cid_reg, TMP);
2653 __ BranchIf(EQ, &ok);
2654 __ lw(TMP, field_nullability_operand);
2655 __ CompareRegisters(value_cid_reg, TMP);
2656 } else if (value_cid == kNullCid) {
2657 __ lw(value_cid_reg, field_nullability_operand);
2658 __ CompareImmediate(value_cid_reg, value_cid);
2659 } else {
2660 compiler::Label skip_length_check;
2661 __ lw(value_cid_reg, field_cid_operand);
2662 __ CompareImmediate(value_cid_reg, value_cid);
2663 }
2664 __ BranchIf(EQ, &ok);
2665
2666 // Check if the tracked state of the guarded field can be initialized
2667 // inline. If the field needs a length check, we fall through to the runtime,
2668 // which is responsible for computing the offset of the length field
2669 // based on the class id.
2670 // A length guard will be emitted separately when needed via the
2671 // GuardFieldLength instruction after GuardFieldClass.
2672 if (!field().needs_length_check()) {
2673 // An uninitialized field can be handled inline. Check if the
2674 // field is still uninitialized.
2675 __ lw(TMP, field_cid_operand);
2676 __ CompareImmediate(TMP, kIllegalCid);
2677 __ BranchIf(NE, fail);
2678
2679 if (value_cid == kDynamicCid) {
2680 __ sw(value_cid_reg, field_cid_operand);
2681 __ sw(value_cid_reg, field_nullability_operand);
2682 } else {
2683 __ LoadImmediate(TMP, value_cid);
2684 __ sw(TMP, field_cid_operand);
2685 __ sw(TMP, field_nullability_operand);
2686 }
2687
2688 __ j(&ok);
2689 }
2690
2691 if (deopt == nullptr) {
2692 __ Bind(fail);
2693
2694 __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
2695 compiler::kUnsignedFourBytes);
2696 __ CompareImmediate(TMP, kDynamicCid);
2697 __ BranchIf(EQ, &ok);
2698
2699 __ PushRegisterPair(value_reg, field_reg);
2700 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2701 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2702 __ Drop(2); // Drop the field and the value.
2703 } else {
2704 __ j(fail);
2705 }
2706 } else {
2707 ASSERT(compiler->is_optimizing());
2708 ASSERT(deopt != nullptr);
2709
2710 // Field guard class has been initialized and is known.
2711 if (value_cid == kDynamicCid) {
2712 // Value's class id is not known.
2713 __ TestImmediate(value_reg, kSmiTagMask);
2714
2715 if (field_cid != kSmiCid) {
2716 __ BranchIf(EQ, fail);
2717 __ LoadClassId(value_cid_reg, value_reg);
2718 __ CompareImmediate(value_cid_reg, field_cid);
2719 }
2720
2721 if (field().is_nullable() && (field_cid != kNullCid)) {
2723 __ CompareObject(value_reg, Object::null_object());
2724 }
2725
2726 __ BranchIf(NE, fail);
2727 } else if (value_cid == field_cid) {
2728 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2729 // may sometimes produce this situation after the last Canonicalize pass.
2730 } else {
2731 // Both the value's and the field's class ids are known.
2732 ASSERT(value_cid != nullability);
2733 __ j(fail);
2734 }
2735 }
2736 __ Bind(&ok);
2737}
2738
2739LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2740 bool opt) const {
2741 const intptr_t kNumInputs = 1;
2742 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2743 const intptr_t kNumTemps = 3;
2744 LocationSummary* summary = new (zone)
2745 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2746 summary->set_in(0, Location::RequiresRegister());
2747 // We need temporaries for field object, length offset and expected length.
2748 summary->set_temp(0, Location::RequiresRegister());
2749 summary->set_temp(1, Location::RequiresRegister());
2750 summary->set_temp(2, Location::RequiresRegister());
2751 return summary;
2752 } else {
2753 LocationSummary* summary = new (zone)
2754 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2755 summary->set_in(0, Location::RequiresRegister());
2756 return summary;
2757 }
2758 UNREACHABLE();
2759}
2760
2761void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2762 if (field().guarded_list_length() == Field::kNoFixedLength) {
2763 return; // Nothing to emit.
2764 }
2765
2766 compiler::Label* deopt =
2767 compiler->is_optimizing()
2768 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2769 : nullptr;
2770
2771 const Register value_reg = locs()->in(0).reg();
2772
2773 if (!compiler->is_optimizing() ||
2774 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2775 const Register field_reg = locs()->temp(0).reg();
2776 const Register offset_reg = locs()->temp(1).reg();
2777 const Register length_reg = locs()->temp(2).reg();
2778
2779 compiler::Label ok;
2780
2781 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2782
2783 __ lb(offset_reg,
2784 compiler::FieldAddress(
2785 field_reg, Field::guarded_list_length_in_object_offset_offset()));
2786 __ LoadCompressed(
2787 length_reg,
2788 compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
2789
2790 __ bltz(offset_reg, &ok, compiler::Assembler::kNearJump);
2791
2792 // Load the length from the value. GuardFieldClass already verified that
2793 // the value's class matches the guarded class id of the field.
2794 // offset_reg contains an offset already corrected by -kHeapObjectTag, which
2795 // is why we use Address instead of FieldAddress.
2796 __ add(TMP, value_reg, offset_reg);
2797 __ lx(TMP, compiler::Address(TMP, 0));
2798 __ CompareObjectRegisters(length_reg, TMP);
2799
2800 if (deopt == nullptr) {
2801 __ BranchIf(EQ, &ok);
2802
2803 __ PushRegisterPair(value_reg, field_reg);
2804 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2805 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2806 __ Drop(2); // Drop the field and the value.
2807 } else {
2808 __ BranchIf(NE, deopt);
2809 }
2810
2811 __ Bind(&ok);
2812 } else {
2813 ASSERT(compiler->is_optimizing());
2814 ASSERT(field().guarded_list_length() >= 0);
2815 ASSERT(field().guarded_list_length_in_object_offset() !=
2816 Field::kUnknownLengthOffset);
2817
2818 __ lx(TMP, compiler::FieldAddress(
2819 value_reg, field().guarded_list_length_in_object_offset()));
2820 __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
2821 __ BranchIf(NE, deopt);
2822 }
2823}
2824
2825LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2826 bool opt) const {
2827 const intptr_t kNumInputs = 1;
2828 const intptr_t kNumTemps = 0;
2829 LocationSummary* locs = new (zone)
2830 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2831 locs->set_in(0, Location::RequiresRegister());
2832 return locs;
2833}
2834
2835void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2836 const Register value = locs()->in(0).reg();
2837
2838 compiler->used_static_fields().Add(&field());
2839
2840 __ LoadFromOffset(TMP, THR,
2841 compiler::target::Thread::field_table_values_offset());
2843 // Note: static field ids won't be changed by hot-reload.
2843 __ StoreToOffset(value, TMP, compiler::target::FieldTable::OffsetOf(field()));
2844}
2845
2846LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2847 bool opt) const {
2848 const intptr_t kNumInputs = 3;
2849 const intptr_t kNumTemps = 0;
2850 LocationSummary* summary = new (zone)
2851 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2852 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
2853 summary->set_in(1, Location::RegisterLocation(
2854 TypeTestABI::kInstantiatorTypeArgumentsReg));
2855 summary->set_in(
2856 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
2857 summary->set_out(
2858 0, Location::RegisterLocation(TypeTestABI::kInstanceOfResultReg));
2859 return summary;
2860}
2861
2862void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2863 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2864 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
2865 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
2866
2867 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2868 ASSERT(locs()->out(0).reg() == TypeTestABI::kInstanceOfResultReg);
2869}
2870
2871LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2872 bool opt) const {
2873 const intptr_t kNumInputs = 2;
2874 const intptr_t kNumTemps = 0;
2875 LocationSummary* locs = new (zone)
2876 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2877 locs->set_in(kTypeArgumentsPos,
2878 Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
2879 locs->set_in(kLengthPos,
2880 Location::RegisterLocation(AllocateArrayABI::kLengthReg));
2881 locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
2882 return locs;
2883}
2884
2885// Inlines array allocation for known constant values.
2886static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2887 intptr_t num_elements,
2888 compiler::Label* slow_path,
2889 compiler::Label* done) {
2890 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2891 const intptr_t instance_size = Array::InstanceSize(num_elements);
2892
2893 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2894 AllocateArrayABI::kResultReg, // instance
2895 T3, // end address
2896 T4, T5);
2897 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2898 // T3: new object end address.
2899
2900 // Store the type argument field.
2901 __ StoreCompressedIntoObjectNoBarrier(
2902 AllocateArrayABI::kResultReg,
2903 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2904 Array::type_arguments_offset()),
2905 AllocateArrayABI::kTypeArgumentsReg);
2906
2907 // Set the length field.
2908 __ StoreCompressedIntoObjectNoBarrier(
2909 AllocateArrayABI::kResultReg,
2910 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2911 Array::length_offset()),
2912 AllocateArrayABI::kLengthReg);
2913
2914 // Initialize all array elements to raw_null.
2915 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2916 // T3: new object end address.
2917 // T5: iterator which initially points to the start of the variable
2918 // data area to be initialized.
2919 if (num_elements > 0) {
2920 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2921 __ AddImmediate(T5, AllocateArrayABI::kResultReg,
2922 sizeof(UntaggedArray) - kHeapObjectTag);
2923 if (array_size < (kInlineArraySize * kCompressedWordSize)) {
2924 intptr_t current_offset = 0;
2925 while (current_offset < array_size) {
2926 __ StoreCompressedIntoObjectNoBarrier(
2927 AllocateArrayABI::kResultReg, compiler::Address(T5, current_offset),
2928 NULL_REG);
2929 current_offset += kCompressedWordSize;
2930 }
2931 } else {
2932 compiler::Label end_loop, init_loop;
2933 __ Bind(&init_loop);
2934 __ CompareRegisters(T5, T3);
2935 __ BranchIf(CS, &end_loop, compiler::Assembler::kNearJump);
2936 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2937 compiler::Address(T5, 0), NULL_REG);
2938 __ AddImmediate(T5, kCompressedWordSize);
2939 __ j(&init_loop);
2940 __ Bind(&end_loop);
2941 }
2942 }
2942 }
2943 __ j(done);
2944}
2945
2946void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2947 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
2948 if (type_usage_info != nullptr) {
2949 const Class& list_class =
2950 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
2951 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
2952 type_arguments()->definition());
2953 }
2954
2955 compiler::Label slow_path, done;
2956 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2957 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
2958 num_elements()->BindsToConstant() &&
2959 num_elements()->BoundConstant().IsSmi()) {
2960 const intptr_t length =
2961 Smi::Cast(num_elements()->BoundConstant()).Value();
2962 if (Array::IsValidLength(length)) {
2963 InlineArrayAllocation(compiler, length, &slow_path, &done);
2964 }
2965 }
2966 }
2967
2968 __ Bind(&slow_path);
2969 auto object_store = compiler->isolate_group()->object_store();
2970 const auto& allocate_array_stub =
2971 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2972 compiler->GenerateStubCall(source(), allocate_array_stub,
2973 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2974 env());
2975 __ Bind(&done);
2976}
2977
2978 LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
2979 Zone* zone,
2980 bool opt) const {
2981 ASSERT(opt);
2982 const intptr_t kNumInputs = 0;
2983 const intptr_t kNumTemps = 3;
2984 LocationSummary* locs = new (zone) LocationSummary(
2985 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2986 locs->set_temp(0, Location::RegisterLocation(T1));
2987 locs->set_temp(1, Location::RegisterLocation(T2));
2988 locs->set_temp(2, Location::RegisterLocation(T3));
2989 locs->set_out(0, Location::RegisterLocation(A0));
2990 return locs;
2991}
2992
2993class AllocateContextSlowPath
2994 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2995 public:
2996 explicit AllocateContextSlowPath(
2997 AllocateUninitializedContextInstr* instruction)
2998 : TemplateSlowPathCode(instruction) {}
2999
3000 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3001 __ Comment("AllocateContextSlowPath");
3002 __ Bind(entry_label());
3003
3004 LocationSummary* locs = instruction()->locs();
3005 locs->live_registers()->Remove(locs->out(0));
3006
3007 compiler->SaveLiveRegisters(locs);
3008
3009 auto slow_path_env = compiler->SlowPathEnvironmentFor(
3010 instruction(), /*num_slow_path_args=*/0);
3011 ASSERT(slow_path_env != nullptr);
3012
3013 auto object_store = compiler->isolate_group()->object_store();
3014 const auto& allocate_context_stub = Code::ZoneHandle(
3015 compiler->zone(), object_store->allocate_context_stub());
3016
3017 __ LoadImmediate(T1, instruction()->num_context_variables());
3018 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
3019 UntaggedPcDescriptors::kOther, locs,
3020 instruction()->deopt_id(), slow_path_env);
3021 ASSERT(instruction()->locs()->out(0).reg() == A0);
3022 compiler->RestoreLiveRegisters(instruction()->locs());
3023 __ j(exit_label());
3024 }
3025};
3026
3027 void AllocateUninitializedContextInstr::EmitNativeCode(
3028 FlowGraphCompiler* compiler) {
3029 Register temp0 = locs()->temp(0).reg();
3030 Register temp1 = locs()->temp(1).reg();
3031 Register temp2 = locs()->temp(2).reg();
3032 Register result = locs()->out(0).reg();
3033 // Try to allocate the object.
3034 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
3035 compiler->AddSlowPathCode(slow_path);
3036 intptr_t instance_size = Context::InstanceSize(num_context_variables());
3037
3038 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3039 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
3040 result, // instance
3041 temp0, temp1, temp2);
3042
3043 // Set up the number of context variables field (int32_t).
3044 __ LoadImmediate(temp0, num_context_variables());
3045 __ sw(temp0,
3046 compiler::FieldAddress(result, Context::num_variables_offset()));
3047 } else {
3048 __ Jump(slow_path->entry_label());
3049 }
3050
3051 __ Bind(slow_path->exit_label());
3052}
3053
3054LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
3055 bool opt) const {
3056 const intptr_t kNumInputs = 0;
3057 const intptr_t kNumTemps = 1;
3058 LocationSummary* locs = new (zone)
3059 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3060 locs->set_temp(0, Location::RegisterLocation(T1));
3061 locs->set_out(0, Location::RegisterLocation(A0));
3062 return locs;
3063}
3064
3065void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3066 ASSERT(locs()->temp(0).reg() == T1);
3067 ASSERT(locs()->out(0).reg() == A0);
3068
3069 auto object_store = compiler->isolate_group()->object_store();
3070 const auto& allocate_context_stub =
3071 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
3072 __ LoadImmediate(T1, num_context_variables());
3073 compiler->GenerateStubCall(source(), allocate_context_stub,
3074 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3075 env());
3076}
3077
3078LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
3079 bool opt) const {
3080 const intptr_t kNumInputs = 1;
3081 const intptr_t kNumTemps = 0;
3082 LocationSummary* locs = new (zone)
3083 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3084 locs->set_in(0, Location::RegisterLocation(T5));
3085 locs->set_out(0, Location::RegisterLocation(A0));
3086 return locs;
3087}
3088
3089void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3090 ASSERT(locs()->in(0).reg() == T5);
3091 ASSERT(locs()->out(0).reg() == A0);
3092
3093 auto object_store = compiler->isolate_group()->object_store();
3094 const auto& clone_context_stub =
3095 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
3096 compiler->GenerateStubCall(source(), clone_context_stub,
3097 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
3098 deopt_id(), env());
3099}
3100
3101LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
3102 bool opt) const {
3103 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
3104}
3105
3106void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3107 __ Bind(compiler->GetJumpLabel(this));
3108 compiler->AddExceptionHandler(this);
3109 if (HasParallelMove()) {
3110 parallel_move()->EmitNativeCode(compiler);
3111 }
3112
3113 // Restore SP from FP as we are coming from a throw and the code for
3114 // popping arguments has not been run.
3115 const intptr_t fp_sp_dist =
3116 (compiler::target::frame_layout.first_local_from_fp + 1 -
3117 compiler->StackSize()) *
3118 kWordSize;
3119 ASSERT(fp_sp_dist <= 0);
3120 __ AddImmediate(SP, FP, fp_sp_dist);
3121
3122 if (!compiler->is_optimizing()) {
3123 if (raw_exception_var_ != nullptr) {
3124 __ StoreToOffset(
3125 kExceptionObjectReg, FPREG,
3126 compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
3127 }
3128 if (raw_stacktrace_var_ != nullptr) {
3129 __ StoreToOffset(
3130 kStackTraceObjectReg, FPREG,
3131 compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
3132 }
3133 }
3134}
3135
3136LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
3137 bool opt) const {
3138 const intptr_t kNumInputs = 0;
3139 const intptr_t kNumTemps = 1;
3140 const bool using_shared_stub = UseSharedSlowPathStub(opt);
3141 LocationSummary* summary = new (zone)
3142 LocationSummary(zone, kNumInputs, kNumTemps,
3143 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
3144 : LocationSummary::kCallOnSlowPath);
3145 summary->set_temp(0, Location::RequiresRegister());
3146 return summary;
3147}
3148
3149class CheckStackOverflowSlowPath
3150 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
3151 public:
3152 static constexpr intptr_t kNumSlowPathArgs = 0;
3153
3154 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
3155 : TemplateSlowPathCode(instruction) {}
3156
3157 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3158 auto locs = instruction()->locs();
3159 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
3160 const Register value = locs->temp(0).reg();
3161 __ Comment("CheckStackOverflowSlowPathOsr");
3162 __ Bind(osr_entry_label());
3163 __ li(value, Thread::kOsrRequest);
3164 __ sx(value,
3165 compiler::Address(THR, Thread::stack_overflow_flags_offset()));
3166 }
3167 __ Comment("CheckStackOverflowSlowPath");
3168 __ Bind(entry_label());
3169 const bool using_shared_stub = locs->call_on_shared_slow_path();
3170 if (!using_shared_stub) {
3171 compiler->SaveLiveRegisters(locs);
3172 }
3173 // pending_deoptimization_env_ is needed to generate a runtime call that
3174 // may throw an exception.
3175 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
3176 Environment* env =
3177 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3178 compiler->pending_deoptimization_env_ = env;
3179
3180 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3181 if (using_shared_stub) {
3182 if (!has_frame) {
3183 ASSERT(__ constant_pool_allowed());
3184 __ set_constant_pool_allowed(false);
3185 __ EnterDartFrame(0);
3186 }
3187 auto object_store = compiler->isolate_group()->object_store();
3188 const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
3189 const auto& stub = Code::ZoneHandle(
3190 compiler->zone(),
3191 live_fpu_regs
3192 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
3193 : object_store->stack_overflow_stub_without_fpu_regs_stub());
3194
3195 if (compiler->CanPcRelativeCall(stub)) {
3196 __ GenerateUnRelocatedPcRelativeCall();
3197 compiler->AddPcRelativeCallStubTarget(stub);
3198 } else {
3199 const uword entry_point_offset =
3200 Thread::stack_overflow_shared_stub_entry_point_offset(
3201 locs->live_registers()->FpuRegisterCount() > 0);
3202 __ Call(compiler::Address(THR, entry_point_offset));
3203 }
3204 compiler->RecordSafepoint(locs, kNumSlowPathArgs);
3205 compiler->RecordCatchEntryMoves(env);
3206 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
3207 instruction()->deopt_id(),
3208 instruction()->source());
3209 if (!has_frame) {
3210 __ LeaveDartFrame();
3211 __ set_constant_pool_allowed(true);
3212 }
3213 } else {
3214 ASSERT(has_frame);
3215 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
3216 compiler->EmitCallsiteMetadata(
3217 instruction()->source(), instruction()->deopt_id(),
3218 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
3219 }
3220
3221 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
3222 instruction()->in_loop()) {
3223 // In unoptimized code, record loop stack checks as possible OSR entries.
3224 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
3225 instruction()->deopt_id(),
3226 InstructionSource());
3227 }
3228 compiler->pending_deoptimization_env_ = nullptr;
3229 if (!using_shared_stub) {
3230 compiler->RestoreLiveRegisters(locs);
3231 }
3232 __ j(exit_label());
3233 }
3234
3235 compiler::Label* osr_entry_label() {
3236 ASSERT(IsolateGroup::Current()->use_osr());
3237 return &osr_entry_label_;
3238 }
3239
3240 private:
3241 compiler::Label osr_entry_label_;
3242};
3243
3244void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3245 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3246 compiler->AddSlowPathCode(slow_path);
3247
3248 __ lx(TMP,
3249 compiler::Address(THR, compiler::target::Thread::stack_limit_offset()));
3250 __ bleu(SP, TMP, slow_path->entry_label());
3251 if (compiler->CanOSRFunction() && in_loop()) {
3252 const Register function = locs()->temp(0).reg();
3253 // In unoptimized code check the usage counter to trigger OSR at loop
3254 // stack checks. Use progressively higher thresholds for more deeply
3255 // nested loops to attempt to hit outer loops with OSR when possible.
3256 __ LoadObject(function, compiler->parsed_function().function());
3257 const intptr_t configured_optimization_counter_threshold =
3258 compiler->thread()->isolate_group()->optimization_counter_threshold();
3259 const int32_t threshold =
3260 configured_optimization_counter_threshold * (loop_depth() + 1);
3261 __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
3262 compiler::kFourBytes);
3263 __ addi(TMP, TMP, 1);
3264 __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
3265 compiler::kFourBytes);
3266 __ CompareImmediate(TMP, threshold);
3267 __ BranchIf(GE, slow_path->osr_entry_label());
3268 }
3269 if (compiler->ForceSlowPathForStackOverflow()) {
3270 __ j(slow_path->entry_label());
3271 }
3272 __ Bind(slow_path->exit_label());
3273}
3274
3275static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3276 BinarySmiOpInstr* shift_left) {
3277 const LocationSummary& locs = *shift_left->locs();
3278 const Register left = locs.in(0).reg();
3279 const Register result = locs.out(0).reg();
3280 compiler::Label* deopt =
3281 shift_left->CanDeoptimize()
3282 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3283 ICData::kDeoptBinarySmiOp)
3284 : nullptr;
3285 if (locs.in(1).IsConstant()) {
3286 const Object& constant = locs.in(1).constant();
3287 ASSERT(constant.IsSmi());
3288 // The immediate shift operation takes 6 bits (RV64) or 5 bits (RV32) for the count.
3289 const intptr_t kCountLimit = XLEN - 1;
3290 const intptr_t value = Smi::Cast(constant).Value();
3291 ASSERT((0 < value) && (value < kCountLimit));
3292 __ slli(result, left, value);
3293 if (shift_left->can_overflow()) {
3294 ASSERT(result != left);
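// Note: shifting the result back arithmetically and comparing with the
// original operand detects any significant bits lost by the left shift.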
3295 __ srai(TMP2, result, value);
3296 __ bne(left, TMP2, deopt); // Overflow.
3297 }
3298 return;
3299 }
3300
3301 // Right (locs.in(1)) is not constant.
3302 const Register right = locs.in(1).reg();
3303 Range* right_range = shift_left->right_range();
3304 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3305 // TODO(srdjan): Implement code below for is_truncating().
3306 // If left is constant, we know the maximal allowed size for right.
3307 const Object& obj = shift_left->left()->BoundConstant();
3308 if (obj.IsSmi()) {
3309 const intptr_t left_int = Smi::Cast(obj).Value();
3310 if (left_int == 0) {
3311 __ bltz(right, deopt);
3312 __ mv(result, ZR);
3313 return;
3314 }
3315 const intptr_t max_right =
3316 compiler::target::kSmiBits - Utils::HighestBit(left_int);
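// Note: shifting left_int by max_right or more would push significant bits
// out of the Smi range, so larger shift counts take the deopt path below.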
3317 const bool right_needs_check =
3318 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3319 if (right_needs_check) {
3320 __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
3321 __ BranchIf(CS, deopt);
3322 }
3323 __ SmiUntag(TMP, right);
3324 __ sll(result, left, TMP);
3325 }
3326 return;
3327 }
3328
3329 const bool right_needs_check =
3330 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
3331 if (!shift_left->can_overflow()) {
3332 if (right_needs_check) {
3333 if (!RangeUtils::IsPositive(right_range)) {
3334 ASSERT(shift_left->CanDeoptimize());
3335 __ bltz(right, deopt);
3336 }
3337
3338 compiler::Label done, is_not_zero;
3339 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3340 __ BranchIf(LESS, &is_not_zero, compiler::Assembler::kNearJump);
3341 __ li(result, 0);
3342 __ j(&done, compiler::Assembler::kNearJump);
3343 __ Bind(&is_not_zero);
3344 __ SmiUntag(TMP, right);
3345 __ sll(result, left, TMP);
3346 __ Bind(&done);
3347 } else {
3348 __ SmiUntag(TMP, right);
3349 __ sll(result, left, TMP);
3350 }
3351 } else {
3352 if (right_needs_check) {
3353 ASSERT(shift_left->CanDeoptimize());
3354 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3355 __ BranchIf(CS, deopt);
3356 }
3357 __ SmiUntag(TMP, right);
3358 ASSERT(result != left);
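// Same check as the constant case: shift back and compare with left to
// detect lost bits.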
3359 __ sll(result, left, TMP);
3360 __ sra(TMP, result, TMP);
3361 __ bne(left, TMP, deopt); // Overflow.
3362 }
3363}
3364
3365LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3366 bool opt) const {
3367 const intptr_t kNumInputs = 2;
3368 const intptr_t kNumTemps =
3369 ((op_kind() == Token::kUSHR) || (op_kind() == Token::kMUL)) ? 1 : 0;
3370 LocationSummary* summary = new (zone)
3371 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3372 if (op_kind() == Token::kTRUNCDIV) {
3373 summary->set_in(0, Location::RequiresRegister());
3374 if (RightIsPowerOfTwoConstant()) {
3375 ConstantInstr* right_constant = right()->definition()->AsConstant();
3376 summary->set_in(1, Location::Constant(right_constant));
3377 } else {
3378 summary->set_in(1, Location::RequiresRegister());
3379 }
3380 summary->set_out(0, Location::RequiresRegister());
3381 return summary;
3382 }
3383 if (op_kind() == Token::kMOD) {
3384 summary->set_in(0, Location::RequiresRegister());
3385 summary->set_in(1, Location::RequiresRegister());
3386 summary->set_out(0, Location::RequiresRegister());
3387 return summary;
3388 }
3389 summary->set_in(0, Location::RequiresRegister());
3390 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3391 if (kNumTemps == 1) {
3392 summary->set_temp(0, Location::RequiresRegister());
3393 }
3394 // We make use of 3-operand instructions by not requiring result register
3395 // to be identical to first input register as on Intel.
3396 summary->set_out(0, Location::RequiresRegister());
3397 return summary;
3398}
3399
3400void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3401 if (op_kind() == Token::kSHL) {
3402 EmitSmiShiftLeft(compiler, this);
3403 return;
3404 }
3405
3406 const Register left = locs()->in(0).reg();
3407 const Register result = locs()->out(0).reg();
3408 compiler::Label* deopt = nullptr;
3409 if (CanDeoptimize()) {
3410 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3411 }
3412
3413 if (locs()->in(1).IsConstant()) {
3414 const Object& constant = locs()->in(1).constant();
3415 ASSERT(constant.IsSmi());
3416 const intx_t imm = static_cast<intx_t>(constant.ptr());
3417 switch (op_kind()) {
3418 case Token::kADD: {
3419 if (deopt == nullptr) {
3420 __ AddImmediate(result, left, imm);
3421 } else {
3422 __ AddImmediateBranchOverflow(result, left, imm, deopt);
3423 }
3424 break;
3425 }
3426 case Token::kSUB: {
3427 if (deopt == nullptr) {
3428 __ AddImmediate(result, left, -imm);
3429 } else {
3430 // Negating imm and using AddImmediateSetFlags would not detect the
3431 // overflow when imm == kMinInt64.
3432 __ SubtractImmediateBranchOverflow(result, left, imm, deopt);
3433 }
3434 break;
3435 }
3436 case Token::kMUL: {
3437 // Keep left value tagged and untag right value.
3438 const intptr_t value = Smi::Cast(constant).Value();
3439 if (deopt == nullptr) {
3440 __ LoadImmediate(TMP, value);
3441 __ mul(result, left, TMP);
3442 } else {
3443 __ MultiplyImmediateBranchOverflow(result, left, value, deopt);
3444 }
3445 break;
3446 }
3447 case Token::kTRUNCDIV: {
3448 const intptr_t value = Smi::Cast(constant).Value();
3449 ASSERT(value != kIntptrMin);
3450 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3451 const intptr_t shift_count =
3452 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3453 ASSERT(kSmiTagSize == 1);
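// Truncating division by a power of two: the srai/srli pair materializes a
// bias of (2^shift_count - 1) for negative operands (0 otherwise); adding it
// before the arithmetic shift rounds the quotient toward zero instead of
// toward negative infinity.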
3454 __ srai(TMP, left, XLEN - 1);
3455 ASSERT(shift_count > 1); // 1, -1 case handled above.
3456 const Register temp = TMP2;
3457 __ srli(TMP, TMP, XLEN - shift_count);
3458 __ add(temp, left, TMP);
3459 ASSERT(shift_count > 0);
3460 __ srai(result, temp, shift_count);
3461 if (value < 0) {
3462 __ neg(result, result);
3463 }
3464 __ SmiTag(result);
3465 break;
3466 }
3467 case Token::kBIT_AND:
3468 // No overflow check.
3469 __ AndImmediate(result, left, imm);
3470 break;
3471 case Token::kBIT_OR:
3472 // No overflow check.
3473 __ OrImmediate(result, left, imm);
3474 break;
3475 case Token::kBIT_XOR:
3476 // No overflow check.
3477 __ XorImmediate(result, left, imm);
3478 break;
3479 case Token::kSHR: {
3480 // Asr operation masks the count to 6/5 bits.
3481 const intptr_t kCountLimit = XLEN - 1;
3482 intptr_t value = Smi::Cast(constant).Value();
3483 __ srai(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit));
3484 __ SmiTag(result);
3485 break;
3486 }
3487 case Token::kUSHR: {
3488#if XLEN == 32
3489 const intptr_t value = compiler::target::SmiValue(constant);
3490 ASSERT((value > 0) && (value < 64));
3491 COMPILE_ASSERT(compiler::target::kSmiBits < 32);
3492 // 64-bit representation of left operand value:
3493 //
3494 // ss...sssss s s xxxxxxxxxxxxx
3495 // | | | | | |
3496 // 63 32 31 30 kSmiBits-1 0
3497 //
3498 // Where 's' is a sign bit.
3499 //
3500 // If left operand is negative (sign bit is set), then
3501 // result will fit into Smi range if and only if
3502 // the shift amount >= 64 - kSmiBits.
3503 //
3504 // If left operand is non-negative, the result always
3505 // fits into Smi range.
3506 //
3507 if (value < (64 - compiler::target::kSmiBits)) {
3508 if (deopt != nullptr) {
3509 __ bltz(left, deopt);
3510 } else {
3511 // Operation cannot overflow only if left value is always
3512 // non-negative.
3513 ASSERT(!can_overflow());
3514 }
3515 // At this point left operand is non-negative, so unsigned shift
3516 // can't overflow.
3517 if (value >= compiler::target::kSmiBits) {
3518 __ li(result, 0);
3519 } else {
3520 __ srli(result, left, value + kSmiTagSize);
3521 __ SmiTag(result);
3522 }
3523 } else {
3524 // Shift amount > 32, and the result is guaranteed to fit into Smi.
3525 // Low (Smi) part of the left operand is shifted out.
3526 // High part is filled with sign bits.
3527 __ srai(result, left, 31);
3528 __ srli(result, result, value - 32);
3529 __ SmiTag(result);
3530 }
3531#else
3532 // Lsr operation masks the count to 6 bits, but
3533 // unsigned shifts by >= kBitsPerInt64 are eliminated by
3534 // BinaryIntegerOpInstr::Canonicalize.
3535 const intptr_t kCountLimit = XLEN - 1;
3536 intptr_t value = Smi::Cast(constant).Value();
3537 ASSERT((value >= 0) && (value <= kCountLimit));
3538 __ SmiUntag(TMP, left);
3539 __ srli(TMP, TMP, value);
3540 __ SmiTag(result, TMP);
3541 if (deopt != nullptr) {
3542 __ SmiUntag(TMP2, result);
3543 __ bne(TMP, TMP2, deopt);
3544 }
3545#endif
3546 break;
3547 }
3548 default:
3549 UNREACHABLE();
3550 break;
3551 }
3552 return;
3553 }
3554
3555 const Register right = locs()->in(1).reg();
3556 switch (op_kind()) {
3557 case Token::kADD: {
3558 if (deopt == nullptr) {
3559 __ add(result, left, right);
3560 } else if (RangeUtils::IsPositive(right_range())) {
3561 ASSERT(result != left);
3562 __ add(result, left, right);
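// With a positive right operand the only failure mode is positive overflow,
// which wraps the sum below left.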
3563 __ blt(result, left, deopt);
3564 } else if (RangeUtils::IsNegative(right_range())) {
3565 ASSERT(result != left);
3566 __ add(result, left, right);
3567 __ bgt(result, left, deopt);
3568 } else {
3569 __ AddBranchOverflow(result, left, right, deopt);
3570 }
3571 break;
3572 }
3573 case Token::kSUB: {
3574 if (deopt == nullptr) {
3575 __ sub(result, left, right);
3576 } else if (RangeUtils::IsPositive(right_range())) {
3577 ASSERT(result != left);
3578 __ sub(result, left, right);
3579 __ bgt(result, left, deopt);
3580 } else if (RangeUtils::IsNegative(right_range())) {
3581 ASSERT(result != left);
3582 __ sub(result, left, right);
3583 __ blt(result, left, deopt);
3584 } else {
3585 __ SubtractBranchOverflow(result, left, right, deopt);
3586 }
3587 break;
3588 }
3589 case Token::kMUL: {
3590 const Register temp = locs()->temp(0).reg();
3591 __ SmiUntag(temp, left);
3592 if (deopt == nullptr) {
3593 __ mul(result, temp, right);
3594 } else {
3595 __ MultiplyBranchOverflow(result, temp, right, deopt);
3596 }
3597 break;
3598 }
3599 case Token::kBIT_AND: {
3600 // No overflow check.
3601 __ and_(result, left, right);
3602 break;
3603 }
3604 case Token::kBIT_OR: {
3605 // No overflow check.
3606 __ or_(result, left, right);
3607 break;
3608 }
3609 case Token::kBIT_XOR: {
3610 // No overflow check.
3611 __ xor_(result, left, right);
3612 break;
3613 }
3614 case Token::kTRUNCDIV: {
3615 if (RangeUtils::CanBeZero(right_range())) {
3616 // Handle divide by zero in runtime.
3617 __ beqz(right, deopt);
3618 }
3619 __ SmiUntag(TMP, left);
3620 __ SmiUntag(TMP2, right);
3621 __ div(TMP, TMP, TMP2);
3622 __ SmiTag(result, TMP);
3623
3624 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3625 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3626 // case we cannot tag the result.
3627 __ SmiUntag(TMP2, result);
3628 __ bne(TMP, TMP2, deopt);
3629 }
3630 break;
3631 }
3632 case Token::kMOD: {
3633 if (RangeUtils::CanBeZero(right_range())) {
3634 // Handle divide by zero in runtime.
3635 __ beqz(right, deopt);
3636 }
3637 __ SmiUntag(TMP, left);
3638 __ SmiUntag(TMP2, right);
3639
3640 __ rem(result, TMP, TMP2);
3641
3642 // res = left % right;
3643 // if (res < 0) {
3644 // if (right < 0) {
3645 // res = res - right;
3646 // } else {
3647 // res = res + right;
3648 // }
3649 // }
3650 compiler::Label done, adjust;
3651 __ bgez(result, &done, compiler::Assembler::kNearJump);
3652 // Result is negative, adjust it.
3653 __ bgez(right, &adjust, compiler::Assembler::kNearJump);
3654 __ sub(result, result, TMP2);
3655 __ j(&done, compiler::Assembler::kNearJump);
3656 __ Bind(&adjust);
3657 __ add(result, result, TMP2);
3658 __ Bind(&done);
3659 __ SmiTag(result);
3660 break;
3661 }
3662 case Token::kSHR: {
3663 if (CanDeoptimize()) {
3664 __ bltz(right, deopt);
3665 }
3666 __ SmiUntag(TMP, right);
3667 // asrv[w] operation masks the count to 6/5 bits.
3668 const intptr_t kCountLimit = XLEN - 1;
3669 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3670 __ LoadImmediate(TMP2, kCountLimit);
3671 compiler::Label shift_in_bounds;
3672 __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
3673 __ mv(TMP, TMP2);
3674 __ Bind(&shift_in_bounds);
3675 }
3676 __ SmiUntag(TMP2, left);
3677 __ sra(result, TMP2, TMP);
3678 __ SmiTag(result);
3679 break;
3680 }
3681 case Token::kUSHR: {
3682#if XLEN == 32
3683 compiler::Label done;
3684 __ SmiUntag(TMP, right);
3685 // 64-bit representation of left operand value:
3686 //
3687 // ss...sssss s s xxxxxxxxxxxxx
3688 // | | | | | |
3689 // 63 32 31 30 kSmiBits-1 0
3690 //
3691 // Where 's' is a sign bit.
3692 //
3693 // If left operand is negative (sign bit is set), then
3694 // result will fit into Smi range if and only if
3695 // the shift amount >= 64 - kSmiBits.
3696 //
3697 // If left operand is non-negative, the result always
3698 // fits into Smi range.
3699 //
3700 if (!RangeUtils::OnlyLessThanOrEqualTo(
3701 right_range(), 64 - compiler::target::kSmiBits - 1)) {
3702 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3703 kBitsPerInt64 - 1)) {
3704 ASSERT(result != left);
3705 ASSERT(result != right);
3706 __ li(result, 0);
3707 __ CompareImmediate(TMP, kBitsPerInt64);
3708 // If shift amount >= 64, then result is 0.
3709 __ BranchIf(GE, &done);
3710 }
3711 __ CompareImmediate(TMP, 64 - compiler::target::kSmiBits);
3712 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3713 // Result is guaranteed to fit into Smi range.
3714 // Low (Smi) part of the left operand is shifted out.
3715 // High part is filled with sign bits.
3716 compiler::Label next;
3717 __ BranchIf(LT, &next);
3718 __ subi(TMP, TMP, 32);
3719 __ srai(result, left, 31);
3720 __ srl(result, result, TMP);
3721 __ SmiTag(result);
3722 __ j(&done);
3723 __ Bind(&next);
3724 }
3725 // Shift amount < 64 - kSmiBits.
3726 // If left is negative, then result will not fit into Smi range.
3727 // Also deopt in case of negative shift amount.
3728 if (deopt != nullptr) {
3729 __ bltz(left, deopt);
3730 __ bltz(right, deopt);
3731 } else {
3732 ASSERT(!can_overflow());
3733 }
3734 // At this point left operand is non-negative, so unsigned shift
3735 // can't overflow.
3736 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3737 compiler::target::kSmiBits - 1)) {
3738 ASSERT(result != left);
3739 ASSERT(result != right);
3740 __ li(result, 0);
3741 __ CompareImmediate(TMP, compiler::target::kSmiBits);
3742 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3743 __ BranchIf(GE, &done);
3744 }
3745 // Left operand >= 0, shift amount < kSmiBits < 32.
3746 const Register temp = locs()->temp(0).reg();
3747 __ SmiUntag(temp, left);
3748 __ srl(result, temp, TMP);
3749 __ SmiTag(result);
3750 __ Bind(&done);
3751#elif XLEN == 64
3752 if (CanDeoptimize()) {
3753 __ bltz(right, deopt);
3754 }
3755 __ SmiUntag(TMP, right);
3756 // lsrv operation masks the count to 6 bits.
3757 const intptr_t kCountLimit = XLEN - 1;
3758 COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
3759 compiler::Label done;
3760 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3761 __ LoadImmediate(TMP2, kCountLimit);
3762 compiler::Label shift_in_bounds;
3763 __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
3764 __ mv(result, ZR);
3765 __ j(&done, compiler::Assembler::kNearJump);
3766 __ Bind(&shift_in_bounds);
3767 }
3768 __ SmiUntag(TMP2, left);
3769 __ srl(TMP, TMP2, TMP);
3770 __ SmiTag(result, TMP);
3771 if (deopt != nullptr) {
3772 __ SmiUntag(TMP2, result);
3773 __ bne(TMP, TMP2, deopt);
3774 }
3775 __ Bind(&done);
3776#else
3777 UNIMPLEMENTED();
3778#endif
3779 break;
3780 }
3781 case Token::kDIV: {
3782 // Dispatches to 'Double./'.
3783 // TODO(srdjan): Implement as conversion to double and double division.
3784 UNREACHABLE();
3785 break;
3786 }
3787 case Token::kOR:
3788 case Token::kAND: {
3789 // Flow graph builder has dissected this operation to guarantee correct
3790 // behavior (short-circuit evaluation).
3791 UNREACHABLE();
3792 break;
3793 }
3794 default:
3795 UNREACHABLE();
3796 break;
3797 }
3798}
3799
3800LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3801 bool opt) const {
3802 intptr_t left_cid = left()->Type()->ToCid();
3803 intptr_t right_cid = right()->Type()->ToCid();
3804 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3805 const intptr_t kNumInputs = 2;
3806 const intptr_t kNumTemps = 0;
3807 LocationSummary* summary = new (zone)
3808 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3809 summary->set_in(0, Location::RequiresRegister());
3810 summary->set_in(1, Location::RequiresRegister());
3811 return summary;
3812}
3813
3814void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3815 compiler::Label* deopt =
3816 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3817 intptr_t left_cid = left()->Type()->ToCid();
3818 intptr_t right_cid = right()->Type()->ToCid();
3819 const Register left = locs()->in(0).reg();
3820 const Register right = locs()->in(1).reg();
3821 if (this->left()->definition() == this->right()->definition()) {
3822 __ BranchIfSmi(left, deopt);
3823 } else if (left_cid == kSmiCid) {
3824 __ BranchIfSmi(right, deopt);
3825 } else if (right_cid == kSmiCid) {
3826 __ BranchIfSmi(left, deopt);
3827 } else {
3828 __ or_(TMP, left, right);
3829 __ BranchIfSmi(TMP, deopt);
3830 }
3831}
3832
3833LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3834 const intptr_t kNumInputs = 1;
3835 const intptr_t kNumTemps = 0;
3836 LocationSummary* summary = new (zone) LocationSummary(
3837 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3838 summary->set_in(0, Location::RequiresFpuRegister());
3839 summary->set_out(0, Location::RequiresRegister());
3840 return summary;
3841}
3842
3843void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3844 const Register out_reg = locs()->out(0).reg();
3845 const FRegister value = locs()->in(0).fpu_reg();
3846
3847 BoxAllocationSlowPath::Allocate(compiler, this,
3848 compiler->BoxClassFor(from_representation()),
3849 out_reg, TMP);
3850
3851 switch (from_representation()) {
3852 case kUnboxedDouble:
3853 __ StoreDFieldToOffset(value, out_reg, ValueOffset());
3854 break;
3855 case kUnboxedFloat:
3856 __ fcvtds(FpuTMP, value);
3857 __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
3858 break;
3859 case kUnboxedFloat32x4:
3860 case kUnboxedFloat64x2:
3861 case kUnboxedInt32x4:
3862 UNIMPLEMENTED();
3863 break;
3864 default:
3865 UNREACHABLE();
3866 break;
3867 }
3868}
3869
3870LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3872 const intptr_t kNumInputs = 1;
3873 const intptr_t kNumTemps = 1;
3874 const bool is_floating_point =
3875 !RepresentationUtils::IsUnboxedInteger(representation());
3876 LocationSummary* summary = new (zone)
3877 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3878 summary->set_in(0, Location::RequiresRegister());
3879 summary->set_temp(0, Location::RequiresRegister());
3880
3881 if (is_floating_point) {
3882 summary->set_out(0, Location::RequiresFpuRegister());
3883#if XLEN == 32
3884 } else if (representation() == kUnboxedInt64) {
3885 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
3886 Location::RequiresRegister()));
3887#endif
3888 } else {
3889 summary->set_out(0, Location::RequiresRegister());
3890 }
3891 return summary;
3892}
3893
3894void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3895 const Register box = locs()->in(0).reg();
3896
3897 switch (representation()) {
3898 case kUnboxedInt64: {
3899#if XLEN == 32
3900 PairLocation* result = locs()->out(0).AsPairLocation();
3901 ASSERT(result->At(0).reg() != box);
3902 __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
3903 __ LoadFieldFromOffset(result->At(1).reg(), box,
3904 ValueOffset() + compiler::target::kWordSize);
3905#elif XLEN == 64
3906 const Register result = locs()->out(0).reg();
3907 __ ld(result, compiler::FieldAddress(box, ValueOffset()));
3908#endif
3909 break;
3910 }
3911
3912 case kUnboxedDouble: {
3913 const FRegister result = locs()->out(0).fpu_reg();
3914 __ LoadDFieldFromOffset(result, box, ValueOffset());
3915 break;
3916 }
3917
3918 case kUnboxedFloat: {
3919 const FRegister result = locs()->out(0).fpu_reg();
3920 __ LoadDFieldFromOffset(result, box, ValueOffset());
3921 __ fcvtsd(result, result);
3922 break;
3923 }
3924
3925 case kUnboxedFloat32x4:
3926 case kUnboxedFloat64x2:
3927 case kUnboxedInt32x4: {
3928 UNIMPLEMENTED();
3929 break;
3930 }
3931
3932 default:
3933 UNREACHABLE();
3934 break;
3935 }
3936}
3937
3938void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3939 const Register box = locs()->in(0).reg();
3940
3941 switch (representation()) {
3942#if XLEN == 32
3943 case kUnboxedInt64: {
3944 PairLocation* result = locs()->out(0).AsPairLocation();
3945 __ SmiUntag(result->At(0).reg(), box);
3946 __ srai(result->At(1).reg(), box, XLEN - 1); // SignFill.
3947 break;
3948 }
3949#elif XLEN == 64
3950 case kUnboxedInt32:
3951 case kUnboxedInt64: {
3952 const Register result = locs()->out(0).reg();
3953 __ SmiUntag(result, box);
3954 break;
3955 }
3956#endif
3957
3958 case kUnboxedFloat: {
3959 const FRegister result = locs()->out(0).fpu_reg();
3960 __ SmiUntag(TMP, box);
3961#if XLEN == 32
3962 __ fcvtsw(result, TMP);
3963#elif XLEN == 64
3964 __ fcvtsl(result, TMP);
3965#endif
3966 break;
3967 }
3968 case kUnboxedDouble: {
3969 const FRegister result = locs()->out(0).fpu_reg();
3970 __ SmiUntag(TMP, box);
3971#if XLEN == 32
3972 __ fcvtdw(result, TMP);
3973#elif XLEN == 64
3974 __ fcvtdl(result, TMP);
3975#endif
3976 break;
3977 }
3978
3979 default:
3980 UNREACHABLE();
3981 break;
3982 }
3983}
3984
3985void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3986 const Register value = locs()->in(0).reg();
3987 const Register result = locs()->out(0).reg();
3988 __ LoadInt32FromBoxOrSmi(result, value);
3989}
3990
3991void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3992#if XLEN == 32
3993 const Register box = locs()->in(0).reg();
3994 PairLocation* result = locs()->out(0).AsPairLocation();
3995 ASSERT(result->At(0).reg() != box);
3996 ASSERT(result->At(1).reg() != box);
3997 compiler::Label done;
3998 __ srai(result->At(1).reg(), box, XLEN - 1); // SignFill
3999 __ SmiUntag(result->At(0).reg(), box);
4000 __ BranchIfSmi(box, &done, compiler::Assembler::kNearJump);
4001 EmitLoadFromBox(compiler);
4002 __ Bind(&done);
4003#else
4004 const Register value = locs()->in(0).reg();
4005 const Register result = locs()->out(0).reg();
4006 __ LoadInt64FromBoxOrSmi(result, value);
4007#endif
4008}
4009
4010LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
4011 bool opt) const {
4012 ASSERT((from_representation() == kUnboxedInt32) ||
4013 (from_representation() == kUnboxedUint32));
4014 const intptr_t kNumInputs = 1;
4015 const intptr_t kNumTemps = 0;
4016#if XLEN > 32
4017 // ValueFitsSmi() may be overly conservative and false because we only
4018 // perform range analysis during optimized compilation.
4019 const bool kMayAllocateMint = false;
4020#else
4021 const bool kMayAllocateMint = !ValueFitsSmi();
4022#endif
4023 LocationSummary* summary = new (zone)
4024 LocationSummary(zone, kNumInputs, kNumTemps,
4025 kMayAllocateMint ? LocationSummary::kCallOnSlowPath
4026 : LocationSummary::kNoCall);
4027 summary->set_in(0, Location::RequiresRegister());
4028 summary->set_out(0, Location::RequiresRegister());
4029 return summary;
4030}
4031
4032void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4033 Register value = locs()->in(0).reg();
4034 Register out = locs()->out(0).reg();
4035 ASSERT(value != out);
4036
4037#if XLEN > 32
4038 ASSERT(compiler::target::kSmiBits >= 32);
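// Tag by shifting the 32-bit value to the top of the register and back down
// by one bit less: srai sign-extends the int32 case, srli zero-extends the
// uint32 case.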
4039 __ slli(out, value, XLEN - 32);
4040 if (from_representation() == kUnboxedInt32) {
4041 __ srai(out, out, XLEN - 32 - kSmiTagShift);
4042 } else {
4043 ASSERT(from_representation() == kUnboxedUint32);
4044 __ srli(out, out, XLEN - 32 - kSmiTagShift);
4045 }
4046#elif XLEN == 32
4047 __ slli(out, value, 1);
4048 if (ValueFitsSmi()) {
4049 return;
4050 }
4051 compiler::Label done;
4052 if (from_representation() == kUnboxedInt32) {
4053 __ srai(TMP, out, 1);
4054 __ beq(TMP, value, &done);
4055 } else {
4056 ASSERT(from_representation() == kUnboxedUint32);
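// An unsigned 32-bit value fits in a Smi only if its top two bits are clear.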
4057 __ srli(TMP, value, 30);
4058 __ beqz(TMP, &done);
4059 }
4060
4061 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4062 TMP);
4063 __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
4064 if (from_representation() == kUnboxedInt32) {
4065 __ srai(TMP, value, 31);
4066 __ StoreFieldToOffset(
4067 TMP, out,
4068 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4069 } else {
4070 ASSERT(from_representation() == kUnboxedUint32);
4071 __ StoreFieldToOffset(
4072 ZR, out,
4073 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4074 }
4075 __ Bind(&done);
4076#endif
4077}
4078
4079LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4080 bool opt) const {
4081 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
4082 // FLAG_use_bare_instructions mode and only after VM isolate stubs were
4083 // replaced with isolate-specific stubs.
4084 auto object_store = IsolateGroup::Current()->object_store();
4085 const bool stubs_in_vm_isolate =
4086 object_store->allocate_mint_with_fpu_regs_stub()
4087 ->untag()
4088 ->InVMIsolateHeap() ||
4089 object_store->allocate_mint_without_fpu_regs_stub()
4090 ->untag()
4091 ->InVMIsolateHeap();
4092 const bool shared_slow_path_call =
4093 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
4094 const intptr_t kNumInputs = 1;
4095 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4096 LocationSummary* summary = new (zone) LocationSummary(
4097 zone, kNumInputs, kNumTemps,
4098 ValueFitsSmi()
4099 ? LocationSummary::kNoCall
4100 : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4101 : LocationSummary::kCallOnSlowPath)));
4102#if XLEN == 32
4103 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
4104 Location::RequiresRegister()));
4105#else
4106 summary->set_in(0, Location::RequiresRegister());
4107#endif
4108 if (ValueFitsSmi()) {
4109 summary->set_out(0, Location::RequiresRegister());
4110 } else if (shared_slow_path_call) {
4111 summary->set_out(0,
4112 Location::RegisterLocation(AllocateMintABI::kResultReg));
4113 summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
4114 } else {
4115 summary->set_out(0, Location::RequiresRegister());
4116 summary->set_temp(0, Location::RequiresRegister());
4117 }
4118 return summary;
4119}
4120
4121void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4122#if XLEN == 32
4123 if (ValueFitsSmi()) {
4124 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4125 Register value_lo = value_pair->At(0).reg();
4126 Register out_reg = locs()->out(0).reg();
4127 __ SmiTag(out_reg, value_lo);
4128 return;
4129 }
4130
4131 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4132 Register value_lo = value_pair->At(0).reg();
4133 Register value_hi = value_pair->At(1).reg();
4134 Register out_reg = locs()->out(0).reg();
4135
4136 compiler::Label overflow, done;
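// The int64 fits in a Smi iff tagging the low word is lossless and the high
// word is just the sign fill of the tagged result.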
4137 __ SmiTag(out_reg, value_lo);
4138 __ srai(TMP, out_reg, kSmiTagSize);
4139 __ bne(value_lo, TMP, &overflow, compiler::Assembler::kNearJump);
4140 __ srai(TMP, out_reg, XLEN - 1); // SignFill
4141 __ beq(value_hi, TMP, &done, compiler::Assembler::kNearJump);
4142
4143 __ Bind(&overflow);
4144 if (compiler->intrinsic_mode()) {
4145 __ TryAllocate(compiler->mint_class(),
4146 compiler->intrinsic_slow_path_label(),
4147 compiler::Assembler::kFarJump, out_reg, TMP);
4148 } else if (locs()->call_on_shared_slow_path()) {
4149 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4150 if (!has_frame) {
4151 ASSERT(__ constant_pool_allowed());
4152 __ set_constant_pool_allowed(false);
4153 __ EnterDartFrame(0);
4154 }
4155 auto object_store = compiler->isolate_group()->object_store();
4156 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4157 const auto& stub = Code::ZoneHandle(
4158 compiler->zone(),
4159 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4160 : object_store->allocate_mint_without_fpu_regs_stub());
4161
4162 ASSERT(!locs()->live_registers()->ContainsRegister(
4163 AllocateMintABI::kResultReg));
4164 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4165 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4166 locs(), DeoptId::kNone, extended_env);
4167 if (!has_frame) {
4168 __ LeaveDartFrame();
4169 __ set_constant_pool_allowed(true);
4170 }
4171 } else {
4172 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
4173 out_reg, TMP);
4174 }
4175
4176 __ StoreFieldToOffset(value_lo, out_reg,
4177 compiler::target::Mint::value_offset());
4178 __ StoreFieldToOffset(
4179 value_hi, out_reg,
4180 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4181 __ Bind(&done);
4182#else
4183 Register in = locs()->in(0).reg();
4184 Register out = locs()->out(0).reg();
4185 if (ValueFitsSmi()) {
4186 __ SmiTag(out, in);
4187 return;
4188 }
4189 ASSERT(kSmiTag == 0);
4190 compiler::Label done;
4191
4192 ASSERT(out != in);
4193 __ SmiTag(out, in);
4194 __ SmiUntag(TMP, out);
4195 __ beq(in, TMP, &done); // No overflow.
4196
4197 if (compiler->intrinsic_mode()) {
4198 __ TryAllocate(compiler->mint_class(),
4199 compiler->intrinsic_slow_path_label(),
4200 compiler::Assembler::kFarJump, out, TMP);
4201 } else if (locs()->call_on_shared_slow_path()) {
4202 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4203 if (!has_frame) {
4204 ASSERT(__ constant_pool_allowed());
4205 __ set_constant_pool_allowed(false);
4206 __ EnterDartFrame(0);
4207 }
4208 auto object_store = compiler->isolate_group()->object_store();
4209 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4210 const auto& stub = Code::ZoneHandle(
4211 compiler->zone(),
4212 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4213 : object_store->allocate_mint_without_fpu_regs_stub());
4214
4215 ASSERT(!locs()->live_registers()->ContainsRegister(
4216 AllocateMintABI::kResultReg));
4217 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4218 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4219 locs(), DeoptId::kNone, extended_env);
4220 if (!has_frame) {
4221 __ LeaveDartFrame();
4222 __ set_constant_pool_allowed(true);
4223 }
4224 } else {
4225 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4226 TMP);
4227 }
4228
4229 __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag);
4230 __ Bind(&done);
4231#endif
4232}
4233
4234#if XLEN == 32
4235static void LoadInt32FromMint(FlowGraphCompiler* compiler,
4236 Register mint,
4237 Register result,
4238 compiler::Label* deopt) {
4239 __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
4240 if (deopt != nullptr) {
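// A mint value fits in 32 bits only if its high word equals the sign
// extension of its low word; otherwise deoptimize.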
4241 __ LoadFieldFromOffset(
4242 TMP, mint,
4243 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4244 __ srai(TMP2, result, XLEN - 1);
4245 __ bne(TMP, TMP2, deopt);
4246 }
4247}
4248#endif
4249
4250LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
4251 bool opt) const {
4252 const intptr_t kNumInputs = 1;
4253 const intptr_t kNumTemps = 0;
4254 LocationSummary* summary = new (zone)
4255 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4256 summary->set_in(0, Location::RequiresRegister());
4257 summary->set_out(0, Location::RequiresRegister());
4258 return summary;
4259}
4260
4261void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4262#if XLEN == 32
4263 const intptr_t value_cid = value()->Type()->ToCid();
4264 const Register value = locs()->in(0).reg();
4265 const Register out = locs()->out(0).reg();
4266 compiler::Label* deopt =
4267 CanDeoptimize()
4268 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4269 : nullptr;
4270 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
4271 ASSERT(value != out);
4272
4273 if (value_cid == kSmiCid) {
4274 __ SmiUntag(out, value);
4275 } else if (value_cid == kMintCid) {
4276 LoadInt32FromMint(compiler, value, out, out_of_range);
4277 } else if (!CanDeoptimize()) {
4278 compiler::Label done;
4279 __ SmiUntag(out, value);
4280 __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
4281 LoadInt32FromMint(compiler, value, out, nullptr);
4282 __ Bind(&done);
4283 } else {
4284 compiler::Label done;
4285 __ SmiUntag(out, value);
4286 __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
4287 __ CompareClassId(value, kMintCid, TMP);
4288 __ BranchIf(NE, deopt);
4289 LoadInt32FromMint(compiler, value, out, out_of_range);
4290 __ Bind(&done);
4291 }
4292#elif XLEN == 64
4293 const intptr_t value_cid = value()->Type()->ToCid();
4294 const Register out = locs()->out(0).reg();
4295 const Register value = locs()->in(0).reg();
4296 compiler::Label* deopt =
4297 CanDeoptimize()
4298 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4299 : nullptr;
4300
4301 if (value_cid == kSmiCid) {
4302 __ SmiUntag(out, value);
4303 } else if (value_cid == kMintCid) {
4304 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4305 } else if (!CanDeoptimize()) {
4306 // Type information is not conclusive, but range analysis found
4307 // the value to be in int64 range. Therefore it must be a smi
4308 // or mint value.
4309 ASSERT(is_truncating());
4310 compiler::Label done;
4311 __ SmiUntag(out, value);
4312 __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
4313 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4314 __ Bind(&done);
4315 } else {
4316 compiler::Label done;
4317 __ SmiUntag(out, value);
4318 __ BranchIfSmi(value, &done, compiler::Assembler::kNearJump);
4319 __ CompareClassId(value, kMintCid, TMP);
4320 __ BranchIf(NE, deopt);
4321 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4322 __ Bind(&done);
4323 }
4324
4325 // TODO(vegorov): as it is implemented right now truncating unboxing would
4326 // leave "garbage" in the higher word.
4327 if (!is_truncating() && (deopt != nullptr)) {
4328 ASSERT(representation() == kUnboxedInt32);
4329 __ sextw(TMP, out);
4330 __ bne(TMP, out, deopt);
4331 }
4332#endif
4333}
4334
4335LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4336 bool opt) const {
4337 const intptr_t kNumInputs = 2;
4338 const intptr_t kNumTemps = 0;
4339 LocationSummary* summary = new (zone)
4340 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4341 summary->set_in(0, Location::RequiresFpuRegister());
4342 summary->set_in(1, Location::RequiresFpuRegister());
4343 summary->set_out(0, Location::RequiresFpuRegister());
4344 return summary;
4345}
4346
4347void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4348 const FRegister left = locs()->in(0).fpu_reg();
4349 const FRegister right = locs()->in(1).fpu_reg();
4350 const FRegister result = locs()->out(0).fpu_reg();
4351 if (representation() == kUnboxedDouble) {
4352 switch (op_kind()) {
4353 case Token::kADD:
4354 __ faddd(result, left, right);
4355 break;
4356 case Token::kSUB:
4357 __ fsubd(result, left, right);
4358 break;
4359 case Token::kMUL:
4360 __ fmuld(result, left, right);
4361 break;
4362 case Token::kDIV:
4363 __ fdivd(result, left, right);
4364 break;
4365 case Token::kMIN:
4366 __ fmind(result, left, right);
4367 break;
4368 case Token::kMAX:
4369 __ fmaxd(result, left, right);
4370 break;
4371 default:
4372 UNREACHABLE();
4373 }
4374 } else {
4375 ASSERT(representation() == kUnboxedFloat);
4376 switch (op_kind()) {
4377 case Token::kADD:
4378 __ fadds(result, left, right);
4379 break;
4380 case Token::kSUB:
4381 __ fsubs(result, left, right);
4382 break;
4383 case Token::kMUL:
4384 __ fmuls(result, left, right);
4385 break;
4386 case Token::kDIV:
4387 __ fdivs(result, left, right);
4388 break;
4389 case Token::kMIN:
4390 __ fmins(result, left, right);
4391 break;
4392 case Token::kMAX:
4393 __ fmaxs(result, left, right);
4394 break;
4395 default:
4396 UNREACHABLE();
4397 }
4398 }
4399}
4400
4401LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
4402 bool opt) const {
4403 const intptr_t kNumInputs = 1;
4404 const intptr_t kNumTemps = 0;
4405 LocationSummary* summary = new (zone)
4406 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4407 summary->set_in(0, Location::RequiresFpuRegister());
4408 summary->set_out(0, Location::RequiresRegister());
4409 return summary;
4410}
4411
4412Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
4413 BranchLabels labels) {
4414 ASSERT(compiler->is_optimizing());
4415 const FRegister value = locs()->in(0).fpu_reg();
4416
4417 __ fclassd(TMP, value);
4418 if (op_kind() == MethodRecognizer::kDouble_getIsNaN) {
4419 __ TestImmediate(TMP, kFClassSignallingNan | kFClassQuietNan);
4420 } else if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) {
4421 __ TestImmediate(TMP, kFClassNegInfinity | kFClassPosInfinity);
4422 } else if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
4423 __ TestImmediate(TMP, kFClassNegInfinity | kFClassNegNormal |
4424 kFClassNegSubnormal | kFClassNegZero);
4425 } else {
4426 UNREACHABLE();
4427 }
4428 return kind() == Token::kEQ ? NOT_ZERO : ZERO;
4429}
4430
4431LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4432 UNREACHABLE();
4433 return nullptr;
4434}
4435
4436void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4437 UNREACHABLE();
4438}
4439
4440LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
4441 Zone* zone,
4442 bool opt) const {
4443 const intptr_t kNumTemps = 0;
4444 LocationSummary* summary = new (zone)
4445 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4446 summary->set_in(0, Location::RegisterLocation(A0));
4447 summary->set_in(1, Location::RegisterLocation(A1));
4448 summary->set_in(2, Location::RegisterLocation(A2));
4449 // Can't specify A3 because it is blocked in register allocation as TMP.
4450 summary->set_in(3, Location::Any());
4451 summary->set_out(0, Location::RegisterLocation(A0));
4452 return summary;
4453}
4454
4455void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4456 if (compiler->intrinsic_mode()) {
4457 // Would also need to preserve CODE_REG and ARGS_DESC_REG.
4458 UNIMPLEMENTED();
4459 }
4460
4461 compiler::LeafRuntimeScope rt(compiler->assembler(),
4462 /*frame_size=*/0,
4463 /*preserve_registers=*/false);
4464 if (locs()->in(3).IsRegister()) {
4465 __ mv(A3, locs()->in(3).reg());
4466 } else if (locs()->in(3).IsStackSlot()) {
4467 __ lx(A3, LocationToStackSlotAddress(locs()->in(3)));
4468 } else {
4469 UNIMPLEMENTED();
4470 }
4471 rt.Call(TargetFunction(), TargetFunction().argument_count());
4472}
4473
4474LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4475 bool opt) const {
4476 if (result_cid() == kDoubleCid) {
4477 const intptr_t kNumInputs = 2;
4478 const intptr_t kNumTemps = 0;
4479 LocationSummary* summary = new (zone)
4480 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4481 summary->set_in(0, Location::RequiresFpuRegister());
4482 summary->set_in(1, Location::RequiresFpuRegister());
4483 // Reuse the left register so that code can be made shorter.
4484 summary->set_out(0, Location::SameAsFirstInput());
4485 return summary;
4486 }
4487 ASSERT(result_cid() == kSmiCid);
4488 const intptr_t kNumInputs = 2;
4489 const intptr_t kNumTemps = 0;
4490 LocationSummary* summary = new (zone)
4491 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4492 summary->set_in(0, Location::RequiresRegister());
4493 summary->set_in(1, Location::RequiresRegister());
4494 // Reuse the left register so that code can be made shorter.
4495 summary->set_out(0, Location::SameAsFirstInput());
4496 return summary;
4497}
4498
4499void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4500 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4501 (op_kind() == MethodRecognizer::kMathMax));
4502 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4503 if (result_cid() == kDoubleCid) {
4504 compiler::Label done, returns_nan, are_equal;
4505 const FRegister left = locs()->in(0).fpu_reg();
4506 const FRegister right = locs()->in(1).fpu_reg();
4507 const FRegister result = locs()->out(0).fpu_reg();
4508 if (is_min) {
4509 __ fmind(result, left, right);
4510 } else {
4511 __ fmaxd(result, left, right);
4512 }
4513 return;
4514 }
4515
4516 ASSERT(result_cid() == kSmiCid);
4517 const Register left = locs()->in(0).reg();
4518 const Register right = locs()->in(1).reg();
4519 const Register result = locs()->out(0).reg();
4520 compiler::Label choose_right, done;
4521 if (is_min) {
4522 __ bgt(left, right, &choose_right, compiler::Assembler::kNearJump);
4523 } else {
4524 __ blt(left, right, &choose_right, compiler::Assembler::kNearJump);
4525 }
4526 __ mv(result, left);
4527 __ j(&done, compiler::Assembler::kNearJump);
4528 __ Bind(&choose_right);
4529 __ mv(result, right);
4530 __ Bind(&done);
4531}
4532
4533LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4534 bool opt) const {
4535 const intptr_t kNumInputs = 1;
4536 const intptr_t kNumTemps = 0;
4537 LocationSummary* summary = new (zone)
4538 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4539 summary->set_in(0, Location::RequiresRegister());
4540 // We make use of 3-operand instructions by not requiring result register
4541 // to be identical to first input register as on Intel.
4542 summary->set_out(0, Location::RequiresRegister());
4543 return summary;
4544}
4545
4546void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4547 const Register value = locs()->in(0).reg();
4548 const Register result = locs()->out(0).reg();
4549 switch (op_kind()) {
4550 case Token::kNEGATE: {
4551 compiler::Label* deopt =
4552 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4553 __ neg(result, value);
4554 ASSERT(result != value);
4555 __ beq(result, value, deopt); // Overflow.
4556 break;
4557 }
4558 case Token::kBIT_NOT:
4559 __ not_(result, value);
4560 __ andi(result, result, ~kSmiTagMask); // Remove inverted smi-tag.
4561 break;
4562 default:
4563 UNREACHABLE();
4564 }
4565}
4566
4567LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4568 bool opt) const {
4569 const intptr_t kNumInputs = 1;
4570 const intptr_t kNumTemps = 0;
4571 LocationSummary* summary = new (zone)
4572 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4573 summary->set_in(0, Location::RequiresFpuRegister());
4574 summary->set_out(0, Location::RequiresFpuRegister());
4575 return summary;
4576}
4577
4578void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4579 const FRegister result = locs()->out(0).fpu_reg();
4580 const FRegister value = locs()->in(0).fpu_reg();
4581 if (representation() == kUnboxedDouble) {
4582 switch (op_kind()) {
4583 case Token::kABS:
4584 __ fabsd(result, value);
4585 break;
4586 case Token::kNEGATE:
4587 __ fnegd(result, value);
4588 break;
4589 case Token::kSQRT:
4590 __ fsqrtd(result, value);
4591 break;
4592 case Token::kSQUARE:
4593 __ fmuld(result, value, value);
4594 break;
4595 default:
4596 UNREACHABLE();
4597 }
4598 } else {
4599 ASSERT(representation() == kUnboxedFloat);
4600 switch (op_kind()) {
4601 case Token::kABS:
4602 __ fabss(result, value);
4603 break;
4604 case Token::kNEGATE:
4605 __ fnegs(result, value);
4606 break;
4607 case Token::kRECIPROCAL:
4608 __ li(TMP, 1);
4609 __ fcvtsw(FTMP, TMP);
4610 __ fdivs(result, FTMP, value);
4611 break;
4612 case Token::kRECIPROCAL_SQRT:
4613 __ li(TMP, 1);
4614 __ fcvtsw(FTMP, TMP);
4615 __ fdivs(result, FTMP, value);
4616 __ fsqrts(result, result);
4617 break;
4618 case Token::kSQRT:
4619 __ fsqrts(result, value);
4620 break;
4621 case Token::kSQUARE:
4622 __ fmuls(result, value, value);
4623 break;
4624 default:
4625 UNREACHABLE();
4626 }
4627 }
4628}
4629
4630LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4631 bool opt) const {
4632 const intptr_t kNumInputs = 1;
4633 const intptr_t kNumTemps = 0;
4634 LocationSummary* result = new (zone)
4635 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4636 result->set_in(0, Location::RequiresRegister());
4637 result->set_out(0, Location::RequiresFpuRegister());
4638 return result;
4639}
4640
4641void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4642 const Register value = locs()->in(0).reg();
4643 const FRegister result = locs()->out(0).fpu_reg();
4644 __ fcvtdw(result, value);
4645}
4646
4647LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4648 bool opt) const {
4649 const intptr_t kNumInputs = 1;
4650 const intptr_t kNumTemps = 0;
4651 LocationSummary* result = new (zone)
4652 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4653 result->set_in(0, Location::RequiresRegister());
4654 result->set_out(0, Location::RequiresFpuRegister());
4655 return result;
4656}
4657
4658void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4659 const Register value = locs()->in(0).reg();
4660 const FRegister result = locs()->out(0).fpu_reg();
4661 __ SmiUntag(TMP, value);
4662#if XLEN == 32
4663 __ fcvtdw(result, TMP);
4664#else
4665 __ fcvtdl(result, TMP);
4666#endif
4667}
4668
4669LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4670 bool opt) const {
4671#if XLEN == 32
4672 UNIMPLEMENTED();
4673 return nullptr;
4674#else
4675 const intptr_t kNumInputs = 1;
4676 const intptr_t kNumTemps = 0;
4677 LocationSummary* result = new (zone)
4678 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4679 result->set_in(0, Location::RequiresRegister());
4680 result->set_out(0, Location::RequiresFpuRegister());
4681 return result;
4682#endif
4683}
4684
4685void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4686#if XLEN == 32
4687 UNIMPLEMENTED();
4688#else
4689 const Register value = locs()->in(0).reg();
4690 const FRegister result = locs()->out(0).fpu_reg();
4691 __ fcvtdl(result, value);
4692#endif
4693}
4694
4695LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4696 bool opt) const {
4697 const intptr_t kNumInputs = 1;
4698 const intptr_t kNumTemps = 0;
4699 LocationSummary* result = new (zone) LocationSummary(
4700 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4701 result->set_in(0, Location::RequiresFpuRegister());
4702 result->set_out(0, Location::RequiresRegister());
4703 return result;
4704}
4705
4706void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4707 const Register result = locs()->out(0).reg();
4708 const FRegister value_double = locs()->in(0).fpu_reg();
4709
4710 DoubleToIntegerSlowPath* slow_path =
4711 new DoubleToIntegerSlowPath(this, value_double);
4712 compiler->AddSlowPathCode(slow_path);
4713
4714 RoundingMode rounding;
4715 switch (recognized_kind()) {
4716 case MethodRecognizer::kDoubleToInteger:
4717 rounding = RTZ;
4718 break;
4719 case MethodRecognizer::kDoubleFloorToInt:
4720 rounding = RDN;
4721 break;
4722 case MethodRecognizer::kDoubleCeilToInt:
4723 rounding = RUP;
4724 break;
4725 default:
4726 UNREACHABLE();
4727 }
4728
4729#if XLEN == 32
4730 __ fcvtwd(TMP, value_double, rounding);
4731#else
4732 __ fcvtld(TMP, value_double, rounding);
4733#endif
4734 // Underflow -> minint -> Smi tagging fails
4735 // Overflow, NaN -> maxint -> Smi tagging fails
4736
4737 // Check for overflow and that it fits into Smi.
4738 __ SmiTag(result, TMP);
4739 __ SmiUntag(TMP2, result);
4740 __ bne(TMP, TMP2, slow_path->entry_label());
4741 __ Bind(slow_path->exit_label());
4742}
4743
4744LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4745 bool opt) const {
4746 const intptr_t kNumInputs = 1;
4747 const intptr_t kNumTemps = 0;
4748 LocationSummary* result = new (zone)
4749 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4750 result->set_in(0, Location::RequiresFpuRegister());
4751 result->set_out(0, Location::RequiresRegister());
4752 return result;
4753}
4754
4755void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4756 compiler::Label* deopt =
4757 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4758 const Register result = locs()->out(0).reg();
4759 const FRegister value = locs()->in(0).fpu_reg();
4760
4761#if XLEN == 32
4762 __ fcvtwd(TMP, value, RTZ); // Round To Zero (truncation).
4763#else
4764 __ fcvtld(TMP, value, RTZ); // Round To Zero (truncation).
4765#endif
4766 // Underflow -> minint -> Smi tagging fails
4767 // Overflow, NaN -> maxint -> Smi tagging fails
4768
4769 // Check for overflow and that it fits into Smi.
4770 __ SmiTag(result, TMP);
4771 __ SmiUntag(TMP2, result);
4772 __ bne(TMP, TMP2, deopt);
4773}
4774
4775LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4776 bool opt) const {
4777 const intptr_t kNumInputs = 1;
4778 const intptr_t kNumTemps = 0;
4779 LocationSummary* result = new (zone)
4780 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4781 result->set_in(0, Location::RequiresFpuRegister());
4782 result->set_out(0, Location::RequiresFpuRegister());
4783 return result;
4784}
4785
4786void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4787 const FRegister value = locs()->in(0).fpu_reg();
4788 const FRegister result = locs()->out(0).fpu_reg();
4789 __ fcvtsd(result, value);
4790}
4791
4792LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4793 bool opt) const {
4794 const intptr_t kNumInputs = 1;
4795 const intptr_t kNumTemps = 0;
4796 LocationSummary* result = new (zone)
4797 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4798 result->set_in(0, Location::RequiresFpuRegister());
4799 result->set_out(0, Location::RequiresFpuRegister());
4800 return result;
4801}
4802
4803void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4804 const FRegister value = locs()->in(0).fpu_reg();
4805 const FRegister result = locs()->out(0).fpu_reg();
4806 __ fcvtds(result, value);
4807}
4808
4809LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4810 bool opt) const {
4811 const intptr_t kNumInputs = 2;
4812 const intptr_t kNumTemps = 0;
4813 LocationSummary* result = new (zone)
4814 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4815 result->set_in(0, Location::RequiresFpuRegister());
4816 result->set_in(1, Location::RequiresFpuRegister());
4817 result->set_out(0, Location::RequiresRegister());
4818 return result;
4819}
4820
4821void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4822 const FRegister lhs = locs()->in(0).fpu_reg();
4823 const FRegister rhs = locs()->in(1).fpu_reg();
4824 const Register result = locs()->out(0).reg();
4825
4826 switch (op_kind()) {
4827 case Token::kEQ:
4828 __ feqs(result, lhs, rhs); // lhs op rhs ? 1 : 0
4829 break;
4830 case Token::kLT:
4831 __ flts(result, lhs, rhs);
4832 break;
4833 case Token::kLTE:
4834 __ fles(result, lhs, rhs);
4835 break;
4836 case Token::kGT:
4837 __ fgts(result, lhs, rhs);
4838 break;
4839 case Token::kGTE:
4840 __ fges(result, lhs, rhs);
4841 break;
4842 default:
4843 UNREACHABLE();
4844 }
4845 __ neg(result, result); // lhs op rhs ? -1 : 0
4846}
4847
4848LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4849 bool opt) const {
4850 ASSERT((InputCount() == 1) || (InputCount() == 2));
4851 const intptr_t kNumTemps = 0;
4852 LocationSummary* result = new (zone)
4853 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4854 result->set_in(0, Location::FpuRegisterLocation(FA0));
4855 if (InputCount() == 2) {
4856 result->set_in(1, Location::FpuRegisterLocation(FA1));
4857 }
4858 result->set_out(0, Location::FpuRegisterLocation(FA0));
4859 return result;
4860}
4861
4862void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4863 if (compiler->intrinsic_mode()) {
4864 // Would also need to preserve CODE_REG and ARGS_DESC_REG.
4865 UNIMPLEMENTED();
4866 }
4867
4868 compiler::LeafRuntimeScope rt(compiler->assembler(),
4869 /*frame_size=*/0,
4870 /*preserve_registers=*/false);
4871 ASSERT(locs()->in(0).fpu_reg() == FA0);
4872 if (InputCount() == 2) {
4873 ASSERT(locs()->in(1).fpu_reg() == FA1);
4874 }
4875 rt.Call(TargetFunction(), InputCount());
4876 ASSERT(locs()->out(0).fpu_reg() == FA0);
4877
4878 // TODO(riscv): Special case pow?
4879}
4880
4881LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4882 bool opt) const {
4883 // Only use this instruction in optimized code.
4884 ASSERT(opt);
4885 const intptr_t kNumInputs = 1;
4886 LocationSummary* summary =
4887 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4888 if (representation() == kUnboxedDouble) {
4889 if (index() == 0) {
4890 summary->set_in(
4891 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
4892 } else {
4893 ASSERT(index() == 1);
4894 summary->set_in(
4895 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
4896 }
4897 summary->set_out(0, Location::RequiresFpuRegister());
4898 } else {
4899 ASSERT(representation() == kTagged);
4900 if (index() == 0) {
4901 summary->set_in(
4902 0, Location::Pair(Location::RequiresRegister(), Location::Any()));
4903 } else {
4904 ASSERT(index() == 1);
4905 summary->set_in(
4906 0, Location::Pair(Location::Any(), Location::RequiresRegister()));
4907 }
4908 summary->set_out(0, Location::RequiresRegister());
4909 }
4910 return summary;
4911}
4912
4913void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4914 ASSERT(locs()->in(0).IsPairLocation());
4915 PairLocation* pair = locs()->in(0).AsPairLocation();
4916 Location in_loc = pair->At(index());
4917 if (representation() == kUnboxedDouble) {
4918 const FRegister out = locs()->out(0).fpu_reg();
4919 const FRegister in = in_loc.fpu_reg();
4920 __ fmvd(out, in);
4921 } else {
4922 ASSERT(representation() == kTagged);
4923 const Register out = locs()->out(0).reg();
4924 const Register in = in_loc.reg();
4925 __ mv(out, in);
4926 }
4927}
4928
4929LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
4930 bool opt) const {
4931 const intptr_t kNumInputs = 1;
4932 LocationSummary* summary =
4933 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4934 summary->set_in(0, Location::RequiresRegister());
4935 switch (representation()) {
4936 case kUnboxedDouble:
4937 case kUnboxedFloat:
4938 summary->set_out(0, Location::RequiresFpuRegister());
4939 break;
4940 case kUnboxedInt32:
4941 summary->set_out(0, Location::RequiresRegister());
4942 break;
4943 default:
4944 UNREACHABLE();
4945 }
4946 return summary;
4947}
4948
4949void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4950 Register in = locs()->in(0).reg();
4951 switch (representation()) {
4952 case kUnboxedDouble:
4953 __ fld(locs()->out(0).fpu_reg(),
4954 compiler::FieldAddress(
4955 in, compiler::target::Float64x2::value_offset() +
4956 lane() * sizeof(double)));
4957 break;
4958 case kUnboxedFloat:
4959 __ flw(locs()->out(0).fpu_reg(),
4960 compiler::FieldAddress(
4961 in, compiler::target::Float32x4::value_offset() +
4962 lane() * sizeof(float)));
4963 break;
4964 case kUnboxedInt32:
4965 __ lw(
4966 locs()->out(0).reg(),
4967 compiler::FieldAddress(in, compiler::target::Int32x4::value_offset() +
4968 lane() * sizeof(int32_t)));
4969 break;
4970 default:
4971 UNREACHABLE();
4972 }
4973}
4974
4975LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
4976 bool opt) const {
4977 const intptr_t kNumInputs = InputCount();
4978 LocationSummary* summary = new (zone)
4979 LocationSummary(zone, kNumInputs, 0, LocationSummary::kCallOnSlowPath);
4980 switch (from_representation()) {
4981 case kUnboxedDouble:
4982 summary->set_in(0, Location::RequiresFpuRegister());
4983 summary->set_in(1, Location::RequiresFpuRegister());
4984 break;
4985 case kUnboxedFloat:
4986 summary->set_in(0, Location::RequiresFpuRegister());
4987 summary->set_in(1, Location::RequiresFpuRegister());
4988 summary->set_in(2, Location::RequiresFpuRegister());
4989 summary->set_in(3, Location::RequiresFpuRegister());
4990 break;
4991 case kUnboxedInt32:
4992 summary->set_in(0, Location::RequiresRegister());
4993 summary->set_in(1, Location::RequiresRegister());
4994 summary->set_in(2, Location::RequiresRegister());
4995 summary->set_in(3, Location::RequiresRegister());
4996 break;
4997 default:
4998 UNREACHABLE();
4999 }
5000 summary->set_out(0, Location::RequiresRegister());
5001 return summary;
5002}
5003
5004void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5005 Register result = locs()->out(0).reg();
5006 switch (from_representation()) {
5007 case kUnboxedDouble:
5008 BoxAllocationSlowPath::Allocate(compiler, this,
5009 compiler->float64x2_class(), result, TMP);
5010 for (intptr_t i = 0; i < 2; i++) {
5011 __ fsd(locs()->in(i).fpu_reg(),
5012 compiler::FieldAddress(
5013 result, compiler::target::Float64x2::value_offset() +
5014 i * sizeof(double)));
5015 }
5016 break;
5017 case kUnboxedFloat:
5018 BoxAllocationSlowPath::Allocate(compiler, this,
5019 compiler->float32x4_class(), result, TMP);
5020 for (intptr_t i = 0; i < 4; i++) {
5021 __ fsw(locs()->in(i).fpu_reg(),
5022 compiler::FieldAddress(
5023 result, compiler::target::Float32x4::value_offset() +
5024 i * sizeof(float)));
5025 }
5026 break;
5027 case kUnboxedInt32:
5028 BoxAllocationSlowPath::Allocate(compiler, this, compiler->int32x4_class(),
5029 result, TMP);
5030 for (intptr_t i = 0; i < 4; i++) {
5031 __ sw(locs()->in(i).reg(),
5032 compiler::FieldAddress(result,
5033 compiler::target::Int32x4::value_offset() +
5034 i * sizeof(int32_t)));
5035 }
5036 break;
5037 default:
5038 UNREACHABLE();
5039 }
5040}
5041
5042LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5043 bool opt) const {
5044 const intptr_t kNumInputs = 2;
5045 const intptr_t kNumTemps = 0;
5046 LocationSummary* summary = new (zone)
5047 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5048 summary->set_in(0, Location::RequiresRegister());
5049 summary->set_in(1, Location::RequiresRegister());
5050 // Output is a pair of registers.
5051 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5052 Location::RequiresRegister()));
5053 return summary;
5054}
5055
5056void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5057 ASSERT(CanDeoptimize());
5058 compiler::Label* deopt =
5059 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5060 const Register left = locs()->in(0).reg();
5061 const Register right = locs()->in(1).reg();
5062 ASSERT(locs()->out(0).IsPairLocation());
5063 const PairLocation* pair = locs()->out(0).AsPairLocation();
5064 const Register result_div = pair->At(0).reg();
5065 const Register result_mod = pair->At(1).reg();
5066 if (RangeUtils::CanBeZero(divisor_range())) {
5067 // Handle divide by zero in runtime.
5068 __ beqz(right, deopt);
5069 }
5070
5071 __ SmiUntag(TMP, left);
5072 __ SmiUntag(TMP2, right);
5073
5074 // Macro-op fusion: DIV immediately before REM.
5075 __ div(result_div, TMP, TMP2);
5076 __ rem(result_mod, TMP, TMP2);
5077
5078 // Correct MOD result:
5079 // res = left % right;
5080 // if (res < 0) {
5081 // if (right < 0) {
5082 // res = res - right;
5083 // } else {
5084 // res = res + right;
5085 // }
5086 // }
5087 compiler::Label done, adjust;
5088 __ bgez(result_mod, &done, compiler::Assembler::kNearJump);
5089 // Result is negative, adjust it.
5090 if (RangeUtils::IsNegative(divisor_range())) {
5091 __ sub(result_mod, result_mod, TMP2);
5092 } else if (RangeUtils::IsPositive(divisor_range())) {
5093 __ add(result_mod, result_mod, TMP2);
5094 } else {
5095 __ bgez(right, &adjust, compiler::Assembler::kNearJump);
5096 __ sub(result_mod, result_mod, TMP2);
5097 __ j(&done, compiler::Assembler::kNearJump);
5098 __ Bind(&adjust);
5099 __ add(result_mod, result_mod, TMP2);
5100 }
5101 __ Bind(&done);
5102
5103 if (RangeUtils::Overlaps(divisor_range(), -1, -1)) {
 5104 // Check the corner case of dividing MIN_SMI by -1, in which
5105 // case we cannot tag the result.
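 // The overflow is detected by tagging the quotient and untagging it again:
 // only MIN_SMI / -1 produces a quotient outside the Smi range, so the
 // round-trip changes the value and triggers the deoptimization below.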
5106 __ mv(TMP, result_div);
5107 __ SmiTag(result_div);
5108 __ SmiTag(result_mod);
5109 __ SmiUntag(TMP2, result_div);
5110 __ bne(TMP, TMP2, deopt);
5111 } else {
5112 __ SmiTag(result_div);
5113 __ SmiTag(result_mod);
5114 }
5115}
5116
5117// Should be kept in sync with integers.cc Multiply64Hash
5118#if XLEN == 32
5119static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5120 const Register value_lo,
5121 const Register value_hi,
5122 const Register result) {
5123 ASSERT(value_lo != TMP);
5124 ASSERT(value_lo != TMP2);
5125 ASSERT(value_hi != TMP);
5126 ASSERT(value_hi != TMP2);
5127 ASSERT(result != TMP);
5128 ASSERT(result != TMP2);
5129
5130 __ LoadImmediate(TMP, 0x2d51);
5131 // (value_hi:value_lo) * (0:TMP) =
5132 // value_lo * TMP + (value_hi * TMP) * 2^32 =
5133 // lo32(value_lo * TMP) +
 5134 // (hi32(value_lo * TMP) + lo32(value_hi * TMP)) * 2^32 +
5135 // hi32(value_hi * TMP) * 2^64
5136 __ mulhu(TMP2, value_lo, TMP);
5137 __ mul(result, value_lo, TMP); // (TMP2:result) = lo32 * 0x2d51
5138 __ mulhu(value_lo, value_hi, TMP);
5139 __ mul(TMP, value_hi, TMP); // (value_lo:TMP) = hi32 * 0x2d51
5140 __ add(TMP, TMP, TMP2);
5141 // (0:value_lo:TMP:result) is 128-bit product
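 // The hash xors the three low 32-bit limbs of that product together and
 // masks the result to 30 bits so it always fits in a Smi.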
5142 __ xor_(result, value_lo, result);
5143 __ xor_(result, TMP, result);
5144 __ AndImmediate(result, result, 0x3fffffff);
5145}
5146
5147#else
5148static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5149 const Register value,
5150 const Register result) {
5151 ASSERT(value != TMP);
5152 ASSERT(result != TMP);
5153 __ LoadImmediate(TMP, 0x2d51);
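 // Form the full 128-bit product of value and 0x2d51 (mul gives the low
 // 64 bits, mulhu the high 64 bits), fold the halves together with xor,
 // then fold the upper word into the lower and mask to 30 bits.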
5154 __ mul(result, TMP, value);
5155 __ mulhu(TMP, TMP, value);
5156 __ xor_(result, result, TMP);
5157 __ srai(TMP, result, 32);
5158 __ xor_(result, result, TMP);
5159 __ AndImmediate(result, result, 0x3fffffff);
5160}
5161
5162#endif
5163
5164LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5165 bool opt) const {
5166 const intptr_t kNumInputs = 1;
5167 const intptr_t kNumTemps = 3;
5168 LocationSummary* summary = new (zone) LocationSummary(
5169 zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
5170
5171 summary->set_in(0, Location::RequiresFpuRegister());
5172 summary->set_temp(0, Location::RequiresRegister());
5173 summary->set_temp(1, Location::RequiresRegister());
5174 summary->set_temp(2, Location::RequiresFpuRegister());
5175#if XLEN == 32
5176 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
 5177 Location::RequiresRegister()));
 5178#else
5179 summary->set_out(0, Location::RequiresRegister());
5180#endif
5181 return summary;
5182}
5183
5184void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5185 const FpuRegister value = locs()->in(0).fpu_reg();
5186#if XLEN == 32
5187 const PairLocation* out_pair = locs()->out(0).AsPairLocation();
5188 const Register result = out_pair->At(0).reg();
5189 const Register result_hi = out_pair->At(1).reg();
5190#else
5191 const Register result = locs()->out(0).reg();
5192#endif
5193 const Register temp = locs()->temp(0).reg();
5194 const Register temp1 = locs()->temp(1).reg();
5195 const FpuRegister temp_double = locs()->temp(2).fpu_reg();
5196
5197 compiler::Label hash_double, hash_double_value, hash_integer;
5198 compiler::Label slow_path, done;
5199 __ fclassd(temp, value);
5200 __ TestImmediate(temp, kFClassSignallingNan | kFClassQuietNan |
 5201 kFClassNegInfinity | kFClassPosInfinity);
 5202 __ BranchIf(NOT_ZERO, &hash_double_value);
5203#if XLEN == 32
5204 __ fcvtwd(temp1, value, RTZ);
5205 __ fcvtdw(temp_double, temp1);
5206#else
5207 __ fcvtld(temp1, value, RTZ);
5208 __ fcvtdl(temp_double, temp1);
5209#endif
5210 __ feqd(temp, value, temp_double);
5211 __ CompareImmediate(temp, 1);
5212 __ BranchIf(NE, &hash_double_value);
5213#if XLEN == 32
5214 // integer hash of (0:temp1)
5215 __ srai(temp, temp1, XLEN - 1); // SignFill
5216 __ Bind(&hash_integer);
5217 // integer hash of (temp, temp1)
5218 EmitHashIntegerCodeSequence(compiler, temp1, temp, result);
5219#else
5220 // integer hash of temp1
5221 __ Bind(&hash_integer);
5222 EmitHashIntegerCodeSequence(compiler, temp1, result);
5223#endif
5224 __ j(&done);
5225
5226 __ Bind(&slow_path);
 5227 // The double value potentially doesn't fit into the Smi range, so
 5228 // do the double->int64 conversion via a runtime call.
5229 __ StoreDToOffset(value, THR,
5230 compiler::target::Thread::unboxed_runtime_arg_offset());
5231 {
5232 compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
5233 /*preserve_registers=*/true);
5234 __ mv(A0, THR);
 5235 // Check if the double can be represented as an int64; the runtime call
 5236 // returns a non-zero value if it can.
5237 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
5238 __ mv(TMP, A0);
5239 }
5240#if XLEN == 32
5241 __ LoadFromOffset(temp1, THR,
5242 compiler::target::Thread::unboxed_runtime_arg_offset());
5243 __ LoadFromOffset(temp, THR,
5244 compiler::target::Thread::unboxed_runtime_arg_offset() +
5245 compiler::target::kWordSize);
5246#else
5247 __ fmvxd(temp1, value);
5248 __ srli(temp, temp1, 32);
5249#endif
5250 __ CompareImmediate(TMP, 0);
5251 __ BranchIf(NE, &hash_integer);
5252 __ j(&hash_double);
5253
5254#if XLEN == 32
5255 __ Bind(&hash_double_value);
5256 __ StoreDToOffset(value, THR,
5257 compiler::target::Thread::unboxed_runtime_arg_offset());
5258 __ LoadFromOffset(temp1, THR,
5259 compiler::target::Thread::unboxed_runtime_arg_offset());
5260 __ LoadFromOffset(temp, THR,
5261 compiler::target::Thread::unboxed_runtime_arg_offset() +
5262 compiler::target::kWordSize);
5263#else
5264 __ Bind(&hash_double_value);
5265 __ fmvxd(temp1, value);
5266 __ srli(temp, temp1, 32);
5267#endif
5268
5269 // double hi/lo words are in (temp:temp1)
5270 __ Bind(&hash_double);
5271 __ xor_(result, temp1, temp);
5272 __ AndImmediate(result, result, compiler::target::kSmiMax);
5273
5274 __ Bind(&done);
5275#if XLEN == 32
5276 __ xor_(result_hi, result_hi, result_hi);
5277#endif
5278}
5279
5280LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5281 bool opt) const {
5282 const intptr_t kNumInputs = 1;
5283#if XLEN == 32
5284 const intptr_t kNumTemps = 1;
5285 LocationSummary* summary = new (zone)
5286 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5287 summary->set_temp(0, Location::RequiresRegister());
5288#else
5289 const intptr_t kNumTemps = 0;
5290 LocationSummary* summary = new (zone)
5291 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5292#endif
5293 summary->set_in(0, Location::WritableRegister());
5294 summary->set_out(0, Location::RequiresRegister());
5295 return summary;
5296}
5297
5298void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5299 Register result = locs()->out(0).reg();
5300 Register value = locs()->in(0).reg();
5301
5302#if XLEN == 32
5303 Register value_hi = locs()->temp(0).reg();
5304
5305 if (smi_) {
5306 __ SmiUntag(value);
5307 __ srai(value_hi, value, XLEN - 1); // SignFill
5308 } else {
5309 __ LoadFieldFromOffset(value_hi, value,
5310 Mint::value_offset() + compiler::target::kWordSize);
5311 __ LoadFieldFromOffset(value, value, Mint::value_offset());
5312 }
5313 EmitHashIntegerCodeSequence(compiler, value, value_hi, result);
5314#else
5315 if (smi_) {
5316 __ SmiUntag(value);
5317 } else {
5318 __ LoadFieldFromOffset(value, value, Mint::value_offset());
5319 }
5320 EmitHashIntegerCodeSequence(compiler, value, result);
5321#endif
5322 __ SmiTag(result);
5323}
5324
5325LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5326 comparison()->InitializeLocationSummary(zone, opt);
5327 // Branches don't produce a result.
5328 comparison()->locs()->set_out(0, Location::NoLocation());
5329 return comparison()->locs();
5330}
5331
5332void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5333 comparison()->EmitBranchCode(compiler, this);
5334}
5335
5336LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5337 bool opt) const {
5338 const intptr_t kNumInputs = 1;
5339 const bool need_mask_temp = IsBitTest();
5340 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5341 LocationSummary* summary = new (zone)
5342 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5343 summary->set_in(0, Location::RequiresRegister());
5344 if (!IsNullCheck()) {
5345 summary->set_temp(0, Location::RequiresRegister());
5346 if (need_mask_temp) {
5347 summary->set_temp(1, Location::RequiresRegister());
5348 }
5349 }
5350 return summary;
5351}
5352
5353void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5354 compiler::Label* deopt) {
5355 if (IsDeoptIfNull()) {
5356 __ beq(locs()->in(0).reg(), NULL_REG, deopt);
5357 } else if (IsDeoptIfNotNull()) {
5358 __ bne(locs()->in(0).reg(), NULL_REG, deopt);
5359 } else {
5360 UNREACHABLE();
5361 }
5362}
5363
5364void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5365 intptr_t min,
5366 intptr_t max,
5367 intptr_t mask,
5368 compiler::Label* deopt) {
5369 Register biased_cid = locs()->temp(0).reg();
5370 __ AddImmediate(biased_cid, -min);
5371 __ CompareImmediate(biased_cid, max - min);
5372 __ BranchIf(HI, deopt);
5373
5374 Register bit_reg = locs()->temp(1).reg();
5375 __ LoadImmediate(bit_reg, 1);
5376 __ sll(bit_reg, bit_reg, biased_cid);
5377 __ TestImmediate(bit_reg, mask);
5378 __ BranchIf(EQ, deopt);
5379}
5380
5381int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5382 int bias,
5383 intptr_t cid_start,
5384 intptr_t cid_end,
5385 bool is_last,
5386 compiler::Label* is_ok,
5387 compiler::Label* deopt,
5388 bool use_near_jump) {
5389 Register biased_cid = locs()->temp(0).reg();
5390 Condition no_match, match;
5391 if (cid_start == cid_end) {
5392 __ CompareImmediate(biased_cid, cid_start - bias);
5393 no_match = NE;
5394 match = EQ;
5395 } else {
 5396 // For class ID ranges use a subtract followed by an unsigned
 5397 // comparison, which checks both ends of the range at once.
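 // For example, with a range [5, 9] the biased cid is compared against 4:
 // a cid of 3 becomes -2, which wraps to a large unsigned value, so the
 // single unsigned check below rejects values on either side of the range.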
5398 __ AddImmediate(biased_cid, bias - cid_start);
5399 bias = cid_start;
5400 __ CompareImmediate(biased_cid, cid_end - cid_start);
5401 no_match = HI; // Unsigned higher.
5402 match = LS; // Unsigned lower or same.
5403 }
5404 if (is_last) {
5405 __ BranchIf(no_match, deopt);
5406 } else {
5407 __ BranchIf(match, is_ok);
5408 }
5409 return bias;
5410}
5411
5412LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5413 bool opt) const {
5414 const intptr_t kNumInputs = 1;
5415 const intptr_t kNumTemps = 0;
5416 LocationSummary* summary = new (zone)
5417 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5418 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5419 : Location::WritableRegister());
5420 return summary;
5421}
5422
5423void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5424 Register value = locs()->in(0).reg();
5425 compiler::Label* deopt =
5426 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5427 if (cids_.IsSingleCid()) {
5428 __ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
5429 __ BranchIf(NE, deopt);
5430 } else {
5431 __ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
5432 __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start));
5433 __ BranchIf(HI, deopt); // Unsigned higher.
5434 }
5435}
5436
5437LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5438 bool opt) const {
5439 const intptr_t kNumInputs = 1;
5440 const intptr_t kNumTemps = 0;
5441 LocationSummary* summary = new (zone)
5442 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5443 summary->set_in(0, Location::RequiresRegister());
5444 return summary;
5445}
5446
5447void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5448 const Register value = locs()->in(0).reg();
5449 compiler::Label* deopt =
5450 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5451 __ BranchIfNotSmi(value, deopt);
5452}
5453
5454void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5455 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5456 compiler->AddSlowPathCode(slow_path);
5457
5458 Register value_reg = locs()->in(0).reg();
5459 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
 5460 // in order to be able to allocate it in a register.
5461 __ CompareObject(value_reg, Object::null_object());
5462 __ BranchIf(EQUAL, slow_path->entry_label());
5463}
5464
5465LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5466 bool opt) const {
5467 const intptr_t kNumInputs = 2;
5468 const intptr_t kNumTemps = 0;
5469 LocationSummary* locs = new (zone)
5470 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5471 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
5472 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
5473 return locs;
5474}
5475
5476void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5477 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5478 compiler::Label* deopt =
5479 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5480
5481 Location length_loc = locs()->in(kLengthPos);
5482 Location index_loc = locs()->in(kIndexPos);
5483
5484 const intptr_t index_cid = index()->Type()->ToCid();
5485 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5486 // TODO(srdjan): remove this code once failures are fixed.
5487 if ((Smi::Cast(length_loc.constant()).Value() >
5488 Smi::Cast(index_loc.constant()).Value()) &&
5489 (Smi::Cast(index_loc.constant()).Value() >= 0)) {
5490 // This CheckArrayBoundInstr should have been eliminated.
5491 return;
5492 }
5493 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5494 Smi::Cast(index_loc.constant()).Value()) ||
5495 (Smi::Cast(index_loc.constant()).Value() < 0));
5496 // Unconditionally deoptimize for constant bounds checks because they
 5497 // only occur when the index is out-of-bounds.
5498 __ j(deopt);
5499 return;
5500 }
5501
5502 if (index_loc.IsConstant()) {
5503 const Register length = length_loc.reg();
5504 const Smi& index = Smi::Cast(index_loc.constant());
5505 __ CompareObject(length, index);
5506 __ BranchIf(LS, deopt);
5507 } else if (length_loc.IsConstant()) {
5508 const Smi& length = Smi::Cast(length_loc.constant());
5509 const Register index = index_loc.reg();
5510 if (index_cid != kSmiCid) {
5511 __ BranchIfNotSmi(index, deopt);
5512 }
5513 if (length.Value() == Smi::kMaxValue) {
5514 __ bltz(index, deopt);
5515 } else {
5516 __ CompareObject(index, length);
5517 __ BranchIf(CS, deopt);
5518 }
5519 } else {
5520 const Register length = length_loc.reg();
5521 const Register index = index_loc.reg();
5522 if (index_cid != kSmiCid) {
5523 __ BranchIfNotSmi(index, deopt);
5524 }
5525 __ CompareObjectRegisters(index, length);
5526 __ BranchIf(CS, deopt);
5527 }
5528}
5529
5530LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5531 bool opt) const {
5532 const intptr_t kNumInputs = 1;
5533 const intptr_t kNumTemps = 0;
5534 LocationSummary* locs = new (zone) LocationSummary(
5535 zone, kNumInputs, kNumTemps,
5536 UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
 5537 : LocationSummary::kCallOnSlowPath);
 5538 locs->set_in(kReceiver, Location::RequiresRegister());
5539 return locs;
5540}
5541
5542void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5543 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5544 compiler->AddSlowPathCode(slow_path);
5545 __ lbu(TMP, compiler::FieldAddress(locs()->in(0).reg(),
5546 compiler::target::Object::tags_offset()));
 5547 // The immutable bit is in the first byte of the object's tags.
5548 ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
5549 __ andi(TMP, TMP, 1 << compiler::target::UntaggedObject::kImmutableBit);
5550 __ bnez(TMP, slow_path->entry_label());
5551}
5552
5553class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
5554 public:
5555 Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
5556 Register divisor,
5557 Range* divisor_range,
5558 Register tmp,
5559 Register out)
5560 : ThrowErrorSlowPathCode(instruction,
5561 kIntegerDivisionByZeroExceptionRuntimeEntry),
5562 is_mod_(instruction->op_kind() == Token::kMOD),
5563 divisor_(divisor),
5564 divisor_range_(divisor_range),
5565 tmp_(tmp),
5566 out_(out),
5567 adjust_sign_label_() {}
5568
5569 void EmitNativeCode(FlowGraphCompiler* compiler) override {
5570 // Handle modulo/division by zero, if needed. Use superclass code.
5571 if (has_divide_by_zero()) {
5572 ThrowErrorSlowPathCode::EmitNativeCode(compiler);
5573 } else {
5574 __ Bind(entry_label()); // not used, but keeps destructor happy
5575 if (compiler::Assembler::EmittingComments()) {
5576 __ Comment("slow path %s operation (no throw)", name());
5577 }
5578 }
5579 // Adjust modulo for negative sign, optimized for known ranges.
5580 // if (divisor < 0)
5581 // out -= divisor;
5582 // else
5583 // out += divisor;
5584 if (has_adjust_sign()) {
5585 __ Bind(adjust_sign_label());
5586 if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
5587 // General case.
5588 compiler::Label adjust, done;
5589 __ bgez(divisor_, &adjust, compiler::Assembler::kNearJump);
5590 __ sub(out_, out_, divisor_);
5591 __ j(&done, compiler::Assembler::kNearJump);
5592 __ Bind(&adjust);
5593 __ add(out_, out_, divisor_);
5594 __ Bind(&done);
5595 } else if (divisor_range_->IsPositive()) {
5596 // Always positive.
5597 __ add(out_, out_, divisor_);
5598 } else {
5599 // Always negative.
5600 __ sub(out_, out_, divisor_);
5601 }
5602 __ j(exit_label());
5603 }
5604 }
5605
5606 const char* name() override { return "int64 divide"; }
5607
5608 bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
5609
5610 bool has_adjust_sign() { return is_mod_; }
5611
5612 bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
5613
5614 compiler::Label* adjust_sign_label() {
5615 ASSERT(has_adjust_sign());
5616 return &adjust_sign_label_;
5617 }
5618
5619 private:
5620 bool is_mod_;
5621 Register divisor_;
5622 Range* divisor_range_;
5623 Register tmp_;
5624 Register out_;
5625 compiler::Label adjust_sign_label_;
5626};
5627
5628#if XLEN == 64
5629static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
5630 BinaryInt64OpInstr* instruction,
5631 Token::Kind op_kind,
5632 Register left,
 5633 Register right,
 5634 Register tmp,
5635 Register out) {
5636 ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
5637
5638 // TODO(riscv): Is it worth copying the magic constant optimization from the
5639 // other architectures?
5640
5641 // Prepare a slow path.
5642 Range* right_range = instruction->right()->definition()->range();
5643 Int64DivideSlowPath* slow_path =
5644 new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
5645
5646 // Handle modulo/division by zero exception on slow path.
5647 if (slow_path->has_divide_by_zero()) {
5648 __ beqz(right, slow_path->entry_label());
5649 }
5650
5651 // Perform actual operation
5652 // out = left % right
5653 // or
5654 // out = left / right.
5655 if (op_kind == Token::kMOD) {
5656 __ rem(out, left, right);
5657 // For the % operator, the rem instruction does not
5658 // quite do what we want. Adjust for sign on slow path.
5659 __ bltz(out, slow_path->adjust_sign_label());
5660 } else {
5661 __ div(out, left, right);
5662 }
5663
5664 if (slow_path->is_needed()) {
5665 __ Bind(slow_path->exit_label());
5666 compiler->AddSlowPathCode(slow_path);
5667 }
5668}
5669#endif
5670
5671LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5672 bool opt) const {
5673#if XLEN == 32
5674 // TODO(riscv): Allow constants for the RHS of bitwise operators if both
5675 // hi and lo components are IType immediates.
5676 const intptr_t kNumInputs = 2;
5677 const intptr_t kNumTemps = 0;
5678 LocationSummary* summary = new (zone)
5679 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5680 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
 5681 Location::RequiresRegister()));
 5682 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
 5683 Location::RequiresRegister()));
 5684 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
 5685 Location::RequiresRegister()));
 5686 return summary;
5687#else
5688 switch (op_kind()) {
5689 case Token::kMOD:
5690 case Token::kTRUNCDIV: {
5691 const intptr_t kNumInputs = 2;
5692 const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
5693 LocationSummary* summary = new (zone) LocationSummary(
5694 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5695 summary->set_in(0, Location::RequiresRegister());
5696 summary->set_in(1, Location::RequiresRegister());
5697 summary->set_out(0, Location::RequiresRegister());
5698 if (kNumTemps == 1) {
5699 summary->set_temp(0, Location::RequiresRegister());
5700 }
5701 return summary;
5702 }
5703 default: {
5704 const intptr_t kNumInputs = 2;
5705 const intptr_t kNumTemps = 0;
5706 LocationSummary* summary = new (zone) LocationSummary(
5707 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5708 summary->set_in(0, Location::RequiresRegister());
5709 summary->set_in(1, LocationRegisterOrConstant(right()));
5710 summary->set_out(0, Location::RequiresRegister());
5711 return summary;
5712 }
5713 }
5714#endif
5715}
5716
5717void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5718#if XLEN == 32
5719 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5720 Register left_lo = left_pair->At(0).reg();
5721 Register left_hi = left_pair->At(1).reg();
5722 PairLocation* right_pair = locs()->in(1).AsPairLocation();
5723 Register right_lo = right_pair->At(0).reg();
5724 Register right_hi = right_pair->At(1).reg();
5725 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5726 Register out_lo = out_pair->At(0).reg();
5727 Register out_hi = out_pair->At(1).reg();
5728 ASSERT(!can_overflow());
5729 ASSERT(!CanDeoptimize());
5730
5731 switch (op_kind()) {
5732 case Token::kBIT_AND: {
5733 __ and_(out_lo, left_lo, right_lo);
5734 __ and_(out_hi, left_hi, right_hi);
5735 break;
5736 }
5737 case Token::kBIT_OR: {
5738 __ or_(out_lo, left_lo, right_lo);
5739 __ or_(out_hi, left_hi, right_hi);
5740 break;
5741 }
5742 case Token::kBIT_XOR: {
5743 __ xor_(out_lo, left_lo, right_lo);
5744 __ xor_(out_hi, left_hi, right_hi);
5745 break;
5746 }
5747 case Token::kADD: {
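 // The carry out of the low-word addition is recovered as an unsigned
 // comparison: (left_lo + right_lo) mod 2^32 is below right_lo exactly
 // when the addition wrapped, and that bit is added into the high word.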
5748 __ add(out_hi, left_hi, right_hi);
5749 __ add(out_lo, left_lo, right_lo);
5750 __ sltu(TMP, out_lo, right_lo); // Carry
5751 __ add(out_hi, out_hi, TMP);
5752 break;
5753 }
5754 case Token::kSUB: {
5755 __ sltu(TMP, left_lo, right_lo); // Borrow
5756 __ sub(out_hi, left_hi, right_hi);
5757 __ sub(out_hi, out_hi, TMP);
5758 __ sub(out_lo, left_lo, right_lo);
5759 break;
5760 }
5761 case Token::kMUL: {
5762 // TODO(riscv): Fix ordering for macro-op fusion.
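 // Low 64 bits of the 64x64 product from 32-bit halves:
 // out_hi = hi32(left_lo*right_lo) + lo32(left_hi*right_lo)
 //          + lo32(left_lo*right_hi), out_lo = lo32(left_lo*right_lo);
 // the remaining partial products only affect bits 64 and above.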
5763 __ mul(out_lo, right_lo, left_hi);
5764 __ mulhu(out_hi, right_lo, left_lo);
5765 __ add(out_lo, out_lo, out_hi);
5766 __ mul(out_hi, right_hi, left_lo);
5767 __ add(out_hi, out_hi, out_lo);
5768 __ mul(out_lo, right_lo, left_lo);
5769 break;
5770 }
5771 default:
5772 UNREACHABLE();
5773 }
5774#else
5775 ASSERT(!can_overflow());
5776 ASSERT(!CanDeoptimize());
5777
5778 const Register left = locs()->in(0).reg();
5779 const Location right = locs()->in(1);
5780 const Register out = locs()->out(0).reg();
5781
5782 if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
5783 Register tmp =
5784 (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister;
5785 EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
5786 out);
5787 return;
5788 } else if (op_kind() == Token::kMUL) {
5789 Register r = TMP;
5790 if (right.IsConstant()) {
5791 int64_t value;
5792 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
 5793 RELEASE_ASSERT(ok);
 5794 __ LoadImmediate(r, value);
5795 } else {
5796 r = right.reg();
5797 }
5798 __ mul(out, left, r);
5799 return;
5800 }
5801
5802 if (right.IsConstant()) {
5803 int64_t value;
5804 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
 5805 RELEASE_ASSERT(ok);
 5806 switch (op_kind()) {
5807 case Token::kADD:
5808 __ AddImmediate(out, left, value);
5809 break;
5810 case Token::kSUB:
5811 __ AddImmediate(out, left, -value);
5812 break;
5813 case Token::kBIT_AND:
5814 __ AndImmediate(out, left, value);
5815 break;
5816 case Token::kBIT_OR:
5817 __ OrImmediate(out, left, value);
5818 break;
5819 case Token::kBIT_XOR:
5820 __ XorImmediate(out, left, value);
5821 break;
5822 default:
5823 UNREACHABLE();
5824 }
5825 } else {
5826 switch (op_kind()) {
5827 case Token::kADD:
5828 __ add(out, left, right.reg());
5829 break;
5830 case Token::kSUB:
5831 __ sub(out, left, right.reg());
5832 break;
5833 case Token::kBIT_AND:
5834 __ and_(out, left, right.reg());
5835 break;
5836 case Token::kBIT_OR:
5837 __ or_(out, left, right.reg());
5838 break;
5839 case Token::kBIT_XOR:
5840 __ xor_(out, left, right.reg());
5841 break;
5842 default:
5843 UNREACHABLE();
5844 }
5845 }
5846#endif
5847}
5848
5849#if XLEN == 32
5850static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5851 Token::Kind op_kind,
5852 Register out_lo,
5853 Register out_hi,
5854 Register left_lo,
5855 Register left_hi,
5856 const Object& right) {
5857 const int64_t shift = Integer::Cast(right).AsInt64Value();
5858 ASSERT(shift >= 0);
5859
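 // On RV32 the int64 value lives in a register pair, so each 64-bit shift
 // is assembled from 32-bit shifts: for counts below 32 the vacated bits of
 // one half are filled in from the other half, and for counts of 32 or more
 // only one half contributes to the result.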
5860 switch (op_kind) {
5861 case Token::kSHR: {
5862 if (shift < 32) {
5863 __ slli(out_lo, left_hi, 32 - shift);
5864 __ srli(TMP, left_lo, shift);
5865 __ or_(out_lo, out_lo, TMP);
5866 __ srai(out_hi, left_hi, shift);
5867 } else {
5868 if (shift == 32) {
5869 __ mv(out_lo, left_hi);
5870 } else if (shift < 64) {
5871 __ srai(out_lo, left_hi, shift - 32);
5872 } else {
5873 __ srai(out_lo, left_hi, 31);
5874 }
5875 __ srai(out_hi, left_hi, 31);
5876 }
5877 break;
5878 }
5879 case Token::kUSHR: {
5880 ASSERT(shift < 64);
5881 if (shift < 32) {
5882 __ slli(out_lo, left_hi, 32 - shift);
5883 __ srli(TMP, left_lo, shift);
5884 __ or_(out_lo, out_lo, TMP);
5885 __ srli(out_hi, left_hi, shift);
5886 } else {
5887 if (shift == 32) {
5888 __ mv(out_lo, left_hi);
5889 } else {
5890 __ srli(out_lo, left_hi, shift - 32);
5891 }
5892 __ li(out_hi, 0);
5893 }
5894 break;
5895 }
5896 case Token::kSHL: {
5897 ASSERT(shift >= 0);
5898 ASSERT(shift < 64);
5899 if (shift < 32) {
5900 __ srli(out_hi, left_lo, 32 - shift);
5901 __ slli(TMP, left_hi, shift);
5902 __ or_(out_hi, out_hi, TMP);
5903 __ slli(out_lo, left_lo, shift);
5904 } else {
5905 if (shift == 32) {
5906 __ mv(out_hi, left_lo);
5907 } else {
5908 __ slli(out_hi, left_lo, shift - 32);
5909 }
5910 __ li(out_lo, 0);
5911 }
5912 break;
5913 }
5914 default:
5915 UNREACHABLE();
5916 }
5917}
5918#else
5919static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5920 Token::Kind op_kind,
5921 Register out,
5922 Register left,
5923 const Object& right) {
5924 const int64_t shift = Integer::Cast(right).AsInt64Value();
5925 ASSERT(shift >= 0);
5926 switch (op_kind) {
5927 case Token::kSHR: {
5928 __ srai(out, left, Utils::Minimum<int64_t>(shift, XLEN - 1));
5929 break;
5930 }
5931 case Token::kUSHR: {
5932 ASSERT(shift < 64);
5933 __ srli(out, left, shift);
5934 break;
5935 }
5936 case Token::kSHL: {
5937 ASSERT(shift < 64);
5938 __ slli(out, left, shift);
5939 break;
5940 }
5941 default:
5942 UNREACHABLE();
5943 }
5944}
5945#endif
5946
5947#if XLEN == 32
5948static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
5949 Token::Kind op_kind,
5950 Register out_lo,
5951 Register out_hi,
5952 Register left_lo,
5953 Register left_hi,
5954 Register right) {
5955 // TODO(riscv): Review.
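 // The shift count is in [0, 63] here; out-of-range counts are diverted to
 // a slow path or deopt by the callers. TMP initially holds 32 so that
 // 32 - right and right - 32 can be formed for the cross-half shifts.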
5956 switch (op_kind) {
5957 case Token::kSHR: {
5958 compiler::Label big_shift, done;
5959 __ li(TMP, 32);
5960 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
5961
5962 // 0 <= right < 32
5963 __ srl(out_lo, left_lo, right);
5964 __ sra(out_hi, left_hi, right);
 5965 __ beqz(right, &done, compiler::Assembler::kNearJump);
 5966 __ sub(TMP, TMP, right);
5967 __ sll(TMP2, left_hi, TMP);
5968 __ or_(out_lo, out_lo, TMP2);
 5969 __ j(&done, compiler::Assembler::kNearJump);
 5970
5971 // 32 <= right < 64
5972 __ Bind(&big_shift);
5973 __ sub(TMP, right, TMP);
5974 __ sra(out_lo, left_hi, TMP);
5975 __ srai(out_hi, left_hi, XLEN - 1); // SignFill
5976 __ Bind(&done);
5977 break;
5978 }
5979 case Token::kUSHR: {
5980 compiler::Label big_shift, done;
5981 __ li(TMP, 32);
5982 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
5983
5984 // 0 <= right < 32
5985 __ srl(out_lo, left_lo, right);
5986 __ srl(out_hi, left_hi, right);
 5987 __ beqz(right, &done, compiler::Assembler::kNearJump);
 5988 __ sub(TMP, TMP, right);
5989 __ sll(TMP2, left_hi, TMP);
5990 __ or_(out_lo, out_lo, TMP2);
 5991 __ j(&done, compiler::Assembler::kNearJump);
 5992
5993 // 32 <= right < 64
5994 __ Bind(&big_shift);
5995 __ sub(TMP, right, TMP);
5996 __ srl(out_lo, left_hi, TMP);
5997 __ li(out_hi, 0);
5998 __ Bind(&done);
5999 break;
6000 }
6001 case Token::kSHL: {
6002 compiler::Label big_shift, done;
6003 __ li(TMP, 32);
6004 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
6005
6006 // 0 <= right < 32
6007 __ sll(out_lo, left_lo, right);
6008 __ sll(out_hi, left_hi, right);
 6009 __ beqz(right, &done, compiler::Assembler::kNearJump);
 6010 __ sub(TMP, TMP, right);
6011 __ srl(TMP2, left_lo, TMP);
6012 __ or_(out_hi, out_hi, TMP2);
 6013 __ j(&done, compiler::Assembler::kNearJump);
 6014
6015 // 32 <= right < 64
6016 __ Bind(&big_shift);
6017 __ sub(TMP, right, TMP);
6018 __ sll(out_hi, left_lo, TMP);
6019 __ li(out_lo, 0);
6020 __ Bind(&done);
6021 break;
6022 }
6023 default:
6024 UNREACHABLE();
6025 }
6026}
6027#else
6028static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
6029 Token::Kind op_kind,
6030 Register out,
6031 Register left,
6032 Register right) {
6033 switch (op_kind) {
6034 case Token::kSHR: {
6035 __ sra(out, left, right);
6036 break;
6037 }
6038 case Token::kUSHR: {
6039 __ srl(out, left, right);
6040 break;
6041 }
6042 case Token::kSHL: {
6043 __ sll(out, left, right);
6044 break;
6045 }
6046 default:
6047 UNREACHABLE();
6048 }
6049}
6050#endif
6051
6052static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6053 Token::Kind op_kind,
6054 Register out,
6055 Register left,
6056 const Object& right) {
6057 const int64_t shift = Integer::Cast(right).AsInt64Value();
6058 ASSERT(shift >= 0);
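 // A uint32 shifted by 32 or more is always zero; smaller counts use the
 // 32-bit shift forms (the W-suffixed instructions on RV64).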
6059 if (shift >= 32) {
6060 __ li(out, 0);
6061 } else {
6062 switch (op_kind) {
6063 case Token::kSHR:
6064 case Token::kUSHR:
6065#if XLEN == 32
6066 __ srli(out, left, shift);
6067#else
6068 __ srliw(out, left, shift);
6069#endif
6070 break;
6071 case Token::kSHL:
6072#if XLEN == 32
6073 __ slli(out, left, shift);
6074#else
6075 __ slliw(out, left, shift);
6076#endif
6077 break;
6078 default:
6079 UNREACHABLE();
6080 }
6081 }
6082}
6083
6084static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
6085 Token::Kind op_kind,
6086 Register out,
6087 Register left,
6088 Register right) {
6089 switch (op_kind) {
6090 case Token::kSHR:
6091 case Token::kUSHR:
6092#if XLEN == 32
6093 __ srl(out, left, right);
6094#else
6095 __ srlw(out, left, right);
6096#endif
6097 break;
6098 case Token::kSHL:
6099#if XLEN == 32
6100 __ sll(out, left, right);
6101#else
6102 __ sllw(out, left, right);
6103#endif
6104 break;
6105 default:
6106 UNREACHABLE();
6107 }
6108}
6109
6110class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6111 public:
6112 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
6113 : ThrowErrorSlowPathCode(instruction,
6114 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6115
6116 const char* name() override { return "int64 shift"; }
6117
6118 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6119#if XLEN == 32
6120 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
6121 Register left_hi = left_pair->At(1).reg();
6122 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6123 Register right_lo = right_pair->At(0).reg();
6124 Register right_hi = right_pair->At(1).reg();
6125 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
6126 Register out_lo = out_pair->At(0).reg();
6127 Register out_hi = out_pair->At(1).reg();
6128
6129 compiler::Label throw_error;
6130 __ bltz(right_hi, &throw_error);
6131
6132 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6133 case Token::kSHR:
6134 __ srai(out_hi, left_hi, compiler::target::kBitsPerWord - 1);
6135 __ mv(out_lo, out_hi);
6136 break;
6137 case Token::kUSHR:
6138 case Token::kSHL: {
6139 __ li(out_lo, 0);
6140 __ li(out_hi, 0);
6141 break;
6142 }
6143 default:
6144 UNREACHABLE();
6145 }
6146
6147 __ j(exit_label());
6148
6149 __ Bind(&throw_error);
6150
6151 // Can't pass unboxed int64 value directly to runtime call, as all
6152 // arguments are expected to be tagged (boxed).
6153 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6154 // TODO(dartbug.com/33549): Clean this up when unboxed values
6155 // could be passed as arguments.
6156 __ StoreToOffset(right_lo, THR,
6157 compiler::target::Thread::unboxed_runtime_arg_offset());
6158 __ StoreToOffset(right_hi, THR,
6159 compiler::target::Thread::unboxed_runtime_arg_offset() +
6160 compiler::target::kWordSize);
6161#else
6162 const Register left = instruction()->locs()->in(0).reg();
6163 const Register right = instruction()->locs()->in(1).reg();
6164 const Register out = instruction()->locs()->out(0).reg();
6165 ASSERT((out != left) && (out != right));
6166
6167 compiler::Label throw_error;
6168 __ bltz(right, &throw_error);
6169
6170 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6171 case Token::kSHR:
6172 __ srai(out, left, XLEN - 1);
6173 break;
6174 case Token::kUSHR:
6175 case Token::kSHL:
6176 __ mv(out, ZR);
6177 break;
6178 default:
6179 UNREACHABLE();
6180 }
6181 __ j(exit_label());
6182
6183 __ Bind(&throw_error);
6184
6185 // Can't pass unboxed int64 value directly to runtime call, as all
6186 // arguments are expected to be tagged (boxed).
6187 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6188 // TODO(dartbug.com/33549): Clean this up when unboxed values
6189 // could be passed as arguments.
6190 __ sx(right,
6191 compiler::Address(
6192 THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
6193#endif
6194 }
6195};
6196
6197LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6198 bool opt) const {
6199 const intptr_t kNumInputs = 2;
6200 const intptr_t kNumTemps = 0;
6201#if XLEN == 32
6202 LocationSummary* summary = new (zone) LocationSummary(
6203 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6204 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
 6205 Location::RequiresRegister()));
 6206 if (RangeUtils::IsPositive(shift_range()) &&
6207 right()->definition()->IsConstant()) {
6208 ConstantInstr* constant = right()->definition()->AsConstant();
6209 summary->set_in(1, Location::Constant(constant));
6210 } else {
6211 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
 6212 Location::RequiresRegister()));
 6213 }
6214 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
 6215 Location::RequiresRegister()));
 6216#else
6217 LocationSummary* summary = new (zone) LocationSummary(
6218 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6219 summary->set_in(0, Location::RequiresRegister());
6220 summary->set_in(1, RangeUtils::IsPositive(shift_range())
 6221 ? LocationRegisterOrConstant(right())
 6222 : Location::RequiresRegister());
6223 summary->set_out(0, Location::RequiresRegister());
6224#endif
6225 return summary;
6226}
6227
6228void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6229#if XLEN == 32
6230 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6231 Register left_lo = left_pair->At(0).reg();
6232 Register left_hi = left_pair->At(1).reg();
6233 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6234 Register out_lo = out_pair->At(0).reg();
6235 Register out_hi = out_pair->At(1).reg();
6236 ASSERT(!can_overflow());
6237
6238 if (locs()->in(1).IsConstant()) {
6239 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6240 left_hi, locs()->in(1).constant());
6241 } else {
6242 // Code for a variable shift amount (or constant that throws).
6243 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6244 Register right_lo = right_pair->At(0).reg();
6245 Register right_hi = right_pair->At(1).reg();
6246
6247 // Jump to a slow path if shift is larger than 63 or less than 0.
6248 ShiftInt64OpSlowPath* slow_path = nullptr;
6249 if (!IsShiftCountInRange()) {
6250 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6251 compiler->AddSlowPathCode(slow_path);
6252 __ CompareImmediate(right_hi, 0);
6253 __ BranchIf(NE, slow_path->entry_label());
6254 __ CompareImmediate(right_lo, kShiftCountLimit);
6255 __ BranchIf(HI, slow_path->entry_label());
6256 }
6257
6258 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6259 left_hi, right_lo);
6260
6261 if (slow_path != nullptr) {
6262 __ Bind(slow_path->exit_label());
6263 }
6264 }
6265#else
6266 const Register left = locs()->in(0).reg();
6267 const Register out = locs()->out(0).reg();
6268 ASSERT(!can_overflow());
6269
6270 if (locs()->in(1).IsConstant()) {
6271 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
6272 locs()->in(1).constant());
6273 } else {
6274 // Code for a variable shift amount (or constant that throws).
6275 Register shift = locs()->in(1).reg();
6276
6277 // Jump to a slow path if shift is larger than 63 or less than 0.
6278 ShiftInt64OpSlowPath* slow_path = nullptr;
6279 if (!IsShiftCountInRange()) {
6280 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6281 compiler->AddSlowPathCode(slow_path);
6282 __ CompareImmediate(shift, kShiftCountLimit);
6283 __ BranchIf(HI, slow_path->entry_label());
6284 }
6285
6286 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
6287
6288 if (slow_path != nullptr) {
6289 __ Bind(slow_path->exit_label());
6290 }
6291 }
6292#endif
6293}
6294
6295LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6296 Zone* zone,
6297 bool opt) const {
6298 const intptr_t kNumInputs = 2;
6299 const intptr_t kNumTemps = 0;
6300 LocationSummary* summary = new (zone)
6301 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6302#if XLEN == 32
6303 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
 6304 Location::RequiresRegister()));
 6305 summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
6306 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
 6307 Location::RequiresRegister()));
 6308#else
6309 summary->set_in(0, Location::RequiresRegister());
6310 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6311 summary->set_out(0, Location::RequiresRegister());
6312#endif
6313 return summary;
6314}
6315
6316void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6317#if XLEN == 32
6318 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6319 Register left_lo = left_pair->At(0).reg();
6320 Register left_hi = left_pair->At(1).reg();
6321 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6322 Register out_lo = out_pair->At(0).reg();
6323 Register out_hi = out_pair->At(1).reg();
6324 ASSERT(!can_overflow());
6325
6326 if (locs()->in(1).IsConstant()) {
6327 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6328 left_hi, locs()->in(1).constant());
6329 } else {
6330 // Code for a variable shift amount.
6331 Register shift = locs()->in(1).reg();
6332 __ SmiUntag(shift);
6333
6334 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6335 if (!IsShiftCountInRange()) {
6336 ASSERT(CanDeoptimize());
6337 compiler::Label* deopt =
6338 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6339
6340 __ CompareImmediate(shift, kShiftCountLimit);
6341 __ BranchIf(HI, deopt);
6342 }
6343
6344 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6345 left_hi, shift);
6346 }
6347#else
6348 const Register left = locs()->in(0).reg();
6349 const Register out = locs()->out(0).reg();
6350 ASSERT(!can_overflow());
6351
6352 if (locs()->in(1).IsConstant()) {
6353 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
6354 locs()->in(1).constant());
6355 } else {
6356 // Code for a variable shift amount.
6357 Register shift = locs()->in(1).reg();
6358
6359 // Untag shift count.
6360 __ SmiUntag(TMP, shift);
6361 shift = TMP;
6362
6363 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6364 if (!IsShiftCountInRange()) {
6365 ASSERT(CanDeoptimize());
6366 compiler::Label* deopt =
6367 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6368
6369 __ CompareImmediate(shift, kShiftCountLimit);
6370 __ BranchIf(HI, deopt);
6371 }
6372
6373 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
6374 }
6375#endif
6376}
6377
6378class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
6379 public:
6380 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
6381 : ThrowErrorSlowPathCode(instruction,
6382 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6383
6384 const char* name() override { return "uint32 shift"; }
6385
6386 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6387#if XLEN == 32
6388 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6389 Register right_lo = right_pair->At(0).reg();
6390 Register right_hi = right_pair->At(1).reg();
6391 Register out = instruction()->locs()->out(0).reg();
6392
6393 compiler::Label throw_error;
6394 __ bltz(right_hi, &throw_error, compiler::Assembler::kNearJump);
6395 __ li(out, 0);
6396 __ j(exit_label());
6397
6398 __ Bind(&throw_error);
6399 // Can't pass unboxed int64 value directly to runtime call, as all
6400 // arguments are expected to be tagged (boxed).
6401 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6402 // TODO(dartbug.com/33549): Clean this up when unboxed values
6403 // could be passed as arguments.
6404 __ StoreToOffset(right_lo, THR,
6405 compiler::target::Thread::unboxed_runtime_arg_offset());
6406 __ StoreToOffset(right_hi, THR,
6407 compiler::target::Thread::unboxed_runtime_arg_offset() +
6408 compiler::target::kWordSize);
6409#else
6410 const Register right = instruction()->locs()->in(1).reg();
6411
6412 // Can't pass unboxed int64 value directly to runtime call, as all
6413 // arguments are expected to be tagged (boxed).
6414 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6415 // TODO(dartbug.com/33549): Clean this up when unboxed values
6416 // could be passed as arguments.
6417 __ sx(right,
6418 compiler::Address(
6419 THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
6420#endif
6421 }
6422};
6423
6424LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6425 bool opt) const {
6426 const intptr_t kNumInputs = 2;
6427 const intptr_t kNumTemps = 0;
6428 LocationSummary* summary = new (zone) LocationSummary(
6429 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6430 summary->set_in(0, Location::RequiresRegister());
6431 if (RangeUtils::IsPositive(shift_range()) &&
6432 right()->definition()->IsConstant()) {
6433 ConstantInstr* constant = right()->definition()->AsConstant();
6434 summary->set_in(1, Location::Constant(constant));
6435 } else {
6436#if XLEN == 32
6437 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
 6438 Location::RequiresRegister()));
 6439#else
6440 summary->set_in(1, Location::RequiresRegister());
6441#endif
6442 }
6443 summary->set_out(0, Location::RequiresRegister());
6444 return summary;
6445}
6446
6447void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6448#if XLEN == 32
6449 Register left = locs()->in(0).reg();
6450 Register out = locs()->out(0).reg();
6451
6452 ASSERT(left != out);
6453
6454 if (locs()->in(1).IsConstant()) {
6455 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6456 locs()->in(1).constant());
6457 } else {
6458 // Code for a variable shift amount (or constant that throws).
6459 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6460 Register right_lo = right_pair->At(0).reg();
6461 Register right_hi = right_pair->At(1).reg();
6462
6463 // Jump to a slow path if shift count is > 31 or negative.
6464 ShiftUint32OpSlowPath* slow_path = nullptr;
6465 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6466 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6467 compiler->AddSlowPathCode(slow_path);
6468
6469 __ CompareImmediate(right_hi, 0);
6470 __ BranchIf(NE, slow_path->entry_label());
6471 __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
6472 __ BranchIf(HI, slow_path->entry_label());
6473 }
6474
6475 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
6476
6477 if (slow_path != nullptr) {
6478 __ Bind(slow_path->exit_label());
6479 }
6480 }
6481#else
6482 Register left = locs()->in(0).reg();
6483 Register out = locs()->out(0).reg();
6484
6485 if (locs()->in(1).IsConstant()) {
6486 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6487 locs()->in(1).constant());
6488 } else {
6489 // Code for a variable shift amount (or constant that throws).
6490 const Register right = locs()->in(1).reg();
6491 const bool shift_count_in_range =
6492 IsShiftCountInRange(kUint32ShiftCountLimit);
6493
6494 // Jump to a slow path if shift count is negative.
6495 if (!shift_count_in_range) {
6496 ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
6497 compiler->AddSlowPathCode(slow_path);
6498
6499 __ bltz(right, slow_path->entry_label());
6500 }
6501
6502 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6503
6504 if (!shift_count_in_range) {
6505 // If shift value is > 31, return zero.
6506 compiler::Label done;
6507 __ CompareImmediate(right, 31);
 6508 __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
 6509 __ li(out, 0);
6510 __ Bind(&done);
6511 }
6512 }
6513#endif
6514}
6515
6516LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6517 Zone* zone,
6518 bool opt) const {
6519 const intptr_t kNumInputs = 2;
6520 const intptr_t kNumTemps = 0;
6521 LocationSummary* summary = new (zone)
6522 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6523 summary->set_in(0, Location::RequiresRegister());
6524 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6525 summary->set_out(0, Location::RequiresRegister());
6526 return summary;
6527}
6528
6529void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6530 FlowGraphCompiler* compiler) {
6531 Register left = locs()->in(0).reg();
6532 Register out = locs()->out(0).reg();
6533
6534 if (locs()->in(1).IsConstant()) {
6535 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6536 locs()->in(1).constant());
6537 } else {
6538 Register right = locs()->in(1).reg();
6539 const bool shift_count_in_range =
6540 IsShiftCountInRange(kUint32ShiftCountLimit);
6541
6542 __ SmiUntag(TMP, right);
6543 right = TMP;
6544
6545 // Jump to a slow path if shift count is negative.
6546 if (!shift_count_in_range) {
6547 // Deoptimize if shift count is negative.
6548 ASSERT(CanDeoptimize());
6549 compiler::Label* deopt =
6550 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6551
6552 __ bltz(right, deopt);
6553 }
6554
6555 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6556
6557 if (!shift_count_in_range) {
6558 // If shift value is > 31, return zero.
6559 compiler::Label done;
6560 __ CompareImmediate(right, 31);
 6561 __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
 6562 __ li(out, 0);
6563 __ Bind(&done);
6564 }
6565 }
6566}
6567
6568LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6569 bool opt) const {
6570#if XLEN == 32
6571 const intptr_t kNumInputs = 1;
6572 const intptr_t kNumTemps = 0;
6573 LocationSummary* summary = new (zone)
6574 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6575 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
 6576 Location::RequiresRegister()));
 6577 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
 6578 Location::RequiresRegister()));
 6579 return summary;
6580#else
6581 const intptr_t kNumInputs = 1;
6582 const intptr_t kNumTemps = 0;
6583 LocationSummary* summary = new (zone)
6584 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6585 summary->set_in(0, Location::RequiresRegister());
6586 summary->set_out(0, Location::RequiresRegister());
6587 return summary;
6588#endif
6589}
6590
6591void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6592#if XLEN == 32
6593 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6594 Register left_lo = left_pair->At(0).reg();
6595 Register left_hi = left_pair->At(1).reg();
6596
6597 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6598 Register out_lo = out_pair->At(0).reg();
6599 Register out_hi = out_pair->At(1).reg();
6600
6601 switch (op_kind()) {
6602 case Token::kBIT_NOT:
6603 __ not_(out_lo, left_lo);
6604 __ not_(out_hi, left_hi);
6605 break;
6606 case Token::kNEGATE:
6607 __ snez(TMP, left_lo); // Borrow
6608 __ neg(out_lo, left_lo);
6609 __ neg(out_hi, left_hi);
6610 __ sub(out_hi, out_hi, TMP);
6611 break;
6612 default:
6613 UNREACHABLE();
6614 }
6615#else
6616 const Register left = locs()->in(0).reg();
6617 const Register out = locs()->out(0).reg();
6618 switch (op_kind()) {
6619 case Token::kBIT_NOT:
6620 __ not_(out, left);
6621 break;
6622 case Token::kNEGATE:
6623 __ neg(out, left);
6624 break;
6625 default:
6626 UNREACHABLE();
6627 }
6628#endif
6629}
6630
6631LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6632 bool opt) const {
6633 const intptr_t kNumInputs = 2;
6634 const intptr_t kNumTemps = 0;
6635 LocationSummary* summary = new (zone)
6636 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6637 summary->set_in(0, Location::RequiresRegister());
6638 summary->set_in(1, Location::RequiresRegister());
6639 summary->set_out(0, Location::RequiresRegister());
6640 return summary;
6641}
6642
6643void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6644 Register left = locs()->in(0).reg();
6645 Register right = locs()->in(1).reg();
6646 Register out = locs()->out(0).reg();
6647 switch (op_kind()) {
6648 case Token::kBIT_AND:
6649 __ and_(out, left, right);
6650 break;
6651 case Token::kBIT_OR:
6652 __ or_(out, left, right);
6653 break;
6654 case Token::kBIT_XOR:
6655 __ xor_(out, left, right);
6656 break;
6657 case Token::kADD:
6658#if XLEN == 32
6659 __ add(out, left, right);
6660#elif XLEN > 32
6661 __ addw(out, left, right);
6662#endif
6663 break;
6664 case Token::kSUB:
6665#if XLEN == 32
6666 __ sub(out, left, right);
6667#elif XLEN > 32
6668 __ subw(out, left, right);
6669#endif
6670 break;
6671 case Token::kMUL:
6672 __ mul(out, left, right);
6673 break;
6674 default:
6675 UNREACHABLE();
6676 }
6677}
6678
6679LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6680 bool opt) const {
6681 const intptr_t kNumInputs = 1;
6682 const intptr_t kNumTemps = 0;
6683 LocationSummary* summary = new (zone)
6684 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6685 summary->set_in(0, Location::RequiresRegister());
6686 summary->set_out(0, Location::RequiresRegister());
6687 return summary;
6688}
6689
6690void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6691 Register left = locs()->in(0).reg();
6692 Register out = locs()->out(0).reg();
6693
6694 ASSERT(op_kind() == Token::kBIT_NOT);
6695 __ not_(out, left);
6696}
6697
6698#if XLEN == 32
6699static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
6700 BinaryInt32OpInstr* shift_left) {
6701 const LocationSummary& locs = *shift_left->locs();
6702 const Register left = locs.in(0).reg();
6703 const Register result = locs.out(0).reg();
6704 compiler::Label* deopt =
6705 shift_left->CanDeoptimize()
6706 ? compiler->AddDeoptStub(shift_left->deopt_id(),
6707 ICData::kDeoptBinarySmiOp)
6708 : nullptr;
6709 ASSERT(locs.in(1).IsConstant());
6710 const Object& constant = locs.in(1).constant();
 6711 ASSERT(compiler::target::IsSmi(constant));
 6712 // Immediate shift operation takes 5 bits for the count.
6713 const intptr_t kCountLimit = 0x1F;
6714 const intptr_t value = compiler::target::SmiValue(constant);
6715 ASSERT((0 < value) && (value < kCountLimit));
6716 __ slli(result, left, value);
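 // Overflow of the left shift is detected by shifting the result back down:
 // if the arithmetic right shift does not reproduce the original operand,
 // significant bits (or the sign) were shifted out and we deoptimize.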
6717 if (shift_left->can_overflow()) {
6718 __ srai(TMP, result, value);
6719 __ bne(TMP, left, deopt); // Overflow.
6720 }
6721}
6722
6723LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
6724 bool opt) const {
6725 const intptr_t kNumInputs = 2;
6726 // Calculate number of temporaries.
6727 intptr_t num_temps = 0;
6728 if (((op_kind() == Token::kSHL) && can_overflow()) ||
6729 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR) ||
6730 (op_kind() == Token::kMUL)) {
6731 num_temps = 1;
6732 }
6733 LocationSummary* summary = new (zone)
6734 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
6735 summary->set_in(0, Location::RequiresRegister());
6736 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6737 if (num_temps == 1) {
6738 summary->set_temp(0, Location::RequiresRegister());
6739 }
6740 // We make use of 3-operand instructions by not requiring result register
 6741 // to be identical to the first input register, as it is on Intel.
6742 summary->set_out(0, Location::RequiresRegister());
6743 return summary;
6744}
6745
6746void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6747 if (op_kind() == Token::kSHL) {
6748 EmitInt32ShiftLeft(compiler, this);
6749 return;
6750 }
6751
6752 const Register left = locs()->in(0).reg();
6753 const Register result = locs()->out(0).reg();
6754 compiler::Label* deopt = nullptr;
6755 if (CanDeoptimize()) {
6756 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
6757 }
6758
6759 if (locs()->in(1).IsConstant()) {
6760 const Object& constant = locs()->in(1).constant();
 6761 ASSERT(compiler::target::IsSmi(constant));
 6762 const intptr_t value = compiler::target::SmiValue(constant);
6763 switch (op_kind()) {
6764 case Token::kADD: {
6765 if (deopt == nullptr) {
6766 __ AddImmediate(result, left, value);
6767 } else {
6768 __ AddImmediateBranchOverflow(result, left, value, deopt);
6769 }
6770 break;
6771 }
6772 case Token::kSUB: {
6773 if (deopt == nullptr) {
6774 __ AddImmediate(result, left, -value);
6775 } else {
6776 // Negating value and using AddImmediateSetFlags would not detect the
6777 // overflow when value == kMinInt32.
6778 __ SubtractImmediateBranchOverflow(result, left, value, deopt);
6779 }
6780 break;
6781 }
6782 case Token::kMUL: {
6783 const Register right = locs()->temp(0).reg();
6784 __ LoadImmediate(right, value);
6785 if (deopt == nullptr) {
6786 __ mul(result, left, right);
6787 } else {
6788 __ MultiplyBranchOverflow(result, left, right, deopt);
6789 }
6790 break;
6791 }
6792 case Token::kBIT_AND: {
6793 // No overflow check.
6794 __ AndImmediate(result, left, value);
6795 break;
6796 }
6797 case Token::kBIT_OR: {
6798 // No overflow check.
6799 __ OrImmediate(result, left, value);
6800 break;
6801 }
6802 case Token::kBIT_XOR: {
6803 // No overflow check.
6804 __ XorImmediate(result, left, value);
6805 break;
6806 }
6807 case Token::kSHR: {
 6808 // The immediate shift encoding only has 5 bits for the count.
6809 const intptr_t kCountLimit = 0x1F;
6810 __ srai(result, left, Utils::Minimum(value, kCountLimit));
6811 break;
6812 }
6813 case Token::kUSHR: {
6814 UNIMPLEMENTED();
6815 break;
6816 }
6817
6818 default:
6819 UNREACHABLE();
6820 break;
6821 }
6822 return;
6823 }
6824
6825 const Register right = locs()->in(1).reg();
6826 switch (op_kind()) {
6827 case Token::kADD: {
6828 if (deopt == nullptr) {
6829 __ add(result, left, right);
6830 } else {
6831 __ AddBranchOverflow(result, left, right, deopt);
6832 }
6833 break;
6834 }
6835 case Token::kSUB: {
6836 if (deopt == nullptr) {
6837 __ sub(result, left, right);
6838 } else {
6839 __ SubtractBranchOverflow(result, left, right, deopt);
6840 }
6841 break;
6842 }
6843 case Token::kMUL: {
6844 if (deopt == nullptr) {
6845 __ mul(result, left, right);
6846 } else {
6847 __ MultiplyBranchOverflow(result, left, right, deopt);
6848 }
6849 break;
6850 }
6851 case Token::kBIT_AND: {
6852 // No overflow check.
6853 __ and_(result, left, right);
6854 break;
6855 }
6856 case Token::kBIT_OR: {
6857 // No overflow check.
6858 __ or_(result, left, right);
6859 break;
6860 }
6861 case Token::kBIT_XOR: {
6862 // No overflow check.
6863 __ xor_(result, left, right);
6864 break;
6865 }
6866 default:
6867 UNREACHABLE();
6868 break;
6869 }
6870}
6871#else
6872DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
6873#endif
6874
6875LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6876 bool opt) const {
6877#if XLEN == 32
6878 const intptr_t kNumInputs = 1;
6879 const intptr_t kNumTemps = 0;
6880 LocationSummary* summary = new (zone)
6881 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6882 if (from() == kUntagged || to() == kUntagged) {
6883 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
6884 (from() == kUntagged && to() == kUnboxedUint32) ||
6885 (from() == kUnboxedInt32 && to() == kUntagged) ||
6886 (from() == kUnboxedUint32 && to() == kUntagged));
6887 ASSERT(!CanDeoptimize());
6888 summary->set_in(0, Location::RequiresRegister());
6889 summary->set_out(0, Location::SameAsFirstInput());
6890 } else if (from() == kUnboxedInt64) {
6891 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6892 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6893 Location::RequiresRegister()));
6894 summary->set_out(0, Location::RequiresRegister());
6895 } else if (to() == kUnboxedInt64) {
6896 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6897 summary->set_in(0, Location::RequiresRegister());
6898 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6899 Location::RequiresRegister()));
6900 } else {
6901 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6902 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6903 summary->set_in(0, Location::RequiresRegister());
6904 summary->set_out(0, Location::SameAsFirstInput());
6905 }
6906 return summary;
6907#else
6908 const intptr_t kNumInputs = 1;
6909 const intptr_t kNumTemps = 0;
6910 LocationSummary* summary = new (zone)
6911 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6912 if (from() == kUntagged || to() == kUntagged) {
6913 ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
6914 (from() == kUnboxedIntPtr && to() == kUntagged));
6915 ASSERT(!CanDeoptimize());
6916 } else if (from() == kUnboxedInt64) {
6917 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6918 } else if (to() == kUnboxedInt64) {
6919 ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
6920 } else {
6921 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6922 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6923 }
6924 summary->set_in(0, Location::RequiresRegister());
6925 if (CanDeoptimize()) {
6926 summary->set_out(0, Location::RequiresRegister());
6927 } else {
6928 summary->set_out(0, Location::SameAsFirstInput());
6929 }
6930 return summary;
6931#endif
6932}
6933
6934void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6935#if XLEN == 32
6936 const bool is_nop_conversion =
6937 (from() == kUntagged && to() == kUnboxedInt32) ||
6938 (from() == kUntagged && to() == kUnboxedUint32) ||
6939 (from() == kUnboxedInt32 && to() == kUntagged) ||
6940 (from() == kUnboxedUint32 && to() == kUntagged);
6941 if (is_nop_conversion) {
6942 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6943 return;
6944 }
6945
6946 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6947 const Register out = locs()->out(0).reg();
6948 // Representations are bitwise equivalent.
6949 ASSERT(out == locs()->in(0).reg());
6950 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6951 const Register out = locs()->out(0).reg();
6952 // Representations are bitwise equivalent.
6953 ASSERT(out == locs()->in(0).reg());
6954 if (CanDeoptimize()) {
6955 compiler::Label* deopt =
6956 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
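// A uint32 with the sign bit set does not fit in an int32; deoptimize.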
6957 __ bltz(out, deopt);
6958 }
6959 } else if (from() == kUnboxedInt64) {
6960 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6961 PairLocation* in_pair = locs()->in(0).AsPairLocation();
6962 Register in_lo = in_pair->At(0).reg();
6963 Register in_hi = in_pair->At(1).reg();
6964 Register out = locs()->out(0).reg();
6965 // Copy low word.
6966 __ mv(out, in_lo);
6967 if (CanDeoptimize()) {
6968 compiler::Label* deopt =
6969 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6970 ASSERT(to() == kUnboxedInt32);
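// The int64 fits in an int32 only if the high word is the sign-extension
// of the low word.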
6971 __ srai(TMP, in_lo, XLEN - 1);
6972 __ bne(in_hi, TMP, deopt);
6973 }
6974 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
6975 ASSERT(to() == kUnboxedInt64);
6976 Register in = locs()->in(0).reg();
6977 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6978 Register out_lo = out_pair->At(0).reg();
6979 Register out_hi = out_pair->At(1).reg();
6980 // Copy low word.
6981 __ mv(out_lo, in);
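// The high word is zero for an unsigned source and the sign-extension of
// the low word for a signed one.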
6982 if (from() == kUnboxedUint32) {
6983 __ li(out_hi, 0);
6984 } else {
6985 ASSERT(from() == kUnboxedInt32);
6986 __ srai(out_hi, in, XLEN - 1);
6987 }
6988 } else {
6989 UNREACHABLE();
6990 }
6991#else
6992 ASSERT(from() != to()); // We don't convert from a representation to itself.
6993
6994 const bool is_nop_conversion =
6995 (from() == kUntagged && to() == kUnboxedIntPtr) ||
6996 (from() == kUnboxedIntPtr && to() == kUntagged);
6997 if (is_nop_conversion) {
6998 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6999 return;
7000 }
7001
7002 const Register value = locs()->in(0).reg();
7003 const Register out = locs()->out(0).reg();
7004 compiler::Label* deopt =
7005 !CanDeoptimize()
7006 ? nullptr
7007 : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7008 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
7009 if (CanDeoptimize()) {
7010 __ slli(TMP, value, 32);
7011 __ bltz(TMP, deopt); // If sign bit is set it won't fit in a uint32.
7012 }
7013 if (out != value) {
7014 __ mv(out, value); // For positive values the bits are the same.
7015 }
7016 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
7017 if (CanDeoptimize()) {
7018 __ slli(TMP, value, 32);
7019 __ bltz(TMP, deopt); // If high bit is set it won't fit in an int32.
7020 }
7021 if (out != value) {
7022 __ mv(out, value); // For 31 bit values the bits are the same.
7023 }
7024 } else if (from() == kUnboxedInt64) {
7025 if (to() == kUnboxedInt32) {
7026 if (is_truncating() || out != value) {
7027 __ sextw(out, value); // Signed extension 64->32.
7028 }
7029 } else {
7030 ASSERT(to() == kUnboxedUint32);
7031 if (is_truncating() || out != value) {
7032 // Unsigned extension 64->32.
7033 // TODO(riscv): Might be a shorter way to do this.
7034 __ slli(out, value, 32);
7035 __ srli(out, out, 32);
7036 }
7037 }
7038 if (CanDeoptimize()) {
7039 ASSERT(to() == kUnboxedInt32);
7040 __ CompareRegisters(out, value);
7041 __ BranchIf(NE, deopt); // Value cannot be held in Int32, deopt.
7042 }
7043 } else if (to() == kUnboxedInt64) {
7044 if (from() == kUnboxedUint32) {
7045 // TODO(riscv): Might be a shorter way to do this.
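// (With the Zba extension, a zext.w/add.uw could presumably do this in one
// instruction; the shift pair below is the portable RV64I sequence.)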
7046 __ slli(out, value, 32);
7047 __ srli(out, out, 32);
7048 } else {
7049 ASSERT(from() == kUnboxedInt32);
7050 __ sextw(out, value); // Signed extension 32->64.
7051 }
7052 } else {
7053 UNREACHABLE();
7054 }
7055#endif
7056}
7057
7058LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7059 LocationSummary* summary =
7060 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7061 /*num_temps=*/0, LocationSummary::kNoCall);
7062 switch (from()) {
7063 case kUnboxedInt32:
7064 summary->set_in(0, Location::RequiresRegister());
7065 break;
7066 case kUnboxedInt64:
7067#if XLEN == 32
7068 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7069 Location::RequiresRegister()));
7070#else
7071 summary->set_in(0, Location::RequiresRegister());
7072#endif
7073 break;
7074 case kUnboxedFloat:
7075 case kUnboxedDouble:
7076 summary->set_in(0, Location::RequiresFpuRegister());
7077 break;
7078 default:
7079 UNREACHABLE();
7080 }
7081
7082 switch (to()) {
7083 case kUnboxedInt32:
7084 summary->set_out(0, Location::RequiresRegister());
7085 break;
7086 case kUnboxedInt64:
7087#if XLEN == 32
7088 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7089 Location::RequiresRegister()));
7090#else
7091 summary->set_out(0, Location::RequiresRegister());
7092#endif
7093 break;
7094 case kUnboxedFloat:
7095 case kUnboxedDouble:
7096 summary->set_out(0, Location::RequiresFpuRegister());
7097 break;
7098 default:
7099 UNREACHABLE();
7100 }
7101 return summary;
7102}
7103
7104void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7105 switch (from()) {
7106 case kUnboxedFloat: {
7107 switch (to()) {
7108 case kUnboxedInt32: {
7109 const FpuRegister src = locs()->in(0).fpu_reg();
7110 const Register dst = locs()->out(0).reg();
7111 __ fmvxw(dst, src);
7112 break;
7113 }
7114 case kUnboxedInt64: {
7115 const FpuRegister src = locs()->in(0).fpu_reg();
7116#if XLEN == 32
7117 const Register dst0 = locs()->out(0).AsPairLocation()->At(0).reg();
7118 const Register dst1 = locs()->out(0).AsPairLocation()->At(1).reg();
7119 __ fmvxw(dst0, src);
7120 __ li(dst1, 0);
7121#else
7122 const Register dst = locs()->out(0).reg();
7123 __ fmvxw(dst, src);
7124#endif
7125 break;
7126 }
7127 default:
7128 UNREACHABLE();
7129 }
7130 break;
7131 }
7132 case kUnboxedDouble: {
7133 ASSERT(to() == kUnboxedInt64);
7134 const FpuRegister src = locs()->in(0).fpu_reg();
7135#if XLEN == 32
7136 const Register dst0 = locs()->out(0).AsPairLocation()->At(0).reg();
7137 const Register dst1 = locs()->out(0).AsPairLocation()->At(1).reg();
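// RV32 has no direct move between a 64-bit FPU register and a pair of
// 32-bit GPRs, so bounce the value through the stack.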
7138 __ subi(SP, SP, 16);
7139 __ fsd(src, compiler::Address(SP, 0));
7140 __ lw(dst0, compiler::Address(SP, 0));
7141 __ lw(dst1, compiler::Address(SP, 4));
7142 __ addi(SP, SP, 16);
7143#else
7144 const Register dst = locs()->out(0).reg();
7145 __ fmvxd(dst, src);
7146#endif
7147 break;
7148 }
7149 case kUnboxedInt64: {
7150 switch (to()) {
7151 case kUnboxedDouble: {
7152 const FpuRegister dst = locs()->out(0).fpu_reg();
7153#if XLEN == 32
7154 const Register src0 = locs()->in(0).AsPairLocation()->At(0).reg();
7155 const Register src1 = locs()->in(0).AsPairLocation()->At(1).reg();
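// Same round-trip in reverse: store the GPR pair and reload it as a double.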
7156 __ subi(SP, SP, 16);
7157 __ sw(src0, compiler::Address(SP, 0));
7158 __ sw(src1, compiler::Address(SP, 4));
7159 __ fld(dst, compiler::Address(SP, 0));
7160 __ addi(SP, SP, 16);
7161#else
7162 const Register src = locs()->in(0).reg();
7163 __ fmvdx(dst, src);
7164#endif
7165 break;
7166 }
7167 case kUnboxedFloat: {
7168 const FpuRegister dst = locs()->out(0).fpu_reg();
7169#if XLEN == 32
7170 const Register src0 = locs()->in(0).AsPairLocation()->At(0).reg();
7171 __ fmvwx(dst, src0);
7172#else
7173 const Register src = locs()->in(0).reg();
7174 __ fmvwx(dst, src);
7175#endif
7176 break;
7177 }
7178 default:
7179 UNREACHABLE();
7180 }
7181 break;
7182 }
7183 case kUnboxedInt32: {
7184 ASSERT(to() == kUnboxedFloat);
7185 const Register src = locs()->in(0).reg();
7186 const FpuRegister dst = locs()->out(0).fpu_reg();
7187 __ fmvwx(dst, src);
7188 break;
7189 }
7190 default:
7191 UNREACHABLE();
7192 }
7193}
7194
7195LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7196 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7197}
7198
7199void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7200 __ Stop(message());
7201}
7202
7203void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7204 BlockEntryInstr* entry = normal_entry();
7205 if (entry != nullptr) {
7206 if (!compiler->CanFallThroughTo(entry)) {
7207 FATAL("Checked function entry must have no offset");
7208 }
7209 } else {
7210 entry = osr_entry();
7211 if (!compiler->CanFallThroughTo(entry)) {
7212 __ j(compiler->GetJumpLabel(entry));
7213 }
7214 }
7215}
7216
7217LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7218 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7219}
7220
7221void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7222 if (!compiler->is_optimizing()) {
7223 if (FLAG_reorder_basic_blocks) {
7224 compiler->EmitEdgeCounter(block()->preorder_number());
7225 }
7226 // Add a deoptimization descriptor for deoptimizing instructions that
7227 // may be inserted before this instruction.
7228 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
7229 InstructionSource());
7230 }
7231 if (HasParallelMove()) {
7232 parallel_move()->EmitNativeCode(compiler);
7233 }
7234
7235 // We can fall through if the successor is the next block in the list.
7236 // Otherwise, we need a jump.
7237 if (!compiler->CanFallThroughTo(successor())) {
7238 __ j(compiler->GetJumpLabel(successor()));
7239 }
7240}
7241
7242LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
7243 bool opt) const {
7244 const intptr_t kNumInputs = 1;
7245 const intptr_t kNumTemps = 2;
7246
7247 LocationSummary* summary = new (zone)
7248 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7249
7250 summary->set_in(0, Location::RequiresRegister());
7251 summary->set_temp(0, Location::RequiresRegister());
7252 summary->set_temp(1, Location::RequiresRegister());
7253
7254 return summary;
7255}
7256
7257void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7258 Register index_reg = locs()->in(0).reg();
7259 Register target_address_reg = locs()->temp(0).reg();
7260 Register offset_reg = locs()->temp(1).reg();
7261
7262 ASSERT(RequiredInputRepresentation(0) == kTagged);
7263 __ LoadObject(offset_reg, offsets_);
7264 const auto element_address = __ ElementAddressForRegIndex(
7265 /*is_external=*/false, kTypedDataInt32ArrayCid,
7266 /*index_scale=*/4,
7267 /*index_unboxed=*/false, offset_reg, index_reg, TMP);
7268 __ lw(offset_reg, element_address);
7269
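// The offsets table holds jump-target offsets relative to the start of the
// function's instructions. The auipc/jr pair below computes
// (pc of the auipc) - entry_offset + offset, i.e. the absolute target.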
7270 const intptr_t entry_offset = __ CodeSize();
7271 intx_t imm = -entry_offset;
7272 intx_t lo = ImmLo(imm);
7273 intx_t hi = ImmHi(imm);
7274 __ auipc(target_address_reg, hi);
7275 __ add(target_address_reg, target_address_reg, offset_reg);
7276 __ jr(target_address_reg, lo);
7277}
7278
7279LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
7280 bool opt) const {
7281 const intptr_t kNumInputs = 2;
7282 const intptr_t kNumTemps = 0;
7283 if (needs_number_check()) {
7284 LocationSummary* locs = new (zone)
7285 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7286 locs->set_in(0, Location::RegisterLocation(A0));
7287 locs->set_in(1, Location::RegisterLocation(A1));
7288 locs->set_out(0, Location::RegisterLocation(A0));
7289 return locs;
7290 }
7291 LocationSummary* locs = new (zone)
7292 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7293 locs->set_in(0, LocationRegisterOrConstant(left()));
7294 // Only one of the inputs can be a constant. Choose register if the first one
7295 // is a constant.
7296 locs->set_in(1, locs->in(0).IsConstant()
7297 ? Location::RequiresRegister()
7298 : LocationRegisterOrConstant(right()));
7299 locs->set_out(0, Location::RequiresRegister());
7300 return locs;
7301}
7302
7303Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
7304 FlowGraphCompiler* compiler,
7305 BranchLabels labels,
7306 Register reg,
7307 const Object& obj) {
7308 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
7309 source(), deopt_id());
7310}
7311
7312void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7313 compiler::Label is_true, is_false;
7314 BranchLabels labels = {&is_true, &is_false, &is_false};
7315 Condition true_condition = EmitComparisonCode(compiler, labels);
7316
7317 Register result = locs()->out(0).reg();
7318 if (is_true.IsLinked() || is_false.IsLinked()) {
7319 if (true_condition != kInvalidCondition) {
7320 EmitBranchOnCondition(compiler, true_condition, labels);
7321 }
7322 compiler::Label done;
7323 __ Bind(&is_false);
7324 __ LoadObject(result, Bool::False());
7325 __ j(&done, compiler::Assembler::kNearJump);
7326 __ Bind(&is_true);
7327 __ LoadObject(result, Bool::True());
7328 __ Bind(&done);
7329 } else {
7330 // If EmitComparisonCode did not use the labels and just returned
7331 // a condition we can avoid the branch and use slt to generate the
7332 // offsets to true or false.
7337 __ SetIf(InvertCondition(true_condition), result);
7338 __ slli(result, result, kBoolValueBitPosition);
7339 __ AddImmediate(result, kTrueOffsetFromNull);
7340 __ add(result, result, NULL_REG);
7341 }
7342}
7343
7344void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
7345 BranchInstr* branch) {
7346 BranchLabels labels = compiler->CreateBranchLabels(branch);
7347 Condition true_condition = EmitComparisonCode(compiler, labels);
7348 if (true_condition != kInvalidCondition) {
7349 EmitBranchOnCondition(compiler, true_condition, labels);
7350 }
7351}
7352
7353LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
7354 bool opt) const {
7355 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7356 LocationSummary::kNoCall);
7357}
7358
7359void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7360 const Register input = locs()->in(0).reg();
7361 const Register result = locs()->out(0).reg();
7362 __ xori(result, input, compiler::target::ObjectAlignment::kBoolValueMask);
7363}
7364
7365LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
7366 bool opt) const {
7367 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7368 LocationSummary::kNoCall);
7369}
7370
7371void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7372 const Register input = locs()->in(0).reg();
7373 const Register result = locs()->out(0).reg();
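// Produce 0 for false and -1 (all bits set) for true: compare against
// Bool::True() and negate the resulting 0/1 flag.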
7374 __ LoadObject(TMP, Bool::True());
7375 __ xor_(TMP, TMP, input);
7376 __ seqz(TMP, TMP);
7377 __ neg(result, TMP);
7378}
7379
7380LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
7381 bool opt) const {
7382 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7383 LocationSummary::kNoCall);
7384}
7385
7386void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7387 const Register input = locs()->in(0).reg();
7388 const Register result = locs()->out(0).reg();
7389 __ seqz(result, input);
7390 __ slli(result, result, kBoolValueBitPosition);
7391 __ add(result, result, NULL_REG);
7392 __ AddImmediate(result, kTrueOffsetFromNull);
7393}
7394
7395LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
7396 bool opt) const {
7397 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
7398 const intptr_t kNumTemps = 0;
7399 LocationSummary* locs = new (zone)
7400 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7401 if (type_arguments() != nullptr) {
7402 locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
7403 AllocateObjectABI::kTypeArgumentsReg));
7404 }
7405 locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
7406 return locs;
7407}
7408
7409void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7410 if (type_arguments() != nullptr) {
7411 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
7412 if (type_usage_info != nullptr) {
7413 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
7414 type_arguments()->definition());
7415 }
7416 }
7417 const Code& stub = Code::ZoneHandle(
7418 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
7419 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
7420 locs(), deopt_id(), env());
7421}
7422
7423void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7424#ifdef PRODUCT
7425 UNREACHABLE();
7426#else
7427 ASSERT(!compiler->is_optimizing());
7428 __ JumpAndLinkPatchable(StubCode::DebugStepCheck());
7429 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
7430 compiler->RecordSafepoint(locs());
7431#endif
7432}
7433
7434} // namespace dart
7435
7436#endif // defined(TARGET_ARCH_RISCV)