il_riscv.cc
1// Copyright (c) 2021, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_RISCV.
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
9
18#include "vm/dart_entry.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/simulator.h"
23#include "vm/stack_frame.h"
24#include "vm/stub_code.h"
25#include "vm/symbols.h"
27
28#define __ (compiler->assembler())->
29#define Z (compiler->zone())
30
31namespace dart {
32
33// Generic summary for call instructions that have all arguments pushed
34// on the stack and return the result in a fixed register A0 (or FA0 if
35// the return type is double).
36LocationSummary* Instruction::MakeCallSummary(Zone* zone,
37 const Instruction* instr,
38 LocationSummary* locs) {
39 ASSERT(locs == nullptr || locs->always_calls());
40 LocationSummary* result =
41 ((locs == nullptr)
42 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
43 : locs);
44 const auto representation = instr->representation();
45 switch (representation) {
46 case kTagged:
47 case kUntagged:
48 case kUnboxedUint32:
49 case kUnboxedInt32:
50 result->set_out(
52 break;
53 case kPairOfTagged:
54 result->set_out(
59 break;
60 case kUnboxedInt64:
61#if XLEN == 32
62 result->set_out(
67#else
68 result->set_out(
70#endif
71 break;
72 case kUnboxedDouble:
73 result->set_out(
75 break;
76 default:
78 break;
79 }
80 return result;
81}
82
83LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
84 bool opt) const {
85 const intptr_t kNumInputs = 1;
86 const intptr_t kNumTemps = 0;
87 LocationSummary* locs = new (zone)
88 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
89
91 switch (representation()) {
92 case kTagged:
94 break;
95 case kUnboxedInt64:
96#if XLEN == 32
99#else
101#endif
102 break;
103 case kUnboxedDouble:
105 break;
106 default:
107 UNREACHABLE();
108 break;
109 }
110 return locs;
111}
112
113void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
114 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
115 ASSERT(kSmiTag == 0);
116 ASSERT(kSmiTagSize == 1);
117
118 const Register index = locs()->in(0).reg();
119
120 switch (representation()) {
121 case kTagged: {
122 const auto out = locs()->out(0).reg();
123 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
124 __ LoadFromOffset(out, TMP, offset());
125 break;
126 }
127 case kUnboxedInt64: {
128#if XLEN == 32
129 const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
130 const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
131 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
132 __ LoadFromOffset(out_lo, TMP, offset());
133 __ LoadFromOffset(out_hi, TMP, offset() + compiler::target::kWordSize);
134#else
135 const auto out = locs()->out(0).reg();
136 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
137 __ LoadFromOffset(out, TMP, offset());
138#endif
139 break;
140 }
141 case kUnboxedDouble: {
142 const auto out = locs()->out(0).fpu_reg();
143 __ AddShifted(TMP, base_reg(), index, kWordSizeLog2 - kSmiTagSize);
144 __ LoadDFromOffset(out, TMP, offset());
145 break;
146 }
147 default:
148 UNREACHABLE();
149 break;
150 }
151}
152
153DEFINE_BACKEND(StoreIndexedUnsafe,
154 (NoLocation, Register index, Register value)) {
155 ASSERT(instr->RequiredInputRepresentation(
156 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
157 __ AddShifted(TMP, instr->base_reg(), index,
158 kWordSizeLog2 - kSmiTagSize);
159 __ sx(value, compiler::Address(TMP, instr->offset()));
160
161 ASSERT(kSmiTag == 0);
162}
163
164DEFINE_BACKEND(TailCall,
165 (NoLocation,
166 Fixed<Register, ARGS_DESC_REG>,
167 Temp<Register> temp)) {
168 compiler->EmitTailCallToStub(instr->code());
169
170 // Even though the TailCallInstr will be the last instruction in a basic
171 // block, the flow graph compiler will emit native code for other blocks after
172 // the one containing this instruction and needs to be able to use the pool.
173 // (The `LeaveDartFrame` above disables usages of the pool.)
174 __ set_constant_pool_allowed(true);
175}
176
177LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
178 bool opt) const {
179 // The compiler must optimize any function that includes a MemoryCopy
180 // instruction that uses typed data cids, since extracting the payload address
181 // from views is done in a compiler pass after all code motion has happened.
182 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
183 !IsTypedDataBaseClassId(dest_cid_)) ||
184 opt);
185 const intptr_t kNumInputs = 5;
186 const intptr_t kNumTemps = 2;
187 LocationSummary* locs = new (zone)
188 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
197 return locs;
198}
199
201 Register length_reg,
202 compiler::Label* done) {
203 __ BranchIfZero(length_reg, done);
204}
205
206static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
208 switch (bytes) {
209 case 1:
210 return compiler::kByte;
211 case 2:
212 return compiler::kTwoBytes;
213 case 4:
215 case 8:
217 default:
218 UNREACHABLE();
220 }
221}
222
223// Copies [count] bytes from the memory region pointed to by [src_reg] to the
224// memory region pointed to by [dest_reg]. If [reversed] is true, then [dest_reg]
225// and [src_reg] are assumed to point at the end of their respective regions.
226static void CopyBytes(FlowGraphCompiler* compiler,
227 Register dest_reg,
228 Register src_reg,
229 intptr_t count,
230 bool reversed) {
231 COMPILE_ASSERT(XLEN <= 128);
233
234#if XLEN >= 128
235 // Handled specially because there is no kSixteenBytes OperandSize.
236 if (count == 16) {
237 const intptr_t offset = (reversed ? -1 : 1) * count;
238 const intptr_t initial = reversed ? offset : 0;
239 __ lq(TMP, compiler::Address(src_reg, initial));
240 __ addi(src_reg, src_reg, offset);
241 __ sq(TMP, compiler::Address(dest_reg, initial));
242 __ addi(dest_reg, dest_reg, offset);
243 return;
244 }
245#endif
246
247#if XLEN <= 32
248 if (count == 4 * (XLEN / 8)) {
249 auto const sz = OperandSizeFor(XLEN / 8);
250 const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
251 const intptr_t initial = reversed ? offset : 0;
252 __ LoadFromOffset(TMP, src_reg, initial, sz);
253 __ LoadFromOffset(TMP2, src_reg, initial + offset, sz);
254 __ StoreToOffset(TMP, dest_reg, initial, sz);
255 __ StoreToOffset(TMP2, dest_reg, initial + offset, sz);
256 __ LoadFromOffset(TMP, src_reg, initial + 2 * offset, sz);
257 __ LoadFromOffset(TMP2, src_reg, initial + 3 * offset, sz);
258 __ addi(src_reg, src_reg, 4 * offset);
259 __ StoreToOffset(TMP, dest_reg, initial + 2 * offset, sz);
260 __ StoreToOffset(TMP2, dest_reg, initial + 3 * offset, sz);
261 __ addi(dest_reg, dest_reg, 4 * offset);
262 return;
263 }
264#endif
265
266#if XLEN <= 64
267 if (count == 2 * (XLEN / 8)) {
268 auto const sz = OperandSizeFor(XLEN / 8);
269 const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
270 const intptr_t initial = reversed ? offset : 0;
271 __ LoadFromOffset(TMP, src_reg, initial, sz);
272 __ LoadFromOffset(TMP2, src_reg, initial + offset, sz);
273 __ addi(src_reg, src_reg, 2 * offset);
274 __ StoreToOffset(TMP, dest_reg, initial, sz);
275 __ StoreToOffset(TMP2, dest_reg, initial + offset, sz);
276 __ addi(dest_reg, dest_reg, 2 * offset);
277 return;
278 }
279#endif
280
281 ASSERT(count <= (XLEN / 8));
282 auto const sz = OperandSizeFor(count);
283 const intptr_t offset = (reversed ? -1 : 1) * count;
284 const intptr_t initial = reversed ? offset : 0;
285 __ LoadFromOffset(TMP, src_reg, initial, sz);
286 __ addi(src_reg, src_reg, offset);
287 __ StoreToOffset(TMP, dest_reg, initial, sz);
288 __ addi(dest_reg, dest_reg, offset);
289}
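// Note (added for clarity): in CopyBytes above, the forward case uses
// initial == 0 and advances both pointers by [count] after the access, so it
// copies [ptr, ptr + count). The reversed case uses initial == -count, so the
// access covers [ptr - count, ptr) and both pointers are then moved down by
// [count], i.e. the regions are walked from their ends toward their starts.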
290
291static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,
292 Register dest_reg,
293 Register src_reg,
294 Register length_reg,
295 intptr_t element_size,
296 bool unboxed_inputs,
297 bool reversed,
298 compiler::Label* done) {
301
302 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
303 const intptr_t base_shift =
304 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
305 intptr_t tested_bits = 0;
306
307 __ Comment("Copying until region is a multiple of word size");
308
309 COMPILE_ASSERT(XLEN <= 128);
310
311 for (intptr_t bit = compiler::target::kWordSizeLog2 - 1; bit >= element_shift;
312 bit--) {
313 const intptr_t bytes = 1 << bit;
314 const intptr_t tested_bit = bit + base_shift;
315 tested_bits |= 1 << tested_bit;
316 compiler::Label skip_copy;
317 __ andi(TMP, length_reg, 1 << tested_bit);
318 __ beqz(TMP, &skip_copy);
319 CopyBytes(compiler, dest_reg, src_reg, bytes, reversed);
320 __ Bind(&skip_copy);
321 }
322
323 ASSERT(tested_bits != 0);
324 __ andi(length_reg, length_reg, ~tested_bits);
325 __ beqz(length_reg, done);
326}
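// Note (added for clarity): each iteration of the loop above tests one bit of
// length_reg between the element size and the word size and, when it is set,
// copies that power-of-two number of bytes. Afterwards the remaining length is
// a multiple of the word size. For example, with XLEN == 64, 1-byte elements
// and an untagged length of 13 (0b1101), the 4-byte and 1-byte copies fire and
// length_reg is masked down to 8.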
327
328void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
329 Register dest_reg,
330 Register src_reg,
331 Register length_reg,
332 compiler::Label* done,
333 compiler::Label* copy_forwards) {
334 const bool reversed = copy_forwards != nullptr;
335 if (reversed) {
336 // Verify that the overlap actually exists by checking to see if the start
337 // of the destination region is after the end of the source region.
338 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
339 (unboxed_inputs() ? 0 : kSmiTagShift);
340 if (shift == 0) {
341 __ add(TMP, src_reg, length_reg);
342 } else if (shift < 0) {
343 __ srai(TMP, length_reg, -shift);
344 __ add(TMP, src_reg, TMP);
345 } else {
346 __ slli(TMP, length_reg, shift);
347 __ add(TMP, src_reg, TMP);
348 }
349 __ CompareRegisters(dest_reg, TMP);
350 __ BranchIf(UNSIGNED_GREATER_EQUAL, copy_forwards);
351 // Adjust dest_reg and src_reg to point at the end (i.e. one past the
352 // last element) of their respective region.
353 __ add(dest_reg, dest_reg, TMP);
354 __ sub(dest_reg, dest_reg, src_reg);
355 __ MoveRegister(src_reg, TMP);
356 }
357 CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
358 unboxed_inputs_, reversed, done);
359 // The size of the uncopied region is a multiple of the word size, so now we
360 // copy the rest by word.
361 const intptr_t loop_subtract =
362 Utils::Maximum<intptr_t>(1, (XLEN / 8) / element_size_)
363 << (unboxed_inputs_ ? 0 : kSmiTagShift);
364 __ Comment("Copying by multiples of word size");
365 compiler::Label loop;
366 __ Bind(&loop);
367 switch (element_size_) {
368 case 1:
369 case 2:
370 case 4:
371#if XLEN <= 32
372 CopyBytes(compiler, dest_reg, src_reg, 4, reversed);
373 break;
374#endif
375 case 8:
376#if XLEN <= 64
377 CopyBytes(compiler, dest_reg, src_reg, 8, reversed);
378 break;
379#endif
380 case 16:
381 COMPILE_ASSERT(XLEN <= 128);
382 CopyBytes(compiler, dest_reg, src_reg, 16, reversed);
383 break;
384 default:
385 UNREACHABLE();
386 break;
387 }
388 __ subi(length_reg, length_reg, loop_subtract);
389 __ bnez(length_reg, &loop);
390}
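// Note (added for clarity): loop_subtract above is the number of length units
// consumed per loop iteration: one word's worth of elements (at least one),
// shifted left by kSmiTagShift while the length is still a tagged Smi. For
// example, with XLEN == 32, 2-byte elements and a Smi length, each iteration
// copies 4 bytes == 2 elements, so the length is decremented by 2 << 1 == 4.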
391
392void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
393 classid_t array_cid,
394 Register array_reg,
395 Register payload_reg,
396 Representation array_rep,
397 Location start_loc) {
398 intptr_t offset = 0;
399 if (array_rep != kTagged) {
400 // Do nothing, array_reg already contains the payload address.
401 } else if (IsTypedDataBaseClassId(array_cid)) {
402 // The incoming array must have been proven to be an internal typed data
403 // object, where the payload is in the object and we can just offset.
404 ASSERT_EQUAL(array_rep, kTagged);
406 } else {
407 ASSERT_EQUAL(array_rep, kTagged);
408 ASSERT(!IsExternalPayloadClassId(array_cid));
409 switch (array_cid) {
410 case kOneByteStringCid:
411 offset =
413 break;
414 case kTwoByteStringCid:
415 offset =
417 break;
418 default:
419 UNREACHABLE();
420 break;
421 }
422 }
423 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
424 if (start_loc.IsConstant()) {
425 const auto& constant = start_loc.constant();
426 ASSERT(constant.IsInteger());
427 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
428 const intx_t add_value = Utils::AddWithWrapAround<intx_t>(
429 Utils::MulWithWrapAround<intx_t>(start_value, element_size_), offset);
430 __ AddImmediate(payload_reg, array_reg, add_value);
431 return;
432 }
433 const Register start_reg = start_loc.reg();
434 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
435 (unboxed_inputs() ? 0 : kSmiTagShift);
436 __ AddShifted(payload_reg, array_reg, start_reg, shift);
437 __ AddImmediate(payload_reg, offset);
438}
439
440LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
441 bool opt) const {
442 const intptr_t kNumInputs = 1;
443 const intptr_t kNumTemps = 0;
444 LocationSummary* locs = new (zone)
445 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
446 ConstantInstr* constant = value()->definition()->AsConstant();
447 if (constant != nullptr && constant->HasZeroRepresentation()) {
448 locs->set_in(0, Location::Constant(constant));
449 } else if (representation() == kUnboxedDouble) {
451 } else if (representation() == kUnboxedInt64) {
452#if XLEN == 32
455#else
457#endif
458 } else {
459 ASSERT(representation() == kTagged);
461 }
462 return locs;
463}
464
465void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
466 ASSERT(compiler->is_optimizing());
467
468 const Location value = compiler->RebaseIfImprovesAddressing(locs()->in(0));
469 if (value.IsRegister()) {
470 __ StoreToOffset(value.reg(), SP,
471 location().stack_index() * compiler::target::kWordSize);
472#if XLEN == 32
473 } else if (value.IsPairLocation()) {
474 __ StoreToOffset(value.AsPairLocation()->At(1).reg(), SP,
475 location().AsPairLocation()->At(1).stack_index() *
477 __ StoreToOffset(value.AsPairLocation()->At(0).reg(), SP,
478 location().AsPairLocation()->At(0).stack_index() *
480#endif
481 } else if (value.IsConstant()) {
482 if (representation() == kUnboxedDouble) {
483 ASSERT(value.constant_instruction()->HasZeroRepresentation());
485#if XLEN == 32
486 __ StoreToOffset(ZR, SP, offset + compiler::target::kWordSize);
487 __ StoreToOffset(ZR, SP, offset);
488#else
489 __ StoreToOffset(ZR, SP, offset);
490#endif
491 } else if (representation() == kUnboxedInt64) {
492 ASSERT(value.constant_instruction()->HasZeroRepresentation());
493#if XLEN == 32
494 __ StoreToOffset(ZR, SP,
495 location().AsPairLocation()->At(1).stack_index() *
497 __ StoreToOffset(ZR, SP,
498 location().AsPairLocation()->At(0).stack_index() *
500#else
501 __ StoreToOffset(ZR, SP,
502 location().stack_index() * compiler::target::kWordSize);
503#endif
504 } else {
505 ASSERT(representation() == kTagged);
506 const Object& constant = value.constant();
507 Register reg;
508 if (constant.IsNull()) {
509 reg = NULL_REG;
510 } else if (constant.IsSmi() && Smi::Cast(constant).Value() == 0) {
511 reg = ZR;
512 } else {
513 reg = TMP;
514 __ LoadObject(TMP, constant);
515 }
516 __ StoreToOffset(reg, SP,
517 location().stack_index() * compiler::target::kWordSize);
518 }
519 } else if (value.IsFpuRegister()) {
520 __ StoreDToOffset(value.fpu_reg(), SP,
521 location().stack_index() * compiler::target::kWordSize);
522 } else if (value.IsStackSlot()) {
523 const intptr_t value_offset = value.ToStackSlotOffset();
524 __ LoadFromOffset(TMP, value.base_reg(), value_offset);
525 __ StoreToOffset(TMP, SP,
526 location().stack_index() * compiler::target::kWordSize);
527 } else {
528 UNREACHABLE();
529 }
530}
531
532LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
533 bool opt) const {
534 const intptr_t kNumInputs = 1;
535 const intptr_t kNumTemps = 0;
536 LocationSummary* locs = new (zone)
537 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
538 switch (representation()) {
539 case kTagged:
540 locs->set_in(0,
542 break;
543 case kPairOfTagged:
544 locs->set_in(
549 break;
550 case kUnboxedInt64:
551#if XLEN == 32
552 locs->set_in(
557#else
558 locs->set_in(0,
560#endif
561 break;
562 case kUnboxedDouble:
563 locs->set_in(
565 break;
566 default:
567 UNREACHABLE();
568 break;
569 }
570 return locs;
571}
572
573// Attempt optimized compilation at the return instruction instead of at the
574// entry. The entry needs to be patchable, so no inlined objects are allowed in
575// the area that will be overwritten by the patch instructions (a branch macro sequence).
576void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
577 if (locs()->in(0).IsRegister()) {
578 const Register result = locs()->in(0).reg();
580 } else if (locs()->in(0).IsPairLocation()) {
581 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
582 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
585 } else {
586 ASSERT(locs()->in(0).IsFpuRegister());
587 const FpuRegister result = locs()->in(0).fpu_reg();
589 }
590
591 if (compiler->parsed_function().function().IsAsyncFunction() ||
592 compiler->parsed_function().function().IsAsyncGenerator()) {
593 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
594 const Code& stub = GetReturnStub(compiler);
595 compiler->EmitJumpToStub(stub);
596 return;
597 }
598
599 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
600 __ ret();
601 return;
602 }
603
604 const intptr_t fp_sp_dist =
606 compiler->StackSize()) *
607 kWordSize;
608 __ CheckFpSpDist(fp_sp_dist);
609 ASSERT(__ constant_pool_allowed());
610 __ LeaveDartFrame(fp_sp_dist); // Disallows constant pool use.
611 __ ret();
612 // This DartReturnInstr may be emitted out of order by the optimizer. The next
613 // block may be a target expecting a properly set constant pool pointer.
614 __ set_constant_pool_allowed(true);
615}
616
617// Detect pattern when one value is zero and another is a power of 2.
618static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
619 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
620 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
621}
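// Note (added for clarity): EmitNativeCode below materializes the selected Smi
// without branching. In the power-of-two case the 0/1 flag produced by SetIf
// is simply shifted into place (e.g. if_true == 0, if_false == 8: the
// condition is inverted, so the flag is 1 exactly when the false value is
// wanted, and 1 << (3 + kSmiTagSize) == Smi(8)). Otherwise the flag is turned
// into an all-zeros/all-ones mask via `subi result, result, 1`, ANDed with
// Smi(true) - Smi(false), and Smi(false) is added back in.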
622
623LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
624 bool opt) const {
626 return comparison()->locs();
627}
628
629void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
630 const Register result = locs()->out(0).reg();
631
632 Location left = locs()->in(0);
633 Location right = locs()->in(1);
634 ASSERT(!left.IsConstant() || !right.IsConstant());
635
636 // Emit comparison code. This must not overwrite the result register.
637 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
638 // the labels or returning an invalid condition.
639 BranchLabels labels = {nullptr, nullptr, nullptr};
640 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
641 ASSERT(true_condition != kInvalidCondition);
642
643 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
644
645 intptr_t true_value = if_true_;
646 intptr_t false_value = if_false_;
647
648 if (is_power_of_two_kind) {
649 if (true_value == 0) {
650 // We need to have zero in result on true_condition.
651 true_condition = InvertCondition(true_condition);
652 }
653 } else {
654 if (true_value == 0) {
655 // Swap values so that false_value is zero.
656 intptr_t temp = true_value;
657 true_value = false_value;
658 false_value = temp;
659 } else {
660 true_condition = InvertCondition(true_condition);
661 }
662 }
663
664 __ SetIf(true_condition, result);
665
666 if (is_power_of_two_kind) {
667 const intptr_t shift =
668 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
669 __ slli(result, result, shift + kSmiTagSize);
670 } else {
671 __ subi(result, result, 1);
672 const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
673 __ AndImmediate(result, result, val);
674 if (false_value != 0) {
675 __ AddImmediate(result, Smi::RawValue(false_value));
676 }
677 }
678}
679
680LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
681 bool opt) const {
682 const intptr_t kNumInputs = 1;
683 const intptr_t kNumTemps = 0;
684 LocationSummary* summary = new (zone)
685 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
686 summary->set_in(
687 0, Location::RegisterLocation(FLAG_precompiled_mode ? T0 : FUNCTION_REG));
688 return MakeCallSummary(zone, this, summary);
689}
690
691void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
692 // Load arguments descriptor in ARGS_DESC_REG.
693 const intptr_t argument_count = ArgumentCount(); // Includes type args.
694 const Array& arguments_descriptor =
696 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
697
698 if (FLAG_precompiled_mode) {
699 ASSERT(locs()->in(0).reg() == T0);
700 // T0: Closure with a cached entry point.
701 __ LoadFieldFromOffset(A1, T0,
703 } else {
704 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
705 // FUNCTION_REG: Function.
706 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
708 // Closure functions only have one entry point.
709 __ LoadFieldFromOffset(A1, FUNCTION_REG,
711 }
712
713 // FUNCTION_REG: Function (argument to lazy compile stub)
714 // ARGS_DESC_REG: Arguments descriptor array.
715 // A1: instructions entry point.
716 if (!FLAG_precompiled_mode) {
717 // S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
718 __ LoadImmediate(IC_DATA_REG, 0);
719 }
720 __ jalr(A1);
721 compiler->EmitCallsiteMetadata(source(), deopt_id(),
722 UntaggedPcDescriptors::kOther, locs(), env());
723 compiler->EmitDropArguments(argument_count);
724}
725
726LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
727 bool opt) const {
730}
731
732void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
733 const Register result = locs()->out(0).reg();
734 __ LoadFromOffset(result, FP,
736 // TODO(riscv): Using an SP-relative address instead of an FP-relative
737 // address would allow for compressed instructions.
738}
739
740LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
741 bool opt) const {
744}
745
746void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
747 const Register value = locs()->in(0).reg();
748 const Register result = locs()->out(0).reg();
749 ASSERT(result == value); // Assert that register assignment is correct.
750 __ StoreToOffset(value, FP,
752 // TODO(riscv): Using an SP-relative address instead of an FP-relative
753 // address would allow for compressed instructions.
754}
755
756LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
757 bool opt) const {
760}
761
762void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
763 // The register allocator drops constant definitions that have no uses.
764 if (!locs()->out(0).IsInvalid()) {
765 const Register result = locs()->out(0).reg();
766 __ LoadObject(result, value());
767 }
768}
769
770void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
771 const Location& destination,
772 Register tmp,
773 intptr_t pair_index) {
774 if (destination.IsRegister()) {
776 int64_t v;
777 const bool ok = compiler::HasIntegerValue(value_, &v);
779 if (value_.IsSmi() &&
781 // If the value is negative, then the sign bit was preserved during
782 // Smi untagging, which means the resulting value may be unexpected.
783 ASSERT(v >= 0);
784 }
785#if XLEN == 32
786 __ LoadImmediate(destination.reg(), pair_index == 0
788 : Utils::High32Bits(v));
789#else
790 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
791 __ LoadImmediate(destination.reg(), v);
792#endif
793 } else {
794 ASSERT(representation() == kTagged);
795 __ LoadObject(destination.reg(), value_);
796 }
797 } else if (destination.IsFpuRegister()) {
798 const FRegister dst = destination.fpu_reg();
799 if (representation() == kUnboxedFloat) {
800 __ LoadSImmediate(dst, Double::Cast(value_).value());
801 } else {
802 ASSERT(representation() == kUnboxedDouble);
803 __ LoadDImmediate(dst, Double::Cast(value_).value());
804 }
805 } else if (destination.IsDoubleStackSlot()) {
806 const intptr_t dest_offset = destination.ToStackSlotOffset();
807#if XLEN == 32
808 if (false) {
809#else
810 if (Utils::DoublesBitEqual(Double::Cast(value_).value(), 0.0)) {
811#endif
812 __ StoreToOffset(ZR, destination.base_reg(), dest_offset);
813 } else {
814 __ LoadDImmediate(FTMP, Double::Cast(value_).value());
815 __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
816 }
817 } else {
818 ASSERT(destination.IsStackSlot());
819 ASSERT(tmp != kNoRegister);
820 const intptr_t dest_offset = destination.ToStackSlotOffset();
823 int64_t val = Integer::Cast(value_).AsInt64Value();
824#if XLEN == 32
825 val = pair_index == 0 ? Utils::Low32Bits(val) : Utils::High32Bits(val);
826#else
827 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
828#endif
829 if (val == 0) {
830 tmp = ZR;
831 } else {
832 __ LoadImmediate(tmp, val);
833 }
834 } else if (representation() == kUnboxedFloat) {
835 int32_t float_bits =
836 bit_cast<int32_t, float>(Double::Cast(value_).value());
837 __ LoadImmediate(tmp, float_bits);
838 operand_size = compiler::kFourBytes;
839 } else {
840 ASSERT(representation() == kTagged);
841 if (value_.IsNull()) {
842 tmp = NULL_REG;
843 } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
844 tmp = ZR;
845 } else {
846 __ LoadObject(tmp, value_);
847 }
848 }
849 __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
850 }
851}
852
853LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
854 bool opt) const {
855 const bool is_unboxed_int =
859 const intptr_t kNumInputs = 0;
860 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
861 LocationSummary* locs = new (zone)
862 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
863 if (is_unboxed_int) {
865 } else {
866 switch (representation()) {
867 case kUnboxedDouble:
870 break;
871 default:
872 UNREACHABLE();
873 break;
874 }
875 }
876 return locs;
877}
878
879void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
880 if (!locs()->out(0).IsInvalid()) {
881 const Register scratch =
884 : locs()->temp(0).reg();
885 EmitMoveToLocation(compiler, locs()->out(0), scratch);
886 }
887}
888
889LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
890 bool opt) const {
891 auto const dst_type_loc =
893
894 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
895 // since TTS preserves them. So we make this a `kNoCall` summary,
896 // even though most other registers can be modified by the stub. To tell the
897 // register allocator about it, we reserve all the other registers as
898 // temporary registers.
899 // TODO(http://dartbug.com/32788): Simplify this.
900
901 const intptr_t kNonChangeableInputRegs =
903 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
906
907 const intptr_t kNumInputs = 4;
908
909 // We invoke a stub that can potentially clobber any CPU register
910 // but can only clobber FPU registers on the slow path when
911 // entering runtime. The ARM64 ABI only guarantees that the lower
912 // 64 bits of the V registers are preserved, so we block all
913 // of them except for FpuTMP.
914 const intptr_t kCpuRegistersToPreserve =
915 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
916 const intptr_t kFpuRegistersToPreserve =
917 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) & ~(1l << FpuTMP);
918
919 const intptr_t kNumTemps = (Utils::CountOneBits32(kCpuRegistersToPreserve) +
920 Utils::CountOneBits32(kFpuRegistersToPreserve));
921
922 LocationSummary* summary = new (zone) LocationSummary(
923 zone, kNumInputs, kNumTemps, LocationSummary::kCallCalleeSafe);
924 summary->set_in(kInstancePos,
926 summary->set_in(kDstTypePos, dst_type_loc);
927 summary->set_in(
928 kInstantiatorTAVPos,
930 summary->set_in(kFunctionTAVPos, Location::RegisterLocation(
932 summary->set_out(0, Location::SameAsFirstInput());
933
934 // Let's reserve all registers except for the input ones.
935 intptr_t next_temp = 0;
936 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
937 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
938 if (should_preserve) {
939 summary->set_temp(next_temp++,
940 Location::RegisterLocation(static_cast<Register>(i)));
941 }
942 }
943
944 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
945 const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
946 if (should_preserve) {
947 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
948 static_cast<FpuRegister>(i)));
949 }
950 }
951
952 return summary;
953}
954
955void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
956 ASSERT(locs()->always_calls());
957
958 auto object_store = compiler->isolate_group()->object_store();
959 const auto& assert_boolean_stub =
960 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
961
962 compiler::Label done;
965 compiler->GenerateStubCall(source(), assert_boolean_stub,
966 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
967 deopt_id(), env());
968 __ Bind(&done);
969}
970
971static Condition TokenKindToIntCondition(Token::Kind kind) {
972 switch (kind) {
973 case Token::kEQ:
974 return EQ;
975 case Token::kNE:
976 return NE;
977 case Token::kLT:
978 return LT;
979 case Token::kGT:
980 return GT;
981 case Token::kLTE:
982 return LE;
983 case Token::kGTE:
984 return GE;
985 default:
986 UNREACHABLE();
987 return VS;
988 }
989}
990
991static Condition FlipCondition(Condition condition) {
992 switch (condition) {
993 case EQ:
994 return EQ;
995 case NE:
996 return NE;
997 case LT:
998 return GT;
999 case LE:
1000 return GE;
1001 case GT:
1002 return LT;
1003 case GE:
1004 return LE;
1005 case CC:
1006 return HI;
1007 case LS:
1008 return CS;
1009 case HI:
1010 return CC;
1011 case CS:
1012 return LS;
1013 default:
1014 UNREACHABLE();
1015 return EQ;
1016 }
1017}
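// Note (added for clarity): FlipCondition is used when the operands of a
// comparison are swapped (e.g. to move a constant to the right-hand side), so
// a < b becomes b > a. It is not InvertCondition, which negates the result
// (a < b becomes a >= b).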
1018
1019static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
1020 Condition true_condition,
1021 BranchLabels labels) {
1022 if (labels.fall_through == labels.false_label) {
1023 // If the next block is the false successor we will fall through to it.
1024 __ BranchIf(true_condition, labels.true_label);
1025 } else {
1026 // If the next block is not the false successor we will branch to it.
1027 Condition false_condition = InvertCondition(true_condition);
1028 __ BranchIf(false_condition, labels.false_label);
1029
1030 // Fall through or jump to the true successor.
1031 if (labels.fall_through != labels.true_label) {
1032 __ j(labels.true_label);
1033 }
1034 }
1035}
1036
1037static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1038 LocationSummary* locs,
1039 Token::Kind kind,
1040 BranchLabels labels) {
1041 Location left = locs->in(0);
1042 Location right = locs->in(1);
1043 ASSERT(!left.IsConstant() || !right.IsConstant());
1044
1045 Condition true_condition = TokenKindToIntCondition(kind);
1046 if (left.IsConstant() || right.IsConstant()) {
1047 // Ensure constant is on the right.
1048 if (left.IsConstant()) {
1049 Location tmp = right;
1050 right = left;
1051 left = tmp;
1052 true_condition = FlipCondition(true_condition);
1053 }
1054 __ CompareObject(left.reg(), right.constant());
1055 } else {
1056 __ CompareObjectRegisters(left.reg(), right.reg());
1057 }
1058 return true_condition;
1059}
1060
1061static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
1062 LocationSummary* locs,
1063 Token::Kind kind,
1064 BranchLabels labels) {
1065 Location left = locs->in(0);
1066 Location right = locs->in(1);
1067 ASSERT(!left.IsConstant() || !right.IsConstant());
1068
1069 Condition true_condition = TokenKindToIntCondition(kind);
1070 if (left.IsConstant() || right.IsConstant()) {
1071 // Ensure constant is on the right.
1072 if (left.IsConstant()) {
1073 Location tmp = right;
1074 right = left;
1075 left = tmp;
1076 true_condition = FlipCondition(true_condition);
1077 }
1078 __ CompareImmediate(
1079 left.reg(),
1080 static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
1081 } else {
1082 __ CompareRegisters(left.reg(), right.reg());
1083 }
1084 return true_condition;
1085}
1086
1087#if XLEN == 32
1088static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
1089 LocationSummary* locs,
1090 Token::Kind kind) {
1092 PairLocation* left_pair = locs->in(0).AsPairLocation();
1093 Register left_lo = left_pair->At(0).reg();
1094 Register left_hi = left_pair->At(1).reg();
1095 PairLocation* right_pair = locs->in(1).AsPairLocation();
1096 Register right_lo = right_pair->At(0).reg();
1097 Register right_hi = right_pair->At(1).reg();
1098
1099 __ xor_(TMP, left_lo, right_lo);
1100 __ xor_(TMP2, left_hi, right_hi);
1101 __ or_(TMP, TMP, TMP2);
1102 __ CompareImmediate(TMP, 0);
1103 if (kind == Token::kEQ) {
1104 return EQUAL;
1105 } else if (kind == Token::kNE) {
1106 return NOT_EQUAL;
1107 }
1108 UNREACHABLE();
1109}
1110
1111static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
1112 LocationSummary* locs,
1113 Token::Kind kind,
1114 BranchLabels labels) {
1115 PairLocation* left_pair = locs->in(0).AsPairLocation();
1116 Register left_lo = left_pair->At(0).reg();
1117 Register left_hi = left_pair->At(1).reg();
1118 PairLocation* right_pair = locs->in(1).AsPairLocation();
1119 Register right_lo = right_pair->At(0).reg();
1120 Register right_hi = right_pair->At(1).reg();
1121
1122 switch (kind) {
1123 case Token::kEQ:
1124 __ bne(left_lo, right_lo, labels.false_label);
1125 __ CompareRegisters(left_hi, right_hi);
1126 return EQUAL;
1127 case Token::kNE:
1128 __ bne(left_lo, right_lo, labels.true_label);
1129 __ CompareRegisters(left_hi, right_hi);
1130 return NOT_EQUAL;
1131 case Token::kLT:
1132 __ blt(left_hi, right_hi, labels.true_label);
1133 __ bgt(left_hi, right_hi, labels.false_label);
1134 __ CompareRegisters(left_lo, right_lo);
1135 return UNSIGNED_LESS;
1136 case Token::kGT:
1137 __ bgt(left_hi, right_hi, labels.true_label);
1138 __ blt(left_hi, right_hi, labels.false_label);
1139 __ CompareRegisters(left_lo, right_lo);
1140 return UNSIGNED_GREATER;
1141 case Token::kLTE:
1142 __ blt(left_hi, right_hi, labels.true_label);
1143 __ bgt(left_hi, right_hi, labels.false_label);
1144 __ CompareRegisters(left_lo, right_lo);
1145 return UNSIGNED_LESS_EQUAL;
1146 case Token::kGTE:
1147 __ bgt(left_hi, right_hi, labels.true_label);
1148 __ blt(left_hi, right_hi, labels.false_label);
1149 __ CompareRegisters(left_lo, right_lo);
1150 return UNSIGNED_GREATER_EQUAL;
1151 default:
1152 UNREACHABLE();
1153 }
1154}
1155#else
1156// Similar to ComparisonInstr::EmitComparisonCode, may either:
1157// - emit comparison code and return a valid condition in which case the
1158// caller is expected to emit a branch to the true label based on that
1159// condition (or a branch to the false label on the opposite condition).
1160// - emit comparison code with a branch directly to the labels and return
1161// kInvalidCondition.
1162static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
1163 LocationSummary* locs,
1164 Token::Kind kind,
1165 BranchLabels labels) {
1166 Location left = locs->in(0);
1167 Location right = locs->in(1);
1168 ASSERT(!left.IsConstant() || !right.IsConstant());
1169
1170 Condition true_condition = TokenKindToIntCondition(kind);
1171 if (left.IsConstant() || right.IsConstant()) {
1172 // Ensure constant is on the right.
1173 ConstantInstr* constant = nullptr;
1174 if (left.IsConstant()) {
1175 constant = left.constant_instruction();
1176 Location tmp = right;
1177 right = left;
1178 left = tmp;
1179 true_condition = FlipCondition(true_condition);
1180 } else {
1181 constant = right.constant_instruction();
1182 }
1183
1184 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1185 int64_t value;
1186 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1188 __ CompareImmediate(left.reg(), value);
1189 } else {
1190 UNREACHABLE();
1191 }
1192 } else {
1193 __ CompareRegisters(left.reg(), right.reg());
1194 }
1195 return true_condition;
1196}
1197#endif
1198
1199static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1200 LocationSummary* locs,
1201 Token::Kind kind,
1202 BranchLabels labels) {
1203 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1204 const Register left = locs->in(0).reg();
1205 const Register right = locs->in(1).reg();
1206 const Condition true_condition = TokenKindToIntCondition(kind);
1207 compiler::Label* equal_result =
1208 (true_condition == EQ) ? labels.true_label : labels.false_label;
1209 compiler::Label* not_equal_result =
1210 (true_condition == EQ) ? labels.false_label : labels.true_label;
1211
1212 // Check if operands have the same value. If they don't, then they could
1213 // be equal only if both of them are Mints with the same value.
1214 __ CompareObjectRegisters(left, right);
1215 __ BranchIf(EQ, equal_result);
1216 __ and_(TMP, left, right);
1217 __ BranchIfSmi(TMP, not_equal_result);
1218 __ CompareClassId(left, kMintCid, TMP);
1219 __ BranchIf(NE, not_equal_result);
1220 __ CompareClassId(right, kMintCid, TMP);
1221 __ BranchIf(NE, not_equal_result);
1222#if XLEN == 32
1223 __ LoadFieldFromOffset(TMP, left, compiler::target::Mint::value_offset());
1224 __ LoadFieldFromOffset(TMP2, right, compiler::target::Mint::value_offset());
1225 __ bne(TMP, TMP2, not_equal_result);
1226 __ LoadFieldFromOffset(
1227 TMP, left,
1229 __ LoadFieldFromOffset(
1230 TMP2, right,
1232#else
1233 __ LoadFieldFromOffset(TMP, left, Mint::value_offset());
1234 __ LoadFieldFromOffset(TMP2, right, Mint::value_offset());
1235#endif
1236 __ CompareRegisters(TMP, TMP2);
1237 return true_condition;
1238}
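// Note (added for clarity): the null-aware path above first compares the
// (possibly null) references themselves; only when they differ does it check
// whether both operands are Mint boxes holding the same 64-bit value. If
// either operand is a Smi at that point the values cannot be equal, since
// Smi-range integers are never canonicalized as Mints, hence the early
// BranchIfSmi to the not-equal label.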
1239
1240LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1241 bool opt) const {
1242 const intptr_t kNumInputs = 2;
1243 const intptr_t kNumTemps = 0;
1244 if (is_null_aware()) {
1245 LocationSummary* locs = new (zone)
1246 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1250 return locs;
1251 }
1252#if XLEN == 32
1253 if (operation_cid() == kMintCid) {
1254 LocationSummary* locs = new (zone)
1255 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1261 return locs;
1262 }
1263#endif
1264 if (operation_cid() == kDoubleCid) {
1265 LocationSummary* locs = new (zone)
1266 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1270 return locs;
1271 }
1272 if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
1273 operation_cid() == kIntegerCid) {
1274 LocationSummary* locs = new (zone)
1275 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1276 if (is_null_aware()) {
1279 } else {
1281 // Only one input can be a constant operand. The case of two constant
1282 // operands should be handled by constant propagation.
1283 // Only right can be a stack slot.
1284 locs->set_in(1, locs->in(0).IsConstant()
1287 }
1289 return locs;
1290 }
1291 UNREACHABLE();
1292 return nullptr;
1293}
1294
1295static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1296 LocationSummary* locs,
1297 BranchLabels labels,
1298 Token::Kind kind) {
1299 const FRegister left = locs->in(0).fpu_reg();
1300 const FRegister right = locs->in(1).fpu_reg();
1301
1302 switch (kind) {
1303 case Token::kEQ:
1304 __ feqd(TMP, left, right);
1305 __ CompareImmediate(TMP, 0);
1306 return NE;
1307 case Token::kNE:
1308 __ feqd(TMP, left, right);
1309 __ CompareImmediate(TMP, 0);
1310 return EQ;
1311 case Token::kLT:
1312 __ fltd(TMP, left, right);
1313 __ CompareImmediate(TMP, 0);
1314 return NE;
1315 case Token::kGT:
1316 __ fltd(TMP, right, left);
1317 __ CompareImmediate(TMP, 0);
1318 return NE;
1319 case Token::kLTE:
1320 __ fled(TMP, left, right);
1321 __ CompareImmediate(TMP, 0);
1322 return NE;
1323 case Token::kGTE:
1324 __ fled(TMP, right, left);
1325 __ CompareImmediate(TMP, 0);
1326 return NE;
1327 default:
1328 UNREACHABLE();
1329 }
1330}
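// Note (added for clarity): feqd, fltd and fled all write 0 when either
// operand is NaN, so the conditions returned above give the expected unordered
// behaviour: every comparison involving NaN is false except Token::kNE, whose
// EQ-on-zero condition makes NaN != NaN evaluate to true.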
1331
1332Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1333 BranchLabels labels) {
1334 if (is_null_aware()) {
1335 ASSERT(operation_cid() == kMintCid);
1336 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1337 }
1338 if (operation_cid() == kSmiCid) {
1339 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1340 } else if (operation_cid() == kIntegerCid) {
1341 return EmitWordComparisonOp(compiler, locs(), kind(), labels);
1342 } else if (operation_cid() == kMintCid) {
1343#if XLEN == 32
1344 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
1345#else
1346 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1347#endif
1348 } else {
1349 ASSERT(operation_cid() == kDoubleCid);
1350 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1351 }
1352}
1353
1354LocationSummary* TestIntInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1355 const intptr_t kNumInputs = 2;
1356 const intptr_t kNumTemps = 0;
1357 LocationSummary* locs = new (zone)
1358 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1360 // Only one input can be a constant operand. The case of two constant
1361 // operands should be handled by constant propagation.
1364 return locs;
1365}
1366
1367Condition TestIntInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1368 BranchLabels labels) {
1369 const Register left = locs()->in(0).reg();
1370 Location right = locs()->in(1);
1371 if (right.IsConstant()) {
1372 __ TestImmediate(left, ComputeImmediateMask());
1373 } else {
1374 __ TestRegisters(left, right.reg());
1375 }
1376 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1377 return true_condition;
1378}
1379
1380LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1381 bool opt) const {
1382 const intptr_t kNumInputs = 1;
1383 const intptr_t kNumTemps = 1;
1384 LocationSummary* locs = new (zone)
1385 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1389 return locs;
1390}
1391
1392Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1393 BranchLabels labels) {
1394 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1395 const Register val_reg = locs()->in(0).reg();
1396 const Register cid_reg = locs()->temp(0).reg();
1397
1398 compiler::Label* deopt =
1400 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1401 : nullptr;
1402
1403 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1404 const ZoneGrowableArray<intptr_t>& data = cid_results();
1405 ASSERT(data[0] == kSmiCid);
1406 bool result = data[1] == true_result;
1407 __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
1408 __ LoadClassId(cid_reg, val_reg);
1409
1410 for (intptr_t i = 2; i < data.length(); i += 2) {
1411 const intptr_t test_cid = data[i];
1412 ASSERT(test_cid != kSmiCid);
1413 result = data[i + 1] == true_result;
1414 __ CompareImmediate(cid_reg, test_cid);
1415 __ BranchIf(EQ, result ? labels.true_label : labels.false_label);
1416 }
1417 // No match found, deoptimize or default action.
1418 if (deopt == nullptr) {
1419 // If the cid is not in the list, jump to the opposite label from the cids
1420 // that are in the list. These must be all the same (see asserts in the
1421 // constructor).
1422 compiler::Label* target = result ? labels.false_label : labels.true_label;
1423 if (target != labels.fall_through) {
1424 __ j(target);
1425 }
1426 } else {
1427 __ j(deopt);
1428 }
1429 // Dummy result as this method already did the jump, there's no need
1430 // for the caller to branch on a condition.
1431 return kInvalidCondition;
1432}
1433
1434LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1435 bool opt) const {
1436 const intptr_t kNumInputs = 2;
1437 const intptr_t kNumTemps = 0;
1438#if XLEN == 32
1439 if (operation_cid() == kMintCid) {
1440 LocationSummary* locs = new (zone)
1441 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1447 return locs;
1448 }
1449#endif
1450 if (operation_cid() == kDoubleCid) {
1451 LocationSummary* summary = new (zone)
1452 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1454 summary->set_in(1, Location::RequiresFpuRegister());
1455 summary->set_out(0, Location::RequiresRegister());
1456 return summary;
1457 }
1458 if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
1459 LocationSummary* summary = new (zone)
1460 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1461 summary->set_in(0, LocationRegisterOrConstant(left()));
1462 // Only one input can be a constant operand. The case of two constant
1463 // operands should be handled by constant propagation.
1464 summary->set_in(1, summary->in(0).IsConstant()
1467 summary->set_out(0, Location::RequiresRegister());
1468 return summary;
1469 }
1470
1471 UNREACHABLE();
1472 return nullptr;
1473}
1474
1475Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1476 BranchLabels labels) {
1477 if (operation_cid() == kSmiCid) {
1478 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1479 } else if (operation_cid() == kMintCid) {
1480#if XLEN == 32
1481 return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
1482#else
1483 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1484#endif
1485 } else {
1486 ASSERT(operation_cid() == kDoubleCid);
1487 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1488 }
1489}
1490
1491void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1492 SetupNative();
1493 const Register result = locs()->out(0).reg();
1494
1495 // Pass a pointer to the first argument in T2.
1496 __ AddImmediate(T2, SP, (ArgumentCount() - 1) * kWordSize);
1497
1498 // Compute the effective address. When running under the simulator,
1499 // this is a redirection address that forces the simulator to call
1500 // into the runtime system.
1501 uword entry;
1502 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1503 const Code* stub;
1504 if (link_lazily()) {
1505 stub = &StubCode::CallBootstrapNative();
1507 } else {
1508 entry = reinterpret_cast<uword>(native_c_function());
1509 if (is_bootstrap_native()) {
1510 stub = &StubCode::CallBootstrapNative();
1511 } else if (is_auto_scope()) {
1512 stub = &StubCode::CallAutoScopeNative();
1513 } else {
1514 stub = &StubCode::CallNoScopeNative();
1515 }
1516 }
1517 __ LoadImmediate(T1, argc_tag);
1518 compiler::ExternalLabel label(entry);
1519 __ LoadNativeEntry(T5, &label,
1520 link_lazily() ? ObjectPool::Patchability::kPatchable
1521 : ObjectPool::Patchability::kNotPatchable);
1522 if (link_lazily()) {
1523 compiler->GeneratePatchableCall(
1524 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1526 } else {
1527 // We can never lazy-deopt here because natives are never optimized.
1528 ASSERT(!compiler->is_optimizing());
1529 compiler->GenerateNonLazyDeoptableStubCall(
1530 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1532 }
1533 __ lx(result, compiler::Address(SP, 0));
1534 compiler->EmitDropArguments(ArgumentCount());
1535}
1536
1537#define R(r) (1 << r)
1538
1539LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1540 bool is_optimizing) const {
1541 return MakeLocationSummaryInternal(
1542 zone, is_optimizing,
1545}
1546
1547#undef R
1548
1549void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1550 const Register target = locs()->in(TargetAddressIndex()).reg();
1551
1552 // The temps are indexed according to their register number.
1553 const Register temp1 = locs()->temp(0).reg();
1554 // For regular calls, this holds the FP for rebasing the original locations
1555 // during EmitParamMoves.
1556 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1557 // the call.
1558 const Register saved_fp_or_sp = locs()->temp(1).reg();
1559 const Register temp2 = locs()->temp(2).reg();
1560
1561 ASSERT(temp1 != target);
1562 ASSERT(temp2 != target);
1563 ASSERT(temp1 != saved_fp_or_sp);
1564 ASSERT(temp2 != saved_fp_or_sp);
1565 ASSERT(saved_fp_or_sp != target);
1566
1567 // Ensure these are callee-saved registers and are preserved across the call.
1568 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1569 // Other temps don't need to be preserved.
1570
1571 __ mv(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
1572
1573 if (!is_leaf_) {
1574 // We need to create a dummy "exit frame".
1575 // This is EnterDartFrame without accessing A2=CODE_REG or A5=PP.
1576 if (FLAG_precompiled_mode) {
1577 __ subi(SP, SP, 2 * compiler::target::kWordSize);
1578 __ sx(RA, compiler::Address(SP, 1 * compiler::target::kWordSize));
1579 __ sx(FP, compiler::Address(SP, 0 * compiler::target::kWordSize));
1580 __ addi(FP, SP, 2 * compiler::target::kWordSize);
1581 } else {
1582 __ subi(SP, SP, 4 * compiler::target::kWordSize);
1583 __ sx(RA, compiler::Address(SP, 3 * compiler::target::kWordSize));
1584 __ sx(FP, compiler::Address(SP, 2 * compiler::target::kWordSize));
1585 __ sx(NULL_REG, compiler::Address(SP, 1 * compiler::target::kWordSize));
1586 __ sx(NULL_REG, compiler::Address(SP, 0 * compiler::target::kWordSize));
1587 __ addi(FP, SP, 4 * compiler::target::kWordSize);
1588 }
1589 }
1590
1591 // Reserve space for the arguments that go on the stack (if any), then align.
1592 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
1593 __ ReserveAlignedFrameSpace(stack_space);
1595 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs, kAbiVolatileFpuRegs);
1596 __ mv(temp1, SP);
1597 __ PushRegisters(kVolatileRegisterSet);
1598
1599 // Outgoing arguments passed on the stack to the foreign function.
1600 __ mv(A0, temp1);
1601 __ LoadImmediate(A1, stack_space);
1602 __ CallCFunction(
1603 compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
1604
1605 // Incoming Dart arguments to this trampoline are potentially used as local
1606 // handles.
1607 __ mv(A0, is_leaf_ ? FPREG : saved_fp_or_sp);
1608 __ LoadImmediate(A1, (kParamEndSlotFromFp + InputCount()) * kWordSize);
1609 __ CallCFunction(
1610 compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
1611
1612 // Outgoing arguments passed by register to the foreign function.
1613 __ LoadImmediate(A0, InputCount());
1614 __ CallCFunction(compiler::Address(
1615 THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
1616
1617 __ PopRegisters(kVolatileRegisterSet);
1618 }
1619
1620 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
1621
1623 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1624 }
1625
1626 if (is_leaf_) {
1627#if !defined(PRODUCT)
1628 // Set the thread object's top_exit_frame_info and VMTag to enable the
1629 // profiler to determine that the thread is no longer executing Dart code.
1630 __ StoreToOffset(FPREG, THR,
1633#endif
1634
1635 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1636 __ mv(A4, T4);
1637 __ mv(A5, T5);
1638 __ jalr(target);
1639
1640#if !defined(PRODUCT)
1641 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1642 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1643 __ StoreToOffset(ZR, THR,
1645#endif
1646 } else {
1647 // We need to copy a dummy return address up into the dummy stack frame so
1648 // the stack walker will know which safepoint to use.
1649 //
1650 // AUIPC loads relative to itself.
1651 compiler->EmitCallsiteMetadata(source(), deopt_id(),
1652 UntaggedPcDescriptors::Kind::kOther, locs(),
1653 env());
1654 __ auipc(temp1, 0);
1655 __ StoreToOffset(temp1, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
1656
1657 if (CanExecuteGeneratedCodeInSafepoint()) {
1658 // Update information in the thread object and enter a safepoint.
1659 __ LoadImmediate(temp1, compiler::target::Thread::exit_through_ffi());
1660 __ TransitionGeneratedToNative(target, FPREG, temp1,
1661 /*enter_safepoint=*/true);
1662
1663 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1664 __ mv(A4, T4);
1665 __ mv(A5, T5);
1666 __ jalr(target);
1667
1668 // Update information in the thread object and leave the safepoint.
1669 __ TransitionNativeToGenerated(temp1, /*leave_safepoint=*/true);
1670 } else {
1671 // We cannot trust that this code will be executable within a safepoint.
1672 // Therefore we delegate the responsibility of entering/exiting the
1673 // safepoint to a stub which is in the VM isolate's heap, which will never
1674 // lose execute permission.
1675 __ lx(temp1,
1676 compiler::Address(
1677 THR, compiler::target::Thread::
1678 call_native_through_safepoint_entry_point_offset()));
1679
1680 // Calls T0 and clobbers R19 (along with volatile registers).
1681 ASSERT(target == T0);
1682 __ mv(A3, T3); // TODO(rmacnak): Only when needed.
1683 __ mv(A4, T4);
1684 __ mv(A5, T5);
1685 __ jalr(temp1);
1686 }
1687
1688 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1689 __ Comment("Check Dart_Handle for Error.");
1692 compiler::Label not_error;
1693 __ LoadFromOffset(temp1, CallingConventions::kReturnReg,
1695 __ BranchIfSmi(temp1, &not_error);
1696 __ LoadClassId(temp1, temp1);
1697 __ RangeCheck(temp1, temp2, kFirstErrorCid, kLastErrorCid,
1699
1700 // Slow path, use the stub to propagate error, to save on code-size.
1701 __ Comment("Slow path: call Dart_PropagateError through stub.");
1704 __ lx(temp1,
1705 compiler::Address(
1706 THR, compiler::target::Thread::
1707 call_native_through_safepoint_entry_point_offset()));
1708 __ lx(target, compiler::Address(
1709 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1710 __ jalr(temp1);
1711#if defined(DEBUG)
1712 // We should never return with normal control flow from this.
1713 __ ebreak();
1714#endif
1715
1716 __ Bind(&not_error);
1717 }
1718
1719 // Refresh pinned registers values (inc. write barrier mask and null
1720 // object).
1721 __ RestorePinnedRegisters();
1722 }
1723
1724 EmitReturnMoves(compiler, temp1, temp2);
1725
1726 if (is_leaf_) {
1727 // Restore the pre-aligned SP.
1728 __ mv(SPREG, saved_fp_or_sp);
1729 } else {
1730 __ LeaveDartFrame();
1731
1732 // Restore the global object pool after returning from runtime (old space is
1733 // moving, so the GOP could have been relocated).
1734 if (FLAG_precompiled_mode) {
1735 __ SetupGlobalPoolAndDispatchTable();
1736 }
1737 }
1738
1739 // PP is a volatile register, so it must be restored even for leaf FFI calls.
1740 __ RestorePoolPointer();
1741 __ set_constant_pool_allowed(true);
1742}
1743
1744// Keep in sync with NativeEntryInstr::EmitNativeCode.
1745void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1746 EmitReturnMoves(compiler);
1747
1748 // Restore the tag while the profiler's stack walker can still see the
1749 // InvokeDartCode return address.
1752
1753 __ LeaveDartFrame();
1754
1755 // The dummy return address is in RA, no need to pop it as on Intel.
1756
1757 // These can be anything besides the return registers (A0, A1) and THR (S1).
1758 const Register vm_tag_reg = T2;
1759 const Register old_exit_frame_reg = T3;
1760 const Register old_exit_through_ffi_reg = T4;
1761 const Register tmp = T5;
1762
1763 __ PopRegisterPair(old_exit_frame_reg, old_exit_through_ffi_reg);
1764
1765 // Restore top_resource.
1766 __ PopRegisterPair(tmp, vm_tag_reg);
1768
1769 // Reset the exit frame info to old_exit_frame_reg *before* entering the
1770 // safepoint. The trampoline that called us will enter the safepoint on our
1771 // behalf.
1772 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1773 old_exit_through_ffi_reg,
1774 /*enter_safepoint=*/false);
1775
1776 __ PopNativeCalleeSavedRegisters();
1777
1778 // Leave the entry frame.
1779 __ LeaveFrame();
1780
1781 // Leave the dummy frame holding the pushed arguments.
1782 __ LeaveFrame();
1783
1784 __ Ret();
1785
1786 // For following blocks.
1787 __ set_constant_pool_allowed(true);
1788}
1789
1790// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1791void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1792 // Constant pool cannot be used until we enter the actual Dart frame.
1793 __ set_constant_pool_allowed(false);
1794
1795 __ Bind(compiler->GetJumpLabel(this));
1796
1797 // Create a dummy frame holding the pushed arguments. This simplifies
1798 // NativeReturnInstr::EmitNativeCode.
1799 __ EnterFrame(0);
1800
1801 // Save the argument registers, in reverse order.
1802 __ mv(T3, A3); // TODO(rmacnak): Only when needed.
1803 __ mv(T4, A4);
1804 __ mv(T5, A5);
1805 SaveArguments(compiler);
1806
1807 // Enter the entry frame. NativeParameterInstr expects this frame to have size
1808 // -exit_link_slot_from_entry_fp, verified below.
1809 __ EnterFrame(0);
1810
1811 // Save a space for the code object.
1812 __ PushImmediate(0);
1813
1814 __ PushNativeCalleeSavedRegisters();
1815
1816#if defined(USING_SHADOW_CALL_STACK)
1817#error Unimplemented
1818#endif
1819
1820 // Refresh the pinned register values (including the write barrier mask and null object).
1821 __ RestorePinnedRegisters();
1822
1823 // Save the current VMTag on the stack.
1825 // Save the top resource.
1827 __ PushRegisterPair(A0, TMP);
1828 ASSERT(kVMTagOffsetFromFp == 5 * compiler::target::kWordSize);
1829
1831
1832 __ LoadFromOffset(A0, THR,
1834 __ PushRegister(A0);
1835
1836 // Save the top exit frame info. We don't set it to 0 yet:
1837 // TransitionNativeToGenerated will handle that.
1838 __ LoadFromOffset(A0, THR,
1840 __ PushRegister(A0);
1841
1842 // In debug mode, verify that we've pushed the top exit frame info at the
1843 // correct offset from FP.
1844 __ EmitEntryFrameVerification();
1845
1846 // The callback trampoline (caller) has already left the safepoint for us.
1847 __ TransitionNativeToGenerated(A0, /*exit_safepoint=*/false,
1848 /*ignore_unwind_in_progress=*/false,
1849 /*set_tag=*/false);
1850
1851 // Now that the safepoint has ended, we can touch Dart objects without
1852 // handles.
1853
1854 // Load the code object.
1855 const Function& target_function = marshaller_.dart_signature();
1856 const intptr_t callback_id = target_function.FfiCallbackId();
1858 __ LoadFromOffset(A0, A0,
1860 __ LoadFromOffset(A0, A0,
1862 __ LoadCompressedFieldFromOffset(
1864 __ LoadCompressedFieldFromOffset(
1865 CODE_REG, A0,
1868
1869 // Put the code object in the reserved slot.
1870 __ StoreToOffset(CODE_REG, FPREG,
1872 if (FLAG_precompiled_mode) {
1873 __ SetupGlobalPoolAndDispatchTable();
1874 } else {
1875 // We now load the pool pointer (PP) with a GC-safe value as we are about to
1876 // invoke Dart code. We don't need a real object pool here.
1877 // Smi zero does not work because PP is assumed to be untagged.
1878 __ LoadObject(PP, compiler::NullObject());
1879 }
1880
1881 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1882 __ mv(ARGS_DESC_REG, ZR);
1883
1884 // Load a dummy return address which suggests that we are inside of
1885 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1886 __ LoadFromOffset(RA, THR,
1888 __ LoadFieldFromOffset(RA, RA, compiler::target::Code::entry_point_offset());
1889
1891
1892 // Delay setting the tag until the profiler's stack walker can see the
1893 // InvokeDartCode return address.
1896}
1897
1898#define R(r) (1 << r)
1899
1901 Zone* zone,
1902 bool is_optimizing) const {
1905 static_assert(saved_fp < temp0, "Unexpected ordering of registers in set.");
1906 LocationSummary* summary =
1907 MakeLocationSummaryInternal(zone, (R(saved_fp) | R(temp0)));
1908 return summary;
1909}
1910
1911#undef R
1912
1913void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1914 const Register saved_fp = locs()->temp(0).reg();
1915 const Register temp0 = locs()->temp(1).reg();
1916
1917 __ MoveRegister(saved_fp, FPREG);
1918
1919 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1920 __ EnterCFrame(frame_space);
1921
1922 EmitParamMoves(compiler, saved_fp, temp0);
1923
1924 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1925 // I.e., no use of A3/A4/A5.
1926 RELEASE_ASSERT(native_calling_convention_.argument_locations().length() < 4);
1927 __ sx(target_address,
1928 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1929 __ CallCFunction(target_address);
1930 __ li(temp0, VMTag::kDartTagId);
1931 __ sx(temp0,
1932 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1933
1934 __ LeaveCFrame(); // Also restores PP=A5.
1935}
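// Note on the tag protocol above: before the C call the VM tag is set to the
// target address, and after the call it is reset to VMTag::kDartTagId, so a
// profiler sample taken during the leaf call can be attributed to the native
// target. A rough sketch of the effect:
//
//   THR->vm_tag = target_address;
//   target_address(...);              // CallCFunction
//   THR->vm_tag = VMTag::kDartTagId;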
1936
1938 Zone* zone,
1939 bool opt) const {
1940 const intptr_t kNumInputs = 1;
1941 // TODO(fschneider): Allow immediate operands for the char code.
1942 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1944}
1945
1947 FlowGraphCompiler* compiler) {
1948 ASSERT(compiler->is_optimizing());
1949 const Register char_code = locs()->in(0).reg();
1950 const Register result = locs()->out(0).reg();
1951 __ lx(result,
1952 compiler::Address(THR, Thread::predefined_symbols_address_offset()));
1953 __ AddShifted(TMP, result, char_code, kWordSizeLog2 - kSmiTagSize);
1954 __ lx(result,
1955 compiler::Address(TMP, Symbols::kNullCharCodeSymbolOffset * kWordSize));
1956}
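// Note: the lookup above indexes the thread's table of predefined one-character
// symbols, scaling the Smi-tagged char code to a word index, roughly:
//
//   result = predefined_symbols[kNullCharCodeSymbolOffset + char_code]
//
// so a canonical one-character String is produced without a runtime call.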
1957
1958LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1959 bool opt) const {
1960 const intptr_t kNumInputs = 1;
1961 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1963}
1964
1965void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1966 ASSERT(cid_ == kOneByteStringCid);
1967 Register str = locs()->in(0).reg();
1968 Register result = locs()->out(0).reg();
1969 compiler::Label is_one, done;
1970 __ LoadCompressedSmi(result,
1971 compiler::FieldAddress(str, String::length_offset()));
1972 __ CompareImmediate(result, Smi::RawValue(1));
1973 __ BranchIf(EQUAL, &is_one, compiler::Assembler::kNearJump);
1974 __ li(result, Smi::RawValue(-1));
1976 __ Bind(&is_one);
1977 __ lbu(result, compiler::FieldAddress(str, OneByteString::data_offset()));
1978 __ SmiTag(result);
1979 __ Bind(&done);
1980}
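// Note: in Dart terms the code above computes, for a one-byte string, roughly
//
//   str.length == 1 ? str.codeUnitAt(0) : -1
//
// with the result returned as a tagged Smi.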
1981
1982LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1983 bool opt) const {
1984 const intptr_t kNumInputs = 5;
1985 const intptr_t kNumTemps = 0;
1986 LocationSummary* summary = new (zone)
1987 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1988 summary->set_in(0, Location::Any()); // decoder
1989 summary->set_in(1, Location::WritableRegister()); // bytes
1990 summary->set_in(2, Location::WritableRegister()); // start
1991 summary->set_in(3, Location::WritableRegister()); // end
1992 summary->set_in(4, Location::WritableRegister()); // table
1993 summary->set_out(0, Location::RequiresRegister());
1994 return summary;
1995}
1996
1997void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1998 const Register bytes_reg = locs()->in(1).reg();
1999 const Register start_reg = locs()->in(2).reg();
2000 const Register end_reg = locs()->in(3).reg();
2001 const Register table_reg = locs()->in(4).reg();
2002 const Register size_reg = locs()->out(0).reg();
2003
2004 const Register bytes_ptr_reg = start_reg;
2005 const Register bytes_end_reg = end_reg;
2006 const Register flags_reg = bytes_reg;
2007 const Register temp_reg = TMP;
2008 const Register decoder_temp_reg = start_reg;
2009 const Register flags_temp_reg = end_reg;
2010
2011 const intptr_t kSizeMask = 0x03;
2012 const intptr_t kFlagsMask = 0x3C;
2013
2014 compiler::Label loop, loop_in;
2015
2016 // Address of input bytes.
2017 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
2018
2019 // Table.
2020 __ AddImmediate(
2021 table_reg, table_reg,
2023
2024 // Pointers to start and end.
2025 __ add(bytes_ptr_reg, bytes_reg, start_reg);
2026 __ add(bytes_end_reg, bytes_reg, end_reg);
2027
2028 // Initialize size and flags.
2029 __ li(size_reg, 0);
2030 __ li(flags_reg, 0);
2031
2033 __ Bind(&loop);
2034
2035 // Read byte and increment pointer.
2036 __ lbu(temp_reg, compiler::Address(bytes_ptr_reg, 0));
2037 __ addi(bytes_ptr_reg, bytes_ptr_reg, 1);
2038
2039 // Update size and flags based on byte value.
2040 __ add(temp_reg, table_reg, temp_reg);
2041 __ lbu(temp_reg, compiler::Address(temp_reg));
2042 __ or_(flags_reg, flags_reg, temp_reg);
2043 __ andi(temp_reg, temp_reg, kSizeMask);
2044 __ add(size_reg, size_reg, temp_reg);
2045
2046 // Stop if end is reached.
2047 __ Bind(&loop_in);
2048 __ bltu(bytes_ptr_reg, bytes_end_reg, &loop, compiler::Assembler::kNearJump);
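 // A rough sketch of the loop emitted above (names are illustrative):
 //
 //   while (bytes_ptr < bytes_end) {
 //     entry = table[*bytes_ptr++];
 //     flags |= entry;            // flag bits are masked with kFlagsMask below
 //     size  += entry & kSizeMask;
 //   }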
2049
2050 // Write flags to field.
2051 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
2052 if (!IsScanFlagsUnboxed()) {
2053 __ SmiTag(flags_reg);
2054 }
2055 Register decoder_reg;
2056 const Location decoder_location = locs()->in(0);
2057 if (decoder_location.IsStackSlot()) {
2058 __ lx(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
2059 decoder_reg = decoder_temp_reg;
2060 } else {
2061 decoder_reg = decoder_location.reg();
2062 }
2063 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
2064 if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
2065 UNIMPLEMENTED();
2066 } else {
2067 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
2068 scan_flags_field_offset);
2069 __ or_(flags_temp_reg, flags_temp_reg, flags_reg);
2070 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2071 }
2072}
2073
2074LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
2075 bool opt) const {
2076 // The compiler must optimize any function that includes a LoadIndexed
2077 // instruction that uses typed data cids, since extracting the payload address
2078 // from views is done in a compiler pass after all code motion has happened.
2079 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
2080
2081 const intptr_t kNumInputs = 2;
2082 const intptr_t kNumTemps = 0;
2083 LocationSummary* locs = new (zone)
2084 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2085 locs->set_in(kArrayPos, Location::RequiresRegister());
2086 const bool can_be_constant =
2087 index()->BindsToConstant() &&
2089 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2090 locs->set_in(kIndexPos,
2091 can_be_constant
2092 ? Location::Constant(index()->definition()->AsConstant())
2094 auto const rep =
2098#if XLEN == 32
2099 if (rep == kUnboxedInt64) {
2102 }
2103#endif
2104 } else if (RepresentationUtils::IsUnboxed(rep)) {
2106 } else {
2108 }
2109 return locs;
2110}
2111
2112void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2113 // The array register points to the backing store for external arrays.
2114 const Register array = locs()->in(kArrayPos).reg();
2115 const Location index = locs()->in(kIndexPos);
2116
2117 compiler::Address element_address(TMP); // Bad address.
2118 element_address = index.IsRegister()
2119 ? __ ElementAddressForRegIndex(
2120 IsUntagged(), class_id(), index_scale(),
2121 index_unboxed_, array, index.reg(), TMP)
2122 : __ ElementAddressForIntIndex(
2123 IsUntagged(), class_id(), index_scale(), array,
2124 Smi::Cast(index.constant()).Value());
2125
2126 auto const rep =
2130#if XLEN == 32
2131 if (rep == kUnboxedInt64) {
2132 ASSERT(locs()->out(0).IsPairLocation());
2133 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2134 const Register result_lo = result_pair->At(0).reg();
2135 const Register result_hi = result_pair->At(1).reg();
2136 __ lw(result_lo, element_address);
2137 __ lw(result_hi, compiler::Address(element_address.base(),
2138 element_address.offset() + 4));
2139 } else {
2140 const Register result = locs()->out(0).reg();
2141 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2142 }
2143#else
2144 const Register result = locs()->out(0).reg();
2145 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2146#endif
2147 } else if (RepresentationUtils::IsUnboxed(rep)) {
2148 const FRegister result = locs()->out(0).fpu_reg();
2149 if (rep == kUnboxedFloat) {
2150 // Load single precision float.
2151 __ flw(result, element_address);
2152 } else if (rep == kUnboxedDouble) {
2153 // Load double precision float.
2154 __ fld(result, element_address);
2155 } else {
2156 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2157 rep == kUnboxedFloat64x2);
2158 UNIMPLEMENTED();
2159 }
2160 } else {
2161 ASSERT(rep == kTagged);
2162 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2163 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2164 const Register result = locs()->out(0).reg();
2165 __ lx(result, element_address);
2166 }
2167}
2168
2169LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2170 bool opt) const {
2171 const intptr_t kNumInputs = 2;
2172 const intptr_t kNumTemps = 0;
2173 LocationSummary* summary = new (zone)
2174 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2175 summary->set_in(0, Location::RequiresRegister());
2176 summary->set_in(1, Location::RequiresRegister());
2177#if XLEN == 32
2178 if (representation() == kUnboxedInt64) {
2179 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
2181 } else {
2182 ASSERT(representation() == kTagged);
2183 summary->set_out(0, Location::RequiresRegister());
2184 }
2185#else
2186 summary->set_out(0, Location::RequiresRegister());
2187#endif
2188 return summary;
2189}
2190
2191void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2192 // The string register points to the backing store for external strings.
2193 const Register str = locs()->in(0).reg();
2194 const Location index = locs()->in(1);
2196
2197#if XLEN == 32
2198 if (representation() == kUnboxedInt64) {
2199 ASSERT(compiler->is_optimizing());
2200 ASSERT(locs()->out(0).IsPairLocation());
2201 UNIMPLEMENTED();
2202 }
2203#endif
2204
2205 Register result = locs()->out(0).reg();
2206 switch (class_id()) {
2207 case kOneByteStringCid:
2208 switch (element_count()) {
2209 case 1:
2211 break;
2212 case 2:
2214 break;
2215 case 4:
2217 break;
2218 default:
2219 UNREACHABLE();
2220 }
2221 break;
2222 case kTwoByteStringCid:
2223 switch (element_count()) {
2224 case 1:
2226 break;
2227 case 2:
2229 break;
2230 default:
2231 UNREACHABLE();
2232 }
2233 break;
2234 default:
2235 UNREACHABLE();
2236 break;
2237 }
2238 // Warning: element_address may use register TMP as base.
2239 compiler::Address element_address = __ ElementAddressForRegIndex(
2240 IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false, str,
2241 index.reg(), TMP);
2242 switch (sz) {
2244 __ lbu(result, element_address);
2245 break;
2247 __ lhu(result, element_address);
2248 break;
2250#if XLEN == 32
2251 __ lw(result, element_address);
2252#else
2253 __ lwu(result, element_address);
2254#endif
2255 break;
2256 default:
2257 UNREACHABLE();
2258 }
2259
2260 ASSERT(can_pack_into_smi());
2261 __ SmiTag(result);
2262}
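// Note: when element_count() > 1 the adjacent code units are fetched with a
// single wider load (e.g. two one-byte characters as one 16-bit load) and the
// combined value is Smi-tagged as-is, which is why can_pack_into_smi() must
// hold here.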
2263
2264LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2265 bool opt) const {
2266 // The compiler must optimize any function that includes a StoreIndexed
2267 // instruction that uses typed data cids, since extracting the payload address
2268 // from views is done in a compiler pass after all code motion has happened.
2269 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
2270
2271 const intptr_t kNumInputs = 3;
2272 const intptr_t kNumTemps = 1;
2273 LocationSummary* locs = new (zone)
2274 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2276 const bool can_be_constant =
2277 index()->BindsToConstant() &&
2279 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2280 locs->set_in(1, can_be_constant
2281 ? Location::Constant(index()->definition()->AsConstant())
2284
2285 auto const rep =
2287 if (IsClampedTypedDataBaseClassId(class_id())) {
2288 ASSERT(rep == kUnboxedUint8);
2290 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2291 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2292 ConstantInstr* constant = value()->definition()->AsConstant();
2293 if (constant != nullptr && constant->HasZeroRepresentation()) {
2294 locs->set_in(2, Location::Constant(constant));
2295 } else {
2297 }
2298 } else if (rep == kUnboxedInt64) {
2299#if XLEN == 32
2302#else
2303 ConstantInstr* constant = value()->definition()->AsConstant();
2304 if (constant != nullptr && constant->HasZeroRepresentation()) {
2305 locs->set_in(2, Location::Constant(constant));
2306 } else {
2308 }
2309#endif
2310 } else {
2311 ConstantInstr* constant = value()->definition()->AsConstant();
2312 if (constant != nullptr && constant->HasZeroRepresentation()) {
2313 locs->set_in(2, Location::Constant(constant));
2314 } else {
2316 }
2317 }
2318 } else if (RepresentationUtils::IsUnboxed(rep)) {
2319 if (rep == kUnboxedFloat) {
2320 ConstantInstr* constant = value()->definition()->AsConstant();
2321 if (constant != nullptr && constant->HasZeroRepresentation()) {
2322 locs->set_in(2, Location::Constant(constant));
2323 } else {
2325 }
2326 } else if (rep == kUnboxedDouble) {
2327#if XLEN == 32
2329#else
2330 ConstantInstr* constant = value()->definition()->AsConstant();
2331 if (constant != nullptr && constant->HasZeroRepresentation()) {
2332 locs->set_in(2, Location::Constant(constant));
2333 } else {
2335 }
2336#endif
2337 } else {
2339 }
2340 } else if (class_id() == kArrayCid) {
2341 locs->set_in(2, ShouldEmitStoreBarrier()
2344 if (ShouldEmitStoreBarrier()) {
2347 }
2348 } else {
2349 UNREACHABLE();
2350 }
2351 return locs;
2352}
2353
2354void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2355 // The array register points to the backing store for external arrays.
2356 const Register array = locs()->in(0).reg();
2357 const Location index = locs()->in(1);
2358 const Register temp = locs()->temp(0).reg();
2359 compiler::Address element_address(TMP); // Bad address.
2360
2361 // Deal with a special case separately.
2362 if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
2363 if (index.IsRegister()) {
2364 __ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
2365 index_scale(), index_unboxed_, array,
2366 index.reg());
2367 } else {
2368 __ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
2369 index_scale(), array,
2370 Smi::Cast(index.constant()).Value());
2371 }
2372 const Register value = locs()->in(2).reg();
2373 __ StoreIntoArray(array, temp, value, CanValueBeSmi());
2374 return;
2375 }
2376
2377 element_address = index.IsRegister()
2378 ? __ ElementAddressForRegIndex(
2379 IsUntagged(), class_id(), index_scale(),
2380 index_unboxed_, array, index.reg(), temp)
2381 : __ ElementAddressForIntIndex(
2382 IsUntagged(), class_id(), index_scale(), array,
2383 Smi::Cast(index.constant()).Value());
2384
2385 auto const rep =
2388 if (IsClampedTypedDataBaseClassId(class_id())) {
2389 if (locs()->in(2).IsConstant()) {
2390 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2391 intptr_t value = constant.Value();
2392 // Clamp to 0x0 or 0xFF respectively.
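 // (E.g. a constant value of 300 stores 0xFF and -5 stores 0x00, matching the
 // clamped typed-data store semantics handled below.)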
2393 if (value > 0xFF) {
2394 value = 0xFF;
2395 } else if (value < 0) {
2396 value = 0;
2397 }
2398 if (value == 0) {
2399 __ sb(ZR, element_address);
2400 } else {
2401 __ LoadImmediate(TMP, static_cast<int8_t>(value));
2402 __ sb(TMP, element_address);
2403 }
2404 } else {
2405 const Register value = locs()->in(2).reg();
2406
2407 compiler::Label store_zero, store_ff, done;
2408 __ blt(value, ZR, &store_zero, compiler::Assembler::kNearJump);
2409
2410 __ li(TMP, 0xFF);
2411 __ bgt(value, TMP, &store_ff, compiler::Assembler::kNearJump);
2412
2413 __ sb(value, element_address);
2415
2416 __ Bind(&store_zero);
2417 __ mv(TMP, ZR);
2418
2419 __ Bind(&store_ff);
2420 __ sb(TMP, element_address);
2421
2422 __ Bind(&done);
2423 }
2424 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2425 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2426 if (locs()->in(2).IsConstant()) {
2427 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2428 __ sb(ZR, element_address);
2429 } else {
2430 const Register value = locs()->in(2).reg();
2431 __ sb(value, element_address);
2432 }
2433 } else if (rep == kUnboxedInt64) {
2434#if XLEN >= 64
2435 if (locs()->in(2).IsConstant()) {
2436 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2437 __ sd(ZR, element_address);
2438 } else {
2439 __ sd(locs()->in(2).reg(), element_address);
2440 }
2441#else
2442 PairLocation* value_pair = locs()->in(2).AsPairLocation();
2443 Register value_lo = value_pair->At(0).reg();
2444 Register value_hi = value_pair->At(1).reg();
2445 __ sw(value_lo, element_address);
2446 __ sw(value_hi, compiler::Address(element_address.base(),
2447 element_address.offset() + 4));
2448#endif
2449 } else {
2450 if (locs()->in(2).IsConstant()) {
2451 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2452 __ Store(ZR, element_address, RepresentationUtils::OperandSize(rep));
2453 } else {
2454 __ Store(locs()->in(2).reg(), element_address,
2456 }
2457 }
2458 } else if (RepresentationUtils::IsUnboxed(rep)) {
2459 if (rep == kUnboxedFloat) {
2460 if (locs()->in(2).IsConstant()) {
2461 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2462 __ sw(ZR, element_address);
2463 } else {
2464 __ fsw(locs()->in(2).fpu_reg(), element_address);
2465 }
2466 } else if (rep == kUnboxedDouble) {
2467#if XLEN >= 64
2468 if (locs()->in(2).IsConstant()) {
2469 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2470 __ sd(ZR, element_address);
2471 } else {
2472 __ fsd(locs()->in(2).fpu_reg(), element_address);
2473 }
2474#else
2475 __ fsd(locs()->in(2).fpu_reg(), element_address);
2476#endif
2477 } else {
2478 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2479 rep == kUnboxedFloat64x2);
2480 UNIMPLEMENTED();
2481 }
2482 } else if (class_id() == kArrayCid) {
2483 ASSERT(rep == kTagged);
2484 ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
2485 if (locs()->in(2).IsConstant()) {
2486 const Object& constant = locs()->in(2).constant();
2487 __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
2488 } else {
2489 const Register value = locs()->in(2).reg();
2490 __ StoreIntoObjectNoBarrier(array, element_address, value);
2491 }
2492 } else {
2493 UNREACHABLE();
2494 }
2495
2497 UNIMPLEMENTED();
2498 }
2499}
2500
2501static void LoadValueCid(FlowGraphCompiler* compiler,
2502 Register value_cid_reg,
2503 Register value_reg,
2504 compiler::Label* value_is_smi = nullptr) {
2505 compiler::Label done;
2506 if (value_is_smi == nullptr) {
2507 __ LoadImmediate(value_cid_reg, kSmiCid);
2508 }
2509 __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi,
2511 __ LoadClassId(value_cid_reg, value_reg);
2512 __ Bind(&done);
2513}
2514
2515DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2516
2517LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2518 bool opt) const {
2519 const intptr_t kNumInputs = 1;
2520
2521 const intptr_t value_cid = value()->Type()->ToCid();
2522 const intptr_t field_cid = field().guarded_cid();
2523
2524 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2525
2526 const bool needs_value_cid_temp_reg =
2527 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2528
2529 const bool needs_field_temp_reg = emit_full_guard;
2530
2531 intptr_t num_temps = 0;
2532 if (needs_value_cid_temp_reg) {
2533 num_temps++;
2534 }
2535 if (needs_field_temp_reg) {
2536 num_temps++;
2537 }
2538
2539 LocationSummary* summary = new (zone)
2540 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2541 summary->set_in(0, Location::RequiresRegister());
2542
2543 for (intptr_t i = 0; i < num_temps; i++) {
2544 summary->set_temp(i, Location::RequiresRegister());
2545 }
2546
2547 return summary;
2548}
2549
2550void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2552 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2553 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2554
2555 const intptr_t value_cid = value()->Type()->ToCid();
2556 const intptr_t field_cid = field().guarded_cid();
2557 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2558
2559 if (field_cid == kDynamicCid) {
2560 return; // Nothing to emit.
2561 }
2562
2563 const bool emit_full_guard =
2564 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2565
2566 const bool needs_value_cid_temp_reg =
2567 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2568
2569 const bool needs_field_temp_reg = emit_full_guard;
2570
2571 const Register value_reg = locs()->in(0).reg();
2572
2573 const Register value_cid_reg =
2574 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2575
2576 const Register field_reg = needs_field_temp_reg
2577 ? locs()->temp(locs()->temp_count() - 1).reg()
2578 : kNoRegister;
2579
2580 compiler::Label ok, fail_label;
2581
2582 compiler::Label* deopt =
2583 compiler->is_optimizing()
2584 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2585 : nullptr;
2586
2587 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2588
2589 if (emit_full_guard) {
2590 __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
2591
2592 compiler::FieldAddress field_cid_operand(field_reg,
2594 compiler::FieldAddress field_nullability_operand(
2595 field_reg, Field::is_nullable_offset());
2596
2597 if (value_cid == kDynamicCid) {
2598 LoadValueCid(compiler, value_cid_reg, value_reg);
2599 compiler::Label skip_length_check;
2600 __ lw(TMP, field_cid_operand);
2601 __ CompareRegisters(value_cid_reg, TMP);
2603 __ lw(TMP, field_nullability_operand);
2604 __ CompareRegisters(value_cid_reg, TMP);
2605 } else if (value_cid == kNullCid) {
2606 __ lw(value_cid_reg, field_nullability_operand);
2607 __ CompareImmediate(value_cid_reg, value_cid);
2608 } else {
2609 compiler::Label skip_length_check;
2610 __ lw(value_cid_reg, field_cid_operand);
2611 __ CompareImmediate(value_cid_reg, value_cid);
2612 }
2614
2615 // Check if the tracked state of the guarded field can be initialized
2616 // inline. If the field needs a length check, we fall through to the runtime,
2617 // which is responsible for computing the offset of the length field
2618 // based on the class id.
2619 // Length guard will be emitted separately when needed via GuardFieldLength
2620 // instruction after GuardFieldClass.
2621 if (!field().needs_length_check()) {
2622 // An uninitialized field can be handled inline. Check if the
2623 // field is still uninitialized.
2624 __ lw(TMP, field_cid_operand);
2625 __ CompareImmediate(TMP, kIllegalCid);
2626 __ BranchIf(NE, fail);
2627
2628 if (value_cid == kDynamicCid) {
2629 __ sw(value_cid_reg, field_cid_operand);
2630 __ sw(value_cid_reg, field_nullability_operand);
2631 } else {
2632 __ LoadImmediate(TMP, value_cid);
2633 __ sw(TMP, field_cid_operand);
2634 __ sw(TMP, field_nullability_operand);
2635 }
2636
2638 }
2639
2640 if (deopt == nullptr) {
2641 __ Bind(fail);
2642
2643 __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
2645 __ CompareImmediate(TMP, kDynamicCid);
2647
2648 __ PushRegisterPair(value_reg, field_reg);
2649 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2650 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2651 __ Drop(2); // Drop the field and the value.
2652 } else {
2653 __ j(fail);
2654 }
2655 } else {
2656 ASSERT(compiler->is_optimizing());
2657 ASSERT(deopt != nullptr);
2658
2659 // Field guard class has been initialized and is known.
2660 if (value_cid == kDynamicCid) {
2661 // Value's class id is not known.
2662 __ TestImmediate(value_reg, kSmiTagMask);
2663
2664 if (field_cid != kSmiCid) {
2665 __ BranchIf(EQ, fail);
2666 __ LoadClassId(value_cid_reg, value_reg);
2667 __ CompareImmediate(value_cid_reg, field_cid);
2668 }
2669
2670 if (field().is_nullable() && (field_cid != kNullCid)) {
2672 __ CompareObject(value_reg, Object::null_object());
2673 }
2674
2675 __ BranchIf(NE, fail);
2676 } else if (value_cid == field_cid) {
2677 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2678 // may sometimes produce this situation after the last Canonicalize pass.
2679 } else {
2680 // Both the value's and the field's class ids are known.
2681 ASSERT(value_cid != nullability);
2682 __ j(fail);
2683 }
2684 }
2685 __ Bind(&ok);
2686}
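// Note: the guard above tracks the field's observed class id. guarded_cid_
// appears to move from kIllegalCid (nothing observed yet) to a concrete cid,
// and the kUpdateFieldCid runtime entry widens it (ultimately to kDynamicCid)
// in unoptimized code; optimized code instead deoptimizes when the guarded
// state no longer matches the stored value.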
2687
2688LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2689 bool opt) const {
2690 const intptr_t kNumInputs = 1;
2691 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2692 const intptr_t kNumTemps = 3;
2693 LocationSummary* summary = new (zone)
2694 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2695 summary->set_in(0, Location::RequiresRegister());
2696 // We need temporaries for field object, length offset and expected length.
2697 summary->set_temp(0, Location::RequiresRegister());
2698 summary->set_temp(1, Location::RequiresRegister());
2699 summary->set_temp(2, Location::RequiresRegister());
2700 return summary;
2701 } else {
2702 LocationSummary* summary = new (zone)
2703 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2704 summary->set_in(0, Location::RequiresRegister());
2705 return summary;
2706 }
2707 UNREACHABLE();
2708}
2709
2710void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2711 if (field().guarded_list_length() == Field::kNoFixedLength) {
2712 return; // Nothing to emit.
2713 }
2714
2715 compiler::Label* deopt =
2716 compiler->is_optimizing()
2717 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2718 : nullptr;
2719
2720 const Register value_reg = locs()->in(0).reg();
2721
2722 if (!compiler->is_optimizing() ||
2723 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2724 const Register field_reg = locs()->temp(0).reg();
2725 const Register offset_reg = locs()->temp(1).reg();
2726 const Register length_reg = locs()->temp(2).reg();
2727
2728 compiler::Label ok;
2729
2730 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2731
2732 __ lb(offset_reg,
2733 compiler::FieldAddress(
2735 __ LoadCompressed(
2736 length_reg,
2737 compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
2738
2739 __ bltz(offset_reg, &ok, compiler::Assembler::kNearJump);
2740
2741 // Load the length from the value. GuardFieldClass already verified that
2742 // the value's class matches the guarded class id of the field.
2743 // offset_reg contains the offset already corrected by -kHeapObjectTag, which
2744 // is why we use Address instead of FieldAddress.
2745 __ add(TMP, value_reg, offset_reg);
2746 __ lx(TMP, compiler::Address(TMP, 0));
2747 __ CompareObjectRegisters(length_reg, TMP);
2748
2749 if (deopt == nullptr) {
2751
2752 __ PushRegisterPair(value_reg, field_reg);
2753 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2754 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2755 __ Drop(2); // Drop the field and the value.
2756 } else {
2757 __ BranchIf(NE, deopt);
2758 }
2759
2760 __ Bind(&ok);
2761 } else {
2762 ASSERT(compiler->is_optimizing());
2763 ASSERT(field().guarded_list_length() >= 0);
2764 ASSERT(field().guarded_list_length_in_object_offset() !=
2766
2767 __ lx(TMP, compiler::FieldAddress(
2768 value_reg, field().guarded_list_length_in_object_offset()));
2769 __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
2770 __ BranchIf(NE, deopt);
2771 }
2772}
2773
2774LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2775 bool opt) const {
2776 const intptr_t kNumInputs = 1;
2777 const intptr_t kNumTemps = 0;
2778 LocationSummary* locs = new (zone)
2779 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2781 return locs;
2782}
2783
2784void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2785 const Register value = locs()->in(0).reg();
2786
2787 compiler->used_static_fields().Add(&field());
2788
2789 __ LoadFromOffset(
2790 TMP, THR,
2791 field().is_shared()
2794 // Note: static field ids won't be changed by hot-reload.
2795 __ StoreToOffset(value, TMP, compiler::target::FieldTable::OffsetOf(field()));
2796}
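// Note: static field values live in a field table rather than in the Field
// object itself; the code above appears to select the shared or isolate-group
// field table pointer from the Thread based on field().is_shared() and stores
// the value at the field's fixed table offset.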
2797
2798LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2799 bool opt) const {
2800 const intptr_t kNumInputs = 3;
2801 const intptr_t kNumTemps = 0;
2802 LocationSummary* summary = new (zone)
2803 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2805 summary->set_in(1, Location::RegisterLocation(
2807 summary->set_in(
2809 summary->set_out(
2811 return summary;
2812}
2813
2814void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2815 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2818
2819 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2821}
2822
2823LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2824 bool opt) const {
2825 const intptr_t kNumInputs = 2;
2826 const intptr_t kNumTemps = 0;
2827 LocationSummary* locs = new (zone)
2828 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2829 locs->set_in(kTypeArgumentsPos,
2831 locs->set_in(kLengthPos,
2834 return locs;
2835}
2836
2837 // Inlines array allocation for a known constant length.
2838static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2839 intptr_t num_elements,
2840 compiler::Label* slow_path,
2841 compiler::Label* done) {
2842 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2843 const intptr_t instance_size = Array::InstanceSize(num_elements);
2844
2845 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2846 AllocateArrayABI::kResultReg, // instance
2847 T3, // end address
2848 T4, T5);
2849 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2850 // T3: new object end address.
2851
2852 // Store the type argument field.
2853 __ StoreCompressedIntoObjectNoBarrier(
2855 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2858
2859 // Set the length field.
2860 __ StoreCompressedIntoObjectNoBarrier(
2862 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2865
2866 // Initialize all array elements to raw_null.
2867 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2868 // T3: new object end address.
2869 // T5: iterator which initially points to the start of the variable
2870 // data area to be initialized.
2871 if (num_elements > 0) {
2872 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2873 __ AddImmediate(T5, AllocateArrayABI::kResultReg,
2874 sizeof(UntaggedArray) - kHeapObjectTag);
2875 if (array_size < (kInlineArraySize * kCompressedWordSize)) {
2876 intptr_t current_offset = 0;
2877 while (current_offset < array_size) {
2878 __ StoreCompressedIntoObjectNoBarrier(
2879 AllocateArrayABI::kResultReg, compiler::Address(T5, current_offset),
2880 NULL_REG);
2881 current_offset += kCompressedWordSize;
2882 }
2883 } else {
2884 compiler::Label end_loop, init_loop;
2885 __ Bind(&init_loop);
2886 __ CompareRegisters(T5, T3);
2887 __ BranchIf(CS, &end_loop, compiler::Assembler::kNearJump);
2888 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2889 compiler::Address(T5, 0), NULL_REG);
2890 __ AddImmediate(T5, kCompressedWordSize);
2891 __ j(&init_loop);
2892 __ Bind(&end_loop);
2893 }
2894 }
2896}
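// Note: this fast path only runs for constant-length arrays. It allocates via
// TryAllocateArray, stores the type arguments and length without a write
// barrier (the array is freshly allocated), and null-initializes the elements
// either fully unrolled (small payloads) or with the loop above.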
2897
2898void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2899 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
2900 if (type_usage_info != nullptr) {
2901 const Class& list_class =
2902 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
2903 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
2904 type_arguments()->definition());
2905 }
2906
2907 compiler::Label slow_path, done;
2908 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2909 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
2910 num_elements()->BindsToConstant() &&
2911 num_elements()->BoundConstant().IsSmi()) {
2912 const intptr_t length =
2913 Smi::Cast(num_elements()->BoundConstant()).Value();
2915 InlineArrayAllocation(compiler, length, &slow_path, &done);
2916 }
2917 }
2918 }
2919
2920 __ Bind(&slow_path);
2921 auto object_store = compiler->isolate_group()->object_store();
2922 const auto& allocate_array_stub =
2923 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2924 compiler->GenerateStubCall(source(), allocate_array_stub,
2925 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2926 env());
2927 __ Bind(&done);
2928}
2929
2931 Zone* zone,
2932 bool opt) const {
2933 ASSERT(opt);
2934 const intptr_t kNumInputs = 0;
2935 const intptr_t kNumTemps = 3;
2936 LocationSummary* locs = new (zone) LocationSummary(
2937 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2942 return locs;
2943}
2944
2945class AllocateContextSlowPath
2946 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2947 public:
2948 explicit AllocateContextSlowPath(
2949 AllocateUninitializedContextInstr* instruction)
2950 : TemplateSlowPathCode(instruction) {}
2951
2952 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2953 __ Comment("AllocateContextSlowPath");
2954 __ Bind(entry_label());
2955
2956 LocationSummary* locs = instruction()->locs();
2958
2959 compiler->SaveLiveRegisters(locs);
2960
2961 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2962 instruction(), /*num_slow_path_args=*/0);
2963 ASSERT(slow_path_env != nullptr);
2964
2965 auto object_store = compiler->isolate_group()->object_store();
2966 const auto& allocate_context_stub = Code::ZoneHandle(
2967 compiler->zone(), object_store->allocate_context_stub());
2968
2969 __ LoadImmediate(T1, instruction()->num_context_variables());
2970 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
2971 UntaggedPcDescriptors::kOther, locs,
2972 instruction()->deopt_id(), slow_path_env);
2973 ASSERT(instruction()->locs()->out(0).reg() == A0);
2974 compiler->RestoreLiveRegisters(instruction()->locs());
2975 __ j(exit_label());
2976 }
2977};
2978
2980 FlowGraphCompiler* compiler) {
2981 Register temp0 = locs()->temp(0).reg();
2982 Register temp1 = locs()->temp(1).reg();
2983 Register temp2 = locs()->temp(2).reg();
2984 Register result = locs()->out(0).reg();
2985 // Try allocate the object.
2986 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2987 compiler->AddSlowPathCode(slow_path);
2988 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2989
2990 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2991 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2992 result, // instance
2993 temp0, temp1, temp2);
2994
2995 // Set up the number-of-context-variables field (int32_t).
2996 __ LoadImmediate(temp0, num_context_variables());
2997 __ sw(temp0,
2998 compiler::FieldAddress(result, Context::num_variables_offset()));
2999 } else {
3000 __ Jump(slow_path->entry_label());
3001 }
3002
3003 __ Bind(slow_path->exit_label());
3004}
3005
3006LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
3007 bool opt) const {
3008 const intptr_t kNumInputs = 0;
3009 const intptr_t kNumTemps = 1;
3010 LocationSummary* locs = new (zone)
3011 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3014 return locs;
3015}
3016
3017void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3018 ASSERT(locs()->temp(0).reg() == T1);
3019 ASSERT(locs()->out(0).reg() == A0);
3020
3021 auto object_store = compiler->isolate_group()->object_store();
3022 const auto& allocate_context_stub =
3023 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
3024 __ LoadImmediate(T1, num_context_variables());
3025 compiler->GenerateStubCall(source(), allocate_context_stub,
3026 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3027 env());
3028}
3029
3030LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
3031 bool opt) const {
3032 const intptr_t kNumInputs = 1;
3033 const intptr_t kNumTemps = 0;
3034 LocationSummary* locs = new (zone)
3035 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3038 return locs;
3039}
3040
3041void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3042 ASSERT(locs()->in(0).reg() == T5);
3043 ASSERT(locs()->out(0).reg() == A0);
3044
3045 auto object_store = compiler->isolate_group()->object_store();
3046 const auto& clone_context_stub =
3047 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
3048 compiler->GenerateStubCall(source(), clone_context_stub,
3049 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
3050 deopt_id(), env());
3051}
3052
3053LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
3054 bool opt) const {
3055 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
3056}
3057
3058void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3059 __ Bind(compiler->GetJumpLabel(this));
3060 compiler->AddExceptionHandler(this);
3061 if (HasParallelMove()) {
3062 parallel_move()->EmitNativeCode(compiler);
3063 }
3064
3065 // Restore SP from FP as we are coming from a throw and the code for
3066 // popping arguments has not been run.
3067 const intptr_t fp_sp_dist =
3069 compiler->StackSize()) *
3070 kWordSize;
3071 ASSERT(fp_sp_dist <= 0);
3072 __ AddImmediate(SP, FP, fp_sp_dist);
3073
3074 if (!compiler->is_optimizing()) {
3075 if (raw_exception_var_ != nullptr) {
3076 __ StoreToOffset(
3079 }
3080 if (raw_stacktrace_var_ != nullptr) {
3081 __ StoreToOffset(
3084 }
3085 }
3086}
3087
3088LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
3089 bool opt) const {
3090 const intptr_t kNumInputs = 0;
3091 const intptr_t kNumTemps = 1;
3092 const bool using_shared_stub = UseSharedSlowPathStub(opt);
3093 LocationSummary* summary = new (zone)
3094 LocationSummary(zone, kNumInputs, kNumTemps,
3095 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
3097 summary->set_temp(0, Location::RequiresRegister());
3098 return summary;
3099}
3100
3101class CheckStackOverflowSlowPath
3102 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
3103 public:
3104 static constexpr intptr_t kNumSlowPathArgs = 0;
3105
3106 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
3107 : TemplateSlowPathCode(instruction) {}
3108
3109 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3110 auto locs = instruction()->locs();
3111 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
3112 const Register value = locs->temp(0).reg();
3113 __ Comment("CheckStackOverflowSlowPathOsr");
3114 __ Bind(osr_entry_label());
3116 __ sx(value,
3117 compiler::Address(THR, Thread::stack_overflow_flags_offset()));
3118 }
3119 __ Comment("CheckStackOverflowSlowPath");
3120 __ Bind(entry_label());
3121 const bool using_shared_stub = locs->call_on_shared_slow_path();
3122 if (!using_shared_stub) {
3123 compiler->SaveLiveRegisters(locs);
3124 }
3125 // pending_deoptimization_env_ is needed to generate a runtime call that
3126 // may throw an exception.
3127 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
3128 Environment* env =
3129 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3130 compiler->pending_deoptimization_env_ = env;
3131
3132 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3133 if (using_shared_stub) {
3134 if (!has_frame) {
3135 ASSERT(__ constant_pool_allowed());
3136 __ set_constant_pool_allowed(false);
3137 __ EnterDartFrame(0);
3138 }
3139 auto object_store = compiler->isolate_group()->object_store();
3140 const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
3141 const auto& stub = Code::ZoneHandle(
3142 compiler->zone(),
3143 live_fpu_regs
3144 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
3145 : object_store->stack_overflow_stub_without_fpu_regs_stub());
3146
3147 if (compiler->CanPcRelativeCall(stub)) {
3148 __ GenerateUnRelocatedPcRelativeCall();
3149 compiler->AddPcRelativeCallStubTarget(stub);
3150 } else {
3151 const uword entry_point_offset =
3154 __ Call(compiler::Address(THR, entry_point_offset));
3155 }
3156 compiler->RecordSafepoint(locs, kNumSlowPathArgs);
3157 compiler->RecordCatchEntryMoves(env);
3158 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
3159 instruction()->deopt_id(),
3160 instruction()->source());
3161 if (!has_frame) {
3162 __ LeaveDartFrame();
3163 __ set_constant_pool_allowed(true);
3164 }
3165 } else {
3166 ASSERT(has_frame);
3167 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
3168 compiler->EmitCallsiteMetadata(
3169 instruction()->source(), instruction()->deopt_id(),
3170 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
3171 }
3172
3173 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
3174 instruction()->in_loop()) {
3175 // In unoptimized code, record loop stack checks as possible OSR entries.
3176 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
3177 instruction()->deopt_id(),
3178 InstructionSource());
3179 }
3180 compiler->pending_deoptimization_env_ = nullptr;
3181 if (!using_shared_stub) {
3182 compiler->RestoreLiveRegisters(locs);
3183 }
3184 __ j(exit_label());
3185 }
3186
3187 compiler::Label* osr_entry_label() {
3188 ASSERT(IsolateGroup::Current()->use_osr());
3189 return &osr_entry_label_;
3190 }
3191
3192 private:
3193 compiler::Label osr_entry_label_;
3194};
3195
3196void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3197 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3198 compiler->AddSlowPathCode(slow_path);
3199
3200 __ lx(TMP,
3202 __ bleu(SP, TMP, slow_path->entry_label());
3203 if (compiler->CanOSRFunction() && in_loop()) {
3204 const Register function = locs()->temp(0).reg();
3205 // In unoptimized code check the usage counter to trigger OSR at loop
3206 // stack checks. Use progressively higher thresholds for more deeply
3207 // nested loops to attempt to hit outer loops with OSR when possible.
3208 __ LoadObject(function, compiler->parsed_function().function());
3209 const intptr_t configured_optimization_counter_threshold =
3210 compiler->thread()->isolate_group()->optimization_counter_threshold();
3211 const int32_t threshold =
3212 configured_optimization_counter_threshold * (loop_depth() + 1);
3213 __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
3215 __ addi(TMP, TMP, 1);
3216 __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
3218 __ CompareImmediate(TMP, threshold);
3219 __ BranchIf(GE, slow_path->osr_entry_label());
3220 }
3221 if (compiler->ForceSlowPathForStackOverflow()) {
3222 __ j(slow_path->entry_label());
3223 }
3224 __ Bind(slow_path->exit_label());
3225}
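// Note: the fast path above compares SP against a limit loaded from the
// Thread and takes the slow path when SP is at or below it; the same limit
// appears to be used by the VM to request interrupts, which is why the slow
// path ends up in kInterruptOrStackOverflowRuntimeEntry.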
3226
3227static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3228 BinarySmiOpInstr* shift_left) {
3229 const LocationSummary& locs = *shift_left->locs();
3230 const Register left = locs.in(0).reg();
3231 const Register result = locs.out(0).reg();
3232 compiler::Label* deopt =
3233 shift_left->CanDeoptimize()
3234 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3235 ICData::kDeoptBinarySmiOp)
3236 : nullptr;
3237 if (locs.in(1).IsConstant()) {
3238 const Object& constant = locs.in(1).constant();
3239 ASSERT(constant.IsSmi());
3240 // The immediate shift encodes the count in 6 (RV64) or 5 (RV32) bits.
3241 const intptr_t kCountLimit = XLEN - 1;
3242 const intptr_t value = Smi::Cast(constant).Value();
3243 ASSERT((0 < value) && (value < kCountLimit));
3244 __ slli(result, left, value);
3245 if (shift_left->can_overflow()) {
3246 ASSERT(result != left);
3247 __ srai(TMP2, result, value);
3248 __ bne(left, TMP2, deopt); // Overflow.
3249 }
3250 return;
3251 }
3252
3253 // Right (locs.in(1)) is not constant.
3254 const Register right = locs.in(1).reg();
3255 Range* right_range = shift_left->right_range();
3256 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3257 // TODO(srdjan): Implement code below for is_truncating().
3258 // If left is constant, we know the maximal allowed size for right.
3259 const Object& obj = shift_left->left()->BoundConstant();
3260 if (obj.IsSmi()) {
3261 const intptr_t left_int = Smi::Cast(obj).Value();
3262 if (left_int == 0) {
3263 __ bltz(right, deopt);
3264 __ mv(result, ZR);
3265 return;
3266 }
3267 const intptr_t max_right =
3269 const bool right_needs_check =
3270 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3271 if (right_needs_check) {
3272 __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
3273 __ BranchIf(CS, deopt);
3274 }
3275 __ SmiUntag(TMP, right);
3276 __ sll(result, left, TMP);
3277 }
3278 return;
3279 }
3280
3281 const bool right_needs_check =
3282 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
3283 if (!shift_left->can_overflow()) {
3284 if (right_needs_check) {
3285 if (!RangeUtils::IsPositive(right_range)) {
3286 ASSERT(shift_left->CanDeoptimize());
3287 __ bltz(right, deopt);
3288 }
3289
3290 compiler::Label done, is_not_zero;
3291 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3292 __ BranchIf(LESS, &is_not_zero, compiler::Assembler::kNearJump);
3293 __ li(result, 0);
3295 __ Bind(&is_not_zero);
3296 __ SmiUntag(TMP, right);
3297 __ sll(result, left, TMP);
3298 __ Bind(&done);
3299 } else {
3300 __ SmiUntag(TMP, right);
3301 __ sll(result, left, TMP);
3302 }
3303 } else {
3304 if (right_needs_check) {
3305 ASSERT(shift_left->CanDeoptimize());
3306 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3307 __ BranchIf(CS, deopt);
3308 }
3309 __ SmiUntag(TMP, right);
3310 ASSERT(result != left);
3311 __ sll(result, left, TMP);
3312 __ sra(TMP, result, TMP);
3313 __ bne(left, TMP, deopt); // Overflow.
3314 }
3315}
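// Note: the overflow checks above use the shift-back idiom: after computing
// result = left << n, shifting result arithmetically right by n must give back
// left; if any significant bits (including the sign) were shifted out, the two
// values differ and the code deoptimizes.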
3316
3317LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3318 bool opt) const {
3319 const intptr_t kNumInputs = 2;
3320 const intptr_t kNumTemps =
3321 ((op_kind() == Token::kUSHR) || (op_kind() == Token::kMUL)) ? 1 : 0;
3322 LocationSummary* summary = new (zone)
3323 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3324 if (op_kind() == Token::kTRUNCDIV) {
3325 summary->set_in(0, Location::RequiresRegister());
3326 if (RightIsPowerOfTwoConstant()) {
3327 ConstantInstr* right_constant = right()->definition()->AsConstant();
3328 summary->set_in(1, Location::Constant(right_constant));
3329 } else {
3330 summary->set_in(1, Location::RequiresRegister());
3331 }
3332 summary->set_out(0, Location::RequiresRegister());
3333 return summary;
3334 }
3335 if (op_kind() == Token::kMOD) {
3336 summary->set_in(0, Location::RequiresRegister());
3337 summary->set_in(1, Location::RequiresRegister());
3338 summary->set_out(0, Location::RequiresRegister());
3339 return summary;
3340 }
3341 summary->set_in(0, Location::RequiresRegister());
3342 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3343 if (kNumTemps == 1) {
3344 summary->set_temp(0, Location::RequiresRegister());
3345 }
3346 // We make use of 3-operand instructions by not requiring the result register
3347 // to be identical to the first input register, as is required on Intel.
3348 summary->set_out(0, Location::RequiresRegister());
3349 return summary;
3350}
3351
3352void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3353 if (op_kind() == Token::kSHL) {
3354 EmitSmiShiftLeft(compiler, this);
3355 return;
3356 }
3357
3358 const Register left = locs()->in(0).reg();
3359 const Register result = locs()->out(0).reg();
3360 compiler::Label* deopt = nullptr;
3361 if (CanDeoptimize()) {
3362 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3363 }
3364
3365 if (locs()->in(1).IsConstant()) {
3366 const Object& constant = locs()->in(1).constant();
3367 ASSERT(constant.IsSmi());
3368 const intx_t imm = static_cast<intx_t>(constant.ptr());
3369 switch (op_kind()) {
3370 case Token::kADD: {
3371 if (deopt == nullptr) {
3372 __ AddImmediate(result, left, imm);
3373 } else {
3374 __ AddImmediateBranchOverflow(result, left, imm, deopt);
3375 }
3376 break;
3377 }
3378 case Token::kSUB: {
3379 if (deopt == nullptr) {
3380 __ AddImmediate(result, left, -imm);
3381 } else {
3382 // Negating imm and using AddImmediateSetFlags would not detect the
3383 // overflow when imm == kMinInt64.
3384 __ SubtractImmediateBranchOverflow(result, left, imm, deopt);
3385 }
3386 break;
3387 }
3388 case Token::kMUL: {
3389 // Keep left value tagged and untag right value.
3390 const intptr_t value = Smi::Cast(constant).Value();
3391 if (deopt == nullptr) {
3392 __ LoadImmediate(TMP, value);
3393 __ mul(result, left, TMP);
3394 } else {
3395 __ MultiplyImmediateBranchOverflow(result, left, value, deopt);
3396 }
3397 break;
3398 }
3399 case Token::kTRUNCDIV: {
3400 const intptr_t value = Smi::Cast(constant).Value();
3403 const intptr_t shift_count =
3405 ASSERT(kSmiTagSize == 1);
3406 __ srai(TMP, left, XLEN - 1);
3407 ASSERT(shift_count > 1); // 1, -1 case handled above.
3408 const Register temp = TMP2;
3409 __ srli(TMP, TMP, XLEN - shift_count);
3410 __ add(temp, left, TMP);
3411 ASSERT(shift_count > 0);
3412 __ srai(result, temp, shift_count);
3413 if (value < 0) {
3414 __ neg(result, result);
3415 }
3416 __ SmiTag(result);
3417 break;
3418 }
3419 case Token::kBIT_AND:
3420 // No overflow check.
3421 __ AndImmediate(result, left, imm);
3422 break;
3423 case Token::kBIT_OR:
3424 // No overflow check.
3425 __ OrImmediate(result, left, imm);
3426 break;
3427 case Token::kBIT_XOR:
3428 // No overflow check.
3429 __ XorImmediate(result, left, imm);
3430 break;
3431 case Token::kSHR: {
3432 // The arithmetic-shift-right immediate encodes the count in 6 (RV64) or 5 (RV32) bits.
3433 const intptr_t kCountLimit = XLEN - 1;
3434 intptr_t value = Smi::Cast(constant).Value();
3435 __ srai(result, left, Utils::Minimum(value + kSmiTagSize, kCountLimit));
3436 __ SmiTag(result);
3437 break;
3438 }
3439 case Token::kUSHR: {
3440#if XLEN == 32
3441 const intptr_t value = compiler::target::SmiValue(constant);
3442 ASSERT((value > 0) && (value < 64));
3444 // 64-bit representation of left operand value:
3445 //
3446 //       ss...sssss  s  s  xxxxxxxxxxxxx
3447 //       |        |  |  |  |           |
3448 //       63      32  31 30 kSmiBits-1  0
3449 //
3450 // Where 's' is a sign bit.
3451 //
3452 // If left operand is negative (sign bit is set), then
3453 // result will fit into Smi range if and only if
3454 // the shift amount >= 64 - kSmiBits.
3455 //
3456 // If left operand is non-negative, the result always
3457 // fits into Smi range.
3458 //
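 // (For example, assuming kSmiBits == 30 on this 32-bit target, a shift
 // amount of at least 34 leaves at most 30 significant bits, so even a
 // negative 64-bit value shifts down to something that fits in a Smi.)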
3459 if (value < (64 - compiler::target::kSmiBits)) {
3460 if (deopt != nullptr) {
3461 __ bltz(left, deopt);
3462 } else {
3463 // Operation cannot overflow only if left value is always
3464 // non-negative.
3465 ASSERT(!can_overflow());
3466 }
3467 // At this point left operand is non-negative, so unsigned shift
3468 // can't overflow.
3470 __ li(result, 0);
3471 } else {
3472 __ srli(result, left, value + kSmiTagSize);
3473 __ SmiTag(result);
3474 }
3475 } else {
3476 // Shift amount > 32, and the result is guaranteed to fit into Smi.
3477 // Low (Smi) part of the left operand is shifted out.
3478 // High part is filled with sign bits.
3479 __ srai(result, left, 31);
3480 __ srli(result, result, value - 32);
3481 __ SmiTag(result);
3482 }
3483#else
3484 // The logical-shift-right operation masks the count to 6 bits, but
3485 // unsigned shifts by >= kBitsPerInt64 are eliminated by
3486 // BinaryIntegerOpInstr::Canonicalize.
3487 const intptr_t kCountLimit = XLEN - 1;
3488 intptr_t value = Smi::Cast(constant).Value();
3489 ASSERT((value >= 0) && (value <= kCountLimit));
3490 __ SmiUntag(TMP, left);
3491 __ srli(TMP, TMP, value);
3492 __ SmiTag(result, TMP);
3493 if (deopt != nullptr) {
3494 __ SmiUntag(TMP2, result);
3495 __ bne(TMP, TMP2, deopt);
3496 }
3497#endif
3498 break;
3499 }
3500 default:
3501 UNREACHABLE();
3502 break;
3503 }
3504 return;
3505 }
3506
3507 const Register right = locs()->in(1).reg();
3508 switch (op_kind()) {
3509 case Token::kADD: {
3510 if (deopt == nullptr) {
3511 __ add(result, left, right);
3512 } else if (RangeUtils::IsPositive(right_range())) {
3513 ASSERT(result != left);
3514 __ add(result, left, right);
3515 __ blt(result, left, deopt);
3516 } else if (RangeUtils::IsNegative(right_range())) {
3517 ASSERT(result != left);
3518 __ add(result, left, right);
3519 __ bgt(result, left, deopt);
3520 } else {
3521 __ AddBranchOverflow(result, left, right, deopt);
3522 }
3523 break;
3524 }
3525 case Token::kSUB: {
3526 if (deopt == nullptr) {
3527 __ sub(result, left, right);
3528 } else if (RangeUtils::IsPositive(right_range())) {
3529 ASSERT(result != left);
3530 __ sub(result, left, right);
3531 __ bgt(result, left, deopt);
3532 } else if (RangeUtils::IsNegative(right_range())) {
3533 ASSERT(result != left);
3534 __ sub(result, left, right);
3535 __ blt(result, left, deopt);
3536 } else {
3537 __ SubtractBranchOverflow(result, left, right, deopt);
3538 }
3539 break;
3540 }
3541 case Token::kMUL: {
3542 const Register temp = locs()->temp(0).reg();
3543 __ SmiUntag(temp, left);
3544 if (deopt == nullptr) {
3545 __ mul(result, temp, right);
3546 } else {
3547 __ MultiplyBranchOverflow(result, temp, right, deopt);
3548 }
3549 break;
3550 }
3551 case Token::kBIT_AND: {
3552 // No overflow check.
3553 __ and_(result, left, right);
3554 break;
3555 }
3556 case Token::kBIT_OR: {
3557 // No overflow check.
3558 __ or_(result, left, right);
3559 break;
3560 }
3561 case Token::kBIT_XOR: {
3562 // No overflow check.
3563 __ xor_(result, left, right);
3564 break;
3565 }
3566 case Token::kTRUNCDIV: {
3567 if (RangeUtils::CanBeZero(right_range())) {
3568 // Handle divide by zero in runtime.
3569 __ beqz(right, deopt);
3570 }
3571 __ SmiUntag(TMP, left);
3572 __ SmiUntag(TMP2, right);
3573 __ div(TMP, TMP, TMP2);
3574 __ SmiTag(result, TMP);
3575
3576 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3577 // Check the corner case of dividing MIN_SMI by -1, in which
3578 // case we cannot tag the result.
3579 __ SmiUntag(TMP2, result);
3580 __ bne(TMP, TMP2, deopt);
3581 }
3582 break;
3583 }
3584 case Token::kMOD: {
3585 if (RangeUtils::CanBeZero(right_range())) {
3586 // Handle divide by zero in runtime.
3587 __ beqz(right, deopt);
3588 }
3589 __ SmiUntag(TMP, left);
3590 __ SmiUntag(TMP2, right);
3591
3592 __ rem(result, TMP, TMP2);
3593
3594 // res = left % right;
3595 // if (res < 0) {
3596 // if (right < 0) {
3597 // res = res - right;
3598 // } else {
3599 // res = res + right;
3600 // }
3601 // }
3602 compiler::Label done, adjust;
3604 // Result is negative, adjust it.
3605 __ bgez(right, &adjust, compiler::Assembler::kNearJump);
3606 __ sub(result, result, TMP2);
3608 __ Bind(&adjust);
3609 __ add(result, result, TMP2);
3610 __ Bind(&done);
3611 __ SmiTag(result);
3612 break;
3613 }
3614 case Token::kSHR: {
3615 if (CanDeoptimize()) {
3616 __ bltz(right, deopt);
3617 }
3618 __ SmiUntag(TMP, right);
3619 // The sra[w] shift operation masks the count to 6/5 bits.
3620 const intptr_t kCountLimit = XLEN - 1;
3621 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3622 __ LoadImmediate(TMP2, kCountLimit);
3623 compiler::Label shift_in_bounds;
3624 __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
3625 __ mv(TMP, TMP2);
3626 __ Bind(&shift_in_bounds);
3627 }
3628 __ SmiUntag(TMP2, left);
3629 __ sra(result, TMP2, TMP);
3630 __ SmiTag(result);
3631 break;
3632 }
3633 case Token::kUSHR: {
3634#if XLEN == 32
3635 compiler::Label done;
3636 __ SmiUntag(TMP, right);
3637 // 64-bit representation of left operand value:
3638 //
3639 //       ss...sssss  s  s  xxxxxxxxxxxxx
3640 //       |        |  |  |  |           |
3641 //       63      32  31 30 kSmiBits-1  0
3642 //
3643 // Where 's' is a sign bit.
3644 //
3645 // If left operand is negative (sign bit is set), then
3646 // result will fit into Smi range if and only if
3647 // the shift amount >= 64 - kSmiBits.
3648 //
3649 // If left operand is non-negative, the result always
3650 // fits into Smi range.
3651 //
3653 right_range(), 64 - compiler::target::kSmiBits - 1)) {
3654 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3655 kBitsPerInt64 - 1)) {
3656 ASSERT(result != left);
3657 ASSERT(result != right);
3658 __ li(result, 0);
3659 __ CompareImmediate(TMP, kBitsPerInt64);
3660 // If shift amount >= 64, then result is 0.
3662 }
3663 __ CompareImmediate(TMP, 64 - compiler::target::kSmiBits);
3664 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3665 // Result is guaranteed to fit into Smi range.
3666 // Low (Smi) part of the left operand is shifted out.
3667 // High part is filled with sign bits.
3668 compiler::Label next;
3670 __ subi(TMP, TMP, 32);
3671 __ srai(result, left, 31);
3672 __ srl(result, result, TMP);
3673 __ SmiTag(result);
3675 __ Bind(&next);
3676 }
3677 // Shift amount < 64 - kSmiBits.
3678 // If left is negative, then result will not fit into Smi range.
3679 // Also deopt in case of negative shift amount.
3680 if (deopt != nullptr) {
3681 __ bltz(left, deopt);
3682 __ bltz(right, deopt);
3683 } else {
3684 ASSERT(!can_overflow());
3685 }
3686 // At this point left operand is non-negative, so unsigned shift
3687 // can't overflow.
3688 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3690 ASSERT(result != left);
3691 ASSERT(result != right);
3692 __ li(result, 0);
3693 __ CompareImmediate(TMP, compiler::target::kSmiBits);
3694 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3696 }
3697 // Left operand >= 0, shift amount < kSmiBits < 32.
3698 const Register temp = locs()->temp(0).reg();
3699 __ SmiUntag(temp, left);
3700 __ srl(result, temp, TMP);
3701 __ SmiTag(result);
3702 __ Bind(&done);
3703#elif XLEN == 64
3704 if (CanDeoptimize()) {
3705 __ bltz(right, deopt);
3706 }
3707 __ SmiUntag(TMP, right);
3708 // The srl shift operation masks the count to 6 bits.
3709 const intptr_t kCountLimit = XLEN - 1;
3710 COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
3711 compiler::Label done;
3712 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3713 __ LoadImmediate(TMP2, kCountLimit);
3714 compiler::Label shift_in_bounds;
3715 __ ble(TMP, TMP2, &shift_in_bounds, compiler::Assembler::kNearJump);
3716 __ mv(result, ZR);
3718 __ Bind(&shift_in_bounds);
3719 }
3720 __ SmiUntag(TMP2, left);
3721 __ srl(TMP, TMP2, TMP);
3722 __ SmiTag(result, TMP);
3723 if (deopt != nullptr) {
3724 __ SmiUntag(TMP2, result);
3725 __ bne(TMP, TMP2, deopt);
3726 }
3727 __ Bind(&done);
3728#else
3729 UNIMPLEMENTED();
3730#endif
3731 break;
3732 }
3733 case Token::kDIV: {
3734 // Dispatches to 'Double./'.
3735 // TODO(srdjan): Implement as conversion to double and double division.
3736 UNREACHABLE();
3737 break;
3738 }
3739 case Token::kOR:
3740 case Token::kAND: {
3741 // Flow graph builder has dissected this operation to guarantee correct
3742 // behavior (short-circuit evaluation).
3743 UNREACHABLE();
3744 break;
3745 }
3746 default:
3747 UNREACHABLE();
3748 break;
3749 }
3750}
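// A host-side sketch (not generated code) of the two checks used above for
// overflow detection, assuming two's-complement wraparound (modeled with
// unsigned arithmetic), an arithmetic right shift for signed types, and a
// 1-bit Smi tag. Illustrative only.
static inline bool SketchAddOverflowsWithPositiveRhs(intptr_t a, intptr_t b) {
  // Mirrors `add result, left, right; blt result, left, deopt`: with b > 0,
  // the sum wraps around exactly when it compares less than a.
  const intptr_t sum = static_cast<intptr_t>(static_cast<uintptr_t>(a) +
                                             static_cast<uintptr_t>(b));
  return sum < a;
}
static inline bool SketchFitsInSmi(intptr_t value) {
  // Mirrors the SmiTag/SmiUntag round trip used after division and shifts:
  // a value fits in a Smi iff shifting it left by one and arithmetically
  // shifting back is lossless.
  const intptr_t tagged =
      static_cast<intptr_t>(static_cast<uintptr_t>(value) << 1);
  return (tagged >> 1) == value;
}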
3751
3752LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3753 bool opt) const {
3754 intptr_t left_cid = left()->Type()->ToCid();
3755 intptr_t right_cid = right()->Type()->ToCid();
3756 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3757 const intptr_t kNumInputs = 2;
3758 const intptr_t kNumTemps = 0;
3759 LocationSummary* summary = new (zone)
3760 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3761 summary->set_in(0, Location::RequiresRegister());
3762 summary->set_in(1, Location::RequiresRegister());
3763 return summary;
3764}
3765
3766void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3767 compiler::Label* deopt =
3768 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3769 intptr_t left_cid = left()->Type()->ToCid();
3770 intptr_t right_cid = right()->Type()->ToCid();
3771 const Register left = locs()->in(0).reg();
3772 const Register right = locs()->in(1).reg();
3773 if (this->left()->definition() == this->right()->definition()) {
3774 __ BranchIfSmi(left, deopt);
3775 } else if (left_cid == kSmiCid) {
3776 __ BranchIfSmi(right, deopt);
3777 } else if (right_cid == kSmiCid) {
3778 __ BranchIfSmi(left, deopt);
3779 } else {
3780 __ or_(TMP, left, right);
3781 __ BranchIfSmi(TMP, deopt);
3782 }
3783}
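// A host-side sketch (not generated code) of the combined Smi test above,
// assuming the usual 1-bit tag with kSmiTag == 0. Illustrative only.
static inline bool SketchBothAreSmis(uintptr_t a, uintptr_t b) {
  // Mirrors `or_ TMP, left, right; BranchIfSmi(TMP, deopt)`: the OR of two
  // tagged words has a clear tag bit exactly when both tag bits are clear.
  return ((a | b) & 1u) == 0;
}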
3784
3785LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3786 const intptr_t kNumInputs = 1;
3787 const intptr_t kNumTemps = 0;
3788 LocationSummary* summary = new (zone) LocationSummary(
3789 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3791 summary->set_out(0, Location::RequiresRegister());
3792 return summary;
3793}
3794
3795void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3796 const Register out_reg = locs()->out(0).reg();
3797 const FRegister value = locs()->in(0).fpu_reg();
3798
3800 compiler->BoxClassFor(from_representation()),
3801 out_reg, TMP);
3802
3803 switch (from_representation()) {
3804 case kUnboxedDouble:
3805 __ StoreDFieldToOffset(value, out_reg, ValueOffset());
3806 break;
3807 case kUnboxedFloat:
3808 __ fcvtds(FpuTMP, value);
3809 __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
3810 break;
3811 case kUnboxedFloat32x4:
3812 case kUnboxedFloat64x2:
3813 case kUnboxedInt32x4:
3814 UNIMPLEMENTED();
3815 break;
3816 default:
3817 UNREACHABLE();
3818 break;
3819 }
3820}
3821
3822LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3824 const intptr_t kNumInputs = 1;
3825 const intptr_t kNumTemps = 1;
3826 const bool is_floating_point =
3828 LocationSummary* summary = new (zone)
3829 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3830 summary->set_in(0, Location::RequiresRegister());
3831 summary->set_temp(0, Location::RequiresRegister());
3832
3833 if (is_floating_point) {
3834 summary->set_out(0, Location::RequiresFpuRegister());
3835#if XLEN == 32
3836 } else if (representation() == kUnboxedInt64) {
3837 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
3839#endif
3840 } else {
3841 summary->set_out(0, Location::RequiresRegister());
3842 }
3843 return summary;
3844}
3845
3846void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3847 const Register box = locs()->in(0).reg();
3848
3849 switch (representation()) {
3850 case kUnboxedInt64: {
3851#if XLEN == 32
3852 PairLocation* result = locs()->out(0).AsPairLocation();
3853 ASSERT(result->At(0).reg() != box);
3854 __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
3855 __ LoadFieldFromOffset(result->At(1).reg(), box,
3856 ValueOffset() + compiler::target::kWordSize);
3857#elif XLEN == 64
3858 const Register result = locs()->out(0).reg();
3859 __ ld(result, compiler::FieldAddress(box, ValueOffset()));
3860#endif
3861 break;
3862 }
3863
3864 case kUnboxedDouble: {
3865 const FRegister result = locs()->out(0).fpu_reg();
3866 __ LoadDFieldFromOffset(result, box, ValueOffset());
3867 break;
3868 }
3869
3870 case kUnboxedFloat: {
3871 const FRegister result = locs()->out(0).fpu_reg();
3872 __ LoadDFieldFromOffset(result, box, ValueOffset());
3873 __ fcvtsd(result, result);
3874 break;
3875 }
3876
3877 case kUnboxedFloat32x4:
3878 case kUnboxedFloat64x2:
3879 case kUnboxedInt32x4: {
3880 UNIMPLEMENTED();
3881 break;
3882 }
3883
3884 default:
3885 UNREACHABLE();
3886 break;
3887 }
3888}
3889
3890void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3891 const Register box = locs()->in(0).reg();
3892
3893 switch (representation()) {
3894#if XLEN == 32
3895 case kUnboxedInt64: {
3896 PairLocation* result = locs()->out(0).AsPairLocation();
3897 __ SmiUntag(result->At(0).reg(), box);
3898 __ srai(result->At(1).reg(), box, XLEN - 1); // SignFill.
3899 break;
3900 }
3901#elif XLEN == 64
3902 case kUnboxedInt32:
3903 case kUnboxedInt64: {
3904 const Register result = locs()->out(0).reg();
3905 __ SmiUntag(result, box);
3906 break;
3907 }
3908#endif
3909
3910 case kUnboxedFloat: {
3911 const FRegister result = locs()->out(0).fpu_reg();
3912 __ SmiUntag(TMP, box);
3913#if XLEN == 32
3914 __ fcvtsw(result, TMP);
3915#elif XLEN == 64
3916 __ fcvtsl(result, TMP);
3917#endif
3918 break;
3919 }
3920 case kUnboxedDouble: {
3921 const FRegister result = locs()->out(0).fpu_reg();
3922 __ SmiUntag(TMP, box);
3923#if XLEN == 32
3924 __ fcvtdw(result, TMP);
3925#elif XLEN == 64
3926 __ fcvtdl(result, TMP);
3927#endif
3928 break;
3929 }
3930
3931 default:
3932 UNREACHABLE();
3933 break;
3934 }
3935}
3936
3937void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3938 const Register value = locs()->in(0).reg();
3939 const Register result = locs()->out(0).reg();
3940 __ LoadInt32FromBoxOrSmi(result, value);
3941}
3942
3943void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3944#if XLEN == 32
3945 const Register box = locs()->in(0).reg();
3946 PairLocation* result = locs()->out(0).AsPairLocation();
3947 ASSERT(result->At(0).reg() != box);
3948 ASSERT(result->At(1).reg() != box);
3949 compiler::Label done;
3950 __ srai(result->At(1).reg(), box, XLEN - 1); // SignFill
3951 __ SmiUntag(result->At(0).reg(), box);
3952 __ BranchIfSmi(box, &done, compiler::Assembler::kNearJump);
3953 EmitLoadFromBox(compiler);
3954 __ Bind(&done);
3955#else
3956 const Register value = locs()->in(0).reg();
3957 const Register result = locs()->out(0).reg();
3958 __ LoadInt64FromBoxOrSmi(result, value);
3959#endif
3960}
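// A host-side sketch (not generated code) of the 32-bit Smi fast path above,
// assuming arithmetic right shift for signed types and a 1-bit tag.
// Illustrative only.
static inline void SketchSmiToInt64Pair(int32_t tagged_smi,
                                        int32_t* out_lo,
                                        int32_t* out_hi) {
  *out_hi = tagged_smi >> 31;  // srai(hi, box, XLEN - 1): sign fill.
  *out_lo = tagged_smi >> 1;   // SmiUntag(lo, box): drop the tag bit.
}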
3961
3962LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3963 bool opt) const {
3964 ASSERT((from_representation() == kUnboxedInt32) ||
3965 (from_representation() == kUnboxedUint32));
3966 const intptr_t kNumInputs = 1;
3967 const intptr_t kNumTemps = 0;
3968#if XLEN > 32
3969 // ValueFitsSmi() may be overly conservative and false because we only
3970 // perform range analysis during optimized compilation.
3971 const bool kMayAllocateMint = false;
3972#else
3973 const bool kMayAllocateMint = !ValueFitsSmi();
3974#endif
3975 LocationSummary* summary = new (zone)
3976 LocationSummary(zone, kNumInputs, kNumTemps,
3977 kMayAllocateMint ? LocationSummary::kCallOnSlowPath
3979 summary->set_in(0, Location::RequiresRegister());
3980 summary->set_out(0, Location::RequiresRegister());
3981 return summary;
3982}
3983
3984void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3985 Register value = locs()->in(0).reg();
3986 Register out = locs()->out(0).reg();
3987 ASSERT(value != out);
3988
3989#if XLEN > 32
3991 __ slli(out, value, XLEN - 32);
3992 if (from_representation() == kUnboxedInt32) {
3993 __ srai(out, out, XLEN - 32 - kSmiTagShift);
3994 } else {
3995 ASSERT(from_representation() == kUnboxedUint32);
3996 __ srli(out, out, XLEN - 32 - kSmiTagShift);
3997 }
3998#elif XLEN == 32
3999 __ slli(out, value, 1);
4000 if (ValueFitsSmi()) {
4001 return;
4002 }
4003 compiler::Label done;
4004 if (from_representation() == kUnboxedInt32) {
4005 __ srai(TMP, out, 1);
4006 __ beq(TMP, value, &done);
4007 } else {
4008 ASSERT(from_representation() == kUnboxedUint32);
4009 __ srli(TMP, value, 30);
4010 __ beqz(TMP, &done);
4011 }
4012
4014 TMP);
4015 __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
4016 if (from_representation() == kUnboxedInt32) {
4017 __ srai(TMP, value, 31);
4018 __ StoreFieldToOffset(
4019 TMP, out,
4021 } else {
4022 ASSERT(from_representation() == kUnboxedUint32);
4023 __ StoreFieldToOffset(
4024 ZR, out,
4026 }
4027 __ Bind(&done);
4028#endif
4029}
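// A host-side sketch (not generated code) of the 64-bit shift-pair tagging
// above, assuming kSmiTagShift == 1 and arithmetic right shift for signed
// types. Illustrative only.
static inline int64_t SketchTagInt32(int32_t value) {
  // slli out, value, 32; srai out, out, 31: sign-extend and tag in one pair.
  const int64_t shifted = static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<int64_t>(value)) << 32);
  return shifted >> 31;
}
static inline int64_t SketchTagUint32(uint32_t value) {
  // slli out, value, 32; srli out, out, 31: zero-extend and tag.
  return static_cast<int64_t>((static_cast<uint64_t>(value) << 32) >> 31);
}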
4030
4031LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4032 bool opt) const {
4033 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
4034 // FLAG_use_bare_instructions mode and only after VM isolate stubs were
4035 // replaced with isolate-specific stubs.
4036 auto object_store = IsolateGroup::Current()->object_store();
4037 const bool stubs_in_vm_isolate =
4038 object_store->allocate_mint_with_fpu_regs_stub()
4039 ->untag()
4040 ->InVMIsolateHeap() ||
4041 object_store->allocate_mint_without_fpu_regs_stub()
4042 ->untag()
4043 ->InVMIsolateHeap();
4044 const bool shared_slow_path_call =
4045 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
4046 const intptr_t kNumInputs = 1;
4047 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4048 LocationSummary* summary = new (zone) LocationSummary(
4049 zone, kNumInputs, kNumTemps,
4050 ValueFitsSmi()
4052 : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4054#if XLEN == 32
4055 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
4057#else
4058 summary->set_in(0, Location::RequiresRegister());
4059#endif
4060 if (ValueFitsSmi()) {
4061 summary->set_out(0, Location::RequiresRegister());
4062 } else if (shared_slow_path_call) {
4063 summary->set_out(0,
4066 } else {
4067 summary->set_out(0, Location::RequiresRegister());
4068 summary->set_temp(0, Location::RequiresRegister());
4069 }
4070 return summary;
4071}
4072
4073void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4074#if XLEN == 32
4075 if (ValueFitsSmi()) {
4076 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4077 Register value_lo = value_pair->At(0).reg();
4078 Register out_reg = locs()->out(0).reg();
4079 __ SmiTag(out_reg, value_lo);
4080 return;
4081 }
4082
4083 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4084 Register value_lo = value_pair->At(0).reg();
4085 Register value_hi = value_pair->At(1).reg();
4086 Register out_reg = locs()->out(0).reg();
4087
4088 compiler::Label overflow, done;
4089 __ SmiTag(out_reg, value_lo);
4090 __ srai(TMP, out_reg, kSmiTagSize);
4091 __ bne(value_lo, TMP, &overflow, compiler::Assembler::kNearJump);
4092 __ srai(TMP, out_reg, XLEN - 1); // SignFill
4093 __ beq(value_hi, TMP, &done, compiler::Assembler::kNearJump);
4094
4095 __ Bind(&overflow);
4096 if (compiler->intrinsic_mode()) {
4097 __ TryAllocate(compiler->mint_class(),
4098 compiler->intrinsic_slow_path_label(),
4100 } else if (locs()->call_on_shared_slow_path()) {
4101 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4102 if (!has_frame) {
4103 ASSERT(__ constant_pool_allowed());
4104 __ set_constant_pool_allowed(false);
4105 __ EnterDartFrame(0);
4106 }
4107 auto object_store = compiler->isolate_group()->object_store();
4108 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4109 const auto& stub = Code::ZoneHandle(
4110 compiler->zone(),
4111 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4112 : object_store->allocate_mint_without_fpu_regs_stub());
4113
4114 ASSERT(!locs()->live_registers()->ContainsRegister(
4116 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4117 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4118 locs(), DeoptId::kNone, extended_env);
4119 if (!has_frame) {
4120 __ LeaveDartFrame();
4121 __ set_constant_pool_allowed(true);
4122 }
4123 } else {
4125 out_reg, TMP);
4126 }
4127
4128 __ StoreFieldToOffset(value_lo, out_reg,
4130 __ StoreFieldToOffset(
4131 value_hi, out_reg,
4133 __ Bind(&done);
4134#else
4135 Register in = locs()->in(0).reg();
4136 Register out = locs()->out(0).reg();
4137 if (ValueFitsSmi()) {
4138 __ SmiTag(out, in);
4139 return;
4140 }
4141 ASSERT(kSmiTag == 0);
4142 compiler::Label done;
4143
4144 ASSERT(out != in);
4145 __ SmiTag(out, in);
4146 __ SmiUntag(TMP, out);
4147 __ beq(in, TMP, &done); // No overflow.
4148
4149 if (compiler->intrinsic_mode()) {
4150 __ TryAllocate(compiler->mint_class(),
4151 compiler->intrinsic_slow_path_label(),
4153 } else if (locs()->call_on_shared_slow_path()) {
4154 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4155 if (!has_frame) {
4156 ASSERT(__ constant_pool_allowed());
4157 __ set_constant_pool_allowed(false);
4158 __ EnterDartFrame(0);
4159 }
4160 auto object_store = compiler->isolate_group()->object_store();
4161 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4162 const auto& stub = Code::ZoneHandle(
4163 compiler->zone(),
4164 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4165 : object_store->allocate_mint_without_fpu_regs_stub());
4166
4167 ASSERT(!locs()->live_registers()->ContainsRegister(
4169 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4170 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4171 locs(), DeoptId::kNone, extended_env);
4172 if (!has_frame) {
4173 __ LeaveDartFrame();
4174 __ set_constant_pool_allowed(true);
4175 }
4176 } else {
4178 TMP);
4179 }
4180
4181 __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag);
4182 __ Bind(&done);
4183#endif
4184}
4185
4186#if XLEN == 32
4187static void LoadInt32FromMint(FlowGraphCompiler* compiler,
4188 Register mint,
4190 compiler::Label* deopt) {
4191 __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
4192 if (deopt != nullptr) {
4193 __ LoadFieldFromOffset(
4194 TMP, mint,
4196 __ srai(TMP2, result, XLEN - 1);
4197 __ bne(TMP, TMP2, deopt);
4198 }
4199}
4200#endif
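// A host-side sketch (not generated code) of the range check in
// LoadInt32FromMint above, assuming arithmetic right shift: a 64-bit value
// fits in int32 iff its high word equals the sign fill of its low word.
// Illustrative only.
static inline bool SketchMintFitsInInt32(int32_t lo, int32_t hi) {
  return hi == (lo >> 31);
}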
4201
4202LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
4203 bool opt) const {
4204 const intptr_t kNumInputs = 1;
4205 const intptr_t kNumTemps = 0;
4206 LocationSummary* summary = new (zone)
4207 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4208 summary->set_in(0, Location::RequiresRegister());
4209 summary->set_out(0, Location::RequiresRegister());
4210 return summary;
4211}
4212
4213void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4214#if XLEN == 32
4215 const intptr_t value_cid = value()->Type()->ToCid();
4216 const Register value = locs()->in(0).reg();
4217 const Register out = locs()->out(0).reg();
4218 compiler::Label* deopt =
4220 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4221 : nullptr;
4222 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
4223 ASSERT(value != out);
4224
4225 if (value_cid == kSmiCid) {
4226 __ SmiUntag(out, value);
4227 } else if (value_cid == kMintCid) {
4228 LoadInt32FromMint(compiler, value, out, out_of_range);
4229 } else if (!CanDeoptimize()) {
4230 compiler::Label done;
4231 __ SmiUntag(out, value);
4233 LoadInt32FromMint(compiler, value, out, nullptr);
4234 __ Bind(&done);
4235 } else {
4236 compiler::Label done;
4237 __ SmiUntag(out, value);
4239 __ CompareClassId(value, kMintCid, TMP);
4240 __ BranchIf(NE, deopt);
4241 LoadInt32FromMint(compiler, value, out, out_of_range);
4242 __ Bind(&done);
4243 }
4244#elif XLEN == 64
4245 const intptr_t value_cid = value()->Type()->ToCid();
4246 const Register out = locs()->out(0).reg();
4247 const Register value = locs()->in(0).reg();
4248 compiler::Label* deopt =
4250 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4251 : nullptr;
4252
4253 if (value_cid == kSmiCid) {
4254 __ SmiUntag(out, value);
4255 } else if (value_cid == kMintCid) {
4256 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4257 } else if (!CanDeoptimize()) {
4258 // Type information is not conclusive, but range analysis found
4259 // the value to be in int64 range. Therefore it must be a smi
4260 // or mint value.
4261 ASSERT(is_truncating());
4262 compiler::Label done;
4263 __ SmiUntag(out, value);
4265 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4266 __ Bind(&done);
4267 } else {
4268 compiler::Label done;
4269 __ SmiUntag(out, value);
4271 __ CompareClassId(value, kMintCid, TMP);
4272 __ BranchIf(NE, deopt);
4273 __ LoadFieldFromOffset(out, value, Mint::value_offset());
4274 __ Bind(&done);
4275 }
4276
4277 // TODO(vegorov): as implemented right now, truncating unboxing would
4278 // leave "garbage" in the higher word.
4279 if (!is_truncating() && (deopt != nullptr)) {
4280 ASSERT(representation() == kUnboxedInt32);
4281 __ sextw(TMP, out);
4282 __ bne(TMP, out, deopt);
4283 }
4284#endif
4285}
4286
4287LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4288 bool opt) const {
4289 const intptr_t kNumInputs = 2;
4290 const intptr_t kNumTemps = 0;
4291 LocationSummary* summary = new (zone)
4292 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4294 summary->set_in(1, Location::RequiresFpuRegister());
4295 summary->set_out(0, Location::RequiresFpuRegister());
4296 return summary;
4297}
4298
4299void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4300 const FRegister left = locs()->in(0).fpu_reg();
4301 const FRegister right = locs()->in(1).fpu_reg();
4302 const FRegister result = locs()->out(0).fpu_reg();
4303 if (representation() == kUnboxedDouble) {
4304 switch (op_kind()) {
4305 case Token::kADD:
4306 __ faddd(result, left, right);
4307 break;
4308 case Token::kSUB:
4309 __ fsubd(result, left, right);
4310 break;
4311 case Token::kMUL:
4312 __ fmuld(result, left, right);
4313 break;
4314 case Token::kDIV:
4315 __ fdivd(result, left, right);
4316 break;
4317 case Token::kMIN:
4318 __ fmind(result, left, right);
4319 break;
4320 case Token::kMAX:
4321 __ fmaxd(result, left, right);
4322 break;
4323 default:
4324 UNREACHABLE();
4325 }
4326 } else {
4327 ASSERT(representation() == kUnboxedFloat);
4328 switch (op_kind()) {
4329 case Token::kADD:
4330 __ fadds(result, left, right);
4331 break;
4332 case Token::kSUB:
4333 __ fsubs(result, left, right);
4334 break;
4335 case Token::kMUL:
4336 __ fmuls(result, left, right);
4337 break;
4338 case Token::kDIV:
4339 __ fdivs(result, left, right);
4340 break;
4341 case Token::kMIN:
4342 __ fmins(result, left, right);
4343 break;
4344 case Token::kMAX:
4345 __ fmaxs(result, left, right);
4346 break;
4347 default:
4348 UNREACHABLE();
4349 }
4350 }
4351}
4352
4353LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
4354 bool opt) const {
4355 const intptr_t kNumInputs = 1;
4356 const intptr_t kNumTemps = 0;
4357 LocationSummary* summary = new (zone)
4358 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4360 summary->set_out(0, Location::RequiresRegister());
4361 return summary;
4362}
4363
4364Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
4365 BranchLabels labels) {
4366 ASSERT(compiler->is_optimizing());
4367 const FRegister value = locs()->in(0).fpu_reg();
4368
4369 __ fclassd(TMP, value);
4370 if (op_kind() == MethodRecognizer::kDouble_getIsNaN) {
4371 __ TestImmediate(TMP, kFClassSignallingNan | kFClassQuietNan);
4372 } else if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) {
4373 __ TestImmediate(TMP, kFClassNegInfinity | kFClassPosInfinity);
4374 } else if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
4375 __ TestImmediate(TMP, kFClassNegInfinity | kFClassNegNormal |
4377 } else {
4378 UNREACHABLE();
4379 }
4380 return kind() == Token::kEQ ? NOT_ZERO : ZERO;
4381}
4382
4383LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4384 UNREACHABLE();
4385 return nullptr;
4386}
4387
4388void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4389 UNREACHABLE();
4390}
4391
4393 Zone* zone,
4394 bool opt) const {
4395 const intptr_t kNumTemps = 0;
4396 LocationSummary* summary = new (zone)
4397 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4398 summary->set_in(0, Location::RegisterLocation(A0));
4399 summary->set_in(1, Location::RegisterLocation(A1));
4400 summary->set_in(2, Location::RegisterLocation(A2));
4401 // Can't specify A3 because it is blocked in register allocation as TMP.
4402 summary->set_in(3, Location::Any());
4403 summary->set_out(0, Location::RegisterLocation(A0));
4404 return summary;
4405}
4406
4408 if (compiler->intrinsic_mode()) {
4409 // Would also need to preserve CODE_REG and ARGS_DESC_REG.
4410 UNIMPLEMENTED();
4411 }
4412
4413 compiler::LeafRuntimeScope rt(compiler->assembler(),
4414 /*frame_size=*/0,
4415 /*preserve_registers=*/false);
4416 if (locs()->in(3).IsRegister()) {
4417 __ mv(A3, locs()->in(3).reg());
4418 } else if (locs()->in(3).IsStackSlot()) {
4419 __ lx(A3, LocationToStackSlotAddress(locs()->in(3)));
4420 } else {
4421 UNIMPLEMENTED();
4422 }
4423 rt.Call(TargetFunction(), TargetFunction().argument_count());
4424}
4425
4426LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4427 bool opt) const {
4428 if (result_cid() == kDoubleCid) {
4429 const intptr_t kNumInputs = 2;
4430 const intptr_t kNumTemps = 0;
4431 LocationSummary* summary = new (zone)
4432 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4434 summary->set_in(1, Location::RequiresFpuRegister());
4435 // Reuse the left register so that code can be made shorter.
4436 summary->set_out(0, Location::SameAsFirstInput());
4437 return summary;
4438 }
4439 ASSERT(result_cid() == kSmiCid);
4440 const intptr_t kNumInputs = 2;
4441 const intptr_t kNumTemps = 0;
4442 LocationSummary* summary = new (zone)
4443 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4444 summary->set_in(0, Location::RequiresRegister());
4445 summary->set_in(1, Location::RequiresRegister());
4446 // Reuse the left register so that code can be made shorter.
4447 summary->set_out(0, Location::SameAsFirstInput());
4448 return summary;
4449}
4450
4451void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4452 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4453 (op_kind() == MethodRecognizer::kMathMax));
4454 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4455 if (result_cid() == kDoubleCid) {
4456 compiler::Label done, returns_nan, are_equal;
4457 const FRegister left = locs()->in(0).fpu_reg();
4458 const FRegister right = locs()->in(1).fpu_reg();
4459 const FRegister result = locs()->out(0).fpu_reg();
4460 if (is_min) {
4461 __ fmind(result, left, right);
4462 } else {
4463 __ fmaxd(result, left, right);
4464 }
4465 return;
4466 }
4467
4468 ASSERT(result_cid() == kSmiCid);
4469 const Register left = locs()->in(0).reg();
4470 const Register right = locs()->in(1).reg();
4471 const Register result = locs()->out(0).reg();
4472 compiler::Label choose_right, done;
4473 if (is_min) {
4474 __ bgt(left, right, &choose_right, compiler::Assembler::kNearJump);
4475 } else {
4476 __ blt(left, right, &choose_right, compiler::Assembler::kNearJump);
4477 }
4478 __ mv(result, left);
4480 __ Bind(&choose_right);
4481 __ mv(result, right);
4482 __ Bind(&done);
4483}
4484
4485LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4486 bool opt) const {
4487 const intptr_t kNumInputs = 1;
4488 const intptr_t kNumTemps = 0;
4489 LocationSummary* summary = new (zone)
4490 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4491 summary->set_in(0, Location::RequiresRegister());
4492 // We make use of 3-operand instructions by not requiring the result
4493 // register to be identical to the first input register, as on Intel.
4494 summary->set_out(0, Location::RequiresRegister());
4495 return summary;
4496}
4497
4498void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4499 const Register value = locs()->in(0).reg();
4500 const Register result = locs()->out(0).reg();
4501 switch (op_kind()) {
4502 case Token::kNEGATE: {
4503 compiler::Label* deopt =
4504 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4505 __ neg(result, value);
4506 ASSERT(result != value);
4507 __ beq(result, value, deopt); // Overflow.
4508 break;
4509 }
4510 case Token::kBIT_NOT:
4511 __ not_(result, value);
4512 __ andi(result, result, ~kSmiTagMask); // Remove inverted smi-tag.
4513 break;
4514 default:
4515 UNREACHABLE();
4516 }
4517}
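// A host-side sketch (not generated code) of the two unary tricks above,
// assuming a 1-bit Smi tag (kSmiTagMask == 1) and two's-complement wraparound
// modeled with unsigned arithmetic. Illustrative only.
static inline bool SketchNegationOverflows(intptr_t tagged) {
  // Mirrors `neg result, value; beq result, value, deopt`: the check flags
  // any value that is its own negation under wraparound (the most negative
  // tagged value, and zero).
  const intptr_t negated =
      static_cast<intptr_t>(-static_cast<uintptr_t>(tagged));
  return negated == tagged;
}
static inline intptr_t SketchSmiBitNot(intptr_t tagged) {
  // Mirrors `not_ result, value; andi result, result, ~kSmiTagMask`:
  // inverting a tagged Smi also sets the (zero) tag bit, so it is cleared
  // again afterwards.
  return ~tagged & ~static_cast<intptr_t>(1);
}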
4518
4519LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4520 bool opt) const {
4521 const intptr_t kNumInputs = 1;
4522 const intptr_t kNumTemps = 0;
4523 LocationSummary* summary = new (zone)
4524 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4526 summary->set_out(0, Location::RequiresFpuRegister());
4527 return summary;
4528}
4529
4530void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4531 const FRegister result = locs()->out(0).fpu_reg();
4532 const FRegister value = locs()->in(0).fpu_reg();
4533 if (representation() == kUnboxedDouble) {
4534 switch (op_kind()) {
4535 case Token::kABS:
4536 __ fabsd(result, value);
4537 break;
4538 case Token::kNEGATE:
4539 __ fnegd(result, value);
4540 break;
4541 case Token::kSQRT:
4542 __ fsqrtd(result, value);
4543 break;
4544 case Token::kSQUARE:
4545 __ fmuld(result, value, value);
4546 break;
4547 default:
4548 UNREACHABLE();
4549 }
4550 } else {
4551 ASSERT(representation() == kUnboxedFloat);
4552 switch (op_kind()) {
4553 case Token::kABS:
4554 __ fabss(result, value);
4555 break;
4556 case Token::kNEGATE:
4557 __ fnegs(result, value);
4558 break;
4559 case Token::kRECIPROCAL:
4560 __ li(TMP, 1);
4561 __ fcvtsw(FTMP, TMP);
4562 __ fdivs(result, FTMP, value);
4563 break;
4564 case Token::kRECIPROCAL_SQRT:
4565 __ li(TMP, 1);
4566 __ fcvtsw(FTMP, TMP);
4567 __ fdivs(result, FTMP, value);
4568 __ fsqrts(result, result);
4569 break;
4570 case Token::kSQRT:
4571 __ fsqrts(result, value);
4572 break;
4573 case Token::kSQUARE:
4574 __ fmuls(result, value, value);
4575 break;
4576 default:
4577 UNREACHABLE();
4578 }
4579 }
4580}
4581
4582LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4583 bool opt) const {
4584 const intptr_t kNumInputs = 1;
4585 const intptr_t kNumTemps = 0;
4586 LocationSummary* result = new (zone)
4587 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4588 result->set_in(0, Location::RequiresRegister());
4590 return result;
4591}
4592
4593void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4594 const Register value = locs()->in(0).reg();
4595 const FRegister result = locs()->out(0).fpu_reg();
4596 __ fcvtdw(result, value);
4597}
4598
4599LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4600 bool opt) const {
4601 const intptr_t kNumInputs = 1;
4602 const intptr_t kNumTemps = 0;
4603 LocationSummary* result = new (zone)
4604 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4605 result->set_in(0, Location::RequiresRegister());
4607 return result;
4608}
4609
4610void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4611 const Register value = locs()->in(0).reg();
4612 const FRegister result = locs()->out(0).fpu_reg();
4613 __ SmiUntag(TMP, value);
4614#if XLEN == 32
4615 __ fcvtdw(result, TMP);
4616#else
4617 __ fcvtdl(result, TMP);
4618#endif
4619}
4620
4621LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4622 bool opt) const {
4623#if XLEN == 32
4624 UNIMPLEMENTED();
4625 return nullptr;
4626#else
4627 const intptr_t kNumInputs = 1;
4628 const intptr_t kNumTemps = 0;
4629 LocationSummary* result = new (zone)
4630 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4631 result->set_in(0, Location::RequiresRegister());
4633 return result;
4634#endif
4635}
4636
4637void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4638#if XLEN == 32
4639 UNIMPLEMENTED();
4640#else
4641 const Register value = locs()->in(0).reg();
4642 const FRegister result = locs()->out(0).fpu_reg();
4643 __ fcvtdl(result, value);
4644#endif
4645}
4646
4647LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4648 bool opt) const {
4649 const intptr_t kNumInputs = 1;
4650 const intptr_t kNumTemps = 0;
4651 LocationSummary* result = new (zone) LocationSummary(
4652 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4654 result->set_out(0, Location::RequiresRegister());
4655 return result;
4656}
4657
4658void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4659 const Register result = locs()->out(0).reg();
4660 const FRegister value_double = locs()->in(0).fpu_reg();
4661
4662 DoubleToIntegerSlowPath* slow_path =
4663 new DoubleToIntegerSlowPath(this, value_double);
4664 compiler->AddSlowPathCode(slow_path);
4665
4666 RoundingMode rounding;
4667 switch (recognized_kind()) {
4668 case MethodRecognizer::kDoubleToInteger:
4669 rounding = RTZ;
4670 break;
4671 case MethodRecognizer::kDoubleFloorToInt:
4672 rounding = RDN;
4673 break;
4674 case MethodRecognizer::kDoubleCeilToInt:
4675 rounding = RUP;
4676 break;
4677 default:
4678 UNREACHABLE();
4679 }
4680
4681#if XLEN == 32
4682 __ fcvtwd(TMP, value_double, rounding);
4683#else
4684 __ fcvtld(TMP, value_double, rounding);
4685#endif
4686 // Underflow -> minint -> Smi tagging fails
4687 // Overflow, NaN -> maxint -> Smi tagging fails
4688
4689 // Check for overflow and that it fits into Smi.
4690 __ SmiTag(result, TMP);
4691 __ SmiUntag(TMP2, result);
4692 __ bne(TMP, TMP2, slow_path->entry_label());
4693 __ Bind(slow_path->exit_label());
4694}
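// A host-side sketch (not generated code) of why the saturated fcvt results
// mentioned above can never pass the Smi round-trip check, assuming
// two's-complement XLEN-bit words and a 1-bit tag. Illustrative only.
static inline bool SketchSaturatedConversionFitsInSmi() {
  const intptr_t kMinWord = static_cast<intptr_t>(
      static_cast<uintptr_t>(1) << (sizeof(uintptr_t) * 8 - 1));
  const intptr_t kMaxWord = ~kMinWord;
  auto fits = [](intptr_t v) {
    const intptr_t tagged =
        static_cast<intptr_t>(static_cast<uintptr_t>(v) << 1);
    return (tagged >> 1) == v;
  };
  // Underflow saturates to the minimum, overflow and NaN to the maximum;
  // tagging shifts out the top bit, so neither value round-trips.
  return fits(kMinWord) || fits(kMaxWord);  // Always false.
}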
4695
4696LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4697 bool opt) const {
4698 const intptr_t kNumInputs = 1;
4699 const intptr_t kNumTemps = 0;
4700 LocationSummary* result = new (zone)
4701 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4703 result->set_out(0, Location::RequiresRegister());
4704 return result;
4705}
4706
4707void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4708 compiler::Label* deopt =
4709 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4710 const Register result = locs()->out(0).reg();
4711 const FRegister value = locs()->in(0).fpu_reg();
4712
4713#if XLEN == 32
4714 __ fcvtwd(TMP, value, RTZ); // Round To Zero (truncation).
4715#else
4716 __ fcvtld(TMP, value, RTZ); // Round To Zero (truncation).
4717#endif
4718 // Underflow -> minint -> Smi tagging fails
4719 // Overflow, NaN -> maxint -> Smi tagging fails
4720
4721 // Check for overflow and that it fits into Smi.
4722 __ SmiTag(result, TMP);
4723 __ SmiUntag(TMP2, result);
4724 __ bne(TMP, TMP2, deopt);
4725}
4726
4727LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4728 bool opt) const {
4729 const intptr_t kNumInputs = 1;
4730 const intptr_t kNumTemps = 0;
4731 LocationSummary* result = new (zone)
4732 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4735 return result;
4736}
4737
4738void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4739 const FRegister value = locs()->in(0).fpu_reg();
4740 const FRegister result = locs()->out(0).fpu_reg();
4741 __ fcvtsd(result, value);
4742}
4743
4744LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4745 bool opt) const {
4746 const intptr_t kNumInputs = 1;
4747 const intptr_t kNumTemps = 0;
4748 LocationSummary* result = new (zone)
4749 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4752 return result;
4753}
4754
4755void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4756 const FRegister value = locs()->in(0).fpu_reg();
4757 const FRegister result = locs()->out(0).fpu_reg();
4758 __ fcvtds(result, value);
4759}
4760
4761LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4762 bool opt) const {
4763 const intptr_t kNumInputs = 2;
4764 const intptr_t kNumTemps = 0;
4765 LocationSummary* result = new (zone)
4766 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4769 result->set_out(0, Location::RequiresRegister());
4770 return result;
4771}
4772
4773void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4774 const FRegister lhs = locs()->in(0).fpu_reg();
4775 const FRegister rhs = locs()->in(1).fpu_reg();
4776 const Register result = locs()->out(0).reg();
4777
4778 switch (op_kind()) {
4779 case Token::kEQ:
4780 __ feqs(result, lhs, rhs); // lhs op rhs ? 1 : 0
4781 break;
4782 case Token::kLT:
4783 __ flts(result, lhs, rhs);
4784 break;
4785 case Token::kLTE:
4786 __ fles(result, lhs, rhs);
4787 break;
4788 case Token::kGT:
4789 __ fgts(result, lhs, rhs);
4790 break;
4791 case Token::kGTE:
4792 __ fges(result, lhs, rhs);
4793 break;
4794 default:
4795 UNREACHABLE();
4796 }
4797 __ neg(result, result); // lhs op rhs ? -1 : 0
4798}
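// A host-side sketch (not generated code) of the boolean-to-mask trick above:
// the feqs/flts/fles family produces 0 or 1, and negation turns that into
// 0 or -1. Illustrative only.
static inline intptr_t SketchCompareResultToMask(bool condition) {
  return -static_cast<intptr_t>(condition);  // false -> 0, true -> -1.
}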
4799
4800LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4801 bool opt) const {
4802 ASSERT((InputCount() == 1) || (InputCount() == 2));
4803 const intptr_t kNumTemps = 0;
4804 LocationSummary* result = new (zone)
4805 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4807 if (InputCount() == 2) {
4809 }
4811 return result;
4812}
4813
4814void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4815 if (compiler->intrinsic_mode()) {
4816 // Would also need to preserve CODE_REG and ARGS_DESC_REG.
4817 UNIMPLEMENTED();
4818 }
4819
4820 compiler::LeafRuntimeScope rt(compiler->assembler(),
4821 /*frame_size=*/0,
4822 /*preserve_registers=*/false);
4823 ASSERT(locs()->in(0).fpu_reg() == FA0);
4824 if (InputCount() == 2) {
4825 ASSERT(locs()->in(1).fpu_reg() == FA1);
4826 }
4827 rt.Call(TargetFunction(), InputCount());
4828 ASSERT(locs()->out(0).fpu_reg() == FA0);
4829
4830 // TODO(riscv): Special case pow?
4831}
4832
4833LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4834 bool opt) const {
4835 // Only use this instruction in optimized code.
4836 ASSERT(opt);
4837 const intptr_t kNumInputs = 1;
4838 LocationSummary* summary =
4839 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4840 if (representation() == kUnboxedDouble) {
4841 if (index() == 0) {
4842 summary->set_in(
4844 } else {
4845 ASSERT(index() == 1);
4846 summary->set_in(
4848 }
4849 summary->set_out(0, Location::RequiresFpuRegister());
4850 } else {
4851 ASSERT(representation() == kTagged);
4852 if (index() == 0) {
4853 summary->set_in(
4855 } else {
4856 ASSERT(index() == 1);
4857 summary->set_in(
4859 }
4860 summary->set_out(0, Location::RequiresRegister());
4861 }
4862 return summary;
4863}
4864
4865void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4866 ASSERT(locs()->in(0).IsPairLocation());
4867 PairLocation* pair = locs()->in(0).AsPairLocation();
4868 Location in_loc = pair->At(index());
4869 if (representation() == kUnboxedDouble) {
4870 const FRegister out = locs()->out(0).fpu_reg();
4871 const FRegister in = in_loc.fpu_reg();
4872 __ fmvd(out, in);
4873 } else {
4874 ASSERT(representation() == kTagged);
4875 const Register out = locs()->out(0).reg();
4876 const Register in = in_loc.reg();
4877 __ mv(out, in);
4878 }
4879}
4880
4881LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
4882 bool opt) const {
4883 const intptr_t kNumInputs = 1;
4884 LocationSummary* summary =
4885 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4886 summary->set_in(0, Location::RequiresRegister());
4887 switch (representation()) {
4888 case kUnboxedDouble:
4889 case kUnboxedFloat:
4890 summary->set_out(0, Location::RequiresFpuRegister());
4891 break;
4892 case kUnboxedInt32:
4893 summary->set_out(0, Location::RequiresRegister());
4894 break;
4895 default:
4896 UNREACHABLE();
4897 }
4898 return summary;
4899}
4900
4901void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4902 Register in = locs()->in(0).reg();
4903 switch (representation()) {
4904 case kUnboxedDouble:
4905 __ fld(locs()->out(0).fpu_reg(),
4906 compiler::FieldAddress(
4908 lane() * sizeof(double)));
4909 break;
4910 case kUnboxedFloat:
4911 __ flw(locs()->out(0).fpu_reg(),
4912 compiler::FieldAddress(
4914 lane() * sizeof(float)));
4915 break;
4916 case kUnboxedInt32:
4917 __ lw(
4918 locs()->out(0).reg(),
4919 compiler::FieldAddress(in, compiler::target::Int32x4::value_offset() +
4920 lane() * sizeof(int32_t)));
4921 break;
4922 default:
4923 UNREACHABLE();
4924 }
4925}
4926
4927LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
4928 bool opt) const {
4929 const intptr_t kNumInputs = InputCount();
4930 LocationSummary* summary = new (zone)
4931 LocationSummary(zone, kNumInputs, 0, LocationSummary::kCallOnSlowPath);
4932 switch (from_representation()) {
4933 case kUnboxedDouble:
4934 summary->set_in(0, Location::RequiresFpuRegister());
4935 summary->set_in(1, Location::RequiresFpuRegister());
4936 break;
4937 case kUnboxedFloat:
4938 summary->set_in(0, Location::RequiresFpuRegister());
4939 summary->set_in(1, Location::RequiresFpuRegister());
4940 summary->set_in(2, Location::RequiresFpuRegister());
4941 summary->set_in(3, Location::RequiresFpuRegister());
4942 break;
4943 case kUnboxedInt32:
4944 summary->set_in(0, Location::RequiresRegister());
4945 summary->set_in(1, Location::RequiresRegister());
4946 summary->set_in(2, Location::RequiresRegister());
4947 summary->set_in(3, Location::RequiresRegister());
4948 break;
4949 default:
4950 UNREACHABLE();
4951 }
4952 summary->set_out(0, Location::RequiresRegister());
4953 return summary;
4954}
4955
4956void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4957 Register result = locs()->out(0).reg();
4958 switch (from_representation()) {
4959 case kUnboxedDouble:
4961 compiler->float64x2_class(), result, TMP);
4962 for (intptr_t i = 0; i < 2; i++) {
4963 __ fsd(locs()->in(i).fpu_reg(),
4964 compiler::FieldAddress(
4966 i * sizeof(double)));
4967 }
4968 break;
4969 case kUnboxedFloat:
4971 compiler->float32x4_class(), result, TMP);
4972 for (intptr_t i = 0; i < 4; i++) {
4973 __ fsw(locs()->in(i).fpu_reg(),
4974 compiler::FieldAddress(
4976 i * sizeof(float)));
4977 }
4978 break;
4979 case kUnboxedInt32:
4980 BoxAllocationSlowPath::Allocate(compiler, this, compiler->int32x4_class(),
4981 result, TMP);
4982 for (intptr_t i = 0; i < 4; i++) {
4983 __ sw(locs()->in(i).reg(),
4984 compiler::FieldAddress(result,
4986 i * sizeof(int32_t)));
4987 }
4988 break;
4989 default:
4990 UNREACHABLE();
4991 }
4992}
4993
4994LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
4995 bool opt) const {
4996 const intptr_t kNumInputs = 2;
4997 const intptr_t kNumTemps = 0;
4998 LocationSummary* summary = new (zone)
4999 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5000 summary->set_in(0, Location::RequiresRegister());
5001 summary->set_in(1, Location::RequiresRegister());
5002 // Output is a pair of registers.
5003 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5005 return summary;
5006}
5007
5008void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5010 compiler::Label* deopt =
5011 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5012 const Register left = locs()->in(0).reg();
5013 const Register right = locs()->in(1).reg();
5014 ASSERT(locs()->out(0).IsPairLocation());
5015 const PairLocation* pair = locs()->out(0).AsPairLocation();
5016 const Register result_div = pair->At(0).reg();
5017 const Register result_mod = pair->At(1).reg();
5018 if (RangeUtils::CanBeZero(divisor_range())) {
5019 // Handle divide by zero in runtime.
5020 __ beqz(right, deopt);
5021 }
5022
5023 __ SmiUntag(TMP, left);
5024 __ SmiUntag(TMP2, right);
5025
5026 // Macro-op fusion: DIV immediately before REM.
5027 __ div(result_div, TMP, TMP2);
5028 __ rem(result_mod, TMP, TMP2);
5029
5030 // Correct MOD result:
5031 // res = left % right;
5032 // if (res < 0) {
5033 // if (right < 0) {
5034 // res = res - right;
5035 // } else {
5036 // res = res + right;
5037 // }
5038 // }
5039 compiler::Label done, adjust;
5040 __ bgez(result_mod, &done, compiler::Assembler::kNearJump);
5041 // Result is negative, adjust it.
5042 if (RangeUtils::IsNegative(divisor_range())) {
5043 __ sub(result_mod, result_mod, TMP2);
5044 } else if (RangeUtils::IsPositive(divisor_range())) {
5045 __ add(result_mod, result_mod, TMP2);
5046 } else {
5047 __ bgez(right, &adjust, compiler::Assembler::kNearJump);
5048 __ sub(result_mod, result_mod, TMP2);
5050 __ Bind(&adjust);
5051 __ add(result_mod, result_mod, TMP2);
5052 }
5053 __ Bind(&done);
5054
5055 if (RangeUtils::Overlaps(divisor_range(), -1, -1)) {
5056 // Check the corner case of dividing MIN_SMI by -1, in which
5057 // case we cannot tag the result.
5058 __ mv(TMP, result_div);
5059 __ SmiTag(result_div);
5060 __ SmiTag(result_mod);
5061 __ SmiUntag(TMP2, result_div);
5062 __ bne(TMP, TMP2, deopt);
5063 } else {
5064 __ SmiTag(result_div);
5065 __ SmiTag(result_mod);
5066 }
5067}
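// A host-side sketch (not generated code) of the remainder adjustment above:
// the machine rem instruction follows the sign of the dividend, while the
// Dart result is non-negative, so a negative remainder is corrected by the
// divisor. Assumes C++'s % also truncates toward zero and that the
// right == 0 and MIN / -1 corner cases were already handled, as in the code
// above. Illustrative only.
static inline intptr_t SketchDartMod(intptr_t left, intptr_t right) {
  intptr_t res = left % right;  // Same sign as `left`, like rem.
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;
}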
5068
5069// Should be kept in sync with integers.cc Multiply64Hash
5070#if XLEN == 32
5071static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5072 const Register value_lo,
5073 const Register value_hi,
5074 const Register result) {
5075 ASSERT(value_lo != TMP);
5076 ASSERT(value_lo != TMP2);
5077 ASSERT(value_hi != TMP);
5078 ASSERT(value_hi != TMP2);
5079 ASSERT(result != TMP);
5080 ASSERT(result != TMP2);
5081
5082 __ LoadImmediate(TMP, 0x2d51);
5083 // (value_hi:value_lo) * (0:TMP) =
5084 // value_lo * TMP + (value_hi * TMP) * 2^32 =
5085 // lo32(value_lo * TMP) +
5086 // (hi32(value_lo * TMP) + lo32(value_hi * TMP)) * 2^32 +
5087 // hi32(value_hi * TMP) * 2^64
5088 __ mulhu(TMP2, value_lo, TMP);
5089 __ mul(result, value_lo, TMP); // (TMP2:result) = lo32 * 0x2d51
5090 __ mulhu(value_lo, value_hi, TMP);
5091 __ mul(TMP, value_hi, TMP); // (value_lo:TMP) = hi32 * 0x2d51
5092 __ add(TMP, TMP, TMP2);
5093 // (0:value_lo:TMP:result) is 128-bit product
5094 __ xor_(result, value_lo, result);
5095 __ xor_(result, TMP, result);
5096 __ AndImmediate(result, result, 0x3fffffff);
5097}
5098
5099#else
5100static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5101 const Register value,
5102 const Register result) {
5103 ASSERT(value != TMP);
5104 ASSERT(result != TMP);
5105 __ LoadImmediate(TMP, 0x2d51);
5106 __ mul(result, TMP, value);
5107 __ mulhu(TMP, TMP, value);
5108 __ xor_(result, result, TMP);
5109 __ srai(TMP, result, 32);
5110 __ xor_(result, result, TMP);
5111 __ AndImmediate(result, result, 0x3fffffff);
5112}
5113
5114#endif
5115
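// A host-side sketch (not generated code) of the 64-bit hash sequence above,
// assuming the compiler provides a 128-bit integer type. It folds the 128-bit
// product with 0x2d51 and keeps the low 30 bits; it is intended to mirror
// integers.cc Multiply64Hash. Illustrative only.
static inline uint32_t SketchMultiply64Hash(uint64_t value) {
  const unsigned __int128 product =
      static_cast<unsigned __int128>(value) * 0x2d51u;
  uint64_t folded = static_cast<uint64_t>(product) ^
                    static_cast<uint64_t>(product >> 64);  // mul ^ mulhu
  folded ^= folded >> 32;  // Fold the upper half into the lower half.
  return static_cast<uint32_t>(folded & 0x3fffffff);  // Keep 30 bits.
}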
5116LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5117 bool opt) const {
5118 const intptr_t kNumInputs = 1;
5119 const intptr_t kNumTemps = 3;
5120 LocationSummary* summary = new (zone) LocationSummary(
5121 zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
5122
5124 summary->set_temp(0, Location::RequiresRegister());
5125 summary->set_temp(1, Location::RequiresRegister());
5126 summary->set_temp(2, Location::RequiresFpuRegister());
5127#if XLEN == 32
5128 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5130#else
5131 summary->set_out(0, Location::RequiresRegister());
5132#endif
5133 return summary;
5134}
5135
5136void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5137 const FpuRegister value = locs()->in(0).fpu_reg();
5138#if XLEN == 32
5139 const PairLocation* out_pair = locs()->out(0).AsPairLocation();
5140 const Register result = out_pair->At(0).reg();
5141 const Register result_hi = out_pair->At(1).reg();
5142#else
5143 const Register result = locs()->out(0).reg();
5144#endif
5145 const Register temp = locs()->temp(0).reg();
5146 const Register temp1 = locs()->temp(1).reg();
5147 const FpuRegister temp_double = locs()->temp(2).fpu_reg();
5148
5149 compiler::Label hash_double, hash_double_value, hash_integer;
5150 compiler::Label slow_path, done;
5151 __ fclassd(temp, value);
5152 __ TestImmediate(temp, kFClassSignallingNan | kFClassQuietNan |
5154 __ BranchIf(NOT_ZERO, &hash_double_value);
5155#if XLEN == 32
5156 __ fcvtwd(temp1, value, RTZ);
5157 __ fcvtdw(temp_double, temp1);
5158#else
5159 __ fcvtld(temp1, value, RTZ);
5160 __ fcvtdl(temp_double, temp1);
5161#endif
5162 __ feqd(temp, value, temp_double);
5163 __ CompareImmediate(temp, 1);
5164 __ BranchIf(NE, &hash_double_value);
5165#if XLEN == 32
5166 // integer hash of (0:temp1)
5167 __ srai(temp, temp1, XLEN - 1); // SignFill
5168 __ Bind(&hash_integer);
5169 // integer hash of (temp, temp1)
5170 EmitHashIntegerCodeSequence(compiler, temp1, temp, result);
5171#else
5172 // integer hash of temp1
5173 __ Bind(&hash_integer);
5174 EmitHashIntegerCodeSequence(compiler, temp1, result);
5175#endif
5176 __ j(&done);
5177
5178 __ Bind(&slow_path);
5179 // The double value potentially doesn't fit into Smi range, so
5180 // do the double->int64->double conversion via a runtime call.
5181 __ StoreDToOffset(value, THR,
5183 {
5184 compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
5185 /*preserve_registers=*/true);
5186 __ mv(A0, THR);
5187 // Check if the double can be represented as an int64, and load it if
5188 // it can.
5189 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
5190 __ mv(TMP, A0);
5191 }
5192#if XLEN == 32
5193 __ LoadFromOffset(temp1, THR,
5195 __ LoadFromOffset(temp, THR,
5198#else
5199 __ fmvxd(temp1, value);
5200 __ srli(temp, temp1, 32);
5201#endif
5202 __ CompareImmediate(TMP, 0);
5203 __ BranchIf(NE, &hash_integer);
5204 __ j(&hash_double);
5205
5206#if XLEN == 32
5207 __ Bind(&hash_double_value);
5208 __ StoreDToOffset(value, THR,
5210 __ LoadFromOffset(temp1, THR,
5212 __ LoadFromOffset(temp, THR,
5215#else
5216 __ Bind(&hash_double_value);
5217 __ fmvxd(temp1, value);
5218 __ srli(temp, temp1, 32);
5219#endif
5220
5221 // double hi/lo words are in (temp:temp1)
5222 __ Bind(&hash_double);
5223 __ xor_(result, temp1, temp);
5224 __ AndImmediate(result, result, compiler::target::kSmiMax);
5225
5226 __ Bind(&done);
5227#if XLEN == 32
5228 __ xor_(result_hi, result_hi, result_hi);
5229#endif
5230}
5231
5232LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5233 bool opt) const {
5234 const intptr_t kNumInputs = 1;
5235#if XLEN == 32
5236 const intptr_t kNumTemps = 1;
5237 LocationSummary* summary = new (zone)
5238 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5239 summary->set_temp(0, Location::RequiresRegister());
5240#else
5241 const intptr_t kNumTemps = 0;
5242 LocationSummary* summary = new (zone)
5243 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5244#endif
5245 summary->set_in(0, Location::WritableRegister());
5246 summary->set_out(0, Location::RequiresRegister());
5247 return summary;
5248}
5249
5250void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5251 Register result = locs()->out(0).reg();
5252 Register value = locs()->in(0).reg();
5253
5254#if XLEN == 32
5255 Register value_hi = locs()->temp(0).reg();
5256
5257 if (smi_) {
5258 __ SmiUntag(value);
5259 __ srai(value_hi, value, XLEN - 1); // SignFill
5260 } else {
5261 __ LoadFieldFromOffset(value_hi, value,
5263 __ LoadFieldFromOffset(value, value, Mint::value_offset());
5264 }
5265 EmitHashIntegerCodeSequence(compiler, value, value_hi, result);
5266#else
5267 if (smi_) {
5268 __ SmiUntag(value);
5269 } else {
5270 __ LoadFieldFromOffset(value, value, Mint::value_offset());
5271 }
5272 EmitHashIntegerCodeSequence(compiler, value, result);
5273#endif
5274 __ SmiTag(result);
5275}
5276
5277LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5278 comparison()->InitializeLocationSummary(zone, opt);
5279 // Branches don't produce a result.
5280 comparison()->locs()->set_out(0, Location::NoLocation());
5281 return comparison()->locs();
5282}
5283
5284void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5285 comparison()->EmitBranchCode(compiler, this);
5286}
5287
5288LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5289 bool opt) const {
5290 const intptr_t kNumInputs = 1;
5291 const bool need_mask_temp = IsBitTest();
5292 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5293 LocationSummary* summary = new (zone)
5294 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5295 summary->set_in(0, Location::RequiresRegister());
5296 if (!IsNullCheck()) {
5297 summary->set_temp(0, Location::RequiresRegister());
5298 if (need_mask_temp) {
5299 summary->set_temp(1, Location::RequiresRegister());
5300 }
5301 }
5302 return summary;
5303}
5304
5305void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5306 compiler::Label* deopt) {
5307 if (IsDeoptIfNull()) {
5308 __ beq(locs()->in(0).reg(), NULL_REG, deopt);
5309 } else if (IsDeoptIfNotNull()) {
5310 __ bne(locs()->in(0).reg(), NULL_REG, deopt);
5311 } else {
5312 UNREACHABLE();
5313 }
5314}
5315
5316void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5317 intptr_t min,
5318 intptr_t max,
5319 intptr_t mask,
5320 compiler::Label* deopt) {
5321 Register biased_cid = locs()->temp(0).reg();
5322 __ AddImmediate(biased_cid, -min);
5323 __ CompareImmediate(biased_cid, max - min);
5324 __ BranchIf(HI, deopt);
5325
5326 Register bit_reg = locs()->temp(1).reg();
5327 __ LoadImmediate(bit_reg, 1);
5328 __ sll(bit_reg, bit_reg, biased_cid);
5329 __ TestImmediate(bit_reg, mask);
5330 __ BranchIf(EQ, deopt);
5331}
5332
5333int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5334 int bias,
5335 intptr_t cid_start,
5336 intptr_t cid_end,
5337 bool is_last,
5338 compiler::Label* is_ok,
5339 compiler::Label* deopt,
5340 bool use_near_jump) {
5341 Register biased_cid = locs()->temp(0).reg();
5342 Condition no_match, match;
5343 if (cid_start == cid_end) {
5344 __ CompareImmediate(biased_cid, cid_start - bias);
5345 no_match = NE;
5346 match = EQ;
5347 } else {
5348 // For class ID ranges use a subtract followed by an unsigned
5349 // comparison to check both ends of the ranges with one comparison.
5350 __ AddImmediate(biased_cid, bias - cid_start);
5351 bias = cid_start;
5352 __ CompareImmediate(biased_cid, cid_end - cid_start);
5353 no_match = HI; // Unsigned higher.
5354 match = LS; // Unsigned lower or same.
5355 }
5356 if (is_last) {
5357 __ BranchIf(no_match, deopt);
5358 } else {
5359 __ BranchIf(match, is_ok);
5360 }
5361 return bias;
5362}
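// A host-side sketch (not generated code) of the biased range check above:
// subtracting the start of the class-id range and comparing unsigned covers
// both bounds with a single branch. Illustrative only.
static inline bool SketchCidInRange(uintptr_t cid,
                                    uintptr_t cid_start,
                                    uintptr_t cid_end) {
  // Class ids below cid_start wrap around to large unsigned values, so one
  // unsigned <= catches both "too small" and "too large".
  return (cid - cid_start) <= (cid_end - cid_start);
}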
5363
5364LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5365 bool opt) const {
5366 const intptr_t kNumInputs = 1;
5367 const intptr_t kNumTemps = 0;
5368 LocationSummary* summary = new (zone)
5369 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5370 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5371 : Location::WritableRegister());
5372 return summary;
5373}
5374
5375void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5376 Register value = locs()->in(0).reg();
5377 compiler::Label* deopt =
5378 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5379 if (cids_.IsSingleCid()) {
5380 __ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
5381 __ BranchIf(NE, deopt);
5382 } else {
5383 __ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
5384 __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start));
5385 __ BranchIf(HI, deopt); // Unsigned higher.
5386 }
5387}
5388
5389LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5390 bool opt) const {
5391 const intptr_t kNumInputs = 1;
5392 const intptr_t kNumTemps = 0;
5393 LocationSummary* summary = new (zone)
5394 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5395 summary->set_in(0, Location::RequiresRegister());
5396 return summary;
5397}
5398
5399void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5400 const Register value = locs()->in(0).reg();
5401 compiler::Label* deopt =
5402 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5403 __ BranchIfNotSmi(value, deopt);
5404}
5405
5406void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5407 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5408 compiler->AddSlowPathCode(slow_path);
5409
5410 Register value_reg = locs()->in(0).reg();
5411 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5412 // in order to be able to allocate it in a register.
5413 __ CompareObject(value_reg, Object::null_object());
5414 __ BranchIf(EQUAL, slow_path->entry_label());
5415}
5416
5417LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5418 bool opt) const {
5419 const intptr_t kNumInputs = 2;
5420 const intptr_t kNumTemps = 0;
5421 LocationSummary* locs = new (zone)
5422 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5423 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
5424 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
5425 return locs;
5426}
5427
5428void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5429 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5430 compiler::Label* deopt =
5431 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5432
5433 Location length_loc = locs()->in(kLengthPos);
5434 Location index_loc = locs()->in(kIndexPos);
5435
5436 const intptr_t index_cid = index()->Type()->ToCid();
5437 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5438 // TODO(srdjan): remove this code once failures are fixed.
5439 if ((Smi::Cast(length_loc.constant()).Value() >
5440 Smi::Cast(index_loc.constant()).Value()) &&
5441 (Smi::Cast(index_loc.constant()).Value() >= 0)) {
5442 // This CheckArrayBoundInstr should have been eliminated.
5443 return;
5444 }
5445 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5446 Smi::Cast(index_loc.constant()).Value()) ||
5447 (Smi::Cast(index_loc.constant()).Value() < 0));
5448 // Unconditionally deoptimize for constant bounds checks because they
5449 // only occur when the index is out of bounds.
5450 __ j(deopt);
5451 return;
5452 }
5453
5454 if (index_loc.IsConstant()) {
5455 const Register length = length_loc.reg();
5456 const Smi& index = Smi::Cast(index_loc.constant());
5457 __ CompareObject(length, index);
5458 __ BranchIf(LS, deopt);
5459 } else if (length_loc.IsConstant()) {
5460 const Smi& length = Smi::Cast(length_loc.constant());
5461 const Register index = index_loc.reg();
5462 if (index_cid != kSmiCid) {
5463 __ BranchIfNotSmi(index, deopt);
5464 }
5465 if (length.Value() == Smi::kMaxValue) {
5466 __ bltz(index, deopt);
5467 } else {
5468 __ CompareObject(index, length);
5469 __ BranchIf(CS, deopt);
5470 }
5471 } else {
5472 const Register length = length_loc.reg();
5473 const Register index = index_loc.reg();
5474 if (index_cid != kSmiCid) {
5475 __ BranchIfNotSmi(index, deopt);
5476 }
5477 __ CompareObjectRegisters(index, length);
5478 __ BranchIf(CS, deopt);
5479 }
5480}
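// Note on the bound checks above: index and length are both tagged Smis, so
// the comparisons are done unsigned on the tagged values. A negative index has
// its sign bit set and therefore compares as a very large unsigned number,
// which means the single BranchIf(CS) (unsigned >=) catches both index < 0 and
// index >= length.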
5481
5482LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5483 bool opt) const {
5484 const intptr_t kNumInputs = 1;
5485 const intptr_t kNumTemps = 0;
5486 LocationSummary* locs = new (zone) LocationSummary(
5487 zone, kNumInputs, kNumTemps,
5488 UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
5489 : LocationSummary::kCallOnSlowPath);
5490 locs->set_in(kReceiver, Location::RequiresRegister());
5491 return locs;
5492}
5493
5494void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5495 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5496 compiler->AddSlowPathCode(slow_path);
5497 __ lbu(TMP, compiler::FieldAddress(locs()->in(0).reg(),
5498 compiler::target::Object::tags_offset()));
5499 // In the first byte.
5500 ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
5501 __ andi(TMP, TMP, 1 << compiler::target::UntaggedObject::kImmutableBit);
5502 __ bnez(TMP, slow_path->entry_label());
5503}
5504
5505class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
5506 public:
5507 Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
5508 Register divisor,
5509 Range* divisor_range,
5510 Register tmp,
5511 Register out)
5512 : ThrowErrorSlowPathCode(instruction,
5513 kIntegerDivisionByZeroExceptionRuntimeEntry),
5514 is_mod_(instruction->op_kind() == Token::kMOD),
5515 divisor_(divisor),
5516 divisor_range_(divisor_range),
5517 tmp_(tmp),
5518 out_(out),
5519 adjust_sign_label_() {}
5520
5521 void EmitNativeCode(FlowGraphCompiler* compiler) override {
5522 // Handle modulo/division by zero, if needed. Use superclass code.
5523 if (has_divide_by_zero()) {
5524 ThrowErrorSlowPathCode::EmitNativeCode(compiler);
5525 } else {
5526 __ Bind(entry_label()); // not used, but keeps destructor happy
5527 if (compiler::Assembler::EmittingComments()) {
5528 __ Comment("slow path %s operation (no throw)", name());
5529 }
5530 }
5531 // Adjust modulo for negative sign, optimized for known ranges.
5532 // if (divisor < 0)
5533 // out -= divisor;
5534 // else
5535 // out += divisor;
5536 if (has_adjust_sign()) {
5537 __ Bind(adjust_sign_label());
5538 if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
5539 // General case.
5540 compiler::Label adjust, done;
5541 __ bgez(divisor_, &adjust, compiler::Assembler::kNearJump);
5542 __ sub(out_, out_, divisor_);
5543 __ j(&done, compiler::Assembler::kNearJump);
5544 __ Bind(&adjust);
5545 __ add(out_, out_, divisor_);
5546 __ Bind(&done);
5547 } else if (divisor_range_->IsPositive()) {
5548 // Always positive.
5549 __ add(out_, out_, divisor_);
5550 } else {
5551 // Always negative.
5552 __ sub(out_, out_, divisor_);
5553 }
5554 __ j(exit_label());
5555 }
5556 }
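  // Worked example of the adjustment above: Dart's integer % always yields a
  // result in [0, |divisor|), while RISC-V `rem` returns a remainder with the
  // sign of the dividend. For -7 % 3, `rem` gives -1 and the slow path adds
  // the positive divisor to produce 2; for -7 % -3 it subtracts the negative
  // divisor, again producing 2.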
5557
5558 const char* name() override { return "int64 divide"; }
5559
5560 bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
5561
5562 bool has_adjust_sign() { return is_mod_; }
5563
5564 bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
5565
5566 compiler::Label* adjust_sign_label() {
5567 ASSERT(has_adjust_sign());
5568 return &adjust_sign_label_;
5569 }
5570
5571 private:
5572 bool is_mod_;
5573 Register divisor_;
5574 Range* divisor_range_;
5575 Register tmp_;
5576 Register out_;
5577 compiler::Label adjust_sign_label_;
5578};
5579
5580#if XLEN == 64
5581static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
5582 BinaryInt64OpInstr* instruction,
5583 Token::Kind op_kind,
5584 Register left,
5585 Register right,
5586 Register tmp,
5587 Register out) {
5588 ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
5589
5590 // TODO(riscv): Is it worth copying the magic constant optimization from the
5591 // other architectures?
5592
5593 // Prepare a slow path.
5594 Range* right_range = instruction->right()->definition()->range();
5595 Int64DivideSlowPath* slow_path =
5596 new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
5597
5598 // Handle modulo/division by zero exception on slow path.
5599 if (slow_path->has_divide_by_zero()) {
5600 __ beqz(right, slow_path->entry_label());
5601 }
5602
5603 // Perform actual operation
5604 // out = left % right
5605 // or
5606 // out = left / right.
5607 if (op_kind == Token::kMOD) {
5608 __ rem(out, left, right);
5609 // For the % operator, the rem instruction does not
5610 // quite do what we want. Adjust for sign on slow path.
5611 __ bltz(out, slow_path->adjust_sign_label());
5612 } else {
5613 __ div(out, left, right);
5614 }
5615
5616 if (slow_path->is_needed()) {
5617 __ Bind(slow_path->exit_label());
5618 compiler->AddSlowPathCode(slow_path);
5619 }
5620}
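// Note on the zero check above: RISC-V integer division never traps. `div` by
// zero yields -1 (all bits set) and `rem` by zero yields the dividend, so
// without the explicit beqz guard a Dart `x ~/ 0` or `x % 0` would quietly
// produce a value instead of throwing IntegerDivisionByZeroException.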
5621#endif
5622
5623LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5624 bool opt) const {
5625#if XLEN == 32
5626 // TODO(riscv): Allow constants for the RHS of bitwise operators if both
5627 // hi and lo components are IType immediates.
5628 const intptr_t kNumInputs = 2;
5629 const intptr_t kNumTemps = 0;
5630 LocationSummary* summary = new (zone)
5631 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5632 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5633 Location::RequiresRegister()));
5634 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
5635 Location::RequiresRegister()));
5636 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5637 Location::RequiresRegister()));
5638 return summary;
5639#else
5640 switch (op_kind()) {
5641 case Token::kMOD:
5642 case Token::kTRUNCDIV: {
5643 const intptr_t kNumInputs = 2;
5644 const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
5645 LocationSummary* summary = new (zone) LocationSummary(
5646 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5647 summary->set_in(0, Location::RequiresRegister());
5648 summary->set_in(1, Location::RequiresRegister());
5649 summary->set_out(0, Location::RequiresRegister());
5650 if (kNumTemps == 1) {
5651 summary->set_temp(0, Location::RequiresRegister());
5652 }
5653 return summary;
5654 }
5655 default: {
5656 const intptr_t kNumInputs = 2;
5657 const intptr_t kNumTemps = 0;
5658 LocationSummary* summary = new (zone) LocationSummary(
5659 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5660 summary->set_in(0, Location::RequiresRegister());
5661 summary->set_in(1, LocationRegisterOrConstant(right()));
5662 summary->set_out(0, Location::RequiresRegister());
5663 return summary;
5664 }
5665 }
5666#endif
5667}
5668
5669void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5670#if XLEN == 32
5671 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5672 Register left_lo = left_pair->At(0).reg();
5673 Register left_hi = left_pair->At(1).reg();
5674 PairLocation* right_pair = locs()->in(1).AsPairLocation();
5675 Register right_lo = right_pair->At(0).reg();
5676 Register right_hi = right_pair->At(1).reg();
5677 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5678 Register out_lo = out_pair->At(0).reg();
5679 Register out_hi = out_pair->At(1).reg();
5680 ASSERT(!can_overflow());
5682
5683 switch (op_kind()) {
5684 case Token::kBIT_AND: {
5685 __ and_(out_lo, left_lo, right_lo);
5686 __ and_(out_hi, left_hi, right_hi);
5687 break;
5688 }
5689 case Token::kBIT_OR: {
5690 __ or_(out_lo, left_lo, right_lo);
5691 __ or_(out_hi, left_hi, right_hi);
5692 break;
5693 }
5694 case Token::kBIT_XOR: {
5695 __ xor_(out_lo, left_lo, right_lo);
5696 __ xor_(out_hi, left_hi, right_hi);
5697 break;
5698 }
5699 case Token::kADD: {
5700 __ add(out_hi, left_hi, right_hi);
5701 __ add(out_lo, left_lo, right_lo);
5702 __ sltu(TMP, out_lo, right_lo); // Carry
5703 __ add(out_hi, out_hi, TMP);
5704 break;
5705 }
5706 case Token::kSUB: {
5707 __ sltu(TMP, left_lo, right_lo); // Borrow
5708 __ sub(out_hi, left_hi, right_hi);
5709 __ sub(out_hi, out_hi, TMP);
5710 __ sub(out_lo, left_lo, right_lo);
5711 break;
5712 }
5713 case Token::kMUL: {
5714 // TODO(riscv): Fix ordering for macro-op fusion.
5715 __ mul(out_lo, right_lo, left_hi);
5716 __ mulhu(out_hi, right_lo, left_lo);
5717 __ add(out_lo, out_lo, out_hi);
5718 __ mul(out_hi, right_hi, left_lo);
5719 __ add(out_hi, out_hi, out_lo);
5720 __ mul(out_lo, right_lo, left_lo);
5721 break;
5722 }
5723 default:
5724 UNREACHABLE();
5725 }
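  // Notes on the 32-bit lowering above:
  //  * kADD detects the carry out of the low word with sltu: after
  //    out_lo = left_lo + right_lo, a carry occurred exactly when
  //    out_lo < right_lo (unsigned), and that 0/1 value is folded into out_hi.
  //  * kMUL assembles the truncated 64-bit product from three 32-bit
  //    multiplies:
  //      lo = low32(left_lo * right_lo)
  //      hi = low32(left_hi * right_lo) + high32(left_lo * right_lo)
  //           + low32(left_lo * right_hi)
  //    Partial products that only affect bits >= 64 are dropped.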
5726#else
5727 ASSERT(!can_overflow());
5729
5730 const Register left = locs()->in(0).reg();
5731 const Location right = locs()->in(1);
5732 const Register out = locs()->out(0).reg();
5733
5734 if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
5735 Register tmp =
5736 (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister;
5737 EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
5738 out);
5739 return;
5740 } else if (op_kind() == Token::kMUL) {
5741 Register r = TMP;
5742 if (right.IsConstant()) {
5743 int64_t value;
5744 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5745 RELEASE_ASSERT(ok);
5746 __ LoadImmediate(r, value);
5747 } else {
5748 r = right.reg();
5749 }
5750 __ mul(out, left, r);
5751 return;
5752 }
5753
5754 if (right.IsConstant()) {
5755 int64_t value;
5756 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5757 RELEASE_ASSERT(ok);
5758 switch (op_kind()) {
5759 case Token::kADD:
5760 __ AddImmediate(out, left, value);
5761 break;
5762 case Token::kSUB:
5763 __ AddImmediate(out, left, -value);
5764 break;
5765 case Token::kBIT_AND:
5766 __ AndImmediate(out, left, value);
5767 break;
5768 case Token::kBIT_OR:
5769 __ OrImmediate(out, left, value);
5770 break;
5771 case Token::kBIT_XOR:
5772 __ XorImmediate(out, left, value);
5773 break;
5774 default:
5775 UNREACHABLE();
5776 }
5777 } else {
5778 switch (op_kind()) {
5779 case Token::kADD:
5780 __ add(out, left, right.reg());
5781 break;
5782 case Token::kSUB:
5783 __ sub(out, left, right.reg());
5784 break;
5785 case Token::kBIT_AND:
5786 __ and_(out, left, right.reg());
5787 break;
5788 case Token::kBIT_OR:
5789 __ or_(out, left, right.reg());
5790 break;
5791 case Token::kBIT_XOR:
5792 __ xor_(out, left, right.reg());
5793 break;
5794 default:
5795 UNREACHABLE();
5796 }
5797 }
5798#endif
5799}
5800
5801#if XLEN == 32
5802static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5803 Token::Kind op_kind,
5804 Register out_lo,
5805 Register out_hi,
5806 Register left_lo,
5807 Register left_hi,
5808 const Object& right) {
5809 const int64_t shift = Integer::Cast(right).AsInt64Value();
5810 ASSERT(shift >= 0);
5811
5812 switch (op_kind) {
5813 case Token::kSHR: {
5814 if (shift < 32) {
5815 __ slli(out_lo, left_hi, 32 - shift);
5816 __ srli(TMP, left_lo, shift);
5817 __ or_(out_lo, out_lo, TMP);
5818 __ srai(out_hi, left_hi, shift);
5819 } else {
5820 if (shift == 32) {
5821 __ mv(out_lo, left_hi);
5822 } else if (shift < 64) {
5823 __ srai(out_lo, left_hi, shift - 32);
5824 } else {
5825 __ srai(out_lo, left_hi, 31);
5826 }
5827 __ srai(out_hi, left_hi, 31);
5828 }
5829 break;
5830 }
5831 case Token::kUSHR: {
5832 ASSERT(shift < 64);
5833 if (shift < 32) {
5834 __ slli(out_lo, left_hi, 32 - shift);
5835 __ srli(TMP, left_lo, shift);
5836 __ or_(out_lo, out_lo, TMP);
5837 __ srli(out_hi, left_hi, shift);
5838 } else {
5839 if (shift == 32) {
5840 __ mv(out_lo, left_hi);
5841 } else {
5842 __ srli(out_lo, left_hi, shift - 32);
5843 }
5844 __ li(out_hi, 0);
5845 }
5846 break;
5847 }
5848 case Token::kSHL: {
5849 ASSERT(shift >= 0);
5850 ASSERT(shift < 64);
5851 if (shift < 32) {
5852 __ srli(out_hi, left_lo, 32 - shift);
5853 __ slli(TMP, left_hi, shift);
5854 __ or_(out_hi, out_hi, TMP);
5855 __ slli(out_lo, left_lo, shift);
5856 } else {
5857 if (shift == 32) {
5858 __ mv(out_hi, left_lo);
5859 } else {
5860 __ slli(out_hi, left_lo, shift - 32);
5861 }
5862 __ li(out_lo, 0);
5863 }
5864 break;
5865 }
5866 default:
5867 UNREACHABLE();
5868 }
5869}
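// Worked example of the double-word splitting above (kSHL): for shift == 5,
//   out_hi = (left_hi << 5) | (left_lo >> 27)   // bits crossing the word gap
//   out_lo =  left_lo << 5
// and for shift == 40 (>= 32) the low source word supplies everything:
//   out_hi = left_lo << 8, out_lo = 0.
// kSHR and kUSHR mirror this in the other direction, filling the vacated high
// word with the sign bit or with zeros respectively.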
5870#else
5871static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5872 Token::Kind op_kind,
5873 Register out,
5874 Register left,
5875 const Object& right) {
5876 const int64_t shift = Integer::Cast(right).AsInt64Value();
5877 ASSERT(shift >= 0);
5878 switch (op_kind) {
5879 case Token::kSHR: {
5880 __ srai(out, left, Utils::Minimum<int64_t>(shift, XLEN - 1));
5881 break;
5882 }
5883 case Token::kUSHR: {
5884 ASSERT(shift < 64);
5885 __ srli(out, left, shift);
5886 break;
5887 }
5888 case Token::kSHL: {
5889 ASSERT(shift < 64);
5890 __ slli(out, left, shift);
5891 break;
5892 }
5893 default:
5894 UNREACHABLE();
5895 }
5896}
5897#endif
5898
5899#if XLEN == 32
5900static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
5901 Token::Kind op_kind,
5902 Register out_lo,
5903 Register out_hi,
5904 Register left_lo,
5905 Register left_hi,
5906 Register right) {
5907 // TODO(riscv): Review.
5908 switch (op_kind) {
5909 case Token::kSHR: {
5910 compiler::Label big_shift, done;
5911 __ li(TMP, 32);
5912 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
5913
5914 // 0 <= right < 32
5915 __ srl(out_lo, left_lo, right);
5916 __ sra(out_hi, left_hi, right);
5917 __ beqz(right, &done, compiler::Assembler::kNearJump);
5918 __ sub(TMP, TMP, right);
5919 __ sll(TMP2, left_hi, TMP);
5920 __ or_(out_lo, out_lo, TMP2);
5921 __ j(&done, compiler::Assembler::kNearJump);
5922
5923 // 32 <= right < 64
5924 __ Bind(&big_shift);
5925 __ sub(TMP, right, TMP);
5926 __ sra(out_lo, left_hi, TMP);
5927 __ srai(out_hi, left_hi, XLEN - 1); // SignFill
5928 __ Bind(&done);
5929 break;
5930 }
5931 case Token::kUSHR: {
5932 compiler::Label big_shift, done;
5933 __ li(TMP, 32);
5934 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
5935
5936 // 0 <= right < 32
5937 __ srl(out_lo, left_lo, right);
5938 __ srl(out_hi, left_hi, right);
5939 __ beqz(right, &done, compiler::Assembler::kNearJump);
5940 __ sub(TMP, TMP, right);
5941 __ sll(TMP2, left_hi, TMP);
5942 __ or_(out_lo, out_lo, TMP2);
5943 __ j(&done, compiler::Assembler::kNearJump);
5944
5945 // 32 <= right < 64
5946 __ Bind(&big_shift);
5947 __ sub(TMP, right, TMP);
5948 __ srl(out_lo, left_hi, TMP);
5949 __ li(out_hi, 0);
5950 __ Bind(&done);
5951 break;
5952 }
5953 case Token::kSHL: {
5954 compiler::Label big_shift, done;
5955 __ li(TMP, 32);
5956 __ bge(right, TMP, &big_shift, compiler::Assembler::kNearJump);
5957
5958 // 0 <= right < 32
5959 __ sll(out_lo, left_lo, right);
5960 __ sll(out_hi, left_hi, right);
5961 __ beqz(right, &done, compiler::Assembler::kNearJump);
5962 __ sub(TMP, TMP, right);
5963 __ srl(TMP2, left_lo, TMP);
5964 __ or_(out_hi, out_hi, TMP2);
5965 __ j(&done, compiler::Assembler::kNearJump);
5966
5967 // 32 <= right < 64
5968 __ Bind(&big_shift);
5969 __ sub(TMP, right, TMP);
5970 __ sll(out_hi, left_lo, TMP);
5971 __ li(out_lo, 0);
5972 __ Bind(&done);
5973 break;
5974 }
5975 default:
5976 UNREACHABLE();
5977 }
5978}
5979#else
5980static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
5981 Token::Kind op_kind,
5982 Register out,
5983 Register left,
5984 Register right) {
5985 switch (op_kind) {
5986 case Token::kSHR: {
5987 __ sra(out, left, right);
5988 break;
5989 }
5990 case Token::kUSHR: {
5991 __ srl(out, left, right);
5992 break;
5993 }
5994 case Token::kSHL: {
5995 __ sll(out, left, right);
5996 break;
5997 }
5998 default:
5999 UNREACHABLE();
6000 }
6001}
6002#endif
6003
6004static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6005 Token::Kind op_kind,
6006 Register out,
6007 Register left,
6008 const Object& right) {
6009 const int64_t shift = Integer::Cast(right).AsInt64Value();
6010 ASSERT(shift >= 0);
6011 if (shift >= 32) {
6012 __ li(out, 0);
6013 } else {
6014 switch (op_kind) {
6015 case Token::kSHR:
6016 case Token::kUSHR:
6017#if XLEN == 32
6018 __ srli(out, left, shift);
6019#else
6020 __ srliw(out, left, shift);
6021#endif
6022 break;
6023 case Token::kSHL:
6024#if XLEN == 32
6025 __ slli(out, left, shift);
6026#else
6027 __ slliw(out, left, shift);
6028#endif
6029 break;
6030 default:
6031 UNREACHABLE();
6032 }
6033 }
6034}
6035
6036static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
6037 Token::Kind op_kind,
6038 Register out,
6039 Register left,
6040 Register right) {
6041 switch (op_kind) {
6042 case Token::kSHR:
6043 case Token::kUSHR:
6044#if XLEN == 32
6045 __ srl(out, left, right);
6046#else
6047 __ srlw(out, left, right);
6048#endif
6049 break;
6050 case Token::kSHL:
6051#if XLEN == 32
6052 __ sll(out, left, right);
6053#else
6054 __ sllw(out, left, right);
6055#endif
6056 break;
6057 default:
6058 UNREACHABLE();
6059 }
6060}
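// On RV64 the word-sized shift variants (slliw/srliw, sllw/srlw) are used so
// the operation is confined to the low 32 bits: they take the shift amount
// modulo 32 and sign-extend the 32-bit result into the full register. Shift
// amounts above 31 therefore wrap, which is why the register paths below
// either divert them to a slow path or explicitly zero the result afterwards.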
6061
6062class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6063 public:
6064 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
6065 : ThrowErrorSlowPathCode(instruction,
6066 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6067
6068 const char* name() override { return "int64 shift"; }
6069
6070 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6071#if XLEN == 32
6072 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
6073 Register left_hi = left_pair->At(1).reg();
6074 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6075 Register right_lo = right_pair->At(0).reg();
6076 Register right_hi = right_pair->At(1).reg();
6077 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
6078 Register out_lo = out_pair->At(0).reg();
6079 Register out_hi = out_pair->At(1).reg();
6080
6081 compiler::Label throw_error;
6082 __ bltz(right_hi, &throw_error);
6083
6084 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6085 case Token::kSHR:
6086 __ srai(out_hi, left_hi, compiler::target::kBitsPerWord - 1);
6087 __ mv(out_lo, out_hi);
6088 break;
6089 case Token::kUSHR:
6090 case Token::kSHL: {
6091 __ li(out_lo, 0);
6092 __ li(out_hi, 0);
6093 break;
6094 }
6095 default:
6096 UNREACHABLE();
6097 }
6098
6099 __ j(exit_label());
6100
6101 __ Bind(&throw_error);
6102
6103 // Can't pass unboxed int64 value directly to runtime call, as all
6104 // arguments are expected to be tagged (boxed).
6105 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6106 // TODO(dartbug.com/33549): Clean this up when unboxed values
6107 // can be passed as arguments.
6108 __ StoreToOffset(right_lo, THR,
6110 __ StoreToOffset(right_hi, THR,
6113#else
6114 const Register left = instruction()->locs()->in(0).reg();
6115 const Register right = instruction()->locs()->in(1).reg();
6116 const Register out = instruction()->locs()->out(0).reg();
6117 ASSERT((out != left) && (out != right));
6118
6119 compiler::Label throw_error;
6120 __ bltz(right, &throw_error);
6121
6122 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6123 case Token::kSHR:
6124 __ srai(out, left, XLEN - 1);
6125 break;
6126 case Token::kUSHR:
6127 case Token::kSHL:
6128 __ mv(out, ZR);
6129 break;
6130 default:
6131 UNREACHABLE();
6132 }
6133 __ j(exit_label());
6134
6135 __ Bind(&throw_error);
6136
6137 // Can't pass unboxed int64 value directly to runtime call, as all
6138 // arguments are expected to be tagged (boxed).
6139 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6140 // TODO(dartbug.com/33549): Clean this up when unboxed values
6141 // can be passed as arguments.
6142 __ sx(right,
6143 compiler::Address(
6145#endif
6146 }
6147};
6148
6149LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6150 bool opt) const {
6151 const intptr_t kNumInputs = 2;
6152 const intptr_t kNumTemps = 0;
6153#if XLEN == 32
6154 LocationSummary* summary = new (zone) LocationSummary(
6155 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6156 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6157 Location::RequiresRegister()));
6158 if (RangeUtils::IsPositive(shift_range()) &&
6159 right()->definition()->IsConstant()) {
6160 ConstantInstr* constant = right()->definition()->AsConstant();
6161 summary->set_in(1, Location::Constant(constant));
6162 } else {
6163 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6164 Location::RequiresRegister()));
6165 }
6166 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6167 Location::RequiresRegister()));
6168#else
6169 LocationSummary* summary = new (zone) LocationSummary(
6170 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6171 summary->set_in(0, Location::RequiresRegister());
6172 summary->set_in(1, RangeUtils::IsPositive(shift_range())
6173 ? LocationRegisterOrConstant(right())
6174 : Location::RequiresRegister());
6175 summary->set_out(0, Location::RequiresRegister());
6176#endif
6177 return summary;
6178}
6179
6180void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6181#if XLEN == 32
6182 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6183 Register left_lo = left_pair->At(0).reg();
6184 Register left_hi = left_pair->At(1).reg();
6185 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6186 Register out_lo = out_pair->At(0).reg();
6187 Register out_hi = out_pair->At(1).reg();
6188 ASSERT(!can_overflow());
6189
6190 if (locs()->in(1).IsConstant()) {
6191 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6192 left_hi, locs()->in(1).constant());
6193 } else {
6194 // Code for a variable shift amount (or constant that throws).
6195 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6196 Register right_lo = right_pair->At(0).reg();
6197 Register right_hi = right_pair->At(1).reg();
6198
6199 // Jump to a slow path if shift is larger than 63 or less than 0.
6200 ShiftInt64OpSlowPath* slow_path = nullptr;
6201 if (!IsShiftCountInRange()) {
6202 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6203 compiler->AddSlowPathCode(slow_path);
6204 __ CompareImmediate(right_hi, 0);
6205 __ BranchIf(NE, slow_path->entry_label());
6206 __ CompareImmediate(right_lo, kShiftCountLimit);
6207 __ BranchIf(HI, slow_path->entry_label());
6208 }
6209
6210 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6211 left_hi, right_lo);
6212
6213 if (slow_path != nullptr) {
6214 __ Bind(slow_path->exit_label());
6215 }
6216 }
6217#else
6218 const Register left = locs()->in(0).reg();
6219 const Register out = locs()->out(0).reg();
6220 ASSERT(!can_overflow());
6221
6222 if (locs()->in(1).IsConstant()) {
6223 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
6224 locs()->in(1).constant());
6225 } else {
6226 // Code for a variable shift amount (or constant that throws).
6227 Register shift = locs()->in(1).reg();
6228
6229 // Jump to a slow path if shift is larger than 63 or less than 0.
6230 ShiftInt64OpSlowPath* slow_path = nullptr;
6231 if (!IsShiftCountInRange()) {
6232 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6233 compiler->AddSlowPathCode(slow_path);
6234 __ CompareImmediate(shift, kShiftCountLimit);
6235 __ BranchIf(HI, slow_path->entry_label());
6236 }
6237
6238 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
6239
6240 if (slow_path != nullptr) {
6241 __ Bind(slow_path->exit_label());
6242 }
6243 }
6244#endif
6245}
6246
6247LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6248 Zone* zone,
6249 bool opt) const {
6250 const intptr_t kNumInputs = 2;
6251 const intptr_t kNumTemps = 0;
6252 LocationSummary* summary = new (zone)
6253 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6254#if XLEN == 32
6255 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6256 Location::RequiresRegister()));
6257 summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
6258 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6259 Location::RequiresRegister()));
6260#else
6261 summary->set_in(0, Location::RequiresRegister());
6262 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6263 summary->set_out(0, Location::RequiresRegister());
6264#endif
6265 return summary;
6266}
6267
6268void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6269#if XLEN == 32
6270 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6271 Register left_lo = left_pair->At(0).reg();
6272 Register left_hi = left_pair->At(1).reg();
6273 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6274 Register out_lo = out_pair->At(0).reg();
6275 Register out_hi = out_pair->At(1).reg();
6276 ASSERT(!can_overflow());
6277
6278 if (locs()->in(1).IsConstant()) {
6279 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6280 left_hi, locs()->in(1).constant());
6281 } else {
6282 // Code for a variable shift amount.
6283 Register shift = locs()->in(1).reg();
6284 __ SmiUntag(shift);
6285
6286 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6287 if (!IsShiftCountInRange()) {
6289 compiler::Label* deopt =
6290 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6291
6292 __ CompareImmediate(shift, kShiftCountLimit);
6293 __ BranchIf(HI, deopt);
6294 }
6295
6296 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6297 left_hi, shift);
6298 }
6299#else
6300 const Register left = locs()->in(0).reg();
6301 const Register out = locs()->out(0).reg();
6302 ASSERT(!can_overflow());
6303
6304 if (locs()->in(1).IsConstant()) {
6305 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
6306 locs()->in(1).constant());
6307 } else {
6308 // Code for a variable shift amount.
6309 Register shift = locs()->in(1).reg();
6310
6311 // Untag shift count.
6312 __ SmiUntag(TMP, shift);
6313 shift = TMP;
6314
6315 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6316 if (!IsShiftCountInRange()) {
6318 compiler::Label* deopt =
6319 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6320
6321 __ CompareImmediate(shift, kShiftCountLimit);
6322 __ BranchIf(HI, deopt);
6323 }
6324
6325 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
6326 }
6327#endif
6328}
6329
6330class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
6331 public:
6332 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
6333 : ThrowErrorSlowPathCode(instruction,
6334 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6335
6336 const char* name() override { return "uint32 shift"; }
6337
6338 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6339#if XLEN == 32
6340 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6341 Register right_lo = right_pair->At(0).reg();
6342 Register right_hi = right_pair->At(1).reg();
6343 Register out = instruction()->locs()->out(0).reg();
6344
6345 compiler::Label throw_error;
6346 __ bltz(right_hi, &throw_error, compiler::Assembler::kNearJump);
6347 __ li(out, 0);
6348 __ j(exit_label());
6349
6350 __ Bind(&throw_error);
6351 // Can't pass unboxed int64 value directly to runtime call, as all
6352 // arguments are expected to be tagged (boxed).
6353 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6354 // TODO(dartbug.com/33549): Clean this up when unboxed values
6355 // can be passed as arguments.
6356 __ StoreToOffset(right_lo, THR,
6358 __ StoreToOffset(right_hi, THR,
6361#else
6362 const Register right = instruction()->locs()->in(1).reg();
6363
6364 // Can't pass unboxed int64 value directly to runtime call, as all
6365 // arguments are expected to be tagged (boxed).
6366 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6367 // TODO(dartbug.com/33549): Clean this up when unboxed values
6368 // can be passed as arguments.
6369 __ sx(right,
6370 compiler::Address(
6372#endif
6373 }
6374};
6375
6376LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6377 bool opt) const {
6378 const intptr_t kNumInputs = 2;
6379 const intptr_t kNumTemps = 0;
6380 LocationSummary* summary = new (zone) LocationSummary(
6381 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6382 summary->set_in(0, Location::RequiresRegister());
6383 if (RangeUtils::IsPositive(shift_range()) &&
6384 right()->definition()->IsConstant()) {
6385 ConstantInstr* constant = right()->definition()->AsConstant();
6386 summary->set_in(1, Location::Constant(constant));
6387 } else {
6388#if XLEN == 32
6389 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6390 Location::RequiresRegister()));
6391#else
6392 summary->set_in(1, Location::RequiresRegister());
6393#endif
6394 }
6395 summary->set_out(0, Location::RequiresRegister());
6396 return summary;
6397}
6398
6399void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6400#if XLEN == 32
6401 Register left = locs()->in(0).reg();
6402 Register out = locs()->out(0).reg();
6403
6404 ASSERT(left != out);
6405
6406 if (locs()->in(1).IsConstant()) {
6407 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6408 locs()->in(1).constant());
6409 } else {
6410 // Code for a variable shift amount (or constant that throws).
6411 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6412 Register right_lo = right_pair->At(0).reg();
6413 Register right_hi = right_pair->At(1).reg();
6414
6415 // Jump to a slow path if shift count is > 31 or negative.
6416 ShiftUint32OpSlowPath* slow_path = nullptr;
6417 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6418 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6419 compiler->AddSlowPathCode(slow_path);
6420
6421 __ CompareImmediate(right_hi, 0);
6422 __ BranchIf(NE, slow_path->entry_label());
6423 __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
6424 __ BranchIf(HI, slow_path->entry_label());
6425 }
6426
6427 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
6428
6429 if (slow_path != nullptr) {
6430 __ Bind(slow_path->exit_label());
6431 }
6432 }
6433#else
6434 Register left = locs()->in(0).reg();
6435 Register out = locs()->out(0).reg();
6436
6437 if (locs()->in(1).IsConstant()) {
6438 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6439 locs()->in(1).constant());
6440 } else {
6441 // Code for a variable shift amount (or constant that throws).
6442 const Register right = locs()->in(1).reg();
6443 const bool shift_count_in_range =
6444 IsShiftCountInRange(kUint32ShiftCountLimit);
6445
6446 // Jump to a slow path if shift count is negative.
6447 if (!shift_count_in_range) {
6448 ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
6449 compiler->AddSlowPathCode(slow_path);
6450
6451 __ bltz(right, slow_path->entry_label());
6452 }
6453
6454 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6455
6456 if (!shift_count_in_range) {
6457 // If shift value is > 31, return zero.
6458 compiler::Label done;
6459 __ CompareImmediate(right, 31);
6460 __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
6461 __ li(out, 0);
6462 __ Bind(&done);
6463 }
6464 }
6465#endif
6466}
6467
6468LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6469 Zone* zone,
6470 bool opt) const {
6471 const intptr_t kNumInputs = 2;
6472 const intptr_t kNumTemps = 0;
6473 LocationSummary* summary = new (zone)
6474 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6475 summary->set_in(0, Location::RequiresRegister());
6476 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6477 summary->set_out(0, Location::RequiresRegister());
6478 return summary;
6479}
6480
6481void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6482 FlowGraphCompiler* compiler) {
6483 Register left = locs()->in(0).reg();
6484 Register out = locs()->out(0).reg();
6485
6486 if (locs()->in(1).IsConstant()) {
6487 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6488 locs()->in(1).constant());
6489 } else {
6490 Register right = locs()->in(1).reg();
6491 const bool shift_count_in_range =
6492 IsShiftCountInRange(kUint32ShiftCountLimit);
6493
6494 __ SmiUntag(TMP, right);
6495 right = TMP;
6496
6497 // Jump to a slow path if shift count is negative.
6498 if (!shift_count_in_range) {
6499 // Deoptimize if shift count is negative.
6501 compiler::Label* deopt =
6502 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6503
6504 __ bltz(right, deopt);
6505 }
6506
6507 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6508
6509 if (!shift_count_in_range) {
6510 // If shift value is > 31, return zero.
6511 compiler::Label done;
6512 __ CompareImmediate(right, 31);
6513 __ BranchIf(LE, &done, compiler::Assembler::kNearJump);
6514 __ li(out, 0);
6515 __ Bind(&done);
6516 }
6517 }
6518}
6519
6520LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6521 bool opt) const {
6522#if XLEN == 32
6523 const intptr_t kNumInputs = 1;
6524 const intptr_t kNumTemps = 0;
6525 LocationSummary* summary = new (zone)
6526 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6527 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6528 Location::RequiresRegister()));
6529 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6530 Location::RequiresRegister()));
6531 return summary;
6532#else
6533 const intptr_t kNumInputs = 1;
6534 const intptr_t kNumTemps = 0;
6535 LocationSummary* summary = new (zone)
6536 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6537 summary->set_in(0, Location::RequiresRegister());
6538 summary->set_out(0, Location::RequiresRegister());
6539 return summary;
6540#endif
6541}
6542
6543void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6544#if XLEN == 32
6545 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6546 Register left_lo = left_pair->At(0).reg();
6547 Register left_hi = left_pair->At(1).reg();
6548
6549 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6550 Register out_lo = out_pair->At(0).reg();
6551 Register out_hi = out_pair->At(1).reg();
6552
6553 switch (op_kind()) {
6554 case Token::kBIT_NOT:
6555 __ not_(out_lo, left_lo);
6556 __ not_(out_hi, left_hi);
6557 break;
6558 case Token::kNEGATE:
6559 __ snez(TMP, left_lo); // Borrow
6560 __ neg(out_lo, left_lo);
6561 __ neg(out_hi, left_hi);
6562 __ sub(out_hi, out_hi, TMP);
6563 break;
6564 default:
6565 UNREACHABLE();
6566 }
6567#else
6568 const Register left = locs()->in(0).reg();
6569 const Register out = locs()->out(0).reg();
6570 switch (op_kind()) {
6571 case Token::kBIT_NOT:
6572 __ not_(out, left);
6573 break;
6574 case Token::kNEGATE:
6575 __ neg(out, left);
6576 break;
6577 default:
6578 UNREACHABLE();
6579 }
6580#endif
6581}
6582
6583LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6584 bool opt) const {
6585 const intptr_t kNumInputs = 2;
6586 const intptr_t kNumTemps = 0;
6587 LocationSummary* summary = new (zone)
6588 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6589 summary->set_in(0, Location::RequiresRegister());
6590 summary->set_in(1, Location::RequiresRegister());
6591 summary->set_out(0, Location::RequiresRegister());
6592 return summary;
6593}
6594
6595void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6596 Register left = locs()->in(0).reg();
6597 Register right = locs()->in(1).reg();
6598 Register out = locs()->out(0).reg();
6599 switch (op_kind()) {
6600 case Token::kBIT_AND:
6601 __ and_(out, left, right);
6602 break;
6603 case Token::kBIT_OR:
6604 __ or_(out, left, right);
6605 break;
6606 case Token::kBIT_XOR:
6607 __ xor_(out, left, right);
6608 break;
6609 case Token::kADD:
6610#if XLEN == 32
6611 __ add(out, left, right);
6612#elif XLEN > 32
6613 __ addw(out, left, right);
6614#endif
6615 break;
6616 case Token::kSUB:
6617#if XLEN == 32
6618 __ sub(out, left, right);
6619#elif XLEN > 32
6620 __ subw(out, left, right);
6621#endif
6622 break;
6623 case Token::kMUL:
6624 __ mul(out, left, right);
6625 break;
6626 default:
6627 UNREACHABLE();
6628 }
6629}
6630
6631LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6632 bool opt) const {
6633 const intptr_t kNumInputs = 1;
6634 const intptr_t kNumTemps = 0;
6635 LocationSummary* summary = new (zone)
6636 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6637 summary->set_in(0, Location::RequiresRegister());
6638 summary->set_out(0, Location::RequiresRegister());
6639 return summary;
6640}
6641
6642void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6643 Register left = locs()->in(0).reg();
6644 Register out = locs()->out(0).reg();
6645
6646 ASSERT(op_kind() == Token::kBIT_NOT);
6647 __ not_(out, left);
6648}
6649
6650#if XLEN == 32
6651static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
6652 BinaryInt32OpInstr* shift_left) {
6653 const LocationSummary& locs = *shift_left->locs();
6654 const Register left = locs.in(0).reg();
6655 const Register result = locs.out(0).reg();
6656 compiler::Label* deopt =
6657 shift_left->CanDeoptimize()
6658 ? compiler->AddDeoptStub(shift_left->deopt_id(),
6659 ICData::kDeoptBinarySmiOp)
6660 : nullptr;
6661 ASSERT(locs.in(1).IsConstant());
6662 const Object& constant = locs.in(1).constant();
6664 // Immediate shift operation takes 5 bits for the count.
6665 const intptr_t kCountLimit = 0x1F;
6666 const intptr_t value = compiler::target::SmiValue(constant);
6667 ASSERT((0 < value) && (value < kCountLimit));
6668 __ slli(result, left, value);
6669 if (shift_left->can_overflow()) {
6670 __ srai(TMP, result, value);
6671 __ bne(TMP, left, deopt); // Overflow.
6672 }
6673}
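// The overflow check above relies on the shift being undone exactly when no
// information was lost: after result = left << value, srai shifts it back, and
// the comparison with `left` fails precisely when high-order bits (including a
// change of sign) were shifted away. For example, shifting 0x20000000 left by
// 3 yields 0, the shift back still gives 0 != left, and the code deopts.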
6674
6675LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
6676 bool opt) const {
6677 const intptr_t kNumInputs = 2;
6678 // Calculate number of temporaries.
6679 intptr_t num_temps = 0;
6680 if (((op_kind() == Token::kSHL) && can_overflow()) ||
6681 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR) ||
6682 (op_kind() == Token::kMUL)) {
6683 num_temps = 1;
6684 }
6685 LocationSummary* summary = new (zone)
6686 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
6687 summary->set_in(0, Location::RequiresRegister());
6688 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6689 if (num_temps == 1) {
6690 summary->set_temp(0, Location::RequiresRegister());
6691 }
6692 // Unlike on Intel, we can use 3-operand instructions, so the result
6693 // register is not required to be identical to the first input register.
6694 summary->set_out(0, Location::RequiresRegister());
6695 return summary;
6696}
6697
6698void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6699 if (op_kind() == Token::kSHL) {
6700 EmitInt32ShiftLeft(compiler, this);
6701 return;
6702 }
6703
6704 const Register left = locs()->in(0).reg();
6705 const Register result = locs()->out(0).reg();
6706 compiler::Label* deopt = nullptr;
6707 if (CanDeoptimize()) {
6708 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
6709 }
6710
6711 if (locs()->in(1).IsConstant()) {
6712 const Object& constant = locs()->in(1).constant();
6714 const intptr_t value = compiler::target::SmiValue(constant);
6715 switch (op_kind()) {
6716 case Token::kADD: {
6717 if (deopt == nullptr) {
6718 __ AddImmediate(result, left, value);
6719 } else {
6720 __ AddImmediateBranchOverflow(result, left, value, deopt);
6721 }
6722 break;
6723 }
6724 case Token::kSUB: {
6725 if (deopt == nullptr) {
6726 __ AddImmediate(result, left, -value);
6727 } else {
6728 // Negating value and using AddImmediateSetFlags would not detect the
6729 // overflow when value == kMinInt32.
6730 __ SubtractImmediateBranchOverflow(result, left, value, deopt);
6731 }
6732 break;
6733 }
6734 case Token::kMUL: {
6735 const Register right = locs()->temp(0).reg();
6736 __ LoadImmediate(right, value);
6737 if (deopt == nullptr) {
6738 __ mul(result, left, right);
6739 } else {
6740 __ MultiplyBranchOverflow(result, left, right, deopt);
6741 }
6742 break;
6743 }
6744 case Token::kBIT_AND: {
6745 // No overflow check.
6746 __ AndImmediate(result, left, value);
6747 break;
6748 }
6749 case Token::kBIT_OR: {
6750 // No overflow check.
6751 __ OrImmediate(result, left, value);
6752 break;
6753 }
6754 case Token::kBIT_XOR: {
6755 // No overflow check.
6756 __ XorImmediate(result, left, value);
6757 break;
6758 }
6759 case Token::kSHR: {
6760 // Immediate arithmetic right shifts take at most a 5-bit count on RV32.
6761 const intptr_t kCountLimit = 0x1F;
6762 __ srai(result, left, Utils::Minimum(value, kCountLimit));
6763 break;
6764 }
6765 case Token::kUSHR: {
6766 UNIMPLEMENTED();
6767 break;
6768 }
6769
6770 default:
6771 UNREACHABLE();
6772 break;
6773 }
6774 return;
6775 }
6776
6777 const Register right = locs()->in(1).reg();
6778 switch (op_kind()) {
6779 case Token::kADD: {
6780 if (deopt == nullptr) {
6781 __ add(result, left, right);
6782 } else {
6783 __ AddBranchOverflow(result, left, right, deopt);
6784 }
6785 break;
6786 }
6787 case Token::kSUB: {
6788 if (deopt == nullptr) {
6789 __ sub(result, left, right);
6790 } else {
6791 __ SubtractBranchOverflow(result, left, right, deopt);
6792 }
6793 break;
6794 }
6795 case Token::kMUL: {
6796 if (deopt == nullptr) {
6797 __ mul(result, left, right);
6798 } else {
6799 __ MultiplyBranchOverflow(result, left, right, deopt);
6800 }
6801 break;
6802 }
6803 case Token::kBIT_AND: {
6804 // No overflow check.
6805 __ and_(result, left, right);
6806 break;
6807 }
6808 case Token::kBIT_OR: {
6809 // No overflow check.
6810 __ or_(result, left, right);
6811 break;
6812 }
6813 case Token::kBIT_XOR: {
6814 // No overflow check.
6815 __ xor_(result, left, right);
6816 break;
6817 }
6818 default:
6819 UNREACHABLE();
6820 break;
6821 }
6822}
6823#else
6824DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
6825#endif
6826
6827LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6828 bool opt) const {
6829#if XLEN == 32
6830 const intptr_t kNumInputs = 1;
6831 const intptr_t kNumTemps = 0;
6832 LocationSummary* summary = new (zone)
6833 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6834 if (from() == kUntagged || to() == kUntagged) {
6835 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
6836 (from() == kUntagged && to() == kUnboxedUint32) ||
6837 (from() == kUnboxedInt32 && to() == kUntagged) ||
6838 (from() == kUnboxedUint32 && to() == kUntagged));
6840 summary->set_in(0, Location::RequiresRegister());
6841 summary->set_out(0, Location::SameAsFirstInput());
6842 } else if (from() == kUnboxedInt64) {
6843 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6844 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6845 Location::RequiresRegister()));
6846 summary->set_out(0, Location::RequiresRegister());
6847 } else if (to() == kUnboxedInt64) {
6848 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6849 summary->set_in(0, Location::RequiresRegister());
6850 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6851 Location::RequiresRegister()));
6852 } else {
6853 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6854 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6855 summary->set_in(0, Location::RequiresRegister());
6856 summary->set_out(0, Location::SameAsFirstInput());
6857 }
6858 return summary;
6859#else
6860 const intptr_t kNumInputs = 1;
6861 const intptr_t kNumTemps = 0;
6862 LocationSummary* summary = new (zone)
6863 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6864 if (from() == kUntagged || to() == kUntagged) {
6865 ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
6866 (from() == kUnboxedIntPtr && to() == kUntagged));
6868 } else if (from() == kUnboxedInt64) {
6869 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6870 } else if (to() == kUnboxedInt64) {
6871 ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
6872 } else {
6873 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6874 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6875 }
6876 summary->set_in(0, Location::RequiresRegister());
6877 if (CanDeoptimize()) {
6878 summary->set_out(0, Location::RequiresRegister());
6879 } else {
6880 summary->set_out(0, Location::SameAsFirstInput());
6881 }
6882 return summary;
6883#endif
6884}
6885
6886void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6887#if XLEN == 32
6888 const bool is_nop_conversion =
6889 (from() == kUntagged && to() == kUnboxedInt32) ||
6890 (from() == kUntagged && to() == kUnboxedUint32) ||
6891 (from() == kUnboxedInt32 && to() == kUntagged) ||
6892 (from() == kUnboxedUint32 && to() == kUntagged);
6893 if (is_nop_conversion) {
6894 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6895 return;
6896 }
6897
6898 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6899 const Register out = locs()->out(0).reg();
6900 // Representations are bitwise equivalent.
6901 ASSERT(out == locs()->in(0).reg());
6902 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6903 const Register out = locs()->out(0).reg();
6904 // Representations are bitwise equivalent.
6905 ASSERT(out == locs()->in(0).reg());
6906 if (CanDeoptimize()) {
6907 compiler::Label* deopt =
6908 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6909 __ bltz(out, deopt);
6910 }
6911 } else if (from() == kUnboxedInt64) {
6912 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6913 PairLocation* in_pair = locs()->in(0).AsPairLocation();
6914 Register in_lo = in_pair->At(0).reg();
6915 Register in_hi = in_pair->At(1).reg();
6916 Register out = locs()->out(0).reg();
6917 // Copy low word.
6918 __ mv(out, in_lo);
6919 if (CanDeoptimize()) {
6920 compiler::Label* deopt =
6921 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6922 ASSERT(to() == kUnboxedInt32);
6923 __ srai(TMP, in_lo, XLEN - 1);
6924 __ bne(in_hi, TMP, deopt);
6925 }
6926 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
6927 ASSERT(to() == kUnboxedInt64);
6928 Register in = locs()->in(0).reg();
6929 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6930 Register out_lo = out_pair->At(0).reg();
6931 Register out_hi = out_pair->At(1).reg();
6932 // Copy low word.
6933 __ mv(out_lo, in);
6934 if (from() == kUnboxedUint32) {
6935 __ li(out_hi, 0);
6936 } else {
6937 ASSERT(from() == kUnboxedInt32);
6938 __ srai(out_hi, in, XLEN - 1);
6939 }
6940 } else {
6941 UNREACHABLE();
6942 }
6943#else
6944 ASSERT(from() != to()); // We don't convert from a representation to itself.
6945
6946 const bool is_nop_conversion =
6947 (from() == kUntagged && to() == kUnboxedIntPtr) ||
6948 (from() == kUnboxedIntPtr && to() == kUntagged);
6949 if (is_nop_conversion) {
6950 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6951 return;
6952 }
6953
6954 const Register value = locs()->in(0).reg();
6955 const Register out = locs()->out(0).reg();
6956 compiler::Label* deopt =
6957 !CanDeoptimize()
6958 ? nullptr
6959 : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6960 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6961 if (CanDeoptimize()) {
6962 __ slli(TMP, value, 32);
6963 __ bltz(TMP, deopt); // If sign bit is set it won't fit in a uint32.
6964 }
6965 if (out != value) {
6966 __ mv(out, value); // For positive values the bits are the same.
6967 }
6968 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6969 if (CanDeoptimize()) {
6970 __ slli(TMP, value, 32);
6971 __ bltz(TMP, deopt); // If high bit is set it won't fit in an int32.
6972 }
6973 if (out != value) {
6974 __ mv(out, value); // For 31 bit values the bits are the same.
6975 }
6976 } else if (from() == kUnboxedInt64) {
6977 if (to() == kUnboxedInt32) {
6978 if (is_truncating() || out != value) {
6979 __ sextw(out, value); // Signed extension 64->32.
6980 }
6981 } else {
6982 ASSERT(to() == kUnboxedUint32);
6983 if (is_truncating() || out != value) {
6984 // Unsigned extension 64->32.
6985 // TODO(riscv): Might be a shorter way to do this.
6986 __ slli(out, value, 32);
6987 __ srli(out, out, 32);
6988 }
6989 }
6990 if (CanDeoptimize()) {
6991 ASSERT(to() == kUnboxedInt32);
6992 __ CompareRegisters(out, value);
6993 __ BranchIf(NE, deopt); // Value cannot be held in Int32, deopt.
6994 }
6995 } else if (to() == kUnboxedInt64) {
6996 if (from() == kUnboxedUint32) {
6997 // TODO(riscv): Might be a shorter way to do this.
6998 __ slli(out, value, 32);
6999 __ srli(out, out, 32);
7000 } else {
7001 ASSERT(from() == kUnboxedInt32);
7002 __ sextw(out, value); // Signed extension 32->64.
7003 }
7004 } else {
7005 UNREACHABLE();
7006 }
7007#endif
7008}
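// Note on the RV64 range checks above: `slli TMP, value, 32` moves bit 31 of
// the original value into the register's sign bit, so a single bltz catches
// exactly the inputs that do not fit the destination: negative values for
// int32 -> uint32, and values >= 2^31 for uint32 -> int32.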
7009
7010LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7011 LocationSummary* summary =
7012 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7013 /*num_temps=*/0, LocationSummary::kNoCall);
7014 switch (from()) {
7015 case kUnboxedInt32:
7016 summary->set_in(0, Location::RequiresRegister());
7017 break;
7018 case kUnboxedInt64:
7019#if XLEN == 32
7020 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7021 Location::RequiresRegister()));
7022#else
7023 summary->set_in(0, Location::RequiresRegister());
7024#endif
7025 break;
7026 case kUnboxedFloat:
7027 case kUnboxedDouble:
7028 summary->set_in(0, Location::RequiresFpuRegister());
7029 break;
7030 default:
7031 UNREACHABLE();
7032 }
7033
7034 switch (to()) {
7035 case kUnboxedInt32:
7036 summary->set_out(0, Location::RequiresRegister());
7037 break;
7038 case kUnboxedInt64:
7039#if XLEN == 32
7040 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7041 Location::RequiresRegister()));
7042#else
7043 summary->set_out(0, Location::RequiresRegister());
7044#endif
7045 break;
7046 case kUnboxedFloat:
7047 case kUnboxedDouble:
7048 summary->set_out(0, Location::RequiresFpuRegister());
7049 break;
7050 default:
7051 UNREACHABLE();
7052 }
7053 return summary;
7054}
7055
7056void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7057 switch (from()) {
7058 case kUnboxedFloat: {
7059 switch (to()) {
7060 case kUnboxedInt32: {
7061 const FpuRegister src = locs()->in(0).fpu_reg();
7062 const Register dst = locs()->out(0).reg();
7063 __ fmvxw(dst, src);
7064 break;
7065 }
7066 case kUnboxedInt64: {
7067 const FpuRegister src = locs()->in(0).fpu_reg();
7068#if XLEN == 32
7069 const Register dst0 = locs()->out(0).AsPairLocation()->At(0).reg();
7070 const Register dst1 = locs()->out(0).AsPairLocation()->At(1).reg();
7071 __ fmvxw(dst0, src);
7072 __ li(dst1, 0);
7073#else
7074 const Register dst = locs()->out(0).reg();
7075 __ fmvxw(dst, src);
7076#endif
7077 break;
7078 }
7079 default:
7080 UNREACHABLE();
7081 }
7082 break;
7083 }
7084 case kUnboxedDouble: {
7085 ASSERT(to() == kUnboxedInt64);
7086 const FpuRegister src = locs()->in(0).fpu_reg();
7087#if XLEN == 32
7088 const Register dst0 = locs()->out(0).AsPairLocation()->At(0).reg();
7089 const Register dst1 = locs()->out(0).AsPairLocation()->At(1).reg();
7090 __ subi(SP, SP, 16);
7091 __ fsd(src, compiler::Address(SP, 0));
7092 __ lw(dst0, compiler::Address(SP, 0));
7093 __ lw(dst1, compiler::Address(SP, 4));
7094 __ addi(SP, SP, 16);
7095#else
7096 const Register dst = locs()->out(0).reg();
7097 __ fmvxd(dst, src);
7098#endif
7099 break;
7100 }
7101 case kUnboxedInt64: {
7102 switch (to()) {
7103 case kUnboxedDouble: {
7104 const FpuRegister dst = locs()->out(0).fpu_reg();
7105#if XLEN == 32
7106 const Register src0 = locs()->in(0).AsPairLocation()->At(0).reg();
7107 const Register src1 = locs()->in(0).AsPairLocation()->At(1).reg();
7108 __ subi(SP, SP, 16);
7109 __ sw(src0, compiler::Address(SP, 0));
7110 __ sw(src1, compiler::Address(SP, 4));
7111 __ fld(dst, compiler::Address(SP, 0));
7112 __ addi(SP, SP, 16);
7113#else
7114 const Register src = locs()->in(0).reg();
7115 __ fmvdx(dst, src);
7116#endif
7117 break;
7118 }
7119 case kUnboxedFloat: {
7120 const FpuRegister dst = locs()->out(0).fpu_reg();
7121#if XLEN == 32
7122 const Register src0 = locs()->in(0).AsPairLocation()->At(0).reg();
7123 __ fmvwx(dst, src0);
7124#else
7125 const Register src = locs()->in(0).reg();
7126 __ fmvwx(dst, src);
7127#endif
7128 break;
7129 }
7130 default:
7131 UNREACHABLE();
7132 }
7133 break;
7134 }
7135 case kUnboxedInt32: {
7136 ASSERT(to() == kUnboxedFloat);
7137 const Register src = locs()->in(0).reg();
7138 const FpuRegister dst = locs()->out(0).fpu_reg();
7139 __ fmvwx(dst, src);
7140 break;
7141 }
7142 default:
7143 UNREACHABLE();
7144 }
7145}
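// Note on the RV32 paths above: RV32 has no 64-bit move between the integer
// and FP register files, so the double <-> int64 bitcasts go through a
// temporary stack slot (fsd/lw and sw/fld), while the 32-bit float cases can
// use fmv.x.w / fmv.w.x (fmvxw / fmvwx) directly.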
7146
7147LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7148 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7149}
7150
7151void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7152 __ Stop(message());
7153}
7154
7155void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7156 BlockEntryInstr* entry = normal_entry();
7157 if (entry != nullptr) {
7158 if (!compiler->CanFallThroughTo(entry)) {
7159 FATAL("Checked function entry must have no offset");
7160 }
7161 } else {
7162 entry = osr_entry();
7163 if (!compiler->CanFallThroughTo(entry)) {
7164 __ j(compiler->GetJumpLabel(entry));
7165 }
7166 }
7167}
7168
7169LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7170 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7171}
7172
7173void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7174 if (!compiler->is_optimizing()) {
7175 if (FLAG_reorder_basic_blocks) {
7176 compiler->EmitEdgeCounter(block()->preorder_number());
7177 }
7178 // Add a deoptimization descriptor for deoptimizing instructions that
7179 // may be inserted before this instruction.
7180 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
7181 InstructionSource());
7182 }
7183 if (HasParallelMove()) {
7184 parallel_move()->EmitNativeCode(compiler);
7185 }
7186
7187 // We can fall through if the successor is the next block in the list.
7188 // Otherwise, we need a jump.
7189 if (!compiler->CanFallThroughTo(successor())) {
7190 __ j(compiler->GetJumpLabel(successor()));
7191 }
7192}
7193
7194LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
7195 bool opt) const {
7196 const intptr_t kNumInputs = 1;
7197 const intptr_t kNumTemps = 2;
7198
7199 LocationSummary* summary = new (zone)
7200 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7201
7202 summary->set_in(0, Location::RequiresRegister());
7203 summary->set_temp(0, Location::RequiresRegister());
7204 summary->set_temp(1, Location::RequiresRegister());
7205
7206 return summary;
7207}
7208
7209void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7210 Register index_reg = locs()->in(0).reg();
7211 Register target_address_reg = locs()->temp(0).reg();
7212 Register offset_reg = locs()->temp(1).reg();
7213
7214 ASSERT(RequiredInputRepresentation(0) == kTagged);
7215 __ LoadObject(offset_reg, offsets_);
7216 const auto element_address = __ ElementAddressForRegIndex(
7217 /*is_external=*/false, kTypedDataInt32ArrayCid,
7218 /*index_scale=*/4,
7219 /*index_unboxed=*/false, offset_reg, index_reg, TMP);
7220 __ lw(offset_reg, element_address);
7221
7222 const intptr_t entry_offset = __ CodeSize();
7223 intx_t imm = -entry_offset;
7224 intx_t lo = ImmLo(imm);
7225 intx_t hi = ImmHi(imm);
7226 __ auipc(target_address_reg, hi);
7227 __ add(target_address_reg, target_address_reg, offset_reg);
7228 __ jr(target_address_reg, lo);
7229}
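// How the computed jump above works: offsets_ stores each target's byte offset
// from the start of this function's instructions. The auipc/add/jr sequence
// computes pc_of_auipc - entry_offset + offset (with -entry_offset split into
// the auipc hi part and the jr lo part); since the auipc sits exactly
// entry_offset bytes into the code, this resolves to instructions start +
// offset, i.e. the selected target block.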
7230
7231LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
7232 bool opt) const {
7233 const intptr_t kNumInputs = 2;
7234 const intptr_t kNumTemps = 0;
7235 if (needs_number_check()) {
7236 LocationSummary* locs = new (zone)
7237 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7238 locs->set_in(0, Location::RegisterLocation(A0));
7239 locs->set_in(1, Location::RegisterLocation(A1));
7240 locs->set_out(0, Location::RegisterLocation(A0));
7241 return locs;
7242 }
7243 LocationSummary* locs = new (zone)
7244 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7245 locs->set_in(0, LocationRegisterOrConstant(left()));
7246 // Only one of the inputs can be a constant. Choose register if the first one
7247 // is a constant.
7248 locs->set_in(1, locs->in(0).IsConstant()
7249 ? Location::RequiresRegister()
7250 : LocationRegisterOrConstant(right()));
7251 locs->set_out(0, Location::RequiresRegister());
7252 return locs;
7253}
7254
7255Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
7256 FlowGraphCompiler* compiler,
7257 BranchLabels labels,
7258 Register reg,
7259 const Object& obj) {
7260 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
7261 source(), deopt_id());
7262}
7263
7264void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7265 compiler::Label is_true, is_false;
7266 BranchLabels labels = {&is_true, &is_false, &is_false};
7267 Condition true_condition = EmitComparisonCode(compiler, labels);
7268
7269 Register result = locs()->out(0).reg();
7270 if (is_true.IsLinked() || is_false.IsLinked()) {
7271 if (true_condition != kInvalidCondition) {
7272 EmitBranchOnCondition(compiler, true_condition, labels);
7273 }
7274 compiler::Label done;
7275 __ Bind(&is_false);
7276 __ LoadObject(result, Bool::False());
7277 __ j(&done, compiler::Assembler::kNearJump);
7278 __ Bind(&is_true);
7279 __ LoadObject(result, Bool::True());
7280 __ Bind(&done);
7281 } else {
7282 // If EmitComparisonCode did not use the labels and just returned
7283 // a condition we can avoid the branch and use slt to generate the
7284 // offsets to true or false.
7285 ASSERT(kTrueOffsetFromNull > 0);
7286 ASSERT(kFalseOffsetFromNull > kTrueOffsetFromNull);
7287 ASSERT(kTrueOffsetFromNull + (1 << kBoolValueBitPosition) ==
7288 kFalseOffsetFromNull);
7289 __ SetIf(InvertCondition(true_condition), result);
7290 __ slli(result, result, kBoolValueBitPosition);
7291 __ addi(result, result, kTrueOffsetFromNull);
7292 __ add(result, result, NULL_REG);
7293 }
7294}
7295
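A note on the branchless path above: it leans on Bool::True and Bool::False being allocated at small fixed offsets from the null object, so the 0/1 produced by SetIf can be shifted and added to NULL_REG instead of branching to load either constant. The sketch below models that selection on plain integers; null_ptr, true_offset and gap_log2 are illustrative parameters, not the VM's actual layout constants.

// Hedged sketch: selecting between two objects placed at fixed offsets from
// null, assuming false_offset == true_offset + (1 << gap_log2).
#include <cassert>
#include <cstdint>

uintptr_t SelectBool(uintptr_t null_ptr,
                     bool condition,
                     intptr_t true_offset,
                     int gap_log2) {
  // SetIf(InvertCondition(cond), result) leaves 1 exactly when cond is false.
  const uintptr_t inverted = condition ? 0u : 1u;
  // Shifting that 0/1 produces the distance between the True and False
  // offsets, so one add against null selects the right object.
  return null_ptr + true_offset + (inverted << gap_log2);
}

int main() {
  // Illustrative layout only: null at 0x1000, "True" 16 bytes later and
  // "False" another 16 bytes after that (gap_log2 == 4).
  const uintptr_t null_ptr = 0x1000;
  assert(SelectBool(null_ptr, true, 16, 4) == null_ptr + 16);   // True
  assert(SelectBool(null_ptr, false, 16, 4) == null_ptr + 32);  // False
  return 0;
}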
7296void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
7297 BranchInstr* branch) {
7298 BranchLabels labels = compiler->CreateBranchLabels(branch);
7299 Condition true_condition = EmitComparisonCode(compiler, labels);
7300 if (true_condition != kInvalidCondition) {
7301 EmitBranchOnCondition(compiler, true_condition, labels);
7302 }
7303}
7304
7305LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
7306 bool opt) const {
7307 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7308 LocationSummary::kNoCall);
7309}
7310
7311void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7312 const Register input = locs()->in(0).reg();
7313 const Register result = locs()->out(0).reg();
7314 __ xori(result, input, compiler::target::ObjectAlignment::kBoolValueMask);
7315}
7316
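The negation above can be a single ALU operation because, in the VM's layout, the tagged pointers of Bool::True and Bool::False differ in exactly one bit, so XOR-ing with that one-bit mask swaps them. A minimal sketch with illustrative pointer values:

// Hedged sketch: flipping between two pointers that differ in a single bit.
#include <cassert>
#include <cstdint>

int main() {
  // Illustrative values only; the real tagged True/False pointers and the
  // mask come from the VM's object layout.
  const uintptr_t true_ptr = 0x1011;
  const uintptr_t false_ptr = 0x1031;
  const uintptr_t mask = true_ptr ^ false_ptr;  // 0x20: a single bit
  assert((mask & (mask - 1)) == 0);             // really just one bit set
  assert((true_ptr ^ mask) == false_ptr);       // negation: True -> False
  assert((false_ptr ^ mask) == true_ptr);       // negation: False -> True
  return 0;
}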
7317LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
7318 bool opt) const {
7319 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7320 LocationSummary::kNoCall);
7321}
7322
7323void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7324 const Register input = locs()->in(0).reg();
7325 const Register result = locs()->out(0).reg();
7326 __ LoadObject(TMP, Bool::True());
7327 __ xor_(TMP, TMP, input);
7328 __ seqz(TMP, TMP);
7329 __ neg(result, TMP);
7330}
7331
7332LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
7333 bool opt) const {
7334 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7335 LocationSummary::kNoCall);
7336}
7337
7338void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7339 const Register input = locs()->in(0).reg();
7340 const Register result = locs()->out(0).reg();
7341 __ seqz(result, input);
7342 __ slli(result, result, kBoolValueBitPosition);
7343 __ add(result, result, NULL_REG);
7344 __ addi(result, result, kTrueOffsetFromNull);
7345}
7346
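The two conversions above round-trip between tagged Bool pointers and the integers 0/-1: BoolToInt compares the input against Bool::True and negates the resulting 0/1, while IntToBool rebuilds the pointer from NULL_REG using the same fixed-offset layout as the comparison code earlier. A hedged sketch on plain integers, with illustrative values standing in for the VM's tagged pointers and layout constants:

// Hedged sketch of the two conversions above on plain integers.
#include <cassert>
#include <cstdint>

intptr_t BoolToIntSketch(uintptr_t input, uintptr_t true_ptr) {
  const uintptr_t x = true_ptr ^ input;        // 0 iff input is "True"
  const uintptr_t is_true = (x == 0) ? 1 : 0;  // seqz
  return -static_cast<intptr_t>(is_true);      // neg: -1 for true, 0 for false
}

uintptr_t IntToBoolSketch(intptr_t input, uintptr_t null_ptr,
                          intptr_t true_offset, int gap_log2) {
  const uintptr_t is_false = (input == 0) ? 1 : 0;         // seqz
  return null_ptr + (is_false << gap_log2) + true_offset;  // pick True/False
}

int main() {
  // Illustrative layout: "True" 0x10 past null, "False" 0x10 past "True".
  const uintptr_t null_ptr = 0x1000;
  const uintptr_t true_ptr = 0x1010;
  const uintptr_t false_ptr = 0x1020;
  assert(IntToBoolSketch(BoolToIntSketch(true_ptr, true_ptr),
                         null_ptr, 0x10, 4) == true_ptr);
  assert(IntToBoolSketch(BoolToIntSketch(false_ptr, true_ptr),
                         null_ptr, 0x10, 4) == false_ptr);
  return 0;
}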
7347LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
7348 bool opt) const {
7349 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
7350 const intptr_t kNumTemps = 0;
7351 LocationSummary* locs = new (zone)
7352 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7353 if (type_arguments() != nullptr) {
7354 locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
7355 AllocateObjectABI::kTypeArgumentsReg));
7356 }
7357 locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
7358 return locs;
7359}
7360
7361void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7362 if (type_arguments() != nullptr) {
7363 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
7364 if (type_usage_info != nullptr) {
7365 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
7366 type_arguments()->definition());
7367 }
7368 }
7369 const Code& stub = Code::ZoneHandle(
7370 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
7371 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
7372 locs(), deopt_id(), env());
7373}
7374
7375void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7376#ifdef PRODUCT
7377 UNREACHABLE();
7378#else
7379 ASSERT(!compiler->is_optimizing());
7380 __ JumpAndLinkPatchable(StubCode::DebugStepCheck());
7381 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
7382 compiler->RecordSafepoint(locs());
7383#endif
7384}
7385
7386} // namespace dart
7387
7388#endif // defined(TARGET_ARCH_RISCV)