Flutter Engine
The Flutter Engine
il_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
9
19#include "vm/cpu.h"
20#include "vm/dart_entry.h"
21#include "vm/instructions.h"
22#include "vm/object_store.h"
23#include "vm/parser.h"
24#include "vm/simulator.h"
25#include "vm/stack_frame.h"
26#include "vm/stub_code.h"
27#include "vm/symbols.h"
29
30#define __ compiler->assembler()->
31#define Z (compiler->zone())
32
33namespace dart {
34
35// Generic summary for call instructions that have all arguments pushed
36// on the stack and return the result in a fixed location depending on
37// the representation of the return value (R0, Location::Pair(R0, R1) or Q0).
38LocationSummary* Instruction::MakeCallSummary(Zone* zone,
39 const Instruction* instr,
40 LocationSummary* locs) {
41 ASSERT(locs == nullptr || locs->always_calls());
42 LocationSummary* result =
43 ((locs == nullptr)
44 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
45 : locs);
46 const auto representation = instr->representation();
47 switch (representation) {
48 case kTagged:
49 case kUntagged:
50 case kUnboxedUint32:
51 case kUnboxedInt32:
52 result->set_out(
53 0, Location::RegisterLocation(CallingConventions::kReturnReg));
54 break;
55 case kPairOfTagged:
56 case kUnboxedInt64:
57 result->set_out(
58 0, Location::Pair(
59 Location::RegisterLocation(CallingConventions::kReturnReg),
60 Location::RegisterLocation(
61 CallingConventions::kSecondReturnReg)));
62 break;
63 case kUnboxedDouble:
64 result->set_out(
65 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
66 break;
67 default:
68 UNREACHABLE();
69 break;
70 }
71 return result;
72}
73
74LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
75 bool opt) const {
76 const intptr_t kNumInputs = 1;
77 const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
78 LocationSummary* locs = new (zone)
79 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
80
81 locs->set_in(0, Location::RequiresRegister());
82 switch (representation()) {
83 case kTagged:
84 locs->set_out(0, Location::RequiresRegister());
85 break;
86 case kUnboxedInt64:
87 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
88 Location::RequiresRegister()));
89 break;
90 case kUnboxedDouble:
91 locs->set_temp(0, Location::RequiresRegister());
92 locs->set_out(0, Location::RequiresFpuRegister());
93 break;
94 default:
95 UNREACHABLE();
96 break;
97 }
98 return locs;
99}
100
101void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
102 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
103 ASSERT(kSmiTag == 0);
104 ASSERT(kSmiTagSize == 1);
105
106 const Register index = locs()->in(0).reg();
107
108 switch (representation()) {
109 case kTagged: {
110 const auto out = locs()->out(0).reg();
111 __ add(out, base_reg(), compiler::Operand(index, LSL, 1));
112 __ LoadFromOffset(out, out, offset());
113 break;
114 }
115 case kUnboxedInt64: {
116 const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
117 const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
118
119 __ add(out_hi, base_reg(), compiler::Operand(index, LSL, 1));
120 __ LoadFromOffset(out_lo, out_hi, offset());
121 __ LoadFromOffset(out_hi, out_hi, offset() + compiler::target::kWordSize);
122 break;
123 }
124 case kUnboxedDouble: {
125 const auto tmp = locs()->temp(0).reg();
126 const auto out = EvenDRegisterOf(locs()->out(0).fpu_reg());
127 __ add(tmp, base_reg(), compiler::Operand(index, LSL, 1));
128 __ LoadDFromOffset(out, tmp, offset());
129 break;
130 }
131 default:
132 UNREACHABLE();
133 break;
134 }
135}
136
137DEFINE_BACKEND(StoreIndexedUnsafe,
138 (NoLocation, Register index, Register value)) {
139 ASSERT(instr->RequiredInputRepresentation(
140 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
141 __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 1));
142 __ str(value, compiler::Address(TMP, instr->offset()));
143
144 ASSERT(kSmiTag == 0);
145 ASSERT(kSmiTagSize == 1);
146}
147
148DEFINE_BACKEND(TailCall,
149 (NoLocation,
150 Fixed<Register, ARGS_DESC_REG>,
151 Temp<Register> temp)) {
152 compiler->EmitTailCallToStub(instr->code());
153
154 // Even though the TailCallInstr will be the last instruction in a basic
155 // block, the flow graph compiler will emit native code for other blocks after
156 // the one containing this instruction and needs to be able to use the pool.
157 // (The `LeaveDartFrame` above disables usages of the pool.)
158 __ set_constant_pool_allowed(true);
159}
160
161// TODO(http://dartbug.com/51229): We can use TMP for LDM/STM, which means we
162// only need one additional temporary for 8-byte moves. For 16-byte moves,
163// attempting to allocate three temporaries causes too much register pressure,
164// so just use two 8-byte sized moves there per iteration.
165static constexpr intptr_t kMaxMemoryCopyElementSize =
166 2 * compiler::target::kWordSize;
167
168static constexpr intptr_t kMemoryCopyPayloadTemps = 2;
169
170LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
171 bool opt) const {
172 // The compiler must optimize any function that includes a MemoryCopy
173 // instruction that uses typed data cids, since extracting the payload address
174 // from views is done in a compiler pass after all code motion has happened.
175 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
176 !IsTypedDataBaseClassId(dest_cid_)) ||
177 opt);
178 const intptr_t kNumInputs = 5;
179 const intptr_t kNumTemps =
180 kMemoryCopyPayloadTemps +
181 (element_size_ >= kMaxMemoryCopyElementSize ? 1 : 0);
182 LocationSummary* locs = new (zone)
183 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
184 locs->set_in(kSrcPos, Location::RequiresRegister());
185 locs->set_in(kDestPos, Location::RequiresRegister());
188 locs->set_in(kLengthPos,
190 for (intptr_t i = 0; i < kNumTemps; i++) {
191 locs->set_temp(i, Location::RequiresRegister());
192 }
193 return locs;
194}
195
196void MemoryCopyInstr::EmitUnrolledCopy(FlowGraphCompiler* compiler,
197 Register dest_reg,
198 Register src_reg,
199 intptr_t num_elements,
200 bool reversed) {
201 const intptr_t num_bytes = num_elements * element_size_;
202 // The amount moved in a single load/store pair.
203 const intptr_t mov_size =
204 Utils::Minimum(element_size_, kMaxMemoryCopyElementSize);
205 const intptr_t mov_repeat = num_bytes / mov_size;
206 ASSERT(num_bytes % mov_size == 0);
207 // We can use TMP for all instructions below because element_size_ is
208 // guaranteed to fit in the offset portion of the instruction in the
209 // non-LDM/STM cases.
210
211 if (mov_size == kMaxMemoryCopyElementSize) {
212 RegList temp_regs = (1 << TMP);
213 for (intptr_t i = kMemoryCopyPayloadTemps; i < locs()->temp_count(); i++) {
214 temp_regs |= 1 << locs()->temp(i).reg();
215 }
216 auto block_mode = BlockAddressMode::IA_W;
217 if (reversed) {
218 // When reversed, start the src and dest registers with the end addresses
219 // and apply the negated offset prior to indexing.
220 block_mode = BlockAddressMode::DB_W;
221 __ AddImmediate(src_reg, num_bytes);
222 __ AddImmediate(dest_reg, num_bytes);
223 }
224 for (intptr_t i = 0; i < mov_repeat; i++) {
225 __ ldm(block_mode, src_reg, temp_regs);
226 __ stm(block_mode, dest_reg, temp_regs);
227 }
228 return;
229 }
230
231 for (intptr_t i = 0; i < mov_repeat; i++) {
232 const intptr_t byte_index =
233 (reversed ? mov_repeat - (i + 1) : i) * mov_size;
234 switch (mov_size) {
235 case 1:
236 __ ldrb(TMP, compiler::Address(src_reg, byte_index));
237 __ strb(TMP, compiler::Address(dest_reg, byte_index));
238 break;
239 case 2:
240 __ ldrh(TMP, compiler::Address(src_reg, byte_index));
241 __ strh(TMP, compiler::Address(dest_reg, byte_index));
242 break;
243 case 4:
244 __ ldr(TMP, compiler::Address(src_reg, byte_index));
245 __ str(TMP, compiler::Address(dest_reg, byte_index));
246 break;
247 default:
248 UNREACHABLE();
249 }
250 }
251}
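// Illustrative note (assumed values, not taken from the surrounding source):
// for a forward copy with element_size_ == 4 and num_elements == 3, the loop
// above emits three load/store pairs through TMP at increasing offsets:
//   ldr TMP, [src_reg, #0]   ; str TMP, [dest_reg, #0]
//   ldr TMP, [src_reg, #4]   ; str TMP, [dest_reg, #4]
//   ldr TMP, [src_reg, #8]   ; str TMP, [dest_reg, #8]
// With reversed == true the same pairs are emitted with byte offsets 8, 4, 0,
// and for element_size_ >= 8 the LDM/STM path above is used instead.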
252
253 void MemoryCopyInstr::PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
254 Register length_reg,
255 compiler::Label* done) {
256 __ BranchIfZero(length_reg, done);
257}
258
259static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
261 switch (bytes) {
262 case 1:
264 case 2:
266 case 4:
268 case 8:
270 default:
271 UNREACHABLE();
273 }
274}
275
276static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,
277 Register dest_reg,
278 Register src_reg,
279 Register length_reg,
280 intptr_t element_size,
281 bool unboxed_inputs,
282 bool reversed,
283 compiler::Label* done) {
285 if (element_size >= compiler::target::kWordSize) return;
286
287 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
288 const intptr_t base_shift =
289 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
290 auto const mode =
291 reversed ? compiler::Address::NegPreIndex : compiler::Address::PostIndex;
292 intptr_t tested_bits = 0;
293
294 __ Comment("Copying until region is a multiple of word size");
295
296 for (intptr_t bit = compiler::target::kWordSizeLog2 - 1; bit >= element_shift;
297 bit--) {
298 const intptr_t bytes = 1 << bit;
299 const intptr_t tested_bit = bit + base_shift;
300 tested_bits |= (1 << tested_bit);
301 __ tst(length_reg, compiler::Operand(1 << tested_bit));
302 auto const sz = OperandSizeFor(bytes);
303 __ Load(TMP, compiler::Address(src_reg, bytes, mode), sz, NOT_ZERO);
304 __ Store(TMP, compiler::Address(dest_reg, bytes, mode), sz, NOT_ZERO);
305 }
306
307 __ bics(length_reg, length_reg, compiler::Operand(tested_bits));
308 __ b(done, ZERO);
309}
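// Illustrative note (assumed values): with element_size == 1, a tagged (Smi)
// length and a 4-byte target word, element_shift == 0 and base_shift == 1, so
// the loop tests Smi bits 2 and 1 (untagged length bits 1 and 0), conditionally
// copying 2 bytes and then 1 byte with post-increment addressing. The final
// bics/b pair clears those tested bits (mask 0x6) from length_reg and jumps to
// `done` if the remaining, word-multiple length is zero.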
310
311void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
312 Register dest_reg,
313 Register src_reg,
314 Register length_reg,
315 compiler::Label* done,
316 compiler::Label* copy_forwards) {
317 const bool reversed = copy_forwards != nullptr;
318 if (reversed) {
319 // Verify that the overlap actually exists by checking to see if
320 // dest_start < src_end.
321 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
322 (unboxed_inputs_ ? 0 : kSmiTagShift);
323 if (shift < 0) {
324 __ add(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift));
325 } else {
326 __ add(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift));
327 }
328 __ CompareRegisters(dest_reg, src_reg);
329 // If dest_reg >= src_reg, then set src_reg back to the start of the source
330 // region before branching to the forwards-copying loop.
331 if (shift < 0) {
332 __ sub(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift),
333 UNSIGNED_GREATER_EQUAL);
334 } else {
335 __ sub(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift),
336 UNSIGNED_GREATER_EQUAL);
337 }
338 __ b(copy_forwards, UNSIGNED_GREATER_EQUAL);
339 // There is overlap, so adjust dest_reg now.
340 if (shift < 0) {
341 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));
342 } else {
343 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));
344 }
345 }
346 // We can use TMP for all instructions below because element_size_ is
347 // guaranteed to fit in the offset portion of the instruction in the
348 // non-LDM/STM cases.
349 CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
350 unboxed_inputs_, reversed, done);
351 // When reversed, the src and dest registers have been adjusted to start at
352 // the end addresses, so apply the negated offset prior to indexing.
353 const auto load_mode =
354 reversed ? compiler::Address::NegPreIndex : compiler::Address::PostIndex;
355 const auto load_multiple_mode =
356 reversed ? BlockAddressMode::DB_W : BlockAddressMode::IA_W;
357 // The size of the uncopied region is a multiple of the word size, so now we
358 // copy the rest by word (unless the element size is larger).
359 const intptr_t loop_subtract =
360 Utils::Maximum<intptr_t>(1, compiler::target::kWordSize / element_size_)
361 << (unboxed_inputs_ ? 0 : kSmiTagShift);
362 // Used only for LDM/STM below.
363 RegList temp_regs = (1 << TMP);
364 for (intptr_t i = kMemoryCopyPayloadTemps; i < locs()->temp_count(); i++) {
365 temp_regs |= 1 << locs()->temp(i).reg();
366 }
367 __ Comment("Copying by multiples of word size");
368 compiler::Label loop;
369 __ Bind(&loop);
370 switch (element_size_) {
371 // Fall through for the sizes smaller than compiler::target::kWordSize.
372 case 1:
373 case 2:
374 case 4:
375 __ ldr(TMP, compiler::Address(src_reg, 4, load_mode));
376 __ str(TMP, compiler::Address(dest_reg, 4, load_mode));
377 break;
378 case 8:
379 COMPILE_ASSERT(8 == kMaxMemoryCopyElementSize);
381 __ ldm(load_multiple_mode, src_reg, temp_regs);
382 __ stm(load_multiple_mode, dest_reg, temp_regs);
383 break;
384 case 16:
385 COMPILE_ASSERT(16 > kMaxMemoryCopyElementSize);
387 __ ldm(load_multiple_mode, src_reg, temp_regs);
388 __ stm(load_multiple_mode, dest_reg, temp_regs);
389 __ ldm(load_multiple_mode, src_reg, temp_regs);
390 __ stm(load_multiple_mode, dest_reg, temp_regs);
391 break;
392 default:
393 UNREACHABLE();
394 break;
395 }
396 __ subs(length_reg, length_reg, compiler::Operand(loop_subtract));
397 __ b(&loop, NOT_ZERO);
398}
399
400void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
401 classid_t array_cid,
402 Register array_reg,
403 Register payload_reg,
404 Representation array_rep,
405 Location start_loc) {
406 intptr_t offset = 0;
407 if (array_rep != kTagged) {
408 // Do nothing, array_reg already contains the payload address.
409 } else if (IsTypedDataBaseClassId(array_cid)) {
410 // The incoming array must have been proven to be an internal typed data
411 // object, where the payload is in the object and we can just offset.
412 ASSERT_EQUAL(array_rep, kTagged);
413 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
414 } else {
415 ASSERT_EQUAL(array_rep, kTagged);
416 ASSERT(!IsExternalPayloadClassId(array_cid));
417 switch (array_cid) {
418 case kOneByteStringCid:
419 offset =
420 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
421 break;
422 case kTwoByteStringCid:
423 offset =
424 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
425 break;
426 default:
427 UNREACHABLE();
428 break;
429 }
430 }
431 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
432 if (start_loc.IsConstant()) {
433 const auto& constant = start_loc.constant();
434 ASSERT(constant.IsInteger());
435 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
436 const intptr_t add_value = Utils::AddWithWrapAround(
437 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
438 __ AddImmediate(payload_reg, array_reg, add_value);
439 return;
440 }
441 const Register start_reg = start_loc.reg();
442 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
443 (unboxed_inputs_ ? 0 : kSmiTagShift);
444 if (shift < 0) {
445 __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
446 } else {
447 __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
448 }
449 __ AddImmediate(payload_reg, offset);
450}
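// Illustrative note (assumed values): for an internal typed data array with a
// constant start of 3 and element_size_ == 2, the constant path above folds
// the whole computation into a single instruction:
//   AddImmediate(payload_reg, array_reg,
//                3 * 2 + compiler::target::TypedData::payload_offset() -
//                    kHeapObjectTag);
// With a register start instead, the start index and element size combine into
// one shifted-operand add (ASR for a net right shift, LSL otherwise), followed
// by an AddImmediate for the payload offset.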
451
452LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
453 Zone* zone,
454 bool opt) const {
455 const intptr_t kNumInputs = 3;
456 const intptr_t kNumTemps = 0;
457 auto* const summary = new (zone)
458 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
459
460 summary->set_in(kBasePos, Location::RequiresRegister());
461 summary->set_in(kIndexPos, Location::RequiresRegister());
462 // Only use a Smi constant for the index if multiplying it by the index
463 // scale would be an int32 constant.
464 const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
466 index(), kMinInt32 >> scale_shift,
467 kMaxInt32 >> scale_shift));
469 summary->set_out(0, Location::RequiresRegister());
470
471 return summary;
472}
473
474void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
475 const Register base_reg = locs()->in(kBasePos).reg();
476 const Location& index_loc = locs()->in(kIndexPos);
477 const Location& offset_loc = locs()->in(kOffsetPos);
478 const Register result_reg = locs()->out(0).reg();
479
480 if (index_loc.IsConstant()) {
481 if (offset_loc.IsConstant()) {
482 ASSERT_EQUAL(Smi::Cast(index_loc.constant()).Value(), 0);
483 ASSERT(Smi::Cast(offset_loc.constant()).Value() != 0);
484 // No index involved at all.
485 const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
486 __ AddImmediate(result_reg, base_reg, offset_value);
487 } else {
488 __ add(result_reg, base_reg, compiler::Operand(offset_loc.reg()));
489 // Don't need wrap-around as the index is constant only if multiplying
490 // it by the scale is an int32.
491 const int32_t scaled_index =
492 Smi::Cast(index_loc.constant()).Value() * index_scale();
493 __ AddImmediate(result_reg, scaled_index);
494 }
495 } else {
496 __ add(result_reg, base_reg,
497 compiler::Operand(index_loc.reg(), LSL,
498 Utils::ShiftForPowerOfTwo(index_scale())));
499 if (offset_loc.IsConstant()) {
500 const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
501 __ AddImmediate(result_reg, offset_value);
502 } else {
503 __ AddRegisters(result_reg, offset_loc.reg());
504 }
505 }
506}
507
508LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
509 bool opt) const {
510 const intptr_t kNumInputs = 1;
511 const intptr_t kNumTemps = 0;
512 LocationSummary* locs = new (zone)
513 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
514 if (representation() == kUnboxedDouble) {
515 locs->set_in(0, Location::RequiresFpuRegister());
516 } else if (representation() == kUnboxedInt64) {
517 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
518 Location::RequiresRegister()));
519 } else {
520 locs->set_in(0, LocationAnyOrConstant(value()));
521 }
522 return locs;
523}
524
525// Buffers registers to use STMDB in order to store
526// multiple registers at once.
527class ArgumentsMover : public ValueObject {
528 public:
529 // Flush all buffered registers.
530 void Flush(FlowGraphCompiler* compiler) {
531 if (pending_regs_ != 0) {
532 if (is_single_register_) {
533 __ StoreToOffset(
534 lowest_register_, SP,
535 lowest_register_sp_relative_index_ * compiler::target::kWordSize);
536 } else {
537 if (lowest_register_sp_relative_index_ == 0) {
538 __ stm(IA, SP, pending_regs_);
539 } else {
540 intptr_t offset =
541 lowest_register_sp_relative_index_ * compiler::target::kWordSize;
542 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
543 if (((1 << reg) & pending_regs_) != 0) {
544 __ StoreToOffset(static_cast<Register>(reg), SP, offset);
545 offset += compiler::target::kWordSize;
546 }
547 }
548 }
549 }
550 pending_regs_ = 0;
551 lowest_register_ = kNoRegister;
552 is_single_register_ = false;
553 }
554 }
555
556 // Buffer given register. May push previously buffered registers if needed.
557 void MoveRegister(FlowGraphCompiler* compiler,
558 intptr_t sp_relative_index,
559 Register reg) {
560 if (pending_regs_ != 0) {
561 ASSERT(lowest_register_ != kNoRegister);
562 // STMDB pushes higher registers first, so we can only buffer
563 // lower registers.
564 if (reg < lowest_register_) {
565 ASSERT((sp_relative_index + 1) == lowest_register_sp_relative_index_);
566 pending_regs_ |= (1 << reg);
567 lowest_register_ = reg;
568 is_single_register_ = false;
569 lowest_register_sp_relative_index_ = sp_relative_index;
570 return;
571 }
572 Flush(compiler);
573 }
574 pending_regs_ = (1 << reg);
575 lowest_register_ = reg;
576 is_single_register_ = true;
577 lowest_register_sp_relative_index_ = sp_relative_index;
578 }
579
580 // Return a register which can be used to hold a value of an argument.
581 Register FindFreeRegister(FlowGraphCompiler* compiler,
582 Instruction* move_arg) {
583 // Dart calling conventions do not have callee-save registers,
584 // so arguments pushing can clobber all allocatable registers
585 // except registers used in arguments which were not pushed yet,
586 // as well as ParallelMove and inputs of a call instruction.
587 intptr_t busy = kReservedCpuRegisters;
588 for (Instruction* instr = move_arg;; instr = instr->next()) {
589 ASSERT(instr != nullptr);
590 if (ParallelMoveInstr* parallel_move = instr->AsParallelMove()) {
591 for (intptr_t i = 0, n = parallel_move->NumMoves(); i < n; ++i) {
592 const auto src_loc = parallel_move->MoveOperandsAt(i)->src();
593 if (src_loc.IsRegister()) {
594 busy |= (1 << src_loc.reg());
595 } else if (src_loc.IsPairLocation()) {
596 busy |= (1 << src_loc.AsPairLocation()->At(0).reg());
597 busy |= (1 << src_loc.AsPairLocation()->At(1).reg());
598 }
599 }
600 } else {
601 ASSERT(instr->IsMoveArgument() || (instr->ArgumentCount() > 0));
602 for (intptr_t i = 0, n = instr->locs()->input_count(); i < n; ++i) {
603 const auto in_loc = instr->locs()->in(i);
604 if (in_loc.IsRegister()) {
605 busy |= (1 << in_loc.reg());
606 } else if (in_loc.IsPairLocation()) {
607 const auto pair_location = in_loc.AsPairLocation();
608 busy |= (1 << pair_location->At(0).reg());
609 busy |= (1 << pair_location->At(1).reg());
610 }
611 }
612 if (instr->ArgumentCount() > 0) {
613 break;
614 }
615 }
616 }
617 if (pending_regs_ != 0) {
618 // Find the highest available register which can be pushed along with
619 // pending registers.
620 Register reg = HighestAvailableRegister(busy, lowest_register_);
621 if (reg != kNoRegister) {
622 return reg;
623 }
624 Flush(compiler);
625 }
626 // At this point there are no pending buffered registers.
627 // Use LR as it's the highest free register, it is not allocatable and
628 // it is clobbered by the call.
629 CLOBBERS_LR({
630 static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
631 "LR should not be allocatable");
632 return LR;
633 });
634 }
635
636 private:
637 RegList pending_regs_ = 0;
638 Register lowest_register_ = kNoRegister;
639 intptr_t lowest_register_sp_relative_index_ = -1;
640 bool is_single_register_ = false;
641
642 Register HighestAvailableRegister(intptr_t busy, Register upper_bound) {
643 for (intptr_t i = upper_bound - 1; i >= 0; --i) {
644 if ((busy & (1 << i)) == 0) {
645 return static_cast<Register>(i);
646 }
647 }
648 return kNoRegister;
649 }
650};
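// Illustrative note (assumed registers): if the MoveArgument chain supplies
// stack index 2 from R5, then index 1 from R3, then index 0 from R1, each new
// register is lower-numbered than the buffered ones and its index is exactly
// one below, so all three stay buffered and Flush() emits a single
//   stm IA, SP, {R1, R3, R5}
// which stores R1 at [SP], R3 at [SP, #4] and R5 at [SP, #8]. Any register or
// index that breaks this descending pattern forces a Flush() before buffering
// restarts with the new register.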
651
652void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
653 ASSERT(compiler->is_optimizing());
654 if (previous()->IsMoveArgument()) {
655 // Already generated by the first MoveArgument in the chain.
656 return;
657 }
658
659 ArgumentsMover pusher;
660 for (MoveArgumentInstr* move_arg = this; move_arg != nullptr;
661 move_arg = move_arg->next()->AsMoveArgument()) {
662 const Location value = move_arg->locs()->in(0);
663 if (value.IsRegister()) {
664 pusher.MoveRegister(compiler, move_arg->location().stack_index(),
665 value.reg());
666 } else if (value.IsPairLocation()) {
667 RELEASE_ASSERT(move_arg->location().IsPairLocation());
668 auto pair = move_arg->location().AsPairLocation();
669 RELEASE_ASSERT(pair->At(0).IsStackSlot());
670 RELEASE_ASSERT(pair->At(1).IsStackSlot());
671 pusher.MoveRegister(compiler, pair->At(1).stack_index(),
672 value.AsPairLocation()->At(1).reg());
673 pusher.MoveRegister(compiler, pair->At(0).stack_index(),
674 value.AsPairLocation()->At(0).reg());
675 } else if (value.IsFpuRegister()) {
676 pusher.Flush(compiler);
677 __ StoreDToOffset(
678 EvenDRegisterOf(value.fpu_reg()), SP,
679 move_arg->location().stack_index() * compiler::target::kWordSize);
680 } else {
681 const Register reg = pusher.FindFreeRegister(compiler, move_arg);
682 ASSERT(reg != kNoRegister);
683 if (value.IsConstant()) {
684 __ LoadObject(reg, value.constant());
685 } else {
686 ASSERT(value.IsStackSlot());
687 const intptr_t value_offset = value.ToStackSlotOffset();
688 __ LoadFromOffset(reg, value.base_reg(), value_offset);
689 }
690 pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
691 }
692 }
693 pusher.Flush(compiler);
694}
695
696LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
697 bool opt) const {
698 const intptr_t kNumInputs = 1;
699 const intptr_t kNumTemps = 0;
700 LocationSummary* locs = new (zone)
701 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
702 switch (representation()) {
703 case kTagged:
704 locs->set_in(0,
705 Location::RegisterLocation(CallingConventions::kReturnReg));
706 break;
707 case kPairOfTagged:
708 case kUnboxedInt64:
709 locs->set_in(
710 0, Location::Pair(
711 Location::RegisterLocation(CallingConventions::kReturnReg),
712 Location::RegisterLocation(
713 CallingConventions::kSecondReturnReg)));
714 break;
715 case kUnboxedDouble:
716 locs->set_in(
717 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
718 break;
719 default:
720 UNREACHABLE();
721 break;
722 }
723 return locs;
724}
725
726// Attempt optimized compilation at the return instruction instead of at the entry.
727// The entry needs to be patchable: no inlined objects are allowed in the area
728// that will be overwritten by the patch instructions (a branch macro sequence).
729void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
730 if (locs()->in(0).IsRegister()) {
731 const Register result = locs()->in(0).reg();
732 ASSERT(result == CallingConventions::kReturnReg);
733 } else if (locs()->in(0).IsPairLocation()) {
734 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
735 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
736 ASSERT(result_lo == CallingConventions::kReturnReg);
737 ASSERT(result_hi == CallingConventions::kSecondReturnReg);
738 } else {
739 ASSERT(locs()->in(0).IsFpuRegister());
740 const FpuRegister result = locs()->in(0).fpu_reg();
741 ASSERT(result == CallingConventions::kReturnFpuReg);
742 }
743
744 if (compiler->parsed_function().function().IsAsyncFunction() ||
745 compiler->parsed_function().function().IsAsyncGenerator()) {
746 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
747 const Code& stub = GetReturnStub(compiler);
748 compiler->EmitJumpToStub(stub);
749 return;
750 }
751
752 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
753 __ Ret();
754 return;
755 }
756
757#if defined(DEBUG)
758 compiler::Label stack_ok;
759 __ Comment("Stack Check");
760 const intptr_t fp_sp_dist =
761 (compiler::target::frame_layout.first_local_from_fp + 1 -
762 compiler->StackSize()) *
763 compiler::target::kWordSize;
764 ASSERT(fp_sp_dist <= 0);
765 __ sub(R2, SP, compiler::Operand(FP));
766 __ CompareImmediate(R2, fp_sp_dist);
767 __ b(&stack_ok, EQ);
768 __ bkpt(0);
769 __ Bind(&stack_ok);
770#endif
771 ASSERT(__ constant_pool_allowed());
772 __ LeaveDartFrameAndReturn(); // Disallows constant pool use.
773 // This DartReturnInstr may be emitted out of order by the optimizer. The next
774 // block may be a target expecting a properly set constant pool pointer.
775 __ set_constant_pool_allowed(true);
776}
777
778// Detect pattern when one value is zero and another is a power of 2.
779static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
780 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
781 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
782}
783
784LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
785 bool opt) const {
786 comparison()->InitializeLocationSummary(zone, opt);
787 return comparison()->locs();
788}
789
790void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
791 const Register result = locs()->out(0).reg();
792
793 Location left = locs()->in(0);
794 Location right = locs()->in(1);
795 ASSERT(!left.IsConstant() || !right.IsConstant());
796
797 // Clear out register.
798 __ eor(result, result, compiler::Operand(result));
799
800 // Emit comparison code. This must not overwrite the result register.
801 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
802 // the labels or returning an invalid condition.
803 BranchLabels labels = {nullptr, nullptr, nullptr};
804 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
805 ASSERT(true_condition != kInvalidCondition);
806
807 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
808
809 intptr_t true_value = if_true_;
810 intptr_t false_value = if_false_;
811
812 if (is_power_of_two_kind) {
813 if (true_value == 0) {
814 // We need to have zero in result on true_condition.
815 true_condition = InvertCondition(true_condition);
816 }
817 } else {
818 if (true_value == 0) {
819 // Swap values so that false_value is zero.
820 intptr_t temp = true_value;
821 true_value = false_value;
822 false_value = temp;
823 } else {
824 true_condition = InvertCondition(true_condition);
825 }
826 }
827
828 __ mov(result, compiler::Operand(1), true_condition);
829
830 if (is_power_of_two_kind) {
831 const intptr_t shift =
832 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
833 __ Lsl(result, result, compiler::Operand(shift + kSmiTagSize));
834 } else {
835 __ sub(result, result, compiler::Operand(1));
836 const int32_t val = compiler::target::ToRawSmi(true_value) -
837 compiler::target::ToRawSmi(false_value);
838 __ AndImmediate(result, result, val);
839 if (false_value != 0) {
840 __ AddImmediate(result, compiler::target::ToRawSmi(false_value));
841 }
842 }
843}
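// Illustrative note (assumed constants): with if_true_ == 4 and if_false_ == 0
// (the power-of-two case), `result` becomes 1 when the comparison holds and 0
// otherwise, and the left shift by ShiftForPowerOfTwo(4) + kSmiTagSize == 3
// yields the raw Smi 4 (bits 0b1000) or Smi 0 without a branch. With
// if_true_ == 3 and if_false_ == 7, the condition is inverted, so after
// `sub result, result, #1` the register is -1 when the comparison holds and 0
// otherwise; masking with ToRawSmi(3) - ToRawSmi(7) == -8 and then adding
// ToRawSmi(7) == 14 produces raw Smi 3 when it holds and raw Smi 7 when it
// does not.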
844
845LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
846 bool opt) const {
847 const intptr_t kNumInputs = 1;
848 const intptr_t kNumTemps = 0;
849 LocationSummary* summary = new (zone)
850 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
851 summary->set_in(
852 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
853 return MakeCallSummary(zone, this, summary);
854}
855
856void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
857 // Load arguments descriptor in ARGS_DESC_REG.
858 const intptr_t argument_count = ArgumentCount(); // Includes type args.
859 const Array& arguments_descriptor =
860 Array::ZoneHandle(Z, GetArgumentsDescriptor());
861 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
862
863 if (FLAG_precompiled_mode) {
864 ASSERT(locs()->in(0).reg() == R0);
865 // R0: Closure with a cached entry point.
866 __ ldr(R2, compiler::FieldAddress(
867 R0, compiler::target::Closure::entry_point_offset()));
868 } else {
869 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
870 // FUNCTION_REG: Function.
871 __ ldr(CODE_REG,
872 compiler::FieldAddress(FUNCTION_REG,
873 compiler::target::Function::code_offset()));
874 // Closure functions only have one entry point.
875 __ ldr(R2,
876 compiler::FieldAddress(
877 FUNCTION_REG, compiler::target::Function::entry_point_offset()));
878 }
879
880 // ARGS_DESC_REG: Arguments descriptor array.
881 // R2: instructions entry point.
882 if (!FLAG_precompiled_mode) {
883 // R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
884 __ LoadImmediate(IC_DATA_REG, 0);
885 }
886 __ blx(R2);
887 compiler->EmitCallsiteMetadata(source(), deopt_id(),
888 UntaggedPcDescriptors::kOther, locs(), env());
889 compiler->EmitDropArguments(argument_count);
890}
891
892LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
893 bool opt) const {
894 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
895 LocationSummary::kNoCall);
896}
897
898void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
899 const Register result = locs()->out(0).reg();
900 __ LoadFromOffset(result, FP,
901 compiler::target::FrameOffsetInBytesForVariable(&local()));
902}
903
904LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
905 bool opt) const {
906 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
907 LocationSummary::kNoCall);
908}
909
910void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
911 const Register value = locs()->in(0).reg();
912 const Register result = locs()->out(0).reg();
913 ASSERT(result == value); // Assert that register assignment is correct.
914 __ StoreToOffset(value, FP,
915 compiler::target::FrameOffsetInBytesForVariable(&local()));
916}
917
918LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
919 bool opt) const {
920 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
921 LocationSummary::kNoCall);
922}
923
924void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
925 // The register allocator drops constant definitions that have no uses.
926 if (!locs()->out(0).IsInvalid()) {
927 const Register result = locs()->out(0).reg();
928 __ LoadObject(result, value());
929 }
930}
931
932void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
933 const Location& destination,
934 Register tmp,
935 intptr_t pair_index) {
936 if (destination.IsRegister()) {
937 if (RepresentationUtils::IsUnboxedInteger(representation())) {
938 int64_t v;
939 const bool ok = compiler::HasIntegerValue(value_, &v);
941 if (value_.IsSmi() &&
942 RepresentationUtils::IsUnsignedInteger(representation())) {
943 // If the value is negative, then the sign bit was preserved during
944 // Smi untagging, which means the resulting value may be unexpected.
945 ASSERT(v >= 0);
946 }
947 __ LoadImmediate(destination.reg(), pair_index == 0
948 ? Utils::Low32Bits(v)
949 : Utils::High32Bits(v));
950 } else {
951 ASSERT(representation() == kTagged);
952 __ LoadObject(destination.reg(), value_);
953 }
954 } else if (destination.IsFpuRegister()) {
955 switch (representation()) {
956 case kUnboxedFloat:
957 __ LoadSImmediate(
958 EvenSRegisterOf(EvenDRegisterOf(destination.fpu_reg())),
959 Double::Cast(value_).value());
960 break;
961 case kUnboxedDouble:
962 ASSERT(tmp != kNoRegister);
963 __ LoadDImmediate(EvenDRegisterOf(destination.fpu_reg()),
964 Double::Cast(value_).value(), tmp);
965 break;
966 case kUnboxedFloat64x2:
967 __ LoadQImmediate(destination.fpu_reg(),
968 Float64x2::Cast(value_).value());
969 break;
970 case kUnboxedFloat32x4:
971 __ LoadQImmediate(destination.fpu_reg(),
972 Float32x4::Cast(value_).value());
973 break;
974 case kUnboxedInt32x4:
975 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
976 break;
977 default:
978 UNREACHABLE();
979 }
980 } else if (destination.IsDoubleStackSlot()) {
981 ASSERT(tmp != kNoRegister);
982 __ LoadDImmediate(DTMP, Double::Cast(value_).value(), tmp);
983 const intptr_t dest_offset = destination.ToStackSlotOffset();
984 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
985 } else if (destination.IsQuadStackSlot()) {
986 switch (representation()) {
987 case kUnboxedFloat64x2:
988 __ LoadQImmediate(QTMP, Float64x2::Cast(value_).value());
989 break;
990 case kUnboxedFloat32x4:
991 __ LoadQImmediate(QTMP, Float32x4::Cast(value_).value());
992 break;
993 case kUnboxedInt32x4:
994 __ LoadQImmediate(QTMP, Int32x4::Cast(value_).value());
995 break;
996 default:
997 UNREACHABLE();
998 }
999 const intptr_t dest_offset = destination.ToStackSlotOffset();
1000 __ StoreMultipleDToOffset(EvenDRegisterOf(QTMP), 2, destination.base_reg(),
1001 dest_offset);
1002 } else {
1003 ASSERT(destination.IsStackSlot());
1004 ASSERT(tmp != kNoRegister);
1005 const intptr_t dest_offset = destination.ToStackSlotOffset();
1006 if (RepresentationUtils::IsUnboxedInteger(representation())) {
1007 int64_t v;
1008 const bool ok = compiler::HasIntegerValue(value_, &v);
1010 __ LoadImmediate(
1011 tmp, pair_index == 0 ? Utils::Low32Bits(v) : Utils::High32Bits(v));
1012 } else if (representation() == kUnboxedFloat) {
1013 int32_t float_bits =
1014 bit_cast<int32_t, float>(Double::Cast(value_).value());
1015 __ LoadImmediate(tmp, float_bits);
1016 } else {
1017 ASSERT(representation() == kTagged);
1018 __ LoadObject(tmp, value_);
1019 }
1020 __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
1021 }
1022}
1023
1024LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
1025 bool opt) const {
1026 const bool is_unboxed_int =
1027 RepresentationUtils::IsUnboxedInteger(representation());
1028 ASSERT(!is_unboxed_int || RepresentationUtils::ValueSize(representation()) <=
1029 compiler::target::kWordSize);
1030 const intptr_t kNumInputs = 0;
1031 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
1032 LocationSummary* locs = new (zone)
1033 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1034 if (is_unboxed_int) {
1035 locs->set_out(0, Location::RequiresRegister());
1036 } else {
1037 ASSERT(representation_ == kUnboxedDouble);
1038 locs->set_out(0, Location::RequiresFpuRegister());
1039 }
1040 if (kNumTemps > 0) {
1041 locs->set_temp(0, Location::RequiresRegister());
1042 }
1043 return locs;
1044}
1045
1046void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1047 // The register allocator drops constant definitions that have no uses.
1048 if (!locs()->out(0).IsInvalid()) {
1049 const Register scratch =
1050 locs()->temp_count() == 0 ? kNoRegister : locs()->temp(0).reg();
1051 EmitMoveToLocation(compiler, locs()->out(0), scratch);
1052 }
1053}
1054
1055LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
1056 bool opt) const {
1057 auto const dst_type_loc =
1058 LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg);
1059
1060 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
1061 // since TTS preserves them. So we make this a `kNoCall` summary,
1062 // even though most other registers can be modified by the stub. To tell the
1063 // register allocator about it, we reserve all the other registers as
1064 // temporary registers.
1065 // TODO(http://dartbug.com/32788): Simplify this.
1066
1067 const intptr_t kNonChangeableInputRegs =
1068 (1 << TypeTestABI::kInstanceReg) |
1069 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
1070 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
1071 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
1072
1073 const intptr_t kNumInputs = 4;
1074
1075 // We invoke a stub that can potentially clobber any CPU register
1076 // but can only clobber FPU registers on the slow path when
1077 // entering runtime. Preserve all FPU registers that are
1078 // not guaranteed to be preserved by the ABI.
1079 const intptr_t kCpuRegistersToPreserve =
1080 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
1081 const intptr_t kFpuRegistersToPreserve =
1082 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) &
1083 ~(Utils::NBitMask<intptr_t>(kAbiPreservedFpuRegCount)
1084 << kAbiFirstPreservedFpuReg) &
1085 ~(1 << FpuTMP);
1086
1087 const intptr_t kNumTemps = (Utils::CountOneBits64(kCpuRegistersToPreserve) +
1088 Utils::CountOneBits64(kFpuRegistersToPreserve));
1089
1090 LocationSummary* summary = new (zone) LocationSummary(
1091 zone, kNumInputs, kNumTemps, LocationSummary::kCallCalleeSafe);
1092 summary->set_in(kInstancePos,
1093 Location::RegisterLocation(TypeTestABI::kInstanceReg));
1094 summary->set_in(kDstTypePos, dst_type_loc);
1095 summary->set_in(
1096 kInstantiatorTAVPos,
1097 Location::RegisterLocation(TypeTestABI::kInstantiatorTypeArgumentsReg));
1098 summary->set_in(kFunctionTAVPos, Location::RegisterLocation(
1099 TypeTestABI::kFunctionTypeArgumentsReg));
1100 summary->set_out(0, Location::SameAsFirstInput());
1101
1102 // Let's reserve all registers except for the input ones.
1103 intptr_t next_temp = 0;
1104 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1105 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
1106 if (should_preserve) {
1107 summary->set_temp(next_temp++,
1108 Location::RegisterLocation(static_cast<Register>(i)));
1109 }
1110 }
1111
1112 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1113 const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
1114 if (should_preserve) {
1115 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
1116 static_cast<FpuRegister>(i)));
1117 }
1118 }
1119
1120 return summary;
1121}
1122
1123void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1124 ASSERT(locs()->always_calls());
1125
1126 auto object_store = compiler->isolate_group()->object_store();
1127 const auto& assert_boolean_stub =
1128 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
1129
1130 compiler::Label done;
1133 __ b(&done, NOT_ZERO);
1134 compiler->GenerateStubCall(source(), assert_boolean_stub,
1135 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
1136 deopt_id(), env());
1137 __ Bind(&done);
1138}
1139
1140static Condition TokenKindToIntCondition(Token::Kind kind) {
1141 switch (kind) {
1142 case Token::kEQ:
1143 return EQ;
1144 case Token::kNE:
1145 return NE;
1146 case Token::kLT:
1147 return LT;
1148 case Token::kGT:
1149 return GT;
1150 case Token::kLTE:
1151 return LE;
1152 case Token::kGTE:
1153 return GE;
1154 default:
1155 UNREACHABLE();
1156 return VS;
1157 }
1158}
1159
1160static bool CanBePairOfImmediateOperands(const dart::Object& constant,
1161 compiler::Operand* low,
1162 compiler::Operand* high) {
1163 int64_t imm;
1164 if (!compiler::HasIntegerValue(constant, &imm)) {
1165 return false;
1166 }
1167 return compiler::Operand::CanHold(Utils::Low32Bits(imm), low) &&
1168 compiler::Operand::CanHold(Utils::High32Bits(imm), high);
1169}
1170
1171static bool CanBePairOfImmediateOperands(Value* value,
1172 compiler::Operand* low,
1173 compiler::Operand* high) {
1174 if (!value->BindsToConstant()) {
1175 return false;
1176 }
1177 return CanBePairOfImmediateOperands(value->BoundConstant(), low, high);
1178}
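// Illustrative note (assumed constants): the 64-bit value 0xFF00000000000000
// qualifies, since both halves are valid ARM data-processing immediates
// (0x00000000, and 0xFF000000 which is 0xFF rotated right by 8), while
// 0x0000000012345678 does not, because 0x12345678 cannot be expressed as an
// 8-bit value rotated by an even amount.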
1179
1180LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1181 bool opt) const {
1182 const intptr_t kNumInputs = 2;
1183 if (is_null_aware()) {
1184 const intptr_t kNumTemps = 1;
1185 LocationSummary* locs = new (zone)
1186 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1187 locs->set_in(0, Location::RequiresRegister());
1188 locs->set_in(1, Location::RequiresRegister());
1189 locs->set_temp(0, Location::RequiresRegister());
1190 locs->set_out(0, Location::RequiresRegister());
1191 return locs;
1192 }
1193 if (operation_cid() == kMintCid) {
1194 compiler::Operand o;
1195 const intptr_t kNumTemps = 0;
1196 LocationSummary* locs = new (zone)
1197 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1198 if (CanBePairOfImmediateOperands(left(), &o, &o)) {
1199 locs->set_in(0, Location::Constant(left()->definition()->AsConstant()));
1200 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1201 Location::RequiresRegister()));
1202 } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {
1203 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1204 Location::RequiresRegister()));
1205 locs->set_in(1, Location::Constant(right()->definition()->AsConstant()));
1206 } else {
1207 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1208 Location::RequiresRegister()));
1209 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1210 Location::RequiresRegister()));
1211 }
1212 locs->set_out(0, Location::RequiresRegister());
1213 return locs;
1214 }
1215 if (operation_cid() == kDoubleCid) {
1216 const intptr_t kNumTemps = 0;
1217 LocationSummary* locs = new (zone)
1218 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1219 locs->set_in(0, Location::RequiresFpuRegister());
1220 locs->set_in(1, Location::RequiresFpuRegister());
1221 locs->set_out(0, Location::RequiresRegister());
1222 return locs;
1223 }
1224 if (operation_cid() == kSmiCid || operation_cid() == kIntegerCid) {
1225 const intptr_t kNumTemps = 0;
1226 LocationSummary* locs = new (zone)
1227 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1228 locs->set_in(0, LocationRegisterOrConstant(left()));
1229 // Only one input can be a constant operand. The case of two constant
1230 // operands should be handled by constant propagation.
1231 locs->set_in(1, locs->in(0).IsConstant()
1232 ? Location::RequiresRegister()
1233 : LocationRegisterOrConstant(right()));
1234 locs->set_out(0, Location::RequiresRegister());
1235 return locs;
1236 }
1237 UNREACHABLE();
1238 return nullptr;
1239}
1240
1241static void LoadValueCid(FlowGraphCompiler* compiler,
1242 Register value_cid_reg,
1243 Register value_reg,
1244 compiler::Label* value_is_smi = nullptr) {
1245 if (value_is_smi == nullptr) {
1246 __ mov(value_cid_reg, compiler::Operand(kSmiCid));
1247 }
1248 __ tst(value_reg, compiler::Operand(kSmiTagMask));
1249 if (value_is_smi == nullptr) {
1250 __ LoadClassId(value_cid_reg, value_reg, NE);
1251 } else {
1252 __ b(value_is_smi, EQ);
1253 __ LoadClassId(value_cid_reg, value_reg);
1254 }
1255}
1256
1257static Condition FlipCondition(Condition condition) {
1258 switch (condition) {
1259 case EQ:
1260 return EQ;
1261 case NE:
1262 return NE;
1263 case LT:
1264 return GT;
1265 case LE:
1266 return GE;
1267 case GT:
1268 return LT;
1269 case GE:
1270 return LE;
1271 case CC:
1272 return HI;
1273 case LS:
1274 return CS;
1275 case HI:
1276 return CC;
1277 case CS:
1278 return LS;
1279 default:
1280 UNREACHABLE();
1281 return EQ;
1282 }
1283}
1284
1285static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
1286 Condition true_condition,
1287 BranchLabels labels) {
1288 if (labels.fall_through == labels.false_label) {
1289 // If the next block is the false successor we will fall through to it.
1290 __ b(labels.true_label, true_condition);
1291 } else {
1292 // If the next block is not the false successor we will branch to it.
1293 Condition false_condition = InvertCondition(true_condition);
1294 __ b(labels.false_label, false_condition);
1295
1296 // Fall through or jump to the true successor.
1297 if (labels.fall_through != labels.true_label) {
1298 __ b(labels.true_label);
1299 }
1300 }
1301}
1302
1303static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1304 LocationSummary* locs,
1305 Token::Kind kind) {
1306 Location left = locs->in(0);
1307 Location right = locs->in(1);
1308 ASSERT(!left.IsConstant() || !right.IsConstant());
1309
1310 Condition true_condition = TokenKindToIntCondition(kind);
1311
1312 if (left.IsConstant()) {
1313 __ CompareObject(right.reg(), left.constant());
1314 true_condition = FlipCondition(true_condition);
1315 } else if (right.IsConstant()) {
1316 __ CompareObject(left.reg(), right.constant());
1317 } else {
1318 __ cmp(left.reg(), compiler::Operand(right.reg()));
1319 }
1320 return true_condition;
1321}
1322
1323static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
1324 LocationSummary* locs,
1325 Token::Kind kind) {
1326 Location left = locs->in(0);
1327 Location right = locs->in(1);
1328 ASSERT(!left.IsConstant() || !right.IsConstant());
1329
1330 Condition true_condition = TokenKindToIntCondition(kind);
1331
1332 if (left.IsConstant()) {
1333 __ CompareImmediate(
1334 right.reg(),
1335 static_cast<uword>(Integer::Cast(left.constant()).AsInt64Value()));
1336 true_condition = FlipCondition(true_condition);
1337 } else if (right.IsConstant()) {
1338 __ CompareImmediate(
1339 left.reg(),
1340 static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
1341 } else {
1342 __ cmp(left.reg(), compiler::Operand(right.reg()));
1343 }
1344 return true_condition;
1345}
1346
1347static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
1348 LocationSummary* locs,
1349 Token::Kind kind) {
1351 PairLocation* left_pair;
1352 compiler::Operand right_lo, right_hi;
1353 if (locs->in(0).IsConstant()) {
1354 const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
1355 &right_lo, &right_hi);
1357 left_pair = locs->in(1).AsPairLocation();
1358 } else if (locs->in(1).IsConstant()) {
1359 const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
1360 &right_lo, &right_hi);
1362 left_pair = locs->in(0).AsPairLocation();
1363 } else {
1364 left_pair = locs->in(0).AsPairLocation();
1365 PairLocation* right_pair = locs->in(1).AsPairLocation();
1366 right_lo = compiler::Operand(right_pair->At(0).reg());
1367 right_hi = compiler::Operand(right_pair->At(1).reg());
1368 }
1369 Register left_lo = left_pair->At(0).reg();
1370 Register left_hi = left_pair->At(1).reg();
1371
1372 // Compare lower.
1373 __ cmp(left_lo, right_lo);
1374 // Compare upper if lower is equal.
1375 __ cmp(left_hi, right_hi, EQ);
1376 return TokenKindToIntCondition(kind);
1377}
1378
1379static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
1380 LocationSummary* locs,
1381 Token::Kind kind,
1382 BranchLabels labels) {
1383 PairLocation* left_pair;
1384 compiler::Operand right_lo, right_hi;
1385 Condition true_condition = TokenKindToIntCondition(kind);
1386 if (locs->in(0).IsConstant()) {
1387 const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
1388 &right_lo, &right_hi);
1390 left_pair = locs->in(1).AsPairLocation();
1391 true_condition = FlipCondition(true_condition);
1392 } else if (locs->in(1).IsConstant()) {
1393 const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
1394 &right_lo, &right_hi);
1396 left_pair = locs->in(0).AsPairLocation();
1397 } else {
1398 left_pair = locs->in(0).AsPairLocation();
1399 PairLocation* right_pair = locs->in(1).AsPairLocation();
1400 right_lo = compiler::Operand(right_pair->At(0).reg());
1401 right_hi = compiler::Operand(right_pair->At(1).reg());
1402 }
1403 Register left_lo = left_pair->At(0).reg();
1404 Register left_hi = left_pair->At(1).reg();
1405
1406 // 64-bit comparison.
1407 Condition hi_cond, lo_cond;
1408 switch (true_condition) {
1409 case LT:
1410 hi_cond = LT;
1411 lo_cond = CC;
1412 break;
1413 case GT:
1414 hi_cond = GT;
1415 lo_cond = HI;
1416 break;
1417 case LE:
1418 hi_cond = LT;
1419 lo_cond = LS;
1420 break;
1421 case GE:
1422 hi_cond = GT;
1423 lo_cond = CS;
1424 break;
1425 default:
1426 UNREACHABLE();
1427 hi_cond = lo_cond = VS;
1428 }
1429 // Compare upper halves first.
1430 __ cmp(left_hi, right_hi);
1431 __ b(labels.true_label, hi_cond);
1432 __ b(labels.false_label, FlipCondition(hi_cond));
1433
1434 // If higher words are equal, compare lower words.
1435 __ cmp(left_lo, right_lo);
1436 return lo_cond;
1437}
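// Illustrative note (assumed operand values): for kind == Token::kLT the code
// above branches straight to the true label when the signed high words already
// satisfy LT and to the false label when they satisfy GT; only when the high
// words are equal does the caller branch on the low-word comparison, using the
// unsigned condition CC. For example, -1 < 0 is decided purely by the high
// words (0xFFFFFFFF vs 0x00000000 compares LT as signed), while 5 < 9 falls
// through to the unsigned low-word compare since both high words are zero.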
1438
1439static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1440 LocationSummary* locs,
1441 Token::Kind kind,
1442 BranchLabels labels) {
1443 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1444 const Register left = locs->in(0).reg();
1445 const Register right = locs->in(1).reg();
1446 const Register temp = locs->temp(0).reg();
1447 const Condition true_condition = TokenKindToIntCondition(kind);
1448 compiler::Label* equal_result =
1449 (true_condition == EQ) ? labels.true_label : labels.false_label;
1450 compiler::Label* not_equal_result =
1451 (true_condition == EQ) ? labels.false_label : labels.true_label;
1452
1453 // Check if operands have the same value. If they don't, then they could
1454 // be equal only if both of them are Mints with the same value.
1455 __ cmp(left, compiler::Operand(right));
1456 __ b(equal_result, EQ);
1457 __ and_(temp, left, compiler::Operand(right));
1458 __ BranchIfSmi(temp, not_equal_result);
1459 __ CompareClassId(left, kMintCid, temp);
1460 __ b(not_equal_result, NE);
1461 __ CompareClassId(right, kMintCid, temp);
1462 __ b(not_equal_result, NE);
1463 __ LoadFieldFromOffset(temp, left, compiler::target::Mint::value_offset());
1464 __ LoadFieldFromOffset(TMP, right, compiler::target::Mint::value_offset());
1465 __ cmp(temp, compiler::Operand(TMP));
1466 __ LoadFieldFromOffset(
1467 temp, left,
1468 compiler::target::Mint::value_offset() + compiler::target::kWordSize,
1470 __ LoadFieldFromOffset(
1471 TMP, right,
1472 compiler::target::Mint::value_offset() + compiler::target::kWordSize,
1474 __ cmp(temp, compiler::Operand(TMP), EQ);
1475 return true_condition;
1476}
1477
1478static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1479 switch (kind) {
1480 case Token::kEQ:
1481 return EQ;
1482 case Token::kNE:
1483 return NE;
1484 case Token::kLT:
1485 return LT;
1486 case Token::kGT:
1487 return GT;
1488 case Token::kLTE:
1489 return LE;
1490 case Token::kGTE:
1491 return GE;
1492 default:
1493 UNREACHABLE();
1494 return VS;
1495 }
1496}
1497
1498static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1499 LocationSummary* locs,
1500 BranchLabels labels,
1501 Token::Kind kind) {
1502 const QRegister left = locs->in(0).fpu_reg();
1503 const QRegister right = locs->in(1).fpu_reg();
1504 const DRegister dleft = EvenDRegisterOf(left);
1505 const DRegister dright = EvenDRegisterOf(right);
1506
1507 switch (kind) {
1508 case Token::kEQ:
1509 __ vcmpd(dleft, dright);
1510 __ vmstat();
1511 return EQ;
1512 case Token::kNE:
1513 __ vcmpd(dleft, dright);
1514 __ vmstat();
1515 return NE;
1516 case Token::kLT:
1517 __ vcmpd(dright, dleft); // Flip to handle NaN.
1518 __ vmstat();
1519 return GT;
1520 case Token::kGT:
1521 __ vcmpd(dleft, dright);
1522 __ vmstat();
1523 return GT;
1524 case Token::kLTE:
1525 __ vcmpd(dright, dleft); // Flip to handle NaN.
1526 __ vmstat();
1527 return GE;
1528 case Token::kGTE:
1529 __ vcmpd(dleft, dright);
1530 __ vmstat();
1531 return GE;
1532 default:
1533 UNREACHABLE();
1534 return VS;
1535 }
1536}
1537
1538Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1539 BranchLabels labels) {
1540 if (is_null_aware()) {
1541 ASSERT(operation_cid() == kMintCid);
1542 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1543 }
1544 if (operation_cid() == kSmiCid) {
1545 return EmitSmiComparisonOp(compiler, locs(), kind());
1546 } else if (operation_cid() == kIntegerCid) {
1547 return EmitWordComparisonOp(compiler, locs(), kind());
1548 } else if (operation_cid() == kMintCid) {
1549 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
1550 } else {
1551 ASSERT(operation_cid() == kDoubleCid);
1552 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1553 }
1554}
1555
1556LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1557 const intptr_t kNumInputs = 2;
1558 const intptr_t kNumTemps = 0;
1559 LocationSummary* locs = new (zone)
1560 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1561 locs->set_in(0, Location::RequiresRegister());
1562 // Only one input can be a constant operand. The case of two constant
1563 // operands should be handled by constant propagation.
1564 locs->set_in(1, LocationRegisterOrConstant(right()));
1565 return locs;
1566}
1567
1568Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1569 BranchLabels labels) {
1570 const Register left = locs()->in(0).reg();
1571 Location right = locs()->in(1);
1572 if (right.IsConstant()) {
1574 const int32_t imm = compiler::target::ToRawSmi(right.constant());
1575 __ TestImmediate(left, imm);
1576 } else {
1577 __ tst(left, compiler::Operand(right.reg()));
1578 }
1579 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1580 return true_condition;
1581}
1582
1583LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1584 bool opt) const {
1585 const intptr_t kNumInputs = 1;
1586 const intptr_t kNumTemps = 1;
1587 LocationSummary* locs = new (zone)
1588 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1589 locs->set_in(0, Location::RequiresRegister());
1590 locs->set_temp(0, Location::RequiresRegister());
1591 locs->set_out(0, Location::RequiresRegister());
1592 return locs;
1593}
1594
1595Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1596 BranchLabels labels) {
1597 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1598 const Register val_reg = locs()->in(0).reg();
1599 const Register cid_reg = locs()->temp(0).reg();
1600
1601 compiler::Label* deopt =
1602 CanDeoptimize()
1603 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1604 : nullptr;
1605
1606 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1607 const ZoneGrowableArray<intptr_t>& data = cid_results();
1608 ASSERT(data[0] == kSmiCid);
1609 bool result = data[1] == true_result;
1610 __ tst(val_reg, compiler::Operand(kSmiTagMask));
1611 __ b(result ? labels.true_label : labels.false_label, EQ);
1612 __ LoadClassId(cid_reg, val_reg);
1613
1614 for (intptr_t i = 2; i < data.length(); i += 2) {
1615 const intptr_t test_cid = data[i];
1616 ASSERT(test_cid != kSmiCid);
1617 result = data[i + 1] == true_result;
1618 __ CompareImmediate(cid_reg, test_cid);
1619 __ b(result ? labels.true_label : labels.false_label, EQ);
1620 }
1621 // No match found, deoptimize or default action.
1622 if (deopt == nullptr) {
1623 // If the cid is not in the list, jump to the opposite label from the cids
1624 // that are in the list. These must be all the same (see asserts in the
1625 // constructor).
1626 compiler::Label* target = result ? labels.false_label : labels.true_label;
1627 if (target != labels.fall_through) {
1628 __ b(target);
1629 }
1630 } else {
1631 __ b(deopt);
1632 }
1633 // Dummy result as this method already did the jump, there's no need
1634 // for the caller to branch on a condition.
1635 return kInvalidCondition;
1636}
1637
1638LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1639 bool opt) const {
1640 const intptr_t kNumInputs = 2;
1641 const intptr_t kNumTemps = 0;
1642 if (operation_cid() == kMintCid) {
1643 compiler::Operand o;
1644 const intptr_t kNumTemps = 0;
1645 LocationSummary* locs = new (zone)
1646 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1647 if (CanBePairOfImmediateOperands(left(), &o, &o)) {
1648 locs->set_in(0, Location::Constant(left()->definition()->AsConstant()));
1649 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1650 Location::RequiresRegister()));
1651 } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {
1652 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1653 Location::RequiresRegister()));
1654 locs->set_in(1, Location::Constant(right()->definition()->AsConstant()));
1655 } else {
1656 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1657 Location::RequiresRegister()));
1658 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1659 Location::RequiresRegister()));
1660 }
1661 locs->set_out(0, Location::RequiresRegister());
1662 return locs;
1663 }
1664 if (operation_cid() == kDoubleCid) {
1665 LocationSummary* summary = new (zone)
1666 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1667 summary->set_in(0, Location::RequiresFpuRegister());
1668 summary->set_in(1, Location::RequiresFpuRegister());
1669 summary->set_out(0, Location::RequiresRegister());
1670 return summary;
1671 }
1672 ASSERT(operation_cid() == kSmiCid);
1673 LocationSummary* summary = new (zone)
1674 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1675 summary->set_in(0, LocationRegisterOrConstant(left()));
1676 // Only one input can be a constant operand. The case of two constant
1677 // operands should be handled by constant propagation.
1678 summary->set_in(1, summary->in(0).IsConstant()
1679 ? Location::RequiresRegister()
1680 : LocationRegisterOrConstant(right()));
1681 summary->set_out(0, Location::RequiresRegister());
1682 return summary;
1683}
1684
1685Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1686 BranchLabels labels) {
1687 if (operation_cid() == kSmiCid) {
1688 return EmitSmiComparisonOp(compiler, locs(), kind());
1689 } else if (operation_cid() == kMintCid) {
1690 return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
1691 } else {
1692 ASSERT(operation_cid() == kDoubleCid);
1693 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1694 }
1695}
1696
1697void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1698 SetupNative();
1699 const Register result = locs()->out(0).reg();
1700
1701 // Pass a pointer to the first argument in R2.
1702 __ add(
1703 R2, SP,
1704 compiler::Operand((ArgumentCount() - 1) * compiler::target::kWordSize));
1705
1706 // Compute the effective address. When running under the simulator,
1707 // this is a redirection address that forces the simulator to call
1708 // into the runtime system.
1709 uword entry;
1710 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1711 const Code* stub;
1712 if (link_lazily()) {
1713 stub = &StubCode::CallBootstrapNative();
1714 entry = NativeEntry::LinkNativeCallEntry();
1715 } else {
1716 entry = reinterpret_cast<uword>(native_c_function());
1717 if (is_bootstrap_native()) {
1718 stub = &StubCode::CallBootstrapNative();
1719 } else if (is_auto_scope()) {
1720 stub = &StubCode::CallAutoScopeNative();
1721 } else {
1722 stub = &StubCode::CallNoScopeNative();
1723 }
1724 }
1725 __ LoadImmediate(R1, argc_tag);
1726 compiler::ExternalLabel label(entry);
1727 __ LoadNativeEntry(R9, &label,
1728 link_lazily()
1730 : compiler::ObjectPoolBuilderEntry::kNotPatchable);
1731 if (link_lazily()) {
1732 compiler->GeneratePatchableCall(
1733 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1735 } else {
1736 // We can never lazy-deopt here because natives are never optimized.
1737 ASSERT(!compiler->is_optimizing());
1738 compiler->GenerateNonLazyDeoptableStubCall(
1739 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1741 }
1742 __ LoadFromOffset(result, SP, 0);
1743
1744 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1745}
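// Summary of the stub/entry selection above (illustrative comment only):
//   link_lazily()         -> CallBootstrapNative stub via a patchable call
//   is_bootstrap_native() -> CallBootstrapNative stub
//   is_auto_scope()       -> CallAutoScopeNative stub
//   otherwise             -> CallNoScopeNative stub
// In every case R1 carries the argc tag, R9 the native entry, and the result
// is read back from the top of the stack after the call.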
1746
1747#define R(r) (1 << r)
1748
1749LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1750 bool is_optimizing) const {
1753 return MakeLocationSummaryInternal(
1754 zone, is_optimizing,
1757}
1758
1759#undef R
1760
1761void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1762 const Register branch = locs()->in(TargetAddressIndex()).reg();
1763
1764 // The temps are indexed according to their register number.
1765 // For regular calls, this holds the FP for rebasing the original locations
1766 // during EmitParamMoves.
1767 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1768 // the call.
1769 const Register saved_fp_or_sp = locs()->temp(0).reg();
1770 const Register temp1 = locs()->temp(1).reg();
1771
1772 // Ensure these are callee-saved registers and are preserved across the call.
1773 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1774 // Other temps don't need to be preserved.
1775
1776 __ mov(saved_fp_or_sp,
1777 is_leaf_ ? compiler::Operand(SPREG) : compiler::Operand(FPREG));
1778
1779 if (!is_leaf_) {
1781 // Make space for the return address.
1781 __ PushImmediate(0);
1782
1783 // We need to create a dummy "exit frame". It will have a null code object.
1784 __ LoadObject(CODE_REG, Object::null_object());
1785 __ set_constant_pool_allowed(false);
1786 __ EnterDartFrame(0, /*load_pool_pointer=*/false);
1787 }
1788
1789 // Reserve space for the arguments that go on the stack (if any), then align.
1790 __ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());
1791#if defined(USING_MEMORY_SANITIZER)
1792 UNIMPLEMENTED();
1793#endif
1794
1795 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, TMP);
1796
1798 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1799 }
1800
1801 if (is_leaf_) {
1802#if !defined(PRODUCT)
1803 // Set the thread object's top_exit_frame_info and VMTag to enable the
1805 // profiler to determine that the thread is no longer executing Dart code.
1805 __ StoreToOffset(FPREG, THR,
1806 compiler::target::Thread::top_exit_frame_info_offset());
1807 __ StoreToOffset(branch, THR, compiler::target::Thread::vm_tag_offset());
1808#endif
1809
1810 __ blx(branch);
1811
1812#if !defined(PRODUCT)
1813 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1814 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1815 __ LoadImmediate(temp1, 0);
1816 __ StoreToOffset(temp1, THR,
1817 compiler::target::Thread::top_exit_frame_info_offset());
1818#endif
1819 } else {
1820 // We need to copy the return address up into the dummy stack frame so the
1821 // stack walker will know which safepoint to use.
1822 __ mov(temp1, compiler::Operand(PC));
1823 __ str(temp1, compiler::Address(FPREG, kSavedCallerPcSlotFromFp *
1824 compiler::target::kWordSize));
1825
1826 // For historical reasons, the PC on ARM points 8 bytes past the current
1827 // instruction. Therefore we emit the metadata here, 8 bytes
1828 // (2 instructions) after the original mov.
1829 compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
1830 UntaggedPcDescriptors::Kind::kOther, locs(),
1831 env());
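  // Worked example of the PC adjustment above (illustrative): if the
  // "mov temp1, PC" sits at address A, ARM defines the value read from PC as
  // A + 8 (two 4-byte instructions ahead). The mov and the following str
  // occupy A and A + 4, so the metadata emitted next is associated with
  // A + 8, exactly the address that was stored into the saved-PC slot.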
1832
1833 // Update information in the thread object and enter a safepoint.
1834 // Outline state transition. In AOT, for code size. In JIT, because we
1835 // cannot trust that code will be executable.
1836 __ ldr(temp1,
1837 compiler::Address(
1838 THR, compiler::target::Thread::
1839 call_native_through_safepoint_entry_point_offset()));
1840
1841 // Calls R8 in a safepoint and clobbers R4 and NOTFP.
1842 ASSERT(branch == R8);
1843 static_assert((kReservedCpuRegisters & (1 << NOTFP)) != 0,
1844 "NOTFP should be a reserved register");
1845 __ blx(temp1);
1846
1847 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1848 __ Comment("Check Dart_Handle for Error.");
1849 compiler::Label not_error;
1851 ASSERT(saved_fp_or_sp != CallingConventions::kReturnReg);
1852 __ ldr(temp1,
1853 compiler::Address(CallingConventions::kReturnReg,
1854 compiler::target::LocalHandle::ptr_offset()));
1855 __ BranchIfSmi(temp1, &not_error);
1856 __ LoadClassId(temp1, temp1);
1857 __ RangeCheck(temp1, saved_fp_or_sp, kFirstErrorCid, kLastErrorCid,
1859
1860 // Slow path, use the stub to propagate error, to save on code-size.
1861 __ Comment("Slow path: call Dart_PropagateError through stub.");
1864 __ ldr(temp1,
1865 compiler::Address(
1866 THR, compiler::target::Thread::
1867 call_native_through_safepoint_entry_point_offset()));
1868 __ ldr(branch, compiler::Address(
1869 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1870 __ blx(temp1);
1871#if defined(DEBUG)
1872 // We should never return with normal control flow from this.
1873 __ bkpt(0);
1874#endif
1875
1876 __ Bind(&not_error);
1877 }
1878
1879 // Restore the global object pool after returning from runtime (old space is
1880 // moving, so the GOP could have been relocated).
1881 if (FLAG_precompiled_mode) {
1882 __ SetupGlobalPoolAndDispatchTable();
1883 }
1884 }
1885
1886 EmitReturnMoves(compiler, temp1, TMP);
1887
1888 if (is_leaf_) {
1889 // Restore the pre-aligned SP.
1890 __ mov(SPREG, compiler::Operand(saved_fp_or_sp));
1891 } else {
1892 // Leave dummy exit frame.
1893 __ LeaveDartFrame();
1894 __ set_constant_pool_allowed(true);
1895
1896 // Instead of returning to the "fake" return address, we just pop it.
1897 __ PopRegister(temp1);
1898 }
1899}
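// Rough model of the two paths above (illustrative, not authoritative): a
// leaf call merely aligns SP, optionally publishes FP and the target address
// for the profiler, and calls the target directly; a non-leaf call builds a
// dummy exit frame, stores its own PC as the frame's return address,
// transfers to the target through the call-native-through-safepoint
// trampoline, and on return optionally checks a Dart_Handle result for an
// Error object, propagating it through a stub instead of returning normally.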
1900
1901// Keep in sync with NativeEntryInstr::EmitNativeCode.
1902void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1903 EmitReturnMoves(compiler);
1904
1905 __ LeaveDartFrame();
1906
1907 // The dummy return address is in LR, no need to pop it as on Intel.
1908
1909 // These can be anything besides the return registers (R0 and R1) and THR
1910 // (R10).
1911 const Register vm_tag_reg = R2;
1912 const Register old_exit_frame_reg = R3;
1913 const Register old_exit_through_ffi_reg = R4;
1914 const Register tmp = R5;
1915
1916 __ Pop(old_exit_frame_reg);
1917 __ Pop(old_exit_through_ffi_reg);
1918
1919 // Restore top_resource.
1920 __ Pop(tmp);
1921 __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
1922
1923 __ Pop(vm_tag_reg);
1924
1925 // The trampoline that called us will enter the safepoint on our behalf.
1926 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1927 old_exit_through_ffi_reg, tmp,
1928 /*enter_safepoint=*/false);
1929
1930 __ PopNativeCalleeSavedRegisters();
1931
1932#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1933#error Unimplemented
1934#endif
1935
1936 // Leave the entry frame.
1937 RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));
1938
1939 // Leave the dummy frame holding the pushed arguments.
1940 RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));
1941
1942 __ Ret();
1943
1944 // For following blocks.
1945 __ set_constant_pool_allowed(true);
1946}
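// Note (illustrative): the pops above undo, in reverse order, the pushes
// performed by NativeEntryInstr::EmitNativeCode below (vm_tag, top_resource,
// exit_through_ffi, top_exit_frame_info), which is why the two methods must
// be kept in sync.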
1947
1948// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1949void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1950 // Constant pool cannot be used until we enter the actual Dart frame.
1951 __ set_constant_pool_allowed(false);
1952
1953 __ Bind(compiler->GetJumpLabel(this));
1954
1955 // Create a dummy frame holding the pushed arguments. This simplifies
1956 // NativeReturnInstr::EmitNativeCode.
1957 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1958
1959 // Save the argument registers, in reverse order.
1960 SaveArguments(compiler);
1961
1962 // Enter the entry frame. NativeParameterInstr expects this frame to have size
1963 // -exit_link_slot_from_entry_fp, which is verified below.
1964 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1965
1967 // Reserve a slot for the code object.
1967 __ PushImmediate(0);
1968
1969#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1970#error Unimplemented
1971#endif
1972
1973 __ PushNativeCalleeSavedRegisters();
1974
1975 // Save the current VMTag on the stack.
1976 __ LoadFromOffset(R0, THR, compiler::target::Thread::vm_tag_offset());
1977 __ Push(R0);
1978
1979 // Save top resource.
1980 const intptr_t top_resource_offset =
1981 compiler::target::Thread::top_resource_offset();
1982 __ LoadFromOffset(R0, THR, top_resource_offset);
1983 __ Push(R0);
1984 __ LoadImmediate(R0, 0);
1985 __ StoreToOffset(R0, THR, top_resource_offset);
1986
1987 __ LoadFromOffset(R0, THR,
1988 compiler::target::Thread::exit_through_ffi_offset());
1989 __ Push(R0);
1990
1991 // Save the top exit frame info. Don't set it to 0 yet;
1992 // TransitionNativeToGenerated will handle that.
1993 __ LoadFromOffset(R0, THR,
1994 compiler::target::Thread::top_exit_frame_info_offset());
1995 __ Push(R0);
1996
1997 __ EmitEntryFrameVerification(R0);
1998
1999 // The callback trampoline (caller) has already left the safepoint for us.
2000 __ TransitionNativeToGenerated(/*scratch0=*/R0, /*scratch1=*/R1,
2001 /*exit_safepoint=*/false);
2002
2003 // Now that the safepoint has ended, we can touch Dart objects without
2004 // handles.
2005
2006 // Load the code object.
2007 const Function& target_function = marshaller_.dart_signature();
2008 const intptr_t callback_id = target_function.FfiCallbackId();
2009 __ LoadFromOffset(R0, THR, compiler::target::Thread::isolate_group_offset());
2010 __ LoadFromOffset(R0, R0,
2011 compiler::target::IsolateGroup::object_store_offset());
2012 __ LoadFromOffset(R0, R0,
2013 compiler::target::ObjectStore::ffi_callback_code_offset());
2014 __ LoadFieldFromOffset(R0, R0,
2015 compiler::target::GrowableObjectArray::data_offset());
2016 __ LoadFieldFromOffset(CODE_REG, R0,
2017 compiler::target::Array::data_offset() +
2018 callback_id * compiler::target::kWordSize);
2019
2020 // Put the code object in the reserved slot.
2021 __ StoreToOffset(CODE_REG, FPREG,
2022 kPcMarkerSlotFromFp * compiler::target::kWordSize);
2023 if (FLAG_precompiled_mode) {
2024 __ SetupGlobalPoolAndDispatchTable();
2025 } else {
2026 __ LoadImmediate(PP, 0); // GC safe value into PP.
2027 }
2028
2029 // Load a GC-safe value for the arguments descriptor (unused but tagged).
2030 __ LoadImmediate(ARGS_DESC_REG, 0);
2031
2032 // Load a dummy return address which suggests that we are inside
2033 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
2034 CLOBBERS_LR({
2035 __ LoadFromOffset(LR, THR,
2036 compiler::target::Thread::invoke_dart_code_stub_offset());
2037 __ LoadFieldFromOffset(LR, LR,
2038 compiler::target::Code::entry_point_offset());
2039 });
2040
2041 FunctionEntryInstr::EmitNativeCode(compiler);
2042}
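// Illustrative summary of the code-object lookup above: the callback's Code
// object is, in effect, the element at callback_id in the backing array of
// isolate_group->object_store()->ffi_callback_code(), expressed as raw field
// loads because no handles are available here (see the comment above).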
2043
2044#define R(r) (1 << r)
2045
2047 Zone* zone,
2048 bool is_optimizing) const {
2050 return MakeLocationSummaryInternal(zone, (R(saved_fp)));
2051}
2052
2053#undef R
2054
2055void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2056 const Register saved_fp = locs()->temp(0).reg();
2057 const Register temp0 = TMP;
2058
2059 __ MoveRegister(saved_fp, FPREG);
2060
2061 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
2062 __ EnterCFrame(frame_space);
2063
2064 EmitParamMoves(compiler, saved_fp, temp0);
2065
2066 const Register target_address = locs()->in(TargetAddressIndex()).reg();
2067 __ str(target_address,
2068 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
2069 __ CallCFunction(target_address);
2070 __ LoadImmediate(temp0, VMTag::kDartTagId);
2071 __ str(temp0,
2072 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
2073
2074 __ LeaveCFrame();
2075}
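// Illustrative note on the vm_tag stores above: during the C call the
// thread's vm_tag holds the target address, so profiler samples taken while
// the call is running are attributed to that native entry; it is reset to
// VMTag::kDartTagId immediately after CallCFunction returns.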
2076
2077LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
2078 Zone* zone,
2079 bool opt) const {
2080 const intptr_t kNumInputs = 1;
2081 // TODO(fschneider): Allow immediate operands for the char code.
2082 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
2084}
2085
2086void OneByteStringFromCharCodeInstr::EmitNativeCode(
2087 FlowGraphCompiler* compiler) {
2088 ASSERT(compiler->is_optimizing());
2089 const Register char_code = locs()->in(0).reg();
2090 const Register result = locs()->out(0).reg();
2091
2092 __ ldr(
2093 result,
2094 compiler::Address(
2095 THR, compiler::target::Thread::predefined_symbols_address_offset()));
2096 __ AddImmediate(
2097 result, Symbols::kNullCharCodeSymbolOffset * compiler::target::kWordSize);
2098 __ ldr(result,
2099 compiler::Address(result, char_code, LSL, 1)); // Char code is a smi.
2100}
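// Worked addressing example (illustrative, assuming a 4-byte word and the
// Smi tag scheme asserted elsewhere in this file): char_code holds the Smi
// value 2 * code, so "LSL 1" scales it to 4 * code == code * kWordSize, and
// the final load is predefined_symbols[kNullCharCodeSymbolOffset + code],
// i.e. the canonical one-character symbol for that char code.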
2101
2102LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
2103 bool opt) const {
2104 const intptr_t kNumInputs = 1;
2105 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
2107}
2108
2109void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2110 ASSERT(cid_ == kOneByteStringCid);
2111 const Register str = locs()->in(0).reg();
2112 const Register result = locs()->out(0).reg();
2113 __ ldr(result, compiler::FieldAddress(
2114 str, compiler::target::String::length_offset()));
2115 __ cmp(result, compiler::Operand(compiler::target::ToRawSmi(1)));
2116 __ LoadImmediate(result, -1, NE);
2117 __ ldrb(result,
2118 compiler::FieldAddress(
2119 str, compiler::target::OneByteString::data_offset()),
2120 EQ);
2121 __ SmiTag(result);
2122}
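// The predicated instructions above compute, in effect (illustrative C++
// sketch, not VM code):
//   intptr_t code = (str.length() == 1) ? str.data()[0] : -1;
//   return Smi(code);
// using EQ/NE conditional execution instead of branches.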
2123
2124LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
2125 bool opt) const {
2126 const intptr_t kNumInputs = 5;
2127 const intptr_t kNumTemps = 0;
2128 LocationSummary* summary = new (zone)
2129 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2130 summary->set_in(0, Location::Any()); // decoder
2131 summary->set_in(1, Location::WritableRegister()); // bytes
2132 summary->set_in(2, Location::WritableRegister()); // start
2133 summary->set_in(3, Location::WritableRegister()); // end
2134 summary->set_in(4, Location::WritableRegister()); // table
2135 summary->set_out(0, Location::RequiresRegister());
2136 return summary;
2137}
2138
2139void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2140 const Register bytes_reg = locs()->in(1).reg();
2141 const Register start_reg = locs()->in(2).reg();
2142 const Register end_reg = locs()->in(3).reg();
2143 const Register table_reg = locs()->in(4).reg();
2144 const Register size_reg = locs()->out(0).reg();
2145
2146 const Register bytes_ptr_reg = start_reg;
2147 const Register bytes_end_reg = end_reg;
2148 const Register flags_reg = bytes_reg;
2149 const Register temp_reg = TMP;
2150 const Register decoder_temp_reg = start_reg;
2151 const Register flags_temp_reg = end_reg;
2152
2153 const intptr_t kSizeMask = 0x03;
2154 const intptr_t kFlagsMask = 0x3C;
2155
2156 compiler::Label loop, loop_in;
2157
2158 // Address of input bytes.
2159 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
2160
2161 // Table.
2162 __ AddImmediate(
2163 table_reg, table_reg,
2164 compiler::target::OneByteString::data_offset() - kHeapObjectTag);
2165
2166 // Pointers to start and end.
2167 __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
2168 __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
2169
2170 // Initialize size and flags.
2171 __ LoadImmediate(size_reg, 0);
2172 __ LoadImmediate(flags_reg, 0);
2173
2174 __ b(&loop_in);
2175 __ Bind(&loop);
2176
2177 // Read byte and increment pointer.
2178 __ ldrb(temp_reg,
2179 compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex));
2180
2181 // Update size and flags based on byte value.
2182 __ ldrb(temp_reg, compiler::Address(table_reg, temp_reg));
2183 __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
2184 __ and_(temp_reg, temp_reg, compiler::Operand(kSizeMask));
2185 __ add(size_reg, size_reg, compiler::Operand(temp_reg));
2186
2187 // Stop if end is reached.
2188 __ Bind(&loop_in);
2189 __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
2190 __ b(&loop, UNSIGNED_LESS);
2191
2192 // Write flags to field.
2193 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
2194 if (!IsScanFlagsUnboxed()) {
2195 __ SmiTag(flags_reg);
2196 }
2197 Register decoder_reg;
2198 const Location decoder_location = locs()->in(0);
2199 if (decoder_location.IsStackSlot()) {
2200 __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
2201 decoder_reg = decoder_temp_reg;
2202 } else {
2203 decoder_reg = decoder_location.reg();
2204 }
2205 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
2206 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2207 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
2208 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2209}
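// A rough C++ model of the scan loop above (illustrative only; the decoder
// field name and table encoding are assumptions based on the masks used):
//   intptr_t size = 0, flags = 0;
//   for (intptr_t i = start; i < end; i++) {
//     const uint8_t entry = table[bytes[i]];
//     flags |= entry;          // accumulate flag bits
//     size += entry & 0x03;    // low two bits contribute to the size
//   }
//   decoder.scan_flags |= flags & 0x3C;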
2210
2211LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
2212 bool opt) const {
2213 // The compiler must optimize any function that includes a LoadIndexed
2214 // instruction that uses typed data cids, since extracting the payload address
2215 // from views is done in a compiler pass after all code motion has happened.
2217
2218 auto const rep =
2220 const bool directly_addressable = aligned() && rep != kUnboxedInt64;
2221 const intptr_t kNumInputs = 2;
2222 intptr_t kNumTemps = 0;
2223 if (!directly_addressable) {
2224 kNumTemps += 1;
2225 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2226 kNumTemps += 1;
2227 }
2228 }
2229 LocationSummary* locs = new (zone)
2230 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2231 locs->set_in(kArrayPos, Location::RequiresRegister());
2232 bool needs_base;
2233 const bool can_be_constant =
2234 index()->BindsToConstant() &&
2236 index()->BoundConstant(), /*load=*/true, IsUntagged(), class_id(),
2237 index_scale(), &needs_base);
2238 // We don't need to check if [needs_base] is true, since we use TMP as the
2239 // temp register in this case and so don't need to allocate a temp register.
2240 locs->set_in(kIndexPos,
2241 can_be_constant
2242 ? Location::Constant(index()->definition()->AsConstant())
2243 : Location::RequiresRegister());
2245 if (rep == kUnboxedInt64) {
2246 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
2248 } else {
2249 locs->set_out(0, Location::RequiresRegister());
2250 }
2251 } else if (RepresentationUtils::IsUnboxed(rep)) {
2252 if (rep == kUnboxedFloat) {
2253 // Need register < Q7 for float operations.
2254 // TODO(30953): Support register range constraints in the regalloc.
2255 locs->set_out(0, Location::FpuRegisterLocation(Q6));
2256 } else {
2257 locs->set_out(0, Location::RequiresFpuRegister());
2258 }
2259 } else {
2260 locs->set_out(0, Location::RequiresRegister());
2261 }
2262 if (!directly_addressable) {
2263 locs->set_temp(0, Location::RequiresRegister());
2264 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2265 locs->set_temp(1, Location::RequiresRegister());
2266 }
2267 }
2268 return locs;
2269}
2270
2271void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2272 auto const rep =
2275 const bool directly_addressable = aligned() && rep != kUnboxedInt64;
2276 // The array register points to the backing store for external arrays.
2277 const Register array = locs()->in(kArrayPos).reg();
2278 const Location index = locs()->in(kIndexPos);
2279 const Register address =
2280 directly_addressable ? kNoRegister : locs()->temp(0).reg();
2281
2282 compiler::Address element_address(kNoRegister);
2283 if (directly_addressable) {
2284 element_address =
2285 index.IsRegister()
2286 ? __ ElementAddressForRegIndex(true, // Load.
2287 IsUntagged(), class_id(),
2288 index_scale(), index_unboxed_, array,
2289 index.reg())
2290 : __ ElementAddressForIntIndex(
2291 true, // Load.
2293 compiler::target::SmiValue(index.constant()),
2294 IP); // Temp register.
2295 // Warning: element_address may use register IP as base.
2296 } else {
2297 if (index.IsRegister()) {
2298 __ LoadElementAddressForRegIndex(address,
2299 true, // Load.
2301 index_unboxed_, array, index.reg());
2302 } else {
2303 __ LoadElementAddressForIntIndex(
2304 address,
2305 true, // Load.
2307 compiler::target::SmiValue(index.constant()));
2308 }
2309 }
2310
2312 if (rep == kUnboxedInt64) {
2313 ASSERT(!directly_addressable); // need to add to register
2314 ASSERT(locs()->out(0).IsPairLocation());
2315 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2316 const Register result_lo = result_pair->At(0).reg();
2317 const Register result_hi = result_pair->At(1).reg();
2318 if (aligned()) {
2319 __ ldr(result_lo, compiler::Address(address));
2320 __ ldr(result_hi,
2321 compiler::Address(address, compiler::target::kWordSize));
2322 } else {
2323 __ LoadWordUnaligned(result_lo, address, TMP);
2324 __ AddImmediate(address, address, compiler::target::kWordSize);
2325 __ LoadWordUnaligned(result_hi, address, TMP);
2326 }
2327 } else {
2328 const Register result = locs()->out(0).reg();
2329 if (aligned()) {
2330 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2331 } else {
2332 switch (rep) {
2333 case kUnboxedUint32:
2334 case kUnboxedInt32:
2335 __ LoadWordUnaligned(result, address, TMP);
2336 break;
2337 case kUnboxedUint16:
2338 __ LoadHalfWordUnsignedUnaligned(result, address, TMP);
2339 break;
2340 case kUnboxedInt16:
2341 __ LoadHalfWordUnaligned(result, address, TMP);
2342 break;
2343 default:
2344 UNREACHABLE();
2345 break;
2346 }
2347 }
2348 }
2349 } else if (RepresentationUtils::IsUnboxed(rep)) {
2350 const QRegister result = locs()->out(0).fpu_reg();
2351 const DRegister dresult0 = EvenDRegisterOf(result);
2352 if (rep == kUnboxedFloat) {
2353 // Load single precision float.
2354 // vldrs does not support indexed addressing.
2355 if (aligned()) {
2356 __ vldrs(EvenSRegisterOf(dresult0), element_address);
2357 } else {
2358 const Register value = locs()->temp(1).reg();
2359 __ LoadWordUnaligned(value, address, TMP);
2360 __ vmovsr(EvenSRegisterOf(dresult0), value);
2361 }
2362 } else if (rep == kUnboxedDouble) {
2363 // vldrd does not support indexed addressing.
2364 if (aligned()) {
2365 __ vldrd(dresult0, element_address);
2366 } else {
2367 const Register value = locs()->temp(1).reg();
2368 __ LoadWordUnaligned(value, address, TMP);
2369 __ vmovdr(dresult0, 0, value);
2370 __ AddImmediate(address, address, 4);
2371 __ LoadWordUnaligned(value, address, TMP);
2372 __ vmovdr(dresult0, 1, value);
2373 }
2374 } else {
2375 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2376 rep == kUnboxedFloat64x2);
2377 ASSERT(element_address.Equals(compiler::Address(IP)));
2378 ASSERT(aligned());
2379 __ vldmd(IA, IP, dresult0, 2);
2380 }
2381 } else {
2382 const Register result = locs()->out(0).reg();
2383 ASSERT(rep == kTagged);
2384 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2385 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2386 __ ldr(result, element_address);
2387 }
2388}
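// Illustrative note for the unaligned kUnboxedInt64 path above: the value is
// assembled from two unaligned 32-bit loads; on little-endian ARM the result
// is conceptually (int64_t(hi) << 32) | uint32_t(lo).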
2389
2390LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2391 bool opt) const {
2392 // The compiler must optimize any function that includes a StoreIndexed
2393 // instruction that uses typed data cids, since extracting the payload address
2394 // from views is done in a compiler pass after all code motion has happened.
2396
2397 auto const rep =
2399 const bool directly_addressable =
2400 aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
2401 const intptr_t kNumInputs = 3;
2402 LocationSummary* locs;
2403
2404 intptr_t kNumTemps = 0;
2405 bool needs_base = false;
2406 const bool can_be_constant =
2407 index()->BindsToConstant() &&
2409 index()->BoundConstant(), /*load=*/false, IsUntagged(), class_id(),
2410 index_scale(), &needs_base);
2411 if (can_be_constant) {
2412 if (!directly_addressable) {
2413 kNumTemps += 2;
2414 } else if (needs_base) {
2415 kNumTemps += 1;
2416 }
2417
2418 locs = new (zone)
2419 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2420
2421 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
2422 } else {
2423 if (!directly_addressable) {
2424 kNumTemps += 2;
2425 }
2426
2427 locs = new (zone)
2428 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2429
2430 locs->set_in(1, Location::WritableRegister());
2431 }
2432 locs->set_in(0, Location::RequiresRegister());
2433 for (intptr_t i = 0; i < kNumTemps; i++) {
2434 locs->set_temp(i, Location::RequiresRegister());
2435 }
2436
2438 if (rep == kUnboxedInt64) {
2441 } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
2442 locs->set_in(2, LocationRegisterOrConstant(value()));
2443 } else {
2444 locs->set_in(2, Location::RequiresRegister());
2445 }
2446 } else if (RepresentationUtils::IsUnboxed(rep)) {
2447 if (rep == kUnboxedFloat) {
2448 // Need low register (< Q7).
2449 locs->set_in(2, Location::FpuRegisterLocation(Q6));
2450 } else { // TODO(srdjan): Support Float64 constants.
2451 locs->set_in(2, Location::RequiresFpuRegister());
2452 }
2453 } else if (class_id() == kArrayCid) {
2454 locs->set_in(2, ShouldEmitStoreBarrier()
2457 if (ShouldEmitStoreBarrier()) {
2460 }
2461 }
2462
2463 return locs;
2464}
2465
2466void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2467 auto const rep =
2470 const bool directly_addressable =
2471 aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
2472
2473 // The array register points to the backing store for external arrays.
2474 const Register array = locs()->in(0).reg();
2475 const Location index = locs()->in(1);
2476 const Register temp =
2477 (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
2478 const Register temp2 =
2479 (locs()->temp_count() > 1) ? locs()->temp(1).reg() : kNoRegister;
2480
2481 compiler::Address element_address(kNoRegister);
2482 if (directly_addressable) {
2483 element_address =
2484 index.IsRegister()
2485 ? __ ElementAddressForRegIndex(false, // Store.
2486 IsUntagged(), class_id(),
2487 index_scale(), index_unboxed_, array,
2488 index.reg())
2489 : __ ElementAddressForIntIndex(
2490 false, // Store.
2492 compiler::target::SmiValue(index.constant()), temp);
2493 } else {
2494 if (index.IsRegister()) {
2495 __ LoadElementAddressForRegIndex(temp,
2496 false, // Store.
2498 index_unboxed_, array, index.reg());
2499 } else {
2500 __ LoadElementAddressForIntIndex(
2501 temp,
2502 false, // Store.
2504 compiler::target::SmiValue(index.constant()));
2505 }
2506 }
2507
2509 ASSERT(rep == kUnboxedUint8);
2510 if (locs()->in(2).IsConstant()) {
2511 intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
2512 // Clamp to 0x0 or 0xFF respectively.
2513 if (value > 0xFF) {
2514 value = 0xFF;
2515 } else if (value < 0) {
2516 value = 0;
2517 }
2518 __ LoadImmediate(IP, static_cast<int8_t>(value));
2519 __ strb(IP, element_address);
2520 } else {
2521 const Register value = locs()->in(2).reg();
2522 // Clamp to 0x00 or 0xFF respectively.
2523 __ LoadImmediate(IP, 0xFF);
2524 // Compare Smi value and smi 0xFF.
2525 __ cmp(value, compiler::Operand(IP));
2526 // IP = value <= 0xFF ? 0 : 0xFF.
2527 __ mov(IP, compiler::Operand(0), LE);
2528 // IP = value in range ? value : IP.
2529 __ mov(IP, compiler::Operand(value), LS);
2530 __ strb(IP, element_address);
2531 }
2532 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2533 if (rep == kUnboxedInt64) {
2534 ASSERT(!directly_addressable); // need to add to register
2535 ASSERT(locs()->in(2).IsPairLocation());
2536 PairLocation* value_pair = locs()->in(2).AsPairLocation();
2537 Register value_lo = value_pair->At(0).reg();
2538 Register value_hi = value_pair->At(1).reg();
2539 if (aligned()) {
2540 __ str(value_lo, compiler::Address(temp));
2541 __ str(value_hi, compiler::Address(temp, compiler::target::kWordSize));
2542 } else {
2543 __ StoreWordUnaligned(value_lo, temp, temp2);
2544 __ AddImmediate(temp, temp, compiler::target::kWordSize);
2545 __ StoreWordUnaligned(value_hi, temp, temp2);
2546 }
2547 } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
2548 if (locs()->in(2).IsConstant()) {
2549 __ LoadImmediate(IP,
2550 compiler::target::SmiValue(locs()->in(2).constant()));
2551 __ strb(IP, element_address);
2552 } else {
2553 const Register value = locs()->in(2).reg();
2554 __ strb(value, element_address);
2555 }
2556 } else {
2557 const Register value = locs()->in(2).reg();
2558 if (aligned()) {
2559 __ Store(value, element_address, RepresentationUtils::OperandSize(rep));
2560 } else {
2561 switch (rep) {
2562 case kUnboxedUint32:
2563 case kUnboxedInt32:
2564 __ StoreWordUnaligned(value, temp, temp2);
2565 break;
2566 case kUnboxedUint16:
2567 case kUnboxedInt16:
2568 __ StoreHalfWordUnaligned(value, temp, temp2);
2569 break;
2570 default:
2571 UNREACHABLE();
2572 break;
2573 }
2574 }
2575 }
2576 } else if (RepresentationUtils::IsUnboxed(rep)) {
2577 if (rep == kUnboxedFloat) {
2578 const SRegister value_reg =
2579 EvenSRegisterOf(EvenDRegisterOf(locs()->in(2).fpu_reg()));
2580 if (aligned()) {
2581 __ vstrs(value_reg, element_address);
2582 } else {
2583 const Register address = temp;
2584 const Register value = temp2;
2585 __ vmovrs(value, value_reg);
2586 __ StoreWordUnaligned(value, address, TMP);
2587 }
2588 } else if (rep == kUnboxedDouble) {
2589 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2590 if (aligned()) {
2591 __ vstrd(value_reg, element_address);
2592 } else {
2593 const Register address = temp;
2594 const Register value = temp2;
2595 __ vmovrs(value, EvenSRegisterOf(value_reg));
2596 __ StoreWordUnaligned(value, address, TMP);
2597 __ AddImmediate(address, address, 4);
2598 __ vmovrs(value, OddSRegisterOf(value_reg));
2599 __ StoreWordUnaligned(value, address, TMP);
2600 }
2601 } else {
2602 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2603 rep == kUnboxedFloat64x2);
2604 ASSERT(element_address.Equals(compiler::Address(index.reg())));
2605 ASSERT(aligned());
2606 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2607 __ vstmd(IA, index.reg(), value_reg, 2);
2608 }
2609 } else if (class_id() == kArrayCid) {
2610 if (ShouldEmitStoreBarrier()) {
2611 const Register value = locs()->in(2).reg();
2612 __ StoreIntoArray(array, temp, value, CanValueBeSmi());
2613 } else if (locs()->in(2).IsConstant()) {
2614 const Object& constant = locs()->in(2).constant();
2615 __ StoreObjectIntoObjectNoBarrier(array, compiler::Address(temp),
2616 constant);
2617 } else {
2618 const Register value = locs()->in(2).reg();
2619 __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
2620 }
2621 }
2622}
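// The clamped Uint8 store path above is a branchless form of (illustrative
// C++ sketch only):
//   uint8_t Clamp(intptr_t v) {
//     if (v < 0) return 0;
//     if (v > 0xFF) return 0xFF;
//     return static_cast<uint8_t>(v);
//   }
// realized with one compare and two conditional moves on IP.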
2623
2624LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2625 bool opt) const {
2626 const intptr_t kNumInputs = 1;
2627
2628 const intptr_t value_cid = value()->Type()->ToCid();
2629 const intptr_t field_cid = field().guarded_cid();
2630
2631 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2632
2633 const bool needs_value_cid_temp_reg =
2634 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2635
2636 const bool needs_field_temp_reg = emit_full_guard;
2637
2638 intptr_t num_temps = 0;
2639 if (needs_value_cid_temp_reg) {
2640 num_temps++;
2641 }
2642 if (needs_field_temp_reg) {
2643 num_temps++;
2644 }
2645
2646 LocationSummary* summary = new (zone)
2647 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2648 summary->set_in(0, Location::RequiresRegister());
2649
2650 for (intptr_t i = 0; i < num_temps; i++) {
2651 summary->set_temp(i, Location::RequiresRegister());
2652 }
2653
2654 return summary;
2655}
2656
2657void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2658 ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
2659 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2660 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2661
2662 const intptr_t value_cid = value()->Type()->ToCid();
2663 const intptr_t field_cid = field().guarded_cid();
2664 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2665
2666 if (field_cid == kDynamicCid) {
2667 return; // Nothing to emit.
2668 }
2669
2670 const bool emit_full_guard =
2671 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2672
2673 const bool needs_value_cid_temp_reg =
2674 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2675
2676 const bool needs_field_temp_reg = emit_full_guard;
2677
2678 const Register value_reg = locs()->in(0).reg();
2679
2680 const Register value_cid_reg =
2681 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2682
2683 const Register field_reg = needs_field_temp_reg
2684 ? locs()->temp(locs()->temp_count() - 1).reg()
2685 : kNoRegister;
2686
2687 compiler::Label ok, fail_label;
2688
2689 compiler::Label* deopt =
2690 compiler->is_optimizing()
2691 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2692 : nullptr;
2693
2694 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2695
2696 if (emit_full_guard) {
2697 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2698
2699 compiler::FieldAddress field_cid_operand(
2700 field_reg, compiler::target::Field::guarded_cid_offset());
2701 compiler::FieldAddress field_nullability_operand(
2702 field_reg, compiler::target::Field::is_nullable_offset());
2703
2704 if (value_cid == kDynamicCid) {
2705 LoadValueCid(compiler, value_cid_reg, value_reg);
2706 __ ldr(IP, field_cid_operand);
2707 __ cmp(value_cid_reg, compiler::Operand(IP));
2708 __ b(&ok, EQ);
2709 __ ldr(IP, field_nullability_operand);
2710 __ cmp(value_cid_reg, compiler::Operand(IP));
2711 } else if (value_cid == kNullCid) {
2712 __ ldr(value_cid_reg, field_nullability_operand);
2713 __ CompareImmediate(value_cid_reg, value_cid);
2714 } else {
2715 __ ldr(value_cid_reg, field_cid_operand);
2716 __ CompareImmediate(value_cid_reg, value_cid);
2717 }
2718 __ b(&ok, EQ);
2719
2720 // Check if the tracked state of the guarded field can be initialized
2721 // inline. If the field needs a length check, we fall through to the runtime,
2722 // which is responsible for computing the offset of the length field
2723 // based on the class id.
2724 // The length guard will be emitted separately when needed via the
2725 // GuardFieldLength instruction after GuardFieldClass.
2726 if (!field().needs_length_check()) {
2727 // An uninitialized field can be handled inline. Check if the
2728 // field is still uninitialized.
2729 __ ldr(IP, field_cid_operand);
2730 __ CompareImmediate(IP, kIllegalCid);
2731 __ b(fail, NE);
2732
2733 if (value_cid == kDynamicCid) {
2734 __ str(value_cid_reg, field_cid_operand);
2735 __ str(value_cid_reg, field_nullability_operand);
2736 } else {
2737 __ LoadImmediate(IP, value_cid);
2738 __ str(IP, field_cid_operand);
2739 __ str(IP, field_nullability_operand);
2740 }
2741
2742 __ b(&ok);
2743 }
2744
2745 if (deopt == nullptr) {
2746 __ Bind(fail);
2747
2748 __ ldr(IP, compiler::FieldAddress(
2749 field_reg, compiler::target::Field::guarded_cid_offset()));
2750 __ CompareImmediate(IP, kDynamicCid);
2751 __ b(&ok, EQ);
2752
2753 __ Push(field_reg);
2754 __ Push(value_reg);
2755 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2756 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2757 __ Drop(2); // Drop the field and the value.
2758 } else {
2759 __ b(fail);
2760 }
2761 } else {
2762 ASSERT(compiler->is_optimizing());
2763 ASSERT(deopt != nullptr);
2764
2765 // The field's guarded class has been initialized and is known.
2766 if (value_cid == kDynamicCid) {
2767 // Field's guarded class id is fixed but value's class id is not known.
2768 __ tst(value_reg, compiler::Operand(kSmiTagMask));
2769
2770 if (field_cid != kSmiCid) {
2771 __ b(fail, EQ);
2772 __ LoadClassId(value_cid_reg, value_reg);
2773 __ CompareImmediate(value_cid_reg, field_cid);
2774 }
2775
2776 if (field().is_nullable() && (field_cid != kNullCid)) {
2777 __ b(&ok, EQ);
2778 if (field_cid != kSmiCid) {
2779 __ CompareImmediate(value_cid_reg, kNullCid);
2780 } else {
2781 __ CompareObject(value_reg, Object::null_object());
2782 }
2783 }
2784 __ b(fail, NE);
2785 } else if (value_cid == field_cid) {
2786 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2787 // may sometimes produce this situation after the last Canonicalize pass.
2788 } else {
2789 // Both the value's and the field's class ids are known.
2790 ASSERT(value_cid != nullability);
2791 __ b(fail);
2792 }
2793 }
2794 __ Bind(&ok);
2795}
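// Conceptual model of the class guard above (illustrative; the authoritative
// transition is performed by the UpdateFieldCid runtime entry used on the
// slow path):
//   if (cid(value) == field.guarded_cid) ok;
//   else if (field.guarded_cid == kIllegalCid) field.guarded_cid = cid(value);
//   else deoptimize or call the runtime, which may widen the guard to
//        kDynamicCid.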
2796
2797LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2798 bool opt) const {
2799 const intptr_t kNumInputs = 1;
2800 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2801 const intptr_t kNumTemps = 3;
2802 LocationSummary* summary = new (zone)
2803 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2804 summary->set_in(0, Location::RequiresRegister());
2805 // We need temporaries for field object, length offset and expected length.
2806 summary->set_temp(0, Location::RequiresRegister());
2807 summary->set_temp(1, Location::RequiresRegister());
2808 summary->set_temp(2, Location::RequiresRegister());
2809 return summary;
2810 } else {
2811 // TODO(vegorov): can use TMP when length is small enough to fit into
2812 // immediate.
2813 const intptr_t kNumTemps = 1;
2814 LocationSummary* summary = new (zone)
2815 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2816 summary->set_in(0, Location::RequiresRegister());
2817 summary->set_temp(0, Location::RequiresRegister());
2818 return summary;
2819 }
2820 UNREACHABLE();
2821}
2822
2823void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2824 if (field().guarded_list_length() == Field::kNoFixedLength) {
2825 return; // Nothing to emit.
2826 }
2827
2828 compiler::Label* deopt =
2829 compiler->is_optimizing()
2830 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2831 : nullptr;
2832
2833 const Register value_reg = locs()->in(0).reg();
2834
2835 if (!compiler->is_optimizing() ||
2836 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2837 const Register field_reg = locs()->temp(0).reg();
2838 const Register offset_reg = locs()->temp(1).reg();
2839 const Register length_reg = locs()->temp(2).reg();
2840
2841 compiler::Label ok;
2842
2843 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2844
2845 __ ldrsb(offset_reg,
2846 compiler::FieldAddress(
2847 field_reg, compiler::target::Field::
2848 guarded_list_length_in_object_offset_offset()));
2849 __ ldr(
2850 length_reg,
2851 compiler::FieldAddress(
2852 field_reg, compiler::target::Field::guarded_list_length_offset()));
2853
2854 __ tst(offset_reg, compiler::Operand(offset_reg));
2855 __ b(&ok, MI);
2856
2857 // Load the length from the value. GuardFieldClass already verified that the
2858 // value's class matches the guarded class id of the field.
2859 // offset_reg contains the offset already corrected by -kHeapObjectTag, which
2860 // is why we use Address instead of FieldAddress.
2861 __ ldr(IP, compiler::Address(value_reg, offset_reg));
2862 __ cmp(length_reg, compiler::Operand(IP));
2863
2864 if (deopt == nullptr) {
2865 __ b(&ok, EQ);
2866
2867 __ Push(field_reg);
2868 __ Push(value_reg);
2869 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2870 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2871 __ Drop(2); // Drop the field and the value.
2872 } else {
2873 __ b(deopt, NE);
2874 }
2875
2876 __ Bind(&ok);
2877 } else {
2878 ASSERT(compiler->is_optimizing());
2879 ASSERT(field().guarded_list_length() >= 0);
2880 ASSERT(field().guarded_list_length_in_object_offset() !=
2882
2883 const Register length_reg = locs()->temp(0).reg();
2884
2885 __ ldr(length_reg,
2886 compiler::FieldAddress(
2887 value_reg, field().guarded_list_length_in_object_offset()));
2888 __ CompareImmediate(
2889 length_reg, compiler::target::ToRawSmi(field().guarded_list_length()));
2890 __ b(deopt, NE);
2891 }
2892}
2893
2894DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2895
2896LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2897 bool opt) const {
2898 const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
2899 const intptr_t kNumInputs = 2;
2900 const intptr_t kNumTemps = might_box ? 2 : 0;
2901 LocationSummary* summary = new (zone) LocationSummary(
2902 zone, kNumInputs, kNumTemps,
2903 might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
2904 summary->set_in(0, Location::RequiresRegister());
2905 summary->set_in(1, Location::RequiresRegister());
2906
2907 if (might_box) {
2908 summary->set_temp(0, Location::RequiresRegister());
2909 summary->set_temp(1, Location::RequiresRegister());
2910 }
2911
2912 if (representation() == kUnboxedInt64) {
2913 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
2915 } else {
2916 ASSERT(representation() == kTagged);
2917 summary->set_out(0, Location::RequiresRegister());
2918 }
2919
2920 return summary;
2921}
2922
2923void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2924 // The string register points to the backing store for external strings.
2925 const Register str = locs()->in(0).reg();
2926 const Location index = locs()->in(1);
2927
2928 compiler::Address element_address = __ ElementAddressForRegIndex(
2929 true, IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false,
2930 str, index.reg());
2931 // Warning: element_address may use register IP as base.
2932
2933 if (representation() == kUnboxedInt64) {
2934 ASSERT(compiler->is_optimizing());
2935 ASSERT(locs()->out(0).IsPairLocation());
2936 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2937 Register result1 = result_pair->At(0).reg();
2938 Register result2 = result_pair->At(1).reg();
2939 switch (class_id()) {
2940 case kOneByteStringCid:
2941 ASSERT(element_count() == 4);
2942 __ ldr(result1, element_address);
2943 __ eor(result2, result2, compiler::Operand(result2));
2944 break;
2945 case kTwoByteStringCid:
2946 ASSERT(element_count() == 2);
2947 __ ldr(result1, element_address);
2948 __ eor(result2, result2, compiler::Operand(result2));
2949 break;
2950 default:
2951 UNREACHABLE();
2952 }
2953 } else {
2954 ASSERT(representation() == kTagged);
2955 Register result = locs()->out(0).reg();
2956 switch (class_id()) {
2957 case kOneByteStringCid:
2958 switch (element_count()) {
2959 case 1:
2960 __ ldrb(result, element_address);
2961 break;
2962 case 2:
2963 __ ldrh(result, element_address);
2964 break;
2965 case 4:
2966 __ ldr(result, element_address);
2967 break;
2968 default:
2969 UNREACHABLE();
2970 }
2971 break;
2972 case kTwoByteStringCid:
2973 switch (element_count()) {
2974 case 1:
2975 __ ldrh(result, element_address);
2976 break;
2977 case 2:
2978 __ ldr(result, element_address);
2979 break;
2980 default:
2981 UNREACHABLE();
2982 }
2983 break;
2984 default:
2985 UNREACHABLE();
2986 break;
2987 }
2988 if (can_pack_into_smi()) {
2989 __ SmiTag(result);
2990 } else {
2991 // If the value cannot fit in a smi then allocate a mint box for it.
2992 Register value = locs()->temp(0).reg();
2993 Register temp = locs()->temp(1).reg();
2994 // Value register needs to be manually preserved on allocation slow-path.
2995 locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
2996
2997 ASSERT(result != value);
2998 __ MoveRegister(value, result);
2999 __ SmiTag(result);
3000
3001 compiler::Label done;
3002 __ TestImmediate(value, 0xC0000000);
3003 __ b(&done, EQ);
3005 result, temp);
3006 __ eor(temp, temp, compiler::Operand(temp));
3007 __ StoreFieldToOffset(value, result,
3008 compiler::target::Mint::value_offset());
3009 __ StoreFieldToOffset(
3010 temp, result,
3011 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
3012 __ Bind(&done);
3013 }
3014 }
3015}
3016
3017LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
3018 bool opt) const {
3019 const intptr_t kNumInputs = 1;
3020 const intptr_t kNumTemps = 1;
3021 LocationSummary* locs = new (zone)
3022 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3023 locs->set_in(0, Location::RequiresRegister());
3024 locs->set_temp(0, Location::RequiresRegister());
3025 return locs;
3026}
3027
3028void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3029 const Register value = locs()->in(0).reg();
3030 const Register temp = locs()->temp(0).reg();
3031
3032 compiler->used_static_fields().Add(&field());
3033
3034 __ LoadFromOffset(temp, THR,
3035 compiler::target::Thread::field_table_values_offset());
3036 // Note: static field ids won't be changed by hot-reload.
3037 __ StoreToOffset(value, temp,
3038 compiler::target::FieldTable::OffsetOf(field()));
3039}
3040
3041LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
3042 bool opt) const {
3043 const intptr_t kNumInputs = 3;
3044 const intptr_t kNumTemps = 0;
3045 LocationSummary* summary = new (zone)
3046 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3048 summary->set_in(1, Location::RegisterLocation(
3050 summary->set_in(
3052 summary->set_out(0, Location::RegisterLocation(R0));
3053 return summary;
3054}
3055
3056void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3057 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
3058 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
3059 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
3060
3061 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
3062 ASSERT(locs()->out(0).reg() == R0);
3063}
3064
3065LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
3066 bool opt) const {
3067 const intptr_t kNumInputs = 2;
3068 const intptr_t kNumTemps = 0;
3069 LocationSummary* locs = new (zone)
3070 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3076 return locs;
3077}
3078
3079// Inlines array allocation for known constant values.
3080static void InlineArrayAllocation(FlowGraphCompiler* compiler,
3081 intptr_t num_elements,
3082 compiler::Label* slow_path,
3083 compiler::Label* done) {
3084 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
3085 const intptr_t instance_size = Array::InstanceSize(num_elements);
3086
3087 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
3088 AllocateArrayABI::kResultReg, // instance
3089 R3, // end address
3090 R8, R6);
3091 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
3092 // R3: new object end address.
3093
3094 // Store the type argument field.
3095 __ StoreIntoObjectNoBarrier(
3097 compiler::FieldAddress(AllocateArrayABI::kResultReg,
3098 compiler::target::Array::type_arguments_offset()),
3100
3101 // Set the length field.
3102 __ StoreIntoObjectNoBarrier(
3104 compiler::FieldAddress(AllocateArrayABI::kResultReg,
3105 compiler::target::Array::length_offset()),
3107
3108 // Initialize all array elements to raw_null.
3109 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
3110 // R3: new object end address.
3111 // R6: iterator which initially points to the start of the variable
3112 // data area to be initialized.
3113 // R8: null
3114 if (num_elements > 0) {
3115 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
3116 __ LoadObject(R8, Object::null_object());
3117 if (num_elements >= 2) {
3118 __ mov(R9, compiler::Operand(R8));
3119 } else {
3120#if defined(DEBUG)
3121 // Clobber R9 with an invalid pointer.
3122 __ LoadImmediate(R9, 0x1);
3123#endif // DEBUG
3124 }
3125 __ AddImmediate(R6, AllocateArrayABI::kResultReg,
3126 sizeof(UntaggedArray) - kHeapObjectTag);
3127 if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
3128 __ InitializeFieldsNoBarrierUnrolled(
3130 num_elements * compiler::target::kWordSize, R8, R9);
3131 } else {
3132 __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R6, R3, R8,
3133 R9);
3134 }
3135 }
3136 __ b(done);
3137}
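// Illustrative arithmetic for the unrolled-vs-loop choice above: with a
// 4-byte word, kInlineArraySize * kWordSize == 48 bytes, so arrays whose
// payload is smaller than 12 words are initialized with the unrolled store
// sequence and larger ones with the InitializeFieldsNoBarrier loop.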
3138
3139void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3140 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
3141 if (type_usage_info != nullptr) {
3142 const Class& list_class =
3143 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
3144 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
3145 type_arguments()->definition());
3146 }
3147
3148 compiler::Label slow_path, done;
3149 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3150 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
3151 num_elements()->BindsToConstant() &&
3152 compiler::target::IsSmi(num_elements()->BoundConstant())) {
3153 const intptr_t length =
3154 compiler::target::SmiValue(num_elements()->BoundConstant());
3156 InlineArrayAllocation(compiler, length, &slow_path, &done);
3157 }
3158 }
3159 }
3160
3161 __ Bind(&slow_path);
3162 auto object_store = compiler->isolate_group()->object_store();
3163 const auto& allocate_array_stub =
3164 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
3165 compiler->GenerateStubCall(source(), allocate_array_stub,
3166 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3167 env());
3168 __ Bind(&done);
3169}
3170
3172 Zone* zone,
3173 bool opt) const {
3174 ASSERT(opt);
3175 const intptr_t kNumInputs = 0;
3176 const intptr_t kNumTemps = 3;
3177 LocationSummary* locs = new (zone) LocationSummary(
3178 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3183 return locs;
3184}
3185
3186class AllocateContextSlowPath
3187 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
3188 public:
3189 explicit AllocateContextSlowPath(
3190 AllocateUninitializedContextInstr* instruction)
3191 : TemplateSlowPathCode(instruction) {}
3192
3193 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3194 __ Comment("AllocateContextSlowPath");
3195 __ Bind(entry_label());
3196
3197 LocationSummary* locs = instruction()->locs();
3198 locs->live_registers()->Remove(locs->out(0));
3199
3200 compiler->SaveLiveRegisters(locs);
3201
3202 auto slow_path_env = compiler->SlowPathEnvironmentFor(
3203 instruction(), /*num_slow_path_args=*/0);
3204 ASSERT(slow_path_env != nullptr);
3205
3206 auto object_store = compiler->isolate_group()->object_store();
3207 const auto& allocate_context_stub = Code::ZoneHandle(
3208 compiler->zone(), object_store->allocate_context_stub());
3209 __ LoadImmediate(R1, instruction()->num_context_variables());
3210 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
3211 UntaggedPcDescriptors::kOther, locs,
3212 instruction()->deopt_id(), slow_path_env);
3213 ASSERT(instruction()->locs()->out(0).reg() == R0);
3214 compiler->RestoreLiveRegisters(instruction()->locs());
3215 __ b(exit_label());
3216 }
3217};
3218
3220 FlowGraphCompiler* compiler) {
3221 Register temp0 = locs()->temp(0).reg();
3222 Register temp1 = locs()->temp(1).reg();
3223 Register temp2 = locs()->temp(2).reg();
3224 Register result = locs()->out(0).reg();
3225 // Try to allocate the object.
3226 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
3227 compiler->AddSlowPathCode(slow_path);
3228 intptr_t instance_size = Context::InstanceSize(num_context_variables());
3229
3230 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3231 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
3232 result, // instance
3233 temp0, temp1, temp2);
3234
3235 // Set up the number of context variables field.
3236 __ LoadImmediate(temp0, num_context_variables());
3237 __ str(temp0,
3238 compiler::FieldAddress(
3239 result, compiler::target::Context::num_variables_offset()));
3240 } else {
3241 __ Jump(slow_path->entry_label());
3242 }
3243
3244 __ Bind(slow_path->exit_label());
3245}
3246
3247LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
3248 bool opt) const {
3249 const intptr_t kNumInputs = 0;
3250 const intptr_t kNumTemps = 1;
3251 LocationSummary* locs = new (zone)
3252 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3255 return locs;
3256}
3257
3258void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3259 ASSERT(locs()->temp(0).reg() == R1);
3260 ASSERT(locs()->out(0).reg() == R0);
3261
3262 auto object_store = compiler->isolate_group()->object_store();
3263 const auto& allocate_context_stub =
3264 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
3265 __ LoadImmediate(R1, num_context_variables());
3266 compiler->GenerateStubCall(source(), allocate_context_stub,
3267 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3268 env());
3269}
3270
3271LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
3272 bool opt) const {
3273 const intptr_t kNumInputs = 1;
3274 const intptr_t kNumTemps = 0;
3275 LocationSummary* locs = new (zone)
3276 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3277 locs->set_in(0, Location::RegisterLocation(R4));
3278 locs->set_out(0, Location::RegisterLocation(R0));
3279 return locs;
3280}
3281
3282void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3283 ASSERT(locs()->in(0).reg() == R4);
3284 ASSERT(locs()->out(0).reg() == R0);
3285
3286 auto object_store = compiler->isolate_group()->object_store();
3287 const auto& clone_context_stub =
3288 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
3289 compiler->GenerateStubCall(source(), clone_context_stub,
3290 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
3291 deopt_id(), env());
3292}
3293
3294LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
3295 bool opt) const {
3296 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
3297}
3298
3299void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3300 __ Bind(compiler->GetJumpLabel(this));
3301 compiler->AddExceptionHandler(this);
3302 if (HasParallelMove()) {
3303 parallel_move()->EmitNativeCode(compiler);
3304 }
3305
3306 // Restore SP from FP as we are coming from a throw and the code for
3307 // popping arguments has not been run.
3308 const intptr_t fp_sp_dist =
3309 (compiler::target::frame_layout.first_local_from_fp + 1 -
3310 compiler->StackSize()) *
3311 compiler::target::kWordSize;
3312 ASSERT(fp_sp_dist <= 0);
3313 __ AddImmediate(SP, FP, fp_sp_dist);
3314
3315 if (!compiler->is_optimizing()) {
3316 if (raw_exception_var_ != nullptr) {
3317 __ StoreToOffset(
3319 compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
3320 }
3321 if (raw_stacktrace_var_ != nullptr) {
3322 __ StoreToOffset(
3324 compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
3325 }
3326 }
3327}
3328
3329LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
3330 bool opt) const {
3331 const intptr_t kNumInputs = 0;
3332 const intptr_t kNumTemps = 2;
3333 const bool using_shared_stub = UseSharedSlowPathStub(opt);
3334 LocationSummary* summary = new (zone)
3335 LocationSummary(zone, kNumInputs, kNumTemps,
3336 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
3337 : LocationSummary::kCallOnSlowPath);
3338 summary->set_temp(0, Location::RequiresRegister());
3339 summary->set_temp(1, Location::RequiresRegister());
3340 return summary;
3341}
3342
3343class CheckStackOverflowSlowPath
3344 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
3345 public:
3346 static constexpr intptr_t kNumSlowPathArgs = 0;
3347
3348 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
3349 : TemplateSlowPathCode(instruction) {}
3350
3351 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3352 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
3353 const Register value = instruction()->locs()->temp(0).reg();
3354 __ Comment("CheckStackOverflowSlowPathOsr");
3355 __ Bind(osr_entry_label());
3356 __ LoadImmediate(value, Thread::kOsrRequest);
3357 __ str(value,
3358 compiler::Address(
3359 THR, compiler::target::Thread::stack_overflow_flags_offset()));
3360 }
3361 __ Comment("CheckStackOverflowSlowPath");
3362 __ Bind(entry_label());
3363 const bool using_shared_stub =
3364 instruction()->locs()->call_on_shared_slow_path();
3365 if (!using_shared_stub) {
3366 compiler->SaveLiveRegisters(instruction()->locs());
3367 }
3368 // pending_deoptimization_env_ is needed to generate a runtime call that
3369 // may throw an exception.
3370 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
3371 Environment* env =
3372 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3373 compiler->pending_deoptimization_env_ = env;
3374
3375 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3376 if (using_shared_stub) {
3377 if (!has_frame) {
3378 ASSERT(__ constant_pool_allowed());
3379 __ set_constant_pool_allowed(false);
3380 __ EnterDartFrame(0);
3381 }
3382 const uword entry_point_offset = compiler::target::Thread::
3383 stack_overflow_shared_stub_entry_point_offset(
3384 instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
3385 __ Call(compiler::Address(THR, entry_point_offset));
3386 compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
3387 compiler->RecordCatchEntryMoves(env);
3388 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
3389 instruction()->deopt_id(),
3390 instruction()->source());
3391 if (!has_frame) {
3392 __ LeaveDartFrame();
3393 __ set_constant_pool_allowed(true);
3394 }
3395 } else {
3396 ASSERT(has_frame);
3397 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
3398 compiler->EmitCallsiteMetadata(
3399 instruction()->source(), instruction()->deopt_id(),
3400 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
3401 }
3402
3403 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
3404 instruction()->in_loop()) {
3405 // In unoptimized code, record loop stack checks as possible OSR entries.
3406 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
3407 instruction()->deopt_id(),
3408 InstructionSource());
3409 }
3410 compiler->pending_deoptimization_env_ = nullptr;
3411 if (!using_shared_stub) {
3412 compiler->RestoreLiveRegisters(instruction()->locs());
3413 }
3414 __ b(exit_label());
3415 }
3416
3417 compiler::Label* osr_entry_label() {
3418 ASSERT(IsolateGroup::Current()->use_osr());
3419 return &osr_entry_label_;
3420 }
3421
3422 private:
3423 compiler::Label osr_entry_label_;
3424};
3425
3426void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3427 __ ldr(IP, compiler::Address(THR,
3428 compiler::target::Thread::stack_limit_offset()));
3429 __ cmp(SP, compiler::Operand(IP));
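  // The conditional branches below use LS (unsigned lower-or-same), so the
  // slow path is entered when SP <= the stack limit stored on the current
  // Thread.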
3430
3431 auto object_store = compiler->isolate_group()->object_store();
3432 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
3433 const auto& stub = Code::ZoneHandle(
3434 compiler->zone(),
3435 live_fpu_regs
3436 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
3437 : object_store->stack_overflow_stub_without_fpu_regs_stub());
3438 const bool using_shared_stub = locs()->call_on_shared_slow_path();
3439 if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
3440 compiler->flow_graph().graph_entry()->NeedsFrame()) {
3441 __ GenerateUnRelocatedPcRelativeCall(LS);
3442 compiler->AddPcRelativeCallStubTarget(stub);
3443
3444 // We use the "extended" environment which has the locations updated to
3445 // reflect live registers being saved in the shared spilling stubs (see
3446 // the stub above).
3447 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
3448 compiler->EmitCallsiteMetadata(source(), deopt_id(),
3449 UntaggedPcDescriptors::kOther, locs(),
3450 extended_env);
3451 return;
3452 }
3453
3454 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3455 compiler->AddSlowPathCode(slow_path);
3456 __ b(slow_path->entry_label(), LS);
3457 if (compiler->CanOSRFunction() && in_loop()) {
3458 const Register function = locs()->temp(0).reg();
3459 const Register count = locs()->temp(1).reg();
3460 // In unoptimized code check the usage counter to trigger OSR at loop
3461 // stack checks. Use progressively higher thresholds for more deeply
3462 // nested loops to attempt to hit outer loops with OSR when possible.
3463 __ LoadObject(function, compiler->parsed_function().function());
3464 const intptr_t configured_optimization_counter_threshold =
3465 compiler->thread()->isolate_group()->optimization_counter_threshold();
3466 const int32_t threshold =
3467 configured_optimization_counter_threshold * (loop_depth() + 1);
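    // Illustration (assumed counter values): with a configured threshold of
    // 10, a depth-0 loop requests OSR once the function's usage counter
    // reaches 10, while a loop nested one level deeper waits until 20,
    // biasing OSR entry toward the outermost loop.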
3468 __ ldr(count,
3469 compiler::FieldAddress(
3470 function, compiler::target::Function::usage_counter_offset()));
3471 __ add(count, count, compiler::Operand(1));
3472 __ str(count,
3473 compiler::FieldAddress(
3474 function, compiler::target::Function::usage_counter_offset()));
3475 __ CompareImmediate(count, threshold);
3476 __ b(slow_path->osr_entry_label(), GE);
3477 }
3478 if (compiler->ForceSlowPathForStackOverflow()) {
3479 __ b(slow_path->entry_label());
3480 }
3481 __ Bind(slow_path->exit_label());
3482}
3483
3484static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3485 BinarySmiOpInstr* shift_left) {
3486 const LocationSummary& locs = *shift_left->locs();
3487 const Register left = locs.in(0).reg();
3488 const Register result = locs.out(0).reg();
3489 compiler::Label* deopt =
3490 shift_left->CanDeoptimize()
3491 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3492 ICData::kDeoptBinarySmiOp)
3493 : nullptr;
3494 if (locs.in(1).IsConstant()) {
3495 const Object& constant = locs.in(1).constant();
3496    ASSERT(compiler::target::IsSmi(constant));
3497    // Immediate shift operation takes 5 bits for the count.
3498 const intptr_t kCountLimit = 0x1F;
3499 const intptr_t value = compiler::target::SmiValue(constant);
3500 ASSERT((0 < value) && (value < kCountLimit));
3501 if (shift_left->can_overflow()) {
3502 // Check for overflow (preserve left).
3503 __ Lsl(IP, left, compiler::Operand(value));
3504 __ cmp(left, compiler::Operand(IP, ASR, value));
3505 __ b(deopt, NE); // Overflow.
3506 }
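    // The round trip (left << value) >> value equals left exactly when no
    // significant bits were shifted out; e.g. for a 2-bit shift the top
    // three bits of the tagged value must all be copies of the sign bit.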
3507    // Shift for result now that we know there is no overflow.
3508 __ Lsl(result, left, compiler::Operand(value));
3509 return;
3510 }
3511
3512 // Right (locs.in(1)) is not constant.
3513 const Register right = locs.in(1).reg();
3514 Range* right_range = shift_left->right_range();
3515 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3516 // TODO(srdjan): Implement code below for is_truncating().
3517 // If left is constant, we know the maximal allowed size for right.
3518 const Object& obj = shift_left->left()->BoundConstant();
3519 if (compiler::target::IsSmi(obj)) {
3520 const intptr_t left_int = compiler::target::SmiValue(obj);
3521 if (left_int == 0) {
3522 __ cmp(right, compiler::Operand(0));
3523 __ b(deopt, MI);
3524 __ mov(result, compiler::Operand(0));
3525 return;
3526 }
3527 const intptr_t max_right =
3528 compiler::target::kSmiBits - Utils::HighestBit(left_int);
3529 const bool right_needs_check =
3530 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3531 if (right_needs_check) {
3532 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(max_right)));
3533 __ b(deopt, CS);
3534 }
3535 __ SmiUntag(IP, right);
3536 __ Lsl(result, left, IP);
3537 }
3538 return;
3539 }
3540
3541 const bool right_needs_check =
3542 !RangeUtils::IsWithin(right_range, 0, (compiler::target::kSmiBits - 1));
3543 if (!shift_left->can_overflow()) {
3544 if (right_needs_check) {
3545 if (!RangeUtils::IsPositive(right_range)) {
3546 ASSERT(shift_left->CanDeoptimize());
3547 __ cmp(right, compiler::Operand(0));
3548 __ b(deopt, MI);
3549 }
3550
3551 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3552 compiler::target::kSmiBits)));
3553 __ mov(result, compiler::Operand(0), CS);
3554 __ SmiUntag(IP, right, CC); // SmiUntag right into IP if CC.
3555 __ Lsl(result, left, IP, CC);
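      // Predication keeps this path branch-free: if the tagged count is >=
      // kSmiBits the result is 0 (CS), otherwise the count is untagged and
      // the shift is performed under the CC condition.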
3556 } else {
3557 __ SmiUntag(IP, right);
3558 __ Lsl(result, left, IP);
3559 }
3560 } else {
3561 if (right_needs_check) {
3562 ASSERT(shift_left->CanDeoptimize());
3563 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3564 compiler::target::kSmiBits)));
3565 __ b(deopt, CS);
3566 }
3567 // Left is not a constant.
3568    // Check whether the shift count is too large to handle inline.
3569 __ SmiUntag(IP, right);
3570 // Overflow test (preserve left, right, and IP);
3571 const Register temp = locs.temp(0).reg();
3572 __ Lsl(temp, left, IP);
3573 __ cmp(left, compiler::Operand(temp, ASR, IP));
3574 __ b(deopt, NE); // Overflow.
3575    // Shift for result now that we know there is no overflow.
3576 __ Lsl(result, left, IP);
3577 }
3578}
3579
3580LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3581 bool opt) const {
3582 const intptr_t kNumInputs = 2;
3583 // Calculate number of temporaries.
3584 intptr_t num_temps = 0;
3585 if (op_kind() == Token::kTRUNCDIV) {
3586    if (RightIsPowerOfTwoConstant()) {
3587      num_temps = 1;
3588 } else {
3589 num_temps = 2;
3590 }
3591 } else if (op_kind() == Token::kMOD) {
3592 num_temps = 2;
3593 } else if (((op_kind() == Token::kSHL) && can_overflow()) ||
3594 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3595 num_temps = 1;
3596 }
3597 LocationSummary* summary = new (zone)
3598 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
3599 if (op_kind() == Token::kTRUNCDIV) {
3600 summary->set_in(0, Location::RequiresRegister());
3601    if (RightIsPowerOfTwoConstant()) {
3602      ConstantInstr* right_constant = right()->definition()->AsConstant();
3603 summary->set_in(1, Location::Constant(right_constant));
3604 summary->set_temp(0, Location::RequiresRegister());
3605 } else {
3606 summary->set_in(1, Location::RequiresRegister());
3607 summary->set_temp(0, Location::RequiresRegister());
3608 // Request register that overlaps with S0..S31.
3609 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
3610 }
3611 summary->set_out(0, Location::RequiresRegister());
3612 return summary;
3613 }
3614 if (op_kind() == Token::kMOD) {
3615 summary->set_in(0, Location::RequiresRegister());
3616 summary->set_in(1, Location::RequiresRegister());
3617 summary->set_temp(0, Location::RequiresRegister());
3618 // Request register that overlaps with S0..S31.
3619 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
3620 summary->set_out(0, Location::RequiresRegister());
3621 return summary;
3622 }
3623 summary->set_in(0, Location::RequiresRegister());
3624 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3625 if (((op_kind() == Token::kSHL) && can_overflow()) ||
3626 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3627 summary->set_temp(0, Location::RequiresRegister());
3628 }
3629  // Unlike on Intel, 3-operand instructions are available here, so the result
3630  // register does not need to be identical to the first input register.
3631 summary->set_out(0, Location::RequiresRegister());
3632 return summary;
3633}
3634
3635void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3636 if (op_kind() == Token::kSHL) {
3637 EmitSmiShiftLeft(compiler, this);
3638 return;
3639 }
3640
3641 const Register left = locs()->in(0).reg();
3642 const Register result = locs()->out(0).reg();
3643 compiler::Label* deopt = nullptr;
3644 if (CanDeoptimize()) {
3645 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3646 }
3647
3648 if (locs()->in(1).IsConstant()) {
3649 const Object& constant = locs()->in(1).constant();
3650    ASSERT(compiler::target::IsSmi(constant));
3651    const int32_t imm = compiler::target::ToRawSmi(constant);
3652 switch (op_kind()) {
3653 case Token::kADD: {
3654 if (deopt == nullptr) {
3655 __ AddImmediate(result, left, imm);
3656 } else {
3657 __ AddImmediateSetFlags(result, left, imm);
3658 __ b(deopt, VS);
3659 }
3660 break;
3661 }
3662 case Token::kSUB: {
3663 if (deopt == nullptr) {
3664 __ AddImmediate(result, left, -imm);
3665 } else {
3666 // Negating imm and using AddImmediateSetFlags would not detect the
3667 // overflow when imm == kMinInt32.
3668 __ SubImmediateSetFlags(result, left, imm);
3669 __ b(deopt, VS);
3670 }
3671 break;
3672 }
3673 case Token::kMUL: {
3674 // Keep left value tagged and untag right value.
3675 const intptr_t value = compiler::target::SmiValue(constant);
3676 if (deopt == nullptr) {
3677 __ LoadImmediate(IP, value);
3678 __ mul(result, left, IP);
3679 } else {
3680 __ LoadImmediate(IP, value);
3681 __ smull(result, IP, left, IP);
3682 // IP: result bits 32..63.
3683 __ cmp(IP, compiler::Operand(result, ASR, 31));
3684 __ b(deopt, NE);
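        // smull leaves the 64-bit product in (IP:result); it fits in 32 bits
        // exactly when the high word equals the sign-extension of the low
        // word, which is what the comparison against (result ASR 31) checks.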
3685 }
3686 break;
3687 }
3688 case Token::kTRUNCDIV: {
3689 const intptr_t value = compiler::target::SmiValue(constant);
3690 ASSERT(value != kIntptrMin);
3691        ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3692        const intptr_t shift_count =
3693            Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3694 ASSERT(kSmiTagSize == 1);
3695 __ mov(IP, compiler::Operand(left, ASR, 31));
3696 ASSERT(shift_count > 1); // 1, -1 case handled above.
3697 const Register temp = locs()->temp(0).reg();
3698 __ add(temp, left, compiler::Operand(IP, LSR, 32 - shift_count));
3699 ASSERT(shift_count > 0);
3700 __ mov(result, compiler::Operand(temp, ASR, shift_count));
3701 if (value < 0) {
3702 __ rsb(result, result, compiler::Operand(0));
3703 }
3704 __ SmiTag(result);
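        // Standard round-toward-zero division by a power of two (shown on
        // untagged values): adding divisor-1 only for negative dividends
        // makes the arithmetic shift truncate toward zero, e.g.
        // -7 / 4 = (-7 + 3) >> 2 = -1 rather than -2. Here the bias is
        // applied to the tagged Smi, with shift_count covering the tag bit.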
3705 break;
3706 }
3707 case Token::kBIT_AND: {
3708 // No overflow check.
3709 compiler::Operand o;
3710 if (compiler::Operand::CanHold(imm, &o)) {
3711 __ and_(result, left, o);
3712 } else if (compiler::Operand::CanHold(~imm, &o)) {
3713 __ bic(result, left, o);
3714 } else {
3715 __ LoadImmediate(IP, imm);
3716 __ and_(result, left, compiler::Operand(IP));
3717 }
3718 break;
3719 }
3720 case Token::kBIT_OR: {
3721 // No overflow check.
3722 compiler::Operand o;
3723 if (compiler::Operand::CanHold(imm, &o)) {
3724 __ orr(result, left, o);
3725 } else {
3726 __ LoadImmediate(IP, imm);
3727 __ orr(result, left, compiler::Operand(IP));
3728 }
3729 break;
3730 }
3731 case Token::kBIT_XOR: {
3732 // No overflow check.
3733 compiler::Operand o;
3734 if (compiler::Operand::CanHold(imm, &o)) {
3735 __ eor(result, left, o);
3736 } else {
3737 __ LoadImmediate(IP, imm);
3738 __ eor(result, left, compiler::Operand(IP));
3739 }
3740 break;
3741 }
3742 case Token::kSHR: {
3743 // sarl operation masks the count to 5 bits.
3744 const intptr_t kCountLimit = 0x1F;
3745 intptr_t value = compiler::target::SmiValue(constant);
3746 __ Asr(result, left,
3747 compiler::Operand(
3748 Utils::Minimum(value + kSmiTagSize, kCountLimit)));
3749 __ SmiTag(result);
3750 break;
3751 }
3752 case Token::kUSHR: {
3753 const intptr_t value = compiler::target::SmiValue(constant);
3754 ASSERT((value > 0) && (value < 64));
3755 COMPILE_ASSERT(compiler::target::kSmiBits < 32);
3756 // 64-bit representation of left operand value:
3757 //
3758 // ss...sssss s s xxxxxxxxxxxxx
3759 // | | | | | |
3760 // 63 32 31 30 kSmiBits-1 0
3761 //
3762 // Where 's' is a sign bit.
3763 //
3764 // If left operand is negative (sign bit is set), then
3765 // result will fit into Smi range if and only if
3766 // the shift amount >= 64 - kSmiBits.
3767 //
3768 // If left operand is non-negative, the result always
3769 // fits into Smi range.
3770 //
3771 if (value < (64 - compiler::target::kSmiBits)) {
3772 if (deopt != nullptr) {
3773 __ CompareImmediate(left, 0);
3774 __ b(deopt, LT);
3775 } else {
3776 // Operation cannot overflow only if left value is always
3777 // non-negative.
3778 ASSERT(!can_overflow());
3779 }
3780 // At this point left operand is non-negative, so unsigned shift
3781 // can't overflow.
3782 if (value >= compiler::target::kSmiBits) {
3783 __ LoadImmediate(result, 0);
3784 } else {
3785 __ Lsr(result, left, compiler::Operand(value + kSmiTagSize));
3786 __ SmiTag(result);
3787 }
3788 } else {
3789 // Shift amount > 32, and the result is guaranteed to fit into Smi.
3790 // Low (Smi) part of the left operand is shifted out.
3791 // High part is filled with sign bits.
3792 __ Asr(result, left, compiler::Operand(31));
3793 __ Lsr(result, result, compiler::Operand(value - 32));
3794 __ SmiTag(result);
3795 }
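        // Worked example (assuming kSmiBits == 30 on this 32-bit target): a
        // shift by 40 takes this branch. For a negative operand the low 32
        // bits are shifted out entirely, so the answer is the sign word
        // (left ASR 31) logically shifted right by 40 - 32 = 8, i.e.
        // 0xFFFFFF, which fits in Smi range.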
3796 break;
3797 }
3798
3799 default:
3800 UNREACHABLE();
3801 break;
3802 }
3803 return;
3804 }
3805
3806 const Register right = locs()->in(1).reg();
3807 switch (op_kind()) {
3808 case Token::kADD: {
3809 if (deopt == nullptr) {
3810 __ add(result, left, compiler::Operand(right));
3811 } else {
3812 __ adds(result, left, compiler::Operand(right));
3813 __ b(deopt, VS);
3814 }
3815 break;
3816 }
3817 case Token::kSUB: {
3818 if (deopt == nullptr) {
3819 __ sub(result, left, compiler::Operand(right));
3820 } else {
3821 __ subs(result, left, compiler::Operand(right));
3822 __ b(deopt, VS);
3823 }
3824 break;
3825 }
3826 case Token::kMUL: {
3827 __ SmiUntag(IP, left);
3828 if (deopt == nullptr) {
3829 __ mul(result, IP, right);
3830 } else {
3831 __ smull(result, IP, IP, right);
3832 // IP: result bits 32..63.
3833 __ cmp(IP, compiler::Operand(result, ASR, 31));
3834 __ b(deopt, NE);
3835 }
3836 break;
3837 }
3838 case Token::kBIT_AND: {
3839 // No overflow check.
3840 __ and_(result, left, compiler::Operand(right));
3841 break;
3842 }
3843 case Token::kBIT_OR: {
3844 // No overflow check.
3845 __ orr(result, left, compiler::Operand(right));
3846 break;
3847 }
3848 case Token::kBIT_XOR: {
3849 // No overflow check.
3850 __ eor(result, left, compiler::Operand(right));
3851 break;
3852 }
3853 case Token::kTRUNCDIV: {
3854      if (RangeUtils::CanBeZero(right_range())) {
3855        // Handle divide by zero in runtime.
3856 __ cmp(right, compiler::Operand(0));
3857 __ b(deopt, EQ);
3858 }
3859 const Register temp = locs()->temp(0).reg();
3860 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3861 __ SmiUntag(temp, left);
3862 __ SmiUntag(IP, right);
3863 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3864
3865 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3866 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3867 // case we cannot tag the result.
3868 __ CompareImmediate(result, 0x40000000);
3869 __ b(deopt, EQ);
3870 }
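      // 0x40000000 is -kSmiMin: dividing the smallest Smi (-0x40000000) by
      // -1 yields 0x40000000, one past kSmiMax, which cannot be retagged,
      // so that single case deoptimizes.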
3871 __ SmiTag(result);
3872 break;
3873 }
3874 case Token::kMOD: {
3875      if (RangeUtils::CanBeZero(right_range())) {
3876        // Handle divide by zero in runtime.
3877 __ cmp(right, compiler::Operand(0));
3878 __ b(deopt, EQ);
3879 }
3880 const Register temp = locs()->temp(0).reg();
3881 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3882 __ SmiUntag(temp, left);
3883 __ SmiUntag(IP, right);
3884 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3885 __ SmiUntag(IP, right);
3886 __ mls(result, IP, result, temp); // result <- left - right * result
3887 __ SmiTag(result);
3888 // res = left % right;
3889 // if (res < 0) {
3890 // if (right < 0) {
3891 // res = res - right;
3892 // } else {
3893 // res = res + right;
3894 // }
3895 // }
3896 compiler::Label done;
3897 __ cmp(result, compiler::Operand(0));
3898 __ b(&done, GE);
3899 // Result is negative, adjust it.
3900 __ cmp(right, compiler::Operand(0));
3901 __ sub(result, result, compiler::Operand(right), LT);
3902 __ add(result, result, compiler::Operand(right), GE);
3903 __ Bind(&done);
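      // Example (untagged values): left = -7, right = 3. IntegerDivide
      // truncates to -2, mls gives -7 - 3*(-2) = -1, and since the remainder
      // is negative and right is positive the adjustment adds right,
      // yielding 2, the non-negative result Dart's % requires.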
3904 break;
3905 }
3906 case Token::kSHR: {
3907 if (CanDeoptimize()) {
3908 __ CompareImmediate(right, 0);
3909 __ b(deopt, LT);
3910 }
3911 __ SmiUntag(IP, right);
3912 // sarl operation masks the count to 5 bits.
3913 const intptr_t kCountLimit = 0x1F;
3914 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3915 __ CompareImmediate(IP, kCountLimit);
3916 __ LoadImmediate(IP, kCountLimit, GT);
3917 }
3918 const Register temp = locs()->temp(0).reg();
3919 __ SmiUntag(temp, left);
3920 __ Asr(result, temp, IP);
3921 __ SmiTag(result);
3922 break;
3923 }
3924 case Token::kUSHR: {
3925 compiler::Label done;
3926 __ SmiUntag(IP, right);
3927 // 64-bit representation of left operand value:
3928 //
3929 // ss...sssss s s xxxxxxxxxxxxx
3930 // | | | | | |
3931 // 63 32 31 30 kSmiBits-1 0
3932 //
3933 // Where 's' is a sign bit.
3934 //
3935 // If left operand is negative (sign bit is set), then
3936 // result will fit into Smi range if and only if
3937 // the shift amount >= 64 - kSmiBits.
3938 //
3939 // If left operand is non-negative, the result always
3940 // fits into Smi range.
3941 //
3942      if (!RangeUtils::OnlyLessThanOrEqualTo(
3943              right_range(), 64 - compiler::target::kSmiBits - 1)) {
3944        if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3945                                               kBitsPerInt64 - 1)) {
3946 __ CompareImmediate(IP, kBitsPerInt64);
3947 // If shift amount >= 64, then result is 0.
3948 __ LoadImmediate(result, 0, GE);
3949 __ b(&done, GE);
3950 }
3951 __ CompareImmediate(IP, 64 - compiler::target::kSmiBits);
3952 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3953 // Result is guaranteed to fit into Smi range.
3954 // Low (Smi) part of the left operand is shifted out.
3955 // High part is filled with sign bits.
3956 __ sub(IP, IP, compiler::Operand(32), GE);
3957 __ Asr(result, left, compiler::Operand(31), GE);
3958 __ Lsr(result, result, IP, GE);
3959 __ SmiTag(result, GE);
3960 __ b(&done, GE);
3961 }
3962 // Shift amount < 64 - kSmiBits.
3963 // If left is negative, then result will not fit into Smi range.
3964 // Also deopt in case of negative shift amount.
3965 if (deopt != nullptr) {
3966 __ tst(left, compiler::Operand(left));
3967 __ tst(right, compiler::Operand(right), PL);
3968 __ b(deopt, MI);
3969 } else {
3970 ASSERT(!can_overflow());
3971 }
3972 // At this point left operand is non-negative, so unsigned shift
3973 // can't overflow.
3974      if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3975                                             compiler::target::kSmiBits - 1)) {
3976 __ CompareImmediate(IP, compiler::target::kSmiBits);
3977 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3978 __ LoadImmediate(result, 0, GE);
3979 __ b(&done, GE);
3980 }
3981 // Left operand >= 0, shift amount < kSmiBits < 32.
3982 const Register temp = locs()->temp(0).reg();
3983 __ SmiUntag(temp, left);
3984 __ Lsr(result, temp, IP);
3985 __ SmiTag(result);
3986 __ Bind(&done);
3987 break;
3988 }
3989 case Token::kDIV: {
3990 // Dispatches to 'Double./'.
3991 // TODO(srdjan): Implement as conversion to double and double division.
3992 UNREACHABLE();
3993 break;
3994 }
3995 case Token::kOR:
3996 case Token::kAND: {
3997 // Flow graph builder has dissected this operation to guarantee correct
3998 // behavior (short-circuit evaluation).
3999 UNREACHABLE();
4000 break;
4001 }
4002 default:
4003 UNREACHABLE();
4004 break;
4005 }
4006}
4007
4008static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
4009 BinaryInt32OpInstr* shift_left) {
4010 const LocationSummary& locs = *shift_left->locs();
4011 const Register left = locs.in(0).reg();
4012 const Register result = locs.out(0).reg();
4013 compiler::Label* deopt =
4014 shift_left->CanDeoptimize()
4015 ? compiler->AddDeoptStub(shift_left->deopt_id(),
4016 ICData::kDeoptBinarySmiOp)
4017 : nullptr;
4018 ASSERT(locs.in(1).IsConstant());
4019 const Object& constant = locs.in(1).constant();
4020  ASSERT(compiler::target::IsSmi(constant));
4021  // Immediate shift operation takes 5 bits for the count.
4022 const intptr_t kCountLimit = 0x1F;
4023 const intptr_t value = compiler::target::SmiValue(constant);
4024 ASSERT((0 < value) && (value < kCountLimit));
4025 if (shift_left->can_overflow()) {
4026 // Check for overflow (preserve left).
4027 __ Lsl(IP, left, compiler::Operand(value));
4028 __ cmp(left, compiler::Operand(IP, ASR, value));
4029 __ b(deopt, NE); // Overflow.
4030 }
4031  // Shift for result now that we know there is no overflow.
4032 __ Lsl(result, left, compiler::Operand(value));
4033}
4034
4035LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
4036 bool opt) const {
4037 const intptr_t kNumInputs = 2;
4038 // Calculate number of temporaries.
4039 intptr_t num_temps = 0;
4040 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4041 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
4042 num_temps = 1;
4043 }
4044 LocationSummary* summary = new (zone)
4045 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
4046 summary->set_in(0, Location::RequiresRegister());
4047 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
4048 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4049 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
4050 summary->set_temp(0, Location::RequiresRegister());
4051 }
4052  // Unlike on Intel, 3-operand instructions are available here, so the result
4053  // register does not need to be identical to the first input register.
4054 summary->set_out(0, Location::RequiresRegister());
4055 return summary;
4056}
4057
4058void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4059 if (op_kind() == Token::kSHL) {
4060 EmitInt32ShiftLeft(compiler, this);
4061 return;
4062 }
4063
4064 const Register left = locs()->in(0).reg();
4065 const Register result = locs()->out(0).reg();
4066 compiler::Label* deopt = nullptr;
4067 if (CanDeoptimize()) {
4068 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
4069 }
4070
4071 if (locs()->in(1).IsConstant()) {
4072 const Object& constant = locs()->in(1).constant();
4073    ASSERT(compiler::target::IsSmi(constant));
4074    const intptr_t value = compiler::target::SmiValue(constant);
4075 switch (op_kind()) {
4076 case Token::kADD: {
4077 if (deopt == nullptr) {
4078 __ AddImmediate(result, left, value);
4079 } else {
4080 __ AddImmediateSetFlags(result, left, value);
4081 __ b(deopt, VS);
4082 }
4083 break;
4084 }
4085 case Token::kSUB: {
4086 if (deopt == nullptr) {
4087 __ AddImmediate(result, left, -value);
4088 } else {
4089 // Negating value and using AddImmediateSetFlags would not detect the
4090 // overflow when value == kMinInt32.
4091 __ SubImmediateSetFlags(result, left, value);
4092 __ b(deopt, VS);
4093 }
4094 break;
4095 }
4096 case Token::kMUL: {
4097 if (deopt == nullptr) {
4098 __ LoadImmediate(IP, value);
4099 __ mul(result, left, IP);
4100 } else {
4101 __ LoadImmediate(IP, value);
4102 __ smull(result, IP, left, IP);
4103 // IP: result bits 32..63.
4104 __ cmp(IP, compiler::Operand(result, ASR, 31));
4105 __ b(deopt, NE);
4106 }
4107 break;
4108 }
4109 case Token::kBIT_AND: {
4110 // No overflow check.
4111 compiler::Operand o;
4112 if (compiler::Operand::CanHold(value, &o)) {
4113 __ and_(result, left, o);
4114 } else if (compiler::Operand::CanHold(~value, &o)) {
4115 __ bic(result, left, o);
4116 } else {
4117 __ LoadImmediate(IP, value);
4118 __ and_(result, left, compiler::Operand(IP));
4119 }
4120 break;
4121 }
4122 case Token::kBIT_OR: {
4123 // No overflow check.
4124 compiler::Operand o;
4125 if (compiler::Operand::CanHold(value, &o)) {
4126 __ orr(result, left, o);
4127 } else {
4128 __ LoadImmediate(IP, value);
4129 __ orr(result, left, compiler::Operand(IP));
4130 }
4131 break;
4132 }
4133 case Token::kBIT_XOR: {
4134 // No overflow check.
4135 compiler::Operand o;
4136 if (compiler::Operand::CanHold(value, &o)) {
4137 __ eor(result, left, o);
4138 } else {
4139 __ LoadImmediate(IP, value);
4140 __ eor(result, left, compiler::Operand(IP));
4141 }
4142 break;
4143 }
4144 case Token::kSHR: {
4145 // sarl operation masks the count to 5 bits.
4146 const intptr_t kCountLimit = 0x1F;
4147 __ Asr(result, left,
4148 compiler::Operand(Utils::Minimum(value, kCountLimit)));
4149 break;
4150 }
4151 case Token::kUSHR: {
4152 ASSERT((value > 0) && (value < 64));
4153 // 64-bit representation of left operand value:
4154 //
4155 // ss...sssss s xxxxxxxxxxxxx
4156 // | | | | |
4157 // 63 32 31 30 0
4158 //
4159 // Where 's' is a sign bit.
4160 //
4161 // If left operand is negative (sign bit is set), then
4162 // result will fit into Int32 range if and only if
4163 // the shift amount > 32.
4164 //
4165 if (value <= 32) {
4166 if (deopt != nullptr) {
4167 __ tst(left, compiler::Operand(left));
4168 __ b(deopt, MI);
4169 } else {
4170 // Operation cannot overflow only if left value is always
4171 // non-negative.
4172 ASSERT(!can_overflow());
4173 }
4174 // At this point left operand is non-negative, so unsigned shift
4175 // can't overflow.
4176 if (value == 32) {
4177 __ LoadImmediate(result, 0);
4178 } else {
4179 __ Lsr(result, left, compiler::Operand(value));
4180 }
4181 } else {
4182 // Shift amount > 32.
4183 // Low (Int32) part of the left operand is shifted out.
4184 // Shift high part which is filled with sign bits.
4185 __ Asr(result, left, compiler::Operand(31));
4186 __ Lsr(result, result, compiler::Operand(value - 32));
4187 }
4188 break;
4189 }
4190
4191 default:
4192 UNREACHABLE();
4193 break;
4194 }
4195 return;
4196 }
4197
4198 const Register right = locs()->in(1).reg();
4199 switch (op_kind()) {
4200 case Token::kADD: {
4201 if (deopt == nullptr) {
4202 __ add(result, left, compiler::Operand(right));
4203 } else {
4204 __ adds(result, left, compiler::Operand(right));
4205 __ b(deopt, VS);
4206 }
4207 break;
4208 }
4209 case Token::kSUB: {
4210 if (deopt == nullptr) {
4211 __ sub(result, left, compiler::Operand(right));
4212 } else {
4213 __ subs(result, left, compiler::Operand(right));
4214 __ b(deopt, VS);
4215 }
4216 break;
4217 }
4218 case Token::kMUL: {
4219 if (deopt == nullptr) {
4220 __ mul(result, left, right);
4221 } else {
4222 __ smull(result, IP, left, right);
4223 // IP: result bits 32..63.
4224 __ cmp(IP, compiler::Operand(result, ASR, 31));
4225 __ b(deopt, NE);
4226 }
4227 break;
4228 }
4229 case Token::kBIT_AND: {
4230 // No overflow check.
4231 __ and_(result, left, compiler::Operand(right));
4232 break;
4233 }
4234 case Token::kBIT_OR: {
4235 // No overflow check.
4236 __ orr(result, left, compiler::Operand(right));
4237 break;
4238 }
4239 case Token::kBIT_XOR: {
4240 // No overflow check.
4241 __ eor(result, left, compiler::Operand(right));
4242 break;
4243 }
4244 default:
4245 UNREACHABLE();
4246 break;
4247 }
4248}
4249
4250LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
4251 bool opt) const {
4252 intptr_t left_cid = left()->Type()->ToCid();
4253 intptr_t right_cid = right()->Type()->ToCid();
4254 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
4255 const intptr_t kNumInputs = 2;
4256 const intptr_t kNumTemps = 0;
4257 LocationSummary* summary = new (zone)
4258 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4259 summary->set_in(0, Location::RequiresRegister());
4260 summary->set_in(1, Location::RequiresRegister());
4261 return summary;
4262}
4263
4264void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4265 compiler::Label* deopt =
4266 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
4267 intptr_t left_cid = left()->Type()->ToCid();
4268 intptr_t right_cid = right()->Type()->ToCid();
4269 const Register left = locs()->in(0).reg();
4270 const Register right = locs()->in(1).reg();
4271 if (this->left()->definition() == this->right()->definition()) {
4272 __ tst(left, compiler::Operand(kSmiTagMask));
4273 } else if (left_cid == kSmiCid) {
4274 __ tst(right, compiler::Operand(kSmiTagMask));
4275 } else if (right_cid == kSmiCid) {
4276 __ tst(left, compiler::Operand(kSmiTagMask));
4277 } else {
4278 __ orr(IP, left, compiler::Operand(right));
4279 __ tst(IP, compiler::Operand(kSmiTagMask));
4280 }
4281 __ b(deopt, EQ);
4282}
4283
4284LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4285 const intptr_t kNumInputs = 1;
4286 const intptr_t kNumTemps = 1;
4287 LocationSummary* summary = new (zone) LocationSummary(
4288 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4289 summary->set_in(0, Location::RequiresFpuRegister());
4290 summary->set_temp(0, Location::RequiresRegister());
4291 summary->set_out(0, Location::RequiresRegister());
4292 return summary;
4293}
4294
4295void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4296 const Register out_reg = locs()->out(0).reg();
4297 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
4298
4299  BoxAllocationSlowPath::Allocate(compiler, this,
4300                                  compiler->BoxClassFor(from_representation()),
4301 out_reg, locs()->temp(0).reg());
4302
4303 switch (from_representation()) {
4304 case kUnboxedDouble:
4305 __ StoreDToOffset(value, out_reg, ValueOffset() - kHeapObjectTag);
4306 break;
4307 case kUnboxedFloat:
4308 __ vcvtds(DTMP, EvenSRegisterOf(value));
4309 __ StoreDToOffset(EvenDRegisterOf(FpuTMP), out_reg,
4310 ValueOffset() - kHeapObjectTag);
4311 break;
4312 case kUnboxedFloat32x4:
4313 case kUnboxedFloat64x2:
4314 case kUnboxedInt32x4:
4315 __ StoreMultipleDToOffset(value, 2, out_reg,
4316 ValueOffset() - kHeapObjectTag);
4317 break;
4318 default:
4319 UNREACHABLE();
4320 break;
4321 }
4322}
4323
4324LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4325 ASSERT(BoxCid() != kSmiCid);
4326 const bool needs_temp = CanDeoptimize();
4327 const intptr_t kNumInputs = 1;
4328 const intptr_t kNumTemps = needs_temp ? 1 : 0;
4329 LocationSummary* summary = new (zone)
4330 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4331 summary->set_in(0, Location::RequiresRegister());
4332 if (needs_temp) {
4333 summary->set_temp(0, Location::RequiresRegister());
4334 }
4335 if (representation() == kUnboxedInt64) {
4336 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
4338 } else if (representation() == kUnboxedInt32) {
4339 summary->set_out(0, Location::RequiresRegister());
4340 } else if (representation() == kUnboxedFloat) {
4341 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
4342 // TODO(30953): Support register range constraints in the regalloc.
4343 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4344 } else {
4345 summary->set_out(0, Location::RequiresFpuRegister());
4346 }
4347 return summary;
4348}
4349
4350void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
4351 const Register box = locs()->in(0).reg();
4352
4353 switch (representation()) {
4354 case kUnboxedInt64: {
4355 PairLocation* result = locs()->out(0).AsPairLocation();
4356 ASSERT(result->At(0).reg() != box);
4357 __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
4358 __ LoadFieldFromOffset(result->At(1).reg(), box,
4359 ValueOffset() + compiler::target::kWordSize);
4360 break;
4361 }
4362
4363 case kUnboxedDouble: {
4364 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4365 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4366 break;
4367 }
4368
4369 case kUnboxedFloat: {
4370 // Should only be <= Q7, because >= Q8 cannot be addressed as S register.
4371 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4372 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4373 __ vcvtsd(EvenSRegisterOf(result), result);
4374 break;
4375 }
4376
4377 case kUnboxedFloat32x4:
4378 case kUnboxedFloat64x2:
4379 case kUnboxedInt32x4: {
4380 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4381 __ LoadMultipleDFromOffset(result, 2, box,
4382 ValueOffset() - kHeapObjectTag);
4383 break;
4384 }
4385
4386 default:
4387 UNREACHABLE();
4388 break;
4389 }
4390}
4391
4392void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
4393 const Register box = locs()->in(0).reg();
4394
4395 switch (representation()) {
4396 case kUnboxedInt64: {
4397 PairLocation* result = locs()->out(0).AsPairLocation();
4398 __ SmiUntag(result->At(0).reg(), box);
4399 __ SignFill(result->At(1).reg(), result->At(0).reg());
4400 break;
4401 }
4402
4403 case kUnboxedDouble: {
4404 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4405 __ SmiUntag(IP, box);
4406 __ vmovdr(DTMP, 0, IP);
4407 __ vcvtdi(result, STMP);
4408 break;
4409 }
4410
4411 default:
4412 UNREACHABLE();
4413 break;
4414 }
4415}
4416
4417void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
4418 const Register value = locs()->in(0).reg();
4419 const Register result = locs()->out(0).reg();
4420 __ LoadInt32FromBoxOrSmi(result, value);
4421}
4422
4423void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
4424 const Register box = locs()->in(0).reg();
4425 PairLocation* result = locs()->out(0).AsPairLocation();
4426 ASSERT(result->At(0).reg() != box);
4427 ASSERT(result->At(1).reg() != box);
4428 compiler::Label done;
4429 __ SignFill(result->At(1).reg(), box);
4430 __ SmiUntag(result->At(0).reg(), box, &done);
4431 EmitLoadFromBox(compiler);
4432 __ Bind(&done);
4433}
4434
4435LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
4436 bool opt) const {
4437 ASSERT((from_representation() == kUnboxedInt32) ||
4438 (from_representation() == kUnboxedUint32));
4439 const intptr_t kNumInputs = 1;
4440 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4441 LocationSummary* summary = new (zone)
4442 LocationSummary(zone, kNumInputs, kNumTemps,
4443                      ValueFitsSmi() ? LocationSummary::kNoCall
4444                                     : LocationSummary::kCallOnSlowPath);
4445  summary->set_in(0, Location::RequiresRegister());
4446 if (!ValueFitsSmi()) {
4447 summary->set_temp(0, Location::RequiresRegister());
4448 }
4449 summary->set_out(0, Location::RequiresRegister());
4450 return summary;
4451}
4452
4453void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4454 Register value = locs()->in(0).reg();
4455 Register out = locs()->out(0).reg();
4456 ASSERT(value != out);
4457
4458 __ SmiTag(out, value);
4459 if (!ValueFitsSmi()) {
4460 Register temp = locs()->temp(0).reg();
4461 compiler::Label done;
4462 if (from_representation() == kUnboxedInt32) {
4463 __ cmp(value, compiler::Operand(out, ASR, 1));
4464 } else {
4465 ASSERT(from_representation() == kUnboxedUint32);
4466 // Note: better to test upper bits instead of comparing with
4467 // kSmiMax as kSmiMax does not fit into immediate operand.
4468 __ TestImmediate(value, 0xC0000000);
4469 }
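    // For kUnboxedInt32 the SmiTag/ASR round trip succeeds exactly when the
    // top two bits of the value agree (nothing is lost to the tag bit). For
    // kUnboxedUint32, testing 0xC0000000 checks that both top bits are zero,
    // the unsigned equivalent of value <= kSmiMax.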
4470 __ b(&done, EQ);
4471 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4472 temp);
4473 if (from_representation() == kUnboxedInt32) {
4474 __ Asr(temp, value,
4475 compiler::Operand(compiler::target::kBitsPerWord - 1));
4476 } else {
4477 ASSERT(from_representation() == kUnboxedUint32);
4478 __ eor(temp, temp, compiler::Operand(temp));
4479 }
4480 __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
4481 __ StoreFieldToOffset(
4482 temp, out,
4483 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4484 __ Bind(&done);
4485 }
4486}
4487
4488LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4489 bool opt) const {
4490 const intptr_t kNumInputs = 1;
4491 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4492 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
4493  // precompiled mode and only after VM isolate stubs were
4494  // replaced with isolate-specific stubs.
4495 auto object_store = IsolateGroup::Current()->object_store();
4496 const bool stubs_in_vm_isolate =
4497 object_store->allocate_mint_with_fpu_regs_stub()
4498 ->untag()
4499 ->InVMIsolateHeap() ||
4500 object_store->allocate_mint_without_fpu_regs_stub()
4501 ->untag()
4502 ->InVMIsolateHeap();
4503 const bool shared_slow_path_call =
4504 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
4505 LocationSummary* summary = new (zone) LocationSummary(
4506 zone, kNumInputs, kNumTemps,
4507 ValueFitsSmi()
4508          ? LocationSummary::kNoCall
4509          : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4510                                    : LocationSummary::kCallOnSlowPath)));
4511  summary->set_in(0, Location::Pair(Location::RequiresRegister(),
4512                                    Location::RequiresRegister()));
4513  if (ValueFitsSmi()) {
4514 summary->set_out(0, Location::RequiresRegister());
4515 } else if (shared_slow_path_call) {
4516 summary->set_out(0,
4517                     Location::RegisterLocation(AllocateMintABI::kResultReg));
4518    summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
4519  } else {
4520 summary->set_out(0, Location::RequiresRegister());
4521 summary->set_temp(0, Location::RequiresRegister());
4522 }
4523 return summary;
4524}
4525
4526void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4527 if (ValueFitsSmi()) {
4528 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4529 Register value_lo = value_pair->At(0).reg();
4530 Register out_reg = locs()->out(0).reg();
4531 __ SmiTag(out_reg, value_lo);
4532 return;
4533 }
4534
4535 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4536 Register value_lo = value_pair->At(0).reg();
4537 Register value_hi = value_pair->At(1).reg();
4538 Register tmp = locs()->temp(0).reg();
4539 Register out_reg = locs()->out(0).reg();
4540
4541 compiler::Label done;
4542 __ SmiTag(out_reg, value_lo);
4543 __ cmp(value_lo, compiler::Operand(out_reg, ASR, kSmiTagSize));
4544 __ cmp(value_hi, compiler::Operand(out_reg, ASR, 31), EQ);
4545 __ b(&done, EQ);
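  // The pair fits in a Smi when untagging the freshly tagged low word gives
  // back value_lo and the high word equals the sign bits of that tagged
  // result (out_reg ASR 31); the second cmp is predicated on EQ, so failing
  // either test falls through to the allocation below.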
4546
4547 if (compiler->intrinsic_mode()) {
4548 __ TryAllocate(compiler->mint_class(),
4549 compiler->intrinsic_slow_path_label(),
4550 compiler::Assembler::kNearJump, out_reg, tmp);
4551 } else if (locs()->call_on_shared_slow_path()) {
4552 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4553 if (!has_frame) {
4554 ASSERT(__ constant_pool_allowed());
4555 __ set_constant_pool_allowed(false);
4556 __ EnterDartFrame(0);
4557 }
4558 auto object_store = compiler->isolate_group()->object_store();
4559 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4560 const auto& stub = Code::ZoneHandle(
4561 compiler->zone(),
4562 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4563 : object_store->allocate_mint_without_fpu_regs_stub());
4564
4565 ASSERT(!locs()->live_registers()->ContainsRegister(
4566        AllocateMintABI::kResultReg));
4567    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4568 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4569 locs(), DeoptId::kNone, extended_env);
4570 if (!has_frame) {
4571 __ LeaveDartFrame();
4572 __ set_constant_pool_allowed(true);
4573 }
4574 } else {
4575    BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
4576                                    out_reg, tmp);
4577 }
4578
4579 __ StoreFieldToOffset(value_lo, out_reg,
4580 compiler::target::Mint::value_offset());
4581 __ StoreFieldToOffset(
4582 value_hi, out_reg,
4583 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4584 __ Bind(&done);
4585}
4586
4587static void LoadInt32FromMint(FlowGraphCompiler* compiler,
4588 Register mint,
4589                              Register result,
4590                              Register temp,
4591 compiler::Label* deopt) {
4592 __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
4593 if (deopt != nullptr) {
4594 __ LoadFieldFromOffset(
4595 temp, mint,
4596 compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4597 __ cmp(temp,
4598 compiler::Operand(result, ASR, compiler::target::kBitsPerWord - 1));
4599 __ b(deopt, NE);
4600 }
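  // A Mint's 64-bit payload fits in an int32 exactly when its high word is
  // the sign-extension of its low word; when a deopt label is supplied the
  // comparison above enforces that, otherwise the high word is ignored
  // because the unbox is truncating or the range is already known to fit.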
4601}
4602
4603LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
4604 bool opt) const {
4605 ASSERT((representation() == kUnboxedInt32) ||
4606 (representation() == kUnboxedUint32));
4607 ASSERT((representation() != kUnboxedUint32) || is_truncating());
4608 const intptr_t kNumInputs = 1;
4609 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
4610 LocationSummary* summary = new (zone)
4611 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4612 summary->set_in(0, Location::RequiresRegister());
4613 if (kNumTemps > 0) {
4614 summary->set_temp(0, Location::RequiresRegister());
4615 }
4616 summary->set_out(0, Location::RequiresRegister());
4617 return summary;
4618}
4619
4620void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4621 const intptr_t value_cid = value()->Type()->ToCid();
4622 const Register value = locs()->in(0).reg();
4623 const Register out = locs()->out(0).reg();
4624 const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
4625 compiler::Label* deopt =
4626 CanDeoptimize()
4627 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4628 : nullptr;
4629 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
4630 ASSERT(value != out);
4631
4632 if (value_cid == kSmiCid) {
4633 __ SmiUntag(out, value);
4634 } else if (value_cid == kMintCid) {
4635 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
4636 } else if (!CanDeoptimize()) {
4637 compiler::Label done;
4638 __ SmiUntag(out, value, &done);
4639 LoadInt32FromMint(compiler, value, out, kNoRegister, nullptr);
4640 __ Bind(&done);
4641 } else {
4642 compiler::Label done;
4643 __ SmiUntag(out, value, &done);
4644 __ CompareClassId(value, kMintCid, temp);
4645 __ b(deopt, NE);
4646 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
4647 __ Bind(&done);
4648 }
4649}
4650
4651LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4652 bool opt) const {
4653 const intptr_t kNumInputs = 2;
4654 const intptr_t kNumTemps = 0;
4655 LocationSummary* summary = new (zone)
4656 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4657 summary->set_in(0, Location::RequiresFpuRegister());
4658 summary->set_in(1, Location::RequiresFpuRegister());
4659 summary->set_out(0, Location::RequiresFpuRegister());
4660 return summary;
4661}
4662
4663void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4664 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
4665 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
4666 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4667 switch (op_kind()) {
4668 case Token::kADD:
4669 __ vaddd(result, left, right);
4670 break;
4671 case Token::kSUB:
4672 __ vsubd(result, left, right);
4673 break;
4674 case Token::kMUL:
4675 __ vmuld(result, left, right);
4676 break;
4677 case Token::kDIV:
4678 __ vdivd(result, left, right);
4679 break;
4680 default:
4681 UNREACHABLE();
4682 }
4683}
4684
4685LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
4686 bool opt) const {
4687 const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
4688 const intptr_t kNumInputs = 1;
4689 const intptr_t kNumTemps = needs_temp ? 1 : 0;
4690 LocationSummary* summary = new (zone)
4691 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4692 summary->set_in(0, Location::RequiresFpuRegister());
4693 if (needs_temp) {
4694 summary->set_temp(0, Location::RequiresRegister());
4695 }
4696 summary->set_out(0, Location::RequiresRegister());
4697 return summary;
4698}
4699
4700Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
4701 BranchLabels labels) {
4702 ASSERT(compiler->is_optimizing());
4703 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
4704 const bool is_negated = kind() != Token::kEQ;
4705
4706 switch (op_kind()) {
4707 case MethodRecognizer::kDouble_getIsNaN: {
4708 __ vcmpd(value, value);
4709 __ vmstat();
4710 return is_negated ? VC : VS;
4711 }
4712 case MethodRecognizer::kDouble_getIsInfinite: {
4713 const Register temp = locs()->temp(0).reg();
4714 compiler::Label done;
4715 // TMP <- value[0:31], result <- value[32:63]
4716 __ vmovrrd(TMP, temp, value);
4717 __ cmp(TMP, compiler::Operand(0));
4718 __ b(is_negated ? labels.true_label : labels.false_label, NE);
4719
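      // An IEEE 754 double is +/-infinity iff its exponent bits are all ones
      // and its mantissa is zero: the low 32 bits must be 0 (checked above)
      // and the high 32 bits, with the sign bit cleared, must be exactly
      // 0x7FF00000, which the code below verifies.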
4720 // Mask off the sign bit.
4721 __ AndImmediate(temp, temp, 0x7FFFFFFF);
4722 // Compare with +infinity.
4723 __ CompareImmediate(temp, 0x7FF00000);
4724 return is_negated ? NE : EQ;
4725 }
4726 case MethodRecognizer::kDouble_getIsNegative: {
4727 const Register temp = locs()->temp(0).reg();
4728 __ vcmpdz(value);
4729 __ vmstat();
4730 // If it's NaN, it's not negative.
4731 __ b(is_negated ? labels.true_label : labels.false_label, VS);
4732 // Check for negative zero with a signed comparison.
4733 __ vmovrrd(TMP, temp, value, ZERO);
4734 __ cmp(temp, compiler::Operand(0), ZERO);
4735 return is_negated ? GE : LT;
4736 }
4737 default:
4738 UNREACHABLE();
4739 }
4740}
4741
4742// SIMD
4743
4744#define DEFINE_EMIT(Name, Args) \
4745 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
4746 PP_APPLY(PP_UNPACK, Args))
4747
4748DEFINE_EMIT(Simd32x4BinaryOp,
4749            (QRegister result, QRegister left, QRegister right)) {
4750  switch (instr->kind()) {
4751 case SimdOpInstr::kFloat32x4Add:
4752 __ vaddqs(result, left, right);
4753 break;
4754 case SimdOpInstr::kFloat32x4Sub:
4755 __ vsubqs(result, left, right);
4756 break;
4757 case SimdOpInstr::kFloat32x4Mul:
4758 __ vmulqs(result, left, right);
4759 break;
4760 case SimdOpInstr::kFloat32x4Div:
4761 __ Vdivqs(result, left, right);
4762 break;
4763 case SimdOpInstr::kFloat32x4Equal:
4764 __ vceqqs(result, left, right);
4765 break;
4766 case SimdOpInstr::kFloat32x4NotEqual:
4767 __ vceqqs(result, left, right);
4768 // Invert the result.
4769 __ vmvnq(result, result);
4770 break;
4771 case SimdOpInstr::kFloat32x4GreaterThan:
4772 __ vcgtqs(result, left, right);
4773 break;
4774 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4775 __ vcgeqs(result, left, right);
4776 break;
4777 case SimdOpInstr::kFloat32x4LessThan:
4778 __ vcgtqs(result, right, left);
4779 break;
4780 case SimdOpInstr::kFloat32x4LessThanOrEqual:
4781 __ vcgeqs(result, right, left);
4782 break;
4783 case SimdOpInstr::kFloat32x4Min:
4784 __ vminqs(result, left, right);
4785 break;
4786 case SimdOpInstr::kFloat32x4Max:
4787 __ vmaxqs(result, left, right);
4788 break;
4789 case SimdOpInstr::kFloat32x4Scale:
4790 __ vcvtsd(STMP, EvenDRegisterOf(left));
4791      __ vdup(compiler::kFourBytes, result, DTMP, 0);
4792      __ vmulqs(result, result, right);
4793 break;
4794 case SimdOpInstr::kInt32x4BitAnd:
4795 __ vandq(result, left, right);
4796 break;
4797 case SimdOpInstr::kInt32x4BitOr:
4798 __ vorrq(result, left, right);
4799 break;
4800 case SimdOpInstr::kInt32x4BitXor:
4801 __ veorq(result, left, right);
4802 break;
4803 case SimdOpInstr::kInt32x4Add:
4804      __ vaddqi(compiler::kFourBytes, result, left, right);
4805      break;
4806 case SimdOpInstr::kInt32x4Sub:
4807      __ vsubqi(compiler::kFourBytes, result, left, right);
4808      break;
4809 default:
4810 UNREACHABLE();
4811 }
4812}
4813
4814DEFINE_EMIT(Float64x2BinaryOp,
4815 (QRegisterView result, QRegisterView left, QRegisterView right)) {
4816 switch (instr->kind()) {
4817 case SimdOpInstr::kFloat64x2Add:
4818 __ vaddd(result.d(0), left.d(0), right.d(0));
4819 __ vaddd(result.d(1), left.d(1), right.d(1));
4820 break;
4821 case SimdOpInstr::kFloat64x2Sub:
4822 __ vsubd(result.d(0), left.d(0), right.d(0));
4823 __ vsubd(result.d(1), left.d(1), right.d(1));
4824 break;
4825 case SimdOpInstr::kFloat64x2Mul:
4826 __ vmuld(result.d(0), left.d(0), right.d(0));
4827 __ vmuld(result.d(1), left.d(1), right.d(1));
4828 break;
4829 case SimdOpInstr::kFloat64x2Div:
4830 __ vdivd(result.d(0), left.d(0), right.d(0));
4831 __ vdivd(result.d(1), left.d(1), right.d(1));
4832 break;
4833 default:
4834 UNREACHABLE();
4835 }
4836}
4837
4838// Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
4839// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4840DEFINE_EMIT(Simd32x4Shuffle,
4841 (FixedQRegisterView<Q6> result, FixedQRegisterView<Q5> value)) {
4842 // For some cases the vdup instruction requires fewer
4843 // instructions. For arbitrary shuffles, use vtbl.
4844
4845 switch (instr->kind()) {
4846 case SimdOpInstr::kFloat32x4GetX:
4847 __ vcvtds(result.d(0), value.s(0));
4848 break;
4849 case SimdOpInstr::kFloat32x4GetY:
4850 __ vcvtds(result.d(0), value.s(1));
4851 break;
4852 case SimdOpInstr::kFloat32x4GetZ:
4853 __ vcvtds(result.d(0), value.s(2));
4854 break;
4855 case SimdOpInstr::kFloat32x4GetW:
4856 __ vcvtds(result.d(0), value.s(3));
4857 break;
4858 case SimdOpInstr::kInt32x4Shuffle:
4859 case SimdOpInstr::kFloat32x4Shuffle: {
4860 if (instr->mask() == 0x00) {
4861 __ vdup(compiler::kFourBytes, result, value.d(0), 0);
4862 } else if (instr->mask() == 0x55) {
4863 __ vdup(compiler::kFourBytes, result, value.d(0), 1);
4864 } else if (instr->mask() == 0xAA) {
4865 __ vdup(compiler::kFourBytes, result, value.d(1), 0);
4866 } else if (instr->mask() == 0xFF) {
4867 __ vdup(compiler::kFourBytes, result, value.d(1), 1);
4868 } else {
4869 // TODO(zra): Investigate better instruction sequences for other
4870 // shuffle masks.
4871 QRegisterView temp(QTMP);
4872
4873 __ vmovq(temp, value);
4874 for (intptr_t i = 0; i < 4; i++) {
4875 __ vmovs(result.s(i), temp.s((instr->mask() >> (2 * i)) & 0x3));
4876 }
4877 }
4878 break;
4879 }
4880 default:
4881 UNREACHABLE();
4882 }
4883}
4884
4885// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4886DEFINE_EMIT(Simd32x4ShuffleMix,
4887 (FixedQRegisterView<Q6> result,
4888 FixedQRegisterView<Q4> left,
4889 FixedQRegisterView<Q5> right)) {
4890 // TODO(zra): Investigate better instruction sequences for shuffle masks.
4891 __ vmovs(result.s(0), left.s((instr->mask() >> 0) & 0x3));
4892 __ vmovs(result.s(1), left.s((instr->mask() >> 2) & 0x3));
4893 __ vmovs(result.s(2), right.s((instr->mask() >> 4) & 0x3));
4894 __ vmovs(result.s(3), right.s((instr->mask() >> 6) & 0x3));
4895}
4896
4897// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4898DEFINE_EMIT(Simd32x4GetSignMask,
4899 (Register out, FixedQRegisterView<Q5> value, Temp<Register> temp)) {
4900 // X lane.
4901 __ vmovrs(out, value.s(0));
4902 __ Lsr(out, out, compiler::Operand(31));
4903 // Y lane.
4904 __ vmovrs(temp, value.s(1));
4905 __ Lsr(temp, temp, compiler::Operand(31));
4906 __ orr(out, out, compiler::Operand(temp, LSL, 1));
4907 // Z lane.
4908 __ vmovrs(temp, value.s(2));
4909 __ Lsr(temp, temp, compiler::Operand(31));
4910 __ orr(out, out, compiler::Operand(temp, LSL, 2));
4911 // W lane.
4912 __ vmovrs(temp, value.s(3));
4913 __ Lsr(temp, temp, compiler::Operand(31));
4914 __ orr(out, out, compiler::Operand(temp, LSL, 3));
4915}
4916
4917// Low (< 7) Q registers are needed for the vcvtsd instruction.
4918// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4919DEFINE_EMIT(Float32x4FromDoubles,
4920 (FixedQRegisterView<Q6> out,
4921 QRegisterView q0,
4922 QRegisterView q1,
4923 QRegisterView q2,
4924 QRegisterView q3)) {
4925 __ vcvtsd(out.s(0), q0.d(0));
4926 __ vcvtsd(out.s(1), q1.d(0));
4927 __ vcvtsd(out.s(2), q2.d(0));
4928 __ vcvtsd(out.s(3), q3.d(0));
4929}
4930
4931DEFINE_EMIT(Float32x4Zero, (QRegister out)) {
4932 __ veorq(out, out, out);
4933}
4934
4935DEFINE_EMIT(Float32x4Splat, (QRegister result, QRegisterView value)) {
4936 // Convert to Float32.
4937 __ vcvtsd(STMP, value.d(0));
4938
4939 // Splat across all lanes.
4940  __ vdup(compiler::kFourBytes, result, DTMP, 0);
4941 }
4942
4943DEFINE_EMIT(Float32x4Sqrt,
4944 (QRegister result, QRegister left, Temp<QRegister> temp)) {
4945 __ Vsqrtqs(result, left, temp);
4946}
4947
4948DEFINE_EMIT(Float32x4Unary, (QRegister result, QRegister left)) {
4949 switch (instr->kind()) {
4950 case SimdOpInstr::kFloat32x4Negate:
4951 __ vnegqs(result, left);
4952 break;
4953 case SimdOpInstr::kFloat32x4Abs:
4954 __ vabsqs(result, left);
4955 break;
4956 case SimdOpInstr::kFloat32x4Reciprocal:
4957 __ Vreciprocalqs(result, left);
4958 break;
4959 case SimdOpInstr::kFloat32x4ReciprocalSqrt:
4960 __ VreciprocalSqrtqs(result, left);
4961 break;
4962 default:
4963 UNREACHABLE();
4964 }
4965}
4966
4967DEFINE_EMIT(Simd32x4ToSimd32x4Conversion, (SameAsFirstInput, QRegister left)) {
4968 // TODO(dartbug.com/30949) these operations are essentially nop and should
4969 // not generate any code. They should be removed from the graph before
4970 // code generation.
4971}
4972
4973DEFINE_EMIT(
4974 Float32x4Clamp,
4975 (QRegister result, QRegister left, QRegister lower, QRegister upper)) {
4976 __ vminqs(result, left, upper);
4977 __ vmaxqs(result, result, lower);
4978}
4979
4980DEFINE_EMIT(Float64x2Clamp,
4981 (QRegisterView result,
4982 QRegisterView left,
4983 QRegisterView lower,
4984 QRegisterView upper)) {
4985 compiler::Label done0, done1;
4986 // result = max(min(left, upper), lower) |
4987 // lower if (upper is NaN || left is NaN) |
4988 // upper if lower is NaN
4989 __ vcmpd(left.d(0), upper.d(0));
4990 __ vmstat();
4991 __ vmovd(result.d(0), upper.d(0), GE);
4992 __ vmovd(result.d(0), left.d(0), LT); // less than or unordered(NaN)
4993 __ b(&done0, VS); // at least one argument was NaN
4994 __ vcmpd(result.d(0), lower.d(0));
4995 __ vmstat();
4996 __ vmovd(result.d(0), lower.d(0), LE);
4997 __ Bind(&done0);
4998
4999 __ vcmpd(left.d(1), upper.d(1));
5000 __ vmstat();
5001 __ vmovd(result.d(1), upper.d(1), GE);
5002 __ vmovd(result.d(1), left.d(1), LT); // less than or unordered(NaN)
5003 __ b(&done1, VS); // at least one argument was NaN
5004 __ vcmpd(result.d(1), lower.d(1));
5005 __ vmstat();
5006 __ vmovd(result.d(1), lower.d(1), LE);
5007 __ Bind(&done1);
5008}
5009
5010// Low (< 7) Q registers are needed for the vmovs instruction.
5011// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5012DEFINE_EMIT(Float32x4With,
5013 (FixedQRegisterView<Q6> result,
5014 QRegisterView replacement,
5015 QRegister value)) {
5016 __ vcvtsd(STMP, replacement.d(0));
5017 __ vmovq(result, value);
5018 switch (instr->kind()) {
5019 case SimdOpInstr::kFloat32x4WithX:
5020 __ vmovs(result.s(0), STMP);
5021 break;
5022 case SimdOpInstr::kFloat32x4WithY:
5023 __ vmovs(result.s(1), STMP);
5024 break;
5025 case SimdOpInstr::kFloat32x4WithZ:
5026 __ vmovs(result.s(2), STMP);
5027 break;
5028 case SimdOpInstr::kFloat32x4WithW:
5029 __ vmovs(result.s(3), STMP);
5030 break;
5031 default:
5032 UNREACHABLE();
5033 }
5034}
5035
5036DEFINE_EMIT(Simd64x2Shuffle, (QRegisterView result, QRegisterView value)) {
5037 switch (instr->kind()) {
5038 case SimdOpInstr::kFloat64x2GetX:
5039 __ vmovd(result.d(0), value.d(0));
5040 break;
5041 case SimdOpInstr::kFloat64x2GetY:
5042 __ vmovd(result.d(0), value.d(1));
5043 break;
5044 default:
5045 UNREACHABLE();
5046 }
5047}
5048
5049DEFINE_EMIT(Float64x2Zero, (QRegister q)) {
5050 __ veorq(q, q, q);
5051}
5052
5053DEFINE_EMIT(Float64x2Splat, (QRegisterView result, QRegisterView value)) {
5054 // Splat across all lanes.
5055 __ vmovd(result.d(0), value.d(0));
5056 __ vmovd(result.d(1), value.d(0));
5057}
5058
5059DEFINE_EMIT(Float64x2FromDoubles,
5060 (QRegisterView r, QRegisterView q0, QRegisterView q1)) {
5061 __ vmovd(r.d(0), q0.d(0));
5062 __ vmovd(r.d(1), q1.d(0));
5063}
5064
5065// Low (< 7) Q registers are needed for the vcvtsd instruction.
5066// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5067DEFINE_EMIT(Float64x2ToFloat32x4, (FixedQRegisterView<Q6> r, QRegisterView q)) {
5068 __ veorq(r, r, r);
5069 // Set X lane.
5070 __ vcvtsd(r.s(0), q.d(0));
5071 // Set Y lane.
5072 __ vcvtsd(r.s(1), q.d(1));
5073}
5074
5075// Low (< 7) Q registers are needed for the vcvtds instruction.
5076// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5077DEFINE_EMIT(Float32x4ToFloat64x2, (QRegisterView r, FixedQRegisterView<Q6> q)) {
5078 // Set X.
5079 __ vcvtds(r.d(0), q.s(0));
5080 // Set Y.
5081 __ vcvtds(r.d(1), q.s(1));
5082}
5083
5084// Grabbing the S components means we need a low (< 7) Q.
5085// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5086DEFINE_EMIT(Float64x2GetSignMask,
5087 (Register out, FixedQRegisterView<Q6> value)) {
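  // out = (sign bit of the Y lane) << 1 | (sign bit of the X lane).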
5088 // Upper 32-bits of X lane.
5089 __ vmovrs(out, value.s(1));
5090 __ Lsr(out, out, compiler::Operand(31));
5091 // Upper 32-bits of Y lane.
5092 __ vmovrs(TMP, value.s(3));
5093 __ Lsr(TMP, TMP, compiler::Operand(31));
5094 __ orr(out, out, compiler::Operand(TMP, LSL, 1));
5095}
5096
5097DEFINE_EMIT(Float64x2Unary, (QRegisterView result, QRegisterView value)) {
5098 switch (instr->kind()) {
5099 case SimdOpInstr::kFloat64x2Negate:
5100 __ vnegd(result.d(0), value.d(0));
5101 __ vnegd(result.d(1), value.d(1));
5102 break;
5103 case SimdOpInstr::kFloat64x2Abs:
5104 __ vabsd(result.d(0), value.d(0));
5105 __ vabsd(result.d(1), value.d(1));
5106 break;
5107 case SimdOpInstr::kFloat64x2Sqrt:
5108 __ vsqrtd(result.d(0), value.d(0));
5109 __ vsqrtd(result.d(1), value.d(1));
5110 break;
5111 default:
5112 UNREACHABLE();
5113 }
5114}
5115
5116DEFINE_EMIT(Float64x2Binary,
5117 (SameAsFirstInput, QRegisterView left, QRegisterView right)) {
5118 switch (instr->kind()) {
5119 case SimdOpInstr::kFloat64x2Scale:
5120 __ vmuld(left.d(0), left.d(0), right.d(0));
5121 __ vmuld(left.d(1), left.d(1), right.d(0));
5122 break;
5123 case SimdOpInstr::kFloat64x2WithX:
5124 __ vmovd(left.d(0), right.d(0));
5125 break;
5126 case SimdOpInstr::kFloat64x2WithY:
5127 __ vmovd(left.d(1), right.d(0));
5128 break;
5129 case SimdOpInstr::kFloat64x2Min: {
5130 // X lane.
5131 __ vcmpd(left.d(0), right.d(0));
5132 __ vmstat();
5133 __ vmovd(left.d(0), right.d(0), GE);
5134 // Y lane.
5135 __ vcmpd(left.d(1), right.d(1));
5136 __ vmstat();
5137 __ vmovd(left.d(1), right.d(1), GE);
5138 break;
5139 }
5140 case SimdOpInstr::kFloat64x2Max: {
5141 // X lane.
5142 __ vcmpd(left.d(0), right.d(0));
5143 __ vmstat();
5144 __ vmovd(left.d(0), right.d(0), LE);
5145 // Y lane.
5146 __ vcmpd(left.d(1), right.d(1));
5147 __ vmstat();
5148 __ vmovd(left.d(1), right.d(1), LE);
5149 break;
5150 }
5151 default:
5152 UNREACHABLE();
5153 }
5154}
5155
5156DEFINE_EMIT(Int32x4FromInts,
5157 (QRegisterView result,
5158 Register v0,
5159 Register v1,
5160 Register v2,
5161 Register v3)) {
5162 __ veorq(result, result, result);
5163 __ vmovdrr(result.d(0), v0, v1);
5164 __ vmovdrr(result.d(1), v2, v3);
5165}
5166
5167DEFINE_EMIT(Int32x4FromBools,
5168 (QRegisterView result,
5169 Register v0,
5170 Register v1,
5171 Register v2,
5172 Register v3,
5173 Temp<Register> temp)) {
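  // Each lane is set to 0xffffffff if the corresponding argument is
  // Bool::True() and left as zero otherwise.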
5174 __ veorq(result, result, result);
5175 __ LoadImmediate(temp, 0xffffffff);
5176
5177 __ LoadObject(IP, Bool::True());
5178 __ cmp(v0, compiler::Operand(IP));
5179 __ vmovdr(result.d(0), 0, temp, EQ);
5180
5181 __ cmp(v1, compiler::Operand(IP));
5182 __ vmovdr(result.d(0), 1, temp, EQ);
5183
5184 __ cmp(v2, compiler::Operand(IP));
5185 __ vmovdr(result.d(1), 0, temp, EQ);
5186
5187 __ cmp(v3, compiler::Operand(IP));
5188 __ vmovdr(result.d(1), 1, temp, EQ);
5189}
5190
5191// Low (< 7) Q registers are needed for the vmovrs instruction.
5192// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5193DEFINE_EMIT(Int32x4GetFlag, (Register result, FixedQRegisterView<Q6> value)) {
5194 switch (instr->kind()) {
5195 case SimdOpInstr::kInt32x4GetFlagX:
5196 __ vmovrs(result, value.s(0));
5197 break;
5198 case SimdOpInstr::kInt32x4GetFlagY:
5199 __ vmovrs(result, value.s(1));
5200 break;
5201 case SimdOpInstr::kInt32x4GetFlagZ:
5202 __ vmovrs(result, value.s(2));
5203 break;
5204 case SimdOpInstr::kInt32x4GetFlagW:
5205 __ vmovrs(result, value.s(3));
5206 break;
5207 default:
5208 UNREACHABLE();
5209 }
5210
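  // Materialize the extracted lane as a Bool: any non-zero bit pattern
  // becomes true.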
5211 __ tst(result, compiler::Operand(result));
5212 __ LoadObject(result, Bool::True(), NE);
5213 __ LoadObject(result, Bool::False(), EQ);
5214}
5215
5216DEFINE_EMIT(Int32x4Select,
5217 (QRegister out,
5218 QRegister mask,
5219 QRegister trueValue,
5220 QRegister falseValue,
5221 Temp<QRegister> temp)) {
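  // out = (mask & trueValue) | (~mask & falseValue).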
5222 // Copy mask.
5223 __ vmovq(temp, mask);
5224 // Invert it.
5225 __ vmvnq(temp, temp);
5226 // mask = mask & trueValue.
5227 __ vandq(mask, mask, trueValue);
5228 // temp = temp & falseValue.
5229 __ vandq(temp, temp, falseValue);
5230 // out = mask | temp.
5231 __ vorrq(out, mask, temp);
5232}
5233
5234DEFINE_EMIT(Int32x4WithFlag,
5235 (QRegisterView result, QRegister mask, Register flag)) {
5236 __ vmovq(result, mask);
5237 __ CompareObject(flag, Bool::True());
5238 __ LoadImmediate(TMP, 0xffffffff, EQ);
5239 __ LoadImmediate(TMP, 0, NE);
5240 switch (instr->kind()) {
5241 case SimdOpInstr::kInt32x4WithFlagX:
5242 __ vmovdr(result.d(0), 0, TMP);
5243 break;
5244 case SimdOpInstr::kInt32x4WithFlagY:
5245 __ vmovdr(result.d(0), 1, TMP);
5246 break;
5247 case SimdOpInstr::kInt32x4WithFlagZ:
5248 __ vmovdr(result.d(1), 0, TMP);
5249 break;
5250 case SimdOpInstr::kInt32x4WithFlagW:
5251 __ vmovdr(result.d(1), 1, TMP);
5252 break;
5253 default:
5254 UNREACHABLE();
5255 }
5256}
5257
5258// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
5259// format:
5260//
5261// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
5262// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
5263//
5264#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
5265 CASE(Float32x4Add) \
5266 CASE(Float32x4Sub) \
5267 CASE(Float32x4Mul) \
5268 CASE(Float32x4Div) \
5269 CASE(Float32x4Equal) \
5270 CASE(Float32x4NotEqual) \
5271 CASE(Float32x4GreaterThan) \
5272 CASE(Float32x4GreaterThanOrEqual) \
5273 CASE(Float32x4LessThan) \
5274 CASE(Float32x4LessThanOrEqual) \
5275 CASE(Float32x4Min) \
5276 CASE(Float32x4Max) \
5277 CASE(Float32x4Scale) \
5278 CASE(Int32x4BitAnd) \
5279 CASE(Int32x4BitOr) \
5280 CASE(Int32x4BitXor) \
5281 CASE(Int32x4Add) \
5282 CASE(Int32x4Sub) \
5283 ____(Simd32x4BinaryOp) \
5284 CASE(Float64x2Add) \
5285 CASE(Float64x2Sub) \
5286 CASE(Float64x2Mul) \
5287 CASE(Float64x2Div) \
5288 ____(Float64x2BinaryOp) \
5289 CASE(Float32x4GetX) \
5290 CASE(Float32x4GetY) \
5291 CASE(Float32x4GetZ) \
5292 CASE(Float32x4GetW) \
5293 CASE(Int32x4Shuffle) \
5294 CASE(Float32x4Shuffle) \
5295 ____(Simd32x4Shuffle) \
5296 CASE(Float32x4ShuffleMix) \
5297 CASE(Int32x4ShuffleMix) \
5298 ____(Simd32x4ShuffleMix) \
5299 CASE(Float32x4GetSignMask) \
5300 CASE(Int32x4GetSignMask) \
5301 ____(Simd32x4GetSignMask) \
5302 SIMPLE(Float32x4FromDoubles) \
5303 SIMPLE(Float32x4Zero) \
5304 SIMPLE(Float32x4Splat) \
5305 SIMPLE(Float32x4Sqrt) \
5306 CASE(Float32x4Negate) \
5307 CASE(Float32x4Abs) \
5308 CASE(Float32x4Reciprocal) \
5309 CASE(Float32x4ReciprocalSqrt) \
5310 ____(Float32x4Unary) \
5311 CASE(Float32x4ToInt32x4) \
5312 CASE(Int32x4ToFloat32x4) \
5313 ____(Simd32x4ToSimd32x4Conversion) \
5314 SIMPLE(Float32x4Clamp) \
5315 SIMPLE(Float64x2Clamp) \
5316 CASE(Float32x4WithX) \
5317 CASE(Float32x4WithY) \
5318 CASE(Float32x4WithZ) \
5319 CASE(Float32x4WithW) \
5320 ____(Float32x4With) \
5321 CASE(Float64x2GetX) \
5322 CASE(Float64x2GetY) \
5323 ____(Simd64x2Shuffle) \
5324 SIMPLE(Float64x2Zero) \
5325 SIMPLE(Float64x2Splat) \
5326 SIMPLE(Float64x2FromDoubles) \
5327 SIMPLE(Float64x2ToFloat32x4) \
5328 SIMPLE(Float32x4ToFloat64x2) \
5329 SIMPLE(Float64x2GetSignMask) \
5330 CASE(Float64x2Negate) \
5331 CASE(Float64x2Abs) \
5332 CASE(Float64x2Sqrt) \
5333 ____(Float64x2Unary) \
5334 CASE(Float64x2Scale) \
5335 CASE(Float64x2WithX) \
5336 CASE(Float64x2WithY) \
5337 CASE(Float64x2Min) \
5338 CASE(Float64x2Max) \
5339 ____(Float64x2Binary) \
5340 SIMPLE(Int32x4FromInts) \
5341 SIMPLE(Int32x4FromBools) \
5342 CASE(Int32x4GetFlagX) \
5343 CASE(Int32x4GetFlagY) \
5344 CASE(Int32x4GetFlagZ) \
5345 CASE(Int32x4GetFlagW) \
5346 ____(Int32x4GetFlag) \
5347 SIMPLE(Int32x4Select) \
5348 CASE(Int32x4WithFlagX) \
5349 CASE(Int32x4WithFlagY) \
5350 CASE(Int32x4WithFlagZ) \
5351 CASE(Int32x4WithFlagW) \
5352 ____(Int32x4WithFlag)
5353
5354LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5355 switch (kind()) {
5356#define CASE(Name) case k##Name:
5357#define EMIT(Name) \
5358 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
5359#define SIMPLE(Name) CASE(Name) EMIT(Name)
5360 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5361#undef CASE
5362#undef EMIT
5363#undef SIMPLE
5364 case kIllegalSimdOp:
5365 UNREACHABLE();
5366 break;
5367 }
5368 UNREACHABLE();
5369 return nullptr;
5370}
5371
5372void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5373 switch (kind()) {
5374#define CASE(Name) case k##Name:
5375#define EMIT(Name) \
5376 InvokeEmitter(compiler, this, &Emit##Name); \
5377 break;
5378#define SIMPLE(Name) CASE(Name) EMIT(Name)
5379 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5380#undef CASE
5381#undef EMIT
5382#undef SIMPLE
5383 case kIllegalSimdOp:
5384 UNREACHABLE();
5385 break;
5386 }
5387}
5388
5389#undef DEFINE_EMIT
5390
5391LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
5392 Zone* zone,
5393 bool opt) const {
5394 const intptr_t kNumTemps = 0;
5395 LocationSummary* summary = new (zone)
5396 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5397 summary->set_in(0, Location::RegisterLocation(R0));
5398 summary->set_in(1, Location::RegisterLocation(R1));
5399 summary->set_in(2, Location::RegisterLocation(R2));
5400 summary->set_in(3, Location::RegisterLocation(R3));
5401 summary->set_out(0, Location::RegisterLocation(R0));
5402 return summary;
5403}
5404
5405void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5406 compiler::LeafRuntimeScope rt(compiler->assembler(),
5407 /*frame_size=*/0,
5408 /*preserve_registers=*/false);
5409 // Call the function. Parameters are already in their correct spots.
5411}
5412
5413LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
5414 bool opt) const {
5415 if (result_cid() == kDoubleCid) {
5416 const intptr_t kNumInputs = 2;
5417 const intptr_t kNumTemps = 1;
5418 LocationSummary* summary = new (zone)
5419 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5420 summary->set_in(0, Location::RequiresFpuRegister());
5421 summary->set_in(1, Location::RequiresFpuRegister());
5422 // Reuse the left register so that code can be made shorter.
5423 summary->set_out(0, Location::SameAsFirstInput());
5424 summary->set_temp(0, Location::RequiresRegister());
5425 return summary;
5426 }
5427 ASSERT(result_cid() == kSmiCid);
5428 const intptr_t kNumInputs = 2;
5429 const intptr_t kNumTemps = 0;
5430 LocationSummary* summary = new (zone)
5431 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5432 summary->set_in(0, Location::RequiresRegister());
5433 summary->set_in(1, Location::RequiresRegister());
5434 // Reuse the left register so that code can be made shorter.
5435 summary->set_out(0, Location::SameAsFirstInput());
5436 return summary;
5437}
5438
5439void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5440 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
5441 (op_kind() == MethodRecognizer::kMathMax));
5442 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
5443 if (result_cid() == kDoubleCid) {
5444 compiler::Label done, returns_nan, are_equal;
5445 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
5446 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
5447 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5448 const Register temp = locs()->temp(0).reg();
5449 __ vcmpd(left, right);
5450 __ vmstat();
5451 __ b(&returns_nan, VS);
5452 __ b(&are_equal, EQ);
5453 const Condition neg_double_condition =
5454 is_min ? TokenKindToDoubleCondition(Token::kGTE)
5455 : TokenKindToDoubleCondition(Token::kLTE);
5456 ASSERT(left == result);
5457 __ vmovd(result, right, neg_double_condition);
5458 __ b(&done);
5459
5460 __ Bind(&returns_nan);
5461 __ LoadDImmediate(result, NAN, temp);
5462 __ b(&done);
5463
5464 __ Bind(&are_equal);
5465 // Check for negative zero: -0.0 is equal to 0.0 but min or max must return
5466 // -0.0 or 0.0 respectively.
5467 // Check for negative left value (get the sign bit):
5468 // - min -> left is negative ? left : right.
5469 // - max -> left is negative ? right : left
5470 // Check the sign bit.
5471 __ vmovrrd(IP, temp, left); // Sign bit is in bit 31 of temp.
5472 __ cmp(temp, compiler::Operand(0));
5473 if (is_min) {
5474 ASSERT(left == result);
5475 __ vmovd(result, right, GE);
5476 } else {
5477 __ vmovd(result, right, LT);
5478 ASSERT(left == result);
5479 }
5480 __ Bind(&done);
5481 return;
5482 }
5483
5484 ASSERT(result_cid() == kSmiCid);
5485 const Register left = locs()->in(0).reg();
5486 const Register right = locs()->in(1).reg();
5487 const Register result = locs()->out(0).reg();
5488 __ cmp(left, compiler::Operand(right));
5489 ASSERT(result == left);
5490 if (is_min) {
5491 __ mov(result, compiler::Operand(right), GT);
5492 } else {
5493 __ mov(result, compiler::Operand(right), LT);
5494 }
5495}
5496
5497LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
5498 bool opt) const {
5499 const intptr_t kNumInputs = 1;
5500 const intptr_t kNumTemps = 0;
5501 LocationSummary* summary = new (zone)
5502 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5503 summary->set_in(0, Location::RequiresRegister());
5504 // We make use of 3-operand instructions by not requiring the result register
5505 // to be identical to the first input register, as it must be on Intel.
5506 summary->set_out(0, Location::RequiresRegister());
5507 return summary;
5508}
5509
5510void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5511 const Register value = locs()->in(0).reg();
5512 const Register result = locs()->out(0).reg();
5513 switch (op_kind()) {
5514 case Token::kNEGATE: {
5515 compiler::Label* deopt =
5516 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
5517 __ rsbs(result, value, compiler::Operand(0));
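      // rsbs computes 0 - value; V is set only when negating the most
      // negative 32-bit value (the smallest tagged Smi), whose negation
      // is not representable.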
5518 __ b(deopt, VS);
5519 break;
5520 }
5521 case Token::kBIT_NOT:
5522 __ mvn_(result, compiler::Operand(value));
5523 // Remove inverted smi-tag.
5524 __ bic(result, result, compiler::Operand(kSmiTagMask));
5525 break;
5526 default:
5527 UNREACHABLE();
5528 }
5529}
5530
5531LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
5532 bool opt) const {
5533 const intptr_t kNumInputs = 1;
5534 const intptr_t kNumTemps = 0;
5535 LocationSummary* summary = new (zone)
5536 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5537 summary->set_in(0, Location::RequiresFpuRegister());
5538 summary->set_out(0, Location::RequiresFpuRegister());
5539 return summary;
5540}
5541
5542void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5543 ASSERT(representation() == kUnboxedDouble);
5544 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5545 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5546 switch (op_kind()) {
5547 case Token::kNEGATE:
5548 __ vnegd(result, value);
5549 break;
5550 case Token::kSQRT:
5551 __ vsqrtd(result, value);
5552 break;
5553 case Token::kSQUARE:
5554 __ vmuld(result, value, value);
5555 break;
5556 default:
5557 UNREACHABLE();
5558 }
5559}
5560
5561LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
5562 bool opt) const {
5563 const intptr_t kNumInputs = 1;
5564 const intptr_t kNumTemps = 0;
5565 LocationSummary* result = new (zone)
5566 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5567 result->set_in(0, Location::RequiresRegister());
5569 return result;
5570}
5571
5572void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5573 const Register value = locs()->in(0).reg();
5574 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5575 __ vmovdr(DTMP, 0, value);
5576 __ vcvtdi(result, STMP);
5577}
5578
5579LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
5580 bool opt) const {
5581 const intptr_t kNumInputs = 1;
5582 const intptr_t kNumTemps = 0;
5583 LocationSummary* result = new (zone)
5584 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5585 result->set_in(0, Location::RequiresRegister());
5587 return result;
5588}
5589
5590void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5591 const Register value = locs()->in(0).reg();
5592 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5593 __ SmiUntag(IP, value);
5594 __ vmovdr(DTMP, 0, IP);
5595 __ vcvtdi(result, STMP);
5596}
5597
5598LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
5599 bool opt) const {
5600 UNIMPLEMENTED();
5601 return nullptr;
5602}
5603
5604void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5605 UNIMPLEMENTED();
5606}
5607
5608LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
5609 bool opt) const {
5610 const intptr_t kNumInputs = 1;
5611 const intptr_t kNumTemps = 0;
5612 LocationSummary* result = new (zone) LocationSummary(
5613 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5615 result->set_out(0, Location::RequiresRegister());
5616 return result;
5617}
5618
5619void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5620 const Register result = locs()->out(0).reg();
5621 const DRegister value_double = EvenDRegisterOf(locs()->in(0).fpu_reg());
5622
5623 DoubleToIntegerSlowPath* slow_path =
5624 new DoubleToIntegerSlowPath(this, locs()->in(0).fpu_reg());
5625 compiler->AddSlowPathCode(slow_path);
5626
5627 // First check for NaN. Checking for minint after the conversion doesn't work
5628 // on ARM because vcvtid gives 0 for NaN.
5629 __ vcmpd(value_double, value_double);
5630 __ vmstat();
5631 __ b(slow_path->entry_label(), VS);
5632
5633 __ vcvtid(STMP, value_double);
5634 __ vmovrs(result, STMP);
5635 // Overflow is signaled with minint.
5636 // Check for overflow and that it fits into Smi.
5637 __ CompareImmediate(result, 0xC0000000);
5638 __ b(slow_path->entry_label(), MI);
5639 __ SmiTag(result);
5640 __ Bind(slow_path->exit_label());
5641}
5642
5643LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
5644 bool opt) const {
5645 const intptr_t kNumInputs = 1;
5646 const intptr_t kNumTemps = 0;
5647 LocationSummary* result = new (zone)
5648 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5650 result->set_out(0, Location::RequiresRegister());
5651 return result;
5652}
5653
5654void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5655 compiler::Label* deopt =
5656 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
5657 const Register result = locs()->out(0).reg();
5658 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5659 // First check for NaN. Checking for minint after the conversion doesn't work
5660 // on ARM because vcvtid gives 0 for NaN.
5661 __ vcmpd(value, value);
5662 __ vmstat();
5663 __ b(deopt, VS);
5664
5665 __ vcvtid(STMP, value);
5666 __ vmovrs(result, STMP);
5667 // Check for overflow and that it fits into Smi.
5668 __ CompareImmediate(result, 0xC0000000);
5669 __ b(deopt, MI);
5670 __ SmiTag(result);
5671}
5672
5673LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
5674 bool opt) const {
5675 const intptr_t kNumInputs = 1;
5676 const intptr_t kNumTemps = 0;
5677 LocationSummary* result = new (zone)
5678 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5679 // Low (< Q7) Q registers are needed for the conversion instructions.
5682 return result;
5683}
5684
5685void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5686 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5687 const SRegister result =
5688 EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
5689 __ vcvtsd(result, value);
5690}
5691
5692LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
5693 bool opt) const {
5694 const intptr_t kNumInputs = 1;
5695 const intptr_t kNumTemps = 0;
5696 LocationSummary* result = new (zone)
5697 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5698 // Low (< Q7) Q registers are needed for the conversion instructions.
5701 return result;
5702}
5703
5704void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5705 const SRegister value =
5706 EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
5707 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5708 __ vcvtds(result, value);
5709}
5710
5711LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
5712 bool opt) const {
5713 UNREACHABLE();
5714 return NULL;
5715}
5716
5717void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5718 UNREACHABLE();
5719}
5720
5721LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
5722 bool opt) const {
5723 ASSERT((InputCount() == 1) || (InputCount() == 2));
5724 const intptr_t kNumTemps =
5726 ? ((recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0)
5727 : 4;
5728 LocationSummary* result = new (zone)
5729 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5731 if (InputCount() == 2) {
5733 }
5734 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5735 result->set_temp(0, Location::RegisterLocation(R2));
5737 result->set_temp(1, Location::RegisterLocation(R0));
5738 result->set_temp(2, Location::RegisterLocation(R1));
5739 result->set_temp(3, Location::RegisterLocation(R3));
5740 }
5742 result->set_temp(0, Location::RegisterLocation(R0));
5743 result->set_temp(1, Location::RegisterLocation(R1));
5744 result->set_temp(2, Location::RegisterLocation(R2));
5745 result->set_temp(3, Location::RegisterLocation(R3));
5746 }
5748 return result;
5749}
5750
5751// Pseudo code:
5752// if (exponent == 0.0) return 1.0;
5753// // Speed up simple cases.
5754// if (exponent == 1.0) return base;
5755// if (exponent == 2.0) return base * base;
5756// if (exponent == 3.0) return base * base * base;
5757// if (base == 1.0) return 1.0;
5758// if (base.isNaN || exponent.isNaN) {
5759// return double.NAN;
5760// }
5761// if (base != -Infinity && exponent == 0.5) {
5762// if (base == 0.0) return 0.0;
5763// return sqrt(value);
5764// }
5765// TODO(srdjan): Move into a stub?
5766static void InvokeDoublePow(FlowGraphCompiler* compiler,
5767 InvokeMathCFunctionInstr* instr) {
5768 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
5769 const intptr_t kInputCount = 2;
5770 ASSERT(instr->InputCount() == kInputCount);
5771 LocationSummary* locs = instr->locs();
5772
5773 const DRegister base = EvenDRegisterOf(locs->in(0).fpu_reg());
5774 const DRegister exp = EvenDRegisterOf(locs->in(1).fpu_reg());
5775 const DRegister result = EvenDRegisterOf(locs->out(0).fpu_reg());
5776 const Register temp = locs->temp(0).reg();
5777 const DRegister saved_base = OddDRegisterOf(locs->in(0).fpu_reg());
5778 ASSERT((base == result) && (result != saved_base));
5779
5780 compiler::Label skip_call, try_sqrt, check_base, return_nan;
5781 __ vmovd(saved_base, base);
5782 __ LoadDImmediate(result, 1.0, temp);
5783 // exponent == 0.0 -> return 1.0;
5784 __ vcmpdz(exp);
5785 __ vmstat();
5786 __ b(&check_base, VS); // NaN -> check base.
5787 __ b(&skip_call, EQ); // exp is 0.0, result is 1.0.
5788
5789 // exponent == 1.0 ?
5790 __ vcmpd(exp, result);
5791 __ vmstat();
5792 compiler::Label return_base;
5793 __ b(&return_base, EQ);
5794
5795 // exponent == 2.0 ?
5796 __ LoadDImmediate(DTMP, 2.0, temp);
5797 __ vcmpd(exp, DTMP);
5798 __ vmstat();
5799 compiler::Label return_base_times_2;
5800 __ b(&return_base_times_2, EQ);
5801
5802 // exponent == 3.0 ?
5803 __ LoadDImmediate(DTMP, 3.0, temp);
5804 __ vcmpd(exp, DTMP);
5805 __ vmstat();
5806 __ b(&check_base, NE);
5807
5808 // base_times_3.
5809 __ vmuld(result, saved_base, saved_base);
5810 __ vmuld(result, result, saved_base);
5811 __ b(&skip_call);
5812
5813 __ Bind(&return_base);
5814 __ vmovd(result, saved_base);
5815 __ b(&skip_call);
5816
5817 __ Bind(&return_base_times_2);
5818 __ vmuld(result, saved_base, saved_base);
5819 __ b(&skip_call);
5820
5821 __ Bind(&check_base);
5822 // Note: 'exp' could be NaN.
5823 // base == 1.0 -> return 1.0;
5824 __ vcmpd(saved_base, result);
5825 __ vmstat();
5826 __ b(&return_nan, VS);
5827 __ b(&skip_call, EQ); // base is 1.0, result is 1.0.
5828
5829 __ vcmpd(saved_base, exp);
5830 __ vmstat();
5831 __ b(&try_sqrt, VC); // Neither 'exp' nor 'base' is NaN.
5832
5833 __ Bind(&return_nan);
5834 __ LoadDImmediate(result, NAN, temp);
5835 __ b(&skip_call);
5836
5837 compiler::Label do_pow, return_zero;
5838 __ Bind(&try_sqrt);
5839
5840 // Before calling pow, check if we could use sqrt instead of pow.
5841 __ LoadDImmediate(result, kNegInfinity, temp);
5842
5843 // base == -Infinity -> call pow;
5844 __ vcmpd(saved_base, result);
5845 __ vmstat();
5846 __ b(&do_pow, EQ);
5847
5848 // exponent == 0.5 ?
5849 __ LoadDImmediate(result, 0.5, temp);
5850 __ vcmpd(exp, result);
5851 __ vmstat();
5852 __ b(&do_pow, NE);
5853
5854 // base == 0 -> return 0;
5855 __ vcmpdz(saved_base);
5856 __ vmstat();
5857 __ b(&return_zero, EQ);
5858
5859 __ vsqrtd(result, saved_base);
5860 __ b(&skip_call);
5861
5862 __ Bind(&return_zero);
5863 __ LoadDImmediate(result, 0.0, temp);
5864 __ b(&skip_call);
5865
5866 __ Bind(&do_pow);
5867 __ vmovd(base, saved_base); // Restore base.
5868
5869 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
5870 __ vmovd(D1, D2);
5872 ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
5873 compiler::LeafRuntimeScope rt(compiler->assembler(),
5874 /*frame_size=*/0,
5875 /*preserve_registers=*/false);
5876 rt.Call(instr->TargetFunction(), kInputCount);
5877 } else {
5878 // If the ABI is not "hardfp", then we have to move the double arguments
5879 // to the integer registers, and take the results from the integer
5880 // registers.
5881 compiler::LeafRuntimeScope rt(compiler->assembler(),
5882 /*frame_size=*/0,
5883 /*preserve_registers=*/false);
5884 __ vmovrrd(R0, R1, D0);
5885 __ vmovrrd(R2, R3, D1);
5886 rt.Call(instr->TargetFunction(), kInputCount);
5887 __ vmovdrr(D0, R0, R1);
5888 __ vmovdrr(D1, R2, R3);
5889 }
5890 __ Bind(&skip_call);
5891}
5892
5893void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5894 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5895 InvokeDoublePow(compiler, this);
5896 return;
5897 }
5898
5899 if (InputCount() == 2) {
5900 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
5901 __ vmovd(D1, D2);
5902 }
5904 compiler::LeafRuntimeScope rt(compiler->assembler(),
5905 /*frame_size=*/0,
5906 /*preserve_registers=*/false);
5908 } else {
5909 // If the ABI is not "hardfp", then we have to move the double arguments
5910 // to the integer registers, and take the results from the integer
5911 // registers.
5912 compiler::LeafRuntimeScope rt(compiler->assembler(),
5913 /*frame_size=*/0,
5914 /*preserve_registers=*/false);
5915 __ vmovrrd(R0, R1, D0);
5916 __ vmovrrd(R2, R3, D1);
5918 __ vmovdrr(D0, R0, R1);
5919 __ vmovdrr(D1, R2, R3);
5920 }
5921}
5922
5923LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
5924 bool opt) const {
5925 // Only use this instruction in optimized code.
5926 ASSERT(opt);
5927 const intptr_t kNumInputs = 1;
5928 LocationSummary* summary =
5929 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
5930 if (representation() == kUnboxedDouble) {
5931 if (index() == 0) {
5932 summary->set_in(
5934 } else {
5935 ASSERT(index() == 1);
5936 summary->set_in(
5938 }
5939 summary->set_out(0, Location::RequiresFpuRegister());
5940 } else {
5941 ASSERT(representation() == kTagged);
5942 if (index() == 0) {
5943 summary->set_in(
5945 } else {
5946 ASSERT(index() == 1);
5947 summary->set_in(
5949 }
5950 summary->set_out(0, Location::RequiresRegister());
5951 }
5952 return summary;
5953}
5954
5955void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5956 ASSERT(locs()->in(0).IsPairLocation());
5957 PairLocation* pair = locs()->in(0).AsPairLocation();
5958 Location in_loc = pair->At(index());
5959 if (representation() == kUnboxedDouble) {
5960 const QRegister out = locs()->out(0).fpu_reg();
5961 const QRegister in = in_loc.fpu_reg();
5962 __ vmovq(out, in);
5963 } else {
5964 ASSERT(representation() == kTagged);
5965 const Register out = locs()->out(0).reg();
5966 const Register in = in_loc.reg();
5967 __ mov(out, compiler::Operand(in));
5968 }
5969}
5970
5971LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
5972 bool opt) const {
5973 UNREACHABLE();
5974 return NULL;
5975}
5976
5977void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5978 UNREACHABLE();
5979}
5980
5981LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
5982 bool opt) const {
5983 UNREACHABLE();
5984 return NULL;
5985}
5986
5987void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5988 UNREACHABLE();
5989}
5990
5991LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5992 bool opt) const {
5993 const intptr_t kNumInputs = 2;
5994 const intptr_t kNumTemps = 2;
5995 LocationSummary* summary = new (zone)
5996 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5997 summary->set_in(0, Location::RequiresRegister());
5998 summary->set_in(1, Location::RequiresRegister());
5999 summary->set_temp(0, Location::RequiresRegister());
6000 // Request register that overlaps with S0..S31.
6001 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
6002 // Output is a pair of registers.
6003 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6005 return summary;
6006}
6007
6008void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6009 ASSERT(CanDeoptimize());
6010 compiler::Label* deopt =
6011 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
6012
6013 const Register left = locs()->in(0).reg();
6014 const Register right = locs()->in(1).reg();
6015 ASSERT(locs()->out(0).IsPairLocation());
6016 PairLocation* pair = locs()->out(0).AsPairLocation();
6017 const Register result_div = pair->At(0).reg();
6018 const Register result_mod = pair->At(1).reg();
6019 if (RangeUtils::CanBeZero(divisor_range())) {
6020 // Handle divide by zero in runtime.
6021 __ cmp(right, compiler::Operand(0));
6022 __ b(deopt, EQ);
6023 }
6024 const Register temp = locs()->temp(0).reg();
6025 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
6026 __ SmiUntag(temp, left);
6027 __ SmiUntag(IP, right);
6028 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
6029
6030 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
6031 // case we cannot tag the result.
6032 __ CompareImmediate(result_div, 0x40000000);
6033 __ b(deopt, EQ);
6034 __ SmiUntag(IP, right);
6035 // result_mod <- left - right * result_div.
6036 __ mls(result_mod, IP, result_div, temp);
6037 __ SmiTag(result_div);
6038 __ SmiTag(result_mod);
6039 // Correct MOD result:
6040 // res = left % right;
6041 // if (res < 0) {
6042 // if (right < 0) {
6043 // res = res - right;
6044 // } else {
6045 // res = res + right;
6046 // }
6047 // }
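  // For example, with left = -7 and right = 2 (as untagged values):
  // result_div = -3 and result_mod = -7 - 2 * -3 = -1; the remainder is
  // negative and right is positive, so the correction adds right, giving 1
  // as expected for Dart's % operator.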
6048 compiler::Label done;
6049 __ cmp(result_mod, compiler::Operand(0));
6050 __ b(&done, GE);
6051 // Result is negative, adjust it.
6052 __ cmp(right, compiler::Operand(0));
6053 __ sub(result_mod, result_mod, compiler::Operand(right), LT);
6054 __ add(result_mod, result_mod, compiler::Operand(right), GE);
6055 __ Bind(&done);
6056}
6057
6058// Should be kept in sync with integers.cc Multiply64Hash
6059static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
6060 const Register result,
6061 const Register value_lo,
6062 const Register value_hi) {
6063 __ LoadImmediate(TMP, compiler::Immediate(0x2d51));
6064 __ umull(result, value_lo, value_lo, TMP); // (lo:result) = lo32 * 0x2d51
6065 __ umull(TMP, value_hi, value_hi, TMP); // (hi:TMP) = hi32 * 0x2d51
6066 __ add(TMP, TMP, compiler::Operand(value_lo));
6067 // (0:hi:TMP:result) is 128-bit product
6068 __ eor(result, value_hi, compiler::Operand(result));
6069 __ eor(result, TMP, compiler::Operand(result));
6070 __ AndImmediate(result, result, 0x3fffffff);
6071}
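
// A minimal plain-C++ sketch of the same folding, mirroring the register
// roles above (Multiply64HashSketch is a hypothetical name and not the
// actual integers.cc implementation):
//
//   uint32_t Multiply64HashSketch(uint64_t value) {
//     uint64_t p_lo = (value & 0xffffffffu) * 0x2d51u;    // (value_lo:result)
//     uint64_t p_hi = (value >> 32) * 0x2d51u;            // (value_hi:TMP)
//     uint32_t result = static_cast<uint32_t>(p_lo);
//     uint32_t tmp = static_cast<uint32_t>(p_hi) +
//                    static_cast<uint32_t>(p_lo >> 32);   // add(TMP, TMP, value_lo)
//     uint32_t hi = static_cast<uint32_t>(p_hi >> 32);
//     return (result ^ hi ^ tmp) & 0x3fffffff;            // eor, eor, AndImmediate
//   }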
6072
6073LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
6074 bool opt) const {
6075 const intptr_t kNumInputs = 1;
6076 const intptr_t kNumTemps = 4;
6077 LocationSummary* summary = new (zone) LocationSummary(
6078 zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
6079 summary->set_in(0, Location::RequiresFpuRegister());
6080 summary->set_temp(0, Location::RequiresRegister());
6081 summary->set_temp(1, Location::RegisterLocation(R1));
6082 summary->set_temp(2, Location::RequiresFpuRegister());
6083 summary->set_temp(3, Location::RegisterLocation(R4));
6084 summary->set_out(0, Location::Pair(Location::RegisterLocation(R0),
6086 return summary;
6087}
6088
6089void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6090 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
6091 const Register temp = locs()->temp(0).reg();
6092 const Register temp1 = locs()->temp(1).reg();
6093 ASSERT(temp1 == R1);
6094 const DRegister temp_double = EvenDRegisterOf(locs()->temp(2).fpu_reg());
6095 ASSERT(locs()->temp(3).reg() == R4);
6096 const PairLocation* out_pair = locs()->out(0).AsPairLocation();
6097 Register result = out_pair->At(0).reg();
6098 ASSERT(result == R0);
6099 ASSERT(out_pair->At(1).reg() == R1);
6100
6101 compiler::Label hash_double, hash_double_value, try_convert;
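  // Strategy: a double with an exact integer value is hashed with the
  // integer hash sequence above (presumably so that numerically equal
  // integers and doubles hash alike); infinities, NaNs and non-integral
  // values fall back to hashing the raw bits.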
6102
6103 __ vmovrrd(TMP, temp, value);
6104 __ AndImmediate(temp, temp, 0x7FF00000);
6105 __ CompareImmediate(temp, 0x7FF00000);
6106 __ b(&hash_double_value, EQ); // is_infinity or nan
6107
6108 compiler::Label slow_path;
6109 __ Bind(&try_convert);
6110 // value -> temp1 -> temp_double
6111 __ vcvtid(STMP, value);
6112 __ vmovrs(temp1, STMP);
6113 // Check whether temp1 is INT_MAX or INT_MIN, which indicates a failed vcvt.
6114 __ CompareImmediate(temp1, 0xC0000000);
6115 __ b(&slow_path, MI);
6116 __ vmovdr(DTMP, 0, temp1);
6117 __ vcvtdi(temp_double, STMP);
6118
6119 // If value != temp_double, go to hash_double_value.
6120 __ vcmpd(value, temp_double);
6121 __ vmstat();
6122 __ b(&hash_double_value, NE);
6123 // Sign-extend 32-bit [temp1] value to 64-bit pair of (temp:temp1), which
6124 // is used by the integer hash code sequence.
6125 __ SignFill(temp, temp1);
6126
6127 compiler::Label hash_integer, done;
6128 {
6129 __ Bind(&hash_integer);
6130 // integer hash of (temp:temp1)
6131 EmitHashIntegerCodeSequence(compiler, result, temp1, temp);
6132 __ b(&done);
6133 }
6134
6135 __ Bind(&slow_path);
6136 // The double value potentially doesn't fit into the Smi range, so
6137 // do the double->int64->double via runtime call.
6138 __ StoreDToOffset(value, THR,
6139 compiler::target::Thread::unboxed_runtime_arg_offset());
6140 {
6141 compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
6142 /*preserve_registers=*/true);
6143 __ mov(R0, compiler::Operand(THR));
6144 // Check if the double can be represented as an int64; if it can, the
6145 // runtime stores it back into the unboxed runtime arg slot read below.
6146 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
6147 __ mov(R4, compiler::Operand(R0));
6148 }
6149 __ LoadFromOffset(temp1, THR,
6150 compiler::target::Thread::unboxed_runtime_arg_offset());
6151 __ LoadFromOffset(temp, THR,
6152 compiler::target::Thread::unboxed_runtime_arg_offset() +
6153 compiler::target::kWordSize);
6154 __ cmp(R4, compiler::Operand(0));
6155 __ b(&hash_integer, NE);
6156 __ b(&hash_double);
6157
6158 __ Bind(&hash_double_value);
6159 __ vmovrrd(temp, temp1, value);
6160
6161 __ Bind(&hash_double);
6162 // Convert the double bits (temp:temp1) to a hash code that fits in a Smi.
6163 __ eor(result, temp1, compiler::Operand(temp));
6164 __ AndImmediate(result, result, compiler::target::kSmiMax);
6165
6166 __ Bind(&done);
6167 __ mov(R1, compiler::Operand(0));
6168}
6169
6170LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
6171 bool opt) const {
6172 const intptr_t kNumInputs = 1;
6173 const intptr_t kNumTemps = 1;
6174 LocationSummary* summary = new (zone)
6175 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6176 summary->set_in(0, Location::WritableRegister());
6177 summary->set_out(0, Location::RequiresRegister());
6178 summary->set_temp(0, Location::RequiresRegister());
6179 return summary;
6180}
6181
6182void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6183 Register value = locs()->in(0).reg();
6184 Register result = locs()->out(0).reg();
6185 Register temp = locs()->temp(0).reg();
6186
6187 if (smi_) {
6188 __ SmiUntag(value);
6189 __ SignFill(temp, value);
6190 } else {
6191 __ LoadFieldFromOffset(temp, value,
6192 Mint::value_offset() + compiler::target::kWordSize);
6193 __ LoadFieldFromOffset(value, value, Mint::value_offset());
6194 }
6195 EmitHashIntegerCodeSequence(compiler, result, value, temp);
6196 __ SmiTag(result);
6197}
6198
6199LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6201 // Branches don't produce a result.
6203 return comparison()->locs();
6204}
6205
6206void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6208}
6209
6210LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
6211 bool opt) const {
6212 const intptr_t kNumInputs = 1;
6213 const bool need_mask_temp = IsBitTest();
6214 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
6215 LocationSummary* summary = new (zone)
6216 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6217 summary->set_in(0, Location::RequiresRegister());
6218 if (!IsNullCheck()) {
6219 summary->set_temp(0, Location::RequiresRegister());
6220 if (need_mask_temp) {
6221 summary->set_temp(1, Location::RequiresRegister());
6222 }
6223 }
6224 return summary;
6225}
6226
6227void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
6228 compiler::Label* deopt) {
6229 __ CompareObject(locs()->in(0).reg(), Object::null_object());
6231 Condition cond = IsDeoptIfNull() ? EQ : NE;
6232 __ b(deopt, cond);
6233}
6234
6235void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
6236 intptr_t min,
6237 intptr_t max,
6238 intptr_t mask,
6239 compiler::Label* deopt) {
6240 Register biased_cid = locs()->temp(0).reg();
6241 __ AddImmediate(biased_cid, -min);
6242 __ CompareImmediate(biased_cid, max - min);
6243 __ b(deopt, HI);
6244
6245 Register bit_reg = locs()->temp(1).reg();
6246 __ LoadImmediate(bit_reg, 1);
6247 __ Lsl(bit_reg, bit_reg, biased_cid);
6248 __ TestImmediate(bit_reg, mask);
6249 __ b(deopt, EQ);
6250}
6251
6252int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
6253 int bias,
6254 intptr_t cid_start,
6255 intptr_t cid_end,
6256 bool is_last,
6257 compiler::Label* is_ok,
6258 compiler::Label* deopt,
6259 bool use_near_jump) {
6260 Register biased_cid = locs()->temp(0).reg();
6261 Condition no_match, match;
6262 if (cid_start == cid_end) {
6263 __ CompareImmediate(biased_cid, cid_start - bias);
6264 no_match = NE;
6265 match = EQ;
6266 } else {
6267 // For class ID ranges use a subtract followed by an unsigned
6268 // comparison to check both ends of the ranges with one comparison.
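    // For example, for the cid range [5, 8] with bias 0: biased_cid -= 5,
    // then an unsigned compare against 3 accepts exactly cids 5..8, since
    // any cid below 5 wraps around to a large unsigned value.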
6269 __ AddImmediate(biased_cid, bias - cid_start);
6270 bias = cid_start;
6271 __ CompareImmediate(biased_cid, cid_end - cid_start);
6272 no_match = HI; // Unsigned higher.
6273 match = LS; // Unsigned lower or same.
6274 }
6275 if (is_last) {
6276 __ b(deopt, no_match);
6277 } else {
6278 __ b(is_ok, match);
6279 }
6280 return bias;
6281}
6282
6283LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
6284 bool opt) const {
6285 const intptr_t kNumInputs = 1;
6286 const intptr_t kNumTemps = 0;
6287 LocationSummary* summary = new (zone)
6288 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6289 summary->set_in(0, Location::RequiresRegister());
6290 return summary;
6291}
6292
6293void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6294 const Register value = locs()->in(0).reg();
6295 compiler::Label* deopt =
6296 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
6297 __ BranchIfNotSmi(value, deopt);
6298}
6299
6300void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6301 Register value_reg = locs()->in(0).reg();
6302 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
6303 // in order to be able to allocate it in a register.
6304 __ CompareObject(value_reg, Object::null_object());
6305
6306 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
6307 Code& stub = Code::ZoneHandle(
6308 compiler->zone(),
6310 const bool using_shared_stub = locs()->call_on_shared_slow_path();
6311
6312 if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
6313 compiler->flow_graph().graph_entry()->NeedsFrame()) {
6314 __ GenerateUnRelocatedPcRelativeCall(EQUAL);
6315 compiler->AddPcRelativeCallStubTarget(stub);
6316
6317 // We use the "extended" environment which has the locations updated to
6318 // reflect live registers being saved in the shared spilling stubs (see
6319 // the stub above).
6320 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
6321 compiler->EmitCallsiteMetadata(source(), deopt_id(),
6322 UntaggedPcDescriptors::kOther, locs(),
6323 extended_env);
6325 return;
6326 }
6327
6328 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
6329 compiler->AddSlowPathCode(slow_path);
6330
6331 __ BranchIf(EQUAL, slow_path->entry_label());
6332}
6333
6334LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
6335 bool opt) const {
6336 const intptr_t kNumInputs = 1;
6337 const intptr_t kNumTemps = 0;
6338 LocationSummary* summary = new (zone)
6339 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6340 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
6341 : Location::WritableRegister());
6342 return summary;
6343}
6344
6345void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6346 Register value = locs()->in(0).reg();
6347 compiler::Label* deopt =
6348 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
6349 if (cids_.IsSingleCid()) {
6350 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.cid_start));
6351 __ b(deopt, NE);
6352 } else {
6353 __ AddImmediate(value, -compiler::target::ToRawSmi(cids_.cid_start));
6354 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.Extent()));
6355 __ b(deopt, HI); // Unsigned higher.
6356 }
6357}
6358
6359LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
6360 bool opt) const {
6361 const intptr_t kNumInputs = 2;
6362 const intptr_t kNumTemps = 0;
6363 LocationSummary* locs = new (zone)
6364 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6367 return locs;
6368}
6369
6370void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6371 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
6372 compiler::Label* deopt =
6373 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
6374
6375 Location length_loc = locs()->in(kLengthPos);
6376 Location index_loc = locs()->in(kIndexPos);
6377
6378 if (length_loc.IsConstant() && index_loc.IsConstant()) {
6379#ifdef DEBUG
6380 const int32_t length = compiler::target::SmiValue(length_loc.constant());
6381 const int32_t index = compiler::target::SmiValue(index_loc.constant());
6382 ASSERT((length <= index) || (index < 0));
6383#endif
6384 // Unconditionally deoptimize for constant bounds checks because they
6385 // only occur when the index is out-of-bounds.
6386 __ b(deopt);
6387 return;
6388 }
6389
6390 const intptr_t index_cid = index()->Type()->ToCid();
6391 if (index_loc.IsConstant()) {
6392 const Register length = length_loc.reg();
6393 __ CompareImmediate(length,
6394 compiler::target::ToRawSmi(index_loc.constant()));
6395 __ b(deopt, LS);
6396 } else if (length_loc.IsConstant()) {
6397 const Register index = index_loc.reg();
6398 if (index_cid != kSmiCid) {
6399 __ BranchIfNotSmi(index, deopt);
6400 }
6401 if (compiler::target::SmiValue(length_loc.constant()) ==
6402 compiler::target::kSmiMax) {
6403 __ tst(index, compiler::Operand(index));
6404 __ b(deopt, MI);
6405 } else {
6406 __ CompareImmediate(index,
6407 compiler::target::ToRawSmi(length_loc.constant()));
6408 __ b(deopt, CS);
6409 }
6410 } else {
6411 const Register length = length_loc.reg();
6412 const Register index = index_loc.reg();
6413 if (index_cid != kSmiCid) {
6414 __ BranchIfNotSmi(index, deopt);
6415 }
6416 __ cmp(index, compiler::Operand(length));
6417 __ b(deopt, CS);
6418 }
6419}
6420
6421LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
6422 bool opt) const {
6423 const intptr_t kNumInputs = 1;
6424 const intptr_t kNumTemps = 0;
6425 LocationSummary* locs = new (zone) LocationSummary(
6426 zone, kNumInputs, kNumTemps,
6427 UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
6429 locs->set_in(kReceiver, Location::RequiresRegister());
6430 return locs;
6431}
6432
6433void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6434 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
6435 compiler->AddSlowPathCode(slow_path);
6436 __ ldrb(TMP, compiler::FieldAddress(locs()->in(0).reg(),
6437 compiler::target::Object::tags_offset()));
6438 // The immutable bit is in the first byte of the tags word.
6439 ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
6440 __ TestImmediate(TMP, 1 << compiler::target::UntaggedObject::kImmutableBit);
6441 __ b(slow_path->entry_label(), NOT_ZERO);
6442}
6443
6444LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6445 bool opt) const {
6446 const intptr_t kNumInputs = 2;
6447 const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
6448 LocationSummary* summary = new (zone)
6449 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6450 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6452
6453 compiler::Operand o;
6454 if (CanBePairOfImmediateOperands(right(), &o, &o) &&
6455 (op_kind() == Token::kBIT_AND || op_kind() == Token::kBIT_OR ||
6456 op_kind() == Token::kBIT_XOR || op_kind() == Token::kADD ||
6457 op_kind() == Token::kSUB)) {
6458 summary->set_in(1, Location::Constant(right()->definition()->AsConstant()));
6459 } else {
6460 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6462 }
6463 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6465 if (op_kind() == Token::kMUL) {
6466 summary->set_temp(0, Location::RequiresRegister());
6467 }
6468 return summary;
6469}
6470
6471void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6472 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6473 Register left_lo = left_pair->At(0).reg();
6474 Register left_hi = left_pair->At(1).reg();
6475 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6476 Register out_lo = out_pair->At(0).reg();
6477 Register out_hi = out_pair->At(1).reg();
6478 ASSERT(!can_overflow());
6479 ASSERT(!CanDeoptimize());
6480
6481 compiler::Operand right_lo, right_hi;
6482 if (locs()->in(1).IsConstant()) {
6483 const bool ok = CanBePairOfImmediateOperands(locs()->in(1).constant(),
6484 &right_lo, &right_hi);
6486 } else {
6487 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6488 right_lo = compiler::Operand(right_pair->At(0).reg());
6489 right_hi = compiler::Operand(right_pair->At(1).reg());
6490 }
6491
6492 switch (op_kind()) {
6493 case Token::kBIT_AND: {
6494 __ and_(out_lo, left_lo, compiler::Operand(right_lo));
6495 __ and_(out_hi, left_hi, compiler::Operand(right_hi));
6496 break;
6497 }
6498 case Token::kBIT_OR: {
6499 __ orr(out_lo, left_lo, compiler::Operand(right_lo));
6500 __ orr(out_hi, left_hi, compiler::Operand(right_hi));
6501 break;
6502 }
6503 case Token::kBIT_XOR: {
6504 __ eor(out_lo, left_lo, compiler::Operand(right_lo));
6505 __ eor(out_hi, left_hi, compiler::Operand(right_hi));
6506 break;
6507 }
6508 case Token::kADD: {
6509 __ adds(out_lo, left_lo, compiler::Operand(right_lo));
6510 __ adcs(out_hi, left_hi, compiler::Operand(right_hi));
6511 break;
6512 }
6513 case Token::kSUB: {
6514 __ subs(out_lo, left_lo, compiler::Operand(right_lo));
6515 __ sbcs(out_hi, left_hi, compiler::Operand(right_hi));
6516 break;
6517 }
6518 case Token::kMUL: {
6519 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6520 Register right_lo_reg = right_pair->At(0).reg();
6521 Register right_hi_reg = right_pair->At(1).reg();
6522 // Compute 64-bit a * b as:
6523 // a_l * b_l + (a_h * b_l + a_l * b_h) << 32
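      // The a_h * b_h term is omitted because it only contributes to bits
      // 64 and above of the full product, which are discarded here.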
6524 Register temp = locs()->temp(0).reg();
6525 __ mul(temp, left_lo, right_hi_reg);
6526 __ mla(out_hi, left_hi, right_lo_reg, temp);
6527 __ umull(out_lo, temp, left_lo, right_lo_reg);
6528 __ add(out_hi, out_hi, compiler::Operand(temp));
6529 break;
6530 }
6531 default:
6532 UNREACHABLE();
6533 }
6534}
6535
6536static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
6537 Token::Kind op_kind,
6538 Register out_lo,
6539 Register out_hi,
6540 Register left_lo,
6541 Register left_hi,
6542 const Object& right) {
6543 const int64_t shift = Integer::Cast(right).AsInt64Value();
6544 ASSERT(shift >= 0);
6545
6546 switch (op_kind) {
6547 case Token::kSHR: {
6548 if (shift < 32) {
6549 __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
6550 __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
6551 __ Asr(out_hi, left_hi, compiler::Operand(shift));
6552 } else {
6553 if (shift == 32) {
6554 __ mov(out_lo, compiler::Operand(left_hi));
6555 } else if (shift < 64) {
6556 __ Asr(out_lo, left_hi, compiler::Operand(shift - 32));
6557 } else {
6558 __ Asr(out_lo, left_hi, compiler::Operand(31));
6559 }
6560 __ Asr(out_hi, left_hi, compiler::Operand(31));
6561 }
6562 break;
6563 }
6564 case Token::kUSHR: {
6565 ASSERT(shift < 64);
6566 if (shift < 32) {
6567 __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
6568 __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
6569 __ Lsr(out_hi, left_hi, compiler::Operand(shift));
6570 } else {
6571 if (shift == 32) {
6572 __ mov(out_lo, compiler::Operand(left_hi));
6573 } else {
6574 __ Lsr(out_lo, left_hi, compiler::Operand(shift - 32));
6575 }
6576 __ mov(out_hi, compiler::Operand(0));
6577 }
6578 break;
6579 }
6580 case Token::kSHL: {
6581 ASSERT(shift < 64);
6582 if (shift < 32) {
6583 __ Lsr(out_hi, left_lo, compiler::Operand(32 - shift));
6584 __ orr(out_hi, out_hi, compiler::Operand(left_hi, LSL, shift));
6585 __ Lsl(out_lo, left_lo, compiler::Operand(shift));
6586 } else {
6587 if (shift == 32) {
6588 __ mov(out_hi, compiler::Operand(left_lo));
6589 } else {
6590 __ Lsl(out_hi, left_lo, compiler::Operand(shift - 32));
6591 }
6592 __ mov(out_lo, compiler::Operand(0));
6593 }
6594 break;
6595 }
6596 default:
6597 UNREACHABLE();
6598 }
6599}
6600
6601static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
6602 Token::Kind op_kind,
6603 Register out_lo,
6604 Register out_hi,
6605 Register left_lo,
6606 Register left_hi,
6607 Register right) {
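  // The shift amount is assumed to be in [0, 63]. The rsbs/sub pairs below
  // set IP = 32 - right when right <= 32 (flags PL) or IP = right - 32 when
  // right > 32 (flags MI); the predicated moves then pick the matching
  // recombination of the two 32-bit halves.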
6608 switch (op_kind) {
6609 case Token::kSHR: {
6610 __ rsbs(IP, right, compiler::Operand(32));
6611 __ sub(IP, right, compiler::Operand(32), MI);
6612 __ mov(out_lo, compiler::Operand(left_hi, ASR, IP), MI);
6613 __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
6614 __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
6615 __ mov(out_hi, compiler::Operand(left_hi, ASR, right));
6616 break;
6617 }
6618 case Token::kUSHR: {
6619 __ rsbs(IP, right, compiler::Operand(32));
6620 __ sub(IP, right, compiler::Operand(32), MI);
6621 __ mov(out_lo, compiler::Operand(left_hi, LSR, IP), MI);
6622 __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
6623 __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
6624 __ mov(out_hi, compiler::Operand(left_hi, LSR, right));
6625 break;
6626 }
6627 case Token::kSHL: {
6628 __ rsbs(IP, right, compiler::Operand(32));
6629 __ sub(IP, right, compiler::Operand(32), MI);
6630 __ mov(out_hi, compiler::Operand(left_lo, LSL, IP), MI);
6631 __ mov(out_hi, compiler::Operand(left_hi, LSL, right), PL);
6632 __ orr(out_hi, out_hi, compiler::Operand(left_lo, LSR, IP), PL);
6633 __ mov(out_lo, compiler::Operand(left_lo, LSL, right));
6634 break;
6635 }
6636 default:
6637 UNREACHABLE();
6638 }
6639}
6640
6641static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6642 Token::Kind op_kind,
6643 Register out,
6644 Register left,
6645 const Object& right) {
6646 const int64_t shift = Integer::Cast(right).AsInt64Value();
6647 ASSERT(shift >= 0);
6648 if (shift >= 32) {
6649 __ LoadImmediate(out, 0);
6650 } else {
6651 switch (op_kind) {
6652 case Token::kSHR:
6653 case Token::kUSHR:
6654 __ Lsr(out, left, compiler::Operand(shift));
6655 break;
6656 case Token::kSHL:
6657 __ Lsl(out, left, compiler::Operand(shift));
6658 break;
6659 default:
6660 UNREACHABLE();
6661 }
6662 }
6663}
6664
6665static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
6666 Token::Kind op_kind,
6667 Register out,
6668 Register left,
6669 Register right) {
6670 switch (op_kind) {
6671 case Token::kSHR:
6672 case Token::kUSHR:
6673 __ Lsr(out, left, right);
6674 break;
6675 case Token::kSHL:
6676 __ Lsl(out, left, right);
6677 break;
6678 default:
6679 UNREACHABLE();
6680 }
6681}
6682
6683class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6684 public:
6685 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
6686 : ThrowErrorSlowPathCode(instruction,
6687 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6688
6689 const char* name() override { return "int64 shift"; }
6690
6691 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6692 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
6693 Register left_hi = left_pair->At(1).reg();
6694 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6695 Register right_lo = right_pair->At(0).reg();
6696 Register right_hi = right_pair->At(1).reg();
6697 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
6698 Register out_lo = out_pair->At(0).reg();
6699 Register out_hi = out_pair->At(1).reg();
6700
6701 __ CompareImmediate(right_hi, 0);
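    // right_hi >= 0 (GE) means the shift count is non-negative but too
    // large: the result is saturated below and the runtime call is skipped.
    // A negative shift count falls through to the ArgumentError runtime
    // entry.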
6702
6703 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6704 case Token::kSHR:
6705 __ Asr(out_hi, left_hi,
6706 compiler::Operand(compiler::target::kBitsPerWord - 1), GE);
6707 __ mov(out_lo, compiler::Operand(out_hi), GE);
6708 break;
6709 case Token::kUSHR:
6710 case Token::kSHL: {
6711 __ LoadImmediate(out_lo, 0, GE);
6712 __ LoadImmediate(out_hi, 0, GE);
6713 break;
6714 }
6715 default:
6716 UNREACHABLE();
6717 }
6718
6719 __ b(exit_label(), GE);
6720
6721 // Can't pass unboxed int64 value directly to runtime call, as all
6722 // arguments are expected to be tagged (boxed).
6723 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6724 // TODO(dartbug.com/33549): Clean this up when unboxed values
6725 // can be passed as arguments.
6726 __ StoreToOffset(right_lo, THR,
6727 compiler::target::Thread::unboxed_runtime_arg_offset());
6728 __ StoreToOffset(right_hi, THR,
6729 compiler::target::Thread::unboxed_runtime_arg_offset() +
6730 compiler::target::kWordSize);
6731 }
6732};
6733
6734LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6735 bool opt) const {
6736 const intptr_t kNumInputs = 2;
6737 const intptr_t kNumTemps = 0;
6738 LocationSummary* summary = new (zone) LocationSummary(
6739 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6740 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6743 right()->definition()->IsConstant()) {
6744 ConstantInstr* constant = right()->definition()->AsConstant();
6745 summary->set_in(1, Location::Constant(constant));
6746 } else {
6747 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6748 Location::RequiresRegister()));
6749 }
6750 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6751 Location::RequiresRegister()));
6752 return summary;
6753}
6754
6755void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6756 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6757 Register left_lo = left_pair->At(0).reg();
6758 Register left_hi = left_pair->At(1).reg();
6759 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6760 Register out_lo = out_pair->At(0).reg();
6761 Register out_hi = out_pair->At(1).reg();
6762 ASSERT(!can_overflow());
6763
6764 if (locs()->in(1).IsConstant()) {
6765 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6766 left_hi, locs()->in(1).constant());
6767 } else {
6768 // Code for a variable shift amount (or constant that throws).
6769 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6770 Register right_lo = right_pair->At(0).reg();
6771 Register right_hi = right_pair->At(1).reg();
6772
6773 // Jump to a slow path if shift is larger than 63 or less than 0.
6774 ShiftInt64OpSlowPath* slow_path = nullptr;
6775 if (!IsShiftCountInRange()) {
6776 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6777 compiler->AddSlowPathCode(slow_path);
6778 __ CompareImmediate(right_hi, 0);
6779 __ b(slow_path->entry_label(), NE);
6780 __ CompareImmediate(right_lo, kShiftCountLimit);
6781 __ b(slow_path->entry_label(), HI);
6782 }
6783
6784 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6785 left_hi, right_lo);
6786
6787 if (slow_path != nullptr) {
6788 __ Bind(slow_path->exit_label());
6789 }
6790 }
6791}
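// Editorial sketch (not part of il_arm.cc): the guard emitted above treats the
// 64-bit shift amount as a register pair; it stays on the fast path only when
// the high word is zero and the low word is at most kShiftCountLimit (63).
// A C++ restatement of that test, using a hypothetical helper name:
#include <cstdint>

inline bool Int64ShiftCountInRange(int64_t shift) {
  const uint32_t lo = static_cast<uint32_t>(shift);
  const uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(shift) >> 32);
  return (hi == 0) && (lo <= 63);  // Same as 0 <= shift && shift <= 63.
}
// Negative amounts have a non-zero high word, so they take the slow path,
// which raises an ArgumentError through the runtime entry.
// End of editorial sketch.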
6792
6793LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6794 Zone* zone,
6795 bool opt) const {
6796 const intptr_t kNumInputs = 2;
6797 const intptr_t kNumTemps = 0;
6798 LocationSummary* summary = new (zone)
6799 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6800 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6801 Location::RequiresRegister()));
6802 summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
6803 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6804 Location::RequiresRegister()));
6805 return summary;
6806}
6807
6808void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6809 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6810 Register left_lo = left_pair->At(0).reg();
6811 Register left_hi = left_pair->At(1).reg();
6812 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6813 Register out_lo = out_pair->At(0).reg();
6814 Register out_hi = out_pair->At(1).reg();
6815 ASSERT(!can_overflow());
6816
6817 if (locs()->in(1).IsConstant()) {
6818 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6819 left_hi, locs()->in(1).constant());
6820 } else {
6821 // Code for a variable shift amount.
6822 Register shift = locs()->in(1).reg();
6823 __ SmiUntag(shift);
6824
6825 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6826 if (!IsShiftCountInRange()) {
6827 ASSERT(CanDeoptimize());
6828 compiler::Label* deopt =
6829 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6830
6831 __ CompareImmediate(shift, kShiftCountLimit);
6832 __ b(deopt, HI);
6833 }
6834
6835 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6836 left_hi, shift);
6837 }
6838}
6839
6840class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
6841 public:
6842 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
6843 : ThrowErrorSlowPathCode(instruction,
6844 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6845
6846 const char* name() override { return "uint32 shift"; }
6847
6848 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6849 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6850 Register right_lo = right_pair->At(0).reg();
6851 Register right_hi = right_pair->At(1).reg();
6852 Register out = instruction()->locs()->out(0).reg();
6853
6854 __ CompareImmediate(right_hi, 0);
6855 __ LoadImmediate(out, 0, GE);
6856 __ b(exit_label(), GE);
6857
6858 // Can't pass unboxed int64 value directly to runtime call, as all
6859 // arguments are expected to be tagged (boxed).
6860 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6861 // TODO(dartbug.com/33549): Clean this up when unboxed values
6862 // can be passed as arguments.
6863 __ StoreToOffset(right_lo, THR,
6864 compiler::target::Thread::unboxed_runtime_arg_offset());
6865 __ StoreToOffset(right_hi, THR,
6866 compiler::target::Thread::unboxed_runtime_arg_offset() +
6867 compiler::target::kWordSize);
6868 }
6869};
6870
6871LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6872 bool opt) const {
6873 const intptr_t kNumInputs = 2;
6874 const intptr_t kNumTemps = 0;
6875 LocationSummary* summary = new (zone) LocationSummary(
6876 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6877 summary->set_in(0, Location::RequiresRegister());
6878 if (RangeUtils::IsPositive(shift_range()) &&
6879 right()->definition()->IsConstant()) {
6880 ConstantInstr* constant = right()->definition()->AsConstant();
6881 summary->set_in(1, Location::Constant(constant));
6882 } else {
6883 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6884 Location::RequiresRegister()));
6885 }
6886 summary->set_out(0, Location::RequiresRegister());
6887 return summary;
6888}
6889
6890void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6891 Register left = locs()->in(0).reg();
6892 Register out = locs()->out(0).reg();
6893
6894 ASSERT(left != out);
6895
6896 if (locs()->in(1).IsConstant()) {
6897 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6898 locs()->in(1).constant());
6899 } else {
6900 // Code for a variable shift amount (or constant that throws).
6901 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6902 Register right_lo = right_pair->At(0).reg();
6903 Register right_hi = right_pair->At(1).reg();
6904
6905 // Jump to a slow path if shift count is > 31 or negative.
6906 ShiftUint32OpSlowPath* slow_path = nullptr;
6907 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6908 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6909 compiler->AddSlowPathCode(slow_path);
6910
6911 __ CompareImmediate(right_hi, 0);
6912 __ b(slow_path->entry_label(), NE);
6913 __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
6914 __ b(slow_path->entry_label(), HI);
6915 }
6916
6917 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
6918
6919 if (slow_path != nullptr) {
6920 __ Bind(slow_path->exit_label());
6921 }
6922 }
6923}
6924
6925LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6926 Zone* zone,
6927 bool opt) const {
6928 const intptr_t kNumInputs = 2;
6929 const intptr_t kNumTemps = 1;
6930 LocationSummary* summary = new (zone)
6931 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6932 summary->set_in(0, Location::RequiresRegister());
6933 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6934 summary->set_temp(0, Location::RequiresRegister());
6935 summary->set_out(0, Location::RequiresRegister());
6936 return summary;
6937}
6938
6939void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6940 FlowGraphCompiler* compiler) {
6941 Register left = locs()->in(0).reg();
6942 Register out = locs()->out(0).reg();
6943 Register temp = locs()->temp(0).reg();
6944 ASSERT(left != out);
6945
6946 if (locs()->in(1).IsConstant()) {
6947 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6948 locs()->in(1).constant());
6949 } else {
6950 Register right = locs()->in(1).reg();
6951 const bool shift_count_in_range =
6952 IsShiftCountInRange(kUint32ShiftCountLimit);
6953
6954 __ SmiUntag(temp, right);
6955 right = temp;
6956
6957 // Deopt if shift count is negative.
6958 if (!shift_count_in_range) {
6959 ASSERT(CanDeoptimize());
6960 compiler::Label* deopt =
6961 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6962
6963 __ CompareImmediate(right, 0);
6964 __ b(deopt, LT);
6965 }
6966
6967 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6968
6969 if (!shift_count_in_range) {
6970 __ CompareImmediate(right, kUint32ShiftCountLimit);
6971 __ LoadImmediate(out, 0, HI);
6972 }
6973 }
6974}
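// Editorial sketch (not part of il_arm.cc): in the speculative uint32 case a
// negative count deoptimizes, while a non-negative count above 31 is patched
// to zero by the trailing CompareImmediate/LoadImmediate pair. The net effect,
// modelled in C++ with a made-up helper (deoptimization elided):
#include <cstdint>

inline uint32_t SpeculativeShiftUint32(uint32_t left, int64_t count,
                                       bool is_shl) {
  // count < 0 is assumed to have deoptimized before reaching this point.
  if (count > 31) return 0;  // Fix-up path guarded by the HI condition above.
  return is_shl ? (left << count) : (left >> count);
}
// End of editorial sketch.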
6975
6976LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6977 bool opt) const {
6978 const intptr_t kNumInputs = 1;
6979 const intptr_t kNumTemps = 0;
6980 LocationSummary* summary = new (zone)
6981 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6982 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6983 Location::RequiresRegister()));
6984 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6985 Location::RequiresRegister()));
6986 return summary;
6987}
6988
6989void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6990 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6991 Register left_lo = left_pair->At(0).reg();
6992 Register left_hi = left_pair->At(1).reg();
6993
6994 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6995 Register out_lo = out_pair->At(0).reg();
6996 Register out_hi = out_pair->At(1).reg();
6997
6998 switch (op_kind()) {
6999 case Token::kBIT_NOT:
7000 __ mvn_(out_lo, compiler::Operand(left_lo));
7001 __ mvn_(out_hi, compiler::Operand(left_hi));
7002 break;
7003 case Token::kNEGATE:
7004 __ rsbs(out_lo, left_lo, compiler::Operand(0));
7005 __ sbc(out_hi, out_hi, compiler::Operand(out_hi));
7006 __ sub(out_hi, out_hi, compiler::Operand(left_hi));
7007 break;
7008 default:
7009 UNREACHABLE();
7010 }
7011}
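// Editorial sketch (not part of il_arm.cc): the kNEGATE sequence negates a
// 64-bit value held in a register pair. rsbs computes 0 - lo and records the
// borrow in the carry flag, sbc of a register with itself turns that borrow
// into 0 or -1 (the previous contents of out_hi cancel out), and the final sub
// subtracts the high word. Equivalent arithmetic in C++, with a made-up name:
#include <cstdint>

inline void NegateInt64Pair(uint32_t lo, uint32_t hi,
                            uint32_t* out_lo, uint32_t* out_hi) {
  const uint32_t borrow = (lo != 0) ? 1u : 0u;  // rsbs clears carry iff lo != 0.
  *out_lo = 0u - lo;                            // rsbs out_lo, left_lo, #0
  *out_hi = (0u - borrow) - hi;                 // sbc ...; sub out_hi, ..., left_hi
}
// End of editorial sketch.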
7012
7013LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
7014 bool opt) const {
7015 const intptr_t kNumInputs = 2;
7016 const intptr_t kNumTemps = 0;
7017 LocationSummary* summary = new (zone)
7018 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7019 summary->set_in(0, Location::RequiresRegister());
7020 summary->set_in(1, Location::RequiresRegister());
7021 summary->set_out(0, Location::RequiresRegister());
7022 return summary;
7023}
7024
7025void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7026 Register left = locs()->in(0).reg();
7027 Register right = locs()->in(1).reg();
7028 Register out = locs()->out(0).reg();
7029 ASSERT(out != left);
7030 switch (op_kind()) {
7031 case Token::kBIT_AND:
7032 __ and_(out, left, compiler::Operand(right));
7033 break;
7034 case Token::kBIT_OR:
7035 __ orr(out, left, compiler::Operand(right));
7036 break;
7037 case Token::kBIT_XOR:
7038 __ eor(out, left, compiler::Operand(right));
7039 break;
7040 case Token::kADD:
7041 __ add(out, left, compiler::Operand(right));
7042 break;
7043 case Token::kSUB:
7044 __ sub(out, left, compiler::Operand(right));
7045 break;
7046 case Token::kMUL:
7047 __ mul(out, left, right);
7048 break;
7049 default:
7050 UNREACHABLE();
7051 }
7052}
7053
7054LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
7055 bool opt) const {
7056 const intptr_t kNumInputs = 1;
7057 const intptr_t kNumTemps = 0;
7058 LocationSummary* summary = new (zone)
7059 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7060 summary->set_in(0, Location::RequiresRegister());
7061 summary->set_out(0, Location::RequiresRegister());
7062 return summary;
7063}
7064
7065void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7066 Register left = locs()->in(0).reg();
7067 Register out = locs()->out(0).reg();
7068 ASSERT(left != out);
7069
7070 ASSERT(op_kind() == Token::kBIT_NOT);
7071
7072 __ mvn_(out, compiler::Operand(left));
7073}
7074
7075LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
7076 bool opt) const {
7077 const intptr_t kNumInputs = 1;
7078 const intptr_t kNumTemps = 0;
7079 LocationSummary* summary = new (zone)
7080 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7081 if (from() == kUntagged || to() == kUntagged) {
7082 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
7083 (from() == kUntagged && to() == kUnboxedUint32) ||
7084 (from() == kUnboxedInt32 && to() == kUntagged) ||
7085 (from() == kUnboxedUint32 && to() == kUntagged));
7086 ASSERT(!CanDeoptimize());
7087 summary->set_in(0, Location::RequiresRegister());
7088 summary->set_out(0, Location::SameAsFirstInput());
7089 } else if (from() == kUnboxedInt64) {
7090 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7091 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7092 Location::RequiresRegister()));
7093 summary->set_out(0, Location::RequiresRegister());
7094 } else if (to() == kUnboxedInt64) {
7095 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7096 summary->set_in(0, Location::RequiresRegister());
7097 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7098 Location::RequiresRegister()));
7099 } else {
7100 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7101 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7102 summary->set_in(0, Location::RequiresRegister());
7103 summary->set_out(0, Location::SameAsFirstInput());
7104 }
7105 return summary;
7106}
7107
7108void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7109 const bool is_nop_conversion =
7110 (from() == kUntagged && to() == kUnboxedInt32) ||
7111 (from() == kUntagged && to() == kUnboxedUint32) ||
7112 (from() == kUnboxedInt32 && to() == kUntagged) ||
7113 (from() == kUnboxedUint32 && to() == kUntagged);
7114 if (is_nop_conversion) {
7115 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
7116 return;
7117 }
7118
7119 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
7120 const Register out = locs()->out(0).reg();
7121 // Representations are bitwise equivalent.
7122 ASSERT(out == locs()->in(0).reg());
7123 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
7124 const Register out = locs()->out(0).reg();
7125 // Representations are bitwise equivalent.
7126 ASSERT(out == locs()->in(0).reg());
7127 if (CanDeoptimize()) {
7128 compiler::Label* deopt =
7129 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7130 __ tst(out, compiler::Operand(out));
7131 __ b(deopt, MI);
7132 }
7133 } else if (from() == kUnboxedInt64) {
7134 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7135 PairLocation* in_pair = locs()->in(0).AsPairLocation();
7136 Register in_lo = in_pair->At(0).reg();
7137 Register in_hi = in_pair->At(1).reg();
7138 Register out = locs()->out(0).reg();
7139 // Copy low word.
7140 __ mov(out, compiler::Operand(in_lo));
7141 if (CanDeoptimize()) {
7142 compiler::Label* deopt =
7143 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7144 ASSERT(to() == kUnboxedInt32);
7145 __ cmp(in_hi,
7146 compiler::Operand(in_lo, ASR, compiler::target::kBitsPerWord - 1));
7147 __ b(deopt, NE);
7148 }
7149 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
7150 ASSERT(to() == kUnboxedInt64);
7151 Register in = locs()->in(0).reg();
7152 PairLocation* out_pair = locs()->out(0).AsPairLocation();
7153 Register out_lo = out_pair->At(0).reg();
7154 Register out_hi = out_pair->At(1).reg();
7155 // Copy low word.
7156 __ mov(out_lo, compiler::Operand(in));
7157 if (from() == kUnboxedUint32) {
7158 __ eor(out_hi, out_hi, compiler::Operand(out_hi));
7159 } else {
7160 ASSERT(from() == kUnboxedInt32);
7161 __ mov(out_hi,
7162 compiler::Operand(in, ASR, compiler::target::kBitsPerWord - 1));
7163 }
7164 } else {
7165 UNREACHABLE();
7166 }
7167}
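// Editorial sketch (not part of il_arm.cc): the int64 -> int32 narrowing above
// deoptimizes unless the value fits in 32 signed bits, which it detects by
// comparing the high word with the sign extension of the low word
// (cmp in_hi, Operand(in_lo, ASR, 31)). A C++ model of that check:
#include <cstdint>

inline bool FitsInInt32(uint32_t lo, uint32_t hi) {
  const uint32_t sign_extension =
      static_cast<uint32_t>(static_cast<int32_t>(lo) >> 31);  // ASR by 31.
  return hi == sign_extension;
}
// End of editorial sketch.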
7168
7169LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7170 LocationSummary* summary =
7171 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7172 /*num_temps=*/0, LocationSummary::kNoCall);
7173 switch (from()) {
7174 case kUnboxedInt32:
7175 summary->set_in(0, Location::RequiresRegister());
7176 break;
7177 case kUnboxedInt64:
7178 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7179 Location::RequiresRegister()));
7180 break;
7181 case kUnboxedFloat:
7182 case kUnboxedDouble:
7183 // Choose an FPU register with corresponding D and S registers.
7184 summary->set_in(0, Location::FpuRegisterLocation(Q0));
7185 break;
7186 default:
7187 UNREACHABLE();
7188 }
7189
7190 switch (to()) {
7191 case kUnboxedInt32:
7192 summary->set_out(0, Location::RequiresRegister());
7193 break;
7194 case kUnboxedInt64:
7195 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7196 Location::RequiresRegister()));
7197 break;
7198 case kUnboxedFloat:
7199 case kUnboxedDouble:
7200 // Choose an FPU register with corresponding D and S registers.
7201 summary->set_out(0, Location::FpuRegisterLocation(Q0));
7202 break;
7203 default:
7204 UNREACHABLE();
7205 }
7206 return summary;
7207}
7208
7209void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7210 switch (from()) {
7211 case kUnboxedInt32: {
7212 ASSERT(to() == kUnboxedFloat);
7213 const Register from_reg = locs()->in(0).reg();
7214 const FpuRegister to_reg = locs()->out(0).fpu_reg();
7215 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_reg);
7216 break;
7217 }
7218 case kUnboxedFloat: {
7219 ASSERT(to() == kUnboxedInt32);
7220 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7221 const Register to_reg = locs()->out(0).reg();
7222 __ vmovrs(to_reg, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7223 break;
7224 }
7225 case kUnboxedInt64: {
7226 ASSERT(to() == kUnboxedDouble);
7227 const Register from_lo = locs()->in(0).AsPairLocation()->At(0).reg();
7228 const Register from_hi = locs()->in(0).AsPairLocation()->At(1).reg();
7229 const FpuRegister to_reg = locs()->out(0).fpu_reg();
7230 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_lo);
7231 __ vmovsr(OddSRegisterOf(EvenDRegisterOf(to_reg)), from_hi);
7232 break;
7233 }
7234 case kUnboxedDouble: {
7235 ASSERT(to() == kUnboxedInt64);
7236 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7237 const Register to_lo = locs()->out(0).AsPairLocation()->At(0).reg();
7238 const Register to_hi = locs()->out(0).AsPairLocation()->At(1).reg();
7239 __ vmovrs(to_lo, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7240 __ vmovrs(to_hi, OddSRegisterOf(EvenDRegisterOf(from_reg)));
7241 break;
7242 }
7243 default:
7244 UNREACHABLE();
7245 }
7246}
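// Editorial sketch (not part of il_arm.cc): BitCastInstr reinterprets the same
// bits in another register class; the vmovsr/vmovrs pairs above copy raw words
// between core registers and the S registers that overlap the chosen D/Q
// register, with no numeric conversion. The portable equivalent is a byte
// copy, shown here for the double -> int64 direction:
#include <cstdint>
#include <cstring>

inline uint64_t DoubleBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // Bit-for-bit reinterpretation.
  return bits;
}
// End of editorial sketch.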
7247
7248LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7249 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7250}
7251
7252void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7253 __ Stop(message());
7254}
7255
7256void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7257 BlockEntryInstr* entry = normal_entry();
7258 if (entry != nullptr) {
7259 if (!compiler->CanFallThroughTo(entry)) {
7260 FATAL("Checked function entry must have no offset");
7261 }
7262 } else {
7263 entry = osr_entry();
7264 if (!compiler->CanFallThroughTo(entry)) {
7265 __ b(compiler->GetJumpLabel(entry));
7266 }
7267 }
7268}
7269
7270LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7271 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7272}
7273
7274void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7275 if (!compiler->is_optimizing()) {
7276 if (FLAG_reorder_basic_blocks) {
7277 compiler->EmitEdgeCounter(block()->preorder_number());
7278 }
7279 // Add a deoptimization descriptor for deoptimizing instructions that
7280 // may be inserted before this instruction.
7281 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
7282 InstructionSource());
7283 }
7284 if (HasParallelMove()) {
7285 parallel_move()->EmitNativeCode(compiler);
7286 }
7287
7288 // We can fall through if the successor is the next block in the list.
7289 // Otherwise, we need a jump.
7290 if (!compiler->CanFallThroughTo(successor())) {
7291 __ b(compiler->GetJumpLabel(successor()));
7292 }
7293}
7294
7295LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
7296 bool opt) const {
7297 const intptr_t kNumInputs = 1;
7298 const intptr_t kNumTemps = 2;
7299
7300 LocationSummary* summary = new (zone)
7301 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7302
7303 summary->set_in(0, Location::RequiresRegister());
7304 summary->set_temp(0, Location::RequiresRegister());
7305 summary->set_temp(1, Location::RequiresRegister());
7306
7307 return summary;
7308}
7309
7310void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7311 Register index_reg = locs()->in(0).reg();
7312 Register target_address_reg = locs()->temp(0).reg();
7313 Register offset_reg = locs()->temp(1).reg();
7314
7315 ASSERT(RequiredInputRepresentation(0) == kTagged);
7316 __ LoadObject(offset_reg, offsets_);
7317 const auto element_address = __ ElementAddressForRegIndex(
7318 /*is_load=*/true,
7319 /*is_external=*/false, kTypedDataInt32ArrayCid,
7320 /*index_scale=*/4,
7321 /*index_unboxed=*/false, offset_reg, index_reg);
7322 __ ldr(offset_reg, element_address);
7323
7324 // Offset is relative to entry pc.
7325 const intptr_t entry_to_pc_offset = __ CodeSize() + Instr::kPCReadOffset;
7326 __ mov(target_address_reg, compiler::Operand(PC));
7327 __ AddImmediate(target_address_reg, -entry_to_pc_offset);
7328
7329 __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));
7330
7331 // Jump to the absolute address.
7332 __ bx(target_address_reg);
7333}
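// Editorial note (not part of il_arm.cc): reading PC on ARM returns the
// address of the current instruction plus a fixed read-ahead
// (Instr::kPCReadOffset, 8 in ARM mode), so the code above first recovers the
// function's entry address and only then adds the offset loaded from the
// table. With illustrative names:
//
//   pc_read = entry + code_size_at_mov + kPCReadOffset
//   target  = pc_read - (code_size_at_mov + kPCReadOffset) + table_offset
//           = entry + table_offset
//
// End of editorial note.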
7334
7335LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
7336 bool opt) const {
7337 const intptr_t kNumInputs = 2;
7338 const intptr_t kNumTemps = 0;
7339 if (needs_number_check()) {
7340 LocationSummary* locs = new (zone)
7341 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7342 locs->set_in(0, Location::RegisterLocation(R0));
7343 locs->set_in(1, Location::RegisterLocation(R1));
7344 locs->set_out(0, Location::RegisterLocation(R0));
7345 return locs;
7346 }
7347 LocationSummary* locs = new (zone)
7348 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7349
7350 // If a constant has more than one use, make sure it is loaded in register
7351 // so that multiple immediate loads can be avoided.
7352 ConstantInstr* constant = left()->definition()->AsConstant();
7353 if ((constant != nullptr) && !left()->IsSingleUse()) {
7354 locs->set_in(0, Location::RequiresRegister());
7355 } else {
7356 locs->set_in(0, LocationRegisterOrConstant(left()));
7357 }
7358
7359 constant = right()->definition()->AsConstant();
7360 if ((constant != nullptr) && !right()->IsSingleUse()) {
7361 locs->set_in(1, Location::RequiresRegister());
7362 } else {
7363 // Only one of the inputs can be a constant. Choose register if the first
7364 // one is a constant.
7365 locs->set_in(1, locs->in(0).IsConstant()
7366 ? Location::RequiresRegister()
7367 : LocationRegisterOrConstant(right()));
7368 }
7369 locs->set_out(0, Location::RequiresRegister());
7370 return locs;
7371}
7372
7373Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
7374 FlowGraphCompiler* compiler,
7375 BranchLabels labels,
7376 Register reg,
7377 const Object& obj) {
7378 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
7379 source(), deopt_id());
7380}
7381
7382void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7383 // The ARM code may not use true- and false-labels here.
7384 compiler::Label is_true, is_false, done;
7385 BranchLabels labels = {&is_true, &is_false, &is_false};
7386 Condition true_condition = EmitComparisonCode(compiler, labels);
7387
7388 const Register result = this->locs()->out(0).reg();
7389 if (is_false.IsLinked() || is_true.IsLinked()) {
7390 if (true_condition != kInvalidCondition) {
7391 EmitBranchOnCondition(compiler, true_condition, labels);
7392 }
7393 __ Bind(&is_false);
7394 __ LoadObject(result, Bool::False());
7395 __ b(&done);
7396 __ Bind(&is_true);
7397 __ LoadObject(result, Bool::True());
7398 __ Bind(&done);
7399 } else {
7400 // If EmitComparisonCode did not use the labels and just returned
7401 // a condition we can avoid the branch and use conditional loads.
7402 ASSERT(true_condition != kInvalidCondition);
7403 __ LoadObject(result, Bool::True(), true_condition);
7404 __ LoadObject(result, Bool::False(), InvertCondition(true_condition));
7405 }
7406}
7407
7408void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
7409 BranchInstr* branch) {
7410 BranchLabels labels = compiler->CreateBranchLabels(branch);
7411 Condition true_condition = EmitComparisonCode(compiler, labels);
7412 if (true_condition != kInvalidCondition) {
7413 EmitBranchOnCondition(compiler, true_condition, labels);
7414 }
7415}
7416
7417LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
7418 bool opt) const {
7419 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7420 LocationSummary::kNoCall);
7421}
7422
7423void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7424 const Register input = locs()->in(0).reg();
7425 const Register result = locs()->out(0).reg();
7426 __ eor(result, input,
7427 compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
7428}
7429
7430LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
7431 bool opt) const {
7432 UNREACHABLE();
7433 return NULL;
7434}
7435
7436void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7437 UNREACHABLE();
7438}
7439
7440LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
7441 bool opt) const {
7442 UNREACHABLE();
7443 return NULL;
7444}
7445
7446void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7447 UNREACHABLE();
7448}
7449
7450LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
7451 bool opt) const {
7452 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
7453 const intptr_t kNumTemps = 0;
7454 LocationSummary* locs = new (zone)
7455 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7456 if (type_arguments() != nullptr) {
7459 }
7460 locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
7461 return locs;
7462}
7463
7464void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7465 if (type_arguments() != nullptr) {
7466 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
7467 if (type_usage_info != nullptr) {
7468 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
7469 type_arguments()->definition());
7470 }
7471 }
7472 const Code& stub = Code::ZoneHandle(
7473 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
7474 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
7475 locs(), deopt_id(), env());
7476}
7477
7478void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7479#ifdef PRODUCT
7480 UNREACHABLE();
7481#else
7482 ASSERT(!compiler->is_optimizing());
7483 __ BranchLinkPatchable(StubCode::DebugStepCheck());
7484 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
7485 compiler->RecordSafepoint(locs());
7486#endif
7487}
7488
7489} // namespace dart
7490
7491#endif // defined(TARGET_ARCH_ARM)