il_arm.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM.
6#if defined(TARGET_ARCH_ARM)
7
9
19#include "vm/cpu.h"
20#include "vm/dart_entry.h"
21#include "vm/instructions.h"
22#include "vm/object_store.h"
23#include "vm/parser.h"
24#include "vm/simulator.h"
25#include "vm/stack_frame.h"
26#include "vm/stub_code.h"
27#include "vm/symbols.h"
29
30#define __ compiler->assembler()->
31#define Z (compiler->zone())
32
33namespace dart {
34
35// Generic summary for call instructions that have all arguments pushed
36// on the stack and return the result in a fixed location depending on
37// the return value (R0, Location::Pair(R0, R1) or Q0).
38LocationSummary* Instruction::MakeCallSummary(Zone* zone,
39 const Instruction* instr,
40 LocationSummary* locs) {
41 ASSERT(locs == nullptr || locs->always_calls());
42 LocationSummary* result =
43 ((locs == nullptr)
44 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
45 : locs);
46 const auto representation = instr->representation();
47 switch (representation) {
48 case kTagged:
49 case kUntagged:
50 case kUnboxedUint32:
51 case kUnboxedInt32:
52 result->set_out(
53 0, Location::RegisterLocation(CallingConventions::kReturnReg));
54 break;
55 case kPairOfTagged:
56 case kUnboxedInt64:
57 result->set_out(
58 0, Location::Pair(
59 Location::RegisterLocation(CallingConventions::kReturnReg),
60 Location::RegisterLocation(
61 CallingConventions::kSecondReturnReg)));
62 break;
63 case kUnboxedDouble:
64 result->set_out(
65 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
66 break;
67 default:
68 UNREACHABLE();
69 break;
70 }
71 return result;
72}
73
74LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
75 bool opt) const {
76 const intptr_t kNumInputs = 1;
77 const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
78 LocationSummary* locs = new (zone)
79 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
80
81 locs->set_in(0, Location::RequiresRegister());
82 switch (representation()) {
83 case kTagged:
84 locs->set_out(0, Location::RequiresRegister());
85 break;
86 case kUnboxedInt64:
87 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
88 Location::RequiresRegister()));
89 break;
90 case kUnboxedDouble:
91 locs->set_temp(0, Location::RequiresRegister());
92 locs->set_out(0, Location::RequiresFpuRegister());
93 break;
94 default:
95 UNREACHABLE();
96 break;
97 }
98 return locs;
99}
100
101void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
102 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
103 ASSERT(kSmiTag == 0);
104 ASSERT(kSmiTagSize == 1);
105
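// Note on the scaling below: the index register holds a tagged Smi, i.e. it
// already contains (value << 1) because kSmiTagSize == 1. Shifting it left by
// one more position (LSL 1) therefore yields value * 4, which equals
// value * compiler::target::kWordSize on 32-bit ARM, so the shifted index can
// be added to base_reg() directly as a byte offset.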
106 const Register index = locs()->in(0).reg();
107
108 switch (representation()) {
109 case kTagged: {
110 const auto out = locs()->out(0).reg();
111 __ add(out, base_reg(), compiler::Operand(index, LSL, 1));
112 __ LoadFromOffset(out, out, offset());
113 break;
114 }
115 case kUnboxedInt64: {
116 const auto out_lo = locs()->out(0).AsPairLocation()->At(0).reg();
117 const auto out_hi = locs()->out(0).AsPairLocation()->At(1).reg();
118
119 __ add(out_hi, base_reg(), compiler::Operand(index, LSL, 1));
120 __ LoadFromOffset(out_lo, out_hi, offset());
121 __ LoadFromOffset(out_hi, out_hi, offset() + compiler::target::kWordSize);
122 break;
123 }
124 case kUnboxedDouble: {
125 const auto tmp = locs()->temp(0).reg();
126 const auto out = EvenDRegisterOf(locs()->out(0).fpu_reg());
127 __ add(tmp, base_reg(), compiler::Operand(index, LSL, 1));
128 __ LoadDFromOffset(out, tmp, offset());
129 break;
130 }
131 default:
132 UNREACHABLE();
133 break;
134 }
135}
136
137DEFINE_BACKEND(StoreIndexedUnsafe,
138 (NoLocation, Register index, Register value)) {
139 ASSERT(instr->RequiredInputRepresentation(
140 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
141 __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 1));
142 __ str(value, compiler::Address(TMP, instr->offset()));
143
144 ASSERT(kSmiTag == 0);
145 ASSERT(kSmiTagSize == 1);
146}
147
148DEFINE_BACKEND(TailCall,
149 (NoLocation,
150 Fixed<Register, ARGS_DESC_REG>,
151 Temp<Register> temp)) {
152 compiler->EmitTailCallToStub(instr->code());
153
154 // Even though the TailCallInstr will be the last instruction in a basic
155 // block, the flow graph compiler will emit native code for other blocks after
156 // the one containing this instruction and needs to be able to use the pool.
157 // (The `LeaveDartFrame` above disables usages of the pool.)
158 __ set_constant_pool_allowed(true);
159}
160
161// TODO(http://dartbug.com/51229): We can use TMP for LDM/STM, which means we
162// only need one additional temporary for 8-byte moves. For 16-byte moves,
163// attempting to allocate three temporaries causes too much register pressure,
164// so just use two 8-byte sized moves there per iteration.
165 static constexpr intptr_t kMaxMemoryCopyElementSize =
166 2 * compiler::target::kWordSize;
167
168static constexpr intptr_t kMemoryCopyPayloadTemps = 2;
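// With compiler::target::kWordSize == 4 on ARM, kMaxMemoryCopyElementSize is
// 8 bytes (see the COMPILE_ASSERT in EmitLoopCopy below): TMP plus one
// allocated temp give LDM/STM a two-register list, so each ldm/stm pair moves
// exactly 8 bytes. A 16-byte element therefore takes two such pairs per
// iteration, matching the register-pressure note in the TODO above.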
169
170LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
171 bool opt) const {
172 // The compiler must optimize any function that includes a MemoryCopy
173 // instruction that uses typed data cids, since extracting the payload address
174 // from views is done in a compiler pass after all code motion has happened.
175 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
176 !IsTypedDataBaseClassId(dest_cid_)) ||
177 opt);
178 const intptr_t kNumInputs = 5;
179 const intptr_t kNumTemps =
180 kMemoryCopyPayloadTemps +
181 (element_size_ >= kMaxMemoryCopyElementSize ? 1 : 0);
182 LocationSummary* locs = new (zone)
183 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
190 for (intptr_t i = 0; i < kNumTemps; i++) {
191 locs->set_temp(i, Location::RequiresRegister());
192 }
193 return locs;
194}
195
196void MemoryCopyInstr::EmitUnrolledCopy(FlowGraphCompiler* compiler,
197 Register dest_reg,
198 Register src_reg,
199 intptr_t num_elements,
200 bool reversed) {
201 const intptr_t num_bytes = num_elements * element_size_;
202 // The amount moved in a single load/store pair.
203 const intptr_t mov_size =
204 Utils::Minimum(element_size_, kMaxMemoryCopyElementSize);
205 const intptr_t mov_repeat = num_bytes / mov_size;
206 ASSERT(num_bytes % mov_size == 0);
207 // We can use TMP for all instructions below because element_size_ is
208 // guaranteed to fit in the offset portion of the instruction in the
209 // non-LDM/STM cases.
210
211 if (mov_size == kMaxMemoryCopyElementSize) {
212 RegList temp_regs = (1 << TMP);
213 for (intptr_t i = kMemoryCopyPayloadTemps; i < locs()->temp_count(); i++) {
214 temp_regs |= 1 << locs()->temp(i).reg();
215 }
216 auto block_mode = BlockAddressMode::IA_W;
217 if (reversed) {
218 // When reversed, start the src and dest registers with the end addresses
219 // and apply the negated offset prior to indexing.
220 block_mode = BlockAddressMode::DB_W;
221 __ AddImmediate(src_reg, num_bytes);
222 __ AddImmediate(dest_reg, num_bytes);
223 }
224 for (intptr_t i = 0; i < mov_repeat; i++) {
225 __ ldm(block_mode, src_reg, temp_regs);
226 __ stm(block_mode, dest_reg, temp_regs);
227 }
228 return;
229 }
230
231 for (intptr_t i = 0; i < mov_repeat; i++) {
232 const intptr_t byte_index =
233 (reversed ? mov_repeat - (i + 1) : i) * mov_size;
234 switch (mov_size) {
235 case 1:
236 __ ldrb(TMP, compiler::Address(src_reg, byte_index));
237 __ strb(TMP, compiler::Address(dest_reg, byte_index));
238 break;
239 case 2:
240 __ ldrh(TMP, compiler::Address(src_reg, byte_index));
241 __ strh(TMP, compiler::Address(dest_reg, byte_index));
242 break;
243 case 4:
244 __ ldr(TMP, compiler::Address(src_reg, byte_index));
245 __ str(TMP, compiler::Address(dest_reg, byte_index));
246 break;
247 default:
248 UNREACHABLE();
249 }
250 }
251}
252
253void MemoryCopyInstr::PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
254 Register length_reg,
255 compiler::Label* done) {
256 __ BranchIfZero(length_reg, done);
257}
258
259static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
261 switch (bytes) {
262 case 1:
264 case 2:
266 case 4:
268 case 8:
270 default:
271 UNREACHABLE();
273 }
274}
275
276static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,
277 Register dest_reg,
278 Register src_reg,
279 Register length_reg,
280 intptr_t element_size,
281 bool unboxed_inputs,
282 bool reversed,
283 compiler::Label* done) {
286
287 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
288 const intptr_t base_shift =
289 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
290 auto const mode =
291 reversed ? compiler::Address::NegPreIndex : compiler::Address::PostIndex;
292 intptr_t tested_bits = 0;
293
294 __ Comment("Copying until region is a multiple of word size");
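// Worked example: element_size == 1 with a tagged (Smi) length of 7, encoded
// as 14. Here element_shift == 0 and base_shift == kSmiTagShift == 1, so the
// loop first tests bit 2 of the encoded length (untagged bit 1: copy 2 bytes)
// and then bit 1 (untagged bit 0: copy 1 byte). Both are set for 7, so 3
// bytes are copied, and the bics below clears those bits, leaving an untagged
// length of 4, a whole word, which the word-sized loop in EmitLoopCopy
// handles.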
295
296 for (intptr_t bit = compiler::target::kWordSizeLog2 - 1; bit >= element_shift;
297 bit--) {
298 const intptr_t bytes = 1 << bit;
299 const intptr_t tested_bit = bit + base_shift;
300 tested_bits |= (1 << tested_bit);
301 __ tst(length_reg, compiler::Operand(1 << tested_bit));
302 auto const sz = OperandSizeFor(bytes);
303 __ Load(TMP, compiler::Address(src_reg, bytes, mode), sz, NOT_ZERO);
304 __ Store(TMP, compiler::Address(dest_reg, bytes, mode), sz, NOT_ZERO);
305 }
306
307 __ bics(length_reg, length_reg, compiler::Operand(tested_bits));
308 __ b(done, ZERO);
309}
310
311void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
312 Register dest_reg,
313 Register src_reg,
314 Register length_reg,
315 compiler::Label* done,
316 compiler::Label* copy_forwards) {
317 const bool reversed = copy_forwards != nullptr;
318 if (reversed) {
319 // Verify that the overlap actually exists by checking to see if
320 // dest_start < src_end.
321 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
322 (unboxed_inputs_ ? 0 : kSmiTagShift);
323 if (shift < 0) {
324 __ add(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift));
325 } else {
326 __ add(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift));
327 }
328 __ CompareRegisters(dest_reg, src_reg);
329 // If dest_reg >= src_reg, then set src_reg back to the start of the source
330 // region before branching to the forwards-copying loop.
331 if (shift < 0) {
332 __ sub(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift),
333 UNSIGNED_GREATER_EQUAL);
334 } else {
335 __ sub(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift),
336 UNSIGNED_GREATER_EQUAL);
337 }
338 __ b(copy_forwards, UNSIGNED_GREATER_EQUAL);
339 // There is overlap, so adjust dest_reg now.
340 if (shift < 0) {
341 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));
342 } else {
343 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));
344 }
345 }
346 // We can use TMP for all instructions below because element_size_ is
347 // guaranteed to fit in the offset portion of the instruction in the
348 // non-LDM/STM cases.
349 CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
350 unboxed_inputs_, reversed, done);
351 // When reversed, the src and dest registers have been adjusted to start at
352 // the end addresses, so apply the negated offset prior to indexing.
353 const auto load_mode =
354 reversed ? compiler::Address::NegPreIndex : compiler::Address::PostIndex;
355 const auto load_multiple_mode =
356 reversed ? BlockAddressMode::DB_W : BlockAddressMode::IA_W;
357 // The size of the uncopied region is a multiple of the word size, so now we
358 // copy the rest by word (unless the element size is larger).
359 const intptr_t loop_subtract =
360 Utils::Maximum<intptr_t>(1, compiler::target::kWordSize / element_size_)
361 << (unboxed_inputs_ ? 0 : kSmiTagShift);
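// For example, with element_size_ == 1 and a tagged length, loop_subtract is
// (4 / 1) << kSmiTagShift == 8: each loop iteration below moves one 4-byte
// word, i.e. four 1-byte elements, and the Smi-encoded length is decremented
// by 8 (the Smi representation of 4).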
362 // Used only for LDM/STM below.
363 RegList temp_regs = (1 << TMP);
364 for (intptr_t i = kMemoryCopyPayloadTemps; i < locs()->temp_count(); i++) {
365 temp_regs |= 1 << locs()->temp(i).reg();
366 }
367 __ Comment("Copying by multiples of word size");
368 compiler::Label loop;
369 __ Bind(&loop);
370 switch (element_size_) {
371 // Fall through for the sizes smaller than compiler::target::kWordSize.
372 case 1:
373 case 2:
374 case 4:
375 __ ldr(TMP, compiler::Address(src_reg, 4, load_mode));
376 __ str(TMP, compiler::Address(dest_reg, 4, load_mode));
377 break;
378 case 8:
379 COMPILE_ASSERT(8 == kMaxMemoryCopyElementSize);
381 __ ldm(load_multiple_mode, src_reg, temp_regs);
382 __ stm(load_multiple_mode, dest_reg, temp_regs);
383 break;
384 case 16:
385 COMPILE_ASSERT(16 > kMaxMemoryCopyElementSize);
387 __ ldm(load_multiple_mode, src_reg, temp_regs);
388 __ stm(load_multiple_mode, dest_reg, temp_regs);
389 __ ldm(load_multiple_mode, src_reg, temp_regs);
390 __ stm(load_multiple_mode, dest_reg, temp_regs);
391 break;
392 default:
393 UNREACHABLE();
394 break;
395 }
396 __ subs(length_reg, length_reg, compiler::Operand(loop_subtract));
397 __ b(&loop, NOT_ZERO);
398}
399
400void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
401 classid_t array_cid,
402 Register array_reg,
403 Register payload_reg,
404 Representation array_rep,
405 Location start_loc) {
406 intptr_t offset = 0;
407 if (array_rep != kTagged) {
408 // Do nothing, array_reg already contains the payload address.
409 } else if (IsTypedDataBaseClassId(array_cid)) {
410 // The incoming array must have been proven to be an internal typed data
411 // object, where the payload is in the object and we can just offset.
412 ASSERT_EQUAL(array_rep, kTagged);
413 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
414 } else {
415 ASSERT_EQUAL(array_rep, kTagged);
416 ASSERT(!IsExternalPayloadClassId(array_cid));
417 switch (array_cid) {
418 case kOneByteStringCid:
419 offset =
420 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
421 break;
422 case kTwoByteStringCid:
423 offset =
424 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
425 break;
426 default:
427 UNREACHABLE();
428 break;
429 }
430 }
431 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
432 if (start_loc.IsConstant()) {
433 const auto& constant = start_loc.constant();
434 ASSERT(constant.IsInteger());
435 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
436 const intptr_t add_value = Utils::AddWithWrapAround(
437 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
438 __ AddImmediate(payload_reg, array_reg, add_value);
439 return;
440 }
441 const Register start_reg = start_loc.reg();
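// The start index is scaled to a byte offset here. For example, with
// element_size_ == 4 and a tagged (Smi) start index, the register holds
// 2 * index, shift is log2(4) - kSmiTagShift == 1, and LSL by 1 produces
// 4 * index, the byte offset of the first element to copy.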
442 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
443 (unboxed_inputs_ ? 0 : kSmiTagShift);
444 if (shift < 0) {
445 __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
446 } else {
447 __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
448 }
449 __ AddImmediate(payload_reg, offset);
450}
451
452LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
453 bool opt) const {
454 const intptr_t kNumInputs = 1;
455 const intptr_t kNumTemps = 0;
456 LocationSummary* locs = new (zone)
457 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
458 if (representation() == kUnboxedDouble) {
459 locs->set_in(0, Location::RequiresFpuRegister());
460 } else if (representation() == kUnboxedInt64) {
461 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
462 Location::RequiresRegister()));
463 } else {
464 locs->set_in(0, LocationAnyOrConstant(value()));
465 }
466 return locs;
467}
468
469// Buffers registers to use STMDB in order to store
470// multiple registers at once.
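// Hypothetical example (not from any particular compilation): if the outgoing
// arguments live in R3..R0 and are moved to stack indices 3..0 in that order,
// every MoveRegister call sees a lower register with the adjacent lower
// index, so all four stores are buffered and flushed as one
// `stm IA, SP, {R0, R1, R2, R3}`. A register or index that breaks the pattern
// forces a Flush of the buffered registers first.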
471class ArgumentsMover : public ValueObject {
472 public:
473 // Flush all buffered registers.
474 void Flush(FlowGraphCompiler* compiler) {
475 if (pending_regs_ != 0) {
476 if (is_single_register_) {
477 __ StoreToOffset(
478 lowest_register_, SP,
479 lowest_register_sp_relative_index_ * compiler::target::kWordSize);
480 } else {
481 if (lowest_register_sp_relative_index_ == 0) {
482 __ stm(IA, SP, pending_regs_);
483 } else {
484 intptr_t offset =
485 lowest_register_sp_relative_index_ * compiler::target::kWordSize;
486 for (intptr_t reg = 0; reg < kNumberOfCpuRegisters; reg++) {
487 if (((1 << reg) & pending_regs_) != 0) {
488 __ StoreToOffset(static_cast<Register>(reg), SP, offset);
489 offset += compiler::target::kWordSize;
490 }
491 }
492 }
493 }
494 pending_regs_ = 0;
495 lowest_register_ = kNoRegister;
496 is_single_register_ = false;
497 }
498 }
499
500 // Buffer given register. May push previously buffered registers if needed.
501 void MoveRegister(FlowGraphCompiler* compiler,
502 intptr_t sp_relative_index,
503 Register reg) {
504 if (pending_regs_ != 0) {
505 ASSERT(lowest_register_ != kNoRegister);
506 // STMDB pushes higher registers first, so we can only buffer
507 // lower registers.
508 if (reg < lowest_register_) {
509 ASSERT((sp_relative_index + 1) == lowest_register_sp_relative_index_);
510 pending_regs_ |= (1 << reg);
511 lowest_register_ = reg;
512 is_single_register_ = false;
513 lowest_register_sp_relative_index_ = sp_relative_index;
514 return;
515 }
516 Flush(compiler);
517 }
518 pending_regs_ = (1 << reg);
519 lowest_register_ = reg;
520 is_single_register_ = true;
521 lowest_register_sp_relative_index_ = sp_relative_index;
522 }
523
524 // Return a register which can be used to hold a value of an argument.
525 Register FindFreeRegister(FlowGraphCompiler* compiler,
526 Instruction* move_arg) {
527 // Dart calling conventions do not have callee-save registers,
528 // so arguments pushing can clobber all allocatable registers
529 // except registers used in arguments which were not pushed yet,
530 // as well as ParallelMove and inputs of a call instruction.
531 intptr_t busy = kReservedCpuRegisters;
532 for (Instruction* instr = move_arg;; instr = instr->next()) {
533 ASSERT(instr != nullptr);
534 if (ParallelMoveInstr* parallel_move = instr->AsParallelMove()) {
535 for (intptr_t i = 0, n = parallel_move->NumMoves(); i < n; ++i) {
536 const auto src_loc = parallel_move->MoveOperandsAt(i)->src();
537 if (src_loc.IsRegister()) {
538 busy |= (1 << src_loc.reg());
539 } else if (src_loc.IsPairLocation()) {
540 busy |= (1 << src_loc.AsPairLocation()->At(0).reg());
541 busy |= (1 << src_loc.AsPairLocation()->At(1).reg());
542 }
543 }
544 } else {
545 ASSERT(instr->IsMoveArgument() || (instr->ArgumentCount() > 0));
546 for (intptr_t i = 0, n = instr->locs()->input_count(); i < n; ++i) {
547 const auto in_loc = instr->locs()->in(i);
548 if (in_loc.IsRegister()) {
549 busy |= (1 << in_loc.reg());
550 } else if (in_loc.IsPairLocation()) {
551 const auto pair_location = in_loc.AsPairLocation();
552 busy |= (1 << pair_location->At(0).reg());
553 busy |= (1 << pair_location->At(1).reg());
554 }
555 }
556 if (instr->ArgumentCount() > 0) {
557 break;
558 }
559 }
560 }
561 if (pending_regs_ != 0) {
562 // Find the highest available register which can be pushed along with
563 // pending registers.
564 Register reg = HighestAvailableRegister(busy, lowest_register_);
565 if (reg != kNoRegister) {
566 return reg;
567 }
568 Flush(compiler);
569 }
570 // At this point there are no pending buffered registers.
571 // Use LR as it's the highest free register, it is not allocatable and
572 // it is clobbered by the call.
573 CLOBBERS_LR({
574 static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
575 "LR should not be allocatable");
576 return LR;
577 });
578 }
579
580 private:
581 RegList pending_regs_ = 0;
582 Register lowest_register_ = kNoRegister;
583 intptr_t lowest_register_sp_relative_index_ = -1;
584 bool is_single_register_ = false;
585
586 Register HighestAvailableRegister(intptr_t busy, Register upper_bound) {
587 for (intptr_t i = upper_bound - 1; i >= 0; --i) {
588 if ((busy & (1 << i)) == 0) {
589 return static_cast<Register>(i);
590 }
591 }
592 return kNoRegister;
593 }
594};
595
596void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
597 ASSERT(compiler->is_optimizing());
598 if (previous()->IsMoveArgument()) {
599 // Already generated by the first MoveArgument in the chain.
600 return;
601 }
602
603 ArgumentsMover pusher;
604 for (MoveArgumentInstr* move_arg = this; move_arg != nullptr;
605 move_arg = move_arg->next()->AsMoveArgument()) {
606 const Location value = move_arg->locs()->in(0);
607 if (value.IsRegister()) {
608 pusher.MoveRegister(compiler, move_arg->location().stack_index(),
609 value.reg());
610 } else if (value.IsPairLocation()) {
611 RELEASE_ASSERT(move_arg->location().IsPairLocation());
612 auto pair = move_arg->location().AsPairLocation();
613 RELEASE_ASSERT(pair->At(0).IsStackSlot());
614 RELEASE_ASSERT(pair->At(1).IsStackSlot());
615 pusher.MoveRegister(compiler, pair->At(1).stack_index(),
616 value.AsPairLocation()->At(1).reg());
617 pusher.MoveRegister(compiler, pair->At(0).stack_index(),
618 value.AsPairLocation()->At(0).reg());
619 } else if (value.IsFpuRegister()) {
620 pusher.Flush(compiler);
621 __ StoreDToOffset(
622 EvenDRegisterOf(value.fpu_reg()), SP,
623 move_arg->location().stack_index() * compiler::target::kWordSize);
624 } else {
625 const Register reg = pusher.FindFreeRegister(compiler, move_arg);
626 ASSERT(reg != kNoRegister);
627 if (value.IsConstant()) {
628 __ LoadObject(reg, value.constant());
629 } else {
630 ASSERT(value.IsStackSlot());
631 const intptr_t value_offset = value.ToStackSlotOffset();
632 __ LoadFromOffset(reg, value.base_reg(), value_offset);
633 }
634 pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
635 }
636 }
637 pusher.Flush(compiler);
638}
639
640LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
641 bool opt) const {
642 const intptr_t kNumInputs = 1;
643 const intptr_t kNumTemps = 0;
644 LocationSummary* locs = new (zone)
645 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
646 switch (representation()) {
647 case kTagged:
648 locs->set_in(0,
649 Location::RegisterLocation(CallingConventions::kReturnReg));
650 break;
651 case kPairOfTagged:
652 case kUnboxedInt64:
653 locs->set_in(
654 0, Location::Pair(
655 Location::RegisterLocation(CallingConventions::kReturnReg),
656 Location::RegisterLocation(
657 CallingConventions::kSecondReturnReg)));
658 break;
659 case kUnboxedDouble:
660 locs->set_in(
661 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
662 break;
663 default:
664 UNREACHABLE();
665 break;
666 }
667 return locs;
668}
669
670// Attempt optimized compilation at return instruction instead of at the entry.
671// The entry needs to be patchable; no inlined objects are allowed in the area
672// that will be overwritten by the patch instructions: a branch macro sequence.
673void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
674 if (locs()->in(0).IsRegister()) {
675 const Register result = locs()->in(0).reg();
676 ASSERT(result == CallingConventions::kReturnReg);
677 } else if (locs()->in(0).IsPairLocation()) {
678 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
679 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
680 ASSERT(result_lo == CallingConventions::kReturnReg);
681 ASSERT(result_hi == CallingConventions::kSecondReturnReg);
682 } else {
683 ASSERT(locs()->in(0).IsFpuRegister());
684 const FpuRegister result = locs()->in(0).fpu_reg();
685 ASSERT(result == CallingConventions::kReturnFpuReg);
686 }
687
688 if (compiler->parsed_function().function().IsAsyncFunction() ||
689 compiler->parsed_function().function().IsAsyncGenerator()) {
690 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
691 const Code& stub = GetReturnStub(compiler);
692 compiler->EmitJumpToStub(stub);
693 return;
694 }
695
696 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
697 __ Ret();
698 return;
699 }
700
701#if defined(DEBUG)
702 compiler::Label stack_ok;
703 __ Comment("Stack Check");
704 const intptr_t fp_sp_dist =
705 (compiler::target::frame_layout.first_local_from_fp + 1 -
706 compiler->StackSize()) *
707 compiler::target::kWordSize;
708 ASSERT(fp_sp_dist <= 0);
709 __ sub(R2, SP, compiler::Operand(FP));
710 __ CompareImmediate(R2, fp_sp_dist);
711 __ b(&stack_ok, EQ);
712 __ bkpt(0);
713 __ Bind(&stack_ok);
714#endif
715 ASSERT(__ constant_pool_allowed());
716 __ LeaveDartFrameAndReturn(); // Disallows constant pool use.
717 // This DartReturnInstr may be emitted out of order by the optimizer. The next
718 // block may be a target expecting a properly set constant pool pointer.
719 __ set_constant_pool_allowed(true);
720}
721
722// Detect pattern when one value is zero and another is a power of 2.
723static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
724 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
725 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
726}
727
728LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
729 bool opt) const {
730 comparison()->InitializeLocationSummary(zone, opt);
731 return comparison()->locs();
732}
733
734void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
735 const Register result = locs()->out(0).reg();
736
737 Location left = locs()->in(0);
738 Location right = locs()->in(1);
739 ASSERT(!left.IsConstant() || !right.IsConstant());
740
741 // Clear out register.
742 __ eor(result, result, compiler::Operand(result));
743
744 // Emit comparison code. This must not overwrite the result register.
745 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
746 // the labels or returning an invalid condition.
747 BranchLabels labels = {nullptr, nullptr, nullptr};
748 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
749 ASSERT(true_condition != kInvalidCondition);
750
751 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
752
753 intptr_t true_value = if_true_;
754 intptr_t false_value = if_false_;
755
756 if (is_power_of_two_kind) {
757 if (true_value == 0) {
758 // We need to have zero in result on true_condition.
759 true_condition = InvertCondition(true_condition);
760 }
761 } else {
762 if (true_value == 0) {
763 // Swap values so that false_value is zero.
764 intptr_t temp = true_value;
765 true_value = false_value;
766 false_value = temp;
767 } else {
768 true_condition = InvertCondition(true_condition);
769 }
770 }
771
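// Worked example of the two paths below. Power-of-two kind with if_true_ == 0
// and if_false_ == 4: the condition was inverted above, so result is 1 exactly
// when the comparison is false; Lsl by shift + kSmiTagSize (2 + 1) turns that
// into the Smi 4, while the true case stays Smi 0. General kind with
// if_true_ == 3 and if_false_ == 1: after the `sub` the register holds 0 or -1
// (all ones), the AndImmediate with ToRawSmi(3) - ToRawSmi(1) == 4 keeps 0 or
// 4, and adding ToRawSmi(1) == 2 yields Smi 1 or Smi 3 as required.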
772 __ mov(result, compiler::Operand(1), true_condition);
773
774 if (is_power_of_two_kind) {
775 const intptr_t shift =
776 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
777 __ Lsl(result, result, compiler::Operand(shift + kSmiTagSize));
778 } else {
779 __ sub(result, result, compiler::Operand(1));
780 const int32_t val = compiler::target::ToRawSmi(true_value) -
781 compiler::target::ToRawSmi(false_value);
782 __ AndImmediate(result, result, val);
783 if (false_value != 0) {
784 __ AddImmediate(result, compiler::target::ToRawSmi(false_value));
785 }
786 }
787}
788
789LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
790 bool opt) const {
791 const intptr_t kNumInputs = 1;
792 const intptr_t kNumTemps = 0;
793 LocationSummary* summary = new (zone)
794 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
795 summary->set_in(
796 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
797 return MakeCallSummary(zone, this, summary);
798}
799
800void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
801 // Load arguments descriptor in ARGS_DESC_REG.
802 const intptr_t argument_count = ArgumentCount(); // Includes type args.
803 const Array& arguments_descriptor =
804 Array::ZoneHandle(Z, GetArgumentsDescriptor());
805 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
806
807 if (FLAG_precompiled_mode) {
808 ASSERT(locs()->in(0).reg() == R0);
809 // R0: Closure with a cached entry point.
810 __ ldr(R2, compiler::FieldAddress(
811 R0, compiler::target::Closure::entry_point_offset()));
812 } else {
813 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
814 // FUNCTION_REG: Function.
815 __ ldr(CODE_REG,
816 compiler::FieldAddress(FUNCTION_REG,
817 compiler::target::Function::code_offset()));
818 // Closure functions only have one entry point.
819 __ ldr(R2,
820 compiler::FieldAddress(
821 FUNCTION_REG, compiler::target::Function::entry_point_offset()));
822 }
823
824 // ARGS_DESC_REG: Arguments descriptor array.
825 // R2: instructions entry point.
826 if (!FLAG_precompiled_mode) {
827 // R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
828 __ LoadImmediate(IC_DATA_REG, 0);
829 }
830 __ blx(R2);
831 compiler->EmitCallsiteMetadata(source(), deopt_id(),
832 UntaggedPcDescriptors::kOther, locs(), env());
833 compiler->EmitDropArguments(argument_count);
834}
835
836LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
837 bool opt) const {
838 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
839 LocationSummary::kNoCall);
840}
841
842void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
843 const Register result = locs()->out(0).reg();
844 __ LoadFromOffset(result, FP,
845 compiler::target::FrameOffsetInBytesForVariable(&local()));
846}
847
848LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
849 bool opt) const {
850 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
851 LocationSummary::kNoCall);
852}
853
854void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
855 const Register value = locs()->in(0).reg();
856 const Register result = locs()->out(0).reg();
857 ASSERT(result == value); // Assert that register assignment is correct.
858 __ StoreToOffset(value, FP,
859 compiler::target::FrameOffsetInBytesForVariable(&local()));
860}
861
862LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
863 bool opt) const {
864 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
865 LocationSummary::kNoCall);
866}
867
868void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
869 // The register allocator drops constant definitions that have no uses.
870 if (!locs()->out(0).IsInvalid()) {
871 const Register result = locs()->out(0).reg();
872 __ LoadObject(result, value());
873 }
874}
875
876void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
877 const Location& destination,
878 Register tmp,
879 intptr_t pair_index) {
880 if (destination.IsRegister()) {
881 if (RepresentationUtils::IsUnboxedInteger(representation())) {
882 int64_t v;
883 const bool ok = compiler::HasIntegerValue(value_, &v);
884 RELEASE_ASSERT(ok);
885 if (value_.IsSmi() &&
886 RepresentationUtils::IsUnsignedInteger(representation())) {
887 // If the value is negative, then the sign bit was preserved during
888 // Smi untagging, which means the resulting value may be unexpected.
889 ASSERT(v >= 0);
890 }
891 __ LoadImmediate(destination.reg(), pair_index == 0
892 ? Utils::Low32Bits(v)
893 : Utils::High32Bits(v));
894 } else {
895 ASSERT(representation() == kTagged);
896 __ LoadObject(destination.reg(), value_);
897 }
898 } else if (destination.IsFpuRegister()) {
899 switch (representation()) {
900 case kUnboxedFloat:
901 __ LoadSImmediate(
902 EvenSRegisterOf(EvenDRegisterOf(destination.fpu_reg())),
903 Double::Cast(value_).value());
904 break;
905 case kUnboxedDouble:
906 ASSERT(tmp != kNoRegister);
907 __ LoadDImmediate(EvenDRegisterOf(destination.fpu_reg()),
908 Double::Cast(value_).value(), tmp);
909 break;
910 case kUnboxedFloat64x2:
911 __ LoadQImmediate(destination.fpu_reg(),
912 Float64x2::Cast(value_).value());
913 break;
914 case kUnboxedFloat32x4:
915 __ LoadQImmediate(destination.fpu_reg(),
916 Float32x4::Cast(value_).value());
917 break;
918 case kUnboxedInt32x4:
919 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
920 break;
921 default:
922 UNREACHABLE();
923 }
924 } else if (destination.IsDoubleStackSlot()) {
925 ASSERT(tmp != kNoRegister);
926 __ LoadDImmediate(DTMP, Double::Cast(value_).value(), tmp);
927 const intptr_t dest_offset = destination.ToStackSlotOffset();
928 __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
929 } else if (destination.IsQuadStackSlot()) {
930 switch (representation()) {
931 case kUnboxedFloat64x2:
932 __ LoadQImmediate(QTMP, Float64x2::Cast(value_).value());
933 break;
934 case kUnboxedFloat32x4:
935 __ LoadQImmediate(QTMP, Float32x4::Cast(value_).value());
936 break;
937 case kUnboxedInt32x4:
938 __ LoadQImmediate(QTMP, Int32x4::Cast(value_).value());
939 break;
940 default:
941 UNREACHABLE();
942 }
943 const intptr_t dest_offset = destination.ToStackSlotOffset();
944 __ StoreMultipleDToOffset(EvenDRegisterOf(QTMP), 2, destination.base_reg(),
945 dest_offset);
946 } else {
947 ASSERT(destination.IsStackSlot());
948 ASSERT(tmp != kNoRegister);
949 const intptr_t dest_offset = destination.ToStackSlotOffset();
950 if (RepresentationUtils::IsUnboxedInteger(representation())) {
951 int64_t v;
952 const bool ok = compiler::HasIntegerValue(value_, &v);
953 RELEASE_ASSERT(ok);
954 __ LoadImmediate(
955 tmp, pair_index == 0 ? Utils::Low32Bits(v) : Utils::High32Bits(v));
956 } else if (representation() == kUnboxedFloat) {
957 int32_t float_bits =
958 bit_cast<int32_t, float>(Double::Cast(value_).value());
959 __ LoadImmediate(tmp, float_bits);
960 } else {
961 ASSERT(representation() == kTagged);
962 __ LoadObject(tmp, value_);
963 }
964 __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
965 }
966}
967
968LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
969 bool opt) const {
970 const bool is_unboxed_int =
974 const intptr_t kNumInputs = 0;
975 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
976 LocationSummary* locs = new (zone)
977 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
978 if (is_unboxed_int) {
979 locs->set_out(0, Location::RequiresRegister());
980 } else {
981 ASSERT(representation_ == kUnboxedDouble);
982 locs->set_out(0, Location::RequiresFpuRegister());
983 }
984 if (kNumTemps > 0) {
985 locs->set_temp(0, Location::RequiresRegister());
986 }
987 return locs;
988}
989
990void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
991 // The register allocator drops constant definitions that have no uses.
992 if (!locs()->out(0).IsInvalid()) {
993 const Register scratch =
994 locs()->temp_count() == 0 ? kNoRegister : locs()->temp(0).reg();
995 EmitMoveToLocation(compiler, locs()->out(0), scratch);
996 }
997}
998
999LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
1000 bool opt) const {
1001 auto const dst_type_loc =
1003
1004 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
1005 // since TTS preserves them. So we make this a `kNoCall` summary,
1006 // even though most other registers can be modified by the stub. To tell the
1007 // register allocator about it, we reserve all the other registers as
1008 // temporary registers.
1009 // TODO(http://dartbug.com/32788): Simplify this.
1010
1011 const intptr_t kNonChangeableInputRegs =
1013 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
1016
1017 const intptr_t kNumInputs = 4;
1018
1019 // We invoke a stub that can potentially clobber any CPU register
1020 // but can only clobber FPU registers on the slow path when
1021 // entering runtime. Preserve all FPU registers that are
1022 // not guaranteed to be preserved by the ABI.
1023 const intptr_t kCpuRegistersToPreserve =
1024 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
1025 const intptr_t kFpuRegistersToPreserve =
1026 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) &
1027 ~(Utils::NBitMask<intptr_t>(kAbiPreservedFpuRegCount)
1029 ~(1 << FpuTMP);
1030
1031 const intptr_t kNumTemps = (Utils::CountOneBits64(kCpuRegistersToPreserve) +
1032 Utils::CountOneBits64(kFpuRegistersToPreserve));
1033
1034 LocationSummary* summary = new (zone) LocationSummary(
1036 summary->set_in(kInstancePos,
1038 summary->set_in(kDstTypePos, dst_type_loc);
1039 summary->set_in(
1044 summary->set_out(0, Location::SameAsFirstInput());
1045
1046 // Let's reserve all registers except for the input ones.
1047 intptr_t next_temp = 0;
1048 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
1049 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
1050 if (should_preserve) {
1051 summary->set_temp(next_temp++,
1052 Location::RegisterLocation(static_cast<Register>(i)));
1053 }
1054 }
1055
1056 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1057 const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
1058 if (should_preserve) {
1059 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
1060 static_cast<FpuRegister>(i)));
1061 }
1062 }
1063
1064 return summary;
1065}
1066
1067void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1068 ASSERT(locs()->always_calls());
1069
1070 auto object_store = compiler->isolate_group()->object_store();
1071 const auto& assert_boolean_stub =
1072 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
1073
1074 compiler::Label done;
1077 __ b(&done, NOT_ZERO);
1078 compiler->GenerateStubCall(source(), assert_boolean_stub,
1079 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
1080 deopt_id(), env());
1081 __ Bind(&done);
1082}
1083
1084static Condition TokenKindToIntCondition(Token::Kind kind) {
1085 switch (kind) {
1086 case Token::kEQ:
1087 return EQ;
1088 case Token::kNE:
1089 return NE;
1090 case Token::kLT:
1091 return LT;
1092 case Token::kGT:
1093 return GT;
1094 case Token::kLTE:
1095 return LE;
1096 case Token::kGTE:
1097 return GE;
1098 default:
1099 UNREACHABLE();
1100 return VS;
1101 }
1102}
1103
1104static bool CanBePairOfImmediateOperands(const dart::Object& constant,
1105 compiler::Operand* low,
1106 compiler::Operand* high) {
1107 int64_t imm;
1108 if (!compiler::HasIntegerValue(constant, &imm)) {
1109 return false;
1110 }
1111 return compiler::Operand::CanHold(Utils::Low32Bits(imm), low) &&
1112 compiler::Operand::CanHold(Utils::High32Bits(imm), high);
1113}
1114
1115static bool CanBePairOfImmediateOperands(Value* value,
1116 compiler::Operand* low,
1117 compiler::Operand* high) {
1118 if (!value->BindsToConstant()) {
1119 return false;
1120 }
1121 return CanBePairOfImmediateOperands(value->BoundConstant(), low, high);
1122}
1123
1124LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1125 bool opt) const {
1126 const intptr_t kNumInputs = 2;
1127 if (is_null_aware()) {
1128 const intptr_t kNumTemps = 1;
1129 LocationSummary* locs = new (zone)
1130 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1131 locs->set_in(0, Location::RequiresRegister());
1132 locs->set_in(1, Location::RequiresRegister());
1133 locs->set_temp(0, Location::RequiresRegister());
1134 locs->set_out(0, Location::RequiresRegister());
1135 return locs;
1136 }
1137 if (operation_cid() == kMintCid) {
1138 compiler::Operand o;
1139 const intptr_t kNumTemps = 0;
1140 LocationSummary* locs = new (zone)
1141 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1142 if (CanBePairOfImmediateOperands(left(), &o, &o)) {
1143 locs->set_in(0, Location::Constant(left()->definition()->AsConstant()));
1144 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1145 Location::RequiresRegister()));
1146 } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {
1147 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1148 Location::RequiresRegister()));
1149 locs->set_in(1, Location::Constant(right()->definition()->AsConstant()));
1150 } else {
1151 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1152 Location::RequiresRegister()));
1153 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1154 Location::RequiresRegister()));
1155 }
1156 locs->set_out(0, Location::RequiresRegister());
1157 return locs;
1158 }
1159 if (operation_cid() == kDoubleCid) {
1160 const intptr_t kNumTemps = 0;
1161 LocationSummary* locs = new (zone)
1162 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1163 locs->set_in(0, Location::RequiresFpuRegister());
1164 locs->set_in(1, Location::RequiresFpuRegister());
1165 locs->set_out(0, Location::RequiresRegister());
1166 return locs;
1167 }
1168 if (operation_cid() == kSmiCid || operation_cid() == kIntegerCid) {
1169 const intptr_t kNumTemps = 0;
1170 LocationSummary* locs = new (zone)
1171 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1172 locs->set_in(0, LocationRegisterOrConstant(left()));
1173 // Only one input can be a constant operand. The case of two constant
1174 // operands should be handled by constant propagation.
1175 locs->set_in(1, locs->in(0).IsConstant()
1176 ? Location::RequiresRegister()
1177 : LocationRegisterOrConstant(right()));
1178 locs->set_out(0, Location::RequiresRegister());
1179 return locs;
1180 }
1181 UNREACHABLE();
1182 return nullptr;
1183}
1184
1185static void LoadValueCid(FlowGraphCompiler* compiler,
1186 Register value_cid_reg,
1187 Register value_reg,
1188 compiler::Label* value_is_smi = nullptr) {
1189 if (value_is_smi == nullptr) {
1190 __ mov(value_cid_reg, compiler::Operand(kSmiCid));
1191 }
1192 __ tst(value_reg, compiler::Operand(kSmiTagMask));
1193 if (value_is_smi == nullptr) {
1194 __ LoadClassId(value_cid_reg, value_reg, NE);
1195 } else {
1196 __ b(value_is_smi, EQ);
1197 __ LoadClassId(value_cid_reg, value_reg);
1198 }
1199}
1200
1201static Condition FlipCondition(Condition condition) {
1202 switch (condition) {
1203 case EQ:
1204 return EQ;
1205 case NE:
1206 return NE;
1207 case LT:
1208 return GT;
1209 case LE:
1210 return GE;
1211 case GT:
1212 return LT;
1213 case GE:
1214 return LE;
1215 case CC:
1216 return HI;
1217 case LS:
1218 return CS;
1219 case HI:
1220 return CC;
1221 case CS:
1222 return LS;
1223 default:
1224 UNREACHABLE();
1225 return EQ;
1226 }
1227}
1228
1229static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
1230 Condition true_condition,
1231 BranchLabels labels) {
1232 if (labels.fall_through == labels.false_label) {
1233 // If the next block is the false successor we will fall through to it.
1234 __ b(labels.true_label, true_condition);
1235 } else {
1236 // If the next block is not the false successor we will branch to it.
1237 Condition false_condition = InvertCondition(true_condition);
1238 __ b(labels.false_label, false_condition);
1239
1240 // Fall through or jump to the true successor.
1241 if (labels.fall_through != labels.true_label) {
1242 __ b(labels.true_label);
1243 }
1244 }
1245}
1246
1247static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1248 LocationSummary* locs,
1249 Token::Kind kind) {
1250 Location left = locs->in(0);
1251 Location right = locs->in(1);
1252 ASSERT(!left.IsConstant() || !right.IsConstant());
1253
1254 Condition true_condition = TokenKindToIntCondition(kind);
1255
1256 if (left.IsConstant()) {
1257 __ CompareObject(right.reg(), left.constant());
1258 true_condition = FlipCondition(true_condition);
1259 } else if (right.IsConstant()) {
1260 __ CompareObject(left.reg(), right.constant());
1261 } else {
1262 __ cmp(left.reg(), compiler::Operand(right.reg()));
1263 }
1264 return true_condition;
1265}
1266
1267static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
1268 LocationSummary* locs,
1269 Token::Kind kind) {
1270 Location left = locs->in(0);
1271 Location right = locs->in(1);
1272 ASSERT(!left.IsConstant() || !right.IsConstant());
1273
1274 Condition true_condition = TokenKindToIntCondition(kind);
1275
1276 if (left.IsConstant()) {
1277 __ CompareImmediate(
1278 right.reg(),
1279 static_cast<uword>(Integer::Cast(left.constant()).AsInt64Value()));
1280 true_condition = FlipCondition(true_condition);
1281 } else if (right.IsConstant()) {
1282 __ CompareImmediate(
1283 left.reg(),
1284 static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
1285 } else {
1286 __ cmp(left.reg(), compiler::Operand(right.reg()));
1287 }
1288 return true_condition;
1289}
1290
1291static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
1292 LocationSummary* locs,
1293 Token::Kind kind) {
1295 PairLocation* left_pair;
1296 compiler::Operand right_lo, right_hi;
1297 if (locs->in(0).IsConstant()) {
1298 const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
1299 &right_lo, &right_hi);
1300 RELEASE_ASSERT(ok);
1301 left_pair = locs->in(1).AsPairLocation();
1302 } else if (locs->in(1).IsConstant()) {
1303 const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
1304 &right_lo, &right_hi);
1305 RELEASE_ASSERT(ok);
1306 left_pair = locs->in(0).AsPairLocation();
1307 } else {
1308 left_pair = locs->in(0).AsPairLocation();
1309 PairLocation* right_pair = locs->in(1).AsPairLocation();
1310 right_lo = compiler::Operand(right_pair->At(0).reg());
1311 right_hi = compiler::Operand(right_pair->At(1).reg());
1312 }
1313 Register left_lo = left_pair->At(0).reg();
1314 Register left_hi = left_pair->At(1).reg();
1315
1316 // Compare lower.
1317 __ cmp(left_lo, right_lo);
1318 // Compare upper if lower is equal.
1319 __ cmp(left_hi, right_hi, EQ);
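// Because the second cmp above is predicated on EQ, it only executes when the
// low words matched; otherwise the NE flags from the first cmp survive. The
// final flags therefore describe full 64-bit equality, and the caller can
// branch on EQ/NE directly.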
1320 return TokenKindToIntCondition(kind);
1321}
1322
1323static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
1324 LocationSummary* locs,
1325 Token::Kind kind,
1326 BranchLabels labels) {
1327 PairLocation* left_pair;
1328 compiler::Operand right_lo, right_hi;
1329 Condition true_condition = TokenKindToIntCondition(kind);
1330 if (locs->in(0).IsConstant()) {
1331 const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
1332 &right_lo, &right_hi);
1333 RELEASE_ASSERT(ok);
1334 left_pair = locs->in(1).AsPairLocation();
1335 true_condition = FlipCondition(true_condition);
1336 } else if (locs->in(1).IsConstant()) {
1337 const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
1338 &right_lo, &right_hi);
1339 RELEASE_ASSERT(ok);
1340 left_pair = locs->in(0).AsPairLocation();
1341 } else {
1342 left_pair = locs->in(0).AsPairLocation();
1343 PairLocation* right_pair = locs->in(1).AsPairLocation();
1344 right_lo = compiler::Operand(right_pair->At(0).reg());
1345 right_hi = compiler::Operand(right_pair->At(1).reg());
1346 }
1347 Register left_lo = left_pair->At(0).reg();
1348 Register left_hi = left_pair->At(1).reg();
1349
1350 // 64-bit comparison.
1351 Condition hi_cond, lo_cond;
1352 switch (true_condition) {
1353 case LT:
1354 hi_cond = LT;
1355 lo_cond = CC;
1356 break;
1357 case GT:
1358 hi_cond = GT;
1359 lo_cond = HI;
1360 break;
1361 case LE:
1362 hi_cond = LT;
1363 lo_cond = LS;
1364 break;
1365 case GE:
1366 hi_cond = GT;
1367 lo_cond = CS;
1368 break;
1369 default:
1370 UNREACHABLE();
1371 hi_cond = lo_cond = VS;
1372 }
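// The low words are compared as unsigned values (CC/HI/LS/CS) because the
// sign of the 64-bit number lives entirely in the high word: once the high
// words are known to be equal, the remaining ordering is a plain unsigned
// compare of the lower 32 bits.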
1373 // Compare upper halves first.
1374 __ cmp(left_hi, right_hi);
1375 __ b(labels.true_label, hi_cond);
1376 __ b(labels.false_label, FlipCondition(hi_cond));
1377
1378 // If higher words are equal, compare lower words.
1379 __ cmp(left_lo, right_lo);
1380 return lo_cond;
1381}
1382
1383static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1384 LocationSummary* locs,
1385 Token::Kind kind,
1386 BranchLabels labels) {
1387 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1388 const Register left = locs->in(0).reg();
1389 const Register right = locs->in(1).reg();
1390 const Register temp = locs->temp(0).reg();
1391 const Condition true_condition = TokenKindToIntCondition(kind);
1392 compiler::Label* equal_result =
1393 (true_condition == EQ) ? labels.true_label : labels.false_label;
1394 compiler::Label* not_equal_result =
1395 (true_condition == EQ) ? labels.false_label : labels.true_label;
1396
1397 // Check if operands have the same value. If they don't, then they could
1398 // be equal only if both of them are Mints with the same value.
1399 __ cmp(left, compiler::Operand(right));
1400 __ b(equal_result, EQ);
1401 __ and_(temp, left, compiler::Operand(right));
1402 __ BranchIfSmi(temp, not_equal_result);
1403 __ CompareClassId(left, kMintCid, temp);
1404 __ b(not_equal_result, NE);
1405 __ CompareClassId(right, kMintCid, temp);
1406 __ b(not_equal_result, NE);
1407 __ LoadFieldFromOffset(temp, left, compiler::target::Mint::value_offset());
1408 __ LoadFieldFromOffset(TMP, right, compiler::target::Mint::value_offset());
1409 __ cmp(temp, compiler::Operand(TMP));
1410 __ LoadFieldFromOffset(
1411 temp, left,
1412 compiler::target::Mint::value_offset() +
1413 compiler::target::kWordSize);
1414 __ LoadFieldFromOffset(
1415 TMP, right,
1416 compiler::target::Mint::value_offset() +
1417 compiler::target::kWordSize);
1418 __ cmp(temp, compiler::Operand(TMP), EQ);
1419 return true_condition;
1420}
1421
1422static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1423 switch (kind) {
1424 case Token::kEQ:
1425 return EQ;
1426 case Token::kNE:
1427 return NE;
1428 case Token::kLT:
1429 return LT;
1430 case Token::kGT:
1431 return GT;
1432 case Token::kLTE:
1433 return LE;
1434 case Token::kGTE:
1435 return GE;
1436 default:
1437 UNREACHABLE();
1438 return VS;
1439 }
1440}
1441
1442static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1443 LocationSummary* locs,
1444 BranchLabels labels,
1445 Token::Kind kind) {
1446 const QRegister left = locs->in(0).fpu_reg();
1447 const QRegister right = locs->in(1).fpu_reg();
1448 const DRegister dleft = EvenDRegisterOf(left);
1449 const DRegister dright = EvenDRegisterOf(right);
1450
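// VFP comparisons involving NaN set the overflow flag (unordered result).
// With the conditions returned below (EQ, GT, GE on possibly swapped
// operands), any comparison against NaN evaluates to false while kNE remains
// true, matching Dart's double comparison semantics; the operand swap for
// kLT/kLTE exists so that the same GT/GE conditions can be reused there.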
1451 switch (kind) {
1452 case Token::kEQ:
1453 __ vcmpd(dleft, dright);
1454 __ vmstat();
1455 return EQ;
1456 case Token::kNE:
1457 __ vcmpd(dleft, dright);
1458 __ vmstat();
1459 return NE;
1460 case Token::kLT:
1461 __ vcmpd(dright, dleft); // Flip to handle NaN.
1462 __ vmstat();
1463 return GT;
1464 case Token::kGT:
1465 __ vcmpd(dleft, dright);
1466 __ vmstat();
1467 return GT;
1468 case Token::kLTE:
1469 __ vcmpd(dright, dleft); // Flip to handle NaN.
1470 __ vmstat();
1471 return GE;
1472 case Token::kGTE:
1473 __ vcmpd(dleft, dright);
1474 __ vmstat();
1475 return GE;
1476 default:
1477 UNREACHABLE();
1478 return VS;
1479 }
1480}
1481
1482Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1483 BranchLabels labels) {
1484 if (is_null_aware()) {
1485 ASSERT(operation_cid() == kMintCid);
1486 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1487 }
1488 if (operation_cid() == kSmiCid) {
1489 return EmitSmiComparisonOp(compiler, locs(), kind());
1490 } else if (operation_cid() == kIntegerCid) {
1491 return EmitWordComparisonOp(compiler, locs(), kind());
1492 } else if (operation_cid() == kMintCid) {
1493 return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
1494 } else {
1495 ASSERT(operation_cid() == kDoubleCid);
1496 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1497 }
1498}
1499
1500LocationSummary* TestIntInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1501 RELEASE_ASSERT(representation_ == kTagged);
1502 const intptr_t kNumInputs = 2;
1503 const intptr_t kNumTemps = 0;
1504 LocationSummary* locs = new (zone)
1505 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1506 locs->set_in(0, Location::RequiresRegister());
1507 // Only one input can be a constant operand. The case of two constant
1508 // operands should be handled by constant propagation.
1509 locs->set_in(1, LocationRegisterOrConstant(right()));
1510 locs->set_out(0, Location::RequiresRegister());
1511 return locs;
1512}
1513
1514Condition TestIntInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1515 BranchLabels labels) {
1516 const Register left = locs()->in(0).reg();
1517 Location right = locs()->in(1);
1518 if (right.IsConstant()) {
1519 __ TestImmediate(left, static_cast<int32_t>(ComputeImmediateMask()));
1520 } else {
1521 __ tst(left, compiler::Operand(right.reg()));
1522 }
1523 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1524 return true_condition;
1525}
1526
1527LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1528 bool opt) const {
1529 const intptr_t kNumInputs = 1;
1530 const intptr_t kNumTemps = 1;
1531 LocationSummary* locs = new (zone)
1532 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1533 locs->set_in(0, Location::RequiresRegister());
1534 locs->set_temp(0, Location::RequiresRegister());
1535 locs->set_out(0, Location::RequiresRegister());
1536 return locs;
1537}
1538
1539Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1540 BranchLabels labels) {
1541 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1542 const Register val_reg = locs()->in(0).reg();
1543 const Register cid_reg = locs()->temp(0).reg();
1544
1545 compiler::Label* deopt =
1546 CanDeoptimize()
1547 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1548 : nullptr;
1549
1550 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1551 const ZoneGrowableArray<intptr_t>& data = cid_results();
1552 ASSERT(data[0] == kSmiCid);
1553 bool result = data[1] == true_result;
1554 __ tst(val_reg, compiler::Operand(kSmiTagMask));
1555 __ b(result ? labels.true_label : labels.false_label, EQ);
1556 __ LoadClassId(cid_reg, val_reg);
1557
1558 for (intptr_t i = 2; i < data.length(); i += 2) {
1559 const intptr_t test_cid = data[i];
1560 ASSERT(test_cid != kSmiCid);
1561 result = data[i + 1] == true_result;
1562 __ CompareImmediate(cid_reg, test_cid);
1563 __ b(result ? labels.true_label : labels.false_label, EQ);
1564 }
1565 // No match found, deoptimize or default action.
1566 if (deopt == nullptr) {
1567 // If the cid is not in the list, jump to the opposite label from the cids
1568 // that are in the list. These must be all the same (see asserts in the
1569 // constructor).
1570 compiler::Label* target = result ? labels.false_label : labels.true_label;
1571 if (target != labels.fall_through) {
1572 __ b(target);
1573 }
1574 } else {
1575 __ b(deopt);
1576 }
1577 // Dummy result as this method already did the jump, there's no need
1578 // for the caller to branch on a condition.
1579 return kInvalidCondition;
1580}
1581
1582LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1583 bool opt) const {
1584 const intptr_t kNumInputs = 2;
1585 const intptr_t kNumTemps = 0;
1586 if (operation_cid() == kMintCid) {
1587 compiler::Operand o;
1588 const intptr_t kNumTemps = 0;
1589 LocationSummary* locs = new (zone)
1590 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1591 if (CanBePairOfImmediateOperands(left(), &o, &o)) {
1592 locs->set_in(0, Location::Constant(left()->definition()->AsConstant()));
1593 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1594 Location::RequiresRegister()));
1595 } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {
1596 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1597 Location::RequiresRegister()));
1598 locs->set_in(1, Location::Constant(right()->definition()->AsConstant()));
1599 } else {
1600 locs->set_in(0, Location::Pair(Location::RequiresRegister(),
1601 Location::RequiresRegister()));
1602 locs->set_in(1, Location::Pair(Location::RequiresRegister(),
1603 Location::RequiresRegister()));
1604 }
1605 locs->set_out(0, Location::RequiresRegister());
1606 return locs;
1607 }
1608 if (operation_cid() == kDoubleCid) {
1609 LocationSummary* summary = new (zone)
1610 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1611 summary->set_in(0, Location::RequiresFpuRegister());
1612 summary->set_in(1, Location::RequiresFpuRegister());
1613 summary->set_out(0, Location::RequiresRegister());
1614 return summary;
1615 }
1616 ASSERT(operation_cid() == kSmiCid);
1617 LocationSummary* summary = new (zone)
1618 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1619 summary->set_in(0, LocationRegisterOrConstant(left()));
1620 // Only one input can be a constant operand. The case of two constant
1621 // operands should be handled by constant propagation.
1622 summary->set_in(1, summary->in(0).IsConstant()
1623 ? Location::RequiresRegister()
1624 : LocationRegisterOrConstant(right()));
1625 summary->set_out(0, Location::RequiresRegister());
1626 return summary;
1627}
1628
1629Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1630 BranchLabels labels) {
1631 if (operation_cid() == kSmiCid) {
1632 return EmitSmiComparisonOp(compiler, locs(), kind());
1633 } else if (operation_cid() == kMintCid) {
1634 return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
1635 } else {
1636 ASSERT(operation_cid() == kDoubleCid);
1637 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1638 }
1639}
1640
1641void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1642 SetupNative();
1643 const Register result = locs()->out(0).reg();
1644
1645 // Pass a pointer to the first argument in R2.
1646 __ add(
1647 R2, SP,
1648 compiler::Operand((ArgumentCount() - 1) * compiler::target::kWordSize));
1649
1650 // Compute the effective address. When running under the simulator,
1651 // this is a redirection address that forces the simulator to call
1652 // into the runtime system.
1653 uword entry;
1654 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1655 const Code* stub;
1656 if (link_lazily()) {
1657 stub = &StubCode::CallBootstrapNative();
1658 entry = NativeEntry::LinkNativeCallEntry();
1659 } else {
1660 entry = reinterpret_cast<uword>(native_c_function());
1661 if (is_bootstrap_native()) {
1662 stub = &StubCode::CallBootstrapNative();
1663 } else if (is_auto_scope()) {
1664 stub = &StubCode::CallAutoScopeNative();
1665 } else {
1666 stub = &StubCode::CallNoScopeNative();
1667 }
1668 }
1669 __ LoadImmediate(R1, argc_tag);
1670 compiler::ExternalLabel label(entry);
1671 __ LoadNativeEntry(R9, &label,
1672 link_lazily()
1673 ? ObjectPool::Patchability::kPatchable
1674 : ObjectPool::Patchability::kNotPatchable);
1675 if (link_lazily()) {
1676 compiler->GeneratePatchableCall(
1677 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1679 } else {
1680 // We can never lazy-deopt here because natives are never optimized.
1681 ASSERT(!compiler->is_optimizing());
1682 compiler->GenerateNonLazyDeoptableStubCall(
1683 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1685 }
1686 __ LoadFromOffset(result, SP, 0);
1687
1688 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1689}
1690
1691#define R(r) (1 << r)
1692
1693LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1694 bool is_optimizing) const {
1697 return MakeLocationSummaryInternal(
1698 zone, is_optimizing,
1701}
1702
1703#undef R
1704
1705void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1706 const Register branch = locs()->in(TargetAddressIndex()).reg();
1707
1708 // The temps are indexed according to their register number.
1709 // For regular calls, this holds the FP for rebasing the original locations
1710 // during EmitParamMoves.
1711 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1712 // the call.
1713 const Register saved_fp_or_sp = locs()->temp(0).reg();
1714 const Register temp1 = locs()->temp(1).reg();
1715
1716 // Ensure these are callee-saved registers and are preserved across the call.
1717 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1718 // Other temps don't need to be preserved.
1719
1720 __ mov(saved_fp_or_sp,
1721 is_leaf_ ? compiler::Operand(SPREG) : compiler::Operand(FPREG));
1722
1723 if (!is_leaf_) {
1724 // Make a space to put the return address.
1725 __ PushImmediate(0);
1726
1727 // We need to create a dummy "exit frame". It will have a null code object.
1728 __ LoadObject(CODE_REG, Object::null_object());
1729 __ set_constant_pool_allowed(false);
1730 __ EnterDartFrame(0, /*load_pool_pointer=*/false);
1731 }
1732
1733 // Reserve space for the arguments that go on the stack (if any), then align.
1734 __ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());
1736 UNIMPLEMENTED();
1737 }
1738
1739 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, TMP);
1740
1742 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1743 }
1744
1745 if (is_leaf_) {
1746#if !defined(PRODUCT)
1747 // Set the thread object's top_exit_frame_info and VMTag to enable the
1748 // profiler to determine that thread is no longer executing Dart code.
1749 __ StoreToOffset(FPREG, THR,
1750 compiler::target::Thread::top_exit_frame_info_offset());
1751 __ StoreToOffset(branch, THR, compiler::target::Thread::vm_tag_offset());
1752#endif
1753
1754 __ blx(branch);
1755
1756#if !defined(PRODUCT)
1757 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1758 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1759 __ LoadImmediate(temp1, 0);
1760 __ StoreToOffset(temp1, THR,
1761 compiler::target::Thread::top_exit_frame_info_offset());
1762#endif
1763 } else {
1764 // We need to copy the return address up into the dummy stack frame so the
1765 // stack walker will know which safepoint to use.
1766 __ mov(temp1, compiler::Operand(PC));
1767 __ str(temp1, compiler::Address(FPREG, kSavedCallerPcSlotFromFp *
1768 compiler::target::kWordSize));
1769
1770 // For historical reasons, the PC on ARM points 8 bytes past the current
1771 // instruction. Therefore we emit the metadata here, 8 bytes
1772 // (2 instructions) after the original mov.
1773 compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
1774 UntaggedPcDescriptors::Kind::kOther, locs(),
1775 env());
1776
1777 // Update information in the thread object and enter a safepoint.
1778 // Outline state transition. In AOT, for code size. In JIT, because we
1779 // cannot trust that code will be executable.
1780 __ ldr(temp1,
1781 compiler::Address(
1782 THR, compiler::target::Thread::
1783 call_native_through_safepoint_entry_point_offset()));
1784
1785 // Calls R8 in a safepoint and clobbers R4 and NOTFP.
1786 ASSERT(branch == R8);
1787 static_assert((kReservedCpuRegisters & (1 << NOTFP)) != 0,
1788 "NOTFP should be a reserved register");
1789 __ blx(temp1);
1790
1791 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1792 __ Comment("Check Dart_Handle for Error.");
1793 compiler::Label not_error;
1795 ASSERT(saved_fp_or_sp != CallingConventions::kReturnReg);
1796 __ ldr(temp1,
1797 compiler::Address(CallingConventions::kReturnReg,
1799 __ BranchIfSmi(temp1, &not_error);
1800 __ LoadClassId(temp1, temp1);
1801 __ RangeCheck(temp1, saved_fp_or_sp, kFirstErrorCid, kLastErrorCid,
1803
1804 // Slow path, use the stub to propagate error, to save on code-size.
1805 __ Comment("Slow path: call Dart_PropagateError through stub.");
1808 __ ldr(temp1,
1809 compiler::Address(
1810 THR, compiler::target::Thread::
1811 call_native_through_safepoint_entry_point_offset()));
1812 __ ldr(branch, compiler::Address(
1813 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1814 __ blx(temp1);
1815#if defined(DEBUG)
1816 // We should never return with normal control flow from this.
1817 __ bkpt(0);
1818#endif
1819
1820 __ Bind(&not_error);
1821 }
1822
1823 // Restore the global object pool after returning from runtime (old space is
1824 // moving, so the GOP could have been relocated).
1825 if (FLAG_precompiled_mode) {
1826 __ SetupGlobalPoolAndDispatchTable();
1827 }
1828 }
1829
1830 EmitReturnMoves(compiler, temp1, TMP);
1831
1832 if (is_leaf_) {
1833 // Restore the pre-aligned SP.
1834 __ mov(SPREG, compiler::Operand(saved_fp_or_sp));
1835 } else {
1836 // Leave dummy exit frame.
1837 __ LeaveDartFrame();
1838 __ set_constant_pool_allowed(true);
1839
1840 // Instead of returning to the "fake" return address, we just pop it.
1841 __ PopRegister(temp1);
1842 }
1843}
1844
1845// Keep in sync with NativeEntryInstr::EmitNativeCode.
1846void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1847 EmitReturnMoves(compiler);
1848
1849 // Restore the tag while the profiler's stack walker can still see the
1850 // InvokeDartCode return address.
1853
1854 __ LeaveDartFrame();
1855
1856 // The dummy return address is in LR, no need to pop it as on Intel.
1857
1858 // These can be anything besides the return registers (R0 and R1) and THR
1859 // (R10).
1860 const Register vm_tag_reg = R2;
1861 const Register old_exit_frame_reg = R3;
1862 const Register old_exit_through_ffi_reg = R4;
1863 const Register tmp = R5;
1864
1865 __ Pop(old_exit_frame_reg);
1866 __ Pop(old_exit_through_ffi_reg);
1867
1868 // Restore top_resource.
1869 __ Pop(tmp);
1870 __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
1871
1872 __ Pop(vm_tag_reg);
1873
1874 // The trampoline that called us will enter the safepoint on our behalf.
1875 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1876 old_exit_through_ffi_reg, tmp,
1877 /*enter_safepoint=*/false);
1878
1879 __ PopNativeCalleeSavedRegisters();
1880
1881#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1882#error Unimplemented
1883#endif
1884
1885 // Leave the entry frame.
1886 RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));
1887
1888 // Leave the dummy frame holding the pushed arguments.
1889 RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));
1890
1891 __ Ret();
1892
1893 // For following blocks.
1894 __ set_constant_pool_allowed(true);
1895}
1896
1897// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1898void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1899 // Constant pool cannot be used until we enter the actual Dart frame.
1900 __ set_constant_pool_allowed(false);
1901
1902 __ Bind(compiler->GetJumpLabel(this));
1903
1904 // Create a dummy frame holding the pushed arguments. This simplifies
1905 // NativeReturnInstr::EmitNativeCode.
1906 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1907
1908 // Save the argument registers, in reverse order.
1909 SaveArguments(compiler);
1910
1911 // Enter the entry frame. NativeParameterInstr expects this frame to have size
1912 // -exit_link_slot_from_entry_fp, verified below.
1913 SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
1914
1915 // Reserve a slot for the code object.
1916 __ PushImmediate(0);
1917
1918#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1919#error Unimplemented
1920#endif
1921
1922 __ PushNativeCalleeSavedRegisters();
1923
1924 // Save the current VMTag on the stack.
1926 __ Push(R0);
1928
1929 // Save top resource.
1930 const intptr_t top_resource_offset =
1931 compiler::target::Thread::top_resource_offset();
1932 __ LoadFromOffset(R0, THR, top_resource_offset);
1933 __ Push(R0);
1934 __ LoadImmediate(R0, 0);
1935 __ StoreToOffset(R0, THR, top_resource_offset);
1936
1937 __ LoadFromOffset(R0, THR,
1939 __ Push(R0);
1940
1941 // Save top exit frame info. Don't set it to 0 yet,
1942 // TransitionNativeToGenerated will handle that.
1943 __ LoadFromOffset(R0, THR,
1945 __ Push(R0);
1946
1947 __ EmitEntryFrameVerification(R0);
1948
1949 // The callback trampoline (caller) has already left the safepoint for us.
1950 __ TransitionNativeToGenerated(/*scratch0=*/R0, /*scratch1=*/R1,
1951 /*exit_safepoint=*/false,
1952 /*ignore_unwind_in_progress=*/false,
1953 /*set_tag=*/false);
1954
1955 // Now that the safepoint has ended, we can touch Dart objects without
1956 // handles.
1957
1958 // Load the code object.
1959 const Function& target_function = marshaller_.dart_signature();
1960 const intptr_t callback_id = target_function.FfiCallbackId();
1962 __ LoadFromOffset(R0, R0,
1964 __ LoadFromOffset(R0, R0,
1966 __ LoadFieldFromOffset(R0, R0,
1968 __ LoadFieldFromOffset(CODE_REG, R0,
1970 callback_id * compiler::target::kWordSize);
1971
1972 // Put the code object in the reserved slot.
1973 __ StoreToOffset(CODE_REG, FPREG,
1975 if (FLAG_precompiled_mode) {
1976 __ SetupGlobalPoolAndDispatchTable();
1977 } else {
1978 __ LoadImmediate(PP, 0); // GC safe value into PP.
1979 }
1980
1981 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1982 __ LoadImmediate(ARGS_DESC_REG, 0);
1983
1984 // Load a dummy return address which suggests that we are inside of
1985 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1986 CLOBBERS_LR({
1987 __ LoadFromOffset(LR, THR,
1989 __ LoadFieldFromOffset(LR, LR,
1991 });
1992
1994
1995 // Delay setting the tag until the profiler's stack walker can see the
1996 // InvokeDartCode return address.
1999}
2000
2001#define R(r) (1 << r)
2002
2004 Zone* zone,
2005 bool is_optimizing) const {
2007 return MakeLocationSummaryInternal(zone, (R(saved_fp)));
2008}
2009
2010#undef R
2011
2012void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2013 const Register saved_fp = locs()->temp(0).reg();
2014 const Register temp0 = TMP;
2015
2016 __ MoveRegister(saved_fp, FPREG);
2017
2018 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
2019 __ EnterCFrame(frame_space);
2020
2021 EmitParamMoves(compiler, saved_fp, temp0);
2022
2023 const Register target_address = locs()->in(TargetAddressIndex()).reg();
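  // The target address is kept in the thread's vm_tag for the duration of the
  // call so that profiler samples taken inside the native function can be
  // attributed to it; the tag is reset to VMTag::kDartTagId right after.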
2024 __ str(target_address,
2025 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
2026 __ CallCFunction(target_address);
2027 __ LoadImmediate(temp0, VMTag::kDartTagId);
2028 __ str(temp0,
2029 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
2030
2031 __ LeaveCFrame();
2032}
2033
2035 Zone* zone,
2036 bool opt) const {
2037 const intptr_t kNumInputs = 1;
2038 // TODO(fschneider): Allow immediate operands for the char code.
2039 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
2041}
2042
2044 FlowGraphCompiler* compiler) {
2045 ASSERT(compiler->is_optimizing());
2046 const Register char_code = locs()->in(0).reg();
2047 const Register result = locs()->out(0).reg();
2048
2049 __ ldr(
2050 result,
2051 compiler::Address(
2053 __ AddImmediate(
2055 __ ldr(result,
2056 compiler::Address(result, char_code, LSL, 1)); // Char code is a smi.
2057}
2058
2059LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
2060 bool opt) const {
2061 const intptr_t kNumInputs = 1;
2062 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
2064}
2065
2066void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2067 ASSERT(cid_ == kOneByteStringCid);
2068 const Register str = locs()->in(0).reg();
2069 const Register result = locs()->out(0).reg();
2070 __ ldr(result, compiler::FieldAddress(
2072 __ cmp(result, compiler::Operand(compiler::target::ToRawSmi(1)));
2073 __ LoadImmediate(result, -1, NE);
2074 __ ldrb(result,
2075 compiler::FieldAddress(
2077 EQ);
2078 __ SmiTag(result);
2079}
2080
2081LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
2082 bool opt) const {
2083 const intptr_t kNumInputs = 5;
2084 const intptr_t kNumTemps = 0;
2085 LocationSummary* summary = new (zone)
2086 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2087 summary->set_in(0, Location::Any()); // decoder
2088 summary->set_in(1, Location::WritableRegister()); // bytes
2089 summary->set_in(2, Location::WritableRegister()); // start
2090 summary->set_in(3, Location::WritableRegister()); // end
2091 summary->set_in(4, Location::WritableRegister()); // table
2092 summary->set_out(0, Location::RequiresRegister());
2093 return summary;
2094}
2095
2096void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2097 const Register bytes_reg = locs()->in(1).reg();
2098 const Register start_reg = locs()->in(2).reg();
2099 const Register end_reg = locs()->in(3).reg();
2100 const Register table_reg = locs()->in(4).reg();
2101 const Register size_reg = locs()->out(0).reg();
2102
2103 const Register bytes_ptr_reg = start_reg;
2104 const Register bytes_end_reg = end_reg;
2105 const Register flags_reg = bytes_reg;
2106 const Register temp_reg = TMP;
2107 const Register decoder_temp_reg = start_reg;
2108 const Register flags_temp_reg = end_reg;
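  // The bytes/start/end inputs were requested as WritableRegister above, so
  // they can safely be reused as scratch registers once their original values
  // have been consumed.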
2109
2110 const intptr_t kSizeMask = 0x03;
2111 const intptr_t kFlagsMask = 0x3C;
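  // Each table entry encodes, for one byte value, a size contribution in its
  // low two bits (kSizeMask) and decoder flags in bits 2..5 (kFlagsMask); the
  // loop below ORs the flags together and sums the size contributions.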
2112
2113 compiler::Label loop, loop_in;
2114
2115 // Address of input bytes.
2116 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
2117
2118 // Table.
2119 __ AddImmediate(
2120 table_reg, table_reg,
2122
2123 // Pointers to start and end.
2124 __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
2125 __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
2126
2127 // Initialize size and flags.
2128 __ LoadImmediate(size_reg, 0);
2129 __ LoadImmediate(flags_reg, 0);
2130
2131 __ b(&loop_in);
2132 __ Bind(&loop);
2133
2134 // Read byte and increment pointer.
2135 __ ldrb(temp_reg,
2136 compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex));
2137
2138 // Update size and flags based on byte value.
2139 __ ldrb(temp_reg, compiler::Address(table_reg, temp_reg));
2140 __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
2141 __ and_(temp_reg, temp_reg, compiler::Operand(kSizeMask));
2142 __ add(size_reg, size_reg, compiler::Operand(temp_reg));
2143
2144 // Stop if end is reached.
2145 __ Bind(&loop_in);
2146 __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
2147 __ b(&loop, UNSIGNED_LESS);
2148
2149 // Write flags to field.
2150 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
2151 if (!IsScanFlagsUnboxed()) {
2152 __ SmiTag(flags_reg);
2153 }
2154 Register decoder_reg;
2155 const Location decoder_location = locs()->in(0);
2156 if (decoder_location.IsStackSlot()) {
2157 __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
2158 decoder_reg = decoder_temp_reg;
2159 } else {
2160 decoder_reg = decoder_location.reg();
2161 }
2162 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
2163 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2164 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
2165 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
2166}
2167
2168LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
2169 bool opt) const {
2170 // The compiler must optimize any function that includes a LoadIndexed
2171 // instruction that uses typed data cids, since extracting the payload address
2172 // from views is done in a compiler pass after all code motion has happened.
2174
2175 auto const rep =
2177 const bool directly_addressable = aligned() && rep != kUnboxedInt64;
2178 const intptr_t kNumInputs = 2;
2179 intptr_t kNumTemps = 0;
2180 if (!directly_addressable) {
2181 kNumTemps += 1;
2182 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2183 kNumTemps += 1;
2184 }
2185 }
2186 LocationSummary* locs = new (zone)
2187 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2189 bool needs_base;
2190 const bool can_be_constant =
2191 index()->BindsToConstant() &&
2193 index()->BoundConstant(), /*load=*/true, IsUntagged(), class_id(),
2194 index_scale(), &needs_base);
2195 // We don't need to check if [needs_base] is true, since we use TMP as the
2196 // temp register in this case and so don't need to allocate a temp register.
2198 can_be_constant
2199 ? Location::Constant(index()->definition()->AsConstant())
2202 if (rep == kUnboxedInt64) {
2205 } else {
2207 }
2208 } else if (RepresentationUtils::IsUnboxed(rep)) {
2209 if (rep == kUnboxedFloat) {
2210 // Need register < Q7 for float operations.
2211 // TODO(30953): Support register range constraints in the regalloc.
2213 } else {
2215 }
2216 } else {
2218 }
2219 if (!directly_addressable) {
2221 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2223 }
2224 }
2225 return locs;
2226}
2227
2228void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2229 auto const rep =
2232 const bool directly_addressable = aligned() && rep != kUnboxedInt64;
2233 // The array register points to the backing store for external arrays.
2234 const Register array = locs()->in(kArrayPos).reg();
2235 const Location index = locs()->in(kIndexPos);
2236 const Register address =
2237 directly_addressable ? kNoRegister : locs()->temp(0).reg();
2238
2239 compiler::Address element_address(kNoRegister);
2240 if (directly_addressable) {
2241 element_address =
2242 index.IsRegister()
2243 ? __ ElementAddressForRegIndex(true, // Load.
2244 IsUntagged(), class_id(),
2245 index_scale(), index_unboxed_, array,
2246 index.reg())
2247 : __ ElementAddressForIntIndex(
2248 true, // Load.
2250 compiler::target::SmiValue(index.constant()),
2251 IP); // Temp register.
2252 // Warning: element_address may use register IP as base.
2253 } else {
2254 if (index.IsRegister()) {
2255 __ LoadElementAddressForRegIndex(address,
2256 true, // Load.
2258 index_unboxed_, array, index.reg());
2259 } else {
2260 __ LoadElementAddressForIntIndex(
2261 address,
2262 true, // Load.
2264 compiler::target::SmiValue(index.constant()));
2265 }
2266 }
2267
2269 if (rep == kUnboxedInt64) {
2270 ASSERT(!directly_addressable); // need to add to register
2271 ASSERT(locs()->out(0).IsPairLocation());
2272 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2273 const Register result_lo = result_pair->At(0).reg();
2274 const Register result_hi = result_pair->At(1).reg();
2275 if (aligned()) {
2276 __ ldr(result_lo, compiler::Address(address));
2277 __ ldr(result_hi,
2278 compiler::Address(address, compiler::target::kWordSize));
2279 } else {
2280 __ LoadWordUnaligned(result_lo, address, TMP);
2281 __ AddImmediate(address, address, compiler::target::kWordSize);
2282 __ LoadWordUnaligned(result_hi, address, TMP);
2283 }
2284 } else {
2285 const Register result = locs()->out(0).reg();
2286 if (aligned()) {
2287 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2288 } else {
2289 switch (rep) {
2290 case kUnboxedUint32:
2291 case kUnboxedInt32:
2292 __ LoadWordUnaligned(result, address, TMP);
2293 break;
2294 case kUnboxedUint16:
2295 __ LoadHalfWordUnsignedUnaligned(result, address, TMP);
2296 break;
2297 case kUnboxedInt16:
2298 __ LoadHalfWordUnaligned(result, address, TMP);
2299 break;
2300 default:
2301 UNREACHABLE();
2302 break;
2303 }
2304 }
2305 }
2306 } else if (RepresentationUtils::IsUnboxed(rep)) {
2307 const QRegister result = locs()->out(0).fpu_reg();
2308 const DRegister dresult0 = EvenDRegisterOf(result);
2309 if (rep == kUnboxedFloat) {
2310 // Load single precision float.
2311 // vldrs does not support indexed addressing.
2312 if (aligned()) {
2313 __ vldrs(EvenSRegisterOf(dresult0), element_address);
2314 } else {
2315 const Register value = locs()->temp(1).reg();
2316 __ LoadWordUnaligned(value, address, TMP);
2317 __ vmovsr(EvenSRegisterOf(dresult0), value);
2318 }
2319 } else if (rep == kUnboxedDouble) {
2320 // vldrd does not support indexed addressing.
2321 if (aligned()) {
2322 __ vldrd(dresult0, element_address);
2323 } else {
2324 const Register value = locs()->temp(1).reg();
2325 __ LoadWordUnaligned(value, address, TMP);
2326 __ vmovdr(dresult0, 0, value);
2327 __ AddImmediate(address, address, 4);
2328 __ LoadWordUnaligned(value, address, TMP);
2329 __ vmovdr(dresult0, 1, value);
2330 }
2331 } else {
2332 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2333 rep == kUnboxedFloat64x2);
2334 ASSERT(element_address.Equals(compiler::Address(IP)));
2335 ASSERT(aligned());
2336 __ vldmd(IA, IP, dresult0, 2);
2337 }
2338 } else {
2339 const Register result = locs()->out(0).reg();
2340 ASSERT(rep == kTagged);
2341 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2342 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2343 __ ldr(result, element_address);
2344 }
2345}
2346
2347LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2348 bool opt) const {
2349 // The compiler must optimize any function that includes a StoreIndexed
2350 // instruction that uses typed data cids, since extracting the payload address
2351 // from views is done in a compiler pass after all code motion has happened.
2353
2354 auto const rep =
2356 const bool directly_addressable =
2357 aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
2358 const intptr_t kNumInputs = 3;
2359 LocationSummary* locs;
2360
2361 intptr_t kNumTemps = 0;
2362 bool needs_base = false;
2363 const bool can_be_constant =
2364 index()->BindsToConstant() &&
2366 index()->BoundConstant(), /*load=*/false, IsUntagged(), class_id(),
2367 index_scale(), &needs_base);
2368 if (can_be_constant) {
2369 if (!directly_addressable) {
2370 kNumTemps += 2;
2371 } else if (needs_base) {
2372 kNumTemps += 1;
2373 }
2374
2375 locs = new (zone)
2376 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2377
2378 locs->set_in(1, Location::Constant(index()->definition()->AsConstant()));
2379 } else {
2380 if (!directly_addressable) {
2381 kNumTemps += 2;
2382 }
2383
2384 locs = new (zone)
2385 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2386
2388 }
2390 for (intptr_t i = 0; i < kNumTemps; i++) {
2392 }
2393
2395 if (rep == kUnboxedInt64) {
2398 } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
2400 } else {
2402 }
2403 } else if (RepresentationUtils::IsUnboxed(rep)) {
2404 if (rep == kUnboxedFloat) {
2405 // Need low register (< Q7).
2407 } else { // TODO(srdjan): Support Float64 constants.
2409 }
2410 } else if (class_id() == kArrayCid) {
2414 if (ShouldEmitStoreBarrier()) {
2417 }
2418 }
2419
2420 return locs;
2421}
2422
2423void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2424 auto const rep =
2427 const bool directly_addressable =
2428 aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
2429
2430 // The array register points to the backing store for external arrays.
2431 const Register array = locs()->in(0).reg();
2432 const Location index = locs()->in(1);
2433 const Register temp =
2434 (locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
2435 const Register temp2 =
2436 (locs()->temp_count() > 1) ? locs()->temp(1).reg() : kNoRegister;
2437
2438 compiler::Address element_address(kNoRegister);
2439 if (directly_addressable) {
2440 element_address =
2441 index.IsRegister()
2442 ? __ ElementAddressForRegIndex(false, // Store.
2443 IsUntagged(), class_id(),
2444 index_scale(), index_unboxed_, array,
2445 index.reg())
2446 : __ ElementAddressForIntIndex(
2447 false, // Store.
2449 compiler::target::SmiValue(index.constant()), temp);
2450 } else {
2451 if (index.IsRegister()) {
2452 __ LoadElementAddressForRegIndex(temp,
2453 false, // Store.
2455 index_unboxed_, array, index.reg());
2456 } else {
2457 __ LoadElementAddressForIntIndex(
2458 temp,
2459 false, // Store.
2461 compiler::target::SmiValue(index.constant()));
2462 }
2463 }
2464
2466 ASSERT(rep == kUnboxedUint8);
2467 if (locs()->in(2).IsConstant()) {
2468 intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
2469 // Clamp to 0x0 or 0xFF respectively.
2470 if (value > 0xFF) {
2471 value = 0xFF;
2472 } else if (value < 0) {
2473 value = 0;
2474 }
2475 __ LoadImmediate(IP, static_cast<int8_t>(value));
2476 __ strb(IP, element_address);
2477 } else {
2478 const Register value = locs()->in(2).reg();
2479 // Clamp to 0x00 or 0xFF respectively.
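      // Branchless clamp: after the signed compare, the LE move zeroes IP for
      // negative (and in-range) values, and the unsigned LS move then restores
      // the value itself only when it really lies in range, so larger positive
      // values keep the 0xFF loaded above.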
2480 __ LoadImmediate(IP, 0xFF);
2481 // Compare Smi value and smi 0xFF.
2482 __ cmp(value, compiler::Operand(IP));
2483 // IP = value <= 0xFF ? 0 : 0xFF.
2484 __ mov(IP, compiler::Operand(0), LE);
2485 // IP = value in range ? value : IP.
2486 __ mov(IP, compiler::Operand(value), LS);
2487 __ strb(IP, element_address);
2488 }
2489 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2490 if (rep == kUnboxedInt64) {
2491 ASSERT(!directly_addressable); // need to add to register
2492 ASSERT(locs()->in(2).IsPairLocation());
2493 PairLocation* value_pair = locs()->in(2).AsPairLocation();
2494 Register value_lo = value_pair->At(0).reg();
2495 Register value_hi = value_pair->At(1).reg();
2496 if (aligned()) {
2497 __ str(value_lo, compiler::Address(temp));
2498 __ str(value_hi, compiler::Address(temp, compiler::target::kWordSize));
2499 } else {
2500 __ StoreWordUnaligned(value_lo, temp, temp2);
2501 __ AddImmediate(temp, temp, compiler::target::kWordSize);
2502 __ StoreWordUnaligned(value_hi, temp, temp2);
2503 }
2504 } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
2505 if (locs()->in(2).IsConstant()) {
2506 __ LoadImmediate(IP,
2507 compiler::target::SmiValue(locs()->in(2).constant()));
2508 __ strb(IP, element_address);
2509 } else {
2510 const Register value = locs()->in(2).reg();
2511 __ strb(value, element_address);
2512 }
2513 } else {
2514 const Register value = locs()->in(2).reg();
2515 if (aligned()) {
2516 __ Store(value, element_address, RepresentationUtils::OperandSize(rep));
2517 } else {
2518 switch (rep) {
2519 case kUnboxedUint32:
2520 case kUnboxedInt32:
2521 __ StoreWordUnaligned(value, temp, temp2);
2522 break;
2523 case kUnboxedUint16:
2524 case kUnboxedInt16:
2525 __ StoreHalfWordUnaligned(value, temp, temp2);
2526 break;
2527 default:
2528 UNREACHABLE();
2529 break;
2530 }
2531 }
2532 }
2533 } else if (RepresentationUtils::IsUnboxed(rep)) {
2534 if (rep == kUnboxedFloat) {
2535 const SRegister value_reg =
2536 EvenSRegisterOf(EvenDRegisterOf(locs()->in(2).fpu_reg()));
2537 if (aligned()) {
2538 __ vstrs(value_reg, element_address);
2539 } else {
2540 const Register address = temp;
2541 const Register value = temp2;
2542 __ vmovrs(value, value_reg);
2543 __ StoreWordUnaligned(value, address, TMP);
2544 }
2545 } else if (rep == kUnboxedDouble) {
2546 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2547 if (aligned()) {
2548 __ vstrd(value_reg, element_address);
2549 } else {
2550 const Register address = temp;
2551 const Register value = temp2;
2552 __ vmovrs(value, EvenSRegisterOf(value_reg));
2553 __ StoreWordUnaligned(value, address, TMP);
2554 __ AddImmediate(address, address, 4);
2555 __ vmovrs(value, OddSRegisterOf(value_reg));
2556 __ StoreWordUnaligned(value, address, TMP);
2557 }
2558 } else {
2559 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2560 rep == kUnboxedFloat64x2);
2561 ASSERT(element_address.Equals(compiler::Address(index.reg())));
2562 ASSERT(aligned());
2563 const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
2564 __ vstmd(IA, index.reg(), value_reg, 2);
2565 }
2566 } else if (class_id() == kArrayCid) {
2567 if (ShouldEmitStoreBarrier()) {
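      // StoreIntoArray performs the store together with the write barrier;
      // the branches below are only taken when the barrier can be statically
      // omitted (constant values or values known not to require it).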
2568 const Register value = locs()->in(2).reg();
2569 __ StoreIntoArray(array, temp, value, CanValueBeSmi());
2570 } else if (locs()->in(2).IsConstant()) {
2571 const Object& constant = locs()->in(2).constant();
2572 __ StoreObjectIntoObjectNoBarrier(array, compiler::Address(temp),
2573 constant);
2574 } else {
2575 const Register value = locs()->in(2).reg();
2576 __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
2577 }
2578 }
2579}
2580
2581LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2582 bool opt) const {
2583 const intptr_t kNumInputs = 1;
2584
2585 const intptr_t value_cid = value()->Type()->ToCid();
2586 const intptr_t field_cid = field().guarded_cid();
2587
2588 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2589
2590 const bool needs_value_cid_temp_reg =
2591 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2592
2593 const bool needs_field_temp_reg = emit_full_guard;
2594
2595 intptr_t num_temps = 0;
2596 if (needs_value_cid_temp_reg) {
2597 num_temps++;
2598 }
2599 if (needs_field_temp_reg) {
2600 num_temps++;
2601 }
2602
2603 LocationSummary* summary = new (zone)
2604 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2605 summary->set_in(0, Location::RequiresRegister());
2606
2607 for (intptr_t i = 0; i < num_temps; i++) {
2608 summary->set_temp(i, Location::RequiresRegister());
2609 }
2610
2611 return summary;
2612}
2613
2614void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2616 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2617 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2618
2619 const intptr_t value_cid = value()->Type()->ToCid();
2620 const intptr_t field_cid = field().guarded_cid();
2621 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2622
2623 if (field_cid == kDynamicCid) {
2624 return; // Nothing to emit.
2625 }
2626
2627 const bool emit_full_guard =
2628 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2629
2630 const bool needs_value_cid_temp_reg =
2631 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2632
2633 const bool needs_field_temp_reg = emit_full_guard;
2634
2635 const Register value_reg = locs()->in(0).reg();
2636
2637 const Register value_cid_reg =
2638 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2639
2640 const Register field_reg = needs_field_temp_reg
2641 ? locs()->temp(locs()->temp_count() - 1).reg()
2642 : kNoRegister;
2643
2644 compiler::Label ok, fail_label;
2645
2646 compiler::Label* deopt =
2647 compiler->is_optimizing()
2648 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2649 : nullptr;
2650
2651 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2652
2653 if (emit_full_guard) {
2654 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2655
2656 compiler::FieldAddress field_cid_operand(
2658 compiler::FieldAddress field_nullability_operand(
2660
2661 if (value_cid == kDynamicCid) {
2662 LoadValueCid(compiler, value_cid_reg, value_reg);
2663 __ ldr(IP, field_cid_operand);
2664 __ cmp(value_cid_reg, compiler::Operand(IP));
2665 __ b(&ok, EQ);
2666 __ ldr(IP, field_nullability_operand);
2667 __ cmp(value_cid_reg, compiler::Operand(IP));
2668 } else if (value_cid == kNullCid) {
2669 __ ldr(value_cid_reg, field_nullability_operand);
2670 __ CompareImmediate(value_cid_reg, value_cid);
2671 } else {
2672 __ ldr(value_cid_reg, field_cid_operand);
2673 __ CompareImmediate(value_cid_reg, value_cid);
2674 }
2675 __ b(&ok, EQ);
2676
2677 // Check if the tracked state of the guarded field can be initialized
2678 // inline. If the field needs a length check, we fall through to the
2679 // runtime, which is responsible for computing the offset of the length
2680 // field based on the class id.
2681 // Length guard will be emitted separately when needed via GuardFieldLength
2682 // instruction after GuardFieldClass.
2683 if (!field().needs_length_check()) {
2684 // Uninitialized field can be handled inline. Check if the
2685 // field is still uninitialized.
2686 __ ldr(IP, field_cid_operand);
2687 __ CompareImmediate(IP, kIllegalCid);
2688 __ b(fail, NE);
2689
2690 if (value_cid == kDynamicCid) {
2691 __ str(value_cid_reg, field_cid_operand);
2692 __ str(value_cid_reg, field_nullability_operand);
2693 } else {
2694 __ LoadImmediate(IP, value_cid);
2695 __ str(IP, field_cid_operand);
2696 __ str(IP, field_nullability_operand);
2697 }
2698
2699 __ b(&ok);
2700 }
2701
2702 if (deopt == nullptr) {
2703 __ Bind(fail);
2704
2705 __ ldr(IP, compiler::FieldAddress(
2707 __ CompareImmediate(IP, kDynamicCid);
2708 __ b(&ok, EQ);
2709
2710 __ Push(field_reg);
2711 __ Push(value_reg);
2712 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2713 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2714 __ Drop(2); // Drop the field and the value.
2715 } else {
2716 __ b(fail);
2717 }
2718 } else {
2719 ASSERT(compiler->is_optimizing());
2720 ASSERT(deopt != nullptr);
2721
2722 // Field guard class has been initialized and is known.
2723 if (value_cid == kDynamicCid) {
2724 // Field's guarded class id is fixed but value's class id is not known.
2725 __ tst(value_reg, compiler::Operand(kSmiTagMask));
2726
2727 if (field_cid != kSmiCid) {
2728 __ b(fail, EQ);
2729 __ LoadClassId(value_cid_reg, value_reg);
2730 __ CompareImmediate(value_cid_reg, field_cid);
2731 }
2732
2733 if (field().is_nullable() && (field_cid != kNullCid)) {
2734 __ b(&ok, EQ);
2735 if (field_cid != kSmiCid) {
2736 __ CompareImmediate(value_cid_reg, kNullCid);
2737 } else {
2738 __ CompareObject(value_reg, Object::null_object());
2739 }
2740 }
2741 __ b(fail, NE);
2742 } else if (value_cid == field_cid) {
2743 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2744 // may sometimes produce this situation after the last Canonicalize pass.
2745 } else {
2746 // Both the value's and the field's class ids are known.
2747 ASSERT(value_cid != nullability);
2748 __ b(fail);
2749 }
2750 }
2751 __ Bind(&ok);
2752}
2753
2754LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2755 bool opt) const {
2756 const intptr_t kNumInputs = 1;
2757 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2758 const intptr_t kNumTemps = 3;
2759 LocationSummary* summary = new (zone)
2760 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2761 summary->set_in(0, Location::RequiresRegister());
2762 // We need temporaries for field object, length offset and expected length.
2763 summary->set_temp(0, Location::RequiresRegister());
2764 summary->set_temp(1, Location::RequiresRegister());
2765 summary->set_temp(2, Location::RequiresRegister());
2766 return summary;
2767 } else {
2768 // TODO(vegorov): can use TMP when length is small enough to fit into
2769 // immediate.
2770 const intptr_t kNumTemps = 1;
2771 LocationSummary* summary = new (zone)
2772 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2773 summary->set_in(0, Location::RequiresRegister());
2774 summary->set_temp(0, Location::RequiresRegister());
2775 return summary;
2776 }
2777 UNREACHABLE();
2778}
2779
2780void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2781 if (field().guarded_list_length() == Field::kNoFixedLength) {
2782 return; // Nothing to emit.
2783 }
2784
2785 compiler::Label* deopt =
2786 compiler->is_optimizing()
2787 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2788 : nullptr;
2789
2790 const Register value_reg = locs()->in(0).reg();
2791
2792 if (!compiler->is_optimizing() ||
2793 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2794 const Register field_reg = locs()->temp(0).reg();
2795 const Register offset_reg = locs()->temp(1).reg();
2796 const Register length_reg = locs()->temp(2).reg();
2797
2798 compiler::Label ok;
2799
2800 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2801
2802 __ ldrsb(offset_reg,
2803 compiler::FieldAddress(
2804 field_reg, compiler::target::Field::
2805 guarded_list_length_in_object_offset_offset()));
2806 __ ldr(
2807 length_reg,
2808 compiler::FieldAddress(
2810
2811 __ tst(offset_reg, compiler::Operand(offset_reg));
2812 __ b(&ok, MI);
2813
2814 // Load the length from the value. GuardFieldClass already verified that
2815 // value's class matches guarded class id of the field.
2816 // offset_reg contains the offset already corrected by -kHeapObjectTag,
2817 // which is why we use Address instead of FieldAddress.
2818 __ ldr(IP, compiler::Address(value_reg, offset_reg));
2819 __ cmp(length_reg, compiler::Operand(IP));
2820
2821 if (deopt == nullptr) {
2822 __ b(&ok, EQ);
2823
2824 __ Push(field_reg);
2825 __ Push(value_reg);
2826 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2827 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2828 __ Drop(2); // Drop the field and the value.
2829 } else {
2830 __ b(deopt, NE);
2831 }
2832
2833 __ Bind(&ok);
2834 } else {
2835 ASSERT(compiler->is_optimizing());
2836 ASSERT(field().guarded_list_length() >= 0);
2837 ASSERT(field().guarded_list_length_in_object_offset() !=
2839
2840 const Register length_reg = locs()->temp(0).reg();
2841
2842 __ ldr(length_reg,
2843 compiler::FieldAddress(
2844 value_reg, field().guarded_list_length_in_object_offset()));
2845 __ CompareImmediate(
2846 length_reg, compiler::target::ToRawSmi(field().guarded_list_length()));
2847 __ b(deopt, NE);
2848 }
2849}
2850
2851DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2852
2853LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2854 bool opt) const {
2855 const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
2856 const intptr_t kNumInputs = 2;
2857 const intptr_t kNumTemps = might_box ? 2 : 0;
2858 LocationSummary* summary = new (zone) LocationSummary(
2859 zone, kNumInputs, kNumTemps,
2861 summary->set_in(0, Location::RequiresRegister());
2862 summary->set_in(1, Location::RequiresRegister());
2863
2864 if (might_box) {
2865 summary->set_temp(0, Location::RequiresRegister());
2866 summary->set_temp(1, Location::RequiresRegister());
2867 }
2868
2869 if (representation() == kUnboxedInt64) {
2870 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
2872 } else {
2873 ASSERT(representation() == kTagged);
2874 summary->set_out(0, Location::RequiresRegister());
2875 }
2876
2877 return summary;
2878}
2879
2880void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2881 // The string register points to the backing store for external strings.
2882 const Register str = locs()->in(0).reg();
2883 const Location index = locs()->in(1);
2884
2885 compiler::Address element_address = __ ElementAddressForRegIndex(
2886 true, IsExternal(), class_id(), index_scale(), /*index_unboxed=*/false,
2887 str, index.reg());
2888 // Warning: element_address may use register IP as base.
2889
2890 if (representation() == kUnboxedInt64) {
2891 ASSERT(compiler->is_optimizing());
2892 ASSERT(locs()->out(0).IsPairLocation());
2893 PairLocation* result_pair = locs()->out(0).AsPairLocation();
2894 Register result1 = result_pair->At(0).reg();
2895 Register result2 = result_pair->At(1).reg();
2896 switch (class_id()) {
2897 case kOneByteStringCid:
2898 ASSERT(element_count() == 4);
2899 __ ldr(result1, element_address);
2900 __ eor(result2, result2, compiler::Operand(result2));
2901 break;
2902 case kTwoByteStringCid:
2903 ASSERT(element_count() == 2);
2904 __ ldr(result1, element_address);
2905 __ eor(result2, result2, compiler::Operand(result2));
2906 break;
2907 default:
2908 UNREACHABLE();
2909 }
2910 } else {
2911 ASSERT(representation() == kTagged);
2912 Register result = locs()->out(0).reg();
2913 switch (class_id()) {
2914 case kOneByteStringCid:
2915 switch (element_count()) {
2916 case 1:
2917 __ ldrb(result, element_address);
2918 break;
2919 case 2:
2920 __ ldrh(result, element_address);
2921 break;
2922 case 4:
2923 __ ldr(result, element_address);
2924 break;
2925 default:
2926 UNREACHABLE();
2927 }
2928 break;
2929 case kTwoByteStringCid:
2930 switch (element_count()) {
2931 case 1:
2932 __ ldrh(result, element_address);
2933 break;
2934 case 2:
2935 __ ldr(result, element_address);
2936 break;
2937 default:
2938 UNREACHABLE();
2939 }
2940 break;
2941 default:
2942 UNREACHABLE();
2943 break;
2944 }
2945 if (can_pack_into_smi()) {
2946 __ SmiTag(result);
2947 } else {
2948 // If the value cannot fit in a smi then allocate a mint box for it.
2949 Register value = locs()->temp(0).reg();
2950 Register temp = locs()->temp(1).reg();
2951 // Value register needs to be manually preserved on allocation slow-path.
2952 locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
2953
2954 ASSERT(result != value);
2955 __ MoveRegister(value, result);
2956 __ SmiTag(result);
2957
2958 compiler::Label done;
2959 __ TestImmediate(value, 0xC0000000);
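      // A code unit fits in a 31-bit Smi only if its top two bits are clear;
      // in that case the SmiTag above already produced the result, otherwise
      // fall through and box the value in a Mint.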
2960 __ b(&done, EQ);
2962 result, temp);
2963 __ eor(temp, temp, compiler::Operand(temp));
2964 __ StoreFieldToOffset(value, result,
2966 __ StoreFieldToOffset(
2967 temp, result,
2969 __ Bind(&done);
2970 }
2971 }
2972}
2973
2974LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2975 bool opt) const {
2976 const intptr_t kNumInputs = 1;
2977 const intptr_t kNumTemps = 1;
2978 LocationSummary* locs = new (zone)
2979 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2982 return locs;
2983}
2984
2985void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2986 const Register value = locs()->in(0).reg();
2987 const Register temp = locs()->temp(0).reg();
2988
2989 compiler->used_static_fields().Add(&field());
2990
2991 __ LoadFromOffset(
2992 temp, THR,
2993 field().is_shared()
2996
2997 // Note: static field ids won't be changed by hot-reload.
2998 __ StoreToOffset(value, temp,
3000}
3001
3002LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
3003 bool opt) const {
3004 const intptr_t kNumInputs = 3;
3005 const intptr_t kNumTemps = 0;
3006 LocationSummary* summary = new (zone)
3007 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3009 summary->set_in(1, Location::RegisterLocation(
3011 summary->set_in(
3013 summary->set_out(0, Location::RegisterLocation(R0));
3014 return summary;
3015}
3016
3017void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3018 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
3021
3022 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
3023 ASSERT(locs()->out(0).reg() == R0);
3024}
3025
3026LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
3027 bool opt) const {
3028 const intptr_t kNumInputs = 2;
3029 const intptr_t kNumTemps = 0;
3030 LocationSummary* locs = new (zone)
3031 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3037 return locs;
3038}
3039
3040// Inlines array allocation for known constant values.
3041static void InlineArrayAllocation(FlowGraphCompiler* compiler,
3042 intptr_t num_elements,
3043 compiler::Label* slow_path,
3044 compiler::Label* done) {
3045 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
3046 const intptr_t instance_size = Array::InstanceSize(num_elements);
3047
3048 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
3049 AllocateArrayABI::kResultReg, // instance
3050 R3, // end address
3051 R8, R6);
3052 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
3053 // R3: new object end address.
3054
3055 // Store the type argument field.
3056 __ StoreIntoObjectNoBarrier(
3058 compiler::FieldAddress(AllocateArrayABI::kResultReg,
3061
3062 // Set the length field.
3063 __ StoreIntoObjectNoBarrier(
3065 compiler::FieldAddress(AllocateArrayABI::kResultReg,
3068
3069 // Initialize all array elements to raw_null.
3070 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
3071 // R3: new object end address.
3072 // R6: iterator which initially points to the start of the variable
3073 // data area to be initialized.
3074 // R8: null
3075 if (num_elements > 0) {
3076 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
3077 __ LoadObject(R8, Object::null_object());
3078 if (num_elements >= 2) {
3079 __ mov(R9, compiler::Operand(R8));
3080 } else {
3081#if defined(DEBUG)
3082 // Clobber R9 with an invalid pointer.
3083 __ LoadImmediate(R9, 0x1);
3084#endif // DEBUG
3085 }
3086 __ AddImmediate(R6, AllocateArrayABI::kResultReg,
3087 sizeof(UntaggedArray) - kHeapObjectTag);
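  // If the payload is smaller than kInlineArraySize words the null
  // initialization is fully unrolled; otherwise the InitializeFieldsNoBarrier
  // loop is used.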
3088 if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
3089 __ InitializeFieldsNoBarrierUnrolled(
3091 num_elements * compiler::target::kWordSize, R8, R9);
3092 } else {
3093 __ InitializeFieldsNoBarrier(AllocateArrayABI::kResultReg, R6, R3, R8,
3094 R9);
3095 }
3096 }
3097 __ b(done);
3098}
3099
3100void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3101 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
3102 if (type_usage_info != nullptr) {
3103 const Class& list_class =
3104 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
3105 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
3106 type_arguments()->definition());
3107 }
3108
3109 compiler::Label slow_path, done;
3110 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3111 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
3112 num_elements()->BindsToConstant() &&
3113 compiler::target::IsSmi(num_elements()->BoundConstant())) {
3114 const intptr_t length =
3115 compiler::target::SmiValue(num_elements()->BoundConstant());
3117 InlineArrayAllocation(compiler, length, &slow_path, &done);
3118 }
3119 }
3120 }
3121
3122 __ Bind(&slow_path);
3123 auto object_store = compiler->isolate_group()->object_store();
3124 const auto& allocate_array_stub =
3125 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
3126 compiler->GenerateStubCall(source(), allocate_array_stub,
3127 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3128 env());
3129 __ Bind(&done);
3130}
3131
3133 Zone* zone,
3134 bool opt) const {
3135 ASSERT(opt);
3136 const intptr_t kNumInputs = 0;
3137 const intptr_t kNumTemps = 3;
3138 LocationSummary* locs = new (zone) LocationSummary(
3139 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3144 return locs;
3145}
3146
3147class AllocateContextSlowPath
3148 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
3149 public:
3150 explicit AllocateContextSlowPath(
3151 AllocateUninitializedContextInstr* instruction)
3152 : TemplateSlowPathCode(instruction) {}
3153
3154 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3155 __ Comment("AllocateContextSlowPath");
3156 __ Bind(entry_label());
3157
3158 LocationSummary* locs = instruction()->locs();
3159 locs->live_registers()->Remove(locs->out(0));
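    // The output register is dropped from the save/restore set because it is
    // redefined by the result of the allocation stub call below.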
3160
3161 compiler->SaveLiveRegisters(locs);
3162
3163 auto slow_path_env = compiler->SlowPathEnvironmentFor(
3164 instruction(), /*num_slow_path_args=*/0);
3165 ASSERT(slow_path_env != nullptr);
3166
3167 auto object_store = compiler->isolate_group()->object_store();
3168 const auto& allocate_context_stub = Code::ZoneHandle(
3169 compiler->zone(), object_store->allocate_context_stub());
3170 __ LoadImmediate(R1, instruction()->num_context_variables());
3171 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
3172 UntaggedPcDescriptors::kOther, locs,
3173 instruction()->deopt_id(), slow_path_env);
3174 ASSERT(instruction()->locs()->out(0).reg() == R0);
3175 compiler->RestoreLiveRegisters(instruction()->locs());
3176 __ b(exit_label());
3177 }
3178};
3179
3181 FlowGraphCompiler* compiler) {
3182 Register temp0 = locs()->temp(0).reg();
3183 Register temp1 = locs()->temp(1).reg();
3184 Register temp2 = locs()->temp(2).reg();
3185 Register result = locs()->out(0).reg();
3186 // Try to allocate the object.
3187 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
3188 compiler->AddSlowPathCode(slow_path);
3189 intptr_t instance_size = Context::InstanceSize(num_context_variables());
3190
3191 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
3192 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
3193 result, // instance
3194 temp0, temp1, temp2);
3195
3196 // Set up the number of context variables field.
3197 __ LoadImmediate(temp0, num_context_variables());
3198 __ str(temp0,
3199 compiler::FieldAddress(
3201 } else {
3202 __ Jump(slow_path->entry_label());
3203 }
3204
3205 __ Bind(slow_path->exit_label());
3206}
3207
3208LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
3209 bool opt) const {
3210 const intptr_t kNumInputs = 0;
3211 const intptr_t kNumTemps = 1;
3212 LocationSummary* locs = new (zone)
3213 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3216 return locs;
3217}
3218
3219void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3220 ASSERT(locs()->temp(0).reg() == R1);
3221 ASSERT(locs()->out(0).reg() == R0);
3222
3223 auto object_store = compiler->isolate_group()->object_store();
3224 const auto& allocate_context_stub =
3225 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
3226 __ LoadImmediate(R1, num_context_variables());
3227 compiler->GenerateStubCall(source(), allocate_context_stub,
3228 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
3229 env());
3230}
3231
3232LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
3233 bool opt) const {
3234 const intptr_t kNumInputs = 1;
3235 const intptr_t kNumTemps = 0;
3236 LocationSummary* locs = new (zone)
3237 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
3240 return locs;
3241}
3242
3243void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3244 ASSERT(locs()->in(0).reg() == R4);
3245 ASSERT(locs()->out(0).reg() == R0);
3246
3247 auto object_store = compiler->isolate_group()->object_store();
3248 const auto& clone_context_stub =
3249 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
3250 compiler->GenerateStubCall(source(), clone_context_stub,
3251 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
3252 deopt_id(), env());
3253}
3254
3255LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
3256 bool opt) const {
3257 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
3258}
3259
3260void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3261 __ Bind(compiler->GetJumpLabel(this));
3262 compiler->AddExceptionHandler(this);
3263 if (HasParallelMove()) {
3265 }
3266
3267 // Restore SP from FP as we are coming from a throw and the code for
3268 // popping arguments has not been run.
3269 const intptr_t fp_sp_dist =
3271 compiler->StackSize()) *
3273 ASSERT(fp_sp_dist <= 0);
3274 __ AddImmediate(SP, FP, fp_sp_dist);
3275
3276 if (!compiler->is_optimizing()) {
3277 if (raw_exception_var_ != nullptr) {
3278 __ StoreToOffset(
3281 }
3282 if (raw_stacktrace_var_ != nullptr) {
3283 __ StoreToOffset(
3286 }
3287 }
3288}
3289
3290LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
3291 bool opt) const {
3292 const intptr_t kNumInputs = 0;
3293 const intptr_t kNumTemps = 2;
3294 const bool using_shared_stub = UseSharedSlowPathStub(opt);
3295 LocationSummary* summary = new (zone)
3296 LocationSummary(zone, kNumInputs, kNumTemps,
3297 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
3299 summary->set_temp(0, Location::RequiresRegister());
3300 summary->set_temp(1, Location::RequiresRegister());
3301 return summary;
3302}
3303
3304class CheckStackOverflowSlowPath
3305 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
3306 public:
3307 static constexpr intptr_t kNumSlowPathArgs = 0;
3308
3309 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
3310 : TemplateSlowPathCode(instruction) {}
3311
3312 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
3313 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
3314 const Register value = instruction()->locs()->temp(0).reg();
3315 __ Comment("CheckStackOverflowSlowPathOsr");
3316 __ Bind(osr_entry_label());
3317 __ LoadImmediate(value, Thread::kOsrRequest);
3318 __ str(value,
3319 compiler::Address(
3321 }
3322 __ Comment("CheckStackOverflowSlowPath");
3323 __ Bind(entry_label());
3324 const bool using_shared_stub =
3325 instruction()->locs()->call_on_shared_slow_path();
3326 if (!using_shared_stub) {
3327 compiler->SaveLiveRegisters(instruction()->locs());
3328 }
3329 // pending_deoptimization_env_ is needed to generate a runtime call that
3330 // may throw an exception.
3331 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
3332 Environment* env =
3333 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
3334 compiler->pending_deoptimization_env_ = env;
3335
3336 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3337 if (using_shared_stub) {
3338 if (!has_frame) {
3339 ASSERT(__ constant_pool_allowed());
3340 __ set_constant_pool_allowed(false);
3341 __ EnterDartFrame(0);
3342 }
3343 const uword entry_point_offset = compiler::target::Thread::
3345 instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
3346 __ Call(compiler::Address(THR, entry_point_offset));
3347 compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
3348 compiler->RecordCatchEntryMoves(env);
3349 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
3350 instruction()->deopt_id(),
3351 instruction()->source());
3352 if (!has_frame) {
3353 __ LeaveDartFrame();
3354 __ set_constant_pool_allowed(true);
3355 }
3356 } else {
3357 ASSERT(has_frame);
3358 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
3359 compiler->EmitCallsiteMetadata(
3360 instruction()->source(), instruction()->deopt_id(),
3361 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
3362 }
3363
3364 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
3365 instruction()->in_loop()) {
3366 // In unoptimized code, record loop stack checks as possible OSR entries.
3367 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
3368 instruction()->deopt_id(),
3369 InstructionSource());
3370 }
3371 compiler->pending_deoptimization_env_ = nullptr;
3372 if (!using_shared_stub) {
3373 compiler->RestoreLiveRegisters(instruction()->locs());
3374 }
3375 __ b(exit_label());
3376 }
3377
3378 compiler::Label* osr_entry_label() {
3379 ASSERT(IsolateGroup::Current()->use_osr());
3380 return &osr_entry_label_;
3381 }
3382
3383 private:
3384 compiler::Label osr_entry_label_;
3385};
3386
3387void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3388 __ ldr(IP, compiler::Address(THR,
3390 __ cmp(SP, compiler::Operand(IP));
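  // IP holds the limit just loaded from the thread; the LS branches below
  // take the slow path when SP is at or below it (unsigned comparison).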
3391
3392 auto object_store = compiler->isolate_group()->object_store();
3393 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
3394 const auto& stub = Code::ZoneHandle(
3395 compiler->zone(),
3396 live_fpu_regs
3397 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
3398 : object_store->stack_overflow_stub_without_fpu_regs_stub());
3399 const bool using_shared_stub = locs()->call_on_shared_slow_path();
3400 if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
3401 compiler->flow_graph().graph_entry()->NeedsFrame()) {
3402 __ GenerateUnRelocatedPcRelativeCall(LS);
3403 compiler->AddPcRelativeCallStubTarget(stub);
3404
3405 // We use the "extended" environment which has the locations updated to
3406 // reflect live registers being saved in the shared spilling stubs (see
3407 // the stub above).
3408 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
3409 compiler->EmitCallsiteMetadata(source(), deopt_id(),
3410 UntaggedPcDescriptors::kOther, locs(),
3411 extended_env);
3412 return;
3413 }
3414
3415 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3416 compiler->AddSlowPathCode(slow_path);
3417 __ b(slow_path->entry_label(), LS);
3418 if (compiler->CanOSRFunction() && in_loop()) {
3419 const Register function = locs()->temp(0).reg();
3420 const Register count = locs()->temp(1).reg();
3421 // In unoptimized code check the usage counter to trigger OSR at loop
3422 // stack checks. Use progressively higher thresholds for more deeply
3423 // nested loops to attempt to hit outer loops with OSR when possible.
3424 __ LoadObject(function, compiler->parsed_function().function());
3425 const intptr_t configured_optimization_counter_threshold =
3426 compiler->thread()->isolate_group()->optimization_counter_threshold();
3427 const int32_t threshold =
3428 configured_optimization_counter_threshold * (loop_depth() + 1);
3429 __ ldr(count,
3430 compiler::FieldAddress(
3432 __ add(count, count, compiler::Operand(1));
3433 __ str(count,
3434 compiler::FieldAddress(
3436 __ CompareImmediate(count, threshold);
3437 __ b(slow_path->osr_entry_label(), GE);
3438 }
3439 if (compiler->ForceSlowPathForStackOverflow()) {
3440 __ b(slow_path->entry_label());
3441 }
3442 __ Bind(slow_path->exit_label());
3443}
3444
3445static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3446 BinarySmiOpInstr* shift_left) {
3447 const LocationSummary& locs = *shift_left->locs();
3448 const Register left = locs.in(0).reg();
3449 const Register result = locs.out(0).reg();
3450 compiler::Label* deopt =
3451 shift_left->CanDeoptimize()
3452 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3453 ICData::kDeoptBinarySmiOp)
3454 : nullptr;
3455 if (locs.in(1).IsConstant()) {
3456 const Object& constant = locs.in(1).constant();
3458 // Immediate shift operation takes 5 bits for the count.
3459 const intptr_t kCountLimit = 0x1F;
3460 const intptr_t value = compiler::target::SmiValue(constant);
3461 ASSERT((0 < value) && (value < kCountLimit));
3462 if (shift_left->can_overflow()) {
3463 // Check for overflow (preserve left).
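    // Shifting left and then arithmetically shifting back must reproduce the
    // original value; if any significant bits (including the sign) were lost,
    // the comparison below fails and we deoptimize.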
3464 __ Lsl(IP, left, compiler::Operand(value));
3465 __ cmp(left, compiler::Operand(IP, ASR, value));
3466 __ b(deopt, NE); // Overflow.
3467 }
3468 // Shift for the result now that we know there is no overflow.
3469 __ Lsl(result, left, compiler::Operand(value));
3470 return;
3471 }
3472
3473 // Right (locs.in(1)) is not constant.
3474 const Register right = locs.in(1).reg();
3475 Range* right_range = shift_left->right_range();
3476 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3477 // TODO(srdjan): Implement code below for is_truncating().
3478 // If left is constant, we know the maximal allowed size for right.
3479 const Object& obj = shift_left->left()->BoundConstant();
3480 if (compiler::target::IsSmi(obj)) {
3481 const intptr_t left_int = compiler::target::SmiValue(obj);
3482 if (left_int == 0) {
3483 __ cmp(right, compiler::Operand(0));
3484 __ b(deopt, MI);
3485 __ mov(result, compiler::Operand(0));
3486 return;
3487 }
3488 const intptr_t max_right =
3490 const bool right_needs_check =
3491 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3492 if (right_needs_check) {
3493 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(max_right)));
3494 __ b(deopt, CS);
3495 }
3496 __ SmiUntag(IP, right);
3497 __ Lsl(result, left, IP);
3498 }
3499 return;
3500 }
3501
3502 const bool right_needs_check =
3503 !RangeUtils::IsWithin(right_range, 0, (compiler::target::kSmiBits - 1));
3504 if (!shift_left->can_overflow()) {
3505 if (right_needs_check) {
3506 if (!RangeUtils::IsPositive(right_range)) {
3507 ASSERT(shift_left->CanDeoptimize());
3508 __ cmp(right, compiler::Operand(0));
3509 __ b(deopt, MI);
3510 }
3511
3512 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3514 __ mov(result, compiler::Operand(0), CS);
3515 __ SmiUntag(IP, right, CC); // SmiUntag right into IP if CC.
3516 __ Lsl(result, left, IP, CC);
3517 } else {
3518 __ SmiUntag(IP, right);
3519 __ Lsl(result, left, IP);
3520 }
3521 } else {
3522 if (right_needs_check) {
3523 ASSERT(shift_left->CanDeoptimize());
3524 __ cmp(right, compiler::Operand(compiler::target::ToRawSmi(
3526 __ b(deopt, CS);
3527 }
3528 // Left is not a constant.
3529 // Check if the count is too large to handle inline.
3530 __ SmiUntag(IP, right);
3531 // Overflow test (preserve left, right, and IP);
3532 const Register temp = locs.temp(0).reg();
3533 __ Lsl(temp, left, IP);
3534 __ cmp(left, compiler::Operand(temp, ASR, IP));
3535 __ b(deopt, NE); // Overflow.
3536 // Shift for the result now that we know there is no overflow.
3537 __ Lsl(result, left, IP);
3538 }
3539}
3540
3541LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3542 bool opt) const {
3543 const intptr_t kNumInputs = 2;
3544 // Calculate number of temporaries.
3545 intptr_t num_temps = 0;
3546 if (op_kind() == Token::kTRUNCDIV) {
3548 num_temps = 1;
3549 } else {
3550 num_temps = 2;
3551 }
3552 } else if (op_kind() == Token::kMOD) {
3553 num_temps = 2;
3554 } else if (((op_kind() == Token::kSHL) && can_overflow()) ||
3555 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3556 num_temps = 1;
3557 }
3558 LocationSummary* summary = new (zone)
3559 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
3560 if (op_kind() == Token::kTRUNCDIV) {
3561 summary->set_in(0, Location::RequiresRegister());
3562     if (RightIsPowerOfTwoConstant()) {
3563 ConstantInstr* right_constant = right()->definition()->AsConstant();
3564 summary->set_in(1, Location::Constant(right_constant));
3565 summary->set_temp(0, Location::RequiresRegister());
3566 } else {
3567 summary->set_in(1, Location::RequiresRegister());
3568 summary->set_temp(0, Location::RequiresRegister());
3569 // Request register that overlaps with S0..S31.
3570 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
3571 }
3572 summary->set_out(0, Location::RequiresRegister());
3573 return summary;
3574 }
3575 if (op_kind() == Token::kMOD) {
3576 summary->set_in(0, Location::RequiresRegister());
3577 summary->set_in(1, Location::RequiresRegister());
3578 summary->set_temp(0, Location::RequiresRegister());
3579 // Request register that overlaps with S0..S31.
3580 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
3581 summary->set_out(0, Location::RequiresRegister());
3582 return summary;
3583 }
3584 summary->set_in(0, Location::RequiresRegister());
3585 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3586 if (((op_kind() == Token::kSHL) && can_overflow()) ||
3587 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3588 summary->set_temp(0, Location::RequiresRegister());
3589 }
3590 // We make use of 3-operand instructions by not requiring result register
3591 // to be identical to first input register as on Intel.
3592 summary->set_out(0, Location::RequiresRegister());
3593 return summary;
3594}
3595
3596void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3597 if (op_kind() == Token::kSHL) {
3598 EmitSmiShiftLeft(compiler, this);
3599 return;
3600 }
3601
3602 const Register left = locs()->in(0).reg();
3603 const Register result = locs()->out(0).reg();
3604 compiler::Label* deopt = nullptr;
3605 if (CanDeoptimize()) {
3606 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3607 }
3608
3609 if (locs()->in(1).IsConstant()) {
3610 const Object& constant = locs()->in(1).constant();
3611     ASSERT(compiler::target::IsSmi(constant));
3612 const int32_t imm = compiler::target::ToRawSmi(constant);
3613 switch (op_kind()) {
3614 case Token::kADD: {
3615 if (deopt == nullptr) {
3616 __ AddImmediate(result, left, imm);
3617 } else {
3618 __ AddImmediateSetFlags(result, left, imm);
3619 __ b(deopt, VS);
3620 }
3621 break;
3622 }
3623 case Token::kSUB: {
3624 if (deopt == nullptr) {
3625 __ AddImmediate(result, left, -imm);
3626 } else {
3627 // Negating imm and using AddImmediateSetFlags would not detect the
3628 // overflow when imm == kMinInt32.
3629 __ SubImmediateSetFlags(result, left, imm);
3630 __ b(deopt, VS);
3631 }
3632 break;
3633 }
3634 case Token::kMUL: {
3635 // Keep left value tagged and untag right value.
3636 const intptr_t value = compiler::target::SmiValue(constant);
3637 if (deopt == nullptr) {
3638 __ LoadImmediate(IP, value);
3639 __ mul(result, left, IP);
3640 } else {
3641 __ LoadImmediate(IP, value);
3642 __ smull(result, IP, left, IP);
3643 // IP: result bits 32..63.
3644 __ cmp(IP, compiler::Operand(result, ASR, 31));
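          // The 64-bit product fits in 32 bits iff the high word equals the
          // sign extension of the low word; if they differ, the NE branch
          // below deoptimizes.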
3645 __ b(deopt, NE);
3646 }
3647 break;
3648 }
3649 case Token::kTRUNCDIV: {
3650 const intptr_t value = compiler::target::SmiValue(constant);
3651         ASSERT(value != kIntptrMin);
3652         ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3653 const intptr_t shift_count =
3654             Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3655 ASSERT(kSmiTagSize == 1);
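        // Truncating division by a power of two: bias a negative dividend by
        // (2^shift_count - 1) before the arithmetic shift so the quotient
        // rounds toward zero. IP is 0 or -1 (the sign fill), so IP LSR
        // (32 - shift_count) is exactly that bias.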
3656 __ mov(IP, compiler::Operand(left, ASR, 31));
3657 ASSERT(shift_count > 1); // 1, -1 case handled above.
3658 const Register temp = locs()->temp(0).reg();
3659 __ add(temp, left, compiler::Operand(IP, LSR, 32 - shift_count));
3660 ASSERT(shift_count > 0);
3661 __ mov(result, compiler::Operand(temp, ASR, shift_count));
3662 if (value < 0) {
3663 __ rsb(result, result, compiler::Operand(0));
3664 }
3665 __ SmiTag(result);
3666 break;
3667 }
3668 case Token::kBIT_AND: {
3669 // No overflow check.
3670 compiler::Operand o;
3671 if (compiler::Operand::CanHold(imm, &o)) {
3672 __ and_(result, left, o);
3673 } else if (compiler::Operand::CanHold(~imm, &o)) {
3674 __ bic(result, left, o);
3675 } else {
3676 __ LoadImmediate(IP, imm);
3677 __ and_(result, left, compiler::Operand(IP));
3678 }
3679 break;
3680 }
3681 case Token::kBIT_OR: {
3682 // No overflow check.
3683 compiler::Operand o;
3684 if (compiler::Operand::CanHold(imm, &o)) {
3685 __ orr(result, left, o);
3686 } else {
3687 __ LoadImmediate(IP, imm);
3688 __ orr(result, left, compiler::Operand(IP));
3689 }
3690 break;
3691 }
3692 case Token::kBIT_XOR: {
3693 // No overflow check.
3694 compiler::Operand o;
3695 if (compiler::Operand::CanHold(imm, &o)) {
3696 __ eor(result, left, o);
3697 } else {
3698 __ LoadImmediate(IP, imm);
3699 __ eor(result, left, compiler::Operand(IP));
3700 }
3701 break;
3702 }
3703 case Token::kSHR: {
3704 // sarl operation masks the count to 5 bits.
3705 const intptr_t kCountLimit = 0x1F;
3706 intptr_t value = compiler::target::SmiValue(constant);
3707 __ Asr(result, left,
3708 compiler::Operand(
3709 Utils::Minimum(value + kSmiTagSize, kCountLimit)));
3710 __ SmiTag(result);
3711 break;
3712 }
3713 case Token::kUSHR: {
3714 const intptr_t value = compiler::target::SmiValue(constant);
3715 ASSERT((value > 0) && (value < 64));
3717 // 64-bit representation of left operand value:
3718 //
3719 // ss...sssss s s xxxxxxxxxxxxx
3720 // | | | | | |
3721 // 63 32 31 30 kSmiBits-1 0
3722 //
3723 // Where 's' is a sign bit.
3724 //
3725 // If left operand is negative (sign bit is set), then
3726 // result will fit into Smi range if and only if
3727 // the shift amount >= 64 - kSmiBits.
3728 //
3729 // If left operand is non-negative, the result always
3730 // fits into Smi range.
3731 //
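        // With the 31-bit tagged Smi layout used on 32-bit ARM, kSmiBits is
        // 30, so a negative operand only produces a Smi-range result when
        // the shift amount is at least 64 - 30 = 34.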
3732 if (value < (64 - compiler::target::kSmiBits)) {
3733 if (deopt != nullptr) {
3734 __ CompareImmediate(left, 0);
3735 __ b(deopt, LT);
3736 } else {
3737 // Operation cannot overflow only if left value is always
3738 // non-negative.
3739 ASSERT(!can_overflow());
3740 }
3741 // At this point left operand is non-negative, so unsigned shift
3742 // can't overflow.
3743           if (value >= compiler::target::kSmiBits) {
3744 __ LoadImmediate(result, 0);
3745 } else {
3746 __ Lsr(result, left, compiler::Operand(value + kSmiTagSize));
3747 __ SmiTag(result);
3748 }
3749 } else {
3750 // Shift amount > 32, and the result is guaranteed to fit into Smi.
3751 // Low (Smi) part of the left operand is shifted out.
3752 // High part is filled with sign bits.
3753 __ Asr(result, left, compiler::Operand(31));
3754 __ Lsr(result, result, compiler::Operand(value - 32));
3755 __ SmiTag(result);
3756 }
3757 break;
3758 }
3759
3760 default:
3761 UNREACHABLE();
3762 break;
3763 }
3764 return;
3765 }
3766
3767 const Register right = locs()->in(1).reg();
3768 switch (op_kind()) {
3769 case Token::kADD: {
3770 if (deopt == nullptr) {
3771 __ add(result, left, compiler::Operand(right));
3772 } else {
3773 __ adds(result, left, compiler::Operand(right));
3774 __ b(deopt, VS);
3775 }
3776 break;
3777 }
3778 case Token::kSUB: {
3779 if (deopt == nullptr) {
3780 __ sub(result, left, compiler::Operand(right));
3781 } else {
3782 __ subs(result, left, compiler::Operand(right));
3783 __ b(deopt, VS);
3784 }
3785 break;
3786 }
3787 case Token::kMUL: {
3788 __ SmiUntag(IP, left);
3789 if (deopt == nullptr) {
3790 __ mul(result, IP, right);
3791 } else {
3792 __ smull(result, IP, IP, right);
3793 // IP: result bits 32..63.
3794 __ cmp(IP, compiler::Operand(result, ASR, 31));
3795 __ b(deopt, NE);
3796 }
3797 break;
3798 }
3799 case Token::kBIT_AND: {
3800 // No overflow check.
3801 __ and_(result, left, compiler::Operand(right));
3802 break;
3803 }
3804 case Token::kBIT_OR: {
3805 // No overflow check.
3806 __ orr(result, left, compiler::Operand(right));
3807 break;
3808 }
3809 case Token::kBIT_XOR: {
3810 // No overflow check.
3811 __ eor(result, left, compiler::Operand(right));
3812 break;
3813 }
3814 case Token::kTRUNCDIV: {
3815       if (RangeUtils::CanBeZero(right_range())) {
3816 // Handle divide by zero in runtime.
3817 __ cmp(right, compiler::Operand(0));
3818 __ b(deopt, EQ);
3819 }
3820 const Register temp = locs()->temp(0).reg();
3821 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3822 __ SmiUntag(temp, left);
3823 __ SmiUntag(IP, right);
3824 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3825
3826 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3827         // Check the corner case of dividing MIN_SMI by -1, in which case
3828         // we cannot tag the result.
3829 __ CompareImmediate(result, 0x40000000);
3830 __ b(deopt, EQ);
3831 }
3832 __ SmiTag(result);
3833 break;
3834 }
3835 case Token::kMOD: {
3836       if (RangeUtils::CanBeZero(right_range())) {
3837 // Handle divide by zero in runtime.
3838 __ cmp(right, compiler::Operand(0));
3839 __ b(deopt, EQ);
3840 }
3841 const Register temp = locs()->temp(0).reg();
3842 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
3843 __ SmiUntag(temp, left);
3844 __ SmiUntag(IP, right);
3845 __ IntegerDivide(result, temp, IP, dtemp, DTMP);
3846 __ SmiUntag(IP, right);
3847 __ mls(result, IP, result, temp); // result <- left - right * result
3848 __ SmiTag(result);
3849 // res = left % right;
3850 // if (res < 0) {
3851 // if (right < 0) {
3852 // res = res - right;
3853 // } else {
3854 // res = res + right;
3855 // }
3856 // }
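      // For example left = -3, right = 5: mls produces -3; the adjustment
      // below adds right, giving 2, which matches Dart's %, whose result is
      // always non-negative.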
3857 compiler::Label done;
3858 __ cmp(result, compiler::Operand(0));
3859 __ b(&done, GE);
3860 // Result is negative, adjust it.
3861 __ cmp(right, compiler::Operand(0));
3862 __ sub(result, result, compiler::Operand(right), LT);
3863 __ add(result, result, compiler::Operand(right), GE);
3864 __ Bind(&done);
3865 break;
3866 }
3867 case Token::kSHR: {
3868 if (CanDeoptimize()) {
3869 __ CompareImmediate(right, 0);
3870 __ b(deopt, LT);
3871 }
3872 __ SmiUntag(IP, right);
3873 // sarl operation masks the count to 5 bits.
3874 const intptr_t kCountLimit = 0x1F;
3875 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
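        // Clamp the untagged count to 31; arithmetically shifting a 32-bit
        // value by 31 already yields 0 or -1, so clamping does not change
        // the result.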
3876 __ CompareImmediate(IP, kCountLimit);
3877 __ LoadImmediate(IP, kCountLimit, GT);
3878 }
3879 const Register temp = locs()->temp(0).reg();
3880 __ SmiUntag(temp, left);
3881 __ Asr(result, temp, IP);
3882 __ SmiTag(result);
3883 break;
3884 }
3885 case Token::kUSHR: {
3886 compiler::Label done;
3887 __ SmiUntag(IP, right);
3888 // 64-bit representation of left operand value:
3889 //
3890 // ss...sssss s s xxxxxxxxxxxxx
3891 // | | | | | |
3892 // 63 32 31 30 kSmiBits-1 0
3893 //
3894 // Where 's' is a sign bit.
3895 //
3896 // If left operand is negative (sign bit is set), then
3897 // result will fit into Smi range if and only if
3898 // the shift amount >= 64 - kSmiBits.
3899 //
3900 // If left operand is non-negative, the result always
3901 // fits into Smi range.
3902 //
3903       if (!RangeUtils::OnlyLessThanOrEqualTo(
3904               right_range(), 64 - compiler::target::kSmiBits - 1)) {
3905         if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3906 kBitsPerInt64 - 1)) {
3907 __ CompareImmediate(IP, kBitsPerInt64);
3908 // If shift amount >= 64, then result is 0.
3909 __ LoadImmediate(result, 0, GE);
3910 __ b(&done, GE);
3911 }
3912 __ CompareImmediate(IP, 64 - compiler::target::kSmiBits);
3913 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3914 // Result is guaranteed to fit into Smi range.
3915 // Low (Smi) part of the left operand is shifted out.
3916 // High part is filled with sign bits.
3917 __ sub(IP, IP, compiler::Operand(32), GE);
3918 __ Asr(result, left, compiler::Operand(31), GE);
3919 __ Lsr(result, result, IP, GE);
3920 __ SmiTag(result, GE);
3921 __ b(&done, GE);
3922 }
3923 // Shift amount < 64 - kSmiBits.
3924 // If left is negative, then result will not fit into Smi range.
3925 // Also deopt in case of negative shift amount.
3926 if (deopt != nullptr) {
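        // The first tst sets N if left < 0; the second executes only under
        // PL (left >= 0) and sets N if right < 0, so the single MI branch
        // below deoptimizes when either operand is negative.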
3927 __ tst(left, compiler::Operand(left));
3928 __ tst(right, compiler::Operand(right), PL);
3929 __ b(deopt, MI);
3930 } else {
3931 ASSERT(!can_overflow());
3932 }
3933 // At this point left operand is non-negative, so unsigned shift
3934 // can't overflow.
3935       if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3936                                              compiler::target::kSmiBits - 1)) {
3937 __ CompareImmediate(IP, compiler::target::kSmiBits);
3938 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3939 __ LoadImmediate(result, 0, GE);
3940 __ b(&done, GE);
3941 }
3942 // Left operand >= 0, shift amount < kSmiBits < 32.
3943 const Register temp = locs()->temp(0).reg();
3944 __ SmiUntag(temp, left);
3945 __ Lsr(result, temp, IP);
3946 __ SmiTag(result);
3947 __ Bind(&done);
3948 break;
3949 }
3950 case Token::kDIV: {
3951 // Dispatches to 'Double./'.
3952 // TODO(srdjan): Implement as conversion to double and double division.
3953 UNREACHABLE();
3954 break;
3955 }
3956 case Token::kOR:
3957 case Token::kAND: {
3958 // Flow graph builder has dissected this operation to guarantee correct
3959 // behavior (short-circuit evaluation).
3960 UNREACHABLE();
3961 break;
3962 }
3963 default:
3964 UNREACHABLE();
3965 break;
3966 }
3967}
3968
3969static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
3970 BinaryInt32OpInstr* shift_left) {
3971 const LocationSummary& locs = *shift_left->locs();
3972 const Register left = locs.in(0).reg();
3973 const Register result = locs.out(0).reg();
3974 compiler::Label* deopt =
3975 shift_left->CanDeoptimize()
3976 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3977 ICData::kDeoptBinarySmiOp)
3978 : nullptr;
3979 ASSERT(locs.in(1).IsConstant());
3980 const Object& constant = locs.in(1).constant();
3981   ASSERT(compiler::target::IsSmi(constant));
3982 // Immediate shift operation takes 5 bits for the count.
3983 const intptr_t kCountLimit = 0x1F;
3984 const intptr_t value = compiler::target::SmiValue(constant);
3985 ASSERT((0 < value) && (value < kCountLimit));
3986 if (shift_left->can_overflow()) {
3987 // Check for overflow (preserve left).
3988 __ Lsl(IP, left, compiler::Operand(value));
3989 __ cmp(left, compiler::Operand(IP, ASR, value));
3990 __ b(deopt, NE); // Overflow.
3991 }
3992   // Shift for result now that we know there is no overflow.
3993 __ Lsl(result, left, compiler::Operand(value));
3994}
3995
3996LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
3997 bool opt) const {
3998 const intptr_t kNumInputs = 2;
3999 // Calculate number of temporaries.
4000 intptr_t num_temps = 0;
4001 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4002 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
4003 num_temps = 1;
4004 }
4005 LocationSummary* summary = new (zone)
4006 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
4007 summary->set_in(0, Location::RequiresRegister());
4008 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
4009 if (((op_kind() == Token::kSHL) && can_overflow()) ||
4010 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
4011 summary->set_temp(0, Location::RequiresRegister());
4012 }
4013 // We make use of 3-operand instructions by not requiring result register
4014 // to be identical to first input register as on Intel.
4015 summary->set_out(0, Location::RequiresRegister());
4016 return summary;
4017}
4018
4019void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4020 if (op_kind() == Token::kSHL) {
4021 EmitInt32ShiftLeft(compiler, this);
4022 return;
4023 }
4024
4025 const Register left = locs()->in(0).reg();
4026 const Register result = locs()->out(0).reg();
4027 compiler::Label* deopt = nullptr;
4028 if (CanDeoptimize()) {
4029 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
4030 }
4031
4032 if (locs()->in(1).IsConstant()) {
4033 const Object& constant = locs()->in(1).constant();
4034     ASSERT(compiler::target::IsSmi(constant));
4035 const intptr_t value = compiler::target::SmiValue(constant);
4036 switch (op_kind()) {
4037 case Token::kADD: {
4038 if (deopt == nullptr) {
4039 __ AddImmediate(result, left, value);
4040 } else {
4041 __ AddImmediateSetFlags(result, left, value);
4042 __ b(deopt, VS);
4043 }
4044 break;
4045 }
4046 case Token::kSUB: {
4047 if (deopt == nullptr) {
4048 __ AddImmediate(result, left, -value);
4049 } else {
4050 // Negating value and using AddImmediateSetFlags would not detect the
4051 // overflow when value == kMinInt32.
4052 __ SubImmediateSetFlags(result, left, value);
4053 __ b(deopt, VS);
4054 }
4055 break;
4056 }
4057 case Token::kMUL: {
4058 if (deopt == nullptr) {
4059 __ LoadImmediate(IP, value);
4060 __ mul(result, left, IP);
4061 } else {
4062 __ LoadImmediate(IP, value);
4063 __ smull(result, IP, left, IP);
4064 // IP: result bits 32..63.
4065 __ cmp(IP, compiler::Operand(result, ASR, 31));
4066 __ b(deopt, NE);
4067 }
4068 break;
4069 }
4070 case Token::kBIT_AND: {
4071 // No overflow check.
4072 compiler::Operand o;
4073         if (compiler::Operand::CanHold(value, &o)) {
4074 __ and_(result, left, o);
4075 } else if (compiler::Operand::CanHold(~value, &o)) {
4076 __ bic(result, left, o);
4077 } else {
4078 __ LoadImmediate(IP, value);
4079 __ and_(result, left, compiler::Operand(IP));
4080 }
4081 break;
4082 }
4083 case Token::kBIT_OR: {
4084 // No overflow check.
4085 compiler::Operand o;
4086         if (compiler::Operand::CanHold(value, &o)) {
4087 __ orr(result, left, o);
4088 } else {
4089 __ LoadImmediate(IP, value);
4090 __ orr(result, left, compiler::Operand(IP));
4091 }
4092 break;
4093 }
4094 case Token::kBIT_XOR: {
4095 // No overflow check.
4096 compiler::Operand o;
4097         if (compiler::Operand::CanHold(value, &o)) {
4098 __ eor(result, left, o);
4099 } else {
4100 __ LoadImmediate(IP, value);
4101 __ eor(result, left, compiler::Operand(IP));
4102 }
4103 break;
4104 }
4105 case Token::kSHR: {
4106 // sarl operation masks the count to 5 bits.
4107 const intptr_t kCountLimit = 0x1F;
4108 __ Asr(result, left,
4109 compiler::Operand(Utils::Minimum(value, kCountLimit)));
4110 break;
4111 }
4112 case Token::kUSHR: {
4113 ASSERT((value > 0) && (value < 64));
4114 // 64-bit representation of left operand value:
4115 //
4116 // ss...sssss s xxxxxxxxxxxxx
4117 // | | | | |
4118 // 63 32 31 30 0
4119 //
4120 // Where 's' is a sign bit.
4121 //
4122 // If left operand is negative (sign bit is set), then
4123 // result will fit into Int32 range if and only if
4124 // the shift amount > 32.
4125 //
4126 if (value <= 32) {
4127 if (deopt != nullptr) {
4128 __ tst(left, compiler::Operand(left));
4129 __ b(deopt, MI);
4130 } else {
4131 // Operation cannot overflow only if left value is always
4132 // non-negative.
4133 ASSERT(!can_overflow());
4134 }
4135 // At this point left operand is non-negative, so unsigned shift
4136 // can't overflow.
4137 if (value == 32) {
4138 __ LoadImmediate(result, 0);
4139 } else {
4140 __ Lsr(result, left, compiler::Operand(value));
4141 }
4142 } else {
4143 // Shift amount > 32.
4144 // Low (Int32) part of the left operand is shifted out.
4145 // Shift high part which is filled with sign bits.
4146 __ Asr(result, left, compiler::Operand(31));
4147 __ Lsr(result, result, compiler::Operand(value - 32));
4148 }
4149 break;
4150 }
4151
4152 default:
4153 UNREACHABLE();
4154 break;
4155 }
4156 return;
4157 }
4158
4159 const Register right = locs()->in(1).reg();
4160 switch (op_kind()) {
4161 case Token::kADD: {
4162 if (deopt == nullptr) {
4163 __ add(result, left, compiler::Operand(right));
4164 } else {
4165 __ adds(result, left, compiler::Operand(right));
4166 __ b(deopt, VS);
4167 }
4168 break;
4169 }
4170 case Token::kSUB: {
4171 if (deopt == nullptr) {
4172 __ sub(result, left, compiler::Operand(right));
4173 } else {
4174 __ subs(result, left, compiler::Operand(right));
4175 __ b(deopt, VS);
4176 }
4177 break;
4178 }
4179 case Token::kMUL: {
4180 if (deopt == nullptr) {
4181 __ mul(result, left, right);
4182 } else {
4183 __ smull(result, IP, left, right);
4184 // IP: result bits 32..63.
4185 __ cmp(IP, compiler::Operand(result, ASR, 31));
4186 __ b(deopt, NE);
4187 }
4188 break;
4189 }
4190 case Token::kBIT_AND: {
4191 // No overflow check.
4192 __ and_(result, left, compiler::Operand(right));
4193 break;
4194 }
4195 case Token::kBIT_OR: {
4196 // No overflow check.
4197 __ orr(result, left, compiler::Operand(right));
4198 break;
4199 }
4200 case Token::kBIT_XOR: {
4201 // No overflow check.
4202 __ eor(result, left, compiler::Operand(right));
4203 break;
4204 }
4205 default:
4206 UNREACHABLE();
4207 break;
4208 }
4209}
4210
4211LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
4212 bool opt) const {
4213 intptr_t left_cid = left()->Type()->ToCid();
4214 intptr_t right_cid = right()->Type()->ToCid();
4215 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
4216 const intptr_t kNumInputs = 2;
4217 const intptr_t kNumTemps = 0;
4218 LocationSummary* summary = new (zone)
4219 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4220 summary->set_in(0, Location::RequiresRegister());
4221 summary->set_in(1, Location::RequiresRegister());
4222 return summary;
4223}
4224
4225void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4226 compiler::Label* deopt =
4227 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
4228 intptr_t left_cid = left()->Type()->ToCid();
4229 intptr_t right_cid = right()->Type()->ToCid();
4230 const Register left = locs()->in(0).reg();
4231 const Register right = locs()->in(1).reg();
4232 if (this->left()->definition() == this->right()->definition()) {
4233 __ tst(left, compiler::Operand(kSmiTagMask));
4234 } else if (left_cid == kSmiCid) {
4235 __ tst(right, compiler::Operand(kSmiTagMask));
4236 } else if (right_cid == kSmiCid) {
4237 __ tst(left, compiler::Operand(kSmiTagMask));
4238 } else {
4239 __ orr(IP, left, compiler::Operand(right));
4240 __ tst(IP, compiler::Operand(kSmiTagMask));
4241 }
4242 __ b(deopt, EQ);
4243}
4244
4245LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4246 const intptr_t kNumInputs = 1;
4247 const intptr_t kNumTemps = 1;
4248 LocationSummary* summary = new (zone) LocationSummary(
4249 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4250   summary->set_in(0, Location::RequiresFpuRegister());
4251 summary->set_temp(0, Location::RequiresRegister());
4252 summary->set_out(0, Location::RequiresRegister());
4253 return summary;
4254}
4255
4256void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4257 const Register out_reg = locs()->out(0).reg();
4258 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
4259
4260   BoxAllocationSlowPath::Allocate(compiler, this,
4261 compiler->BoxClassFor(from_representation()),
4262 out_reg, locs()->temp(0).reg());
4263
4264 switch (from_representation()) {
4265 case kUnboxedDouble:
4266 __ StoreDToOffset(value, out_reg, ValueOffset() - kHeapObjectTag);
4267 break;
4268 case kUnboxedFloat:
4269 __ vcvtds(DTMP, EvenSRegisterOf(value));
4270 __ StoreDToOffset(EvenDRegisterOf(FpuTMP), out_reg,
4271 ValueOffset() - kHeapObjectTag);
4272 break;
4273 case kUnboxedFloat32x4:
4274 case kUnboxedFloat64x2:
4275 case kUnboxedInt32x4:
4276 __ StoreMultipleDToOffset(value, 2, out_reg,
4277 ValueOffset() - kHeapObjectTag);
4278 break;
4279 default:
4280 UNREACHABLE();
4281 break;
4282 }
4283}
4284
4285LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4286 ASSERT(BoxCid() != kSmiCid);
4287 const bool needs_temp = CanDeoptimize();
4288 const intptr_t kNumInputs = 1;
4289 const intptr_t kNumTemps = needs_temp ? 1 : 0;
4290 LocationSummary* summary = new (zone)
4291 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4292 summary->set_in(0, Location::RequiresRegister());
4293 if (needs_temp) {
4294 summary->set_temp(0, Location::RequiresRegister());
4295 }
4296 if (representation() == kUnboxedInt64) {
4297 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
4298                                        Location::RequiresRegister()));
4299 } else if (representation() == kUnboxedInt32) {
4300 summary->set_out(0, Location::RequiresRegister());
4301 } else if (representation() == kUnboxedFloat) {
4302 // Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
4303 // TODO(30953): Support register range constraints in the regalloc.
4304 summary->set_out(0, Location::FpuRegisterLocation(Q6));
4305 } else {
4306 summary->set_out(0, Location::RequiresFpuRegister());
4307 }
4308 return summary;
4309}
4310
4311void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
4312 const Register box = locs()->in(0).reg();
4313
4314 switch (representation()) {
4315 case kUnboxedInt64: {
4316 PairLocation* result = locs()->out(0).AsPairLocation();
4317 ASSERT(result->At(0).reg() != box);
4318 __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
4319 __ LoadFieldFromOffset(result->At(1).reg(), box,
4320 ValueOffset() + compiler::target::kWordSize);
4321 break;
4322 }
4323
4324 case kUnboxedDouble: {
4325 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4326 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4327 break;
4328 }
4329
4330 case kUnboxedFloat: {
4331 // Should only be <= Q7, because >= Q8 cannot be addressed as S register.
4332 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4333 __ LoadDFromOffset(result, box, ValueOffset() - kHeapObjectTag);
4334 __ vcvtsd(EvenSRegisterOf(result), result);
4335 break;
4336 }
4337
4338 case kUnboxedFloat32x4:
4339 case kUnboxedFloat64x2:
4340 case kUnboxedInt32x4: {
4341 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4342 __ LoadMultipleDFromOffset(result, 2, box,
4343 ValueOffset() - kHeapObjectTag);
4344 break;
4345 }
4346
4347 default:
4348 UNREACHABLE();
4349 break;
4350 }
4351}
4352
4353void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
4354 const Register box = locs()->in(0).reg();
4355
4356 switch (representation()) {
4357 case kUnboxedInt64: {
4358 PairLocation* result = locs()->out(0).AsPairLocation();
4359 __ SmiUntag(result->At(0).reg(), box);
4360 __ SignFill(result->At(1).reg(), result->At(0).reg());
4361 break;
4362 }
4363
4364 case kUnboxedDouble: {
4365 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4366 __ SmiUntag(IP, box);
4367 __ vmovdr(DTMP, 0, IP);
4368 __ vcvtdi(result, STMP);
4369 break;
4370 }
4371
4372 default:
4373 UNREACHABLE();
4374 break;
4375 }
4376}
4377
4378void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
4379 const Register value = locs()->in(0).reg();
4380 const Register result = locs()->out(0).reg();
4381 __ LoadInt32FromBoxOrSmi(result, value);
4382}
4383
4384void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
4385 const Register box = locs()->in(0).reg();
4386 PairLocation* result = locs()->out(0).AsPairLocation();
4387 ASSERT(result->At(0).reg() != box);
4388 ASSERT(result->At(1).reg() != box);
4389 compiler::Label done;
4390 __ SignFill(result->At(1).reg(), box);
4391 __ SmiUntag(result->At(0).reg(), box, &done);
4392 EmitLoadFromBox(compiler);
4393 __ Bind(&done);
4394}
4395
4396LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
4397 bool opt) const {
4398 ASSERT((from_representation() == kUnboxedInt32) ||
4399 (from_representation() == kUnboxedUint32));
4400 const intptr_t kNumInputs = 1;
4401 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4402 LocationSummary* summary = new (zone)
4403 LocationSummary(zone, kNumInputs, kNumTemps,
4404                       ValueFitsSmi() ? LocationSummary::kNoCall
4405                                      : LocationSummary::kCallOnSlowPath);
4406 summary->set_in(0, Location::RequiresRegister());
4407 if (!ValueFitsSmi()) {
4408 summary->set_temp(0, Location::RequiresRegister());
4409 }
4410 summary->set_out(0, Location::RequiresRegister());
4411 return summary;
4412}
4413
4414void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4415 Register value = locs()->in(0).reg();
4416 Register out = locs()->out(0).reg();
4417 ASSERT(value != out);
4418
4419 __ SmiTag(out, value);
4420 if (!ValueFitsSmi()) {
4421 Register temp = locs()->temp(0).reg();
4422 compiler::Label done;
4423 if (from_representation() == kUnboxedInt32) {
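      // out holds value << 1 (the SmiTag above); shifting it back must
      // recover value, otherwise the int32 does not fit in a Smi and the
      // slow path below allocates a Mint.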
4424 __ cmp(value, compiler::Operand(out, ASR, 1));
4425 } else {
4426 ASSERT(from_representation() == kUnboxedUint32);
4427 // Note: better to test upper bits instead of comparing with
4428 // kSmiMax as kSmiMax does not fit into immediate operand.
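      // An unsigned value fits in a Smi iff it is below 2^30, i.e. its two
      // top bits are clear; the 0xC0000000 mask tests exactly those bits
      // (EQ below means it fits).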
4429 __ TestImmediate(value, 0xC0000000);
4430 }
4431 __ b(&done, EQ);
4432     BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4433 temp);
4434 if (from_representation() == kUnboxedInt32) {
4435 __ Asr(temp, value,
4436 compiler::Operand(compiler::target::kBitsPerWord - 1));
4437 } else {
4438 ASSERT(from_representation() == kUnboxedUint32);
4439 __ eor(temp, temp, compiler::Operand(temp));
4440 }
4441 __ StoreFieldToOffset(value, out, compiler::target::Mint::value_offset());
4442 __ StoreFieldToOffset(
4443 temp, out,
4444         compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4445 __ Bind(&done);
4446 }
4447}
4448
4449LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4450 bool opt) const {
4451 const intptr_t kNumInputs = 1;
4452 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4453 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
4454   // precompiled mode and only after VM isolate stubs were
4455 // replaced with isolate-specific stubs.
4456 auto object_store = IsolateGroup::Current()->object_store();
4457 const bool stubs_in_vm_isolate =
4458 object_store->allocate_mint_with_fpu_regs_stub()
4459 ->untag()
4460 ->InVMIsolateHeap() ||
4461 object_store->allocate_mint_without_fpu_regs_stub()
4462 ->untag()
4463 ->InVMIsolateHeap();
4464 const bool shared_slow_path_call =
4465 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
4466 LocationSummary* summary = new (zone) LocationSummary(
4467 zone, kNumInputs, kNumTemps,
4468 ValueFitsSmi()
4469           ? LocationSummary::kNoCall
4470 : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4471                                     : LocationSummary::kCallOnSlowPath)));
4472 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
4473                                     Location::RequiresRegister()));
4474 if (ValueFitsSmi()) {
4475 summary->set_out(0, Location::RequiresRegister());
4476 } else if (shared_slow_path_call) {
4477 summary->set_out(0,
4478                      Location::RegisterLocation(AllocateMintABI::kResultReg));
4479     summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
4480 } else {
4481 summary->set_out(0, Location::RequiresRegister());
4482 summary->set_temp(0, Location::RequiresRegister());
4483 }
4484 return summary;
4485}
4486
4487void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4488 if (ValueFitsSmi()) {
4489 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4490 Register value_lo = value_pair->At(0).reg();
4491 Register out_reg = locs()->out(0).reg();
4492 __ SmiTag(out_reg, value_lo);
4493 return;
4494 }
4495
4496 PairLocation* value_pair = locs()->in(0).AsPairLocation();
4497 Register value_lo = value_pair->At(0).reg();
4498 Register value_hi = value_pair->At(1).reg();
4499 Register tmp = locs()->temp(0).reg();
4500 Register out_reg = locs()->out(0).reg();
4501
4502 compiler::Label done;
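  // The 64-bit value fits in a Smi iff the low word survives tagging (first
  // cmp) and the high word is just its sign extension (second cmp); both
  // comparisons must be EQ to skip the Mint allocation below.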
4503 __ SmiTag(out_reg, value_lo);
4504 __ cmp(value_lo, compiler::Operand(out_reg, ASR, kSmiTagSize));
4505 __ cmp(value_hi, compiler::Operand(out_reg, ASR, 31), EQ);
4506 __ b(&done, EQ);
4507
4508 if (compiler->intrinsic_mode()) {
4509 __ TryAllocate(compiler->mint_class(),
4510 compiler->intrinsic_slow_path_label(),
4511 compiler::Assembler::kNearJump, out_reg, tmp);
4512 } else if (locs()->call_on_shared_slow_path()) {
4513 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4514 if (!has_frame) {
4515 ASSERT(__ constant_pool_allowed());
4516 __ set_constant_pool_allowed(false);
4517 __ EnterDartFrame(0);
4518 }
4519 auto object_store = compiler->isolate_group()->object_store();
4520 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4521 const auto& stub = Code::ZoneHandle(
4522 compiler->zone(),
4523 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4524 : object_store->allocate_mint_without_fpu_regs_stub());
4525
4526 ASSERT(!locs()->live_registers()->ContainsRegister(
4527         AllocateMintABI::kResultReg));
4528 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4529 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4530 locs(), DeoptId::kNone, extended_env);
4531 if (!has_frame) {
4532 __ LeaveDartFrame();
4533 __ set_constant_pool_allowed(true);
4534 }
4535 } else {
4536     BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
4537 out_reg, tmp);
4538 }
4539
4540 __ StoreFieldToOffset(value_lo, out_reg,
4541                         compiler::target::Mint::value_offset());
4542 __ StoreFieldToOffset(
4543 value_hi, out_reg,
4544       compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4545 __ Bind(&done);
4546}
4547
4548static void LoadInt32FromMint(FlowGraphCompiler* compiler,
4549 Register mint,
4550                               Register result,
4551 Register temp,
4552 compiler::Label* deopt) {
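  // Loads the low 32 bits of a Mint. When a deopt label is supplied, the
  // high word must be the sign extension of the low word (i.e. the value
  // must fit in 32 bits); otherwise we deoptimize.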
4553 __ LoadFieldFromOffset(result, mint, compiler::target::Mint::value_offset());
4554 if (deopt != nullptr) {
4555 __ LoadFieldFromOffset(
4556 temp, mint,
4557         compiler::target::Mint::value_offset() + compiler::target::kWordSize);
4558 __ cmp(temp,
4559 compiler::Operand(result, ASR, compiler::target::kBitsPerWord - 1));
4560 __ b(deopt, NE);
4561 }
4562}
4563
4564LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
4565 bool opt) const {
4566 ASSERT((representation() == kUnboxedInt32) ||
4567 (representation() == kUnboxedUint32));
4568 ASSERT((representation() != kUnboxedUint32) || is_truncating());
4569 const intptr_t kNumInputs = 1;
4570 const intptr_t kNumTemps = CanDeoptimize() ? 1 : 0;
4571 LocationSummary* summary = new (zone)
4572 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4573 summary->set_in(0, Location::RequiresRegister());
4574 if (kNumTemps > 0) {
4575 summary->set_temp(0, Location::RequiresRegister());
4576 }
4577 summary->set_out(0, Location::RequiresRegister());
4578 return summary;
4579}
4580
4581void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4582 const intptr_t value_cid = value()->Type()->ToCid();
4583 const Register value = locs()->in(0).reg();
4584 const Register out = locs()->out(0).reg();
4585 const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
4586 compiler::Label* deopt =
4587       CanDeoptimize()
4588 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
4589 : nullptr;
4590 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
4591 ASSERT(value != out);
4592
4593 if (value_cid == kSmiCid) {
4594 __ SmiUntag(out, value);
4595 } else if (value_cid == kMintCid) {
4596 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
4597 } else if (!CanDeoptimize()) {
4598 compiler::Label done;
4599 __ SmiUntag(out, value, &done);
4600 LoadInt32FromMint(compiler, value, out, kNoRegister, nullptr);
4601 __ Bind(&done);
4602 } else {
4603 compiler::Label done;
4604 __ SmiUntag(out, value, &done);
4605 __ CompareClassId(value, kMintCid, temp);
4606 __ b(deopt, NE);
4607 LoadInt32FromMint(compiler, value, out, temp, out_of_range);
4608 __ Bind(&done);
4609 }
4610}
4611
4612LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4613 bool opt) const {
4614 const intptr_t kNumInputs = 2;
4615 const intptr_t kNumTemps = 0;
4616 LocationSummary* summary = new (zone)
4617 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4618   summary->set_in(0, Location::RequiresFpuRegister());
4619 summary->set_in(1, Location::RequiresFpuRegister());
4620 summary->set_out(0, Location::RequiresFpuRegister());
4621 return summary;
4622}
4623
4624void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4625 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
4626 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
4627 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
4628 switch (op_kind()) {
4629 case Token::kADD:
4630 __ vaddd(result, left, right);
4631 break;
4632 case Token::kSUB:
4633 __ vsubd(result, left, right);
4634 break;
4635 case Token::kMUL:
4636 __ vmuld(result, left, right);
4637 break;
4638 case Token::kDIV:
4639 __ vdivd(result, left, right);
4640 break;
4641 default:
4642 UNREACHABLE();
4643 }
4644}
4645
4646LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
4647 bool opt) const {
4648 const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
4649 const intptr_t kNumInputs = 1;
4650 const intptr_t kNumTemps = needs_temp ? 1 : 0;
4651 LocationSummary* summary = new (zone)
4652 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4653 summary->set_in(0, Location::RequiresFpuRegister());
4654 if (needs_temp) {
4655 summary->set_temp(0, Location::RequiresRegister());
4656 }
4657 summary->set_out(0, Location::RequiresRegister());
4658 return summary;
4659}
4660
4661Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
4662 BranchLabels labels) {
4663 ASSERT(compiler->is_optimizing());
4664 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
4665 const bool is_negated = kind() != Token::kEQ;
4666
4667 switch (op_kind()) {
4668 case MethodRecognizer::kDouble_getIsNaN: {
4669 __ vcmpd(value, value);
4670 __ vmstat();
4671 return is_negated ? VC : VS;
4672 }
4673 case MethodRecognizer::kDouble_getIsInfinite: {
4674 const Register temp = locs()->temp(0).reg();
4675 compiler::Label done;
4676       // TMP <- value[0:31], temp <- value[32:63]
4677 __ vmovrrd(TMP, temp, value);
4678 __ cmp(TMP, compiler::Operand(0));
4679 __ b(is_negated ? labels.true_label : labels.false_label, NE);
4680
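      // An IEEE-754 double is +/-infinity iff its low word is zero (checked
      // above) and its high word, with the sign bit cleared, is exactly
      // 0x7FF00000 (all-ones exponent, zero mantissa).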
4681 // Mask off the sign bit.
4682 __ AndImmediate(temp, temp, 0x7FFFFFFF);
4683 // Compare with +infinity.
4684 __ CompareImmediate(temp, 0x7FF00000);
4685 return is_negated ? NE : EQ;
4686 }
4687 case MethodRecognizer::kDouble_getIsNegative: {
4688 const Register temp = locs()->temp(0).reg();
4689 __ vcmpdz(value);
4690 __ vmstat();
4691 // If it's NaN, it's not negative.
4692 __ b(is_negated ? labels.true_label : labels.false_label, VS);
4693 // Check for negative zero with a signed comparison.
4694 __ vmovrrd(TMP, temp, value, ZERO);
4695 __ cmp(temp, compiler::Operand(0), ZERO);
4696 return is_negated ? GE : LT;
4697 }
4698 default:
4699 UNREACHABLE();
4700 }
4701}
4702
4703// SIMD
4704
4705#define DEFINE_EMIT(Name, Args) \
4706 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
4707 PP_APPLY(PP_UNPACK, Args))
4708
4709DEFINE_EMIT(Simd32x4BinaryOp,
4710 (QRegister result, QRegister left, QRegister right)) {
4711 switch (instr->kind()) {
4712 case SimdOpInstr::kFloat32x4Add:
4713 __ vaddqs(result, left, right);
4714 break;
4715 case SimdOpInstr::kFloat32x4Sub:
4716 __ vsubqs(result, left, right);
4717 break;
4718 case SimdOpInstr::kFloat32x4Mul:
4719 __ vmulqs(result, left, right);
4720 break;
4721 case SimdOpInstr::kFloat32x4Div:
4722 __ Vdivqs(result, left, right);
4723 break;
4724 case SimdOpInstr::kFloat32x4Equal:
4725 __ vceqqs(result, left, right);
4726 break;
4727 case SimdOpInstr::kFloat32x4NotEqual:
4728 __ vceqqs(result, left, right);
4729 // Invert the result.
4730 __ vmvnq(result, result);
4731 break;
4732 case SimdOpInstr::kFloat32x4GreaterThan:
4733 __ vcgtqs(result, left, right);
4734 break;
4735 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4736 __ vcgeqs(result, left, right);
4737 break;
4738 case SimdOpInstr::kFloat32x4LessThan:
4739 __ vcgtqs(result, right, left);
4740 break;
4741 case SimdOpInstr::kFloat32x4LessThanOrEqual:
4742 __ vcgeqs(result, right, left);
4743 break;
4744 case SimdOpInstr::kFloat32x4Min:
4745 __ vminqs(result, left, right);
4746 break;
4747 case SimdOpInstr::kFloat32x4Max:
4748 __ vmaxqs(result, left, right);
4749 break;
4750 case SimdOpInstr::kFloat32x4Scale:
4751 __ vcvtsd(STMP, EvenDRegisterOf(left));
4752       __ vdup(compiler::kFourBytes, result, DTMP, 0);
4753 __ vmulqs(result, result, right);
4754 break;
4755 case SimdOpInstr::kInt32x4BitAnd:
4756 __ vandq(result, left, right);
4757 break;
4758 case SimdOpInstr::kInt32x4BitOr:
4759 __ vorrq(result, left, right);
4760 break;
4761 case SimdOpInstr::kInt32x4BitXor:
4762 __ veorq(result, left, right);
4763 break;
4764 case SimdOpInstr::kInt32x4Add:
4765 __ vaddqi(compiler::kFourBytes, result, left, right);
4766 break;
4767 case SimdOpInstr::kInt32x4Sub:
4768 __ vsubqi(compiler::kFourBytes, result, left, right);
4769 break;
4770 default:
4771 UNREACHABLE();
4772 }
4773}
4774
4775DEFINE_EMIT(Float64x2BinaryOp,
4776 (QRegisterView result, QRegisterView left, QRegisterView right)) {
4777 switch (instr->kind()) {
4778 case SimdOpInstr::kFloat64x2Add:
4779 __ vaddd(result.d(0), left.d(0), right.d(0));
4780 __ vaddd(result.d(1), left.d(1), right.d(1));
4781 break;
4782 case SimdOpInstr::kFloat64x2Sub:
4783 __ vsubd(result.d(0), left.d(0), right.d(0));
4784 __ vsubd(result.d(1), left.d(1), right.d(1));
4785 break;
4786 case SimdOpInstr::kFloat64x2Mul:
4787 __ vmuld(result.d(0), left.d(0), right.d(0));
4788 __ vmuld(result.d(1), left.d(1), right.d(1));
4789 break;
4790 case SimdOpInstr::kFloat64x2Div:
4791 __ vdivd(result.d(0), left.d(0), right.d(0));
4792 __ vdivd(result.d(1), left.d(1), right.d(1));
4793 break;
4794 default:
4795 UNREACHABLE();
4796 }
4797}
4798
4799// Low (< Q7) Q registers are needed for the vcvtds and vmovs instructions.
4800// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4801DEFINE_EMIT(Simd32x4Shuffle,
4802 (FixedQRegisterView<Q6> result, FixedQRegisterView<Q5> value)) {
4803   // For some shuffle masks a single vdup instruction suffices. For
4804   // arbitrary shuffles, use vtbl.
4805
4806 switch (instr->kind()) {
4807 case SimdOpInstr::kFloat32x4GetX:
4808 __ vcvtds(result.d(0), value.s(0));
4809 break;
4810 case SimdOpInstr::kFloat32x4GetY:
4811 __ vcvtds(result.d(0), value.s(1));
4812 break;
4813 case SimdOpInstr::kFloat32x4GetZ:
4814 __ vcvtds(result.d(0), value.s(2));
4815 break;
4816 case SimdOpInstr::kFloat32x4GetW:
4817 __ vcvtds(result.d(0), value.s(3));
4818 break;
4819 case SimdOpInstr::kInt32x4Shuffle:
4820 case SimdOpInstr::kFloat32x4Shuffle: {
4821 if (instr->mask() == 0x00) {
4822 __ vdup(compiler::kFourBytes, result, value.d(0), 0);
4823 } else if (instr->mask() == 0x55) {
4824 __ vdup(compiler::kFourBytes, result, value.d(0), 1);
4825 } else if (instr->mask() == 0xAA) {
4826 __ vdup(compiler::kFourBytes, result, value.d(1), 0);
4827 } else if (instr->mask() == 0xFF) {
4828 __ vdup(compiler::kFourBytes, result, value.d(1), 1);
4829 } else {
4830 // TODO(zra): Investigate better instruction sequences for other
4831 // shuffle masks.
4832 QRegisterView temp(QTMP);
4833
4834 __ vmovq(temp, value);
4835 for (intptr_t i = 0; i < 4; i++) {
4836 __ vmovs(result.s(i), temp.s((instr->mask() >> (2 * i)) & 0x3));
4837 }
4838 }
4839 break;
4840 }
4841 default:
4842 UNREACHABLE();
4843 }
4844}
4845
4846// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4847DEFINE_EMIT(Simd32x4ShuffleMix,
4848 (FixedQRegisterView<Q6> result,
4849 FixedQRegisterView<Q4> left,
4850 FixedQRegisterView<Q5> right)) {
4851 // TODO(zra): Investigate better instruction sequences for shuffle masks.
4852 __ vmovs(result.s(0), left.s((instr->mask() >> 0) & 0x3));
4853 __ vmovs(result.s(1), left.s((instr->mask() >> 2) & 0x3));
4854 __ vmovs(result.s(2), right.s((instr->mask() >> 4) & 0x3));
4855 __ vmovs(result.s(3), right.s((instr->mask() >> 6) & 0x3));
4856}
4857
4858// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4859DEFINE_EMIT(Simd32x4GetSignMask,
4860 (Register out, FixedQRegisterView<Q5> value, Temp<Register> temp)) {
4861 // X lane.
4862 __ vmovrs(out, value.s(0));
4863 __ Lsr(out, out, compiler::Operand(31));
4864 // Y lane.
4865 __ vmovrs(temp, value.s(1));
4866 __ Lsr(temp, temp, compiler::Operand(31));
4867 __ orr(out, out, compiler::Operand(temp, LSL, 1));
4868 // Z lane.
4869 __ vmovrs(temp, value.s(2));
4870 __ Lsr(temp, temp, compiler::Operand(31));
4871 __ orr(out, out, compiler::Operand(temp, LSL, 2));
4872 // W lane.
4873 __ vmovrs(temp, value.s(3));
4874 __ Lsr(temp, temp, compiler::Operand(31));
4875 __ orr(out, out, compiler::Operand(temp, LSL, 3));
4876}
4877
4878// Low (< 7) Q registers are needed for the vcvtsd instruction.
4879// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4880DEFINE_EMIT(Float32x4FromDoubles,
4881 (FixedQRegisterView<Q6> out,
4882 QRegisterView q0,
4883 QRegisterView q1,
4884 QRegisterView q2,
4885 QRegisterView q3)) {
4886 __ vcvtsd(out.s(0), q0.d(0));
4887 __ vcvtsd(out.s(1), q1.d(0));
4888 __ vcvtsd(out.s(2), q2.d(0));
4889 __ vcvtsd(out.s(3), q3.d(0));
4890}
4891
4892DEFINE_EMIT(Float32x4Zero, (QRegister out)) {
4893 __ veorq(out, out, out);
4894}
4895
4896DEFINE_EMIT(Float32x4Splat, (QRegister result, QRegisterView value)) {
4897 // Convert to Float32.
4898 __ vcvtsd(STMP, value.d(0));
4899
4900 // Splat across all lanes.
4901   __ vdup(compiler::kFourBytes, result, DTMP, 0);
4902}
4903
4904DEFINE_EMIT(Float32x4Sqrt,
4905 (QRegister result, QRegister left, Temp<QRegister> temp)) {
4906 __ Vsqrtqs(result, left, temp);
4907}
4908
4909DEFINE_EMIT(Float32x4Unary, (QRegister result, QRegister left)) {
4910 switch (instr->kind()) {
4911 case SimdOpInstr::kFloat32x4Negate:
4912 __ vnegqs(result, left);
4913 break;
4914 case SimdOpInstr::kFloat32x4Abs:
4915 __ vabsqs(result, left);
4916 break;
4917 case SimdOpInstr::kFloat32x4Reciprocal:
4918 __ Vreciprocalqs(result, left);
4919 break;
4920 case SimdOpInstr::kFloat32x4ReciprocalSqrt:
4921 __ VreciprocalSqrtqs(result, left);
4922 break;
4923 default:
4924 UNREACHABLE();
4925 }
4926}
4927
4928DEFINE_EMIT(Simd32x4ToSimd32x4Conversion, (SameAsFirstInput, QRegister left)) {
4929   // TODO(dartbug.com/30949) these operations are essentially no-ops and should
4930 // not generate any code. They should be removed from the graph before
4931 // code generation.
4932}
4933
4934DEFINE_EMIT(
4935 Float32x4Clamp,
4936     (QRegister result, QRegister left, QRegister lower, QRegister upper)) {
4937 __ vminqs(result, left, upper);
4938 __ vmaxqs(result, result, lower);
4939}
4940
4941DEFINE_EMIT(Float64x2Clamp,
4942 (QRegisterView result,
4943 QRegisterView left,
4944 QRegisterView lower,
4945 QRegisterView upper)) {
4946 compiler::Label done0, done1;
4947 // result = max(min(left, upper), lower) |
4948 // lower if (upper is NaN || left is NaN) |
4949 // upper if lower is NaN
4950 __ vcmpd(left.d(0), upper.d(0));
4951 __ vmstat();
4952 __ vmovd(result.d(0), upper.d(0), GE);
4953 __ vmovd(result.d(0), left.d(0), LT); // less than or unordered(NaN)
4954 __ b(&done0, VS); // at least one argument was NaN
4955 __ vcmpd(result.d(0), lower.d(0));
4956 __ vmstat();
4957 __ vmovd(result.d(0), lower.d(0), LE);
4958 __ Bind(&done0);
4959
4960 __ vcmpd(left.d(1), upper.d(1));
4961 __ vmstat();
4962 __ vmovd(result.d(1), upper.d(1), GE);
4963 __ vmovd(result.d(1), left.d(1), LT); // less than or unordered(NaN)
4964 __ b(&done1, VS); // at least one argument was NaN
4965 __ vcmpd(result.d(1), lower.d(1));
4966 __ vmstat();
4967 __ vmovd(result.d(1), lower.d(1), LE);
4968 __ Bind(&done1);
4969}
4970
4971// Low (< 7) Q registers are needed for the vmovs instruction.
4972// TODO(dartbug.com/30953) support register range constraints in the regalloc.
4973DEFINE_EMIT(Float32x4With,
4974 (FixedQRegisterView<Q6> result,
4975 QRegisterView replacement,
4976 QRegister value)) {
4977 __ vcvtsd(STMP, replacement.d(0));
4978 __ vmovq(result, value);
4979 switch (instr->kind()) {
4980 case SimdOpInstr::kFloat32x4WithX:
4981 __ vmovs(result.s(0), STMP);
4982 break;
4983 case SimdOpInstr::kFloat32x4WithY:
4984 __ vmovs(result.s(1), STMP);
4985 break;
4986 case SimdOpInstr::kFloat32x4WithZ:
4987 __ vmovs(result.s(2), STMP);
4988 break;
4989 case SimdOpInstr::kFloat32x4WithW:
4990 __ vmovs(result.s(3), STMP);
4991 break;
4992 default:
4993 UNREACHABLE();
4994 }
4995}
4996
4997DEFINE_EMIT(Simd64x2Shuffle, (QRegisterView result, QRegisterView value)) {
4998 switch (instr->kind()) {
4999 case SimdOpInstr::kFloat64x2GetX:
5000 __ vmovd(result.d(0), value.d(0));
5001 break;
5002 case SimdOpInstr::kFloat64x2GetY:
5003 __ vmovd(result.d(0), value.d(1));
5004 break;
5005 default:
5006 UNREACHABLE();
5007 }
5008}
5009
5010DEFINE_EMIT(Float64x2Zero, (QRegister q)) {
5011 __ veorq(q, q, q);
5012}
5013
5014DEFINE_EMIT(Float64x2Splat, (QRegisterView result, QRegisterView value)) {
5015 // Splat across all lanes.
5016 __ vmovd(result.d(0), value.d(0));
5017 __ vmovd(result.d(1), value.d(0));
5018}
5019
5020DEFINE_EMIT(Float64x2FromDoubles,
5021 (QRegisterView r, QRegisterView q0, QRegisterView q1)) {
5022 __ vmovd(r.d(0), q0.d(0));
5023 __ vmovd(r.d(1), q1.d(0));
5024}
5025
5026// Low (< 7) Q registers are needed for the vcvtsd instruction.
5027// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5028DEFINE_EMIT(Float64x2ToFloat32x4, (FixedQRegisterView<Q6> r, QRegisterView q)) {
5029 __ veorq(r, r, r);
5030 // Set X lane.
5031 __ vcvtsd(r.s(0), q.d(0));
5032 // Set Y lane.
5033 __ vcvtsd(r.s(1), q.d(1));
5034}
5035
5036// Low (< 7) Q registers are needed for the vcvtds instruction.
5037// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5038DEFINE_EMIT(Float32x4ToFloat64x2, (QRegisterView r, FixedQRegisterView<Q6> q)) {
5039 // Set X.
5040 __ vcvtds(r.d(0), q.s(0));
5041 // Set Y.
5042 __ vcvtds(r.d(1), q.s(1));
5043}
5044
5045// Grabbing the S components means we need a low (< 7) Q.
5046// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5047DEFINE_EMIT(Float64x2GetSignMask,
5048 (Register out, FixedQRegisterView<Q6> value)) {
5049 // Upper 32-bits of X lane.
5050 __ vmovrs(out, value.s(1));
5051 __ Lsr(out, out, compiler::Operand(31));
5052 // Upper 32-bits of Y lane.
5053 __ vmovrs(TMP, value.s(3));
5054 __ Lsr(TMP, TMP, compiler::Operand(31));
5055 __ orr(out, out, compiler::Operand(TMP, LSL, 1));
5056}
5057
5058DEFINE_EMIT(Float64x2Unary, (QRegisterView result, QRegisterView value)) {
5059 switch (instr->kind()) {
5060 case SimdOpInstr::kFloat64x2Negate:
5061 __ vnegd(result.d(0), value.d(0));
5062 __ vnegd(result.d(1), value.d(1));
5063 break;
5064 case SimdOpInstr::kFloat64x2Abs:
5065 __ vabsd(result.d(0), value.d(0));
5066 __ vabsd(result.d(1), value.d(1));
5067 break;
5068 case SimdOpInstr::kFloat64x2Sqrt:
5069 __ vsqrtd(result.d(0), value.d(0));
5070 __ vsqrtd(result.d(1), value.d(1));
5071 break;
5072 default:
5073 UNREACHABLE();
5074 }
5075}
5076
5077DEFINE_EMIT(Float64x2Binary,
5078 (SameAsFirstInput, QRegisterView left, QRegisterView right)) {
5079 switch (instr->kind()) {
5080 case SimdOpInstr::kFloat64x2Scale:
5081 __ vmuld(left.d(0), left.d(0), right.d(0));
5082 __ vmuld(left.d(1), left.d(1), right.d(0));
5083 break;
5084 case SimdOpInstr::kFloat64x2WithX:
5085 __ vmovd(left.d(0), right.d(0));
5086 break;
5087 case SimdOpInstr::kFloat64x2WithY:
5088 __ vmovd(left.d(1), right.d(0));
5089 break;
5090 case SimdOpInstr::kFloat64x2Min: {
5091 // X lane.
5092 __ vcmpd(left.d(0), right.d(0));
5093 __ vmstat();
5094 __ vmovd(left.d(0), right.d(0), GE);
5095 // Y lane.
5096 __ vcmpd(left.d(1), right.d(1));
5097 __ vmstat();
5098 __ vmovd(left.d(1), right.d(1), GE);
5099 break;
5100 }
5101 case SimdOpInstr::kFloat64x2Max: {
5102 // X lane.
5103 __ vcmpd(left.d(0), right.d(0));
5104 __ vmstat();
5105 __ vmovd(left.d(0), right.d(0), LE);
5106 // Y lane.
5107 __ vcmpd(left.d(1), right.d(1));
5108 __ vmstat();
5109 __ vmovd(left.d(1), right.d(1), LE);
5110 break;
5111 }
5112 default:
5113 UNREACHABLE();
5114 }
5115}
5116
5117DEFINE_EMIT(Int32x4FromInts,
5118 (QRegisterView result,
5119 Register v0,
5120 Register v1,
5121 Register v2,
5122 Register v3)) {
5123 __ veorq(result, result, result);
5124 __ vmovdrr(result.d(0), v0, v1);
5125 __ vmovdrr(result.d(1), v2, v3);
5126}
5127
5128DEFINE_EMIT(Int32x4FromBools,
5129 (QRegisterView result,
5130 Register v0,
5131 Register v1,
5132 Register v2,
5133 Register v3,
5134 Temp<Register> temp)) {
5135 __ veorq(result, result, result);
5136 __ LoadImmediate(temp, 0xffffffff);
5137
5138 __ LoadObject(IP, Bool::True());
5139 __ cmp(v0, compiler::Operand(IP));
5140 __ vmovdr(result.d(0), 0, temp, EQ);
5141
5142 __ cmp(v1, compiler::Operand(IP));
5143 __ vmovdr(result.d(0), 1, temp, EQ);
5144
5145 __ cmp(v2, compiler::Operand(IP));
5146 __ vmovdr(result.d(1), 0, temp, EQ);
5147
5148 __ cmp(v3, compiler::Operand(IP));
5149 __ vmovdr(result.d(1), 1, temp, EQ);
5150}
5151
5152// Low (< 7) Q registers are needed for the vmovrs instruction.
5153// TODO(dartbug.com/30953) support register range constraints in the regalloc.
5154DEFINE_EMIT(Int32x4GetFlag, (Register result, FixedQRegisterView<Q6> value)) {
5155 switch (instr->kind()) {
5156 case SimdOpInstr::kInt32x4GetFlagX:
5157 __ vmovrs(result, value.s(0));
5158 break;
5159 case SimdOpInstr::kInt32x4GetFlagY:
5160 __ vmovrs(result, value.s(1));
5161 break;
5162 case SimdOpInstr::kInt32x4GetFlagZ:
5163 __ vmovrs(result, value.s(2));
5164 break;
5165 case SimdOpInstr::kInt32x4GetFlagW:
5166 __ vmovrs(result, value.s(3));
5167 break;
5168 default:
5169 UNREACHABLE();
5170 }
5171
5172 __ tst(result, compiler::Operand(result));
5173 __ LoadObject(result, Bool::True(), NE);
5174 __ LoadObject(result, Bool::False(), EQ);
5175}
5176
5177DEFINE_EMIT(Int32x4Select,
5178 (QRegister out,
5179 QRegister mask,
5180 QRegister trueValue,
5181 QRegister falseValue,
5182 Temp<QRegister> temp)) {
5183 // Copy mask.
5184 __ vmovq(temp, mask);
5185 // Invert it.
5186 __ vmvnq(temp, temp);
5187 // mask = mask & trueValue.
5188 __ vandq(mask, mask, trueValue);
5189 // temp = temp & falseValue.
5190 __ vandq(temp, temp, falseValue);
5191 // out = mask | temp.
5192 __ vorrq(out, mask, temp);
5193}
5194
5195DEFINE_EMIT(Int32x4WithFlag,
5196 (QRegisterView result, QRegister mask, Register flag)) {
5197 __ vmovq(result, mask);
5198 __ CompareObject(flag, Bool::True());
5199 __ LoadImmediate(TMP, 0xffffffff, EQ);
5200 __ LoadImmediate(TMP, 0, NE);
5201 switch (instr->kind()) {
5202 case SimdOpInstr::kInt32x4WithFlagX:
5203 __ vmovdr(result.d(0), 0, TMP);
5204 break;
5205 case SimdOpInstr::kInt32x4WithFlagY:
5206 __ vmovdr(result.d(0), 1, TMP);
5207 break;
5208 case SimdOpInstr::kInt32x4WithFlagZ:
5209 __ vmovdr(result.d(1), 0, TMP);
5210 break;
5211 case SimdOpInstr::kInt32x4WithFlagW:
5212 __ vmovdr(result.d(1), 1, TMP);
5213 break;
5214 default:
5215 UNREACHABLE();
5216 }
5217}
5218
5219// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
5220// format:
5221//
5222// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
5223// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
5224//
5225#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
5226 CASE(Float32x4Add) \
5227 CASE(Float32x4Sub) \
5228 CASE(Float32x4Mul) \
5229 CASE(Float32x4Div) \
5230 CASE(Float32x4Equal) \
5231 CASE(Float32x4NotEqual) \
5232 CASE(Float32x4GreaterThan) \
5233 CASE(Float32x4GreaterThanOrEqual) \
5234 CASE(Float32x4LessThan) \
5235 CASE(Float32x4LessThanOrEqual) \
5236 CASE(Float32x4Min) \
5237 CASE(Float32x4Max) \
5238 CASE(Float32x4Scale) \
5239 CASE(Int32x4BitAnd) \
5240 CASE(Int32x4BitOr) \
5241 CASE(Int32x4BitXor) \
5242 CASE(Int32x4Add) \
5243 CASE(Int32x4Sub) \
5244 ____(Simd32x4BinaryOp) \
5245 CASE(Float64x2Add) \
5246 CASE(Float64x2Sub) \
5247 CASE(Float64x2Mul) \
5248 CASE(Float64x2Div) \
5249 ____(Float64x2BinaryOp) \
5250 CASE(Float32x4GetX) \
5251 CASE(Float32x4GetY) \
5252 CASE(Float32x4GetZ) \
5253 CASE(Float32x4GetW) \
5254 CASE(Int32x4Shuffle) \
5255 CASE(Float32x4Shuffle) \
5256 ____(Simd32x4Shuffle) \
5257 CASE(Float32x4ShuffleMix) \
5258 CASE(Int32x4ShuffleMix) \
5259 ____(Simd32x4ShuffleMix) \
5260 CASE(Float32x4GetSignMask) \
5261 CASE(Int32x4GetSignMask) \
5262 ____(Simd32x4GetSignMask) \
5263 SIMPLE(Float32x4FromDoubles) \
5264 SIMPLE(Float32x4Zero) \
5265 SIMPLE(Float32x4Splat) \
5266 SIMPLE(Float32x4Sqrt) \
5267 CASE(Float32x4Negate) \
5268 CASE(Float32x4Abs) \
5269 CASE(Float32x4Reciprocal) \
5270 CASE(Float32x4ReciprocalSqrt) \
5271 ____(Float32x4Unary) \
5272 CASE(Float32x4ToInt32x4) \
5273 CASE(Int32x4ToFloat32x4) \
5274 ____(Simd32x4ToSimd32x4Conversion) \
5275 SIMPLE(Float32x4Clamp) \
5276 SIMPLE(Float64x2Clamp) \
5277 CASE(Float32x4WithX) \
5278 CASE(Float32x4WithY) \
5279 CASE(Float32x4WithZ) \
5280 CASE(Float32x4WithW) \
5281 ____(Float32x4With) \
5282 CASE(Float64x2GetX) \
5283 CASE(Float64x2GetY) \
5284 ____(Simd64x2Shuffle) \
5285 SIMPLE(Float64x2Zero) \
5286 SIMPLE(Float64x2Splat) \
5287 SIMPLE(Float64x2FromDoubles) \
5288 SIMPLE(Float64x2ToFloat32x4) \
5289 SIMPLE(Float32x4ToFloat64x2) \
5290 SIMPLE(Float64x2GetSignMask) \
5291 CASE(Float64x2Negate) \
5292 CASE(Float64x2Abs) \
5293 CASE(Float64x2Sqrt) \
5294 ____(Float64x2Unary) \
5295 CASE(Float64x2Scale) \
5296 CASE(Float64x2WithX) \
5297 CASE(Float64x2WithY) \
5298 CASE(Float64x2Min) \
5299 CASE(Float64x2Max) \
5300 ____(Float64x2Binary) \
5301 SIMPLE(Int32x4FromInts) \
5302 SIMPLE(Int32x4FromBools) \
5303 CASE(Int32x4GetFlagX) \
5304 CASE(Int32x4GetFlagY) \
5305 CASE(Int32x4GetFlagZ) \
5306 CASE(Int32x4GetFlagW) \
5307 ____(Int32x4GetFlag) \
5308 SIMPLE(Int32x4Select) \
5309 CASE(Int32x4WithFlagX) \
5310 CASE(Int32x4WithFlagY) \
5311 CASE(Int32x4WithFlagZ) \
5312 CASE(Int32x4WithFlagW) \
5313 ____(Int32x4WithFlag)
5314
5315LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5316 switch (kind()) {
5317#define CASE(Name) case k##Name:
5318#define EMIT(Name) \
5319 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
5320#define SIMPLE(Name) CASE(Name) EMIT(Name)
5321 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5322#undef CASE
5323#undef EMIT
5324#undef SIMPLE
5325 case kIllegalSimdOp:
5326 UNREACHABLE();
5327 break;
5328 }
5329 UNREACHABLE();
5330 return nullptr;
5331}
5332
5333void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5334 switch (kind()) {
5335#define CASE(Name) case k##Name:
5336#define EMIT(Name) \
5337 InvokeEmitter(compiler, this, &Emit##Name); \
5338 break;
5339#define SIMPLE(Name) CASE(Name) EMIT(Name)
5340 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
5341#undef CASE
5342#undef EMIT
5343#undef SIMPLE
5344 case kIllegalSimdOp:
5345 UNREACHABLE();
5346 break;
5347 }
5348}
5349
5350#undef DEFINE_EMIT
5351
5352 LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
5353 Zone* zone,
5354 bool opt) const {
5355 const intptr_t kNumTemps = 0;
5356 LocationSummary* summary = new (zone)
5357 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5358 summary->set_in(0, Location::RegisterLocation(R0));
5359 summary->set_in(1, Location::RegisterLocation(R1));
5360 summary->set_in(2, Location::RegisterLocation(R2));
5361 summary->set_in(3, Location::RegisterLocation(R3));
5362 summary->set_out(0, Location::RegisterLocation(R0));
5363 return summary;
5364}
5365
5366void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5367 compiler::LeafRuntimeScope rt(compiler->assembler(),
5368 /*frame_size=*/0,
5369 /*preserve_registers=*/false);
5370 // Call the function. Parameters are already in their correct spots.
5371 rt.Call(TargetFunction(), TargetFunction().argument_count());
5372}
5373
5374LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
5375 bool opt) const {
5376 if (result_cid() == kDoubleCid) {
5377 const intptr_t kNumInputs = 2;
5378 const intptr_t kNumTemps = 1;
5379 LocationSummary* summary = new (zone)
5380 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5381 summary->set_in(0, Location::RequiresFpuRegister());
5382 summary->set_in(1, Location::RequiresFpuRegister());
5383 // Reuse the left register so that code can be made shorter.
5384 summary->set_out(0, Location::SameAsFirstInput());
5385 summary->set_temp(0, Location::RequiresRegister());
5386 return summary;
5387 }
5388 ASSERT(result_cid() == kSmiCid);
5389 const intptr_t kNumInputs = 2;
5390 const intptr_t kNumTemps = 0;
5391 LocationSummary* summary = new (zone)
5392 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5393 summary->set_in(0, Location::RequiresRegister());
5394 summary->set_in(1, Location::RequiresRegister());
5395 // Reuse the left register so that code can be made shorter.
5396 summary->set_out(0, Location::SameAsFirstInput());
5397 return summary;
5398}
5399
5400void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5401 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
5402 (op_kind() == MethodRecognizer::kMathMax));
5403 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
5404 if (result_cid() == kDoubleCid) {
5405 compiler::Label done, returns_nan, are_equal;
5406 const DRegister left = EvenDRegisterOf(locs()->in(0).fpu_reg());
5407 const DRegister right = EvenDRegisterOf(locs()->in(1).fpu_reg());
5408 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5409 const Register temp = locs()->temp(0).reg();
5410 __ vcmpd(left, right);
5411 __ vmstat();
5412 __ b(&returns_nan, VS);
5413 __ b(&are_equal, EQ);
5414 const Condition neg_double_condition =
5415 is_min ? TokenKindToDoubleCondition(Token::kGTE)
5416 : TokenKindToDoubleCondition(Token::kLTE);
5417 ASSERT(left == result);
5418 __ vmovd(result, right, neg_double_condition);
5419 __ b(&done);
5420
5421 __ Bind(&returns_nan);
5422 __ LoadDImmediate(result, NAN, temp);
5423 __ b(&done);
5424
5425 __ Bind(&are_equal);
5426 // Check for negative zero: -0.0 is equal to 0.0, but min must return
5427 // -0.0 and max must return 0.0.
5428 // Check for a negative left value (get the sign bit):
5429 // - min -> left is negative ? left : right.
5430 // - max -> left is negative ? right : left.
5431 // Check the sign bit.
5432 __ vmovrrd(IP, temp, left); // Sign bit is in bit 31 of temp.
5433 __ cmp(temp, compiler::Operand(0));
5434 if (is_min) {
5435 ASSERT(left == result);
5436 __ vmovd(result, right, GE);
5437 } else {
5438 __ vmovd(result, right, LT);
5439 ASSERT(left == result);
5440 }
5441 __ Bind(&done);
5442 return;
5443 }
5444
5445 ASSERT(result_cid() == kSmiCid);
5446 const Register left = locs()->in(0).reg();
5447 const Register right = locs()->in(1).reg();
5448 const Register result = locs()->out(0).reg();
5449 __ cmp(left, compiler::Operand(right));
5450 ASSERT(result == left);
5451 if (is_min) {
5452 __ mov(result, compiler::Operand(right), GT);
5453 } else {
5454 __ mov(result, compiler::Operand(right), LT);
5455 }
5456}
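// Note on the are_equal path above: vcmpd reports -0.0 == 0.0, so the sign
// word of 'left' is inspected instead. For min(0.0, -0.0) the sign of left
// is clear (GE) and the result is taken from 'right', yielding -0.0; for
// max(-0.0, 0.0) the sign of left is set (LT) and 'right' (0.0) wins.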
5457
5458LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
5459 bool opt) const {
5460 const intptr_t kNumInputs = 1;
5461 const intptr_t kNumTemps = 0;
5462 LocationSummary* summary = new (zone)
5463 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5464 summary->set_in(0, Location::RequiresRegister());
5465 // We make use of 3-operand instructions: unlike on Intel, the result
5466 // register is not required to be identical to the first input register.
5467 summary->set_out(0, Location::RequiresRegister());
5468 return summary;
5469}
5470
5471void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5472 const Register value = locs()->in(0).reg();
5473 const Register result = locs()->out(0).reg();
5474 switch (op_kind()) {
5475 case Token::kNEGATE: {
5476 compiler::Label* deopt =
5477 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
5478 __ rsbs(result, value, compiler::Operand(0));
5479 __ b(deopt, VS);
5480 break;
5481 }
5482 case Token::kBIT_NOT:
5483 __ mvn_(result, compiler::Operand(value));
5484 // Remove inverted smi-tag.
5485 __ bic(result, result, compiler::Operand(kSmiTagMask));
5486 break;
5487 default:
5488 UNREACHABLE();
5489 }
5490}
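// Note on the kBIT_NOT sequence above: for a tagged Smi 2*n, mvn produces
// ~(2*n) = -2*n - 1, whose tag bit is 1; clearing the tag bit with bic
// yields -2*n - 2 = 2*(~n), i.e. the correctly tagged Smi for ~n.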
5491
5492LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
5493 bool opt) const {
5494 const intptr_t kNumInputs = 1;
5495 const intptr_t kNumTemps = 0;
5496 LocationSummary* summary = new (zone)
5497 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5498 summary->set_in(0, Location::RequiresFpuRegister());
5499 summary->set_out(0, Location::RequiresFpuRegister());
5500 return summary;
5501}
5502
5503void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5504 ASSERT(representation() == kUnboxedDouble);
5505 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5506 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5507 switch (op_kind()) {
5508 case Token::kNEGATE:
5509 __ vnegd(result, value);
5510 break;
5511 case Token::kSQRT:
5512 __ vsqrtd(result, value);
5513 break;
5514 case Token::kSQUARE:
5515 __ vmuld(result, value, value);
5516 break;
5517 default:
5518 UNREACHABLE();
5519 }
5520}
5521
5522LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
5523 bool opt) const {
5524 const intptr_t kNumInputs = 1;
5525 const intptr_t kNumTemps = 0;
5526 LocationSummary* result = new (zone)
5527 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5528 result->set_in(0, Location::RequiresRegister());
5529 result->set_out(0, Location::RequiresFpuRegister());
5530 return result;
5531}
5532
5533void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5534 const Register value = locs()->in(0).reg();
5535 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5536 __ vmovdr(DTMP, 0, value);
5537 __ vcvtdi(result, STMP);
5538}
5539
5540LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
5541 bool opt) const {
5542 const intptr_t kNumInputs = 1;
5543 const intptr_t kNumTemps = 0;
5544 LocationSummary* result = new (zone)
5545 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5546 result->set_in(0, Location::RequiresRegister());
5547 result->set_out(0, Location::RequiresFpuRegister());
5548 return result;
5549}
5550
5551void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5552 const Register value = locs()->in(0).reg();
5553 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5554 __ SmiUntag(IP, value);
5555 __ vmovdr(DTMP, 0, IP);
5556 __ vcvtdi(result, STMP);
5557}
5558
5559LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
5560 bool opt) const {
5561 UNIMPLEMENTED();
5562 return nullptr;
5563}
5564
5565void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5566 UNIMPLEMENTED();
5567}
5568
5569LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
5570 bool opt) const {
5571 const intptr_t kNumInputs = 1;
5572 const intptr_t kNumTemps = 0;
5573 LocationSummary* result = new (zone) LocationSummary(
5574 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5575 result->set_in(0, Location::RequiresFpuRegister());
5576 result->set_out(0, Location::RequiresRegister());
5577 return result;
5578}
5579
5580void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5581 ASSERT(recognized_kind() == MethodRecognizer::kDoubleToInteger);
5582 const Register result = locs()->out(0).reg();
5583 const DRegister value_double = EvenDRegisterOf(locs()->in(0).fpu_reg());
5584
5585 DoubleToIntegerSlowPath* slow_path =
5586 new DoubleToIntegerSlowPath(this, locs()->in(0).fpu_reg());
5587 compiler->AddSlowPathCode(slow_path);
5588
5589 // First check for NaN. Checking for minint after the conversion doesn't work
5590 // on ARM because vcvtid gives 0 for NaN.
5591 __ vcmpd(value_double, value_double);
5592 __ vmstat();
5593 __ b(slow_path->entry_label(), VS);
5594
5595 __ vcvtid(STMP, value_double);
5596 __ vmovrs(result, STMP);
5597 // Overflow is signaled with minint.
5598 // Check for overflow and that it fits into Smi.
5599 __ CompareImmediate(result, 0xC0000000);
5600 __ b(slow_path->entry_label(), MI);
5601 __ SmiTag(result);
5602 __ Bind(slow_path->exit_label());
5603}
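// Note on the range check above: overflow from vcvtid is signaled with
// INT32_MIN (NaN was already diverted to the slow path), and any result
// outside the Smi range [-2^30, 2^30) makes result - 0xC0000000 negative
// as a signed 32-bit value, so the single compare plus MI branch catches
// both the overflow sentinel and values that do not fit into a Smi.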
5604
5605LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
5606 bool opt) const {
5607 const intptr_t kNumInputs = 1;
5608 const intptr_t kNumTemps = 0;
5609 LocationSummary* result = new (zone)
5610 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5611 result->set_in(0, Location::RequiresFpuRegister());
5612 result->set_out(0, Location::RequiresRegister());
5613 return result;
5614}
5615
5616void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5617 compiler::Label* deopt =
5618 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
5619 const Register result = locs()->out(0).reg();
5620 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5621 // First check for NaN. Checking for minint after the conversion doesn't work
5622 // on ARM because vcvtid gives 0 for NaN.
5623 __ vcmpd(value, value);
5624 __ vmstat();
5625 __ b(deopt, VS);
5626
5627 __ vcvtid(STMP, value);
5628 __ vmovrs(result, STMP);
5629 // Check for overflow and that it fits into Smi.
5630 __ CompareImmediate(result, 0xC0000000);
5631 __ b(deopt, MI);
5632 __ SmiTag(result);
5633}
5634
5635LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
5636 bool opt) const {
5637 const intptr_t kNumInputs = 1;
5638 const intptr_t kNumTemps = 0;
5639 LocationSummary* result = new (zone)
5640 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5641 // Low (< Q7) Q registers are needed for the conversion instructions.
5644 return result;
5645}
5646
5647void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5648 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
5649 const SRegister result =
5650 EvenSRegisterOf(EvenDRegisterOf(locs()->out(0).fpu_reg()));
5651 __ vcvtsd(result, value);
5652}
5653
5654LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
5655 bool opt) const {
5656 const intptr_t kNumInputs = 1;
5657 const intptr_t kNumTemps = 0;
5658 LocationSummary* result = new (zone)
5659 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5660 // Low (< Q7) Q registers are needed for the conversion instructions.
5663 return result;
5664}
5665
5666void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5667 const SRegister value =
5668 EvenSRegisterOf(EvenDRegisterOf(locs()->in(0).fpu_reg()));
5669 const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
5670 __ vcvtds(result, value);
5671}
5672
5673LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
5674 bool opt) const {
5675 UNREACHABLE();
5676 return NULL;
5677}
5678
5679void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5680 UNREACHABLE();
5681}
5682
5683LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
5684 bool opt) const {
5685 ASSERT((InputCount() == 1) || (InputCount() == 2));
5686 const intptr_t kNumTemps =
5687 TargetCPUFeatures::hardfp_supported()
5688 ? ((recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0)
5689 : 4;
5690 LocationSummary* result = new (zone)
5691 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5692 result->set_in(0, Location::FpuRegisterLocation(Q0));
5693 if (InputCount() == 2) {
5694 result->set_in(1, Location::FpuRegisterLocation(Q1));
5695 }
5696 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5697 result->set_temp(0, Location::RegisterLocation(R2));
5698 if (!TargetCPUFeatures::hardfp_supported()) {
5699 result->set_temp(1, Location::RegisterLocation(R0));
5700 result->set_temp(2, Location::RegisterLocation(R1));
5701 result->set_temp(3, Location::RegisterLocation(R3));
5702 }
5703 } else if (!TargetCPUFeatures::hardfp_supported()) {
5704 result->set_temp(0, Location::RegisterLocation(R0));
5705 result->set_temp(1, Location::RegisterLocation(R1));
5706 result->set_temp(2, Location::RegisterLocation(R2));
5707 result->set_temp(3, Location::RegisterLocation(R3));
5708 }
5709 result->set_out(0, Location::FpuRegisterLocation(Q0));
5710 return result;
5711}
5712
5713// Pseudo code:
5714// if (exponent == 0.0) return 1.0;
5715// // Speed up simple cases.
5716// if (exponent == 1.0) return base;
5717// if (exponent == 2.0) return base * base;
5718// if (exponent == 3.0) return base * base * base;
5719// if (base == 1.0) return 1.0;
5720// if (base.isNaN || exponent.isNaN) {
5721// return double.NAN;
5722// }
5723// if (base != -Infinity && exponent == 0.5) {
5724// if (base == 0.0) return 0.0;
5725// return sqrt(value);
5726// }
5727// TODO(srdjan): Move into a stub?
5728static void InvokeDoublePow(FlowGraphCompiler* compiler,
5729 InvokeMathCFunctionInstr* instr) {
5730 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
5731 const intptr_t kInputCount = 2;
5732 ASSERT(instr->InputCount() == kInputCount);
5733 LocationSummary* locs = instr->locs();
5734
5735 const DRegister base = EvenDRegisterOf(locs->in(0).fpu_reg());
5736 const DRegister exp = EvenDRegisterOf(locs->in(1).fpu_reg());
5737 const DRegister result = EvenDRegisterOf(locs->out(0).fpu_reg());
5738 const Register temp = locs->temp(0).reg();
5739 const DRegister saved_base = OddDRegisterOf(locs->in(0).fpu_reg());
5740 ASSERT((base == result) && (result != saved_base));
5741
5742 compiler::Label skip_call, try_sqrt, check_base, return_nan;
5743 __ vmovd(saved_base, base);
5744 __ LoadDImmediate(result, 1.0, temp);
5745 // exponent == 0.0 -> return 1.0;
5746 __ vcmpdz(exp);
5747 __ vmstat();
5748 __ b(&check_base, VS); // NaN -> check base.
5749 __ b(&skip_call, EQ); // exp is 0.0, result is 1.0.
5750
5751 // exponent == 1.0 ?
5752 __ vcmpd(exp, result);
5753 __ vmstat();
5754 compiler::Label return_base;
5755 __ b(&return_base, EQ);
5756
5757 // exponent == 2.0 ?
5758 __ LoadDImmediate(DTMP, 2.0, temp);
5759 __ vcmpd(exp, DTMP);
5760 __ vmstat();
5761 compiler::Label return_base_times_2;
5762 __ b(&return_base_times_2, EQ);
5763
5764 // exponent == 3.0 ?
5765 __ LoadDImmediate(DTMP, 3.0, temp);
5766 __ vcmpd(exp, DTMP);
5767 __ vmstat();
5768 __ b(&check_base, NE);
5769
5770 // base_times_3.
5771 __ vmuld(result, saved_base, saved_base);
5772 __ vmuld(result, result, saved_base);
5773 __ b(&skip_call);
5774
5775 __ Bind(&return_base);
5776 __ vmovd(result, saved_base);
5777 __ b(&skip_call);
5778
5779 __ Bind(&return_base_times_2);
5780 __ vmuld(result, saved_base, saved_base);
5781 __ b(&skip_call);
5782
5783 __ Bind(&check_base);
5784 // Note: 'exp' could be NaN.
5785 // base == 1.0 -> return 1.0;
5786 __ vcmpd(saved_base, result);
5787 __ vmstat();
5788 __ b(&return_nan, VS);
5789 __ b(&skip_call, EQ); // base is 1.0, result is 1.0.
5790
5791 __ vcmpd(saved_base, exp);
5792 __ vmstat();
5793 __ b(&try_sqrt, VC); // Neither 'exp' nor 'base' is NaN.
5794
5795 __ Bind(&return_nan);
5796 __ LoadDImmediate(result, NAN, temp);
5797 __ b(&skip_call);
5798
5799 compiler::Label do_pow, return_zero;
5800 __ Bind(&try_sqrt);
5801
5802 // Before calling pow, check if we could use sqrt instead of pow.
5803 __ LoadDImmediate(result, kNegInfinity, temp);
5804
5805 // base == -Infinity -> call pow;
5806 __ vcmpd(saved_base, result);
5807 __ vmstat();
5808 __ b(&do_pow, EQ);
5809
5810 // exponent == 0.5 ?
5811 __ LoadDImmediate(result, 0.5, temp);
5812 __ vcmpd(exp, result);
5813 __ vmstat();
5814 __ b(&do_pow, NE);
5815
5816 // base == 0 -> return 0;
5817 __ vcmpdz(saved_base);
5818 __ vmstat();
5819 __ b(&return_zero, EQ);
5820
5821 __ vsqrtd(result, saved_base);
5822 __ b(&skip_call);
5823
5824 __ Bind(&return_zero);
5825 __ LoadDImmediate(result, 0.0, temp);
5826 __ b(&skip_call);
5827
5828 __ Bind(&do_pow);
5829 __ vmovd(base, saved_base); // Restore base.
5830
5831 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
5832 __ vmovd(D1, D2);
5833 if (TargetCPUFeatures::hardfp_supported()) {
5834 ASSERT(instr->TargetFunction().is_leaf()); // No deopt info needed.
5835 compiler::LeafRuntimeScope rt(compiler->assembler(),
5836 /*frame_size=*/0,
5837 /*preserve_registers=*/false);
5838 rt.Call(instr->TargetFunction(), kInputCount);
5839 } else {
5840 // If the ABI is not "hardfp", then we have to move the double arguments
5841 // to the integer registers, and take the results from the integer
5842 // registers.
5843 compiler::LeafRuntimeScope rt(compiler->assembler(),
5844 /*frame_size=*/0,
5845 /*preserve_registers=*/false);
5846 __ vmovrrd(R0, R1, D0);
5847 __ vmovrrd(R2, R3, D1);
5848 rt.Call(instr->TargetFunction(), kInputCount);
5849 __ vmovdrr(D0, R0, R1);
5850 __ vmovdrr(D1, R2, R3);
5851 }
5852 __ Bind(&skip_call);
5853}
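// Note on the call above: with the hard-float ABI the arguments stay in
// D0/D1 and the result is returned in D0; otherwise the doubles are
// shuttled through R0..R3 around the call, as the else branch does.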
5854
5855void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5856 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5857 InvokeDoublePow(compiler, this);
5858 return;
5859 }
5860
5861 if (InputCount() == 2) {
5862 // Args must be in D0 and D1, so move arg from Q1(== D3:D2) to D1.
5863 __ vmovd(D1, D2);
5864 }
5865 if (TargetCPUFeatures::hardfp_supported()) {
5866 compiler::LeafRuntimeScope rt(compiler->assembler(),
5867 /*frame_size=*/0,
5868 /*preserve_registers=*/false);
5869 rt.Call(TargetFunction(), InputCount());
5870 } else {
5871 // If the ABI is not "hardfp", then we have to move the double arguments
5872 // to the integer registers, and take the results from the integer
5873 // registers.
5874 compiler::LeafRuntimeScope rt(compiler->assembler(),
5875 /*frame_size=*/0,
5876 /*preserve_registers=*/false);
5877 __ vmovrrd(R0, R1, D0);
5878 __ vmovrrd(R2, R3, D1);
5879 rt.Call(TargetFunction(), InputCount());
5880 __ vmovdrr(D0, R0, R1);
5881 __ vmovdrr(D1, R2, R3);
5882 }
5883}
5884
5885LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
5886 bool opt) const {
5887 // Only use this instruction in optimized code.
5888 ASSERT(opt);
5889 const intptr_t kNumInputs = 1;
5890 LocationSummary* summary =
5891 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
5892 if (representation() == kUnboxedDouble) {
5893 if (index() == 0) {
5894 summary->set_in(
5895 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
5896 } else {
5897 ASSERT(index() == 1);
5898 summary->set_in(
5899 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
5900 }
5901 summary->set_out(0, Location::RequiresFpuRegister());
5902 } else {
5903 ASSERT(representation() == kTagged);
5904 if (index() == 0) {
5905 summary->set_in(
5906 0, Location::Pair(Location::RequiresRegister(), Location::Any()));
5907 } else {
5908 ASSERT(index() == 1);
5909 summary->set_in(
5910 0, Location::Pair(Location::Any(), Location::RequiresRegister()));
5911 }
5912 summary->set_out(0, Location::RequiresRegister());
5913 }
5914 return summary;
5915}
5916
5917void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5918 ASSERT(locs()->in(0).IsPairLocation());
5919 PairLocation* pair = locs()->in(0).AsPairLocation();
5920 Location in_loc = pair->At(index());
5921 if (representation() == kUnboxedDouble) {
5922 const QRegister out = locs()->out(0).fpu_reg();
5923 const QRegister in = in_loc.fpu_reg();
5924 __ vmovq(out, in);
5925 } else {
5926 ASSERT(representation() == kTagged);
5927 const Register out = locs()->out(0).reg();
5928 const Register in = in_loc.reg();
5929 __ mov(out, compiler::Operand(in));
5930 }
5931}
5932
5933LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
5934 bool opt) const {
5935 UNREACHABLE();
5936 return NULL;
5937}
5938
5939void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5940 UNREACHABLE();
5941}
5942
5943LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
5944 bool opt) const {
5945 UNREACHABLE();
5946 return NULL;
5947}
5948
5949void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5950 UNREACHABLE();
5951}
5952
5953LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5954 bool opt) const {
5955 const intptr_t kNumInputs = 2;
5956 const intptr_t kNumTemps = 2;
5957 LocationSummary* summary = new (zone)
5958 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5959 summary->set_in(0, Location::RequiresRegister());
5960 summary->set_in(1, Location::RequiresRegister());
5961 summary->set_temp(0, Location::RequiresRegister());
5962 // Request register that overlaps with S0..S31.
5963 summary->set_temp(1, Location::FpuRegisterLocation(Q0));
5964 // Output is a pair of registers.
5965 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5966 Location::RequiresRegister()));
5967 return summary;
5968}
5969
5970void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5972 compiler::Label* deopt =
5973 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5974
5975 const Register left = locs()->in(0).reg();
5976 const Register right = locs()->in(1).reg();
5977 ASSERT(locs()->out(0).IsPairLocation());
5978 PairLocation* pair = locs()->out(0).AsPairLocation();
5979 const Register result_div = pair->At(0).reg();
5980 const Register result_mod = pair->At(1).reg();
5981 if (RangeUtils::CanBeZero(divisor_range())) {
5982 // Handle divide by zero in runtime.
5983 __ cmp(right, compiler::Operand(0));
5984 __ b(deopt, EQ);
5985 }
5986 const Register temp = locs()->temp(0).reg();
5987 const DRegister dtemp = EvenDRegisterOf(locs()->temp(1).fpu_reg());
5988 __ SmiUntag(temp, left);
5989 __ SmiUntag(IP, right);
5990 __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
5991
5992 // Check the corner case of dividing MIN_SMI by -1, in which
5993 // case we cannot tag the result.
5994 __ CompareImmediate(result_div, 0x40000000);
5995 __ b(deopt, EQ);
5996 __ SmiUntag(IP, right);
5997 // result_mod <- left - right * result_div.
5998 __ mls(result_mod, IP, result_div, temp);
5999 __ SmiTag(result_div);
6000 __ SmiTag(result_mod);
6001 // Correct MOD result:
6002 // res = left % right;
6003 // if (res < 0) {
6004 // if (right < 0) {
6005 // res = res - right;
6006 // } else {
6007 // res = res + right;
6008 // }
6009 // }
6010 compiler::Label done;
6011 __ cmp(result_mod, compiler::Operand(0));
6012 __ b(&done, GE);
6013 // Result is negative, adjust it.
6014 __ cmp(right, compiler::Operand(0));
6015 __ sub(result_mod, result_mod, compiler::Operand(right), LT);
6016 __ add(result_mod, result_mod, compiler::Operand(right), GE);
6017 __ Bind(&done);
6018}
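// Example of the MOD correction above: -7 ~/ 3 gives a truncated quotient
// of -2 and a remainder of -1; the remainder is negative and the divisor
// is positive, so 'right' is added and the result becomes 2. For -7 ~/ -3
// the remainder is also -1 and the negative divisor is subtracted, again
// producing 2.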
6019
6020// Should be kept in sync with integers.cc Multiply64Hash
6021static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
6022 const Register result,
6023 const Register value_lo,
6024 const Register value_hi) {
6025 __ LoadImmediate(TMP, compiler::Immediate(0x2d51));
6026 __ umull(result, value_lo, value_lo, TMP); // (lo:result) = lo32 * 0x2d51
6027 __ umull(TMP, value_hi, value_hi, TMP); // (hi:TMP) = hi32 * 0x2d51
6028 __ add(TMP, TMP, compiler::Operand(value_lo));
6029 // (0:hi:TMP:result) is the 128-bit product.
6030 __ eor(result, value_hi, compiler::Operand(result));
6031 __ eor(result, TMP, compiler::Operand(result));
6032 __ AndImmediate(result, result, 0x3fffffff);
6033}
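// Note on the sequence above: after the two umull instructions 'result'
// holds the low word of value * 0x2d51, TMP (after the add) holds the
// middle word, and value_hi holds the top word; the three words are
// xor-ed together and masked to 30 bits so the hash always fits in a Smi.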
6034
6035LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
6036 bool opt) const {
6037 const intptr_t kNumInputs = 1;
6038 const intptr_t kNumTemps = 4;
6039 LocationSummary* summary = new (zone) LocationSummary(
6040 zone, kNumInputs, kNumTemps, LocationSummary::kNativeLeafCall);
6041 summary->set_in(0, Location::RequiresFpuRegister());
6042 summary->set_temp(0, Location::RequiresRegister());
6043 summary->set_temp(1, Location::RegisterLocation(R1));
6044 summary->set_temp(2, Location::RequiresFpuRegister());
6045 summary->set_temp(3, Location::RegisterLocation(R4));
6046 summary->set_out(0, Location::Pair(Location::RegisterLocation(R0),
6047 Location::RegisterLocation(R1)));
6048 return summary;
6049}
6050
6051void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6052 const DRegister value = EvenDRegisterOf(locs()->in(0).fpu_reg());
6053 const Register temp = locs()->temp(0).reg();
6054 const Register temp1 = locs()->temp(1).reg();
6055 ASSERT(temp1 == R1);
6056 const DRegister temp_double = EvenDRegisterOf(locs()->temp(2).fpu_reg());
6057 ASSERT(locs()->temp(3).reg() == R4);
6058 const PairLocation* out_pair = locs()->out(0).AsPairLocation();
6059 Register result = out_pair->At(0).reg();
6060 ASSERT(result == R0);
6061 ASSERT(out_pair->At(1).reg() == R1);
6062
6063 compiler::Label hash_double, hash_double_value, try_convert;
6064
6065 __ vmovrrd(TMP, temp, value);
6066 __ AndImmediate(temp, temp, 0x7FF00000);
6067 __ CompareImmediate(temp, 0x7FF00000);
6068 __ b(&hash_double_value, EQ); // is_infinity or nan
6069
6070 compiler::Label slow_path;
6071 __ Bind(&try_convert);
6072 // value -> temp1 -> temp_double
6073 __ vcvtid(STMP, value);
6074 __ vmovrs(temp1, STMP);
6075 // Check whether temp1 is INT_MAX or INT_MIN, which indicates a failed vcvt.
6076 __ CompareImmediate(temp1, 0xC0000000);
6077 __ b(&slow_path, MI);
6078 __ vmovdr(DTMP, 0, temp1);
6079 __ vcvtdi(temp_double, STMP);
6080
6081 // If value != temp_double, go to hash_double_value.
6082 __ vcmpd(value, temp_double);
6083 __ vmstat();
6084 __ b(&hash_double_value, NE);
6085 // Sign-extend 32-bit [temp1] value to 64-bit pair of (temp:temp1), which
6086 // is used by integer hash code sequence.
6087 __ SignFill(temp, temp1);
6088
6089 compiler::Label hash_integer, done;
6090 {
6091 __ Bind(&hash_integer);
6092 // integer hash of (temp:temp1)
6093 EmitHashIntegerCodeSequence(compiler, result, temp1, temp);
6094 __ b(&done);
6095 }
6096
6097 __ Bind(&slow_path);
6098 // The double value potentially doesn't fit into the Smi range, so
6099 // do the double->int64->double conversion via a runtime call.
6100 __ StoreDToOffset(value, THR,
6102 {
6103 compiler::LeafRuntimeScope rt(compiler->assembler(), /*frame_size=*/0,
6104 /*preserve_registers=*/true);
6105 __ mov(R0, compiler::Operand(THR));
6106 // Check if the double can be represented as an int64; if it can, the
6107 // runtime stores the converted value back into the thread slot read below.
6108 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
6109 __ mov(R4, compiler::Operand(R0));
6110 }
6111 __ LoadFromOffset(temp1, THR,
6113 __ LoadFromOffset(temp, THR,
6116 __ cmp(R4, compiler::Operand(0));
6117 __ b(&hash_integer, NE);
6118 __ b(&hash_double);
6119
6120 __ Bind(&hash_double_value);
6121 __ vmovrrd(temp, temp1, value);
6122
6123 __ Bind(&hash_double);
6124 // Convert the double bits (temp:temp1) to a hash code that fits in a Smi.
6125 __ eor(result, temp1, compiler::Operand(temp));
6126 __ AndImmediate(result, result, compiler::target::kSmiMax);
6127
6128 __ Bind(&done);
6129 __ mov(R1, compiler::Operand(0));
6130}
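// Summary of the flow above: doubles whose value converts exactly to an
// integer are hashed through the shared integer sequence (so they agree
// with the hash of the corresponding int), while infinities, NaNs and
// non-integral values fall through to hash_double, which mixes the raw
// bit pattern into a Smi-sized hash.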
6131
6132LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
6133 bool opt) const {
6134 const intptr_t kNumInputs = 1;
6135 const intptr_t kNumTemps = 1;
6136 LocationSummary* summary = new (zone)
6137 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6138 summary->set_in(0, Location::WritableRegister());
6139 summary->set_out(0, Location::RequiresRegister());
6140 summary->set_temp(0, Location::RequiresRegister());
6141 return summary;
6142}
6143
6144void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6145 Register value = locs()->in(0).reg();
6146 Register result = locs()->out(0).reg();
6147 Register temp = locs()->temp(0).reg();
6148
6149 if (smi_) {
6150 __ SmiUntag(value);
6151 __ SignFill(temp, value);
6152 } else {
6153 __ LoadFieldFromOffset(temp, value,
6154 Mint::value_offset() + compiler::target::kWordSize);
6155 __ LoadFieldFromOffset(value, value, Mint::value_offset());
6156 }
6157 EmitHashIntegerCodeSequence(compiler, result, value, temp);
6158 __ SmiTag(result);
6159}
6160
6161LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6162 comparison()->InitializeLocationSummary(zone, opt);
6163 // Branches don't produce a result.
6164 comparison()->locs()->set_out(0, Location::NoLocation());
6165 return comparison()->locs();
6166}
6167
6168void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6169 comparison()->EmitBranchCode(compiler, this);
6170}
6171
6172LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
6173 bool opt) const {
6174 const intptr_t kNumInputs = 1;
6175 const bool need_mask_temp = IsBitTest();
6176 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
6177 LocationSummary* summary = new (zone)
6178 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6179 summary->set_in(0, Location::RequiresRegister());
6180 if (!IsNullCheck()) {
6181 summary->set_temp(0, Location::RequiresRegister());
6182 if (need_mask_temp) {
6183 summary->set_temp(1, Location::RequiresRegister());
6184 }
6185 }
6186 return summary;
6187}
6188
6189void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
6190 compiler::Label* deopt) {
6191 __ CompareObject(locs()->in(0).reg(), Object::null_object());
6193 Condition cond = IsDeoptIfNull() ? EQ : NE;
6194 __ b(deopt, cond);
6195}
6196
6197void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
6198 intptr_t min,
6199 intptr_t max,
6200 intptr_t mask,
6201 compiler::Label* deopt) {
6202 Register biased_cid = locs()->temp(0).reg();
6203 __ AddImmediate(biased_cid, -min);
6204 __ CompareImmediate(biased_cid, max - min);
6205 __ b(deopt, HI);
6206
6207 Register bit_reg = locs()->temp(1).reg();
6208 __ LoadImmediate(bit_reg, 1);
6209 __ Lsl(bit_reg, bit_reg, biased_cid);
6210 __ TestImmediate(bit_reg, mask);
6211 __ b(deopt, EQ);
6212}
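// Example of the bit test above: with min = 3, max = 10 and mask = 0b1001,
// only cids 3 and 6 have their bit set in the mask; any other cid in the
// range leaves the AND result zero and takes the EQ branch to deopt.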
6213
6214int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
6215 int bias,
6216 intptr_t cid_start,
6217 intptr_t cid_end,
6218 bool is_last,
6219 compiler::Label* is_ok,
6220 compiler::Label* deopt,
6221 bool use_near_jump) {
6222 Register biased_cid = locs()->temp(0).reg();
6223 Condition no_match, match;
6224 if (cid_start == cid_end) {
6225 __ CompareImmediate(biased_cid, cid_start - bias);
6226 no_match = NE;
6227 match = EQ;
6228 } else {
6229 // For class ID ranges use a subtract followed by an unsigned
6230 // comparison to check both ends of the ranges with one comparison.
6231 __ AddImmediate(biased_cid, bias - cid_start);
6232 bias = cid_start;
6233 __ CompareImmediate(biased_cid, cid_end - cid_start);
6234 no_match = HI; // Unsigned higher.
6235 match = LS; // Unsigned lower or same.
6236 }
6237 if (is_last) {
6238 __ b(deopt, no_match);
6239 } else {
6240 __ b(is_ok, match);
6241 }
6242 return bias;
6243}
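// Example of the range check above: testing a contiguous cid range
// [start, end] becomes (cid - start) <= (end - start) as an unsigned
// comparison; cids below 'start' wrap around to large unsigned values, so
// the single HI/LS test rejects both ends of the range at once.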
6244
6245LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
6246 bool opt) const {
6247 const intptr_t kNumInputs = 1;
6248 const intptr_t kNumTemps = 0;
6249 LocationSummary* summary = new (zone)
6250 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6251 summary->set_in(0, Location::RequiresRegister());
6252 return summary;
6253}
6254
6255void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6256 const Register value = locs()->in(0).reg();
6257 compiler::Label* deopt =
6258 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
6259 __ BranchIfNotSmi(value, deopt);
6260}
6261
6262void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6263 Register value_reg = locs()->in(0).reg();
6264 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
6265 // in order to be able to allocate it in a register.
6266 __ CompareObject(value_reg, Object::null_object());
6267
6268 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
6269 Code& stub = Code::ZoneHandle(
6270 compiler->zone(),
6271 NullErrorSlowPath::GetStub(compiler, exception_type(), live_fpu_regs));
6272 const bool using_shared_stub = locs()->call_on_shared_slow_path();
6273
6274 if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
6275 compiler->flow_graph().graph_entry()->NeedsFrame()) {
6276 __ GenerateUnRelocatedPcRelativeCall(EQUAL);
6277 compiler->AddPcRelativeCallStubTarget(stub);
6278
6279 // We use the "extended" environment which has the locations updated to
6280 // reflect live registers being saved in the shared spilling stubs (see
6281 // the stub above).
6282 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
6283 compiler->EmitCallsiteMetadata(source(), deopt_id(),
6284 UntaggedPcDescriptors::kOther, locs(),
6285 extended_env);
6287 return;
6288 }
6289
6290 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
6291 compiler->AddSlowPathCode(slow_path);
6292
6293 __ BranchIf(EQUAL, slow_path->entry_label());
6294}
6295
6296LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
6297 bool opt) const {
6298 const intptr_t kNumInputs = 1;
6299 const intptr_t kNumTemps = 0;
6300 LocationSummary* summary = new (zone)
6301 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6302 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
6303 : Location::WritableRegister());
6304 return summary;
6305}
6306
6307void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6308 Register value = locs()->in(0).reg();
6309 compiler::Label* deopt =
6310 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
6311 if (cids_.IsSingleCid()) {
6312 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.cid_start));
6313 __ b(deopt, NE);
6314 } else {
6315 __ AddImmediate(value, -compiler::target::ToRawSmi(cids_.cid_start));
6316 __ CompareImmediate(value, compiler::target::ToRawSmi(cids_.Extent()));
6317 __ b(deopt, HI); // Unsigned higher.
6318 }
6319}
6320
6321LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
6322 bool opt) const {
6323 const intptr_t kNumInputs = 2;
6324 const intptr_t kNumTemps = 0;
6325 LocationSummary* locs = new (zone)
6326 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6327 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
6328 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
6329 return locs;
6330}
6331
6332void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6333 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
6334 compiler::Label* deopt =
6335 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
6336
6337 Location length_loc = locs()->in(kLengthPos);
6338 Location index_loc = locs()->in(kIndexPos);
6339
6340 if (length_loc.IsConstant() && index_loc.IsConstant()) {
6341#ifdef DEBUG
6342 const int32_t length = compiler::target::SmiValue(length_loc.constant());
6343 const int32_t index = compiler::target::SmiValue(index_loc.constant());
6344 ASSERT((length <= index) || (index < 0));
6345#endif
6346 // Unconditionally deoptimize for constant bounds checks because they
6347 // occur only when the index is out-of-bounds.
6348 __ b(deopt);
6349 return;
6350 }
6351
6352 const intptr_t index_cid = index()->Type()->ToCid();
6353 if (index_loc.IsConstant()) {
6354 const Register length = length_loc.reg();
6355 __ CompareImmediate(length,
6356 compiler::target::ToRawSmi(index_loc.constant()));
6357 __ b(deopt, LS);
6358 } else if (length_loc.IsConstant()) {
6359 const Register index = index_loc.reg();
6360 if (index_cid != kSmiCid) {
6361 __ BranchIfNotSmi(index, deopt);
6362 }
6363 if (compiler::target::SmiValue(length_loc.constant()) ==
6364 compiler::target::kSmiMax) {
6365 __ tst(index, compiler::Operand(index));
6366 __ b(deopt, MI);
6367 } else {
6368 __ CompareImmediate(index,
6369 compiler::target::ToRawSmi(length_loc.constant()));
6370 __ b(deopt, CS);
6371 }
6372 } else {
6373 const Register length = length_loc.reg();
6374 const Register index = index_loc.reg();
6375 if (index_cid != kSmiCid) {
6376 __ BranchIfNotSmi(index, deopt);
6377 }
6378 __ cmp(index, compiler::Operand(length));
6379 __ b(deopt, CS);
6380 }
6381}
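// Note on the final comparison above: both index and length are tagged
// Smis, and branching on CS (unsigned >=) also rejects negative indices,
// whose tagged representation compares as a very large unsigned value.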
6382
6383LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
6384 bool opt) const {
6385 const intptr_t kNumInputs = 1;
6386 const intptr_t kNumTemps = 0;
6387 LocationSummary* locs = new (zone) LocationSummary(
6388 zone, kNumInputs, kNumTemps,
6392 return locs;
6393}
6394
6395void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6396 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
6397 compiler->AddSlowPathCode(slow_path);
6398 __ ldrb(TMP, compiler::FieldAddress(locs()->in(0).reg(),
6399 compiler::target::Object::tags_offset()));
6400 // In the first byte.
6403 __ b(slow_path->entry_label(), NOT_ZERO);
6404}
6405
6406LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6407 bool opt) const {
6408 const intptr_t kNumInputs = 2;
6409 const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
6410 LocationSummary* summary = new (zone)
6411 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6412 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6413 Location::RequiresRegister()));
6414
6415 compiler::Operand o;
6416 if (CanBePairOfImmediateOperands(right(), &o, &o) &&
6417 (op_kind() == Token::kBIT_AND || op_kind() == Token::kBIT_OR ||
6418 op_kind() == Token::kBIT_XOR || op_kind() == Token::kADD ||
6419 op_kind() == Token::kSUB)) {
6420 summary->set_in(1, Location::Constant(right()->definition()->AsConstant()));
6421 } else {
6422 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6423 Location::RequiresRegister()));
6424 }
6425 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6426 Location::RequiresRegister()));
6427 if (op_kind() == Token::kMUL) {
6428 summary->set_temp(0, Location::RequiresRegister());
6429 }
6430 return summary;
6431}
6432
6433void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6434 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6435 Register left_lo = left_pair->At(0).reg();
6436 Register left_hi = left_pair->At(1).reg();
6437 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6438 Register out_lo = out_pair->At(0).reg();
6439 Register out_hi = out_pair->At(1).reg();
6440 ASSERT(!can_overflow());
6442
6443 compiler::Operand right_lo, right_hi;
6444 if (locs()->in(1).IsConstant()) {
6445 const bool ok = CanBePairOfImmediateOperands(locs()->in(1).constant(),
6446 &right_lo, &right_hi);
6447 ASSERT(ok);
6448 } else {
6449 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6450 right_lo = compiler::Operand(right_pair->At(0).reg());
6451 right_hi = compiler::Operand(right_pair->At(1).reg());
6452 }
6453
6454 switch (op_kind()) {
6455 case Token::kBIT_AND: {
6456 __ and_(out_lo, left_lo, compiler::Operand(right_lo));
6457 __ and_(out_hi, left_hi, compiler::Operand(right_hi));
6458 break;
6459 }
6460 case Token::kBIT_OR: {
6461 __ orr(out_lo, left_lo, compiler::Operand(right_lo));
6462 __ orr(out_hi, left_hi, compiler::Operand(right_hi));
6463 break;
6464 }
6465 case Token::kBIT_XOR: {
6466 __ eor(out_lo, left_lo, compiler::Operand(right_lo));
6467 __ eor(out_hi, left_hi, compiler::Operand(right_hi));
6468 break;
6469 }
6470 case Token::kADD: {
6471 __ adds(out_lo, left_lo, compiler::Operand(right_lo));
6472 __ adcs(out_hi, left_hi, compiler::Operand(right_hi));
6473 break;
6474 }
6475 case Token::kSUB: {
6476 __ subs(out_lo, left_lo, compiler::Operand(right_lo));
6477 __ sbcs(out_hi, left_hi, compiler::Operand(right_hi));
6478 break;
6479 }
6480 case Token::kMUL: {
6481 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6482 Register right_lo_reg = right_pair->At(0).reg();
6483 Register right_hi_reg = right_pair->At(1).reg();
6484 // Compute 64-bit a * b as:
6485 // a_l * b_l + (a_h * b_l + a_l * b_h) << 32
6486 Register temp = locs()->temp(0).reg();
6487 __ mul(temp, left_lo, right_hi_reg);
6488 __ mla(out_hi, left_hi, right_lo_reg, temp);
6489 __ umull(out_lo, temp, left_lo, right_lo_reg);
6490 __ add(out_hi, out_hi, compiler::Operand(temp));
6491 break;
6492 }
6493 default:
6494 UNREACHABLE();
6495 }
6496}
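// Mapping of the kMUL sequence to the formula above: mul/mla accumulate
// the low 32 bits of a_l*b_h and a_h*b_l (only their low words can reach
// the upper half of a 64-bit result), umull forms the full a_l*b_l
// product, and its high half is then added into out_hi.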
6497
6498static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
6499 Token::Kind op_kind,
6500 Register out_lo,
6501 Register out_hi,
6502 Register left_lo,
6503 Register left_hi,
6504 const Object& right) {
6505 const int64_t shift = Integer::Cast(right).AsInt64Value();
6506 ASSERT(shift >= 0);
6507
6508 switch (op_kind) {
6509 case Token::kSHR: {
6510 if (shift < 32) {
6511 __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
6512 __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
6513 __ Asr(out_hi, left_hi, compiler::Operand(shift));
6514 } else {
6515 if (shift == 32) {
6516 __ mov(out_lo, compiler::Operand(left_hi));
6517 } else if (shift < 64) {
6518 __ Asr(out_lo, left_hi, compiler::Operand(shift - 32));
6519 } else {
6520 __ Asr(out_lo, left_hi, compiler::Operand(31));
6521 }
6522 __ Asr(out_hi, left_hi, compiler::Operand(31));
6523 }
6524 break;
6525 }
6526 case Token::kUSHR: {
6527 ASSERT(shift < 64);
6528 if (shift < 32) {
6529 __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
6530 __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
6531 __ Lsr(out_hi, left_hi, compiler::Operand(shift));
6532 } else {
6533 if (shift == 32) {
6534 __ mov(out_lo, compiler::Operand(left_hi));
6535 } else {
6536 __ Lsr(out_lo, left_hi, compiler::Operand(shift - 32));
6537 }
6538 __ mov(out_hi, compiler::Operand(0));
6539 }
6540 break;
6541 }
6542 case Token::kSHL: {
6543 ASSERT(shift < 64);
6544 if (shift < 32) {
6545 __ Lsr(out_hi, left_lo, compiler::Operand(32 - shift));
6546 __ orr(out_hi, out_hi, compiler::Operand(left_hi, LSL, shift));
6547 __ Lsl(out_lo, left_lo, compiler::Operand(shift));
6548 } else {
6549 if (shift == 32) {
6550 __ mov(out_hi, compiler::Operand(left_lo));
6551 } else {
6552 __ Lsl(out_hi, left_lo, compiler::Operand(shift - 32));
6553 }
6554 __ mov(out_lo, compiler::Operand(0));
6555 }
6556 break;
6557 }
6558 default:
6559 UNREACHABLE();
6560 }
6561}
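// Example: a constant kSHR by 40 takes the 'shift < 64' arm above, so
// out_lo receives left_hi >> 8 (arithmetic) and out_hi is filled with the
// sign bits of left_hi.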
6562
6563static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
6564 Token::Kind op_kind,
6565 Register out_lo,
6566 Register out_hi,
6567 Register left_lo,
6568 Register left_hi,
6569 Register right) {
6570 switch (op_kind) {
6571 case Token::kSHR: {
6572 __ rsbs(IP, right, compiler::Operand(32));
6573 __ sub(IP, right, compiler::Operand(32), MI);
6574 __ mov(out_lo, compiler::Operand(left_hi, ASR, IP), MI);
6575 __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
6576 __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
6577 __ mov(out_hi, compiler::Operand(left_hi, ASR, right));
6578 break;
6579 }
6580 case Token::kUSHR: {
6581 __ rsbs(IP, right, compiler::Operand(32));
6582 __ sub(IP, right, compiler::Operand(32), MI);
6583 __ mov(out_lo, compiler::Operand(left_hi, LSR, IP), MI);
6584 __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
6585 __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
6586 __ mov(out_hi, compiler::Operand(left_hi, LSR, right));
6587 break;
6588 }
6589 case Token::kSHL: {
6590 __ rsbs(IP, right, compiler::Operand(32));
6591 __ sub(IP, right, compiler::Operand(32), MI);
6592 __ mov(out_hi, compiler::Operand(left_lo, LSL, IP), MI);
6593 __ mov(out_hi, compiler::Operand(left_hi, LSL, right), PL);
6594 __ orr(out_hi, out_hi, compiler::Operand(left_lo, LSR, IP), PL);
6595 __ mov(out_lo, compiler::Operand(left_lo, LSL, right));
6596 break;
6597 }
6598 default:
6599 UNREACHABLE();
6600 }
6601}
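// Note on the register-shift sequences above: IP is first set to
// 32 - shift; if the shift amount exceeds 32 this goes negative (MI) and
// IP is recomputed as shift - 32, so the conditionally executed moves
// select between the within-word and across-word forms without branches.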
6602
6603static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6604 Token::Kind op_kind,
6605 Register out,
6606 Register left,
6607 const Object& right) {
6608 const int64_t shift = Integer::Cast(right).AsInt64Value();
6609 ASSERT(shift >= 0);
6610 if (shift >= 32) {
6611 __ LoadImmediate(out, 0);
6612 } else {
6613 switch (op_kind) {
6614 case Token::kSHR:
6615 case Token::kUSHR:
6616 __ Lsr(out, left, compiler::Operand(shift));
6617 break;
6618 case Token::kSHL:
6619 __ Lsl(out, left, compiler::Operand(shift));
6620 break;
6621 default:
6622 UNREACHABLE();
6623 }
6624 }
6625}
6626
6627static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
6628 Token::Kind op_kind,
6629 Register out,
6630 Register left,
6631 Register right) {
6632 switch (op_kind) {
6633 case Token::kSHR:
6634 case Token::kUSHR:
6635 __ Lsr(out, left, right);
6636 break;
6637 case Token::kSHL:
6638 __ Lsl(out, left, right);
6639 break;
6640 default:
6641 UNREACHABLE();
6642 }
6643}
6644
6645class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6646 public:
6647 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
6648 : ThrowErrorSlowPathCode(instruction,
6649 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6650
6651 const char* name() override { return "int64 shift"; }
6652
6653 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6654 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
6655 Register left_hi = left_pair->At(1).reg();
6656 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6657 Register right_lo = right_pair->At(0).reg();
6658 Register right_hi = right_pair->At(1).reg();
6659 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
6660 Register out_lo = out_pair->At(0).reg();
6661 Register out_hi = out_pair->At(1).reg();
6662
6663 __ CompareImmediate(right_hi, 0);
6664
6665 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6666 case Token::kSHR:
6667 __ Asr(out_hi, left_hi,
6668 compiler::Operand(compiler::target::kBitsPerWord - 1), GE);
6669 __ mov(out_lo, compiler::Operand(out_hi), GE);
6670 break;
6671 case Token::kUSHR:
6672 case Token::kSHL: {
6673 __ LoadImmediate(out_lo, 0, GE);
6674 __ LoadImmediate(out_hi, 0, GE);
6675 break;
6676 }
6677 default:
6678 UNREACHABLE();
6679 }
6680
6681 __ b(exit_label(), GE);
6682
6683 // Can't pass unboxed int64 value directly to runtime call, as all
6684 // arguments are expected to be tagged (boxed).
6685 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6686 // TODO(dartbug.com/33549): Clean this up when unboxed values
6687 // could be passed as arguments.
6688 __ StoreToOffset(right_lo, THR,
6690 __ StoreToOffset(right_hi, THR,
6693 }
6694};
6695
6696LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6697 bool opt) const {
6698 const intptr_t kNumInputs = 2;
6699 const intptr_t kNumTemps = 0;
6700 LocationSummary* summary = new (zone) LocationSummary(
6701 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6702 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6703 Location::RequiresRegister()));
6704 if (RangeUtils::IsPositive(shift_range()) &&
6705 right()->definition()->IsConstant()) {
6706 ConstantInstr* constant = right()->definition()->AsConstant();
6707 summary->set_in(1, Location::Constant(constant));
6708 } else {
6709 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6710 Location::RequiresRegister()));
6711 }
6712 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6713 Location::RequiresRegister()));
6714 return summary;
6715}
6716
6717void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6718 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6719 Register left_lo = left_pair->At(0).reg();
6720 Register left_hi = left_pair->At(1).reg();
6721 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6722 Register out_lo = out_pair->At(0).reg();
6723 Register out_hi = out_pair->At(1).reg();
6724 ASSERT(!can_overflow());
6725
6726 if (locs()->in(1).IsConstant()) {
6727 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6728 left_hi, locs()->in(1).constant());
6729 } else {
6730 // Code for a variable shift amount (or constant that throws).
6731 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6732 Register right_lo = right_pair->At(0).reg();
6733 Register right_hi = right_pair->At(1).reg();
6734
6735 // Jump to a slow path if shift is larger than 63 or less than 0.
6736 ShiftInt64OpSlowPath* slow_path = nullptr;
6737 if (!IsShiftCountInRange()) {
6738 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6739 compiler->AddSlowPathCode(slow_path);
6740 __ CompareImmediate(right_hi, 0);
6741 __ b(slow_path->entry_label(), NE);
6742 __ CompareImmediate(right_lo, kShiftCountLimit);
6743 __ b(slow_path->entry_label(), HI);
6744 }
6745
6746 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6747 left_hi, right_lo);
6748
6749 if (slow_path != nullptr) {
6750 __ Bind(slow_path->exit_label());
6751 }
6752 }
6753}
6754
6755LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6756 Zone* zone,
6757 bool opt) const {
6758 const intptr_t kNumInputs = 2;
6759 const intptr_t kNumTemps = 0;
6760 LocationSummary* summary = new (zone)
6761 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6762 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6763 Location::RequiresRegister()));
6764 summary->set_in(1, LocationWritableRegisterOrSmiConstant(right()));
6765 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6766 Location::RequiresRegister()));
6767 return summary;
6768}
6769
6770void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6771 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6772 Register left_lo = left_pair->At(0).reg();
6773 Register left_hi = left_pair->At(1).reg();
6774 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6775 Register out_lo = out_pair->At(0).reg();
6776 Register out_hi = out_pair->At(1).reg();
6777 ASSERT(!can_overflow());
6778
6779 if (locs()->in(1).IsConstant()) {
6780 EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
6781 left_hi, locs()->in(1).constant());
6782 } else {
6783 // Code for a variable shift amount.
6784 Register shift = locs()->in(1).reg();
6785 __ SmiUntag(shift);
6786
6787 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
6788 if (!IsShiftCountInRange()) {
6790 compiler::Label* deopt =
6791 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6792
6793 __ CompareImmediate(shift, kShiftCountLimit);
6794 __ b(deopt, HI);
6795 }
6796
6797 EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
6798 left_hi, shift);
6799 }
6800}
6801
6802class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
6803 public:
6804 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
6805 : ThrowErrorSlowPathCode(instruction,
6806 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6807
6808 const char* name() override { return "uint32 shift"; }
6809
6810 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6811 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
6812 Register right_lo = right_pair->At(0).reg();
6813 Register right_hi = right_pair->At(1).reg();
6814 Register out = instruction()->locs()->out(0).reg();
6815
6816 __ CompareImmediate(right_hi, 0);
6817 __ LoadImmediate(out, 0, GE);
6818 __ b(exit_label(), GE);
6819
6820 // Can't pass unboxed int64 value directly to runtime call, as all
6821 // arguments are expected to be tagged (boxed).
6822 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6823 // TODO(dartbug.com/33549): Clean this up when unboxed values
6824 // could be passed as arguments.
6825 __ StoreToOffset(right_lo, THR,
6827 __ StoreToOffset(right_hi, THR,
6830 }
6831};
6832
6833LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6834 bool opt) const {
6835 const intptr_t kNumInputs = 2;
6836 const intptr_t kNumTemps = 0;
6837 LocationSummary* summary = new (zone) LocationSummary(
6838 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6839 summary->set_in(0, Location::RequiresRegister());
6840 if (RangeUtils::IsPositive(shift_range()) &&
6841 right()->definition()->IsConstant()) {
6842 ConstantInstr* constant = right()->definition()->AsConstant();
6843 summary->set_in(1, Location::Constant(constant));
6844 } else {
6845 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
6846 Location::RequiresRegister()));
6847 }
6848 summary->set_out(0, Location::RequiresRegister());
6849 return summary;
6850}
6851
6852void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6853 Register left = locs()->in(0).reg();
6854 Register out = locs()->out(0).reg();
6855
6856 ASSERT(left != out);
6857
6858 if (locs()->in(1).IsConstant()) {
6859 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6860 locs()->in(1).constant());
6861 } else {
6862 // Code for a variable shift amount (or constant that throws).
6863 PairLocation* right_pair = locs()->in(1).AsPairLocation();
6864 Register right_lo = right_pair->At(0).reg();
6865 Register right_hi = right_pair->At(1).reg();
6866
6867 // Jump to a slow path if shift count is > 31 or negative.
6868 ShiftUint32OpSlowPath* slow_path = nullptr;
6869 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6870 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6871 compiler->AddSlowPathCode(slow_path);
6872
6873 __ CompareImmediate(right_hi, 0);
6874 __ b(slow_path->entry_label(), NE);
6875 __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
6876 __ b(slow_path->entry_label(), HI);
6877 }
6878
6879 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
6880
6881 if (slow_path != nullptr) {
6882 __ Bind(slow_path->exit_label());
6883 }
6884 }
6885}
6886
6887LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6888 Zone* zone,
6889 bool opt) const {
6890 const intptr_t kNumInputs = 2;
6891 const intptr_t kNumTemps = 1;
6892 LocationSummary* summary = new (zone)
6893 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6894 summary->set_in(0, Location::RequiresRegister());
6895 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
6896 summary->set_temp(0, Location::RequiresRegister());
6897 summary->set_out(0, Location::RequiresRegister());
6898 return summary;
6899}
6900
6901void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6902 FlowGraphCompiler* compiler) {
6903 Register left = locs()->in(0).reg();
6904 Register out = locs()->out(0).reg();
6905 Register temp = locs()->temp(0).reg();
6906 ASSERT(left != out);
6907
6908 if (locs()->in(1).IsConstant()) {
6909 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
6910 locs()->in(1).constant());
6911 } else {
6912 Register right = locs()->in(1).reg();
6913 const bool shift_count_in_range =
6914 IsShiftCountInRange(kUint32ShiftCountLimit);
6915
6916 __ SmiUntag(temp, right);
6917 right = temp;
6918
6919 // Deopt if shift count is negative.
6920 if (!shift_count_in_range) {
6922 compiler::Label* deopt =
6923 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6924
6925 __ CompareImmediate(right, 0);
6926 __ b(deopt, LT);
6927 }
6928
6929 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
6930
6931 if (!shift_count_in_range) {
6932 __ CompareImmediate(right, kUint32ShiftCountLimit);
6933 __ LoadImmediate(out, 0, HI);
6934 }
6935 }
6936}
6937
6938LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6939 bool opt) const {
6940 const intptr_t kNumInputs = 1;
6941 const intptr_t kNumTemps = 0;
6942 LocationSummary* summary = new (zone)
6943 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6944 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6945 Location::RequiresRegister()));
6946 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6947 Location::RequiresRegister()));
6948 return summary;
6949}
6950
6951void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6952 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6953 Register left_lo = left_pair->At(0).reg();
6954 Register left_hi = left_pair->At(1).reg();
6955
6956 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6957 Register out_lo = out_pair->At(0).reg();
6958 Register out_hi = out_pair->At(1).reg();
6959
6960 switch (op_kind()) {
6961 case Token::kBIT_NOT:
6962 __ mvn_(out_lo, compiler::Operand(left_lo));
6963 __ mvn_(out_hi, compiler::Operand(left_hi));
6964 break;
6965 case Token::kNEGATE:
6966 __ rsbs(out_lo, left_lo, compiler::Operand(0));
6967 __ sbc(out_hi, out_hi, compiler::Operand(out_hi));
6968 __ sub(out_hi, out_hi, compiler::Operand(left_hi));
6969 break;
6970 default:
6971 UNREACHABLE();
6972 }
6973}
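// Note on the kNEGATE sequence above: rsbs negates the low word and leaves
// the carry clear exactly when a borrow into the high word is needed; sbc
// then materializes 0 or -1 in out_hi, and subtracting left_hi completes
// the two's-complement negation of the 64-bit pair.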
6974
6975LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6976 bool opt) const {
6977 const intptr_t kNumInputs = 2;
6978 const intptr_t kNumTemps = 0;
6979 LocationSummary* summary = new (zone)
6980 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6981 summary->set_in(0, Location::RequiresRegister());
6982 summary->set_in(1, Location::RequiresRegister());
6983 summary->set_out(0, Location::RequiresRegister());
6984 return summary;
6985}
6986
6987void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6988 Register left = locs()->in(0).reg();
6989 Register right = locs()->in(1).reg();
6990 Register out = locs()->out(0).reg();
6991 ASSERT(out != left);
6992 switch (op_kind()) {
6993 case Token::kBIT_AND:
6994 __ and_(out, left, compiler::Operand(right));
6995 break;
6996 case Token::kBIT_OR:
6997 __ orr(out, left, compiler::Operand(right));
6998 break;
6999 case Token::kBIT_XOR:
7000 __ eor(out, left, compiler::Operand(right));
7001 break;
7002 case Token::kADD:
7003 __ add(out, left, compiler::Operand(right));
7004 break;
7005 case Token::kSUB:
7006 __ sub(out, left, compiler::Operand(right));
7007 break;
7008 case Token::kMUL:
7009 __ mul(out, left, right);
7010 break;
7011 default:
7012 UNREACHABLE();
7013 }
7014}
7015
7016LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
7017 bool opt) const {
7018 const intptr_t kNumInputs = 1;
7019 const intptr_t kNumTemps = 0;
7020 LocationSummary* summary = new (zone)
7021 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7022 summary->set_in(0, Location::RequiresRegister());
7023 summary->set_out(0, Location::RequiresRegister());
7024 return summary;
7025}
7026
7027void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7028 Register left = locs()->in(0).reg();
7029 Register out = locs()->out(0).reg();
7030 ASSERT(left != out);
7031
7032 ASSERT(op_kind() == Token::kBIT_NOT);
7033
7034 __ mvn_(out, compiler::Operand(left));
7035}
7036
7037LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
7038 bool opt) const {
7039 const intptr_t kNumInputs = 1;
7040 const intptr_t kNumTemps = 0;
7041 LocationSummary* summary = new (zone)
7042 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7043 if (from() == kUntagged || to() == kUntagged) {
7044 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
7045 (from() == kUntagged && to() == kUnboxedUint32) ||
7046 (from() == kUnboxedInt32 && to() == kUntagged) ||
7047 (from() == kUnboxedUint32 && to() == kUntagged));
7049 summary->set_in(0, Location::RequiresRegister());
7050 summary->set_out(0, Location::SameAsFirstInput());
7051 } else if (from() == kUnboxedInt64) {
7052 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7053 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7054 Location::RequiresRegister()));
7055 summary->set_out(0, Location::RequiresRegister());
7056 } else if (to() == kUnboxedInt64) {
7057 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7058 summary->set_in(0, Location::RequiresRegister());
7059 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7060 Location::RequiresRegister()));
7061 } else {
7062 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7063 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
7064 summary->set_in(0, Location::RequiresRegister());
7065 summary->set_out(0, Location::SameAsFirstInput());
7066 }
7067 return summary;
7068}
7069
7070void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7071 const bool is_nop_conversion =
7072 (from() == kUntagged && to() == kUnboxedInt32) ||
7073 (from() == kUntagged && to() == kUnboxedUint32) ||
7074 (from() == kUnboxedInt32 && to() == kUntagged) ||
7075 (from() == kUnboxedUint32 && to() == kUntagged);
7076 if (is_nop_conversion) {
7077 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
7078 return;
7079 }
7080
7081 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
7082 const Register out = locs()->out(0).reg();
7083 // Representations are bitwise equivalent.
7084 ASSERT(out == locs()->in(0).reg());
7085 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
7086 const Register out = locs()->out(0).reg();
7087 // Representations are bitwise equivalent.
7088 ASSERT(out == locs()->in(0).reg());
7089 if (CanDeoptimize()) {
7090 compiler::Label* deopt =
7091 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
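// A uint32 value only fits in a signed int32 when its sign bit is clear;
// deoptimize if the value is negative.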
7092 __ tst(out, compiler::Operand(out));
7093 __ b(deopt, MI);
7094 }
7095 } else if (from() == kUnboxedInt64) {
7096 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
7097 PairLocation* in_pair = locs()->in(0).AsPairLocation();
7098 Register in_lo = in_pair->At(0).reg();
7099 Register in_hi = in_pair->At(1).reg();
7100 Register out = locs()->out(0).reg();
7101 // Copy low word.
7102 __ mov(out, compiler::Operand(in_lo));
7103 if (CanDeoptimize()) {
7104 compiler::Label* deopt =
7105 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
7106 ASSERT(to() == kUnboxedInt32);
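// The value fits in int32 only if the high word equals the sign
// extension of the low word; deoptimize otherwise.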
7107 __ cmp(in_hi,
7108 compiler::Operand(in_lo, ASR, compiler::target::kBitsPerWord - 1));
7109 __ b(deopt, NE);
7110 }
7111 } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
7112 ASSERT(to() == kUnboxedInt64);
7113 Register in = locs()->in(0).reg();
7114 PairLocation* out_pair = locs()->out(0).AsPairLocation();
7115 Register out_lo = out_pair->At(0).reg();
7116 Register out_hi = out_pair->At(1).reg();
7117 // Copy low word.
7118 __ mov(out_lo, compiler::Operand(in));
7119 if (from() == kUnboxedUint32) {
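// Zero-extend: XORing the register with itself clears the high word.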
7120 __ eor(out_hi, out_hi, compiler::Operand(out_hi));
7121 } else {
7122 ASSERT(from() == kUnboxedInt32);
7123 __ mov(out_hi,
7124 compiler::Operand(in, ASR, compiler::target::kBitsPerWord - 1));
7125 }
7126 } else {
7127 UNREACHABLE();
7128 }
7129}
7130
7131LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7132 LocationSummary* summary =
7133 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
7134 /*num_temps=*/0, LocationSummary::kNoCall);
7135 switch (from()) {
7136 case kUnboxedInt32:
7137 summary->set_in(0, Location::RequiresRegister());
7138 break;
7139 case kUnboxedInt64:
7140 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
7141 Location::RequiresRegister()));
7142 break;
7143 case kUnboxedFloat:
7144 case kUnboxedDouble:
7145 // Choose an FPU register with corresponding D and S registers.
7146 summary->set_in(0, Location::FpuRegisterLocation(Q0));
7147 break;
7148 default:
7149 UNREACHABLE();
7150 }
7151
7152 switch (to()) {
7153 case kUnboxedInt32:
7154 summary->set_out(0, Location::RequiresRegister());
7155 break;
7156 case kUnboxedInt64:
7157 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
7158 Location::RequiresRegister()));
7159 break;
7160 case kUnboxedFloat:
7161 case kUnboxedDouble:
7162 // Choose an FPU register with corresponding D and S registers.
7163 summary->set_out(0, Location::FpuRegisterLocation(Q0));
7164 break;
7165 default:
7166 UNREACHABLE();
7167 }
7168 return summary;
7169}
7170
7171void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7172 switch (from()) {
7173 case kUnboxedInt32: {
7174 ASSERT(to() == kUnboxedFloat);
7175 const Register from_reg = locs()->in(0).reg();
7176 const FpuRegister to_reg = locs()->out(0).fpu_reg();
7177 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_reg);
7178 break;
7179 }
7180 case kUnboxedFloat: {
7181 ASSERT(to() == kUnboxedInt32);
7182 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7183 const Register to_reg = locs()->out(0).reg();
7184 __ vmovrs(to_reg, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7185 break;
7186 }
7187 case kUnboxedInt64: {
7188 ASSERT(to() == kUnboxedDouble);
7189 const Register from_lo = locs()->in(0).AsPairLocation()->At(0).reg();
7190 const Register from_hi = locs()->in(0).AsPairLocation()->At(1).reg();
7191 const FpuRegister to_reg = locs()->out(0).fpu_reg();
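// Reassemble the 64-bit pattern: the low word goes into the even S
// register and the high word into the odd S register of the target
// D register.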
7192 __ vmovsr(EvenSRegisterOf(EvenDRegisterOf(to_reg)), from_lo);
7193 __ vmovsr(OddSRegisterOf(EvenDRegisterOf(to_reg)), from_hi);
7194 break;
7195 }
7196 case kUnboxedDouble: {
7197 ASSERT(to() == kUnboxedInt64);
7198 const FpuRegister from_reg = locs()->in(0).fpu_reg();
7199 const Register to_lo = locs()->out(0).AsPairLocation()->At(0).reg();
7200 const Register to_hi = locs()->out(0).AsPairLocation()->At(1).reg();
7201 __ vmovrs(to_lo, EvenSRegisterOf(EvenDRegisterOf(from_reg)));
7202 __ vmovrs(to_hi, OddSRegisterOf(EvenDRegisterOf(from_reg)));
7203 break;
7204 }
7205 default:
7206 UNREACHABLE();
7207 }
7208}
7209
7210LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7211 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7212}
7213
7214void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7215 __ Stop(message());
7216}
7217
7218void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7219 BlockEntryInstr* entry = normal_entry();
7220 if (entry != nullptr) {
7221 if (!compiler->CanFallThroughTo(entry)) {
7222 FATAL("Checked function entry must have no offset");
7223 }
7224 } else {
7225 entry = osr_entry();
7226 if (!compiler->CanFallThroughTo(entry)) {
7227 __ b(compiler->GetJumpLabel(entry));
7228 }
7229 }
7230}
7231
7232LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
7233 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
7234}
7235
7236void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7237 if (!compiler->is_optimizing()) {
7238 if (FLAG_reorder_basic_blocks) {
7239 compiler->EmitEdgeCounter(block()->preorder_number());
7240 }
7241 // Add a deoptimization descriptor for deoptimizing instructions that
7242 // may be inserted before this instruction.
7243 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
7244 InstructionSource());
7245 }
7246 if (HasParallelMove()) {
7247 parallel_move()->EmitNativeCode(compiler);
7248 }
7249
7250 // We can fall through if the successor is the next block in the list.
7251 // Otherwise, we need a jump.
7252 if (!compiler->CanFallThroughTo(successor())) {
7253 __ b(compiler->GetJumpLabel(successor()));
7254 }
7255}
7256
7257LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
7258 bool opt) const {
7259 const intptr_t kNumInputs = 1;
7260 const intptr_t kNumTemps = 2;
7261
7262 LocationSummary* summary = new (zone)
7263 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7264
7265 summary->set_in(0, Location::RequiresRegister());
7266 summary->set_temp(0, Location::RequiresRegister());
7267 summary->set_temp(1, Location::RequiresRegister());
7268
7269 return summary;
7270}
7271
7272void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7273 Register index_reg = locs()->in(0).reg();
7274 Register target_address_reg = locs()->temp(0).reg();
7275 Register offset_reg = locs()->temp(1).reg();
7276
7277 ASSERT(RequiredInputRepresentation(0) == kTagged);
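// offsets_ is an Int32 typed-data array of jump offsets relative to the
// entry pc; load the element selected by the (tagged Smi) index.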
7278 __ LoadObject(offset_reg, offsets_);
7279 const auto element_address = __ ElementAddressForRegIndex(
7280 /*is_load=*/true,
7281 /*is_external=*/false, kTypedDataInt32ArrayCid,
7282 /*index_scale=*/4,
7283 /*index_unboxed=*/false, offset_reg, index_reg);
7284 __ ldr(offset_reg, element_address);
7285
7286 // Offset is relative to entry pc.
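// Reading PC on ARM yields the address of the current instruction plus
// Instr::kPCReadOffset, so subtract the code emitted so far plus that
// offset to recover the entry pc before adding the table offset.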
7287 const intptr_t entry_to_pc_offset = __ CodeSize() + Instr::kPCReadOffset;
7288 __ mov(target_address_reg, compiler::Operand(PC));
7289 __ AddImmediate(target_address_reg, -entry_to_pc_offset);
7290
7291 __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));
7292
7293 // Jump to the absolute address.
7294 __ bx(target_address_reg);
7295}
7296
7297LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
7298 bool opt) const {
7299 const intptr_t kNumInputs = 2;
7300 const intptr_t kNumTemps = 0;
7301 if (needs_number_check()) {
7302 LocationSummary* locs = new (zone)
7303 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7304 locs->set_in(0, Location::RegisterLocation(R0));
7305 locs->set_in(1, Location::RegisterLocation(R1));
7306 locs->set_out(0, Location::RegisterLocation(R0));
7307 return locs;
7308 }
7309 LocationSummary* locs = new (zone)
7310 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
7311
7312 // If a constant has more than one use, make sure it is loaded in register
7313 // so that multiple immediate loads can be avoided.
7314 ConstantInstr* constant = left()->definition()->AsConstant();
7315 if ((constant != nullptr) && !left()->IsSingleUse()) {
7316 locs->set_in(0, Location::RequiresRegister());
7317 } else {
7318 locs->set_in(0, LocationRegisterOrConstant(left()));
7319 }
7320
7321 constant = right()->definition()->AsConstant();
7322 if ((constant != nullptr) && !right()->IsSingleUse()) {
7323 locs->set_in(1, Location::RequiresRegister());
7324 } else {
7325 // Only one of the inputs can be a constant. Choose register if the first
7326 // one is a constant.
7327 locs->set_in(1, locs->in(0).IsConstant()
7328 ? Location::RequiresRegister()
7329 : LocationRegisterOrConstant(right()));
7330 }
7331 locs->set_out(0, Location::RequiresRegister());
7332 return locs;
7333}
7334
7335Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
7336 FlowGraphCompiler* compiler,
7337 BranchLabels labels,
7338 Register reg,
7339 const Object& obj) {
7340 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
7341 source(), deopt_id());
7342}
7343
7344void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7345 // The ARM code may not use true- and false-labels here.
7346 compiler::Label is_true, is_false, done;
7347 BranchLabels labels = {&is_true, &is_false, &is_false};
7348 Condition true_condition = EmitComparisonCode(compiler, labels);
7349
7350 const Register result = this->locs()->out(0).reg();
7351 if (is_false.IsLinked() || is_true.IsLinked()) {
7352 if (true_condition != kInvalidCondition) {
7353 EmitBranchOnCondition(compiler, true_condition, labels);
7354 }
7355 __ Bind(&is_false);
7356 __ LoadObject(result, Bool::False());
7357 __ b(&done);
7358 __ Bind(&is_true);
7359 __ LoadObject(result, Bool::True());
7360 __ Bind(&done);
7361 } else {
7362 // If EmitComparisonCode did not use the labels and just returned
7363 // a condition we can avoid the branch and use conditional loads.
7364 ASSERT(true_condition != kInvalidCondition);
7365 __ LoadObject(result, Bool::True(), true_condition);
7366 __ LoadObject(result, Bool::False(), InvertCondition(true_condition));
7367 }
7368}
7369
7370void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
7371 BranchInstr* branch) {
7372 BranchLabels labels = compiler->CreateBranchLabels(branch);
7373 Condition true_condition = EmitComparisonCode(compiler, labels);
7374 if (true_condition != kInvalidCondition) {
7375 EmitBranchOnCondition(compiler, true_condition, labels);
7376 }
7377}
7378
7379LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
7380 bool opt) const {
7381 return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
7382 LocationSummary::kNoCall);
7383}
7384
7385void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7386 const Register input = locs()->in(0).reg();
7387 const Register result = locs()->out(0).reg();
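// Bool::True() and Bool::False() are laid out so that XORing one with a
// fixed mask produces the other, so boolean negation is a single eor.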
7388 __ eor(result, input,
7389 compiler::Operand(compiler::target::ObjectAlignment::kBoolValueMask));
7390}
7391
7392LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
7393 bool opt) const {
7394 UNREACHABLE();
7395 return NULL;
7396}
7397
7398void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7399 UNREACHABLE();
7400}
7401
7402LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
7403 bool opt) const {
7404 UNREACHABLE();
7405 return NULL;
7406}
7407
7408void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7409 UNREACHABLE();
7410}
7411
7412LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
7413 bool opt) const {
7414 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
7415 const intptr_t kNumTemps = 0;
7416 LocationSummary* locs = new (zone)
7417 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
7418 if (type_arguments() != nullptr) {
7419 locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
7420 AllocateObjectABI::kTypeArgumentsReg));
7421 }
7422 locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
7423 return locs;
7424}
7425
7426void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7427 if (type_arguments() != nullptr) {
7428 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
7429 if (type_usage_info != nullptr) {
7430 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
7431 type_arguments()->definition());
7432 }
7433 }
7434 const Code& stub = Code::ZoneHandle(
7435 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
7436 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
7437 locs(), deopt_id(), env());
7438}
7439
7440void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
7441#ifdef PRODUCT
7442 UNREACHABLE();
7443#else
7444 ASSERT(!compiler->is_optimizing());
7445 __ BranchLinkPatchable(StubCode::DebugStepCheck());
7446 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
7447 compiler->RecordSafepoint(locs());
7448#endif
7449}
7450
7451} // namespace dart
7452
7453#endif // defined(TARGET_ARCH_ARM)