il_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6#if defined(TARGET_ARCH_ARM64)
7
9
18#include "vm/dart_entry.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/simulator.h"
23#include "vm/stack_frame.h"
24#include "vm/stub_code.h"
25#include "vm/symbols.h"
27
28#define __ (compiler->assembler())->
29#define Z (compiler->zone())
30
31namespace dart {
32
33// Generic summary for call instructions that have all arguments pushed
34// on the stack and return the result in a fixed register R0 (or V0 if
35// the return type is double).
36LocationSummary* Instruction::MakeCallSummary(Zone* zone,
37 const Instruction* instr,
38 LocationSummary* locs) {
39 ASSERT(locs == nullptr || locs->always_calls());
40 LocationSummary* result =
41 ((locs == nullptr)
42 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
43 : locs);
44 const auto representation = instr->representation();
45 switch (representation) {
46 case kTagged:
47 case kUntagged:
48 case kUnboxedInt64:
49 result->set_out(
50 0, Location::RegisterLocation(CallingConventions::kReturnReg));
51 break;
52 case kPairOfTagged:
53 result->set_out(
58 break;
59 case kUnboxedDouble:
60 result->set_out(
61 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
62 break;
63 default:
64 UNREACHABLE();
65 break;
66 }
67 return result;
68}
69
70LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
71 bool opt) const {
72 const intptr_t kNumInputs = 1;
73 const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
74 LocationSummary* locs = new (zone)
75 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
76
77 locs->set_in(0, Location::RequiresRegister());
78 switch (representation()) {
79 case kTagged:
80 case kUnboxedInt64:
81 locs->set_out(0, Location::RequiresRegister());
82 break;
83 case kUnboxedDouble:
84 locs->set_temp(0, Location::RequiresRegister());
85 locs->set_out(0, Location::RequiresFpuRegister());
86 break;
87 default:
88 UNREACHABLE();
89 break;
90 }
91 return locs;
92}
93
94void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
95 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
96 ASSERT(kSmiTag == 0);
97 ASSERT(kSmiTagSize == 1);
98
99 const Register index = locs()->in(0).reg();
100
101 switch (representation()) {
102 case kTagged:
103 case kUnboxedInt64: {
104 const auto out = locs()->out(0).reg();
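// The index is a Smi (already shifted left by kSmiTagSize), so two more bits of
// shift scale it by kWordSize; with compressed pointers the 32-bit Smi is
// sign-extended (SXTW) as part of the add.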
105#if !defined(DART_COMPRESSED_POINTERS)
106 __ add(out, base_reg(), compiler::Operand(index, LSL, 2));
107#else
108 __ add(out, base_reg(), compiler::Operand(index, SXTW, 2));
109#endif
110 __ LoadFromOffset(out, out, offset());
111 break;
112 }
113 case kUnboxedDouble: {
114 const auto tmp = locs()->temp(0).reg();
115 const auto out = locs()->out(0).fpu_reg();
116#if !defined(DART_COMPRESSED_POINTERS)
117 __ add(tmp, base_reg(), compiler::Operand(index, LSL, 2));
118#else
119 __ add(tmp, base_reg(), compiler::Operand(index, SXTW, 2));
120#endif
121 __ LoadDFromOffset(out, tmp, offset());
122 break;
123 }
124 default:
125 UNREACHABLE();
126 break;
127 }
128}
129
130DEFINE_BACKEND(StoreIndexedUnsafe,
131 (NoLocation, Register index, Register value)) {
132 ASSERT(instr->RequiredInputRepresentation(
133 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
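// As in LoadIndexedUnsafe above, the Smi index is scaled by two extra bits so
// that it indexes word-sized slots relative to base_reg().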
134#if !defined(DART_COMPRESSED_POINTERS)
135 __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 2));
136#else
137 __ add(TMP, instr->base_reg(), compiler::Operand(index, SXTW, 2));
138#endif
139 __ str(value, compiler::Address(TMP, instr->offset()));
140
141 ASSERT(kSmiTag == 0);
142 ASSERT(kSmiTagSize == 1);
143}
144
145DEFINE_BACKEND(TailCall,
146 (NoLocation,
147 Fixed<Register, ARGS_DESC_REG>,
148 Temp<Register> temp)) {
149 compiler->EmitTailCallToStub(instr->code());
150
151 // Even though the TailCallInstr will be the last instruction in a basic
152 // block, the flow graph compiler will emit native code for other blocks after
153 // the one containing this instruction and needs to be able to use the pool.
154 // (The `LeaveDartFrame` above disables usages of the pool.)
155 __ set_constant_pool_allowed(true);
156}
157
158LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
159 bool opt) const {
160 // The compiler must optimize any function that includes a MemoryCopy
161 // instruction that uses typed data cids, since extracting the payload address
162 // from views is done in a compiler pass after all code motion has happened.
163 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
164 !IsTypedDataBaseClassId(dest_cid_)) ||
165 opt);
166 const intptr_t kNumInputs = 5;
167 const intptr_t kNumTemps = 2;
168 LocationSummary* locs = new (zone)
169 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
170 locs->set_in(kSrcPos, Location::RequiresRegister());
171 locs->set_in(kDestPos, Location::RequiresRegister());
174 locs->set_in(kLengthPos,
176 locs->set_temp(0, Location::RequiresRegister());
177 locs->set_temp(1, Location::RequiresRegister());
178 return locs;
179}
180
182 Register length_reg,
183 compiler::Label* done) {
184 __ BranchIfZero(length_reg, done);
185}
186
187static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
189 switch (bytes) {
190 case 1:
192 case 2:
194 case 4:
196 case 8:
198 default:
199 UNREACHABLE();
201 }
202}
203
204static void CopyUpToMultipleOfChunkSize(FlowGraphCompiler* compiler,
205 Register dest_reg,
206 Register src_reg,
207 Register length_reg,
208 intptr_t element_size,
209 bool unboxed_inputs,
210 bool reversed,
211 intptr_t chunk_size,
212 compiler::Label* done) {
214 if (element_size >= chunk_size) return;
215
216 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
217 const intptr_t base_shift =
218 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
219 const intptr_t offset_sign = reversed ? -1 : 1;
220 auto const mode =
221 reversed ? compiler::Address::PreIndex : compiler::Address::PostIndex;
222 intptr_t tested_bits = 0;
223
224 __ Comment("Copying until region size is a multiple of chunk size");
225
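// Each iteration tests one bit of the (possibly Smi-tagged) length: if the bit
// is set, that power-of-two number of bytes is copied, so afterwards the
// remaining length is a multiple of chunk_size.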
226 for (intptr_t bit = Utils::ShiftForPowerOfTwo(chunk_size) - 1;
227 bit >= element_shift; bit--) {
228 const intptr_t bytes = 1 << bit;
229 const intptr_t tested_bit = bit + base_shift;
230 tested_bits |= (1 << tested_bit);
231 const intptr_t offset = offset_sign * bytes;
232 compiler::Label skip_copy;
233 __ tbz(&skip_copy, length_reg, tested_bit);
234 auto const sz = OperandSizeFor(bytes);
235 __ ldr(TMP, compiler::Address(src_reg, offset, mode), sz);
236 __ str(TMP, compiler::Address(dest_reg, offset, mode), sz);
237 __ Bind(&skip_copy);
238 }
239
240 ASSERT(tested_bits != 0);
241 __ andis(length_reg, length_reg, compiler::Immediate(~tested_bits),
243 __ b(done, ZERO);
244}
245
246void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
247 Register dest_reg,
248 Register src_reg,
249 Register length_reg,
250 compiler::Label* done,
251 compiler::Label* copy_forwards) {
252 const bool reversed = copy_forwards != nullptr;
253 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
254 (unboxed_inputs() ? 0 : kSmiTagShift);
255#if defined(USING_MEMORY_SANITIZER)
256 __ PushPair(length_reg, dest_reg);
257#endif
258 if (reversed) {
259 // Verify that the overlap actually exists by checking to see if
260 // dest_start < src_end.
261 if (!unboxed_inputs()) {
262 __ ExtendNonNegativeSmi(length_reg);
263 }
264 if (shift < 0) {
265 __ add(TMP, src_reg, compiler::Operand(length_reg, ASR, -shift));
266 } else {
267 __ add(TMP, src_reg, compiler::Operand(length_reg, LSL, shift));
268 }
269 __ CompareRegisters(dest_reg, TMP);
270 __ BranchIf(UNSIGNED_GREATER_EQUAL, copy_forwards);
271 // There is overlap, so move TMP to src_reg and adjust dest_reg now.
272 __ MoveRegister(src_reg, TMP);
273 if (shift < 0) {
274 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));
275 } else {
276 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));
277 }
278 }
279 const intptr_t kChunkSize = 16;
280 ASSERT(kChunkSize >= element_size_);
281 CopyUpToMultipleOfChunkSize(compiler, dest_reg, src_reg, length_reg,
282 element_size_, unboxed_inputs_, reversed,
283 kChunkSize, done);
284 // The size of the uncopied region is now a multiple of the chunk size.
285 const intptr_t loop_subtract = (kChunkSize / element_size_)
286 << (unboxed_inputs_ ? 0 : kSmiTagShift);
287 // When reversed, the src and dest registers are adjusted to start with the
288 // end addresses, so apply the negated offset prior to indexing.
289 const intptr_t offset = (reversed ? -1 : 1) * kChunkSize;
290 const auto mode = reversed ? compiler::Address::PairPreIndex
291 : compiler::Address::PairPostIndex;
292 __ Comment("Copying chunks at a time");
293 compiler::Label loop;
294 __ Bind(&loop);
295 __ ldp(TMP, TMP2, compiler::Address(src_reg, offset, mode));
296 __ stp(TMP, TMP2, compiler::Address(dest_reg, offset, mode));
297 __ subs(length_reg, length_reg, compiler::Operand(loop_subtract),
299 __ b(&loop, NOT_ZERO);
300
301#if defined(USING_MEMORY_SANITIZER)
302 __ PopPair(length_reg, dest_reg);
303 if (!unboxed_inputs()) {
304 __ ExtendNonNegativeSmi(length_reg);
305 }
306 if (shift < 0) {
307 __ AsrImmediate(length_reg, length_reg, -shift);
308 } else {
309 __ LslImmediate(length_reg, length_reg, shift);
310 }
311 __ MsanUnpoison(dest_reg, length_reg);
312#endif
313}
314
315void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
316 classid_t array_cid,
317 Register array_reg,
318 Register payload_reg,
319 Representation array_rep,
320 Location start_loc) {
321 intptr_t offset = 0;
322 if (array_rep != kTagged) {
323 // Do nothing, array_reg already contains the payload address.
324 } else if (IsTypedDataBaseClassId(array_cid)) {
325 // The incoming array must have been proven to be an internal typed data
326 // object, where the payload is in the object and we can just offset.
327 ASSERT_EQUAL(array_rep, kTagged);
328 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
329 } else {
330 ASSERT_EQUAL(array_rep, kTagged);
331 ASSERT(!IsExternalPayloadClassId(array_cid));
332 switch (array_cid) {
333 case kOneByteStringCid:
334 offset =
335 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
336 break;
337 case kTwoByteStringCid:
338 offset =
339 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
340 break;
341 default:
342 UNREACHABLE();
343 break;
344 }
345 }
346 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
347 if (start_loc.IsConstant()) {
348 const auto& constant = start_loc.constant();
349 ASSERT(constant.IsInteger());
350 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
351 const intptr_t add_value = Utils::AddWithWrapAround(
352 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
353 __ AddImmediate(payload_reg, array_reg, add_value);
354 return;
355 }
356 const Register start_reg = start_loc.reg();
357 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
358 (unboxed_inputs() ? 0 : kSmiTagShift);
359 if (shift < 0) {
360 if (!unboxed_inputs()) {
361 __ ExtendNonNegativeSmi(start_reg);
362 }
363 __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
364#if defined(DART_COMPRESSED_POINTERS)
365 } else if (!unboxed_inputs()) {
366 __ add(payload_reg, array_reg, compiler::Operand(start_reg, SXTW, shift));
367#endif
368 } else {
369 __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
370 }
371 __ AddImmediate(payload_reg, offset);
372}
373
374LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
375 Zone* zone,
376 bool opt) const {
377 const intptr_t kNumInputs = 3;
378 const intptr_t kNumTemps = 0;
379 auto* const summary = new (zone)
380 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
381
382 summary->set_in(kBasePos, Location::RequiresRegister());
383 summary->set_in(kIndexPos, Location::RequiresRegister());
384 // Only use a Smi constant for the index if multiplying it by the index
385 // scale would be an int64 constant.
386 const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
388 index(), kMinInt64 >> scale_shift,
389 kMaxInt64 >> scale_shift));
390 // Any possible int64 value is okay as a constant here.
391 summary->set_in(kOffsetPos, LocationRegisterOrConstant(offset()));
392 summary->set_out(0, Location::RequiresRegister());
393
394 return summary;
395}
396
397void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
398 const Register base_reg = locs()->in(kBasePos).reg();
399 const Location& index_loc = locs()->in(kIndexPos);
400 const Location& offset_loc = locs()->in(kOffsetPos);
401 const Register result_reg = locs()->out(0).reg();
402
403 if (index_loc.IsConstant()) {
404 if (offset_loc.IsConstant()) {
405 ASSERT_EQUAL(Smi::Cast(index_loc.constant()).Value(), 0);
406 ASSERT(Integer::Cast(offset_loc.constant()).AsInt64Value() != 0);
407 // No index involved at all.
408 const int64_t offset_value =
409 Integer::Cast(offset_loc.constant()).AsInt64Value();
410 __ AddImmediate(result_reg, base_reg, offset_value);
411 } else {
412 __ add(result_reg, base_reg, compiler::Operand(offset_loc.reg()));
413 // Don't need wrap-around as the index is constant only if multiplying
414 // it by the scale is an int64.
415 const int64_t scaled_index =
416 Smi::Cast(index_loc.constant()).Value() * index_scale();
417 __ AddImmediate(result_reg, scaled_index);
418 }
419 } else {
420 __ add(result_reg, base_reg,
421 compiler::Operand(index_loc.reg(), LSL,
423 if (offset_loc.IsConstant()) {
424 const int64_t offset_value =
425 Integer::Cast(offset_loc.constant()).AsInt64Value();
426 __ AddImmediate(result_reg, offset_value);
427 } else {
428 __ AddRegisters(result_reg, offset_loc.reg());
429 }
430 }
431}
432
433LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
434 bool opt) const {
435 const intptr_t kNumInputs = 1;
436 const intptr_t kNumTemps = 0;
437 LocationSummary* locs = new (zone)
438 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
439 ConstantInstr* constant = value()->definition()->AsConstant();
440 if (constant != nullptr && constant->HasZeroRepresentation()) {
441 locs->set_in(0, Location::Constant(constant));
442 } else if (representation() == kUnboxedDouble) {
443 locs->set_in(0, Location::RequiresFpuRegister());
444 } else if (representation() == kUnboxedInt64) {
445 locs->set_in(0, Location::RequiresRegister());
446 } else {
447 ASSERT(representation() == kTagged);
448 locs->set_in(0, LocationAnyOrConstant(value()));
449 }
450 return locs;
451}
452
453// Buffers registers in order to use STP to move
454// two registers at once.
455class ArgumentsMover : public ValueObject {
456 public:
457 // Flush all buffered registers.
458 void Flush(FlowGraphCompiler* compiler) {
459 if (pending_register_ != kNoRegister) {
460 __ StoreToOffset(pending_register_, SP,
461 pending_sp_relative_index_ * kWordSize);
462 pending_sp_relative_index_ = -1;
463 pending_register_ = kNoRegister;
464 }
465 }
466
467 // Buffer given register. May push buffered registers if needed.
468 void MoveRegister(FlowGraphCompiler* compiler,
469 intptr_t sp_relative_index,
470 Register reg) {
471 if (pending_register_ != kNoRegister) {
472 ASSERT((sp_relative_index + 1) == pending_sp_relative_index_);
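// The pending slot is the one directly above this one, so both stores can be
// merged into a single stp.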
473 __ StorePairToOffset(reg, pending_register_, SP,
474 sp_relative_index * kWordSize);
475 pending_register_ = kNoRegister;
476 return;
477 }
478 pending_register_ = reg;
479 pending_sp_relative_index_ = sp_relative_index;
480 }
481
482 // Returns free temp register to hold argument value.
483 Register GetFreeTempRegister(FlowGraphCompiler* compiler) {
484 CLOBBERS_LR({
485 // While pushing arguments only Push, PushPair, LoadObject and
486 // LoadFromOffset are used. They do not clobber TMP or LR.
487 static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
488 "LR should not be allocatable");
489 static_assert(((1 << TMP) & kDartAvailableCpuRegs) == 0,
490 "TMP should not be allocatable");
491 return (pending_register_ == TMP) ? LR : TMP;
492 });
493 }
494
495 private:
496 intptr_t pending_sp_relative_index_ = -1;
497 Register pending_register_ = kNoRegister;
498};
499
500void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
501 ASSERT(compiler->is_optimizing());
502
503 if (previous()->IsMoveArgument()) {
504 // Already generated by the first MoveArgument in the chain.
505 return;
506 }
507
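// Walk the entire chain of consecutive MoveArgument instructions and emit their
// stores together, letting ArgumentsMover pair adjacent word-sized slots into
// stp stores.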
508 ArgumentsMover pusher;
509 for (MoveArgumentInstr* move_arg = this; move_arg != nullptr;
510 move_arg = move_arg->next()->AsMoveArgument()) {
511 const Location value = move_arg->locs()->in(0);
512 Register reg = kNoRegister;
513 if (value.IsRegister()) {
514 reg = value.reg();
515 } else if (value.IsConstant()) {
516 if (value.constant_instruction()->HasZeroRepresentation()) {
517 reg = ZR;
518 } else {
519 ASSERT(move_arg->representation() == kTagged);
520 const Object& constant = value.constant();
521 if (constant.IsNull()) {
522 reg = NULL_REG;
523 } else {
524 reg = pusher.GetFreeTempRegister(compiler);
525 __ LoadObject(reg, value.constant());
526 }
527 }
528 } else if (value.IsFpuRegister()) {
529 pusher.Flush(compiler);
530 __ StoreDToOffset(value.fpu_reg(), SP,
531 move_arg->location().stack_index() * kWordSize);
532 continue;
533 } else {
534 ASSERT(value.IsStackSlot());
535 const intptr_t value_offset = value.ToStackSlotOffset();
536 reg = pusher.GetFreeTempRegister(compiler);
537 __ LoadFromOffset(reg, value.base_reg(), value_offset);
538 }
539 pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
540 }
541 pusher.Flush(compiler);
542}
543
544LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
545 bool opt) const {
546 const intptr_t kNumInputs = 1;
547 const intptr_t kNumTemps = 0;
548 LocationSummary* locs = new (zone)
549 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
550 switch (representation()) {
551 case kTagged:
552 case kUnboxedInt64:
553 locs->set_in(0,
554 Location::RegisterLocation(CallingConventions::kReturnReg));
555 break;
556 case kPairOfTagged:
557 locs->set_in(
562 break;
563 case kUnboxedDouble:
564 locs->set_in(
565 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
566 break;
567 default:
568 UNREACHABLE();
569 break;
570 }
571 return locs;
572}
573
574// Attempt optimized compilation at return instruction instead of at the entry.
575// The entry needs to be patchable, no inlined objects are allowed in the area
576// that will be overwritten by the patch instructions: a branch macro sequence.
577void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
578 if (locs()->in(0).IsRegister()) {
579 const Register result = locs()->in(0).reg();
580 ASSERT(result == CallingConventions::kReturnReg);
581 } else if (locs()->in(0).IsPairLocation()) {
582 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
583 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
584 ASSERT(result_lo == CallingConventions::kReturnReg);
585 ASSERT(result_hi == CallingConventions::kSecondReturnReg);
586 } else {
587 ASSERT(locs()->in(0).IsFpuRegister());
588 const FpuRegister result = locs()->in(0).fpu_reg();
589 ASSERT(result == CallingConventions::kReturnFpuReg);
590 }
591
592 if (compiler->parsed_function().function().IsAsyncFunction() ||
593 compiler->parsed_function().function().IsAsyncGenerator()) {
594 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
595 const Code& stub = GetReturnStub(compiler);
596 compiler->EmitJumpToStub(stub);
597 return;
598 }
599
600 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
601 __ ret();
602 return;
603 }
604
605#if defined(DEBUG)
606 compiler::Label stack_ok;
607 __ Comment("Stack Check");
608 const intptr_t fp_sp_dist =
609 (compiler::target::frame_layout.first_local_from_fp + 1 -
610 compiler->StackSize()) *
611 kWordSize;
612 ASSERT(fp_sp_dist <= 0);
613 __ sub(R2, SP, compiler::Operand(FP));
614 __ CompareImmediate(R2, fp_sp_dist);
615 __ b(&stack_ok, EQ);
616 __ brk(0);
617 __ Bind(&stack_ok);
618#endif
619 ASSERT(__ constant_pool_allowed());
620 __ LeaveDartFrame(); // Disallows constant pool use.
621 __ ret();
622 // This DartReturnInstr may be emitted out of order by the optimizer. The next
623 // block may be a target expecting a properly set constant pool pointer.
624 __ set_constant_pool_allowed(true);
625}
626
627// Detect pattern when one value is zero and another is a power of 2.
628static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
629 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
630 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
631}
632
633LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
634 bool opt) const {
635 comparison()->InitializeLocationSummary(zone, opt);
636 return comparison()->locs();
637}
638
639void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
640 const Register result = locs()->out(0).reg();
641
642 Location left = locs()->in(0);
643 Location right = locs()->in(1);
644 ASSERT(!left.IsConstant() || !right.IsConstant());
645
646 // Emit comparison code. This must not overwrite the result register.
647 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
648 // the labels or returning an invalid condition.
649 BranchLabels labels = {nullptr, nullptr, nullptr};
650 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
651 ASSERT(true_condition != kInvalidCondition);
652
653 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
654
655 intptr_t true_value = if_true_;
656 intptr_t false_value = if_false_;
657
658 if (is_power_of_two_kind) {
659 if (true_value == 0) {
660 // We need to have zero in result on true_condition.
661 true_condition = InvertCondition(true_condition);
662 }
663 } else {
664 if (true_value == 0) {
665 // Swap values so that false_value is zero.
666 intptr_t temp = true_value;
667 true_value = false_value;
668 false_value = temp;
669 } else {
670 true_condition = InvertCondition(true_condition);
671 }
672 }
673
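// cset materializes the condition as 0 or 1; the arithmetic below turns that
// bit into the tagged true/false Smi values without any branches.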
674 __ cset(result, true_condition);
675
676 if (is_power_of_two_kind) {
677 const intptr_t shift =
678 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
679 __ LslImmediate(result, result, shift + kSmiTagSize);
680 } else {
681 __ sub(result, result, compiler::Operand(1));
682 const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
683 __ AndImmediate(result, result, val);
684 if (false_value != 0) {
685 __ AddImmediate(result, Smi::RawValue(false_value));
686 }
687 }
688}
689
690LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
691 bool opt) const {
692 const intptr_t kNumInputs = 1;
693 const intptr_t kNumTemps = 0;
694 LocationSummary* summary = new (zone)
695 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
696 summary->set_in(
697 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
698 return MakeCallSummary(zone, this, summary);
699}
700
701void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
702 // Load arguments descriptor in ARGS_DESC_REG.
703 const intptr_t argument_count = ArgumentCount(); // Includes type args.
704 const Array& arguments_descriptor =
705 Array::ZoneHandle(Z, GetArgumentsDescriptor());
706 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
707
708 if (FLAG_precompiled_mode) {
709 ASSERT(locs()->in(0).reg() == R0);
710 // R0: Closure with a cached entry point.
711 __ LoadFieldFromOffset(R2, R0,
712 compiler::target::Closure::entry_point_offset());
713 } else {
714 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
715 // FUNCTION_REG: Function.
716 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
717 compiler::target::Function::code_offset());
718 // Closure functions only have one entry point.
719 __ LoadFieldFromOffset(R2, FUNCTION_REG,
720 compiler::target::Function::entry_point_offset());
721 }
722
723 // ARGS_DESC_REG: Arguments descriptor array.
724 // R2: instructions entry point.
725 if (!FLAG_precompiled_mode) {
726 // R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
727 __ LoadImmediate(IC_DATA_REG, 0);
728 }
729 __ blr(R2);
730 compiler->EmitCallsiteMetadata(source(), deopt_id(),
731 UntaggedPcDescriptors::kOther, locs(), env());
732 compiler->EmitDropArguments(argument_count);
733}
734
735LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
736 bool opt) const {
737 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
738 LocationSummary::kNoCall);
739}
740
741void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
742 const Register result = locs()->out(0).reg();
743 __ LoadFromOffset(result, FP,
744 compiler::target::FrameOffsetInBytesForVariable(&local()));
745}
746
747LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
748 bool opt) const {
749 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
750 LocationSummary::kNoCall);
751}
752
753void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
754 const Register value = locs()->in(0).reg();
755 const Register result = locs()->out(0).reg();
756 ASSERT(result == value); // Assert that register assignment is correct.
757 __ StoreToOffset(value, FP,
758 compiler::target::FrameOffsetInBytesForVariable(&local()));
759}
760
761LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
762 bool opt) const {
763 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
764 LocationSummary::kNoCall);
765}
766
767void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
768 // The register allocator drops constant definitions that have no uses.
769 if (!locs()->out(0).IsInvalid()) {
770 const Register result = locs()->out(0).reg();
771 __ LoadObject(result, value());
772 }
773}
774
775void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
776 const Location& destination,
777 Register tmp,
778 intptr_t pair_index) {
779 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
780 if (destination.IsRegister()) {
781 if (representation() == kUnboxedInt32 ||
782 representation() == kUnboxedUint32 ||
783 representation() == kUnboxedInt64) {
784 const int64_t value = Integer::Cast(value_).AsInt64Value();
785 __ LoadImmediate(destination.reg(), value);
786 } else {
787 ASSERT(representation() == kTagged);
788 __ LoadObject(destination.reg(), value_);
789 }
790 } else if (destination.IsFpuRegister()) {
791 switch (representation()) {
792 case kUnboxedFloat:
793 __ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
794 break;
795 case kUnboxedDouble:
796 __ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
797 break;
798 case kUnboxedFloat64x2:
799 __ LoadQImmediate(destination.fpu_reg(),
800 Float64x2::Cast(value_).value());
801 break;
802 case kUnboxedFloat32x4:
803 __ LoadQImmediate(destination.fpu_reg(),
804 Float32x4::Cast(value_).value());
805 break;
806 case kUnboxedInt32x4:
807 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
808 break;
809 default:
810 UNREACHABLE();
811 }
812 } else if (destination.IsDoubleStackSlot()) {
813 ASSERT(representation() == kUnboxedDouble);
814 __ LoadDImmediate(VTMP, Double::Cast(value_).value());
815 const intptr_t dest_offset = destination.ToStackSlotOffset();
816 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
817 } else if (destination.IsQuadStackSlot()) {
818 switch (representation()) {
819 case kUnboxedFloat64x2:
820 __ LoadQImmediate(VTMP, Float64x2::Cast(value_).value());
821 break;
822 case kUnboxedFloat32x4:
823 __ LoadQImmediate(VTMP, Float32x4::Cast(value_).value());
824 break;
825 case kUnboxedInt32x4:
826 __ LoadQImmediate(VTMP, Int32x4::Cast(value_).value());
827 break;
828 default:
829 UNREACHABLE();
830 }
831 } else {
832 ASSERT(destination.IsStackSlot());
833 ASSERT(tmp != kNoRegister);
834 const intptr_t dest_offset = destination.ToStackSlotOffset();
836 if (representation() == kUnboxedInt32 ||
837 representation() == kUnboxedUint32 ||
838 representation() == kUnboxedInt64) {
839 const int64_t value = Integer::Cast(value_).AsInt64Value();
840 if (value == 0) {
841 tmp = ZR;
842 } else {
843 __ LoadImmediate(tmp, value);
844 }
845 } else if (representation() == kUnboxedFloat) {
846 int32_t float_bits =
847 bit_cast<int32_t, float>(Double::Cast(value_).value());
848 __ LoadImmediate(tmp, float_bits);
849 operand_size = compiler::kFourBytes;
850 } else {
851 ASSERT(representation() == kTagged);
852 if (value_.IsNull()) {
853 tmp = NULL_REG;
854 } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
855 tmp = ZR;
856 } else {
857 __ LoadObject(tmp, value_);
858 }
859 }
860 __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
861 }
862}
863
864LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
865 bool opt) const {
866 const bool is_unboxed_int =
869 compiler::target::kWordSize);
870 const intptr_t kNumInputs = 0;
871 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
872 LocationSummary* locs = new (zone)
873 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
874 if (is_unboxed_int) {
875 locs->set_out(0, Location::RequiresRegister());
876 } else {
877 switch (representation()) {
878 case kUnboxedDouble:
879 locs->set_out(0, Location::RequiresFpuRegister());
880 locs->set_temp(0, Location::RequiresRegister());
881 break;
882 default:
883 UNREACHABLE();
884 break;
885 }
886 }
887 return locs;
888}
889
890void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
891 if (!locs()->out(0).IsInvalid()) {
892 const Register scratch =
895 : locs()->temp(0).reg();
896 EmitMoveToLocation(compiler, locs()->out(0), scratch);
897 }
898}
899
900LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
901 bool opt) const {
902 auto const dst_type_loc =
904
905 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
906 // since TTS preserves them. So we make this a `kNoCall` summary,
907 // even though most other registers can be modified by the stub. To tell the
908 // register allocator about it, we reserve all the other registers as
909 // temporary registers.
910 // TODO(http://dartbug.com/32788): Simplify this.
911
912 const intptr_t kNonChangeableInputRegs =
913 (1 << TypeTestABI::kInstanceReg) |
914 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
915 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
916 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
917
918 const intptr_t kNumInputs = 4;
919
920 // We invoke a stub that can potentially clobber any CPU register
921 // but can only clobber FPU registers on the slow path when
922 // entering runtime. ARM64 ABI only guarantees that lower
923 // 64-bits of an V registers are preserved so we block all
924 // of them except for FpuTMP.
925 const intptr_t kCpuRegistersToPreserve =
926 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
927 const intptr_t kFpuRegistersToPreserve =
928 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) & ~(1l << FpuTMP);
929
930 const intptr_t kNumTemps = (Utils::CountOneBits64(kCpuRegistersToPreserve) +
931 Utils::CountOneBits64(kFpuRegistersToPreserve));
932
933 LocationSummary* summary = new (zone) LocationSummary(
935 summary->set_in(kInstancePos,
937 summary->set_in(kDstTypePos, dst_type_loc);
938 summary->set_in(
943 summary->set_out(0, Location::SameAsFirstInput());
944
945 // Let's reserve all registers except for the input ones.
946 intptr_t next_temp = 0;
947 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
948 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
949 if (should_preserve) {
950 summary->set_temp(next_temp++,
951 Location::RegisterLocation(static_cast<Register>(i)));
952 }
953 }
954
955 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
956 const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
957 if (should_preserve) {
958 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
959 static_cast<FpuRegister>(i)));
960 }
961 }
962
963 return summary;
964}
965
966void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
967 ASSERT(locs()->always_calls());
968
969 auto object_store = compiler->isolate_group()->object_store();
970 const auto& assert_boolean_stub =
971 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
972
973 compiler::Label done;
975 compiler->GenerateStubCall(source(), assert_boolean_stub,
976 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
977 deopt_id(), env());
978 __ Bind(&done);
979}
980
981static Condition TokenKindToIntCondition(Token::Kind kind) {
982 switch (kind) {
983 case Token::kEQ:
984 return EQ;
985 case Token::kNE:
986 return NE;
987 case Token::kLT:
988 return LT;
989 case Token::kGT:
990 return GT;
991 case Token::kLTE:
992 return LE;
993 case Token::kGTE:
994 return GE;
995 default:
996 UNREACHABLE();
997 return VS;
998 }
999}
1000
1001static Condition FlipCondition(Condition condition) {
1002 switch (condition) {
1003 case EQ:
1004 return EQ;
1005 case NE:
1006 return NE;
1007 case LT:
1008 return GT;
1009 case LE:
1010 return GE;
1011 case GT:
1012 return LT;
1013 case GE:
1014 return LE;
1015 case CC:
1016 return HI;
1017 case LS:
1018 return CS;
1019 case HI:
1020 return CC;
1021 case CS:
1022 return LS;
1023 default:
1024 UNREACHABLE();
1025 return EQ;
1026 }
1027}
1028
1029static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
1030 Condition true_condition,
1031 BranchLabels labels) {
1032 if (labels.fall_through == labels.false_label) {
1033 // If the next block is the false successor we will fall through to it.
1034 __ b(labels.true_label, true_condition);
1035 } else {
1036 // If the next block is not the false successor we will branch to it.
1037 Condition false_condition = InvertCondition(true_condition);
1038 __ b(labels.false_label, false_condition);
1039
1040 // Fall through or jump to the true successor.
1041 if (labels.fall_through != labels.true_label) {
1042 __ b(labels.true_label);
1043 }
1044 }
1045}
1046
1047static bool AreLabelsNull(BranchLabels labels) {
1048 return (labels.true_label == nullptr && labels.false_label == nullptr &&
1049 labels.fall_through == nullptr);
1050}
1051
1052static bool CanUseCbzTbzForComparison(FlowGraphCompiler* compiler,
1053 Register rn,
1054 Condition cond,
1055 BranchLabels labels) {
1056 return !AreLabelsNull(labels) && __ CanGenerateCbzTbz(rn, cond);
1057}
1058
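// Emits a branch fused with a compare against zero: cbz/cbnz for (in)equality,
// or tbz/tbnz on the sign bit for signed comparisons against zero, instead of a
// separate cmp followed by b.cond.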
1059static void EmitCbzTbz(Register reg,
1060 FlowGraphCompiler* compiler,
1061 Condition true_condition,
1062 BranchLabels labels,
1063 compiler::OperandSize sz) {
1064 ASSERT(CanUseCbzTbzForComparison(compiler, reg, true_condition, labels));
1065 if (labels.fall_through == labels.false_label) {
1066 // If the next block is the false successor we will fall through to it.
1067 __ GenerateCbzTbz(reg, true_condition, labels.true_label, sz);
1068 } else {
1069 // If the next block is not the false successor we will branch to it.
1070 Condition false_condition = InvertCondition(true_condition);
1071 __ GenerateCbzTbz(reg, false_condition, labels.false_label, sz);
1072
1073 // Fall through or jump to the true successor.
1074 if (labels.fall_through != labels.true_label) {
1075 __ b(labels.true_label);
1076 }
1077 }
1078}
1079
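// Follows the same contract as EmitInt64ComparisonOp below: either returns a
// condition for the caller to branch on, or (for comparisons against zero that
// fit cbz/tbz) branches directly and returns kInvalidCondition.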
1080static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1081 LocationSummary* locs,
1082 Token::Kind kind,
1083 BranchLabels labels) {
1084 Location left = locs->in(0);
1085 Location right = locs->in(1);
1086 ASSERT(!left.IsConstant() || !right.IsConstant());
1087
1088 Condition true_condition = TokenKindToIntCondition(kind);
1089 if (left.IsConstant() || right.IsConstant()) {
1090 // Ensure constant is on the right.
1091 ConstantInstr* constant = nullptr;
1092 if (left.IsConstant()) {
1093 constant = left.constant_instruction();
1094 Location tmp = right;
1095 right = left;
1096 left = tmp;
1097 true_condition = FlipCondition(true_condition);
1098 } else {
1099 constant = right.constant_instruction();
1100 }
1101
1102 ASSERT(constant->representation() == kTagged);
1103 int64_t value;
1104 if (compiler::HasIntegerValue(constant->value(), &value) && (value == 0) &&
1105 CanUseCbzTbzForComparison(compiler, left.reg(), true_condition,
1106 labels)) {
1107 EmitCbzTbz(left.reg(), compiler, true_condition, labels,
1108 compiler::kObjectBytes);
1109 return kInvalidCondition;
1110 }
1111 __ CompareObject(left.reg(), right.constant());
1112 } else {
1113 __ CompareObjectRegisters(left.reg(), right.reg());
1114 }
1115 return true_condition;
1116}
1117
1118// Similar to ComparisonInstr::EmitComparisonCode, may either:
1119// - emit comparison code and return a valid condition in which case the
1120// caller is expected to emit a branch to the true label based on that
1121// condition (or a branch to the false label on the opposite condition).
1122// - emit comparison code with a branch directly to the labels and return
1123// kInvalidCondition.
1124static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
1125 LocationSummary* locs,
1126 Token::Kind kind,
1127 BranchLabels labels) {
1128 Location left = locs->in(0);
1129 Location right = locs->in(1);
1130 ASSERT(!left.IsConstant() || !right.IsConstant());
1131
1132 Condition true_condition = TokenKindToIntCondition(kind);
1133 if (left.IsConstant() || right.IsConstant()) {
1134 // Ensure constant is on the right.
1135 ConstantInstr* constant = nullptr;
1136 if (left.IsConstant()) {
1137 constant = left.constant_instruction();
1138 Location tmp = right;
1139 right = left;
1140 left = tmp;
1141 true_condition = FlipCondition(true_condition);
1142 } else {
1143 constant = right.constant_instruction();
1144 }
1145
1146 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1147 int64_t value;
1148 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1150 if (value == 0 && CanUseCbzTbzForComparison(compiler, left.reg(),
1151 true_condition, labels)) {
1152 EmitCbzTbz(left.reg(), compiler, true_condition, labels,
1153 compiler::kEightBytes);
1154 return kInvalidCondition;
1155 }
1156 __ CompareImmediate(left.reg(), value);
1157 } else {
1158 UNREACHABLE();
1159 }
1160 } else {
1161 __ CompareRegisters(left.reg(), right.reg());
1162 }
1163 return true_condition;
1164}
1165
1166static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1167 LocationSummary* locs,
1168 Token::Kind kind,
1169 BranchLabels labels) {
1170 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1171 const Register left = locs->in(0).reg();
1172 const Register right = locs->in(1).reg();
1173 const Condition true_condition = TokenKindToIntCondition(kind);
1174 compiler::Label* equal_result =
1175 (true_condition == EQ) ? labels.true_label : labels.false_label;
1176 compiler::Label* not_equal_result =
1177 (true_condition == EQ) ? labels.false_label : labels.true_label;
1178
1179 // Check if operands have the same value. If they don't, then they could
1180 // be equal only if both of them are Mints with the same value.
1181 __ CompareObjectRegisters(left, right);
1182 __ b(equal_result, EQ);
1183 __ and_(TMP, left, compiler::Operand(right), compiler::kObjectBytes);
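// A Smi has a clear tag bit, so TMP is a Smi iff at least one operand is a Smi.
// Equal integers would then have to be identical Smis, which the comparison
// above already ruled out.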
1184 __ BranchIfSmi(TMP, not_equal_result);
1185 __ CompareClassId(left, kMintCid);
1186 __ b(not_equal_result, NE);
1187 __ CompareClassId(right, kMintCid);
1188 __ b(not_equal_result, NE);
1189 __ LoadFieldFromOffset(TMP, left, Mint::value_offset());
1190 __ LoadFieldFromOffset(TMP2, right, Mint::value_offset());
1191 __ CompareRegisters(TMP, TMP2);
1192 return true_condition;
1193}
1194
1195LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1196 bool opt) const {
1197 const intptr_t kNumInputs = 2;
1198 if (operation_cid() == kDoubleCid) {
1199 const intptr_t kNumTemps = 0;
1200 LocationSummary* locs = new (zone)
1201 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1202 locs->set_in(0, Location::RequiresFpuRegister());
1203 locs->set_in(1, Location::RequiresFpuRegister());
1204 locs->set_out(0, Location::RequiresRegister());
1205 return locs;
1206 }
1207 if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
1208 operation_cid() == kIntegerCid) {
1209 const intptr_t kNumTemps = 0;
1210 LocationSummary* locs = new (zone)
1211 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1212 if (is_null_aware()) {
1213 locs->set_in(0, Location::RequiresRegister());
1214 locs->set_in(1, Location::RequiresRegister());
1215 } else {
1216 locs->set_in(0, LocationRegisterOrConstant(left()));
1217 // Only one input can be a constant operand. The case of two constant
1218 // operands should be handled by constant propagation.
1219 // Only right can be a stack slot.
1220 locs->set_in(1, locs->in(0).IsConstant()
1223 }
1224 locs->set_out(0, Location::RequiresRegister());
1225 return locs;
1226 }
1227 UNREACHABLE();
1228 return nullptr;
1229}
1230
1231static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1232 switch (kind) {
1233 case Token::kEQ:
1234 return EQ;
1235 case Token::kNE:
1236 return NE;
1237 case Token::kLT:
1238 return LT;
1239 case Token::kGT:
1240 return GT;
1241 case Token::kLTE:
1242 return LE;
1243 case Token::kGTE:
1244 return GE;
1245 default:
1246 UNREACHABLE();
1247 return VS;
1248 }
1249}
1250
1251static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1252 LocationSummary* locs,
1253 BranchLabels labels,
1254 Token::Kind kind) {
1255 const VRegister left = locs->in(0).fpu_reg();
1256 const VRegister right = locs->in(1).fpu_reg();
1257
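// fcmp with a NaN operand sets the "unordered" flag combination, which fails
// EQ, GT and GE. The < and <= cases therefore flip the operands and use GT/GE
// so that comparisons involving NaN come out false.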
1258 switch (kind) {
1259 case Token::kEQ:
1260 __ fcmpd(left, right);
1261 return EQ;
1262 case Token::kNE:
1263 __ fcmpd(left, right);
1264 return NE;
1265 case Token::kLT:
1266 __ fcmpd(right, left); // Flip to handle NaN.
1267 return GT;
1268 case Token::kGT:
1269 __ fcmpd(left, right);
1270 return GT;
1271 case Token::kLTE:
1272 __ fcmpd(right, left); // Flip to handle NaN.
1273 return GE;
1274 case Token::kGTE:
1275 __ fcmpd(left, right);
1276 return GE;
1277 default:
1278 UNREACHABLE();
1279 return VS;
1280 }
1281}
1282
1283Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1284 BranchLabels labels) {
1285 if (is_null_aware()) {
1286 ASSERT(operation_cid() == kMintCid);
1287 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1288 }
1289 if (operation_cid() == kSmiCid) {
1290 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1291 } else if (operation_cid() == kMintCid || operation_cid() == kIntegerCid) {
1292 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1293 } else {
1294 ASSERT(operation_cid() == kDoubleCid);
1295 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1296 }
1297}
1298
1299LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1300 const intptr_t kNumInputs = 2;
1301 const intptr_t kNumTemps = 0;
1302 LocationSummary* locs = new (zone)
1303 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1304 locs->set_in(0, Location::RequiresRegister());
1305 // Only one input can be a constant operand. The case of two constant
1306 // operands should be handled by constant propagation.
1307 locs->set_in(1, LocationRegisterOrConstant(right()));
1308 return locs;
1309}
1310
1311Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1312 BranchLabels labels) {
1313 const Register left = locs()->in(0).reg();
1314 Location right = locs()->in(1);
1315 if (right.IsConstant()) {
1316 ASSERT(right.constant().IsSmi());
1317 const int64_t imm = Smi::RawValue(Smi::Cast(right.constant()).Value());
1318 __ TestImmediate(left, imm, compiler::kObjectBytes);
1319 } else {
1320 __ tst(left, compiler::Operand(right.reg()), compiler::kObjectBytes);
1321 }
1322 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1323 return true_condition;
1324}
1325
1326LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1327 bool opt) const {
1328 const intptr_t kNumInputs = 1;
1329 const intptr_t kNumTemps = 1;
1330 LocationSummary* locs = new (zone)
1331 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1332 locs->set_in(0, Location::RequiresRegister());
1333 locs->set_temp(0, Location::RequiresRegister());
1334 locs->set_out(0, Location::RequiresRegister());
1335 return locs;
1336}
1337
1338Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1339 BranchLabels labels) {
1340 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1341 const Register val_reg = locs()->in(0).reg();
1342 const Register cid_reg = locs()->temp(0).reg();
1343
1344 compiler::Label* deopt =
1345 CanDeoptimize()
1346 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1347 : nullptr;
1348
1349 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1350 const ZoneGrowableArray<intptr_t>& data = cid_results();
1351 ASSERT(data[0] == kSmiCid);
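// cid_results() is a flat array of (cid, result) pairs; the kSmiCid entry
// always comes first and is handled with a Smi tag test instead of a class-id
// load.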
1352 bool result = data[1] == true_result;
1353 __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
1354 __ LoadClassId(cid_reg, val_reg);
1355
1356 for (intptr_t i = 2; i < data.length(); i += 2) {
1357 const intptr_t test_cid = data[i];
1358 ASSERT(test_cid != kSmiCid);
1359 result = data[i + 1] == true_result;
1360 __ CompareImmediate(cid_reg, test_cid);
1361 __ b(result ? labels.true_label : labels.false_label, EQ);
1362 }
1363 // No match found, deoptimize or default action.
1364 if (deopt == nullptr) {
1365 // If the cid is not in the list, jump to the opposite label from the cids
1366 // that are in the list. These must be all the same (see asserts in the
1367 // constructor).
1368 compiler::Label* target = result ? labels.false_label : labels.true_label;
1369 if (target != labels.fall_through) {
1370 __ b(target);
1371 }
1372 } else {
1373 __ b(deopt);
1374 }
1375 // Dummy result as this method already did the jump, there's no need
1376 // for the caller to branch on a condition.
1377 return kInvalidCondition;
1378}
1379
1380LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1381 bool opt) const {
1382 const intptr_t kNumInputs = 2;
1383 const intptr_t kNumTemps = 0;
1384 if (operation_cid() == kDoubleCid) {
1385 LocationSummary* summary = new (zone)
1386 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1387 summary->set_in(0, Location::RequiresFpuRegister());
1388 summary->set_in(1, Location::RequiresFpuRegister());
1389 summary->set_out(0, Location::RequiresRegister());
1390 return summary;
1391 }
1392 if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
1393 LocationSummary* summary = new (zone)
1394 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1395 summary->set_in(0, LocationRegisterOrConstant(left()));
1396 // Only one input can be a constant operand. The case of two constant
1397 // operands should be handled by constant propagation.
1398 summary->set_in(1, summary->in(0).IsConstant()
1401 summary->set_out(0, Location::RequiresRegister());
1402 return summary;
1403 }
1404
1405 UNREACHABLE();
1406 return nullptr;
1407}
1408
1409Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1410 BranchLabels labels) {
1411 if (operation_cid() == kSmiCid) {
1412 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1413 } else if (operation_cid() == kMintCid) {
1414 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1415 } else {
1416 ASSERT(operation_cid() == kDoubleCid);
1417 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1418 }
1419}
1420
1421void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1422 SetupNative();
1423 const Register result = locs()->out(0).reg();
1424
1425 // Pass a pointer to the first argument in R2.
1426 __ AddImmediate(R2, SP, (ArgumentCount() - 1) * kWordSize);
1427
1428 // Compute the effective address. When running under the simulator,
1429 // this is a redirection address that forces the simulator to call
1430 // into the runtime system.
1431 uword entry;
1432 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1433 const Code* stub;
1434 if (link_lazily()) {
1435 stub = &StubCode::CallBootstrapNative();
1436 entry = NativeEntry::LinkNativeCallEntry();
1437 } else {
1438 entry = reinterpret_cast<uword>(native_c_function());
1439 if (is_bootstrap_native()) {
1440 stub = &StubCode::CallBootstrapNative();
1441 } else if (is_auto_scope()) {
1442 stub = &StubCode::CallAutoScopeNative();
1443 } else {
1444 stub = &StubCode::CallNoScopeNative();
1445 }
1446 }
1447 __ LoadImmediate(R1, argc_tag);
1448 compiler::ExternalLabel label(entry);
1449 __ LoadNativeEntry(R5, &label,
1450 link_lazily() ? ObjectPool::Patchability::kPatchable
1451 : ObjectPool::Patchability::kNotPatchable);
1452 if (link_lazily()) {
1453 compiler->GeneratePatchableCall(
1454 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1456 } else {
1457 // We can never lazy-deopt here because natives are never optimized.
1458 ASSERT(!compiler->is_optimizing());
1459 compiler->GenerateNonLazyDeoptableStubCall(
1460 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1462 }
1463 __ LoadFromOffset(result, SP, 0);
1464
1465 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1466}
1467
1468#define R(r) (1 << r)
1469
1470LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1471 bool is_optimizing) const {
1472 return MakeLocationSummaryInternal(
1473 zone, is_optimizing,
1476}
1477
1478#undef R
1479
1480void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1481 const Register branch = locs()->in(TargetAddressIndex()).reg();
1482
1483 // The temps are indexed according to their register number.
1484 const Register temp1 = locs()->temp(0).reg();
1485 const Register temp2 = locs()->temp(1).reg();
1486 // For regular calls, this holds the FP for rebasing the original locations
1487 // during EmitParamMoves.
1488 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1489 // the call.
1490 const Register saved_fp_or_sp = locs()->temp(2).reg();
1491 const Register temp_csp = locs()->temp(3).reg();
1492
1493 // Ensure these are callee-saved registers and are preserved across the call.
1494 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1495 ASSERT(IsCalleeSavedRegister(temp_csp));
1496 // Other temps don't need to be preserved.
1497
1498 __ mov(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
1499
1500 if (!is_leaf_) {
1501 // We need to create a dummy "exit frame". It will share the same pool
1502 // pointer but have a null code object.
1503 __ LoadObject(CODE_REG, Object::null_object());
1504 __ set_constant_pool_allowed(false);
1505 __ EnterDartFrame(0, PP);
1506 }
1507
1508 // Reserve space for the arguments that go on the stack (if any), then align.
1509 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
1510 __ ReserveAlignedFrameSpace(stack_space);
1511#if defined(USING_MEMORY_SANITIZER)
1512 {
1513 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs & ~(1 << SP),
1515 __ mov(temp1, SP);
1516 SPILLS_LR_TO_FRAME(__ PushRegisters(kVolatileRegisterSet));
1517
1518 // Unpoison everything from SP to FP: this covers both space we have
1519 // reserved for outgoing arguments and the spills which might have
1520 // been generated by the register allocator. Some of these spill slots
1521 // can be used as handles passed down to the runtime.
1522 __ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, temp1);
1523 __ MsanUnpoison(temp1, R1);
1524
1525 // Incoming Dart arguments to this trampoline are potentially used as local
1526 // handles.
1527 __ MsanUnpoison(is_leaf_ ? FPREG : saved_fp_or_sp,
1529
1530 // Outgoing arguments passed by register to the foreign function.
1531 __ LoadImmediate(R0, InputCount());
1532 __ CallCFunction(compiler::Address(
1533 THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
1534
1535 RESTORES_LR_FROM_FRAME(__ PopRegisters(kVolatileRegisterSet));
1536 }
1537#endif
1538
1539 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
1540
1542 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1543 }
1544
1545 if (is_leaf_) {
1546#if !defined(PRODUCT)
1547 // Set the thread object's top_exit_frame_info and VMTag to enable the
1548 // profiler to determine that thread is no longer executing Dart code.
1549 __ StoreToOffset(FPREG, THR,
1550 compiler::target::Thread::top_exit_frame_info_offset());
1551 __ StoreToOffset(branch, THR, compiler::target::Thread::vm_tag_offset());
1552#endif
1553
1554 // We are entering runtime code, so the C stack pointer must be restored
1555 // from the stack limit to the top of the stack.
1556 __ mov(temp_csp, CSP);
1557 __ mov(CSP, SP);
1558
1559 __ blr(branch);
1560
1561 // Restore the Dart stack pointer.
1562 __ mov(SP, CSP);
1563 __ mov(CSP, temp_csp);
1564
1565#if !defined(PRODUCT)
1566 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1567 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1568 __ StoreToOffset(ZR, THR,
1569 compiler::target::Thread::top_exit_frame_info_offset());
1570#endif
1571 } else {
1572 // We need to copy a dummy return address up into the dummy stack frame so
1573 // the stack walker will know which safepoint to use.
1574 //
1575 // ADR loads relative to itself, so add kInstrSize to point to the next
1576 // instruction.
1577 __ adr(temp1, compiler::Immediate(Instr::kInstrSize));
1578 compiler->EmitCallsiteMetadata(source(), deopt_id(),
1579 UntaggedPcDescriptors::Kind::kOther, locs(),
1580 env());
1581
1582 __ StoreToOffset(temp1, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
1583
1584 // Update information in the thread object and enter a safepoint.
1585 // Outline state transition. In AOT, for code size. In JIT, because we
1586 // cannot trust that code will be executable.
1587 __ ldr(temp1,
1588 compiler::Address(
1589 THR, compiler::target::Thread::
1590 call_native_through_safepoint_entry_point_offset()));
1591
1592 // Calls R9 and clobbers R19 (along with volatile registers).
1593 ASSERT(branch == R9);
1594 __ blr(temp1);
1595
1596 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1597 __ Comment("Check Dart_Handle for Error.");
1598 compiler::Label not_error;
1599 __ ldr(temp1,
1600 compiler::Address(CallingConventions::kReturnReg,
1601 compiler::target::LocalHandle::ptr_offset()));
1602 __ BranchIfSmi(temp1, &not_error);
1603 __ LoadClassId(temp1, temp1);
1604 __ RangeCheck(temp1, temp2, kFirstErrorCid, kLastErrorCid,
1606
1607 // Slow path, use the stub to propagate error, to save on code-size.
1608 __ Comment("Slow path: call Dart_PropagateError through stub.");
1611 __ ldr(temp1,
1612 compiler::Address(
1613 THR, compiler::target::Thread::
1614 call_native_through_safepoint_entry_point_offset()));
1615 __ ldr(branch, compiler::Address(
1616 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1617 __ blr(temp1);
1618#if defined(DEBUG)
1619 // We should never return with normal control flow from this.
1620 __ brk(0);
1621#endif
1622
1623 __ Bind(&not_error);
1624 }
1625
1626 // Refresh pinned registers values (inc. write barrier mask and null
1627 // object).
1628 __ RestorePinnedRegisters();
1629 }
1630
1631 EmitReturnMoves(compiler, temp1, temp2);
1632
1633 if (is_leaf_) {
1634 // Restore the pre-aligned SP.
1635 __ mov(SPREG, saved_fp_or_sp);
1636 } else {
1637 __ LeaveDartFrame();
1638
1639 // Restore the global object pool after returning from runtime (old space is
1640 // moving, so the GOP could have been relocated).
1641 if (FLAG_precompiled_mode) {
1642 __ SetupGlobalPoolAndDispatchTable();
1643 }
1644
1645 __ set_constant_pool_allowed(true);
1646 }
1647}
1648
1649// Keep in sync with NativeEntryInstr::EmitNativeCode.
1650void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1651 EmitReturnMoves(compiler);
1652
1653 __ LeaveDartFrame();
1654
1655 // The dummy return address is in LR, no need to pop it as on Intel.
1656
1657 // These can be anything besides the return registers (R0, R1) and THR (R26).
1658 const Register vm_tag_reg = R2;
1659 const Register old_exit_frame_reg = R3;
1660 const Register old_exit_through_ffi_reg = R4;
1661 const Register tmp = R5;
1662
1663 __ PopPair(old_exit_frame_reg, old_exit_through_ffi_reg);
1664
1665 // Restore top_resource.
1666 __ PopPair(tmp, vm_tag_reg);
1667 __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
1668
1669 // Reset the exit frame info to old_exit_frame_reg *before* entering the
1670 // safepoint. The trampoline that called us will enter the safepoint on our
1671 // behalf.
1672 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1673 old_exit_through_ffi_reg,
1674 /*enter_safepoint=*/false);
1675
1676 __ PopNativeCalleeSavedRegisters();
1677
1678 // Leave the entry frame.
1679 __ LeaveFrame();
1680
1681 // Leave the dummy frame holding the pushed arguments.
1682 __ LeaveFrame();
1683
1684 // Restore the actual stack pointer from SPREG.
1685 __ RestoreCSP();
1686
1687 __ Ret();
1688
1689 // For following blocks.
1690 __ set_constant_pool_allowed(true);
1691}
1692
1693// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1694void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1695 // Constant pool cannot be used until we enter the actual Dart frame.
1696 __ set_constant_pool_allowed(false);
1697
1698 __ Bind(compiler->GetJumpLabel(this));
1699
1700 // We don't use the regular stack pointer in ARM64, so we have to copy the
1701 // native stack pointer into the Dart stack pointer. This will also kick CSP
1702 // forward a bit, enough for the spills and leaf call below, until we can set
1703 // it properly after setting up THR.
1704 __ SetupDartSP();
1705
1706 // Create a dummy frame holding the pushed arguments. This simplifies
1707 // NativeReturnInstr::EmitNativeCode.
1708 __ EnterFrame(0);
1709
1710 // Save the argument registers, in reverse order.
1711 SaveArguments(compiler);
1712
1713 // Enter the entry frame. NativeParameterInstr expects this frame to have size
1714 // -exit_link_slot_from_entry_fp, which is verified below.
1715 __ EnterFrame(0);
1716
1717 // Reserve a slot for the code object.
1718 __ PushImmediate(0);
1719
1720 __ PushNativeCalleeSavedRegisters();
1721
1722 // Now that we have THR, we can set CSP.
1723 __ SetupCSPFromThread(THR);
1724
1725#if defined(DART_TARGET_OS_FUCHSIA)
1726 __ str(R18,
1727 compiler::Address(
1728 THR, compiler::target::Thread::saved_shadow_call_stack_offset()));
1729#elif defined(USING_SHADOW_CALL_STACK)
1730#error Unimplemented
1731#endif
1732
1733 // Refresh the pinned register values (incl. the write barrier mask and null object).
1734 __ RestorePinnedRegisters();
1735
1736 // Save the current VMTag on the stack.
1737 __ LoadFromOffset(TMP, THR, compiler::target::Thread::vm_tag_offset());
1738 // Save the top resource.
1739 __ LoadFromOffset(R0, THR, compiler::target::Thread::top_resource_offset());
1740 __ PushPair(R0, TMP);
1741
1742 __ StoreToOffset(ZR, THR, compiler::target::Thread::top_resource_offset());
1743
1744 __ LoadFromOffset(R0, THR,
1745 compiler::target::Thread::exit_through_ffi_offset());
1746 __ Push(R0);
1747
1748 // Save the top exit frame info. We don't set it to 0 yet:
1749 // TransitionNativeToGenerated will handle that.
1750 __ LoadFromOffset(R0, THR,
1751 compiler::target::Thread::top_exit_frame_info_offset());
1752 __ Push(R0);
1753
1754 // In debug mode, verify that we've pushed the top exit frame info at the
1755 // correct offset from FP.
1756 __ EmitEntryFrameVerification();
1757
1758 // The callback trampoline (caller) has already left the safepoint for us.
1759 __ TransitionNativeToGenerated(R0, /*exit_safepoint=*/false);
1760
1761 // Now that the safepoint has ended, we can touch Dart objects without
1762 // handles.
1763
1764 // Load the code object.
1765 const Function& target_function = marshaller_.dart_signature();
1766 const intptr_t callback_id = target_function.FfiCallbackId();
1767 __ LoadFromOffset(R0, THR, compiler::target::Thread::isolate_group_offset());
1768 __ LoadFromOffset(R0, R0,
1769 compiler::target::IsolateGroup::object_store_offset());
1770 __ LoadFromOffset(R0, R0,
1771 compiler::target::ObjectStore::ffi_callback_code_offset());
1772 __ LoadCompressedFieldFromOffset(
1773 R0, R0, compiler::target::GrowableObjectArray::data_offset());
1774 __ LoadCompressedFieldFromOffset(
1775 CODE_REG, R0,
1776 compiler::target::Array::data_offset() +
1777 callback_id * compiler::target::kCompressedWordSize);
1778
1779 // Put the code object in the reserved slot.
1780 __ StoreToOffset(CODE_REG, FPREG,
1781 kPcMarkerSlotFromFp * compiler::target::kWordSize);
1782 if (FLAG_precompiled_mode) {
1783 __ SetupGlobalPoolAndDispatchTable();
1784 } else {
1785 // We now load the pool pointer (PP) with a GC-safe value as we are about to
1786 // invoke Dart code. We don't need a real object pool here.
1787 // Smi zero does not work because ARM64 assumes PP to be untagged.
1788 __ LoadObject(PP, compiler::NullObject());
1789 }
1790
1791 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1792 __ mov(ARGS_DESC_REG, ZR);
1793
1794 // Load a dummy return address which suggests that we are inside of
1795 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1796 CLOBBERS_LR({
1797 __ LoadFromOffset(LR, THR,
1798 compiler::target::Thread::invoke_dart_code_stub_offset());
1799 __ LoadFieldFromOffset(LR, LR,
1800 compiler::target::Code::entry_point_offset());
1801 });
1802
1803 FunctionEntryInstr::EmitNativeCode(compiler);
1804}
1805
1806#define R(r) (1 << r)
1807
1809 Zone* zone,
1810 bool is_optimizing) const {
1811 // Compare FfiCallInstr's use of kFfiAnyNonAbiRegister.
1813 ASSERT(IsAbiPreservedRegister(saved_csp));
1814 return MakeLocationSummaryInternal(zone, (R(saved_csp)));
1815}
1816
1817#undef R
1818
1819void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1820 const Register saved_fp = TMP2;
1821 const Register temp0 = TMP;
1822 const Register saved_csp = locs()->temp(0).reg();
1823
1824 __ MoveRegister(saved_fp, FPREG);
1825
1826 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1827 __ EnterCFrame(frame_space);
1828 ASSERT(IsAbiPreservedRegister(saved_csp));
1829 __ mov(saved_csp, CSP);
1830 __ mov(CSP, SP);
1831
1832 EmitParamMoves(compiler, saved_fp, temp0);
1833
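  // Publish the target's entry point in the thread's vm_tag before the call
  // so the VM (e.g. the profiler) can attribute the time to native code; it
  // is reset to VMTag::kDartTagId immediately after the call returns.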
1834 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1835 __ str(target_address,
1836 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1837 __ CallCFunction(target_address);
1838 __ LoadImmediate(temp0, VMTag::kDartTagId);
1839 __ str(temp0,
1840 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1841
1842 // We don't use the DartSP; we leave the frame immediately after this.
1843 // However, we need to set CSP to a 16-byte-aligned value far above the SP.
1844 __ mov(CSP, saved_csp);
1845 __ LeaveCFrame();
1846}
1847
1848LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
1849 Zone* zone,
1850 bool opt) const {
1851 const intptr_t kNumInputs = 1;
1852 // TODO(fschneider): Allow immediate operands for the char code.
1853 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1855}
1856
1857void OneByteStringFromCharCodeInstr::EmitNativeCode(
1858 FlowGraphCompiler* compiler) {
1859 ASSERT(compiler->is_optimizing());
1860 const Register char_code = locs()->in(0).reg();
1861 const Register result = locs()->out(0).reg();
1862
1863 __ ldr(result,
1864 compiler::Address(THR, Thread::predefined_symbols_address_offset()));
1866 __ SmiUntag(TMP, char_code); // Untag to use scaled address mode.
1867 __ ldr(result,
1868 compiler::Address(result, TMP, UXTX, compiler::Address::Scaled));
1869}
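
// ---------------------------------------------------------------------------
// Illustrative note, not part of the original source: conceptually the
// instruction above returns a canonical one-character string by indexing a
// table of predefined Symbols reachable from the current Thread with the
// untagged char code, roughly `result = predefined_symbols[char_code]`;
// offsets and tagging details are elided here.
// ---------------------------------------------------------------------------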
1870
1871LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1872 bool opt) const {
1873 const intptr_t kNumInputs = 1;
1874 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1876}
1877
1878void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1879 ASSERT(cid_ == kOneByteStringCid);
1880 const Register str = locs()->in(0).reg();
1881 const Register result = locs()->out(0).reg();
1882 __ LoadCompressedSmi(result,
1883 compiler::FieldAddress(str, String::length_offset()));
1884 __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset()),
1886 __ CompareImmediate(result, Smi::RawValue(1));
1887 __ LoadImmediate(result, -1);
1888 __ csel(result, TMP, result, EQ);
1889 __ SmiTag(result);
1890}
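
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the length load, data
// load, and csel above compute codeUnitAt(0) for one-byte strings of length 1
// and -1 for every other length, without branching. The helper name is
// hypothetical; Smi tagging is omitted.
static inline intptr_t OneByteStringToCharCode(const uint8_t* data,
                                               intptr_t length) {
  return (length == 1) ? data[0] : -1;  // csel result, TMP, -1, EQ
}
// ---------------------------------------------------------------------------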
1891
1892LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1893 bool opt) const {
1894 const intptr_t kNumInputs = 5;
1895 const intptr_t kNumTemps = 0;
1896 LocationSummary* summary = new (zone)
1897 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1898 summary->set_in(0, Location::Any()); // decoder
1899 summary->set_in(1, Location::WritableRegister()); // bytes
1900 summary->set_in(2, Location::WritableRegister()); // start
1901 summary->set_in(3, Location::WritableRegister()); // end
1902 summary->set_in(4, Location::WritableRegister()); // table
1903 summary->set_out(0, Location::RequiresRegister());
1904 return summary;
1905}
1906
1907void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1908 const Register bytes_reg = locs()->in(1).reg();
1909 const Register start_reg = locs()->in(2).reg();
1910 const Register end_reg = locs()->in(3).reg();
1911 const Register table_reg = locs()->in(4).reg();
1912 const Register size_reg = locs()->out(0).reg();
1913
1914 const Register bytes_ptr_reg = start_reg;
1915 const Register bytes_end_reg = end_reg;
1916 const Register flags_reg = bytes_reg;
1917 const Register temp_reg = TMP;
1918 const Register decoder_temp_reg = start_reg;
1919 const Register flags_temp_reg = end_reg;
1920
1921 const intptr_t kSizeMask = 0x03;
1922 const intptr_t kFlagsMask = 0x3C;
1923
1924 compiler::Label loop, loop_in;
1925
1926 // Address of input bytes.
1927 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
1928
1929 // Table.
1930 __ AddImmediate(
1931 table_reg, table_reg,
1932 compiler::target::OneByteString::data_offset() - kHeapObjectTag);
1933
1934 // Pointers to start and end.
1935 __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
1936 __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
1937
1938 // Initialize size and flags.
1939 __ mov(size_reg, ZR);
1940 __ mov(flags_reg, ZR);
1941
1942 __ b(&loop_in);
1943 __ Bind(&loop);
1944
1945 // Read byte and increment pointer.
1946 __ ldr(temp_reg,
1947 compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex),
1949
1950 // Update size and flags based on byte value.
1951 __ ldr(temp_reg, compiler::Address(table_reg, temp_reg),
1953 __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
1954 __ andi(temp_reg, temp_reg, compiler::Immediate(kSizeMask));
1955 __ add(size_reg, size_reg, compiler::Operand(temp_reg));
1956
1957 // Stop if end is reached.
1958 __ Bind(&loop_in);
1959 __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
1960 __ b(&loop, UNSIGNED_LESS);
1961
1962 // Write flags to field.
1963 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
1964 if (!IsScanFlagsUnboxed()) {
1965 __ SmiTag(flags_reg);
1966 }
1967 Register decoder_reg;
1968 const Location decoder_location = locs()->in(0);
1969 if (decoder_location.IsStackSlot()) {
1970 __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
1971 decoder_reg = decoder_temp_reg;
1972 } else {
1973 decoder_reg = decoder_location.reg();
1974 }
1975 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1976 if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
1977 __ LoadCompressedSmiFieldFromOffset(flags_temp_reg, decoder_reg,
1978 scan_flags_field_offset);
1979 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg),
1981 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset,
1983 } else {
1984 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
1985 scan_flags_field_offset);
1986 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
1987 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
1988 }
1989}
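
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: a scalar C++
// equivalent of the table-driven scan emitted above. It assumes the same
// table encoding used by the instruction (low two bits = size contribution,
// bits 2..5 = flag bits); Smi tagging and compressed-field handling are
// omitted, and the helper name is hypothetical.
static intptr_t Utf8ScanReference(const uint8_t* bytes,
                                  intptr_t start,
                                  intptr_t end,
                                  const uint8_t* table,
                                  intptr_t* scan_flags /* in-out */) {
  const intptr_t kSizeMask = 0x03;
  const intptr_t kFlagsMask = 0x3C;
  intptr_t size = 0;
  intptr_t flags = 0;
  for (intptr_t i = start; i < end; i++) {
    const uint8_t entry = table[bytes[i]];
    flags |= entry;             // orr flags_reg, flags_reg, temp_reg
    size += entry & kSizeMask;  // andi + add
  }
  *scan_flags |= flags & kFlagsMask;  // OR the flags into the decoder field.
  return size;
}
// ---------------------------------------------------------------------------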
1990
1991LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
1992 bool opt) const {
1993 // The compiler must optimize any function that includes a LoadIndexed
1994 // instruction that uses typed data cids, since extracting the payload address
1995 // from views is done in a compiler pass after all code motion has happened.
1997
1998 const intptr_t kNumInputs = 2;
1999 const intptr_t kNumTemps = 0;
2000 LocationSummary* locs = new (zone)
2001 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2002 locs->set_in(kArrayPos, Location::RequiresRegister());
2003 const bool can_be_constant =
2004 index()->BindsToConstant() &&
2006 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2007 locs->set_in(kIndexPos,
2008 can_be_constant
2009 ? Location::Constant(index()->definition()->AsConstant())
2010 : Location::RequiresRegister());
2011 auto const rep =
2014 locs->set_out(0, Location::RequiresRegister());
2015 } else if (RepresentationUtils::IsUnboxed(rep)) {
2016 locs->set_out(0, Location::RequiresFpuRegister());
2017 } else {
2018 locs->set_out(0, Location::RequiresRegister());
2019 }
2020 return locs;
2021}
2022
2023void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2024 // The array register points to the backing store for external arrays.
2025 const Register array = locs()->in(kArrayPos).reg();
2026 const Location index = locs()->in(kIndexPos);
2027
2028 compiler::Address element_address(TMP); // Bad address.
2029 element_address = index.IsRegister()
2030 ? __ ElementAddressForRegIndex(
2032 index_unboxed_, array, index.reg(), TMP)
2033 : __ ElementAddressForIntIndex(
2035 Smi::Cast(index.constant()).Value());
2036 auto const rep =
2040 const Register result = locs()->out(0).reg();
2041 __ ldr(result, element_address, RepresentationUtils::OperandSize(rep));
2042 } else if (RepresentationUtils::IsUnboxed(rep)) {
2043 const VRegister result = locs()->out(0).fpu_reg();
2044 if (rep == kUnboxedFloat) {
2045 // Load single precision float.
2046 __ fldrs(result, element_address);
2047 } else if (rep == kUnboxedDouble) {
2048 // Load double precision float.
2049 __ fldrd(result, element_address);
2050 } else {
2051 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2052 rep == kUnboxedFloat64x2);
2053 __ fldrq(result, element_address);
2054 }
2055 } else {
2056 const Register result = locs()->out(0).reg();
2057 ASSERT(representation() == kTagged);
2058 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2059 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2060 __ LoadCompressed(result, element_address);
2061 }
2062}
2063
2064LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2065 bool opt) const {
2066 const intptr_t kNumInputs = 2;
2067 const intptr_t kNumTemps = 0;
2068 LocationSummary* summary = new (zone)
2069 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2070 summary->set_in(0, Location::RequiresRegister());
2071 summary->set_in(1, Location::RequiresRegister());
2072 summary->set_out(0, Location::RequiresRegister());
2073 return summary;
2074}
2075
2076void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2077 // The string register points to the backing store for external strings.
2078 const Register str = locs()->in(0).reg();
2079 const Location index = locs()->in(1);
2081
2082 Register result = locs()->out(0).reg();
2083 switch (class_id()) {
2084 case kOneByteStringCid:
2085 switch (element_count()) {
2086 case 1:
2088 break;
2089 case 2:
2091 break;
2092 case 4:
2094 break;
2095 default:
2096 UNREACHABLE();
2097 }
2098 break;
2099 case kTwoByteStringCid:
2100 switch (element_count()) {
2101 case 1:
2103 break;
2104 case 2:
2106 break;
2107 default:
2108 UNREACHABLE();
2109 }
2110 break;
2111 default:
2112 UNREACHABLE();
2113 break;
2114 }
2115 // Warning: element_address may use register TMP as base.
2116 compiler::Address element_address = __ ElementAddressForRegIndexWithSize(
2117 IsExternal(), class_id(), sz, index_scale(), /*index_unboxed=*/false, str,
2118 index.reg(), TMP);
2119 __ ldr(result, element_address, sz);
2120
2122 __ SmiTag(result);
2123}
2124
2125LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2126 bool opt) const {
2127 // The compiler must optimize any function that includes a StoreIndexed
2128 // instruction that uses typed data cids, since extracting the payload address
2129 // from views is done in a compiler pass after all code motion has happened.
2131
2132 const intptr_t kNumInputs = 3;
2133 const intptr_t kNumTemps = 1;
2134 LocationSummary* locs = new (zone)
2135 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2136 locs->set_in(0, Location::RequiresRegister());
2137 const bool can_be_constant =
2138 index()->BindsToConstant() &&
2140 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2141 locs->set_in(1, can_be_constant
2142 ? Location::Constant(index()->definition()->AsConstant())
2143 : Location::RequiresRegister());
2144 locs->set_temp(0, Location::RequiresRegister());
2145 auto const rep =
2148 ASSERT(rep == kUnboxedUint8);
2149 locs->set_in(2, LocationRegisterOrConstant(value()));
2150 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2151 ConstantInstr* constant = value()->definition()->AsConstant();
2152 if (constant != nullptr && constant->HasZeroRepresentation()) {
2153 locs->set_in(2, Location::Constant(constant));
2154 } else {
2155 locs->set_in(2, Location::RequiresRegister());
2156 }
2157 } else if (RepresentationUtils::IsUnboxed(rep)) {
2158 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2159 ConstantInstr* constant = value()->definition()->AsConstant();
2160 if (constant != nullptr && constant->HasZeroRepresentation()) {
2161 locs->set_in(2, Location::Constant(constant));
2162 } else {
2163 locs->set_in(2, Location::RequiresFpuRegister());
2164 }
2165 } else {
2166 locs->set_in(2, Location::RequiresFpuRegister());
2167 }
2168 } else if (class_id() == kArrayCid) {
2169 locs->set_in(2, ShouldEmitStoreBarrier()
2172 if (ShouldEmitStoreBarrier()) {
2175 }
2176 } else {
2177 UNREACHABLE();
2178 }
2179 return locs;
2180}
2181
2182void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2183 // The array register points to the backing store for external arrays.
2184 const Register array = locs()->in(0).reg();
2185 const Location index = locs()->in(1);
2186 const Register temp = locs()->temp(0).reg();
2187 compiler::Address element_address(TMP); // Bad address.
2188
2189 auto const rep =
2192
2193 // Deal with a special case separately.
2194 if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
2195 if (index.IsRegister()) {
2196 __ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
2197 index_scale(), index_unboxed_, array,
2198 index.reg());
2199 } else {
2200 __ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
2201 index_scale(), array,
2202 Smi::Cast(index.constant()).Value());
2203 }
2204 const Register value = locs()->in(2).reg();
2205 __ StoreCompressedIntoArray(array, temp, value, CanValueBeSmi());
2206 return;
2207 }
2208
2209 element_address = index.IsRegister()
2210 ? __ ElementAddressForRegIndex(
2212 index_unboxed_, array, index.reg(), temp)
2213 : __ ElementAddressForIntIndex(
2215 Smi::Cast(index.constant()).Value());
2216
2218 ASSERT(rep == kUnboxedUint8);
2219 if (locs()->in(2).IsConstant()) {
2220 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2221 intptr_t value = constant.Value();
2222 // Clamp to 0x0 or 0xFF respectively.
2223 if (value > 0xFF) {
2224 value = 0xFF;
2225 } else if (value < 0) {
2226 value = 0;
2227 }
2228 if (value == 0) {
2229 __ str(ZR, element_address, compiler::kUnsignedByte);
2230 } else {
2231 __ LoadImmediate(TMP, static_cast<int8_t>(value));
2232 __ str(TMP, element_address, compiler::kUnsignedByte);
2233 }
2234 } else {
2235 const Register value = locs()->in(2).reg();
2236 // Clamp to 0x00 or 0xFF respectively.
2237 __ CompareImmediate(value, 0xFF);
2238 __ csetm(TMP, GT); // TMP = value > 0xFF ? -1 : 0.
2239 __ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP.
2240 __ str(TMP, element_address, compiler::kUnsignedByte);
2241 }
2242 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2243 if (locs()->in(2).IsConstant()) {
2244 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2245 __ str(ZR, element_address, RepresentationUtils::OperandSize(rep));
2246 } else {
2247 __ str(locs()->in(2).reg(), element_address,
2249 }
2250 } else if (RepresentationUtils::IsUnboxed(rep)) {
2251 if (rep == kUnboxedFloat) {
2252 if (locs()->in(2).IsConstant()) {
2253 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2254 __ str(ZR, element_address, compiler::kFourBytes);
2255 } else {
2256 __ fstrs(locs()->in(2).fpu_reg(), element_address);
2257 }
2258 } else if (rep == kUnboxedDouble) {
2259 if (locs()->in(2).IsConstant()) {
2260 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2261 __ str(ZR, element_address, compiler::kEightBytes);
2262 } else {
2263 __ fstrd(locs()->in(2).fpu_reg(), element_address);
2264 }
2265 } else {
2266 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2267 rep == kUnboxedFloat64x2);
2268 const VRegister value_reg = locs()->in(2).fpu_reg();
2269 __ fstrq(value_reg, element_address);
2270 }
2271 } else if (class_id() == kArrayCid) {
2272 ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
2273 if (locs()->in(2).IsConstant()) {
2274 const Object& constant = locs()->in(2).constant();
2275 __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
2276 constant);
2277 } else {
2278 const Register value = locs()->in(2).reg();
2279 __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
2280 }
2281 } else {
2282 UNREACHABLE();
2283 }
2284
2285#if defined(USING_MEMORY_SANITIZER)
2286 if (index.IsRegister()) {
2287 __ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
2288 index_scale(), index_unboxed_, array,
2289 index.reg());
2290 } else {
2291 __ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
2292 index_scale(), array,
2293 Smi::Cast(index.constant()).Value());
2294 }
2295 const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
2297 __ MsanUnpoison(TMP, length_in_bytes);
2298#endif
2299}
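
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the register path of
// the clamped-uint8 store above is a branchless clamp. csetm produces -1 when
// the signed value exceeds 0xFF; the unsigned LS condition then keeps the
// original value only when it is already in [0, 0xFF], so negative values end
// up selecting 0 and too-large values keep -1, whose low byte is 0xFF. The
// helper name is hypothetical.
static inline uint8_t ClampToUint8(int64_t value) {
  int64_t tmp = (value > 0xFF) ? -1 : 0;                       // csetm TMP, GT
  tmp = (static_cast<uint64_t>(value) <= 0xFF) ? value : tmp;  // csel ..., LS
  return static_cast<uint8_t>(tmp);  // str ..., kUnsignedByte keeps the low byte.
}
// ---------------------------------------------------------------------------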
2300
2301static void LoadValueCid(FlowGraphCompiler* compiler,
2302 Register value_cid_reg,
2303 Register value_reg,
2304 compiler::Label* value_is_smi = nullptr) {
2305 compiler::Label done;
2306 if (value_is_smi == nullptr) {
2307 __ LoadImmediate(value_cid_reg, kSmiCid);
2308 }
2309 __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi);
2310 __ LoadClassId(value_cid_reg, value_reg);
2311 __ Bind(&done);
2312}
2313
2314DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2315
2316LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2317 bool opt) const {
2318 const intptr_t kNumInputs = 1;
2319
2320 const intptr_t value_cid = value()->Type()->ToCid();
2321 const intptr_t field_cid = field().guarded_cid();
2322
2323 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2324
2325 const bool needs_value_cid_temp_reg =
2326 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2327
2328 const bool needs_field_temp_reg = emit_full_guard;
2329
2330 intptr_t num_temps = 0;
2331 if (needs_value_cid_temp_reg) {
2332 num_temps++;
2333 }
2334 if (needs_field_temp_reg) {
2335 num_temps++;
2336 }
2337
2338 LocationSummary* summary = new (zone)
2339 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2340 summary->set_in(0, Location::RequiresRegister());
2341
2342 for (intptr_t i = 0; i < num_temps; i++) {
2343 summary->set_temp(i, Location::RequiresRegister());
2344 }
2345
2346 return summary;
2347}
2348
2349void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2350 ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
2351 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2352 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2353
2354 const intptr_t value_cid = value()->Type()->ToCid();
2355 const intptr_t field_cid = field().guarded_cid();
2356 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2357
2358 if (field_cid == kDynamicCid) {
2359 return; // Nothing to emit.
2360 }
2361
2362 const bool emit_full_guard =
2363 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2364
2365 const bool needs_value_cid_temp_reg =
2366 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2367
2368 const bool needs_field_temp_reg = emit_full_guard;
2369
2370 const Register value_reg = locs()->in(0).reg();
2371
2372 const Register value_cid_reg =
2373 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2374
2375 const Register field_reg = needs_field_temp_reg
2376 ? locs()->temp(locs()->temp_count() - 1).reg()
2377 : kNoRegister;
2378
2379 compiler::Label ok, fail_label;
2380
2381 compiler::Label* deopt =
2382 compiler->is_optimizing()
2383 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2384 : nullptr;
2385
2386 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2387
2388 if (emit_full_guard) {
2389 __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
2390
2391 compiler::FieldAddress field_cid_operand(field_reg,
2393 compiler::FieldAddress field_nullability_operand(
2394 field_reg, Field::is_nullable_offset());
2395
2396 if (value_cid == kDynamicCid) {
2397 LoadValueCid(compiler, value_cid_reg, value_reg);
2398 compiler::Label skip_length_check;
2399 __ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2400 __ CompareRegisters(value_cid_reg, TMP);
2401 __ b(&ok, EQ);
2402 __ ldr(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
2403 __ CompareRegisters(value_cid_reg, TMP);
2404 } else if (value_cid == kNullCid) {
2405 __ ldr(value_cid_reg, field_nullability_operand,
2407 __ CompareImmediate(value_cid_reg, value_cid);
2408 } else {
2409 compiler::Label skip_length_check;
2410 __ ldr(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
2411 __ CompareImmediate(value_cid_reg, value_cid);
2412 }
2413 __ b(&ok, EQ);
2414
2415 // Check if the tracked state of the guarded field can be initialized
2416 // inline. If the field needs a length check, we fall through to the runtime,
2417 // which is responsible for computing the offset of the length field
2418 // based on the class id.
2419 // The length guard will be emitted separately, when needed, via a
2420 // GuardFieldLength instruction after GuardFieldClass.
2421 if (!field().needs_length_check()) {
2422 // An uninitialized field can be handled inline. Check if the
2423 // field is still uninitialized.
2424 __ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2425 __ CompareImmediate(TMP, kIllegalCid);
2426 __ b(fail, NE);
2427
2428 if (value_cid == kDynamicCid) {
2429 __ str(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
2430 __ str(value_cid_reg, field_nullability_operand,
2432 } else {
2433 __ LoadImmediate(TMP, value_cid);
2434 __ str(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2435 __ str(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
2436 }
2437
2438 __ b(&ok);
2439 }
2440
2441 if (deopt == nullptr) {
2442 __ Bind(fail);
2443
2444 __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
2446 __ CompareImmediate(TMP, kDynamicCid);
2447 __ b(&ok, EQ);
2448
2449 __ PushPair(value_reg, field_reg);
2450 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2451 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2452 __ Drop(2); // Drop the field and the value.
2453 } else {
2454 __ b(fail);
2455 }
2456 } else {
2457 ASSERT(compiler->is_optimizing());
2458 ASSERT(deopt != nullptr);
2459
2460 // Field guard class has been initialized and is known.
2461 if (value_cid == kDynamicCid) {
2462 // Value's class id is not known.
2463 __ tsti(value_reg, compiler::Immediate(kSmiTagMask));
2464
2465 if (field_cid != kSmiCid) {
2466 __ b(fail, EQ);
2467 __ LoadClassId(value_cid_reg, value_reg);
2468 __ CompareImmediate(value_cid_reg, field_cid);
2469 }
2470
2471 if (field().is_nullable() && (field_cid != kNullCid)) {
2472 __ b(&ok, EQ);
2473 __ CompareObject(value_reg, Object::null_object());
2474 }
2475
2476 __ b(fail, NE);
2477 } else if (value_cid == field_cid) {
2478 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2479 // may sometimes produce the situation after the last Canonicalize pass.
2480 } else {
2481 // Both the value's and the field's class ids are known.
2482 ASSERT(value_cid != nullability);
2483 __ b(fail);
2484 }
2485 }
2486 __ Bind(&ok);
2487}
2488
2489LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2490 bool opt) const {
2491 const intptr_t kNumInputs = 1;
2492 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2493 const intptr_t kNumTemps = 3;
2494 LocationSummary* summary = new (zone)
2495 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2496 summary->set_in(0, Location::RequiresRegister());
2497 // We need temporaries for field object, length offset and expected length.
2498 summary->set_temp(0, Location::RequiresRegister());
2499 summary->set_temp(1, Location::RequiresRegister());
2500 summary->set_temp(2, Location::RequiresRegister());
2501 return summary;
2502 } else {
2503 LocationSummary* summary = new (zone)
2504 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2505 summary->set_in(0, Location::RequiresRegister());
2506 return summary;
2507 }
2508 UNREACHABLE();
2509}
2510
2511void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2512 if (field().guarded_list_length() == Field::kNoFixedLength) {
2513 return; // Nothing to emit.
2514 }
2515
2516 compiler::Label* deopt =
2517 compiler->is_optimizing()
2518 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2519 : nullptr;
2520
2521 const Register value_reg = locs()->in(0).reg();
2522
2523 if (!compiler->is_optimizing() ||
2524 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2525 const Register field_reg = locs()->temp(0).reg();
2526 const Register offset_reg = locs()->temp(1).reg();
2527 const Register length_reg = locs()->temp(2).reg();
2528
2529 compiler::Label ok;
2530
2531 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2532
2533 __ ldr(offset_reg,
2534 compiler::FieldAddress(
2537 __ LoadCompressedSmi(
2538 length_reg,
2539 compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
2540
2541 __ tst(offset_reg, compiler::Operand(offset_reg));
2542 __ b(&ok, MI);
2543
2544 // Load the length from the value. GuardFieldClass already verified that
2545 // the value's class matches the guarded class id of the field.
2546 // offset_reg contains the offset already corrected by -kHeapObjectTag,
2547 // which is why we use Address instead of FieldAddress.
2548 __ LoadCompressedSmi(TMP, compiler::Address(value_reg, offset_reg));
2549 __ CompareObjectRegisters(length_reg, TMP);
2550
2551 if (deopt == nullptr) {
2552 __ b(&ok, EQ);
2553
2554 __ PushPair(value_reg, field_reg);
2555 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2556 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2557 __ Drop(2); // Drop the field and the value.
2558 } else {
2559 __ b(deopt, NE);
2560 }
2561
2562 __ Bind(&ok);
2563 } else {
2564 ASSERT(compiler->is_optimizing());
2565 ASSERT(field().guarded_list_length() >= 0);
2566 ASSERT(field().guarded_list_length_in_object_offset() !=
2568
2569 __ ldr(TMP, compiler::FieldAddress(
2570 value_reg, field().guarded_list_length_in_object_offset()));
2571 __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
2572 __ b(deopt, NE);
2573 }
2574}
2575
2576LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2577 bool opt) const {
2578 const intptr_t kNumInputs = 1;
2579 const intptr_t kNumTemps = 1;
2580 LocationSummary* locs = new (zone)
2581 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2582 locs->set_in(0, Location::RequiresRegister());
2583 locs->set_temp(0, Location::RequiresRegister());
2584 return locs;
2585}
2586
2587void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2588 const Register value = locs()->in(0).reg();
2589 const Register temp = locs()->temp(0).reg();
2590
2591 compiler->used_static_fields().Add(&field());
2592
2593 __ LoadFromOffset(temp, THR,
2594 compiler::target::Thread::field_table_values_offset());
2595 // Note: static field ids won't be changed by hot-reload.
2596 __ StoreToOffset(value, temp,
2597 compiler::target::FieldTable::OffsetOf(field()));
2598}
2599
2600LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2601 bool opt) const {
2602 const intptr_t kNumInputs = 3;
2603 const intptr_t kNumTemps = 0;
2604 LocationSummary* summary = new (zone)
2605 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2607 summary->set_in(1, Location::RegisterLocation(
2609 summary->set_in(
2611 summary->set_out(0, Location::RegisterLocation(R0));
2612 return summary;
2613}
2614
2615void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2616 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2617 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
2618 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
2619
2620 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2621 ASSERT(locs()->out(0).reg() == R0);
2622}
2623
2624LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2625 bool opt) const {
2626 const intptr_t kNumInputs = 2;
2627 const intptr_t kNumTemps = 0;
2628 LocationSummary* locs = new (zone)
2629 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2635 return locs;
2636}
2637
2638// Inlines array allocation for known constant values.
2639static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2640 intptr_t num_elements,
2641 compiler::Label* slow_path,
2642 compiler::Label* done) {
2643 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2644 const intptr_t instance_size = Array::InstanceSize(num_elements);
2645
2646 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2647 AllocateArrayABI::kResultReg, // instance
2648 R3, // end address
2649 R6, R8);
2650 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2651 // R3: new object end address.
2652
2653 // Store the type argument field.
2654 __ StoreCompressedIntoObjectNoBarrier(
2656 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2659
2660 // Set the length field.
2661 __ StoreCompressedIntoObjectNoBarrier(
2663 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2666
2667 // TODO(zra): Use stp once added.
2668 // Initialize all array elements to raw_null.
2669 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2670 // R3: new object end address.
2671 // R8: iterator which initially points to the start of the variable
2672 // data area to be initialized.
2673 if (num_elements > 0) {
2674 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2675 __ AddImmediate(R8, AllocateArrayABI::kResultReg,
2676 sizeof(UntaggedArray) - kHeapObjectTag);
2677 if (array_size < (kInlineArraySize * kCompressedWordSize)) {
2678 intptr_t current_offset = 0;
2679 while (current_offset < array_size) {
2680 __ StoreCompressedIntoObjectNoBarrier(
2681 AllocateArrayABI::kResultReg, compiler::Address(R8, current_offset),
2682 NULL_REG);
2683 current_offset += kCompressedWordSize;
2684 }
2685 } else {
2686 compiler::Label end_loop, init_loop;
2687 __ Bind(&init_loop);
2688 __ CompareRegisters(R8, R3);
2689 __ b(&end_loop, CS);
2690 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2691 compiler::Address(R8, 0), NULL_REG);
2692 __ AddImmediate(R8, kCompressedWordSize);
2693 __ b(&init_loop);
2694 __ Bind(&end_loop);
2695 }
2696 }
2697 __ b(done);
2698}
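
// Illustrative note, not part of the original source: the element fill above
// is fully unrolled (straight-line stores of NULL_REG) when the body is
// smaller than kInlineArraySize compressed words; larger inline allocations
// use a compare-and-branch loop that advances R8 one compressed word at a
// time until it reaches the end address in R3.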
2699
2700void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2701 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
2702 if (type_usage_info != nullptr) {
2703 const Class& list_class =
2704 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
2705 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
2706 type_arguments()->definition());
2707 }
2708
2709 compiler::Label slow_path, done;
2710 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2711 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
2712 num_elements()->BindsToConstant() &&
2713 num_elements()->BoundConstant().IsSmi()) {
2714 const intptr_t length =
2715 Smi::Cast(num_elements()->BoundConstant()).Value();
2717 InlineArrayAllocation(compiler, length, &slow_path, &done);
2718 }
2719 }
2720 }
2721
2722 __ Bind(&slow_path);
2723 auto object_store = compiler->isolate_group()->object_store();
2724 const auto& allocate_array_stub =
2725 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2726 compiler->GenerateStubCall(source(), allocate_array_stub,
2727 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2728 env());
2729 __ Bind(&done);
2730}
2731
2733 Zone* zone,
2734 bool opt) const {
2735 ASSERT(opt);
2736 const intptr_t kNumInputs = 0;
2737 const intptr_t kNumTemps = 3;
2738 LocationSummary* locs = new (zone) LocationSummary(
2739 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2744 return locs;
2745}
2746
2747class AllocateContextSlowPath
2748 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2749 public:
2750 explicit AllocateContextSlowPath(
2751 AllocateUninitializedContextInstr* instruction)
2752 : TemplateSlowPathCode(instruction) {}
2753
2754 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2755 __ Comment("AllocateContextSlowPath");
2756 __ Bind(entry_label());
2757
2758 LocationSummary* locs = instruction()->locs();
2759 locs->live_registers()->Remove(locs->out(0));
2760
2761 compiler->SaveLiveRegisters(locs);
2762
2763 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2764 instruction(), /*num_slow_path_args=*/0);
2765 ASSERT(slow_path_env != nullptr);
2766
2767 auto object_store = compiler->isolate_group()->object_store();
2768 const auto& allocate_context_stub = Code::ZoneHandle(
2769 compiler->zone(), object_store->allocate_context_stub());
2770
2771 __ LoadImmediate(R1, instruction()->num_context_variables());
2772 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
2773 UntaggedPcDescriptors::kOther, locs,
2774 instruction()->deopt_id(), slow_path_env);
2775 ASSERT(instruction()->locs()->out(0).reg() == R0);
2776 compiler->RestoreLiveRegisters(instruction()->locs());
2777 __ b(exit_label());
2778 }
2779};
2780
2782 FlowGraphCompiler* compiler) {
2783 Register temp0 = locs()->temp(0).reg();
2784 Register temp1 = locs()->temp(1).reg();
2785 Register temp2 = locs()->temp(2).reg();
2786 Register result = locs()->out(0).reg();
2787 // Try to allocate the object.
2788 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2789 compiler->AddSlowPathCode(slow_path);
2790 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2791
2792 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2793 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2794 result, // instance
2795 temp0, temp1, temp2);
2796
2797 // Set up the number-of-context-variables field.
2798 __ LoadImmediate(temp0, num_context_variables());
2799 __ str(temp0,
2800 compiler::FieldAddress(result, Context::num_variables_offset()));
2801 } else {
2802 __ Jump(slow_path->entry_label());
2803 }
2804
2805 __ Bind(slow_path->exit_label());
2806}
2807
2808LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
2809 bool opt) const {
2810 const intptr_t kNumInputs = 0;
2811 const intptr_t kNumTemps = 1;
2812 LocationSummary* locs = new (zone)
2813 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2816 return locs;
2817}
2818
2819void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2820 ASSERT(locs()->temp(0).reg() == R1);
2821 ASSERT(locs()->out(0).reg() == R0);
2822
2823 auto object_store = compiler->isolate_group()->object_store();
2824 const auto& allocate_context_stub =
2825 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
2826 __ LoadImmediate(R1, num_context_variables());
2827 compiler->GenerateStubCall(source(), allocate_context_stub,
2828 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2829 env());
2830}
2831
2832LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
2833 bool opt) const {
2834 const intptr_t kNumInputs = 1;
2835 const intptr_t kNumTemps = 0;
2836 LocationSummary* locs = new (zone)
2837 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2838 locs->set_in(0, Location::RegisterLocation(R5));
2839 locs->set_out(0, Location::RegisterLocation(R0));
2840 return locs;
2841}
2842
2843void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2844 ASSERT(locs()->in(0).reg() == R5);
2845 ASSERT(locs()->out(0).reg() == R0);
2846
2847 auto object_store = compiler->isolate_group()->object_store();
2848 const auto& clone_context_stub =
2849 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
2850 compiler->GenerateStubCall(source(), clone_context_stub,
2851 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
2852 deopt_id(), env());
2853}
2854
2855LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
2856 bool opt) const {
2857 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
2858}
2859
2860void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2861 __ Bind(compiler->GetJumpLabel(this));
2862 compiler->AddExceptionHandler(this);
2863 if (HasParallelMove()) {
2864 parallel_move()->EmitNativeCode(compiler);
2865 }
2866
2867 // Restore SP from FP as we are coming from a throw and the code for
2868 // popping arguments has not been run.
2869 const intptr_t fp_sp_dist =
2870 (compiler::target::frame_layout.first_local_from_fp + 1 -
2871 compiler->StackSize()) *
2872 kWordSize;
2873 ASSERT(fp_sp_dist <= 0);
2874 __ AddImmediate(SP, FP, fp_sp_dist);
2875
2876 if (!compiler->is_optimizing()) {
2877 if (raw_exception_var_ != nullptr) {
2878 __ StoreToOffset(
2880 compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
2881 }
2882 if (raw_stacktrace_var_ != nullptr) {
2883 __ StoreToOffset(
2885 compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
2886 }
2887 }
2888}
2889
2890LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
2891 bool opt) const {
2892 const intptr_t kNumInputs = 0;
2893 const intptr_t kNumTemps = 1;
2894 const bool using_shared_stub = UseSharedSlowPathStub(opt);
2895 LocationSummary* summary = new (zone)
2896 LocationSummary(zone, kNumInputs, kNumTemps,
2897 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
2898 : LocationSummary::kCallOnSlowPath);
2899 summary->set_temp(0, Location::RequiresRegister());
2900 return summary;
2901}
2902
2903class CheckStackOverflowSlowPath
2904 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
2905 public:
2906 static constexpr intptr_t kNumSlowPathArgs = 0;
2907
2908 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2909 : TemplateSlowPathCode(instruction) {}
2910
2911 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2912 auto locs = instruction()->locs();
2913 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
2914 const Register value = locs->temp(0).reg();
2915 __ Comment("CheckStackOverflowSlowPathOsr");
2916 __ Bind(osr_entry_label());
2917 __ LoadImmediate(value, Thread::kOsrRequest);
2918 __ str(value,
2919 compiler::Address(THR, Thread::stack_overflow_flags_offset()));
2920 }
2921 __ Comment("CheckStackOverflowSlowPath");
2922 __ Bind(entry_label());
2923 const bool using_shared_stub = locs->call_on_shared_slow_path();
2924 if (!using_shared_stub) {
2925 compiler->SaveLiveRegisters(locs);
2926 }
2927 // pending_deoptimization_env_ is needed to generate a runtime call that
2928 // may throw an exception.
2929 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
2930 Environment* env =
2931 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
2932 compiler->pending_deoptimization_env_ = env;
2933
2934 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
2935 if (using_shared_stub) {
2936 if (!has_frame) {
2937 ASSERT(__ constant_pool_allowed());
2938 __ set_constant_pool_allowed(false);
2939 __ EnterDartFrame(0);
2940 }
2941 auto object_store = compiler->isolate_group()->object_store();
2942 const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
2943 const auto& stub = Code::ZoneHandle(
2944 compiler->zone(),
2945 live_fpu_regs
2946 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
2947 : object_store->stack_overflow_stub_without_fpu_regs_stub());
2948
2949 if (compiler->CanPcRelativeCall(stub)) {
2950 __ GenerateUnRelocatedPcRelativeCall();
2951 compiler->AddPcRelativeCallStubTarget(stub);
2952 } else {
2953 const uword entry_point_offset =
2954 Thread::stack_overflow_shared_stub_entry_point_offset(
2955 locs->live_registers()->FpuRegisterCount() > 0);
2956 __ Call(compiler::Address(THR, entry_point_offset));
2957 }
2958 compiler->RecordSafepoint(locs, kNumSlowPathArgs);
2959 compiler->RecordCatchEntryMoves(env);
2960 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
2961 instruction()->deopt_id(),
2962 instruction()->source());
2963 if (!has_frame) {
2964 __ LeaveDartFrame();
2965 __ set_constant_pool_allowed(true);
2966 }
2967 } else {
2968 ASSERT(has_frame);
2969 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
2970 compiler->EmitCallsiteMetadata(
2971 instruction()->source(), instruction()->deopt_id(),
2972 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
2973 }
2974
2975 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
2976 instruction()->in_loop()) {
2977 // In unoptimized code, record loop stack checks as possible OSR entries.
2978 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
2979 instruction()->deopt_id(),
2980 InstructionSource());
2981 }
2982 compiler->pending_deoptimization_env_ = nullptr;
2983 if (!using_shared_stub) {
2984 compiler->RestoreLiveRegisters(locs);
2985 }
2986 __ b(exit_label());
2987 }
2988
2989 compiler::Label* osr_entry_label() {
2990 ASSERT(IsolateGroup::Current()->use_osr());
2991 return &osr_entry_label_;
2992 }
2993
2994 private:
2995 compiler::Label osr_entry_label_;
2996};
2997
2998void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2999 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3000 compiler->AddSlowPathCode(slow_path);
3001
3002 __ ldr(TMP, compiler::Address(
3003 THR, compiler::target::Thread::stack_limit_offset()));
3004 __ CompareRegisters(SP, TMP);
3005 __ b(slow_path->entry_label(), LS);
3006 if (compiler->CanOSRFunction() && in_loop()) {
3007 const Register function = locs()->temp(0).reg();
3008 // In unoptimized code check the usage counter to trigger OSR at loop
3009 // stack checks. Use progressively higher thresholds for more deeply
3010 // nested loops to attempt to hit outer loops with OSR when possible.
3011 __ LoadObject(function, compiler->parsed_function().function());
3012 const intptr_t configured_optimization_counter_threshold =
3013 compiler->thread()->isolate_group()->optimization_counter_threshold();
3014 const int32_t threshold =
3015 configured_optimization_counter_threshold * (loop_depth() + 1);
3016 __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
3018 __ add(TMP, TMP, compiler::Operand(1));
3019 __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
3021 __ CompareImmediate(TMP, threshold);
3022 __ b(slow_path->osr_entry_label(), GE);
3023 }
3024 if (compiler->ForceSlowPathForStackOverflow()) {
3025 __ b(slow_path->entry_label());
3026 }
3027 __ Bind(slow_path->exit_label());
3028}
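
// Illustrative note, not part of the original source: in unoptimized code the
// OSR trigger above is roughly
//   function.usage_counter += 1;
//   if (function.usage_counter >= threshold * (loop_depth + 1)) goto osr_entry;
// so deeper loops need proportionally more iterations before triggering OSR,
// which biases on-stack replacement toward outer loops.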
3029
3030static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3031 BinarySmiOpInstr* shift_left) {
3032 const LocationSummary& locs = *shift_left->locs();
3033 const Register left = locs.in(0).reg();
3034 const Register result = locs.out(0).reg();
3035 compiler::Label* deopt =
3036 shift_left->CanDeoptimize()
3037 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3038 ICData::kDeoptBinarySmiOp)
3039 : nullptr;
3040 if (locs.in(1).IsConstant()) {
3041 const Object& constant = locs.in(1).constant();
3042 ASSERT(constant.IsSmi());
3043 // Immediate shift operation takes 6 bits for the count (5 with compressed pointers).
3044#if !defined(DART_COMPRESSED_POINTERS)
3045 const intptr_t kCountLimit = 0x3F;
3046#else
3047 const intptr_t kCountLimit = 0x1F;
3048#endif
3049 const intptr_t value = Smi::Cast(constant).Value();
3050 ASSERT((0 < value) && (value < kCountLimit));
3051 if (shift_left->can_overflow()) {
3052 // Check for overflow (preserve left).
3053 __ LslImmediate(TMP, left, value, compiler::kObjectBytes);
3054 __ cmp(left, compiler::Operand(TMP, ASR, value), compiler::kObjectBytes);
3055 __ b(deopt, NE); // Overflow.
3056 }
3057 // Shift for the result now that we know there is no overflow.
3058 __ LslImmediate(result, left, value, compiler::kObjectBytes);
3059 return;
3060 }
3061
3062 // Right (locs.in(1)) is not constant.
3063 const Register right = locs.in(1).reg();
3064 Range* right_range = shift_left->right_range();
3065 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3066 // TODO(srdjan): Implement code below for is_truncating().
3067 // If left is constant, we know the maximal allowed size for right.
3068 const Object& obj = shift_left->left()->BoundConstant();
3069 if (obj.IsSmi()) {
3070 const intptr_t left_int = Smi::Cast(obj).Value();
3071 if (left_int == 0) {
3072 __ CompareObjectRegisters(right, ZR);
3073 __ b(deopt, MI);
3074 __ mov(result, ZR);
3075 return;
3076 }
3077 const intptr_t max_right =
3078 compiler::target::kSmiBits - Utils::HighestBit(left_int);
3079 const bool right_needs_check =
3080 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3081 if (right_needs_check) {
3082 __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
3083 __ b(deopt, CS);
3084 }
3085 __ SmiUntag(TMP, right);
3087 }
3088 return;
3089 }
3090
3091 const bool right_needs_check =
3092 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
3093 if (!shift_left->can_overflow()) {
3094 if (right_needs_check) {
3095 if (!RangeUtils::IsPositive(right_range)) {
3096 ASSERT(shift_left->CanDeoptimize());
3097 __ CompareObjectRegisters(right, ZR);
3098 __ b(deopt, MI);
3099 }
3100
3101 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3102 __ csel(result, ZR, result, CS);
3103 __ SmiUntag(TMP, right);
3105 __ csel(result, TMP, result, CC);
3106 } else {
3107 __ SmiUntag(TMP, right);
3109 }
3110 } else {
3111 if (right_needs_check) {
3112 ASSERT(shift_left->CanDeoptimize());
3113 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3114 __ b(deopt, CS);
3115 }
3116 // Left is not a constant.
3117 // Check if the count is too large to handle inline.
3118 __ SmiUntag(TMP, right);
3119 // Overflow test (preserve left, right, and TMP).
3120 const Register temp = locs.temp(0).reg();
3121 __ lslv(temp, left, TMP, compiler::kObjectBytes);
3122 __ asrv(TMP2, temp, TMP, compiler::kObjectBytes);
3123 __ cmp(left, compiler::Operand(TMP2), compiler::kObjectBytes);
3124 __ b(deopt, NE); // Overflow.
3125 // Shift for the result now that we know there is no overflow.
3127 }
3128}
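
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original source: the shift-left
// overflow checks above rely on shifting back. A left shift by `count` loses
// information exactly when an arithmetic right shift by the same count does
// not recover the original value. The helper name is hypothetical; the shift
// is performed on the unsigned representation to avoid C++ signed-overflow UB.
static inline bool SmiShlOverflows(int64_t left, intptr_t count) {
  const int64_t shifted =
      static_cast<int64_t>(static_cast<uint64_t>(left) << count);  // LslImmediate
  return (shifted >> count) != left;  // cmp left, Operand(TMP, ASR, count)
}
// ---------------------------------------------------------------------------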
3129
3130LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3131 bool opt) const {
3132 const intptr_t kNumInputs = 2;
3133 const intptr_t kNumTemps =
3134 (((op_kind() == Token::kSHL) && can_overflow()) ||
3135 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR))
3136 ? 1
3137 : 0;
3138 LocationSummary* summary = new (zone)
3139 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3140 if (op_kind() == Token::kTRUNCDIV) {
3141 summary->set_in(0, Location::RequiresRegister());
3143 ConstantInstr* right_constant = right()->definition()->AsConstant();
3144 summary->set_in(1, Location::Constant(right_constant));
3145 } else {
3146 summary->set_in(1, Location::RequiresRegister());
3147 }
3148 summary->set_out(0, Location::RequiresRegister());
3149 return summary;
3150 }
3151 if (op_kind() == Token::kMOD) {
3152 summary->set_in(0, Location::RequiresRegister());
3153 summary->set_in(1, Location::RequiresRegister());
3154 summary->set_out(0, Location::RequiresRegister());
3155 return summary;
3156 }
3157 summary->set_in(0, Location::RequiresRegister());
3158 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3159 if (((op_kind() == Token::kSHL) && can_overflow()) ||
3160 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3161 summary->set_temp(0, Location::RequiresRegister());
3162 }
3163 // We make use of 3-operand instructions by not requiring the result register
3164 // to be identical to the first input register, as it is on Intel.
3165 summary->set_out(0, Location::RequiresRegister());
3166 return summary;
3167}
3168
3169void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3170 if (op_kind() == Token::kSHL) {
3171 EmitSmiShiftLeft(compiler, this);
3172 return;
3173 }
3174
3175 const Register left = locs()->in(0).reg();
3176 const Register result = locs()->out(0).reg();
3177 compiler::Label* deopt = nullptr;
3178 if (CanDeoptimize()) {
3179 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3180 }
3181
3182 if (locs()->in(1).IsConstant()) {
3183 const Object& constant = locs()->in(1).constant();
3184 ASSERT(constant.IsSmi());
3185 const int64_t imm = Smi::RawValue(Smi::Cast(constant).Value());
3186 switch (op_kind()) {
3187 case Token::kADD: {
3188 if (deopt == nullptr) {
3189 __ AddImmediate(result, left, imm, compiler::kObjectBytes);
3190 } else {
3191 __ AddImmediateSetFlags(result, left, imm, compiler::kObjectBytes);
3192 __ b(deopt, VS);
3193 }
3194 break;
3195 }
3196 case Token::kSUB: {
3197 if (deopt == nullptr) {
3198 __ AddImmediate(result, left, -imm);
3199 } else {
3200 // Negating imm and using AddImmediateSetFlags would not detect the
3201 // overflow when imm == kMinInt64.
3202 __ SubImmediateSetFlags(result, left, imm, compiler::kObjectBytes);
3203 __ b(deopt, VS);
3204 }
3205 break;
3206 }
3207 case Token::kMUL: {
3208 // Keep left value tagged and untag right value.
3209 const intptr_t value = Smi::Cast(constant).Value();
3210 __ LoadImmediate(TMP, value);
3211#if !defined(DART_COMPRESSED_POINTERS)
3212 __ mul(result, left, TMP);
3213#else
3214 __ smull(result, left, TMP);
3215#endif
3216 if (deopt != nullptr) {
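        // Illustrative note, not part of the original source: the overflow
        // check below compares the bits above the result width (the high 64
        // bits from smulh, or bits 31..63 under compressed pointers) with the
        // sign bit of the result replicated via ASR 63; any mismatch means
        // the product does not fit and we deoptimize.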
3217#if !defined(DART_COMPRESSED_POINTERS)
3218 __ smulh(TMP, left, TMP);
3219 // TMP: result bits 64..127.
3220#else
3221 __ AsrImmediate(TMP, result, 31);
3222 // TMP: result bits 32..63.
3223#endif
3224 __ cmp(TMP, compiler::Operand(result, ASR, 63));
3225 __ b(deopt, NE);
3226 }
3227 break;
3228 }
3229 case Token::kTRUNCDIV: {
3230 const intptr_t value = Smi::Cast(constant).Value();
3231 ASSERT(value != kIntptrMin);
3233 const intptr_t shift_count =
3235 ASSERT(kSmiTagSize == 1);
3236#if !defined(DART_COMPRESSED_POINTERS)
3237 __ AsrImmediate(TMP, left, 63);
3238#else
3239 __ AsrImmediate(TMP, left, 31, compiler::kFourBytes);
3240#endif
3241 ASSERT(shift_count > 1); // 1, -1 case handled above.
3242 const Register temp = TMP2;
3243#if !defined(DART_COMPRESSED_POINTERS)
3244 __ add(temp, left, compiler::Operand(TMP, LSR, 64 - shift_count));
3245#else
3246 __ addw(temp, left, compiler::Operand(TMP, LSR, 32 - shift_count));
3247#endif
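      // Illustrative note, not part of the original source: TMP holds the
      // sign of `left` replicated into every bit, so the add above
      // contributes (2^shift_count - 1) only for negative dividends; the
      // arithmetic shift that follows then rounds toward zero instead of
      // toward negative infinity.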
3248 ASSERT(shift_count > 0);
3249 __ AsrImmediate(result, temp, shift_count, compiler::kObjectBytes);
3250 if (value < 0) {
3251 __ sub(result, ZR, compiler::Operand(result), compiler::kObjectBytes);
3252 }
3253 __ SmiTag(result);
3254 break;
3255 }
3256 case Token::kBIT_AND:
3257 // No overflow check.
3258 __ AndImmediate(result, left, imm);
3259 break;
3260 case Token::kBIT_OR:
3261 // No overflow check.
3262 __ OrImmediate(result, left, imm);
3263 break;
3264 case Token::kBIT_XOR:
3265 // No overflow check.
3266 __ XorImmediate(result, left, imm);
3267 break;
3268 case Token::kSHR: {
3269 // Asr operation masks the count to 6/5 bits.
3270#if !defined(DART_COMPRESSED_POINTERS)
3271 const intptr_t kCountLimit = 0x3F;
3272#else
3273 const intptr_t kCountLimit = 0x1F;
3274#endif
3275 intptr_t value = Smi::Cast(constant).Value();
3276 __ AsrImmediate(result, left,
3277 Utils::Minimum(value + kSmiTagSize, kCountLimit),
3279 __ SmiTag(result);
3280 // Note: the AsrImmediate + SmiTag pair could potentially be folded into a single sbfiz.
3281 break;
3282 }
3283 case Token::kUSHR: {
3284 // Lsr operation masks the count to 6 bits, but
3285 // unsigned shifts by >= kBitsPerInt64 are eliminated by
3286 // BinaryIntegerOpInstr::Canonicalize.
3287 const intptr_t kCountLimit = 0x3F;
3288 intptr_t value = Smi::Cast(constant).Value();
3289 ASSERT((value >= 0) && (value <= kCountLimit));
3290 __ SmiUntag(result, left);
3291 __ LsrImmediate(result, result, value);
3292 if (deopt != nullptr) {
3293 __ SmiTagAndBranchIfOverflow(result, deopt);
3294 } else {
3295 __ SmiTag(result);
3296 }
3297 break;
3298 }
3299 default:
3300 UNREACHABLE();
3301 break;
3302 }
3303 return;
3304 }
3305
3306 const Register right = locs()->in(1).reg();
3307 switch (op_kind()) {
3308 case Token::kADD: {
3309 if (deopt == nullptr) {
3310 __ add(result, left, compiler::Operand(right), compiler::kObjectBytes);
3311 } else {
3312 __ adds(result, left, compiler::Operand(right), compiler::kObjectBytes);
3313 __ b(deopt, VS);
3314 }
3315 break;
3316 }
3317 case Token::kSUB: {
3318 if (deopt == nullptr) {
3319 __ sub(result, left, compiler::Operand(right), compiler::kObjectBytes);
3320 } else {
3321 __ subs(result, left, compiler::Operand(right), compiler::kObjectBytes);
3322 __ b(deopt, VS);
3323 }
3324 break;
3325 }
3326 case Token::kMUL: {
3327 __ SmiUntag(TMP, left);
3328#if !defined(DART_COMPRESSED_POINTERS)
3329 __ mul(result, TMP, right);
3330#else
3331 __ smull(result, TMP, right);
3332#endif
3333 if (deopt != nullptr) {
3334#if !defined(DART_COMPRESSED_POINTERS)
3335 __ smulh(TMP, TMP, right);
3336 // TMP: result bits 64..127.
3337#else
3338 __ AsrImmediate(TMP, result, 31);
3339 // TMP: result bits 32..63.
3340#endif
3341 __ cmp(TMP, compiler::Operand(result, ASR, 63));
3342 __ b(deopt, NE);
3343 }
3344 break;
3345 }
3346 case Token::kBIT_AND: {
3347 // No overflow check.
3348 __ and_(result, left, compiler::Operand(right));
3349 break;
3350 }
3351 case Token::kBIT_OR: {
3352 // No overflow check.
3353 __ orr(result, left, compiler::Operand(right));
3354 break;
3355 }
3356 case Token::kBIT_XOR: {
3357 // No overflow check.
3358 __ eor(result, left, compiler::Operand(right));
3359 break;
3360 }
3361 case Token::kTRUNCDIV: {
3362 if (RangeUtils::CanBeZero(right_range())) {
3363 // Handle divide by zero in runtime.
3364 __ cbz(deopt, right, compiler::kObjectBytes);
3365 }
3366 const Register temp = TMP2;
3367 __ SmiUntag(temp, left);
3368 __ SmiUntag(TMP, right);
3369
3370 __ sdiv(result, temp, TMP, compiler::kObjectBytes);
3371 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3372 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3373 // case we cannot tag the result.
3374#if !defined(DART_COMPRESSED_POINTERS)
3375 __ CompareImmediate(result, 0x4000000000000000LL);
3376#else
3377 __ CompareImmediate(result, 0x40000000LL, compiler::kFourBytes);
3378#endif
3379 __ b(deopt, EQ);
3380 }
3381 __ SmiTag(result);
3382 break;
3383 }
3384 case Token::kMOD: {
3385 if (RangeUtils::CanBeZero(right_range())) {
3386 // Handle divide by zero in runtime.
3387 __ cbz(deopt, right, compiler::kObjectBytes);
3388 }
3389 const Register temp = TMP2;
3390 __ SmiUntag(temp, left);
3391 __ SmiUntag(TMP, right);
3392
3393 __ sdiv(result, temp, TMP, compiler::kObjectBytes);
3394
3395 __ SmiUntag(TMP, right);
3396 __ msub(result, TMP, result, temp,
3397 compiler::kObjectBytes); // result <- left - right * result
3398 __ SmiTag(result);
3399 // res = left % right;
3400 // if (res < 0) {
3401 // if (right < 0) {
3402 // res = res - right;
3403 // } else {
3404 // res = res + right;
3405 // }
3406 // }
3407 compiler::Label done;
3408 __ CompareObjectRegisters(result, ZR);
3409 __ b(&done, GE);
3410 // Result is negative, adjust it.
3411 __ CompareObjectRegisters(right, ZR);
3412 __ sub(TMP, result, compiler::Operand(right), compiler::kObjectBytes);
3413 __ add(result, result, compiler::Operand(right), compiler::kObjectBytes);
3414 __ csel(result, TMP, result, LT);
3415 __ Bind(&done);
3416 break;
3417 }
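// Worked example of the sign adjustment above (illustration only): Dart's
// % yields a non-negative result for any non-zero divisor. With left = -5
// and right = 3 the sdiv/msub sequence produces -2, fixed up to -2 + 3 = 1;
// with right = -3 it also produces -2, fixed up to -2 - (-3) = 1.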
3418 case Token::kSHR: {
3419 if (CanDeoptimize()) {
3420 __ tbnz(deopt, right, compiler::target::kSmiBits + kSmiTagSize);
3421 }
3422 __ SmiUntag(TMP, right);
3423 // asrv[w] operation masks the count to 6/5 bits.
3424#if !defined(DART_COMPRESSED_POINTERS)
3425 const intptr_t kCountLimit = 0x3F;
3426#else
3427 const intptr_t kCountLimit = 0x1F;
3428#endif
3429 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3430 __ LoadImmediate(TMP2, kCountLimit);
3431 __ CompareObjectRegisters(TMP, TMP2);
3432 __ csel(TMP, TMP2, TMP, GT);
3433 }
3434 const Register temp = locs()->temp(0).reg();
3435 __ SmiUntag(temp, left);
3436 __ asrv(result, temp, TMP, compiler::kObjectBytes);
3437 __ SmiTag(result);
3438 break;
3439 }
3440 case Token::kUSHR: {
3441 if (CanDeoptimize()) {
3442 __ tbnz(deopt, right, compiler::target::kSmiBits + kSmiTagSize);
3443 }
3444 __ SmiUntag(TMP, right);
3445 // lsrv operation masks the count to 6 bits.
3446 const intptr_t kCountLimit = 0x3F;
3447 COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
3448 compiler::Label done;
3449 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3450 __ LoadImmediate(TMP2, kCountLimit);
3451 __ CompareRegisters(TMP, TMP2);
3452 __ csel(result, ZR, result, GT);
3453 __ b(&done, GT);
3454 }
3455 const Register temp = locs()->temp(0).reg();
3456 __ SmiUntag(temp, left);
3457 __ lsrv(result, temp, TMP);
3458 if (deopt != nullptr) {
3459 __ SmiTagAndBranchIfOverflow(result, deopt);
3460 } else {
3461 __ SmiTag(result);
3462 }
3463 __ Bind(&done);
3464 break;
3465 }
3466 case Token::kDIV: {
3467 // Dispatches to 'Double./'.
3468 // TODO(srdjan): Implement as conversion to double and double division.
3469 UNREACHABLE();
3470 break;
3471 }
3472 case Token::kOR:
3473 case Token::kAND: {
3474 // Flow graph builder has dissected this operation to guarantee correct
3475 // behavior (short-circuit evaluation).
3476 UNREACHABLE();
3477 break;
3478 }
3479 default:
3480 UNREACHABLE();
3481 break;
3482 }
3483}
3484
3485LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3486 bool opt) const {
3487 intptr_t left_cid = left()->Type()->ToCid();
3488 intptr_t right_cid = right()->Type()->ToCid();
3489 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3490 const intptr_t kNumInputs = 2;
3491 const intptr_t kNumTemps = 0;
3492 LocationSummary* summary = new (zone)
3493 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3494 summary->set_in(0, Location::RequiresRegister());
3495 summary->set_in(1, Location::RequiresRegister());
3496 return summary;
3497}
3498
3499void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3500 compiler::Label* deopt =
3501 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3502 intptr_t left_cid = left()->Type()->ToCid();
3503 intptr_t right_cid = right()->Type()->ToCid();
3504 const Register left = locs()->in(0).reg();
3505 const Register right = locs()->in(1).reg();
3506 if (this->left()->definition() == this->right()->definition()) {
3507 __ BranchIfSmi(left, deopt);
3508 } else if (left_cid == kSmiCid) {
3509 __ BranchIfSmi(right, deopt);
3510 } else if (right_cid == kSmiCid) {
3511 __ BranchIfSmi(left, deopt);
3512 } else {
3513 __ orr(TMP, left, compiler::Operand(right));
3514 __ BranchIfSmi(TMP, deopt);
3515 }
3516}
3517
3518LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3519 const intptr_t kNumInputs = 1;
3520 const intptr_t kNumTemps = 1;
3521 LocationSummary* summary = new (zone) LocationSummary(
3522 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3523 summary->set_in(0, Location::RequiresFpuRegister());
3524 summary->set_temp(0, Location::RequiresRegister());
3525 summary->set_out(0, Location::RequiresRegister());
3526 return summary;
3527}
3528
3529void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3530 const Register out_reg = locs()->out(0).reg();
3531 const Register temp_reg = locs()->temp(0).reg();
3532 const VRegister value = locs()->in(0).fpu_reg();
3533
3534 BoxAllocationSlowPath::Allocate(compiler, this,
3535 compiler->BoxClassFor(from_representation()),
3536 out_reg, temp_reg);
3537
3538 switch (from_representation()) {
3539 case kUnboxedDouble:
3540 __ StoreDFieldToOffset(value, out_reg, ValueOffset());
3541 break;
3542 case kUnboxedFloat:
3543 __ fcvtds(FpuTMP, value);
3544 __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
3545 break;
3546 case kUnboxedFloat32x4:
3547 case kUnboxedFloat64x2:
3548 case kUnboxedInt32x4:
3549 __ StoreQFieldToOffset(value, out_reg, ValueOffset());
3550 break;
3551 default:
3552 UNREACHABLE();
3553 break;
3554 }
3555}
3556
3557LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3559 const intptr_t kNumInputs = 1;
3560 const intptr_t kNumTemps = 0;
3561 const bool is_floating_point =
3562 !RepresentationUtils::IsUnboxedInteger(representation());
3563 LocationSummary* summary = new (zone)
3564 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3565 summary->set_in(0, Location::RequiresRegister());
3566 summary->set_out(0, is_floating_point ? Location::RequiresFpuRegister()
3567 : Location::RequiresRegister());
3568 return summary;
3569}
3570
3571void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3572 const Register box = locs()->in(0).reg();
3573
3574 switch (representation()) {
3575 case kUnboxedInt64: {
3576 const Register result = locs()->out(0).reg();
3577 __ ldr(result, compiler::FieldAddress(box, ValueOffset()));
3578 break;
3579 }
3580
3581 case kUnboxedDouble: {
3582 const VRegister result = locs()->out(0).fpu_reg();
3583 __ LoadDFieldFromOffset(result, box, ValueOffset());
3584 break;
3585 }
3586
3587 case kUnboxedFloat: {
3588 const VRegister result = locs()->out(0).fpu_reg();
3589 __ LoadDFieldFromOffset(result, box, ValueOffset());
3590 __ fcvtsd(result, result);
3591 break;
3592 }
3593
3594 case kUnboxedFloat32x4:
3595 case kUnboxedFloat64x2:
3596 case kUnboxedInt32x4: {
3597 const VRegister result = locs()->out(0).fpu_reg();
3598 __ LoadQFieldFromOffset(result, box, ValueOffset());
3599 break;
3600 }
3601
3602 default:
3603 UNREACHABLE();
3604 break;
3605 }
3606}
3607
3608void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3609 const Register box = locs()->in(0).reg();
3610
3611 switch (representation()) {
3612 case kUnboxedInt32:
3613 case kUnboxedInt64: {
3614 const Register result = locs()->out(0).reg();
3615 __ SmiUntag(result, box);
3616 break;
3617 }
3618
3619 case kUnboxedDouble: {
3620 const VRegister result = locs()->out(0).fpu_reg();
3621 __ SmiUntag(TMP, box);
3622#if !defined(DART_COMPRESSED_POINTERS)
3623 __ scvtfdx(result, TMP);
3624#else
3625 __ scvtfdw(result, TMP);
3626#endif
3627 break;
3628 }
3629
3630 default:
3631 UNREACHABLE();
3632 break;
3633 }
3634}
3635
3636void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3637 const Register value = locs()->in(0).reg();
3638 const Register result = locs()->out(0).reg();
3639 __ LoadInt32FromBoxOrSmi(result, value);
3640}
3641
3642void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3643 const Register value = locs()->in(0).reg();
3644 const Register result = locs()->out(0).reg();
3645 __ LoadInt64FromBoxOrSmi(result, value);
3646}
3647
3648LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3649 bool opt) const {
3650 ASSERT((from_representation() == kUnboxedInt32) ||
3651 (from_representation() == kUnboxedUint32));
3652#if !defined(DART_COMPRESSED_POINTERS)
3653 // ValueFitsSmi() may be overly conservative and false because we only
3654 // perform range analysis during optimized compilation.
3655 const bool kMayAllocateMint = false;
3656#else
3657 const bool kMayAllocateMint = !ValueFitsSmi();
3658#endif
3659 const intptr_t kNumInputs = 1;
3660 const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
3661 LocationSummary* summary = new (zone)
3662 LocationSummary(zone, kNumInputs, kNumTemps,
3663 kMayAllocateMint ? LocationSummary::kCallOnSlowPath
3664 : LocationSummary::kNoCall);
3665 summary->set_in(0, Location::RequiresRegister());
3666 summary->set_out(0, Location::RequiresRegister());
3667 if (kMayAllocateMint) {
3668 summary->set_temp(0, Location::RequiresRegister());
3669 }
3670 return summary;
3671}
3672
3673void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3674 Register value = locs()->in(0).reg();
3675 Register out = locs()->out(0).reg();
3676 ASSERT(value != out);
3677
3678#if !defined(DART_COMPRESSED_POINTERS)
3679 ASSERT(compiler::target::kSmiBits >= 32);
3680 if (from_representation() == kUnboxedInt32) {
3681 __ sbfiz(out, value, kSmiTagSize, 32);
3682 } else {
3683 ASSERT(from_representation() == kUnboxedUint32);
3684 __ ubfiz(out, value, kSmiTagSize, 32);
3685 }
3686#else
3687 compiler::Label done;
3688 if (from_representation() == kUnboxedInt32) {
3689 ASSERT(kSmiTag == 0);
3690 // Signed Bitfield Insert in Zero instruction extracts the 31 significant
3691 // bits from a Smi.
3692 __ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
3693 if (ValueFitsSmi()) {
3694 return;
3695 }
3696 __ cmpw(value, compiler::Operand(out, ASR, 1));
3697 __ b(&done, EQ); // Jump if the sbfiz instruction didn't lose info.
3698 } else {
3699 ASSERT(from_representation() == kUnboxedUint32);
3700 // A 32 bit positive Smi has one tag bit and one unused sign bit,
3701 // leaving only 30 bits for the payload.
3702 __ LslImmediate(out, value, kSmiTagSize, compiler::kFourBytes);
3703 if (ValueFitsSmi()) {
3704 return;
3705 }
3706 __ TestImmediate(value, 0xC0000000);
3707 __ b(&done, EQ); // Jump if both bits are zero.
3708 }
3709
3710 Register temp = locs()->temp(0).reg();
3711 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
3712 temp);
3713 if (from_representation() == kUnboxedInt32) {
3714 __ sxtw(temp, value); // Sign-extend.
3715 } else {
3716 __ uxtw(temp, value); // Zero-extend.
3717 }
3718 __ StoreToOffset(temp, out, Mint::value_offset() - kHeapObjectTag);
3719 __ Bind(&done);
3720#endif
3721}
3722
3723LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
3724 bool opt) const {
3725 const intptr_t kNumInputs = 1;
3726 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3727 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
3728 // precompiled mode and only after VM isolate stubs were
3729 // replaced with isolate-specific stubs.
3730 auto object_store = IsolateGroup::Current()->object_store();
3731 const bool stubs_in_vm_isolate =
3732 object_store->allocate_mint_with_fpu_regs_stub()
3733 ->untag()
3734 ->InVMIsolateHeap() ||
3735 object_store->allocate_mint_without_fpu_regs_stub()
3736 ->untag()
3737 ->InVMIsolateHeap();
3738 const bool shared_slow_path_call =
3739 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
3740 LocationSummary* summary = new (zone) LocationSummary(
3741 zone, kNumInputs, kNumTemps,
3742 ValueFitsSmi() ? LocationSummary::kNoCall
3743 : shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
3744 : LocationSummary::kCallOnSlowPath);
3745 summary->set_in(0, Location::RequiresRegister());
3746 if (ValueFitsSmi()) {
3747 summary->set_out(0, Location::RequiresRegister());
3748 } else if (shared_slow_path_call) {
3749 summary->set_out(0,
3750 Location::RegisterLocation(AllocateMintABI::kResultReg));
3751 summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
3752 } else {
3753 summary->set_out(0, Location::RequiresRegister());
3754 summary->set_temp(0, Location::RequiresRegister());
3755 }
3756 return summary;
3757}
3758
3759void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3760 Register in = locs()->in(0).reg();
3761 Register out = locs()->out(0).reg();
3762 if (ValueFitsSmi()) {
3763 __ SmiTag(out, in);
3764 return;
3765 }
3766 ASSERT(kSmiTag == 0);
3767 compiler::Label done;
3768#if !defined(DART_COMPRESSED_POINTERS)
3769 __ adds(out, in, compiler::Operand(in)); // SmiTag
3770 // If the value doesn't fit in a smi, the tagging changes the sign,
3771 // which causes the overflow flag to be set.
3772 __ b(&done, NO_OVERFLOW);
3773#else
3774 __ sbfiz(out, in, kSmiTagSize, 31); // SmiTag + sign-extend.
3775 __ cmp(in, compiler::Operand(out, ASR, kSmiTagSize));
3776 __ b(&done, EQ);
3777#endif
3778
3779 Register temp = locs()->temp(0).reg();
3780 if (compiler->intrinsic_mode()) {
3781 __ TryAllocate(compiler->mint_class(),
3782 compiler->intrinsic_slow_path_label(),
3783 compiler::Assembler::kFarJump, out, temp);
3784 } else if (locs()->call_on_shared_slow_path()) {
3785 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3786 if (!has_frame) {
3787 ASSERT(__ constant_pool_allowed());
3788 __ set_constant_pool_allowed(false);
3789 __ EnterDartFrame(0);
3790 }
3791 auto object_store = compiler->isolate_group()->object_store();
3792 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
3793 const auto& stub = Code::ZoneHandle(
3794 compiler->zone(),
3795 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
3796 : object_store->allocate_mint_without_fpu_regs_stub());
3797
3798 ASSERT(!locs()->live_registers()->ContainsRegister(
3799 AllocateMintABI::kResultReg));
3800 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
3801 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
3802 locs(), DeoptId::kNone, extended_env);
3803 if (!has_frame) {
3804 __ LeaveDartFrame();
3805 __ set_constant_pool_allowed(true);
3806 }
3807 } else {
3808 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
3809 temp);
3810 }
3811
3812 __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag);
3813 __ Bind(&done);
3814}
3815
3816LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
3817 bool opt) const {
3818 const intptr_t kNumInputs = 1;
3819 const intptr_t kNumTemps = 0;
3820 LocationSummary* summary = new (zone)
3821 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3822 summary->set_in(0, Location::RequiresRegister());
3823 summary->set_out(0, Location::RequiresRegister());
3824 return summary;
3825}
3826
3827void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3828 const intptr_t value_cid = value()->Type()->ToCid();
3829 const Register out = locs()->out(0).reg();
3830 const Register value = locs()->in(0).reg();
3831 compiler::Label* deopt =
3832 CanDeoptimize()
3833 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
3834 : nullptr;
3835
3836 if (value_cid == kSmiCid) {
3837 __ SmiUntag(out, value);
3838 } else if (value_cid == kMintCid) {
3839 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3840 } else if (!CanDeoptimize()) {
3841 // Type information is not conclusive, but range analysis found
3842 // the value to be in int64 range. Therefore it must be a smi
3843 // or mint value.
3845 compiler::Label done;
3846 __ SmiUntag(out, value);
3847 __ BranchIfSmi(value, &done);
3848 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3849 __ Bind(&done);
3850 } else {
3851 compiler::Label done;
3852 __ SmiUntag(out, value);
3853 __ BranchIfSmi(value, &done);
3854 __ CompareClassId(value, kMintCid);
3855 __ b(deopt, NE);
3856 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3857 __ Bind(&done);
3858 }
3859
3860 // TODO(vegorov): as it is implemented right now truncating unboxing would
3861 // leave "garbage" in the higher word.
3862 if (!is_truncating() && (deopt != nullptr)) {
3863 ASSERT(representation() == kUnboxedInt32);
3864 __ cmp(out, compiler::Operand(out, SXTW, 0));
3865 __ b(deopt, NE);
3866 }
3867}
3868
3869LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
3870 bool opt) const {
3871 const intptr_t kNumInputs = 2;
3872 const intptr_t kNumTemps = 0;
3873 LocationSummary* summary = new (zone)
3874 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3875 summary->set_in(0, Location::RequiresFpuRegister());
3876 summary->set_in(1, Location::RequiresFpuRegister());
3877 summary->set_out(0, Location::RequiresFpuRegister());
3878 return summary;
3879}
3880
3881void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3882 const VRegister left = locs()->in(0).fpu_reg();
3883 const VRegister right = locs()->in(1).fpu_reg();
3884 const VRegister result = locs()->out(0).fpu_reg();
3885 switch (op_kind()) {
3886 case Token::kADD:
3887 __ faddd(result, left, right);
3888 break;
3889 case Token::kSUB:
3890 __ fsubd(result, left, right);
3891 break;
3892 case Token::kMUL:
3893 __ fmuld(result, left, right);
3894 break;
3895 case Token::kDIV:
3896 __ fdivd(result, left, right);
3897 break;
3898 default:
3899 UNREACHABLE();
3900 }
3901}
3902
3903LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
3904 bool opt) const {
3905 const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
3906 const intptr_t kNumInputs = 1;
3907 const intptr_t kNumTemps = needs_temp ? 1 : 0;
3908 LocationSummary* summary = new (zone)
3909 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3910 summary->set_in(0, Location::RequiresFpuRegister());
3911 if (needs_temp) {
3912 summary->set_temp(0, Location::RequiresRegister());
3913 }
3914 summary->set_out(0, Location::RequiresRegister());
3915 return summary;
3916}
3917
3918Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
3919 BranchLabels labels) {
3920 ASSERT(compiler->is_optimizing());
3921 const VRegister value = locs()->in(0).fpu_reg();
3922 const bool is_negated = kind() != Token::kEQ;
3923
3924 switch (op_kind()) {
3925 case MethodRecognizer::kDouble_getIsNaN: {
3926 __ fcmpd(value, value);
3927 return is_negated ? VC : VS;
3928 }
3929 case MethodRecognizer::kDouble_getIsInfinite: {
3930 const Register temp = locs()->temp(0).reg();
3931 __ vmovrd(temp, value, 0);
3932 // Mask off the sign.
3933 __ AndImmediate(temp, temp, 0x7FFFFFFFFFFFFFFFLL);
3934 // Compare with +infinity.
3935 __ CompareImmediate(temp, 0x7FF0000000000000LL);
3936 return is_negated ? NE : EQ;
3937 }
3938 case MethodRecognizer::kDouble_getIsNegative: {
3939 const Register temp = locs()->temp(0).reg();
3940 compiler::Label not_zero;
3941 __ fcmpdz(value);
3942 // If it's NaN, it's not negative.
3943 __ b(is_negated ? labels.true_label : labels.false_label, VS);
3944 __ b(&not_zero, NOT_EQUAL);
3945 // Check for negative zero with a signed comparison.
3946 __ fmovrd(temp, value);
3947 __ CompareImmediate(temp, 0);
3948 __ Bind(&not_zero);
3949 return is_negated ? GE : LT;
3950 }
3951 default:
3952 UNREACHABLE();
3953 }
3954}
3955
3956// SIMD
3957
3958#define DEFINE_EMIT(Name, Args) \
3959 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
3960 PP_APPLY(PP_UNPACK, Args))
3961
3962#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
3963 V(Float32x4##Name, op##s) \
3964 V(Float64x2##Name, op##d)
3965
3966#define SIMD_OP_SIMPLE_BINARY(V) \
3967 SIMD_OP_FLOAT_ARITH(V, Add, vadd) \
3968 SIMD_OP_FLOAT_ARITH(V, Sub, vsub) \
3969 SIMD_OP_FLOAT_ARITH(V, Mul, vmul) \
3970 SIMD_OP_FLOAT_ARITH(V, Div, vdiv) \
3971 SIMD_OP_FLOAT_ARITH(V, Min, vmin) \
3972 SIMD_OP_FLOAT_ARITH(V, Max, vmax) \
3973 V(Int32x4Add, vaddw) \
3974 V(Int32x4Sub, vsubw) \
3975 V(Int32x4BitAnd, vand) \
3976 V(Int32x4BitOr, vorr) \
3977 V(Int32x4BitXor, veor) \
3978 V(Float32x4Equal, vceqs) \
3979 V(Float32x4GreaterThan, vcgts) \
3980 V(Float32x4GreaterThanOrEqual, vcges)
3981
3982DEFINE_EMIT(SimdBinaryOp, (VRegister result, VRegister left, VRegister right)) {
3983 switch (instr->kind()) {
3984#define EMIT(Name, op) \
3985 case SimdOpInstr::k##Name: \
3986 __ op(result, left, right); \
3987 break;
3988 SIMD_OP_SIMPLE_BINARY(EMIT)
3989#undef EMIT
3990 case SimdOpInstr::kFloat32x4ShuffleMix:
3991 case SimdOpInstr::kInt32x4ShuffleMix: {
3992 const intptr_t mask = instr->mask();
3993 __ vinss(result, 0, left, (mask >> 0) & 0x3);
3994 __ vinss(result, 1, left, (mask >> 2) & 0x3);
3995 __ vinss(result, 2, right, (mask >> 4) & 0x3);
3996 __ vinss(result, 3, right, (mask >> 6) & 0x3);
3997 break;
3998 }
3999 case SimdOpInstr::kFloat32x4NotEqual:
4000 __ vceqs(result, left, right);
4001 // Invert the result.
4002 __ vnot(result, result);
4003 break;
4004 case SimdOpInstr::kFloat32x4LessThan:
4005 __ vcgts(result, right, left);
4006 break;
4007 case SimdOpInstr::kFloat32x4LessThanOrEqual:
4008 __ vcges(result, right, left);
4009 break;
4010 case SimdOpInstr::kFloat32x4Scale:
4011 __ fcvtsd(VTMP, left);
4012 __ vdups(result, VTMP, 0);
4013 __ vmuls(result, result, right);
4014 break;
4015 case SimdOpInstr::kFloat64x2FromDoubles:
4016 __ vinsd(result, 0, left, 0);
4017 __ vinsd(result, 1, right, 0);
4018 break;
4019 case SimdOpInstr::kFloat64x2Scale:
4020 __ vdupd(VTMP, right, 0);
4021 __ vmuld(result, left, VTMP);
4022 break;
4023 default:
4024 UNREACHABLE();
4025 }
4026}
4027
4028#define SIMD_OP_SIMPLE_UNARY(V) \
4029 SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt) \
4030 SIMD_OP_FLOAT_ARITH(V, Negate, vneg) \
4031 SIMD_OP_FLOAT_ARITH(V, Abs, vabs) \
4032 V(Float32x4Reciprocal, VRecps) \
4033 V(Float32x4ReciprocalSqrt, VRSqrts)
4034
4035DEFINE_EMIT(SimdUnaryOp, (VRegister result, VRegister value)) {
4036 switch (instr->kind()) {
4037#define EMIT(Name, op) \
4038 case SimdOpInstr::k##Name: \
4039 __ op(result, value); \
4040 break;
4041 SIMD_OP_SIMPLE_UNARY(EMIT)
4042#undef EMIT
4043 case SimdOpInstr::kFloat32x4GetX:
4044 __ vinss(result, 0, value, 0);
4045 __ fcvtds(result, result);
4046 break;
4047 case SimdOpInstr::kFloat32x4GetY:
4048 __ vinss(result, 0, value, 1);
4049 __ fcvtds(result, result);
4050 break;
4051 case SimdOpInstr::kFloat32x4GetZ:
4052 __ vinss(result, 0, value, 2);
4053 __ fcvtds(result, result);
4054 break;
4055 case SimdOpInstr::kFloat32x4GetW:
4056 __ vinss(result, 0, value, 3);
4057 __ fcvtds(result, result);
4058 break;
4059 case SimdOpInstr::kInt32x4Shuffle:
4060 case SimdOpInstr::kFloat32x4Shuffle: {
4061 const intptr_t mask = instr->mask();
4062 if (mask == 0x00) {
4063 __ vdups(result, value, 0);
4064 } else if (mask == 0x55) {
4065 __ vdups(result, value, 1);
4066 } else if (mask == 0xAA) {
4067 __ vdups(result, value, 2);
4068 } else if (mask == 0xFF) {
4069 __ vdups(result, value, 3);
4070 } else {
4071 for (intptr_t i = 0; i < 4; i++) {
4072 __ vinss(result, i, value, (mask >> (2 * i)) & 0x3);
4073 }
4074 }
4075 break;
4076 }
4077 case SimdOpInstr::kFloat32x4Splat:
4078 // Convert to Float32.
4079 __ fcvtsd(VTMP, value);
4080 // Splat across all lanes.
4081 __ vdups(result, VTMP, 0);
4082 break;
4083 case SimdOpInstr::kFloat64x2GetX:
4084 __ vinsd(result, 0, value, 0);
4085 break;
4086 case SimdOpInstr::kFloat64x2GetY:
4087 __ vinsd(result, 0, value, 1);
4088 break;
4089 case SimdOpInstr::kFloat64x2Splat:
4090 __ vdupd(result, value, 0);
4091 break;
4092 case SimdOpInstr::kFloat64x2ToFloat32x4:
4093 // Zero register.
4094 __ veor(result, result, result);
4095 // Set X lane.
4096 __ vinsd(VTMP, 0, value, 0);
4097 __ fcvtsd(VTMP, VTMP);
4098 __ vinss(result, 0, VTMP, 0);
4099 // Set Y lane.
4100 __ vinsd(VTMP, 0, value, 1);
4101 __ fcvtsd(VTMP, VTMP);
4102 __ vinss(result, 1, VTMP, 0);
4103 break;
4104 case SimdOpInstr::kFloat32x4ToFloat64x2:
4105 // Set X.
4106 __ vinss(VTMP, 0, value, 0);
4107 __ fcvtds(VTMP, VTMP);
4108 __ vinsd(result, 0, VTMP, 0);
4109 // Set Y.
4110 __ vinss(VTMP, 0, value, 1);
4111 __ fcvtds(VTMP, VTMP);
4112 __ vinsd(result, 1, VTMP, 0);
4113 break;
4114 default:
4115 UNREACHABLE();
4116 }
4117}
4118
4119DEFINE_EMIT(Simd32x4GetSignMask,
4120 (Register out, VRegister value, Temp<Register> temp)) {
4121 // X lane.
4122 __ vmovrs(out, value, 0);
4123 __ LsrImmediate(out, out, 31);
4124 // Y lane.
4125 __ vmovrs(temp, value, 1);
4126 __ LsrImmediate(temp, temp, 31);
4127 __ orr(out, out, compiler::Operand(temp, LSL, 1));
4128 // Z lane.
4129 __ vmovrs(temp, value, 2);
4130 __ LsrImmediate(temp, temp, 31);
4131 __ orr(out, out, compiler::Operand(temp, LSL, 2));
4132 // W lane.
4133 __ vmovrs(temp, value, 3);
4134 __ LsrImmediate(temp, temp, 31);
4135 __ orr(out, out, compiler::Operand(temp, LSL, 3));
4136}
4137
4138DEFINE_EMIT(
4139 Float32x4FromDoubles,
4140 (VRegister r, VRegister v0, VRegister v1, VRegister v2, VRegister v3)) {
4141 __ fcvtsd(VTMP, v0);
4142 __ vinss(r, 0, VTMP, 0);
4143 __ fcvtsd(VTMP, v1);
4144 __ vinss(r, 1, VTMP, 0);
4145 __ fcvtsd(VTMP, v2);
4146 __ vinss(r, 2, VTMP, 0);
4147 __ fcvtsd(VTMP, v3);
4148 __ vinss(r, 3, VTMP, 0);
4149}
4150
4151DEFINE_EMIT(
4152 Float32x4Clamp,
4153 (VRegister result, VRegister value, VRegister lower, VRegister upper)) {
4154 __ vmins(result, value, upper);
4155 __ vmaxs(result, result, lower);
4156}
4157
4158DEFINE_EMIT(
4159 Float64x2Clamp,
4160 (VRegister result, VRegister value, VRegister lower, VRegister upper)) {
4161 __ vmind(result, value, upper);
4162 __ vmaxd(result, result, lower);
4163}
4164
4165DEFINE_EMIT(Float32x4With,
4166 (VRegister result, VRegister replacement, VRegister value)) {
4167 __ fcvtsd(VTMP, replacement);
4168 __ vmov(result, value);
4169 switch (instr->kind()) {
4170 case SimdOpInstr::kFloat32x4WithX:
4171 __ vinss(result, 0, VTMP, 0);
4172 break;
4173 case SimdOpInstr::kFloat32x4WithY:
4174 __ vinss(result, 1, VTMP, 0);
4175 break;
4176 case SimdOpInstr::kFloat32x4WithZ:
4177 __ vinss(result, 2, VTMP, 0);
4178 break;
4179 case SimdOpInstr::kFloat32x4WithW:
4180 __ vinss(result, 3, VTMP, 0);
4181 break;
4182 default:
4183 UNREACHABLE();
4184 }
4185}
4186
4187DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, VRegister value)) {
4188 // TODO(dartbug.com/30949) these operations are essentially nop and should
4189 // not generate any code. They should be removed from the graph before
4190 // code generation.
4191}
4192
4193DEFINE_EMIT(SimdZero, (VRegister v)) {
4194 __ veor(v, v, v);
4195}
4196
4197DEFINE_EMIT(Float64x2GetSignMask, (Register out, VRegister value)) {
4198 // Bits of X lane.
4199 __ vmovrd(out, value, 0);
4200 __ LsrImmediate(out, out, 63);
4201 // Bits of Y lane.
4202 __ vmovrd(TMP, value, 1);
4203 __ LsrImmediate(TMP, TMP, 63);
4204 __ orr(out, out, compiler::Operand(TMP, LSL, 1));
4205}
4206
4207DEFINE_EMIT(Float64x2With,
4208 (SameAsFirstInput, VRegister left, VRegister right)) {
4209 switch (instr->kind()) {
4210 case SimdOpInstr::kFloat64x2WithX:
4211 __ vinsd(left, 0, right, 0);
4212 break;
4213 case SimdOpInstr::kFloat64x2WithY:
4214 __ vinsd(left, 1, right, 0);
4215 break;
4216 default:
4217 UNREACHABLE();
4218 }
4219}
4220
4221DEFINE_EMIT(
4222 Int32x4FromInts,
4223 (VRegister result, Register v0, Register v1, Register v2, Register v3)) {
4224 __ veor(result, result, result);
4225 __ vinsw(result, 0, v0);
4226 __ vinsw(result, 1, v1);
4227 __ vinsw(result, 2, v2);
4228 __ vinsw(result, 3, v3);
4229}
4230
4231 DEFINE_EMIT(Int32x4FromBools,
4232 (VRegister result,
4233 Register v0,
4234 Register v1,
4235 Register v2,
4236 Register v3,
4237 Temp<Register> temp)) {
4238 __ veor(result, result, result);
4239 __ LoadImmediate(temp, 0xffffffff);
4240 __ LoadObject(TMP2, Bool::True());
4241
4242 const Register vs[] = {v0, v1, v2, v3};
4243 for (intptr_t i = 0; i < 4; i++) {
4244 __ CompareObjectRegisters(vs[i], TMP2);
4245 __ csel(TMP, temp, ZR, EQ);
4246 __ vinsw(result, i, TMP);
4247 }
4248}
4249
4250DEFINE_EMIT(Int32x4GetFlag, (Register result, VRegister value)) {
4251 switch (instr->kind()) {
4252 case SimdOpInstr::kInt32x4GetFlagX:
4253 __ vmovrs(result, value, 0);
4254 break;
4255 case SimdOpInstr::kInt32x4GetFlagY:
4256 __ vmovrs(result, value, 1);
4257 break;
4258 case SimdOpInstr::kInt32x4GetFlagZ:
4259 __ vmovrs(result, value, 2);
4260 break;
4261 case SimdOpInstr::kInt32x4GetFlagW:
4262 __ vmovrs(result, value, 3);
4263 break;
4264 default:
4265 UNREACHABLE();
4266 }
4267
4268 __ tst(result, compiler::Operand(result));
4269 __ LoadObject(result, Bool::True());
4270 __ LoadObject(TMP, Bool::False());
4271 __ csel(result, TMP, result, EQ);
4272}
4273
4274DEFINE_EMIT(Int32x4Select,
4275 (VRegister out,
4276 VRegister mask,
4277 VRegister trueValue,
4278 VRegister falseValue,
4279 Temp<VRegister> temp)) {
4280 // Copy mask.
4281 __ vmov(temp, mask);
4282 // Invert it.
4283 __ vnot(temp, temp);
4284 // mask = mask & trueValue.
4285 __ vand(mask, mask, trueValue);
4286 // temp = temp & falseValue.
4287 __ vand(temp, temp, falseValue);
4288 // out = mask | temp.
4289 __ vorr(out, mask, temp);
4290}
4291
4292DEFINE_EMIT(Int32x4WithFlag,
4293 (SameAsFirstInput, VRegister mask, Register flag)) {
4294 const VRegister result = mask;
4295 __ CompareObject(flag, Bool::True());
4296 __ LoadImmediate(TMP, 0xffffffff);
4297 __ csel(TMP, TMP, ZR, EQ);
4298 switch (instr->kind()) {
4299 case SimdOpInstr::kInt32x4WithFlagX:
4300 __ vinsw(result, 0, TMP);
4301 break;
4302 case SimdOpInstr::kInt32x4WithFlagY:
4303 __ vinsw(result, 1, TMP);
4304 break;
4305 case SimdOpInstr::kInt32x4WithFlagZ:
4306 __ vinsw(result, 2, TMP);
4307 break;
4308 case SimdOpInstr::kInt32x4WithFlagW:
4309 __ vinsw(result, 3, TMP);
4310 break;
4311 default:
4312 UNREACHABLE();
4313 }
4314}
4315
4316// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
4317// format:
4318//
4319// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
4320// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
4321//
4322#define SIMD_OP_VARIANTS(CASE, ____) \
4323 SIMD_OP_SIMPLE_BINARY(CASE) \
4324 CASE(Float32x4ShuffleMix) \
4325 CASE(Int32x4ShuffleMix) \
4326 CASE(Float32x4NotEqual) \
4327 CASE(Float32x4LessThan) \
4328 CASE(Float32x4LessThanOrEqual) \
4329 CASE(Float32x4Scale) \
4330 CASE(Float64x2FromDoubles) \
4331 CASE(Float64x2Scale) \
4332 ____(SimdBinaryOp) \
4333 SIMD_OP_SIMPLE_UNARY(CASE) \
4334 CASE(Float32x4GetX) \
4335 CASE(Float32x4GetY) \
4336 CASE(Float32x4GetZ) \
4337 CASE(Float32x4GetW) \
4338 CASE(Int32x4Shuffle) \
4339 CASE(Float32x4Shuffle) \
4340 CASE(Float32x4Splat) \
4341 CASE(Float64x2GetX) \
4342 CASE(Float64x2GetY) \
4343 CASE(Float64x2Splat) \
4344 CASE(Float64x2ToFloat32x4) \
4345 CASE(Float32x4ToFloat64x2) \
4346 ____(SimdUnaryOp) \
4347 CASE(Float32x4GetSignMask) \
4348 CASE(Int32x4GetSignMask) \
4349 ____(Simd32x4GetSignMask) \
4350 CASE(Float32x4FromDoubles) \
4351 ____(Float32x4FromDoubles) \
4352 CASE(Float32x4Zero) \
4353 CASE(Float64x2Zero) \
4354 ____(SimdZero) \
4355 CASE(Float32x4Clamp) \
4356 ____(Float32x4Clamp) \
4357 CASE(Float64x2Clamp) \
4358 ____(Float64x2Clamp) \
4359 CASE(Float32x4WithX) \
4360 CASE(Float32x4WithY) \
4361 CASE(Float32x4WithZ) \
4362 CASE(Float32x4WithW) \
4363 ____(Float32x4With) \
4364 CASE(Float32x4ToInt32x4) \
4365 CASE(Int32x4ToFloat32x4) \
4366 ____(Simd32x4ToSimd32x4) \
4367 CASE(Float64x2GetSignMask) \
4368 ____(Float64x2GetSignMask) \
4369 CASE(Float64x2WithX) \
4370 CASE(Float64x2WithY) \
4371 ____(Float64x2With) \
4372 CASE(Int32x4FromInts) \
4373 ____(Int32x4FromInts) \
4374 CASE(Int32x4FromBools) \
4375 ____(Int32x4FromBools) \
4376 CASE(Int32x4GetFlagX) \
4377 CASE(Int32x4GetFlagY) \
4378 CASE(Int32x4GetFlagZ) \
4379 CASE(Int32x4GetFlagW) \
4380 ____(Int32x4GetFlag) \
4381 CASE(Int32x4Select) \
4382 ____(Int32x4Select) \
4383 CASE(Int32x4WithFlagX) \
4384 CASE(Int32x4WithFlagY) \
4385 CASE(Int32x4WithFlagZ) \
4386 CASE(Int32x4WithFlagW) \
4387 ____(Int32x4WithFlag)
4388
4389LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4390 switch (kind()) {
4391#define CASE(Name, ...) case k##Name:
4392#define EMIT(Name) \
4393 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
4394 SIMD_OP_VARIANTS(CASE, EMIT)
4395#undef CASE
4396#undef EMIT
4397 case kIllegalSimdOp:
4398 UNREACHABLE();
4399 break;
4400 }
4401 UNREACHABLE();
4402 return nullptr;
4403}
4404
4405void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4406 switch (kind()) {
4407#define CASE(Name, ...) case k##Name:
4408#define EMIT(Name) \
4409 InvokeEmitter(compiler, this, &Emit##Name); \
4410 break;
4411 SIMD_OP_VARIANTS(CASE, EMIT)
4412#undef CASE
4413#undef EMIT
4414 case kIllegalSimdOp:
4415 UNREACHABLE();
4416 break;
4417 }
4418}
4419
4420#undef DEFINE_EMIT
4421
4422LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
4423 Zone* zone,
4424 bool opt) const {
4425 const intptr_t kNumTemps = 0;
4426 LocationSummary* summary = new (zone)
4427 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4428 summary->set_in(0, Location::RegisterLocation(R0));
4429 summary->set_in(1, Location::RegisterLocation(R1));
4430 summary->set_in(2, Location::RegisterLocation(R2));
4431 summary->set_in(3, Location::RegisterLocation(R3));
4432 summary->set_out(0, Location::RegisterLocation(R0));
4433 return summary;
4434}
4435
4436void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4437 compiler::LeafRuntimeScope rt(compiler->assembler(),
4438 /*frame_size=*/0,
4439 /*preserve_registers=*/false);
4440 // Call the function. Parameters are already in their correct spots.
4441 rt.Call(TargetFunction(), TargetFunction().argument_count());
4442}
4443
4444LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4445 bool opt) const {
4446 if (result_cid() == kDoubleCid) {
4447 const intptr_t kNumInputs = 2;
4448 const intptr_t kNumTemps = 0;
4449 LocationSummary* summary = new (zone)
4450 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4451 summary->set_in(0, Location::RequiresFpuRegister());
4452 summary->set_in(1, Location::RequiresFpuRegister());
4453 // Reuse the left register so that code can be made shorter.
4454 summary->set_out(0, Location::SameAsFirstInput());
4455 return summary;
4456 }
4457 ASSERT(result_cid() == kSmiCid);
4458 const intptr_t kNumInputs = 2;
4459 const intptr_t kNumTemps = 0;
4460 LocationSummary* summary = new (zone)
4461 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4462 summary->set_in(0, Location::RequiresRegister());
4463 summary->set_in(1, Location::RequiresRegister());
4464 // Reuse the left register so that code can be made shorter.
4465 summary->set_out(0, Location::SameAsFirstInput());
4466 return summary;
4467}
4468
4469void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4470 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4471 (op_kind() == MethodRecognizer::kMathMax));
4472 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4473 if (result_cid() == kDoubleCid) {
4474 compiler::Label done, returns_nan, are_equal;
4475 const VRegister left = locs()->in(0).fpu_reg();
4476 const VRegister right = locs()->in(1).fpu_reg();
4477 const VRegister result = locs()->out(0).fpu_reg();
4478 __ fcmpd(left, right);
4479 __ b(&returns_nan, VS);
4480 __ b(&are_equal, EQ);
4481 const Condition double_condition =
4482 is_min ? TokenKindToDoubleCondition(Token::kLTE)
4483 : TokenKindToDoubleCondition(Token::kGTE);
4484 ASSERT(left == result);
4485 __ b(&done, double_condition);
4486 __ fmovdd(result, right);
4487 __ b(&done);
4488
4489 __ Bind(&returns_nan);
4490 __ LoadDImmediate(result, NAN);
4491 __ b(&done);
4492
4493 __ Bind(&are_equal);
4494 // Check for negative zero: -0.0 is equal to 0.0, but min or max must return
4495 // -0.0 or 0.0 respectively.
4496 // Check for negative left value (get the sign bit):
4497 // - min -> left is negative ? left : right.
4498 // - max -> left is negative ? right : left
4499 // Check the sign bit.
4500 __ fmovrd(TMP, left); // Sign bit is in bit 63 of TMP.
4501 __ CompareImmediate(TMP, 0);
4502 if (is_min) {
4503 ASSERT(left == result);
4504 __ b(&done, LT);
4505 __ fmovdd(result, right);
4506 } else {
4507 __ b(&done, GE);
4508 __ fmovdd(result, right);
4509 ASSERT(left == result);
4510 }
4511 __ Bind(&done);
4512 return;
4513 }
4514
4515 ASSERT(result_cid() == kSmiCid);
4516 const Register left = locs()->in(0).reg();
4517 const Register right = locs()->in(1).reg();
4518 const Register result = locs()->out(0).reg();
4519 __ CompareObjectRegisters(left, right);
4520 ASSERT(result == left);
4521 if (is_min) {
4522 __ csel(result, right, left, GT);
4523 } else {
4524 __ csel(result, right, left, LT);
4525 }
4526}
4527
4528LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4529 bool opt) const {
4530 const intptr_t kNumInputs = 1;
4531 const intptr_t kNumTemps = 0;
4532 LocationSummary* summary = new (zone)
4533 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4534 summary->set_in(0, Location::RequiresRegister());
4535 // We make use of 3-operand instructions by not requiring result register
4536 // to be identical to first input register as on Intel.
4537 summary->set_out(0, Location::RequiresRegister());
4538 return summary;
4539}
4540
4541void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4542 const Register value = locs()->in(0).reg();
4543 const Register result = locs()->out(0).reg();
4544 switch (op_kind()) {
4545 case Token::kNEGATE: {
4546 compiler::Label* deopt =
4547 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4548 __ subs(result, ZR, compiler::Operand(value), compiler::kObjectBytes);
4549 __ b(deopt, VS);
4550 break;
4551 }
4552 case Token::kBIT_NOT:
4553 __ mvn_(result, value);
4554 // Remove inverted smi-tag.
4555 __ andi(result, result, compiler::Immediate(~kSmiTagMask));
4556 break;
4557 default:
4558 UNREACHABLE();
4559 }
4560}
4561
4562LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4563 bool opt) const {
4564 const intptr_t kNumInputs = 1;
4565 const intptr_t kNumTemps = 0;
4566 LocationSummary* summary = new (zone)
4567 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4568 summary->set_in(0, Location::RequiresFpuRegister());
4569 summary->set_out(0, Location::RequiresFpuRegister());
4570 return summary;
4571}
4572
4573void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4574 ASSERT(representation() == kUnboxedDouble);
4575 const VRegister result = locs()->out(0).fpu_reg();
4576 const VRegister value = locs()->in(0).fpu_reg();
4577 switch (op_kind()) {
4578 case Token::kNEGATE:
4579 __ fnegd(result, value);
4580 break;
4581 case Token::kSQRT:
4582 __ fsqrtd(result, value);
4583 break;
4584 case Token::kSQUARE:
4585 __ fmuld(result, value, value);
4586 break;
4587 default:
4588 UNREACHABLE();
4589 }
4590}
4591
4592LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4593 bool opt) const {
4594 const intptr_t kNumInputs = 1;
4595 const intptr_t kNumTemps = 0;
4596 LocationSummary* result = new (zone)
4597 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4598 result->set_in(0, Location::RequiresRegister());
4599 result->set_out(0, Location::RequiresFpuRegister());
4600 return result;
4601}
4602
4603void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4604 const Register value = locs()->in(0).reg();
4605 const VRegister result = locs()->out(0).fpu_reg();
4606 __ scvtfdw(result, value);
4607}
4608
4609LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4610 bool opt) const {
4611 const intptr_t kNumInputs = 1;
4612 const intptr_t kNumTemps = 0;
4613 LocationSummary* result = new (zone)
4614 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4615 result->set_in(0, Location::RequiresRegister());
4616 result->set_out(0, Location::RequiresFpuRegister());
4617 return result;
4618}
4619
4620void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4621 const Register value = locs()->in(0).reg();
4622 const VRegister result = locs()->out(0).fpu_reg();
4623 __ SmiUntag(TMP, value);
4624#if !defined(DART_COMPRESSED_POINTERS)
4625 __ scvtfdx(result, TMP);
4626#else
4627 __ scvtfdw(result, TMP);
4628#endif
4629}
4630
4631LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4632 bool opt) const {
4633 const intptr_t kNumInputs = 1;
4634 const intptr_t kNumTemps = 0;
4635 LocationSummary* result = new (zone)
4636 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4637 result->set_in(0, Location::RequiresRegister());
4638 result->set_out(0, Location::RequiresFpuRegister());
4639 return result;
4640}
4641
4642void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4643 const Register value = locs()->in(0).reg();
4644 const VRegister result = locs()->out(0).fpu_reg();
4645 __ scvtfdx(result, value);
4646}
4647
4648LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4649 bool opt) const {
4650 const intptr_t kNumInputs = 1;
4651 const intptr_t kNumTemps = 0;
4652 LocationSummary* result = new (zone) LocationSummary(
4653 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4654 result->set_in(0, Location::RequiresFpuRegister());
4655 result->set_out(0, Location::RequiresRegister());
4656 return result;
4657}
4658
4659void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4660 const Register result = locs()->out(0).reg();
4661 const VRegister value_double = locs()->in(0).fpu_reg();
4662
4663 DoubleToIntegerSlowPath* slow_path =
4664 new DoubleToIntegerSlowPath(this, value_double);
4665 compiler->AddSlowPathCode(slow_path);
4666
4667 // First check for NaN. Checking for minint after the conversion doesn't work
4668 // on ARM64 because fcvtzs gives 0 for NaN.
4669 __ fcmpd(value_double, value_double);
4670 __ b(slow_path->entry_label(), VS);
4671
4672 switch (recognized_kind()) {
4673 case MethodRecognizer::kDoubleToInteger:
4674 __ fcvtzsxd(result, value_double);
4675 break;
4676 case MethodRecognizer::kDoubleFloorToInt:
4677 __ fcvtmsxd(result, value_double);
4678 break;
4679 case MethodRecognizer::kDoubleCeilToInt:
4680 __ fcvtpsxd(result, value_double);
4681 break;
4682 default:
4683 UNREACHABLE();
4684 }
4685 // Overflow is signaled with minint.
4686
4687#if !defined(DART_COMPRESSED_POINTERS)
4688 // Check for overflow and that it fits into Smi.
4689 __ CompareImmediate(result, 0xC000000000000000);
4690 __ b(slow_path->entry_label(), MI);
4691#else
4692 // Check for overflow and that it fits into Smi.
4693 __ AsrImmediate(TMP, result, 30);
4694 __ cmp(TMP, compiler::Operand(result, ASR, 63));
4695 __ b(slow_path->entry_label(), NE);
4696#endif
4697 __ SmiTag(result);
4698 __ Bind(slow_path->exit_label());
4699}
4700
4701LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4702 bool opt) const {
4703 const intptr_t kNumInputs = 1;
4704 const intptr_t kNumTemps = 0;
4705 LocationSummary* result = new (zone)
4706 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4707 result->set_in(0, Location::RequiresFpuRegister());
4708 result->set_out(0, Location::RequiresRegister());
4709 return result;
4710}
4711
4712void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4713 compiler::Label* deopt =
4714 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4715 const Register result = locs()->out(0).reg();
4716 const VRegister value = locs()->in(0).fpu_reg();
4717 // First check for NaN. Checking for minint after the conversion doesn't work
4718 // on ARM64 because fcvtzs gives 0 for NaN.
4719 // TODO(zra): Check spec that this is true.
4720 __ fcmpd(value, value);
4721 __ b(deopt, VS);
4722
4723 __ fcvtzsxd(result, value);
4724
4725#if !defined(DART_COMPRESSED_POINTERS)
4726 // Check for overflow and that it fits into Smi.
4727 __ CompareImmediate(result, 0xC000000000000000);
4728 __ b(deopt, MI);
4729#else
4730 // Check for overflow and that it fits into Smi.
4731 __ AsrImmediate(TMP, result, 30);
4732 __ cmp(TMP, compiler::Operand(result, ASR, 63));
4733 __ b(deopt, NE);
4734#endif
4735 __ SmiTag(result);
4736}
4737
4738LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4739 bool opt) const {
4740 const intptr_t kNumInputs = 1;
4741 const intptr_t kNumTemps = 0;
4742 LocationSummary* result = new (zone)
4743 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4744 result->set_in(0, Location::RequiresFpuRegister());
4745 result->set_out(0, Location::RequiresFpuRegister());
4746 return result;
4747}
4748
4749void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4750 const VRegister value = locs()->in(0).fpu_reg();
4751 const VRegister result = locs()->out(0).fpu_reg();
4752 __ fcvtsd(result, value);
4753}
4754
4755LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4756 bool opt) const {
4757 const intptr_t kNumInputs = 1;
4758 const intptr_t kNumTemps = 0;
4759 LocationSummary* result = new (zone)
4760 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4761 result->set_in(0, Location::RequiresFpuRegister());
4762 result->set_out(0, Location::RequiresFpuRegister());
4763 return result;
4764}
4765
4766void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4767 const VRegister value = locs()->in(0).fpu_reg();
4768 const VRegister result = locs()->out(0).fpu_reg();
4769 __ fcvtds(result, value);
4770}
4771
4772LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4773 bool opt) const {
4774 UNREACHABLE();
4775 return NULL;
4776}
4777
4778void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4779 UNREACHABLE();
4780}
4781
4782LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4783 bool opt) const {
4784 ASSERT((InputCount() == 1) || (InputCount() == 2));
4785 const intptr_t kNumTemps =
4786 (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0;
4787 LocationSummary* result = new (zone)
4788 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4789 result->set_in(0, Location::FpuRegisterLocation(V0));
4790 if (InputCount() == 2) {
4791 result->set_in(1, Location::FpuRegisterLocation(V1));
4792 }
4793 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4795 }
4796 result->set_out(0, Location::FpuRegisterLocation(V0));
4797 return result;
4798}
4799
4800// Pseudo code:
4801// if (exponent == 0.0) return 1.0;
4802// // Speed up simple cases.
4803// if (exponent == 1.0) return base;
4804// if (exponent == 2.0) return base * base;
4805// if (exponent == 3.0) return base * base * base;
4806// if (base == 1.0) return 1.0;
4807// if (base.isNaN || exponent.isNaN) {
4808// return double.NAN;
4809// }
4810// if (base != -Infinity && exponent == 0.5) {
4811// if (base == 0.0) return 0.0;
4812// return sqrt(value);
4813// }
4814// TODO(srdjan): Move into a stub?
4815static void InvokeDoublePow(FlowGraphCompiler* compiler,
4816 InvokeMathCFunctionInstr* instr) {
4817 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
4818 const intptr_t kInputCount = 2;
4819 ASSERT(instr->InputCount() == kInputCount);
4820 LocationSummary* locs = instr->locs();
4821
4822 const VRegister base = locs->in(0).fpu_reg();
4823 const VRegister exp = locs->in(1).fpu_reg();
4824 const VRegister result = locs->out(0).fpu_reg();
4825 const VRegister saved_base = locs->temp(0).fpu_reg();
4826 ASSERT((base == result) && (result != saved_base));
4827
4828 compiler::Label skip_call, try_sqrt, check_base, return_nan, do_pow;
4829 __ fmovdd(saved_base, base);
4830 __ LoadDImmediate(result, 1.0);
4831 // exponent == 0.0 -> return 1.0;
4832 __ fcmpdz(exp);
4833 __ b(&check_base, VS); // NaN -> check base.
4834 __ b(&skip_call, EQ); // exp is 0.0, result is 1.0.
4835
4836 // exponent == 1.0 ?
4837 __ fcmpd(exp, result);
4838 compiler::Label return_base;
4839 __ b(&return_base, EQ);
4840
4841 // exponent == 2.0 ?
4842 __ LoadDImmediate(VTMP, 2.0);
4843 __ fcmpd(exp, VTMP);
4844 compiler::Label return_base_times_2;
4845 __ b(&return_base_times_2, EQ);
4846
4847 // exponent == 3.0 ?
4848 __ LoadDImmediate(VTMP, 3.0);
4849 __ fcmpd(exp, VTMP);
4850 __ b(&check_base, NE);
4851
4852 // base_times_3.
4853 __ fmuld(result, saved_base, saved_base);
4854 __ fmuld(result, result, saved_base);
4855 __ b(&skip_call);
4856
4857 __ Bind(&return_base);
4858 __ fmovdd(result, saved_base);
4859 __ b(&skip_call);
4860
4861 __ Bind(&return_base_times_2);
4862 __ fmuld(result, saved_base, saved_base);
4863 __ b(&skip_call);
4864
4865 __ Bind(&check_base);
4866 // Note: 'exp' could be NaN.
4867 // base == 1.0 -> return 1.0;
4868 __ fcmpd(saved_base, result);
4869 __ b(&return_nan, VS);
4870 __ b(&skip_call, EQ); // base is 1.0, result is 1.0.
4871
4872 __ fcmpd(saved_base, exp);
4873 __ b(&try_sqrt, VC); // Neither 'exp' nor 'base' is NaN.
4874
4875 __ Bind(&return_nan);
4876 __ LoadDImmediate(result, NAN);
4877 __ b(&skip_call);
4878
4879 compiler::Label return_zero;
4880 __ Bind(&try_sqrt);
4881
4882 // Before calling pow, check if we could use sqrt instead of pow.
4883 __ LoadDImmediate(result, kNegInfinity);
4884
4885 // base == -Infinity -> call pow;
4886 __ fcmpd(saved_base, result);
4887 __ b(&do_pow, EQ);
4888
4889 // exponent == 0.5 ?
4890 __ LoadDImmediate(result, 0.5);
4891 __ fcmpd(exp, result);
4892 __ b(&do_pow, NE);
4893
4894 // base == 0 -> return 0;
4895 __ fcmpdz(saved_base);
4896 __ b(&return_zero, EQ);
4897
4898 __ fsqrtd(result, saved_base);
4899 __ b(&skip_call);
4900
4901 __ Bind(&return_zero);
4902 __ LoadDImmediate(result, 0.0);
4903 __ b(&skip_call);
4904
4905 __ Bind(&do_pow);
4906 __ fmovdd(base, saved_base); // Restore base.
4907 {
4908 compiler::LeafRuntimeScope rt(compiler->assembler(),
4909 /*frame_size=*/0,
4910 /*preserve_registers=*/false);
4911 ASSERT(base == V0);
4912 ASSERT(exp == V1);
4913 rt.Call(instr->TargetFunction(), kInputCount);
4914 ASSERT(result == V0);
4915 }
4916 __ Bind(&skip_call);
4917}
4918
4919void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4920 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4921 InvokeDoublePow(compiler, this);
4922 return;
4923 }
4924
4925 compiler::LeafRuntimeScope rt(compiler->assembler(),
4926 /*frame_size=*/0,
4927 /*preserve_registers=*/false);
4928 ASSERT(locs()->in(0).fpu_reg() == V0);
4929 if (InputCount() == 2) {
4930 ASSERT(locs()->in(1).fpu_reg() == V1);
4931 }
4932 rt.Call(TargetFunction(), InputCount());
4933 ASSERT(locs()->out(0).fpu_reg() == V0);
4934}
4935
4936LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4937 bool opt) const {
4938 // Only use this instruction in optimized code.
4939 ASSERT(opt);
4940 const intptr_t kNumInputs = 1;
4941 LocationSummary* summary =
4942 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4943 if (representation() == kUnboxedDouble) {
4944 if (index() == 0) {
4945 summary->set_in(
4946 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
4947 } else {
4948 ASSERT(index() == 1);
4949 summary->set_in(
4950 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
4951 }
4952 summary->set_out(0, Location::RequiresFpuRegister());
4953 } else {
4954 ASSERT(representation() == kTagged);
4955 if (index() == 0) {
4956 summary->set_in(
4957 0, Location::Pair(Location::RequiresRegister(), Location::Any()));
4958 } else {
4959 ASSERT(index() == 1);
4960 summary->set_in(
4961 0, Location::Pair(Location::Any(), Location::RequiresRegister()));
4962 }
4963 summary->set_out(0, Location::RequiresRegister());
4964 }
4965 return summary;
4966}
4967
4968void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4969 ASSERT(locs()->in(0).IsPairLocation());
4970 PairLocation* pair = locs()->in(0).AsPairLocation();
4971 Location in_loc = pair->At(index());
4972 if (representation() == kUnboxedDouble) {
4973 const VRegister out = locs()->out(0).fpu_reg();
4974 const VRegister in = in_loc.fpu_reg();
4975 __ fmovdd(out, in);
4976 } else {
4977 ASSERT(representation() == kTagged);
4978 const Register out = locs()->out(0).reg();
4979 const Register in = in_loc.reg();
4980 __ mov(out, in);
4981 }
4982}
4983
4984LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
4985 bool opt) const {
4986 UNREACHABLE();
4987 return NULL;
4988}
4989
4990void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4991 UNREACHABLE();
4992}
4993
4994LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
4995 bool opt) const {
4996 UNREACHABLE();
4997 return NULL;
4998}
4999
5000void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5001 UNREACHABLE();
5002}
5003
5004LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5005 bool opt) const {
5006 const intptr_t kNumInputs = 2;
5007 const intptr_t kNumTemps = 0;
5008 LocationSummary* summary = new (zone)
5009 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5010 summary->set_in(0, Location::RequiresRegister());
5011 summary->set_in(1, Location::RequiresRegister());
5012 // Output is a pair of registers.
5013 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5014 Location::RequiresRegister()));
5015 return summary;
5016}
5017
5018void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5019 ASSERT(CanDeoptimize());
5020 compiler::Label* deopt =
5021 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5022 const Register left = locs()->in(0).reg();
5023 const Register right = locs()->in(1).reg();
5024 ASSERT(locs()->out(0).IsPairLocation());
5025 const PairLocation* pair = locs()->out(0).AsPairLocation();
5026 const Register result_div = pair->At(0).reg();
5027 const Register result_mod = pair->At(1).reg();
5028 if (RangeUtils::CanBeZero(divisor_range())) {
5029 // Handle divide by zero in runtime.
5030 __ CompareObjectRegisters(right, ZR);
5031 __ b(deopt, EQ);
5032 }
5033
5034 __ SmiUntag(result_mod, left);
5035 __ SmiUntag(TMP, right);
5036
5037 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
5038 // case we cannot tag the result.
5039#if !defined(DART_COMPRESSED_POINTERS)
5040 __ sdiv(result_div, result_mod, TMP);
5041 __ CompareImmediate(result_div, 0x4000000000000000);
5042#else
5043 __ sdivw(result_div, result_mod, TMP);
5044 __ CompareImmediate(result_div, 0x40000000, compiler::kFourBytes);
5045#endif
5046 __ b(deopt, EQ);
5047 // result_mod <- left - right * result_div.
5048 __ msub(result_mod, TMP, result_div, result_mod, compiler::kObjectBytes);
5049 __ SmiTag(result_div);
5050 __ SmiTag(result_mod);
5051 // Correct MOD result:
5052 // res = left % right;
5053 // if (res < 0) {
5054 // if (right < 0) {
5055 // res = res - right;
5056 // } else {
5057 // res = res + right;
5058 // }
5059 // }
5060 compiler::Label done;
5061 __ CompareObjectRegisters(result_mod, ZR);
5062 __ b(&done, GE);
5063 // Result is negative, adjust it.
5064 if (RangeUtils::IsNegative(divisor_range())) {
5065 __ sub(result_mod, result_mod, compiler::Operand(right));
5066 } else if (RangeUtils::IsPositive(divisor_range())) {
5067 __ add(result_mod, result_mod, compiler::Operand(right));
5068 } else {
5069 __ CompareObjectRegisters(right, ZR);
5070 __ sub(TMP2, result_mod, compiler::Operand(right), compiler::kObjectBytes);
5071 __ add(TMP, result_mod, compiler::Operand(right), compiler::kObjectBytes);
5072 __ csel(result_mod, TMP, TMP2, GE);
5073 }
5074 __ Bind(&done);
5075}
5076
5077// Should be kept in sync with integers.cc Multiply64Hash
5078static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5079 const Register value,
5080 const Register result) {
5081 ASSERT(value != TMP2);
5082 ASSERT(result != TMP2);
5083 ASSERT(value != result);
5084 __ LoadImmediate(TMP2, compiler::Immediate(0x2d51));
5085 __ mul(result, value, TMP2);
5086 __ umulh(value, value, TMP2);
5087 __ eor(result, result, compiler::Operand(value));
5088 __ eor(result, result, compiler::Operand(result, LSR, 32));
5089}
5090
5091LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5092 bool opt) const {
5093 const intptr_t kNumInputs = 1;
5094 const intptr_t kNumTemps = 1;
5095 LocationSummary* summary = new (zone)
5096 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5097 summary->set_in(0, Location::RequiresFpuRegister());
5098 summary->set_temp(0, Location::RequiresFpuRegister());
5099 summary->set_out(0, Location::RequiresRegister());
5100 return summary;
5101}
5102
5103void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5104 const VRegister value = locs()->in(0).fpu_reg();
5105 const VRegister temp_double = locs()->temp(0).fpu_reg();
5106 const Register result = locs()->out(0).reg();
5107
5108 compiler::Label done, hash_double;
5109 __ vmovrd(TMP, value, 0);
5110 __ AndImmediate(TMP, TMP, 0x7FF0000000000000LL);
5111 __ CompareImmediate(TMP, 0x7FF0000000000000LL);
5112 __ b(&hash_double, EQ); // is_infinity or nan
5113
5114 __ fcvtzsxd(TMP, value);
5115 __ scvtfdx(temp_double, TMP);
5116 __ fcmpd(temp_double, value);
5117 __ b(&hash_double, NE);
5118
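  // The value converts exactly to an int64, so hash those bits with the same
  // sequence used for integers, keeping hash codes of numerically equal ints
  // and doubles consistent.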
5119 EmitHashIntegerCodeSequence(compiler, TMP, result);
5120 __ AndImmediate(result, result, 0x3fffffff);
5121 __ b(&done);
5122
5123 __ Bind(&hash_double);
5124 __ fmovrd(result, value);
5125 __ eor(result, result, compiler::Operand(result, LSR, 32));
5126 __ AndImmediate(result, result, compiler::target::kSmiMax);
5127
5128 __ Bind(&done);
5129}
5130
5131LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5132 bool opt) const {
5133 const intptr_t kNumInputs = 1;
5134 const intptr_t kNumTemps = 0;
5135 LocationSummary* summary = new (zone)
5136 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5137 summary->set_in(0, Location::RequiresRegister());
5138 summary->set_out(0, Location::RequiresRegister());
5139 return summary;
5140}
5141
5142void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5143 Register value = locs()->in(0).reg();
5144 Register result = locs()->out(0).reg();
5145
5146 if (smi_) {
5147 __ SmiUntag(TMP, value);
5148 } else {
5149 __ LoadFieldFromOffset(TMP, value, Mint::value_offset());
5150 }
5151
5152 EmitHashIntegerCodeSequence(compiler, TMP, result);
5153 __ ubfm(result, result, 63, 29); // SmiTag(result & 0x3fffffff)
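  // (ubfm with immr=63, imms=29 copies the low 30 bits of the hash to bit
  // position 1, i.e. it masks to 30 bits and applies the Smi tag shift in a
  // single instruction.)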
5154}
5155
5156LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5157  comparison()->InitializeLocationSummary(zone, opt);
5158  // Branches don't produce a result.
5159  comparison()->locs()->set_out(0, Location::NoLocation());
5160  return comparison()->locs();
5161}
5162
5163void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5164  comparison()->EmitBranchCode(compiler, this);
5165}
5166
5167LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5168 bool opt) const {
5169 const intptr_t kNumInputs = 1;
5170 const bool need_mask_temp = IsBitTest();
5171 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5172 LocationSummary* summary = new (zone)
5173 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5174 summary->set_in(0, Location::RequiresRegister());
5175 if (!IsNullCheck()) {
5176 summary->set_temp(0, Location::RequiresRegister());
5177 if (need_mask_temp) {
5178 summary->set_temp(1, Location::RequiresRegister());
5179 }
5180 }
5181 return summary;
5182}
5183
5184void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5185 compiler::Label* deopt) {
5186  __ CompareObject(locs()->in(0).reg(), Object::null_object());
5187  ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull());
5188  Condition cond = IsDeoptIfNull() ? EQ : NE;
5189 __ b(deopt, cond);
5190}
5191
5192void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5193 intptr_t min,
5194 intptr_t max,
5195 intptr_t mask,
5196 compiler::Label* deopt) {
5197 Register biased_cid = locs()->temp(0).reg();
5198 __ AddImmediate(biased_cid, -min);
5199 __ CompareImmediate(biased_cid, max - min);
5200 __ b(deopt, HI);
5201
5202 Register bit_reg = locs()->temp(1).reg();
5203 __ LoadImmediate(bit_reg, 1);
5204 __ lslv(bit_reg, bit_reg, biased_cid);
5205 __ TestImmediate(bit_reg, mask);
5206 __ b(deopt, EQ);
5207}
5208
5209int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5210 int bias,
5211 intptr_t cid_start,
5212 intptr_t cid_end,
5213 bool is_last,
5214 compiler::Label* is_ok,
5215 compiler::Label* deopt,
5216 bool use_near_jump) {
5217 Register biased_cid = locs()->temp(0).reg();
5218 Condition no_match, match;
5219 if (cid_start == cid_end) {
5220 __ CompareImmediate(biased_cid, cid_start - bias);
5221 no_match = NE;
5222 match = EQ;
5223 } else {
5224 // For class ID ranges use a subtract followed by an unsigned
5225 // comparison to check both ends of the ranges with one comparison.
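    // For example, checking cid in [10, 14] becomes an unsigned
    // (cid - 10) <= 4 comparison.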
5226 __ AddImmediate(biased_cid, bias - cid_start);
5227 bias = cid_start;
5228 __ CompareImmediate(biased_cid, cid_end - cid_start);
5229 no_match = HI; // Unsigned higher.
5230 match = LS; // Unsigned lower or same.
5231 }
5232 if (is_last) {
5233 __ b(deopt, no_match);
5234 } else {
5235 __ b(is_ok, match);
5236 }
5237 return bias;
5238}
5239
5240LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5241 bool opt) const {
5242 const intptr_t kNumInputs = 1;
5243 const intptr_t kNumTemps = 0;
5244 LocationSummary* summary = new (zone)
5245 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5246 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5247 : Location::WritableRegister());
5248 return summary;
5249}
5250
5251void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5252 Register value = locs()->in(0).reg();
5253 compiler::Label* deopt =
5254 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5255 if (cids_.IsSingleCid()) {
5256 __ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
5257 __ b(deopt, NE);
5258 } else {
5259 __ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
5260 __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start));
5261 __ b(deopt, HI); // Unsigned higher.
5262 }
5263}
5264
5265LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5266 bool opt) const {
5267 const intptr_t kNumInputs = 1;
5268 const intptr_t kNumTemps = 0;
5269 LocationSummary* summary = new (zone)
5270 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5271 summary->set_in(0, Location::RequiresRegister());
5272 return summary;
5273}
5274
5275void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5276 const Register value = locs()->in(0).reg();
5277 compiler::Label* deopt =
5278 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5279 __ BranchIfNotSmi(value, deopt);
5280}
5281
5282void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5283 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5284 compiler->AddSlowPathCode(slow_path);
5285
5286 Register value_reg = locs()->in(0).reg();
5287 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5288 // in order to be able to allocate it on register.
5289 __ CompareObject(value_reg, Object::null_object());
5290 __ BranchIf(EQUAL, slow_path->entry_label());
5291}
5292
5293LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5294 bool opt) const {
5295 const intptr_t kNumInputs = 2;
5296 const intptr_t kNumTemps = 0;
5297 LocationSummary* locs = new (zone)
5298      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5299  locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
5300  locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
5301  return locs;
5302}
5303
5304void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5305 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5306 compiler::Label* deopt =
5307 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5308
5309 Location length_loc = locs()->in(kLengthPos);
5310 Location index_loc = locs()->in(kIndexPos);
5311
5312 const intptr_t index_cid = index()->Type()->ToCid();
5313 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5314 // TODO(srdjan): remove this code once failures are fixed.
5315 if ((Smi::Cast(length_loc.constant()).Value() >
5316 Smi::Cast(index_loc.constant()).Value()) &&
5317 (Smi::Cast(index_loc.constant()).Value() >= 0)) {
5318 // This CheckArrayBoundInstr should have been eliminated.
5319 return;
5320 }
5321 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5322 Smi::Cast(index_loc.constant()).Value()) ||
5323 (Smi::Cast(index_loc.constant()).Value() < 0));
5324 // Unconditionally deoptimize for constant bounds checks because they
5325    // only occur when the index is out-of-bounds.
5326 __ b(deopt);
5327 return;
5328 }
5329
5330 if (index_loc.IsConstant()) {
5331 const Register length = length_loc.reg();
5332 const Smi& index = Smi::Cast(index_loc.constant());
5333 __ CompareObject(length, index);
5334 __ b(deopt, LS);
5335 } else if (length_loc.IsConstant()) {
5336 const Smi& length = Smi::Cast(length_loc.constant());
5337 const Register index = index_loc.reg();
5338 if (index_cid != kSmiCid) {
5339 __ BranchIfNotSmi(index, deopt);
5340 }
5341 if (length.Value() == Smi::kMaxValue) {
5342 __ tst(index, compiler::Operand(index), compiler::kObjectBytes);
5343 __ b(deopt, MI);
5344 } else {
5345 __ CompareObject(index, length);
5346 __ b(deopt, CS);
5347 }
5348 } else {
5349 const Register length = length_loc.reg();
5350 const Register index = index_loc.reg();
5351 if (index_cid != kSmiCid) {
5352 __ BranchIfNotSmi(index, deopt);
5353 }
5354 __ CompareObjectRegisters(index, length);
5355 __ b(deopt, CS);
5356 }
5357}
5358
5359LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5360 bool opt) const {
5361 const intptr_t kNumInputs = 1;
5362 const intptr_t kNumTemps = 0;
5363 LocationSummary* locs = new (zone) LocationSummary(
5364 zone, kNumInputs, kNumTemps,
5365      UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
5366                                 : LocationSummary::kCallOnSlowPath);
5367  locs->set_in(kReceiver, Location::RequiresRegister());
5368 return locs;
5369}
5370
5371void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5372 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5373 compiler->AddSlowPathCode(slow_path);
5374 __ ldr(TMP,
5375 compiler::FieldAddress(locs()->in(0).reg(),
5376                                compiler::target::Object::tags_offset()),
5377         compiler::kUnsignedByte);
5378  // In the first byte.
5379 ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
5380 __ tbnz(slow_path->entry_label(), TMP,
5381 compiler::target::UntaggedObject::kImmutableBit);
5382}
5383
5384class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
5385 public:
5386 Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
5387 Register divisor,
5388 Range* divisor_range,
5389 Register tmp,
5390 Register out)
5391 : ThrowErrorSlowPathCode(instruction,
5392 kIntegerDivisionByZeroExceptionRuntimeEntry),
5393 is_mod_(instruction->op_kind() == Token::kMOD),
5394 divisor_(divisor),
5395 divisor_range_(divisor_range),
5396 tmp_(tmp),
5397 out_(out),
5398 adjust_sign_label_() {}
5399
5400 void EmitNativeCode(FlowGraphCompiler* compiler) override {
5401 // Handle modulo/division by zero, if needed. Use superclass code.
5402 if (has_divide_by_zero()) {
5403 ThrowErrorSlowPathCode::EmitNativeCode(compiler);
5404 } else {
5405 __ Bind(entry_label()); // not used, but keeps destructor happy
5406 if (compiler::Assembler::EmittingComments()) {
5407 __ Comment("slow path %s operation (no throw)", name());
5408 }
5409 }
5410 // Adjust modulo for negative sign, optimized for known ranges.
5411 // if (divisor < 0)
5412 // out -= divisor;
5413 // else
5414 // out += divisor;
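    // For example, -5 % 3 reaches this slow path with out == -2; the divisor
    // is positive, so the result becomes -2 + 3 == 1.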
5415 if (has_adjust_sign()) {
5416 __ Bind(adjust_sign_label());
5417 if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
5418 // General case.
5419 __ CompareRegisters(divisor_, ZR);
5420 __ sub(tmp_, out_, compiler::Operand(divisor_));
5421 __ add(out_, out_, compiler::Operand(divisor_));
5422 __ csel(out_, tmp_, out_, LT);
5423 } else if (divisor_range_->IsPositive()) {
5424 // Always positive.
5425 __ add(out_, out_, compiler::Operand(divisor_));
5426 } else {
5427 // Always negative.
5428 __ sub(out_, out_, compiler::Operand(divisor_));
5429 }
5430 __ b(exit_label());
5431 }
5432 }
5433
5434 const char* name() override { return "int64 divide"; }
5435
5436 bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
5437
5438 bool has_adjust_sign() { return is_mod_; }
5439
5440 bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
5441
5442 compiler::Label* adjust_sign_label() {
5443 ASSERT(has_adjust_sign());
5444 return &adjust_sign_label_;
5445 }
5446
5447 private:
5448 bool is_mod_;
5449 Register divisor_;
5450 Range* divisor_range_;
5451 Register tmp_;
5452 Register out_;
5453 compiler::Label adjust_sign_label_;
5454};
5455
5456static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
5457 BinaryInt64OpInstr* instruction,
5458 Token::Kind op_kind,
5459                                 Register left,
5460                                 Register right,
5461                                 Register tmp,
5462 Register out) {
5463 ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
5464
5465 // Special case 64-bit div/mod by compile-time constant. Note that various
5466 // special constants (such as powers of two) should have been optimized
5467 // earlier in the pipeline. Div or mod by zero falls into general code
5468 // to implement the exception.
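  // The "magic" path below rewrites x ~/ c as a 64x64->128-bit multiply by a
  // precomputed constant followed by shifts and a sign fix-up (see
  // Utils::CalculateMagicAndShiftForDivRem), avoiding the sdiv instruction.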
5469 if (FLAG_optimization_level <= 2) {
5470 // We only consider magic operations under O3.
5471 } else if (auto c = instruction->right()->definition()->AsConstant()) {
5472 if (c->value().IsInteger()) {
5473 const int64_t divisor = Integer::Cast(c->value()).AsInt64Value();
5474 if (divisor <= -2 || divisor >= 2) {
5475 // For x DIV c or x MOD c: use magic operations.
5476 compiler::Label pos;
5477 int64_t magic = 0;
5478 int64_t shift = 0;
5479 Utils::CalculateMagicAndShiftForDivRem(divisor, &magic, &shift);
5480 // Compute tmp = high(magic * numerator).
5481 __ LoadImmediate(TMP2, magic);
5482 __ smulh(TMP2, TMP2, left);
5483 // Compute tmp +/-= numerator.
5484 if (divisor > 0 && magic < 0) {
5485 __ add(TMP2, TMP2, compiler::Operand(left));
5486 } else if (divisor < 0 && magic > 0) {
5487 __ sub(TMP2, TMP2, compiler::Operand(left));
5488 }
5489 // Shift if needed.
5490 if (shift != 0) {
5491 __ add(TMP2, ZR, compiler::Operand(TMP2, ASR, shift));
5492 }
5493 // Finalize DIV or MOD.
5494 if (op_kind == Token::kTRUNCDIV) {
5495 __ sub(out, TMP2, compiler::Operand(TMP2, ASR, 63));
5496 } else {
5497 __ sub(TMP2, TMP2, compiler::Operand(TMP2, ASR, 63));
5498 __ LoadImmediate(TMP, divisor);
5499 __ msub(out, TMP2, TMP, left);
5500 // Compensate for Dart's Euclidean view of MOD.
5501 __ CompareRegisters(out, ZR);
5502 if (divisor > 0) {
5503 __ add(TMP2, out, compiler::Operand(TMP));
5504 } else {
5505 __ sub(TMP2, out, compiler::Operand(TMP));
5506 }
5507 __ csel(out, TMP2, out, LT);
5508 }
5509 return;
5510 }
5511 }
5512 }
5513
5514 // Prepare a slow path.
5515 Range* right_range = instruction->right()->definition()->range();
5516 Int64DivideSlowPath* slow_path =
5517 new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
5518
5519 // Handle modulo/division by zero exception on slow path.
5520 if (slow_path->has_divide_by_zero()) {
5521 __ cbz(slow_path->entry_label(), right);
5522 }
5523
5524 // Perform actual operation
5525 // out = left % right
5526 // or
5527 // out = left / right.
5528 if (op_kind == Token::kMOD) {
5529 __ sdiv(tmp, left, right);
5530 __ msub(out, tmp, right, left);
5531 // For the % operator, the sdiv instruction does not
5532 // quite do what we want. Adjust for sign on slow path.
5533 __ CompareRegisters(out, ZR);
5534 __ b(slow_path->adjust_sign_label(), LT);
5535 } else {
5536 __ sdiv(out, left, right);
5537 }
5538
5539 if (slow_path->is_needed()) {
5540 __ Bind(slow_path->exit_label());
5541 compiler->AddSlowPathCode(slow_path);
5542 }
5543}
5544
5545LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5546 bool opt) const {
5547 switch (op_kind()) {
5548 case Token::kMOD:
5549 case Token::kTRUNCDIV: {
5550 const intptr_t kNumInputs = 2;
5551 const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
5552 LocationSummary* summary = new (zone) LocationSummary(
5553 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5554 summary->set_in(0, Location::RequiresRegister());
5555 summary->set_in(1, Location::RequiresRegister());
5556 summary->set_out(0, Location::RequiresRegister());
5557 if (kNumTemps == 1) {
5558 summary->set_temp(0, Location::RequiresRegister());
5559 }
5560 return summary;
5561 }
5562 default: {
5563 const intptr_t kNumInputs = 2;
5564 const intptr_t kNumTemps = 0;
5565 LocationSummary* summary = new (zone) LocationSummary(
5566 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5567 summary->set_in(0, Location::RequiresRegister());
5568 summary->set_in(1, LocationRegisterOrConstant(right()));
5569 summary->set_out(0, Location::RequiresRegister());
5570 return summary;
5571 }
5572 }
5573}
5574
5575void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5576 ASSERT(!can_overflow());
5577 ASSERT(!CanDeoptimize());
5578
5579 const Register left = locs()->in(0).reg();
5580 const Location right = locs()->in(1);
5581 const Register out = locs()->out(0).reg();
5582
5583 if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
5584 Register tmp =
5585 (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister;
5586 EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
5587 out);
5588 return;
5589 } else if (op_kind() == Token::kMUL) {
5590 Register r = TMP;
5591 if (right.IsConstant()) {
5592 int64_t value;
5593      const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5594      RELEASE_ASSERT(ok);
5595      __ LoadImmediate(r, value);
5596 } else {
5597 r = right.reg();
5598 }
5599 __ mul(out, left, r);
5600 return;
5601 }
5602
5603 if (right.IsConstant()) {
5604 int64_t value;
5605    const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5606    RELEASE_ASSERT(ok);
5607    switch (op_kind()) {
5608 case Token::kADD:
5609 __ AddImmediate(out, left, value);
5610 break;
5611 case Token::kSUB:
5612 __ AddImmediate(out, left, -value);
5613 break;
5614 case Token::kBIT_AND:
5615 __ AndImmediate(out, left, value);
5616 break;
5617 case Token::kBIT_OR:
5618 __ OrImmediate(out, left, value);
5619 break;
5620 case Token::kBIT_XOR:
5621 __ XorImmediate(out, left, value);
5622 break;
5623 default:
5624 UNREACHABLE();
5625 }
5626 } else {
5627 compiler::Operand r = compiler::Operand(right.reg());
5628 switch (op_kind()) {
5629 case Token::kADD:
5630 __ add(out, left, r);
5631 break;
5632 case Token::kSUB:
5633 __ sub(out, left, r);
5634 break;
5635 case Token::kBIT_AND:
5636 __ and_(out, left, r);
5637 break;
5638 case Token::kBIT_OR:
5639 __ orr(out, left, r);
5640 break;
5641 case Token::kBIT_XOR:
5642 __ eor(out, left, r);
5643 break;
5644 default:
5645 UNREACHABLE();
5646 }
5647 }
5648}
5649
5650static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5651 Token::Kind op_kind,
5652 Register out,
5653 Register left,
5654 const Object& right) {
5655 const int64_t shift = Integer::Cast(right).AsInt64Value();
5656 ASSERT(shift >= 0);
5657 switch (op_kind) {
5658 case Token::kSHR: {
5659 __ AsrImmediate(out, left,
5660 Utils::Minimum<int64_t>(shift, kBitsPerWord - 1));
5661 break;
5662 }
5663 case Token::kUSHR: {
5664 ASSERT(shift < 64);
5665 __ LsrImmediate(out, left, shift);
5666 break;
5667 }
5668 case Token::kSHL: {
5669 ASSERT(shift < 64);
5670 __ LslImmediate(out, left, shift);
5671 break;
5672 }
5673 default:
5674 UNREACHABLE();
5675 }
5676}
5677
5678static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
5679 Token::Kind op_kind,
5680 Register out,
5681 Register left,
5682 Register right) {
5683 switch (op_kind) {
5684 case Token::kSHR: {
5685 __ asrv(out, left, right);
5686 break;
5687 }
5688 case Token::kUSHR: {
5689 __ lsrv(out, left, right);
5690 break;
5691 }
5692 case Token::kSHL: {
5693 __ lslv(out, left, right);
5694 break;
5695 }
5696 default:
5697 UNREACHABLE();
5698 }
5699}
5700
5701static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
5702 Token::Kind op_kind,
5703 Register out,
5704 Register left,
5705 const Object& right) {
5706 const int64_t shift = Integer::Cast(right).AsInt64Value();
5707 ASSERT(shift >= 0);
5708 if (shift >= 32) {
5709 __ LoadImmediate(out, 0);
5710 } else {
5711 switch (op_kind) {
5712 case Token::kSHR:
5713 case Token::kUSHR:
5714 __ LsrImmediate(out, left, shift, compiler::kFourBytes);
5715 break;
5716 case Token::kSHL:
5717 __ LslImmediate(out, left, shift, compiler::kFourBytes);
5718 break;
5719 default:
5720 UNREACHABLE();
5721 }
5722 }
5723}
5724
5725static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
5726 Token::Kind op_kind,
5727 Register out,
5728 Register left,
5729 Register right) {
5730 switch (op_kind) {
5731 case Token::kSHR:
5732 case Token::kUSHR:
5733 __ lsrvw(out, left, right);
5734 break;
5735 case Token::kSHL:
5736 __ lslvw(out, left, right);
5737 break;
5738 default:
5739 UNREACHABLE();
5740 }
5741}
5742
5743class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
5744 public:
5745 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
5746 : ThrowErrorSlowPathCode(instruction,
5747 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5748
5749 const char* name() override { return "int64 shift"; }
5750
5751 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5752 const Register left = instruction()->locs()->in(0).reg();
5753 const Register right = instruction()->locs()->in(1).reg();
5754 const Register out = instruction()->locs()->out(0).reg();
5755 ASSERT((out != left) && (out != right));
5756
5757 compiler::Label throw_error;
5758 __ tbnz(&throw_error, right, kBitsPerWord - 1);
5759
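    // The shift count is non-negative but exceeds 63, so the result is
    // well-defined: an arithmetic shift fills with the sign bit, while
    // logical right shifts and left shifts produce zero.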
5760 switch (instruction()->AsShiftInt64Op()->op_kind()) {
5761 case Token::kSHR:
5762 __ AsrImmediate(out, left, kBitsPerWord - 1);
5763 break;
5764 case Token::kUSHR:
5765 case Token::kSHL:
5766 __ mov(out, ZR);
5767 break;
5768 default:
5769 UNREACHABLE();
5770 }
5771 __ b(exit_label());
5772
5773 __ Bind(&throw_error);
5774
5775 // Can't pass unboxed int64 value directly to runtime call, as all
5776 // arguments are expected to be tagged (boxed).
5777 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5778 // TODO(dartbug.com/33549): Clean this up when unboxed values
5779    // can be passed as arguments.
5780 __ str(right,
5781 compiler::Address(
5782 THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
5783 }
5784};
5785
5786LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
5787 bool opt) const {
5788 const intptr_t kNumInputs = 2;
5789 const intptr_t kNumTemps = 0;
5790 LocationSummary* summary = new (zone) LocationSummary(
5791 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5792 summary->set_in(0, Location::RequiresRegister());
5793  summary->set_in(1, RangeUtils::IsPositive(shift_range())
5794                         ? LocationRegisterOrConstant(right())
5795                         : Location::RequiresRegister());
5796 summary->set_out(0, Location::RequiresRegister());
5797 return summary;
5798}
5799
5800void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5801 const Register left = locs()->in(0).reg();
5802 const Register out = locs()->out(0).reg();
5803 ASSERT(!can_overflow());
5804
5805 if (locs()->in(1).IsConstant()) {
5806 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
5807 locs()->in(1).constant());
5808 } else {
5809 // Code for a variable shift amount (or constant that throws).
5810 Register shift = locs()->in(1).reg();
5811
5812 // Jump to a slow path if shift is larger than 63 or less than 0.
5813 ShiftInt64OpSlowPath* slow_path = nullptr;
5814 if (!IsShiftCountInRange()) {
5815 slow_path = new (Z) ShiftInt64OpSlowPath(this);
5816 compiler->AddSlowPathCode(slow_path);
5817 __ CompareImmediate(shift, kShiftCountLimit);
5818 __ b(slow_path->entry_label(), HI);
5819 }
5820
5821 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
5822
5823 if (slow_path != nullptr) {
5824 __ Bind(slow_path->exit_label());
5825 }
5826 }
5827}
5828
5829LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
5830 Zone* zone,
5831 bool opt) const {
5832 const intptr_t kNumInputs = 2;
5833 const intptr_t kNumTemps = 0;
5834 LocationSummary* summary = new (zone)
5835 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5836 summary->set_in(0, Location::RequiresRegister());
5837 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
5838 summary->set_out(0, Location::RequiresRegister());
5839 return summary;
5840}
5841
5842void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5843 const Register left = locs()->in(0).reg();
5844 const Register out = locs()->out(0).reg();
5845 ASSERT(!can_overflow());
5846
5847 if (locs()->in(1).IsConstant()) {
5848 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
5849 locs()->in(1).constant());
5850 } else {
5851 // Code for a variable shift amount.
5852 Register shift = locs()->in(1).reg();
5853
5854 // Untag shift count.
5855 __ SmiUntag(TMP, shift);
5856 shift = TMP;
5857
5858 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
5859 if (!IsShiftCountInRange()) {
5860 ASSERT(CanDeoptimize());
5861 compiler::Label* deopt =
5862 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5863
5864 __ CompareImmediate(shift, kShiftCountLimit);
5865 __ b(deopt, HI);
5866 }
5867
5868 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
5869 }
5870}
5871
5872class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
5873 public:
5874 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
5875 : ThrowErrorSlowPathCode(instruction,
5876 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5877
5878 const char* name() override { return "uint32 shift"; }
5879
5880 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5881 const Register right = instruction()->locs()->in(1).reg();
5882
5883 // Can't pass unboxed int64 value directly to runtime call, as all
5884 // arguments are expected to be tagged (boxed).
5885 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5886 // TODO(dartbug.com/33549): Clean this up when unboxed values
5887    // can be passed as arguments.
5888 __ str(right,
5889 compiler::Address(
5890 THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
5891 }
5892};
5893
5894LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
5895 bool opt) const {
5896 const intptr_t kNumInputs = 2;
5897 const intptr_t kNumTemps = 0;
5898 LocationSummary* summary = new (zone) LocationSummary(
5899 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5900 summary->set_in(0, Location::RequiresRegister());
5901  summary->set_in(1, RangeUtils::IsPositive(shift_range())
5902                         ? LocationRegisterOrConstant(right())
5903                         : Location::RequiresRegister());
5904 summary->set_out(0, Location::RequiresRegister());
5905 return summary;
5906}
5907
5908void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5909 Register left = locs()->in(0).reg();
5910 Register out = locs()->out(0).reg();
5911
5912 if (locs()->in(1).IsConstant()) {
5913 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
5914 locs()->in(1).constant());
5915 } else {
5916 // Code for a variable shift amount (or constant that throws).
5917 const Register right = locs()->in(1).reg();
5918 const bool shift_count_in_range =
5919 IsShiftCountInRange(kUint32ShiftCountLimit);
5920
5921 // Jump to a slow path if shift count is negative.
5922 if (!shift_count_in_range) {
5923 ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
5924 compiler->AddSlowPathCode(slow_path);
5925
5926 __ tbnz(slow_path->entry_label(), right, kBitsPerWord - 1);
5927 }
5928
5929 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
5930
5931 if (!shift_count_in_range) {
5932 // If shift value is > 31, return zero.
5933 __ CompareImmediate(right, 31);
5934 __ csel(out, out, ZR, LE);
5935 }
5936 }
5937}
5938
5939LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
5940 Zone* zone,
5941 bool opt) const {
5942 const intptr_t kNumInputs = 2;
5943 const intptr_t kNumTemps = 0;
5944 LocationSummary* summary = new (zone)
5945 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5946 summary->set_in(0, Location::RequiresRegister());
5947 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
5948 summary->set_out(0, Location::RequiresRegister());
5949 return summary;
5950}
5951
5952void SpeculativeShiftUint32OpInstr::EmitNativeCode(
5953 FlowGraphCompiler* compiler) {
5954 Register left = locs()->in(0).reg();
5955 Register out = locs()->out(0).reg();
5956
5957 if (locs()->in(1).IsConstant()) {
5958 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
5959 locs()->in(1).constant());
5960 } else {
5961 Register right = locs()->in(1).reg();
5962 const bool shift_count_in_range =
5963 IsShiftCountInRange(kUint32ShiftCountLimit);
5964
5965 __ SmiUntag(TMP, right);
5966 right = TMP;
5967
5968 // Jump to a slow path if shift count is negative.
5969 if (!shift_count_in_range) {
5970 // Deoptimize if shift count is negative.
5971 ASSERT(CanDeoptimize());
5972 compiler::Label* deopt =
5973 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5974
5975 __ tbnz(deopt, right, compiler::target::kSmiBits + 1);
5976 }
5977
5978 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
5979
5980 if (!shift_count_in_range) {
5981 // If shift value is > 31, return zero.
5982 __ CompareImmediate(right, 31, compiler::kObjectBytes);
5983 __ csel(out, out, ZR, LE);
5984 }
5985 }
5986}
5987
5988LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5989 bool opt) const {
5990 const intptr_t kNumInputs = 1;
5991 const intptr_t kNumTemps = 0;
5992 LocationSummary* summary = new (zone)
5993 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5994 summary->set_in(0, Location::RequiresRegister());
5995 summary->set_out(0, Location::RequiresRegister());
5996 return summary;
5997}
5998
5999void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6000 const Register left = locs()->in(0).reg();
6001 const Register out = locs()->out(0).reg();
6002 switch (op_kind()) {
6003 case Token::kBIT_NOT:
6004 __ mvn_(out, left);
6005 break;
6006 case Token::kNEGATE:
6007 __ sub(out, ZR, compiler::Operand(left));
6008 break;
6009 default:
6010 UNREACHABLE();
6011 }
6012}
6013
6014LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6015 bool opt) const {
6016 const intptr_t kNumInputs = 2;
6017 const intptr_t kNumTemps = 0;
6018 LocationSummary* summary = new (zone)
6019 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6020 summary->set_in(0, Location::RequiresRegister());
6021 summary->set_in(1, Location::RequiresRegister());
6022 summary->set_out(0, Location::RequiresRegister());
6023 return summary;
6024}
6025
6026void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6027 Register left = locs()->in(0).reg();
6028 Register right = locs()->in(1).reg();
6029 compiler::Operand r = compiler::Operand(right);
6030 Register out = locs()->out(0).reg();
6031 switch (op_kind()) {
6032 case Token::kBIT_AND:
6033 __ and_(out, left, r);
6034 break;
6035 case Token::kBIT_OR:
6036 __ orr(out, left, r);
6037 break;
6038 case Token::kBIT_XOR:
6039 __ eor(out, left, r);
6040 break;
6041 case Token::kADD:
6042 __ addw(out, left, r);
6043 break;
6044 case Token::kSUB:
6045 __ subw(out, left, r);
6046 break;
6047 case Token::kMUL:
6048 __ mulw(out, left, right);
6049 break;
6050 default:
6051 UNREACHABLE();
6052 }
6053}
6054
6055LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6056 bool opt) const {
6057 const intptr_t kNumInputs = 1;
6058 const intptr_t kNumTemps = 0;
6059 LocationSummary* summary = new (zone)
6060 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6061 summary->set_in(0, Location::RequiresRegister());
6062 summary->set_out(0, Location::RequiresRegister());
6063 return summary;
6064}
6065
6066void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6067 Register left = locs()->in(0).reg();
6068 Register out = locs()->out(0).reg();
6069
6070 ASSERT(op_kind() == Token::kBIT_NOT);
6071 __ mvnw(out, left);
6072}
6073
6074DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
6075
6076LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6077 bool opt) const {
6078 const intptr_t kNumInputs = 1;
6079 const intptr_t kNumTemps = 0;
6080 LocationSummary* summary = new (zone)
6081 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6082 if (from() == kUntagged || to() == kUntagged) {
6083 ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
6084 (from() == kUnboxedIntPtr && to() == kUntagged));
6085 ASSERT(!CanDeoptimize());
6086 } else if (from() == kUnboxedInt64) {
6087 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6088 } else if (to() == kUnboxedInt64) {
6089 ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
6090 } else {
6091 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6092 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6093 }
6094 summary->set_in(0, Location::RequiresRegister());
6095 if (CanDeoptimize()) {
6096 summary->set_out(0, Location::RequiresRegister());
6097 } else {
6098 summary->set_out(0, Location::SameAsFirstInput());
6099 }
6100 return summary;
6101}
6102
6103void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6104 ASSERT(from() != to()); // We don't convert from a representation to itself.
6105
6106 const bool is_nop_conversion =
6107 (from() == kUntagged && to() == kUnboxedIntPtr) ||
6108 (from() == kUnboxedIntPtr && to() == kUntagged);
6109 if (is_nop_conversion) {
6110 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6111 return;
6112 }
6113
6114 const Register value = locs()->in(0).reg();
6115 const Register out = locs()->out(0).reg();
6116 compiler::Label* deopt =
6117 !CanDeoptimize()
6118 ? nullptr
6119 : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6120 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6121 if (CanDeoptimize()) {
6122 __ tbnz(deopt, value,
6123 31); // If sign bit is set it won't fit in a uint32.
6124 }
6125 if (out != value) {
6126 __ mov(out, value); // For positive values the bits are the same.
6127 }
6128 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6129 if (CanDeoptimize()) {
6130 __ tbnz(deopt, value,
6131 31); // If high bit is set it won't fit in an int32.
6132 }
6133 if (out != value) {
6134 __ mov(out, value); // For 31 bit values the bits are the same.
6135 }
6136 } else if (from() == kUnboxedInt64) {
6137 if (to() == kUnboxedInt32) {
6138 if (is_truncating() || out != value) {
6139 __ sxtw(out, value); // Signed extension 64->32.
6140 }
6141 } else {
6142 ASSERT(to() == kUnboxedUint32);
6143 if (is_truncating() || out != value) {
6144 __ uxtw(out, value); // Unsigned extension 64->32.
6145 }
6146 }
6147 if (CanDeoptimize()) {
6148 ASSERT(to() == kUnboxedInt32);
6149 __ cmp(out, compiler::Operand(value));
6150 __ b(deopt, NE); // Value cannot be held in Int32, deopt.
6151 }
6152 } else if (to() == kUnboxedInt64) {
6153 if (from() == kUnboxedUint32) {
6154 __ uxtw(out, value);
6155 } else {
6156 ASSERT(from() == kUnboxedInt32);
6157 __ sxtw(out, value); // Signed extension 32->64.
6158 }
6159 } else {
6160 UNREACHABLE();
6161 }
6162}
6163
6164LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6165 LocationSummary* summary =
6166 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
6167 /*num_temps=*/0, LocationSummary::kNoCall);
6168 switch (from()) {
6169 case kUnboxedInt32:
6170 case kUnboxedInt64:
6171 summary->set_in(0, Location::RequiresRegister());
6172 break;
6173 case kUnboxedFloat:
6174 case kUnboxedDouble:
6175 summary->set_in(0, Location::RequiresFpuRegister());
6176 break;
6177 default:
6178 UNREACHABLE();
6179 }
6180
6181 switch (to()) {
6182 case kUnboxedInt32:
6183 case kUnboxedInt64:
6184 summary->set_out(0, Location::RequiresRegister());
6185 break;
6186 case kUnboxedFloat:
6187 case kUnboxedDouble:
6188 summary->set_out(0, Location::RequiresFpuRegister());
6189 break;
6190 default:
6191 UNREACHABLE();
6192 }
6193 return summary;
6194}
6195
6196void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6197 switch (from()) {
6198 case kUnboxedInt32: {
6199 ASSERT(to() == kUnboxedFloat);
6200 const Register from_reg = locs()->in(0).reg();
6201 const FpuRegister to_reg = locs()->out(0).fpu_reg();
6202 __ fmovsr(to_reg, from_reg);
6203 break;
6204 }
6205 case kUnboxedFloat: {
6206 ASSERT(to() == kUnboxedInt32);
6207 const FpuRegister from_reg = locs()->in(0).fpu_reg();
6208 const Register to_reg = locs()->out(0).reg();
6209 __ fmovrs(to_reg, from_reg);
6210 break;
6211 }
6212 case kUnboxedInt64: {
6213 ASSERT(to() == kUnboxedDouble);
6214 const Register from_reg = locs()->in(0).reg();
6215 const FpuRegister to_reg = locs()->out(0).fpu_reg();
6216 __ fmovdr(to_reg, from_reg);
6217 break;
6218 }
6219 case kUnboxedDouble: {
6220 ASSERT(to() == kUnboxedInt64);
6221 const FpuRegister from_reg = locs()->in(0).fpu_reg();
6222 const Register to_reg = locs()->out(0).reg();
6223 __ fmovrd(to_reg, from_reg);
6224 break;
6225 }
6226 default:
6227 UNREACHABLE();
6228 }
6229}
6230
6231LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6232 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6233}
6234
6235void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6236 __ Stop(message());
6237}
6238
6239void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6240 BlockEntryInstr* entry = normal_entry();
6241 if (entry != nullptr) {
6242 if (!compiler->CanFallThroughTo(entry)) {
6243 FATAL("Checked function entry must have no offset");
6244 }
6245 } else {
6246 entry = osr_entry();
6247 if (!compiler->CanFallThroughTo(entry)) {
6248 __ b(compiler->GetJumpLabel(entry));
6249 }
6250 }
6251}
6252
6253LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6254 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6255}
6256
6257void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6258 if (!compiler->is_optimizing()) {
6259 if (FLAG_reorder_basic_blocks) {
6260 compiler->EmitEdgeCounter(block()->preorder_number());
6261 }
6262 // Add a deoptimization descriptor for deoptimizing instructions that
6263 // may be inserted before this instruction.
6264 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
6265 InstructionSource());
6266 }
6267 if (HasParallelMove()) {
6268 parallel_move()->EmitNativeCode(compiler);
6269 }
6270
6271 // We can fall through if the successor is the next block in the list.
6272 // Otherwise, we need a jump.
6273 if (!compiler->CanFallThroughTo(successor())) {
6274 __ b(compiler->GetJumpLabel(successor()));
6275 }
6276}
6277
6278LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
6279 bool opt) const {
6280 const intptr_t kNumInputs = 1;
6281 const intptr_t kNumTemps = 2;
6282
6283 LocationSummary* summary = new (zone)
6284 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6285
6286 summary->set_in(0, Location::RequiresRegister());
6287 summary->set_temp(0, Location::RequiresRegister());
6288 summary->set_temp(1, Location::RequiresRegister());
6289
6290 return summary;
6291}
6292
6293void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6294 Register index_reg = locs()->in(0).reg();
6295 Register target_address_reg = locs()->temp(0).reg();
6296 Register offset_reg = locs()->temp(1).reg();
6297
6298 ASSERT(RequiredInputRepresentation(0) == kTagged);
6299 __ LoadObject(offset_reg, offsets_);
6300 const auto element_address = __ ElementAddressForRegIndex(
6301 /*is_external=*/false, kTypedDataInt32ArrayCid,
6302 /*index_scale=*/4,
6303 /*index_unboxed=*/false, offset_reg, index_reg, TMP);
6304 __ ldr(offset_reg, element_address, compiler::kFourBytes);
6305
6306 // Load code entry point.
6307 const intptr_t entry_offset = __ CodeSize();
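  // 'adr' with -entry_offset materializes the address of the start of this
  // function's instructions; adding the loaded table offset then yields the
  // absolute jump target.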
6308 if (Utils::IsInt(21, -entry_offset)) {
6309 __ adr(target_address_reg, compiler::Immediate(-entry_offset));
6310 } else {
6311 __ adr(target_address_reg, compiler::Immediate(0));
6312 __ AddImmediate(target_address_reg, -entry_offset);
6313 }
6314
6315 __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));
6316
6317 // Jump to the absolute address.
6318 __ br(target_address_reg);
6319}
6320
6321LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
6322 bool opt) const {
6323 const intptr_t kNumInputs = 2;
6324 const intptr_t kNumTemps = 0;
6325 if (needs_number_check()) {
6326 LocationSummary* locs = new (zone)
6327 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6328 locs->set_in(0, Location::RegisterLocation(R0));
6329 locs->set_in(1, Location::RegisterLocation(R1));
6330 locs->set_out(0, Location::RegisterLocation(R0));
6331 return locs;
6332 }
6333 LocationSummary* locs = new (zone)
6334 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6335 locs->set_in(0, LocationRegisterOrConstant(left()));
6336 // Only one of the inputs can be a constant. Choose register if the first one
6337 // is a constant.
6338  locs->set_in(1, locs->in(0).IsConstant()
6339                      ? Location::RequiresRegister()
6340                      : LocationRegisterOrConstant(right()));
6341  locs->set_out(0, Location::RequiresRegister());
6342 return locs;
6343}
6344
6345Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
6346 FlowGraphCompiler* compiler,
6347 BranchLabels labels,
6348 Register reg,
6349 const Object& obj) {
6350  Condition orig_cond = (kind() == Token::kEQ_STRICT) ? EQ : NE;
6351  if (!needs_number_check() && compiler::target::IsSmi(obj) &&
6352      compiler::target::ToRawSmi(obj) == 0 &&
6353 CanUseCbzTbzForComparison(compiler, reg, orig_cond, labels)) {
6354 EmitCbzTbz(reg, compiler, orig_cond, labels, compiler::kObjectBytes);
6355 return kInvalidCondition;
6356 } else {
6357 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
6358 source(), deopt_id());
6359 }
6360}
6361
6362void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6363 compiler::Label is_true, is_false;
6364 BranchLabels labels = {&is_true, &is_false, &is_false};
6365 Condition true_condition = EmitComparisonCode(compiler, labels);
6366
6367 const Register result = this->locs()->out(0).reg();
6368 if (is_true.IsLinked() || is_false.IsLinked()) {
6369 if (true_condition != kInvalidCondition) {
6370 EmitBranchOnCondition(compiler, true_condition, labels);
6371 }
6372 compiler::Label done;
6373 __ Bind(&is_false);
6374 __ LoadObject(result, Bool::False());
6375 __ b(&done);
6376 __ Bind(&is_true);
6377 __ LoadObject(result, Bool::True());
6378 __ Bind(&done);
6379 } else {
6380 // If EmitComparisonCode did not use the labels and just returned
6381 // a condition we can avoid the branch and use conditional loads.
6382 ASSERT(true_condition != kInvalidCondition);
6383 __ LoadObject(TMP, Bool::True());
6384 __ LoadObject(TMP2, Bool::False());
6385 __ csel(result, TMP, TMP2, true_condition);
6386 }
6387}
6388
6389void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
6390 BranchInstr* branch) {
6391 BranchLabels labels = compiler->CreateBranchLabels(branch);
6392 Condition true_condition = EmitComparisonCode(compiler, labels);
6393 if (true_condition != kInvalidCondition) {
6394 EmitBranchOnCondition(compiler, true_condition, labels);
6395 }
6396}
6397
6398LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
6399                                                          bool opt) const {
6400  return LocationSummary::Make(zone, 1, Location::RequiresRegister(),
6401                               LocationSummary::kNoCall);
6402}
6403
6404void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6405 const Register input = locs()->in(0).reg();
6406 const Register result = locs()->out(0).reg();
6407 __ eori(
6408      result, input,
6409      compiler::Immediate(compiler::target::ObjectAlignment::kBoolValueMask));
6410}
6411
6412LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
6413 bool opt) const {
6414 UNREACHABLE();
6415 return NULL;
6416}
6417
6418void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6419 UNREACHABLE();
6420}
6421
6422LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
6423 bool opt) const {
6424 UNREACHABLE();
6425 return NULL;
6426}
6427
6428void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6429 UNREACHABLE();
6430}
6431
6432LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
6433 bool opt) const {
6434 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
6435 const intptr_t kNumTemps = 0;
6436 LocationSummary* locs = new (zone)
6437 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6438  if (type_arguments() != nullptr) {
6439    locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
6440                                        AllocateObjectABI::kTypeArgumentsReg));
6441  }
6442  locs->set_out(0, Location::RegisterLocation(AllocateObjectABI::kResultReg));
6443  return locs;
6444}
6445
6446void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6447 if (type_arguments() != nullptr) {
6448 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
6449 if (type_usage_info != nullptr) {
6450 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
6451 type_arguments()->definition());
6452 }
6453 }
6454  const Code& stub = Code::ZoneHandle(
6455      compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
6456  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6457 locs(), deopt_id(), env());
6458}
6459
6460void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6461#ifdef PRODUCT
6462 UNREACHABLE();
6463#else
6464 ASSERT(!compiler->is_optimizing());
6465 __ BranchLinkPatchable(StubCode::DebugStepCheck());
6466 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
6467 compiler->RecordSafepoint(locs());
6468#endif
6469}
6470
6471} // namespace dart
6472
6473#endif // defined(TARGET_ARCH_ARM64)
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition DM.cpp:263
static void fail(const SkString &err)
Definition DM.cpp:234
static bool match(const char *needle, const char *haystack)
Definition DM.cpp:1132
SkPoint pos
static bool are_equal(skiatest::Reporter *reporter, const SkMatrix &a, const SkMatrix &b)
static int float_bits(float f)
static bool ok(int result)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
static size_t element_size(Layout layout, SkSLType type)
Vec2Value v2
#define __
#define UNREACHABLE()
Definition assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition assert.h:309
#define RELEASE_ASSERT(cond)
Definition assert.h:327
#define COMPILE_ASSERT(expr)
Definition assert.h:339
#define Z
intptr_t num_context_variables() const
Definition il.h:8344
Value * type_arguments() const
Definition il.h:7397
const Class & cls() const
Definition il.h:7396
intptr_t num_context_variables() const
Definition il.h:7555
static intptr_t type_arguments_offset()
Definition object.h:10902
static intptr_t InstanceSize()
Definition object.h:10910
static constexpr bool IsValidLength(intptr_t len)
Definition object.h:10906
static intptr_t length_offset()
Definition object.h:10813
Value * dst_type() const
Definition il.h:4405
Token::Kind op_kind() const
Definition il.h:8990
Value * right() const
Definition il.h:8988
Value * left() const
Definition il.h:8987
bool can_overflow() const
Definition il.h:9352
Value * right() const
Definition il.h:9350
Token::Kind op_kind() const
Definition il.h:9348
Value * left() const
Definition il.h:9349
bool RightIsPowerOfTwoConstant() const
Definition il.cc:2116
Range * right_range() const
Definition il.h:9425
Representation to() const
Definition il.h:11067
Representation from() const
Definition il.h:11066
ParallelMoveInstr * parallel_move() const
Definition il.h:1683
bool HasParallelMove() const
Definition il.h:1685
BlockEntryInstr(intptr_t block_id, intptr_t try_index, intptr_t deopt_id, intptr_t stack_depth)
Definition il.h:1776
static const Bool & False()
Definition object.h:10778
static const Bool & True()
Definition object.h:10776
static void Allocate(FlowGraphCompiler *compiler, Instruction *instruction, const Class &cls, Register result, Register temp)
Definition il.cc:6317
Value * value() const
Definition il.h:8480
Representation from_representation() const
Definition il.h:8481
virtual bool ValueFitsSmi() const
Definition il.cc:3244
ComparisonInstr * comparison() const
Definition il.h:4003
intptr_t index_scale() const
Definition il.h:7972
static constexpr Register kSecondReturnReg
static const Register ArgumentRegisters[]
static constexpr FpuRegister kReturnFpuReg
static constexpr Register kFfiAnyNonAbiRegister
static constexpr Register kReturnReg
static constexpr Register kSecondNonArgumentRegister
const RuntimeEntry & TargetFunction() const
Definition il.cc:1099
Value * index() const
Definition il.h:10743
Value * length() const
Definition il.h:10742
Value * value() const
Definition il.h:10701
bool IsDeoptIfNull() const
Definition il.cc:861
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr, TemplateInstruction, FIELD_LIST) private void EmitBitTest(FlowGraphCompiler *compiler, intptr_t min, intptr_t max, intptr_t mask, compiler::Label *deopt)
void EmitNullCheck(FlowGraphCompiler *compiler, compiler::Label *deopt)
bool IsNullCheck() const
Definition il.h:10546
bool IsDeoptIfNotNull() const
Definition il.cc:875
bool IsBitTest() const
Definition il.cc:897
Value * right() const
Definition il.h:8429
Value * left() const
Definition il.h:8428
Value * value() const
Definition il.h:10600
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition il.h:9875
intptr_t loop_depth() const
Definition il.h:9858
virtual void EmitBranchCode(FlowGraphCompiler *compiler, BranchInstr *branch)
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
virtual Condition EmitComparisonCode(FlowGraphCompiler *compiler, BranchLabels labels)=0
const Object & value() const
Definition il.h:4212
void EmitMoveToLocation(FlowGraphCompiler *compiler, const Location &destination, Register tmp=kNoRegister, intptr_t pair_index=0)
static intptr_t num_variables_offset()
Definition object.h:7386
static intptr_t InstanceSize()
Definition object.h:7419
Value * type_arguments() const
Definition il.h:7806
virtual Value * num_elements() const
Definition il.h:7807
virtual Representation representation() const
Definition il.h:3483
static constexpr intptr_t kNone
Definition deopt_id.h:27
Value * value() const
Definition il.h:9053
MethodRecognizer::Kind op_kind() const
Definition il.h:9055
Value * value() const
Definition il.h:10090
MethodRecognizer::Kind recognized_kind() const
Definition il.h:10012
Value * value() const
Definition il.h:10059
bool is_null_aware() const
Definition il.h:5292
virtual Representation representation() const
Definition il.h:10283
intptr_t index() const
Definition il.h:10281
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
Definition il.cc:7633
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr, VariadicDefinition, FIELD_LIST) private void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
Definition il.cc:7421
intptr_t TargetAddressIndex() const
Definition il.h:6051
static intptr_t guarded_cid_offset()
Definition object.h:4642
bool is_nullable() const
Definition object.cc:11821
@ kUnknownFixedLength
Definition object.h:4701
@ kUnknownLengthOffset
Definition object.h:4700
@ kNoFixedLength
Definition object.h:4702
static intptr_t guarded_list_length_in_object_offset_offset()
Definition object.h:4666
intptr_t guarded_cid() const
Definition object.cc:11800
static intptr_t is_nullable_offset()
Definition object.h:4739
static intptr_t guarded_list_length_offset()
Definition object.h:4656
Value * value() const
Definition il.h:10131
ParallelMoveInstr * parallel_move() const
Definition il.h:3717
BlockEntryInstr * block() const
Definition il.h:3692
bool HasParallelMove() const
Definition il.h:3719
JoinEntryInstr * successor() const
Definition il.h:3695
FunctionEntryInstr * normal_entry() const
Definition il.h:1986
OsrEntryInstr * osr_entry() const
Definition il.h:1992
const Field & field() const
Definition il.h:6476
Value * value() const
Definition il.h:6474
Value * value() const
Definition il.h:9101
Value * value() const
Definition il.h:9141
ComparisonInstr * comparison() const
Definition il.h:5434
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:3789
const AbstractType & type() const
Definition il.h:7245
Environment * env() const
Definition il.h:1209
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition il.h:1207
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition il.h:1196
virtual Representation representation() const
Definition il.h:1254
LocationSummary * locs()
Definition il.h:1186
InstructionSource source() const
Definition il.h:1002
intptr_t deopt_id() const
Definition il.h:987
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
Value * value() const
Definition il.h:9930
Value * value() const
Definition il.h:9960
Value * value() const
Definition il.h:10990
bool is_truncating() const
Definition il.h:10994
Representation to() const
Definition il.h:10993
Representation from() const
Definition il.h:10992
const RuntimeEntry & TargetFunction() const
Definition il.cc:7229
MethodRecognizer::Kind recognized_kind() const
Definition il.h:10209
ObjectStore * object_store() const
Definition isolate.h:505
static IsolateGroup * Current()
Definition isolate.h:534
intptr_t TargetAddressIndex() const
Definition il.h:6149
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition il.cc:8042
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition il.cc:7964
intptr_t index_scale() const
Definition il.h:6851
Value * index() const
Definition il.h:6849
bool can_pack_into_smi() const
Definition il.h:6858
intptr_t element_count() const
Definition il.h:6856
bool IsExternal() const
Definition il.h:6844
intptr_t class_id() const
Definition il.h:6855
intptr_t class_id() const
Definition il.h:6759
bool IsUntagged() const
Definition il.h:6752
Value * array() const
Definition il.h:6756
intptr_t index_scale() const
Definition il.h:6758
Representation representation() const
Definition il.h:6775
Value * index() const
Definition il.h:6757
Value * index() const
Definition il.h:3109
virtual Representation RequiredInputRepresentation(intptr_t index) const
Definition il.h:3096
intptr_t offset() const
Definition il.h:3111
Register base_reg() const
Definition il.h:3110
virtual Representation representation() const
Definition il.h:3107
const LocalVariable & local() const
Definition il.h:5765
Location temp(intptr_t index) const
Definition locations.h:882
Location out(intptr_t index) const
Definition locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition locations.h:894
void set_out(intptr_t index, Location loc)
Definition locations.cc:232
bool always_calls() const
Definition locations.h:918
Location in(intptr_t index) const
Definition locations.h:866
void set_in(intptr_t index, Location loc)
Definition locations.cc:205
static Location NoLocation()
Definition locations.h:387
static Location SameAsFirstInput()
Definition locations.h:382
static Location Pair(Location first, Location second)
Definition locations.cc:271
Register reg() const
Definition locations.h:404
static Location FpuRegisterLocation(FpuRegister reg)
Definition locations.h:410
static Location WritableRegister()
Definition locations.h:376
static Location RegisterLocation(Register reg)
Definition locations.h:398
static Location Any()
Definition locations.h:352
PairLocation * AsPairLocation() const
Definition locations.cc:280
static Location RequiresRegister()
Definition locations.h:365
static Location RequiresFpuRegister()
Definition locations.h:369
FpuRegister fpu_reg() const
Definition locations.h:416
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition locations.h:294
Value * right() const
Definition il.h:8922
intptr_t result_cid() const
Definition il.h:8924
Value * left() const
Definition il.h:8921
MethodRecognizer::Kind op_kind() const
Definition il.h:8919
Value * length() const
Definition il.h:3193
bool unboxed_inputs() const
Definition il.h:3198
Value * src_start() const
Definition il.h:3191
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition il.h:3192
static intptr_t value_offset()
Definition object.h:10053
virtual Representation representation() const
Definition il.h:3369
Value * value() const
Definition il.h:3359
static int ComputeArgcTag(const Function &function)
bool is_auto_scope() const
Definition il.h:5977
bool is_bootstrap_native() const
Definition il.h:5976
const Function & function() const
Definition il.h:5974
NativeFunction native_c_function() const
Definition il.h:5975
bool link_lazily() const
Definition il.h:5978
static uword LinkNativeCallEntry()
static Object & Handle()
Definition object.h:407
static Object & ZoneHandle()
Definition object.h:419
static intptr_t data_offset()
Definition object.h:10533
Location At(intptr_t i) const
Definition locations.h:618
static bool IsNegative(Range *range)
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool OnlyLessThanOrEqualTo(Range *range, intptr_t value)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition il.cc:2103
Range * shift_range() const
Definition il.h:9607
Kind kind() const
Definition il.h:11304
Value * value() const
Definition il.h:9904
static constexpr intptr_t kBits
Definition object.h:9965
static SmiPtr New(intptr_t value)
Definition object.h:9985
static constexpr intptr_t kMaxValue
Definition object.h:9966
static intptr_t RawValue(intptr_t value)
Definition object.h:10001
const char * message() const
Definition il.h:3663
bool ShouldEmitStoreBarrier() const
Definition il.h:7045
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:6932
Value * value() const
Definition il.h:7039
Value * array() const
Definition il.h:7037
intptr_t class_id() const
Definition il.h:7042
bool IsUntagged() const
Definition il.h:7076
intptr_t index_scale() const
Definition il.h:7041
Value * index() const
Definition il.h:7038
Value * value() const
Definition il.h:5914
const LocalVariable & local() const
Definition il.h:5913
const Field & field() const
Definition il.h:6685
Value * value() const
Definition il.h:6686
bool needs_number_check() const
Definition il.h:5107
Value * str() const
Definition il.h:6923
static intptr_t length_offset()
Definition object.h:10193
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition stub_code.cc:174
static constexpr int kNullCharCodeSymbolOffset
Definition symbols.h:604
intptr_t ArgumentCount() const
Definition il.h:4568
ArrayPtr GetArgumentsDescriptor() const
Definition il.h:4599
virtual intptr_t InputCount() const
Definition il.h:2737
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition il.h:5185
virtual Representation representation() const
Definition il.h:9793
Value * value() const
Definition il.h:9780
Token::Kind op_kind() const
Definition il.h:9781
Value * value() const
Definition il.h:9192
Token::Kind op_kind() const
Definition il.h:9193
virtual Representation representation() const
Definition il.h:8655
Value * value() const
Definition il.h:8630
bool is_truncating() const
Definition il.h:8724
virtual Representation representation() const
Definition il.h:4270
bool IsScanFlagsUnboxed() const
Definition il.cc:7188
static bool IsInt(intptr_t N, T value)
Definition utils.h:298
static T Abs(T x)
Definition utils.h:34
static void CalculateMagicAndShiftForDivRem(int64_t divisor, int64_t *magic, int64_t *shift)
Definition utils.cc:39
static constexpr T Maximum(T x, T y)
Definition utils.h:26
static constexpr int ShiftForPowerOfTwo(T x)
Definition utils.h:66
static T Minimum(T x, T y)
Definition utils.h:21
static T AddWithWrapAround(T a, T b)
Definition utils.h:416
static constexpr int CountOneBits64(uint64_t x)
Definition utils.h:133
static constexpr size_t HighestBit(int64_t v)
Definition utils.h:170
static constexpr bool IsPowerOfTwo(T x)
Definition utils.h:61
bool BindsToConstant() const
Definition il.cc:1181
Definition * definition() const
Definition il.h:103
CompileType * Type()
Value(Definition *definition)
Definition il.h:95
intptr_t InputCount() const
Definition il.h:2776
static bool EmittingComments()
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
#define LR
#define ASSERT(E)
#define FATAL(error)
#define R(r)
#define CASE(Arity, Mask, Name, Args, Result)
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)
Definition il.h:11813
#define DEFINE_BACKEND(Name, Args)
const intptr_t kResultIndex
Definition marshaller.h:28
word ToRawSmi(const dart::Object &a)
bool IsSmi(int64_t v)
const Object & NullObject()
constexpr OperandSize kWordBytes
bool HasIntegerValue(const dart::Object &object, int64_t *value)
constexpr int64_t kMaxInt64
Definition globals.h:486
constexpr int64_t kMinInt64
Definition globals.h:485
Location LocationAnyOrConstant(Value *value)
Definition locations.cc:357
Location LocationRegisterOrConstant(Value *value)
Definition locations.cc:289
const Register kWriteBarrierSlotReg
constexpr bool IsAbiPreservedRegister(Register reg)
Definition constants.h:90
const Register THR
static Condition InvertCondition(Condition c)
const RegList kAbiVolatileCpuRegs
static constexpr int kSavedCallerPcSlotFromFp
bool IsTypedDataBaseClassId(intptr_t index)
Definition class_id.h:429
constexpr intptr_t kBitsPerWord
Definition globals.h:514
const Register kExceptionObjectReg
const Register kWriteBarrierObjectReg
const VRegister VTMP
const Register NULL_REG
static constexpr intptr_t kBoolVsNullBitPosition
const Register kWriteBarrierValueReg
static constexpr bool IsCalleeSavedRegister(Register reg)
Definition constants.h:85
constexpr intptr_t kIntptrMin
Definition globals.h:556
int32_t classid_t
Definition globals.h:524
static const ClassId kLastErrorCid
Definition class_id.h:311
@ kIllegalCid
Definition class_id.h:214
@ kNullCid
Definition class_id.h:252
@ kDynamicCid
Definition class_id.h:253
Representation
Definition locations.h:66
const FpuRegister FpuTMP
@ kHeapObjectTag
static const ClassId kFirstErrorCid
Definition class_id.h:310
uintptr_t uword
Definition globals.h:501
const Register CODE_REG
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ NO_OVERFLOW
@ UNSIGNED_LESS
void RegisterTypeArgumentsUse(const Function &function, TypeUsageInfo *type_usage_info, const Class &klass, Definition *type_arguments)
const Register TMP2
static constexpr int kParamEndSlotFromFp
const Register ARGS_DESC_REG
bool IsClampedTypedDataBaseClassId(intptr_t index)
Definition class_id.h:461
@ kNumberOfCpuRegisters
@ kNoRegister
Location LocationFixedRegisterOrConstant(Value *value, Register reg)
Definition locations.cc:339
const int kNumberOfFpuRegisters
Location LocationWritableRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition locations.cc:322
bool IsExternalPayloadClassId(classid_t cid)
Definition class_id.h:472
constexpr RegList kDartAvailableCpuRegs
const Register TMP
const Register FPREG
static constexpr intptr_t kCompressedWordSize
Definition globals.h:42
static constexpr int kPcMarkerSlotFromFp
const Register FUNCTION_REG
const Register IC_DATA_REG
compiler::Address LocationToStackSlotAddress(Location loc)
Definition locations.cc:365
constexpr intptr_t kWordSize
Definition globals.h:509
static bool IsConstant(Definition *def, int64_t *val)
Definition loops.cc:123
static constexpr Representation kUnboxedIntPtr
Definition locations.h:176
const Register PP
QRegister FpuRegister
const Register kStackTraceObjectReg
const RegList kAbiVolatileFpuRegs
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition locations.cc:297
constexpr intptr_t kBitsPerInt64
Definition globals.h:467
const Register SPREG
static constexpr Register kResultReg
static constexpr Register kLengthReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kTempReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Representation NativeRepresentation(Representation rep)
Definition il.h:8456
static constexpr intptr_t kBoolValueMask
static constexpr size_t ValueSize(Representation rep)
Definition locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition locations.h:92
static compiler::OperandSize OperandSize(Representation rep)
Definition locations.cc:16
static constexpr bool IsUnboxed(Representation rep)
Definition locations.h:101
static bool IsUnsignedInteger(Representation rep)
Definition locations.h:126
static Representation RepresentationOfArrayElement(classid_t cid)
Definition locations.cc:79
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
#define kNegInfinity
Definition globals.h:66