il_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // Needed here to get TARGET_ARCH_ARM64.
6#if defined(TARGET_ARCH_ARM64)
7
9#include "vm/compiler/backend/il.h"
18#include "vm/dart_entry.h"
19#include "vm/instructions.h"
20#include "vm/object_store.h"
21#include "vm/parser.h"
22#include "vm/simulator.h"
23#include "vm/stack_frame.h"
24#include "vm/stub_code.h"
25#include "vm/symbols.h"
27
28#define __ (compiler->assembler())->
29#define Z (compiler->zone())
30
31namespace dart {
32
33// Generic summary for call instructions that have all arguments pushed
34// on the stack and return the result in a fixed register R0 (or V0 if
35// the return type is double).
36LocationSummary* Instruction::MakeCallSummary(Zone* zone,
37 const Instruction* instr,
38 LocationSummary* locs) {
39 ASSERT(locs == nullptr || locs->always_calls());
40 LocationSummary* result =
41 ((locs == nullptr)
42 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
43 : locs);
44 const auto representation = instr->representation();
45 switch (representation) {
46 case kTagged:
47 case kUntagged:
48 case kUnboxedInt64:
49 result->set_out(
50 0, Location::RegisterLocation(CallingConventions::kReturnReg));
51 break;
52 case kPairOfTagged:
53 result->set_out(
54 0, Location::Pair(
55 Location::RegisterLocation(CallingConventions::kReturnReg),
56 Location::RegisterLocation(
57 CallingConventions::kSecondReturnReg)));
58 break;
59 case kUnboxedDouble:
60 result->set_out(
61 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
62 break;
63 default:
64 UNREACHABLE();
65 break;
66 }
67 return result;
68}
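// Illustration: under the summary built above, a tagged, untagged, or unboxed
// int64 result is pinned to R0 (CallingConventions::kReturnReg), a pair of
// tagged results to R0/R1, and an unboxed double to V0, as the comment at the
// top of this function describes.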
69
70LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
71 bool opt) const {
72 const intptr_t kNumInputs = 1;
73 const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
74 LocationSummary* locs = new (zone)
75 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
76
77 locs->set_in(0, Location::RequiresRegister());
78 switch (representation()) {
79 case kTagged:
80 case kUnboxedInt64:
81 locs->set_out(0, Location::RequiresRegister());
82 break;
83 case kUnboxedDouble:
84 locs->set_temp(0, Location::RequiresRegister());
85 locs->set_out(0, Location::RequiresFpuRegister());
86 break;
87 default:
88 UNREACHABLE();
89 break;
90 }
91 return locs;
92}
93
94void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
95 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
96 ASSERT(kSmiTag == 0);
97 ASSERT(kSmiTagSize == 1);
98
99 const Register index = locs()->in(0).reg();
100
101 switch (representation()) {
102 case kTagged:
103 case kUnboxedInt64: {
104 const auto out = locs()->out(0).reg();
105#if !defined(DART_COMPRESSED_POINTERS)
106 __ add(out, base_reg(), compiler::Operand(index, LSL, 2));
107#else
108 __ add(out, base_reg(), compiler::Operand(index, SXTW, 2));
109#endif
110 __ LoadFromOffset(out, out, offset());
111 break;
112 }
113 case kUnboxedDouble: {
114 const auto tmp = locs()->temp(0).reg();
115 const auto out = locs()->out(0).fpu_reg();
116#if !defined(DART_COMPRESSED_POINTERS)
117 __ add(tmp, base_reg(), compiler::Operand(index, LSL, 2));
118#else
119 __ add(tmp, base_reg(), compiler::Operand(index, SXTW, 2));
120#endif
121 __ LoadDFromOffset(out, tmp, offset());
122 break;
123 }
124 default:
125 UNREACHABLE();
126 break;
127 }
128}
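// Worked example (illustrative): a boxed Smi index of 3 is stored as
// 3 << kSmiTagSize == 6, so Operand(index, LSL, 2) (or SXTW with compressed
// pointers) produces 6 << 2 == 24 == 3 * kWordSize, the byte offset of the
// fourth word relative to base_reg() before offset() is added.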
129
130DEFINE_BACKEND(StoreIndexedUnsafe,
131 (NoLocation, Register index, Register value)) {
132 ASSERT(instr->RequiredInputRepresentation(
133 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
134#if !defined(DART_COMPRESSED_POINTERS)
135 __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 2));
136#else
137 __ add(TMP, instr->base_reg(), compiler::Operand(index, SXTW, 2));
138#endif
139 __ str(value, compiler::Address(TMP, instr->offset()));
140
141 ASSERT(kSmiTag == 0);
142 ASSERT(kSmiTagSize == 1);
143}
144
145DEFINE_BACKEND(TailCall,
146 (NoLocation,
147 Fixed<Register, ARGS_DESC_REG>,
148 Temp<Register> temp)) {
149 compiler->EmitTailCallToStub(instr->code());
150
151 // Even though the TailCallInstr will be the last instruction in a basic
152 // block, the flow graph compiler will emit native code for other blocks after
153 // the one containing this instruction and needs to be able to use the pool.
154 // (The `LeaveDartFrame` above disables usages of the pool.)
155 __ set_constant_pool_allowed(true);
156}
157
158LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
159 bool opt) const {
160 // The compiler must optimize any function that includes a MemoryCopy
161 // instruction that uses typed data cids, since extracting the payload address
162 // from views is done in a compiler pass after all code motion has happened.
163 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
164 !IsTypedDataBaseClassId(dest_cid_)) ||
165 opt);
166 const intptr_t kNumInputs = 5;
167 const intptr_t kNumTemps = 2;
168 LocationSummary* locs = new (zone)
169 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
178 return locs;
179}
180
181 void MemoryCopyInstr::PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
182 Register length_reg,
183 compiler::Label* done) {
184 __ BranchIfZero(length_reg, done);
185}
186
187 static compiler::OperandSize OperandSizeFor(intptr_t bytes) {
188 ASSERT(Utils::IsPowerOfTwo(bytes));
189 switch (bytes) {
190 case 1:
191 return compiler::kUnsignedByte;
192 case 2:
193 return compiler::kUnsignedTwoBytes;
194 case 4:
195 return compiler::kUnsignedFourBytes;
196 case 8:
197 return compiler::kEightBytes;
198 default:
199 UNREACHABLE();
200 return compiler::kEightBytes;
201 }
202 }
203
204static void CopyUpToMultipleOfChunkSize(FlowGraphCompiler* compiler,
205 Register dest_reg,
206 Register src_reg,
207 Register length_reg,
208 intptr_t element_size,
209 bool unboxed_inputs,
210 bool reversed,
211 intptr_t chunk_size,
212 compiler::Label* done) {
214 if (element_size >= chunk_size) return;
215
216 const intptr_t element_shift = Utils::ShiftForPowerOfTwo(element_size);
217 const intptr_t base_shift =
218 (unboxed_inputs ? 0 : kSmiTagShift) - element_shift;
219 const intptr_t offset_sign = reversed ? -1 : 1;
220 auto const mode =
221 reversed ? compiler::Address::PreIndex : compiler::Address::PostIndex;
222 intptr_t tested_bits = 0;
223
224 __ Comment("Copying until region size is a multiple of chunk size");
225
226 for (intptr_t bit = Utils::ShiftForPowerOfTwo(chunk_size) - 1;
227 bit >= element_shift; bit--) {
228 const intptr_t bytes = 1 << bit;
229 const intptr_t tested_bit = bit + base_shift;
230 tested_bits |= (1 << tested_bit);
231 const intptr_t offset = offset_sign * bytes;
232 compiler::Label skip_copy;
233 __ tbz(&skip_copy, length_reg, tested_bit);
234 auto const sz = OperandSizeFor(bytes);
235 __ ldr(TMP, compiler::Address(src_reg, offset, mode), sz);
236 __ str(TMP, compiler::Address(dest_reg, offset, mode), sz);
237 __ Bind(&skip_copy);
238 }
239
240 ASSERT(tested_bits != 0);
241 __ andis(length_reg, length_reg, compiler::Immediate(~tested_bits),
243 __ b(done, ZERO);
244}
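// Illustrative trace: with 1-byte elements and a 16-byte chunk size the loop
// above tests bits 3..0 of the (possibly Smi-shifted) length. For a length of
// 23 bytes (0b10111) it copies 4, then 2, then 1 bytes, leaving 16 bytes, a
// multiple of the chunk size; the final andis clears the tested bits in
// length_reg and the conditional branch skips the chunked loop when nothing
// is left to copy.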
245
246void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
247 Register dest_reg,
248 Register src_reg,
249 Register length_reg,
250 compiler::Label* done,
251 compiler::Label* copy_forwards) {
252 const bool reversed = copy_forwards != nullptr;
253 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
254 (unboxed_inputs() ? 0 : kSmiTagShift);
255 if (FLAG_target_memory_sanitizer) {
256 __ PushPair(length_reg, dest_reg);
257 }
258 if (reversed) {
259 // Verify that the overlap actually exists by checking to see if
260 // dest_start < src_end.
261 if (!unboxed_inputs()) {
262 __ ExtendNonNegativeSmi(length_reg);
263 }
264 if (shift < 0) {
265 __ add(TMP, src_reg, compiler::Operand(length_reg, ASR, -shift));
266 } else {
267 __ add(TMP, src_reg, compiler::Operand(length_reg, LSL, shift));
268 }
269 __ CompareRegisters(dest_reg, TMP);
270 __ BranchIf(UNSIGNED_GREATER_EQUAL, copy_forwards);
271 // There is overlap, so move TMP to src_reg and adjust dest_reg now.
272 __ MoveRegister(src_reg, TMP);
273 if (shift < 0) {
274 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));
275 } else {
276 __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));
277 }
278 }
279 const intptr_t kChunkSize = 16;
280 ASSERT(kChunkSize >= element_size_);
281 CopyUpToMultipleOfChunkSize(compiler, dest_reg, src_reg, length_reg,
282 element_size_, unboxed_inputs_, reversed,
283 kChunkSize, done);
284 // The size of the uncopied region is now a multiple of the chunk size.
285 const intptr_t loop_subtract = (kChunkSize / element_size_)
286 << (unboxed_inputs_ ? 0 : kSmiTagShift);
287 // When reversed, the src and dest registers are adjusted to start with the
288 // end addresses, so apply the negated offset prior to indexing.
289 const intptr_t offset = (reversed ? -1 : 1) * kChunkSize;
290 const auto mode = reversed ? compiler::Address::PairPreIndex
291 : compiler::Address::PairPostIndex;
292 __ Comment("Copying chunks at a time");
293 compiler::Label loop;
294 __ Bind(&loop);
295 __ ldp(TMP, TMP2, compiler::Address(src_reg, offset, mode));
296 __ stp(TMP, TMP2, compiler::Address(dest_reg, offset, mode));
297 __ subs(length_reg, length_reg, compiler::Operand(loop_subtract),
299 __ b(&loop, NOT_ZERO);
300
301 if (FLAG_target_memory_sanitizer) {
302 __ PopPair(length_reg, dest_reg);
303 if (!unboxed_inputs()) {
304 __ ExtendNonNegativeSmi(length_reg);
305 }
306 if (shift < 0) {
307 __ AsrImmediate(length_reg, length_reg, -shift);
308 } else {
309 __ LslImmediate(length_reg, length_reg, shift);
310 }
311 __ MsanUnpoison(dest_reg, length_reg);
312 }
313}
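// Note: the main loop above moves 16 bytes per iteration with ldp/stp, using
// pair pre-indexed addressing when copying backwards over an overlapping
// region and pair post-indexed addressing otherwise, so src_reg and dest_reg
// are advanced as a side effect of each load/store.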
314
315void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
316 classid_t array_cid,
317 Register array_reg,
318 Register payload_reg,
319 Representation array_rep,
320 Location start_loc) {
321 intptr_t offset = 0;
322 if (array_rep != kTagged) {
323 // Do nothing, array_reg already contains the payload address.
324 } else if (IsTypedDataBaseClassId(array_cid)) {
325 // The incoming array must have been proven to be an internal typed data
326 // object, where the payload is in the object and we can just offset.
327 ASSERT_EQUAL(array_rep, kTagged);
328 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
329 } else {
330 ASSERT_EQUAL(array_rep, kTagged);
331 ASSERT(!IsExternalPayloadClassId(array_cid));
332 switch (array_cid) {
333 case kOneByteStringCid:
334 offset =
335 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
336 break;
337 case kTwoByteStringCid:
338 offset =
339 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
340 break;
341 default:
342 UNREACHABLE();
343 break;
344 }
345 }
346 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
347 if (start_loc.IsConstant()) {
348 const auto& constant = start_loc.constant();
349 ASSERT(constant.IsInteger());
350 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
351 const intptr_t add_value = Utils::AddWithWrapAround(
352 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
353 __ AddImmediate(payload_reg, array_reg, add_value);
354 return;
355 }
356 const Register start_reg = start_loc.reg();
357 intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
358 (unboxed_inputs() ? 0 : kSmiTagShift);
359 if (shift < 0) {
360 if (!unboxed_inputs()) {
361 __ ExtendNonNegativeSmi(start_reg);
362 }
363 __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
364#if defined(DART_COMPRESSED_POINTERS)
365 } else if (!unboxed_inputs()) {
366 __ add(payload_reg, array_reg, compiler::Operand(start_reg, SXTW, shift));
367#endif
368 } else {
369 __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
370 }
371 __ AddImmediate(payload_reg, offset);
372}
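// Worked example (illustrative): for a two-byte string with a boxed (Smi)
// start index, shift == ShiftForPowerOfTwo(2) - kSmiTagShift == 0, so the
// start register is added unscaled (sign-extended first when pointers are
// compressed) and only the payload offset computed above is then applied.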
373
374LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
375 bool opt) const {
376 const intptr_t kNumInputs = 1;
377 const intptr_t kNumTemps = 0;
378 LocationSummary* locs = new (zone)
379 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
380 ConstantInstr* constant = value()->definition()->AsConstant();
381 if (constant != nullptr && constant->HasZeroRepresentation()) {
382 locs->set_in(0, Location::Constant(constant));
383 } else if (representation() == kUnboxedDouble) {
384 locs->set_in(0, Location::RequiresFpuRegister());
385 } else if (representation() == kUnboxedInt64) {
386 locs->set_in(0, Location::RequiresRegister());
387 } else {
388 ASSERT(representation() == kTagged);
389 locs->set_in(0, LocationAnyOrConstant(value()));
390 }
391 return locs;
392}
393
394// Buffers registers in order to use STP to move
395// two registers at once.
396class ArgumentsMover : public ValueObject {
397 public:
398 // Flush all buffered registers.
399 void Flush(FlowGraphCompiler* compiler) {
400 if (pending_register_ != kNoRegister) {
401 __ StoreToOffset(pending_register_, SP,
402 pending_sp_relative_index_ * kWordSize);
403 pending_sp_relative_index_ = -1;
404 pending_register_ = kNoRegister;
405 }
406 }
407
408 // Buffer given register. May push buffered registers if needed.
409 void MoveRegister(FlowGraphCompiler* compiler,
410 intptr_t sp_relative_index,
411 Register reg) {
412 if (pending_register_ != kNoRegister) {
413 ASSERT((sp_relative_index + 1) == pending_sp_relative_index_);
414 __ StorePairToOffset(reg, pending_register_, SP,
415 sp_relative_index * kWordSize);
416 pending_register_ = kNoRegister;
417 return;
418 }
419 pending_register_ = reg;
420 pending_sp_relative_index_ = sp_relative_index;
421 }
422
423 // Returns free temp register to hold argument value.
424 Register GetFreeTempRegister(FlowGraphCompiler* compiler) {
425 CLOBBERS_LR({
426 // While pushing arguments only Push, PushPair, LoadObject and
427 // LoadFromOffset are used. They do not clobber TMP or LR.
428 static_assert(((1 << LR) & kDartAvailableCpuRegs) == 0,
429 "LR should not be allocatable");
430 static_assert(((1 << TMP) & kDartAvailableCpuRegs) == 0,
431 "TMP should not be allocatable");
432 return (pending_register_ == TMP) ? LR : TMP;
433 });
434 }
435
436 private:
437 intptr_t pending_sp_relative_index_ = -1;
438 Register pending_register_ = kNoRegister;
439};
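// Illustrative use: when MoveArgumentInstr::EmitNativeCode below walks a chain
// of moves at stack indices 3, 2, 1, 0, index 3 is buffered, paired with index
// 2 into one StorePairToOffset (stp), and indices 1 and 0 are paired the same
// way; a leftover unpaired register is written with a single store by Flush().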
440
441void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
442 ASSERT(compiler->is_optimizing());
443
444 if (previous()->IsMoveArgument()) {
445 // Already generated by the first MoveArgument in the chain.
446 return;
447 }
448
449 ArgumentsMover pusher;
450 for (MoveArgumentInstr* move_arg = this; move_arg != nullptr;
451 move_arg = move_arg->next()->AsMoveArgument()) {
452 const Location value = move_arg->locs()->in(0);
453 Register reg = kNoRegister;
454 if (value.IsRegister()) {
455 reg = value.reg();
456 } else if (value.IsConstant()) {
457 if (value.constant_instruction()->HasZeroRepresentation()) {
458 reg = ZR;
459 } else {
460 ASSERT(move_arg->representation() == kTagged);
461 const Object& constant = value.constant();
462 if (constant.IsNull()) {
463 reg = NULL_REG;
464 } else {
465 reg = pusher.GetFreeTempRegister(compiler);
466 __ LoadObject(reg, value.constant());
467 }
468 }
469 } else if (value.IsFpuRegister()) {
470 pusher.Flush(compiler);
471 __ StoreDToOffset(value.fpu_reg(), SP,
472 move_arg->location().stack_index() * kWordSize);
473 continue;
474 } else {
475 ASSERT(value.IsStackSlot());
476 const intptr_t value_offset = value.ToStackSlotOffset();
477 reg = pusher.GetFreeTempRegister(compiler);
478 __ LoadFromOffset(reg, value.base_reg(), value_offset);
479 }
480 pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
481 }
482 pusher.Flush(compiler);
483}
484
485LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
486 bool opt) const {
487 const intptr_t kNumInputs = 1;
488 const intptr_t kNumTemps = 0;
489 LocationSummary* locs = new (zone)
490 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
491 switch (representation()) {
492 case kTagged:
493 case kUnboxedInt64:
494 locs->set_in(0,
495 Location::RegisterLocation(CallingConventions::kReturnReg));
496 break;
497 case kPairOfTagged:
498 locs->set_in(
499 0, Location::Pair(
500 Location::RegisterLocation(CallingConventions::kReturnReg),
501 Location::RegisterLocation(
502 CallingConventions::kSecondReturnReg)));
503 break;
504 case kUnboxedDouble:
505 locs->set_in(
506 0, Location::FpuRegisterLocation(CallingConventions::kReturnFpuReg));
507 break;
508 default:
509 UNREACHABLE();
510 break;
511 }
512 return locs;
513}
514
515// Attempt optimized compilation at return instruction instead of at the entry.
516// The entry needs to be patchable, no inlined objects are allowed in the area
517// that will be overwritten by the patch instructions: a branch macro sequence.
518void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
519 if (locs()->in(0).IsRegister()) {
520 const Register result = locs()->in(0).reg();
521 ASSERT(result == CallingConventions::kReturnReg);
522 } else if (locs()->in(0).IsPairLocation()) {
523 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
524 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
525 ASSERT(result_lo == CallingConventions::kReturnReg);
526 ASSERT(result_hi == CallingConventions::kSecondReturnReg);
527 } else {
528 ASSERT(locs()->in(0).IsFpuRegister());
529 const FpuRegister result = locs()->in(0).fpu_reg();
530 ASSERT(result == CallingConventions::kReturnFpuReg);
531 }
532
533 if (compiler->parsed_function().function().IsAsyncFunction() ||
534 compiler->parsed_function().function().IsAsyncGenerator()) {
535 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
536 const Code& stub = GetReturnStub(compiler);
537 compiler->EmitJumpToStub(stub);
538 return;
539 }
540
541 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
542 __ ret();
543 return;
544 }
545
546#if defined(DEBUG)
547 compiler::Label stack_ok;
548 __ Comment("Stack Check");
549 const intptr_t fp_sp_dist =
550 (compiler::target::frame_layout.first_local_from_fp + 1 -
551 compiler->StackSize()) *
552 kWordSize;
553 ASSERT(fp_sp_dist <= 0);
554 __ sub(R2, SP, compiler::Operand(FP));
555 __ CompareImmediate(R2, fp_sp_dist);
556 __ b(&stack_ok, EQ);
557 __ brk(0);
558 __ Bind(&stack_ok);
559#endif
560 ASSERT(__ constant_pool_allowed());
561 __ LeaveDartFrame(); // Disallows constant pool use.
562 __ ret();
563 // This DartReturnInstr may be emitted out of order by the optimizer. The next
564 // block may be a target expecting a properly set constant pool pointer.
565 __ set_constant_pool_allowed(true);
566}
567
568// Detect pattern when one value is zero and another is a power of 2.
569static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
570 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
571 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
572}
573
574LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
575 bool opt) const {
576 comparison()->InitializeLocationSummary(zone, opt);
577 return comparison()->locs();
578}
579
580void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
581 const Register result = locs()->out(0).reg();
582
583 Location left = locs()->in(0);
584 Location right = locs()->in(1);
585 ASSERT(!left.IsConstant() || !right.IsConstant());
586
587 // Emit comparison code. This must not overwrite the result register.
588 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
589 // the labels or returning an invalid condition.
590 BranchLabels labels = {nullptr, nullptr, nullptr};
591 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
592 ASSERT(true_condition != kInvalidCondition);
593
594 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
595
596 intptr_t true_value = if_true_;
597 intptr_t false_value = if_false_;
598
599 if (is_power_of_two_kind) {
600 if (true_value == 0) {
601 // We need to have zero in result on true_condition.
602 true_condition = InvertCondition(true_condition);
603 }
604 } else {
605 if (true_value == 0) {
606 // Swap values so that false_value is zero.
607 intptr_t temp = true_value;
608 true_value = false_value;
609 false_value = temp;
610 } else {
611 true_condition = InvertCondition(true_condition);
612 }
613 }
614
615 __ cset(result, true_condition);
616
617 if (is_power_of_two_kind) {
618 const intptr_t shift =
619 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
620 __ LslImmediate(result, result, shift + kSmiTagSize);
621 } else {
622 __ sub(result, result, compiler::Operand(1));
623 const int64_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
624 __ AndImmediate(result, result, val);
625 if (false_value != 0) {
626 __ AddImmediate(result, Smi::RawValue(false_value));
627 }
628 }
629}
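// Worked example (illustrative): for if_true_ == 3 and if_false_ == 5 (not a
// power-of-two pair) the condition is inverted, so cset produces 1 exactly
// when the original comparison is false. The sub then yields 0 or -1, the
// AndImmediate with Smi::RawValue(3) - Smi::RawValue(5) == -4 yields 0 or -4,
// and adding Smi::RawValue(5) == 10 leaves the Smi for 5 or 3 respectively,
// all without a branch.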
630
631LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
632 bool opt) const {
633 const intptr_t kNumInputs = 1;
634 const intptr_t kNumTemps = 0;
635 LocationSummary* summary = new (zone)
636 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
637 summary->set_in(
638 0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
639 return MakeCallSummary(zone, this, summary);
640}
641
642void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
643 // Load arguments descriptor in ARGS_DESC_REG.
644 const intptr_t argument_count = ArgumentCount(); // Includes type args.
645 const Array& arguments_descriptor =
646 Array::ZoneHandle(Z, GetArgumentsDescriptor());
647 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
648
649 if (FLAG_precompiled_mode) {
650 ASSERT(locs()->in(0).reg() == R0);
651 // R0: Closure with a cached entry point.
652 __ LoadFieldFromOffset(R2, R0,
653 compiler::target::Closure::entry_point_offset());
654 } else {
655 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
656 // FUNCTION_REG: Function.
657 __ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
658 compiler::target::Function::code_offset());
659 // Closure functions only have one entry point.
660 __ LoadFieldFromOffset(R2, FUNCTION_REG,
661 compiler::target::Function::entry_point_offset());
662 }
663
664 // ARGS_DESC_REG: Arguments descriptor array.
665 // R2: instructions entry point.
666 if (!FLAG_precompiled_mode) {
667 // R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
668 __ LoadImmediate(IC_DATA_REG, 0);
669 }
670 __ blr(R2);
671 compiler->EmitCallsiteMetadata(source(), deopt_id(),
672 UntaggedPcDescriptors::kOther, locs(), env());
673 compiler->EmitDropArguments(argument_count);
674}
675
676LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
677 bool opt) const {
678 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
679 LocationSummary::kNoCall);
680}
681
682void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
683 const Register result = locs()->out(0).reg();
684 __ LoadFromOffset(result, FP,
685 compiler::target::FrameOffsetInBytesForVariable(&local()));
686}
687
688LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
689 bool opt) const {
690 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
691 LocationSummary::kNoCall);
692}
693
694void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
695 const Register value = locs()->in(0).reg();
696 const Register result = locs()->out(0).reg();
697 ASSERT(result == value); // Assert that register assignment is correct.
698 __ StoreToOffset(value, FP,
699 compiler::target::FrameOffsetInBytesForVariable(&local()));
700}
701
702LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
703 bool opt) const {
704 return LocationSummary::Make(zone, 0, Location::RequiresRegister(),
705 LocationSummary::kNoCall);
706}
707
708void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
709 // The register allocator drops constant definitions that have no uses.
710 if (!locs()->out(0).IsInvalid()) {
711 const Register result = locs()->out(0).reg();
712 __ LoadObject(result, value());
713 }
714}
715
716void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
717 const Location& destination,
718 Register tmp,
719 intptr_t pair_index) {
720 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
721 if (destination.IsRegister()) {
722 if (representation() == kUnboxedInt32 ||
723 representation() == kUnboxedUint32 ||
724 representation() == kUnboxedInt64) {
725 const int64_t value = Integer::Cast(value_).AsInt64Value();
726 __ LoadImmediate(destination.reg(), value);
727 } else {
728 ASSERT(representation() == kTagged);
729 __ LoadObject(destination.reg(), value_);
730 }
731 } else if (destination.IsFpuRegister()) {
732 switch (representation()) {
733 case kUnboxedFloat:
734 __ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
735 break;
736 case kUnboxedDouble:
737 __ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
738 break;
739 case kUnboxedFloat64x2:
740 __ LoadQImmediate(destination.fpu_reg(),
741 Float64x2::Cast(value_).value());
742 break;
743 case kUnboxedFloat32x4:
744 __ LoadQImmediate(destination.fpu_reg(),
745 Float32x4::Cast(value_).value());
746 break;
747 case kUnboxedInt32x4:
748 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
749 break;
750 default:
751 UNREACHABLE();
752 }
753 } else if (destination.IsDoubleStackSlot()) {
754 ASSERT(representation() == kUnboxedDouble);
755 __ LoadDImmediate(VTMP, Double::Cast(value_).value());
756 const intptr_t dest_offset = destination.ToStackSlotOffset();
757 __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
758 } else if (destination.IsQuadStackSlot()) {
759 switch (representation()) {
760 case kUnboxedFloat64x2:
761 __ LoadQImmediate(VTMP, Float64x2::Cast(value_).value());
762 break;
763 case kUnboxedFloat32x4:
764 __ LoadQImmediate(VTMP, Float32x4::Cast(value_).value());
765 break;
766 case kUnboxedInt32x4:
767 __ LoadQImmediate(VTMP, Int32x4::Cast(value_).value());
768 break;
769 default:
770 UNREACHABLE();
771 }
772 } else {
773 ASSERT(destination.IsStackSlot());
774 ASSERT(tmp != kNoRegister);
775 const intptr_t dest_offset = destination.ToStackSlotOffset();
776 compiler::OperandSize operand_size = compiler::kEightBytes;
777 if (representation() == kUnboxedInt32 ||
778 representation() == kUnboxedUint32 ||
779 representation() == kUnboxedInt64) {
780 const int64_t value = Integer::Cast(value_).AsInt64Value();
781 if (value == 0) {
782 tmp = ZR;
783 } else {
784 __ LoadImmediate(tmp, value);
785 }
786 } else if (representation() == kUnboxedFloat) {
787 int32_t float_bits =
788 bit_cast<int32_t, float>(Double::Cast(value_).value());
789 __ LoadImmediate(tmp, float_bits);
790 operand_size = compiler::kFourBytes;
791 } else {
792 ASSERT(representation() == kTagged);
793 if (value_.IsNull()) {
794 tmp = NULL_REG;
795 } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
796 tmp = ZR;
797 } else {
798 __ LoadObject(tmp, value_);
799 }
800 }
801 __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
802 }
803}
804
805LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
806 bool opt) const {
807 const bool is_unboxed_int =
808 RepresentationUtils::IsUnboxedInteger(representation());
809 ASSERT(!is_unboxed_int || RepresentationUtils::ValueSize(representation()) <=
810 compiler::target::kWordSize);
811 const intptr_t kNumInputs = 0;
812 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
813 LocationSummary* locs = new (zone)
814 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
815 if (is_unboxed_int) {
816 locs->set_out(0, Location::RequiresRegister());
817 } else {
818 switch (representation()) {
819 case kUnboxedDouble:
820 locs->set_out(0, Location::RequiresFpuRegister());
821 locs->set_temp(0, Location::RequiresRegister());
822 break;
823 default:
824 UNREACHABLE();
825 break;
826 }
827 }
828 return locs;
829}
830
831void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
832 if (!locs()->out(0).IsInvalid()) {
833 const Register scratch =
834 RepresentationUtils::IsUnboxedInteger(representation())
835 ? kNoRegister
836 : locs()->temp(0).reg();
837 EmitMoveToLocation(compiler, locs()->out(0), scratch);
838 }
839}
840
841LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
842 bool opt) const {
843 auto const dst_type_loc =
844 LocationFixedRegisterOrConstant(dst_type(), TypeTestABI::kDstTypeReg);
845
846 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
847 // since TTS preserves them. So we make this a `kNoCall` summary,
848 // even though most other registers can be modified by the stub. To tell the
849 // register allocator about it, we reserve all the other registers as
850 // temporary registers.
851 // TODO(http://dartbug.com/32788): Simplify this.
852
853 const intptr_t kNonChangeableInputRegs =
854 (1 << TypeTestABI::kInstanceReg) |
855 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
856 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
857 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
858
859 const intptr_t kNumInputs = 4;
860
861 // We invoke a stub that can potentially clobber any CPU register
862 // but can only clobber FPU registers on the slow path when
863 // entering runtime. ARM64 ABI only guarantees that lower
864 // 64-bits of an V registers are preserved so we block all
865 // of them except for FpuTMP.
866 const intptr_t kCpuRegistersToPreserve =
867 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
868 const intptr_t kFpuRegistersToPreserve =
869 Utils::NBitMask<intptr_t>(kNumberOfFpuRegisters) & ~(1l << FpuTMP);
870
871 const intptr_t kNumTemps = (Utils::CountOneBits64(kCpuRegistersToPreserve) +
872 Utils::CountOneBits64(kFpuRegistersToPreserve));
873
874 LocationSummary* summary = new (zone) LocationSummary(
875 zone, kNumInputs, kNumTemps, LocationSummary::kCallCalleeSafe);
876 summary->set_in(kInstancePos,
877 Location::RegisterLocation(TypeTestABI::kInstanceReg));
878 summary->set_in(kDstTypePos, dst_type_loc);
879 summary->set_in(
880 kInstantiatorTAVPos,
881 Location::RegisterLocation(TypeTestABI::kInstantiatorTypeArgumentsReg));
882 summary->set_in(kFunctionTAVPos,
883 Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
884 summary->set_out(0, Location::SameAsFirstInput());
885
886 // Let's reserve all registers except for the input ones.
887 intptr_t next_temp = 0;
888 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
889 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
890 if (should_preserve) {
891 summary->set_temp(next_temp++,
892 Location::RegisterLocation(static_cast<Register>(i)));
893 }
894 }
895
896 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
897 const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
898 if (should_preserve) {
899 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
900 static_cast<FpuRegister>(i)));
901 }
902 }
903
904 return summary;
905}
906
907void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
908 ASSERT(locs()->always_calls());
909
910 auto object_store = compiler->isolate_group()->object_store();
911 const auto& assert_boolean_stub =
912 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
913
914 compiler::Label done;
916 compiler->GenerateStubCall(source(), assert_boolean_stub,
917 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
918 deopt_id(), env());
919 __ Bind(&done);
920}
921
922static Condition TokenKindToIntCondition(Token::Kind kind) {
923 switch (kind) {
924 case Token::kEQ:
925 return EQ;
926 case Token::kNE:
927 return NE;
928 case Token::kLT:
929 return LT;
930 case Token::kGT:
931 return GT;
932 case Token::kLTE:
933 return LE;
934 case Token::kGTE:
935 return GE;
936 default:
937 UNREACHABLE();
938 return VS;
939 }
940}
941
942static Condition FlipCondition(Condition condition) {
943 switch (condition) {
944 case EQ:
945 return EQ;
946 case NE:
947 return NE;
948 case LT:
949 return GT;
950 case LE:
951 return GE;
952 case GT:
953 return LT;
954 case GE:
955 return LE;
956 case CC:
957 return HI;
958 case LS:
959 return CS;
960 case HI:
961 return CC;
962 case CS:
963 return LS;
964 default:
965 UNREACHABLE();
966 return EQ;
967 }
968}
969
970static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
971 Condition true_condition,
972 BranchLabels labels) {
973 if (labels.fall_through == labels.false_label) {
974 // If the next block is the false successor we will fall through to it.
975 __ b(labels.true_label, true_condition);
976 } else {
977 // If the next block is not the false successor we will branch to it.
978 Condition false_condition = InvertCondition(true_condition);
979 __ b(labels.false_label, false_condition);
980
981 // Fall through or jump to the true successor.
982 if (labels.fall_through != labels.true_label) {
983 __ b(labels.true_label);
984 }
985 }
986}
987
988static bool AreLabelsNull(BranchLabels labels) {
989 return (labels.true_label == nullptr && labels.false_label == nullptr &&
990 labels.fall_through == nullptr);
991}
992
993static bool CanUseCbzTbzForComparison(FlowGraphCompiler* compiler,
994 Register rn,
995 Condition cond,
996 BranchLabels labels) {
997 return !AreLabelsNull(labels) && __ CanGenerateCbzTbz(rn, cond);
998}
999
1000static void EmitCbzTbz(Register reg,
1001 FlowGraphCompiler* compiler,
1002 Condition true_condition,
1003 BranchLabels labels,
1004 compiler::OperandSize sz) {
1005 ASSERT(CanUseCbzTbzForComparison(compiler, reg, true_condition, labels));
1006 if (labels.fall_through == labels.false_label) {
1007 // If the next block is the false successor we will fall through to it.
1008 __ GenerateCbzTbz(reg, true_condition, labels.true_label, sz);
1009 } else {
1010 // If the next block is not the false successor we will branch to it.
1011 Condition false_condition = InvertCondition(true_condition);
1012 __ GenerateCbzTbz(reg, false_condition, labels.false_label, sz);
1013
1014 // Fall through or jump to the true successor.
1015 if (labels.fall_through != labels.true_label) {
1016 __ b(labels.true_label);
1017 }
1018 }
1019}
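// Note: callers only reach this helper after CanUseCbzTbzForComparison
// succeeds, so a comparison against zero typically collapses into a single
// cbz/cbnz (or a tbz/tbnz on the sign bit) instead of a cmp followed by a
// conditional branch.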
1020
1021static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1022 LocationSummary* locs,
1023 Token::Kind kind,
1024 BranchLabels labels) {
1025 Location left = locs->in(0);
1026 Location right = locs->in(1);
1027 ASSERT(!left.IsConstant() || !right.IsConstant());
1028
1029 Condition true_condition = TokenKindToIntCondition(kind);
1030 if (left.IsConstant() || right.IsConstant()) {
1031 // Ensure constant is on the right.
1032 ConstantInstr* constant = nullptr;
1033 if (left.IsConstant()) {
1034 constant = left.constant_instruction();
1035 Location tmp = right;
1036 right = left;
1037 left = tmp;
1038 true_condition = FlipCondition(true_condition);
1039 } else {
1040 constant = right.constant_instruction();
1041 }
1042
1043 ASSERT(constant->representation() == kTagged);
1044 int64_t value;
1045 if (compiler::HasIntegerValue(constant->value(), &value) && (value == 0) &&
1046 CanUseCbzTbzForComparison(compiler, left.reg(), true_condition,
1047 labels)) {
1048 EmitCbzTbz(left.reg(), compiler, true_condition, labels,
1049 compiler::kObjectBytes);
1050 return kInvalidCondition;
1051 }
1052 __ CompareObject(left.reg(), right.constant());
1053 } else {
1054 __ CompareObjectRegisters(left.reg(), right.reg());
1055 }
1056 return true_condition;
1057}
1058
1059// Similar to ComparisonInstr::EmitComparisonCode, may either:
1060// - emit comparison code and return a valid condition in which case the
1061// caller is expected to emit a branch to the true label based on that
1062// condition (or a branch to the false label on the opposite condition).
1063// - emit comparison code with a branch directly to the labels and return
1064// kInvalidCondition.
1065static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
1066 LocationSummary* locs,
1067 Token::Kind kind,
1068 BranchLabels labels) {
1069 Location left = locs->in(0);
1070 Location right = locs->in(1);
1071 ASSERT(!left.IsConstant() || !right.IsConstant());
1072
1073 Condition true_condition = TokenKindToIntCondition(kind);
1074 if (left.IsConstant() || right.IsConstant()) {
1075 // Ensure constant is on the right.
1076 ConstantInstr* constant = nullptr;
1077 if (left.IsConstant()) {
1078 constant = left.constant_instruction();
1079 Location tmp = right;
1080 right = left;
1081 left = tmp;
1082 true_condition = FlipCondition(true_condition);
1083 } else {
1084 constant = right.constant_instruction();
1085 }
1086
1087 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1088 int64_t value;
1089 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1090 RELEASE_ASSERT(ok);
1091 if (value == 0 && CanUseCbzTbzForComparison(compiler, left.reg(),
1092 true_condition, labels)) {
1093 EmitCbzTbz(left.reg(), compiler, true_condition, labels,
1094 compiler::kEightBytes);
1095 return kInvalidCondition;
1096 }
1097 __ CompareImmediate(left.reg(), value);
1098 } else {
1099 UNREACHABLE();
1100 }
1101 } else {
1102 __ CompareRegisters(left.reg(), right.reg());
1103 }
1104 return true_condition;
1105}
1106
1107static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1108 LocationSummary* locs,
1109 Token::Kind kind,
1110 BranchLabels labels) {
1111 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1112 const Register left = locs->in(0).reg();
1113 const Register right = locs->in(1).reg();
1114 const Condition true_condition = TokenKindToIntCondition(kind);
1115 compiler::Label* equal_result =
1116 (true_condition == EQ) ? labels.true_label : labels.false_label;
1117 compiler::Label* not_equal_result =
1118 (true_condition == EQ) ? labels.false_label : labels.true_label;
1119
1120 // Check if operands have the same value. If they don't, then they could
1121 // be equal only if both of them are Mints with the same value.
1122 __ CompareObjectRegisters(left, right);
1123 __ b(equal_result, EQ);
1124 __ and_(TMP, left, compiler::Operand(right), compiler::kObjectBytes);
1125 __ BranchIfSmi(TMP, not_equal_result);
1126 __ CompareClassId(left, kMintCid);
1127 __ b(not_equal_result, NE);
1128 __ CompareClassId(right, kMintCid);
1129 __ b(not_equal_result, NE);
1130 __ LoadFieldFromOffset(TMP, left, Mint::value_offset());
1131 __ LoadFieldFromOffset(TMP2, right, Mint::value_offset());
1132 __ CompareRegisters(TMP, TMP2);
1133 return true_condition;
1134}
1135
1136LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
1137 bool opt) const {
1138 const intptr_t kNumInputs = 2;
1139 if (operation_cid() == kDoubleCid) {
1140 const intptr_t kNumTemps = 0;
1141 LocationSummary* locs = new (zone)
1142 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1143 locs->set_in(0, Location::RequiresFpuRegister());
1144 locs->set_in(1, Location::RequiresFpuRegister());
1145 locs->set_out(0, Location::RequiresRegister());
1146 return locs;
1147 }
1148 if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
1149 operation_cid() == kIntegerCid) {
1150 const intptr_t kNumTemps = 0;
1151 LocationSummary* locs = new (zone)
1152 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1153 if (is_null_aware()) {
1154 locs->set_in(0, Location::RequiresRegister());
1155 locs->set_in(1, Location::RequiresRegister());
1156 } else {
1157 locs->set_in(0, LocationRegisterOrConstant(left()));
1158 // Only one input can be a constant operand. The case of two constant
1159 // operands should be handled by constant propagation.
1160 // Only right can be a stack slot.
1161 locs->set_in(1, locs->in(0).IsConstant()
1162 ? Location::RequiresRegister()
1163 : LocationRegisterOrConstant(right()));
1164 }
1165 locs->set_out(0, Location::RequiresRegister());
1166 return locs;
1167 }
1168 UNREACHABLE();
1169 return nullptr;
1170}
1171
1172static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1173 switch (kind) {
1174 case Token::kEQ:
1175 return EQ;
1176 case Token::kNE:
1177 return NE;
1178 case Token::kLT:
1179 return LT;
1180 case Token::kGT:
1181 return GT;
1182 case Token::kLTE:
1183 return LE;
1184 case Token::kGTE:
1185 return GE;
1186 default:
1187 UNREACHABLE();
1188 return VS;
1189 }
1190}
1191
1192static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1193 LocationSummary* locs,
1194 BranchLabels labels,
1195 Token::Kind kind) {
1196 const VRegister left = locs->in(0).fpu_reg();
1197 const VRegister right = locs->in(1).fpu_reg();
1198
1199 switch (kind) {
1200 case Token::kEQ:
1201 __ fcmpd(left, right);
1202 return EQ;
1203 case Token::kNE:
1204 __ fcmpd(left, right);
1205 return NE;
1206 case Token::kLT:
1207 __ fcmpd(right, left); // Flip to handle NaN.
1208 return GT;
1209 case Token::kGT:
1210 __ fcmpd(left, right);
1211 return GT;
1212 case Token::kLTE:
1213 __ fcmpd(right, left); // Flip to handle NaN.
1214 return GE;
1215 case Token::kGTE:
1216 __ fcmpd(left, right);
1217 return GE;
1218 default:
1219 UNREACHABLE();
1220 return VS;
1221 }
1222}
1223
1224Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1225 BranchLabels labels) {
1226 if (is_null_aware()) {
1227 ASSERT(operation_cid() == kMintCid);
1228 return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
1229 }
1230 if (operation_cid() == kSmiCid) {
1231 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1232 } else if (operation_cid() == kMintCid || operation_cid() == kIntegerCid) {
1233 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1234 } else {
1235 ASSERT(operation_cid() == kDoubleCid);
1236 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1237 }
1238}
1239
1240LocationSummary* TestIntInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1241 const intptr_t kNumInputs = 2;
1242 const intptr_t kNumTemps = 0;
1243 LocationSummary* locs = new (zone)
1244 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1245 locs->set_in(0, Location::RequiresRegister());
1246 // Only one input can be a constant operand. The case of two constant
1247 // operands should be handled by constant propagation.
1248 locs->set_in(1, LocationRegisterOrConstant(right()));
1249 locs->set_out(0, Location::RequiresRegister());
1250 return locs;
1251}
1252
1253Condition TestIntInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1254 BranchLabels labels) {
1255 const Register left = locs()->in(0).reg();
1256 Location right = locs()->in(1);
1257 const auto operand_size = representation_ == kTagged ? compiler::kObjectBytes
1258 : compiler::kEightBytes;
1259 if (right.IsConstant()) {
1260 __ TestImmediate(left, ComputeImmediateMask(), operand_size);
1261 } else {
1262 __ tst(left, compiler::Operand(right.reg()), operand_size);
1263 }
1264 Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
1265 return true_condition;
1266}
1267
1268static bool IsSingleBitMask(Location mask, intptr_t* bit) {
1269 if (!mask.IsConstant()) {
1270 return false;
1271 }
1272
1273 uint64_t mask_value =
1274 static_cast<uint64_t>(Integer::Cast(mask.constant()).AsInt64Value());
1275 if (!Utils::IsPowerOfTwo(mask_value)) {
1276 return false;
1277 }
1278
1279 *bit = Utils::CountTrailingZeros64(mask_value);
1280 return true;
1281}
1282
1283void TestIntInstr::EmitBranchCode(FlowGraphCompiler* compiler,
1284 BranchInstr* branch) {
1285 // Check if this is a single bit test. In this case this branch can be
1286 // emitted as TBZ/TBNZ.
1287 intptr_t bit_index;
1288 if (IsSingleBitMask(locs()->in(1), &bit_index)) {
1289 BranchLabels labels = compiler->CreateBranchLabels(branch);
1290 const Register value = locs()->in(0).reg();
1291
1292 bool branch_on_zero_bit;
1293 bool can_fallthrough;
1294 compiler::Label* target;
1295 if (labels.fall_through == labels.true_label) {
1296 target = labels.false_label;
1297 branch_on_zero_bit = (kind() == Token::kNE);
1298 can_fallthrough = true;
1299 } else {
1300 target = labels.true_label;
1301 branch_on_zero_bit = (kind() == Token::kEQ);
1302 can_fallthrough = (labels.fall_through == labels.false_label);
1303 }
1304
1305 if (representation_ == kTagged) {
1306 bit_index = Utils::Minimum(kSmiBits, bit_index) + kSmiTagShift;
1307 }
1308
1309 if (branch_on_zero_bit) {
1310 __ tbz(target, value, bit_index);
1311 } else {
1312 __ tbnz(target, value, bit_index);
1313 }
1314 if (!can_fallthrough) {
1315 __ b(labels.false_label);
1316 }
1317
1318 return;
1319 }
1320
1321 // Otherwise use shared implementation.
1322 ComparisonInstr::EmitBranchCode(compiler, branch);
1323}
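// Illustrative example: a test such as value & 0x10 != 0 on a tagged operand
// becomes a single tbnz on bit 4 + kSmiTagShift == 5 of the value register;
// an extra unconditional branch is emitted only when neither successor block
// falls through.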
1324
1325LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1326 bool opt) const {
1327 const intptr_t kNumInputs = 1;
1328 const intptr_t kNumTemps = 1;
1329 LocationSummary* locs = new (zone)
1330 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1331 locs->set_in(0, Location::RequiresRegister());
1332 locs->set_temp(0, Location::RequiresRegister());
1333 locs->set_out(0, Location::RequiresRegister());
1334 return locs;
1335}
1336
1337Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1338 BranchLabels labels) {
1339 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1340 const Register val_reg = locs()->in(0).reg();
1341 const Register cid_reg = locs()->temp(0).reg();
1342
1343 compiler::Label* deopt =
1344 CanDeoptimize()
1345 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1346 : nullptr;
1347
1348 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1349 const ZoneGrowableArray<intptr_t>& data = cid_results();
1350 ASSERT(data[0] == kSmiCid);
1351 bool result = data[1] == true_result;
1352 __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
1353 __ LoadClassId(cid_reg, val_reg);
1354
1355 for (intptr_t i = 2; i < data.length(); i += 2) {
1356 const intptr_t test_cid = data[i];
1357 ASSERT(test_cid != kSmiCid);
1358 result = data[i + 1] == true_result;
1359 __ CompareImmediate(cid_reg, test_cid);
1360 __ b(result ? labels.true_label : labels.false_label, EQ);
1361 }
1362 // No match found, deoptimize or default action.
1363 if (deopt == nullptr) {
1364 // If the cid is not in the list, jump to the opposite label from the cids
1365 // that are in the list. These must be all the same (see asserts in the
1366 // constructor).
1367 compiler::Label* target = result ? labels.false_label : labels.true_label;
1368 if (target != labels.fall_through) {
1369 __ b(target);
1370 }
1371 } else {
1372 __ b(deopt);
1373 }
1374 // Dummy result as this method already did the jump, there's no need
1375 // for the caller to branch on a condition.
1376 return kInvalidCondition;
1377}
1378
1379LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1380 bool opt) const {
1381 const intptr_t kNumInputs = 2;
1382 const intptr_t kNumTemps = 0;
1383 if (operation_cid() == kDoubleCid) {
1384 LocationSummary* summary = new (zone)
1385 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1386 summary->set_in(0, Location::RequiresFpuRegister());
1387 summary->set_in(1, Location::RequiresFpuRegister());
1388 summary->set_out(0, Location::RequiresRegister());
1389 return summary;
1390 }
1391 if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
1392 LocationSummary* summary = new (zone)
1393 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1394 summary->set_in(0, LocationRegisterOrConstant(left()));
1395 // Only one input can be a constant operand. The case of two constant
1396 // operands should be handled by constant propagation.
1397 summary->set_in(1, summary->in(0).IsConstant()
1398 ? Location::RequiresRegister()
1399 : LocationRegisterOrConstant(right()));
1400 summary->set_out(0, Location::RequiresRegister());
1401 return summary;
1402 }
1403
1404 UNREACHABLE();
1405 return nullptr;
1406}
1407
1408Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1409 BranchLabels labels) {
1410 if (operation_cid() == kSmiCid) {
1411 return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
1412 } else if (operation_cid() == kMintCid) {
1413 return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
1414 } else {
1415 ASSERT(operation_cid() == kDoubleCid);
1416 return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
1417 }
1418}
1419
1420void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1421 SetupNative();
1422 const Register result = locs()->out(0).reg();
1423
1424 // Pass a pointer to the first argument in R2.
1425 __ AddImmediate(R2, SP, (ArgumentCount() - 1) * kWordSize);
1426
1427 // Compute the effective address. When running under the simulator,
1428 // this is a redirection address that forces the simulator to call
1429 // into the runtime system.
1430 uword entry;
1431 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1432 const Code* stub;
1433 if (link_lazily()) {
1434 stub = &StubCode::CallBootstrapNative();
1435 entry = NativeEntry::LinkNativeCallEntry();
1436 } else {
1437 entry = reinterpret_cast<uword>(native_c_function());
1438 if (is_bootstrap_native()) {
1439 stub = &StubCode::CallBootstrapNative();
1440 } else if (is_auto_scope()) {
1441 stub = &StubCode::CallAutoScopeNative();
1442 } else {
1443 stub = &StubCode::CallNoScopeNative();
1444 }
1445 }
1446 __ LoadImmediate(R1, argc_tag);
1447 compiler::ExternalLabel label(entry);
1448 __ LoadNativeEntry(R5, &label,
1449 link_lazily() ? ObjectPool::Patchability::kPatchable
1450 : ObjectPool::Patchability::kNotPatchable);
1451 if (link_lazily()) {
1452 compiler->GeneratePatchableCall(
1453 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1455 } else {
1456 // We can never lazy-deopt here because natives are never optimized.
1457 ASSERT(!compiler->is_optimizing());
1458 compiler->GenerateNonLazyDeoptableStubCall(
1459 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1461 }
1462 __ LoadFromOffset(result, SP, 0);
1463
1464 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1465}
1466
1467#define R(r) (1 << r)
1468
1469LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1470 bool is_optimizing) const {
1471 return MakeLocationSummaryInternal(
1472 zone, is_optimizing,
1475}
1476
1477#undef R
1478
1479void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1480 const Register branch = locs()->in(TargetAddressIndex()).reg();
1481
1482 // The temps are indexed according to their register number.
1483 const Register temp1 = locs()->temp(0).reg();
1484 const Register temp2 = locs()->temp(1).reg();
1485 // For regular calls, this holds the FP for rebasing the original locations
1486 // during EmitParamMoves.
1487 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1488 // the call.
1489 const Register saved_fp_or_sp = locs()->temp(2).reg();
1490 const Register temp_csp = locs()->temp(3).reg();
1491
1492 // Ensure these are callee-saved registers and are preserved across the call.
1493 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1494 ASSERT(IsCalleeSavedRegister(temp_csp));
1495 // Other temps don't need to be preserved.
1496
1497 __ mov(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
1498
1499 if (!is_leaf_) {
1500 // We need to create a dummy "exit frame". It will share the same pool
1501 // pointer but have a null code object.
1502 __ LoadObject(CODE_REG, Object::null_object());
1503 __ set_constant_pool_allowed(false);
1504 __ EnterDartFrame(0, PP);
1505 }
1506
1507 // Reserve space for the arguments that go on the stack (if any), then align.
1508 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
1509 __ ReserveAlignedFrameSpace(stack_space);
1510 if (FLAG_target_memory_sanitizer) {
1511 RegisterSet kVolatileRegisterSet(kAbiVolatileCpuRegs & ~(1 << SP),
1512 kAbiVolatileFpuRegs);
1513 __ mov(temp1, SP);
1514 SPILLS_LR_TO_FRAME(__ PushRegisters(kVolatileRegisterSet));
1515
1516 // Unpoison everything from SP to FP: this covers both space we have
1517 // reserved for outgoing arguments and the spills which might have
1518 // been generated by the register allocator. Some of these spill slots
1519 // can be used as handles passed down to the runtime.
1520 __ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, compiler::Operand(temp1));
1521 __ MsanUnpoison(temp1, R1);
1522
1523 // Incoming Dart arguments to this trampoline are potentially used as local
1524 // handles.
1525 __ MsanUnpoison(is_leaf_ ? FPREG : saved_fp_or_sp,
1527
1528 // Outgoing arguments passed by register to the foreign function.
1529 __ LoadImmediate(R0, InputCount());
1530 __ CallCFunction(compiler::Address(
1531 THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
1532
1533 RESTORES_LR_FROM_FRAME(__ PopRegisters(kVolatileRegisterSet));
1534 }
1535
1536 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);
1537
1539 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1540 }
1541
1542 if (is_leaf_) {
1543#if !defined(PRODUCT)
1544 // Set the thread object's top_exit_frame_info and VMTag to enable the
1545 // profiler to determine that thread is no longer executing Dart code.
1546 __ StoreToOffset(FPREG, THR,
1547 compiler::target::Thread::top_exit_frame_info_offset());
1548 __ StoreToOffset(branch, THR, compiler::target::Thread::vm_tag_offset());
1549#endif
1550
1551 // We are entering runtime code, so the C stack pointer must be restored
1552 // from the stack limit to the top of the stack.
1553 __ mov(temp_csp, CSP);
1554 __ mov(CSP, SP);
1555
1556 __ blr(branch);
1557
1558 // Restore the Dart stack pointer.
1559 __ mov(SP, CSP);
1560 __ mov(CSP, temp_csp);
1561
1562#if !defined(PRODUCT)
1563 __ LoadImmediate(temp1, compiler::target::Thread::vm_tag_dart_id());
1564 __ StoreToOffset(temp1, THR, compiler::target::Thread::vm_tag_offset());
1565 __ StoreToOffset(ZR, THR,
1566 compiler::target::Thread::top_exit_frame_info_offset());
1567#endif
1568 } else {
1569 // We need to copy a dummy return address up into the dummy stack frame so
1570 // the stack walker will know which safepoint to use.
1571 //
1572 // ADR loads relative to itself, so add kInstrSize to point to the next
1573 // instruction.
1574 __ adr(temp1, compiler::Immediate(Instr::kInstrSize));
1575 compiler->EmitCallsiteMetadata(source(), deopt_id(),
1576 UntaggedPcDescriptors::Kind::kOther, locs(),
1577 env());
1578
1579 __ StoreToOffset(temp1, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
1580
1581 // Update information in the thread object and enter a safepoint.
1582 // Outline state transition. In AOT, for code size. In JIT, because we
1583 // cannot trust that code will be executable.
1584 __ ldr(temp1,
1585 compiler::Address(
1586 THR, compiler::target::Thread::
1587 call_native_through_safepoint_entry_point_offset()));
1588
1589 // Calls R9 and clobbers R19 (along with volatile registers).
1590 ASSERT(branch == R9);
1591 __ blr(temp1);
1592
1593 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1594 __ Comment("Check Dart_Handle for Error.");
1595 compiler::Label not_error;
1596 __ ldr(temp1,
1597 compiler::Address(CallingConventions::kReturnReg,
1598 compiler::target::LocalHandle::ptr_offset()));
1599 __ BranchIfSmi(temp1, &not_error);
1600 __ LoadClassId(temp1, temp1);
1601 __ RangeCheck(temp1, temp2, kFirstErrorCid, kLastErrorCid,
1602 compiler::AssemblerBase::kIfNotInRange, &not_error);
1603
1604 // Slow path, use the stub to propagate error, to save on code-size.
1605 __ Comment("Slow path: call Dart_PropagateError through stub.");
1608 __ ldr(temp1,
1609 compiler::Address(
1610 THR, compiler::target::Thread::
1611 call_native_through_safepoint_entry_point_offset()));
1612 __ ldr(branch, compiler::Address(
1613 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1614 __ blr(temp1);
1615#if defined(DEBUG)
1616 // We should never return with normal control flow from this.
1617 __ brk(0);
1618#endif
1619
1620 __ Bind(&not_error);
1621 }
1622
1623 // Refresh pinned registers values (inc. write barrier mask and null
1624 // object).
1625 __ RestorePinnedRegisters();
1626 }
1627
1628 EmitReturnMoves(compiler, temp1, temp2);
1629
1630 if (is_leaf_) {
1631 // Restore the pre-aligned SP.
1632 __ mov(SPREG, saved_fp_or_sp);
1633 } else {
1634 __ LeaveDartFrame();
1635
1636 // Restore the global object pool after returning from runtime (old space is
1637 // moving, so the GOP could have been relocated).
1638 if (FLAG_precompiled_mode) {
1639 __ SetupGlobalPoolAndDispatchTable();
1640 }
1641
1642 __ set_constant_pool_allowed(true);
1643 }
1644}
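// Summary of the sequence above: leaf calls stay on the Dart stack and
// temporarily publish SP as CSP for the AAPCS64 callee (recording FP and the
// target in the thread object outside PRODUCT builds for the profiler), while
// non-leaf calls build a dummy exit frame and go through the
// call-native-through-safepoint trampoline so the stack walker and GC observe
// a well-formed Generated-to-Native transition.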
1645
1646// Keep in sync with NativeEntryInstr::EmitNativeCode.
1647void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1648 EmitReturnMoves(compiler);
1649
1650 // Restore tag before the profiler's stack walker will no longer see the
1651 // InvokeDartCode return address.
1654
1655 __ LeaveDartFrame();
1656
1657 // The dummy return address is in LR, no need to pop it as on Intel.
1658
1659 // These can be anything besides the return registers (R0, R1) and THR (R26).
1660 const Register vm_tag_reg = R2;
1661 const Register old_exit_frame_reg = R3;
1662 const Register old_exit_through_ffi_reg = R4;
1663 const Register tmp = R5;
1664
1665 __ PopPair(old_exit_frame_reg, old_exit_through_ffi_reg);
1666
1667 // Restore top_resource.
1668 __ PopPair(tmp, vm_tag_reg);
1669 __ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
1670
1671 // Reset the exit frame info to old_exit_frame_reg *before* entering the
1672 // safepoint. The trampoline that called us will enter the safepoint on our
1673 // behalf.
1674 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
1675 old_exit_through_ffi_reg,
1676 /*enter_safepoint=*/false);
1677
1678 __ PopNativeCalleeSavedRegisters();
1679
1680 // Leave the entry frame.
1681 __ LeaveFrame();
1682
1683 // Leave the dummy frame holding the pushed arguments.
1684 __ LeaveFrame();
1685
1686 // Restore the actual stack pointer from SPREG.
1687 __ RestoreCSP();
1688
1689 __ Ret();
1690
1691 // For following blocks.
1692 __ set_constant_pool_allowed(true);
1693}
1694
1695// Keep in sync with NativeReturnInstr::EmitNativeCode and ComputeInnerLRState.
1696void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1697 // Constant pool cannot be used until we enter the actual Dart frame.
1698 __ set_constant_pool_allowed(false);
1699
1700 __ Bind(compiler->GetJumpLabel(this));
1701
1702 // We don't use the regular stack pointer in ARM64, so we have to copy the
1703 // native stack pointer into the Dart stack pointer. This will also kick CSP
1704 // forward a bit, enough for the spills and leaf call below, until we can set
1705 // it properly after setting up THR.
1706 __ SetupDartSP();
1707
1708 // Create a dummy frame holding the pushed arguments. This simplifies
1709 // NativeReturnInstr::EmitNativeCode.
1710 __ EnterFrame(0);
1711
1712 // Save the argument registers, in reverse order.
1713 SaveArguments(compiler);
1714
1715 // Enter the entry frame. NativeParameterInstr expects this frame has size
1716 // -exit_link_slot_from_entry_fp, verified below.
1717 __ EnterFrame(0);
1718
1719 // Save a space for the code object.
1720 __ PushImmediate(0);
1721
1722 __ PushNativeCalleeSavedRegisters();
1723
1724 // Now that we have THR, we can set CSP.
1725 __ SetupCSPFromThread(THR);
1726
1727#if defined(DART_TARGET_OS_FUCHSIA)
1728 __ str(R18,
1729 compiler::Address(
1730 THR, compiler::target::Thread::saved_shadow_call_stack_offset()));
1731#elif defined(USING_SHADOW_CALL_STACK)
1732#error Unimplemented
1733#endif
1734
1735 // Refresh pinned registers values (inc. write barrier mask and null object).
1736 __ RestorePinnedRegisters();
1737
1738 // Save the current VMTag on the stack.
1740 // Save the top resource.
1742 __ PushPair(R0, TMP);
1744
1746
1747 __ LoadFromOffset(R0, THR,
1748 compiler::target::Thread::exit_through_ffi_offset());
1749 __ Push(R0);
1750
1751 // Save the top exit frame info. We don't set it to 0 yet:
1752 // TransitionNativeToGenerated will handle that.
1753 __ LoadFromOffset(R0, THR,
1754 compiler::target::Thread::top_exit_frame_info_offset());
1755 __ Push(R0);
1756
1757 // In debug mode, verify that we've pushed the top exit frame info at the
1758 // correct offset from FP.
1759 __ EmitEntryFrameVerification();
1760
1761 // The callback trampoline (caller) has already left the safepoint for us.
1762 __ TransitionNativeToGenerated(R0, /*exit_safepoint=*/false,
1763 /*ignore_unwind_in_progress=*/false,
1764 /*set_tag=*/false);
1765
1766 // Now that the safepoint has ended, we can touch Dart objects without
1767 // handles.
1768
1769 // Load the code object.
1770 const Function& target_function = marshaller_.dart_signature();
1771 const intptr_t callback_id = target_function.FfiCallbackId();
1773 __ LoadFromOffset(R0, R0,
1775 __ LoadFromOffset(R0, R0,
1777 __ LoadCompressedFieldFromOffset(
1779 __ LoadCompressedFieldFromOffset(
1780 CODE_REG, R0,
1783
1784 // Put the code object in the reserved slot.
1785 __ StoreToOffset(CODE_REG, FPREG,
1787 if (FLAG_precompiled_mode) {
1788 __ SetupGlobalPoolAndDispatchTable();
1789 } else {
1790 // We now load the pool pointer (PP) with a GC safe value as we are about to
1791 // invoke dart code. We don't need a real object pool here.
1792 // Smi zero does not work because ARM64 assumes PP to be untagged.
1793 __ LoadObject(PP, compiler::NullObject());
1794 }
1795
1796 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1797 __ mov(ARGS_DESC_REG, ZR);
1798
1799 // Load a dummy return address which suggests that we are inside of
1800 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1801 CLOBBERS_LR({
1802 __ LoadFromOffset(LR, THR,
1804 __ LoadFieldFromOffset(LR, LR,
1806 });
1807
1809
1810 // Delay setting the tag until the profiler's stack walker will see the
1811 // InvokeDartCode return address.
1814}
1815
1816#define R(r) (1 << r)
1817
1818 LocationSummary* LeafRuntimeCallInstr::MakeLocationSummary(
1819 Zone* zone,
1820 bool is_optimizing) const {
1821 // Compare FfiCallInstr's use of kFfiAnyNonAbiRegister.
1823 ASSERT(IsAbiPreservedRegister(saved_csp));
1824 return MakeLocationSummaryInternal(zone, (R(saved_csp)));
1825}
1826
1827#undef R
1828
1829void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1830 const Register saved_fp = TMP2;
1831 const Register temp0 = TMP;
1832 const Register saved_csp = locs()->temp(0).reg();
1833
1834 __ MoveRegister(saved_fp, FPREG);
1835
1836 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1837 __ EnterCFrame(frame_space);
1838 ASSERT(IsAbiPreservedRegister(saved_csp));
1839 __ mov(saved_csp, CSP);
1840 __ mov(CSP, SP);
1841
1842 EmitParamMoves(compiler, saved_fp, temp0);
1843
1844 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1845 __ str(target_address,
1846 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1847 __ CallCFunction(target_address);
1848 __ LoadImmediate(temp0, VMTag::kDartTagId);
1849 __ str(temp0,
1850 compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
1851
1852 // We don't use the Dart SP; we leave the frame immediately after this.
1853 // However, we need to set CSP to a 16-byte-aligned value far above the SP.
1854 __ mov(CSP, saved_csp);
1855 __ LeaveCFrame();
1856}
1857
1859 Zone* zone,
1860 bool opt) const {
1861 const intptr_t kNumInputs = 1;
1862 // TODO(fschneider): Allow immediate operands for the char code.
1863 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1865}
1866
1868 FlowGraphCompiler* compiler) {
1869 ASSERT(compiler->is_optimizing());
1870 const Register char_code = locs()->in(0).reg();
1871 const Register result = locs()->out(0).reg();
1872
1873 __ ldr(result,
1874 compiler::Address(THR, Thread::predefined_symbols_address_offset()));
1876 __ SmiUntag(TMP, char_code); // Untag to use scaled address mode.
1877 __ ldr(result,
1878 compiler::Address(result, TMP, UXTX, compiler::Address::Scaled));
1879}
1880
1881LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1882 bool opt) const {
1883 const intptr_t kNumInputs = 1;
1884 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1886}
1887
1888void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1889 ASSERT(cid_ == kOneByteStringCid);
1890 const Register str = locs()->in(0).reg();
1891 const Register result = locs()->out(0).reg();
1892 __ LoadCompressedSmi(result,
1893 compiler::FieldAddress(str, String::length_offset()));
1894 __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset()),
1896 __ CompareImmediate(result, Smi::RawValue(1));
1897 __ LoadImmediate(result, -1);
1898 __ csel(result, TMP, result, EQ);
1899 __ SmiTag(result);
1900}
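// A minimal illustrative sketch (not part of the VM; the helper name and the
// plain C++ form are assumptions) of the value computed above: the single code
// unit when the string length is exactly one, otherwise -1; the emitted code
// then Smi-tags the result.
static intptr_t OneByteStringCharCodeSketch(const uint8_t* data,
                                            intptr_t length) {
  return (length == 1) ? static_cast<intptr_t>(data[0]) : -1;
}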
1901
1902LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1903 bool opt) const {
1904 const intptr_t kNumInputs = 5;
1905 const intptr_t kNumTemps = 0;
1906 LocationSummary* summary = new (zone)
1907 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1908 summary->set_in(0, Location::Any()); // decoder
1909 summary->set_in(1, Location::WritableRegister()); // bytes
1910 summary->set_in(2, Location::WritableRegister()); // start
1911 summary->set_in(3, Location::WritableRegister()); // end
1912 summary->set_in(4, Location::WritableRegister()); // table
1913 summary->set_out(0, Location::RequiresRegister());
1914 return summary;
1915}
1916
1917void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1918 const Register bytes_reg = locs()->in(1).reg();
1919 const Register start_reg = locs()->in(2).reg();
1920 const Register end_reg = locs()->in(3).reg();
1921 const Register table_reg = locs()->in(4).reg();
1922 const Register size_reg = locs()->out(0).reg();
1923
1924 const Register bytes_ptr_reg = start_reg;
1925 const Register bytes_end_reg = end_reg;
1926 const Register flags_reg = bytes_reg;
1927 const Register temp_reg = TMP;
1928 const Register decoder_temp_reg = start_reg;
1929 const Register flags_temp_reg = end_reg;
1930
1931 const intptr_t kSizeMask = 0x03;
1932 const intptr_t kFlagsMask = 0x3C;
1933
1934 compiler::Label loop, loop_in;
1935
1936 // Address of input bytes.
1937 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
1938
1939 // Table.
1940 __ AddImmediate(
1941 table_reg, table_reg,
1943
1944 // Pointers to start and end.
1945 __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
1946 __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
1947
1948 // Initialize size and flags.
1949 __ mov(size_reg, ZR);
1950 __ mov(flags_reg, ZR);
1951
1952 __ b(&loop_in);
1953 __ Bind(&loop);
1954
1955 // Read byte and increment pointer.
1956 __ ldr(temp_reg,
1957 compiler::Address(bytes_ptr_reg, 1, compiler::Address::PostIndex),
1959
1960 // Update size and flags based on byte value.
1961 __ ldr(temp_reg, compiler::Address(table_reg, temp_reg),
1963 __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
1964 __ andi(temp_reg, temp_reg, compiler::Immediate(kSizeMask));
1965 __ add(size_reg, size_reg, compiler::Operand(temp_reg));
1966
1967 // Stop if end is reached.
1968 __ Bind(&loop_in);
1969 __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
1970 __ b(&loop, UNSIGNED_LESS);
1971
1972 // Write flags to field.
1973 __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
1974 if (!IsScanFlagsUnboxed()) {
1975 __ SmiTag(flags_reg);
1976 }
1977 Register decoder_reg;
1978 const Location decoder_location = locs()->in(0);
1979 if (decoder_location.IsStackSlot()) {
1980 __ ldr(decoder_temp_reg, LocationToStackSlotAddress(decoder_location));
1981 decoder_reg = decoder_temp_reg;
1982 } else {
1983 decoder_reg = decoder_location.reg();
1984 }
1985 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1986 if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
1987 __ LoadCompressedSmiFieldFromOffset(flags_temp_reg, decoder_reg,
1988 scan_flags_field_offset);
1989 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg),
1991 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset,
1993 } else {
1994 __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
1995 scan_flags_field_offset);
1996 __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
1997 __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
1998 }
1999}
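// A minimal illustrative sketch (not part of the VM; the helper and parameter
// names are assumptions) of what the scan loop above computes. Each table
// entry encodes a size contribution in its low two bits (kSizeMask) and flag
// bits in bits 2-5 (kFlagsMask); sizes are summed and flags are OR-ed.
static void Utf8ScanSketch(const uint8_t* bytes,
                           intptr_t start,
                           intptr_t end,
                           const uint8_t* table,
                           intptr_t* out_size,
                           intptr_t* out_flags) {
  intptr_t size = 0;
  intptr_t flags = 0;
  for (intptr_t i = start; i < end; i++) {
    const uint8_t entry = table[bytes[i]];
    flags |= entry;        // Accumulate flag bits.
    size += entry & 0x03;  // kSizeMask: size contribution of this byte.
  }
  *out_size = size;
  *out_flags = flags & 0x3C;  // kFlagsMask.
}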
2000
2001LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
2002 bool opt) const {
2003 // The compiler must optimize any function that includes a LoadIndexed
2004 // instruction that uses typed data cids, since extracting the payload address
2005 // from views is done in a compiler pass after all code motion has happened.
2007
2008 const intptr_t kNumInputs = 2;
2009 const intptr_t kNumTemps = 0;
2010 LocationSummary* locs = new (zone)
2011 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2013 const bool can_be_constant =
2014 index()->BindsToConstant() &&
2016 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2018 can_be_constant
2019 ? Location::Constant(index()->definition()->AsConstant())
2021 auto const rep =
2025 } else if (RepresentationUtils::IsUnboxed(rep)) {
2027 } else {
2029 }
2030 return locs;
2031}
2032
2033void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2034 // The array register points to the backing store for external arrays.
2035 const Register array = locs()->in(kArrayPos).reg();
2036 const Location index = locs()->in(kIndexPos);
2037
2038 compiler::Address element_address(TMP); // Bad address.
2039 element_address = index.IsRegister()
2040 ? __ ElementAddressForRegIndex(
2042 index_unboxed_, array, index.reg(), TMP)
2043 : __ ElementAddressForIntIndex(
2045 Smi::Cast(index.constant()).Value());
2046 auto const rep =
2050 const Register result = locs()->out(0).reg();
2051 __ ldr(result, element_address, RepresentationUtils::OperandSize(rep));
2052 } else if (RepresentationUtils::IsUnboxed(rep)) {
2053 const VRegister result = locs()->out(0).fpu_reg();
2054 if (rep == kUnboxedFloat) {
2055 // Load single precision float.
2056 __ fldrs(result, element_address);
2057 } else if (rep == kUnboxedDouble) {
2058 // Load double precision float.
2059 __ fldrd(result, element_address);
2060 } else {
2061 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2062 rep == kUnboxedFloat64x2);
2063 __ fldrq(result, element_address);
2064 }
2065 } else {
2066 const Register result = locs()->out(0).reg();
2067 ASSERT(representation() == kTagged);
2068 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2069 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2070 __ LoadCompressed(result, element_address);
2071 }
2072}
2073
2074LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2075 bool opt) const {
2076 const intptr_t kNumInputs = 2;
2077 const intptr_t kNumTemps = 0;
2078 LocationSummary* summary = new (zone)
2079 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2080 summary->set_in(0, Location::RequiresRegister());
2081 summary->set_in(1, Location::RequiresRegister());
2082 summary->set_out(0, Location::RequiresRegister());
2083 return summary;
2084}
2085
2086void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2087 // The string register points to the backing store for external strings.
2088 const Register str = locs()->in(0).reg();
2089 const Location index = locs()->in(1);
2091
2092 Register result = locs()->out(0).reg();
2093 switch (class_id()) {
2094 case kOneByteStringCid:
2095 switch (element_count()) {
2096 case 1:
2098 break;
2099 case 2:
2101 break;
2102 case 4:
2104 break;
2105 default:
2106 UNREACHABLE();
2107 }
2108 break;
2109 case kTwoByteStringCid:
2110 switch (element_count()) {
2111 case 1:
2113 break;
2114 case 2:
2116 break;
2117 default:
2118 UNREACHABLE();
2119 }
2120 break;
2121 default:
2122 UNREACHABLE();
2123 break;
2124 }
2125 // Warning: element_address may use register TMP as base.
2126 compiler::Address element_address = __ ElementAddressForRegIndexWithSize(
2127 IsExternal(), class_id(), sz, index_scale(), /*index_unboxed=*/false, str,
2128 index.reg(), TMP);
2129 __ ldr(result, element_address, sz);
2130
2132 __ SmiTag(result);
2133}
2134
2135LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2136 bool opt) const {
2137 // The compiler must optimize any function that includes a StoreIndexed
2138 // instruction that uses typed data cids, since extracting the payload address
2139 // from views is done in a compiler pass after all code motion has happened.
2141
2142 const intptr_t kNumInputs = 3;
2143 const intptr_t kNumTemps = 1;
2144 LocationSummary* locs = new (zone)
2145 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2147 const bool can_be_constant =
2148 index()->BindsToConstant() &&
2150 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2151 locs->set_in(1, can_be_constant
2152 ? Location::Constant(index()->definition()->AsConstant())
2155 auto const rep =
2158 ASSERT(rep == kUnboxedUint8);
2160 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2161 ConstantInstr* constant = value()->definition()->AsConstant();
2162 if (constant != nullptr && constant->HasZeroRepresentation()) {
2163 locs->set_in(2, Location::Constant(constant));
2164 } else {
2166 }
2167 } else if (RepresentationUtils::IsUnboxed(rep)) {
2168 if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
2169 ConstantInstr* constant = value()->definition()->AsConstant();
2170 if (constant != nullptr && constant->HasZeroRepresentation()) {
2171 locs->set_in(2, Location::Constant(constant));
2172 } else {
2174 }
2175 } else {
2177 }
2178 } else if (class_id() == kArrayCid) {
2182 if (ShouldEmitStoreBarrier()) {
2185 }
2186 } else {
2187 UNREACHABLE();
2188 }
2189 return locs;
2190}
2191
2192void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2193 // The array register points to the backing store for external arrays.
2194 const Register array = locs()->in(0).reg();
2195 const Location index = locs()->in(1);
2196 const Register temp = locs()->temp(0).reg();
2197 compiler::Address element_address(TMP); // Bad address.
2198
2199 auto const rep =
2202
2203 // Deal with a special case separately.
2204 if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
2205 if (index.IsRegister()) {
2206 __ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
2207 index_scale(), index_unboxed_, array,
2208 index.reg());
2209 } else {
2210 __ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
2211 index_scale(), array,
2212 Smi::Cast(index.constant()).Value());
2213 }
2214 const Register value = locs()->in(2).reg();
2215 __ StoreCompressedIntoArray(array, temp, value, CanValueBeSmi());
2216 return;
2217 }
2218
2219 element_address = index.IsRegister()
2220 ? __ ElementAddressForRegIndex(
2222 index_unboxed_, array, index.reg(), temp)
2223 : __ ElementAddressForIntIndex(
2225 Smi::Cast(index.constant()).Value());
2226
2228 ASSERT(rep == kUnboxedUint8);
2229 if (locs()->in(2).IsConstant()) {
2230 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2231 intptr_t value = constant.Value();
2232 // Clamp to 0x0 or 0xFF respectively.
2233 if (value > 0xFF) {
2234 value = 0xFF;
2235 } else if (value < 0) {
2236 value = 0;
2237 }
2238 if (value == 0) {
2239 __ str(ZR, element_address, compiler::kUnsignedByte);
2240 } else {
2241 __ LoadImmediate(TMP, static_cast<int8_t>(value));
2242 __ str(TMP, element_address, compiler::kUnsignedByte);
2243 }
2244 } else {
2245 const Register value = locs()->in(2).reg();
2246 // Clamp to 0x00 or 0xFF respectively.
2247 __ CompareImmediate(value, 0xFF);
2248 __ csetm(TMP, GT); // TMP = value > 0xFF ? -1 : 0.
2249 __ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP.
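// Net effect: a negative value stores 0 (TMP stays 0 since both GT and LS
// fail), a value above 0xFF stores -1 whose low byte is 0xFF, and an
// in-range value is stored unchanged.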
2250 __ str(TMP, element_address, compiler::kUnsignedByte);
2251 }
2252 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2253 if (locs()->in(2).IsConstant()) {
2254 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2255 __ str(ZR, element_address, RepresentationUtils::OperandSize(rep));
2256 } else {
2257 __ str(locs()->in(2).reg(), element_address,
2259 }
2260 } else if (RepresentationUtils::IsUnboxed(rep)) {
2261 if (rep == kUnboxedFloat) {
2262 if (locs()->in(2).IsConstant()) {
2263 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2264 __ str(ZR, element_address, compiler::kFourBytes);
2265 } else {
2266 __ fstrs(locs()->in(2).fpu_reg(), element_address);
2267 }
2268 } else if (rep == kUnboxedDouble) {
2269 if (locs()->in(2).IsConstant()) {
2270 ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
2271 __ str(ZR, element_address, compiler::kEightBytes);
2272 } else {
2273 __ fstrd(locs()->in(2).fpu_reg(), element_address);
2274 }
2275 } else {
2276 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2277 rep == kUnboxedFloat64x2);
2278 const VRegister value_reg = locs()->in(2).fpu_reg();
2279 __ fstrq(value_reg, element_address);
2280 }
2281 } else if (class_id() == kArrayCid) {
2282 ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
2283 if (locs()->in(2).IsConstant()) {
2284 const Object& constant = locs()->in(2).constant();
2285 __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
2286 constant);
2287 } else {
2288 const Register value = locs()->in(2).reg();
2289 __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
2290 }
2291 } else {
2292 UNREACHABLE();
2293 }
2294
2296 if (index.IsRegister()) {
2297 __ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
2298 index_scale(), index_unboxed_, array,
2299 index.reg());
2300 } else {
2301 __ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
2302 index_scale(), array,
2303 Smi::Cast(index.constant()).Value());
2304 }
2305 const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
2307 __ MsanUnpoison(TMP, length_in_bytes);
2308 }
2309}
2310
2311static void LoadValueCid(FlowGraphCompiler* compiler,
2312 Register value_cid_reg,
2313 Register value_reg,
2314 compiler::Label* value_is_smi = nullptr) {
2315 compiler::Label done;
2316 if (value_is_smi == nullptr) {
2317 __ LoadImmediate(value_cid_reg, kSmiCid);
2318 }
2319 __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi);
2320 __ LoadClassId(value_cid_reg, value_reg);
2321 __ Bind(&done);
2322}
2323
2324DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
2325
2326LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2327 bool opt) const {
2328 const intptr_t kNumInputs = 1;
2329
2330 const intptr_t value_cid = value()->Type()->ToCid();
2331 const intptr_t field_cid = field().guarded_cid();
2332
2333 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2334
2335 const bool needs_value_cid_temp_reg =
2336 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2337
2338 const bool needs_field_temp_reg = emit_full_guard;
2339
2340 intptr_t num_temps = 0;
2341 if (needs_value_cid_temp_reg) {
2342 num_temps++;
2343 }
2344 if (needs_field_temp_reg) {
2345 num_temps++;
2346 }
2347
2348 LocationSummary* summary = new (zone)
2349 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2350 summary->set_in(0, Location::RequiresRegister());
2351
2352 for (intptr_t i = 0; i < num_temps; i++) {
2353 summary->set_temp(i, Location::RequiresRegister());
2354 }
2355
2356 return summary;
2357}
2358
2359void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2361 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2362 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2363
2364 const intptr_t value_cid = value()->Type()->ToCid();
2365 const intptr_t field_cid = field().guarded_cid();
2366 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2367
2368 if (field_cid == kDynamicCid) {
2369 return; // Nothing to emit.
2370 }
2371
2372 const bool emit_full_guard =
2373 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2374
2375 const bool needs_value_cid_temp_reg =
2376 emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
2377
2378 const bool needs_field_temp_reg = emit_full_guard;
2379
2380 const Register value_reg = locs()->in(0).reg();
2381
2382 const Register value_cid_reg =
2383 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2384
2385 const Register field_reg = needs_field_temp_reg
2386 ? locs()->temp(locs()->temp_count() - 1).reg()
2387 : kNoRegister;
2388
2389 compiler::Label ok, fail_label;
2390
2391 compiler::Label* deopt =
2392 compiler->is_optimizing()
2393 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2394 : nullptr;
2395
2396 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2397
2398 if (emit_full_guard) {
2399 __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
2400
2401 compiler::FieldAddress field_cid_operand(field_reg,
2403 compiler::FieldAddress field_nullability_operand(
2404 field_reg, Field::is_nullable_offset());
2405
2406 if (value_cid == kDynamicCid) {
2407 LoadValueCid(compiler, value_cid_reg, value_reg);
2408 compiler::Label skip_length_check;
2409 __ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2410 __ CompareRegisters(value_cid_reg, TMP);
2411 __ b(&ok, EQ);
2412 __ ldr(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
2413 __ CompareRegisters(value_cid_reg, TMP);
2414 } else if (value_cid == kNullCid) {
2415 __ ldr(value_cid_reg, field_nullability_operand,
2417 __ CompareImmediate(value_cid_reg, value_cid);
2418 } else {
2419 compiler::Label skip_length_check;
2420 __ ldr(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
2421 __ CompareImmediate(value_cid_reg, value_cid);
2422 }
2423 __ b(&ok, EQ);
2424
2425 // Check if the tracked state of the guarded field can be initialized
2426 // inline. If the field needs a length check, we fall through to the
2427 // runtime, which is responsible for computing the offset of the length
2428 // field based on the class id.
2429 // The length guard will be emitted separately, when needed, by the
2430 // GuardFieldLength instruction after GuardFieldClass.
2431 if (!field().needs_length_check()) {
2432 // An uninitialized field can be handled inline. Check if the
2433 // field is still uninitialized.
2434 __ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2435 __ CompareImmediate(TMP, kIllegalCid);
2436 __ b(fail, NE);
2437
2438 if (value_cid == kDynamicCid) {
2439 __ str(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
2440 __ str(value_cid_reg, field_nullability_operand,
2442 } else {
2443 __ LoadImmediate(TMP, value_cid);
2444 __ str(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
2445 __ str(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
2446 }
2447
2448 __ b(&ok);
2449 }
2450
2451 if (deopt == nullptr) {
2452 __ Bind(fail);
2453
2454 __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
2456 __ CompareImmediate(TMP, kDynamicCid);
2457 __ b(&ok, EQ);
2458
2459 __ PushPair(value_reg, field_reg);
2460 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2461 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2462 __ Drop(2); // Drop the field and the value.
2463 } else {
2464 __ b(fail);
2465 }
2466 } else {
2467 ASSERT(compiler->is_optimizing());
2468 ASSERT(deopt != nullptr);
2469
2470 // Field guard class has been initialized and is known.
2471 if (value_cid == kDynamicCid) {
2472 // Value's class id is not known.
2473 __ tsti(value_reg, compiler::Immediate(kSmiTagMask));
2474
2475 if (field_cid != kSmiCid) {
2476 __ b(fail, EQ);
2477 __ LoadClassId(value_cid_reg, value_reg);
2478 __ CompareImmediate(value_cid_reg, field_cid);
2479 }
2480
2481 if (field().is_nullable() && (field_cid != kNullCid)) {
2482 __ b(&ok, EQ);
2483 __ CompareObject(value_reg, Object::null_object());
2484 }
2485
2486 __ b(fail, NE);
2487 } else if (value_cid == field_cid) {
2488 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2489 // may sometimes leave this situation behind after the last Canonicalize pass.
2490 } else {
2491 // Both the value's and the field's class ids are known.
2492 ASSERT(value_cid != nullability);
2493 __ b(fail);
2494 }
2495 }
2496 __ Bind(&ok);
2497}
2498
2499LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2500 bool opt) const {
2501 const intptr_t kNumInputs = 1;
2502 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2503 const intptr_t kNumTemps = 3;
2504 LocationSummary* summary = new (zone)
2505 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2506 summary->set_in(0, Location::RequiresRegister());
2507 // We need temporaries for field object, length offset and expected length.
2508 summary->set_temp(0, Location::RequiresRegister());
2509 summary->set_temp(1, Location::RequiresRegister());
2510 summary->set_temp(2, Location::RequiresRegister());
2511 return summary;
2512 } else {
2513 LocationSummary* summary = new (zone)
2514 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2515 summary->set_in(0, Location::RequiresRegister());
2516 return summary;
2517 }
2518 UNREACHABLE();
2519}
2520
2521void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2522 if (field().guarded_list_length() == Field::kNoFixedLength) {
2523 return; // Nothing to emit.
2524 }
2525
2526 compiler::Label* deopt =
2527 compiler->is_optimizing()
2528 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2529 : nullptr;
2530
2531 const Register value_reg = locs()->in(0).reg();
2532
2533 if (!compiler->is_optimizing() ||
2534 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2535 const Register field_reg = locs()->temp(0).reg();
2536 const Register offset_reg = locs()->temp(1).reg();
2537 const Register length_reg = locs()->temp(2).reg();
2538
2539 compiler::Label ok;
2540
2541 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2542
2543 __ ldr(offset_reg,
2544 compiler::FieldAddress(
2547 __ LoadCompressedSmi(
2548 length_reg,
2549 compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
2550
2551 __ tst(offset_reg, compiler::Operand(offset_reg));
2552 __ b(&ok, MI);
2553
2554 // Load the length from the value. GuardFieldClass has already verified that
2555 // the value's class matches the guarded class id of the field.
2556 // offset_reg contains the offset already adjusted by -kHeapObjectTag, which
2557 // is why we use Address instead of FieldAddress.
2558 __ LoadCompressedSmi(TMP, compiler::Address(value_reg, offset_reg));
2559 __ CompareObjectRegisters(length_reg, TMP);
2560
2561 if (deopt == nullptr) {
2562 __ b(&ok, EQ);
2563
2564 __ PushPair(value_reg, field_reg);
2565 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2566 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2567 __ Drop(2); // Drop the field and the value.
2568 } else {
2569 __ b(deopt, NE);
2570 }
2571
2572 __ Bind(&ok);
2573 } else {
2574 ASSERT(compiler->is_optimizing());
2575 ASSERT(field().guarded_list_length() >= 0);
2576 ASSERT(field().guarded_list_length_in_object_offset() !=
2578
2579 __ ldr(TMP, compiler::FieldAddress(
2580 value_reg, field().guarded_list_length_in_object_offset()));
2581 __ CompareImmediate(TMP, Smi::RawValue(field().guarded_list_length()));
2582 __ b(deopt, NE);
2583 }
2584}
2585
2586LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2587 bool opt) const {
2588 const intptr_t kNumInputs = 1;
2589 const intptr_t kNumTemps = 1;
2590 LocationSummary* locs = new (zone)
2591 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2594 return locs;
2595}
2596
2597void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2598 const Register value = locs()->in(0).reg();
2599 const Register temp = locs()->temp(0).reg();
2600
2601 compiler->used_static_fields().Add(&field());
2602
2603 __ LoadFromOffset(
2604 temp, THR,
2605 field().is_shared()
2608 // Note: static field ids won't be changed by hot reload.
2609 __ StoreToOffset(value, temp,
2611}
2612
2613LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2614 bool opt) const {
2615 const intptr_t kNumInputs = 3;
2616 const intptr_t kNumTemps = 0;
2617 LocationSummary* summary = new (zone)
2618 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2620 summary->set_in(1, Location::RegisterLocation(
2622 summary->set_in(
2624 summary->set_out(0, Location::RegisterLocation(R0));
2625 return summary;
2626}
2627
2628void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2629 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2632
2633 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2634 ASSERT(locs()->out(0).reg() == R0);
2635}
2636
2637LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2638 bool opt) const {
2639 const intptr_t kNumInputs = 2;
2640 const intptr_t kNumTemps = 0;
2641 LocationSummary* locs = new (zone)
2642 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2648 return locs;
2649}
2650
2651// Inlines array allocation for known constant values.
2652static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2653 intptr_t num_elements,
2654 compiler::Label* slow_path,
2655 compiler::Label* done) {
2656 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2657 const intptr_t instance_size = Array::InstanceSize(num_elements);
2658
2659 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2660 AllocateArrayABI::kResultReg, // instance
2661 R3, // end address
2662 R6, R8);
2663 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2664 // R3: new object end address.
2665
2666 // Store the type argument field.
2667 __ StoreCompressedIntoObjectNoBarrier(
2669 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2672
2673 // Set the length field.
2674 __ StoreCompressedIntoObjectNoBarrier(
2676 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2679
2680 // TODO(zra): Use stp once added.
2681 // Initialize all array elements to raw_null.
2682 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2683 // R3: new object end address.
2684 // R8: iterator which initially points to the start of the variable
2685 // data area to be initialized.
2686 if (num_elements > 0) {
2687 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2688 __ AddImmediate(R8, AllocateArrayABI::kResultReg,
2689 sizeof(UntaggedArray) - kHeapObjectTag);
2690 if (array_size < (kInlineArraySize * kCompressedWordSize)) {
2691 intptr_t current_offset = 0;
2692 while (current_offset < array_size) {
2693 __ StoreCompressedIntoObjectNoBarrier(
2694 AllocateArrayABI::kResultReg, compiler::Address(R8, current_offset),
2695 NULL_REG);
2696 current_offset += kCompressedWordSize;
2697 }
2698 } else {
2699 compiler::Label end_loop, init_loop;
2700 __ Bind(&init_loop);
2701 __ CompareRegisters(R8, R3);
2702 __ b(&end_loop, CS);
2703 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2704 compiler::Address(R8, 0), NULL_REG);
2705 __ AddImmediate(R8, kCompressedWordSize);
2706 __ b(&init_loop);
2707 __ Bind(&end_loop);
2708 }
2709 }
2710 __ b(done);
2711}
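// The element initialization above uses fully unrolled stores when the payload
// is smaller than kInlineArraySize compressed words, and falls back to a
// compare-and-branch loop otherwise; both paths set every element to null.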
2712
2713void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2714 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
2715 if (type_usage_info != nullptr) {
2716 const Class& list_class =
2717 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
2718 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
2719 type_arguments()->definition());
2720 }
2721
2722 compiler::Label slow_path, done;
2723 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2724 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
2725 num_elements()->BindsToConstant() &&
2726 num_elements()->BoundConstant().IsSmi()) {
2727 const intptr_t length =
2728 Smi::Cast(num_elements()->BoundConstant()).Value();
2730 InlineArrayAllocation(compiler, length, &slow_path, &done);
2731 }
2732 }
2733 }
2734
2735 __ Bind(&slow_path);
2736 auto object_store = compiler->isolate_group()->object_store();
2737 const auto& allocate_array_stub =
2738 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2739 compiler->GenerateStubCall(source(), allocate_array_stub,
2740 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2741 env());
2742 __ Bind(&done);
2743}
2744
2746 Zone* zone,
2747 bool opt) const {
2748 ASSERT(opt);
2749 const intptr_t kNumInputs = 0;
2750 const intptr_t kNumTemps = 3;
2751 LocationSummary* locs = new (zone) LocationSummary(
2752 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2757 return locs;
2758}
2759
2760class AllocateContextSlowPath
2761 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2762 public:
2763 explicit AllocateContextSlowPath(
2764 AllocateUninitializedContextInstr* instruction)
2765 : TemplateSlowPathCode(instruction) {}
2766
2767 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2768 __ Comment("AllocateContextSlowPath");
2769 __ Bind(entry_label());
2770
2771 LocationSummary* locs = instruction()->locs();
2772 locs->live_registers()->Remove(locs->out(0));
2773
2774 compiler->SaveLiveRegisters(locs);
2775
2776 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2777 instruction(), /*num_slow_path_args=*/0);
2778 ASSERT(slow_path_env != nullptr);
2779
2780 auto object_store = compiler->isolate_group()->object_store();
2781 const auto& allocate_context_stub = Code::ZoneHandle(
2782 compiler->zone(), object_store->allocate_context_stub());
2783
2784 __ LoadImmediate(R1, instruction()->num_context_variables());
2785 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
2786 UntaggedPcDescriptors::kOther, locs,
2787 instruction()->deopt_id(), slow_path_env);
2788 ASSERT(instruction()->locs()->out(0).reg() == R0);
2789 compiler->RestoreLiveRegisters(instruction()->locs());
2790 __ b(exit_label());
2791 }
2792};
2793
2795 FlowGraphCompiler* compiler) {
2796 Register temp0 = locs()->temp(0).reg();
2797 Register temp1 = locs()->temp(1).reg();
2798 Register temp2 = locs()->temp(2).reg();
2799 Register result = locs()->out(0).reg();
2800 // Try to allocate the object.
2801 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2802 compiler->AddSlowPathCode(slow_path);
2803 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2804
2805 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2806 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2807 result, // instance
2808 temp0, temp1, temp2);
2809
2810 // Set up the number-of-context-variables field.
2811 __ LoadImmediate(temp0, num_context_variables());
2812 __ str(temp0,
2813 compiler::FieldAddress(result, Context::num_variables_offset()));
2814 } else {
2815 __ Jump(slow_path->entry_label());
2816 }
2817
2818 __ Bind(slow_path->exit_label());
2819}
2820
2821LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
2822 bool opt) const {
2823 const intptr_t kNumInputs = 0;
2824 const intptr_t kNumTemps = 1;
2825 LocationSummary* locs = new (zone)
2826 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2829 return locs;
2830}
2831
2832void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2833 ASSERT(locs()->temp(0).reg() == R1);
2834 ASSERT(locs()->out(0).reg() == R0);
2835
2836 auto object_store = compiler->isolate_group()->object_store();
2837 const auto& allocate_context_stub =
2838 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
2839 __ LoadImmediate(R1, num_context_variables());
2840 compiler->GenerateStubCall(source(), allocate_context_stub,
2841 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2842 env());
2843}
2844
2845LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
2846 bool opt) const {
2847 const intptr_t kNumInputs = 1;
2848 const intptr_t kNumTemps = 0;
2849 LocationSummary* locs = new (zone)
2850 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2853 return locs;
2854}
2855
2856void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2857 ASSERT(locs()->in(0).reg() == R5);
2858 ASSERT(locs()->out(0).reg() == R0);
2859
2860 auto object_store = compiler->isolate_group()->object_store();
2861 const auto& clone_context_stub =
2862 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
2863 compiler->GenerateStubCall(source(), clone_context_stub,
2864 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
2865 deopt_id(), env());
2866}
2867
2868LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
2869 bool opt) const {
2870 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
2871}
2872
2873void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2874 __ Bind(compiler->GetJumpLabel(this));
2875 compiler->AddExceptionHandler(this);
2876 if (HasParallelMove()) {
2878 }
2879
2880 // Restore SP from FP as we are coming from a throw and the code for
2881 // popping arguments has not been run.
2882 const intptr_t fp_sp_dist =
2884 compiler->StackSize()) *
2885 kWordSize;
2886 ASSERT(fp_sp_dist <= 0);
2887 __ AddImmediate(SP, FP, fp_sp_dist);
2888
2889 if (!compiler->is_optimizing()) {
2890 if (raw_exception_var_ != nullptr) {
2891 __ StoreToOffset(
2894 }
2895 if (raw_stacktrace_var_ != nullptr) {
2896 __ StoreToOffset(
2899 }
2900 }
2901}
2902
2903LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
2904 bool opt) const {
2905 const intptr_t kNumInputs = 0;
2906 const intptr_t kNumTemps = 1;
2907 const bool using_shared_stub = UseSharedSlowPathStub(opt);
2908 LocationSummary* summary = new (zone)
2909 LocationSummary(zone, kNumInputs, kNumTemps,
2910 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
2912 summary->set_temp(0, Location::RequiresRegister());
2913 return summary;
2914}
2915
2916class CheckStackOverflowSlowPath
2917 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
2918 public:
2919 static constexpr intptr_t kNumSlowPathArgs = 0;
2920
2921 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2922 : TemplateSlowPathCode(instruction) {}
2923
2924 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2925 auto locs = instruction()->locs();
2926 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
2927 const Register value = locs->temp(0).reg();
2928 __ Comment("CheckStackOverflowSlowPathOsr");
2929 __ Bind(osr_entry_label());
2930 __ LoadImmediate(value, Thread::kOsrRequest);
2931 __ str(value,
2932 compiler::Address(THR, Thread::stack_overflow_flags_offset()));
2933 }
2934 __ Comment("CheckStackOverflowSlowPath");
2935 __ Bind(entry_label());
2936 const bool using_shared_stub = locs->call_on_shared_slow_path();
2937 if (!using_shared_stub) {
2938 compiler->SaveLiveRegisters(locs);
2939 }
2940 // pending_deoptimization_env_ is needed to generate a runtime call that
2941 // may throw an exception.
2942 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
2943 Environment* env =
2944 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
2945 compiler->pending_deoptimization_env_ = env;
2946
2947 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
2948 if (using_shared_stub) {
2949 if (!has_frame) {
2950 ASSERT(__ constant_pool_allowed());
2951 __ set_constant_pool_allowed(false);
2952 __ EnterDartFrame(0);
2953 }
2954 auto object_store = compiler->isolate_group()->object_store();
2955 const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
2956 const auto& stub = Code::ZoneHandle(
2957 compiler->zone(),
2958 live_fpu_regs
2959 ? object_store->stack_overflow_stub_with_fpu_regs_stub()
2960 : object_store->stack_overflow_stub_without_fpu_regs_stub());
2961
2962 if (compiler->CanPcRelativeCall(stub)) {
2963 __ GenerateUnRelocatedPcRelativeCall();
2964 compiler->AddPcRelativeCallStubTarget(stub);
2965 } else {
2966 const uword entry_point_offset =
2968 locs->live_registers()->FpuRegisterCount() > 0);
2969 __ Call(compiler::Address(THR, entry_point_offset));
2970 }
2971 compiler->RecordSafepoint(locs, kNumSlowPathArgs);
2972 compiler->RecordCatchEntryMoves(env);
2973 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
2974 instruction()->deopt_id(),
2975 instruction()->source());
2976 if (!has_frame) {
2977 __ LeaveDartFrame();
2978 __ set_constant_pool_allowed(true);
2979 }
2980 } else {
2981 ASSERT(has_frame);
2982 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
2983 compiler->EmitCallsiteMetadata(
2984 instruction()->source(), instruction()->deopt_id(),
2985 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
2986 }
2987
2988 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
2989 instruction()->in_loop()) {
2990 // In unoptimized code, record loop stack checks as possible OSR entries.
2991 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
2992 instruction()->deopt_id(),
2993 InstructionSource());
2994 }
2995 compiler->pending_deoptimization_env_ = nullptr;
2996 if (!using_shared_stub) {
2997 compiler->RestoreLiveRegisters(locs);
2998 }
2999 __ b(exit_label());
3000 }
3001
3002 compiler::Label* osr_entry_label() {
3003 ASSERT(IsolateGroup::Current()->use_osr());
3004 return &osr_entry_label_;
3005 }
3006
3007 private:
3008 compiler::Label osr_entry_label_;
3009};
3010
3011void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3012 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3013 compiler->AddSlowPathCode(slow_path);
3014
3015 __ ldr(TMP, compiler::Address(
3017 __ CompareRegisters(SP, TMP);
3018 __ b(slow_path->entry_label(), LS);
3019 if (compiler->CanOSRFunction() && in_loop()) {
3020 const Register function = locs()->temp(0).reg();
3021 // In unoptimized code check the usage counter to trigger OSR at loop
3022 // stack checks. Use progressively higher thresholds for more deeply
3023 // nested loops to attempt to hit outer loops with OSR when possible.
3024 __ LoadObject(function, compiler->parsed_function().function());
3025 const intptr_t configured_optimization_counter_threshold =
3026 compiler->thread()->isolate_group()->optimization_counter_threshold();
3027 const int32_t threshold =
3028 configured_optimization_counter_threshold * (loop_depth() + 1);
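// For example, with a configured threshold T, a loop at nesting depth 0
// triggers OSR once its usage counter reaches T, a loop at depth 1 at 2 * T,
// and so on, biasing OSR toward outer loops as described above.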
3029 __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
3031 __ add(TMP, TMP, compiler::Operand(1));
3032 __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
3034 __ CompareImmediate(TMP, threshold);
3035 __ b(slow_path->osr_entry_label(), GE);
3036 }
3037 if (compiler->ForceSlowPathForStackOverflow()) {
3038 __ b(slow_path->entry_label());
3039 }
3040 __ Bind(slow_path->exit_label());
3041}
3042
3043static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3044 BinarySmiOpInstr* shift_left) {
3045 const LocationSummary& locs = *shift_left->locs();
3046 const Register left = locs.in(0).reg();
3047 const Register result = locs.out(0).reg();
3048 compiler::Label* deopt =
3049 shift_left->CanDeoptimize()
3050 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3051 ICData::kDeoptBinarySmiOp)
3052 : nullptr;
3053 if (locs.in(1).IsConstant()) {
3054 const Object& constant = locs.in(1).constant();
3055 ASSERT(constant.IsSmi());
3056 // Immediate shift operation takes 6 bits for the count.
3057#if !defined(DART_COMPRESSED_POINTERS)
3058 const intptr_t kCountLimit = 0x3F;
3059#else
3060 const intptr_t kCountLimit = 0x1F;
3061#endif
3062 const intptr_t value = Smi::Cast(constant).Value();
3063 ASSERT((0 < value) && (value < kCountLimit));
3064 if (shift_left->can_overflow()) {
3065 // Check for overflow (preserve left).
3066 __ LslImmediate(TMP, left, value, compiler::kObjectBytes);
3067 __ cmp(left, compiler::Operand(TMP, ASR, value), compiler::kObjectBytes);
3068 __ b(deopt, NE); // Overflow.
3069 }
3070 // Shift for the result now that we know there is no overflow.
3071 __ LslImmediate(result, left, value, compiler::kObjectBytes);
3072 return;
3073 }
3074
3075 // Right (locs.in(1)) is not constant.
3076 const Register right = locs.in(1).reg();
3077 Range* right_range = shift_left->right_range();
3078 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3079 // TODO(srdjan): Implement code below for is_truncating().
3080 // If left is constant, we know the maximal allowed size for right.
3081 const Object& obj = shift_left->left()->BoundConstant();
3082 if (obj.IsSmi()) {
3083 const intptr_t left_int = Smi::Cast(obj).Value();
3084 if (left_int == 0) {
3085 __ CompareObjectRegisters(right, ZR);
3086 __ b(deopt, MI);
3087 __ mov(result, ZR);
3088 return;
3089 }
3090 const intptr_t max_right =
3092 const bool right_needs_check =
3093 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3094 if (right_needs_check) {
3095 __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
3096 __ b(deopt, CS);
3097 }
3098 __ SmiUntag(TMP, right);
3099 __ lslv(result, left, TMP, compiler::kObjectBytes);
3100 }
3101 return;
3102 }
3103
3104 const bool right_needs_check =
3105 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
3106 if (!shift_left->can_overflow()) {
3107 if (right_needs_check) {
3108 if (!RangeUtils::IsPositive(right_range)) {
3109 ASSERT(shift_left->CanDeoptimize());
3110 __ CompareObjectRegisters(right, ZR);
3111 __ b(deopt, MI);
3112 }
3113
3114 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3115 __ csel(result, ZR, result, CS);
3116 __ SmiUntag(TMP, right);
3117 __ lslv(TMP, left, TMP, compiler::kObjectBytes);
3118 __ csel(result, TMP, result, CC);
3119 } else {
3120 __ SmiUntag(TMP, right);
3121 __ lslv(result, left, TMP, compiler::kObjectBytes);
3122 }
3123 } else {
3124 if (right_needs_check) {
3125 ASSERT(shift_left->CanDeoptimize());
3126 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3127 __ b(deopt, CS);
3128 }
3129 // Left is not a constant.
3130 // Check if the count is too large to handle inline.
3131 __ SmiUntag(TMP, right);
3132 // Overflow test (preserve left, right, and TMP);
3133 const Register temp = locs.temp(0).reg();
3134 __ lslv(temp, left, TMP, compiler::kObjectBytes);
3135 __ asrv(TMP2, temp, TMP, compiler::kObjectBytes);
3136 __ cmp(left, compiler::Operand(TMP2), compiler::kObjectBytes);
3137 __ b(deopt, NE); // Overflow.
3138 // Shift for the result now that we know there is no overflow.
3139 __ lslv(result, left, TMP, compiler::kObjectBytes);
3140 }
3141}
3142
3143LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3144 bool opt) const {
3145 const intptr_t kNumInputs = 2;
3146 const intptr_t kNumTemps =
3147 (((op_kind() == Token::kSHL) && can_overflow()) ||
3148 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR))
3149 ? 1
3150 : 0;
3151 LocationSummary* summary = new (zone)
3152 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3153 if (op_kind() == Token::kTRUNCDIV) {
3154 summary->set_in(0, Location::RequiresRegister());
3156 ConstantInstr* right_constant = right()->definition()->AsConstant();
3157 summary->set_in(1, Location::Constant(right_constant));
3158 } else {
3159 summary->set_in(1, Location::RequiresRegister());
3160 }
3161 summary->set_out(0, Location::RequiresRegister());
3162 return summary;
3163 }
3164 if (op_kind() == Token::kMOD) {
3165 summary->set_in(0, Location::RequiresRegister());
3166 summary->set_in(1, Location::RequiresRegister());
3167 summary->set_out(0, Location::RequiresRegister());
3168 return summary;
3169 }
3170 summary->set_in(0, Location::RequiresRegister());
3171 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3172 if (((op_kind() == Token::kSHL) && can_overflow()) ||
3173 (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3174 summary->set_temp(0, Location::RequiresRegister());
3175 }
3176 // We make use of 3-operand instructions: unlike on Intel, the result
3177 // register does not have to be identical to the first input register.
3178 summary->set_out(0, Location::RequiresRegister());
3179 return summary;
3180}
3181
3182void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3183 if (op_kind() == Token::kSHL) {
3184 EmitSmiShiftLeft(compiler, this);
3185 return;
3186 }
3187
3188 const Register left = locs()->in(0).reg();
3189 const Register result = locs()->out(0).reg();
3190 compiler::Label* deopt = nullptr;
3191 if (CanDeoptimize()) {
3192 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3193 }
3194
3195 if (locs()->in(1).IsConstant()) {
3196 const Object& constant = locs()->in(1).constant();
3197 ASSERT(constant.IsSmi());
3198 const int64_t imm = Smi::RawValue(Smi::Cast(constant).Value());
3199 switch (op_kind()) {
3200 case Token::kADD: {
3201 if (deopt == nullptr) {
3202 __ AddImmediate(result, left, imm, compiler::kObjectBytes);
3203 } else {
3204 __ AddImmediateSetFlags(result, left, imm, compiler::kObjectBytes);
3205 __ b(deopt, VS);
3206 }
3207 break;
3208 }
3209 case Token::kSUB: {
3210 if (deopt == nullptr) {
3211 __ AddImmediate(result, left, -imm);
3212 } else {
3213 // Negating imm and using AddImmediateSetFlags would not detect the
3214 // overflow when imm == kMinInt64.
3215 __ SubImmediateSetFlags(result, left, imm, compiler::kObjectBytes);
3216 __ b(deopt, VS);
3217 }
3218 break;
3219 }
3220 case Token::kMUL: {
3221 // Keep left value tagged and untag right value.
3222 const intptr_t value = Smi::Cast(constant).Value();
3223 __ LoadImmediate(TMP, value);
3224#if !defined(DART_COMPRESSED_POINTERS)
3225 __ mul(result, left, TMP);
3226#else
3227 __ smull(result, left, TMP);
3228#endif
3229 if (deopt != nullptr) {
3230#if !defined(DART_COMPRESSED_POINTERS)
3231 __ smulh(TMP, left, TMP);
3232 // TMP: result bits 64..127.
3233#else
3234 __ AsrImmediate(TMP, result, 31);
3235 // TMP: result bits 32..63.
3236#endif
3237 __ cmp(TMP, compiler::Operand(result, ASR, 63));
3238 __ b(deopt, NE);
3239 }
3240 break;
3241 }
3242 case Token::kTRUNCDIV: {
3243 const intptr_t value = Smi::Cast(constant).Value();
3246 const intptr_t shift_count =
3248 ASSERT(kSmiTagSize == 1);
3249#if !defined(DART_COMPRESSED_POINTERS)
3250 __ AsrImmediate(TMP, left, 63);
3251#else
3252 __ AsrImmediate(TMP, left, 31, compiler::kFourBytes);
3253#endif
3254 ASSERT(shift_count > 1); // 1, -1 case handled above.
3255 const Register temp = TMP2;
3256#if !defined(DART_COMPRESSED_POINTERS)
3257 __ add(temp, left, compiler::Operand(TMP, LSR, 64 - shift_count));
3258#else
3259 __ addw(temp, left, compiler::Operand(TMP, LSR, 32 - shift_count));
3260#endif
3261 ASSERT(shift_count > 0);
3262 __ AsrImmediate(result, temp, shift_count, compiler::kObjectBytes);
3263 if (value < 0) {
3264 __ sub(result, ZR, compiler::Operand(result), compiler::kObjectBytes);
3265 }
3266 __ SmiTag(result);
3267 break;
3268 }
3269 case Token::kBIT_AND:
3270 // No overflow check.
3271 __ AndImmediate(result, left, imm);
3272 break;
3273 case Token::kBIT_OR:
3274 // No overflow check.
3275 __ OrImmediate(result, left, imm);
3276 break;
3277 case Token::kBIT_XOR:
3278 // No overflow check.
3279 __ XorImmediate(result, left, imm);
3280 break;
3281 case Token::kSHR: {
3282 // Asr operation masks the count to 6/5 bits.
3283#if !defined(DART_COMPRESSED_POINTERS)
3284 const intptr_t kCountLimit = 0x3F;
3285#else
3286 const intptr_t kCountLimit = 0x1F;
3287#endif
3288 intptr_t value = Smi::Cast(constant).Value();
3289 __ AsrImmediate(result, left,
3290 Utils::Minimum(value + kSmiTagSize, kCountLimit),
3292 __ SmiTag(result);
3293 // Note: the shift and SmiTag above could possibly be folded into one sbfiz.
3294 break;
3295 }
3296 case Token::kUSHR: {
3297 // Lsr operation masks the count to 6 bits, but
3298 // unsigned shifts by >= kBitsPerInt64 are eliminated by
3299 // BinaryIntegerOpInstr::Canonicalize.
3300 const intptr_t kCountLimit = 0x3F;
3301 intptr_t value = Smi::Cast(constant).Value();
3302 ASSERT((value >= 0) && (value <= kCountLimit));
3303 __ SmiUntag(result, left);
3304 __ LsrImmediate(result, result, value);
3305 if (deopt != nullptr) {
3306 __ SmiTagAndBranchIfOverflow(result, deopt);
3307 } else {
3308 __ SmiTag(result);
3309 }
3310 break;
3311 }
3312 default:
3313 UNREACHABLE();
3314 break;
3315 }
3316 return;
3317 }
3318
3319 const Register right = locs()->in(1).reg();
3320 switch (op_kind()) {
3321 case Token::kADD: {
3322 if (deopt == nullptr) {
3323 __ add(result, left, compiler::Operand(right), compiler::kObjectBytes);
3324 } else {
3325 __ adds(result, left, compiler::Operand(right), compiler::kObjectBytes);
3326 __ b(deopt, VS);
3327 }
3328 break;
3329 }
3330 case Token::kSUB: {
3331 if (deopt == nullptr) {
3332 __ sub(result, left, compiler::Operand(right), compiler::kObjectBytes);
3333 } else {
3334 __ subs(result, left, compiler::Operand(right), compiler::kObjectBytes);
3335 __ b(deopt, VS);
3336 }
3337 break;
3338 }
3339 case Token::kMUL: {
3340 __ SmiUntag(TMP, left);
3341#if !defined(DART_COMPRESSED_POINTERS)
3342 __ mul(result, TMP, right);
3343#else
3344 __ smull(result, TMP, right);
3345#endif
3346 if (deopt != nullptr) {
3347#if !defined(DART_COMPRESSED_POINTERS)
3348 __ smulh(TMP, TMP, right);
3349 // TMP: result bits 64..127.
3350#else
3351 __ AsrImmediate(TMP, result, 31);
3352 // TMP: result bits 32..63.
3353#endif
3354 __ cmp(TMP, compiler::Operand(result, ASR, 63));
3355 __ b(deopt, NE);
3356 }
3357 break;
3358 }
3359 case Token::kBIT_AND: {
3360 // No overflow check.
3361 __ and_(result, left, compiler::Operand(right));
3362 break;
3363 }
3364 case Token::kBIT_OR: {
3365 // No overflow check.
3366 __ orr(result, left, compiler::Operand(right));
3367 break;
3368 }
3369 case Token::kBIT_XOR: {
3370 // No overflow check.
3371 __ eor(result, left, compiler::Operand(right));
3372 break;
3373 }
3374 case Token::kTRUNCDIV: {
3376 // Handle divide by zero in runtime.
3377 __ cbz(deopt, right, compiler::kObjectBytes);
3378 }
3379 const Register temp = TMP2;
3380 __ SmiUntag(temp, left);
3381 __ SmiUntag(TMP, right);
3382
3383 __ sdiv(result, temp, TMP, compiler::kObjectBytes);
3384 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3385 // Check the corner case of dividing MIN_SMI by -1, in which case we
3386 // cannot tag the result.
3387#if !defined(DART_COMPRESSED_POINTERS)
3388 __ CompareImmediate(result, 0x4000000000000000LL);
3389#else
3390 __ CompareImmediate(result, 0x40000000LL, compiler::kFourBytes);
3391#endif
3392 __ b(deopt, EQ);
3393 }
3394 __ SmiTag(result);
3395 break;
3396 }
3397 case Token::kMOD: {
3399 // Handle divide by zero in runtime.
3400 __ cbz(deopt, right, compiler::kObjectBytes);
3401 }
3402 const Register temp = TMP2;
3403 __ SmiUntag(temp, left);
3404 __ SmiUntag(TMP, right);
3405
3406 __ sdiv(result, temp, TMP, compiler::kObjectBytes);
3407
3408 __ SmiUntag(TMP, right);
3409 __ msub(result, TMP, result, temp,
3410 compiler::kObjectBytes); // result <- left - right * result
3411 __ SmiTag(result);
3412 // res = left % right;
3413 // if (res < 0) {
3414 // if (right < 0) {
3415 // res = res - right;
3416 // } else {
3417 // res = res + right;
3418 // }
3419 // }
3420 compiler::Label done;
3421 __ CompareObjectRegisters(result, ZR);
3422 __ b(&done, GE);
3423 // Result is negative, adjust it.
3424 __ CompareObjectRegisters(right, ZR);
3425 __ sub(TMP, result, compiler::Operand(right), compiler::kObjectBytes);
3426 __ add(result, result, compiler::Operand(right), compiler::kObjectBytes);
3427 __ csel(result, TMP, result, LT);
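// Both adjustments are computed unconditionally; the csel picks
// result - right when right < 0 (LT from the compare of right against ZR)
// and result + right otherwise.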
3428 __ Bind(&done);
3429 break;
3430 }
3431 case Token::kSHR: {
3432 if (CanDeoptimize()) {
3434 }
3435 __ SmiUntag(TMP, right);
3436 // asrv[w] operation masks the count to 6/5 bits.
3437#if !defined(DART_COMPRESSED_POINTERS)
3438 const intptr_t kCountLimit = 0x3F;
3439#else
3440 const intptr_t kCountLimit = 0x1F;
3441#endif
3442 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3443 __ LoadImmediate(TMP2, kCountLimit);
3444 __ CompareObjectRegisters(TMP, TMP2);
3445 __ csel(TMP, TMP2, TMP, GT);
3446 }
3447 const Register temp = locs()->temp(0).reg();
3448 __ SmiUntag(temp, left);
3449 __ asrv(result, temp, TMP, compiler::kObjectBytes);
3450 __ SmiTag(result);
3451 break;
3452 }
3453 case Token::kUSHR: {
3454 if (CanDeoptimize()) {
3456 }
3457 __ SmiUntag(TMP, right);
3458 // lsrv operation masks the count to 6 bits.
3459 const intptr_t kCountLimit = 0x3F;
3460 COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
3461 compiler::Label done;
3462 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3463 __ LoadImmediate(TMP2, kCountLimit);
3464 __ CompareRegisters(TMP, TMP2);
3465 __ csel(result, ZR, result, GT);
3466 __ b(&done, GT);
3467 }
3468 const Register temp = locs()->temp(0).reg();
3469 __ SmiUntag(temp, left);
3470 __ lsrv(result, temp, TMP);
3471 if (deopt != nullptr) {
3472 __ SmiTagAndBranchIfOverflow(result, deopt);
3473 } else {
3474 __ SmiTag(result);
3475 }
3476 __ Bind(&done);
3477 break;
3478 }
3479 case Token::kDIV: {
3480 // Dispatches to 'Double./'.
3481 // TODO(srdjan): Implement as conversion to double and double division.
3482 UNREACHABLE();
3483 break;
3484 }
3485 case Token::kOR:
3486 case Token::kAND: {
3487 // Flow graph builder has dissected this operation to guarantee correct
3488 // behavior (short-circuit evaluation).
3489 UNREACHABLE();
3490 break;
3491 }
3492 default:
3493 UNREACHABLE();
3494 break;
3495 }
3496}
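// A minimal illustrative sketch (not part of the VM; the helper is an
// assumption operating on untagged values, requiring 1 <= k <= 63) of the
// rounding trick used in the constant kTRUNCDIV case above for a divisor that
// is a power of two, 2^k: an arithmetic shift alone rounds toward negative
// infinity, so a bias of 2^k - 1 is added to negative dividends first to get
// truncation toward zero. The emitted code applies the same idea directly to
// the tagged value.
static int64_t TruncatingDivByPow2Sketch(int64_t dividend, int k) {
  // sign is all ones for a negative dividend and zero otherwise.
  const uint64_t sign = static_cast<uint64_t>(dividend >> 63);
  const int64_t bias = static_cast<int64_t>(sign >> (64 - k));  // 2^k - 1 or 0.
  return (dividend + bias) >> k;
}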
3497
3498LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3499 bool opt) const {
3500 intptr_t left_cid = left()->Type()->ToCid();
3501 intptr_t right_cid = right()->Type()->ToCid();
3502 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3503 const intptr_t kNumInputs = 2;
3504 const intptr_t kNumTemps = 0;
3505 LocationSummary* summary = new (zone)
3506 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3507 summary->set_in(0, Location::RequiresRegister());
3508 summary->set_in(1, Location::RequiresRegister());
3509 return summary;
3510}
3511
3512void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3513 compiler::Label* deopt =
3514 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3515 intptr_t left_cid = left()->Type()->ToCid();
3516 intptr_t right_cid = right()->Type()->ToCid();
3517 const Register left = locs()->in(0).reg();
3518 const Register right = locs()->in(1).reg();
3519 if (this->left()->definition() == this->right()->definition()) {
3520 __ BranchIfSmi(left, deopt);
3521 } else if (left_cid == kSmiCid) {
3522 __ BranchIfSmi(right, deopt);
3523 } else if (right_cid == kSmiCid) {
3524 __ BranchIfSmi(left, deopt);
3525 } else {
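    // ORing the operands combines their tag bits: the result looks like a Smi
    // (tag bit clear) only if both operands are Smis, which is exactly the
    // case that must deoptimize.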
3526 __ orr(TMP, left, compiler::Operand(right));
3527 __ BranchIfSmi(TMP, deopt);
3528 }
3529}
3530
3531LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3532 const intptr_t kNumInputs = 1;
3533 const intptr_t kNumTemps = 1;
3534 LocationSummary* summary = new (zone) LocationSummary(
3535 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3537 summary->set_temp(0, Location::RequiresRegister());
3538 summary->set_out(0, Location::RequiresRegister());
3539 return summary;
3540}
3541
3542void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3543 const Register out_reg = locs()->out(0).reg();
3544 const Register temp_reg = locs()->temp(0).reg();
3545 const VRegister value = locs()->in(0).fpu_reg();
3546
3548 compiler->BoxClassFor(from_representation()),
3549 out_reg, temp_reg);
3550
3551 switch (from_representation()) {
3552 case kUnboxedDouble:
3553 __ StoreDFieldToOffset(value, out_reg, ValueOffset());
3554 break;
3555 case kUnboxedFloat:
3556 __ fcvtds(FpuTMP, value);
3557 __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
3558 break;
3559 case kUnboxedFloat32x4:
3560 case kUnboxedFloat64x2:
3561 case kUnboxedInt32x4:
3562 __ StoreQFieldToOffset(value, out_reg, ValueOffset());
3563 break;
3564 default:
3565 UNREACHABLE();
3566 break;
3567 }
3568}
3569
3570LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3572 const intptr_t kNumInputs = 1;
3573 const intptr_t kNumTemps = 0;
3574 const bool is_floating_point =
3576 LocationSummary* summary = new (zone)
3577 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3578 summary->set_in(0, Location::RequiresRegister());
3579 summary->set_out(0, is_floating_point ? Location::RequiresFpuRegister()
3581 return summary;
3582}
3583
3584void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3585 const Register box = locs()->in(0).reg();
3586
3587 switch (representation()) {
3588 case kUnboxedInt64: {
3589 const Register result = locs()->out(0).reg();
3590 __ ldr(result, compiler::FieldAddress(box, ValueOffset()));
3591 break;
3592 }
3593
3594 case kUnboxedDouble: {
3595 const VRegister result = locs()->out(0).fpu_reg();
3596 __ LoadDFieldFromOffset(result, box, ValueOffset());
3597 break;
3598 }
3599
3600 case kUnboxedFloat: {
3601 const VRegister result = locs()->out(0).fpu_reg();
3602 __ LoadDFieldFromOffset(result, box, ValueOffset());
3603 __ fcvtsd(result, result);
3604 break;
3605 }
3606
3607 case kUnboxedFloat32x4:
3608 case kUnboxedFloat64x2:
3609 case kUnboxedInt32x4: {
3610 const VRegister result = locs()->out(0).fpu_reg();
3611 __ LoadQFieldFromOffset(result, box, ValueOffset());
3612 break;
3613 }
3614
3615 default:
3616 UNREACHABLE();
3617 break;
3618 }
3619}
3620
3621void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3622 const Register box = locs()->in(0).reg();
3623
3624 switch (representation()) {
3625 case kUnboxedInt32:
3626 case kUnboxedInt64: {
3627 const Register result = locs()->out(0).reg();
3628 __ SmiUntag(result, box);
3629 break;
3630 }
3631
3632 case kUnboxedDouble: {
3633 const VRegister result = locs()->out(0).fpu_reg();
3634 __ SmiUntag(TMP, box);
3635#if !defined(DART_COMPRESSED_POINTERS)
3636 __ scvtfdx(result, TMP);
3637#else
3638 __ scvtfdw(result, TMP);
3639#endif
3640 break;
3641 }
3642
3643 default:
3644 UNREACHABLE();
3645 break;
3646 }
3647}
3648
3649void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3650 const Register value = locs()->in(0).reg();
3651 const Register result = locs()->out(0).reg();
3652 __ LoadInt32FromBoxOrSmi(result, value);
3653}
3654
3655void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3656 const Register value = locs()->in(0).reg();
3657 const Register result = locs()->out(0).reg();
3658 __ LoadInt64FromBoxOrSmi(result, value);
3659}
3660
3661LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3662 bool opt) const {
3663 ASSERT((from_representation() == kUnboxedInt32) ||
3664 (from_representation() == kUnboxedUint32));
3665#if !defined(DART_COMPRESSED_POINTERS)
3666 // ValueFitsSmi() may be overly conservative and false because we only
3667 // perform range analysis during optimized compilation.
3668 const bool kMayAllocateMint = false;
3669#else
3670 const bool kMayAllocateMint = !ValueFitsSmi();
3671#endif
3672 const intptr_t kNumInputs = 1;
3673 const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
3674 LocationSummary* summary = new (zone)
3675 LocationSummary(zone, kNumInputs, kNumTemps,
3676 kMayAllocateMint ? LocationSummary::kCallOnSlowPath
3678 summary->set_in(0, Location::RequiresRegister());
3679 summary->set_out(0, Location::RequiresRegister());
3680 if (kMayAllocateMint) {
3681 summary->set_temp(0, Location::RequiresRegister());
3682 }
3683 return summary;
3684}
3685
3686void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3687 Register value = locs()->in(0).reg();
3688 Register out = locs()->out(0).reg();
3689 ASSERT(value != out);
3690
3691#if !defined(DART_COMPRESSED_POINTERS)
3693 if (from_representation() == kUnboxedInt32) {
3694 __ sbfiz(out, value, kSmiTagSize, 32);
3695 } else {
3696 ASSERT(from_representation() == kUnboxedUint32);
3697 __ ubfiz(out, value, kSmiTagSize, 32);
3698 }
3699#else
3700 compiler::Label done;
3701 if (from_representation() == kUnboxedInt32) {
3702 ASSERT(kSmiTag == 0);
3703 // Signed Bitfield Insert in Zero takes the 31 significant bits of the
3704 // value and shifts them into Smi position, sign-extending the result.
3705 __ sbfiz(out, value, kSmiTagSize, 32 - kSmiTagSize);
3706 if (ValueFitsSmi()) {
3707 return;
3708 }
3709 __ cmpw(value, compiler::Operand(out, ASR, 1));
3710 __ b(&done, EQ); // Jump if the sbfiz instruction didn't lose info.
3711 } else {
3712 ASSERT(from_representation() == kUnboxedUint32);
3713 // A 32 bit positive Smi has one tag bit and one unused sign bit,
3714 // leaving only 30 bits for the payload.
3715 __ LslImmediate(out, value, kSmiTagSize, compiler::kFourBytes);
3716 if (ValueFitsSmi()) {
3717 return;
3718 }
3719 __ TestImmediate(value, 0xC0000000);
3720 __ b(&done, EQ); // Jump if both bits are zero.
3721 }
3722
3723 Register temp = locs()->temp(0).reg();
3725 temp);
3726 if (from_representation() == kUnboxedInt32) {
3727 __ sxtw(temp, value); // Sign-extend.
3728 } else {
3729 __ uxtw(temp, value); // Zero-extend.
3730 }
3731 __ StoreToOffset(temp, out, Mint::value_offset() - kHeapObjectTag);
3732 __ Bind(&done);
3733#endif
3734}
3735
3736LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
3737 bool opt) const {
3738 const intptr_t kNumInputs = 1;
3739 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3740 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
3741 // precompiled mode and only after VM isolate stubs were
3742 // replaced with isolate-specific stubs.
3743 auto object_store = IsolateGroup::Current()->object_store();
3744 const bool stubs_in_vm_isolate =
3745 object_store->allocate_mint_with_fpu_regs_stub()
3746 ->untag()
3747 ->InVMIsolateHeap() ||
3748 object_store->allocate_mint_without_fpu_regs_stub()
3749 ->untag()
3750 ->InVMIsolateHeap();
3751 const bool shared_slow_path_call =
3752 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
3753 LocationSummary* summary = new (zone) LocationSummary(
3754 zone, kNumInputs, kNumTemps,
3756 : shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
3758 summary->set_in(0, Location::RequiresRegister());
3759 if (ValueFitsSmi()) {
3760 summary->set_out(0, Location::RequiresRegister());
3761 } else if (shared_slow_path_call) {
3762 summary->set_out(0,
3765 } else {
3766 summary->set_out(0, Location::RequiresRegister());
3767 summary->set_temp(0, Location::RequiresRegister());
3768 }
3769 return summary;
3770}
3771
3772void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3773 Register in = locs()->in(0).reg();
3774 Register out = locs()->out(0).reg();
3775 if (ValueFitsSmi()) {
3776 __ SmiTag(out, in);
3777 return;
3778 }
3779 ASSERT(kSmiTag == 0);
3780 compiler::Label done;
3781#if !defined(DART_COMPRESSED_POINTERS)
3782 __ adds(out, in, compiler::Operand(in)); // SmiTag
3783 // If the value doesn't fit in a smi, the tagging changes the sign,
3784 // which causes the overflow flag to be set.
3785 __ b(&done, NO_OVERFLOW);
3786#else
3787 __ sbfiz(out, in, kSmiTagSize, 31); // SmiTag + sign-extend.
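  // Untagging 'out' again (ASR by kSmiTagSize) recovers the sign-extended low
  // 31 bits; if that equals 'in', the value fits in a compressed Smi.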
3788 __ cmp(in, compiler::Operand(out, ASR, kSmiTagSize));
3789 __ b(&done, EQ);
3790#endif
3791
3792 Register temp = locs()->temp(0).reg();
3793 if (compiler->intrinsic_mode()) {
3794 __ TryAllocate(compiler->mint_class(),
3795 compiler->intrinsic_slow_path_label(),
3797 } else if (locs()->call_on_shared_slow_path()) {
3798 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
3799 if (!has_frame) {
3800 ASSERT(__ constant_pool_allowed());
3801 __ set_constant_pool_allowed(false);
3802 __ EnterDartFrame(0);
3803 }
3804 auto object_store = compiler->isolate_group()->object_store();
3805 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
3806 const auto& stub = Code::ZoneHandle(
3807 compiler->zone(),
3808 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
3809 : object_store->allocate_mint_without_fpu_regs_stub());
3810
3811 ASSERT(!locs()->live_registers()->ContainsRegister(
3813 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
3814 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
3815 locs(), DeoptId::kNone, extended_env);
3816 if (!has_frame) {
3817 __ LeaveDartFrame();
3818 __ set_constant_pool_allowed(true);
3819 }
3820 } else {
3822 temp);
3823 }
3824
3825 __ StoreToOffset(in, out, Mint::value_offset() - kHeapObjectTag);
3826 __ Bind(&done);
3827}
3828
3829LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
3830 bool opt) const {
3831 const intptr_t kNumInputs = 1;
3832 const intptr_t kNumTemps = 0;
3833 LocationSummary* summary = new (zone)
3834 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3835 summary->set_in(0, Location::RequiresRegister());
3836 summary->set_out(0, Location::RequiresRegister());
3837 return summary;
3838}
3839
3840void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3841 const intptr_t value_cid = value()->Type()->ToCid();
3842 const Register out = locs()->out(0).reg();
3843 const Register value = locs()->in(0).reg();
3844 compiler::Label* deopt =
3846 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
3847 : nullptr;
3848
3849 if (value_cid == kSmiCid) {
3850 __ SmiUntag(out, value);
3851 } else if (value_cid == kMintCid) {
3852 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3853 } else if (!CanDeoptimize()) {
3854 // Type information is not conclusive, but range analysis found
3855 // the value to be in int64 range. Therefore it must be a smi
3856 // or mint value.
3858 compiler::Label done;
3859 __ SmiUntag(out, value);
3860 __ BranchIfSmi(value, &done);
3861 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3862 __ Bind(&done);
3863 } else {
3864 compiler::Label done;
3865 __ SmiUntag(out, value);
3866 __ BranchIfSmi(value, &done);
3867 __ CompareClassId(value, kMintCid);
3868 __ b(deopt, NE);
3869 __ LoadFieldFromOffset(out, value, Mint::value_offset());
3870 __ Bind(&done);
3871 }
3872
3873 // TODO(vegorov): as it is implemented right now truncating unboxing would
3874 // leave "garbage" in the higher word.
3875 if (!is_truncating() && (deopt != nullptr)) {
3876 ASSERT(representation() == kUnboxedInt32);
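    // Compare the value with its own sign-extended low word: they differ
    // exactly when the result does not fit in 32 signed bits.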
3877 __ cmp(out, compiler::Operand(out, SXTW, 0));
3878 __ b(deopt, NE);
3879 }
3880}
3881
3882LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
3883 bool opt) const {
3884 const intptr_t kNumInputs = 2;
3885 const intptr_t kNumTemps = 0;
3886 LocationSummary* summary = new (zone)
3887 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3889 summary->set_in(1, Location::RequiresFpuRegister());
3890 summary->set_out(0, Location::RequiresFpuRegister());
3891 return summary;
3892}
3893
3894void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3895 const VRegister left = locs()->in(0).fpu_reg();
3896 const VRegister right = locs()->in(1).fpu_reg();
3897 const VRegister result = locs()->out(0).fpu_reg();
3898 switch (op_kind()) {
3899 case Token::kADD:
3900 __ faddd(result, left, right);
3901 break;
3902 case Token::kSUB:
3903 __ fsubd(result, left, right);
3904 break;
3905 case Token::kMUL:
3906 __ fmuld(result, left, right);
3907 break;
3908 case Token::kDIV:
3909 __ fdivd(result, left, right);
3910 break;
3911 default:
3912 UNREACHABLE();
3913 }
3914}
3915
3916LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
3917 bool opt) const {
3918 const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
3919 const intptr_t kNumInputs = 1;
3920 const intptr_t kNumTemps = needs_temp ? 1 : 0;
3921 LocationSummary* summary = new (zone)
3922 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3923 summary->set_in(0, Location::RequiresFpuRegister());
3924 if (needs_temp) {
3925 summary->set_temp(0, Location::RequiresRegister());
3926 }
3927 summary->set_out(0, Location::RequiresRegister());
3928 return summary;
3929}
3930
3931Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
3932 BranchLabels labels) {
3933 ASSERT(compiler->is_optimizing());
3934 const VRegister value = locs()->in(0).fpu_reg();
3935 const bool is_negated = kind() != Token::kEQ;
3936
3937 switch (op_kind()) {
3938 case MethodRecognizer::kDouble_getIsNaN: {
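      // Comparing a value with itself is unordered (V set) iff it is NaN, so
      // VS/VC directly encode isNaN and its negation.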
3939 __ fcmpd(value, value);
3940 return is_negated ? VC : VS;
3941 }
3942 case MethodRecognizer::kDouble_getIsInfinite: {
3943 const Register temp = locs()->temp(0).reg();
3944 __ vmovrd(temp, value, 0);
3945 // Mask off the sign.
3946 __ AndImmediate(temp, temp, 0x7FFFFFFFFFFFFFFFLL);
3947 // Compare with +infinity.
3948 __ CompareImmediate(temp, 0x7FF0000000000000LL);
3949 return is_negated ? NE : EQ;
3950 }
3951 case MethodRecognizer::kDouble_getIsNegative: {
3952 const Register temp = locs()->temp(0).reg();
3953 compiler::Label not_zero;
3954 __ fcmpdz(value);
3955 // If it's NaN, it's not negative.
3956 __ b(is_negated ? labels.true_label : labels.false_label, VS);
3957 __ b(&not_zero, NOT_EQUAL);
3958 // Check for negative zero with a signed comparison.
3959 __ fmovrd(temp, value);
3960 __ CompareImmediate(temp, 0);
3961 __ Bind(&not_zero);
3962 return is_negated ? GE : LT;
3963 }
3964 default:
3965 UNREACHABLE();
3966 }
3967}
3968
3969// SIMD
3970
3971#define DEFINE_EMIT(Name, Args) \
3972 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
3973 PP_APPLY(PP_UNPACK, Args))
3974
3975#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
3976 V(Float32x4##Name, op##s) \
3977 V(Float64x2##Name, op##d)
3978
3979#define SIMD_OP_SIMPLE_BINARY(V) \
3980 SIMD_OP_FLOAT_ARITH(V, Add, vadd) \
3981 SIMD_OP_FLOAT_ARITH(V, Sub, vsub) \
3982 SIMD_OP_FLOAT_ARITH(V, Mul, vmul) \
3983 SIMD_OP_FLOAT_ARITH(V, Div, vdiv) \
3984 SIMD_OP_FLOAT_ARITH(V, Min, vmin) \
3985 SIMD_OP_FLOAT_ARITH(V, Max, vmax) \
3986 V(Int32x4Add, vaddw) \
3987 V(Int32x4Sub, vsubw) \
3988 V(Int32x4BitAnd, vand) \
3989 V(Int32x4BitOr, vorr) \
3990 V(Int32x4BitXor, veor) \
3991 V(Float32x4Equal, vceqs) \
3992 V(Float32x4GreaterThan, vcgts) \
3993 V(Float32x4GreaterThanOrEqual, vcges)
3994
3995DEFINE_EMIT(SimdBinaryOp, (VRegister result, VRegister left, VRegister right)) {
3996 switch (instr->kind()) {
3997#define EMIT(Name, op) \
3998 case SimdOpInstr::k##Name: \
3999 __ op(result, left, right); \
4000 break;
4001 SIMD_OP_SIMPLE_BINARY(EMIT)
4002#undef EMIT
4003 case SimdOpInstr::kFloat32x4ShuffleMix:
4004 case SimdOpInstr::kInt32x4ShuffleMix: {
4005 const intptr_t mask = instr->mask();
4006 __ vinss(result, 0, left, (mask >> 0) & 0x3);
4007 __ vinss(result, 1, left, (mask >> 2) & 0x3);
4008 __ vinss(result, 2, right, (mask >> 4) & 0x3);
4009 __ vinss(result, 3, right, (mask >> 6) & 0x3);
4010 break;
4011 }
4012 case SimdOpInstr::kFloat32x4NotEqual:
4013 __ vceqs(result, left, right);
4014 // Invert the result.
4015 __ vnot(result, result);
4016 break;
4017 case SimdOpInstr::kFloat32x4LessThan:
4018 __ vcgts(result, right, left);
4019 break;
4020 case SimdOpInstr::kFloat32x4LessThanOrEqual:
4021 __ vcges(result, right, left);
4022 break;
4023 case SimdOpInstr::kFloat32x4Scale:
4024 __ fcvtsd(VTMP, left);
4025 __ vdups(result, VTMP, 0);
4026 __ vmuls(result, result, right);
4027 break;
4028 case SimdOpInstr::kFloat64x2FromDoubles:
4029 __ vinsd(result, 0, left, 0);
4030 __ vinsd(result, 1, right, 0);
4031 break;
4032 case SimdOpInstr::kFloat64x2Scale:
4033 __ vdupd(VTMP, right, 0);
4034 __ vmuld(result, left, VTMP);
4035 break;
4036 default:
4037 UNREACHABLE();
4038 }
4039}
4040
4041#define SIMD_OP_SIMPLE_UNARY(V) \
4042 SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt) \
4043 SIMD_OP_FLOAT_ARITH(V, Negate, vneg) \
4044 SIMD_OP_FLOAT_ARITH(V, Abs, vabs) \
4045 V(Float32x4Reciprocal, VRecps) \
4046 V(Float32x4ReciprocalSqrt, VRSqrts)
4047
4048DEFINE_EMIT(SimdUnaryOp, (VRegister result, VRegister value)) {
4049 switch (instr->kind()) {
4050#define EMIT(Name, op) \
4051 case SimdOpInstr::k##Name: \
4052 __ op(result, value); \
4053 break;
4054 SIMD_OP_SIMPLE_UNARY(EMIT)
4055#undef EMIT
4056 case SimdOpInstr::kFloat32x4GetX:
4057 __ vinss(result, 0, value, 0);
4058 __ fcvtds(result, result);
4059 break;
4060 case SimdOpInstr::kFloat32x4GetY:
4061 __ vinss(result, 0, value, 1);
4062 __ fcvtds(result, result);
4063 break;
4064 case SimdOpInstr::kFloat32x4GetZ:
4065 __ vinss(result, 0, value, 2);
4066 __ fcvtds(result, result);
4067 break;
4068 case SimdOpInstr::kFloat32x4GetW:
4069 __ vinss(result, 0, value, 3);
4070 __ fcvtds(result, result);
4071 break;
4072 case SimdOpInstr::kInt32x4Shuffle:
4073 case SimdOpInstr::kFloat32x4Shuffle: {
4074 const intptr_t mask = instr->mask();
4075 if (mask == 0x00) {
4076 __ vdups(result, value, 0);
4077 } else if (mask == 0x55) {
4078 __ vdups(result, value, 1);
4079 } else if (mask == 0xAA) {
4080 __ vdups(result, value, 2);
4081 } else if (mask == 0xFF) {
4082 __ vdups(result, value, 3);
4083 } else {
4084 for (intptr_t i = 0; i < 4; i++) {
4085 __ vinss(result, i, value, (mask >> (2 * i)) & 0x3);
4086 }
4087 }
4088 break;
4089 }
4090 case SimdOpInstr::kFloat32x4Splat:
4091 // Convert to Float32.
4092 __ fcvtsd(VTMP, value);
4093 // Splat across all lanes.
4094 __ vdups(result, VTMP, 0);
4095 break;
4096 case SimdOpInstr::kFloat64x2GetX:
4097 __ vinsd(result, 0, value, 0);
4098 break;
4099 case SimdOpInstr::kFloat64x2GetY:
4100 __ vinsd(result, 0, value, 1);
4101 break;
4102 case SimdOpInstr::kFloat64x2Splat:
4103 __ vdupd(result, value, 0);
4104 break;
4105 case SimdOpInstr::kFloat64x2ToFloat32x4:
4106 // Zero register.
4107 __ veor(result, result, result);
4108 // Set X lane.
4109 __ vinsd(VTMP, 0, value, 0);
4110 __ fcvtsd(VTMP, VTMP);
4111 __ vinss(result, 0, VTMP, 0);
4112 // Set Y lane.
4113 __ vinsd(VTMP, 0, value, 1);
4114 __ fcvtsd(VTMP, VTMP);
4115 __ vinss(result, 1, VTMP, 0);
4116 break;
4117 case SimdOpInstr::kFloat32x4ToFloat64x2:
4118 // Set X.
4119 __ vinss(VTMP, 0, value, 0);
4120 __ fcvtds(VTMP, VTMP);
4121 __ vinsd(result, 0, VTMP, 0);
4122 // Set Y.
4123 __ vinss(VTMP, 0, value, 1);
4124 __ fcvtds(VTMP, VTMP);
4125 __ vinsd(result, 1, VTMP, 0);
4126 break;
4127 default:
4128 UNREACHABLE();
4129 }
4130}
4131
4132DEFINE_EMIT(Simd32x4GetSignMask,
4133 (Register out, VRegister value, Temp<Register> temp)) {
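  // Packs the sign bit (bit 31) of lanes X..W into bits 0..3 of 'out', the
  // value returned by Float32x4.signMask and Int32x4.signMask.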
4134 // X lane.
4135 __ vmovrs(out, value, 0);
4136 __ LsrImmediate(out, out, 31);
4137 // Y lane.
4138 __ vmovrs(temp, value, 1);
4139 __ LsrImmediate(temp, temp, 31);
4140 __ orr(out, out, compiler::Operand(temp, LSL, 1));
4141 // Z lane.
4142 __ vmovrs(temp, value, 2);
4143 __ LsrImmediate(temp, temp, 31);
4144 __ orr(out, out, compiler::Operand(temp, LSL, 2));
4145 // W lane.
4146 __ vmovrs(temp, value, 3);
4147 __ LsrImmediate(temp, temp, 31);
4148 __ orr(out, out, compiler::Operand(temp, LSL, 3));
4149}
4150
4151DEFINE_EMIT(
4152 Float32x4FromDoubles,
4154 __ fcvtsd(VTMP, v0);
4155 __ vinss(r, 0, VTMP, 0);
4156 __ fcvtsd(VTMP, v1);
4157 __ vinss(r, 1, VTMP, 0);
4158 __ fcvtsd(VTMP, v2);
4159 __ vinss(r, 2, VTMP, 0);
4160 __ fcvtsd(VTMP, v3);
4161 __ vinss(r, 3, VTMP, 0);
4162}
4163
4164DEFINE_EMIT(
4165 Float32x4Clamp,
4167 __ vmins(result, value, upper);
4168 __ vmaxs(result, result, lower);
4169}
4170
4171DEFINE_EMIT(
4172 Float64x2Clamp,
4174 __ vmind(result, value, upper);
4175 __ vmaxd(result, result, lower);
4176}
4177
4178DEFINE_EMIT(Float32x4With,
4179 (VRegister result, VRegister replacement, VRegister value)) {
4180 __ fcvtsd(VTMP, replacement);
4181 __ vmov(result, value);
4182 switch (instr->kind()) {
4183 case SimdOpInstr::kFloat32x4WithX:
4184 __ vinss(result, 0, VTMP, 0);
4185 break;
4186 case SimdOpInstr::kFloat32x4WithY:
4187 __ vinss(result, 1, VTMP, 0);
4188 break;
4189 case SimdOpInstr::kFloat32x4WithZ:
4190 __ vinss(result, 2, VTMP, 0);
4191 break;
4192 case SimdOpInstr::kFloat32x4WithW:
4193 __ vinss(result, 3, VTMP, 0);
4194 break;
4195 default:
4196 UNREACHABLE();
4197 }
4198}
4199
4200DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, VRegister value)) {
4201 // TODO(dartbug.com/30949) these operations are essentially nops and should
4202 // not generate any code. They should be removed from the graph before
4203 // code generation.
4204}
4205
4206DEFINE_EMIT(SimdZero, (VRegister v)) {
4207 __ veor(v, v, v);
4208}
4209
4210DEFINE_EMIT(Float64x2GetSignMask, (Register out, VRegister value)) {
4211 // Bits of X lane.
4212 __ vmovrd(out, value, 0);
4213 __ LsrImmediate(out, out, 63);
4214 // Bits of Y lane.
4215 __ vmovrd(TMP, value, 1);
4216 __ LsrImmediate(TMP, TMP, 63);
4217 __ orr(out, out, compiler::Operand(TMP, LSL, 1));
4218}
4219
4220DEFINE_EMIT(Float64x2With,
4221 (SameAsFirstInput, VRegister left, VRegister right)) {
4222 switch (instr->kind()) {
4223 case SimdOpInstr::kFloat64x2WithX:
4224 __ vinsd(left, 0, right, 0);
4225 break;
4226 case SimdOpInstr::kFloat64x2WithY:
4227 __ vinsd(left, 1, right, 0);
4228 break;
4229 default:
4230 UNREACHABLE();
4231 }
4232}
4233
4234DEFINE_EMIT(
4235 Int32x4FromInts,
4237 __ veor(result, result, result);
4238 __ vinsw(result, 0, v0);
4239 __ vinsw(result, 1, v1);
4240 __ vinsw(result, 2, v2);
4241 __ vinsw(result, 3, v3);
4242}
4243
4244DEFINE_EMIT(Int32x4FromBools,
4246 Register v0,
4247 Register v1,
4248 Register v2,
4249 Register v3,
4250 Temp<Register> temp)) {
4251 __ veor(result, result, result);
4252 __ LoadImmediate(temp, 0xffffffff);
4253 __ LoadObject(TMP2, Bool::True());
4254
4255 const Register vs[] = {v0, v1, v2, v3};
4256 for (intptr_t i = 0; i < 4; i++) {
4257 __ CompareObjectRegisters(vs[i], TMP2);
4258 __ csel(TMP, temp, ZR, EQ);
4259 __ vinsw(result, i, TMP);
4260 }
4261}
4262
4263DEFINE_EMIT(Int32x4GetFlag, (Register result, VRegister value)) {
4264 switch (instr->kind()) {
4265 case SimdOpInstr::kInt32x4GetFlagX:
4266 __ vmovrs(result, value, 0);
4267 break;
4268 case SimdOpInstr::kInt32x4GetFlagY:
4269 __ vmovrs(result, value, 1);
4270 break;
4271 case SimdOpInstr::kInt32x4GetFlagZ:
4272 __ vmovrs(result, value, 2);
4273 break;
4274 case SimdOpInstr::kInt32x4GetFlagW:
4275 __ vmovrs(result, value, 3);
4276 break;
4277 default:
4278 UNREACHABLE();
4279 }
4280
4281 __ tst(result, compiler::Operand(result));
4282 __ LoadObject(result, Bool::True());
4283 __ LoadObject(TMP, Bool::False());
4284 __ csel(result, TMP, result, EQ);
4285}
4286
4287DEFINE_EMIT(Int32x4Select,
4288 (VRegister out,
4289 VRegister mask,
4290 VRegister trueValue,
4291 VRegister falseValue,
4292 Temp<VRegister> temp)) {
4293 // Copy mask.
4294 __ vmov(temp, mask);
4295 // Invert it.
4296 __ vnot(temp, temp);
4297 // mask = mask & trueValue.
4298 __ vand(mask, mask, trueValue);
4299 // temp = temp & falseValue.
4300 __ vand(temp, temp, falseValue);
4301 // out = mask | temp.
4302 __ vorr(out, mask, temp);
4303}
4304
4305DEFINE_EMIT(Int32x4WithFlag,
4306 (SameAsFirstInput, VRegister mask, Register flag)) {
4307 const VRegister result = mask;
4308 __ CompareObject(flag, Bool::True());
4309 __ LoadImmediate(TMP, 0xffffffff);
4310 __ csel(TMP, TMP, ZR, EQ);
4311 switch (instr->kind()) {
4312 case SimdOpInstr::kInt32x4WithFlagX:
4313 __ vinsw(result, 0, TMP);
4314 break;
4315 case SimdOpInstr::kInt32x4WithFlagY:
4316 __ vinsw(result, 1, TMP);
4317 break;
4318 case SimdOpInstr::kInt32x4WithFlagZ:
4319 __ vinsw(result, 2, TMP);
4320 break;
4321 case SimdOpInstr::kInt32x4WithFlagW:
4322 __ vinsw(result, 3, TMP);
4323 break;
4324 default:
4325 UNREACHABLE();
4326 }
4327}
4328
4329// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
4330// format:
4331//
4332// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
4333// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
4334//
4335#define SIMD_OP_VARIANTS(CASE, ____) \
4336 SIMD_OP_SIMPLE_BINARY(CASE) \
4337 CASE(Float32x4ShuffleMix) \
4338 CASE(Int32x4ShuffleMix) \
4339 CASE(Float32x4NotEqual) \
4340 CASE(Float32x4LessThan) \
4341 CASE(Float32x4LessThanOrEqual) \
4342 CASE(Float32x4Scale) \
4343 CASE(Float64x2FromDoubles) \
4344 CASE(Float64x2Scale) \
4345 ____(SimdBinaryOp) \
4346 SIMD_OP_SIMPLE_UNARY(CASE) \
4347 CASE(Float32x4GetX) \
4348 CASE(Float32x4GetY) \
4349 CASE(Float32x4GetZ) \
4350 CASE(Float32x4GetW) \
4351 CASE(Int32x4Shuffle) \
4352 CASE(Float32x4Shuffle) \
4353 CASE(Float32x4Splat) \
4354 CASE(Float64x2GetX) \
4355 CASE(Float64x2GetY) \
4356 CASE(Float64x2Splat) \
4357 CASE(Float64x2ToFloat32x4) \
4358 CASE(Float32x4ToFloat64x2) \
4359 ____(SimdUnaryOp) \
4360 CASE(Float32x4GetSignMask) \
4361 CASE(Int32x4GetSignMask) \
4362 ____(Simd32x4GetSignMask) \
4363 CASE(Float32x4FromDoubles) \
4364 ____(Float32x4FromDoubles) \
4365 CASE(Float32x4Zero) \
4366 CASE(Float64x2Zero) \
4367 ____(SimdZero) \
4368 CASE(Float32x4Clamp) \
4369 ____(Float32x4Clamp) \
4370 CASE(Float64x2Clamp) \
4371 ____(Float64x2Clamp) \
4372 CASE(Float32x4WithX) \
4373 CASE(Float32x4WithY) \
4374 CASE(Float32x4WithZ) \
4375 CASE(Float32x4WithW) \
4376 ____(Float32x4With) \
4377 CASE(Float32x4ToInt32x4) \
4378 CASE(Int32x4ToFloat32x4) \
4379 ____(Simd32x4ToSimd32x4) \
4380 CASE(Float64x2GetSignMask) \
4381 ____(Float64x2GetSignMask) \
4382 CASE(Float64x2WithX) \
4383 CASE(Float64x2WithY) \
4384 ____(Float64x2With) \
4385 CASE(Int32x4FromInts) \
4386 ____(Int32x4FromInts) \
4387 CASE(Int32x4FromBools) \
4388 ____(Int32x4FromBools) \
4389 CASE(Int32x4GetFlagX) \
4390 CASE(Int32x4GetFlagY) \
4391 CASE(Int32x4GetFlagZ) \
4392 CASE(Int32x4GetFlagW) \
4393 ____(Int32x4GetFlag) \
4394 CASE(Int32x4Select) \
4395 ____(Int32x4Select) \
4396 CASE(Int32x4WithFlagX) \
4397 CASE(Int32x4WithFlagY) \
4398 CASE(Int32x4WithFlagZ) \
4399 CASE(Int32x4WithFlagW) \
4400 ____(Int32x4WithFlag)
4401
4402LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4403 switch (kind()) {
4404#define CASE(Name, ...) case k##Name:
4405#define EMIT(Name) \
4406 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
4407 SIMD_OP_VARIANTS(CASE, EMIT)
4408#undef CASE
4409#undef EMIT
4410 case kIllegalSimdOp:
4411 UNREACHABLE();
4412 break;
4413 }
4414 UNREACHABLE();
4415 return nullptr;
4416}
4417
4418void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4419 switch (kind()) {
4420#define CASE(Name, ...) case k##Name:
4421#define EMIT(Name) \
4422 InvokeEmitter(compiler, this, &Emit##Name); \
4423 break;
4424 SIMD_OP_VARIANTS(CASE, EMIT)
4425#undef CASE
4426#undef EMIT
4427 case kIllegalSimdOp:
4428 UNREACHABLE();
4429 break;
4430 }
4431}
4432
4433#undef DEFINE_EMIT
4434
4436 Zone* zone,
4437 bool opt) const {
4438 const intptr_t kNumTemps = 0;
4439 LocationSummary* summary = new (zone)
4440 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4441 summary->set_in(0, Location::RegisterLocation(R0));
4442 summary->set_in(1, Location::RegisterLocation(R1));
4443 summary->set_in(2, Location::RegisterLocation(R2));
4444 summary->set_in(3, Location::RegisterLocation(R3));
4445 summary->set_out(0, Location::RegisterLocation(R0));
4446 return summary;
4447}
4448
4450 compiler::LeafRuntimeScope rt(compiler->assembler(),
4451 /*frame_size=*/0,
4452 /*preserve_registers=*/false);
4453 // Call the function. Parameters are already in their correct spots.
4455}
4456
4457LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4458 bool opt) const {
4459 if (result_cid() == kDoubleCid) {
4460 const intptr_t kNumInputs = 2;
4461 const intptr_t kNumTemps = 0;
4462 LocationSummary* summary = new (zone)
4463 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4465 summary->set_in(1, Location::RequiresFpuRegister());
4466 // Reuse the left register so that code can be made shorter.
4467 summary->set_out(0, Location::SameAsFirstInput());
4468 return summary;
4469 }
4470 ASSERT(result_cid() == kSmiCid);
4471 const intptr_t kNumInputs = 2;
4472 const intptr_t kNumTemps = 0;
4473 LocationSummary* summary = new (zone)
4474 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4475 summary->set_in(0, Location::RequiresRegister());
4476 summary->set_in(1, Location::RequiresRegister());
4477 // Reuse the left register so that code can be made shorter.
4478 summary->set_out(0, Location::SameAsFirstInput());
4479 return summary;
4480}
4481
4482void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4483 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4484 (op_kind() == MethodRecognizer::kMathMax));
4485 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4486 if (result_cid() == kDoubleCid) {
4487 compiler::Label done, returns_nan, are_equal;
4488 const VRegister left = locs()->in(0).fpu_reg();
4489 const VRegister right = locs()->in(1).fpu_reg();
4490 const VRegister result = locs()->out(0).fpu_reg();
4491 __ fcmpd(left, right);
4492 __ b(&returns_nan, VS);
4493 __ b(&are_equal, EQ);
4494 const Condition double_condition =
4495 is_min ? TokenKindToDoubleCondition(Token::kLTE)
4496 : TokenKindToDoubleCondition(Token::kGTE);
4497 ASSERT(left == result);
4498 __ b(&done, double_condition);
4499 __ fmovdd(result, right);
4500 __ b(&done);
4501
4502 __ Bind(&returns_nan);
4503 __ LoadDImmediate(result, NAN);
4504 __ b(&done);
4505
4506 __ Bind(&are_equal);
4507 // Check for negative zero: -0.0 is equal to 0.0, but min or max must return
4508 // -0.0 or 0.0 respectively.
4509 // Check for negative left value (get the sign bit):
4510 // - min -> left is negative ? left : right.
4511 // - max -> left is negative ? right : left
4512 // Check the sign bit.
4513 __ fmovrd(TMP, left); // Sign bit is in bit 63 of TMP.
4514 __ CompareImmediate(TMP, 0);
4515 if (is_min) {
4516 ASSERT(left == result);
4517 __ b(&done, LT);
4518 __ fmovdd(result, right);
4519 } else {
4520 __ b(&done, GE);
4521 __ fmovdd(result, right);
4522 ASSERT(left == result);
4523 }
4524 __ Bind(&done);
4525 return;
4526 }
4527
4528 ASSERT(result_cid() == kSmiCid);
4529 const Register left = locs()->in(0).reg();
4530 const Register right = locs()->in(1).reg();
4531 const Register result = locs()->out(0).reg();
4532 __ CompareObjectRegisters(left, right);
4533 ASSERT(result == left);
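  // 'result' already holds left; conditionally replace it with right so that
  // min keeps the smaller operand and max keeps the larger one.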
4534 if (is_min) {
4535 __ csel(result, right, left, GT);
4536 } else {
4537 __ csel(result, right, left, LT);
4538 }
4539}
4540
4541LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4542 bool opt) const {
4543 const intptr_t kNumInputs = 1;
4544 const intptr_t kNumTemps = 0;
4545 LocationSummary* summary = new (zone)
4546 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4547 summary->set_in(0, Location::RequiresRegister());
4548 // Unlike on Intel, we can use 3-operand instructions, so the result
4549 // register need not be identical to the first input register.
4550 summary->set_out(0, Location::RequiresRegister());
4551 return summary;
4552}
4553
4554void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4555 const Register value = locs()->in(0).reg();
4556 const Register result = locs()->out(0).reg();
4557 switch (op_kind()) {
4558 case Token::kNEGATE: {
4559 compiler::Label* deopt =
4560 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4561 __ subs(result, ZR, compiler::Operand(value), compiler::kObjectBytes);
4562 __ b(deopt, VS);
4563 break;
4564 }
4565 case Token::kBIT_NOT:
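      // For a tagged Smi 2*n, mvn yields 2*(~n) + 1; clearing the tag bit
      // below leaves 2*(~n), i.e. the correctly tagged ~n.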
4566 __ mvn_(result, value);
4567 // Remove inverted smi-tag.
4568 __ andi(result, result, compiler::Immediate(~kSmiTagMask));
4569 break;
4570 default:
4571 UNREACHABLE();
4572 }
4573}
4574
4575LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4576 bool opt) const {
4577 const intptr_t kNumInputs = 1;
4578 const intptr_t kNumTemps = 0;
4579 LocationSummary* summary = new (zone)
4580 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4582 summary->set_out(0, Location::RequiresFpuRegister());
4583 return summary;
4584}
4585
4586void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4587 ASSERT(representation() == kUnboxedDouble);
4588 const VRegister result = locs()->out(0).fpu_reg();
4589 const VRegister value = locs()->in(0).fpu_reg();
4590 switch (op_kind()) {
4591 case Token::kNEGATE:
4592 __ fnegd(result, value);
4593 break;
4594 case Token::kSQRT:
4595 __ fsqrtd(result, value);
4596 break;
4597 case Token::kSQUARE:
4598 __ fmuld(result, value, value);
4599 break;
4600 default:
4601 UNREACHABLE();
4602 }
4603}
4604
4605LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4606 bool opt) const {
4607 const intptr_t kNumInputs = 1;
4608 const intptr_t kNumTemps = 0;
4609 LocationSummary* result = new (zone)
4610 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4611 result->set_in(0, Location::RequiresRegister());
4613 return result;
4614}
4615
4616void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4617 const Register value = locs()->in(0).reg();
4618 const VRegister result = locs()->out(0).fpu_reg();
4619 __ scvtfdw(result, value);
4620}
4621
4622LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4623 bool opt) const {
4624 const intptr_t kNumInputs = 1;
4625 const intptr_t kNumTemps = 0;
4626 LocationSummary* result = new (zone)
4627 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4628 result->set_in(0, Location::RequiresRegister());
4630 return result;
4631}
4632
4633void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4634 const Register value = locs()->in(0).reg();
4635 const VRegister result = locs()->out(0).fpu_reg();
4636 __ SmiUntag(TMP, value);
4637#if !defined(DART_COMPRESSED_POINTERS)
4638 __ scvtfdx(result, TMP);
4639#else
4640 __ scvtfdw(result, TMP);
4641#endif
4642}
4643
4644LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4645 bool opt) const {
4646 const intptr_t kNumInputs = 1;
4647 const intptr_t kNumTemps = 0;
4648 LocationSummary* result = new (zone)
4649 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4650 result->set_in(0, Location::RequiresRegister());
4652 return result;
4653}
4654
4655void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4656 const Register value = locs()->in(0).reg();
4657 const VRegister result = locs()->out(0).fpu_reg();
4658 __ scvtfdx(result, value);
4659}
4660
4661LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4662 bool opt) const {
4663 const intptr_t kNumInputs = 1;
4664 const intptr_t kNumTemps = 0;
4665 LocationSummary* result = new (zone) LocationSummary(
4666 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4668 result->set_out(0, Location::RequiresRegister());
4669 return result;
4670}
4671
4672void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4673 const Register result = locs()->out(0).reg();
4674 const VRegister value_double = locs()->in(0).fpu_reg();
4675
4676 DoubleToIntegerSlowPath* slow_path =
4677 new DoubleToIntegerSlowPath(this, value_double);
4678 compiler->AddSlowPathCode(slow_path);
4679
4680 // First check for NaN. Checking for minint after the conversion doesn't work
4681 // on ARM64 because fcvtzs gives 0 for NaN.
4682 __ fcmpd(value_double, value_double);
4683 __ b(slow_path->entry_label(), VS);
4684
4685 switch (recognized_kind()) {
4686 case MethodRecognizer::kDoubleToInteger:
4687 __ fcvtzsxd(result, value_double);
4688 break;
4689 case MethodRecognizer::kDoubleFloorToInt:
4690 __ fcvtmsxd(result, value_double);
4691 break;
4692 case MethodRecognizer::kDoubleCeilToInt:
4693 __ fcvtpsxd(result, value_double);
4694 break;
4695 default:
4696 UNREACHABLE();
4697 }
4698 // Overflow is signaled with minint.
4699
4700#if !defined(DART_COMPRESSED_POINTERS)
4701 // Check for overflow and that it fits into Smi.
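  // Comparing against -2^62 adds 2^62 modulo 2^64, so the N flag (MI) is set
  // exactly when the result lies outside the Smi range [-2^62, 2^62 - 1],
  // including the minint marker produced on conversion overflow.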
4702 __ CompareImmediate(result, 0xC000000000000000);
4703 __ b(slow_path->entry_label(), MI);
4704#else
4705 // Check for overflow and that it fits into Smi.
4706 __ AsrImmediate(TMP, result, 30);
4707 __ cmp(TMP, compiler::Operand(result, ASR, 63));
4708 __ b(slow_path->entry_label(), NE);
4709#endif
4710 __ SmiTag(result);
4711 __ Bind(slow_path->exit_label());
4712}
4713
4714LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4715 bool opt) const {
4716 const intptr_t kNumInputs = 1;
4717 const intptr_t kNumTemps = 0;
4718 LocationSummary* result = new (zone)
4719 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4721 result->set_out(0, Location::RequiresRegister());
4722 return result;
4723}
4724
4725void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4726 compiler::Label* deopt =
4727 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4728 const Register result = locs()->out(0).reg();
4729 const VRegister value = locs()->in(0).fpu_reg();
4730 // First check for NaN. Checking for minint after the conversion doesn't work
4731 // on ARM64 because fcvtzs gives 0 for NaN.
4732 // TODO(zra): Check spec that this is true.
4733 __ fcmpd(value, value);
4734 __ b(deopt, VS);
4735
4736 __ fcvtzsxd(result, value);
4737
4738#if !defined(DART_COMPRESSED_POINTERS)
4739 // Check for overflow and that it fits into Smi.
4740 __ CompareImmediate(result, 0xC000000000000000);
4741 __ b(deopt, MI);
4742#else
4743 // Check for overflow and that it fits into Smi.
4744 __ AsrImmediate(TMP, result, 30);
4745 __ cmp(TMP, compiler::Operand(result, ASR, 63));
4746 __ b(deopt, NE);
4747#endif
4748 __ SmiTag(result);
4749}
4750
4751LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4752 bool opt) const {
4753 const intptr_t kNumInputs = 1;
4754 const intptr_t kNumTemps = 0;
4755 LocationSummary* result = new (zone)
4756 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4759 return result;
4760}
4761
4762void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4763 const VRegister value = locs()->in(0).fpu_reg();
4764 const VRegister result = locs()->out(0).fpu_reg();
4765 __ fcvtsd(result, value);
4766}
4767
4768LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4769 bool opt) const {
4770 const intptr_t kNumInputs = 1;
4771 const intptr_t kNumTemps = 0;
4772 LocationSummary* result = new (zone)
4773 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4776 return result;
4777}
4778
4779void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4780 const VRegister value = locs()->in(0).fpu_reg();
4781 const VRegister result = locs()->out(0).fpu_reg();
4782 __ fcvtds(result, value);
4783}
4784
4785LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4786 bool opt) const {
4787 UNREACHABLE();
4788 return NULL;
4789}
4790
4791void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4792 UNREACHABLE();
4793}
4794
4795LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4796 bool opt) const {
4797 ASSERT((InputCount() == 1) || (InputCount() == 2));
4798 const intptr_t kNumTemps =
4799 (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 1 : 0;
4800 LocationSummary* result = new (zone)
4801 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4803 if (InputCount() == 2) {
4805 }
4806 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4808 }
4810 return result;
4811}
4812
4813// Pseudo code:
4814// if (exponent == 0.0) return 1.0;
4815// // Speed up simple cases.
4816// if (exponent == 1.0) return base;
4817// if (exponent == 2.0) return base * base;
4818// if (exponent == 3.0) return base * base * base;
4819// if (base == 1.0) return 1.0;
4820// if (base.isNaN || exponent.isNaN) {
4821// return double.NAN;
4822// }
4823// if (base != -Infinity && exponent == 0.5) {
4824// if (base == 0.0) return 0.0;
4825// return sqrt(value);
4826// }
4827// TODO(srdjan): Move into a stub?
4828static void InvokeDoublePow(FlowGraphCompiler* compiler,
4829 InvokeMathCFunctionInstr* instr) {
4830 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
4831 const intptr_t kInputCount = 2;
4832 ASSERT(instr->InputCount() == kInputCount);
4833 LocationSummary* locs = instr->locs();
4834
4835 const VRegister base = locs->in(0).fpu_reg();
4836 const VRegister exp = locs->in(1).fpu_reg();
4837 const VRegister result = locs->out(0).fpu_reg();
4838 const VRegister saved_base = locs->temp(0).fpu_reg();
4839 ASSERT((base == result) && (result != saved_base));
4840
4841 compiler::Label skip_call, try_sqrt, check_base, return_nan, do_pow;
4842 __ fmovdd(saved_base, base);
4843 __ LoadDImmediate(result, 1.0);
4844 // exponent == 0.0 -> return 1.0;
4845 __ fcmpdz(exp);
4846 __ b(&check_base, VS); // NaN -> check base.
4847 __ b(&skip_call, EQ); // exp is 0.0, result is 1.0.
4848
4849 // exponent == 1.0 ?
4850 __ fcmpd(exp, result);
4851 compiler::Label return_base;
4852 __ b(&return_base, EQ);
4853
4854 // exponent == 2.0 ?
4855 __ LoadDImmediate(VTMP, 2.0);
4856 __ fcmpd(exp, VTMP);
4857 compiler::Label return_base_times_2;
4858 __ b(&return_base_times_2, EQ);
4859
4860 // exponent == 3.0 ?
4861 __ LoadDImmediate(VTMP, 3.0);
4862 __ fcmpd(exp, VTMP);
4863 __ b(&check_base, NE);
4864
4865 // base_times_3.
4866 __ fmuld(result, saved_base, saved_base);
4867 __ fmuld(result, result, saved_base);
4868 __ b(&skip_call);
4869
4870 __ Bind(&return_base);
4871 __ fmovdd(result, saved_base);
4872 __ b(&skip_call);
4873
4874 __ Bind(&return_base_times_2);
4875 __ fmuld(result, saved_base, saved_base);
4876 __ b(&skip_call);
4877
4878 __ Bind(&check_base);
4879 // Note: 'exp' could be NaN.
4880 // base == 1.0 -> return 1.0;
4881 __ fcmpd(saved_base, result);
4882 __ b(&return_nan, VS);
4883 __ b(&skip_call, EQ); // base is 1.0, result is 1.0.
4884
4885 __ fcmpd(saved_base, exp);
4886 __ b(&try_sqrt, VC); // Neither 'exp' nor 'base' is NaN.
4887
4888 __ Bind(&return_nan);
4889 __ LoadDImmediate(result, NAN);
4890 __ b(&skip_call);
4891
4892 compiler::Label return_zero;
4893 __ Bind(&try_sqrt);
4894
4895 // Before calling pow, check if we could use sqrt instead of pow.
4896 __ LoadDImmediate(result, kNegInfinity);
4897
4898 // base == -Infinity -> call pow;
4899 __ fcmpd(saved_base, result);
4900 __ b(&do_pow, EQ);
4901
4902 // exponent == 0.5 ?
4903 __ LoadDImmediate(result, 0.5);
4904 __ fcmpd(exp, result);
4905 __ b(&do_pow, NE);
4906
4907 // base == 0 -> return 0;
4908 __ fcmpdz(saved_base);
4909 __ b(&return_zero, EQ);
4910
4911 __ fsqrtd(result, saved_base);
4912 __ b(&skip_call);
4913
4914 __ Bind(&return_zero);
4915 __ LoadDImmediate(result, 0.0);
4916 __ b(&skip_call);
4917
4918 __ Bind(&do_pow);
4919 __ fmovdd(base, saved_base); // Restore base.
4920 {
4921 compiler::LeafRuntimeScope rt(compiler->assembler(),
4922 /*frame_size=*/0,
4923 /*preserve_registers=*/false);
4924 ASSERT(base == V0);
4925 ASSERT(exp == V1);
4926 rt.Call(instr->TargetFunction(), kInputCount);
4927 ASSERT(result == V0);
4928 }
4929 __ Bind(&skip_call);
4930}
4931
4932void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4933 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4934 InvokeDoublePow(compiler, this);
4935 return;
4936 }
4937
4938 compiler::LeafRuntimeScope rt(compiler->assembler(),
4939 /*frame_size=*/0,
4940 /*preserve_registers=*/false);
4941 ASSERT(locs()->in(0).fpu_reg() == V0);
4942 if (InputCount() == 2) {
4943 ASSERT(locs()->in(1).fpu_reg() == V1);
4944 }
4945 rt.Call(TargetFunction(), InputCount());
4946 ASSERT(locs()->out(0).fpu_reg() == V0);
4947}
4948
4949LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4950 bool opt) const {
4951 // Only use this instruction in optimized code.
4952 ASSERT(opt);
4953 const intptr_t kNumInputs = 1;
4954 LocationSummary* summary =
4955 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4956 if (representation() == kUnboxedDouble) {
4957 if (index() == 0) {
4958 summary->set_in(
4960 } else {
4961 ASSERT(index() == 1);
4962 summary->set_in(
4964 }
4965 summary->set_out(0, Location::RequiresFpuRegister());
4966 } else {
4967 ASSERT(representation() == kTagged);
4968 if (index() == 0) {
4969 summary->set_in(
4971 } else {
4972 ASSERT(index() == 1);
4973 summary->set_in(
4975 }
4976 summary->set_out(0, Location::RequiresRegister());
4977 }
4978 return summary;
4979}
4980
4981void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4982 ASSERT(locs()->in(0).IsPairLocation());
4983 PairLocation* pair = locs()->in(0).AsPairLocation();
4984 Location in_loc = pair->At(index());
4985 if (representation() == kUnboxedDouble) {
4986 const VRegister out = locs()->out(0).fpu_reg();
4987 const VRegister in = in_loc.fpu_reg();
4988 __ fmovdd(out, in);
4989 } else {
4990 ASSERT(representation() == kTagged);
4991 const Register out = locs()->out(0).reg();
4992 const Register in = in_loc.reg();
4993 __ mov(out, in);
4994 }
4995}
4996
4997LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
4998 bool opt) const {
4999 UNREACHABLE();
5000 return NULL;
5001}
5002
5003void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5004 UNREACHABLE();
5005}
5006
5007LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
5008 bool opt) const {
5009 UNREACHABLE();
5010 return NULL;
5011}
5012
5013void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5014 UNREACHABLE();
5015}
5016
5017LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5018 bool opt) const {
5019 const intptr_t kNumInputs = 2;
5020 const intptr_t kNumTemps = 0;
5021 LocationSummary* summary = new (zone)
5022 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5023 summary->set_in(0, Location::RequiresRegister());
5024 summary->set_in(1, Location::RequiresRegister());
5025 // Output is a pair of registers.
5026 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
5028 return summary;
5029}
5030
5031void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5033 compiler::Label* deopt =
5034 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5035 const Register left = locs()->in(0).reg();
5036 const Register right = locs()->in(1).reg();
5037 ASSERT(locs()->out(0).IsPairLocation());
5038 const PairLocation* pair = locs()->out(0).AsPairLocation();
5039 const Register result_div = pair->At(0).reg();
5040 const Register result_mod = pair->At(1).reg();
5041 if (RangeUtils::CanBeZero(divisor_range())) {
5042 // Handle divide by zero in runtime.
5043 __ CompareObjectRegisters(right, ZR);
5044 __ b(deopt, EQ);
5045 }
5046
5047 __ SmiUntag(result_mod, left);
5048 __ SmiUntag(TMP, right);
5049
5050 // Check the corner case of dividing 'MIN_SMI' by -1, in which
5051 // case we cannot tag the result.
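  // The only quotient that cannot be re-tagged is MIN_SMI / -1, which equals
  // 2^62 (2^30 with compressed pointers), so a single equality check suffices.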
5052#if !defined(DART_COMPRESSED_POINTERS)
5053 __ sdiv(result_div, result_mod, TMP);
5054 __ CompareImmediate(result_div, 0x4000000000000000);
5055#else
5056 __ sdivw(result_div, result_mod, TMP);
5057 __ CompareImmediate(result_div, 0x40000000, compiler::kFourBytes);
5058#endif
5059 __ b(deopt, EQ);
5060 // result_mod <- left - right * result_div.
5061 __ msub(result_mod, TMP, result_div, result_mod, compiler::kObjectBytes);
5062 __ SmiTag(result_div);
5063 __ SmiTag(result_mod);
5064 // Correct MOD result:
5065 // res = left % right;
5066 // if (res < 0) {
5067 // if (right < 0) {
5068 // res = res - right;
5069 // } else {
5070 // res = res + right;
5071 // }
5072 // }
5073 compiler::Label done;
5074 __ CompareObjectRegisters(result_mod, ZR);
5075 __ b(&done, GE);
5076 // Result is negative, adjust it.
5077 if (RangeUtils::IsNegative(divisor_range())) {
5078 __ sub(result_mod, result_mod, compiler::Operand(right));
5079 } else if (RangeUtils::IsPositive(divisor_range())) {
5080 __ add(result_mod, result_mod, compiler::Operand(right));
5081 } else {
5082 __ CompareObjectRegisters(right, ZR);
5083 __ sub(TMP2, result_mod, compiler::Operand(right), compiler::kObjectBytes);
5084 __ add(TMP, result_mod, compiler::Operand(right), compiler::kObjectBytes);
5085 __ csel(result_mod, TMP, TMP2, GE);
5086 }
5087 __ Bind(&done);
5088}
5089
5090// Should be kept in sync with integers.cc Multiply64Hash
5091static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5092 const Register value,
5093 const Register result) {
5094 ASSERT(value != TMP2);
5095 ASSERT(result != TMP2);
5096 ASSERT(value != result);
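  // Multiply64Hash: form the 128-bit product value * 0x2d51, xor its high and
  // low halves, then fold the upper 32 bits into the lower 32 to produce a
  // 32-bit hash.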
5097 __ LoadImmediate(TMP2, compiler::Immediate(0x2d51));
5098 __ mul(result, value, TMP2);
5099 __ umulh(value, value, TMP2);
5100 __ eor(result, result, compiler::Operand(value));
5101 __ eor(result, result, compiler::Operand(result, LSR, 32));
5102}
5103
5104LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5105 bool opt) const {
5106 const intptr_t kNumInputs = 1;
5107 const intptr_t kNumTemps = 1;
5108 LocationSummary* summary = new (zone)
5109 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5111 summary->set_temp(0, Location::RequiresFpuRegister());
5112 summary->set_out(0, Location::RequiresRegister());
5113 return summary;
5114}
5115
5116void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5117 const VRegister value = locs()->in(0).fpu_reg();
5118 const VRegister temp_double = locs()->temp(0).fpu_reg();
5119 const Register result = locs()->out(0).reg();
5120
5121 compiler::Label done, hash_double;
5122 __ vmovrd(TMP, value, 0);
5123 __ AndImmediate(TMP, TMP, 0x7FF0000000000000LL);
5124 __ CompareImmediate(TMP, 0x7FF0000000000000LL);
5125 __ b(&hash_double, EQ); // is_infinity or nan
5126
5127 __ fcvtzsxd(TMP, value);
5128 __ scvtfdx(temp_double, TMP);
5129 __ fcmpd(temp_double, value);
5130 __ b(&hash_double, NE);
5131
5132 EmitHashIntegerCodeSequence(compiler, TMP, result);
5133 __ AndImmediate(result, result, 0x3fffffff);
5134 __ b(&done);
5135
5136 __ Bind(&hash_double);
5137 __ fmovrd(result, value);
5138 __ eor(result, result, compiler::Operand(result, LSR, 32));
5139 __ AndImmediate(result, result, compiler::target::kSmiMax);
5140
5141 __ Bind(&done);
5142}
5143
5144LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5145 bool opt) const {
5146 const intptr_t kNumInputs = 1;
5147 const intptr_t kNumTemps = 0;
5148 LocationSummary* summary = new (zone)
5149 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5150 summary->set_in(0, Location::RequiresRegister());
5151 summary->set_out(0, Location::RequiresRegister());
5152 return summary;
5153}
5154
5155void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5156 Register value = locs()->in(0).reg();
5157 Register result = locs()->out(0).reg();
5158
5159 if (smi_) {
5160 __ SmiUntag(TMP, value);
5161 } else {
5162 __ LoadFieldFromOffset(TMP, value, Mint::value_offset());
5163 }
5164
5165 EmitHashIntegerCodeSequence(compiler, TMP, result);
5166 __ ubfm(result, result, 63, 29); // SmiTag(result & 0x3fffffff)
5167}
5168
5169LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5171 // Branches don't produce a result.
5173 return comparison()->locs();
5174}
5175
5176void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5178}
5179
5180LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5181 bool opt) const {
5182 const intptr_t kNumInputs = 1;
5183 const bool need_mask_temp = IsBitTest();
5184 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5185 LocationSummary* summary = new (zone)
5186 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5187 summary->set_in(0, Location::RequiresRegister());
5188 if (!IsNullCheck()) {
5189 summary->set_temp(0, Location::RequiresRegister());
5190 if (need_mask_temp) {
5191 summary->set_temp(1, Location::RequiresRegister());
5192 }
5193 }
5194 return summary;
5195}
5196
5197void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5198 compiler::Label* deopt) {
5199 __ CompareObject(locs()->in(0).reg(), Object::null_object());
5201 Condition cond = IsDeoptIfNull() ? EQ : NE;
5202 __ b(deopt, cond);
5203}
5204
5205void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5206 intptr_t min,
5207 intptr_t max,
5208 intptr_t mask,
5209 compiler::Label* deopt) {
5210 Register biased_cid = locs()->temp(0).reg();
5211 __ AddImmediate(biased_cid, -min);
5212 __ CompareImmediate(biased_cid, max - min);
5213 __ b(deopt, HI);
5214
5215 Register bit_reg = locs()->temp(1).reg();
5216 __ LoadImmediate(bit_reg, 1);
5217 __ lslv(bit_reg, bit_reg, biased_cid);
5218 __ TestImmediate(bit_reg, mask);
5219 __ b(deopt, EQ);
5220}
5221
5222int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5223 int bias,
5224 intptr_t cid_start,
5225 intptr_t cid_end,
5226 bool is_last,
5227 compiler::Label* is_ok,
5228 compiler::Label* deopt,
5229 bool use_near_jump) {
5230 Register biased_cid = locs()->temp(0).reg();
5231 Condition no_match, match;
5232 if (cid_start == cid_end) {
5233 __ CompareImmediate(biased_cid, cid_start - bias);
5234 no_match = NE;
5235 match = EQ;
5236 } else {
5237 // For class ID ranges use a subtract followed by an unsigned
5238 // comparison to check both ends of the ranges with one comparison.
5239 __ AddImmediate(biased_cid, bias - cid_start);
5240 bias = cid_start;
5241 __ CompareImmediate(biased_cid, cid_end - cid_start);
5242 no_match = HI; // Unsigned higher.
5243 match = LS; // Unsigned lower or same.
5244 }
5245 if (is_last) {
5246 __ b(deopt, no_match);
5247 } else {
5248 __ b(is_ok, match);
5249 }
5250 return bias;
5251}
5252
5253LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5254 bool opt) const {
5255 const intptr_t kNumInputs = 1;
5256 const intptr_t kNumTemps = 0;
5257 LocationSummary* summary = new (zone)
5258 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5259 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5260 : Location::WritableRegister());
5261 return summary;
5262}
5263
5264void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5265 Register value = locs()->in(0).reg();
5266 compiler::Label* deopt =
5267 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5268 if (cids_.IsSingleCid()) {
5269 __ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
5270 __ b(deopt, NE);
5271 } else {
5272 __ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
5273 __ CompareImmediate(value, Smi::RawValue(cids_.cid_end - cids_.cid_start));
5274 __ b(deopt, HI); // Unsigned higher.
5275 }
5276}
5277
5278LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5279 bool opt) const {
5280 const intptr_t kNumInputs = 1;
5281 const intptr_t kNumTemps = 0;
5282 LocationSummary* summary = new (zone)
5283 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5284 summary->set_in(0, Location::RequiresRegister());
5285 return summary;
5286}
5287
5288void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5289 const Register value = locs()->in(0).reg();
5290 compiler::Label* deopt =
5291 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5292 __ BranchIfNotSmi(value, deopt);
5293}
5294
5295void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5296 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5297 compiler->AddSlowPathCode(slow_path);
5298
5299 Register value_reg = locs()->in(0).reg();
5300 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5301 // in order to be able to allocate it in a register.
5302 __ CompareObject(value_reg, Object::null_object());
5303 __ BranchIf(EQUAL, slow_path->entry_label());
5304}
5305
5306LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5307 bool opt) const {
5308 const intptr_t kNumInputs = 2;
5309 const intptr_t kNumTemps = 0;
5310 LocationSummary* locs = new (zone)
5311 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5312 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
5313 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
5314 return locs;
5315}
5316
5317void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5318 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5319 compiler::Label* deopt =
5320 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5321
5322 Location length_loc = locs()->in(kLengthPos);
5323 Location index_loc = locs()->in(kIndexPos);
5324
5325 const intptr_t index_cid = index()->Type()->ToCid();
5326 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5327 // TODO(srdjan): remove this code once failures are fixed.
5328 if ((Smi::Cast(length_loc.constant()).Value() >
5329 Smi::Cast(index_loc.constant()).Value()) &&
5330 (Smi::Cast(index_loc.constant()).Value() >= 0)) {
5331 // This CheckArrayBoundInstr should have been eliminated.
5332 return;
5333 }
5334 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5335 Smi::Cast(index_loc.constant()).Value()) ||
5336 (Smi::Cast(index_loc.constant()).Value() < 0));
5337 // Unconditionally deoptimize for constant bounds checks because they
5338 // only occur when the index is out of bounds.
5339 __ b(deopt);
5340 return;
5341 }
5342
5343 if (index_loc.IsConstant()) {
5344 const Register length = length_loc.reg();
5345 const Smi& index = Smi::Cast(index_loc.constant());
5346 __ CompareObject(length, index);
5347 __ b(deopt, LS);
5348 } else if (length_loc.IsConstant()) {
5349 const Smi& length = Smi::Cast(length_loc.constant());
5350 const Register index = index_loc.reg();
5351 if (index_cid != kSmiCid) {
5352 __ BranchIfNotSmi(index, deopt);
5353 }
5354 if (length.Value() == Smi::kMaxValue) {
5355 __ tst(index, compiler::Operand(index), compiler::kObjectBytes);
5356 __ b(deopt, MI);
5357 } else {
5358 __ CompareObject(index, length);
5359 __ b(deopt, CS);
5360 }
5361 } else {
5362 const Register length = length_loc.reg();
5363 const Register index = index_loc.reg();
5364 if (index_cid != kSmiCid) {
5365 __ BranchIfNotSmi(index, deopt);
5366 }
5367 __ CompareObjectRegisters(index, length);
5368 __ b(deopt, CS);
5369 }
5370}
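// Editor's note: illustrative only; the helper is mine. The CS ("carry set",
// i.e. unsigned greater-or-equal) deopt branches above implement
// "0 <= index < length" with a single unsigned comparison, since a negative
// index reads as a huge unsigned value:
#include <cstdint>
static bool IndexInBounds(int64_t index, int64_t length) {
  return static_cast<uint64_t>(index) < static_cast<uint64_t>(length);
}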
5371
5372LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5373 bool opt) const {
5374 const intptr_t kNumInputs = 1;
5375 const intptr_t kNumTemps = 0;
5376 LocationSummary* locs = new (zone) LocationSummary(
5377 zone, kNumInputs, kNumTemps,
5381 return locs;
5382}
5383
5384void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5385 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5386 compiler->AddSlowPathCode(slow_path);
5387 __ ldr(TMP,
5388 compiler::FieldAddress(locs()->in(0).reg(),
5391 // In the first byte.
5393 __ tbnz(slow_path->entry_label(), TMP,
5395}
5396
5397class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
5398 public:
5399 Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
5400 Register divisor,
5401 Range* divisor_range,
5402 Register tmp,
5403 Register out)
5404 : ThrowErrorSlowPathCode(instruction,
5405 kIntegerDivisionByZeroExceptionRuntimeEntry),
5406 is_mod_(instruction->op_kind() == Token::kMOD),
5407 divisor_(divisor),
5408 divisor_range_(divisor_range),
5409 tmp_(tmp),
5410 out_(out),
5411 adjust_sign_label_() {}
5412
5413 void EmitNativeCode(FlowGraphCompiler* compiler) override {
5414 // Handle modulo/division by zero, if needed. Use superclass code.
5415 if (has_divide_by_zero()) {
5416 ThrowErrorSlowPathCode::EmitNativeCode(compiler);
5417 } else {
5418 __ Bind(entry_label()); // not used, but keeps destructor happy
5419 if (compiler::Assembler::EmittingComments()) {
5420 __ Comment("slow path %s operation (no throw)", name());
5421 }
5422 }
5423 // Adjust modulo for negative sign, optimized for known ranges.
5424 // if (divisor < 0)
5425 // out -= divisor;
5426 // else
5427 // out += divisor;
5428 if (has_adjust_sign()) {
5429 __ Bind(adjust_sign_label());
5430 if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
5431 // General case.
5432 __ CompareRegisters(divisor_, ZR);
5433 __ sub(tmp_, out_, compiler::Operand(divisor_));
5434 __ add(out_, out_, compiler::Operand(divisor_));
5435 __ csel(out_, tmp_, out_, LT);
5436 } else if (divisor_range_->IsPositive()) {
5437 // Always positive.
5438 __ add(out_, out_, compiler::Operand(divisor_));
5439 } else {
5440 // Always negative.
5441 __ sub(out_, out_, compiler::Operand(divisor_));
5442 }
5443 __ b(exit_label());
5444 }
5445 }
5446
5447 const char* name() override { return "int64 divide"; }
5448
5449 bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
5450
5451 bool has_adjust_sign() { return is_mod_; }
5452
5453 bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
5454
5455 compiler::Label* adjust_sign_label() {
5456 ASSERT(has_adjust_sign());
5457 return &adjust_sign_label_;
5458 }
5459
5460 private:
5461 bool is_mod_;
5462 Register divisor_;
5463 Range* divisor_range_;
5464 Register tmp_;
5465 Register out_;
5466 compiler::Label adjust_sign_label_;
5467};
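// Editor's note: illustrative sketch, not part of the original file; the
// helper name is mine. The adjust_sign path exists because Dart's % always
// yields a non-negative result, while sdiv/msub produce a remainder with the
// sign of the dividend. A scalar model of the fixup (ignoring the zero and
// INT64_MIN corner cases the VM handles separately):
#include <cstdint>
static int64_t DartStyleMod(int64_t left, int64_t right) {
  int64_t out = left % right;             // sdiv + msub
  if (out < 0) {                          // CompareRegisters(out, ZR) ... LT
    out += (right < 0) ? -right : right;  // sub or add the divisor, per sign
  }
  return out;                             // e.g. -7 % 3 becomes 2
}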
5468
5469static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
5470 BinaryInt64OpInstr* instruction,
5471 Token::Kind op_kind,
5472 Register left,
5473 Register right,
5474 Register tmp,
5475 Register out) {
5476 ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
5477
5478 // Special case 64-bit div/mod by compile-time constant. Note that various
5479 // special constants (such as powers of two) should have been optimized
5480 // earlier in the pipeline. Div or mod by zero falls into general code
5481 // to implement the exception.
5482 if (FLAG_optimization_level <= 2) {
5483 // We only consider magic operations under O3.
5484 } else if (auto c = instruction->right()->definition()->AsConstant()) {
5485 if (c->value().IsInteger()) {
5486 const int64_t divisor = Integer::Cast(c->value()).AsInt64Value();
5487 if (divisor <= -2 || divisor >= 2) {
5488 // For x DIV c or x MOD c: use magic operations.
5489 compiler::Label pos;
5490 int64_t magic = 0;
5491 int64_t shift = 0;
5492 Utils::CalculateMagicAndShiftForDivRem(divisor, &magic, &shift);
5493 // Compute tmp = high(magic * numerator).
5494 __ LoadImmediate(TMP2, magic);
5495 __ smulh(TMP2, TMP2, left);
5496 // Compute tmp +/-= numerator.
5497 if (divisor > 0 && magic < 0) {
5498 __ add(TMP2, TMP2, compiler::Operand(left));
5499 } else if (divisor < 0 && magic > 0) {
5500 __ sub(TMP2, TMP2, compiler::Operand(left));
5501 }
5502 // Shift if needed.
5503 if (shift != 0) {
5504 __ add(TMP2, ZR, compiler::Operand(TMP2, ASR, shift));
5505 }
5506 // Finalize DIV or MOD.
5507 if (op_kind == Token::kTRUNCDIV) {
5508 __ sub(out, TMP2, compiler::Operand(TMP2, ASR, 63));
5509 } else {
5510 __ sub(TMP2, TMP2, compiler::Operand(TMP2, ASR, 63));
5511 __ LoadImmediate(TMP, divisor);
5512 __ msub(out, TMP2, TMP, left);
5513 // Compensate for Dart's Euclidean view of MOD.
5514 __ CompareRegisters(out, ZR);
5515 if (divisor > 0) {
5516 __ add(TMP2, out, compiler::Operand(TMP));
5517 } else {
5518 __ sub(TMP2, out, compiler::Operand(TMP));
5519 }
5520 __ csel(out, TMP2, out, LT);
5521 }
5522 return;
5523 }
5524 }
5525 }
5526
5527 // Prepare a slow path.
5528 Range* right_range = instruction->right()->definition()->range();
5529 Int64DivideSlowPath* slow_path =
5530 new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
5531
5532 // Handle modulo/division by zero exception on slow path.
5533 if (slow_path->has_divide_by_zero()) {
5534 __ cbz(slow_path->entry_label(), right);
5535 }
5536
5537 // Perform actual operation
5538 // out = left % right
5539 // or
5540 // out = left / right.
5541 if (op_kind == Token::kMOD) {
5542 __ sdiv(tmp, left, right);
5543 __ msub(out, tmp, right, left);
5544 // For the % operator, the sdiv instruction does not
5545 // quite do what we want. Adjust for sign on slow path.
5546 __ CompareRegisters(out, ZR);
5547 __ b(slow_path->adjust_sign_label(), LT);
5548 } else {
5549 __ sdiv(out, left, right);
5550 }
5551
5552 if (slow_path->is_needed()) {
5553 __ Bind(slow_path->exit_label());
5554 compiler->AddSlowPathCode(slow_path);
5555 }
5556}
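// Editor's note: sketch of the constant-divisor fast path above, not VM code;
// the helper is mine and assumes a compiler with __int128 so the smulh
// high-half multiply can be modeled directly. It mirrors the classic
// magic-number scheme fed by the precomputed magic/shift pair:
#include <cstdint>
static int64_t MagicTruncDiv(int64_t n, int64_t magic, int64_t shift,
                             int64_t divisor) {
  int64_t q = static_cast<int64_t>((static_cast<__int128>(magic) * n) >> 64);
  if (divisor > 0 && magic < 0) q += n;  // add(TMP2, TMP2, left)
  if (divisor < 0 && magic > 0) q -= n;  // sub(TMP2, TMP2, left)
  q >>= shift;                           // ASR by shift (skipped when 0)
  return q + (q < 0 ? 1 : 0);            // sub(out, TMP2, TMP2 ASR 63)
}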
5557
5558LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5559 bool opt) const {
5560 switch (op_kind()) {
5561 case Token::kMOD:
5562 case Token::kTRUNCDIV: {
5563 const intptr_t kNumInputs = 2;
5564 const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
5565 LocationSummary* summary = new (zone) LocationSummary(
5566 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5567 summary->set_in(0, Location::RequiresRegister());
5568 summary->set_in(1, Location::RequiresRegister());
5569 summary->set_out(0, Location::RequiresRegister());
5570 if (kNumTemps == 1) {
5571 summary->set_temp(0, Location::RequiresRegister());
5572 }
5573 return summary;
5574 }
5575 default: {
5576 const intptr_t kNumInputs = 2;
5577 const intptr_t kNumTemps = 0;
5578 LocationSummary* summary = new (zone) LocationSummary(
5579 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5580 summary->set_in(0, Location::RequiresRegister());
5581 summary->set_in(1, LocationRegisterOrConstant(right()));
5582 summary->set_out(0, Location::RequiresRegister());
5583 return summary;
5584 }
5585 }
5586}
5587
5588void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5589 ASSERT(!can_overflow());
5591
5592 const Register left = locs()->in(0).reg();
5593 const Location right = locs()->in(1);
5594 const Register out = locs()->out(0).reg();
5595
5596 if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
5597 Register tmp =
5598 (op_kind() == Token::kMOD) ? locs()->temp(0).reg() : kNoRegister;
5599 EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
5600 out);
5601 return;
5602 } else if (op_kind() == Token::kMUL) {
5603 Register r = TMP;
5604 if (right.IsConstant()) {
5605 int64_t value;
5606 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5607 RELEASE_ASSERT(ok);
5608 __ LoadImmediate(r, value);
5609 } else {
5610 r = right.reg();
5611 }
5612 __ mul(out, left, r);
5613 return;
5614 }
5615
5616 if (right.IsConstant()) {
5617 int64_t value;
5618 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
5619 RELEASE_ASSERT(ok);
5620 switch (op_kind()) {
5621 case Token::kADD:
5622 __ AddImmediate(out, left, value);
5623 break;
5624 case Token::kSUB:
5625 __ AddImmediate(out, left, -value);
5626 break;
5627 case Token::kBIT_AND:
5628 __ AndImmediate(out, left, value);
5629 break;
5630 case Token::kBIT_OR:
5631 __ OrImmediate(out, left, value);
5632 break;
5633 case Token::kBIT_XOR:
5634 __ XorImmediate(out, left, value);
5635 break;
5636 default:
5637 UNREACHABLE();
5638 }
5639 } else {
5640 compiler::Operand r = compiler::Operand(right.reg());
5641 switch (op_kind()) {
5642 case Token::kADD:
5643 __ add(out, left, r);
5644 break;
5645 case Token::kSUB:
5646 __ sub(out, left, r);
5647 break;
5648 case Token::kBIT_AND:
5649 __ and_(out, left, r);
5650 break;
5651 case Token::kBIT_OR:
5652 __ orr(out, left, r);
5653 break;
5654 case Token::kBIT_XOR:
5655 __ eor(out, left, r);
5656 break;
5657 default:
5658 UNREACHABLE();
5659 }
5660 }
5661}
5662
5663static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5664 Token::Kind op_kind,
5665 Register out,
5666 Register left,
5667 const Object& right) {
5668 const int64_t shift = Integer::Cast(right).AsInt64Value();
5669 ASSERT(shift >= 0);
5670 switch (op_kind) {
5671 case Token::kSHR: {
5672 __ AsrImmediate(out, left,
5673 Utils::Minimum<int64_t>(shift, kBitsPerWord - 1));
5674 break;
5675 }
5676 case Token::kUSHR: {
5677 ASSERT(shift < 64);
5678 __ LsrImmediate(out, left, shift);
5679 break;
5680 }
5681 case Token::kSHL: {
5682 ASSERT(shift < 64);
5683 __ LslImmediate(out, left, shift);
5684 break;
5685 }
5686 default:
5687 UNREACHABLE();
5688 }
5689}
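// Editor's note: illustrative only; the helper is mine. kSHR can clamp the
// immediate to 63 because an arithmetic shift right by the register width or
// more just leaves copies of the sign bit (e.g. -8 >> 63 == -1), whereas
// kUSHR and kSHL assert shift < 64; larger or negative counts are handled by
// the range checks and slow paths in the callers below. Assuming the usual
// arithmetic behaviour of >> on signed values:
#include <cstdint>
static int64_t AsrClamped(int64_t left, int64_t shift) {
  return left >> (shift < 63 ? shift : 63);  // AsrImmediate(out, left, min(shift, 63))
}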
5690
5691static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
5692 Token::Kind op_kind,
5693 Register out,
5694 Register left,
5695 Register right) {
5696 switch (op_kind) {
5697 case Token::kSHR: {
5698 __ asrv(out, left, right);
5699 break;
5700 }
5701 case Token::kUSHR: {
5702 __ lsrv(out, left, right);
5703 break;
5704 }
5705 case Token::kSHL: {
5706 __ lslv(out, left, right);
5707 break;
5708 }
5709 default:
5710 UNREACHABLE();
5711 }
5712}
5713
5714static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
5715 Token::Kind op_kind,
5716 Register out,
5717 Register left,
5718 const Object& right) {
5719 const int64_t shift = Integer::Cast(right).AsInt64Value();
5720 ASSERT(shift >= 0);
5721 if (shift >= 32) {
5722 __ LoadImmediate(out, 0);
5723 } else {
5724 switch (op_kind) {
5725 case Token::kSHR:
5726 case Token::kUSHR:
5727 __ LsrImmediate(out, left, shift, compiler::kFourBytes);
5728 break;
5729 case Token::kSHL:
5730 __ LslImmediate(out, left, shift, compiler::kFourBytes);
5731 break;
5732 default:
5733 UNREACHABLE();
5734 }
5735 }
5736}
5737
5738static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
5739 Token::Kind op_kind,
5740 Register out,
5741 Register left,
5742 Register right) {
5743 switch (op_kind) {
5744 case Token::kSHR:
5745 case Token::kUSHR:
5746 __ lsrvw(out, left, right);
5747 break;
5748 case Token::kSHL:
5749 __ lslvw(out, left, right);
5750 break;
5751 default:
5752 UNREACHABLE();
5753 }
5754}
5755
5756class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
5757 public:
5758 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
5759 : ThrowErrorSlowPathCode(instruction,
5760 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5761
5762 const char* name() override { return "int64 shift"; }
5763
5764 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5765 const Register left = instruction()->locs()->in(0).reg();
5766 const Register right = instruction()->locs()->in(1).reg();
5767 const Register out = instruction()->locs()->out(0).reg();
5768 ASSERT((out != left) && (out != right));
5769
5770 compiler::Label throw_error;
5771 __ tbnz(&throw_error, right, kBitsPerWord - 1);
5772
5773 switch (instruction()->AsShiftInt64Op()->op_kind()) {
5774 case Token::kSHR:
5775 __ AsrImmediate(out, left, kBitsPerWord - 1);
5776 break;
5777 case Token::kUSHR:
5778 case Token::kSHL:
5779 __ mov(out, ZR);
5780 break;
5781 default:
5782 UNREACHABLE();
5783 }
5784 __ b(exit_label());
5785
5786 __ Bind(&throw_error);
5787
5788 // Can't pass unboxed int64 value directly to runtime call, as all
5789 // arguments are expected to be tagged (boxed).
5790 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5791 // TODO(dartbug.com/33549): Clean this up when unboxed values
5792 // could be passed as arguments.
5793 __ str(right,
5794 compiler::Address(
5796 }
5797};
5798
5799LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
5800 bool opt) const {
5801 const intptr_t kNumInputs = 2;
5802 const intptr_t kNumTemps = 0;
5803 LocationSummary* summary = new (zone) LocationSummary(
5804 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5805 summary->set_in(0, Location::RequiresRegister());
5806 summary->set_in(1, RangeUtils::IsPositive(shift_range())
5807 ? LocationRegisterOrConstant(right())
5808 : Location::RequiresRegister());
5809 summary->set_out(0, Location::RequiresRegister());
5810 return summary;
5811}
5812
5813void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5814 const Register left = locs()->in(0).reg();
5815 const Register out = locs()->out(0).reg();
5816 ASSERT(!can_overflow());
5817
5818 if (locs()->in(1).IsConstant()) {
5819 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
5820 locs()->in(1).constant());
5821 } else {
5822 // Code for a variable shift amount (or constant that throws).
5823 Register shift = locs()->in(1).reg();
5824
5825 // Jump to a slow path if shift is larger than 63 or less than 0.
5826 ShiftInt64OpSlowPath* slow_path = nullptr;
5827 if (!IsShiftCountInRange()) {
5828 slow_path = new (Z) ShiftInt64OpSlowPath(this);
5829 compiler->AddSlowPathCode(slow_path);
5830 __ CompareImmediate(shift, kShiftCountLimit);
5831 __ b(slow_path->entry_label(), HI);
5832 }
5833
5834 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
5835
5836 if (slow_path != nullptr) {
5837 __ Bind(slow_path->exit_label());
5838 }
5839 }
5840}
5841
5842 LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
5843 Zone* zone,
5844 bool opt) const {
5845 const intptr_t kNumInputs = 2;
5846 const intptr_t kNumTemps = 0;
5847 LocationSummary* summary = new (zone)
5848 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5849 summary->set_in(0, Location::RequiresRegister());
5850 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
5851 summary->set_out(0, Location::RequiresRegister());
5852 return summary;
5853}
5854
5855 void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5856 const Register left = locs()->in(0).reg();
5857 const Register out = locs()->out(0).reg();
5858 ASSERT(!can_overflow());
5859
5860 if (locs()->in(1).IsConstant()) {
5861 EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
5862 locs()->in(1).constant());
5863 } else {
5864 // Code for a variable shift amount.
5865 Register shift = locs()->in(1).reg();
5866
5867 // Untag shift count.
5868 __ SmiUntag(TMP, shift);
5869 shift = TMP;
5870
5871 // Deopt if shift is larger than 63 or less than 0 (or not a smi).
5872 if (!IsShiftCountInRange()) {
5874 compiler::Label* deopt =
5875 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5876
5877 __ CompareImmediate(shift, kShiftCountLimit);
5878 __ b(deopt, HI);
5879 }
5880
5881 EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
5882 }
5883}
5884
5885class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
5886 public:
5887 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
5888 : ThrowErrorSlowPathCode(instruction,
5889 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5890
5891 const char* name() override { return "uint32 shift"; }
5892
5893 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5894 const Register right = instruction()->locs()->in(1).reg();
5895
5896 // Can't pass unboxed int64 value directly to runtime call, as all
5897 // arguments are expected to be tagged (boxed).
5898 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5899 // TODO(dartbug.com/33549): Clean this up when unboxed values
5900 // could be passed as arguments.
5901 __ str(right,
5902 compiler::Address(
5904 }
5905};
5906
5907LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
5908 bool opt) const {
5909 const intptr_t kNumInputs = 2;
5910 const intptr_t kNumTemps = 0;
5911 LocationSummary* summary = new (zone) LocationSummary(
5912 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5913 summary->set_in(0, Location::RequiresRegister());
5914 summary->set_in(1, RangeUtils::IsPositive(shift_range())
5915 ? LocationRegisterOrConstant(right())
5916 : Location::RequiresRegister());
5917 summary->set_out(0, Location::RequiresRegister());
5918 return summary;
5919}
5920
5921void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5922 Register left = locs()->in(0).reg();
5923 Register out = locs()->out(0).reg();
5924
5925 if (locs()->in(1).IsConstant()) {
5926 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
5927 locs()->in(1).constant());
5928 } else {
5929 // Code for a variable shift amount (or constant that throws).
5930 const Register right = locs()->in(1).reg();
5931 const bool shift_count_in_range =
5932 IsShiftCountInRange(kUint32ShiftCountLimit);
5933
5934 // Jump to a slow path if shift count is negative.
5935 if (!shift_count_in_range) {
5936 ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
5937 compiler->AddSlowPathCode(slow_path);
5938
5939 __ tbnz(slow_path->entry_label(), right, kBitsPerWord - 1);
5940 }
5941
5942 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
5943
5944 if (!shift_count_in_range) {
5945 // If shift value is > 31, return zero.
5946 __ CompareImmediate(right, 31);
5947 __ csel(out, out, ZR, LE);
5948 }
5949 }
5950}
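// Editor's note: illustrative sketch; the helper name is mine. Dart requires
// a uint32 shift by 32 or more to produce zero, but the lslvw/lsrvw
// instructions use only the low five bits of the count, so the
// CompareImmediate + csel above zeroes the result for counts above 31
// (negative counts were already diverted to the slow path):
#include <cstdint>
static uint32_t Uint32ShlDartStyle(uint32_t left, int64_t count) {
  const uint32_t hw = left << (count & 31);  // what lslvw computes
  return (count <= 31) ? hw : 0u;            // csel(out, out, ZR, LE)
}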
5951
5952 LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
5953 Zone* zone,
5954 bool opt) const {
5955 const intptr_t kNumInputs = 2;
5956 const intptr_t kNumTemps = 0;
5957 LocationSummary* summary = new (zone)
5958 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5959 summary->set_in(0, Location::RequiresRegister());
5960 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
5961 summary->set_out(0, Location::RequiresRegister());
5962 return summary;
5963}
5964
5965 void SpeculativeShiftUint32OpInstr::EmitNativeCode(
5966 FlowGraphCompiler* compiler) {
5967 Register left = locs()->in(0).reg();
5968 Register out = locs()->out(0).reg();
5969
5970 if (locs()->in(1).IsConstant()) {
5971 EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
5972 locs()->in(1).constant());
5973 } else {
5974 Register right = locs()->in(1).reg();
5975 const bool shift_count_in_range =
5976 IsShiftCountInRange(kUint32ShiftCountLimit);
5977
5978 __ SmiUntag(TMP, right);
5979 right = TMP;
5980
5981 // Jump to a slow path if shift count is negative.
5982 if (!shift_count_in_range) {
5983 // Deoptimize if shift count is negative.
5985 compiler::Label* deopt =
5986 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5987
5988 __ tbnz(deopt, right, compiler::target::kSmiBits + 1);
5989 }
5990
5991 EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
5992
5993 if (!shift_count_in_range) {
5994 // If shift value is > 31, return zero.
5995 __ CompareImmediate(right, 31, compiler::kObjectBytes);
5996 __ csel(out, out, ZR, LE);
5997 }
5998 }
5999}
6000
6001LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6002 bool opt) const {
6003 const intptr_t kNumInputs = 1;
6004 const intptr_t kNumTemps = 0;
6005 LocationSummary* summary = new (zone)
6006 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6007 summary->set_in(0, Location::RequiresRegister());
6008 summary->set_out(0, Location::RequiresRegister());
6009 return summary;
6010}
6011
6012void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6013 const Register left = locs()->in(0).reg();
6014 const Register out = locs()->out(0).reg();
6015 switch (op_kind()) {
6016 case Token::kBIT_NOT:
6017 __ mvn_(out, left);
6018 break;
6019 case Token::kNEGATE:
6020 __ sub(out, ZR, compiler::Operand(left));
6021 break;
6022 default:
6023 UNREACHABLE();
6024 }
6025}
6026
6027LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6028 bool opt) const {
6029 const intptr_t kNumInputs = 2;
6030 const intptr_t kNumTemps = 0;
6031 LocationSummary* summary = new (zone)
6032 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6033 summary->set_in(0, Location::RequiresRegister());
6034 summary->set_in(1, Location::RequiresRegister());
6035 summary->set_out(0, Location::RequiresRegister());
6036 return summary;
6037}
6038
6039void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6040 Register left = locs()->in(0).reg();
6041 Register right = locs()->in(1).reg();
6042 compiler::Operand r = compiler::Operand(right);
6043 Register out = locs()->out(0).reg();
6044 switch (op_kind()) {
6045 case Token::kBIT_AND:
6046 __ and_(out, left, r);
6047 break;
6048 case Token::kBIT_OR:
6049 __ orr(out, left, r);
6050 break;
6051 case Token::kBIT_XOR:
6052 __ eor(out, left, r);
6053 break;
6054 case Token::kADD:
6055 __ addw(out, left, r);
6056 break;
6057 case Token::kSUB:
6058 __ subw(out, left, r);
6059 break;
6060 case Token::kMUL:
6061 __ mulw(out, left, right);
6062 break;
6063 default:
6064 UNREACHABLE();
6065 }
6066}
6067
6068LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6069 bool opt) const {
6070 const intptr_t kNumInputs = 1;
6071 const intptr_t kNumTemps = 0;
6072 LocationSummary* summary = new (zone)
6073 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6074 summary->set_in(0, Location::RequiresRegister());
6075 summary->set_out(0, Location::RequiresRegister());
6076 return summary;
6077}
6078
6079void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6080 Register left = locs()->in(0).reg();
6081 Register out = locs()->out(0).reg();
6082
6083 ASSERT(op_kind() == Token::kBIT_NOT);
6084 __ mvnw(out, left);
6085}
6086
6087DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
6088
6089LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6090 bool opt) const {
6091 const intptr_t kNumInputs = 1;
6092 const intptr_t kNumTemps = 0;
6093 LocationSummary* summary = new (zone)
6094 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6095 if (from() == kUntagged || to() == kUntagged) {
6096 ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
6097 (from() == kUnboxedIntPtr && to() == kUntagged));
6099 } else if (from() == kUnboxedInt64) {
6100 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6101 } else if (to() == kUnboxedInt64) {
6102 ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
6103 } else {
6104 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6105 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6106 }
6107 summary->set_in(0, Location::RequiresRegister());
6108 if (CanDeoptimize()) {
6109 summary->set_out(0, Location::RequiresRegister());
6110 } else {
6111 summary->set_out(0, Location::SameAsFirstInput());
6112 }
6113 return summary;
6114}
6115
6116void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6117 ASSERT(from() != to()); // We don't convert from a representation to itself.
6118
6119 const bool is_nop_conversion =
6120 (from() == kUntagged && to() == kUnboxedIntPtr) ||
6121 (from() == kUnboxedIntPtr && to() == kUntagged);
6122 if (is_nop_conversion) {
6123 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6124 return;
6125 }
6126
6127 const Register value = locs()->in(0).reg();
6128 const Register out = locs()->out(0).reg();
6129 compiler::Label* deopt =
6130 !CanDeoptimize()
6131 ? nullptr
6132 : compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6133 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6134 if (CanDeoptimize()) {
6135 __ tbnz(deopt, value,
6136 31); // If sign bit is set it won't fit in a uint32.
6137 }
6138 if (out != value) {
6139 __ mov(out, value); // For positive values the bits are the same.
6140 }
6141 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6142 if (CanDeoptimize()) {
6143 __ tbnz(deopt, value,
6144 31); // If high bit is set it won't fit in an int32.
6145 }
6146 if (out != value) {
6147 __ mov(out, value); // For 31 bit values the bits are the same.
6148 }
6149 } else if (from() == kUnboxedInt64) {
6150 if (to() == kUnboxedInt32) {
6151 if (is_truncating() || out != value) {
6152 __ sxtw(out, value); // Signed extension 64->32.
6153 }
6154 } else {
6155 ASSERT(to() == kUnboxedUint32);
6156 if (is_truncating() || out != value) {
6157 __ uxtw(out, value); // Unsigned extension 64->32.
6158 }
6159 }
6160 if (CanDeoptimize()) {
6161 ASSERT(to() == kUnboxedInt32);
6162 __ cmp(out, compiler::Operand(value));
6163 __ b(deopt, NE); // Value cannot be held in Int32, deopt.
6164 }
6165 } else if (to() == kUnboxedInt64) {
6166 if (from() == kUnboxedUint32) {
6167 __ uxtw(out, value);
6168 } else {
6169 ASSERT(from() == kUnboxedInt32);
6170 __ sxtw(out, value); // Signed extension 32->64.
6171 }
6172 } else {
6173 UNREACHABLE();
6174 }
6175}
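// Editor's note: illustrative only; the helper is mine. The deoptimizing
// int64 -> int32 path above sign-extends the low 32 bits (sxtw) and compares
// the result with the original value; equality means the value fits. The
// narrowing cast is the modular truncation C++20 guarantees, which is what
// sxtw operates on:
#include <cstdint>
static bool FitsInInt32(int64_t value) {
  return static_cast<int64_t>(static_cast<int32_t>(value)) == value;
}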
6176
6177LocationSummary* BitCastInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6178 LocationSummary* summary =
6179 new (zone) LocationSummary(zone, /*num_inputs=*/InputCount(),
6180 /*num_temps=*/0, LocationSummary::kNoCall);
6181 switch (from()) {
6182 case kUnboxedInt32:
6183 case kUnboxedInt64:
6184 summary->set_in(0, Location::RequiresRegister());
6185 break;
6186 case kUnboxedFloat:
6187 case kUnboxedDouble:
6188 summary->set_in(0, Location::RequiresFpuRegister());
6189 break;
6190 default:
6191 UNREACHABLE();
6192 }
6193
6194 switch (to()) {
6195 case kUnboxedInt32:
6196 case kUnboxedInt64:
6197 summary->set_out(0, Location::RequiresRegister());
6198 break;
6199 case kUnboxedFloat:
6200 case kUnboxedDouble:
6201 summary->set_out(0, Location::RequiresFpuRegister());
6202 break;
6203 default:
6204 UNREACHABLE();
6205 }
6206 return summary;
6207}
6208
6209void BitCastInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6210 switch (from()) {
6211 case kUnboxedInt32: {
6212 ASSERT(to() == kUnboxedFloat);
6213 const Register from_reg = locs()->in(0).reg();
6214 const FpuRegister to_reg = locs()->out(0).fpu_reg();
6215 __ fmovsr(to_reg, from_reg);
6216 break;
6217 }
6218 case kUnboxedFloat: {
6219 ASSERT(to() == kUnboxedInt32);
6220 const FpuRegister from_reg = locs()->in(0).fpu_reg();
6221 const Register to_reg = locs()->out(0).reg();
6222 __ fmovrs(to_reg, from_reg);
6223 break;
6224 }
6225 case kUnboxedInt64: {
6226 ASSERT(to() == kUnboxedDouble);
6227 const Register from_reg = locs()->in(0).reg();
6228 const FpuRegister to_reg = locs()->out(0).fpu_reg();
6229 __ fmovdr(to_reg, from_reg);
6230 break;
6231 }
6232 case kUnboxedDouble: {
6233 ASSERT(to() == kUnboxedInt64);
6234 const FpuRegister from_reg = locs()->in(0).fpu_reg();
6235 const Register to_reg = locs()->out(0).reg();
6236 __ fmovrd(to_reg, from_reg);
6237 break;
6238 }
6239 default:
6240 UNREACHABLE();
6241 }
6242}
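// Editor's note: illustrative only, assuming C++20 for std::bit_cast; the
// helper is mine. The fmov variants above move raw bits between
// general-purpose and FP registers without any numeric conversion, i.e. the
// effect of a bit cast:
#include <bit>
#include <cstdint>
static uint64_t DoubleBits(double value) {
  return std::bit_cast<uint64_t>(value);  // what fmovrd reads out of the V register
}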
6243
6244LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6245 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6246}
6247
6248void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6249 __ Stop(message());
6250}
6251
6252void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6253 BlockEntryInstr* entry = normal_entry();
6254 if (entry != nullptr) {
6255 if (!compiler->CanFallThroughTo(entry)) {
6256 FATAL("Checked function entry must have no offset");
6257 }
6258 } else {
6259 entry = osr_entry();
6260 if (!compiler->CanFallThroughTo(entry)) {
6261 __ b(compiler->GetJumpLabel(entry));
6262 }
6263 }
6264}
6265
6266LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6267 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6268}
6269
6270void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6271 if (!compiler->is_optimizing()) {
6272 if (FLAG_reorder_basic_blocks) {
6273 compiler->EmitEdgeCounter(block()->preorder_number());
6274 }
6275 // Add a deoptimization descriptor for deoptimizing instructions that
6276 // may be inserted before this instruction.
6277 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
6278 InstructionSource());
6279 }
6280 if (HasParallelMove()) {
6282 }
6283
6284 // We can fall through if the successor is the next block in the list.
6285 // Otherwise, we need a jump.
6286 if (!compiler->CanFallThroughTo(successor())) {
6287 __ b(compiler->GetJumpLabel(successor()));
6288 }
6289}
6290
6291LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
6292 bool opt) const {
6293 const intptr_t kNumInputs = 1;
6294 const intptr_t kNumTemps = 2;
6295
6296 LocationSummary* summary = new (zone)
6297 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6298
6299 summary->set_in(0, Location::RequiresRegister());
6300 summary->set_temp(0, Location::RequiresRegister());
6301 summary->set_temp(1, Location::RequiresRegister());
6302
6303 return summary;
6304}
6305
6306void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6307 Register index_reg = locs()->in(0).reg();
6308 Register target_address_reg = locs()->temp(0).reg();
6309 Register offset_reg = locs()->temp(1).reg();
6310
6311 ASSERT(RequiredInputRepresentation(0) == kTagged);
6312 __ LoadObject(offset_reg, offsets_);
6313 const auto element_address = __ ElementAddressForRegIndex(
6314 /*is_external=*/false, kTypedDataInt32ArrayCid,
6315 /*index_scale=*/4,
6316 /*index_unboxed=*/false, offset_reg, index_reg, TMP);
6317 __ ldr(offset_reg, element_address, compiler::kFourBytes);
6318
6319 // Load code entry point.
6320 const intptr_t entry_offset = __ CodeSize();
6321 if (Utils::IsInt(21, -entry_offset)) {
6322 __ adr(target_address_reg, compiler::Immediate(-entry_offset));
6323 } else {
6324 __ adr(target_address_reg, compiler::Immediate(0));
6325 __ AddImmediate(target_address_reg, -entry_offset);
6326 }
6327
6328 __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));
6329
6330 // Jump to the absolute address.
6331 __ br(target_address_reg);
6332}
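// Editor's note: sketch of the address arithmetic above, not VM code; the
// helper and its parameters are mine. The jump table (offsets_) holds 32-bit
// offsets relative to the start of this code object's instructions, and the
// adr with -entry_offset recovers that start address from the current PC:
#include <cstdint>
static uint64_t IndirectGotoTarget(uint64_t pc_at_adr, int64_t entry_offset,
                                   int32_t table_offset) {
  // adr (pc - entry_offset), then add the offset loaded from the table.
  return pc_at_adr - entry_offset + table_offset;
}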
6333
6334LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
6335 bool opt) const {
6336 const intptr_t kNumInputs = 2;
6337 const intptr_t kNumTemps = 0;
6338 if (needs_number_check()) {
6339 LocationSummary* locs = new (zone)
6340 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6344 return locs;
6345 }
6346 LocationSummary* locs = new (zone)
6347 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6349 // Only one of the inputs can be a constant. Choose register if the first one
6350 // is a constant.
6351 locs->set_in(1, locs->in(0).IsConstant()
6355 return locs;
6356}
6357
6358Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
6359 FlowGraphCompiler* compiler,
6360 BranchLabels labels,
6361 Register reg,
6362 const Object& obj) {
6363 Condition orig_cond = (kind() == Token::kEQ_STRICT) ? EQ : NE;
6364 if (!needs_number_check() && compiler::target::IsSmi(obj) &&
6365 compiler::target::ToRawSmi(obj) == 0 &&
6366 CanUseCbzTbzForComparison(compiler, reg, orig_cond, labels)) {
6367 EmitCbzTbz(reg, compiler, orig_cond, labels, compiler::kObjectBytes);
6368 return kInvalidCondition;
6369 } else {
6370 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
6371 source(), deopt_id());
6372 }
6373}
6374
6375void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6376 compiler::Label is_true, is_false;
6377 BranchLabels labels = {&is_true, &is_false, &is_false};
6378 Condition true_condition = EmitComparisonCode(compiler, labels);
6379
6380 const Register result = this->locs()->out(0).reg();
6381 if (is_true.IsLinked() || is_false.IsLinked()) {
6382 if (true_condition != kInvalidCondition) {
6383 EmitBranchOnCondition(compiler, true_condition, labels);
6384 }
6385 compiler::Label done;
6386 __ Bind(&is_false);
6387 __ LoadObject(result, Bool::False());
6388 __ b(&done);
6389 __ Bind(&is_true);
6390 __ LoadObject(result, Bool::True());
6391 __ Bind(&done);
6392 } else {
6393 // If EmitComparisonCode did not use the labels and just returned
6394 // a condition we can avoid the branch and use conditional loads.
6395 ASSERT(true_condition != kInvalidCondition);
6396 __ LoadObject(TMP, Bool::True());
6397 __ LoadObject(TMP2, Bool::False());
6398 __ csel(result, TMP, TMP2, true_condition);
6399 }
6400}
6401
6402void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
6403 BranchInstr* branch) {
6404 BranchLabels labels = compiler->CreateBranchLabels(branch);
6405 Condition true_condition = EmitComparisonCode(compiler, labels);
6406 if (true_condition != kInvalidCondition) {
6407 EmitBranchOnCondition(compiler, true_condition, labels);
6408 }
6409}
6410
6411LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
6412 bool opt) const {
6415}
6416
6417void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6418 const Register input = locs()->in(0).reg();
6419 const Register result = locs()->out(0).reg();
6420 __ eori(
6421 result, input,
6423}
6424
6425LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
6426 bool opt) const {
6427 UNREACHABLE();
6428 return NULL;
6429}
6430
6431void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6432 UNREACHABLE();
6433}
6434
6435LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
6436 bool opt) const {
6437 UNREACHABLE();
6438 return NULL;
6439}
6440
6441void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6442 UNREACHABLE();
6443}
6444
6445LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
6446 bool opt) const {
6447 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
6448 const intptr_t kNumTemps = 0;
6449 LocationSummary* locs = new (zone)
6450 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6451 if (type_arguments() != nullptr) {
6454 }
6456 return locs;
6457}
6458
6459void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6460 if (type_arguments() != nullptr) {
6461 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
6462 if (type_usage_info != nullptr) {
6463 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
6464 type_arguments()->definition());
6465 }
6466 }
6467 const Code& stub = Code::ZoneHandle(
6468 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
6469 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6470 locs(), deopt_id(), env());
6471}
6472
6473void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6474#ifdef PRODUCT
6475 UNREACHABLE();
6476#else
6477 ASSERT(!compiler->is_optimizing());
6478 __ BranchLinkPatchable(StubCode::DebugStepCheck());
6479 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
6480 compiler->RecordSafepoint(locs());
6481#endif
6482}
6483
6484} // namespace dart
6485
6486#endif // defined(TARGET_ARCH_ARM64)
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition: DM.cpp:263
static void fail(const SkString &err)
Definition: DM.cpp:234
SkPoint pos
static bool are_equal(skiatest::Reporter *reporter, const SkMatrix &a, const SkMatrix &b)
Definition: MatrixTest.cpp:50
static int float_bits(float f)
Definition: MatrixTest.cpp:44
static bool ok(int result)
static size_t element_size(Layout layout, SkSLType type)
Vec2Value v2
#define __
#define UNREACHABLE()
Definition: assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
#define RELEASE_ASSERT(cond)
Definition: assert.h:327
#define Z
intptr_t num_context_variables() const
Definition: il.h:8392
Value * type_arguments() const
Definition: il.h:7436
const Class & cls() const
Definition: il.h:7435
intptr_t num_context_variables() const
Definition: il.h:7594
static intptr_t type_arguments_offset()
Definition: object.h:10928
static intptr_t InstanceSize()
Definition: object.h:10936
static constexpr bool IsValidLength(intptr_t len)
Definition: object.h:10932
static intptr_t length_offset()
Definition: object.h:10834
Value * dst_type() const
Definition: il.h:4423
Token::Kind op_kind() const
Definition: il.h:9038
Value * right() const
Definition: il.h:9036
Value * left() const
Definition: il.h:9035
bool can_overflow() const
Definition: il.h:9400
Value * right() const
Definition: il.h:9398
Token::Kind op_kind() const
Definition: il.h:9396
Value * left() const
Definition: il.h:9397
bool RightIsPowerOfTwoConstant() const
Definition: il.cc:2125
Range * right_range() const
Definition: il.h:9473
Representation to() const
Definition: il.h:11121
Representation from() const
Definition: il.h:11120
ParallelMoveInstr * parallel_move() const
Definition: il.h:1689
bool HasParallelMove() const
Definition: il.h:1691
static const Bool & False()
Definition: object.h:10799
static const Bool & True()
Definition: object.h:10797
static void Allocate(FlowGraphCompiler *compiler, Instruction *instruction, const Class &cls, Register result, Register temp)
Definition: il.cc:6309
Value * value() const
Definition: il.h:8528
Representation from_representation() const
Definition: il.h:8529
virtual bool ValueFitsSmi() const
Definition: il.cc:3253
ComparisonInstr * comparison() const
Definition: il.h:4021
static constexpr Register kSecondReturnReg
static const Register ArgumentRegisters[]
static constexpr FpuRegister kReturnFpuReg
static constexpr Register kFfiAnyNonAbiRegister
static constexpr Register kReturnReg
static constexpr Register kSecondNonArgumentRegister
const RuntimeEntry & TargetFunction() const
Definition: il.cc:1101
Value * index() const
Definition: il.h:10797
Value * length() const
Definition: il.h:10796
Value * value() const
Definition: il.h:10755
bool IsDeoptIfNull() const
Definition: il.cc:863
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(CheckClassInstr, TemplateInstruction, FIELD_LIST) private void EmitBitTest(FlowGraphCompiler *compiler, intptr_t min, intptr_t max, intptr_t mask, compiler::Label *deopt)
void EmitNullCheck(FlowGraphCompiler *compiler, compiler::Label *deopt)
bool IsNullCheck() const
Definition: il.h:10600
bool IsDeoptIfNotNull() const
Definition: il.cc:877
bool IsBitTest() const
Definition: il.cc:899
Value * right() const
Definition: il.h:8477
Value * left() const
Definition: il.h:8476
Value * value() const
Definition: il.h:10654
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:9923
intptr_t loop_depth() const
Definition: il.h:9906
bool in_loop() const
Definition: il.h:9904
virtual void EmitBranchCode(FlowGraphCompiler *compiler, BranchInstr *branch)
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
virtual Condition EmitComparisonCode(FlowGraphCompiler *compiler, BranchLabels labels)=0
const Object & value() const
Definition: il.h:4230
void EmitMoveToLocation(FlowGraphCompiler *compiler, const Location &destination, Register tmp=kNoRegister, intptr_t pair_index=0)
static intptr_t num_variables_offset()
Definition: object.h:7415
static intptr_t InstanceSize()
Definition: object.h:7448
Value * type_arguments() const
Definition: il.h:7845
virtual Value * num_elements() const
Definition: il.h:7846
virtual Representation representation() const
Definition: il.h:3501
friend class Value
Definition: il.h:2690
static constexpr intptr_t kNone
Definition: deopt_id.h:27
Value * value() const
Definition: il.h:9101
MethodRecognizer::Kind op_kind() const
Definition: il.h:9103
Value * value() const
Definition: il.h:10142
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10060
Value * value() const
Definition: il.h:10111
bool is_null_aware() const
Definition: il.h:5341
virtual Representation representation() const
Definition: il.h:10337
intptr_t index() const
Definition: il.h:10335
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
Definition: il.cc:7690
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr, VariadicDefinition, FIELD_LIST) private void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
Definition: il.cc:7478
intptr_t TargetAddressIndex() const
Definition: il.h:6100
static intptr_t guarded_cid_offset()
Definition: object.h:4669
bool is_nullable() const
Definition: object.cc:11770
@ kUnknownFixedLength
Definition: object.h:4728
@ kUnknownLengthOffset
Definition: object.h:4727
@ kNoFixedLength
Definition: object.h:4729
static intptr_t guarded_list_length_in_object_offset_offset()
Definition: object.h:4693
intptr_t guarded_cid() const
Definition: object.cc:11749
static intptr_t is_nullable_offset()
Definition: object.h:4766
static intptr_t guarded_list_length_offset()
Definition: object.h:4683
Value * value() const
Definition: il.h:10183
ParallelMoveInstr * parallel_move() const
Definition: il.h:3735
BlockEntryInstr * block() const
Definition: il.h:3710
bool HasParallelMove() const
Definition: il.h:3737
JoinEntryInstr * successor() const
Definition: il.h:3713
FunctionEntryInstr * normal_entry() const
Definition: il.h:2001
OsrEntryInstr * osr_entry() const
Definition: il.h:2007
const Field & field() const
Definition: il.h:6520
Value * value() const
Definition: il.h:6518
Value * value() const
Definition: il.h:9149
Value * value() const
Definition: il.h:9189
@ kGeneralized
Definition: object.h:2525
ComparisonInstr * comparison() const
Definition: il.h:5483
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:3807
const AbstractType & type() const
Definition: il.h:7284
intptr_t GetDeoptId() const
Definition: il.h:1409
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:1377
Environment * env() const
Definition: il.h:1215
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition: il.h:1213
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition: il.h:1202
virtual Representation representation() const
Definition: il.h:1260
bool CanDeoptimize() const
Definition: il.h:1079
friend class BlockEntryInstr
Definition: il.h:1403
LocationSummary * locs()
Definition: il.h:1192
InstructionSource source() const
Definition: il.h:1008
intptr_t deopt_id() const
Definition: il.h:993
const T * Cast() const
Definition: il.h:1186
static bool SlowPathSharingSupported(bool is_optimizing)
Definition: il.h:1368
Instruction * previous() const
Definition: il.h:1087
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
Value * value() const
Definition: il.h:9978
Value * value() const
Definition: il.h:10008
Value * value() const
Definition: il.h:11044
bool is_truncating() const
Definition: il.h:11048
Representation to() const
Definition: il.h:11047
Representation from() const
Definition: il.h:11046
const RuntimeEntry & TargetFunction() const
Definition: il.cc:7223
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10261
ObjectStore * object_store() const
Definition: isolate.h:510
static IsolateGroup * Current()
Definition: isolate.h:539
intptr_t TargetAddressIndex() const
Definition: il.h:6198
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition: il.cc:8191
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition: il.cc:8113
intptr_t index_scale() const
Definition: il.h:6895
Value * index() const
Definition: il.h:6893
bool can_pack_into_smi() const
Definition: il.h:6902
intptr_t element_count() const
Definition: il.h:6900
bool IsExternal() const
Definition: il.h:6888
intptr_t class_id() const
Definition: il.h:6899
intptr_t class_id() const
Definition: il.h:6803
bool IsUntagged() const
Definition: il.h:6796
Value * array() const
Definition: il.h:6800
intptr_t index_scale() const
Definition: il.h:6802
Representation representation() const
Definition: il.h:6819
Value * index() const
Definition: il.h:6801
Value * index() const
Definition: il.h:3127
virtual Representation RequiredInputRepresentation(intptr_t index) const
Definition: il.h:3114
intptr_t offset() const
Definition: il.h:3129
Register base_reg() const
Definition: il.h:3128
virtual Representation representation() const
Definition: il.h:3125
const LocalVariable & local() const
Definition: il.h:5814
Location temp(intptr_t index) const
Definition: locations.h:882
Location out(intptr_t index) const
Definition: locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition: locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition: locations.h:894
RegisterSet * live_registers()
Definition: locations.h:941
void set_out(intptr_t index, Location loc)
Definition: locations.cc:232
bool always_calls() const
Definition: locations.h:918
Location in(intptr_t index) const
Definition: locations.h:866
void set_in(intptr_t index, Location loc)
Definition: locations.cc:205
static Location NoLocation()
Definition: locations.h:387
static Location SameAsFirstInput()
Definition: locations.h:382
static Location Pair(Location first, Location second)
Definition: locations.cc:271
Register reg() const
Definition: locations.h:404
static Location FpuRegisterLocation(FpuRegister reg)
Definition: locations.h:410
static Location WritableRegister()
Definition: locations.h:376
bool IsConstant() const
Definition: locations.h:292
static Location RegisterLocation(Register reg)
Definition: locations.h:398
static Location Any()
Definition: locations.h:352
PairLocation * AsPairLocation() const
Definition: locations.cc:280
static Location RequiresRegister()
Definition: locations.h:365
static Location RequiresFpuRegister()
Definition: locations.h:369
FpuRegister fpu_reg() const
Definition: locations.h:416
const Object & constant() const
Definition: locations.cc:373
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition: locations.h:294
Value * right() const
Definition: il.h:8970
intptr_t result_cid() const
Definition: il.h:8972
Value * left() const
Definition: il.h:8969
MethodRecognizer::Kind op_kind() const
Definition: il.h:8967
Value * length() const
Definition: il.h:3211
bool unboxed_inputs() const
Definition: il.h:3216
Value * src_start() const
Definition: il.h:3209
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition: il.h:3210
static intptr_t value_offset()
Definition: object.h:10074
virtual Representation representation() const
Definition: il.h:3387
Value * value() const
Definition: il.h:3377
MoveArgumentInstr(Value *value, Representation representation, Location location)
Definition: il.h:3349
static int ComputeArgcTag(const Function &function)
void SetupNative()
Definition: il.cc:7347
bool is_auto_scope() const
Definition: il.h:6026
bool is_bootstrap_native() const
Definition: il.h:6025
const Function & function() const
Definition: il.h:6023
NativeFunction native_c_function() const
Definition: il.h:6024
bool link_lazily() const
Definition: il.h:6027
static constexpr intptr_t kVMTagOffsetFromFp
Definition: il.h:2235
static uword LinkNativeCallEntry()
static Object & Handle()
Definition: object.h:407
static Object & ZoneHandle()
Definition: object.h:419
static intptr_t data_offset()
Definition: object.h:10554
Location At(intptr_t i) const
Definition: locations.h:618
static bool IsNegative(Range *range)
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool OnlyLessThanOrEqualTo(Range *range, intptr_t value)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
intptr_t FpuRegisterCount() const
Definition: locations.h:809
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr, BinaryIntegerOpInstr, FIELD_LIST) protected bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition: il.cc:2112
Range * shift_range() const
Definition: il.h:9655
Kind kind() const
Definition: il.h:11358
Value * value() const
Definition: il.h:9952
static constexpr intptr_t kBits
Definition: object.h:9986
static SmiPtr New(intptr_t value)
Definition: object.h:10006
static constexpr intptr_t kMaxValue
Definition: object.h:9987
static intptr_t RawValue(intptr_t value)
Definition: object.h:10022
const char * message() const
Definition: il.h:3681
bool ShouldEmitStoreBarrier() const
Definition: il.h:7089
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:6925
Value * value() const
Definition: il.h:7083
Value * array() const
Definition: il.h:7081
intptr_t class_id() const
Definition: il.h:7086
bool IsUntagged() const
Definition: il.h:7114
intptr_t index_scale() const
Definition: il.h:7085
Value * index() const
Definition: il.h:7082
Value * value() const
Definition: il.h:5963
const LocalVariable & local() const
Definition: il.h:5962
const Field & field() const
Definition: il.h:6729
Value * value() const
Definition: il.h:6730
bool needs_number_check() const
Definition: il.h:5125
Value * str() const
Definition: il.h:6967
static intptr_t length_offset()
Definition: object.h:10214
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition: stub_code.cc:174
static constexpr int kNullCharCodeSymbolOffset
Definition: symbols.h:605
intptr_t ArgumentCount() const
Definition: il.h:4586
ArrayPtr GetArgumentsDescriptor() const
Definition: il.h:4617
virtual intptr_t InputCount() const
Definition: il.h:2755
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition: il.h:5234
@ kOsrRequest
Definition: thread.h:425
static uword stack_overflow_shared_stub_entry_point_offset(bool fpu_regs)
Definition: thread.h:453
static intptr_t stack_overflow_flags_offset()
Definition: thread.h:443
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
virtual Representation representation() const
Definition: il.h:9841
Value * value() const
Definition: il.h:9828
Token::Kind op_kind() const
Definition: il.h:9829
Value * value() const
Definition: il.h:9240
Token::Kind op_kind() const
Definition: il.h:9241
virtual Representation representation() const
Definition: il.h:8703
Value * value() const
Definition: il.h:8678
bool is_truncating() const
Definition: il.h:8772
virtual Representation representation() const
Definition: il.h:4288
bool IsScanFlagsUnboxed() const
Definition: il.cc:7181
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static T Abs(T x)
Definition: utils.h:49
static void CalculateMagicAndShiftForDivRem(int64_t divisor, int64_t *magic, int64_t *shift)
Definition: utils.cc:39
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static constexpr int ShiftForPowerOfTwo(T x)
Definition: utils.h:81
static int CountTrailingZeros64(uint64_t x)
Definition: utils.h:264
static T Minimum(T x, T y)
Definition: utils.h:36
static T AddWithWrapAround(T a, T b)
Definition: utils.h:431
static constexpr int CountOneBits64(uint64_t x)
Definition: utils.h:148
static constexpr size_t HighestBit(int64_t v)
Definition: utils.h:185
static constexpr bool IsPowerOfTwo(T x)
Definition: utils.h:76
bool BindsToConstant() const
Definition: il.cc:1183
Definition * definition() const
Definition: il.h:103
CompileType * Type()
Value(Definition *definition)
Definition: il.h:95
intptr_t InputCount() const
Definition: il.h:2794
void static bool EmittingComments()
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
static word entry_point_offset(CodeEntryKind kind=CodeEntryKind::kNormal)
static word OffsetOf(const dart::Field &field)
static word entry_point_offset(CodeEntryKind kind=CodeEntryKind::kNormal)
static word unboxed_runtime_arg_offset()
static word shared_field_table_values_offset()
static word field_table_values_offset()
static word exit_through_ffi_offset()
static word invoke_dart_code_stub_offset()
static word saved_shadow_call_stack_offset()
static word top_exit_frame_info_offset()
#define ASSERT(E)
SkBitmap source
Definition: examples.cpp:28
static bool b
#define FATAL(error)
FlutterSemanticsFlag flag
FlutterSemanticsFlag flags
uint8_t value
GAsyncResult * result
uint32_t * target
Dart_NativeFunction function
Definition: fuchsia.cc:51
int argument_count
Definition: fuchsia.cc:52
static float max(float r, float g, float b)
Definition: hsl.cpp:49
static float min(float r, float g, float b)
Definition: hsl.cpp:48
#define R(r)
#define CASE(Arity, Mask, Name, Args, Result)
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)
Definition: il.h:11867
size_t length
def match(bench, filt)
Definition: benchmark.py:23
const intptr_t kResultIndex
Definition: marshaller.h:28
word ToRawSmi(const dart::Object &a)
Definition: runtime_api.cc:960
intptr_t FrameOffsetInBytesForVariable(const LocalVariable *variable)
Definition: runtime_api.h:344
static constexpr intptr_t kWordSize
Definition: runtime_api.h:274
static constexpr intptr_t kCompressedWordSize
Definition: runtime_api.h:286
bool IsSmi(int64_t v)
Definition: runtime_api.cc:31
constexpr word kSmiMax
Definition: runtime_api.h:305
constexpr intptr_t kSmiBits
Definition: runtime_api.h:301
FrameLayout frame_layout
Definition: stack_frame.cc:76
const Object & NullObject()
Definition: runtime_api.cc:149
constexpr OperandSize kWordBytes
bool HasIntegerValue(const dart::Object &object, int64_t *value)
Definition: runtime_api.cc:239
static intptr_t chunk_size(intptr_t bytes_left)
Definition: dart_vm.cc:33
Location LocationAnyOrConstant(Value *value)
Definition: locations.cc:357
Location LocationRegisterOrConstant(Value *value)
Definition: locations.cc:289
const intptr_t kSmiBits
Definition: globals.h:24
const Register kWriteBarrierSlotReg
constexpr bool IsAbiPreservedRegister(Register reg)
Definition: constants.h:90
const Register THR
const char *const name
static Condition InvertCondition(Condition c)
const RegList kAbiVolatileCpuRegs
static constexpr int kSavedCallerPcSlotFromFp
bool IsTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:429
constexpr intptr_t kBitsPerWord
Definition: globals.h:514
const Register kExceptionObjectReg
const Register kWriteBarrierObjectReg
const VRegister VTMP
const Register NULL_REG
static constexpr intptr_t kBoolVsNullBitPosition
const Register kWriteBarrierValueReg
static constexpr bool IsCalleeSavedRegister(Register reg)
Definition: constants.h:85
constexpr intptr_t kIntptrMin
Definition: globals.h:556
int32_t classid_t
Definition: globals.h:524
static const ClassId kLastErrorCid
Definition: class_id.h:311
@ kIllegalCid
Definition: class_id.h:214
@ kNullCid
Definition: class_id.h:252
@ kDynamicCid
Definition: class_id.h:253
Representation
Definition: locations.h:66
const FpuRegister FpuTMP
static const ClassId kFirstErrorCid
Definition: class_id.h:310
uintptr_t uword
Definition: globals.h:501
const Register CODE_REG
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ NOT_ZERO
@ NO_OVERFLOW
@ UNSIGNED_LESS
@ NOT_EQUAL
void RegisterTypeArgumentsUse(const Function &function, TypeUsageInfo *type_usage_info, const Class &klass, Definition *type_arguments)
const Register TMP2
static constexpr int kParamEndSlotFromFp
const Register ARGS_DESC_REG
bool IsClampedTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:461
@ kNumberOfCpuRegisters
Definition: constants_arm.h:98
@ kNoRegister
Definition: constants_arm.h:99
Location LocationFixedRegisterOrConstant(Value *value, Register reg)
Definition: locations.cc:339
const int kNumberOfFpuRegisters
Location LocationWritableRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition: locations.cc:322
bool IsExternalPayloadClassId(classid_t cid)
Definition: class_id.h:472
constexpr RegList kDartAvailableCpuRegs
const Register TMP
const Register FPREG
static constexpr intptr_t kCompressedWordSize
Definition: globals.h:42
DEFINE_BACKEND(LoadThread,(Register out))
Definition: il.cc:8109
static constexpr int kPcMarkerSlotFromFp
const Register FUNCTION_REG
const Register IC_DATA_REG
compiler::Address LocationToStackSlotAddress(Location loc)
Definition: locations.cc:365
constexpr intptr_t kWordSize
Definition: globals.h:509
static bool IsConstant(Definition *def, int64_t *val)
Definition: loops.cc:123
static constexpr Representation kUnboxedIntPtr
Definition: locations.h:176
const Register PP
QRegister FpuRegister
constexpr bool FLAG_target_memory_sanitizer
Definition: flags.h:174
const Register kStackTraceObjectReg
@ kSmiTagSize
@ kHeapObjectTag
@ kSmiTagMask
@ kSmiTagShift
const RegList kAbiVolatileFpuRegs
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition: locations.cc:297
constexpr intptr_t kBitsPerInt64
Definition: globals.h:467
const Register SPREG
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
static constexpr Register kResultReg
static constexpr Register kLengthReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kTempReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Representation NativeRepresentation(Representation rep)
Definition: il.h:8504
intptr_t first_local_from_fp
Definition: frame_layout.h:37
static constexpr intptr_t kBoolValueMask
static constexpr size_t ValueSize(Representation rep)
Definition: locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition: locations.h:92
static compiler::OperandSize OperandSize(Representation rep)
Definition: locations.cc:16
static constexpr bool IsUnboxed(Representation rep)
Definition: locations.h:101
static bool IsUnsignedInteger(Representation rep)
Definition: locations.h:126
static Representation RepresentationOfArrayElement(classid_t cid)
Definition: locations.cc:79
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
#define kNegInfinity
Definition: globals.h:66
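Editorial note: the Smi-tagging symbols cross-referenced above (kSmiTagSize, kSmiTagMask, kSmiTagShift, kHeapObjectTag, together with helpers such as IsSmi and ToRawSmi) all describe one encoding: a small integer is stored shifted left by one bit with a zero tag bit, while heap pointers carry a set low bit. Below is a minimal standalone C++ sketch of that encoding, assuming the uncompressed word layout; the local constants mirror the VM values and the helper names (TagSmi, UntagSmi, IsSmiWord) are illustrative, not VM API.

// Editorial sketch (not VM code): illustrates the Smi tagging scheme behind
// the kSmiTag* / kHeapObjectTag constants listed above. Helper names are
// hypothetical.
#include <cassert>
#include <cstdint>
#include <cstdio>

namespace {

constexpr intptr_t kSmiTag = 0;         // low bit of a tagged Smi is 0
constexpr intptr_t kSmiTagShift = 1;    // payload is shifted left by one bit
constexpr intptr_t kSmiTagMask = 1;     // mask that isolates the tag bit
constexpr intptr_t kHeapObjectTag = 1;  // heap pointers have the low bit set

// Tag an integer as a Smi: shift the payload left, leaving the tag bit 0.
constexpr intptr_t TagSmi(intptr_t value) { return value << kSmiTagShift; }

// Recover the payload from a tagged Smi (arithmetic shift keeps the sign).
constexpr intptr_t UntagSmi(intptr_t tagged) { return tagged >> kSmiTagShift; }

// A word is a Smi iff its tag bit equals kSmiTag.
constexpr bool IsSmiWord(intptr_t word) {
  return (word & kSmiTagMask) == kSmiTag;
}

}  // namespace

int main() {
  const intptr_t tagged = TagSmi(21);
  assert(IsSmiWord(tagged) && UntagSmi(tagged) == 21);
  // A heap reference would instead carry kHeapObjectTag in its low bit.
  assert(!IsSmiWord(tagged | kHeapObjectTag));
  std::printf("tagged=%ld untagged=%ld\n", static_cast<long>(tagged),
              static_cast<long>(UntagSmi(tagged)));
  return 0;
}

This is why code such as the indexed loads in this file can scale a tagged Smi index directly: the extra factor of two from the tag shift is folded into the address computation instead of untagging first.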