il_x64.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "platform/globals.h"
6#include "vm/globals.h" // Needed here to get TARGET_ARCH_X64.
7#if defined(TARGET_ARCH_X64)
8
10
11#include "platform/assert.h"
13#include "vm/class_id.h"
23#include "vm/dart_entry.h"
24#include "vm/instructions.h"
25#include "vm/object_store.h"
26#include "vm/parser.h"
27#include "vm/stack_frame.h"
28#include "vm/stub_code.h"
29#include "vm/symbols.h"
31
32#define __ compiler->assembler()->
33#define Z (compiler->zone())
34
35namespace dart {
36
37// Generic summary for call instructions that have all arguments pushed
38// on the stack and return the result in a fixed register RAX (or XMM0 if
39// the return type is double).
40LocationSummary* Instruction::MakeCallSummary(Zone* zone,
41 const Instruction* instr,
42 LocationSummary* locs) {
43 ASSERT(locs == nullptr || locs->always_calls());
44 LocationSummary* result =
45 ((locs == nullptr)
46 ? (new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall))
47 : locs);
48 const auto representation = instr->representation();
49 switch (representation) {
50 case kTagged:
51 case kUntagged:
52 case kUnboxedInt64:
53 result->set_out(
55 break;
56 case kPairOfTagged:
57 result->set_out(
62 break;
63 case kUnboxedDouble:
64 result->set_out(
66 break;
67 default:
68 UNREACHABLE();
69 break;
70 }
71 return result;
72}
73
74LocationSummary* LoadIndexedUnsafeInstr::MakeLocationSummary(Zone* zone,
75 bool opt) const {
76 const intptr_t kNumInputs = 1;
77 const intptr_t kNumTemps = 0;
78 LocationSummary* locs = new (zone)
79 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
80
81 locs->set_in(0, Location::RequiresRegister());
82 switch (representation()) {
83 case kTagged:
84 case kUnboxedInt64:
85 locs->set_out(0, Location::RequiresRegister());
86 break;
87 case kUnboxedDouble:
88 locs->set_out(0, Location::RequiresFpuRegister());
89 break;
90 default:
91 UNREACHABLE();
92 break;
93 }
94 return locs;
95}
96
97void LoadIndexedUnsafeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
98 ASSERT(RequiredInputRepresentation(0) == kTagged); // It is a Smi.
99 ASSERT(kSmiTag == 0);
100 ASSERT(kSmiTagSize == 1);
101
102 const Register index = locs()->in(0).reg();
103#if defined(DART_COMPRESSED_POINTERS)
104 // No addressing mode will ignore the upper bits. Cannot use the shorter `orl`
105 // to clear the upper bits, as this instruction uses negative indices as part
106 // of FP-relative loads.
107 // TODO(compressed-pointers): Can we guarantee the index is already
108 // sign-extended if it always comes from an args-descriptor load?
109 __ movsxd(index, index);
110#endif
111
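  // The index is a tagged Smi (its value is shifted left by one), so a scale of
  // TIMES_4 addresses 8-byte (word-sized) FP-relative slots.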
112 switch (representation()) {
113 case kTagged:
114 case kUnboxedInt64: {
115 const auto out = locs()->out(0).reg();
116 __ movq(out, compiler::Address(base_reg(), index, TIMES_4, offset()));
117 break;
118 }
119 case kUnboxedDouble: {
120 const auto out = locs()->out(0).fpu_reg();
121 __ movsd(out, compiler::Address(base_reg(), index, TIMES_4, offset()));
122 break;
123 }
124 default:
125 UNREACHABLE();
126 break;
127 }
128}
129
130DEFINE_BACKEND(StoreIndexedUnsafe,
131 (NoLocation, Register index, Register value)) {
132 ASSERT(instr->RequiredInputRepresentation(
133 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
134#if defined(DART_COMPRESSED_POINTERS)
135 // No addressing mode will ignore the upper bits. Cannot use the shorter `orl`
136 // to clear the upper bits, as this instruction uses negative indices as part
137 // of FP-relative stores.
138 // TODO(compressed-pointers): Can we guarantee the index is already
139 // sign-extended if it always comes from an args-descriptor load?
140 __ movsxd(index, index);
141#endif
142 __ movq(compiler::Address(instr->base_reg(), index, TIMES_4, instr->offset()),
143 value);
144
145 ASSERT(kSmiTag == 0);
146 ASSERT(kSmiTagSize == 1);
147}
148
149DEFINE_BACKEND(TailCall, (NoLocation, Fixed<Register, ARGS_DESC_REG>)) {
150 compiler->EmitTailCallToStub(instr->code());
151
152 // Even though the TailCallInstr will be the last instruction in a basic
153 // block, the flow graph compiler will emit native code for other blocks after
154 // the one containing this instruction and needs to be able to use the pool.
155 // (The `LeaveDartFrame` above disables use of the pool.)
156 __ set_constant_pool_allowed(true);
157}
158
159LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
160 bool opt) const {
161 // The compiler must optimize any function that includes a MemoryCopy
162 // instruction that uses typed data cids, since extracting the payload address
163 // from views is done in a compiler pass after all code motion has happened.
164 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
165 !IsTypedDataBaseClassId(dest_cid_)) ||
166 opt);
167 const intptr_t kNumInputs = 5;
168 const intptr_t kNumTemps = 2;
169 LocationSummary* locs = new (zone)
170 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
171 locs->set_in(kSrcPos, Location::RequiresRegister());
172 locs->set_in(kDestPos, Location::RequiresRegister());
173 const bool needs_writable_inputs =
174 (((element_size_ == 1) && !unboxed_inputs_) ||
175 ((element_size_ == 16) && unboxed_inputs_));
176 locs->set_in(kSrcStartPos,
177 needs_writable_inputs
180 locs->set_in(kDestStartPos,
181 needs_writable_inputs
184 if (length()->BindsToSmiConstant() && length()->BoundSmiConstant() <= 4) {
185 locs->set_in(
188 length()->definition()->OriginalDefinition()->AsConstant()));
189 } else {
191 }
192 // Used for the actual iteration.
193 locs->set_temp(0, Location::RegisterLocation(RSI));
194 locs->set_temp(1, Location::RegisterLocation(RDI));
195 return locs;
196}
197
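// Copies are performed in chunks of at most one machine word; elements larger
// than a word (e.g. 16 bytes) are copied as multiple word-sized moves.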
198static inline intptr_t SizeOfMemoryCopyElements(intptr_t element_size) {
199 return Utils::Minimum<intptr_t>(element_size, compiler::target::kWordSize);
200}
201
202void MemoryCopyInstr::PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
203 Register length_reg,
204 compiler::Label* done) {
205 const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
206
207 // We want to convert the value in length_reg to an unboxed length in
208 // terms of mov_size-sized elements.
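  // For example, 16-byte elements copied with unboxed inputs use 8-byte moves,
  // so the length must be doubled (shift == 1), while a Smi length of 1-byte
  // elements only needs a SmiUntag (shift == -kSmiTagShift).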
209 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
210 Utils::ShiftForPowerOfTwo(mov_size) -
211 (unboxed_inputs() ? 0 : kSmiTagShift);
212 if (shift < 0) {
213 ASSERT_EQUAL(shift, -kSmiTagShift);
214 __ SmiUntag(length_reg);
215 } else if (shift > 0) {
216 __ OBJ(shl)(length_reg, compiler::Immediate(shift));
217 } else {
218 __ ExtendNonNegativeSmi(length_reg);
219 }
220}
221
222void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
223 Register dest_reg,
224 Register src_reg,
225 Register length_reg,
226 compiler::Label* done,
227 compiler::Label* copy_forwards) {
228 const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
229 const bool reversed = copy_forwards != nullptr;
230 if (reversed) {
231 // Avoid doing the extra work to prepare for the rep mov instructions
232 // if the length to copy is zero.
233 __ BranchIfZero(length_reg, done);
234 // Verify that the overlap actually exists by checking to see if
235 // the first element in dest <= the last element in src.
236 const ScaleFactor scale = ToScaleFactor(mov_size, /*index_unboxed=*/true);
237 __ leaq(TMP, compiler::Address(src_reg, length_reg, scale, -mov_size));
238 __ CompareRegisters(dest_reg, TMP);
239#if defined(USING_MEMORY_SANITIZER)
240 const auto jump_distance = compiler::Assembler::kFarJump;
241#else
242 const auto jump_distance = compiler::Assembler::kNearJump;
243#endif
244 __ BranchIf(UNSIGNED_GREATER, copy_forwards, jump_distance);
245 // The backwards move must be performed, so move TMP -> src_reg and do the
246 // same adjustment for dest_reg.
247 __ movq(src_reg, TMP);
248 __ leaq(dest_reg,
249 compiler::Address(dest_reg, length_reg, scale, -mov_size));
250 __ std();
251 }
252#if defined(USING_MEMORY_SANITIZER)
253 // For reversed, do the `rep` first. It sets `dest_reg` to the start again.
254 // For forward, do the unpoisoning first, before `dest_reg` is modified.
255 __ movq(TMP, length_reg);
256 if (mov_size != 1) {
257 // Unpoison takes the length in bytes.
258 __ MulImmediate(TMP, mov_size);
259 }
260 if (!reversed) {
261 __ MsanUnpoison(dest_reg, TMP);
262 }
263#endif
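  // The rep_movs* instructions implicitly use RSI as the source, RDI as the
  // destination, and RCX as the element count.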
264 switch (mov_size) {
265 case 1:
266 __ rep_movsb();
267 break;
268 case 2:
269 __ rep_movsw();
270 break;
271 case 4:
272 __ rep_movsd();
273 break;
274 case 8:
275 __ rep_movsq();
276 break;
277 default:
278 UNREACHABLE();
279 }
280 if (reversed) {
281 __ cld();
282 }
283
284#if defined(USING_MEMORY_SANITIZER)
285 if (reversed) {
286 __ MsanUnpoison(dest_reg, TMP);
287 }
288#endif
289}
290
291void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
292 classid_t array_cid,
293 Register array_reg,
294 Register payload_reg,
295 Representation array_rep,
296 Location start_loc) {
297 intptr_t offset = 0;
298 if (array_rep != kTagged) {
299 // Do nothing, array_reg already contains the payload address.
300 } else if (IsTypedDataBaseClassId(array_cid)) {
301 // The incoming array must have been proven to be an internal typed data
302 // object, where the payload is in the object and we can just offset.
303 ASSERT_EQUAL(array_rep, kTagged);
304 offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
305 } else {
306 ASSERT_EQUAL(array_rep, kTagged);
307 ASSERT(!IsExternalPayloadClassId(array_cid));
308 switch (array_cid) {
309 case kOneByteStringCid:
310 offset =
311 compiler::target::OneByteString::data_offset() - kHeapObjectTag;
312 break;
313 case kTwoByteStringCid:
314 offset =
315 compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
316 break;
317 default:
318 UNREACHABLE();
319 break;
320 }
321 }
322 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
323 if (start_loc.IsConstant()) {
324 const auto& constant = start_loc.constant();
325 ASSERT(constant.IsInteger());
326 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
327 const intptr_t add_value = Utils::AddWithWrapAround(
328 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
329 __ leaq(payload_reg, compiler::Address(array_reg, add_value));
330 return;
331 }
332 // Note that start_reg must be writable in the special cases below.
333 const Register start_reg = start_loc.reg();
334 bool index_unboxed = unboxed_inputs_;
335 // Both special cases below assume that Smis are only shifted one bit.
337 if (element_size_ == 1 && !index_unboxed) {
338 // Shift the value to the right by untagging the Smi.
339 __ SmiUntag(start_reg);
340 index_unboxed = true;
341 } else if (element_size_ == 16 && index_unboxed) {
342 // Can't use TIMES_16 on X86, so instead pre-shift the value to reduce the
343 // scaling needed in the leaq instruction.
344 __ SmiTag(start_reg);
345 index_unboxed = false;
346 } else if (!index_unboxed) {
347 __ ExtendNonNegativeSmi(start_reg);
348 }
349 auto const scale = ToScaleFactor(element_size_, index_unboxed);
350 __ leaq(payload_reg, compiler::Address(array_reg, start_reg, scale, offset));
351}
352
353LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
354 Zone* zone,
355 bool opt) const {
356 const intptr_t kNumInputs = 3;
357 const intptr_t kNumTemps = 0;
358 auto* const summary = new (zone)
359 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
360
361 summary->set_in(kBasePos, Location::RequiresRegister());
362 // Only use a Smi constant for the index if multiplying it by the index
363 // scale would be an int32 constant.
364 const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
366 index(), kMinInt32 >> scale_shift,
367 kMaxInt32 >> scale_shift));
368 // Only use a Smi constant for the offset if it is an int32 constant.
370 kMaxInt32));
371 summary->set_out(0, Location::RequiresRegister());
372
373 return summary;
374}
375
376void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
377 const Register base_reg = locs()->in(kBasePos).reg();
378 const Location& index_loc = locs()->in(kIndexPos);
379 const Location& offset_loc = locs()->in(kOffsetPos);
380 const Register result_reg = locs()->out(0).reg();
381
382 if (index_loc.IsConstant()) {
383 if (offset_loc.IsConstant()) {
384 ASSERT_EQUAL(Smi::Cast(index_loc.constant()).Value(), 0);
385 ASSERT(Smi::Cast(offset_loc.constant()).Value() != 0);
386 // No index involved at all.
387 const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
388 __ leaq(result_reg, compiler::Address(base_reg, offset_value));
389 } else {
390 // Don't need wrap-around as the index is constant only if multiplying
391 // it by the scale is an int32.
392 const int32_t scaled_index =
393 Smi::Cast(index_loc.constant()).Value() * index_scale();
394 __ leaq(result_reg, compiler::Address(base_reg, offset_loc.reg(), TIMES_1,
395 scaled_index));
396 }
397 } else {
398 Register index_reg = index_loc.reg();
399 bool index_unboxed = RepresentationUtils::IsUnboxedInteger(
401 ASSERT(index_unboxed);
402 if (index_scale() == 16) {
404 // A ScaleFactor of TIMES_16 is invalid for x86, so box the index as a Smi
405 // (using the result register to store it to avoid allocating a writable
406 // register for the index) to reduce the ScaleFactor to TIMES_8.
407 __ MoveAndSmiTagRegister(result_reg, index_reg);
408 index_reg = result_reg;
409 index_unboxed = false;
410 }
411 auto const scale = ToScaleFactor(index_scale(), index_unboxed);
412 if (offset_loc.IsConstant()) {
413 const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
414 __ leaq(result_reg,
415 compiler::Address(base_reg, index_reg, scale, offset_value));
416 } else {
417 // compiler::Address(reg, reg, scale, reg) is invalid, so we have to do
418 // this as a two-part operation.
419 __ leaq(result_reg, compiler::Address(base_reg, index_reg, scale,
420 /*disp=*/0));
421 __ AddRegisters(result_reg, offset_loc.reg());
422 }
423 }
424}
425
426LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
427 bool opt) const {
428 const intptr_t kNumInputs = 1;
429 const intptr_t kNumTemps = 0;
430 LocationSummary* locs = new (zone)
431 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
432 if (representation() == kUnboxedDouble) {
433 locs->set_in(0, Location::RequiresFpuRegister());
434 } else if (representation() == kUnboxedInt64) {
435 locs->set_in(0, Location::RequiresRegister());
436 } else {
437 locs->set_in(0, LocationAnyOrConstant(value()));
438 }
439 return locs;
440}
441
442void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
443 ASSERT(compiler->is_optimizing());
444
445 const Location value = locs()->in(0);
447 if (value.IsRegister()) {
448 __ movq(dst, value.reg());
449 } else if (value.IsConstant()) {
450 __ StoreObject(dst, value.constant());
451 } else if (value.IsFpuRegister()) {
452 __ movsd(dst, value.fpu_reg());
453 } else {
454 ASSERT(value.IsStackSlot());
455 __ MoveMemoryToMemory(dst, LocationToStackSlotAddress(value));
456 }
457}
458
459LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
460 bool opt) const {
461 const intptr_t kNumInputs = 1;
462 const intptr_t kNumTemps = 0;
463 LocationSummary* locs = new (zone)
464 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
465 switch (representation()) {
466 case kTagged:
467 case kUnboxedInt64:
468 locs->set_in(0,
470 break;
471 case kPairOfTagged:
472 locs->set_in(
477 break;
478 case kUnboxedDouble:
479 locs->set_in(
481 break;
482 default:
483 UNREACHABLE();
484 break;
485 }
486 return locs;
487}
488
489// Attempt optimized compilation at return instruction instead of at the entry.
490// The entry needs to be patchable; no inlined objects are allowed in the area
491// that will be overwritten by the patch instruction (a jump).
492void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
493 if (locs()->in(0).IsRegister()) {
494 const Register result = locs()->in(0).reg();
496 } else if (locs()->in(0).IsPairLocation()) {
497 const Register result_lo = locs()->in(0).AsPairLocation()->At(0).reg();
498 const Register result_hi = locs()->in(0).AsPairLocation()->At(1).reg();
501 } else {
502 ASSERT(locs()->in(0).IsFpuRegister());
503 const FpuRegister result = locs()->in(0).fpu_reg();
505 }
506
507 if (compiler->parsed_function().function().IsAsyncFunction() ||
508 compiler->parsed_function().function().IsAsyncGenerator()) {
509 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
510 const Code& stub = GetReturnStub(compiler);
511 compiler->EmitJumpToStub(stub);
512 return;
513 }
514
515 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
516 __ ret();
517 return;
518 }
519
520#if defined(DEBUG)
521 __ Comment("Stack Check");
522 compiler::Label done;
523 const intptr_t fp_sp_dist =
524 (compiler::target::frame_layout.first_local_from_fp + 1 -
525 compiler->StackSize()) *
526 kWordSize;
527 ASSERT(fp_sp_dist <= 0);
528 __ movq(RDI, RSP);
529 __ subq(RDI, RBP);
530 __ CompareImmediate(RDI, compiler::Immediate(fp_sp_dist));
532 __ int3();
533 __ Bind(&done);
534#endif
535 ASSERT(__ constant_pool_allowed());
536 __ LeaveDartFrame(); // Disallows constant pool use.
537 __ ret();
538 // This DartReturnInstr may be emitted out of order by the optimizer. The next
539 // block may be a target expecting a properly set constant pool pointer.
540 __ set_constant_pool_allowed(true);
541}
542
543static const RegisterSet kCalleeSaveRegistersSet(
546
547// Keep in sync with NativeEntryInstr::EmitNativeCode.
548void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
549 EmitReturnMoves(compiler);
550
551 __ LeaveDartFrame();
552
553 // Pop dummy return address.
554 __ popq(TMP);
555
556 // Anything besides the return register.
557 const Register vm_tag_reg = RBX;
558 const Register old_exit_frame_reg = RCX;
559 const Register old_exit_through_ffi_reg = RDI;
560
561 __ popq(old_exit_frame_reg);
562
563 __ popq(old_exit_through_ffi_reg);
564
565 // Restore top_resource.
566 __ popq(TMP);
567 __ movq(
568 compiler::Address(THR, compiler::target::Thread::top_resource_offset()),
569 TMP);
570
571 __ popq(vm_tag_reg);
572
573 // The trampoline that called us will enter the safepoint on our behalf.
574 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
575 old_exit_through_ffi_reg,
576 /*enter_safepoint=*/false);
577
578 // Restore C++ ABI callee-saved registers.
579 __ PopRegisters(kCalleeSaveRegistersSet);
580
581#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
582#error Unimplemented
583#endif
584
585 // Leave the entry frame.
586 __ LeaveFrame();
587
588 // Leave the dummy frame holding the pushed arguments.
589 __ LeaveFrame();
590
591 __ ret();
592
593 // For following blocks.
594 __ set_constant_pool_allowed(true);
595}
596
597// Detect the pattern where one value is zero and the other is a power of 2.
598static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
599 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
600 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
601}
602
603LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
604 bool opt) const {
606 // TODO(dartbug.com/30952) support conversion of Register to corresponding
607 // least significant byte register (e.g. RAX -> AL, RSI -> SIL, r15 -> r15b).
609 return comparison()->locs();
610}
611
612void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
613 ASSERT(locs()->out(0).reg() == RDX);
614
615 // Clear upper part of the out register. We are going to use setcc on it
616 // which is a byte move.
617 __ xorq(RDX, RDX);
618
619 // Emit comparison code. This must not overwrite the result register.
620 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
621 // the labels or returning an invalid condition.
622 BranchLabels labels = {nullptr, nullptr, nullptr};
623 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
624 ASSERT(true_condition != kInvalidCondition);
625
626 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
627
628 intptr_t true_value = if_true_;
629 intptr_t false_value = if_false_;
630
631 if (is_power_of_two_kind) {
632 if (true_value == 0) {
633 // We need to have zero in RDX on true_condition.
634 true_condition = InvertCondition(true_condition);
635 }
636 } else {
637 if (true_value == 0) {
638 // Swap values so that false_value is zero.
639 intptr_t temp = true_value;
640 true_value = false_value;
641 false_value = temp;
642 } else {
643 true_condition = InvertCondition(true_condition);
644 }
645 }
646
647 __ setcc(true_condition, DL);
648
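  // In the power-of-two case the 0/1 produced by setcc is shifted into place,
  // so RDX ends up holding either Smi 0 or the non-zero Smi value (e.g. for
  // the pair {4, 0} the shift amount is 2 + kSmiTagSize).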
649 if (is_power_of_two_kind) {
650 const intptr_t shift =
651 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
652 __ shlq(RDX, compiler::Immediate(shift + kSmiTagSize));
653 } else {
654 __ decq(RDX);
655 __ AndImmediate(RDX, compiler::Immediate(Smi::RawValue(true_value) -
656 Smi::RawValue(false_value)));
657 if (false_value != 0) {
658 __ AddImmediate(RDX, compiler::Immediate(Smi::RawValue(false_value)));
659 }
660 }
661}
662
663LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
664 bool opt) const {
665 const intptr_t kNumInputs = 0;
666 const intptr_t stack_index =
667 compiler::target::frame_layout.FrameSlotForVariable(&local());
668 return LocationSummary::Make(zone, kNumInputs,
669 Location::StackSlot(stack_index, FPREG),
671}
672
673void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
674 ASSERT(!compiler->is_optimizing());
675 // Nothing to do.
676}
677
678LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
679 bool opt) const {
680 const intptr_t kNumInputs = 1;
681 return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
683}
684
685void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
686 Register value = locs()->in(0).reg();
687 Register result = locs()->out(0).reg();
688 ASSERT(result == value); // Assert that register assignment is correct.
689 __ movq(compiler::Address(
690 RBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
691 value);
692}
693
694LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
695 bool opt) const {
696 const intptr_t kNumInputs = 0;
697 return LocationSummary::Make(zone, kNumInputs,
699 ? Location::Constant(this)
700 : Location::RequiresRegister(),
701 LocationSummary::kNoCall);
702}
703
704void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
705 // The register allocator drops constant definitions that have no uses.
706 Location out = locs()->out(0);
707 ASSERT(out.IsRegister() || out.IsConstant() || out.IsInvalid());
708 if (out.IsRegister()) {
709 Register result = out.reg();
710 __ LoadObject(result, value());
711 }
712}
713
714void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
715 const Location& destination,
716 Register tmp,
717 intptr_t pair_index) {
718 ASSERT(pair_index == 0); // No pair representation needed on 64-bit.
719 if (destination.IsRegister()) {
720 if (RepresentationUtils::IsUnboxedInteger(representation())) {
721 const int64_t value = Integer::Cast(value_).AsInt64Value();
722 if (value == 0) {
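        // xorl clears the full 64-bit register (writes to a 32-bit register
        // zero the upper half) and has a shorter encoding than movq with an
        // immediate.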
723 __ xorl(destination.reg(), destination.reg());
724 } else {
725 __ movq(destination.reg(), compiler::Immediate(value));
726 }
727 } else {
728 ASSERT(representation() == kTagged);
729 __ LoadObject(destination.reg(), value_);
730 }
731 } else if (destination.IsFpuRegister()) {
732 switch (representation()) {
733 case kUnboxedFloat:
734 __ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
735 break;
736 case kUnboxedDouble:
737 __ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
738 break;
739 case kUnboxedFloat64x2:
740 __ LoadQImmediate(destination.fpu_reg(),
741 Float64x2::Cast(value_).value());
742 break;
743 case kUnboxedFloat32x4:
744 __ LoadQImmediate(destination.fpu_reg(),
745 Float32x4::Cast(value_).value());
746 break;
747 case kUnboxedInt32x4:
748 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
749 break;
750 default:
751 UNREACHABLE();
752 }
753 } else if (destination.IsDoubleStackSlot()) {
754 ASSERT(representation() == kUnboxedDouble);
755 __ LoadDImmediate(FpuTMP, Double::Cast(value_).value());
756 __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
757 } else if (destination.IsQuadStackSlot()) {
758 switch (representation()) {
759 case kUnboxedFloat64x2:
760 __ LoadQImmediate(FpuTMP, Float64x2::Cast(value_).value());
761 break;
762 case kUnboxedFloat32x4:
763 __ LoadQImmediate(FpuTMP, Float32x4::Cast(value_).value());
764 break;
765 case kUnboxedInt32x4:
766 __ LoadQImmediate(FpuTMP, Int32x4::Cast(value_).value());
767 break;
768 default:
769 UNREACHABLE();
770 }
771 __ movups(LocationToStackSlotAddress(destination), FpuTMP);
772 } else {
773 ASSERT(destination.IsStackSlot());
774 if (RepresentationUtils::IsUnboxedInteger(representation())) {
775 const int64_t value = Integer::Cast(value_).AsInt64Value();
776 __ movq(LocationToStackSlotAddress(destination),
777 compiler::Immediate(value));
778 } else if (representation() == kUnboxedFloat) {
779 int32_t float_bits =
780 bit_cast<int32_t, float>(Double::Cast(value_).value());
781 __ movl(LocationToStackSlotAddress(destination),
782 compiler::Immediate(float_bits));
783 } else {
784 ASSERT(representation() == kTagged);
785 __ StoreObject(LocationToStackSlotAddress(destination), value_);
786 }
787 }
788}
789
790LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
791 bool opt) const {
792 const bool is_unboxed_int =
795 compiler::target::kWordSize);
796 const intptr_t kNumInputs = 0;
797 const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
798 LocationSummary* locs = new (zone)
799 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
800 if (is_unboxed_int) {
801 locs->set_out(0, Location::RequiresRegister());
802 } else {
803 switch (representation()) {
804 case kUnboxedDouble:
805 locs->set_out(0, Location::RequiresFpuRegister());
806 locs->set_temp(0, Location::RequiresRegister());
807 break;
808 default:
809 UNREACHABLE();
810 break;
811 }
812 }
813 return locs;
814}
815
816void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
817 // The register allocator drops constant definitions that have no uses.
818 if (!locs()->out(0).IsInvalid()) {
819 const Register scratch =
822 : locs()->temp(0).reg();
823 EmitMoveToLocation(compiler, locs()->out(0), scratch);
824 }
825}
826
827LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
828 bool opt) const {
829 auto const dst_type_loc =
831
832 // We want to prevent spilling of the inputs (e.g. function/instantiator tav),
833 // since TTS preserves them. So we make this a `kNoCall` summary,
834 // even though most other registers can be modified by the stub. To tell the
835 // register allocator about it, we reserve all the other registers as
836 // temporary registers.
837 // TODO(http://dartbug.com/32788): Simplify this.
838
839 const intptr_t kNonChangeableInputRegs =
841 ((dst_type_loc.IsRegister() ? 1 : 0) << TypeTestABI::kDstTypeReg) |
842 (1 << TypeTestABI::kInstantiatorTypeArgumentsReg) |
843 (1 << TypeTestABI::kFunctionTypeArgumentsReg);
844
845 const intptr_t kNumInputs = 4;
846
847 // We invoke a stub that can potentially clobber any CPU register
848 // but can only clobber FPU registers on the slow path when
849 // entering runtime. Preserve all FPU registers that are
850 // not guaranteed to be preserved by the ABI.
851 const intptr_t kCpuRegistersToPreserve =
852 kDartAvailableCpuRegs & ~kNonChangeableInputRegs;
853 const intptr_t kFpuRegistersToPreserve =
855
856 const intptr_t kNumTemps = (Utils::CountOneBits64(kCpuRegistersToPreserve) +
857 Utils::CountOneBits64(kFpuRegistersToPreserve));
858
859 LocationSummary* summary = new (zone) LocationSummary(
861 summary->set_in(kInstancePos,
863 summary->set_in(kDstTypePos, dst_type_loc);
864 summary->set_in(
869 summary->set_out(0, Location::SameAsFirstInput());
870
871 // Let's reserve all registers except for the input ones.
872 intptr_t next_temp = 0;
873 for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
874 const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
875 if (should_preserve) {
876 summary->set_temp(next_temp++,
877 Location::RegisterLocation(static_cast<Register>(i)));
878 }
879 }
880
881 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
882 const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
883 if (should_preserve) {
884 summary->set_temp(next_temp++, Location::FpuRegisterLocation(
885 static_cast<FpuRegister>(i)));
886 }
887 }
888
889 return summary;
890}
891
892void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
893 ASSERT(locs()->always_calls());
894
895 auto object_store = compiler->isolate_group()->object_store();
896 const auto& assert_boolean_stub =
897 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
898
899 compiler::Label done;
900 __ testq(
904 compiler->GenerateStubCall(source(), assert_boolean_stub,
905 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
906 deopt_id(), env());
907 __ Bind(&done);
908}
909
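// Maps a Dart comparison token to the x64 condition code used for a signed
// integer comparison.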
910static Condition TokenKindToIntCondition(Token::Kind kind) {
911 switch (kind) {
912 case Token::kEQ:
913 return EQUAL;
914 case Token::kNE:
915 return NOT_EQUAL;
916 case Token::kLT:
917 return LESS;
918 case Token::kGT:
919 return GREATER;
920 case Token::kLTE:
921 return LESS_EQUAL;
922 case Token::kGTE:
923 return GREATER_EQUAL;
924 default:
925 UNREACHABLE();
926 return OVERFLOW;
927 }
928}
929
930LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
931 bool opt) const {
932 const intptr_t kNumInputs = 2;
933 if (operation_cid() == kDoubleCid) {
934 const intptr_t kNumTemps = 0;
935 LocationSummary* locs = new (zone)
936 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
937 locs->set_in(0, Location::RequiresFpuRegister());
938 locs->set_in(1, Location::RequiresFpuRegister());
939 locs->set_out(0, Location::RequiresRegister());
940 return locs;
941 }
942 if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
943 operation_cid() == kIntegerCid) {
944 const intptr_t kNumTemps = 0;
945 LocationSummary* locs = new (zone)
946 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
947 if (is_null_aware()) {
948 locs->set_in(0, Location::RequiresRegister());
949 locs->set_in(1, Location::RequiresRegister());
950 } else {
951 locs->set_in(0, LocationRegisterOrConstant(left()));
952 // Only one input can be a constant operand. The case of two constant
953 // operands should be handled by constant propagation.
954 // Only right can be a stack slot.
955 locs->set_in(1, locs->in(0).IsConstant()
958 }
959 locs->set_out(0, Location::RequiresRegister());
960 return locs;
961 }
962 UNREACHABLE();
963 return nullptr;
964}
965
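// Loads the class id of value_reg into value_cid_reg. If value_reg holds a
// Smi, loads kSmiCid instead, or jumps to value_is_smi when that label is
// provided.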
966static void LoadValueCid(FlowGraphCompiler* compiler,
967 Register value_cid_reg,
968 Register value_reg,
969 compiler::Label* value_is_smi = nullptr) {
970 compiler::Label done;
971 if (value_is_smi == nullptr) {
972 __ LoadImmediate(value_cid_reg, compiler::Immediate(kSmiCid));
973 }
974 __ testq(value_reg, compiler::Immediate(kSmiTagMask));
975 if (value_is_smi == nullptr) {
977 } else {
978 __ j(ZERO, value_is_smi);
979 }
980 __ LoadClassId(value_cid_reg, value_reg);
981 __ Bind(&done);
982}
983
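// Returns the condition that yields the same result when the comparison
// operands are swapped (left/right exchanged). Note this is not the logical
// negation; see InvertCondition for that.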
984static Condition FlipCondition(Condition condition) {
985 switch (condition) {
986 case EQUAL:
987 return EQUAL;
988 case NOT_EQUAL:
989 return NOT_EQUAL;
990 case LESS:
991 return GREATER;
992 case LESS_EQUAL:
993 return GREATER_EQUAL;
994 case GREATER:
995 return LESS;
996 case GREATER_EQUAL:
997 return LESS_EQUAL;
998 case BELOW:
999 return ABOVE;
1000 case BELOW_EQUAL:
1001 return ABOVE_EQUAL;
1002 case ABOVE:
1003 return BELOW;
1004 case ABOVE_EQUAL:
1005 return BELOW_EQUAL;
1006 default:
1007 UNIMPLEMENTED();
1008 return EQUAL;
1009 }
1010}
1011
1012static void EmitBranchOnCondition(
1013 FlowGraphCompiler* compiler,
1014 Condition true_condition,
1015 BranchLabels labels,
1016 compiler::Assembler::JumpDistance jump_distance =
1018 if (labels.fall_through == labels.false_label) {
1019 // If the next block is the false successor, fall through to it.
1020 __ j(true_condition, labels.true_label, jump_distance);
1021 } else {
1022 // If the next block is not the false successor, branch to it.
1023 Condition false_condition = InvertCondition(true_condition);
1024 __ j(false_condition, labels.false_label, jump_distance);
1025
1026 // Fall through or jump to the true successor.
1027 if (labels.fall_through != labels.true_label) {
1028 __ jmp(labels.true_label, jump_distance);
1029 }
1030 }
1031}
1032
1033static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
1034 const LocationSummary& locs,
1035 Token::Kind kind) {
1036 Location left = locs.in(0);
1037 Location right = locs.in(1);
1038 ASSERT(!left.IsConstant() || !right.IsConstant());
1039
1040 Condition true_condition = TokenKindToIntCondition(kind);
1041 if (left.IsConstant() || right.IsConstant()) {
1042 // Ensure constant is on the right.
1043 ConstantInstr* constant = nullptr;
1044 if (left.IsConstant()) {
1045 constant = left.constant_instruction();
1046 Location tmp = right;
1047 right = left;
1048 left = tmp;
1049 true_condition = FlipCondition(true_condition);
1050 } else {
1051 constant = right.constant_instruction();
1052 }
1053
1054 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1055 int64_t value;
1056 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1058 __ OBJ(cmp)(left.reg(), compiler::Immediate(value));
1059 } else {
1060 ASSERT(constant->representation() == kTagged);
1061 __ CompareObject(left.reg(), right.constant());
1062 }
1063 } else if (right.IsStackSlot()) {
1065 } else {
1066 __ OBJ(cmp)(left.reg(), right.reg());
1067 }
1068 return true_condition;
1069}
1070
1071static Condition EmitInt64ComparisonOp(FlowGraphCompiler* compiler,
1072 const LocationSummary& locs,
1073 Token::Kind kind) {
1074 Location left = locs.in(0);
1075 Location right = locs.in(1);
1076 ASSERT(!left.IsConstant() || !right.IsConstant());
1077
1078 Condition true_condition = TokenKindToIntCondition(kind);
1079 if (left.IsConstant() || right.IsConstant()) {
1080 // Ensure constant is on the right.
1081 ConstantInstr* constant = nullptr;
1082 if (left.IsConstant()) {
1083 constant = left.constant_instruction();
1084 Location tmp = right;
1085 right = left;
1086 left = tmp;
1087 true_condition = FlipCondition(true_condition);
1088 } else {
1089 constant = right.constant_instruction();
1090 }
1091
1092 if (RepresentationUtils::IsUnboxedInteger(constant->representation())) {
1093 int64_t value;
1094 const bool ok = compiler::HasIntegerValue(constant->value(), &value);
1096 __ cmpq(left.reg(), compiler::Immediate(value));
1097 } else {
1098 UNREACHABLE();
1099 }
1100 } else if (right.IsStackSlot()) {
1102 } else {
1103 __ cmpq(left.reg(), right.reg());
1104 }
1105 return true_condition;
1106}
1107
1108static Condition EmitNullAwareInt64ComparisonOp(FlowGraphCompiler* compiler,
1109 const LocationSummary& locs,
1110 Token::Kind kind,
1111 BranchLabels labels) {
1112 ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
1113 const Register left = locs.in(0).reg();
1114 const Register right = locs.in(1).reg();
1115 const Condition true_condition = TokenKindToIntCondition(kind);
1116 compiler::Label* equal_result =
1117 (true_condition == EQUAL) ? labels.true_label : labels.false_label;
1118 compiler::Label* not_equal_result =
1119 (true_condition == EQUAL) ? labels.false_label : labels.true_label;
1120
1121 // Check if operands have the same value. If they don't, then they could
1122 // be equal only if both of them are Mints with the same value.
1123 __ OBJ(cmp)(left, right);
1124 __ j(EQUAL, equal_result);
1125 __ OBJ(mov)(TMP, left);
1126 __ OBJ (and)(TMP, right);
1127 __ BranchIfSmi(TMP, not_equal_result);
1128 __ CompareClassId(left, kMintCid);
1129 __ j(NOT_EQUAL, not_equal_result);
1130 __ CompareClassId(right, kMintCid);
1131 __ j(NOT_EQUAL, not_equal_result);
1132 __ movq(TMP, compiler::FieldAddress(left, Mint::value_offset()));
1133 __ cmpq(TMP, compiler::FieldAddress(right, Mint::value_offset()));
1134 return true_condition;
1135}
1136
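// Maps a comparison token to the condition codes produced by comisd, which
// sets the flags like an unsigned comparison (hence BELOW/ABOVE rather than
// LESS/GREATER).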
1137static Condition TokenKindToDoubleCondition(Token::Kind kind) {
1138 switch (kind) {
1139 case Token::kEQ:
1140 return EQUAL;
1141 case Token::kNE:
1142 return NOT_EQUAL;
1143 case Token::kLT:
1144 return BELOW;
1145 case Token::kGT:
1146 return ABOVE;
1147 case Token::kLTE:
1148 return BELOW_EQUAL;
1149 case Token::kGTE:
1150 return ABOVE_EQUAL;
1151 default:
1152 UNREACHABLE();
1153 return OVERFLOW;
1154 }
1155}
1156
1157static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
1158 const LocationSummary& locs,
1159 Token::Kind kind,
1160 BranchLabels labels) {
1161 XmmRegister left = locs.in(0).fpu_reg();
1162 XmmRegister right = locs.in(1).fpu_reg();
1163
1164 __ comisd(left, right);
1165
1166 Condition true_condition = TokenKindToDoubleCondition(kind);
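  // comisd sets the parity flag if either operand is NaN; in that case every
  // comparison except != must evaluate to false.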
1167 compiler::Label* nan_result =
1168 (true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label;
1169 __ j(PARITY_EVEN, nan_result);
1170 return true_condition;
1171}
1172
1173Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1174 BranchLabels labels) {
1175 if (is_null_aware()) {
1176 ASSERT(operation_cid() == kMintCid);
1177 return EmitNullAwareInt64ComparisonOp(compiler, *locs(), kind(), labels);
1178 }
1179 if (operation_cid() == kSmiCid) {
1180 return EmitSmiComparisonOp(compiler, *locs(), kind());
1181 } else if (operation_cid() == kMintCid || operation_cid() == kIntegerCid) {
1182 return EmitInt64ComparisonOp(compiler, *locs(), kind());
1183 } else {
1184 ASSERT(operation_cid() == kDoubleCid);
1185 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
1186 }
1187}
1188
1189void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1190 compiler::Label is_true, is_false;
1191 BranchLabels labels = {&is_true, &is_false, &is_false};
1192 Condition true_condition = EmitComparisonCode(compiler, labels);
1193
1194 Register result = locs()->out(0).reg();
1195 if (true_condition != kInvalidCondition) {
1196 EmitBranchOnCondition(compiler, true_condition, labels,
1198 }
1199 // Note: We use branches instead of setcc or cmov even when the branch labels
1200 // are otherwise unused, as this runs faster for the x86 processors tested on
1201 // our benchmarking server.
1202 compiler::Label done;
1203 __ Bind(&is_false);
1204 __ LoadObject(result, Bool::False());
1206 __ Bind(&is_true);
1207 __ LoadObject(result, Bool::True());
1208 __ Bind(&done);
1209}
1210
1211void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
1212 BranchInstr* branch) {
1213 BranchLabels labels = compiler->CreateBranchLabels(branch);
1214 Condition true_condition = EmitComparisonCode(compiler, labels);
1215 if (true_condition != kInvalidCondition) {
1216 EmitBranchOnCondition(compiler, true_condition, labels);
1217 }
1218}
1219
1220LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
1221 const intptr_t kNumInputs = 2;
1222 const intptr_t kNumTemps = 0;
1223 LocationSummary* locs = new (zone)
1224 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1225 locs->set_in(0, Location::RequiresRegister());
1226 // Only one input can be a constant operand. The case of two constant
1227 // operands should be handled by constant propagation.
1228 locs->set_in(1, LocationRegisterOrConstant(right()));
1229 return locs;
1230}
1231
1232Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1233 BranchLabels labels) {
1234 Register left_reg = locs()->in(0).reg();
1235 Location right = locs()->in(1);
1236 if (right.IsConstant()) {
1237 ASSERT(right.constant().IsSmi());
1238 const int64_t imm = Smi::RawValue(Smi::Cast(right.constant()).Value());
1239 __ TestImmediate(left_reg, compiler::Immediate(imm),
1241 } else {
1242 __ OBJ(test)(left_reg, right.reg());
1243 }
1244 Condition true_condition = (kind() == Token::kNE) ? NOT_ZERO : ZERO;
1245 return true_condition;
1246}
1247
1248LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
1249 bool opt) const {
1250 const intptr_t kNumInputs = 1;
1251 const intptr_t kNumTemps = 1;
1252 LocationSummary* locs = new (zone)
1253 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1254 locs->set_in(0, Location::RequiresRegister());
1255 locs->set_temp(0, Location::RequiresRegister());
1256 locs->set_out(0, Location::RequiresRegister());
1257 return locs;
1258}
1259
1260Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1261 BranchLabels labels) {
1262 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1263 Register val_reg = locs()->in(0).reg();
1264 Register cid_reg = locs()->temp(0).reg();
1265
1266 compiler::Label* deopt =
1267 CanDeoptimize()
1268 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1269 : nullptr;
1270
1271 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
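  // cid_results() is a flat list of (cid, result) pairs; the first pair always
  // describes kSmiCid (asserted below).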
1272 const ZoneGrowableArray<intptr_t>& data = cid_results();
1273 ASSERT(data[0] == kSmiCid);
1274 bool result = data[1] == true_result;
1275 __ testq(val_reg, compiler::Immediate(kSmiTagMask));
1276 __ j(ZERO, result ? labels.true_label : labels.false_label);
1277 __ LoadClassId(cid_reg, val_reg);
1278 for (intptr_t i = 2; i < data.length(); i += 2) {
1279 const intptr_t test_cid = data[i];
1280 ASSERT(test_cid != kSmiCid);
1281 result = data[i + 1] == true_result;
1282 __ cmpq(cid_reg, compiler::Immediate(test_cid));
1283 __ j(EQUAL, result ? labels.true_label : labels.false_label);
1284 }
1285 // No match found: deoptimize or take the default action.
1286 if (deopt == nullptr) {
1287 // If the cid is not in the list, jump to the opposite label from the cids
1288 // that are in the list. These must all be the same (see asserts in the
1289 // constructor).
1290 compiler::Label* target = result ? labels.false_label : labels.true_label;
1291 if (target != labels.fall_through) {
1292 __ jmp(target);
1293 }
1294 } else {
1295 __ jmp(deopt);
1296 }
1297 // Dummy result, as this method already did the jump; there's no need
1298 // for the caller to branch on a condition.
1299 return kInvalidCondition;
1300}
1301
1302LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1303 bool opt) const {
1304 const intptr_t kNumInputs = 2;
1305 const intptr_t kNumTemps = 0;
1306 if (operation_cid() == kDoubleCid) {
1307 LocationSummary* summary = new (zone)
1308 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1309 summary->set_in(0, Location::RequiresFpuRegister());
1310 summary->set_in(1, Location::RequiresFpuRegister());
1311 summary->set_out(0, Location::RequiresRegister());
1312 return summary;
1313 }
1314 if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
1315 LocationSummary* summary = new (zone)
1316 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1317 summary->set_in(0, LocationRegisterOrConstant(left()));
1318 // Only one input can be a constant operand. The case of two constant
1319 // operands should be handled by constant propagation.
1320 summary->set_in(1, summary->in(0).IsConstant()
1323 summary->set_out(0, Location::RequiresRegister());
1324 return summary;
1325 }
1326 UNREACHABLE();
1327 return nullptr;
1328}
1329
1330Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1331 BranchLabels labels) {
1332 if (operation_cid() == kSmiCid) {
1333 return EmitSmiComparisonOp(compiler, *locs(), kind());
1334 } else if (operation_cid() == kMintCid) {
1335 return EmitInt64ComparisonOp(compiler, *locs(), kind());
1336 } else {
1337 ASSERT(operation_cid() == kDoubleCid);
1338 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
1339 }
1340}
1341
1342void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1343 SetupNative();
1344 Register result = locs()->out(0).reg();
1345 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1346
1347 // Pass a pointer to the first argument in R13 (we avoid using RAX here to
1348 // simplify the stub code that will call native code).
1349 __ leaq(R13, compiler::Address(RSP, (ArgumentCount() - 1) * kWordSize));
1350
1351 __ LoadImmediate(R10, compiler::Immediate(argc_tag));
1352 const Code* stub;
1353 if (link_lazily()) {
1354 stub = &StubCode::CallBootstrapNative();
1355 compiler::ExternalLabel label(NativeEntry::LinkNativeCallEntry());
1356 __ LoadNativeEntry(RBX, &label,
1358 compiler->GeneratePatchableCall(
1359 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1361 } else {
1362 if (is_bootstrap_native()) {
1363 stub = &StubCode::CallBootstrapNative();
1364 } else if (is_auto_scope()) {
1365 stub = &StubCode::CallAutoScopeNative();
1366 } else {
1367 stub = &StubCode::CallNoScopeNative();
1368 }
1369 const compiler::ExternalLabel label(
1370 reinterpret_cast<uword>(native_c_function()));
1371 __ LoadNativeEntry(RBX, &label,
1373 // We can never lazy-deopt here because natives are never optimized.
1374 ASSERT(!compiler->is_optimizing());
1375 compiler->GenerateNonLazyDeoptableStubCall(
1376 source(), *stub, UntaggedPcDescriptors::kOther, locs(),
1378 }
1379 __ LoadFromOffset(result, RSP, 0);
1380 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1381}
1382
1383#define R(r) (1 << r)
1384
1385LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1386 bool is_optimizing) const {
1387 // Use R10 as a temp register. We can't use RDI, RSI, RDX, R8, or R9 as they are
1388 // argument registers, and R11 is TMP.
1389 return MakeLocationSummaryInternal(
1390 zone, is_optimizing,
1393}
1394
1395#undef R
1396
1397void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1398 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1399
1400 // The temps are indexed according to their register number.
1401 // For regular calls, this holds the FP for rebasing the original locations
1402 // during EmitParamMoves.
1403 const Register saved_fp = locs()->temp(0).reg();
1404 const Register temp = locs()->temp(1).reg();
1405 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1406 // the call.
1407 // Note: R12 doubles as CODE_REG, which gets clobbered during frame setup in
1408 // regular calls.
1409 const Register saved_sp = locs()->temp(2).reg();
1410
1411 // Ensure these are callee-saved registers and are preserved across the call.
1412 ASSERT(IsCalleeSavedRegister(saved_sp));
1413 ASSERT(IsCalleeSavedRegister(saved_fp));
1414 // Other temps don't need to be preserved.
1415
1416 if (is_leaf_) {
1417 __ movq(saved_sp, SPREG);
1418 } else {
1419 __ movq(saved_fp, FPREG);
1420 // Make space for the return address.
1421 __ pushq(compiler::Immediate(0));
1422
1423 // We need to create a dummy "exit frame". It will share the same pool
1424 // pointer but have a null code object.
1425 __ LoadObject(CODE_REG, Code::null_object());
1426 __ set_constant_pool_allowed(false);
1427 __ EnterDartFrame(0, PP);
1428 }
1429
1430 // Reserve space for the arguments that go on the stack (if any), then align.
1431 intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
1432 __ ReserveAlignedFrameSpace(stack_space);
1433#if defined(USING_MEMORY_SANITIZER)
1434 {
1435 RegisterSet kVolatileRegisterSet(CallingConventions::kVolatileCpuRegisters,
1437 __ movq(temp, RSP);
1438 __ PushRegisters(kVolatileRegisterSet);
1439
1440 // Unpoison everything from SP to FP: this covers both space we have
1441 // reserved for outgoing arguments and the spills which might have
1442 // been generated by the register allocator. Some of these spill slots
1443 // can be used as handles passed down to the runtime.
1444 __ movq(RAX, is_leaf_ ? FPREG : saved_fp);
1445 __ subq(RAX, temp);
1446 __ MsanUnpoison(temp, RAX);
1447
1448 // Incoming Dart arguments to this trampoline are potentially used as local
1449 // handles.
1450 __ MsanUnpoison(is_leaf_ ? FPREG : saved_fp,
1452
1453 // Outgoing arguments passed by register to the foreign function.
1454 __ LoadImmediate(CallingConventions::kArg1Reg, InputCount());
1455 __ CallCFunction(compiler::Address(
1456 THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
1457
1458 __ PopRegisters(kVolatileRegisterSet);
1459 }
1460#endif
1461
1462 if (is_leaf_) {
1463 EmitParamMoves(compiler, FPREG, saved_fp, TMP);
1464 } else {
1465 EmitParamMoves(compiler, saved_fp, saved_sp, TMP);
1466 }
1467
1469 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1470 }
1471
1472 if (is_leaf_) {
1473#if !defined(PRODUCT)
1474 // Set the thread object's top_exit_frame_info and VMTag to enable the
1475 // profiler to determine that the thread is no longer executing Dart code.
1476 __ movq(compiler::Address(
1477 THR, compiler::target::Thread::top_exit_frame_info_offset()),
1478 FPREG);
1479 __ movq(compiler::Assembler::VMTagAddress(), target_address);
1480#endif
1481
1482 if (marshaller_.contains_varargs() &&
1484 // TODO(http://dartbug.com/38578): Use the number of used FPU registers.
1487 }
1488 __ CallCFunction(target_address, /*restore_rsp=*/true);
1489
1490#if !defined(PRODUCT)
1492 compiler::Immediate(compiler::target::Thread::vm_tag_dart_id()));
1493 __ movq(compiler::Address(
1494 THR, compiler::target::Thread::top_exit_frame_info_offset()),
1495 compiler::Immediate(0));
1496#endif
1497 } else {
1498 // We need to copy a dummy return address up into the dummy stack frame so
1499 // the stack walker will know which safepoint to use. RIP points to the
1500 // *next* instruction, so 'AddressRIPRelative' loads the address of the
1501 // following 'movq'.
1503 compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
1504 UntaggedPcDescriptors::Kind::kOther, locs(),
1505 env());
1506 __ movq(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize),
1507 temp);
1508
1510 // Update information in the thread object and enter a safepoint.
1511 __ movq(temp, compiler::Immediate(
1512 compiler::target::Thread::exit_through_ffi()));
1513
1514 __ TransitionGeneratedToNative(target_address, FPREG, temp,
1515 /*enter_safepoint=*/true);
1516
1517 if (marshaller_.contains_varargs() &&
1520 }
1521 __ CallCFunction(target_address, /*restore_rsp=*/true);
1522
1523 // Update information in the thread object and leave the safepoint.
1524 __ TransitionNativeToGenerated(/*leave_safepoint=*/true);
1525 } else {
1526 // We cannot trust that this code will be executable within a safepoint.
1527 // Therefore we delegate the responsibility of entering/exiting the
1528 // safepoint to a stub which is in the VM isolate's heap, which will never
1529 // lose execute permission.
1530 __ movq(temp,
1531 compiler::Address(
1532 THR, compiler::target::Thread::
1533 call_native_through_safepoint_entry_point_offset()));
1534
1535 // Calls RBX within a safepoint. RBX and R12 are clobbered.
1536 __ movq(RBX, target_address);
1537 if (marshaller_.contains_varargs() &&
1540 }
1541 __ call(temp);
1542 }
1543
1544 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1545 __ Comment("Check Dart_Handle for Error.");
1546 compiler::Label not_error;
1547 __ movq(temp,
1548 compiler::Address(CallingConventions::kReturnReg,
1549 compiler::target::LocalHandle::ptr_offset()));
1550 __ BranchIfSmi(temp, &not_error);
1551 __ LoadClassId(temp, temp);
1552 __ RangeCheck(temp, kNoRegister, kFirstErrorCid, kLastErrorCid,
1554
1555 // Slow path: use the stub to propagate the error, to save on code size.
1556 __ Comment("Slow path: call Dart_PropagateError through stub.");
1557 __ movq(temp,
1558 compiler::Address(
1559 THR, compiler::target::Thread::
1560 call_native_through_safepoint_entry_point_offset()));
1561 __ movq(RBX, compiler::Address(
1562 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1564 __ call(temp);
1565#if defined(DEBUG)
1566 // We should never return with normal control flow from this.
1567 __ int3();
1568#endif
1569
1570 __ Bind(&not_error);
1571 }
1572 }
1573
1574 // Pass the `saved_fp` register as a temp to clobber since we're done with it.
1575 EmitReturnMoves(compiler, temp, saved_fp);
1576
1577 if (is_leaf_) {
1578 // Restore the pre-aligned SP.
1579 __ movq(SPREG, saved_sp);
1580 } else {
1581 __ LeaveDartFrame();
1582 // Restore the global object pool after returning from runtime (old space is
1583 // moving, so the GOP could have been relocated).
1584 if (FLAG_precompiled_mode) {
1585 __ movq(PP, compiler::Address(THR, Thread::global_object_pool_offset()));
1586 }
1587 __ set_constant_pool_allowed(true);
1588
1589 // Instead of returning to the "fake" return address, we just pop it.
1590 __ popq(temp);
1591 }
1592}
1593
1594// Keep in sync with NativeReturnInstr::EmitNativeCode.
1595void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1596 __ Bind(compiler->GetJumpLabel(this));
1597
1598 // Create a dummy frame holding the pushed arguments. This simplifies
1599 // NativeReturnInstr::EmitNativeCode.
1600 __ EnterFrame(0);
1601
1602#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1603#error Unimplemented
1604#endif
1605
1606 // Save the argument registers, in reverse order.
1607 SaveArguments(compiler);
1608
1609 // Enter the entry frame. Push a dummy return address for consistency with
1610 // EnterFrame on ARM(64). NativeParameterInstr expects this frame to have size
1611 // -exit_link_slot_from_entry_fp, verified below.
1612 __ PushImmediate(compiler::Immediate(0));
1613 __ EnterFrame(0);
1614
1615 // Save a space for the code object.
1616 __ PushImmediate(compiler::Immediate(0));
1617
1618 // InvokeDartCodeStub saves the arguments descriptor here. We don't have one,
1619 // but we need to follow the same frame layout for the stack walker.
1620 __ PushImmediate(compiler::Immediate(0));
1621
1622 // Save ABI callee-saved registers.
1623 __ PushRegisters(kCalleeSaveRegistersSet);
1624
1625 // Save the current VMTag on the stack.
1627 __ pushq(RAX);
1628
1629 // Save top resource.
1630 __ pushq(
1631 compiler::Address(THR, compiler::target::Thread::top_resource_offset()));
1632 __ movq(
1633 compiler::Address(THR, compiler::target::Thread::top_resource_offset()),
1634 compiler::Immediate(0));
1635
1636 __ pushq(compiler::Address(
1637 THR, compiler::target::Thread::exit_through_ffi_offset()));
1638
1639 // Save top exit frame info. Stack walker expects it to be here.
1640 __ pushq(compiler::Address(
1641 THR, compiler::target::Thread::top_exit_frame_info_offset()));
1642
1643 // In debug mode, verify that we've pushed the top exit frame info at the
1644 // correct offset from FP.
1645 __ EmitEntryFrameVerification();
1646
1647 // The callback trampoline (caller) has already left the safepoint for us.
1648 __ TransitionNativeToGenerated(/*exit_safepoint=*/false);
1649
1650 // Load the code object.
1651 const Function& target_function = marshaller_.dart_signature();
1652 const intptr_t callback_id = target_function.FfiCallbackId();
1653 __ movq(RAX, compiler::Address(
1654 THR, compiler::target::Thread::isolate_group_offset()));
1655 __ movq(RAX, compiler::Address(
1656 RAX, compiler::target::IsolateGroup::object_store_offset()));
1657 __ movq(RAX,
1658 compiler::Address(
1659 RAX, compiler::target::ObjectStore::ffi_callback_code_offset()));
1660 __ LoadCompressed(
1661 RAX, compiler::FieldAddress(
1662 RAX, compiler::target::GrowableObjectArray::data_offset()));
1663 __ LoadCompressed(
1664 CODE_REG,
1665 compiler::FieldAddress(
1666 RAX, compiler::target::Array::data_offset() +
1667 callback_id * compiler::target::kCompressedWordSize));
1668
1669 // Put the code object in the reserved slot.
1670 __ movq(compiler::Address(FPREG,
1671 kPcMarkerSlotFromFp * compiler::target::kWordSize),
1672 CODE_REG);
1673
1674 if (FLAG_precompiled_mode) {
1675 __ movq(PP,
1676 compiler::Address(
1677 THR, compiler::target::Thread::global_object_pool_offset()));
1678 } else {
1679 __ xorq(PP, PP); // GC-safe value into PP.
1680 }
1681
1682 // Load a GC-safe value for arguments descriptor (unused but tagged).
1684
1685 // Push a dummy return address which suggests that we are inside of
1686 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1687 __ movq(RAX,
1688 compiler::Address(
1689 THR, compiler::target::Thread::invoke_dart_code_stub_offset()));
1690 __ pushq(compiler::FieldAddress(
1691 RAX, compiler::target::Code::entry_point_offset()));
1692
1693 // Continue with Dart frame setup.
1694 FunctionEntryInstr::EmitNativeCode(compiler);
1695}
1696
1697#define R(r) (1 << r)
1698
1699LocationSummary* LeafRuntimeCallInstr::MakeLocationSummary(
1700 Zone* zone,
1701 bool is_optimizing) const {
1702 constexpr Register saved_fp = CallingConventions::kSecondNonArgumentRegister;
1703 return MakeLocationSummaryInternal(zone, (R(saved_fp)));
1704}
1705
1706#undef R
1707
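// Note on the sequence below: a C frame large enough for the native calling
// convention is entered, the arguments are moved into place, the VM tag is set
// to the target address for the duration of the call, and the Dart tag is
// restored before the frame is left.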
1708void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1709 const Register saved_fp = locs()->temp(0).reg();
1710 const Register temp0 = TMP;
1711
1712 // TODO(http://dartbug.com/47778): If we knew whether the stack was aligned
1713 // at this point, we could omit having a frame.
1714 __ MoveRegister(saved_fp, FPREG);
1715
1716 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1717 __ EnterCFrame(frame_space);
1718
1719 EmitParamMoves(compiler, saved_fp, temp0);
1720 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1721 __ movq(compiler::Assembler::VMTagAddress(), target_address);
1722 __ CallCFunction(target_address);
1723 __ movq(compiler::Assembler::VMTagAddress(),
1724 compiler::Immediate(VMTag::kDartTagId));
1725
1726 __ LeaveCFrame();
1727}
1728
1729LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
1730 Zone* zone,
1731 bool opt) const {
1732 const intptr_t kNumInputs = 1;
1733 // TODO(fschneider): Allow immediate operands for the char code.
1734 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1735 LocationSummary::kNoCall);
1736}
1737
1738void OneByteStringFromCharCodeInstr::EmitNativeCode(
1739 FlowGraphCompiler* compiler) {
1740 ASSERT(compiler->is_optimizing());
1741 Register char_code = locs()->in(0).reg();
1742 Register result = locs()->out(0).reg();
1743
1744 // Note: we don't bother to ensure char_code is a writable input because any
1745 // other instructions using it must also not rely on the upper bits when
1746 // compressed.
1747 __ ExtendNonNegativeSmi(char_code);
1748 __ movq(result,
1749 compiler::Address(THR, Thread::predefined_symbols_address_offset()));
1750 __ movq(result,
1751 compiler::Address(result, char_code,
1752 TIMES_HALF_WORD_SIZE, // Char code is a smi.
1753 Symbols::kNullCharCodeSymbolOffset * kWordSize));
1754}
1755
1756LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1757 bool opt) const {
1758 const intptr_t kNumInputs = 1;
1759 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1760 LocationSummary::kNoCall);
1761}
1762
1763void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1764 ASSERT(cid_ == kOneByteStringCid);
1765 Register str = locs()->in(0).reg();
1766 Register result = locs()->out(0).reg();
1767 compiler::Label is_one, done;
1768 __ LoadCompressedSmi(result,
1769 compiler::FieldAddress(str, String::length_offset()));
1770 __ cmpq(result, compiler::Immediate(Smi::RawValue(1)));
1771 __ j(EQUAL, &is_one, compiler::Assembler::kNearJump);
1772 __ movq(result, compiler::Immediate(Smi::RawValue(-1)));
1773 __ jmp(&done);
1774 __ Bind(&is_one);
1775 __ movzxb(result, compiler::FieldAddress(str, OneByteString::data_offset()));
1776 __ SmiTag(result);
1777 __ Bind(&done);
1778}
1779
1780LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1781 bool opt) const {
1782 const intptr_t kNumInputs = 5;
1783 const intptr_t kNumTemps = 1;
1784 LocationSummary* summary = new (zone)
1785 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1786 summary->set_in(0, Location::Any()); // decoder
1787 summary->set_in(1, Location::WritableRegister()); // bytes
1788 summary->set_in(2, Location::WritableRegister()); // start
1789 summary->set_in(3, Location::WritableRegister()); // end
1790 summary->set_in(4, Location::RequiresRegister()); // table
1791 summary->set_temp(0, Location::RequiresRegister());
1792 summary->set_out(0, Location::RequiresRegister());
1793 return summary;
1794}
1795
1796void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1797 const Register bytes_reg = locs()->in(1).reg();
1798 const Register start_reg = locs()->in(2).reg();
1799 const Register end_reg = locs()->in(3).reg();
1800 const Register table_reg = locs()->in(4).reg();
1801 const Register size_reg = locs()->out(0).reg();
1802
1803 const Register bytes_ptr_reg = start_reg;
1804 const Register bytes_end_reg = end_reg;
1805 const Register bytes_end_minus_16_reg = bytes_reg;
1806 const Register flags_reg = locs()->temp(0).reg();
1807 const Register temp_reg = TMP;
1808 const XmmRegister vector_reg = FpuTMP;
1809
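 // Each byte loaded from the scan table encodes the character's size
 // contribution in its low two bits (kSizeMask) and the scan flags in the
 // bits above (kFlagsMask).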
1810 const intptr_t kSizeMask = 0x03;
1811 const intptr_t kFlagsMask = 0x3C;
1812
1813 compiler::Label scan_ascii, ascii_loop, ascii_loop_in, nonascii_loop;
1814 compiler::Label rest, rest_loop, rest_loop_in, done;
1815
1816 // Address of input bytes.
1817 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
1818
1819 // Pointers to start, end and end-16.
1820 __ leaq(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
1821 __ leaq(bytes_end_reg, compiler::Address(bytes_reg, end_reg, TIMES_1, 0));
1822 __ leaq(bytes_end_minus_16_reg, compiler::Address(bytes_end_reg, -16));
1823
1824 // Initialize size and flags.
1825 __ xorq(size_reg, size_reg);
1826 __ xorq(flags_reg, flags_reg);
1827
1828 __ jmp(&scan_ascii, compiler::Assembler::kNearJump);
1829
1830 // Loop scanning through ASCII bytes one 16-byte vector at a time.
1831 // While scanning, the size register contains the size as it was at the start
1832 // of the current block of ASCII bytes, minus the address of the start of the
1833 // block. After the block, the end address of the block is added to update the
1834 // size to include the bytes in the block.
1835 __ Bind(&ascii_loop);
1836 __ addq(bytes_ptr_reg, compiler::Immediate(16));
1837 __ Bind(&ascii_loop_in);
1838
1839 // Exit the vectorized loop when there are fewer than 16 bytes left.
1840 __ cmpq(bytes_ptr_reg, bytes_end_minus_16_reg);
1841 __ j(UNSIGNED_GREATER, &rest, compiler::Assembler::kNearJump);
1842
1843 // Find next non-ASCII byte within the next 16 bytes.
1844 // Note: In principle, we should use MOVDQU here, since the loaded value is
1845 // used as input to an integer instruction. In practice, according to Agner
1846 // Fog, there is no penalty for using the wrong kind of load.
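 // pmovmskb collects the sign bits of the 16 loaded bytes: a zero mask means
 // the whole vector is ASCII, otherwise bsfq yields the offset of the first
 // non-ASCII byte.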
1847 __ movups(vector_reg, compiler::Address(bytes_ptr_reg, 0));
1848 __ pmovmskb(temp_reg, vector_reg);
1849 __ bsfq(temp_reg, temp_reg);
1850 __ j(EQUAL, &ascii_loop, compiler::Assembler::kNearJump);
1851
1852 // Point to non-ASCII byte and update size.
1853 __ addq(bytes_ptr_reg, temp_reg);
1854 __ addq(size_reg, bytes_ptr_reg);
1855
1856 // Read first non-ASCII byte.
1857 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1858
1859 // Loop over block of non-ASCII bytes.
1860 __ Bind(&nonascii_loop);
1861 __ addq(bytes_ptr_reg, compiler::Immediate(1));
1862
1863 // Update size and flags based on byte value.
1864 __ movzxb(temp_reg, compiler::FieldAddress(
1865 table_reg, temp_reg, TIMES_1,
1866 compiler::target::OneByteString::data_offset()));
1867 __ orq(flags_reg, temp_reg);
1868 __ andq(temp_reg, compiler::Immediate(kSizeMask));
1869 __ addq(size_reg, temp_reg);
1870
1871 // Stop if end is reached.
1872 __ cmpq(bytes_ptr_reg, bytes_end_reg);
1873 __ j(UNSIGNED_GREATER_EQUAL, &done, compiler::Assembler::kNearJump);
1874
1875 // Go to ASCII scan if next byte is ASCII, otherwise loop.
1876 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1877 __ testq(temp_reg, compiler::Immediate(0x80));
1878 __ j(NOT_EQUAL, &nonascii_loop, compiler::Assembler::kNearJump);
1879
1880 // Enter the ASCII scanning loop.
1881 __ Bind(&scan_ascii);
1882 __ subq(size_reg, bytes_ptr_reg);
1883 __ jmp(&ascii_loop_in);
1884
1885 // Fewer than 16 bytes left. Process the remaining bytes individually.
1886 __ Bind(&rest);
1887
1888 // Update size after ASCII scanning loop.
1889 __ addq(size_reg, bytes_ptr_reg);
1890 __ jmp(&rest_loop_in, compiler::Assembler::kNearJump);
1891
1892 __ Bind(&rest_loop);
1893
1894 // Read byte and increment pointer.
1895 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1896 __ addq(bytes_ptr_reg, compiler::Immediate(1));
1897
1898 // Update size and flags based on byte value.
1899 __ movzxb(temp_reg, compiler::FieldAddress(
1900 table_reg, temp_reg, TIMES_1,
1901 compiler::target::OneByteString::data_offset()));
1902 __ orq(flags_reg, temp_reg);
1903 __ andq(temp_reg, compiler::Immediate(kSizeMask));
1904 __ addq(size_reg, temp_reg);
1905
1906 // Stop if end is reached.
1907 __ Bind(&rest_loop_in);
1908 __ cmpq(bytes_ptr_reg, bytes_end_reg);
1909 __ j(UNSIGNED_LESS, &rest_loop, compiler::Assembler::kNearJump);
1910 __ Bind(&done);
1911
1912 // Write flags to field.
1913 __ andq(flags_reg, compiler::Immediate(kFlagsMask));
1914 if (!IsScanFlagsUnboxed()) {
1915 __ SmiTag(flags_reg);
1916 }
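 // The decoder is allowed to be in any location (including a stack slot), so
 // it may need to be reloaded into TMP before updating its scan-flags field.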
1917 Register decoder_reg;
1918 const Location decoder_location = locs()->in(0);
1919 if (decoder_location.IsStackSlot()) {
1920 __ movq(temp_reg, LocationToStackSlotAddress(decoder_location));
1921 decoder_reg = temp_reg;
1922 } else {
1923 decoder_reg = decoder_location.reg();
1924 }
1925 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1926 if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
1927 __ OBJ(or)(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
1928 flags_reg);
1929 } else {
1930 __ orq(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
1931 flags_reg);
1932 }
1933}
1934
1935LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
1936 bool opt) const {
1937 // The compiler must optimize any function that includes a LoadIndexed
1938 // instruction that uses typed data cids, since extracting the payload address
1939 // from views is done in a compiler pass after all code motion has happened.
1940 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
1941
1942 const intptr_t kNumInputs = 2;
1943 const intptr_t kNumTemps = 0;
1944 LocationSummary* locs = new (zone)
1945 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1946 locs->set_in(kArrayPos, Location::RequiresRegister());
1947 // For tagged index with index_scale=1 as well as untagged index with
1948 // index_scale=16 we need a writable register due to addressing mode
1949 // restrictions on X64.
1950 const bool need_writable_index_register =
1951 (index_scale() == 1 && !index_unboxed_) ||
1952 (index_scale() == 16 && index_unboxed_);
1953 const bool can_be_constant =
1954 index()->BindsToConstant() &&
1955 compiler::Assembler::AddressCanHoldConstantIndex(
1956 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
1957 locs->set_in(
1958 kIndexPos,
1959 can_be_constant
1960 ? Location::Constant(index()->definition()->AsConstant())
1961 : (need_writable_index_register ? Location::WritableRegister()
1962 : Location::RequiresRegister()));
1963 auto const rep =
1964 RepresentationUtils::RepresentationOfArrayElement(class_id());
1965 if (RepresentationUtils::IsUnboxedInteger(rep)) {
1966 locs->set_out(0, Location::RequiresRegister());
1967 } else if (RepresentationUtils::IsUnboxed(rep)) {
1968 locs->set_out(0, Location::RequiresFpuRegister());
1969 } else {
1970 locs->set_out(0, Location::RequiresRegister());
1971 }
1972 return locs;
1973}
1974
1975void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1976 // The array register points to the backing store for external arrays.
1977 const Register array = locs()->in(kArrayPos).reg();
1978 const Location index = locs()->in(kIndexPos);
1979
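 // Normalize the index so that the scale fits an x64 addressing mode: untag
 // for scale 1, re-tag for scale 16 (TIMES_16 is not encodable), and otherwise
 // sign-extend the Smi when pointers are compressed.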
1980 bool index_unboxed = index_unboxed_;
1981 if (index.IsRegister()) {
1982 if (index_scale_ == 1 && !index_unboxed) {
1983 __ SmiUntag(index.reg());
1984 index_unboxed = true;
1985 } else if (index_scale_ == 16 && index_unboxed) {
1986 // X64 does not support addressing mode using TIMES_16.
1987 __ SmiTag(index.reg());
1988 index_unboxed = false;
1989 } else if (!index_unboxed) {
1990 // Note: we don't bother to ensure index is a writable input because any
1991 // other instructions using it must also not rely on the upper bits
1992 // when compressed.
1993 __ ExtendNonNegativeSmi(index.reg());
1994 }
1995 } else {
1996 ASSERT(index.IsConstant());
1997 }
1998
1999 compiler::Address element_address =
2000 index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
2001 IsUntagged(), class_id(), index_scale_,
2002 index_unboxed, array, index.reg())
2003 : compiler::Assembler::ElementAddressForIntIndex(
2004 IsUntagged(), class_id(), index_scale_, array,
2005 Smi::Cast(index.constant()).Value());
2006
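 // Dispatch on the element representation: unboxed integers use plain loads,
 // unboxed floats and SIMD values use XMM loads, and tagged elements are
 // loaded as compressed pointers.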
2007 auto const rep =
2008 RepresentationUtils::RepresentationOfArrayElement(class_id());
2009 ASSERT(representation() == Boxing::NativeRepresentation(rep));
2010 if (RepresentationUtils::IsUnboxedInteger(rep)) {
2011 Register result = locs()->out(0).reg();
2012 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
2013 } else if (RepresentationUtils::IsUnboxed(rep)) {
2014 XmmRegister result = locs()->out(0).fpu_reg();
2015 if (rep == kUnboxedFloat) {
2016 // Load single precision float.
2017 __ movss(result, element_address);
2018 } else if (rep == kUnboxedDouble) {
2019 __ movsd(result, element_address);
2020 } else {
2021 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2022 rep == kUnboxedFloat64x2);
2023 __ movups(result, element_address);
2024 }
2025 } else {
2026 ASSERT(rep == kTagged);
2027 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
2028 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
2029 Register result = locs()->out(0).reg();
2030 __ LoadCompressed(result, element_address);
2031 }
2032}
2033
2034LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
2035 bool opt) const {
2036 const intptr_t kNumInputs = 2;
2037 const intptr_t kNumTemps = 0;
2038 LocationSummary* summary = new (zone)
2039 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2040 summary->set_in(0, Location::RequiresRegister());
2041 // The smi index is either untagged (element size == 1), or it is left smi
2042 // tagged (for all element sizes > 1).
2043 summary->set_in(1, index_scale() == 1 ? Location::WritableRegister()
2044 : Location::RequiresRegister());
2045 summary->set_out(0, Location::RequiresRegister());
2046 return summary;
2047}
2048
2049void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2050 // The string register points to the backing store for external strings.
2051 const Register str = locs()->in(0).reg();
2052 const Register index = locs()->in(1).reg();
2053
2054 bool index_unboxed = false;
2055 if ((index_scale() == 1)) {
2056 __ SmiUntag(index);
2057 index_unboxed = true;
2058 } else {
2059 __ ExtendNonNegativeSmi(index);
2060 }
2061 compiler::Address element_address =
2062 compiler::Assembler::ElementAddressForRegIndex(
2063 IsExternal(), class_id(), index_scale(), index_unboxed, str, index);
2064
2065 Register result = locs()->out(0).reg();
2066 switch (class_id()) {
2067 case kOneByteStringCid:
2068 switch (element_count()) {
2069 case 1:
2070 __ movzxb(result, element_address);
2071 break;
2072 case 2:
2073 __ movzxw(result, element_address);
2074 break;
2075 case 4:
2076 __ movl(result, element_address);
2077 break;
2078 default:
2079 UNREACHABLE();
2080 }
2081 ASSERT(can_pack_into_smi());
2082 __ SmiTag(result);
2083 break;
2084 case kTwoByteStringCid:
2085 switch (element_count()) {
2086 case 1:
2087 __ movzxw(result, element_address);
2088 break;
2089 case 2:
2090 __ movl(result, element_address);
2091 break;
2092 default:
2093 UNREACHABLE();
2094 }
2095 ASSERT(can_pack_into_smi());
2096 __ SmiTag(result);
2097 break;
2098 default:
2099 UNREACHABLE();
2100 break;
2101 }
2102}
2103
2104LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
2105 bool opt) const {
2106 // The compiler must optimize any function that includes a StoreIndexed
2107 // instruction that uses typed data cids, since extracting the payload address
2108 // from views is done in a compiler pass after all code motion has happened.
2109 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
2110
2111 const intptr_t kNumInputs = 3;
2112 const intptr_t kNumTemps =
2113 class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 1 : 0;
2114 LocationSummary* locs = new (zone)
2115 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2116 locs->set_in(0, Location::RequiresRegister());
2117 // For tagged index with index_scale=1 as well as untagged index with
2118 // index_scale=16 we need a writable register due to addressing mode
2119 // restrictions on X64.
2120 const bool need_writable_index_register =
2121 (index_scale() == 1 && !index_unboxed_) ||
2122 (index_scale() == 16 && index_unboxed_);
2123 const bool can_be_constant =
2124 index()->BindsToConstant() &&
2125 compiler::Assembler::AddressCanHoldConstantIndex(
2126 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
2127 locs->set_in(
2128 1, can_be_constant
2129 ? Location::Constant(index()->definition()->AsConstant())
2130 : (need_writable_index_register ? Location::WritableRegister()
2131 : Location::RequiresRegister()));
2132 auto const rep =
2133 RepresentationUtils::RepresentationOfArrayElement(class_id());
2134 if (RepresentationUtils::IsUnboxedInteger(rep)) {
2135 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2136 // TODO(fschneider): Add location constraint for byte registers (RAX,
2137 // RBX, RCX, RDX) instead of using a fixed register.
2138 locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), RAX));
2139 } else {
2140 locs->set_in(2, Location::RequiresRegister());
2141 }
2142 } else if (RepresentationUtils::IsUnboxed(rep)) {
2143 // TODO(srdjan): Support Float64 constants.
2144 locs->set_in(2, Location::RequiresFpuRegister());
2145 } else if (class_id() == kArrayCid) {
2146 locs->set_in(2, ShouldEmitStoreBarrier()
2147 ? Location::RegisterLocation(kWriteBarrierValueReg)
2148 : LocationRegisterOrConstant(value()));
2149 if (ShouldEmitStoreBarrier()) {
2150 locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
2151 locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
2152 }
2153 } else {
2154 UNREACHABLE();
2155 }
2156 return locs;
2157}
2158
2159void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2160 // The array register points to the backing store for external arrays.
2161 const Register array = locs()->in(0).reg();
2162 const Location index = locs()->in(1);
2163
2164 bool index_unboxed = index_unboxed_;
2165 if (index.IsRegister()) {
2166 if (index_scale_ == 1 && !index_unboxed) {
2167 __ SmiUntag(index.reg());
2168 index_unboxed = true;
2169 } else if (index_scale_ == 16 && index_unboxed) {
2170 // X64 does not support addressing mode using TIMES_16.
2171 __ SmiTag(index.reg());
2172 index_unboxed = false;
2173 } else if (!index_unboxed) {
2174 // Note: we don't bother to ensure index is a writable input because any
2175 // other instructions using it must also not rely on the upper bits
2176 // when compressed.
2177 __ ExtendNonNegativeSmi(index.reg());
2178 }
2179 } else {
2180 ASSERT(index.IsConstant());
2181 }
2182
2183 compiler::Address element_address =
2184 index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
2185 IsUntagged(), class_id(), index_scale_,
2186 index_unboxed, array, index.reg())
2187 : compiler::Assembler::ElementAddressForIntIndex(
2188 IsUntagged(), class_id(), index_scale_, array,
2189 Smi::Cast(index.constant()).Value());
2190
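 // Dispatch on the element representation: clamped uint8 stores saturate the
 // value to [0, 255], other unboxed values are stored directly, and tagged
 // array stores go through the compressed write barrier when required.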
2191 auto const rep =
2192 RepresentationUtils::RepresentationOfArrayElement(class_id());
2193 ASSERT(representation() == Boxing::NativeRepresentation(rep));
2194 if (IsClampedTypedDataBaseClassId(class_id())) {
2195 ASSERT(rep == kUnboxedUint8);
2196 if (locs()->in(2).IsConstant()) {
2197 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2198 intptr_t value = constant.Value();
2199 // Clamp to 0x0 or 0xFF respectively.
2200 if (value > 0xFF) {
2201 value = 0xFF;
2202 } else if (value < 0) {
2203 value = 0;
2204 }
2205 __ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
2206 } else {
2207 const Register storedValueReg = locs()->in(2).reg();
2208 compiler::Label store_value, store_0xff;
2209 __ CompareImmediate(storedValueReg, compiler::Immediate(0xFF));
2210 __ j(UNSIGNED_LESS_EQUAL, &store_value, compiler::Assembler::kNearJump);
2211 // Clamp to 0x0 or 0xFF respectively.
2212 __ j(GREATER, &store_0xff);
2213 __ xorq(storedValueReg, storedValueReg);
2214 __ jmp(&store_value, compiler::Assembler::kNearJump);
2215 __ Bind(&store_0xff);
2216 __ LoadImmediate(storedValueReg, compiler::Immediate(0xFF));
2217 __ Bind(&store_value);
2218 __ movb(element_address, ByteRegisterOf(storedValueReg));
2219 }
2220 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
2221 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
2222 if (locs()->in(2).IsConstant()) {
2223 const Smi& constant = Smi::Cast(locs()->in(2).constant());
2224 __ movb(element_address,
2225 compiler::Immediate(static_cast<int8_t>(constant.Value())));
2226 } else {
2227 __ movb(element_address, ByteRegisterOf(locs()->in(2).reg()));
2228 }
2229 } else {
2230 Register value = locs()->in(2).reg();
2231 __ Store(value, element_address, RepresentationUtils::OperandSize(rep));
2232 }
2233 } else if (RepresentationUtils::IsUnboxed(rep)) {
2234 if (rep == kUnboxedFloat) {
2235 __ movss(element_address, locs()->in(2).fpu_reg());
2236 } else if (rep == kUnboxedDouble) {
2237 __ movsd(element_address, locs()->in(2).fpu_reg());
2238 } else {
2239 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
2240 rep == kUnboxedFloat64x2);
2241 __ movups(element_address, locs()->in(2).fpu_reg());
2242 }
2243 } else if (class_id() == kArrayCid) {
2244 ASSERT(rep == kTagged);
2245 if (ShouldEmitStoreBarrier()) {
2246 Register value = locs()->in(2).reg();
2247 Register slot = locs()->temp(0).reg();
2248 __ leaq(slot, element_address);
2249 __ StoreCompressedIntoArray(array, slot, value, CanValueBeSmi());
2250 } else if (locs()->in(2).IsConstant()) {
2251 const Object& constant = locs()->in(2).constant();
2252 __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
2253 constant);
2254 } else {
2255 Register value = locs()->in(2).reg();
2256 __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
2257 }
2258 } else {
2259 UNREACHABLE();
2260 }
2261
2262#if defined(USING_MEMORY_SANITIZER)
2263 __ leaq(TMP, element_address);
2264 const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
2265 RepresentationUtils::RepresentationOfArrayElement(class_id()));
2266 __ MsanUnpoison(TMP, length_in_bytes);
2267#endif
2268}
2269
2270LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
2271 bool opt) const {
2272 const intptr_t kNumInputs = 1;
2273
2274 const intptr_t value_cid = value()->Type()->ToCid();
2275 const intptr_t field_cid = field().guarded_cid();
2276
2277 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
2278 const bool needs_value_cid_temp_reg =
2279 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
2280 const bool needs_field_temp_reg = emit_full_guard;
2281
2282 intptr_t num_temps = 0;
2283 if (needs_value_cid_temp_reg) {
2284 num_temps++;
2285 }
2286 if (needs_field_temp_reg) {
2287 num_temps++;
2288 }
2289
2290 LocationSummary* summary = new (zone)
2291 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
2292 summary->set_in(0, Location::RequiresRegister());
2293
2294 for (intptr_t i = 0; i < num_temps; i++) {
2295 summary->set_temp(i, Location::RequiresRegister());
2296 }
2297
2298 return summary;
2299}
2300
2301void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2302 ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
2303 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
2304 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
2305
2306 const intptr_t value_cid = value()->Type()->ToCid();
2307 const intptr_t field_cid = field().guarded_cid();
2308 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2309
2310 if (field_cid == kDynamicCid) {
2311 return; // Nothing to emit.
2312 }
2313
2314 const bool emit_full_guard =
2315 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2316
2317 const bool needs_value_cid_temp_reg =
2318 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
2319
2320 const bool needs_field_temp_reg = emit_full_guard;
2321
2322 const Register value_reg = locs()->in(0).reg();
2323
2324 const Register value_cid_reg =
2325 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2326
2327 const Register field_reg = needs_field_temp_reg
2328 ? locs()->temp(locs()->temp_count() - 1).reg()
2329 : kNoRegister;
2330
2331 compiler::Label ok, fail_label;
2332
2333 compiler::Label* deopt = nullptr;
2334 if (compiler->is_optimizing()) {
2335 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
2336 }
2337
2338 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2339
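 // In optimized code a failed guard deoptimizes; in unoptimized code it falls
 // through to the UpdateFieldCid runtime entry, which widens the field's
 // guarded state instead.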
2340 if (emit_full_guard) {
2341 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2342
2343 compiler::FieldAddress field_cid_operand(field_reg,
2344 Field::guarded_cid_offset());
2345 compiler::FieldAddress field_nullability_operand(
2346 field_reg, Field::is_nullable_offset());
2347
2348 if (value_cid == kDynamicCid) {
2349 LoadValueCid(compiler, value_cid_reg, value_reg);
2350
2351 __ cmpl(value_cid_reg, field_cid_operand);
2352 __ j(EQUAL, &ok);
2353 __ cmpl(value_cid_reg, field_nullability_operand);
2354 } else if (value_cid == kNullCid) {
2355 __ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
2356 } else {
2357 __ cmpl(field_cid_operand, compiler::Immediate(value_cid));
2358 }
2359 __ j(EQUAL, &ok);
2360
2361 // Check if the tracked state of the guarded field can be initialized
2362 // inline. If the field needs length check or requires type arguments and
2363 // class hierarchy processing for exactness tracking then we fall through
2364 // into runtime which is responsible for computing offset of the length
2365 // field based on the class id.
2366 const bool is_complicated_field =
2367 field().needs_length_check() ||
2368 field().static_type_exactness_state().IsUninitialized();
2369 if (!is_complicated_field) {
2370 // Uninitialized field can be handled inline. Check if the
2371 // field is still uninitialized.
2372 __ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
2373 __ j(NOT_EQUAL, fail);
2374
2375 if (value_cid == kDynamicCid) {
2376 __ movl(field_cid_operand, value_cid_reg);
2377 __ movl(field_nullability_operand, value_cid_reg);
2378 } else {
2379 ASSERT(field_reg != kNoRegister);
2380 __ movl(field_cid_operand, compiler::Immediate(value_cid));
2381 __ movl(field_nullability_operand, compiler::Immediate(value_cid));
2382 }
2383
2384 __ jmp(&ok);
2385 }
2386
2387 if (deopt == nullptr) {
2388 __ Bind(fail);
2389
2390 __ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
2391 compiler::Immediate(kDynamicCid));
2392 __ j(EQUAL, &ok);
2393
2394 __ pushq(field_reg);
2395 __ pushq(value_reg);
2396 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2397 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2398 __ Drop(2); // Drop the field and the value.
2399 } else {
2400 __ jmp(fail);
2401 }
2402 } else {
2403 ASSERT(compiler->is_optimizing());
2404 ASSERT(deopt != nullptr);
2405
2406 // Field guard class has been initialized and is known.
2407 if (value_cid == kDynamicCid) {
2408 // Value's class id is not known.
2409 __ testq(value_reg, compiler::Immediate(kSmiTagMask));
2410
2411 if (field_cid != kSmiCid) {
2412 __ j(ZERO, fail);
2413 __ LoadClassId(value_cid_reg, value_reg);
2414 __ CompareImmediate(value_cid_reg, compiler::Immediate(field_cid));
2415 }
2416
2417 if (field().is_nullable() && (field_cid != kNullCid)) {
2418 __ j(EQUAL, &ok);
2419 __ CompareObject(value_reg, Object::null_object());
2420 }
2421
2422 __ j(NOT_EQUAL, fail);
2423 } else if (value_cid == field_cid) {
2424 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2425 // may sometimes produce the situation after the last Canonicalize pass.
2426 } else {
2427 // Both value's and field's class id is known.
2428 ASSERT(value_cid != nullability);
2429 __ jmp(fail);
2430 }
2431 }
2432 __ Bind(&ok);
2433}
2434
2435LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2436 bool opt) const {
2437 const intptr_t kNumInputs = 1;
2438 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2439 const intptr_t kNumTemps = 3;
2440 LocationSummary* summary = new (zone)
2441 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2442 summary->set_in(0, Location::RequiresRegister());
2443 // We need temporaries for field object, length offset and expected length.
2444 summary->set_temp(0, Location::RequiresRegister());
2445 summary->set_temp(1, Location::RequiresRegister());
2446 summary->set_temp(2, Location::RequiresRegister());
2447 return summary;
2448 } else {
2449 LocationSummary* summary = new (zone)
2450 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2451 summary->set_in(0, Location::RequiresRegister());
2452 return summary;
2453 }
2454 UNREACHABLE();
2455}
2456
2457void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2458 if (field().guarded_list_length() == Field::kNoFixedLength) {
2459 return; // Nothing to emit.
2460 }
2461
2462 compiler::Label* deopt =
2463 compiler->is_optimizing()
2464 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2465 : nullptr;
2466
2467 const Register value_reg = locs()->in(0).reg();
2468
2469 if (!compiler->is_optimizing() ||
2470 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2471 const Register field_reg = locs()->temp(0).reg();
2472 const Register offset_reg = locs()->temp(1).reg();
2473 const Register length_reg = locs()->temp(2).reg();
2474
2475 compiler::Label ok;
2476
2477 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2478
2479 __ movsxb(
2480 offset_reg,
2481 compiler::FieldAddress(
2482 field_reg, Field::guarded_list_length_in_object_offset_offset()));
2483 __ LoadCompressedSmi(
2484 length_reg,
2485 compiler::FieldAddress(field_reg, Field::guarded_list_length_offset()));
2486
2487 __ cmpq(offset_reg, compiler::Immediate(0));
2488 __ j(NEGATIVE, &ok);
2489
2490 // Load the length from the value. GuardFieldClass already verified that
2491 // value's class matches guarded class id of the field.
2492 // offset_reg contains the offset already corrected by -kHeapObjectTag, which
2493 // is why we use Address instead of FieldAddress.
2494 __ OBJ(cmp)(length_reg,
2495 compiler::Address(value_reg, offset_reg, TIMES_1, 0));
2496
2497 if (deopt == nullptr) {
2498 __ j(EQUAL, &ok);
2499
2500 __ pushq(field_reg);
2501 __ pushq(value_reg);
2502 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2503 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2504 __ Drop(2); // Drop the field and the value.
2505 } else {
2506 __ j(NOT_EQUAL, deopt);
2507 }
2508
2509 __ Bind(&ok);
2510 } else {
2511 ASSERT(compiler->is_optimizing());
2512 ASSERT(field().guarded_list_length() >= 0);
2513 ASSERT(field().guarded_list_length_in_object_offset() !=
2514 Field::kUnknownLengthOffset);
2515
2516 __ CompareImmediate(
2517 compiler::FieldAddress(value_reg,
2518 field().guarded_list_length_in_object_offset()),
2519 compiler::Immediate(Smi::RawValue(field().guarded_list_length())));
2520 __ j(NOT_EQUAL, deopt);
2521 }
2522}
2523
2524LocationSummary* GuardFieldTypeInstr::MakeLocationSummary(Zone* zone,
2525 bool opt) const {
2526 const intptr_t kNumInputs = 1;
2527 const intptr_t kNumTemps = 1;
2528 LocationSummary* summary = new (zone)
2529 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2530 summary->set_in(0, Location::RequiresRegister());
2531 summary->set_temp(0, Location::RequiresRegister());
2532 return summary;
2533}
2534
2535void GuardFieldTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2536 // Should never emit GuardFieldType for fields that are marked as NotTracking.
2537 ASSERT(field().static_type_exactness_state().IsTracking());
2538 if (!field().static_type_exactness_state().NeedsFieldGuard()) {
2539 // Nothing to do: we only need to perform checks for trivially invariant
2540 // fields. When optimizing, the Canonicalize pass should have removed
2541 // this instruction.
2542 return;
2543 }
2544
2545 compiler::Label* deopt =
2546 compiler->is_optimizing()
2547 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2548 : nullptr;
2549
2550 compiler::Label ok;
2551
2552 const Register value_reg = locs()->in(0).reg();
2553 const Register temp = locs()->temp(0).reg();
2554
2555 // Skip null values for nullable fields.
2556 if (!compiler->is_optimizing() || field().is_nullable()) {
2557 __ CompareObject(value_reg, Object::Handle());
2558 __ j(EQUAL, &ok);
2559 }
2560
2561 // Get the state.
2562 const Field& original =
2563 Field::ZoneHandle(compiler->zone(), field().Original());
2564 __ LoadObject(temp, original);
2565 __ movsxb(temp, compiler::FieldAddress(
2566 temp, Field::static_type_exactness_state_offset()));
2567
2568 if (!compiler->is_optimizing()) {
2569 // Check if the field requires checking (it is in uninitialized or trivially
2570 // exact state).
2571 __ cmpq(temp,
2572 compiler::Immediate(StaticTypeExactnessState::kUninitialized));
2573 __ j(LESS, &ok);
2574 }
2575
2576 compiler::Label call_runtime;
2577 if (field().static_type_exactness_state().IsUninitialized()) {
2578 // Can't initialize the field state inline in optimized code.
2579 __ cmpq(temp,
2580 compiler::Immediate(StaticTypeExactnessState::kUninitialized));
2581 __ j(EQUAL, compiler->is_optimizing() ? deopt : &call_runtime);
2582 }
2583
2584 // At this point temp is known to be type arguments offset in words.
2585 __ movq(temp, compiler::FieldAddress(value_reg, temp,
2586 TIMES_COMPRESSED_WORD_SIZE, 0));
2587 __ CompareObject(
2588 temp,
2589 TypeArguments::ZoneHandle(
2590 compiler->zone(), Type::Cast(AbstractType::Handle(field().type()))
2591 .GetInstanceTypeArguments(compiler->thread())));
2592 if (deopt != nullptr) {
2593 __ j(NOT_EQUAL, deopt);
2594 } else {
2595 __ j(EQUAL, &ok);
2596
2597 __ Bind(&call_runtime);
2598 __ PushObject(original);
2599 __ pushq(value_reg);
2600 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2601 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2602 __ Drop(2);
2603 }
2604
2605 __ Bind(&ok);
2606}
2607
2608LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2609 bool opt) const {
2610 const intptr_t kNumInputs = 1;
2611 const intptr_t kNumTemps = 1;
2612 LocationSummary* locs = new (zone)
2613 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2614 locs->set_in(0, Location::RequiresRegister());
2615 locs->set_temp(0, Location::RequiresRegister());
2616 return locs;
2617}
2618
2619void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2620 Register value = locs()->in(0).reg();
2621 Register temp = locs()->temp(0).reg();
2622
2623 compiler->used_static_fields().Add(&field());
2624
2625 __ movq(temp,
2626 compiler::Address(
2627 THR, compiler::target::Thread::field_table_values_offset()));
2628 // Note: static field ids won't be changed by hot-reload.
2629 __ movq(
2630 compiler::Address(temp, compiler::target::FieldTable::OffsetOf(field())),
2631 value);
2632}
2633
2634LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2635 bool opt) const {
2636 const intptr_t kNumInputs = 3;
2637 const intptr_t kNumTemps = 0;
2638 LocationSummary* summary = new (zone)
2639 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2640 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
2641 summary->set_in(1, Location::RegisterLocation(
2642 TypeTestABI::kInstantiatorTypeArgumentsReg));
2643 summary->set_in(
2644 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
2645 summary->set_out(0, Location::RegisterLocation(RAX));
2646 return summary;
2647}
2648
2649void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2650 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2651 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
2652 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
2653
2654 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2655 ASSERT(locs()->out(0).reg() == RAX);
2656}
2657
2658// TODO(srdjan): In case of constant inputs make CreateArray kNoCall and
2659// use slow path stub.
2660LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2661 bool opt) const {
2662 const intptr_t kNumInputs = 2;
2663 const intptr_t kNumTemps = 0;
2664 LocationSummary* locs = new (zone)
2665 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2666 locs->set_in(kTypeArgumentsPos, Location::RegisterLocation(
2667 AllocateArrayABI::kTypeArgumentsReg));
2668 locs->set_in(kLengthPos,
2669 Location::RegisterLocation(AllocateArrayABI::kLengthReg));
2670 locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
2671 return locs;
2672}
2673
2674// Inlines array allocation for known constant values.
2675static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2676 intptr_t num_elements,
2677 compiler::Label* slow_path,
2678 compiler::Label* done) {
2679 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2680 const intptr_t instance_size = Array::InstanceSize(num_elements);
2681
2682 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2683 compiler::Assembler::kFarJump,
2684 AllocateArrayABI::kResultReg, // instance
2685 RCX, // end address
2686 R13); // temp
2687
2688 // RAX: new object start as a tagged pointer.
2689 // Store the type argument field.
2690 __ StoreCompressedIntoObjectNoBarrier(
2691 AllocateArrayABI::kResultReg,
2692 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2693 Array::type_arguments_offset()),
2694 AllocateArrayABI::kTypeArgumentsReg);
2695
2696 // Set the length field.
2697 __ StoreCompressedIntoObjectNoBarrier(
2698 AllocateArrayABI::kResultReg,
2699 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2700 Array::length_offset()),
2701 AllocateArrayABI::kLengthReg);
2702
2703 // Initialize all array elements to raw_null.
2704 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2705 // RCX: new object end address.
2706 // RDI: iterator which initially points to the start of the variable
2707 // data area to be initialized.
2708 if (num_elements > 0) {
2709 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2710 __ LoadObject(R12, Object::null_object());
2711 __ leaq(RDI, compiler::FieldAddress(AllocateArrayABI::kResultReg,
2712 sizeof(UntaggedArray)));
2713 if (array_size < (kInlineArraySize * kCompressedWordSize)) {
2714 intptr_t current_offset = 0;
2715 while (current_offset < array_size) {
2716 __ StoreCompressedIntoObjectNoBarrier(
2717 AllocateArrayABI::kResultReg,
2718 compiler::Address(RDI, current_offset), R12);
2719 current_offset += kCompressedWordSize;
2720 }
2721 } else {
2722 compiler::Label init_loop;
2723 __ Bind(&init_loop);
2724 __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2725 compiler::Address(RDI, 0), R12);
2726 __ addq(RDI, compiler::Immediate(kCompressedWordSize));
2727 __ cmpq(RDI, RCX);
2728 __ j(BELOW, &init_loop, compiler::Assembler::kNearJump);
2729 }
2730 }
2731 __ jmp(done, compiler::Assembler::kNearJump);
2732}
2733
2734void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2735 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
2736 if (type_usage_info != nullptr) {
2737 const Class& list_class =
2738 Class::Handle(compiler->isolate_group()->class_table()->At(kArrayCid));
2739 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, list_class,
2740 type_arguments()->definition());
2741 }
2742
2743 compiler::Label slow_path, done;
2744 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2745 if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
2746 num_elements()->BindsToConstant() &&
2747 num_elements()->BoundConstant().IsSmi()) {
2748 const intptr_t length =
2749 Smi::Cast(num_elements()->BoundConstant()).Value();
2750 if (Array::IsValidLength(length)) {
2751 InlineArrayAllocation(compiler, length, &slow_path, &done);
2752 }
2753 }
2754 }
2755
2756 __ Bind(&slow_path);
2757 auto object_store = compiler->isolate_group()->object_store();
2758 const auto& allocate_array_stub =
2759 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2760 compiler->GenerateStubCall(source(), allocate_array_stub,
2761 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2762 env());
2763 __ Bind(&done);
2764}
2765
2766LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
2767 Zone* zone,
2768 bool opt) const {
2769 ASSERT(opt);
2770 const intptr_t kNumInputs = 0;
2771 const intptr_t kNumTemps = 2;
2772 LocationSummary* locs = new (zone) LocationSummary(
2773 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2774 locs->set_temp(0, Location::RegisterLocation(R10));
2775 locs->set_temp(1, Location::RegisterLocation(R13));
2776 locs->set_out(0, Location::RegisterLocation(RAX));
2777 return locs;
2778}
2779
2780class AllocateContextSlowPath
2781 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2782 public:
2783 explicit AllocateContextSlowPath(
2784 AllocateUninitializedContextInstr* instruction)
2785 : TemplateSlowPathCode(instruction) {}
2786
2787 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2788 __ Comment("AllocateContextSlowPath");
2789 __ Bind(entry_label());
2790
2791 LocationSummary* locs = instruction()->locs();
2792 locs->live_registers()->Remove(locs->out(0));
2793
2794 compiler->SaveLiveRegisters(locs);
2795
2796 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2797 instruction(), /*num_slow_path_args=*/0);
2798 ASSERT(slow_path_env != nullptr);
2799
2800 auto object_store = compiler->isolate_group()->object_store();
2801 const auto& allocate_context_stub = Code::ZoneHandle(
2802 compiler->zone(), object_store->allocate_context_stub());
2803
2804 __ LoadImmediate(
2805 R10, compiler::Immediate(instruction()->num_context_variables()));
2806 compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
2807 UntaggedPcDescriptors::kOther, locs,
2808 instruction()->deopt_id(), slow_path_env);
2809 ASSERT(instruction()->locs()->out(0).reg() == RAX);
2810
2811 compiler->RestoreLiveRegisters(instruction()->locs());
2812 __ jmp(exit_label());
2813 }
2814};
2815
2816void AllocateUninitializedContextInstr::EmitNativeCode(
2817 FlowGraphCompiler* compiler) {
2818 ASSERT(compiler->is_optimizing());
2819 Register temp = locs()->temp(0).reg();
2820 Register result = locs()->out(0).reg();
2821 // Try allocate the object.
2822 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2823 compiler->AddSlowPathCode(slow_path);
2824 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2825
2826 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2827 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2828 compiler::Assembler::kFarJump,
2829 result, // instance
2830 temp, // end address
2831 locs()->temp(1).reg());
2832
2833 // Set up the number-of-context-variables field.
2834 __ movq(compiler::FieldAddress(result, Context::num_variables_offset()),
2835 compiler::Immediate(num_context_variables()));
2836 } else {
2837 __ Jump(slow_path->entry_label());
2838 }
2839
2840 __ Bind(slow_path->exit_label());
2841}
2842
2843LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
2844 bool opt) const {
2845 const intptr_t kNumInputs = 0;
2846 const intptr_t kNumTemps = 1;
2847 LocationSummary* locs = new (zone)
2848 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2849 locs->set_temp(0, Location::RegisterLocation(R10));
2850 locs->set_out(0, Location::RegisterLocation(RAX));
2851 return locs;
2852}
2853
2854void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2855 ASSERT(locs()->temp(0).reg() == R10);
2856 ASSERT(locs()->out(0).reg() == RAX);
2857
2858 auto object_store = compiler->isolate_group()->object_store();
2859 const auto& allocate_context_stub =
2860 Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
2861
2862 __ LoadImmediate(R10, compiler::Immediate(num_context_variables()));
2863 compiler->GenerateStubCall(source(), allocate_context_stub,
2864 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2865 env());
2866}
2867
2868LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
2869 bool opt) const {
2870 const intptr_t kNumInputs = 1;
2871 const intptr_t kNumTemps = 0;
2872 LocationSummary* locs = new (zone)
2873 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2874 locs->set_in(0, Location::RegisterLocation(R9));
2875 locs->set_out(0, Location::RegisterLocation(RAX));
2876 return locs;
2877}
2878
2879void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2880 ASSERT(locs()->in(0).reg() == R9);
2881 ASSERT(locs()->out(0).reg() == RAX);
2882
2883 auto object_store = compiler->isolate_group()->object_store();
2884 const auto& clone_context_stub =
2885 Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
2886 compiler->GenerateStubCall(source(), clone_context_stub,
2887 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
2888 deopt_id(), env());
2889}
2890
2891LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
2892 bool opt) const {
2893 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
2894}
2895
2896void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2897 __ Bind(compiler->GetJumpLabel(this));
2898 compiler->AddExceptionHandler(this);
2899 if (HasParallelMove()) {
2900 parallel_move()->EmitNativeCode(compiler);
2901 }
2902
2903 // Restore RSP from RBP as we are coming from a throw and the code for
2904 // popping arguments has not been run.
2905 const intptr_t fp_sp_dist =
2906 (compiler::target::frame_layout.first_local_from_fp + 1 -
2907 compiler->StackSize()) *
2908 kWordSize;
2909 ASSERT(fp_sp_dist <= 0);
2910 __ leaq(RSP, compiler::Address(RBP, fp_sp_dist));
2911
2912 if (!compiler->is_optimizing()) {
2913 if (raw_exception_var_ != nullptr) {
2914 __ movq(compiler::Address(RBP,
2915 compiler::target::FrameOffsetInBytesForVariable(
2916 raw_exception_var_)),
2917 kExceptionObjectReg);
2918 }
2919 if (raw_stacktrace_var_ != nullptr) {
2920 __ movq(compiler::Address(RBP,
2921 compiler::target::FrameOffsetInBytesForVariable(
2922 raw_stacktrace_var_)),
2923 kStackTraceObjectReg);
2924 }
2925 }
2926}
2927
2928LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
2929 bool opt) const {
2930 const intptr_t kNumInputs = 0;
2931 const intptr_t kNumTemps = 1;
2932 const bool using_shared_stub = UseSharedSlowPathStub(opt);
2933 LocationSummary* summary = new (zone)
2934 LocationSummary(zone, kNumInputs, kNumTemps,
2935 using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
2936 : LocationSummary::kCallOnSlowPath);
2937 summary->set_temp(0, Location::RequiresRegister());
2938 return summary;
2939}
2940
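// Slow path for interrupt and stack-overflow checks. Depending on the location
// summary it either calls the shared stack-overflow stub or the
// InterruptOrStackOverflow runtime entry, and it also serves as the OSR entry
// point when the usage-counter check below triggers.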
2941class CheckStackOverflowSlowPath
2942 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
2943 public:
2944 static constexpr intptr_t kNumSlowPathArgs = 0;
2945
2946 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2947 : TemplateSlowPathCode(instruction) {}
2948
2949 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2950 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
2951 __ Comment("CheckStackOverflowSlowPathOsr");
2952 __ Bind(osr_entry_label());
2953 __ movq(compiler::Address(THR, Thread::stack_overflow_flags_offset()),
2954 compiler::Immediate(Thread::kOsrRequest));
2955 }
2956 __ Comment("CheckStackOverflowSlowPath");
2957 __ Bind(entry_label());
2958 const bool using_shared_stub =
2959 instruction()->locs()->call_on_shared_slow_path();
2960 if (!using_shared_stub) {
2961 compiler->SaveLiveRegisters(instruction()->locs());
2962 }
2963 // pending_deoptimization_env_ is needed to generate a runtime call that
2964 // may throw an exception.
2965 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
2966 Environment* env =
2967 compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
2968 compiler->pending_deoptimization_env_ = env;
2969
2970 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
2971 if (using_shared_stub) {
2972 if (!has_frame) {
2973 ASSERT(__ constant_pool_allowed());
2974 __ set_constant_pool_allowed(false);
2975 __ EnterDartFrame(0);
2976 }
2977 const uword entry_point_offset =
2978 Thread::stack_overflow_shared_stub_entry_point_offset(
2979 instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
2980 __ call(compiler::Address(THR, entry_point_offset));
2981 compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
2982 compiler->RecordCatchEntryMoves(env);
2983 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
2984 instruction()->deopt_id(),
2985 instruction()->source());
2986 if (!has_frame) {
2987 __ LeaveDartFrame();
2988 __ set_constant_pool_allowed(true);
2989 }
2990 } else {
2991 ASSERT(has_frame);
2992 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
2993 compiler->EmitCallsiteMetadata(
2994 instruction()->source(), instruction()->deopt_id(),
2995 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
2996 }
2997
2998 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
2999 instruction()->in_loop()) {
3000 // In unoptimized code, record loop stack checks as possible OSR entries.
3001 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
3002 instruction()->deopt_id(),
3003 InstructionSource());
3004 }
3005 compiler->pending_deoptimization_env_ = nullptr;
3006 if (!using_shared_stub) {
3007 compiler->RestoreLiveRegisters(instruction()->locs());
3008 }
3009 __ jmp(exit_label());
3010 }
3011
3012 compiler::Label* osr_entry_label() {
3013 ASSERT(IsolateGroup::Current()->use_osr());
3014 return &osr_entry_label_;
3015 }
3016
3017 private:
3018 compiler::Label osr_entry_label_;
3019};
3020
3021void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3022 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
3023 compiler->AddSlowPathCode(slow_path);
3024
3025 Register temp = locs()->temp(0).reg();
3026 // Generate stack overflow check.
3027 __ cmpq(RSP, compiler::Address(THR, Thread::stack_limit_offset()));
3028 __ j(BELOW_EQUAL, slow_path->entry_label());
3029 if (compiler->CanOSRFunction() && in_loop()) {
3030 // In unoptimized code check the usage counter to trigger OSR at loop
3031 // stack checks. Use progressively higher thresholds for more deeply
3032 // nested loops to attempt to hit outer loops with OSR when possible.
3033 __ LoadObject(temp, compiler->parsed_function().function());
3034 const intptr_t configured_optimization_counter_threshold =
3035 compiler->thread()->isolate_group()->optimization_counter_threshold();
3036 const int32_t threshold =
3037 configured_optimization_counter_threshold * (loop_depth() + 1);
3038 __ incl(compiler::FieldAddress(temp, Function::usage_counter_offset()));
3039 __ cmpl(compiler::FieldAddress(temp, Function::usage_counter_offset()),
3040 compiler::Immediate(threshold));
3041 __ j(GREATER_EQUAL, slow_path->osr_entry_label());
3042 }
3043 if (compiler->ForceSlowPathForStackOverflow()) {
3044 __ jmp(slow_path->entry_label());
3045 }
3046 __ Bind(slow_path->exit_label());
3047}
3048
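// Emits a tagged Smi shift-left. Constant shift counts detect overflow by
// shifting and then shifting back for comparison; variable counts (in RCX)
// additionally deoptimize on negative or out-of-range counts unless range
// analysis has already ruled them out.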
3049static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
3050 BinarySmiOpInstr* shift_left) {
3051 const LocationSummary& locs = *shift_left->locs();
3052 Register left = locs.in(0).reg();
3053 Register result = locs.out(0).reg();
3054 ASSERT(left == result);
3055 compiler::Label* deopt =
3056 shift_left->CanDeoptimize()
3057 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3058 ICData::kDeoptBinarySmiOp)
3059 : nullptr;
3060 if (locs.in(1).IsConstant()) {
3061 const Object& constant = locs.in(1).constant();
3062 ASSERT(constant.IsSmi());
3063 // shlq operation masks the count to 6 bits.
3064#if !defined(DART_COMPRESSED_POINTERS)
3065 const intptr_t kCountLimit = 0x3F;
3066#else
3067 const intptr_t kCountLimit = 0x1F;
3068#endif
3069 const intptr_t value = Smi::Cast(constant).Value();
3070 ASSERT((0 < value) && (value < kCountLimit));
3071 if (shift_left->can_overflow()) {
3072 if (value == 1) {
3073 // Use overflow flag.
3074 __ OBJ(shl)(left, compiler::Immediate(1));
3075 __ j(OVERFLOW, deopt);
3076 return;
3077 }
3078 // Check for overflow.
3079 Register temp = locs.temp(0).reg();
3080 __ OBJ(mov)(temp, left);
3081 __ OBJ(shl)(left, compiler::Immediate(value));
3082 __ OBJ(sar)(left, compiler::Immediate(value));
3083 __ OBJ(cmp)(left, temp);
3084 __ j(NOT_EQUAL, deopt); // Overflow.
3085 }
3086 // Shift for the result now that we know there is no overflow.
3087 __ OBJ(shl)(left, compiler::Immediate(value));
3088 return;
3089 }
3090
3091 // Right (locs.in(1)) is not constant.
3092 Register right = locs.in(1).reg();
3093 Range* right_range = shift_left->right_range();
3094 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
3095 // TODO(srdjan): Implement code below for is_truncating().
3096 // If left is constant, we know the maximal allowed size for right.
3097 const Object& obj = shift_left->left()->BoundConstant();
3098 if (obj.IsSmi()) {
3099 const intptr_t left_int = Smi::Cast(obj).Value();
3100 if (left_int == 0) {
3101 __ CompareImmediate(right, compiler::Immediate(0),
3102 compiler::kObjectBytes);
3103 __ j(NEGATIVE, deopt);
3104 return;
3105 }
3106 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
3107 const bool right_needs_check =
3108 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
3109 if (right_needs_check) {
3110 __ CompareObject(right, Smi::ZoneHandle(Smi::New(max_right)));
3111 __ j(ABOVE_EQUAL, deopt);
3112 }
3113 __ SmiUntag(right);
3114 __ OBJ(shl)(left, right);
3115 }
3116 return;
3117 }
3118
3119 const bool right_needs_check =
3120 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
3121 ASSERT(right == RCX); // Count must be in RCX
3122 if (!shift_left->can_overflow()) {
3123 if (right_needs_check) {
3124 const bool right_may_be_negative =
3125 (right_range == nullptr) || !right_range->IsPositive();
3126 if (right_may_be_negative) {
3127 ASSERT(shift_left->CanDeoptimize());
3128 __ CompareImmediate(right, compiler::Immediate(0),
3129 compiler::kObjectBytes);
3130 __ j(NEGATIVE, deopt);
3131 }
3132 compiler::Label done, is_not_zero;
3133 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3134 __ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump);
3135 __ xorq(left, left);
3136 __ jmp(&done, compiler::Assembler::kNearJump);
3137 __ Bind(&is_not_zero);
3138 __ SmiUntag(right);
3139 __ OBJ(shl)(left, right);
3140 __ Bind(&done);
3141 } else {
3142 __ SmiUntag(right);
3143 __ OBJ(shl)(left, right);
3144 }
3145 } else {
3146 if (right_needs_check) {
3147 ASSERT(shift_left->CanDeoptimize());
3148 __ CompareObject(right, Smi::ZoneHandle(Smi::New(Smi::kBits)));
3149 __ j(ABOVE_EQUAL, deopt);
3150 }
3151 // Left is not a constant.
3152 Register temp = locs.temp(0).reg();
3153 // Check if the count is too large to handle inline.
3154 __ OBJ(mov)(temp, left);
3155 __ SmiUntag(right);
3156 // Overflow test (preserves temp and right).
3157 __ OBJ(shl)(left, right);
3158 __ OBJ(sar)(left, right);
3159 __ OBJ(cmp)(left, temp);
3160 __ j(NOT_EQUAL, deopt); // Overflow.
3161 // Shift for the result now that we know there is no overflow.
3162 __ OBJ(shl)(left, right);
3163 ASSERT(!shift_left->is_truncating());
3164 }
3165}
3166
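// A Smi constant can be encoded as an x64 immediate operand only if its tagged
// (raw) value fits in a sign-extended 32 bits.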
3167static bool CanBeImmediate(const Object& constant) {
3168 return constant.IsSmi() &&
3169 compiler::Immediate(Smi::RawValue(Smi::Cast(constant).Value()))
3170 .is_int32();
3171}
3172
3173static bool IsSmiValue(const Object& constant, intptr_t value) {
3174 return constant.IsSmi() && (Smi::Cast(constant).Value() == value);
3175}
3176
3177LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
3178 bool opt) const {
3179 const intptr_t kNumInputs = 2;
3180
3181 ConstantInstr* right_constant = right()->definition()->AsConstant();
3182 if ((right_constant != nullptr) && (op_kind() != Token::kTRUNCDIV) &&
3183 (op_kind() != Token::kSHL) &&
3184#if defined(DART_COMPRESSED_POINTERS)
3185 (op_kind() != Token::kUSHR) &&
3186#endif
3187 (op_kind() != Token::kMUL) && (op_kind() != Token::kMOD) &&
3188 CanBeImmediate(right_constant->value())) {
3189 const intptr_t kNumTemps = 0;
3190 LocationSummary* summary = new (zone)
3191 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3192 summary->set_in(0, Location::RequiresRegister());
3193 summary->set_in(1, Location::Constant(right_constant));
3194 summary->set_out(0, Location::SameAsFirstInput());
3195 return summary;
3196 }
3197
3198 if (op_kind() == Token::kTRUNCDIV) {
3199 const intptr_t kNumTemps = 1;
3200 LocationSummary* summary = new (zone)
3201 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3202 if (RightIsPowerOfTwoConstant()) {
3203 summary->set_in(0, Location::RequiresRegister());
3204 ConstantInstr* right_constant = right()->definition()->AsConstant();
3205 summary->set_in(1, Location::Constant(right_constant));
3206 summary->set_temp(0, Location::RequiresRegister());
3207 summary->set_out(0, Location::SameAsFirstInput());
3208 } else {
3209 // Both inputs must be writable because they will be untagged.
3210 summary->set_in(0, Location::RegisterLocation(RAX));
3211 summary->set_in(1, Location::WritableRegister());
3212 summary->set_out(0, Location::SameAsFirstInput());
3213 // Will be used for sign extension and division.
3214 summary->set_temp(0, Location::RegisterLocation(RDX));
3215 }
3216 return summary;
3217 } else if (op_kind() == Token::kMOD) {
3218 const intptr_t kNumTemps = 1;
3219 LocationSummary* summary = new (zone)
3220 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3221 // Both inputs must be writable because they will be untagged.
3222 summary->set_in(0, Location::RegisterLocation(RDX));
3223 summary->set_in(1, Location::WritableRegister());
3224 summary->set_out(0, Location::SameAsFirstInput());
3225 // Will be used for sign extension and division.
3226 summary->set_temp(0, Location::RegisterLocation(RAX));
3227 return summary;
3228 } else if ((op_kind() == Token::kSHR)
3229#if !defined(DART_COMPRESSED_POINTERS)
3230 || (op_kind() == Token::kUSHR)
3231#endif // !defined(DART_COMPRESSED_POINTERS)
3232 ) {
3233 const intptr_t kNumTemps = 0;
3234 LocationSummary* summary = new (zone)
3235 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3236 summary->set_in(0, Location::RequiresRegister());
3237 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), RCX));
3238 summary->set_out(0, Location::SameAsFirstInput());
3239 return summary;
3240#if defined(DART_COMPRESSED_POINTERS)
3241 } else if (op_kind() == Token::kUSHR) {
3242 const intptr_t kNumTemps = 1;
3243 LocationSummary* summary = new (zone)
3244 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3245 summary->set_in(0, Location::RequiresRegister());
3246 if ((right_constant != nullptr) &&
3247 CanBeImmediate(right_constant->value())) {
3248 summary->set_in(1, Location::Constant(right_constant));
3249 } else {
3250 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), RCX));
3251 }
3252 summary->set_out(0, Location::SameAsFirstInput());
3253 summary->set_temp(0, Location::RequiresRegister());
3254 return summary;
3255#endif // defined(DART_COMPRESSED_POINTERS)
3256 } else if (op_kind() == Token::kSHL) {
3257 // Shift-by-1 overflow checking can use flags, otherwise we need a temp.
3258 const bool shiftBy1 =
3259 (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
3260 const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
3261 LocationSummary* summary = new (zone)
3262 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3263 summary->set_in(0, Location::RequiresRegister());
3264 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), RCX));
3265 if (kNumTemps == 1) {
3266 summary->set_temp(0, Location::RequiresRegister());
3267 }
3268 summary->set_out(0, Location::SameAsFirstInput());
3269 return summary;
3270 } else {
3271 const intptr_t kNumTemps = 0;
3272 LocationSummary* summary = new (zone)
3273 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3274 summary->set_in(0, Location::RequiresRegister());
3275 ConstantInstr* constant = right()->definition()->AsConstant();
3276 if (constant != nullptr) {
3277 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3278 } else {
3279 summary->set_in(1, Location::PrefersRegister());
3280 }
3281 summary->set_out(0, Location::SameAsFirstInput());
3282 return summary;
3283 }
3284}
3285
3286void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3287 if (op_kind() == Token::kSHL) {
3288 EmitSmiShiftLeft(compiler, this);
3289 return;
3290 }
3291
3292 Register left = locs()->in(0).reg();
3293 Register result = locs()->out(0).reg();
3294 ASSERT(left == result);
3295 compiler::Label* deopt = nullptr;
3296 if (CanDeoptimize()) {
3297 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3298 }
3299
3300 if (locs()->in(1).IsConstant()) {
3301 const Object& constant = locs()->in(1).constant();
3302 ASSERT(constant.IsSmi());
3303 const int64_t imm = Smi::RawValue(Smi::Cast(constant).Value());
3304 switch (op_kind()) {
3305 case Token::kADD: {
3306 __ AddImmediate(left, compiler::Immediate(imm), compiler::kObjectBytes);
3307 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3308 break;
3309 }
3310 case Token::kSUB: {
3311 __ SubImmediate(left, compiler::Immediate(imm), compiler::kObjectBytes);
3312 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3313 break;
3314 }
3315 case Token::kMUL: {
3316 // Keep left value tagged and untag right value.
3317 const intptr_t value = Smi::Cast(constant).Value();
3318 __ MulImmediate(left, compiler::Immediate(value),
3319 compiler::kObjectBytes);
3320 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3321 break;
3322 }
3323 case Token::kTRUNCDIV: {
3324 const intptr_t value = Smi::Cast(constant).Value();
3325 ASSERT(value != kIntptrMin);
3326 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
3327 const intptr_t shift_count =
3328 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
3329 ASSERT(kSmiTagSize == 1);
3330 Register temp = locs()->temp(0).reg();
3331 __ movq(temp, left);
3332#if !defined(DART_COMPRESSED_POINTERS)
3333 __ sarq(temp, compiler::Immediate(63));
3334#else
3335 __ sarl(temp, compiler::Immediate(31));
3336#endif
3337 ASSERT(shift_count > 1); // 1, -1 case handled above.
3338#if !defined(DART_COMPRESSED_POINTERS)
3339 __ shrq(temp, compiler::Immediate(64 - shift_count));
3340#else
3341 __ shrl(temp, compiler::Immediate(32 - shift_count));
3342#endif
3343 __ OBJ(add)(left, temp);
3344 ASSERT(shift_count > 0);
3345 __ OBJ(sar)(left, compiler::Immediate(shift_count));
3346 if (value < 0) {
3347 __ OBJ(neg)(left);
3348 }
3349 __ SmiTag(left);
3350 break;
3351 }
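// What the kTRUNCDIV-by-constant sequence above computes, roughly: the tagged
// input is 2 * x and shift_count is log2(|divisor|) + kSmiTagSize, so adding a
// bias of (1 << shift_count) - 1 to negative inputs makes the arithmetic right
// shift round toward zero instead of toward -infinity:
//   quotient = (2 * x + bias) >> shift_count;  // == truncate(x / |divisor|)
// and the quotient is negated afterwards when the divisor is negative.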
3352 case Token::kBIT_AND: {
3353 // No overflow check.
3354 __ AndImmediate(left, compiler::Immediate(imm));
3355 break;
3356 }
3357 case Token::kBIT_OR: {
3358 // No overflow check.
3359 __ OrImmediate(left, compiler::Immediate(imm));
3360 break;
3361 }
3362 case Token::kBIT_XOR: {
3363 // No overflow check.
3364 __ XorImmediate(left, compiler::Immediate(imm));
3365 break;
3366 }
3367
3368 case Token::kSHR: {
3369 // sarq/l operation masks the count to 6/5 bits.
3370#if !defined(DART_COMPRESSED_POINTERS)
3371 const intptr_t kCountLimit = 0x3F;
3372#else
3373 const intptr_t kCountLimit = 0x1F;
3374#endif
3375 const intptr_t value = Smi::Cast(constant).Value();
3376 __ OBJ(sar)(left, compiler::Immediate(Utils::Minimum(
3377 value + kSmiTagSize, kCountLimit)));
3378 __ SmiTag(left);
3379 break;
3380 }
3381
3382 case Token::kUSHR: {
3383 // shrq operation masks the count to 6 bits, but
3384 // unsigned shifts by >= kBitsPerInt64 are eliminated by
3385 // BinaryIntegerOpInstr::Canonicalize.
3386 const intptr_t kCountLimit = 0x3F;
3387 const intptr_t value = Smi::Cast(constant).Value();
3388 ASSERT((value >= 0) && (value <= kCountLimit));
3389 __ SmiUntagAndSignExtend(left);
3390 __ shrq(left, compiler::Immediate(value));
3391 __ shlq(left, compiler::Immediate(1)); // SmiTag, keep hi bits.
3392 if (deopt != nullptr) {
3393 __ j(OVERFLOW, deopt);
3394#if defined(DART_COMPRESSED_POINTERS)
3395 const Register temp = locs()->temp(0).reg();
3396 __ movsxd(temp, left);
3397 __ cmpq(temp, left);
3398 __ j(NOT_EQUAL, deopt);
3399#endif // defined(DART_COMPRESSED_POINTERS)
3400 }
3401 break;
3402 }
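// For kUSHR the value is untagged and sign-extended first so the logical shift
// sees the full 64-bit pattern; the single shlq(left, 1) afterwards re-tags the
// result and sets OF exactly when it no longer fits in a smi. With compressed
// pointers the extra movsxd/cmpq re-check also deoptimizes when the result fits
// in 64 bits but not in a sign-extended 31-bit smi.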
3403
3404 default:
3405 UNREACHABLE();
3406 break;
3407 }
3408 return;
3409 } // locs()->in(1).IsConstant().
3410
3411 if (locs()->in(1).IsStackSlot()) {
3412 const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
3413 switch (op_kind()) {
3414 case Token::kADD: {
3415 __ OBJ(add)(left, right);
3416 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3417 break;
3418 }
3419 case Token::kSUB: {
3420 __ OBJ(sub)(left, right);
3421 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3422 break;
3423 }
3424 case Token::kMUL: {
3425 __ SmiUntag(left);
3426 __ OBJ(imul)(left, right);
3427 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3428 break;
3429 }
3430 case Token::kBIT_AND: {
3431 // No overflow check.
3432 __ andq(left, right);
3433 break;
3434 }
3435 case Token::kBIT_OR: {
3436 // No overflow check.
3437 __ orq(left, right);
3438 break;
3439 }
3440 case Token::kBIT_XOR: {
3441 // No overflow check.
3442 __ xorq(left, right);
3443 break;
3444 }
3445 default:
3446 UNREACHABLE();
3447 break;
3448 }
3449 return;
3450 } // locs()->in(1).IsStackSlot().
3451
3452 // if locs()->in(1).IsRegister.
3453 Register right = locs()->in(1).reg();
3454 switch (op_kind()) {
3455 case Token::kADD: {
3456 __ OBJ(add)(left, right);
3457 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3458 break;
3459 }
3460 case Token::kSUB: {
3461 __ OBJ(sub)(left, right);
3462 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3463 break;
3464 }
3465 case Token::kMUL: {
3466 __ SmiUntag(left);
3467 __ OBJ(imul)(left, right);
3468 if (deopt != nullptr) __ j(OVERFLOW, deopt);
3469 break;
3470 }
3471 case Token::kBIT_AND: {
3472 // No overflow check.
3473 __ andq(left, right);
3474 break;
3475 }
3476 case Token::kBIT_OR: {
3477 // No overflow check.
3478 __ orq(left, right);
3479 break;
3480 }
3481 case Token::kBIT_XOR: {
3482 // No overflow check.
3483 __ xorq(left, right);
3484 break;
3485 }
3486 case Token::kTRUNCDIV: {
3487 compiler::Label not_32bit, done;
3488
3489 Register temp = locs()->temp(0).reg();
3490 ASSERT(left == RAX);
3491 ASSERT((right != RDX) && (right != RAX));
3492 ASSERT(temp == RDX);
3493 ASSERT(result == RAX);
3494 if (RangeUtils::CanBeZero(right_range())) {
3495 // Handle divide by zero in runtime.
3496 __ OBJ(test)(right, right);
3497 __ j(ZERO, deopt);
3498 }
3499#if !defined(DART_COMPRESSED_POINTERS)
3500 // Check if both operands fit into 32bits as idiv with 64bit operands
3501 // requires twice as many cycles and has much higher latency.
3502 // We are checking this before untagging them to avoid corner case
3503 // dividing INT_MIN by -1 that raises exception because quotient is
3504 // too large for 32bit register.
3505 __ movsxd(temp, left);
3506 __ cmpq(temp, left);
3507 __ j(NOT_EQUAL, &not_32bit);
3508 __ movsxd(temp, right);
3509 __ cmpq(temp, right);
3510 __ j(NOT_EQUAL, &not_32bit);
3511
3512 // Both operands are 31bit smis. Divide using 32bit idiv.
3513 __ SmiUntag(left);
3514 __ SmiUntag(right);
3515 __ cdq();
3516 __ idivl(right);
3517 __ movsxd(result, result);
3518 __ jmp(&done);
3519
3520 // Divide using 64bit idiv.
3521 __ Bind(&not_32bit);
3522 __ SmiUntag(left);
3523 __ SmiUntag(right);
3524 __ cqo(); // Sign extend RAX -> RDX:RAX.
3525 __ idivq(right); // RAX: quotient, RDX: remainder.
3526 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3527 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3528 // case we cannot tag the result.
3529 __ CompareImmediate(result, compiler::Immediate(0x4000000000000000));
3530 __ j(EQUAL, deopt);
3531 }
3532#else
3533 // Both operands are 31bit smis. Divide using 32bit idiv.
3534 __ SmiUntag(left);
3535 __ SmiUntag(right);
3536 __ cdq();
3537 __ idivl(right);
3538
3539 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
3540 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
3541 // case we cannot tag the result.
3542 __ cmpl(result, compiler::Immediate(0x40000000));
3543 __ j(EQUAL, deopt);
3544 }
3545 __ movsxd(result, result);
3546#endif
3547 __ Bind(&done);
3548 __ SmiTag(result);
3549 break;
3550 }
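// The only quotient the 64-bit idiv path can produce that is too large to
// re-tag is kSmiMin / -1 == 0x4000000000000000, hence the single
// CompareImmediate above; the 32-bit path checks the analogous 0x40000000
// before sign-extending and tagging the result.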
3551 case Token::kMOD: {
3552 compiler::Label not_32bit, div_done;
3553
3554 Register temp = locs()->temp(0).reg();
3555 ASSERT(left == RDX);
3556 ASSERT((right != RDX) && (right != RAX));
3557 ASSERT(temp == RAX);
3558 ASSERT(result == RDX);
3559 if (RangeUtils::CanBeZero(right_range())) {
3560 // Handle divide by zero in runtime.
3561 __ OBJ(test)(right, right);
3562 __ j(ZERO, deopt);
3563 }
3564#if !defined(DART_COMPRESSED_POINTERS)
3565 // Check if both operands fit into 32bits as idiv with 64bit operands
3566 // requires twice as many cycles and has much higher latency.
3567 // We are checking this before untagging them to avoid corner case
3568 // dividing INT_MIN by -1 that raises exception because quotient is
3569 // too large for 32bit register.
3570 __ movsxd(temp, left);
3571 __ cmpq(temp, left);
3572 __ j(NOT_EQUAL, &not_32bit);
3573 __ movsxd(temp, right);
3574 __ cmpq(temp, right);
3575 __ j(NOT_EQUAL, &not_32bit);
3576#endif
3577 // Both operands are 31bit smis. Divide using 32bit idiv.
3578 __ SmiUntag(left);
3579 __ SmiUntag(right);
3580 __ movq(RAX, RDX);
3581 __ cdq();
3582 __ idivl(right);
3583 __ movsxd(result, result);
3584#if !defined(DART_COMPRESSED_POINTERS)
3585 __ jmp(&div_done);
3586
3587 // Divide using 64bit idiv.
3588 __ Bind(&not_32bit);
3589 __ SmiUntag(left);
3590 __ SmiUntag(right);
3591 __ movq(RAX, RDX);
3592 __ cqo(); // Sign extend RAX -> RDX:RAX.
3593 __ idivq(right); // RAX: quotient, RDX: remainder.
3594 __ Bind(&div_done);
3595#endif
3596 // res = left % right;
3597 // if (res < 0) {
3598 // if (right < 0) {
3599 // res = res - right;
3600 // } else {
3601 // res = res + right;
3602 // }
3603 // }
3604 compiler::Label all_done;
3605 __ OBJ(cmp)(result, compiler::Immediate(0));
3606 __ j(GREATER_EQUAL, &all_done, compiler::Assembler::kNearJump);
3607 // Result is negative, adjust it.
3608 if (RangeUtils::Overlaps(right_range(), -1, 1)) {
3609 compiler::Label subtract;
3610 __ OBJ(cmp)(right, compiler::Immediate(0));
3611 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
3612 __ OBJ(add)(result, right);
3613 __ jmp(&all_done, compiler::Assembler::kNearJump);
3614 __ Bind(&subtract);
3615 __ OBJ(sub)(result, right);
3616 } else if (right_range()->IsPositive()) {
3617 // Right is positive.
3618 __ OBJ(add)(result, right);
3619 } else {
3620 // Right is negative.
3621 __ OBJ(sub)(result, right);
3622 }
3623 __ Bind(&all_done);
3624 __ SmiTag(result);
3625 break;
3626 }
3627 case Token::kSHR: {
3628 if (CanDeoptimize()) {
3629 __ CompareImmediate(right, compiler::Immediate(0),
3630 compiler::kObjectBytes);
3631 __ j(LESS, deopt);
3632 }
3633 __ SmiUntag(right);
3634 // sarq/l operation masks the count to 6/5 bits.
3635#if !defined(DART_COMPRESSED_POINTERS)
3636 const intptr_t kCountLimit = 0x3F;
3637#else
3638 const intptr_t kCountLimit = 0x1F;
3639#endif
3640 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3641 __ CompareImmediate(right, compiler::Immediate(kCountLimit));
3642 compiler::Label count_ok;
3643 __ j(LESS, &count_ok, compiler::Assembler::kNearJump);
3644 __ LoadImmediate(right, compiler::Immediate(kCountLimit));
3645 __ Bind(&count_ok);
3646 }
3647 ASSERT(right == RCX); // Count must be in RCX
3648 __ SmiUntag(left);
3649 __ OBJ(sar)(left, right);
3650 __ SmiTag(left);
3651 break;
3652 }
3653 case Token::kUSHR: {
3654 if (deopt != nullptr) {
3655 __ CompareImmediate(right, compiler::Immediate(0),
3656 compiler::kObjectBytes);
3657 __ j(LESS, deopt);
3658 }
3659 __ SmiUntag(right);
3660 // shrq operation masks the count to 6 bits.
3661 const intptr_t kCountLimit = 0x3F;
3662 COMPILE_ASSERT(kCountLimit + 1 == kBitsPerInt64);
3663 compiler::Label done;
3664 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3665 __ CompareImmediate(right, compiler::Immediate(kCountLimit),
3666 compiler::kObjectBytes);
3667 compiler::Label count_ok;
3668 __ j(LESS_EQUAL, &count_ok, compiler::Assembler::kNearJump);
3669 __ xorq(left, left);
3670 __ jmp(&done, compiler::Assembler::kNearJump);
3671 __ Bind(&count_ok);
3672 }
3673 ASSERT(right == RCX); // Count must be in RCX
3674 __ SmiUntagAndSignExtend(left);
3675 __ shrq(left, right);
3676 __ shlq(left, compiler::Immediate(1)); // SmiTag, keep hi bits.
3677 if (deopt != nullptr) {
3678 __ j(OVERFLOW, deopt);
3679#if defined(DART_COMPRESSED_POINTERS)
3680 const Register temp = locs()->temp(0).reg();
3681 __ movsxd(temp, left);
3682 __ cmpq(temp, left);
3683 __ j(NOT_EQUAL, deopt);
3684#endif // defined(DART_COMPRESSED_POINTERS)
3685 }
3686 __ Bind(&done);
3687 break;
3688 }
3689 case Token::kDIV: {
3690 // Dispatches to 'Double./'.
3691 // TODO(srdjan): Implement as conversion to double and double division.
3692 UNREACHABLE();
3693 break;
3694 }
3695 case Token::kOR:
3696 case Token::kAND: {
3697 // Flow graph builder has dissected this operation to guarantee correct
3698 // behavior (short-circuit evaluation).
3699 UNREACHABLE();
3700 break;
3701 }
3702 default:
3703 UNREACHABLE();
3704 break;
3705 }
3706}
3707
3708LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3709 bool opt) const {
3710 intptr_t left_cid = left()->Type()->ToCid();
3711 intptr_t right_cid = right()->Type()->ToCid();
3712 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3713 const intptr_t kNumInputs = 2;
3714 const bool need_temp = (left()->definition() != right()->definition()) &&
3715 (left_cid != kSmiCid) && (right_cid != kSmiCid);
3716 const intptr_t kNumTemps = need_temp ? 1 : 0;
3717 LocationSummary* summary = new (zone)
3718 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3719 summary->set_in(0, Location::RequiresRegister());
3720 summary->set_in(1, Location::RequiresRegister());
3721 if (need_temp) summary->set_temp(0, Location::RequiresRegister());
3722 return summary;
3723}
3724
3725void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3726 compiler::Label* deopt =
3727 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3728 intptr_t left_cid = left()->Type()->ToCid();
3729 intptr_t right_cid = right()->Type()->ToCid();
3730 Register left = locs()->in(0).reg();
3731 Register right = locs()->in(1).reg();
3732 if (this->left()->definition() == this->right()->definition()) {
3733 __ testq(left, compiler::Immediate(kSmiTagMask));
3734 } else if (left_cid == kSmiCid) {
3735 __ testq(right, compiler::Immediate(kSmiTagMask));
3736 } else if (right_cid == kSmiCid) {
3737 __ testq(left, compiler::Immediate(kSmiTagMask));
3738 } else {
3739 Register temp = locs()->temp(0).reg();
3740 __ movq(temp, left);
3741 __ orq(temp, right);
3742 __ testq(temp, compiler::Immediate(kSmiTagMask));
3743 }
3744 __ j(ZERO, deopt);
3745}
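// Both operands are smis exactly when the OR of their values has a clear smi
// tag bit, so one testq against kSmiTagMask covers the two-register case; the
// deopt fires only when both operands are smis, the one case the unboxed
// double code this check guards does not handle.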
3746
3747LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3748 const intptr_t kNumInputs = 1;
3749 const intptr_t kNumTemps = 1;
3750 LocationSummary* summary = new (zone) LocationSummary(
3751 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3752 summary->set_in(0, Location::RequiresFpuRegister());
3753 summary->set_temp(0, Location::RequiresRegister());
3754 summary->set_out(0, Location::RequiresRegister());
3755 return summary;
3756}
3757
3758void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3759 Register out_reg = locs()->out(0).reg();
3760 Register temp = locs()->temp(0).reg();
3761 XmmRegister value = locs()->in(0).fpu_reg();
3762
3763 BoxAllocationSlowPath::Allocate(compiler, this,
3764 compiler->BoxClassFor(from_representation()),
3765 out_reg, temp);
3766
3767 switch (from_representation()) {
3768 case kUnboxedDouble:
3769 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), value);
3770 break;
3771 case kUnboxedFloat: {
3772 __ cvtss2sd(FpuTMP, value);
3773 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), FpuTMP);
3774 break;
3775 }
3776 case kUnboxedFloat32x4:
3777 case kUnboxedFloat64x2:
3778 case kUnboxedInt32x4:
3779 __ movups(compiler::FieldAddress(out_reg, ValueOffset()), value);
3780 break;
3781 default:
3782 UNREACHABLE();
3783 break;
3784 }
3785}
3786
3787LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3789 const intptr_t kNumInputs = 1;
3790 const intptr_t kNumTemps = 0;
3791 const bool needs_writable_input =
3792 (representation() != kUnboxedInt64) &&
3793 (value()->Type()->ToNullableCid() != BoxCid());
3794 LocationSummary* summary = new (zone)
3795 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3796 summary->set_in(0, needs_writable_input ? Location::WritableRegister()
3797 : Location::RequiresRegister());
3798 if (RepresentationUtils::IsUnboxedInteger(representation())) {
3799#if !defined(DART_COMPRESSED_POINTERS)
3800 summary->set_out(0, Location::SameAsFirstInput());
3801#else
3802 summary->set_out(0, Location::RequiresRegister());
3803#endif
3804 } else {
3805 summary->set_out(0, Location::RequiresFpuRegister());
3806 }
3807 return summary;
3808}
3809
3810void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3811 const Register box = locs()->in(0).reg();
3812
3813 switch (representation()) {
3814 case kUnboxedInt64: {
3815 const Register result = locs()->out(0).reg();
3816 __ movq(result, compiler::FieldAddress(box, ValueOffset()));
3817 break;
3818 }
3819
3820 case kUnboxedDouble: {
3821 const FpuRegister result = locs()->out(0).fpu_reg();
3822 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3823 break;
3824 }
3825
3826 case kUnboxedFloat: {
3827 const FpuRegister result = locs()->out(0).fpu_reg();
3828 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3829 __ cvtsd2ss(result, result);
3830 break;
3831 }
3832
3833 case kUnboxedFloat32x4:
3834 case kUnboxedFloat64x2:
3835 case kUnboxedInt32x4: {
3836 const FpuRegister result = locs()->out(0).fpu_reg();
3837 __ movups(result, compiler::FieldAddress(box, ValueOffset()));
3838 break;
3839 }
3840
3841 default:
3842 UNREACHABLE();
3843 break;
3844 }
3845}
3846
3847void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3848 const Register box = locs()->in(0).reg();
3849
3850 switch (representation()) {
3851 case kUnboxedInt32: {
3852 const Register result = locs()->out(0).reg();
3853 __ SmiUntag(result, box);
3854 break;
3855 }
3856 case kUnboxedInt64: {
3857 const Register result = locs()->out(0).reg();
3858 __ SmiUntagAndSignExtend(result, box);
3859 break;
3860 }
3861 case kUnboxedDouble: {
3862 const FpuRegister result = locs()->out(0).fpu_reg();
3863 __ SmiUntag(box);
3864 __ OBJ(cvtsi2sd)(result, box);
3865 break;
3866 }
3867
3868 default:
3869 UNREACHABLE();
3870 break;
3871 }
3872}
3873
3874void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3875 const Register value = locs()->in(0).reg();
3876 const Register result = locs()->out(0).reg();
3877 __ LoadInt32FromBoxOrSmi(result, value);
3878}
3879
3880void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3881 const Register value = locs()->in(0).reg();
3882 const Register result = locs()->out(0).reg();
3883 __ LoadInt64FromBoxOrSmi(result, value);
3884}
3885
3886LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
3887 bool opt) const {
3888 const intptr_t kNumInputs = 1;
3889 const intptr_t kNumTemps = (!is_truncating() && CanDeoptimize()) ? 1 : 0;
3890 LocationSummary* summary = new (zone)
3891 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3892 summary->set_in(0, Location::RequiresRegister());
3893 summary->set_out(0, Location::SameAsFirstInput());
3894 if (kNumTemps > 0) {
3895 summary->set_temp(0, Location::RequiresRegister());
3896 }
3897 return summary;
3898}
3899
3900void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3901 const intptr_t value_cid = value()->Type()->ToCid();
3902 const Register value = locs()->in(0).reg();
3903 compiler::Label* deopt =
3904 CanDeoptimize()
3905 ? compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger)
3906 : nullptr;
3907 ASSERT(value == locs()->out(0).reg());
3908
3909 if (value_cid == kSmiCid) {
3910 __ SmiUntag(value);
3911 } else if (value_cid == kMintCid) {
3912 __ movq(value, compiler::FieldAddress(value, Mint::value_offset()));
3913 } else if (!CanDeoptimize()) {
3914 // Type information is not conclusive, but range analysis found
3915 // the value to be in int64 range. Therefore it must be a smi
3916 // or mint value.
3917 ASSERT(is_truncating());
3918 compiler::Label done;
3919#if !defined(DART_COMPRESSED_POINTERS)
3920 // Optimistically untag value.
3921 __ SmiUntag(value);
3922 __ j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
3923 // Undo untagging by multiplying value by 2.
3924 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
3925 __ movq(value,
3926 compiler::Address(value, value, TIMES_1, Mint::value_offset()));
3927#else
3928 // Cannot speculatively untag because it erases the upper bits needed to
3929 // dereference when it is a Mint.
3930 compiler::Label not_smi;
3931 __ BranchIfNotSmi(value, &not_smi, compiler::Assembler::kNearJump);
3932 __ SmiUntagAndSignExtend(value);
3933 __ jmp(&done, compiler::Assembler::kNearJump);
3934 __ Bind(&not_smi);
3935 __ movq(value, compiler::FieldAddress(value, Mint::value_offset()));
3936#endif
3937 __ Bind(&done);
3938 return;
3939 } else {
3940 compiler::Label done;
3941#if !defined(DART_COMPRESSED_POINTERS)
3942 // Optimistically untag value.
3943 __ SmiUntagOrCheckClass(value, kMintCid, &done);
3944 __ j(NOT_EQUAL, deopt);
3945 // Undo untagging by multiplying value by 2.
3946 // [reg + reg + disp8] has a shorter encoding than [reg*2 + disp32]
3947 __ movq(value,
3948 compiler::Address(value, value, TIMES_1, Mint::value_offset()));
3949#else
3950 // Cannot speculatively untag because it erases the upper bits needed to
3951 // dereference when it is a Mint.
3952 compiler::Label not_smi;
3953 __ BranchIfNotSmi(value, &not_smi, compiler::Assembler::kNearJump);
3954 __ SmiUntagAndSignExtend(value);
3955 __ jmp(&done, compiler::Assembler::kNearJump);
3956 __ Bind(&not_smi);
3957 __ CompareClassId(value, kMintCid);
3958 __ j(NOT_EQUAL, deopt);
3959 __ movq(value, compiler::FieldAddress(value, Mint::value_offset()));
3960#endif
3961 __ Bind(&done);
3962 }
3963
3964 // TODO(vegorov): as it is implemented right now truncating unboxing would
3965 // leave "garbage" in the higher word.
3966 if (!is_truncating() && (deopt != nullptr)) {
3967 ASSERT(representation() == kUnboxedInt32);
3968 Register temp = locs()->temp(0).reg();
3969 __ movsxd(temp, value);
3970 __ cmpq(temp, value);
3971 __ j(NOT_EQUAL, deopt);
3972 }
3973}
3974
3975LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3976 bool opt) const {
3977 ASSERT((from_representation() == kUnboxedInt32) ||
3978 (from_representation() == kUnboxedUint32));
3979#if !defined(DART_COMPRESSED_POINTERS)
3980 // ValueFitsSmi() may be overly conservative and false because we only
3981 // perform range analysis during optimized compilation.
3982 const bool kMayAllocateMint = false;
3983#else
3984 const bool kMayAllocateMint = !ValueFitsSmi();
3985#endif
3986 const intptr_t kNumInputs = 1;
3987 const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
3988 LocationSummary* summary = new (zone)
3989 LocationSummary(zone, kNumInputs, kNumTemps,
3990 kMayAllocateMint ? LocationSummary::kCallOnSlowPath
3991 : LocationSummary::kNoCall);
3992 summary->set_in(0, Location::RequiresRegister());
3993 summary->set_out(0, Location::RequiresRegister());
3994 if (kMayAllocateMint) {
3995 summary->set_temp(0, Location::RequiresRegister());
3996 }
3997 return summary;
3998}
3999
4000void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4001 const Register value = locs()->in(0).reg();
4002 const Register out = locs()->out(0).reg();
4003 ASSERT(value != out);
4004
4005#if !defined(DART_COMPRESSED_POINTERS)
4006 ASSERT(kSmiTagSize == 1);
4007 if (from_representation() == kUnboxedInt32) {
4008 __ movsxd(out, value);
4009 } else {
4010 ASSERT(from_representation() == kUnboxedUint32);
4011 __ movl(out, value);
4012 }
4013 __ SmiTag(out);
4014#else
4015 compiler::Label done;
4016 if (from_representation() == kUnboxedInt32) {
4017 __ MoveRegister(out, value);
4018 __ addl(out, out);
4019 __ movsxd(out, out); // Does not affect flags.
4020 if (ValueFitsSmi()) {
4021 return;
4022 }
4023 __ j(NO_OVERFLOW, &done);
4024 } else {
4025 __ movl(out, value);
4026 __ SmiTag(out);
4027 if (ValueFitsSmi()) {
4028 return;
4029 }
4030 __ TestImmediate(value, compiler::Immediate(0xC0000000LL));
4031 __ j(ZERO, &done);
4032 }
4033 // Allocate a mint.
4034 // Value input is a writable register and we have to inform the compiler of
4035 // the type so it can be preserved untagged on the slow path
4036 locs()->live_registers()->Add(locs()->in(0), from_representation());
4037 const Register temp = locs()->temp(0).reg();
4038 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4039 temp);
4040 if (from_representation() == kUnboxedInt32) {
4041 __ movsxd(temp, value); // Sign-extend.
4042 } else {
4043 __ movl(temp, value); // Zero-extend.
4044 }
4045 __ movq(compiler::FieldAddress(out, Mint::value_offset()), temp);
4046 __ Bind(&done);
4047#endif
4048}
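// On the compressed-pointers path above, the int32 case tags by doubling the
// low 32 bits (addl) and uses the overflow flag to detect values outside the
// 31-bit smi range, while the uint32 case tests the top two bits (0xC0000000):
// if either is set the value cannot be a non-negative smi, and a Mint is
// allocated on the slow path instead.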
4049
4050LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
4051 bool opt) const {
4052 const intptr_t kNumInputs = 1;
4053 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
4054 // Shared slow path is used in BoxInt64Instr::EmitNativeCode in
4055 // precompiled mode and only after VM isolate stubs where
4056 // replaced with isolate-specific stubs.
4057 auto object_store = IsolateGroup::Current()->object_store();
4058 const bool stubs_in_vm_isolate =
4059 object_store->allocate_mint_with_fpu_regs_stub()
4060 ->untag()
4061 ->InVMIsolateHeap() ||
4062 object_store->allocate_mint_without_fpu_regs_stub()
4063 ->untag()
4064 ->InVMIsolateHeap();
4065 const bool shared_slow_path_call =
4066 SlowPathSharingSupported(opt) && !stubs_in_vm_isolate;
4067 LocationSummary* summary = new (zone) LocationSummary(
4068 zone, kNumInputs, kNumTemps,
4069 ValueFitsSmi()
4070 ? LocationSummary::kNoCall
4071 : ((shared_slow_path_call ? LocationSummary::kCallOnSharedSlowPath
4072 : LocationSummary::kCallOnSlowPath)));
4073 summary->set_in(0, Location::RequiresRegister());
4074 if (ValueFitsSmi()) {
4075 summary->set_out(0, Location::RequiresRegister());
4076 } else if (shared_slow_path_call) {
4077 summary->set_out(0,
4078 Location::RegisterLocation(AllocateMintABI::kResultReg));
4079 summary->set_temp(0, Location::RegisterLocation(AllocateMintABI::kTempReg));
4080 } else {
4081 summary->set_out(0, Location::RequiresRegister());
4082 summary->set_temp(0, Location::RequiresRegister());
4083 }
4084 return summary;
4085}
4086
4087void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
4088 const Register out = locs()->out(0).reg();
4089 const Register value = locs()->in(0).reg();
4090#if !defined(DART_COMPRESSED_POINTERS)
4091 __ MoveRegister(out, value);
4092 __ SmiTag(out);
4093 if (ValueFitsSmi()) {
4094 return;
4095 }
4096 // If the value doesn't fit in a smi, the tagging changes the sign,
4097 // which causes the overflow flag to be set.
4098 compiler::Label done;
4099 const Register temp = locs()->temp(0).reg();
4100 __ j(NO_OVERFLOW, &done);
4101#else
4102 __ leaq(out, compiler::Address(value, value, TIMES_1, 0));
4103 if (ValueFitsSmi()) {
4104 return;
4105 }
4106 compiler::Label done;
4107 const Register temp = locs()->temp(0).reg();
4108 __ movq(temp, value);
4109 __ sarq(temp, compiler::Immediate(30));
4110 __ addq(temp, compiler::Immediate(1));
4111 __ cmpq(temp, compiler::Immediate(2));
4112 __ j(BELOW, &done);
4113#endif
4114
4115 if (compiler->intrinsic_mode()) {
4116 __ TryAllocate(compiler->mint_class(),
4117 compiler->intrinsic_slow_path_label(),
4118 compiler::Assembler::kFarJump, out, temp);
4119 } else if (locs()->call_on_shared_slow_path()) {
4120 const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
4121 if (!has_frame) {
4122 ASSERT(__ constant_pool_allowed());
4123 __ set_constant_pool_allowed(false);
4124 __ EnterDartFrame(0);
4125 }
4126 auto object_store = compiler->isolate_group()->object_store();
4127 const bool live_fpu_regs = locs()->live_registers()->FpuRegisterCount() > 0;
4128 const auto& stub = Code::ZoneHandle(
4129 compiler->zone(),
4130 live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
4131 : object_store->allocate_mint_without_fpu_regs_stub());
4132
4133 ASSERT(!locs()->live_registers()->ContainsRegister(
4134 AllocateMintABI::kResultReg));
4135 auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
4136 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
4137 locs(), DeoptId::kNone, extended_env);
4138 if (!has_frame) {
4139 __ LeaveDartFrame();
4140 __ set_constant_pool_allowed(true);
4141 }
4142 } else {
4143 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
4144 temp);
4145 }
4146
4147 __ movq(compiler::FieldAddress(out, Mint::value_offset()), value);
4148 __ Bind(&done);
4149}
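// The compressed-pointers range check above accepts exactly the values in
// [-2^30, 2^30): after sarq(temp, 30) temp is 0 or -1 for such values, so
// temp + 1 is unsigned-below 2 only when the leaq-tagged result is already a
// valid smi; everything else falls through to the Mint allocation.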
4150
4151LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4152 bool opt) const {
4153 const intptr_t kNumInputs = 2;
4154 const intptr_t kNumTemps = 0;
4155 LocationSummary* summary = new (zone)
4156 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4157 summary->set_in(0, Location::RequiresFpuRegister());
4158 summary->set_in(1, Location::RequiresFpuRegister());
4159 summary->set_out(0, Location::SameAsFirstInput());
4160 return summary;
4161}
4162
4163void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4164 XmmRegister left = locs()->in(0).fpu_reg();
4165 XmmRegister right = locs()->in(1).fpu_reg();
4166
4167 ASSERT(locs()->out(0).fpu_reg() == left);
4168
4169 switch (op_kind()) {
4170 case Token::kADD:
4171 __ addsd(left, right);
4172 break;
4173 case Token::kSUB:
4174 __ subsd(left, right);
4175 break;
4176 case Token::kMUL:
4177 __ mulsd(left, right);
4178 break;
4179 case Token::kDIV:
4180 __ divsd(left, right);
4181 break;
4182 default:
4183 UNREACHABLE();
4184 }
4185}
4186
4187LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
4188 bool opt) const {
4189 const intptr_t kNumInputs = 1;
4190 const intptr_t kNumTemps =
4191 op_kind() == MethodRecognizer::kDouble_getIsNegative
4192 ? 2
4193 : (op_kind() == MethodRecognizer::kDouble_getIsInfinite ? 1 : 0);
4194 LocationSummary* summary = new (zone)
4195 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4196 summary->set_in(0, Location::RequiresFpuRegister());
4197 if (kNumTemps > 0) {
4198 summary->set_temp(0, Location::RequiresRegister());
4199 if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
4200 summary->set_temp(1, Location::RequiresFpuRegister());
4201 }
4202 }
4203 summary->set_out(0, Location::RequiresRegister());
4204 return summary;
4205}
4206
4207Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
4208 BranchLabels labels) {
4209 ASSERT(compiler->is_optimizing());
4210 const XmmRegister value = locs()->in(0).fpu_reg();
4211 const bool is_negated = kind() != Token::kEQ;
4212
4213 switch (op_kind()) {
4214 case MethodRecognizer::kDouble_getIsNaN: {
4215 __ comisd(value, value);
4216 return is_negated ? PARITY_ODD : PARITY_EVEN;
4217 }
4218 case MethodRecognizer::kDouble_getIsInfinite: {
4219 const Register temp = locs()->temp(0).reg();
4220 __ AddImmediate(RSP, compiler::Immediate(-kDoubleSize));
4221 __ movsd(compiler::Address(RSP, 0), value);
4222 __ movq(temp, compiler::Address(RSP, 0));
4223 __ AddImmediate(RSP, compiler::Immediate(kDoubleSize));
4224 // Mask off the sign.
4225 __ AndImmediate(temp, compiler::Immediate(0x7FFFFFFFFFFFFFFFLL));
4226 // Compare with +infinity.
4227 __ CompareImmediate(temp, compiler::Immediate(0x7FF0000000000000LL));
4228 return is_negated ? NOT_EQUAL : EQUAL;
4229 }
4230 case MethodRecognizer::kDouble_getIsNegative: {
4231 const Register temp = locs()->temp(0).reg();
4232 const FpuRegister temp_fpu = locs()->temp(1).fpu_reg();
4233 compiler::Label not_zero;
4234 __ xorpd(temp_fpu, temp_fpu);
4235 __ comisd(value, temp_fpu);
4236 // If it's NaN, it's not negative.
4237 __ j(PARITY_EVEN, is_negated ? labels.true_label : labels.false_label);
4238 // Looking at the sign bit also takes care of signed zero.
4239 __ movmskpd(temp, value);
4240 __ TestImmediate(temp, compiler::Immediate(1));
4241 return is_negated ? EQUAL : NOT_EQUAL;
4242 }
4243 default:
4244 UNREACHABLE();
4245 }
4246}
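// Notes on the checks above: comisd of a value with itself raises the parity
// flag only for NaN, which is what getIsNaN branches on; getIsInfinite masks
// off the sign bit and compares against 0x7FF0000000000000, the bit pattern of
// +infinity; getIsNegative rules out NaN first and then reads the sign bit
// (which also handles -0.0) via movmskpd.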
4247
4248// SIMD
4249
4250#define DEFINE_EMIT(Name, Args) \
4251 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
4252 PP_APPLY(PP_UNPACK, Args))
4253
4254#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
4255 V(Float32x4##Name, op##ps) \
4256 V(Float64x2##Name, op##pd)
4257
4258#define SIMD_OP_SIMPLE_BINARY(V) \
4259 SIMD_OP_FLOAT_ARITH(V, Add, add) \
4260 SIMD_OP_FLOAT_ARITH(V, Sub, sub) \
4261 SIMD_OP_FLOAT_ARITH(V, Mul, mul) \
4262 SIMD_OP_FLOAT_ARITH(V, Div, div) \
4263 SIMD_OP_FLOAT_ARITH(V, Min, min) \
4264 SIMD_OP_FLOAT_ARITH(V, Max, max) \
4265 V(Int32x4Add, addpl) \
4266 V(Int32x4Sub, subpl) \
4267 V(Int32x4BitAnd, andps) \
4268 V(Int32x4BitOr, orps) \
4269 V(Int32x4BitXor, xorps) \
4270 V(Float32x4Equal, cmppseq) \
4271 V(Float32x4NotEqual, cmppsneq) \
4272 V(Float32x4LessThan, cmppslt) \
4273 V(Float32x4LessThanOrEqual, cmppsle)
4274
4275DEFINE_EMIT(SimdBinaryOp,
4276 (SameAsFirstInput, XmmRegister left, XmmRegister right)) {
4277 switch (instr->kind()) {
4278#define EMIT(Name, op) \
4279 case SimdOpInstr::k##Name: \
4280 __ op(left, right); \
4281 break;
4282 SIMD_OP_SIMPLE_BINARY(EMIT)
4283#undef EMIT
4284 case SimdOpInstr::kFloat32x4Scale:
4285 __ cvtsd2ss(left, left);
4286 __ shufps(left, left, compiler::Immediate(0x00));
4287 __ mulps(left, right);
4288 break;
4289 case SimdOpInstr::kFloat32x4ShuffleMix:
4290 case SimdOpInstr::kInt32x4ShuffleMix:
4291 __ shufps(left, right, compiler::Immediate(instr->mask()));
4292 break;
4293 case SimdOpInstr::kFloat64x2FromDoubles:
4294 // shufpd mask 0x0 results in:
4295 // Lower 64-bits of left = Lower 64-bits of left.
4296 // Upper 64-bits of left = Lower 64-bits of right.
4297 __ shufpd(left, right, compiler::Immediate(0x0));
4298 break;
4299 case SimdOpInstr::kFloat64x2Scale:
4300 __ shufpd(right, right, compiler::Immediate(0x00));
4301 __ mulpd(left, right);
4302 break;
4303 case SimdOpInstr::kFloat64x2WithX:
4304 case SimdOpInstr::kFloat64x2WithY: {
4305 // TODO(dartbug.com/30949) avoid transfer through memory.
4306 COMPILE_ASSERT(SimdOpInstr::kFloat64x2WithY ==
4307 (SimdOpInstr::kFloat64x2WithX + 1));
4308 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat64x2WithX;
4309 ASSERT(0 <= lane_index && lane_index < 2);
4310
4311 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4312 __ movups(compiler::Address(RSP, 0), left);
4313 __ movsd(compiler::Address(RSP, lane_index * kDoubleSize), right);
4314 __ movups(left, compiler::Address(RSP, 0));
4315 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4316 break;
4317 }
4318 case SimdOpInstr::kFloat32x4WithX:
4319 case SimdOpInstr::kFloat32x4WithY:
4320 case SimdOpInstr::kFloat32x4WithZ:
4321 case SimdOpInstr::kFloat32x4WithW: {
4322 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4323 // insertps. With SSE2 these instructions can be implemented via a combination
4324 // of shufps/movss/movlhps.
4325 COMPILE_ASSERT(
4326 SimdOpInstr::kFloat32x4WithY == (SimdOpInstr::kFloat32x4WithX + 1) &&
4327 SimdOpInstr::kFloat32x4WithZ == (SimdOpInstr::kFloat32x4WithX + 2) &&
4328 SimdOpInstr::kFloat32x4WithW == (SimdOpInstr::kFloat32x4WithX + 3));
4329 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat32x4WithX;
4330 ASSERT(0 <= lane_index && lane_index < 4);
4331 __ cvtsd2ss(left, left);
4332 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4333 __ movups(compiler::Address(RSP, 0), right);
4334 __ movss(compiler::Address(RSP, lane_index * kFloatSize), left);
4335 __ movups(left, compiler::Address(RSP, 0));
4336 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4337 break;
4338 }
4339
4340 default:
4341 UNREACHABLE();
4342 }
4343}
4344
4345#define SIMD_OP_SIMPLE_UNARY(V) \
4346 SIMD_OP_FLOAT_ARITH(V, Sqrt, sqrt) \
4347 SIMD_OP_FLOAT_ARITH(V, Negate, negate) \
4348 SIMD_OP_FLOAT_ARITH(V, Abs, abs) \
4349 V(Float32x4Reciprocal, rcpps) \
4350 V(Float32x4ReciprocalSqrt, rsqrtps)
4351
4352DEFINE_EMIT(SimdUnaryOp, (SameAsFirstInput, XmmRegister value)) {
4353 // TODO(dartbug.com/30949) select better register constraints to avoid
4354 // redundant move of input into a different register.
4355 switch (instr->kind()) {
4356#define EMIT(Name, op) \
4357 case SimdOpInstr::k##Name: \
4358 __ op(value, value); \
4359 break;
4360 SIMD_OP_SIMPLE_UNARY(EMIT)
4361#undef EMIT
4362 case SimdOpInstr::kFloat32x4GetX:
4363 // Shuffle not necessary.
4364 __ cvtss2sd(value, value);
4365 break;
4366 case SimdOpInstr::kFloat32x4GetY:
4367 __ shufps(value, value, compiler::Immediate(0x55));
4368 __ cvtss2sd(value, value);
4369 break;
4370 case SimdOpInstr::kFloat32x4GetZ:
4371 __ shufps(value, value, compiler::Immediate(0xAA));
4372 __ cvtss2sd(value, value);
4373 break;
4374 case SimdOpInstr::kFloat32x4GetW:
4375 __ shufps(value, value, compiler::Immediate(0xFF));
4376 __ cvtss2sd(value, value);
4377 break;
4378 case SimdOpInstr::kFloat32x4Shuffle:
4379 case SimdOpInstr::kInt32x4Shuffle:
4380 __ shufps(value, value, compiler::Immediate(instr->mask()));
4381 break;
4382 case SimdOpInstr::kFloat32x4Splat:
4383 // Convert to Float32.
4384 __ cvtsd2ss(value, value);
4385 // Splat across all lanes.
4386 __ shufps(value, value, compiler::Immediate(0x00));
4387 break;
4388 case SimdOpInstr::kFloat32x4ToFloat64x2:
4389 __ cvtps2pd(value, value);
4390 break;
4391 case SimdOpInstr::kFloat64x2ToFloat32x4:
4392 __ cvtpd2ps(value, value);
4393 break;
4394 case SimdOpInstr::kInt32x4ToFloat32x4:
4395 case SimdOpInstr::kFloat32x4ToInt32x4:
4396 // TODO(dartbug.com/30949) these operations are essentially nop and should
4397 // not generate any code. They should be removed from the graph before
4398 // code generation.
4399 break;
4400 case SimdOpInstr::kFloat64x2GetX:
4401 // NOP.
4402 break;
4403 case SimdOpInstr::kFloat64x2GetY:
4404 __ shufpd(value, value, compiler::Immediate(0x33));
4405 break;
4406 case SimdOpInstr::kFloat64x2Splat:
4407 __ shufpd(value, value, compiler::Immediate(0x0));
4408 break;
4409 default:
4410 UNREACHABLE();
4411 break;
4412 }
4413}
4414
4415DEFINE_EMIT(SimdGetSignMask, (Register out, XmmRegister value)) {
4416 switch (instr->kind()) {
4417 case SimdOpInstr::kFloat32x4GetSignMask:
4418 case SimdOpInstr::kInt32x4GetSignMask:
4419 __ movmskps(out, value);
4420 break;
4421 case SimdOpInstr::kFloat64x2GetSignMask:
4422 __ movmskpd(out, value);
4423 break;
4424 default:
4425 UNREACHABLE();
4426 break;
4427 }
4428}
4429
4430DEFINE_EMIT(
4431 Float32x4FromDoubles,
4432 (SameAsFirstInput, XmmRegister v0, XmmRegister, XmmRegister, XmmRegister)) {
4433 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4434 // insertps, with SSE2 this instruction can be implemented through unpcklps.
4435 const XmmRegister out = v0;
4436 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4437 for (intptr_t i = 0; i < 4; i++) {
4438 __ cvtsd2ss(out, instr->locs()->in(i).fpu_reg());
4439 __ movss(compiler::Address(RSP, i * kFloatSize), out);
4440 }
4441 __ movups(out, compiler::Address(RSP, 0));
4442 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4443}
4444
4445DEFINE_EMIT(Float32x4Zero, (XmmRegister value)) {
4446 __ xorps(value, value);
4447}
4448
4449DEFINE_EMIT(Float64x2Zero, (XmmRegister value)) {
4450 __ xorpd(value, value);
4451}
4452
4453DEFINE_EMIT(Float32x4Clamp,
4454 (SameAsFirstInput,
4455 XmmRegister value,
4456 XmmRegister lower,
4457 XmmRegister upper)) {
4458 __ minps(value, upper);
4459 __ maxps(value, lower);
4460}
4461
4462DEFINE_EMIT(Float64x2Clamp,
4463 (SameAsFirstInput,
4464 XmmRegister value,
4465 XmmRegister lower,
4466 XmmRegister upper)) {
4467 __ minpd(value, upper);
4468 __ maxpd(value, lower);
4469}
4470
4471 DEFINE_EMIT(Int32x4FromInts,
4472 (XmmRegister result, Register, Register, Register, Register)) {
4473 // TODO(dartbug.com/30949) avoid transfer through memory.
4474 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4475 for (intptr_t i = 0; i < 4; i++) {
4476 __ movl(compiler::Address(RSP, i * kInt32Size), instr->locs()->in(i).reg());
4477 }
4478 __ movups(result, compiler::Address(RSP, 0));
4479 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4480}
4481
4482 DEFINE_EMIT(Int32x4FromBools,
4483 (XmmRegister result,
4484 Register,
4485 Register,
4486 Register,
4487 Register,
4488 Temp<Register> temp)) {
4489 // TODO(dartbug.com/30949) avoid transfer through memory.
4490 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4491 for (intptr_t i = 0; i < 4; i++) {
4492 compiler::Label done, load_false;
4493 __ xorq(temp, temp);
4494 __ CompareObject(instr->locs()->in(i).reg(), Bool::True());
4495 __ setcc(EQUAL, ByteRegisterOf(temp));
4496 __ negl(temp); // temp = input ? -1 : 0
4497 __ movl(compiler::Address(RSP, kInt32Size * i), temp);
4498 }
4499 __ movups(result, compiler::Address(RSP, 0));
4500 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4501}
4502
4503 static void EmitToBoolean(FlowGraphCompiler* compiler, Register out) {
4504 ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE();
4505 __ testl(out, out);
4506 __ setcc(ZERO, ByteRegisterOf(out));
4507 __ movzxb(out, out);
4508 __ movq(out,
4509 compiler::Address(THR, out, TIMES_8, Thread::bool_true_offset()));
4510}
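// EmitToBoolean expects a 32-bit 0 / -1 mask in 'out': setcc(ZERO) turns that
// into 1 for "false" and 0 for "true", which then indexes the Bool constants
// from THR, relying on bool_false() being stored immediately after
// bool_true() in the Thread object.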
4511
4512DEFINE_EMIT(Int32x4GetFlagZorW,
4513 (Register out, XmmRegister value, Temp<XmmRegister> temp)) {
4514 __ movhlps(temp, value); // extract upper half.
4515 __ movq(out, temp);
4516 if (instr->kind() == SimdOpInstr::kInt32x4GetFlagW) {
4517 __ shrq(out, compiler::Immediate(32)); // extract upper 32bits.
4518 }
4519 EmitToBoolean(compiler, out);
4520}
4521
4522DEFINE_EMIT(Int32x4GetFlagXorY, (Register out, XmmRegister value)) {
4523 __ movq(out, value);
4524 if (instr->kind() == SimdOpInstr::kInt32x4GetFlagY) {
4525 __ shrq(out, compiler::Immediate(32)); // extract upper 32bits.
4526 }
4527 EmitToBoolean(compiler, out);
4528}
4529
4530DEFINE_EMIT(
4531 Int32x4WithFlag,
4532 (SameAsFirstInput, XmmRegister mask, Register flag, Temp<Register> temp)) {
4533 // TODO(dartbug.com/30949) avoid transfer through memory.
4534 COMPILE_ASSERT(
4535 SimdOpInstr::kInt32x4WithFlagY == (SimdOpInstr::kInt32x4WithFlagX + 1) &&
4536 SimdOpInstr::kInt32x4WithFlagZ == (SimdOpInstr::kInt32x4WithFlagX + 2) &&
4537 SimdOpInstr::kInt32x4WithFlagW == (SimdOpInstr::kInt32x4WithFlagX + 3));
4538 const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4WithFlagX;
4539 ASSERT(0 <= lane_index && lane_index < 4);
4540 __ SubImmediate(RSP, compiler::Immediate(kSimd128Size));
4541 __ movups(compiler::Address(RSP, 0), mask);
4542
4543 // temp = flag == true ? -1 : 0
4544 __ xorq(temp, temp);
4545 __ CompareObject(flag, Bool::True());
4546 __ setcc(EQUAL, ByteRegisterOf(temp));
4547 __ negl(temp);
4548
4549 __ movl(compiler::Address(RSP, lane_index * kInt32Size), temp);
4550 __ movups(mask, compiler::Address(RSP, 0));
4551 __ AddImmediate(RSP, compiler::Immediate(kSimd128Size));
4552}
4553
4554DEFINE_EMIT(Int32x4Select,
4555 (SameAsFirstInput,
4556 XmmRegister mask,
4557 XmmRegister trueValue,
4558 XmmRegister falseValue,
4559 Temp<XmmRegister> temp)) {
4560 // Copy mask.
4561 __ movaps(temp, mask);
4562 // Invert it.
4563 __ notps(temp, temp);
4564 // mask = mask & trueValue.
4565 __ andps(mask, trueValue);
4566 // temp = temp & falseValue.
4567 __ andps(temp, falseValue);
4568 // out = mask | temp.
4569 __ orps(mask, temp);
4570}
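// This is the usual branchless bit-select, computed lane-wise:
//   out = (mask & trueValue) | (~mask & falseValue)
// so each all-ones/all-zeros lane of the mask picks the corresponding lane of
// trueValue or falseValue.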
4571
4572// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
4573// format:
4574//
4575// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
4576// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
4577//
4578#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
4579 SIMD_OP_SIMPLE_BINARY(CASE) \
4580 CASE(Float32x4Scale) \
4581 CASE(Float32x4ShuffleMix) \
4582 CASE(Int32x4ShuffleMix) \
4583 CASE(Float64x2FromDoubles) \
4584 CASE(Float64x2Scale) \
4585 CASE(Float64x2WithX) \
4586 CASE(Float64x2WithY) \
4587 CASE(Float32x4WithX) \
4588 CASE(Float32x4WithY) \
4589 CASE(Float32x4WithZ) \
4590 CASE(Float32x4WithW) \
4591 ____(SimdBinaryOp) \
4592 SIMD_OP_SIMPLE_UNARY(CASE) \
4593 CASE(Float32x4GetX) \
4594 CASE(Float32x4GetY) \
4595 CASE(Float32x4GetZ) \
4596 CASE(Float32x4GetW) \
4597 CASE(Float32x4Shuffle) \
4598 CASE(Int32x4Shuffle) \
4599 CASE(Float32x4Splat) \
4600 CASE(Float32x4ToFloat64x2) \
4601 CASE(Float64x2ToFloat32x4) \
4602 CASE(Int32x4ToFloat32x4) \
4603 CASE(Float32x4ToInt32x4) \
4604 CASE(Float64x2GetX) \
4605 CASE(Float64x2GetY) \
4606 CASE(Float64x2Splat) \
4607 ____(SimdUnaryOp) \
4608 CASE(Float32x4GetSignMask) \
4609 CASE(Int32x4GetSignMask) \
4610 CASE(Float64x2GetSignMask) \
4611 ____(SimdGetSignMask) \
4612 SIMPLE(Float32x4FromDoubles) \
4613 SIMPLE(Int32x4FromInts) \
4614 SIMPLE(Int32x4FromBools) \
4615 SIMPLE(Float32x4Zero) \
4616 SIMPLE(Float64x2Zero) \
4617 SIMPLE(Float32x4Clamp) \
4618 SIMPLE(Float64x2Clamp) \
4619 CASE(Int32x4GetFlagX) \
4620 CASE(Int32x4GetFlagY) \
4621 ____(Int32x4GetFlagXorY) \
4622 CASE(Int32x4GetFlagZ) \
4623 CASE(Int32x4GetFlagW) \
4624 ____(Int32x4GetFlagZorW) \
4625 CASE(Int32x4WithFlagX) \
4626 CASE(Int32x4WithFlagY) \
4627 CASE(Int32x4WithFlagZ) \
4628 CASE(Int32x4WithFlagW) \
4629 ____(Int32x4WithFlag) \
4630 SIMPLE(Int32x4Select)
4631
4632LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4633 switch (kind()) {
4634#define CASE(Name, ...) case k##Name:
4635#define EMIT(Name) \
4636 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
4637#define SIMPLE(Name) CASE(Name) EMIT(Name)
4638 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4639#undef CASE
4640#undef EMIT
4641#undef SIMPLE
4642 case SimdOpInstr::kFloat32x4GreaterThan:
4643 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4644 case kIllegalSimdOp:
4645 break;
4646 }
4647 UNREACHABLE();
4648 return nullptr;
4649}
4650
4651void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4652 switch (kind()) {
4653#define CASE(Name, ...) case k##Name:
4654#define EMIT(Name) \
4655 InvokeEmitter(compiler, this, &Emit##Name); \
4656 break;
4657#define SIMPLE(Name) CASE(Name) EMIT(Name)
4658 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4659#undef CASE
4660#undef EMIT
4661#undef SIMPLE
4662 case SimdOpInstr::kFloat32x4GreaterThan:
4663 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4664 case kIllegalSimdOp:
4665 UNREACHABLE();
4666 break;
4667 }
4668}
4669
4670#undef DEFINE_EMIT
4671
4672LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
4673 Zone* zone,
4674 bool opt) const {
4675 const intptr_t kNumTemps = 0;
4676 LocationSummary* summary = new (zone)
4677 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4678 summary->set_in(0, Location::RegisterLocation(CallingConventions::kArg1Reg));
4679 summary->set_in(1, Location::RegisterLocation(CallingConventions::kArg2Reg));
4680 summary->set_in(2, Location::RegisterLocation(CallingConventions::kArg3Reg));
4681 summary->set_in(3, Location::RegisterLocation(CallingConventions::kArg4Reg));
4682 summary->set_out(0, Location::RegisterLocation(RAX));
4683 return summary;
4684}
4685
4686void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4687 compiler::LeafRuntimeScope rt(compiler->assembler(),
4688 /*frame_size=*/0,
4689 /*preserve_registers=*/false);
4690 // Call the function. Parameters are already in their correct spots.
4691 rt.Call(TargetFunction(), TargetFunction().argument_count());
4692}
4693
4694LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4695 bool opt) const {
4696 const intptr_t kNumInputs = 1;
4697 return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
4698 LocationSummary::kNoCall);
4699}
4700
4701void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4702 Register value = locs()->in(0).reg();
4703 ASSERT(value == locs()->out(0).reg());
4704 switch (op_kind()) {
4705 case Token::kNEGATE: {
4706 compiler::Label* deopt =
4707 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4708 __ OBJ(neg)(value);
4709 __ j(OVERFLOW, deopt);
4710 break;
4711 }
4712 case Token::kBIT_NOT:
4713 __ notq(value);
4714 // Remove inverted smi-tag.
4715 __ AndImmediate(value, compiler::Immediate(~kSmiTagMask));
4716 break;
4717 default:
4718 UNREACHABLE();
4719 }
4720}
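// For kBIT_NOT above, notq also flips the (zero) smi tag bit, so masking with
// ~kSmiTagMask restores a valid tag: ~(2 * x) & ~1 == 2 * ~x, i.e. the result
// is ~x already in tagged form and no overflow check is needed.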
4721
4722LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4723 bool opt) const {
4724 const intptr_t kNumInputs = 1;
4725 const intptr_t kNumTemps = 0;
4726 LocationSummary* summary = new (zone)
4727 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4728 summary->set_in(0, Location::RequiresFpuRegister());
4729 if (op_kind() == Token::kSQUARE) {
4730 summary->set_out(0, Location::SameAsFirstInput());
4731 } else {
4732 summary->set_out(0, Location::RequiresFpuRegister());
4733 }
4734 return summary;
4735}
4736
4737void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4738 ASSERT(representation() == kUnboxedDouble);
4739 XmmRegister result = locs()->out(0).fpu_reg();
4740 XmmRegister value = locs()->in(0).fpu_reg();
4741 switch (op_kind()) {
4742 case Token::kNEGATE:
4743 __ DoubleNegate(result, value);
4744 break;
4745 case Token::kSQRT:
4746 __ sqrtsd(result, value);
4747 break;
4748 case Token::kSQUARE:
4749 ASSERT(result == value);
4750 __ mulsd(result, value);
4751 break;
4752 case Token::kTRUNCATE:
4753 __ roundsd(result, value, compiler::Assembler::kRoundToZero);
4754 break;
4755 case Token::kFLOOR:
4756 __ roundsd(result, value, compiler::Assembler::kRoundDown);
4757 break;
4758 case Token::kCEILING:
4759 __ roundsd(result, value, compiler::Assembler::kRoundUp);
4760 break;
4761 default:
4762 UNREACHABLE();
4763 }
4764}
4765
4766LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4767 bool opt) const {
4768 if (result_cid() == kDoubleCid) {
4769 const intptr_t kNumInputs = 2;
4770 const intptr_t kNumTemps = 1;
4771 LocationSummary* summary = new (zone)
4772 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4773 summary->set_in(0, Location::RequiresFpuRegister());
4774 summary->set_in(1, Location::RequiresFpuRegister());
4775 // Reuse the left register so that code can be made shorter.
4776 summary->set_out(0, Location::SameAsFirstInput());
4777 summary->set_temp(0, Location::RequiresRegister());
4778 return summary;
4779 }
4780 ASSERT(result_cid() == kSmiCid);
4781 const intptr_t kNumInputs = 2;
4782 const intptr_t kNumTemps = 0;
4783 LocationSummary* summary = new (zone)
4784 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4785 summary->set_in(0, Location::RequiresRegister());
4786 summary->set_in(1, Location::RequiresRegister());
4787 // Reuse the left register so that code can be made shorter.
4788 summary->set_out(0, Location::SameAsFirstInput());
4789 return summary;
4790}
4791
4792void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4793 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4794 (op_kind() == MethodRecognizer::kMathMax));
4795 const bool is_min = op_kind() == MethodRecognizer::kMathMin;
4796 if (result_cid() == kDoubleCid) {
4797 compiler::Label done, returns_nan, are_equal;
4798 XmmRegister left = locs()->in(0).fpu_reg();
4799 XmmRegister right = locs()->in(1).fpu_reg();
4800 XmmRegister result = locs()->out(0).fpu_reg();
4801 Register temp = locs()->temp(0).reg();
4802 __ comisd(left, right);
4803 __ j(PARITY_EVEN, &returns_nan, compiler::Assembler::kNearJump);
4804 __ j(EQUAL, &are_equal, compiler::Assembler::kNearJump);
4805 const Condition double_condition =
4806 is_min ? TokenKindToDoubleCondition(Token::kLT)
4807 : TokenKindToDoubleCondition(Token::kGT);
4808 ASSERT(left == result);
4809 __ j(double_condition, &done, compiler::Assembler::kNearJump);
4810 __ movsd(result, right);
4811 __ jmp(&done, compiler::Assembler::kNearJump);
4812
4813 __ Bind(&returns_nan);
4814 __ movq(temp, compiler::Address(THR, Thread::double_nan_address_offset()));
4815 __ movsd(result, compiler::Address(temp, 0));
4816 __ jmp(&done, compiler::Assembler::kNearJump);
4817
4818 __ Bind(&are_equal);
4819 compiler::Label left_is_negative;
4820 // Check for negative zero: -0.0 is equal to 0.0 but min or max must return
4821 // -0.0 or 0.0 respectively.
4822 // Check for negative left value (get the sign bit):
4823 // - min -> left is negative ? left : right.
4824 // - max -> left is negative ? right : left
4825 // Check the sign bit.
4826 __ movmskpd(temp, left);
4827 __ testq(temp, compiler::Immediate(1));
4828 if (is_min) {
4829 ASSERT(left == result);
4830 __ j(NOT_ZERO, &done,
4831 compiler::Assembler::kNearJump); // Negative -> return left.
4832 } else {
4833 ASSERT(left == result);
4834 __ j(ZERO, &done,
4835 compiler::Assembler::kNearJump); // Positive -> return left.
4836 }
4837 __ movsd(result, right);
4838 __ Bind(&done);
4839 return;
4840 }
4841
4842 ASSERT(result_cid() == kSmiCid);
4843 Register left = locs()->in(0).reg();
4844 Register right = locs()->in(1).reg();
4845 Register result = locs()->out(0).reg();
4846 __ OBJ(cmp)(left, right);
4847 ASSERT(result == left);
4848 if (is_min) {
4849 __ cmovgeq(result, right);
4850 } else {
4851 __ cmovlq(result, right);
4852 }
4853}
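// The smi path keeps 'left' as the result and conditionally overwrites it: for
// min, cmovgeq takes 'right' when left >= right; for max, cmovlq takes 'right'
// when left < right. The double path above cannot use a plain conditional move
// because it must special-case NaN and -0.0 first.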
4854
4855LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4856 bool opt) const {
4857 const intptr_t kNumInputs = 1;
4858 const intptr_t kNumTemps = 0;
4859 LocationSummary* result = new (zone)
4860 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4861 result->set_in(0, Location::RequiresRegister());
4862 result->set_out(0, Location::RequiresFpuRegister());
4863 return result;
4864}
4865
4866void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4867 Register value = locs()->in(0).reg();
4868 FpuRegister result = locs()->out(0).fpu_reg();
4869 __ cvtsi2sdl(result, value);
4870}
4871
4872LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4873 bool opt) const {
4874 const intptr_t kNumInputs = 1;
4875 const intptr_t kNumTemps = 0;
4876 LocationSummary* result = new (zone)
4877 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4878 result->set_in(0, Location::WritableRegister());
4879 result->set_out(0, Location::RequiresFpuRegister());
4880 return result;
4881}
4882
4883void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4884 Register value = locs()->in(0).reg();
4885 FpuRegister result = locs()->out(0).fpu_reg();
4886 __ SmiUntag(value);
4887 __ OBJ(cvtsi2sd)(result, value);
4888}
4889
4890DEFINE_BACKEND(Int64ToDouble, (FpuRegister result, Register value)) {
4891 __ cvtsi2sdq(result, value);
4892}
4893
4894LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4895 bool opt) const {
4896 const intptr_t kNumInputs = 1;
4897 const intptr_t kNumTemps = 1;
4898 LocationSummary* result = new (zone) LocationSummary(
4899 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4900 result->set_in(0, Location::RequiresFpuRegister());
4901 result->set_out(0, Location::RequiresRegister());
4902 result->set_temp(0, Location::RequiresRegister());
4903 return result;
4904}
4905
4906void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4907 const Register result = locs()->out(0).reg();
4908 const Register temp = locs()->temp(0).reg();
4909 XmmRegister value_double = locs()->in(0).fpu_reg();
4910 ASSERT(result != temp);
4911
4912 DoubleToIntegerSlowPath* slow_path =
4913 new DoubleToIntegerSlowPath(this, value_double);
4914 compiler->AddSlowPathCode(slow_path);
4915
4916 if (recognized_kind() != MethodRecognizer::kDoubleToInteger) {
4917 // In JIT mode without --target-unknown-cpu VM knows target CPU features
4918 // at compile time and can pick more optimal representation
4919 // for DoubleToDouble conversion. In AOT mode and with
4920 // --target-unknown-cpu we test if roundsd instruction is available
4921 // at run time and fall back to stub if it isn't.
4922 ASSERT(CompilerState::Current().is_aot() || FLAG_target_unknown_cpu);
4923 if (FLAG_use_slow_path) {
4924 __ jmp(slow_path->entry_label());
4925 __ Bind(slow_path->exit_label());
4926 return;
4927 }
4928 __ cmpb(
4929 compiler::Address(
4930 THR,
4931 compiler::target::Thread::double_truncate_round_supported_offset()),
4932 compiler::Immediate(0));
4933 __ j(EQUAL, slow_path->entry_label());
4934
4935 __ xorps(FpuTMP, FpuTMP);
4936 switch (recognized_kind()) {
4937 case MethodRecognizer::kDoubleFloorToInt:
4938 __ roundsd(FpuTMP, value_double, compiler::Assembler::kRoundDown);
4939 break;
4940 case MethodRecognizer::kDoubleCeilToInt:
4941 __ roundsd(FpuTMP, value_double, compiler::Assembler::kRoundUp);
4942 break;
4943 default:
4944 UNREACHABLE();
4945 }
4946 value_double = FpuTMP;
4947 }
4948
4949 __ OBJ(cvttsd2si)(result, value_double);
4950 // Overflow is signalled with minint.
4951 // Check for overflow and that it fits into Smi.
4952 __ movq(temp, result);
4953 __ OBJ(shl)(temp, compiler::Immediate(1));
4954 __ j(OVERFLOW, slow_path->entry_label());
4955 __ SmiTag(result);
4956 __ Bind(slow_path->exit_label());
4957}
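// cvttsd2si reports any unrepresentable input (NaN, infinities, or values
// outside int64 range) as the minimum integer, and shifting the copied result
// left by one sets OF both for that sentinel and for any value that would not
// fit in a smi, so a single overflow branch routes every failure mode to the
// slow path before the result is tagged.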
4958
4959LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4960 bool opt) const {
4961 const intptr_t kNumInputs = 1;
4962 const intptr_t kNumTemps = 1;
4963 LocationSummary* result = new (zone)
4964 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4965 result->set_in(0, Location::RequiresFpuRegister());
4966 result->set_out(0, Location::RequiresRegister());
4967 result->set_temp(0, Location::RequiresRegister());
4968 return result;
4969}
4970
4971void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4972 compiler::Label* deopt =
4973 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4974 Register result = locs()->out(0).reg();
4975 XmmRegister value = locs()->in(0).fpu_reg();
4976 Register temp = locs()->temp(0).reg();
4977
4978 __ OBJ(cvttsd2si)(result, value);
4979 // Overflow is signalled with minint.
4980 compiler::Label do_call, done;
4981 // Check for overflow and that it fits into Smi.
4982 __ movq(temp, result);
4983 __ OBJ(shl)(temp, compiler::Immediate(1));
4984 __ j(OVERFLOW, deopt);
4985 __ SmiTag(result);
4986}
4987
4988LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4989 bool opt) const {
4990 const intptr_t kNumInputs = 1;
4991 const intptr_t kNumTemps = 0;
4992 LocationSummary* result = new (zone)
4993 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4994 result->set_in(0, Location::RequiresFpuRegister());
4995 result->set_out(0, Location::SameAsFirstInput());
4996 return result;
4997}
4998
4999void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5000 __ cvtsd2ss(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
5001}
5002
5003LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
5004 bool opt) const {
5005 const intptr_t kNumInputs = 1;
5006 const intptr_t kNumTemps = 0;
5007 LocationSummary* result = new (zone)
5008 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5010 result->set_out(0, Location::SameAsFirstInput());
5011 return result;
5012}
5013
5014void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5015 __ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
5016}
5017
5018LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
5019 bool opt) const {
5020 UNREACHABLE();
5021 return NULL;
5022}
5023
5024void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5025 UNREACHABLE();
5026}
5027
5028LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
5029 bool opt) const {
5030 // Calling convention on x64 uses XMM0 and XMM1 to pass the first two
5031 // double arguments and XMM0 to return the result.
5034
5035 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5036 ASSERT(InputCount() == 2);
5037 const intptr_t kNumTemps = 4;
5038 LocationSummary* result = new (zone)
5039 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5042 result->set_temp(0, Location::RegisterLocation(R13));
5043 // Temp index 1.
5044 result->set_temp(1, Location::RegisterLocation(RAX));
5045 // Temp index 2.
5047 // Block XMM0 for the calling convention.
5050 return result;
5051 }
5052 ASSERT((InputCount() == 1) || (InputCount() == 2));
5053 const intptr_t kNumTemps = 1;
5054 LocationSummary* result = new (zone)
5055 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
5056 result->set_temp(0, Location::RegisterLocation(R13));
5058 if (InputCount() == 2) {
5060 }
5062 return result;
5063}
5064
5065// Pseudo code:
5066// if (exponent == 0.0) return 1.0;
5067// // Speed up simple cases.
5068// if (exponent == 1.0) return base;
5069// if (exponent == 2.0) return base * base;
5070// if (exponent == 3.0) return base * base * base;
5071// if (base == 1.0) return 1.0;
5072// if (base.isNaN || exponent.isNaN) {
5073// return double.NAN;
5074// }
5075// if (base != -Infinity && exponent == 0.5) {
5076// if (base == 0.0) return 0.0;
5077 // return sqrt(base);
5078// }
5079// TODO(srdjan): Move into a stub?
5080static void InvokeDoublePow(FlowGraphCompiler* compiler,
5081 InvokeMathCFunctionInstr* instr) {
5082 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
5083 const intptr_t kInputCount = 2;
5084 ASSERT(instr->InputCount() == kInputCount);
5085 LocationSummary* locs = instr->locs();
5086
5087 XmmRegister base = locs->in(0).fpu_reg();
5088 XmmRegister exp = locs->in(1).fpu_reg();
5089 XmmRegister result = locs->out(0).fpu_reg();
5090 XmmRegister zero_temp =
5091 locs->temp(InvokeMathCFunctionInstr::kDoubleTempIndex).fpu_reg();
5092
5093 __ xorps(zero_temp, zero_temp);
5094 __ LoadDImmediate(result, 1.0);
5095
5096 compiler::Label check_base, skip_call;
5097 // exponent == 0.0 -> return 1.0;
5098 __ comisd(exp, zero_temp);
5100 __ j(EQUAL, &skip_call); // 'result' is 1.0.
5101
5102 // exponent == 1.0 ?
5103 __ comisd(exp, result);
5104 compiler::Label return_base;
5105 __ j(EQUAL, &return_base, compiler::Assembler::kNearJump);
5106
5107 // exponent == 2.0 ?
5108 __ LoadDImmediate(XMM0, 2.0);
5109 __ comisd(exp, XMM0);
5110 compiler::Label return_base_times_2;
5111 __ j(EQUAL, &return_base_times_2, compiler::Assembler::kNearJump);
5112
5113 // exponent == 3.0 ?
5114 __ LoadDImmediate(XMM0, 3.0);
5115 __ comisd(exp, XMM0);
5116 __ j(NOT_EQUAL, &check_base);
5117
5118 // Base times 3.
5119 __ movsd(result, base);
5120 __ mulsd(result, base);
5121 __ mulsd(result, base);
5122 __ jmp(&skip_call);
5123
5124 __ Bind(&return_base);
5125 __ movsd(result, base);
5126 __ jmp(&skip_call);
5127
5128 __ Bind(&return_base_times_2);
5129 __ movsd(result, base);
5130 __ mulsd(result, base);
5131 __ jmp(&skip_call);
5132
5133 __ Bind(&check_base);
5134 // Note: 'exp' could be NaN.
5135
5136 compiler::Label return_nan;
5137 // base == 1.0 -> return 1.0;
5138 __ comisd(base, result);
5140 __ j(EQUAL, &skip_call, compiler::Assembler::kNearJump);
5141 // Note: 'base' could be NaN.
5142 __ comisd(exp, base);
5143 // Neither 'exp' nor 'base' is NaN.
5144 compiler::Label try_sqrt;
5146 // Return NaN.
5147 __ Bind(&return_nan);
5148 __ LoadDImmediate(result, NAN);
5149 __ jmp(&skip_call);
5150
5151 compiler::Label do_pow, return_zero;
5152 __ Bind(&try_sqrt);
5153 // Before calling pow, check if we could use sqrt instead of pow.
5154 __ LoadDImmediate(result, kNegInfinity);
5155 // base == -Infinity -> call pow;
5156 __ comisd(base, result);
5158
5159 // exponent == 0.5 ?
5160 __ LoadDImmediate(result, 0.5);
5161 __ comisd(exp, result);
5163
5164 // base == 0 -> return 0;
5165 __ comisd(base, zero_temp);
5166 __ j(EQUAL, &return_zero, compiler::Assembler::kNearJump);
5167
5168 __ sqrtsd(result, base);
5169 __ jmp(&skip_call, compiler::Assembler::kNearJump);
5170
5171 __ Bind(&return_zero);
5172 __ movsd(result, zero_temp);
5173 __ jmp(&skip_call);
5174
5175 __ Bind(&do_pow);
5176 {
5177 compiler::LeafRuntimeScope rt(compiler->assembler(),
5178 /*frame_size=*/0,
5179 /*preserve_registers=*/false);
5180 __ movaps(XMM0, locs->in(0).fpu_reg());
5181 ASSERT(locs->in(1).fpu_reg() == XMM1);
5182 rt.Call(instr->TargetFunction(), kInputCount);
5183 __ movaps(locs->out(0).fpu_reg(), XMM0);
5184 }
5185 __ Bind(&skip_call);
5186}
5187
5188void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5189 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
5190 InvokeDoublePow(compiler, this);
5191 return;
5192 }
5193
5194 compiler::LeafRuntimeScope rt(compiler->assembler(),
5195 /*frame_size=*/0,
5196 /*preserve_registers=*/false);
5197 ASSERT(locs()->in(0).fpu_reg() == XMM0);
5198 if (InputCount() == 2) {
5199 ASSERT(locs()->in(1).fpu_reg() == XMM1);
5200 }
5201 rt.Call(TargetFunction(), InputCount());
5202 ASSERT(locs()->out(0).fpu_reg() == XMM0);
5203}
5204
5205LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
5206 bool opt) const {
5207 // Only use this instruction in optimized code.
5208 ASSERT(opt);
5209 const intptr_t kNumInputs = 1;
5210 LocationSummary* summary =
5211 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
5212 if (representation() == kUnboxedDouble) {
5213 if (index() == 0) {
5214 summary->set_in(
5216 } else {
5217 ASSERT(index() == 1);
5218 summary->set_in(
5220 }
5221 summary->set_out(0, Location::RequiresFpuRegister());
5222 } else {
5223 ASSERT(representation() == kTagged);
5224 if (index() == 0) {
5225 summary->set_in(
5227 } else {
5228 ASSERT(index() == 1);
5229 summary->set_in(
5231 }
5232 summary->set_out(0, Location::RequiresRegister());
5233 }
5234 return summary;
5235}
5236
5237void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5238 ASSERT(locs()->in(0).IsPairLocation());
5239 PairLocation* pair = locs()->in(0).AsPairLocation();
5240 Location in_loc = pair->At(index());
5241 if (representation() == kUnboxedDouble) {
5242 XmmRegister out = locs()->out(0).fpu_reg();
5243 XmmRegister in = in_loc.fpu_reg();
5244 __ movaps(out, in);
5245 } else {
5246 ASSERT(representation() == kTagged);
5247 Register out = locs()->out(0).reg();
5248 Register in = in_loc.reg();
5249 __ movq(out, in);
5250 }
5251}
5252
5253LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
5254 bool opt) const {
5255 UNREACHABLE();
5256 return NULL;
5257}
5258
5259void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5260 UNREACHABLE();
5261}
5262
5263LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
5264 bool opt) const {
5265 UNREACHABLE();
5266 return NULL;
5267}
5268
5269void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5270 UNREACHABLE();
5271}
5272
5273LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5274 bool opt) const {
5275 const intptr_t kNumInputs = 2;
5276 const intptr_t kNumTemps = 0;
5277 LocationSummary* summary = new (zone)
5278 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5279 // Both inputs must be writable because they will be untagged.
5280 summary->set_in(0, Location::RegisterLocation(RAX));
5281 summary->set_in(1, Location::WritableRegister());
5282 summary->set_out(0, Location::Pair(Location::RegisterLocation(RAX),
5284 return summary;
5285}
5286
5287void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5288 ASSERT(CanDeoptimize());
5289 compiler::Label* deopt =
5290 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5291 Register left = locs()->in(0).reg();
5292 Register right = locs()->in(1).reg();
5293 ASSERT(locs()->out(0).IsPairLocation());
5294 PairLocation* pair = locs()->out(0).AsPairLocation();
5295 Register result1 = pair->At(0).reg();
5296 Register result2 = pair->At(1).reg();
5297 compiler::Label not_32bit, done;
5298 Register temp = RDX;
5299 ASSERT(left == RAX);
5300 ASSERT((right != RDX) && (right != RAX));
5301 ASSERT(result1 == RAX);
5302 ASSERT(result2 == RDX);
5303 if (RangeUtils::CanBeZero(divisor_range())) {
5304 // Handle divide by zero in runtime.
5305 __ OBJ(test)(right, right);
5306 __ j(ZERO, deopt);
5307 }
5308#if !defined(DART_COMPRESSED_POINTERS)
5309 // Check if both operands fit into 32 bits, as idiv with 64-bit operands
5310 // requires twice as many cycles and has much higher latency.
5311 // We check this before untagging them to avoid the corner case of
5312 // dividing INT_MIN by -1, which raises an exception because the quotient
5313 // is too large for a 32-bit register.
5314 __ movsxd(temp, left);
5315 __ cmpq(temp, left);
5316 __ j(NOT_EQUAL, &not_32bit);
5317 __ movsxd(temp, right);
5318 __ cmpq(temp, right);
5319 __ j(NOT_EQUAL, &not_32bit);
5320
5321 // Both operands are 31bit smis. Divide using 32bit idiv.
5322 __ SmiUntag(left);
5323 __ SmiUntag(right);
5324 __ cdq();
5325 __ idivl(right);
5326 __ movsxd(RAX, RAX);
5327 __ movsxd(RDX, RDX);
5328 __ jmp(&done);
5329
5330 // Divide using 64bit idiv.
5331 __ Bind(&not_32bit);
5332 __ SmiUntag(left);
5333 __ SmiUntag(right);
5334 __ cqo(); // Sign extend RAX -> RDX:RAX.
5335 __ idivq(right); // RAX: quotient, RDX: remainder.
5336 // Check the corner case of dividing 'MIN_SMI' by -1, in which case
5337 // we cannot tag the result.
5338 __ CompareImmediate(RAX, compiler::Immediate(0x4000000000000000));
5339 __ j(EQUAL, deopt);
5340 __ Bind(&done);
5341#else
5342 USE(temp);
5343 // Both operands are 31bit smis. Divide using 32bit idiv.
5344 __ SmiUntag(left);
5345 __ SmiUntag(right);
5346 __ cdq();
5347 __ idivl(right);
5348
5349 // Check the corner case of dividing 'MIN_SMI' by -1, in which case
5350 // we cannot tag the result.
5351 __ cmpl(RAX, compiler::Immediate(0x40000000));
5352 __ j(EQUAL, deopt);
5353 __ movsxd(RAX, RAX);
5354 __ movsxd(RDX, RDX);
5355#endif
5356
5357 // Modulo correction (RDX).
5358 // res = left % right;
5359 // if (res < 0) {
5360 // if (right < 0) {
5361 // res = res - right;
5362 // } else {
5363 // res = res + right;
5364 // }
5365 // }
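// For example: for -1 % 3, idiv leaves quotient 0 in RAX and remainder -1
// in RDX; since 3 > 0 the correction adds the divisor, giving -1 + 3 = 2,
// which matches Dart's non-negative modulo.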
5366 compiler::Label all_done;
5367 __ cmpq(RDX, compiler::Immediate(0));
5369 // Result is negative, adjust it.
5370 if ((divisor_range() == nullptr) || divisor_range()->Overlaps(-1, 1)) {
5371 compiler::Label subtract;
5372 __ cmpq(right, compiler::Immediate(0));
5374 __ addq(RDX, right);
5375 __ jmp(&all_done, compiler::Assembler::kNearJump);
5376 __ Bind(&subtract);
5377 __ subq(RDX, right);
5378 } else if (divisor_range()->IsPositive()) {
5379 // Right is positive.
5380 __ addq(RDX, right);
5381 } else {
5382 // Right is negative.
5383 __ subq(RDX, right);
5384 }
5385 __ Bind(&all_done);
5386
5387 __ SmiTag(RAX);
5388 __ SmiTag(RDX);
5389 // Note that the result of an integer division/modulo of two
5390 // in-range arguments cannot produce an out-of-range result.
5391}
5392
5393// Should be kept in sync with integers.cc Multiply64Hash
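// A rough C++ sketch of what the assembly below computes (the helper name
// is hypothetical and uses the __int128 extension; the authoritative
// version is Multiply64Hash in integers.cc):
//
//   uint32_t Multiply64HashSketch(uint64_t value) {
//     // 64x64->128-bit multiply by 0x2d51, then fold high into low.
//     unsigned __int128 product =
//         static_cast<unsigned __int128>(value) * 0x2d51u;
//     uint64_t hash = static_cast<uint64_t>(product) ^
//                     static_cast<uint64_t>(product >> 64);
//     hash ^= hash >> 32;
//     return static_cast<uint32_t>(hash & 0x3fffffff);  // fits in a Smi
//   }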
5394static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler) {
5395 __ movq(RDX, compiler::Immediate(0x2d51));
5396 __ mulq(RDX);
5397 __ xorq(RAX, RDX); // RAX = xor(hi64, lo64)
5398 __ movq(RDX, RAX);
5399 __ shrq(RDX, compiler::Immediate(32));
5400 __ xorq(RAX, RDX);
5401 __ andq(RAX, compiler::Immediate(0x3fffffff));
5402}
5403
5404LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5405 bool opt) const {
5406 const intptr_t kNumInputs = 1;
5407 const intptr_t kNumTemps = 2;
5408 LocationSummary* summary = new (zone)
5409 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5410 summary->set_in(0, Location::RequiresFpuRegister());
5411 summary->set_temp(0, Location::RegisterLocation(RDX));
5412 summary->set_temp(1, Location::RequiresFpuRegister());
5413 summary->set_out(0, Location::RegisterLocation(RAX));
5414 return summary;
5415}
5416
5417void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5418 const XmmRegister value = locs()->in(0).fpu_reg();
5419 ASSERT(locs()->out(0).reg() == RAX);
5420 ASSERT(locs()->temp(0).reg() == RDX);
5421 const FpuRegister temp_fpu_reg = locs()->temp(1).fpu_reg();
5422
5423 compiler::Label hash_double;
5424
5425 __ cvttsd2siq(RAX, value);
5426 __ cvtsi2sdq(temp_fpu_reg, RAX);
5427 __ comisd(value, temp_fpu_reg);
5428 __ j(PARITY_EVEN, &hash_double); // one of the arguments is NaN
5429 __ j(NOT_EQUAL, &hash_double);
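// The cvttsd2siq / cvtsi2sdq round trip above tests whether 'value' is an
// exactly representable integer: PARITY_EVEN after comisd signals an
// unordered compare (NaN), and NOT_EQUAL signals a fractional or
// out-of-int64-range value. Those take the bit-pattern hash below, while
// exact integers reuse the integer hash sequence so that numerically equal
// int and double values can hash alike.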
5430
5431 // RAX has int64 value
5432 EmitHashIntegerCodeSequence(compiler);
5433
5434 compiler::Label done;
5435 __ jmp(&done);
5436
5437 __ Bind(&hash_double);
5438 // Convert the double bits to a hash code that fits in a Smi.
5439 __ movq(RAX, value);
5440 __ movq(RDX, RAX);
5441 __ shrq(RDX, compiler::Immediate(32));
5442 __ xorq(RAX, RDX);
5443 __ andq(RAX, compiler::Immediate(compiler::target::kSmiMax));
5444
5445 __ Bind(&done);
5446}
5447
5448LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5449 bool opt) const {
5450 const intptr_t kNumInputs = 1;
5451 const intptr_t kNumTemps = 1;
5452 LocationSummary* summary = new (zone)
5453 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5454 summary->set_in(0, Location::RegisterLocation(RAX));
5455 summary->set_out(0, Location::SameAsFirstInput());
5456 summary->set_temp(0, Location::RegisterLocation(RDX));
5457 return summary;
5458}
5459
5460void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5461 Register value = locs()->in(0).reg();
5462 Register result = locs()->out(0).reg();
5463 Register temp = locs()->temp(0).reg();
5464 ASSERT(value == RAX);
5465 ASSERT(result == RAX);
5466 ASSERT(temp == RDX);
5467
5468 if (smi_) {
5469 __ SmiUntagAndSignExtend(RAX);
5470 } else {
5471 __ LoadFieldFromOffset(RAX, RAX, Mint::value_offset());
5472 }
5473
5474 EmitHashIntegerCodeSequence(compiler);
5475 __ SmiTag(RAX);
5476}
5477
5478LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5480 // Branches don't produce a result.
5482 return comparison()->locs();
5483}
5484
5485void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5487}
5488
5489LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5490 bool opt) const {
5491 const intptr_t kNumInputs = 1;
5492 const bool need_mask_temp = IsBitTest();
5493 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5494 LocationSummary* summary = new (zone)
5495 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5496 summary->set_in(0, Location::RequiresRegister());
5497 if (!IsNullCheck()) {
5498 summary->set_temp(0, Location::RequiresRegister());
5499 if (need_mask_temp) {
5500 summary->set_temp(1, Location::RequiresRegister());
5501 }
5502 }
5503 return summary;
5504}
5505
5506void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5507 compiler::Label* deopt) {
5508 __ CompareObject(locs()->in(0).reg(), Object::null_object());
5510 __ j(cond, deopt);
5511}
5512
5513void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5514 intptr_t min,
5515 intptr_t max,
5516 intptr_t mask,
5517 compiler::Label* deopt) {
5518 Register biased_cid = locs()->temp(0).reg();
5519 __ subq(biased_cid, compiler::Immediate(min));
5520 __ cmpq(biased_cid, compiler::Immediate(max - min));
5521 __ j(ABOVE, deopt);
5522
5523 Register mask_reg = locs()->temp(1).reg();
5524 __ movq(mask_reg, compiler::Immediate(mask));
5525 __ btq(mask_reg, biased_cid);
5526 __ j(NOT_CARRY, deopt);
5527}
5528
5529int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5530 int bias,
5531 intptr_t cid_start,
5532 intptr_t cid_end,
5533 bool is_last,
5534 compiler::Label* is_ok,
5535 compiler::Label* deopt,
5536 bool use_near_jump) {
5537 Register biased_cid = locs()->temp(0).reg();
5538 Condition no_match, match;
5539 if (cid_start == cid_end) {
5540 __ cmpl(biased_cid, compiler::Immediate(cid_start - bias));
5541 no_match = NOT_EQUAL;
5542 match = EQUAL;
5543 } else {
5544 // For class ID ranges use a subtract followed by an unsigned
5545 // comparison to check both ends of the ranges with one comparison.
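// (For example, for the cid range [5, 9]: cid - 5 viewed as an unsigned
// number is <= 4 exactly when 5 <= cid <= 9, because smaller cids wrap
// around to very large unsigned values.)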
5546 __ addl(biased_cid, compiler::Immediate(bias - cid_start));
5547 bias = cid_start;
5548 __ cmpl(biased_cid, compiler::Immediate(cid_end - cid_start));
5549 no_match = ABOVE;
5551 }
5552
5553 if (is_last) {
5554 __ j(no_match, deopt);
5555 } else {
5556 if (use_near_jump) {
5558 } else {
5559 __ j(match, is_ok);
5560 }
5561 }
5562 return bias;
5563}
5564
5565LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5566 bool opt) const {
5567 const intptr_t kNumInputs = 1;
5568 const intptr_t kNumTemps = 0;
5569 LocationSummary* summary = new (zone)
5570 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5571 summary->set_in(0, Location::RequiresRegister());
5572 return summary;
5573}
5574
5575void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5576 Register value = locs()->in(0).reg();
5577 compiler::Label* deopt =
5578 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5579 __ BranchIfNotSmi(value, deopt);
5580}
5581
5582void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5583 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5584 compiler->AddSlowPathCode(slow_path);
5585
5586 Register value_reg = locs()->in(0).reg();
5587 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5588 // in order to be able to allocate it in a register.
5589 __ CompareObject(value_reg, Object::null_object());
5590 __ BranchIf(EQUAL, slow_path->entry_label());
5591}
5592
5593LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5594 bool opt) const {
5595 const intptr_t kNumInputs = 1;
5596 const intptr_t kNumTemps = 0;
5597 LocationSummary* summary = new (zone)
5598 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5599 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5600 : Location::WritableRegister());
5601 return summary;
5602}
5603
5604void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5605 Register value = locs()->in(0).reg();
5606 compiler::Label* deopt =
5607 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5608 if (cids_.IsSingleCid()) {
5609 __ CompareImmediate(value,
5610 compiler::Immediate(Smi::RawValue(cids_.cid_start)));
5611 __ j(NOT_ZERO, deopt);
5612 } else {
5613 __ AddImmediate(value,
5614 compiler::Immediate(-Smi::RawValue(cids_.cid_start)));
5615 __ cmpq(value, compiler::Immediate(Smi::RawValue(cids_.Extent())));
5616 __ j(ABOVE, deopt);
5617 }
5618}
5619
5620LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5621 bool opt) const {
5622 const intptr_t kNumInputs = 2;
5623 const intptr_t kNumTemps = 0;
5624 LocationSummary* locs = new (zone)
5625 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5628 return locs;
5629}
5630
5631void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5632 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5633 compiler::Label* deopt =
5634 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5635
5636 Location length_loc = locs()->in(kLengthPos);
5637 Location index_loc = locs()->in(kIndexPos);
5638
5639 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5640 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5641 Smi::Cast(index_loc.constant()).Value()) ||
5642 (Smi::Cast(index_loc.constant()).Value() < 0));
5643 // Unconditionally deoptimize for constant bounds checks because they
5644 // only occur when the index is out-of-bounds.
5645 __ jmp(deopt);
5646 return;
5647 }
5648
5649 const intptr_t index_cid = index()->Type()->ToCid();
5650 if (index_loc.IsConstant()) {
5651 Register length = length_loc.reg();
5652 const Smi& index = Smi::Cast(index_loc.constant());
5653 __ CompareObject(length, index);
5654 __ j(BELOW_EQUAL, deopt);
5655 } else if (length_loc.IsConstant()) {
5656 const Smi& length = Smi::Cast(length_loc.constant());
5657 Register index = index_loc.reg();
5658 if (index_cid != kSmiCid) {
5659 __ BranchIfNotSmi(index, deopt);
5660 }
5661 if (length.Value() == Smi::kMaxValue) {
5662 __ OBJ(test)(index, index);
5663 __ j(NEGATIVE, deopt);
5664 } else {
5665 __ CompareObject(index, length);
5666 __ j(ABOVE_EQUAL, deopt);
5667 }
5668 } else {
5669 Register length = length_loc.reg();
5670 Register index = index_loc.reg();
5671 if (index_cid != kSmiCid) {
5672 __ BranchIfNotSmi(index, deopt);
5673 }
5674 __ OBJ(cmp)(index, length);
5675 __ j(ABOVE_EQUAL, deopt);
5676 }
5677}
5678
5679LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5680 bool opt) const {
5681 const intptr_t kNumInputs = 1;
5682 const intptr_t kNumTemps = 0;
5683 LocationSummary* locs = new (zone) LocationSummary(
5684 zone, kNumInputs, kNumTemps,
5685 UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
5687 locs->set_in(kReceiver, Location::RequiresRegister());
5688 return locs;
5689}
5690
5691void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5692 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5693 compiler->AddSlowPathCode(slow_path);
5694 __ movq(TMP, compiler::FieldAddress(locs()->in(0).reg(),
5695 compiler::target::Object::tags_offset()));
5696 __ testq(TMP, compiler::Immediate(
5697 1 << compiler::target::UntaggedObject::kImmutableBit));
5698 __ j(NOT_ZERO, slow_path->entry_label());
5699}
5700
5701class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
5702 public:
5703 Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
5704 Register divisor,
5705 Range* divisor_range)
5706 : ThrowErrorSlowPathCode(instruction,
5707 kIntegerDivisionByZeroExceptionRuntimeEntry),
5708 is_mod_(instruction->op_kind() == Token::kMOD),
5709 divisor_(divisor),
5710 divisor_range_(divisor_range),
5711 div_by_minus_one_label_(),
5712 adjust_sign_label_() {}
5713
5714 void EmitNativeCode(FlowGraphCompiler* compiler) override {
5715 // Handle modulo/division by zero, if needed. Use superclass code.
5716 if (has_divide_by_zero()) {
5717 ThrowErrorSlowPathCode::EmitNativeCode(compiler);
5718 } else {
5719 __ Bind(entry_label()); // not used, but keeps destructor happy
5720 if (compiler::Assembler::EmittingComments()) {
5721 __ Comment("slow path %s operation (no throw)", name());
5722 }
5723 }
5724 // Handle modulo/division by minus one, if needed.
5725 // Note: an exact -1 divisor is best optimized prior to codegen.
5726 if (has_divide_by_minus_one()) {
5727 __ Bind(div_by_minus_one_label());
5728 if (is_mod_) {
5729 __ xorq(RDX, RDX); // x % -1 = 0
5730 } else {
5731 __ negq(RAX); // x / -1 = -x
5732 }
5733 __ jmp(exit_label());
5734 }
5735 // Adjust modulo for negative sign, optimized for known ranges.
5736 // if (divisor < 0)
5737 // out -= divisor;
5738 // else
5739 // out += divisor;
5740 if (has_adjust_sign()) {
5741 __ Bind(adjust_sign_label());
5742 if (RangeUtils::Overlaps(divisor_range_, -1, 1)) {
5743 // General case.
5744 compiler::Label subtract;
5745 __ testq(divisor_, divisor_);
5746 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
5747 __ addq(RDX, divisor_);
5748 __ jmp(exit_label());
5749 __ Bind(&subtract);
5750 __ subq(RDX, divisor_);
5751 } else if (divisor_range_->IsPositive()) {
5752 // Always positive.
5753 __ addq(RDX, divisor_);
5754 } else {
5755 // Always negative.
5756 __ subq(RDX, divisor_);
5757 }
5758 __ jmp(exit_label());
5759 }
5760 }
5761
5762 const char* name() override { return "int64 divide"; }
5763
5764 bool has_divide_by_zero() { return RangeUtils::CanBeZero(divisor_range_); }
5765
5766 bool has_divide_by_minus_one() {
5767 return RangeUtils::Overlaps(divisor_range_, -1, -1);
5768 }
5769
5770 bool has_adjust_sign() { return is_mod_; }
5771
5772 bool is_needed() {
5773 return has_divide_by_zero() || has_divide_by_minus_one() ||
5774 has_adjust_sign();
5775 }
5776
5777 compiler::Label* div_by_minus_one_label() {
5778 ASSERT(has_divide_by_minus_one());
5779 return &div_by_minus_one_label_;
5780 }
5781
5782 compiler::Label* adjust_sign_label() {
5783 ASSERT(has_adjust_sign());
5784 return &adjust_sign_label_;
5785 }
5786
5787 private:
5788 bool is_mod_;
5789 Register divisor_;
5790 Range* divisor_range_;
5791 compiler::Label div_by_minus_one_label_;
5792 compiler::Label adjust_sign_label_;
5793};
5794
5795static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
5796 BinaryInt64OpInstr* instruction,
5797 Token::Kind op_kind,
5798 Register left,
5800 Register tmp,
5801 Register out) {
5802 ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
5803
5804 // Special case 64-bit div/mod by compile-time constant. Note that various
5805 // special constants (such as powers of two) should have been optimized
5806 // earlier in the pipeline. Div or mod by zero falls into general code
5807 // to implement the exception.
5808 if (auto c = instruction->right()->definition()->AsConstant()) {
5809 if (c->value().IsInteger()) {
5810 const int64_t divisor = Integer::Cast(c->value()).AsInt64Value();
5811 if (divisor <= -2 || divisor >= 2) {
5812 // For x DIV c or x MOD c: use magic operations.
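        // The magic constant approximates 2^(64+shift) / divisor, so the
        // signed high 64 bits of magic * numerator recover the quotient
        // after the corrections applied below (adding or subtracting the
        // numerator when the magic constant wraps around, an arithmetic
        // shift, and adding one for negative quotients). This replaces an
        // idivq with an imulq plus a few cheap instructions.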
5813 compiler::Label pos;
5814 int64_t magic = 0;
5815 int64_t shift = 0;
5816 Utils::CalculateMagicAndShiftForDivRem(divisor, &magic, &shift);
5817 // RDX:RAX = magic * numerator.
5818 ASSERT(left == RAX);
5819 __ MoveRegister(TMP, RAX); // save numerator
5820 __ LoadImmediate(RAX, compiler::Immediate(magic));
5821 __ imulq(TMP);
5822 // RDX +/-= numerator.
5823 if (divisor > 0 && magic < 0) {
5824 __ addq(RDX, TMP);
5825 } else if (divisor < 0 && magic > 0) {
5826 __ subq(RDX, TMP);
5827 }
5828 // Shift if needed.
5829 if (shift != 0) {
5830 __ sarq(RDX, compiler::Immediate(shift));
5831 }
5832 // RDX += 1 if RDX < 0.
5833 __ movq(RAX, RDX);
5834 __ shrq(RDX, compiler::Immediate(63));
5835 __ addq(RDX, RAX);
5836 // Finalize DIV or MOD.
5837 if (op_kind == Token::kTRUNCDIV) {
5838 ASSERT(out == RAX && tmp == RDX);
5839 __ movq(RAX, RDX);
5840 } else {
5841 ASSERT(out == RDX && tmp == RAX);
5842 __ movq(RAX, TMP);
5843 __ LoadImmediate(TMP, compiler::Immediate(divisor));
5844 __ imulq(RDX, TMP);
5845 __ subq(RAX, RDX);
5846 // Compensate for Dart's Euclidean view of MOD.
5847 __ testq(RAX, RAX);
5848 __ j(GREATER_EQUAL, &pos);
5849 if (divisor > 0) {
5850 __ addq(RAX, TMP);
5851 } else {
5852 __ subq(RAX, TMP);
5853 }
5854 __ Bind(&pos);
5855 __ movq(RDX, RAX);
5856 }
5857 return;
5858 }
5859 }
5860 }
5861
5862 // Prepare a slow path.
5863 Range* right_range = instruction->right()->definition()->range();
5864 Int64DivideSlowPath* slow_path =
5865 new (Z) Int64DivideSlowPath(instruction, right, right_range);
5866
5867 // Handle modulo/division by zero exception on slow path.
5868 if (slow_path->has_divide_by_zero()) {
5869 __ testq(right, right);
5870 __ j(EQUAL, slow_path->entry_label());
5871 }
5872
5873 // Handle modulo/division by minus one explicitly on slow path
5874 // (to avoid arithmetic exception on 0x8000000000000000 / -1).
5875 if (slow_path->has_divide_by_minus_one()) {
5876 __ cmpq(right, compiler::Immediate(-1));
5877 __ j(EQUAL, slow_path->div_by_minus_one_label());
5878 }
5879
5880 // Perform actual operation
5881 // out = left % right
5882 // or
5883 // out = left / right.
5884 //
5885 // Note that since 64-bit division requires twice as many cycles
5886 // and has much higher latency compared to the 32-bit division,
5887 // even for this non-speculative 64-bit path we add a "fast path".
5888 // Integers are untagged at this stage, so testing whether sign-extending
5889 // the lower half of each operand reproduces the full operand effectively
5890 // tests whether the values fit into 32-bit operands (and the slightly
5891 // dangerous division by -1 has already been handled above).
5892 ASSERT(left == RAX);
5893 ASSERT(right != RDX); // available at this stage
5894 compiler::Label div_64;
5895 compiler::Label div_merge;
5896 __ movsxd(RDX, left);
5897 __ cmpq(RDX, left);
5899 __ movsxd(RDX, right);
5900 __ cmpq(RDX, right);
5902 __ cdq(); // sign-ext eax into edx:eax
5903 __ idivl(right); // quotient eax, remainder edx
5904 __ movsxd(out, out);
5905 __ jmp(&div_merge, compiler::Assembler::kNearJump);
5906 __ Bind(&div_64);
5907 __ cqo(); // sign-ext rax into rdx:rax
5908 __ idivq(right); // quotient rax, remainder rdx
5909 __ Bind(&div_merge);
5910 if (op_kind == Token::kMOD) {
5911 ASSERT(out == RDX);
5912 ASSERT(tmp == RAX);
5913 // For the % operator, again the idiv instruction does
5914 // not quite do what we want. Adjust for sign on slow path.
5915 __ testq(out, out);
5916 __ j(LESS, slow_path->adjust_sign_label());
5917 } else {
5918 ASSERT(out == RAX);
5919 ASSERT(tmp == RDX);
5920 }
5921
5922 if (slow_path->is_needed()) {
5923 __ Bind(slow_path->exit_label());
5924 compiler->AddSlowPathCode(slow_path);
5925 }
5926}
5927
5928template <typename OperandType>
5929static void EmitInt64Arithmetic(FlowGraphCompiler* compiler,
5930 Token::Kind op_kind,
5931 Register left,
5932 const OperandType& right) {
5933 switch (op_kind) {
5934 case Token::kADD:
5935 __ addq(left, right);
5936 break;
5937 case Token::kSUB:
5938 __ subq(left, right);
5939 break;
5940 case Token::kBIT_AND:
5941 __ andq(left, right);
5942 break;
5943 case Token::kBIT_OR:
5944 __ orq(left, right);
5945 break;
5946 case Token::kBIT_XOR:
5947 __ xorq(left, right);
5948 break;
5949 case Token::kMUL:
5950 __ imulq(left, right);
5951 break;
5952 default:
5953 UNREACHABLE();
5954 }
5955}
5956
5957LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5958 bool opt) const {
5959 switch (op_kind()) {
5960 case Token::kMOD:
5961 case Token::kTRUNCDIV: {
5962 const intptr_t kNumInputs = 2;
5963 const intptr_t kNumTemps = 1;
5964 LocationSummary* summary = new (zone) LocationSummary(
5965 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5966 summary->set_in(0, Location::RegisterLocation(RAX));
5967 summary->set_in(1, Location::RequiresRegister());
5968 // Intel uses rdx:rax with quotient rax and remainder rdx. Pick the
5969 // appropriate one for output and reserve the other as temp.
5970 summary->set_out(
5971 0, Location::RegisterLocation(op_kind() == Token::kMOD ? RDX : RAX));
5972 summary->set_temp(
5973 0, Location::RegisterLocation(op_kind() == Token::kMOD ? RAX : RDX));
5974 return summary;
5975 }
5976 default: {
5977 const intptr_t kNumInputs = 2;
5978 const intptr_t kNumTemps = 0;
5979 LocationSummary* summary = new (zone) LocationSummary(
5980 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5981 summary->set_in(0, Location::RequiresRegister());
5982 summary->set_in(1, LocationRegisterOrConstant(right()));
5983 summary->set_out(0, Location::SameAsFirstInput());
5984 return summary;
5985 }
5986 }
5987}
5988
5989void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5990 const Location left = locs()->in(0);
5991 const Location right = locs()->in(1);
5992 const Location out = locs()->out(0);
5993 ASSERT(!can_overflow());
5994 ASSERT(!CanDeoptimize());
5995
5996 if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
5997 const Location temp = locs()->temp(0);
5998 EmitInt64ModTruncDiv(compiler, this, op_kind(), left.reg(), right.reg(),
5999 temp.reg(), out.reg());
6000 } else if (right.IsConstant()) {
6001 ASSERT(out.reg() == left.reg());
6002 int64_t value;
6003 const bool ok = compiler::HasIntegerValue(right.constant(), &value);
6005 EmitInt64Arithmetic(compiler, op_kind(), left.reg(),
6006 compiler::Immediate(value));
6007 } else {
6008 ASSERT(out.reg() == left.reg());
6009 EmitInt64Arithmetic(compiler, op_kind(), left.reg(), right.reg());
6010 }
6011}
6012
6013LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6014 bool opt) const {
6015 const intptr_t kNumInputs = 1;
6016 const intptr_t kNumTemps = 0;
6017 LocationSummary* summary = new (zone)
6018 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6019 summary->set_in(0, Location::RequiresRegister());
6020 summary->set_out(0, Location::SameAsFirstInput());
6021 return summary;
6022}
6023
6024void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6025 const Register left = locs()->in(0).reg();
6026 const Register out = locs()->out(0).reg();
6027 ASSERT(out == left);
6028 switch (op_kind()) {
6029 case Token::kBIT_NOT:
6030 __ notq(left);
6031 break;
6032 case Token::kNEGATE:
6033 __ negq(left);
6034 break;
6035 default:
6036 UNREACHABLE();
6037 }
6038}
6039
6040static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
6041 Token::Kind op_kind,
6042 Register left,
6043 const Object& right) {
6044 const int64_t shift = Integer::Cast(right).AsInt64Value();
6045 ASSERT(shift >= 0);
6046 switch (op_kind) {
6047 case Token::kSHR:
6048 __ sarq(left, compiler::Immediate(
6049 Utils::Minimum<int64_t>(shift, kBitsPerWord - 1)));
6050 break;
6051 case Token::kUSHR:
6052 ASSERT(shift < 64);
6053 __ shrq(left, compiler::Immediate(shift));
6054 break;
6055 case Token::kSHL: {
6056 ASSERT(shift < 64);
6057 __ shlq(left, compiler::Immediate(shift));
6058 break;
6059 }
6060 default:
6061 UNREACHABLE();
6062 }
6063}
6064
6065static void EmitShiftInt64ByRCX(FlowGraphCompiler* compiler,
6066 Token::Kind op_kind,
6067 Register left) {
6068 switch (op_kind) {
6069 case Token::kSHR: {
6070 __ sarq(left, RCX);
6071 break;
6072 }
6073 case Token::kUSHR: {
6074 __ shrq(left, RCX);
6075 break;
6076 }
6077 case Token::kSHL: {
6078 __ shlq(left, RCX);
6079 break;
6080 }
6081 default:
6082 UNREACHABLE();
6083 }
6084}
6085
6086static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
6087 Token::Kind op_kind,
6088 Register left,
6089 const Object& right) {
6090 const int64_t shift = Integer::Cast(right).AsInt64Value();
6091 ASSERT(shift >= 0);
6092 if (shift >= 32) {
6093 __ xorl(left, left);
6094 } else {
6095 switch (op_kind) {
6096 case Token::kSHR:
6097 case Token::kUSHR: {
6098 __ shrl(left, compiler::Immediate(shift));
6099 break;
6100 }
6101 case Token::kSHL: {
6102 __ shll(left, compiler::Immediate(shift));
6103 break;
6104 }
6105 default:
6106 UNREACHABLE();
6107 }
6108 }
6109}
6110
6111static void EmitShiftUint32ByRCX(FlowGraphCompiler* compiler,
6112 Token::Kind op_kind,
6113 Register left) {
6114 switch (op_kind) {
6115 case Token::kSHR:
6116 case Token::kUSHR: {
6117 __ shrl(left, RCX);
6118 break;
6119 }
6120 case Token::kSHL: {
6121 __ shll(left, RCX);
6122 break;
6123 }
6124 default:
6125 UNREACHABLE();
6126 }
6127}
6128
6129class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
6130 public:
6131 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
6132 : ThrowErrorSlowPathCode(instruction,
6133 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6134
6135 const char* name() override { return "int64 shift"; }
6136
6137 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6138 const Register out = instruction()->locs()->out(0).reg();
6139 ASSERT(out == instruction()->locs()->in(0).reg());
6140
6141 compiler::Label throw_error;
6142 __ testq(RCX, RCX);
6143 __ j(LESS, &throw_error);
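    // A non-negative shift count that reaches this slow path is >= 64, so
    // the result saturates: arithmetic right shifts collapse to the sign
    // bit (yielding 0 or -1), while logical right shifts and left shifts
    // yield 0.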
6144
6145 switch (instruction()->AsShiftInt64Op()->op_kind()) {
6146 case Token::kSHR:
6147 __ sarq(out, compiler::Immediate(kBitsPerInt64 - 1));
6148 break;
6149 case Token::kUSHR:
6150 case Token::kSHL:
6151 __ xorq(out, out);
6152 break;
6153 default:
6154 UNREACHABLE();
6155 }
6156 __ jmp(exit_label());
6157
6158 __ Bind(&throw_error);
6159
6160 // Can't pass unboxed int64 value directly to runtime call, as all
6161 // arguments are expected to be tagged (boxed).
6162 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6163 // TODO(dartbug.com/33549): Clean this up when unboxed values
6164 // could be passed as arguments.
6165 __ movq(compiler::Address(
6166 THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
6167 RCX);
6168 }
6169};
6170
6171LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
6172 bool opt) const {
6173 const intptr_t kNumInputs = 2;
6174 const intptr_t kNumTemps = 0;
6175 LocationSummary* summary = new (zone) LocationSummary(
6176 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6177 summary->set_in(0, Location::RequiresRegister());
6178 summary->set_in(1, RangeUtils::IsPositive(shift_range())
6180 : Location::RegisterLocation(RCX));
6181 summary->set_out(0, Location::SameAsFirstInput());
6182 return summary;
6183}
6184
6185void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6186 const Register left = locs()->in(0).reg();
6187 const Register out = locs()->out(0).reg();
6188 ASSERT(left == out);
6189 ASSERT(!can_overflow());
6190
6191 if (locs()->in(1).IsConstant()) {
6192 EmitShiftInt64ByConstant(compiler, op_kind(), left,
6193 locs()->in(1).constant());
6194 } else {
6195 // Code for a variable shift amount (or constant that throws).
6196 ASSERT(locs()->in(1).reg() == RCX);
6197
6198 // Jump to a slow path if shift count is > 63 or negative.
6199 ShiftInt64OpSlowPath* slow_path = nullptr;
6200 if (!IsShiftCountInRange()) {
6201 slow_path = new (Z) ShiftInt64OpSlowPath(this);
6202 compiler->AddSlowPathCode(slow_path);
6203
6204 __ cmpq(RCX, compiler::Immediate(kShiftCountLimit));
6205 __ j(ABOVE, slow_path->entry_label());
6206 }
6207
6208 EmitShiftInt64ByRCX(compiler, op_kind(), left);
6209
6210 if (slow_path != nullptr) {
6211 __ Bind(slow_path->exit_label());
6212 }
6213 }
6214}
6215
6216LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
6217 Zone* zone,
6218 bool opt) const {
6219 const intptr_t kNumInputs = 2;
6220 const intptr_t kNumTemps = 0;
6221 LocationSummary* summary = new (zone)
6222 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6223 summary->set_in(0, Location::RequiresRegister());
6224 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), RCX));
6225 summary->set_out(0, Location::SameAsFirstInput());
6226 return summary;
6227}
6228
6229void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6230 const Register left = locs()->in(0).reg();
6231 const Register out = locs()->out(0).reg();
6232 ASSERT(left == out);
6233 ASSERT(!can_overflow());
6234
6235 if (locs()->in(1).IsConstant()) {
6236 EmitShiftInt64ByConstant(compiler, op_kind(), left,
6237 locs()->in(1).constant());
6238 } else {
6239 ASSERT(locs()->in(1).reg() == RCX);
6240 __ SmiUntag(RCX);
6241
6242 // Deoptimize if shift count is > 63 or negative (or not a smi).
6243 if (!IsShiftCountInRange()) {
6244 ASSERT(CanDeoptimize());
6245 compiler::Label* deopt =
6246 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6247
6248 __ cmpq(RCX, compiler::Immediate(kShiftCountLimit));
6249 __ j(ABOVE, deopt);
6250 }
6251
6252 EmitShiftInt64ByRCX(compiler, op_kind(), left);
6253 }
6254}
6255
6256class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
6257 public:
6258 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
6259 : ThrowErrorSlowPathCode(instruction,
6260 kArgumentErrorUnboxedInt64RuntimeEntry) {}
6261
6262 const char* name() override { return "uint32 shift"; }
6263
6264 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
6265 const Register out = instruction()->locs()->out(0).reg();
6266 ASSERT(out == instruction()->locs()->in(0).reg());
6267
6268 compiler::Label throw_error;
6269 __ testq(RCX, RCX);
6270 __ j(LESS, &throw_error);
6271
6272 __ xorl(out, out);
6273 __ jmp(exit_label());
6274
6275 __ Bind(&throw_error);
6276
6277 // Can't pass unboxed int64 value directly to runtime call, as all
6278 // arguments are expected to be tagged (boxed).
6279 // The unboxed int64 argument is passed through a dedicated slot in Thread.
6280 // TODO(dartbug.com/33549): Clean this up when unboxed values
6281 // could be passed as arguments.
6282 __ movq(compiler::Address(
6283 THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
6284 RCX);
6285 }
6286};
6287
6288LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6289 bool opt) const {
6290 const intptr_t kNumInputs = 2;
6291 const intptr_t kNumTemps = 0;
6292 LocationSummary* summary = new (zone) LocationSummary(
6293 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6294 summary->set_in(0, Location::RequiresRegister());
6295 summary->set_in(1, RangeUtils::IsPositive(shift_range())
6297 : Location::RegisterLocation(RCX));
6298 summary->set_out(0, Location::SameAsFirstInput());
6299 return summary;
6300}
6301
6302void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6303 Register left = locs()->in(0).reg();
6304 Register out = locs()->out(0).reg();
6305 ASSERT(left == out);
6306
6307 if (locs()->in(1).IsConstant()) {
6308 EmitShiftUint32ByConstant(compiler, op_kind(), left,
6309 locs()->in(1).constant());
6310 } else {
6311 // Code for a variable shift amount (or constant that throws).
6312 ASSERT(locs()->in(1).reg() == RCX);
6313
6314 // Jump to a slow path if shift count is > 31 or negative.
6315 ShiftUint32OpSlowPath* slow_path = nullptr;
6316 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6317 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6318 compiler->AddSlowPathCode(slow_path);
6319
6320 __ cmpq(RCX, compiler::Immediate(kUint32ShiftCountLimit));
6321 __ j(ABOVE, slow_path->entry_label());
6322 }
6323
6324 EmitShiftUint32ByRCX(compiler, op_kind(), left);
6325
6326 if (slow_path != nullptr) {
6327 __ Bind(slow_path->exit_label());
6328 }
6329 }
6330}
6331
6332LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6333 Zone* zone,
6334 bool opt) const {
6335 const intptr_t kNumInputs = 2;
6336 const intptr_t kNumTemps = 0;
6337 LocationSummary* summary = new (zone)
6338 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6339 summary->set_in(0, Location::RequiresRegister());
6340 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), RCX));
6341 summary->set_out(0, Location::SameAsFirstInput());
6342 return summary;
6343}
6344
6345void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6346 FlowGraphCompiler* compiler) {
6347 Register left = locs()->in(0).reg();
6348 Register out = locs()->out(0).reg();
6349 ASSERT(left == out);
6350
6351 if (locs()->in(1).IsConstant()) {
6352 EmitShiftUint32ByConstant(compiler, op_kind(), left,
6353 locs()->in(1).constant());
6354 } else {
6355 ASSERT(locs()->in(1).reg() == RCX);
6356 __ SmiUntag(RCX);
6357
6358 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6359 if (!IsShiftCountInRange()) {
6360 // Deoptimize if shift count is negative.
6361 ASSERT(CanDeoptimize());
6362 compiler::Label* deopt =
6363 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6364
6365 __ OBJ(test)(RCX, RCX);
6366 __ j(LESS, deopt);
6367 }
6368
6369 compiler::Label cont;
6370 __ OBJ(cmp)(RCX, compiler::Immediate(kUint32ShiftCountLimit));
6371 __ j(LESS_EQUAL, &cont);
6372
6373 __ xorl(left, left);
6374
6375 __ Bind(&cont);
6376 }
6377
6378 EmitShiftUint32ByRCX(compiler, op_kind(), left);
6379 }
6380}
6381
6382LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6383 bool opt) const {
6384 const intptr_t kNumInputs = 2;
6385 const intptr_t kNumTemps = 0;
6386 LocationSummary* summary = new (zone)
6387 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6388 summary->set_in(0, Location::RequiresRegister());
6389 summary->set_in(1, Location::RequiresRegister());
6390 summary->set_out(0, Location::SameAsFirstInput());
6391 return summary;
6392}
6393
6394template <typename OperandType>
6395static void EmitIntegerArithmetic(FlowGraphCompiler* compiler,
6396 Token::Kind op_kind,
6397 Register left,
6398 const OperandType& right) {
6399 switch (op_kind) {
6400 case Token::kADD:
6401 __ addl(left, right);
6402 break;
6403 case Token::kSUB:
6404 __ subl(left, right);
6405 break;
6406 case Token::kBIT_AND:
6407 __ andl(left, right);
6408 break;
6409 case Token::kBIT_OR:
6410 __ orl(left, right);
6411 break;
6412 case Token::kBIT_XOR:
6413 __ xorl(left, right);
6414 break;
6415 case Token::kMUL:
6416 __ imull(left, right);
6417 break;
6418 default:
6419 UNREACHABLE();
6420 }
6421}
6422
6423void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6424 Register left = locs()->in(0).reg();
6425 Register right = locs()->in(1).reg();
6426 Register out = locs()->out(0).reg();
6427 ASSERT(out == left);
6428 switch (op_kind()) {
6429 case Token::kBIT_AND:
6430 case Token::kBIT_OR:
6431 case Token::kBIT_XOR:
6432 case Token::kADD:
6433 case Token::kSUB:
6434 case Token::kMUL:
6435 EmitIntegerArithmetic(compiler, op_kind(), left, right);
6436 return;
6437 default:
6438 UNREACHABLE();
6439 }
6440}
6441
6442DEFINE_BACKEND(UnaryUint32Op, (SameAsFirstInput, Register value)) {
6443 ASSERT(instr->op_kind() == Token::kBIT_NOT);
6444 __ notl(value);
6445}
6446
6447DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
6448
6449LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6450 bool opt) const {
6451 const intptr_t kNumInputs = 1;
6452 const intptr_t kNumTemps = 0;
6453 LocationSummary* summary = new (zone)
6454 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6455 if (from() == kUntagged || to() == kUntagged) {
6456 ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
6457 (from() == kUnboxedIntPtr && to() == kUntagged));
6458 ASSERT(!CanDeoptimize());
6459 } else if (from() == kUnboxedInt64) {
6460 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6461 } else if (to() == kUnboxedInt64) {
6462 ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
6463 } else {
6464 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6465 ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
6466 }
6467
6468 summary->set_in(0, Location::RequiresRegister());
6469 summary->set_out(0, Location::SameAsFirstInput());
6470
6471 return summary;
6472}
6473
6474void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6475 const bool is_nop_conversion =
6476 (from() == kUntagged && to() == kUnboxedIntPtr) ||
6477 (from() == kUnboxedIntPtr && to() == kUntagged);
6478 if (is_nop_conversion) {
6479 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6480 return;
6481 }
6482
6483 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6484 const Register value = locs()->in(0).reg();
6485 const Register out = locs()->out(0).reg();
6486 // Representations are bitwise equivalent but we want to normalize
6487 // the upper bits for safety reasons.
6488 // TODO(vegorov) if we ensure that we never use the upper bits we could
6489 // avoid this.
6490 __ movl(out, value);
6491 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6492 // Representations are bitwise equivalent.
6493 const Register value = locs()->in(0).reg();
6494 const Register out = locs()->out(0).reg();
6495 __ movsxd(out, value);
6496 if (CanDeoptimize()) {
6497 compiler::Label* deopt =
6498 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6499 __ testl(out, out);
6500 __ j(NEGATIVE, deopt);
6501 }
6502 } else if (from() == kUnboxedInt64) {
6503 ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
6504 const Register value = locs()->in(0).reg();
6505 const Register out = locs()->out(0).reg();
6506 if (!CanDeoptimize()) {
6507 // Copy low.
6508 __ movl(out, value);
6509 } else {
6510 compiler::Label* deopt =
6511 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6512 // Sign extend.
6513 __ movsxd(out, value);
6514 // Compare with original value.
6515 __ cmpq(out, value);
6516 // Value cannot be held in Int32, deopt.
6517 __ j(NOT_EQUAL, deopt);
6518 }
6519 } else if (to() == kUnboxedInt64) {
6520 ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
6521 const Register value = locs()->in(0).reg();
6522 const Register out = locs()->out(0).reg();
6523 if (from() == kUnboxedUint32) {
6524 // Zero extend.
6525 __ movl(out, value);
6526 } else {
6527 // Sign extend.
6528 ASSERT(from() == kUnboxedInt32);
6529 __ movsxd(out, value);
6530 }
6531 } else {
6532 UNREACHABLE();
6533 }
6534}
6535
6536LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6537 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6538}
6539
6540void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6541 __ Stop(message());
6542}
6543
6544void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6545 BlockEntryInstr* entry = normal_entry();
6546 if (entry != nullptr) {
6547 if (!compiler->CanFallThroughTo(entry)) {
6548 FATAL("Checked function entry must have no offset");
6549 }
6550 } else {
6551 entry = osr_entry();
6552 if (!compiler->CanFallThroughTo(entry)) {
6553 __ jmp(compiler->GetJumpLabel(entry));
6554 }
6555 }
6556}
6557
6558LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6559 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6560}
6561
6562void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6563 if (!compiler->is_optimizing()) {
6564 if (FLAG_reorder_basic_blocks) {
6565 compiler->EmitEdgeCounter(block()->preorder_number());
6566 }
6567 // Add a deoptimization descriptor for deoptimizing instructions that
6568 // may be inserted before this instruction.
6569 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
6570 InstructionSource());
6571 }
6572 if (HasParallelMove()) {
6573 parallel_move()->EmitNativeCode(compiler);
6574 }
6575
6576 // We can fall through if the successor is the next block in the list.
6577 // Otherwise, we need a jump.
6578 if (!compiler->CanFallThroughTo(successor())) {
6579 __ jmp(compiler->GetJumpLabel(successor()));
6580 }
6581}
6582
6583LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
6584 bool opt) const {
6585 const intptr_t kNumInputs = 1;
6586 const intptr_t kNumTemps = 1;
6587
6588 LocationSummary* summary = new (zone)
6589 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6590
6591 summary->set_in(0, Location::RequiresRegister());
6592 summary->set_temp(0, Location::RequiresRegister());
6593
6594 return summary;
6595}
6596
6597void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6598 Register index_reg = locs()->in(0).reg();
6599 Register offset_reg = locs()->temp(0).reg();
6600
6601 ASSERT(RequiredInputRepresentation(0) == kTagged);
6602 // Note: we don't bother to ensure index is a writable input because any
6603 // other instructions using it must also not rely on the upper bits
6604 // when compressed.
6605 __ ExtendNonNegativeSmi(index_reg);
6606 __ LoadObject(offset_reg, offsets_);
6608 /*is_external=*/false, kTypedDataInt32ArrayCid,
6609 /*index_scale=*/4,
6610 /*index_unboxed=*/false, offset_reg, index_reg));
6611
6612 {
6613 const intptr_t kRIPRelativeLeaqSize = 7;
6614 const intptr_t entry_to_rip_offset = __ CodeSize() + kRIPRelativeLeaqSize;
6615 __ leaq(TMP, compiler::Address::AddressRIPRelative(-entry_to_rip_offset));
6616 ASSERT(__ CodeSize() == entry_to_rip_offset);
6617 }
6618
6619 __ addq(TMP, offset_reg);
6620
6621 // Jump to the absolute address.
6622 __ jmp(TMP);
6623}
6624
6625LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
6626 bool opt) const {
6627 const intptr_t kNumInputs = 2;
6628 const intptr_t kNumTemps = 0;
6629 if (needs_number_check()) {
6630 LocationSummary* locs = new (zone)
6631 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6632 locs->set_in(0, Location::RegisterLocation(RAX));
6633 locs->set_in(1, Location::RegisterLocation(RCX));
6634 locs->set_out(0, Location::RegisterLocation(RAX));
6635 return locs;
6636 }
6637 LocationSummary* locs = new (zone)
6638 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6639 locs->set_in(0, LocationRegisterOrConstant(left()));
6640 // Only one of the inputs can be a constant. Choose register if the first one
6641 // is a constant.
6642 locs->set_in(1, locs->in(0).IsConstant()
6645 locs->set_out(0, Location::RequiresRegister());
6646 return locs;
6647}
6648
6649Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
6650 FlowGraphCompiler* compiler,
6651 BranchLabels labels,
6652 Register reg,
6653 const Object& obj) {
6654 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
6655 source(), deopt_id());
6656}
6657
6658LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
6659 bool opt) const {
6660 const intptr_t kNumInputs = 1;
6661 const intptr_t kNumTemps = 0;
6662 LocationSummary* summary = new (zone)
6663 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6665 FLAG_precompiled_mode ? RAX : FUNCTION_REG));
6666 return MakeCallSummary(zone, this, summary);
6667}
6668
6669void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6670 // Arguments descriptor is expected in ARGS_DESC_REG.
6671 const intptr_t argument_count = ArgumentCount(); // Includes type args.
6672 const Array& arguments_descriptor =
6674 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
6675
6676 if (FLAG_precompiled_mode) {
6677 ASSERT(locs()->in(0).reg() == RAX);
6678 // RAX: Closure with cached entry point.
6679 __ movq(RCX, compiler::FieldAddress(
6680 RAX, compiler::target::Closure::entry_point_offset()));
6681 } else {
6682 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
6683 // FUNCTION_REG: Function.
6684 __ LoadCompressed(
6685 CODE_REG, compiler::FieldAddress(
6686 FUNCTION_REG, compiler::target::Function::code_offset()));
6687 // Closure functions only have one entry point.
6688 __ movq(RCX, compiler::FieldAddress(
6690 compiler::target::Function::entry_point_offset()));
6691 }
6692
6693 // ARGS_DESC_REG: Arguments descriptor array.
6694 // RCX: instructions entry point.
6695 if (!FLAG_precompiled_mode) {
6696 // RBX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
6697 __ xorq(IC_DATA_REG, IC_DATA_REG);
6698 }
6699 __ call(RCX);
6700 compiler->EmitCallsiteMetadata(source(), deopt_id(),
6701 UntaggedPcDescriptors::kOther, locs(), env());
6702 compiler->EmitDropArguments(argument_count);
6703}
6704
6705LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
6706 bool opt) const {
6709}
6710
6711void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6712 Register input = locs()->in(0).reg();
6713 Register result = locs()->out(0).reg();
6714 ASSERT(input == result);
6715 __ xorq(result, compiler::Immediate(
6717}
6718
6719LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
6720 bool opt) const {
6721 UNREACHABLE();
6722 return NULL;
6723}
6724
6725void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6726 UNREACHABLE();
6727}
6728
6729LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
6730 bool opt) const {
6731 UNREACHABLE();
6732 return NULL;
6733}
6734
6735void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6736 UNREACHABLE();
6737}
6738
6739LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
6740 bool opt) const {
6741 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
6742 const intptr_t kNumTemps = 0;
6743 LocationSummary* locs = new (zone)
6744 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6745 if (type_arguments() != nullptr) {
6748 }
6750 return locs;
6751}
6752
6753void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6754 if (type_arguments() != nullptr) {
6755 TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
6756 if (type_usage_info != nullptr) {
6757 RegisterTypeArgumentsUse(compiler->function(), type_usage_info, cls_,
6758 type_arguments()->definition());
6759 }
6760 }
 6761 const Code& stub = Code::ZoneHandle(
 6762 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
 6763 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6764 locs(), deopt_id(), env());
6765}
6766
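// --- Editorial sketch, not part of il_x64.cc ----------------------------------
// The two AllocateObjectInstr methods above do no inline allocation: the
// location summary pins inputs/outputs to the allocation-stub ABI registers,
// and EmitNativeCode calls the per-class allocation stub. The lookup-then-call
// shape is modeled below with invented types; only the structure mirrors the
// real code.
#include <cstdint>
#include <unordered_map>

using AllocationStub = void* (*)();  // stand-in for a class's allocation stub

void* AllocateInstance(const std::unordered_map<int32_t, AllocationStub>& stubs,
                       int32_t class_id) {
  // Analogous to StubCode::GetAllocationStubForClass(cls()) followed by
  // compiler->GenerateStubCall(...): pick the stub for this class and call it.
  return stubs.at(class_id)();
}
// ------------------------------------------------------------------------------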
6767void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6768#ifdef PRODUCT
6769 UNREACHABLE();
6770#else
6771 ASSERT(!compiler->is_optimizing());
6772 __ CallPatchable(StubCode::DebugStepCheck());
6773 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
6774 compiler->RecordSafepoint(locs());
6775#endif
6776}
6777
6778} // namespace dart
6779
6780#undef __
6781
6782#endif // defined(TARGET_ARCH_X64)