il_ia32.cc
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "platform/globals.h"
#include "vm/globals.h"  // Needed here to get TARGET_ARCH_IA32.
#if defined(TARGET_ARCH_IA32)

#include "vm/compiler/backend/il.h"

#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/backend/locations_helpers.h"
#include "vm/compiler/backend/range_analysis.h"
#include "vm/compiler/ffi/native_calling_convention.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/cpu.h"
#include "vm/dart_entry.h"
#include "vm/instructions.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"

#define __ compiler->assembler()->
#define Z (compiler->zone())
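
// Shorthand used throughout this file: `__` expands to the current
// FlowGraphCompiler's assembler and `Z` to its zone, so the emission code
// below reads like an assembly listing.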

namespace dart {

// Generic summary for call instructions that have all arguments pushed
// on the stack and return the result in a fixed register EAX.
LocationSummary* Instruction::MakeCallSummary(Zone* zone,
                                              const Instruction* instr,
                                              LocationSummary* locs) {
  // This is unused on ia32.
  ASSERT(locs == nullptr);
  ASSERT(instr->representation() == kTagged);
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
  result->set_out(0, Location::RegisterLocation(EAX));
  return result;
}

DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
  ASSERT(instr->RequiredInputRepresentation(0) == kTagged);  // It is a Smi.
  ASSERT(instr->representation() == kTagged);
  __ movl(out, compiler::Address(instr->base_reg(), index, TIMES_2,
                                 instr->offset()));

  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagSize == 1);
}
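
// Note: the index register holds a tagged Smi, i.e. the logical index
// already shifted left by one (kSmiTag == 0, kSmiTagSize == 1). Scaling a
// Smi by TIMES_2 therefore addresses 4-byte (word-sized) slots on ia32.
// StoreIndexedUnsafe below relies on the same trick.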

DEFINE_BACKEND(StoreIndexedUnsafe,
               (NoLocation, Register index, Register value)) {
  ASSERT(instr->RequiredInputRepresentation(
             StoreIndexedUnsafeInstr::kIndexPos) == kTagged);  // It is a Smi.
  __ movl(compiler::Address(instr->base_reg(), index, TIMES_2, instr->offset()),
          value);

  ASSERT(kSmiTag == 0);
  ASSERT(kSmiTagSize == 1);
}

DEFINE_BACKEND(TailCall,
               (NoLocation,
                Fixed<Register, ARGS_DESC_REG>,
                Temp<Register> temp)) {
  __ LoadObject(CODE_REG, instr->code());
  __ LeaveFrame();  // The arguments are still on the stack.
  __ movl(temp, compiler::FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jmp(temp);
}
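
// A tail call drops the current frame but leaves the pushed arguments in
// place, then jumps straight to the target's entry point loaded from its
// Code object, so the callee returns directly to this frame's caller.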

LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
                                                      bool opt) const {
  // The compiler must optimize any function that includes a MemoryCopy
  // instruction that uses typed data cids, since extracting the payload
  // address from views is done in a compiler pass after all code motion
  // has happened.
  ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
          !IsTypedDataBaseClassId(dest_cid_)) ||
         opt);
  const bool remove_loop =
      length()->BindsToSmiConstant() && length()->BoundSmiConstant() <= 4;
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = remove_loop ? 1 : 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  // Unlike other architectures, IA32 doesn't have enough registers to
  // allocate temps to hold the payload addresses, so these instead use the
  // rep mov input registers ESI and EDI, respectively... except ESI is THR,
  // so use another writable register for the input and save/restore ESI
  // internally as needed.
  locs->set_in(kSrcPos, Location::WritableRegister());
  locs->set_in(kDestPos, Location::WritableRegister());
  const bool needs_writable_inputs =
      (((element_size_ == 1) && !unboxed_inputs_) ||
       ((element_size_ == 16) && unboxed_inputs_));
  locs->set_in(kSrcStartPos,
               needs_writable_inputs
                   ? Location::WritableRegister()
                   : Location::RequiresRegister());
  locs->set_in(kDestStartPos,
               needs_writable_inputs
                   ? Location::WritableRegister()
                   : Location::RequiresRegister());
  if (remove_loop) {
    locs->set_in(
        kLengthPos,
        Location::Constant(
            length()->definition()->OriginalDefinition()->AsConstant()));
    // Needs a valid ByteRegister for single byte moves, and a temp register
    // for more than one move. We could potentially optimize the 2 and 4 byte
    // single moves to overwrite the src_reg.
    locs->set_temp(0, Location::RegisterLocation(ECX));
  } else {
    locs->set_in(kLengthPos, Location::WritableRegister());
  }
  return locs;
}

static inline intptr_t SizeOfMemoryCopyElements(intptr_t element_size) {
  return Utils::Minimum<intptr_t>(element_size, compiler::target::kWordSize);
}

void MemoryCopyInstr::PrepareLengthRegForLoop(FlowGraphCompiler* compiler,
                                              Register length_reg,
                                              compiler::Label* done) {
  const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
  // We want to convert the value in length_reg to an unboxed length in
  // terms of mov_size-sized elements.
  const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
                         Utils::ShiftForPowerOfTwo(mov_size) -
                         (unboxed_inputs() ? 0 : kSmiTagShift);
  if (shift < 0) {
    ASSERT_EQUAL(shift, -kSmiTagShift);
    __ SmiUntag(length_reg);
  } else if (shift > 0) {
    __ shll(length_reg, compiler::Immediate(shift));
  }
}
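
// Worked example: for element_size_ == 1 with a tagged (Smi) length the
// shift is 0 - 0 - 1 = -1, so the length is simply untagged. For
// element_size_ == 8 with a tagged length, mov_size is 4 on ia32 and the
// shift is 3 - 2 - 1 = 0: the tagged value (length * 2) already equals the
// number of 4-byte moves required.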

void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
                                   Register dest_reg,
                                   Register src_reg,
                                   Register length_reg,
                                   compiler::Label* done,
                                   compiler::Label* copy_forwards) {
  const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
  const bool reversed = copy_forwards != nullptr;
  if (reversed) {
    // Avoid doing the extra work to prepare for the rep mov instructions
    // if the length to copy is zero.
    __ BranchIfZero(length_reg, done);
    // Verify that the overlap actually exists by checking whether
    // the first element in dest <= the last element in src.
    const ScaleFactor scale = ToScaleFactor(mov_size, /*index_unboxed=*/true);
    __ leal(ESI, compiler::Address(src_reg, length_reg, scale, -mov_size));
    __ CompareRegisters(dest_reg, ESI);
    __ BranchIf(UNSIGNED_GREATER, copy_forwards,
                compiler::Assembler::kNearJump);
    // ESI already has the right address, so we just need to adjust dest_reg
    // appropriately.
    __ leal(dest_reg,
            compiler::Address(dest_reg, length_reg, scale, -mov_size));
    __ std();
  } else {
    // Move the start of the src array into ESI before the string operation.
    __ movl(ESI, src_reg);
  }
  switch (mov_size) {
    case 1:
      __ rep_movsb();
      break;
    case 2:
      __ rep_movsw();
      break;
    case 4:
      __ rep_movsd();
      break;
    default:
      UNREACHABLE();
  }
  if (reversed) {
    __ cld();
  }
}
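
// The std instruction sets the x86 direction flag so that rep movs walks
// backwards (decrementing ESI/EDI), which makes an overlapping copy safe
// when dest lies above src. The calling convention requires the direction
// flag to be clear, so cld restores it once the reversed copy is done.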

void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
                                              classid_t array_cid,
                                              Register array_reg,
                                              Register payload_reg,
                                              Representation array_rep,
                                              Location start_loc) {
  intptr_t offset = 0;
  if (array_rep != kTagged) {
    // Do nothing, array_reg already contains the payload address.
  } else if (IsTypedDataBaseClassId(array_cid)) {
    // The incoming array must have been proven to be an internal typed data
    // object, where the payload is in the object and we can just offset.
    ASSERT_EQUAL(array_rep, kTagged);
    offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
  } else {
    ASSERT_EQUAL(array_rep, kTagged);
    ASSERT(!IsExternalPayloadClassId(array_cid));
    switch (array_cid) {
      case kOneByteStringCid:
        offset =
            compiler::target::OneByteString::data_offset() - kHeapObjectTag;
        break;
      case kTwoByteStringCid:
        offset =
            compiler::target::TwoByteString::data_offset() - kHeapObjectTag;
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
  ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
  if (start_loc.IsConstant()) {
    const auto& constant = start_loc.constant();
    ASSERT(constant.IsInteger());
    const int64_t start_value = Integer::Cast(constant).AsInt64Value();
    const intptr_t add_value = Utils::AddWithWrapAround(
        Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
    __ leal(payload_reg, compiler::Address(array_reg, add_value));
    return;
  }
  // Note that start_reg must be writable in the special cases below.
  const Register start_reg = start_loc.reg();
  bool index_unboxed = unboxed_inputs_;
  // Both special cases below assume that Smis are only shifted one bit.
  COMPILE_ASSERT(kSmiTagShift == 1);
  if (element_size_ == 1 && !index_unboxed) {
    // Shift the value to the right by untagging it as a Smi.
    __ SmiUntag(start_reg);
    index_unboxed = true;
  } else if (element_size_ == 16 && index_unboxed) {
    // Can't use TIMES_16 on X86, so instead pre-shift the value to reduce the
    // scaling needed in the leal instruction.
    __ SmiTag(start_reg);
    index_unboxed = false;
  }
  auto const scale = ToScaleFactor(element_size_, index_unboxed);
  __ leal(payload_reg, compiler::Address(array_reg, start_reg, scale, offset));
}

LocationSummary* CalculateElementAddressInstr::MakeLocationSummary(
    Zone* zone,
    bool opt) const {
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  auto* const summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);

  summary->set_in(kBasePos, Location::RequiresRegister());
  // Only use a Smi constant for the index if multiplying it by the index
  // scale would be an int32 constant.
  const intptr_t scale_shift = Utils::ShiftForPowerOfTwo(index_scale());
  summary->set_in(kIndexPos,
                  LocationRegisterOrSmiConstant(index(),
                                                kMinInt32 >> scale_shift,
                                                kMaxInt32 >> scale_shift));
  summary->set_in(kOffsetPos, LocationRegisterOrSmiConstant(offset()));
  summary->set_out(0, Location::RequiresRegister());

  return summary;
}

void CalculateElementAddressInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register base_reg = locs()->in(kBasePos).reg();
  const Location& index_loc = locs()->in(kIndexPos);
  const Location& offset_loc = locs()->in(kOffsetPos);
  const Register result_reg = locs()->out(0).reg();

  if (index_loc.IsConstant()) {
    if (offset_loc.IsConstant()) {
      ASSERT_EQUAL(Smi::Cast(index_loc.constant()).Value(), 0);
      ASSERT(Smi::Cast(offset_loc.constant()).Value() != 0);
      // No index involved at all.
      const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
      __ leal(result_reg, compiler::Address(base_reg, offset_value));
    } else {
      // Don't need wrap-around as the index is constant only if multiplying
      // it by the scale is an int32.
      const int32_t scaled_index =
          Smi::Cast(index_loc.constant()).Value() * index_scale();
      __ leal(result_reg, compiler::Address(base_reg, offset_loc.reg(), TIMES_1,
                                            scaled_index));
    }
  } else {
    Register index_reg = index_loc.reg();
    bool index_unboxed = RepresentationUtils::IsUnboxedInteger(
        RequiredInputRepresentation(kIndexPos));
    ASSERT(index_unboxed);
    if (index_scale() == 16) {
      COMPILE_ASSERT(kSmiTagShift == 1);
      // A ScaleFactor of TIMES_16 is invalid for x86, so box the index as a
      // Smi (using the result register to store it, to avoid allocating a
      // writable register for the index) to reduce the ScaleFactor to TIMES_8.
      __ MoveAndSmiTagRegister(result_reg, index_reg);
      index_reg = result_reg;
      index_unboxed = false;
    }
    auto const scale = ToScaleFactor(index_scale(), index_unboxed);
    if (offset_loc.IsConstant()) {
      const int32_t offset_value = Smi::Cast(offset_loc.constant()).Value();
      __ leal(result_reg,
              compiler::Address(base_reg, index_reg, scale, offset_value));
    } else {
      // compiler::Address(reg, reg, scale, reg) is invalid, so this has to be
      // done as a two-part operation.
      __ leal(result_reg, compiler::Address(base_reg, index_reg, scale,
                                            /*disp=*/0));
      __ AddRegisters(result_reg, offset_loc.reg());
    }
  }
}

LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  ASSERT(representation() == kTagged);
  locs->set_in(0, LocationRegisterOrConstant(value()));
  return locs;
}

void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  ASSERT(compiler->is_optimizing());

  Location value = locs()->in(0);
  const compiler::Address dst = LocationToStackSlotAddress(location());
  if (value.IsConstant()) {
    __ Store(value.constant(), dst);
  } else {
    ASSERT(value.IsRegister());
    __ Store(value.reg(), dst);
  }
}

LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
                                                      bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  ASSERT(representation() == kTagged);
  locs->set_in(0, Location::RegisterLocation(EAX));
  return locs;
}

// Attempt optimized compilation at return instruction instead of at the entry.
// The entry needs to be patchable, so no inlined objects are allowed in the
// area that will be overwritten by the patch instruction (a jump).
void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  Register result = locs()->in(0).reg();
  ASSERT(result == EAX);

  if (compiler->parsed_function().function().IsAsyncFunction() ||
      compiler->parsed_function().function().IsAsyncGenerator()) {
    ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
    const Code& stub = GetReturnStub(compiler);
    compiler->EmitJumpToStub(stub);
    return;
  }

  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
    __ ret();
    return;
  }

#if defined(DEBUG)
  __ Comment("Stack Check");
  compiler::Label done;
  const intptr_t fp_sp_dist =
      (compiler::target::frame_layout.first_local_from_fp + 1 -
       compiler->StackSize()) *
      kWordSize;
  ASSERT(fp_sp_dist <= 0);
  __ movl(EDI, ESP);
  __ subl(EDI, EBP);
  __ cmpl(EDI, compiler::Immediate(fp_sp_dist));
  __ j(EQUAL, &done, compiler::Assembler::kNearJump);
  __ int3();
  __ Bind(&done);
#endif
  __ LeaveDartFrame();
  __ ret();
}
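
// The debug-only check above computes ESP - EBP and compares it against the
// frame distance the compiler expects (fp_sp_dist); any mismatch means the
// stack got out of sync with the compiler's model, so int3 traps immediately
// rather than returning through a corrupted frame.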

// Keep in sync with NativeEntryInstr::EmitNativeCode.
void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  EmitReturnMoves(compiler);

  bool return_in_st0 = false;
  if (marshaller_.Location(compiler::ffi::kResultIndex)
          .payload_type()
          .IsFloat()) {
    ASSERT(locs()->in(0).IsFpuRegister() && locs()->in(0).fpu_reg() == XMM0);
    return_in_st0 = true;
  }

  __ LeaveDartFrame();

  // EDI is the only sane choice for a temporary register here because:
  //
  // EDX is used for large return values.
  // ESI == THR.
  // Could be EBX or ECX, but that would make the code below confusing.
  const Register tmp = EDI;

  // Pop dummy return address.
  __ popl(tmp);

  // Anything besides the return register(s). Callee-saved registers will be
  // restored later.
  const Register vm_tag_reg = EBX;
  const Register old_exit_frame_reg = ECX;
  const Register old_exit_through_ffi_reg = tmp;

  __ popl(old_exit_frame_reg);
  __ popl(vm_tag_reg);  // This is old_exit_through_ffi; tmp is still in use.

  // Restore top_resource.
  __ popl(tmp);
  __ movl(
      compiler::Address(THR, compiler::target::Thread::top_resource_offset()),
      tmp);

  __ movl(old_exit_through_ffi_reg, vm_tag_reg);
  __ popl(vm_tag_reg);

  // Reset the exit frame info to old_exit_frame_reg *before* entering the
  // safepoint. The trampoline that called us will enter the safepoint on our
  // behalf.
  __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
                                 old_exit_through_ffi_reg,
                                 /*enter_safepoint=*/false);

  // Move XMM0 into ST0 if needed.
  if (return_in_st0) {
    if (marshaller_.Location(compiler::ffi::kResultIndex)
            .payload_type()
            .SizeInBytes() == 8) {
      __ movsd(compiler::Address(SPREG, -8), XMM0);
      __ fldl(compiler::Address(SPREG, -8));
    } else {
      __ movss(compiler::Address(SPREG, -4), XMM0);
      __ flds(compiler::Address(SPREG, -4));
    }
  }

  // Restore C++ ABI callee-saved registers.
  __ popl(EDI);
  __ popl(ESI);
  __ popl(EBX);

#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Leave the entry frame.
  __ LeaveFrame();

  // We deal with `ret 4` for structs in the JIT callback trampolines.
  __ ret();
}
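
// The 32-bit C calling convention returns floating-point values in the x87
// register ST0, not in an XMM register. There is no direct XMM-to-x87 move,
// so the value is spilled just below the stack pointer and reloaded with
// fldl/flds to place it in ST0.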

LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
                                                     bool opt) const {
  const intptr_t kNumInputs = 0;
  const intptr_t stack_index =
      compiler::target::frame_layout.FrameSlotForVariable(&local());
  return LocationSummary::Make(zone, kNumInputs,
                               Location::StackSlot(stack_index, FPREG),
                               LocationSummary::kNoCall);
}

void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  ASSERT(!compiler->is_optimizing());
  // Nothing to do.
}

LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
                                                      bool opt) const {
  const intptr_t kNumInputs = 1;
  return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
                               LocationSummary::kNoCall);
}

void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  Register value = locs()->in(0).reg();
  Register result = locs()->out(0).reg();
  ASSERT(result == value);  // Assert that register assignment is correct.
  __ movl(compiler::Address(
              EBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
          value);
}

LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
                                                    bool opt) const {
  const intptr_t kNumInputs = 0;
  return LocationSummary::Make(zone, kNumInputs,
                               compiler::Assembler::IsSafe(value())
                                   ? Location::Constant(this)
                                   : Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}

void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  // The register allocator drops constant definitions that have no uses.
  Location out = locs()->out(0);
  ASSERT(out.IsRegister() || out.IsConstant() || out.IsInvalid());
  if (out.IsRegister()) {
    Register result = out.reg();
    __ LoadObjectSafely(result, value());
  }
}

void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
                                       const Location& destination,
                                       Register tmp,
                                       intptr_t pair_index) {
  if (destination.IsRegister()) {
    if (RepresentationUtils::IsUnboxedInteger(representation())) {
      int64_t v;
      const bool ok = compiler::HasIntegerValue(value_, &v);
      RELEASE_ASSERT(ok);
      if (value_.IsSmi() &&
          RepresentationUtils::IsUnsignedInteger(representation())) {
        // If the value is negative, then the sign bit was preserved during
        // Smi untagging, which means the resulting value may be unexpected.
        ASSERT(v >= 0);
      }
      __ movl(destination.reg(),
              compiler::Immediate(pair_index == 0 ? Utils::Low32Bits(v)
                                                  : Utils::High32Bits(v)));
    } else {
      ASSERT(representation() == kTagged);
      __ LoadObjectSafely(destination.reg(), value_);
    }
  } else if (destination.IsFpuRegister()) {
    switch (representation()) {
      case kUnboxedFloat:
        __ LoadSImmediate(destination.fpu_reg(),
                          static_cast<float>(Double::Cast(value_).value()));
        break;
      case kUnboxedDouble: {
        const double value_as_double = Double::Cast(value_).value();
        uword addr = FindDoubleConstant(value_as_double);
        if (addr == 0) {
          __ pushl(EAX);
          __ LoadObject(EAX, value_);
          __ movsd(destination.fpu_reg(),
                   compiler::FieldAddress(EAX, Double::value_offset()));
          __ popl(EAX);
        } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
          __ xorps(destination.fpu_reg(), destination.fpu_reg());
        } else {
          __ movsd(destination.fpu_reg(), compiler::Address::Absolute(addr));
        }
        break;
      }
      case kUnboxedFloat64x2:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float64x2::Cast(value_).value());
        break;
      case kUnboxedFloat32x4:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float32x4::Cast(value_).value());
        break;
      case kUnboxedInt32x4:
        __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
        break;
      default:
        UNREACHABLE();
    }
  } else if (destination.IsDoubleStackSlot()) {
    const double value_as_double = Double::Cast(value_).value();
    uword addr = FindDoubleConstant(value_as_double);
    if (addr == 0) {
      __ pushl(EAX);
      __ LoadObject(EAX, value_);
      __ movsd(FpuTMP, compiler::FieldAddress(EAX, Double::value_offset()));
      __ popl(EAX);
    } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
      __ xorps(FpuTMP, FpuTMP);
    } else {
      __ movsd(FpuTMP, compiler::Address::Absolute(addr));
    }
    __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
  } else if (destination.IsQuadStackSlot()) {
    switch (representation()) {
      case kUnboxedFloat64x2:
        __ LoadQImmediate(FpuTMP, Float64x2::Cast(value_).value());
        break;
      case kUnboxedFloat32x4:
        __ LoadQImmediate(FpuTMP, Float32x4::Cast(value_).value());
        break;
      case kUnboxedInt32x4:
        __ LoadQImmediate(FpuTMP, Int32x4::Cast(value_).value());
        break;
      default:
        UNREACHABLE();
    }
    __ movups(LocationToStackSlotAddress(destination), FpuTMP);
  } else {
    ASSERT(destination.IsStackSlot());
    if (RepresentationUtils::IsUnboxedInteger(representation())) {
      int64_t v;
      const bool ok = compiler::HasIntegerValue(value_, &v);
      RELEASE_ASSERT(ok);
      __ movl(LocationToStackSlotAddress(destination),
              compiler::Immediate(pair_index == 0 ? Utils::Low32Bits(v)
                                                  : Utils::High32Bits(v)));
    } else if (representation() == kUnboxedFloat) {
      int32_t float_bits =
          bit_cast<int32_t, float>(Double::Cast(value_).value());
      __ movl(LocationToStackSlotAddress(destination),
              compiler::Immediate(float_bits));
    } else {
      ASSERT(representation() == kTagged);
      if (compiler::Assembler::IsSafeSmi(value_) || value_.IsNull()) {
        __ movl(LocationToStackSlotAddress(destination),
                compiler::Immediate(static_cast<int32_t>(value_.ptr())));
      } else {
        __ pushl(EAX);
        __ LoadObjectSafely(EAX, value_);
        __ movl(LocationToStackSlotAddress(destination), EAX);
        __ popl(EAX);
      }
    }
  }
}
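
// When FindDoubleConstant returns 0 there is no addressable pool entry for
// the double, so the value is loaded out of its boxed Double object instead,
// saving and restoring EAX around the load because EAX may hold a live value
// at this point.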

LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
  const bool is_unboxed_int =
      RepresentationUtils::IsUnboxedInteger(representation());
  ASSERT(!is_unboxed_int || RepresentationUtils::ValueSize(representation()) <=
                                compiler::target::kWordSize);
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps =
      (constant_address() == 0) && !is_unboxed_int ? 1 : 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  if (representation() == kUnboxedDouble) {
    locs->set_out(0, Location::RequiresFpuRegister());
  } else {
    ASSERT(is_unboxed_int);
    locs->set_out(0, Location::RequiresRegister());
  }
  if (kNumTemps == 1) {
    locs->set_temp(0, Location::RequiresRegister());
  }
  return locs;
}

void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  // The register allocator drops constant definitions that have no uses.
  if (!locs()->out(0).IsInvalid()) {
    EmitMoveToLocation(compiler, locs()->out(0));
  }
}

LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
  const intptr_t kNumInputs = 4;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
  summary->set_in(kInstancePos,
                  Location::RegisterLocation(TypeTestABI::kInstanceReg));
  summary->set_in(kDstTypePos,
                  Location::RegisterLocation(TypeTestABI::kDstTypeReg));
  summary->set_in(
      kInstantiatorTAVPos,
      Location::RegisterLocation(TypeTestABI::kInstantiatorTypeArgumentsReg));
  summary->set_in(kFunctionTAVPos, Location::RegisterLocation(
                                       TypeTestABI::kFunctionTypeArgumentsReg));
  summary->set_out(0, Location::SameAsFirstInput());
  return summary;
}

void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  ASSERT(locs()->always_calls());

  auto object_store = compiler->isolate_group()->object_store();
  const auto& assert_boolean_stub =
      Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());

  compiler::Label done;
  __ testl(
      AssertBooleanABI::kObjectReg,
      compiler::Immediate(compiler::target::ObjectAlignment::kBoolValueMask));
  __ j(NOT_ZERO, &done, compiler::Assembler::kNearJump);
  compiler->GenerateStubCall(source(), assert_boolean_stub,
                             /*kind=*/UntaggedPcDescriptors::kOther, locs(),
                             deopt_id(), env());
  __ Bind(&done);
}

static Condition TokenKindToIntCondition(Token::Kind kind) {
  switch (kind) {
    case Token::kEQ:
      return EQUAL;
    case Token::kNE:
      return NOT_EQUAL;
    case Token::kLT:
      return LESS;
    case Token::kGT:
      return GREATER;
    case Token::kLTE:
      return LESS_EQUAL;
    case Token::kGTE:
      return GREATER_EQUAL;
    default:
      UNREACHABLE();
      return OVERFLOW;
  }
}

LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
                                                           bool opt) const {
  const intptr_t kNumInputs = 2;
  if (operation_cid() == kMintCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
    locs->set_in(0, Location::Pair(Location::RequiresRegister(),
                                   Location::RequiresRegister()));
    locs->set_in(1, Location::Pair(Location::RequiresRegister(),
                                   Location::RequiresRegister()));
    locs->set_out(0, Location::RequiresRegister());
    return locs;
  }
  if (operation_cid() == kDoubleCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
    locs->set_in(0, Location::RequiresFpuRegister());
    locs->set_in(1, Location::RequiresFpuRegister());
    locs->set_out(0, Location::RequiresRegister());
    return locs;
  }
  if (operation_cid() == kSmiCid || operation_cid() == kIntegerCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
    locs->set_in(0, LocationRegisterOrConstant(left()));
    // Only one input can be a constant operand. The case of two constant
    // operands should be handled by constant propagation.
    // Only right can be a stack slot.
    locs->set_in(1, locs->in(0).IsConstant()
                        ? Location::RequiresRegister()
                        : LocationRegisterOrConstant(right()));
    locs->set_out(0, Location::RequiresRegister());
    return locs;
  }
  UNREACHABLE();
  return nullptr;
}

static void LoadValueCid(FlowGraphCompiler* compiler,
                         Register value_cid_reg,
                         Register value_reg,
                         compiler::Label* value_is_smi = nullptr) {
  compiler::Label done;
  if (value_is_smi == nullptr) {
    __ movl(value_cid_reg, compiler::Immediate(kSmiCid));
  }
  __ testl(value_reg, compiler::Immediate(kSmiTagMask));
  if (value_is_smi == nullptr) {
    __ j(ZERO, &done, compiler::Assembler::kNearJump);
  } else {
    __ j(ZERO, value_is_smi);
  }
  __ LoadClassId(value_cid_reg, value_reg);
  __ Bind(&done);
}

static Condition FlipCondition(Condition condition) {
  switch (condition) {
    case EQUAL:
      return EQUAL;
    case NOT_EQUAL:
      return NOT_EQUAL;
    case LESS:
      return GREATER;
    case LESS_EQUAL:
      return GREATER_EQUAL;
    case GREATER:
      return LESS;
    case GREATER_EQUAL:
      return LESS_EQUAL;
    case BELOW:
      return ABOVE;
    case BELOW_EQUAL:
      return ABOVE_EQUAL;
    case ABOVE:
      return BELOW;
    case ABOVE_EQUAL:
      return BELOW_EQUAL;
    default:
      UNREACHABLE();
      return EQUAL;
  }
}
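
// FlipCondition yields the condition that holds after swapping the compare
// operands (a < b is equivalent to b > a); it is not the logical negation
// (that is InvertCondition). It is used when a constant ends up as the left
// operand of cmp, e.g. in EmitSmiComparisonOp below.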

static void EmitBranchOnCondition(
    FlowGraphCompiler* compiler,
    Condition true_condition,
    BranchLabels labels,
    compiler::Assembler::JumpDistance jump_distance =
        compiler::Assembler::kFarJump) {
  if (labels.fall_through == labels.false_label) {
    // If the next block is the false successor, fall through to it.
    __ j(true_condition, labels.true_label, jump_distance);
  } else {
    // If the next block is not the false successor, branch to it.
    Condition false_condition = InvertCondition(true_condition);
    __ j(false_condition, labels.false_label, jump_distance);

    // Fall through or jump to the true successor.
    if (labels.fall_through != labels.true_label) {
      __ jmp(labels.true_label, jump_distance);
    }
  }
}

static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
                                     const LocationSummary& locs,
                                     Token::Kind kind,
                                     BranchLabels labels) {
  Location left = locs.in(0);
  Location right = locs.in(1);
  ASSERT(!left.IsConstant() || !right.IsConstant());

  Condition true_condition = TokenKindToIntCondition(kind);

  if (left.IsConstant()) {
    __ CompareObject(right.reg(), left.constant());
    true_condition = FlipCondition(true_condition);
  } else if (right.IsConstant()) {
    __ CompareObject(left.reg(), right.constant());
  } else if (right.IsStackSlot()) {
    __ cmpl(left.reg(), LocationToStackSlotAddress(right));
  } else {
    __ cmpl(left.reg(), right.reg());
  }
  return true_condition;
}

static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
                                      const LocationSummary& locs,
                                      Token::Kind kind,
                                      BranchLabels labels) {
  Location left = locs.in(0);
  Location right = locs.in(1);
  ASSERT(!left.IsConstant() || !right.IsConstant());

  Condition true_condition = TokenKindToIntCondition(kind);

  if (left.IsConstant()) {
    __ CompareImmediate(
        right.reg(),
        static_cast<uword>(Integer::Cast(left.constant()).AsInt64Value()));
    true_condition = FlipCondition(true_condition);
  } else if (right.IsConstant()) {
    __ CompareImmediate(
        left.reg(),
        static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
  } else if (right.IsStackSlot()) {
    __ cmpl(left.reg(), LocationToStackSlotAddress(right));
  } else {
    __ cmpl(left.reg(), right.reg());
  }
  return true_condition;
}

static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
                                           const LocationSummary& locs,
                                           Token::Kind kind,
                                           BranchLabels labels) {
  ASSERT(Token::IsEqualityOperator(kind));
  PairLocation* left_pair = locs.in(0).AsPairLocation();
  Register left1 = left_pair->At(0).reg();
  Register left2 = left_pair->At(1).reg();
  PairLocation* right_pair = locs.in(1).AsPairLocation();
  Register right1 = right_pair->At(0).reg();
  Register right2 = right_pair->At(1).reg();
  compiler::Label done;
  // Compare lower.
  __ cmpl(left1, right1);
  __ j(NOT_EQUAL, &done);
  // Lower is equal, compare upper.
  __ cmpl(left2, right2);
  __ Bind(&done);
  Condition true_condition = TokenKindToIntCondition(kind);
  return true_condition;
}

static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
                                             const LocationSummary& locs,
                                             Token::Kind kind,
                                             BranchLabels labels) {
  PairLocation* left_pair = locs.in(0).AsPairLocation();
  Register left1 = left_pair->At(0).reg();
  Register left2 = left_pair->At(1).reg();
  PairLocation* right_pair = locs.in(1).AsPairLocation();
  Register right1 = right_pair->At(0).reg();
  Register right2 = right_pair->At(1).reg();

  Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW;
  switch (kind) {
    case Token::kLT:
      hi_cond = LESS;
      lo_cond = BELOW;
      break;
    case Token::kGT:
      hi_cond = GREATER;
      lo_cond = ABOVE;
      break;
    case Token::kLTE:
      hi_cond = LESS;
      lo_cond = BELOW_EQUAL;
      break;
    case Token::kGTE:
      hi_cond = GREATER;
      lo_cond = ABOVE_EQUAL;
      break;
    default:
      break;
  }
  ASSERT(hi_cond != OVERFLOW && lo_cond != OVERFLOW);
  // Compare upper halves first.
  __ cmpl(left2, right2);
  __ j(hi_cond, labels.true_label);
  __ j(FlipCondition(hi_cond), labels.false_label);

  // If upper is equal, compare lower half.
  __ cmpl(left1, right1);
  return lo_cond;
}
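
// A signed 64-bit comparison on a 32-bit machine splits into a signed
// comparison of the high words and an unsigned comparison of the low words:
// for kLT the high words use LESS but the low words use BELOW, since the low
// word is an unsigned magnitude once the high words are known to be equal.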

static Condition TokenKindToDoubleCondition(Token::Kind kind) {
  switch (kind) {
    case Token::kEQ:
      return EQUAL;
    case Token::kNE:
      return NOT_EQUAL;
    case Token::kLT:
      return BELOW;
    case Token::kGT:
      return ABOVE;
    case Token::kLTE:
      return BELOW_EQUAL;
    case Token::kGTE:
      return ABOVE_EQUAL;
    default:
      UNREACHABLE();
      return OVERFLOW;
  }
}

static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
                                        const LocationSummary& locs,
                                        Token::Kind kind,
                                        BranchLabels labels) {
  XmmRegister left = locs.in(0).fpu_reg();
  XmmRegister right = locs.in(1).fpu_reg();

  __ comisd(left, right);

  Condition true_condition = TokenKindToDoubleCondition(kind);
  compiler::Label* nan_result =
      (true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label;
  __ j(PARITY_EVEN, nan_result);
  return true_condition;
}
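
// comisd sets the parity flag when either operand is NaN (an unordered
// result). Every comparison involving NaN must evaluate to false except !=,
// which is why the PARITY_EVEN jump targets the true label only for
// NOT_EQUAL. Unsigned conditions (BELOW, ABOVE, ...) are used because comisd
// sets the flags like an unsigned integer compare.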

Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
                                                   BranchLabels labels) {
  if (is_null_aware()) {
    // Null-aware EqualityCompare instruction is only used in AOT.
    UNREACHABLE();
  }
  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, *locs(), kind(), labels);
  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels);
  } else if (operation_cid() == kIntegerCid) {
    return EmitWordComparisonOp(compiler, *locs(), kind(), labels);
  } else {
    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
  }
}

void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  compiler::Label is_true, is_false;
  BranchLabels labels = {&is_true, &is_false, &is_false};
  Condition true_condition = EmitComparisonCode(compiler, labels);
  if (true_condition != kInvalidCondition) {
    EmitBranchOnCondition(compiler, true_condition, labels,
                          compiler::Assembler::kNearJump);
  }

  Register result = locs()->out(0).reg();
  compiler::Label done;
  __ Bind(&is_false);
  __ LoadObject(result, Bool::False());
  __ jmp(&done, compiler::Assembler::kNearJump);
  __ Bind(&is_true);
  __ LoadObject(result, Bool::True());
  __ Bind(&done);
}

void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
                                     BranchInstr* branch) {
  BranchLabels labels = compiler->CreateBranchLabels(branch);
  Condition true_condition = EmitComparisonCode(compiler, labels);
  if (true_condition != kInvalidCondition) {
    EmitBranchOnCondition(compiler, true_condition, labels);
  }
}

LocationSummary* TestSmiInstr::MakeLocationSummary(Zone* zone, bool opt) const {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  locs->set_in(0, Location::RequiresRegister());
  // Only one input can be a constant operand. The case of two constant
  // operands should be handled by constant propagation.
  locs->set_in(1, LocationRegisterOrConstant(right()));
  return locs;
}

Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
                                           BranchLabels labels) {
  Register left = locs()->in(0).reg();
  Location right = locs()->in(1);
  if (right.IsConstant()) {
    ASSERT(right.constant().IsSmi());
    const int32_t imm = static_cast<int32_t>(right.constant().ptr());
    __ testl(left, compiler::Immediate(imm));
  } else {
    __ testl(left, right.reg());
  }
  Condition true_condition = (kind() == Token::kNE) ? NOT_ZERO : ZERO;
  return true_condition;
}

LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
                                                    bool opt) const {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  locs->set_in(0, Location::RequiresRegister());
  locs->set_temp(0, Location::RequiresRegister());
  locs->set_out(0, Location::RequiresRegister());
  return locs;
}

Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
                                            BranchLabels labels) {
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
  Register val_reg = locs()->in(0).reg();
  Register cid_reg = locs()->temp(0).reg();

  compiler::Label* deopt =
      CanDeoptimize()
          ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
          : nullptr;

  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
  const ZoneGrowableArray<intptr_t>& data = cid_results();
  ASSERT(data[0] == kSmiCid);
  bool result = data[1] == true_result;
  __ testl(val_reg, compiler::Immediate(kSmiTagMask));
  __ j(ZERO, result ? labels.true_label : labels.false_label);
  __ LoadClassId(cid_reg, val_reg);
  for (intptr_t i = 2; i < data.length(); i += 2) {
    const intptr_t test_cid = data[i];
    ASSERT(test_cid != kSmiCid);
    result = data[i + 1] == true_result;
    __ cmpl(cid_reg, compiler::Immediate(test_cid));
    __ j(EQUAL, result ? labels.true_label : labels.false_label);
  }
  // No match found, deoptimize or default action.
  if (deopt == nullptr) {
    // If the cid is not in the list, jump to the opposite label from the cids
    // that are in the list. These must be all the same (see asserts in the
    // constructor).
    compiler::Label* target = result ? labels.false_label : labels.true_label;
    if (target != labels.fall_through) {
      __ jmp(target);
    }
  } else {
    __ jmp(deopt);
  }
  // Dummy result as this method already did the jump, there's no need
  // for the caller to branch on a condition.
  return kInvalidCondition;
}
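
// cid_results() is a flat array of (cid, result) pairs: data[i] is a class
// id and data[i + 1] is 1 or 0 depending on whether that cid should answer
// true. The Smi entry always comes first so the tag test above can handle it
// before LoadClassId is safe to use.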

LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (operation_cid() == kMintCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
    locs->set_in(0, Location::Pair(Location::RequiresRegister(),
                                   Location::RequiresRegister()));
    locs->set_in(1, Location::Pair(Location::RequiresRegister(),
                                   Location::RequiresRegister()));
    locs->set_out(0, Location::RequiresRegister());
    return locs;
  }
  if (operation_cid() == kDoubleCid) {
    LocationSummary* summary = new (zone)
        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
    summary->set_in(0, Location::RequiresFpuRegister());
    summary->set_in(1, Location::RequiresFpuRegister());
    summary->set_out(0, Location::RequiresRegister());
    return summary;
  }
  ASSERT(operation_cid() == kSmiCid);
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  summary->set_in(0, LocationRegisterOrConstant(left()));
  // Only one input can be a constant operand. The case of two constant
  // operands should be handled by constant propagation.
  summary->set_in(1, summary->in(0).IsConstant()
                         ? Location::RequiresRegister()
                         : LocationRegisterOrConstant(right()));
  summary->set_out(0, Location::RequiresRegister());
  return summary;
}

Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
                                                BranchLabels labels) {
  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, *locs(), kind(), labels);
  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels);
  } else {
    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
  }
}

void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  SetupNative();
  Register result = locs()->out(0).reg();
  const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());

  // Pass a pointer to the first argument in EAX.
  __ leal(EAX, compiler::Address(ESP, (ArgumentCount() - 1) * kWordSize));

  __ movl(EDX, compiler::Immediate(argc_tag));

  const Code* stub;

  // There is no lazy-linking support on ia32.
  ASSERT(!link_lazily());
  if (is_bootstrap_native()) {
    stub = &StubCode::CallBootstrapNative();
  } else if (is_auto_scope()) {
    stub = &StubCode::CallAutoScopeNative();
  } else {
    stub = &StubCode::CallNoScopeNative();
  }
  const compiler::ExternalLabel label(
      reinterpret_cast<uword>(native_c_function()));
  __ movl(ECX, compiler::Immediate(label.address()));
  // We can never lazy-deopt here because natives are never optimized.
  ASSERT(!compiler->is_optimizing());
  compiler->GenerateNonLazyDeoptableStubCall(
      source(), *stub, UntaggedPcDescriptors::kOther, locs());
  __ LoadFromOffset(result, ESP, 0);

  compiler->EmitDropArguments(ArgumentCount());  // Drop the arguments.
}

#define R(r) (1 << r)

LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
                                                   bool is_optimizing) const {
  // The temps are assigned by register number (see EmitNativeCode); the
  // highest-numbered temp must be callee-saved, as it carries the saved
  // FP/SP across the call.
  return MakeLocationSummaryInternal(zone, is_optimizing,
                                     (R(ECX) | R(EDX) | R(EDI)));
}

#undef R

void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register branch = locs()->in(TargetAddressIndex()).reg();

  // The temps are indexed according to their register number.
  const Register temp = locs()->temp(0).reg();
  // For regular calls, this holds the FP for rebasing the original locations
  // during EmitParamMoves.
  // For leaf calls, this holds the SP used to restore the pre-aligned SP after
  // the call.
  const Register saved_fp_or_sp = locs()->temp(2).reg();

  // Ensure this is a callee-saved register and is preserved across the call.
  ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
  // Other temps don't need to be preserved.

  __ movl(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);

  intptr_t stack_required = marshaller_.RequiredStackSpaceInBytes();

  if (is_leaf_) {
    // For leaf calls we need to leave space at the bottom for the pre-align SP.
    stack_required += compiler::target::kWordSize;
  } else {
    // Make a space to put the return address.
    __ pushl(compiler::Immediate(0));

    // We need to create a dummy "exit frame". It will have a null code object.
    __ LoadObject(CODE_REG, Object::null_object());
    __ EnterDartFrame(0);
  }

  // Reserve space for the arguments that go on the stack (if any), then align.
  __ ReserveAlignedFrameSpace(stack_required);
#if defined(USING_MEMORY_SANITIZER)
  UNIMPLEMENTED();
#endif

  EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp,
                 locs()->temp(1).reg());

  if (is_leaf_) {
    // We store the pre-align SP at a fixed offset from the final SP.
    // Pushing before alignment would mean its placement would vary with how
    // much the frame was unaligned.
    __ movl(compiler::Address(SPREG, marshaller_.RequiredStackSpaceInBytes()),
            saved_fp_or_sp);
  }

  if (compiler::Assembler::EmittingComments()) {
    __ Comment(is_leaf_ ? "Leaf Call" : "Call");
  }

  if (is_leaf_) {
#if !defined(PRODUCT)
    // Set the thread object's top_exit_frame_info and VMTag to enable the
    // profiler to determine that the thread is no longer executing Dart code.
    __ movl(compiler::Address(
                THR, compiler::target::Thread::top_exit_frame_info_offset()),
            FPREG);
    __ movl(compiler::Assembler::VMTagAddress(), branch);
#endif

    __ call(branch);

#if !defined(PRODUCT)
    __ movl(compiler::Assembler::VMTagAddress(),
            compiler::Immediate(compiler::target::Thread::vm_tag_dart_id()));
    __ movl(compiler::Address(
                THR, compiler::target::Thread::top_exit_frame_info_offset()),
            compiler::Immediate(0));
#endif
  } else {
    // We need to copy a dummy return address up into the dummy stack frame so
    // the stack walker will know which safepoint to use. Unlike X64, there's
    // no PC-relative 'leaq' available, so we have to do a trick with 'call'.
    compiler::Label get_pc;
    __ call(&get_pc);
    compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
                                   UntaggedPcDescriptors::Kind::kOther, locs(),
                                   env());
    __ Bind(&get_pc);
    __ popl(temp);
    __ movl(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize),
            temp);

    // We cannot trust that this code will be executable within a safepoint.
    // Therefore we delegate the responsibility of entering/exiting the
    // safepoint to a stub which is in the VM isolate's heap, which will never
    // lose execute permission.
    __ movl(temp,
            compiler::Address(
                THR, compiler::target::Thread::
                         call_native_through_safepoint_entry_point_offset()));

    // Calls EAX within a safepoint and clobbers EBX.
    ASSERT(branch == EAX);
    __ call(temp);

    if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
      __ Comment("Check Dart_Handle for Error.");
      compiler::Label not_error;
      __ movl(temp,
              compiler::Address(CallingConventions::kReturnReg,
                                compiler::target::LocalHandle::ptr_offset()));
      __ BranchIfSmi(temp, &not_error);
      __ LoadClassId(temp, temp);
      __ RangeCheck(temp, kNoRegister, kFirstErrorCid, kLastErrorCid,
                    compiler::AssemblerBase::kIfNotInRange, &not_error);

      // Slow path, use the stub to propagate error, to save on code-size.
      __ Comment("Slow path: call Dart_PropagateError through stub.");
      __ movl(temp,
              compiler::Address(
                  THR, compiler::target::Thread::
                           call_native_through_safepoint_entry_point_offset()));
      __ movl(EAX, compiler::Address(
                       THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
      __ call(temp);
#if defined(DEBUG)
      // We should never return with normal control flow from this.
      __ int3();
#endif

      __ Bind(&not_error);
    }
  }

  // Restore the stack when a struct by value is returned into memory pointed
  // to by a pointer that is passed into the function.
  if (CallingConventions::kUsesRet4 &&
      marshaller_.Location(compiler::ffi::kResultIndex).IsPointerToMemory()) {
    // Callee uses `ret 4` instead of `ret` to return.
    // See: https://c9x.me/x86/html/file_module_x86_id_280.html
    // Caller does `sub esp, 4` immediately after return to balance stack.
    __ subl(SPREG, compiler::Immediate(compiler::target::kWordSize));
  }

  // The x86 calling convention requires floating point values to be returned
  // on the "floating-point stack" (aka. register ST0). We don't use the
  // floating-point stack in Dart, so we need to move the return value back
  // into an XMM register.
  if (representation() == kUnboxedDouble) {
    __ fstpl(compiler::Address(SPREG, -kDoubleSize));
    __ movsd(XMM0, compiler::Address(SPREG, -kDoubleSize));
  } else if (representation() == kUnboxedFloat) {
    __ fstps(compiler::Address(SPREG, -kFloatSize));
    __ movss(XMM0, compiler::Address(SPREG, -kFloatSize));
  }

  // Pass both registers for use as clobbered temp registers.
  EmitReturnMoves(compiler, saved_fp_or_sp, temp);

  if (is_leaf_) {
    // Restore pre-align SP. It was stored right before the first stack
    // argument.
    __ movl(SPREG,
            compiler::Address(SPREG, marshaller_.RequiredStackSpaceInBytes()));
  } else {
    // Leave dummy exit frame.
    __ LeaveDartFrame();

    // Instead of returning to the "fake" return address, we just pop it.
    __ popl(temp);
  }
}

// Keep in sync with NativeReturnInstr::EmitNativeCode.
void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  __ Bind(compiler->GetJumpLabel(this));

  // Enter the entry frame. NativeParameterInstr expects this frame has size
  // -exit_link_slot_from_entry_fp, verified below.
  __ EnterFrame(0);

  // Save a space for the code object.
  __ xorl(EAX, EAX);
  __ pushl(EAX);

#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif

  // Save ABI callee-saved registers.
  __ pushl(EBX);
  __ pushl(ESI);
  __ pushl(EDI);

  // Save the current VMTag on the stack.
  __ movl(ECX, compiler::Assembler::VMTagAddress());
  __ pushl(ECX);

  // Save top resource.
  __ pushl(
      compiler::Address(THR, compiler::target::Thread::top_resource_offset()));
  __ movl(
      compiler::Address(THR, compiler::target::Thread::top_resource_offset()),
      compiler::Immediate(0));

  __ pushl(compiler::Address(
      THR, compiler::target::Thread::exit_through_ffi_offset()));

  // Save top exit frame info. Stack walker expects it to be here.
  __ pushl(compiler::Address(
      THR, compiler::target::Thread::top_exit_frame_info_offset()));

  // In debug mode, verify that we've pushed the top exit frame info at the
  // correct offset from FP.
  __ EmitEntryFrameVerification();

  // The callback trampoline (caller) has already left the safepoint for us.
  __ TransitionNativeToGenerated(EAX, /*exit_safepoint=*/false);

  // Now that the safepoint has ended, we can hold Dart objects with bare
  // hands.

  // Load the code object.
  const Function& target_function = marshaller_.dart_signature();
  const intptr_t callback_id = target_function.FfiCallbackId();
  __ movl(EAX, compiler::Address(
                   THR, compiler::target::Thread::isolate_group_offset()));
  __ movl(EAX, compiler::Address(
                   EAX, compiler::target::IsolateGroup::object_store_offset()));
  __ movl(EAX,
          compiler::Address(
              EAX, compiler::target::ObjectStore::ffi_callback_code_offset()));
  __ movl(EAX, compiler::FieldAddress(
                   EAX, compiler::target::GrowableObjectArray::data_offset()));
  __ movl(CODE_REG, compiler::FieldAddress(
                        EAX, compiler::target::Array::data_offset() +
                                 callback_id * compiler::target::kWordSize));

  // Put the code object in the reserved slot.
  __ movl(compiler::Address(FPREG,
                            kPcMarkerSlotFromFp * compiler::target::kWordSize),
          CODE_REG);

  // Load a GC-safe value for the arguments descriptor (unused but tagged).
  __ xorl(ARGS_DESC_REG, ARGS_DESC_REG);

  // Push a dummy return address which suggests that we are inside of
  // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
  __ movl(EAX,
          compiler::Address(
              THR, compiler::target::Thread::invoke_dart_code_stub_offset()));
  __ pushl(compiler::FieldAddress(
      EAX, compiler::target::Code::entry_point_offset()));

  // Continue with Dart frame setup.
  FunctionEntryInstr::EmitNativeCode(compiler);
}

#define R(r) (1 << r)

LocationSummary* LeafRuntimeCallInstr::MakeLocationSummary(
    Zone* zone,
    bool is_optimizing) const {
  constexpr Register saved_fp = CallingConventions::kSecondNonArgumentRegister;
  constexpr Register temp0 = CallingConventions::kFfiAnyNonAbiRegister;
  static_assert(saved_fp < temp0, "Unexpected ordering of registers in set.");
  return MakeLocationSummaryInternal(zone, (R(saved_fp) | R(temp0)));
}

#undef R

void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  const Register saved_fp = locs()->temp(0).reg();
  const Register temp0 = locs()->temp(1).reg();

  __ MoveRegister(saved_fp, FPREG);
  const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
  __ EnterCFrame(frame_space);

  EmitParamMoves(compiler, saved_fp, temp0);

  const Register target_address = locs()->in(TargetAddressIndex()).reg();
  __ movl(compiler::Assembler::VMTagAddress(), target_address);
  __ CallCFunction(target_address);
  __ movl(compiler::Assembler::VMTagAddress(),
          compiler::Immediate(VMTag::kDartTagId));

  __ LeaveCFrame();
}

LocationSummary* OneByteStringFromCharCodeInstr::MakeLocationSummary(
    Zone* zone,
    bool opt) const {
  const intptr_t kNumInputs = 1;
  // TODO(fschneider): Allow immediate operands for the char code.
  return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}

void OneByteStringFromCharCodeInstr::EmitNativeCode(
    FlowGraphCompiler* compiler) {
  Register char_code = locs()->in(0).reg();
  Register result = locs()->out(0).reg();
  __ movl(result, compiler::Immediate(
                      reinterpret_cast<uword>(Symbols::PredefinedAddress())));
  __ movl(result,
          compiler::Address(result, char_code,
                            TIMES_HALF_WORD_SIZE,  // Char code is a smi.
                            Symbols::kNullCharCodeSymbolOffset * kWordSize));
}

LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
                                                            bool opt) const {
  const intptr_t kNumInputs = 1;
  return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
                               LocationSummary::kNoCall);
}

void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
  ASSERT(cid_ == kOneByteStringCid);
  Register str = locs()->in(0).reg();
  Register result = locs()->out(0).reg();
  compiler::Label is_one, done;
  __ movl(result, compiler::FieldAddress(str, String::length_offset()));
  __ cmpl(result, compiler::Immediate(Smi::RawValue(1)));
  __ j(EQUAL, &is_one, compiler::Assembler::kNearJump);
  __ movl(result, compiler::Immediate(Smi::RawValue(-1)));
  __ jmp(&done);
  __ Bind(&is_one);
  __ movzxb(result, compiler::FieldAddress(str, OneByteString::data_offset()));
  __ SmiTag(result);
  __ Bind(&done);
}

LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
                                                    bool opt) const {
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  summary->set_in(0, Location::Any());               // decoder
  summary->set_in(1, Location::WritableRegister());  // bytes
  summary->set_in(2, Location::WritableRegister());  // start
  summary->set_in(3, Location::WritableRegister());  // end
  summary->set_in(4, Location::RequiresRegister());  // table
  summary->set_out(0, Location::RequiresRegister());
  return summary;
}
1552
1553void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1554 const Register bytes_reg = locs()->in(1).reg();
1555 const Register start_reg = locs()->in(2).reg();
1556 const Register end_reg = locs()->in(3).reg();
1557 const Register table_reg = locs()->in(4).reg();
1558 const Register size_reg = locs()->out(0).reg();
1559
1560 const Register bytes_ptr_reg = start_reg;
1561 const Register flags_reg = end_reg;
1562 const Register temp_reg = bytes_reg;
1563 const XmmRegister vector_reg = FpuTMP;
1564
1565 const intptr_t kBytesEndTempOffset = 1 * compiler::target::kWordSize;
1566 const intptr_t kBytesEndMinus16TempOffset = 0 * compiler::target::kWordSize;
1567
1568 const intptr_t kSizeMask = 0x03;
1569 const intptr_t kFlagsMask = 0x3C;
1570
1571 compiler::Label scan_ascii, ascii_loop, ascii_loop_in, nonascii_loop;
1572 compiler::Label rest, rest_loop, rest_loop_in, done;
1573
1574 // Address of input bytes.
1575 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
1576
1577 // Pointers to start, end and end-16.
1578 __ leal(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
1579 __ leal(temp_reg, compiler::Address(bytes_reg, end_reg, TIMES_1, 0));
1580 __ pushl(temp_reg);
1581 __ leal(temp_reg, compiler::Address(temp_reg, -16));
1582 __ pushl(temp_reg);
1583
1584 // Initialize size and flags.
1585 __ xorl(size_reg, size_reg);
1586 __ xorl(flags_reg, flags_reg);
1587
1588 __ jmp(&scan_ascii, compiler::Assembler::kNearJump);
1589
1590 // Loop scanning through ASCII bytes one 16-byte vector at a time.
1591 // While scanning, the size register contains the size as it was at the start
1592 // of the current block of ASCII bytes, minus the address of the start of the
1593 // block. After the block, the end address of the block is added to update the
1594 // size to include the bytes in the block.
1595 __ Bind(&ascii_loop);
1596 __ addl(bytes_ptr_reg, compiler::Immediate(16));
1597 __ Bind(&ascii_loop_in);
1598
1599 // Exit vectorized loop when there are less than 16 bytes left.
1600 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndMinus16TempOffset));
1602
1603 // Find next non-ASCII byte within the next 16 bytes.
1604 // Note: In principle, we should use MOVDQU here, since the loaded value is
1605 // used as input to an integer instruction. In practice, according to Agner
1606 // Fog, there is no penalty for using the wrong kind of load.
1607 __ movups(vector_reg, compiler::Address(bytes_ptr_reg, 0));
1608 __ pmovmskb(temp_reg, vector_reg);
1609 __ bsfl(temp_reg, temp_reg);
1610 __ j(EQUAL, &ascii_loop, compiler::Assembler::kNearJump);
1611
1612 // Point to non-ASCII byte and update size.
1613 __ addl(bytes_ptr_reg, temp_reg);
1614 __ addl(size_reg, bytes_ptr_reg);
1615
1616 // Read first non-ASCII byte.
1617 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1618
1619 // Loop over block of non-ASCII bytes.
1620 __ Bind(&nonascii_loop);
1621 __ addl(bytes_ptr_reg, compiler::Immediate(1));
1622
1623 // Update size and flags based on byte value.
1624 __ movzxb(temp_reg, compiler::FieldAddress(
1625 table_reg, temp_reg, TIMES_1,
1626 compiler::target::OneByteString::data_offset()));
1627 __ orl(flags_reg, temp_reg);
1628 __ andl(temp_reg, compiler::Immediate(kSizeMask));
1629 __ addl(size_reg, temp_reg);
1630
1631 // Stop if end is reached.
1632 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset));
1634
1635 // Go to ASCII scan if next byte is ASCII, otherwise loop.
1636 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1637 __ testl(temp_reg, compiler::Immediate(0x80));
1638 __ j(NOT_EQUAL, &nonascii_loop, compiler::Assembler::kNearJump);
1639
1640 // Enter the ASCII scanning loop.
1641 __ Bind(&scan_ascii);
1642 __ subl(size_reg, bytes_ptr_reg);
1643 __ jmp(&ascii_loop_in);
1644
1645 // Less than 16 bytes left. Process the remaining bytes individually.
1646 __ Bind(&rest);
1647
1648 // Update size after ASCII scanning loop.
1649 __ addl(size_reg, bytes_ptr_reg);
1650 __ jmp(&rest_loop_in, compiler::Assembler::kNearJump);
1651
1652 __ Bind(&rest_loop);
1653
1654 // Read byte and increment pointer.
1655 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1656 __ addl(bytes_ptr_reg, compiler::Immediate(1));
1657
1658 // Update size and flags based on byte value.
1659 __ movzxb(temp_reg, compiler::FieldAddress(
1660 table_reg, temp_reg, TIMES_1,
1661 compiler::target::OneByteString::data_offset()));
1662 __ orl(flags_reg, temp_reg);
1663 __ andl(temp_reg, compiler::Immediate(kSizeMask));
1664 __ addl(size_reg, temp_reg);
1665
1666 // Stop if end is reached.
1667 __ Bind(&rest_loop_in);
1668 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset));
1669 __ j(UNSIGNED_LESS, &rest_loop, compiler::Assembler::kNearJump);
1670 __ Bind(&done);
1671
1672 // Pop temporaries.
1673 __ addl(ESP, compiler::Immediate(2 * compiler::target::kWordSize));
1674
1675 // Write flags to field.
1676 __ andl(flags_reg, compiler::Immediate(kFlagsMask));
1677 if (!IsScanFlagsUnboxed()) {
1678 __ SmiTag(flags_reg);
1679 }
1680 Register decoder_reg;
1681 const Location decoder_location = locs()->in(0);
1682 if (decoder_location.IsStackSlot()) {
1683 __ movl(temp_reg, LocationToStackSlotAddress(decoder_location));
1684 decoder_reg = temp_reg;
1685 } else {
1686 decoder_reg = decoder_location.reg();
1687 }
1688 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1689 __ orl(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
1690 flags_reg);
1691}
1692
1693LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
1694 bool opt) const {
1695 // The compiler must optimize any function that includes a LoadIndexed
1696 // instruction that uses typed data cids, since extracting the payload address
1697 // from views is done in a compiler pass after all code motion has happened.
1698 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
1699
1700 const intptr_t kNumInputs = 2;
1701 const intptr_t kNumTemps = 0;
1702 LocationSummary* locs = new (zone)
1703 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1704 locs->set_in(kArrayPos, Location::RequiresRegister());
1705 // The index is either untagged (element size == 1) or a smi (for all
1706 // element sizes > 1).
1707 const bool need_writable_index_register = index_scale() == 1;
1708 const bool can_be_constant =
1709 index()->BindsToConstant() &&
1710 compiler::Assembler::AddressCanHoldConstantIndex(
1711 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
1712 locs->set_in(
1713 kIndexPos,
1714 can_be_constant
1715 ? Location::Constant(index()->definition()->AsConstant())
1716 : (need_writable_index_register ? Location::WritableRegister()
1717 : Location::RequiresRegister()));
1718 auto const rep =
1719 RepresentationUtils::RepresentationOfArrayElement(class_id());
1720 if (RepresentationUtils::IsUnboxedInteger(rep)) {
1721 if (rep == kUnboxedInt64) {
1722 locs->set_out(0, Location::Pair(Location::RequiresRegister(),
1723 Location::RequiresRegister()));
1724 } else {
1725 locs->set_out(0, Location::RequiresRegister());
1726 }
1727 } else if (RepresentationUtils::IsUnboxed(rep)) {
1728 locs->set_out(0, Location::RequiresFpuRegister());
1729 } else {
1730 locs->set_out(0, Location::RequiresRegister());
1731 }
1732 return locs;
1733}
1734
1735void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1736 // The array register points to the backing store for external arrays.
1737 const Register array = locs()->in(kArrayPos).reg();
1738 const Location index = locs()->in(kIndexPos);
1739
1740 bool index_unboxed = index_unboxed_;
1741 if (index_scale() == 1 && !index_unboxed) {
1742 if (index.IsRegister()) {
1743 __ SmiUntag(index.reg());
1744 index_unboxed = true;
1745 } else {
1746 ASSERT(index.IsConstant());
1747 }
1748 }
1749
1750 compiler::Address element_address =
1751 index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
1752 IsUntagged(), class_id(), index_scale(),
1753 index_unboxed, array, index.reg())
1754 : compiler::Assembler::ElementAddressForIntIndex(
1755 IsUntagged(), class_id(), index_scale(), array,
1756 Smi::Cast(index.constant()).Value());
1757
1758 auto const rep =
1759 RepresentationUtils::RepresentationOfArrayElement(class_id());
1760 ASSERT(representation() == Boxing::NativeRepresentation(rep));
1761 if (RepresentationUtils::IsUnboxedInteger(rep)) {
1762 if (rep == kUnboxedInt64) {
1763 ASSERT(locs()->out(0).IsPairLocation());
1764 PairLocation* result_pair = locs()->out(0).AsPairLocation();
1765 const Register result_lo = result_pair->At(0).reg();
1766 const Register result_hi = result_pair->At(1).reg();
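// A 64-bit element is loaded as two 32-bit halves; the element address is
// recomputed below with a kWordSize offset to reach the high word.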
1767 __ movl(result_lo, element_address);
1768 element_address =
1769 index.IsRegister()
1770 ? compiler::Assembler::ElementAddressForRegIndex(
1771 IsUntagged(), class_id(), index_scale(), index_unboxed,
1772 array, index.reg(), kWordSize)
1773 : compiler::Assembler::ElementAddressForIntIndex(
1774 IsUntagged(), class_id(), index_scale(), array,
1775 Smi::Cast(index.constant()).Value(), kWordSize);
1776 __ movl(result_hi, element_address);
1777 } else {
1778 Register result = locs()->out(0).reg();
1779 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
1780 }
1781 } else if (RepresentationUtils::IsUnboxed(rep)) {
1782 XmmRegister result = locs()->out(0).fpu_reg();
1783 if (rep == kUnboxedFloat) {
1784 __ movss(result, element_address);
1785 } else if (rep == kUnboxedDouble) {
1786 __ movsd(result, element_address);
1787 } else {
1788 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
1789 rep == kUnboxedFloat64x2);
1790 __ movups(result, element_address);
1791 }
1792 } else {
1793 const Register result = locs()->out(0).reg();
1794 ASSERT(representation() == kTagged);
1795 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
1796 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
1797 __ movl(result, element_address);
1798 }
1799}
1800
1801LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
1802 bool opt) const {
1803 // The compiler must optimize any function that includes a StoreIndexed
1804 // instruction that uses typed data cids, since extracting the payload address
1805 // from views is done in a compiler pass after all code motion has happened.
1806 ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
1807
1808 const intptr_t kNumInputs = 3;
1809 const intptr_t kNumTemps =
1810 class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 2 : 0;
1811 LocationSummary* locs = new (zone)
1812 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1813 locs->set_in(0, Location::RequiresRegister());
1814 // The index is either untagged (element size == 1) or a smi (for all
1815 // element sizes > 1).
1816 const bool need_writable_index_register = index_scale() == 1;
1817 const bool can_be_constant =
1818 index()->BindsToConstant() &&
1819 compiler::Assembler::AddressCanHoldConstantIndex(
1820 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
1821 locs->set_in(
1822 1, can_be_constant
1823 ? Location::Constant(index()->definition()->AsConstant())
1824 : (need_writable_index_register ? Location::WritableRegister()
1825 : Location::RequiresRegister()));
1826 auto const rep =
1827 RepresentationUtils::RepresentationOfArrayElement(class_id());
1828 if (RepresentationUtils::IsUnboxedInteger(rep)) {
1829 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
1830 // TODO(fschneider): Add location constraint for byte registers (EAX,
1831 // EBX, ECX, EDX) instead of using a fixed register.
1832 locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), EAX));
1833 } else if (rep == kUnboxedInt64) {
1834 locs->set_in(2, Location::Pair(Location::RequiresRegister(),
1835 Location::RequiresRegister()));
1836 } else {
1837 locs->set_in(2, Location::RequiresRegister());
1838 }
1839 } else if (RepresentationUtils::IsUnboxed(rep)) {
1840 // TODO(srdjan): Support Float64 constants.
1841 locs->set_in(2, Location::RequiresFpuRegister());
1842 } else if (class_id() == kArrayCid) {
1843 locs->set_in(2, LocationRegisterOrConstant(value()));
1844 if (ShouldEmitStoreBarrier()) {
1845 locs->set_in(2,
1846 Location::RegisterLocation(kWriteBarrierValueReg));
1847 locs->set_temp(0, Location::RequiresRegister());
1848 locs->set_temp(1, Location::RequiresRegister());
1849 }
1850 } else {
1851 UNREACHABLE();
1852 }
1853 return locs;
1854}
1855
1856void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1857 // The array register points to the backing store for external arrays.
1858 const Register array = locs()->in(0).reg();
1859 const Location index = locs()->in(1);
1860
1861 bool index_unboxed = index_unboxed_;
1862 if ((index_scale() == 1) && index.IsRegister() && !index_unboxed) {
1863 __ SmiUntag(index.reg());
1864 index_unboxed = true;
1865 }
1866 compiler::Address element_address =
1869 index_unboxed, array, index.reg())
1870 : compiler::Assembler::ElementAddressForIntIndex(
1872 Smi::Cast(index.constant()).Value());
1873
1874 auto const rep =
1878 ASSERT(rep == kUnboxedUint8);
1879 if (locs()->in(2).IsConstant()) {
1880 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1881 intptr_t value = constant.Value();
1882 // Clamp to 0x0 or 0xFF respectively.
1883 if (value > 0xFF) {
1884 value = 0xFF;
1885 } else if (value < 0) {
1886 value = 0;
1887 }
1888 __ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
1889 } else {
1890 ASSERT(locs()->in(2).reg() == EAX);
1891 compiler::Label store_value, store_0xff;
1892 __ cmpl(EAX, compiler::Immediate(0xFF));
1893 __ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
1894 // Clamp to 0x0 or 0xFF respectively.
1895 __ j(GREATER, &store_0xff);
1896 __ xorl(EAX, EAX);
1897 __ jmp(&store_value, compiler::Assembler::kNearJump);
1898 __ Bind(&store_0xff);
1899 __ movl(EAX, compiler::Immediate(0xFF));
1900 __ Bind(&store_value);
1901 __ movb(element_address, AL);
1902 }
1903 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
1904 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
1905 if (locs()->in(2).IsConstant()) {
1906 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1907 __ movb(element_address,
1908 compiler::Immediate(static_cast<int8_t>(constant.Value())));
1909 } else {
1910 ASSERT(locs()->in(2).reg() == EAX);
1911 __ movb(element_address, AL);
1912 }
1913 } else if (rep == kUnboxedInt64) {
1914 ASSERT(locs()->in(2).IsPairLocation());
1915 PairLocation* value_pair = locs()->in(2).AsPairLocation();
1916 const Register value_lo = value_pair->At(0).reg();
1917 const Register value_hi = value_pair->At(1).reg();
1918 __ movl(element_address, value_lo);
1919 element_address =
1920 index.IsRegister()
1921 ? compiler::Assembler::ElementAddressForRegIndex(
1922 IsUntagged(), class_id(), index_scale(), index_unboxed,
1923 array, index.reg(), kWordSize)
1924 : compiler::Assembler::ElementAddressForIntIndex(
1925 IsUntagged(), class_id(), index_scale(), array,
1926 Smi::Cast(index.constant()).Value(), kWordSize);
1927 __ movl(element_address, value_hi);
1928 } else {
1929 Register value = locs()->in(2).reg();
1930 __ Store(value, element_address, RepresentationUtils::OperandSize(rep));
1931 }
1932 } else if (RepresentationUtils::IsUnboxed(rep)) {
1933 if (rep == kUnboxedFloat) {
1934 __ movss(element_address, locs()->in(2).fpu_reg());
1935 } else if (rep == kUnboxedDouble) {
1936 __ movsd(element_address, locs()->in(2).fpu_reg());
1937 } else {
1938 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
1939 rep == kUnboxedFloat64x2);
1940 __ movups(element_address, locs()->in(2).fpu_reg());
1941 }
1942 } else if (class_id() == kArrayCid) {
1943 ASSERT(rep == kTagged);
1944 if (ShouldEmitStoreBarrier()) {
1945 Register value = locs()->in(2).reg();
1946 Register slot = locs()->temp(0).reg();
1947 Register scratch = locs()->temp(1).reg();
1948 __ leal(slot, element_address);
1949 __ StoreIntoArray(array, slot, value, CanValueBeSmi(), scratch);
1950 } else if (locs()->in(2).IsConstant()) {
1951 const Object& constant = locs()->in(2).constant();
1952 __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
1953 } else {
1954 Register value = locs()->in(2).reg();
1955 __ StoreIntoObjectNoBarrier(array, element_address, value);
1956 }
1957 } else {
1958 UNREACHABLE();
1959 }
1960}
1961
1962DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
1963
1964LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
1965 bool opt) const {
1966 const intptr_t kNumInputs = 1;
1967
1968 const intptr_t value_cid = value()->Type()->ToCid();
1969 const intptr_t field_cid = field().guarded_cid();
1970
1971 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
1972 const bool needs_value_cid_temp_reg =
1973 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
1974 const bool needs_field_temp_reg = emit_full_guard;
1975
1976 intptr_t num_temps = 0;
1977 if (needs_value_cid_temp_reg) {
1978 num_temps++;
1979 }
1980 if (needs_field_temp_reg) {
1981 num_temps++;
1982 }
1983
1984 LocationSummary* summary = new (zone)
1985 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
1986 summary->set_in(0, Location::RequiresRegister());
1987
1988 for (intptr_t i = 0; i < num_temps; i++) {
1989 summary->set_temp(i, Location::RequiresRegister());
1990 }
1991
1992 return summary;
1993}
1994
1995void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1996 ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
1997 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
1998 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
1999
2000 const intptr_t value_cid = value()->Type()->ToCid();
2001 const intptr_t field_cid = field().guarded_cid();
2002 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
2003
2004 if (field_cid == kDynamicCid) {
2005 return; // Nothing to emit.
2006 }
2007
2008 const bool emit_full_guard =
2009 !compiler->is_optimizing() || (field_cid == kIllegalCid);
2010
2011 const bool needs_value_cid_temp_reg =
2012 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
2013
2014 const bool needs_field_temp_reg = emit_full_guard;
2015
2016 const Register value_reg = locs()->in(0).reg();
2017
2018 const Register value_cid_reg =
2019 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
2020
2021 const Register field_reg = needs_field_temp_reg
2022 ? locs()->temp(locs()->temp_count() - 1).reg()
2023 : kNoRegister;
2024
2025 compiler::Label ok, fail_label;
2026
2027 compiler::Label* deopt = nullptr;
2028 if (compiler->is_optimizing()) {
2029 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
2030 }
2031
2032 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
2033
2034 if (emit_full_guard) {
2035 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2036
2037 compiler::FieldAddress field_cid_operand(field_reg,
2038 Field::guarded_cid_offset());
2039 compiler::FieldAddress field_nullability_operand(
2040 field_reg, Field::is_nullable_offset());
2041
2042 if (value_cid == kDynamicCid) {
2043 LoadValueCid(compiler, value_cid_reg, value_reg);
2044 __ cmpl(value_cid_reg, field_cid_operand);
2045 __ j(EQUAL, &ok);
2046 __ cmpl(value_cid_reg, field_nullability_operand);
2047 } else if (value_cid == kNullCid) {
2048 // Value in graph known to be null.
2049 // Compare with null.
2050 __ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
2051 } else {
2052 // Value in graph known to be non-null.
2053 // Compare class id with guard field class id.
2054 __ cmpl(field_cid_operand, compiler::Immediate(value_cid));
2055 }
2056 __ j(EQUAL, &ok);
2057
2058 // Check if the tracked state of the guarded field can be initialized
2059 // inline. If the field needs length check we fall through to runtime
2060 // which is responsible for computing offset of the length field
2061 // based on the class id.
2062 // Length guard will be emitted separately when needed via GuardFieldLength
2063 // instruction after GuardFieldClass.
2064 if (!field().needs_length_check()) {
2065 // Uninitialized field can be handled inline. Check if the
2066 // field is still uninitialized.
2067 __ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
2068 // Jump to failure path when guard field has been initialized and
2069 // the field and value class ids do not match.
2070 __ j(NOT_EQUAL, fail);
2071
2072 if (value_cid == kDynamicCid) {
2073 // Do not know value's class id.
2074 __ movl(field_cid_operand, value_cid_reg);
2075 __ movl(field_nullability_operand, value_cid_reg);
2076 } else {
2077 ASSERT(field_reg != kNoRegister);
2078 __ movl(field_cid_operand, compiler::Immediate(value_cid));
2079 __ movl(field_nullability_operand, compiler::Immediate(value_cid));
2080 }
2081
2082 __ jmp(&ok);
2083 }
2084
2085 if (deopt == nullptr) {
2086 __ Bind(fail);
2087
2088 __ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
2089 compiler::Immediate(kDynamicCid));
2090 __ j(EQUAL, &ok);
2091
2092 __ pushl(field_reg);
2093 __ pushl(value_reg);
2094 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2095 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2096 __ Drop(2); // Drop the field and the value.
2097 } else {
2098 __ jmp(fail);
2099 }
2100 } else {
2101 ASSERT(compiler->is_optimizing());
2102 ASSERT(deopt != nullptr);
2103 ASSERT(fail == deopt);
2104
2105 // Field guard class has been initialized and is known.
2106 if (value_cid == kDynamicCid) {
2107 // Value's class id is not known.
2108 __ testl(value_reg, compiler::Immediate(kSmiTagMask));
2109
2110 if (field_cid != kSmiCid) {
2111 __ j(ZERO, fail);
2112 __ LoadClassId(value_cid_reg, value_reg);
2113 __ cmpl(value_cid_reg, compiler::Immediate(field_cid));
2114 }
2115
2116 if (field().is_nullable() && (field_cid != kNullCid)) {
2117 __ j(EQUAL, &ok);
2118 if (field_cid != kSmiCid) {
2119 __ cmpl(value_cid_reg, compiler::Immediate(kNullCid));
2120 } else {
2121 const compiler::Immediate& raw_null =
2122 compiler::Immediate(static_cast<intptr_t>(Object::null()));
2123 __ cmpl(value_reg, raw_null);
2124 }
2125 }
2126 __ j(NOT_EQUAL, fail);
2127 } else if (value_cid == field_cid) {
2128 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2129 // may sometimes produce this situation after the last Canonicalize pass.
2130 } else {
2131 // Both value's and field's class id is known.
2132 ASSERT(value_cid != nullability);
2133 __ jmp(fail);
2134 }
2135 }
2136 __ Bind(&ok);
2137}
2138
2139LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2140 bool opt) const {
2141 const intptr_t kNumInputs = 1;
2142 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2143 const intptr_t kNumTemps = 3;
2144 LocationSummary* summary = new (zone)
2145 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2146 summary->set_in(0, Location::RequiresRegister());
2147 // We need temporaries for field object, length offset and expected length.
2148 summary->set_temp(0, Location::RequiresRegister());
2149 summary->set_temp(1, Location::RequiresRegister());
2150 summary->set_temp(2, Location::RequiresRegister());
2151 return summary;
2152 } else {
2153 LocationSummary* summary = new (zone)
2154 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2155 summary->set_in(0, Location::RequiresRegister());
2156 return summary;
2157 }
2158 UNREACHABLE();
2159}
2160
2161void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2162 if (field().guarded_list_length() == Field::kNoFixedLength) {
2163 return; // Nothing to emit.
2164 }
2165
2166 compiler::Label* deopt =
2167 compiler->is_optimizing()
2168 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2169 : nullptr;
2170
2171 const Register value_reg = locs()->in(0).reg();
2172
2173 if (!compiler->is_optimizing() ||
2174 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2175 const Register field_reg = locs()->temp(0).reg();
2176 const Register offset_reg = locs()->temp(1).reg();
2177 const Register length_reg = locs()->temp(2).reg();
2178
2179 compiler::Label ok;
2180
2181 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2182
2183 __ movsxb(
2184 offset_reg,
2185 compiler::FieldAddress(
2186 field_reg, Field::guarded_list_length_in_object_offset_offset()));
2187 __ movl(length_reg, compiler::FieldAddress(
2188 field_reg, Field::guarded_list_length_offset()));
2189
2190 __ cmpl(offset_reg, compiler::Immediate(0));
2191 __ j(NEGATIVE, &ok);
2192
2193 // Load the length from the value. GuardFieldClass already verified that
2194 // value's class matches guarded class id of the field.
2195 // offset_reg contains the offset already corrected by -kHeapObjectTag, which
2196 // is why we use Address instead of FieldAddress.
2197 __ cmpl(length_reg, compiler::Address(value_reg, offset_reg, TIMES_1, 0));
2198
2199 if (deopt == nullptr) {
2200 __ j(EQUAL, &ok);
2201
2202 __ pushl(field_reg);
2203 __ pushl(value_reg);
2204 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2205 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2206 __ Drop(2); // Drop the field and the value.
2207 } else {
2208 __ j(NOT_EQUAL, deopt);
2209 }
2210
2211 __ Bind(&ok);
2212 } else {
2213 ASSERT(compiler->is_optimizing());
2214 ASSERT(field().guarded_list_length() >= 0);
2215 ASSERT(field().guarded_list_length_in_object_offset() !=
2216 Field::kUnknownLengthOffset);
2217
2218 __ cmpl(compiler::FieldAddress(
2219 value_reg, field().guarded_list_length_in_object_offset()),
2220 compiler::Immediate(Smi::RawValue(field().guarded_list_length())));
2221 __ j(NOT_EQUAL, deopt);
2222 }
2223}
2224
2225LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2226 bool opt) const {
2227 LocationSummary* locs =
2228 new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall);
2229 locs->set_in(0, value()->NeedsWriteBarrier() ? Location::WritableRegister()
2230 : Location::RequiresRegister());
2231 locs->set_temp(0, Location::RequiresRegister());
2232 return locs;
2233}
2234
2235void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2236 Register value = locs()->in(0).reg();
2237 Register temp = locs()->temp(0).reg();
2238
2239 compiler->used_static_fields().Add(&field());
2240
2241 __ movl(temp,
2242 compiler::Address(
2243 THR, compiler::target::Thread::field_table_values_offset()));
2244 // Note: static field ids won't be changed by hot-reload.
2245 __ movl(
2246 compiler::Address(temp, compiler::target::FieldTable::OffsetOf(field())),
2247 value);
2248}
2249
2250LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2251 bool opt) const {
2252 const intptr_t kNumInputs = 3;
2253 const intptr_t kNumTemps = 0;
2254 LocationSummary* summary = new (zone)
2255 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2256
2257 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
2258 summary->set_in(1, Location::RegisterLocation(
2259 TypeTestABI::kInstantiatorTypeArgumentsReg));
2260 summary->set_in(
2261 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
2262 summary->set_out(0, Location::RegisterLocation(EAX));
2263 return summary;
2264}
2265
2266void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2267 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2268 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
2269 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
2270
2271 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2272 ASSERT(locs()->out(0).reg() == EAX);
2273}
2274
2275// TODO(srdjan): In case of constant inputs make CreateArray kNoCall and
2276// use slow path stub.
2277LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2278 bool opt) const {
2279 const intptr_t kNumInputs = 2;
2280 const intptr_t kNumTemps = 0;
2281 LocationSummary* locs = new (zone)
2282 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2283 locs->set_in(kTypeArgumentsPos,
2284 Location::RegisterLocation(AllocateArrayABI::kTypeArgumentsReg));
2285 locs->set_in(kLengthPos,
2286 Location::RegisterLocation(AllocateArrayABI::kLengthReg));
2287 locs->set_out(0, Location::RegisterLocation(AllocateArrayABI::kResultReg));
2288 return locs;
2289}
2290
2291// Inlines array allocation for known constant values.
2292static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2293 intptr_t num_elements,
2294 compiler::Label* slow_path,
2295 compiler::Label* done) {
2296 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2297 const intptr_t instance_size = Array::InstanceSize(num_elements);
2298
2299 // Instance in AllocateArrayABI::kResultReg.
2300 // Object end address in EBX.
2301 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2302 compiler::Assembler::kFarJump,
2303 AllocateArrayABI::kResultReg, // instance
2304 EBX, // end address
2305 EDI); // temp
2306
2307 // Store the type argument field.
2308 __ StoreIntoObjectNoBarrier(
2309 AllocateArrayABI::kResultReg,
2310 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2311 Array::type_arguments_offset()),
2312 AllocateArrayABI::kTypeArgumentsReg);
2313
2314 // Set the length field.
2315 __ StoreIntoObjectNoBarrier(
2316 AllocateArrayABI::kResultReg,
2317 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2318 Array::length_offset()),
2319 AllocateArrayABI::kLengthReg);
2320
2321 // Initialize all array elements to raw_null.
2322 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2323 // EBX: new object end address.
2324 // EDI: iterator which initially points to the start of the variable
2325 // data area to be initialized.
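// Small arrays are filled with an unrolled run of stores; larger ones use a
// store loop that advances EDI until it reaches the end address in EBX.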
2326 if (num_elements > 0) {
2327 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2328 const compiler::Immediate& raw_null =
2329 compiler::Immediate(static_cast<intptr_t>(Object::null()));
2330 __ leal(EDI, compiler::FieldAddress(AllocateArrayABI::kResultReg,
2331 sizeof(UntaggedArray)));
2332 if (array_size < (kInlineArraySize * kWordSize)) {
2333 intptr_t current_offset = 0;
2334 __ movl(EBX, raw_null);
2335 while (current_offset < array_size) {
2336 __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2337 compiler::Address(EDI, current_offset),
2338 EBX);
2339 current_offset += kWordSize;
2340 }
2341 } else {
2342 compiler::Label init_loop;
2343 __ Bind(&init_loop);
2344 __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2345 compiler::Address(EDI, 0),
2346 Object::null_object());
2347 __ addl(EDI, compiler::Immediate(kWordSize));
2348 __ cmpl(EDI, EBX);
2349 __ j(BELOW, &init_loop, compiler::Assembler::kNearJump);
2350 }
2351 }
2352 __ jmp(done, compiler::Assembler::kNearJump);
2353}
2354
2355void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2356 compiler::Label slow_path, done;
2357 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2358 if (compiler->is_optimizing() && num_elements()->BindsToConstant() &&
2359 num_elements()->BoundConstant().IsSmi()) {
2360 const intptr_t length =
2361 Smi::Cast(num_elements()->BoundConstant()).Value();
2362 if (Array::IsValidLength(length)) {
2363 InlineArrayAllocation(compiler, length, &slow_path, &done);
2364 }
2365 }
2366 }
2367
2368 __ Bind(&slow_path);
2369 auto object_store = compiler->isolate_group()->object_store();
2370 const auto& allocate_array_stub =
2371 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2372 compiler->GenerateStubCall(source(), allocate_array_stub,
2373 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2374 env());
2375 __ Bind(&done);
2376}
2377
2378 LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
2379 Zone* zone,
2380 bool opt) const {
2381 ASSERT(opt);
2382 const intptr_t kNumInputs = 0;
2383 const intptr_t kNumTemps = 2;
2384 LocationSummary* locs = new (zone) LocationSummary(
2385 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2386 locs->set_temp(0, Location::RequiresRegister());
2387 locs->set_temp(1, Location::RequiresRegister());
2388 locs->set_out(0, Location::RegisterLocation(EAX));
2389 return locs;
2390}
2391
2392class AllocateContextSlowPath
2393 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2394 public:
2395 explicit AllocateContextSlowPath(
2396 AllocateUninitializedContextInstr* instruction)
2397 : TemplateSlowPathCode(instruction) {}
2398
2399 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2400 __ Comment("AllocateContextSlowPath");
2401 __ Bind(entry_label());
2402
2403 LocationSummary* locs = instruction()->locs();
2404 ASSERT(!locs->live_registers()->Contains(locs->out(0)));
2405
2406 compiler->SaveLiveRegisters(locs);
2407
2408 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2409 instruction(), /*num_slow_path_args=*/0);
2410 ASSERT(slow_path_env != nullptr);
2411
2412 __ movl(EDX, compiler::Immediate(instruction()->num_context_variables()));
2413 compiler->GenerateStubCall(instruction()->source(),
2414 StubCode::AllocateContext(),
2415 UntaggedPcDescriptors::kOther, locs,
2416 instruction()->deopt_id(), slow_path_env);
2417 ASSERT(instruction()->locs()->out(0).reg() == EAX);
2418 compiler->RestoreLiveRegisters(instruction()->locs());
2419 __ jmp(exit_label());
2420 }
2421};
2422
2423 void AllocateUninitializedContextInstr::EmitNativeCode(
2424 FlowGraphCompiler* compiler) {
2425 ASSERT(compiler->is_optimizing());
2426 Register temp = locs()->temp(0).reg();
2427 Register temp2 = locs()->temp(1).reg();
2428 Register result = locs()->out(0).reg();
2429 // Try to allocate the object.
2430 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2431 compiler->AddSlowPathCode(slow_path);
2432 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2433
2434 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2435 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2436 compiler::Assembler::kFarJump,
2437 result, // instance
2438 temp, // end address
2439 temp2); // temp
2440
2441 // Set up the number-of-context-variables field.
2442 __ movl(compiler::FieldAddress(result, Context::num_variables_offset()),
2443 compiler::Immediate(num_context_variables()));
2444 } else {
2445 __ Jump(slow_path->entry_label());
2446 }
2447
2448 __ Bind(slow_path->exit_label());
2449}
2450
2451LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
2452 bool opt) const {
2453 const intptr_t kNumInputs = 0;
2454 const intptr_t kNumTemps = 1;
2455 LocationSummary* locs = new (zone)
2456 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2457 locs->set_temp(0, Location::RegisterLocation(EDX));
2458 locs->set_out(0, Location::RegisterLocation(EAX));
2459 return locs;
2460}
2461
2462void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2463 ASSERT(locs()->temp(0).reg() == EDX);
2464 ASSERT(locs()->out(0).reg() == EAX);
2465
2466 __ movl(EDX, compiler::Immediate(num_context_variables()));
2467 compiler->GenerateStubCall(source(), StubCode::AllocateContext(),
2468 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2469 env());
2470}
2471
2472LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
2473 bool opt) const {
2474 const intptr_t kNumInputs = 1;
2475 const intptr_t kNumTemps = 0;
2476 LocationSummary* locs = new (zone)
2477 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2478 locs->set_in(0, Location::RegisterLocation(ECX));
2479 locs->set_out(0, Location::RegisterLocation(EAX));
2480 return locs;
2481}
2482
2483void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2484 ASSERT(locs()->in(0).reg() == ECX);
2485 ASSERT(locs()->out(0).reg() == EAX);
2486
2487 compiler->GenerateStubCall(source(), StubCode::CloneContext(),
2488 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
2489 deopt_id(), env());
2490}
2491
2492LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
2493 bool opt) const {
2494 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
2495}
2496
2497void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2498 __ Bind(compiler->GetJumpLabel(this));
2499 compiler->AddExceptionHandler(this);
2500 if (HasParallelMove()) {
2501 parallel_move()->EmitNativeCode(compiler);
2502 }
2503
2504 // Restore ESP from EBP as we are coming from a throw and the code for
2505 // popping arguments has not been run.
2506 const intptr_t fp_sp_dist =
2507 (compiler::target::frame_layout.first_local_from_fp + 1 -
2508 compiler->StackSize()) *
2509 kWordSize;
2510 ASSERT(fp_sp_dist <= 0);
2511 __ leal(ESP, compiler::Address(EBP, fp_sp_dist));
2512
2513 if (!compiler->is_optimizing()) {
2514 if (raw_exception_var_ != nullptr) {
2515 __ movl(compiler::Address(EBP,
2516 compiler::target::FrameOffsetInBytesForVariable(
2517 raw_exception_var_)),
2518 kExceptionObjectReg);
2519 }
2520 if (raw_stacktrace_var_ != nullptr) {
2521 __ movl(compiler::Address(EBP,
2522 compiler::target::FrameOffsetInBytesForVariable(
2523 raw_stacktrace_var_)),
2524 kStackTraceObjectReg);
2525 }
2526 }
2527}
2528
2529LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
2530 bool opt) const {
2531 const intptr_t kNumInputs = 0;
2532 const intptr_t kNumTemps = opt ? 0 : 1;
2533 LocationSummary* summary = new (zone) LocationSummary(
2534 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2535 if (!opt) {
2536 summary->set_temp(0, Location::RequiresRegister());
2537 }
2538 return summary;
2539}
2540
2541class CheckStackOverflowSlowPath
2542 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
2543 public:
2544 static constexpr intptr_t kNumSlowPathArgs = 0;
2545
2546 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2547 : TemplateSlowPathCode(instruction) {}
2548
2549 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2550 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
2551 __ Comment("CheckStackOverflowSlowPathOsr");
2552 __ Bind(osr_entry_label());
2553 __ movl(compiler::Address(THR, Thread::stack_overflow_flags_offset()),
2554 compiler::Immediate(Thread::kOsrRequest));
2555 }
2556 __ Comment("CheckStackOverflowSlowPath");
2557 __ Bind(entry_label());
2558 compiler->SaveLiveRegisters(instruction()->locs());
2559 // pending_deoptimization_env_ is needed to generate a runtime call that
2560 // may throw an exception.
2561 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
2562 Environment* env = compiler->SlowPathEnvironmentFor(
2563 instruction(), /*num_slow_path_args=*/0);
2564 compiler->pending_deoptimization_env_ = env;
2565
2566 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
2567 compiler->EmitCallsiteMetadata(
2568 instruction()->source(), instruction()->deopt_id(),
2569 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
2570
2571 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
2572 instruction()->in_loop()) {
2573 // In unoptimized code, record loop stack checks as possible OSR entries.
2574 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
2575 instruction()->deopt_id(),
2576 InstructionSource());
2577 }
2578 compiler->pending_deoptimization_env_ = nullptr;
2579 compiler->RestoreLiveRegisters(instruction()->locs());
2580 __ jmp(exit_label());
2581 }
2582
2583 compiler::Label* osr_entry_label() {
2584 ASSERT(IsolateGroup::Current()->use_osr());
2585 return &osr_entry_label_;
2586 }
2587
2588 private:
2589 compiler::Label osr_entry_label_;
2590};
2591
2592void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2593 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
2594 compiler->AddSlowPathCode(slow_path);
2595
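// The stack grows down, so a stack pointer at or below the limit takes the
// slow path; the runtime also delivers interrupt requests through this
// limit check (see kInterruptOrStackOverflowRuntimeEntry above).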
2596 __ cmpl(ESP, compiler::Address(THR, Thread::stack_limit_offset()));
2597 __ j(BELOW_EQUAL, slow_path->entry_label());
2598 if (compiler->CanOSRFunction() && in_loop()) {
2599 // In unoptimized code check the usage counter to trigger OSR at loop
2600 // stack checks. Use progressively higher thresholds for more deeply
2601 // nested loops to attempt to hit outer loops with OSR when possible.
2602 __ LoadObject(EDI, compiler->parsed_function().function());
2603 const intptr_t configured_optimization_counter_threshold =
2604 compiler->thread()->isolate_group()->optimization_counter_threshold();
2605 const int32_t threshold =
2606 configured_optimization_counter_threshold * (loop_depth() + 1);
2607 __ incl(compiler::FieldAddress(EDI, Function::usage_counter_offset()));
2608 __ cmpl(compiler::FieldAddress(EDI, Function::usage_counter_offset()),
2609 compiler::Immediate(threshold));
2610 __ j(GREATER_EQUAL, slow_path->osr_entry_label());
2611 }
2612 if (compiler->ForceSlowPathForStackOverflow()) {
2613 // TODO(turnidge): Implement stack overflow count in assembly to
2614 // make --stacktrace-every and --deoptimize-every faster.
2615 __ jmp(slow_path->entry_label());
2616 }
2617 __ Bind(slow_path->exit_label());
2618}
2619
2620static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
2621 BinarySmiOpInstr* shift_left) {
2622 const LocationSummary& locs = *shift_left->locs();
2623 Register left = locs.in(0).reg();
2624 Register result = locs.out(0).reg();
2625 ASSERT(left == result);
2626 compiler::Label* deopt =
2627 shift_left->CanDeoptimize()
2628 ? compiler->AddDeoptStub(shift_left->deopt_id(),
2629 ICData::kDeoptBinarySmiOp)
2630 : nullptr;
2631 if (locs.in(1).IsConstant()) {
2632 const Object& constant = locs.in(1).constant();
2633 ASSERT(constant.IsSmi());
2634 // shll operation masks the count to 5 bits.
2635 const intptr_t kCountLimit = 0x1F;
2636 const intptr_t value = Smi::Cast(constant).Value();
2637 ASSERT((0 < value) && (value < kCountLimit));
2638 if (shift_left->can_overflow()) {
2639 if (value == 1) {
2640 // Use overflow flag.
2641 __ shll(left, compiler::Immediate(1));
2642 __ j(OVERFLOW, deopt);
2643 return;
2644 }
2645 // Check for overflow.
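// (left << value) >> value == left exactly when no significant bits were
// shifted out, so compare the round-trip against the saved value.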
2646 Register temp = locs.temp(0).reg();
2647 __ movl(temp, left);
2648 __ shll(left, compiler::Immediate(value));
2649 __ sarl(left, compiler::Immediate(value));
2650 __ cmpl(left, temp);
2651 __ j(NOT_EQUAL, deopt); // Overflow.
2652 }
2653 // Shift for the result now that we know there is no overflow.
2654 __ shll(left, compiler::Immediate(value));
2655 return;
2656 }
2657
2658 // Right (locs.in(1)) is not constant.
2659 Register right = locs.in(1).reg();
2660 Range* right_range = shift_left->right_range();
2661 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
2662 // TODO(srdjan): Implement code below for can_overflow().
2663 // If left is constant, we know the maximal allowed size for right.
2664 const Object& obj = shift_left->left()->BoundConstant();
2665 if (obj.IsSmi()) {
2666 const intptr_t left_int = Smi::Cast(obj).Value();
2667 if (left_int == 0) {
2668 __ cmpl(right, compiler::Immediate(0));
2669 __ j(NEGATIVE, deopt);
2670 return;
2671 }
2672 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
2673 const bool right_needs_check =
2674 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
2675 if (right_needs_check) {
2676 __ cmpl(right,
2677 compiler::Immediate(static_cast<int32_t>(Smi::New(max_right))));
2678 __ j(ABOVE_EQUAL, deopt);
2679 }
2680 __ SmiUntag(right);
2681 __ shll(left, right);
2682 }
2683 return;
2684 }
2685
2686 const bool right_needs_check =
2687 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
2688 ASSERT(right == ECX); // Count must be in ECX
2689 if (!shift_left->can_overflow()) {
2690 if (right_needs_check) {
2691 if (!RangeUtils::IsPositive(right_range)) {
2692 ASSERT(shift_left->CanDeoptimize());
2693 __ cmpl(right, compiler::Immediate(0));
2694 __ j(NEGATIVE, deopt);
2695 }
2696 compiler::Label done, is_not_zero;
2697 __ cmpl(right,
2698 compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
2699 __ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump);
2700 __ xorl(left, left);
2701 __ jmp(&done, compiler::Assembler::kNearJump);
2702 __ Bind(&is_not_zero);
2703 __ SmiUntag(right);
2704 __ shll(left, right);
2705 __ Bind(&done);
2706 } else {
2707 __ SmiUntag(right);
2708 __ shll(left, right);
2709 }
2710 } else {
2711 if (right_needs_check) {
2712 ASSERT(shift_left->CanDeoptimize());
2713 __ cmpl(right,
2714 compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
2715 __ j(ABOVE_EQUAL, deopt);
2716 }
2717 // Left is not a constant.
2718 Register temp = locs.temp(0).reg();
2719 // Check if the count is too large to handle inline.
2720 __ movl(temp, left);
2721 __ SmiUntag(right);
2722 // Overflow test (preserves temp and right).
2723 __ shll(left, right);
2724 __ sarl(left, right);
2725 __ cmpl(left, temp);
2726 __ j(NOT_EQUAL, deopt); // Overflow.
2727 // Shift for the result now that we know there is no overflow.
2728 __ shll(left, right);
2729 }
2730}
2731
2732static bool IsSmiValue(const Object& constant, intptr_t value) {
2733 return constant.IsSmi() && (Smi::Cast(constant).Value() == value);
2734}
2735
2736LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
2737 bool opt) const {
2738 const intptr_t kNumInputs = 2;
2739 if (op_kind() == Token::kTRUNCDIV) {
2740 const intptr_t kNumTemps = 1;
2741 LocationSummary* summary = new (zone)
2742 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2743 if (RightIsPowerOfTwoConstant()) {
2744 summary->set_in(0, Location::RequiresRegister());
2745 ConstantInstr* right_constant = right()->definition()->AsConstant();
2746 // The programmer only controls one bit, so the constant is safe.
2747 summary->set_in(1, Location::Constant(right_constant));
2748 summary->set_temp(0, Location::RequiresRegister());
2749 summary->set_out(0, Location::SameAsFirstInput());
2750 } else {
2751 // Both inputs must be writable because they will be untagged.
2752 summary->set_in(0, Location::RegisterLocation(EAX));
2753 summary->set_in(1, Location::WritableRegister());
2754 summary->set_out(0, Location::SameAsFirstInput());
2755 // Will be used for sign extension and division.
2756 summary->set_temp(0, Location::RegisterLocation(EDX));
2757 }
2758 return summary;
2759 } else if (op_kind() == Token::kMOD) {
2760 const intptr_t kNumTemps = 1;
2761 LocationSummary* summary = new (zone)
2762 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2763 // Both inputs must be writable because they will be untagged.
2764 summary->set_in(0, Location::RegisterLocation(EDX));
2765 summary->set_in(1, Location::WritableRegister());
2766 summary->set_out(0, Location::SameAsFirstInput());
2767 // Will be used for sign extension and division.
2768 summary->set_temp(0, Location::RegisterLocation(EAX));
2769 return summary;
2770 } else if ((op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
2771 const intptr_t kNumTemps = 0;
2772 LocationSummary* summary = new (zone)
2773 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2774 summary->set_in(0, Location::RequiresRegister());
2775 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
2776 summary->set_out(0, Location::SameAsFirstInput());
2777 return summary;
2778 } else if (op_kind() == Token::kSHL) {
2779 ConstantInstr* right_constant = right()->definition()->AsConstant();
2780 // Shift-by-1 overflow checking can use flags, otherwise we need a temp.
2781 const bool shiftBy1 =
2782 (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
2783 const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
2784 LocationSummary* summary = new (zone)
2785 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2786 summary->set_in(0, Location::RequiresRegister());
2787 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
2788 if (kNumTemps == 1) {
2789 summary->set_temp(0, Location::RequiresRegister());
2790 }
2791 summary->set_out(0, Location::SameAsFirstInput());
2792 return summary;
2793 } else {
2794 const intptr_t kNumTemps = 0;
2795 LocationSummary* summary = new (zone)
2796 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2797 summary->set_in(0, Location::RequiresRegister());
2798 ConstantInstr* constant = right()->definition()->AsConstant();
2799 if (constant != nullptr) {
2800 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
2801 } else {
2802 summary->set_in(1, Location::PrefersRegister());
2803 }
2804 summary->set_out(0, Location::SameAsFirstInput());
2805 return summary;
2806 }
2807}
2808
2809template <typename OperandType>
2810static void EmitIntegerArithmetic(FlowGraphCompiler* compiler,
2811 Token::Kind op_kind,
2812 Register left,
2813 const OperandType& right,
2814 compiler::Label* deopt) {
2815 switch (op_kind) {
2816 case Token::kADD:
2817 __ addl(left, right);
2818 break;
2819 case Token::kSUB:
2820 __ subl(left, right);
2821 break;
2822 case Token::kBIT_AND:
2823 __ andl(left, right);
2824 break;
2825 case Token::kBIT_OR:
2826 __ orl(left, right);
2827 break;
2828 case Token::kBIT_XOR:
2829 __ xorl(left, right);
2830 break;
2831 case Token::kMUL:
2832 __ imull(left, right);
2833 break;
2834 default:
2835 UNREACHABLE();
2836 }
2837 if (deopt != nullptr) __ j(OVERFLOW, deopt);
2838}
2839
2840void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2841 if (op_kind() == Token::kSHL) {
2842 EmitSmiShiftLeft(compiler, this);
2843 return;
2844 }
2845
2846 Register left = locs()->in(0).reg();
2847 Register result = locs()->out(0).reg();
2848 ASSERT(left == result);
2849 compiler::Label* deopt = nullptr;
2850 if (CanDeoptimize()) {
2851 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
2852 }
2853
2854 if (locs()->in(1).IsConstant()) {
2855 const Object& constant = locs()->in(1).constant();
2856 ASSERT(constant.IsSmi());
2857 const intptr_t value = Smi::Cast(constant).Value();
2858 switch (op_kind()) {
2859 case Token::kADD:
2860 case Token::kSUB:
2861 case Token::kBIT_AND:
2862 case Token::kBIT_OR:
2863 case Token::kBIT_XOR:
2864 case Token::kMUL: {
2865 const intptr_t imm =
2866 (op_kind() == Token::kMUL) ? value : Smi::RawValue(value);
2867 EmitIntegerArithmetic(compiler, op_kind(), left,
2868 compiler::Immediate(imm), deopt);
2869 break;
2870 }
2871
2872 case Token::kTRUNCDIV: {
2873 ASSERT(value != kIntptrMin);
2874 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
2875 const intptr_t shift_count =
2876 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
2877 ASSERT(kSmiTagSize == 1);
2878 Register temp = locs()->temp(0).reg();
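// Power-of-two division: for a negative dividend, add (2^shift_count - 1),
// built by shifting the replicated sign bit, so the arithmetic shift below
// truncates toward zero rather than toward negative infinity.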
2879 __ movl(temp, left);
2880 __ sarl(temp, compiler::Immediate(31));
2881 ASSERT(shift_count > 1); // 1, -1 case handled above.
2882 __ shrl(temp, compiler::Immediate(32 - shift_count));
2883 __ addl(left, temp);
2884 ASSERT(shift_count > 0);
2885 __ sarl(left, compiler::Immediate(shift_count));
2886 if (value < 0) {
2887 __ negl(left);
2888 }
2889 __ SmiTag(left);
2890 break;
2891 }
2892
2893 case Token::kSHR: {
2894 // sarl operation masks the count to 5 bits.
2895 const intptr_t kCountLimit = 0x1F;
2896 __ sarl(left, compiler::Immediate(
2897 Utils::Minimum(value + kSmiTagSize, kCountLimit)));
2898 __ SmiTag(left);
2899 break;
2900 }
2901
2902 case Token::kUSHR: {
2903 ASSERT((value > 0) && (value < 64));
2904 COMPILE_ASSERT(compiler::target::kSmiBits < 32);
2905 // 64-bit representation of left operand value:
2906 //
2907 // ss...sssss s s xxxxxxxxxxxxx
2908 // | | | | | |
2909 // 63 32 31 30 kSmiBits-1 0
2910 //
2911 // Where 's' is a sign bit.
2912 //
2913 // If left operand is negative (sign bit is set), then
2914 // result will fit into Smi range if and only if
2915 // the shift amount >= 64 - kSmiBits.
2916 //
2917 // If left operand is non-negative, the result always
2918 // fits into Smi range.
2919 //
2920 if (value < (64 - compiler::target::kSmiBits)) {
2921 if (deopt != nullptr) {
2922 __ testl(left, left);
2923 __ j(LESS, deopt);
2924 } else {
2925 // Operation cannot overflow only if left value is always
2926 // non-negative.
2927 ASSERT(!can_overflow());
2928 }
2929 // At this point left operand is non-negative, so unsigned shift
2930 // can't overflow.
2931 if (value >= compiler::target::kSmiBits) {
2932 __ xorl(left, left);
2933 } else {
2934 __ shrl(left, compiler::Immediate(value + kSmiTagSize));
2935 __ SmiTag(left);
2936 }
2937 } else {
2938 // Shift amount > 32, and the result is guaranteed to fit into Smi.
2939 // Low (Smi) part of the left operand is shifted out.
2940 // High part is filled with sign bits.
2941 __ sarl(left, compiler::Immediate(31));
2942 __ shrl(left, compiler::Immediate(value - 32));
2943 __ SmiTag(left);
2944 }
2945 break;
2946 }
2947
2948 default:
2949 UNREACHABLE();
2950 break;
2951 }
2952 return;
2953 } // if locs()->in(1).IsConstant()
2954
2955 if (locs()->in(1).IsStackSlot()) {
2956 const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
2957 if (op_kind() == Token::kMUL) {
2958 __ SmiUntag(left);
2959 }
2960 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
2961 return;
2962 }
2963
2964 // if locs()->in(1).IsRegister.
2965 Register right = locs()->in(1).reg();
2966 switch (op_kind()) {
2967 case Token::kADD:
2968 case Token::kSUB:
2969 case Token::kBIT_AND:
2970 case Token::kBIT_OR:
2971 case Token::kBIT_XOR:
2972 case Token::kMUL:
2973 if (op_kind() == Token::kMUL) {
2974 __ SmiUntag(left);
2975 }
2976 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
2977 break;
2978
2979 case Token::kTRUNCDIV: {
2980 if (RangeUtils::CanBeZero(right_range())) {
2981 // Handle divide by zero in runtime.
2982 __ testl(right, right);
2983 __ j(ZERO, deopt);
2984 }
2985 ASSERT(left == EAX);
2986 ASSERT((right != EDX) && (right != EAX));
2987 ASSERT(locs()->temp(0).reg() == EDX);
2988 ASSERT(result == EAX);
2989 __ SmiUntag(left);
2990 __ SmiUntag(right);
2991 __ cdq(); // Sign extend EAX -> EDX:EAX.
2992 __ idivl(right); // EAX: quotient, EDX: remainder.
2993 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
2994 // Check the corner case of dividing MIN_SMI by -1, in which
2995 // case we cannot tag the result.
2996 __ cmpl(result, compiler::Immediate(0x40000000));
2997 __ j(EQUAL, deopt);
2998 }
2999 __ SmiTag(result);
3000 break;
3001 }
3002 case Token::kMOD: {
3003 if (RangeUtils::CanBeZero(right_range())) {
3004 // Handle divide by zero in runtime.
3005 __ testl(right, right);
3006 __ j(ZERO, deopt);
3007 }
3008 ASSERT(left == EDX);
3009 ASSERT((right != EDX) && (right != EAX));
3010 ASSERT(locs()->temp(0).reg() == EAX);
3011 ASSERT(result == EDX);
3012 __ SmiUntag(left);
3013 __ SmiUntag(right);
3014 __ movl(EAX, EDX);
3015 __ cdq(); // Sign extend EAX -> EDX:EAX.
3016 __ idivl(right); // EAX: quotient, EDX: remainder.
3017 // res = left % right;
3018 // if (res < 0) {
3019 // if (right < 0) {
3020 // res = res - right;
3021 // } else {
3022 // res = res + right;
3023 // }
3024 // }
3025 compiler::Label done;
3026 __ cmpl(result, compiler::Immediate(0));
3027 __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump);
3028 // Result is negative, adjust it.
3029 if (RangeUtils::Overlaps(right_range(), -1, 1)) {
3030 // Right can be positive and negative.
3031 compiler::Label subtract;
3032 __ cmpl(right, compiler::Immediate(0));
3033 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
3034 __ addl(result, right);
3035 __ jmp(&done, compiler::Assembler::kNearJump);
3036 __ Bind(&subtract);
3037 __ subl(result, right);
3038 } else if (right_range()->IsPositive()) {
3039 // Right is positive.
3040 __ addl(result, right);
3041 } else {
3042 // Right is negative.
3043 __ subl(result, right);
3044 }
3045 __ Bind(&done);
3046 __ SmiTag(result);
3047 break;
3048 }
3049 case Token::kSHR: {
3050 if (CanDeoptimize()) {
3051 __ cmpl(right, compiler::Immediate(0));
3052 __ j(LESS, deopt);
3053 }
3054 __ SmiUntag(right);
3055 // sarl operation masks the count to 5 bits.
3056 const intptr_t kCountLimit = 0x1F;
3057 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3058 __ cmpl(right, compiler::Immediate(kCountLimit));
3059 compiler::Label count_ok;
3060 __ j(LESS, &count_ok, compiler::Assembler::kNearJump);
3061 __ movl(right, compiler::Immediate(kCountLimit));
3062 __ Bind(&count_ok);
3063 }
3064 ASSERT(right == ECX); // Count must be in ECX
3065 __ SmiUntag(left);
3066 __ sarl(left, right);
3067 __ SmiTag(left);
3068 break;
3069 }
3070 case Token::kUSHR: {
3071 compiler::Label done;
3072 __ SmiUntag(right);
3073 // 64-bit representation of left operand value:
3074 //
3075 // ss...sssss s s xxxxxxxxxxxxx
3076 // | | | | | |
3077 // 63 32 31 30 kSmiBits-1 0
3078 //
3079 // Where 's' is a sign bit.
3080 //
3081 // If left operand is negative (sign bit is set), then
3082 // result will fit into Smi range if and only if
3083 // the shift amount >= 64 - kSmiBits.
3084 //
3085 // If left operand is non-negative, the result always
3086 // fits into Smi range.
3087 //
3088 if (!RangeUtils::OnlyLessThanOrEqualTo(
3089 right_range(), 64 - compiler::target::kSmiBits - 1)) {
3090 __ cmpl(right, compiler::Immediate(64 - compiler::target::kSmiBits));
3091 compiler::Label shift_less_34;
3092 __ j(LESS, &shift_less_34, compiler::Assembler::kNearJump);
3093 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3094 kBitsPerInt64 - 1)) {
3095 __ cmpl(right, compiler::Immediate(kBitsPerInt64));
3096 compiler::Label shift_less_64;
3097 __ j(LESS, &shift_less_64, compiler::Assembler::kNearJump);
3098 // Shift amount >= 64. Result is 0.
3099 __ xorl(left, left);
3100 __ jmp(&done, compiler::Assembler::kNearJump);
3101 __ Bind(&shift_less_64);
3102 }
3103 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3104 // Result is guaranteed to fit into Smi range.
3105 // Low (Smi) part of the left operand is shifted out.
3106 // High part is filled with sign bits.
3107 ASSERT(right == ECX); // Count must be in ECX
3108 __ subl(right, compiler::Immediate(32));
3109 __ sarl(left, compiler::Immediate(31));
3110 __ shrl(left, right);
3111 __ SmiTag(left);
3112 __ jmp(&done, compiler::Assembler::kNearJump);
3113 __ Bind(&shift_less_34);
3114 }
3115 // Shift amount < 64 - kSmiBits.
3116 // If left is negative, then result will not fit into Smi range.
3117 // Also deopt in case of negative shift amount.
3118 if (deopt != nullptr) {
3119 __ testl(left, left);
3120 __ j(LESS, deopt);
3121 __ testl(right, right);
3122 __ j(LESS, deopt);
3123 } else {
3124 ASSERT(!can_overflow());
3125 }
3126 // At this point left operand is non-negative, so unsigned shift
3127 // can't overflow.
3128 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3129 compiler::target::kSmiBits - 1)) {
3130 __ cmpl(right, compiler::Immediate(compiler::target::kSmiBits));
3131 compiler::Label shift_less_30;
3132 __ j(LESS, &shift_less_30, compiler::Assembler::kNearJump);
3133 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3134 __ xorl(left, left);
3135 __ jmp(&done, compiler::Assembler::kNearJump);
3136 __ Bind(&shift_less_30);
3137 }
3138 // Left operand >= 0, shift amount < kSmiBits < 32.
3139 ASSERT(right == ECX); // Count must be in ECX
3140 __ SmiUntag(left);
3141 __ shrl(left, right);
3142 __ SmiTag(left);
3143 __ Bind(&done);
3144 break;
3145 }
3146 case Token::kDIV: {
3147 // Dispatches to 'Double./'.
3148 // TODO(srdjan): Implement as conversion to double and double division.
3149 UNREACHABLE();
3150 break;
3151 }
3152 case Token::kOR:
3153 case Token::kAND: {
3154 // Flow graph builder has dissected this operation to guarantee correct
3155 // behavior (short-circuit evaluation).
3156 UNREACHABLE();
3157 break;
3158 }
3159 default:
3160 UNREACHABLE();
3161 break;
3162 }
3163}
3164
3165LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
3166 bool opt) const {
3167 const intptr_t kNumInputs = 2;
3168 if (op_kind() == Token::kTRUNCDIV) {
3169 UNREACHABLE();
3170 return nullptr;
3171 } else if (op_kind() == Token::kMOD) {
3172 UNREACHABLE();
3173 return nullptr;
3174 } else if ((op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3175 const intptr_t kNumTemps = 0;
3176 LocationSummary* summary = new (zone)
3177 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3178 summary->set_in(0, Location::RequiresRegister());
3179 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
3180 summary->set_out(0, Location::SameAsFirstInput());
3181 return summary;
3182 } else if (op_kind() == Token::kSHL) {
3183 const intptr_t kNumTemps = can_overflow() ? 1 : 0;
3184 LocationSummary* summary = new (zone)
3185 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3186 summary->set_in(0, Location::RequiresRegister());
3187 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
3188 if (can_overflow()) {
3189 summary->set_temp(0, Location::RequiresRegister());
3190 }
3191 summary->set_out(0, Location::SameAsFirstInput());
3192 return summary;
3193 } else {
3194 const intptr_t kNumTemps = 0;
3195 LocationSummary* summary = new (zone)
3196 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3197 summary->set_in(0, Location::RequiresRegister());
3198 ConstantInstr* constant = right()->definition()->AsConstant();
3199 if (constant != nullptr) {
3200 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3201 } else {
3202 summary->set_in(1, Location::PrefersRegister());
3203 }
3204 summary->set_out(0, Location::SameAsFirstInput());
3205 return summary;
3206 }
3207}
3208
3209static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
3210 BinaryInt32OpInstr* shift_left) {
3211 const LocationSummary& locs = *shift_left->locs();
3212 Register left = locs.in(0).reg();
3213 Register result = locs.out(0).reg();
3214 ASSERT(left == result);
3215 compiler::Label* deopt =
3216 shift_left->CanDeoptimize()
3217 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3218 ICData::kDeoptBinarySmiOp)
3219 : nullptr;
3220 ASSERT(locs.in(1).IsConstant());
3221
3222 const Object& constant = locs.in(1).constant();
3223 ASSERT(constant.IsSmi());
3224 // shll operation masks the count to 5 bits.
3225 const intptr_t kCountLimit = 0x1F;
3226 const intptr_t value = Smi::Cast(constant).Value();
3227 ASSERT((0 < value) && (value < kCountLimit));
3228 if (shift_left->can_overflow()) {
3229 // Check for overflow.
3230 Register temp = locs.temp(0).reg();
3231 __ movl(temp, left);
3232 __ shll(left, compiler::Immediate(value));
3233 __ sarl(left, compiler::Immediate(value));
3234 __ cmpl(left, temp);
3235 __ j(NOT_EQUAL, deopt); // Overflow.
3236 }
3237 // Shift for the result now that we know there is no overflow.
3238 __ shll(left, compiler::Immediate(value));
3239}
3240
3241void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3242 if (op_kind() == Token::kSHL) {
3243 EmitInt32ShiftLeft(compiler, this);
3244 return;
3245 }
3246
3247 Register left = locs()->in(0).reg();
3248 Register result = locs()->out(0).reg();
3249 ASSERT(left == result);
3250 compiler::Label* deopt = nullptr;
3251 if (CanDeoptimize()) {
3252 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3253 }
3254
3255 if (locs()->in(1).IsConstant()) {
3256 const Object& constant = locs()->in(1).constant();
3257 ASSERT(constant.IsSmi());
3258 const intptr_t value = Smi::Cast(constant).Value();
3259 switch (op_kind()) {
3260 case Token::kADD:
3261 case Token::kSUB:
3262 case Token::kMUL:
3263 case Token::kBIT_AND:
3264 case Token::kBIT_OR:
3265 case Token::kBIT_XOR:
3266 EmitIntegerArithmetic(compiler, op_kind(), left,
3267 compiler::Immediate(value), deopt);
3268 break;
3269
3270 case Token::kTRUNCDIV: {
3271 UNREACHABLE();
3272 break;
3273 }
3274
3275 case Token::kSHR: {
3276 // sarl operation masks the count to 5 bits.
3277 const intptr_t kCountLimit = 0x1F;
3278 __ sarl(left, compiler::Immediate(Utils::Minimum(value, kCountLimit)));
3279 break;
3280 }
3281
3282 case Token::kUSHR: {
3283 ASSERT((value > 0) && (value < 64));
3284 // 64-bit representation of left operand value:
3285 //
3286 // ss...sssss s xxxxxxxxxxxxx
3287 // | | | | |
3288 // 63 32 31 30 0
3289 //
3290 // Where 's' is a sign bit.
3291 //
3292 // If left operand is negative (sign bit is set), then
3293 // result will fit into Int32 range if and only if
3294 // the shift amount > 32.
3295 //
3296 if (value <= 32) {
3297 if (deopt != nullptr) {
3298 __ testl(left, left);
3299 __ j(LESS, deopt);
3300 } else {
3301 // Operation cannot overflow only if left value is always
3302 // non-negative.
3303 ASSERT(!can_overflow());
3304 }
3305 // At this point left operand is non-negative, so unsigned shift
3306 // can't overflow.
3307 if (value == 32) {
3308 __ xorl(left, left);
3309 } else {
3310 __ shrl(left, compiler::Immediate(value));
3311 }
3312 } else {
3313 // Shift amount > 32.
3314 // Low (Int32) part of the left operand is shifted out.
3315 // Shift high part which is filled with sign bits.
3316 __ sarl(left, compiler::Immediate(31));
3317 __ shrl(left, compiler::Immediate(value - 32));
3318 }
3319 break;
3320 }
3321
3322 default:
3323 UNREACHABLE();
3324 break;
3325 }
3326 return;
3327 } // if locs()->in(1).IsConstant()
3328
3329 if (locs()->in(1).IsStackSlot()) {
3330 const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
3331 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
3332 return;
3333 } // if locs()->in(1).IsStackSlot.
3334
3335 // if locs()->in(1).IsRegister.
3336 Register right = locs()->in(1).reg();
3337 switch (op_kind()) {
3338 case Token::kADD:
3339 case Token::kSUB:
3340 case Token::kMUL:
3341 case Token::kBIT_AND:
3342 case Token::kBIT_OR:
3343 case Token::kBIT_XOR:
3344 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
3345 break;
3346
3347 default:
3348 UNREACHABLE();
3349 break;
3350 }
3351}
3352
3353LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
3354 bool opt) const {
3355 const intptr_t kNumInputs = 2;
3356 const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
3357 LocationSummary* summary = new (zone)
3358 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3359 if (op_kind() == Token::kMUL) {
3360 summary->set_in(0, Location::RegisterLocation(EAX));
3361 summary->set_temp(0, Location::RegisterLocation(EDX));
3362 } else {
3363 summary->set_in(0, Location::RequiresRegister());
3364 }
3365 summary->set_in(1, Location::RequiresRegister());
3366 summary->set_out(0, Location::SameAsFirstInput());
3367 return summary;
3368}
3369
3370void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3371 Register left = locs()->in(0).reg();
3372 Register right = locs()->in(1).reg();
3373 Register out = locs()->out(0).reg();
3374 ASSERT(out == left);
3375 switch (op_kind()) {
3376 case Token::kBIT_AND:
3377 case Token::kBIT_OR:
3378 case Token::kBIT_XOR:
3379 case Token::kADD:
3380 case Token::kSUB:
3381 EmitIntegerArithmetic(compiler, op_kind(), left, right, nullptr);
3382 return;
3383
3384 case Token::kMUL:
3385 __ mull(right); // Result in EDX:EAX.
3386 ASSERT(out == EAX);
3387 ASSERT(locs()->temp(0).reg() == EDX);
3388 break;
3389 default:
3390 UNREACHABLE();
3391 }
3392}
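// Note on the kMUL case above: the one-operand mull computes
// EDX:EAX := EAX * right, which is why the location summary pins the input
// to EAX and blocks EDX as a temp. Uint32 arithmetic is truncating, so only
// the low word in EAX is kept and the high word in EDX is discarded.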
3393
3394LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3395 bool opt) const {
3396 intptr_t left_cid = left()->Type()->ToCid();
3397 intptr_t right_cid = right()->Type()->ToCid();
3398 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3399 const intptr_t kNumInputs = 2;
3400 const bool need_temp = (left()->definition() != right()->definition()) &&
3401 (left_cid != kSmiCid) && (right_cid != kSmiCid);
3402 const intptr_t kNumTemps = need_temp ? 1 : 0;
3403 LocationSummary* summary = new (zone)
3404 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3405 summary->set_in(0, Location::RequiresRegister());
3406 summary->set_in(1, Location::RequiresRegister());
3407 if (need_temp) summary->set_temp(0, Location::RequiresRegister());
3408 return summary;
3409}
3410
3411void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3412 compiler::Label* deopt =
3413 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3414 intptr_t left_cid = left()->Type()->ToCid();
3415 intptr_t right_cid = right()->Type()->ToCid();
3416 Register left = locs()->in(0).reg();
3417 Register right = locs()->in(1).reg();
3418 if (this->left()->definition() == this->right()->definition()) {
3419 __ testl(left, compiler::Immediate(kSmiTagMask));
3420 } else if (left_cid == kSmiCid) {
3421 __ testl(right, compiler::Immediate(kSmiTagMask));
3422 } else if (right_cid == kSmiCid) {
3423 __ testl(left, compiler::Immediate(kSmiTagMask));
3424 } else {
3425 Register temp = locs()->temp(0).reg();
3426 __ movl(temp, left);
3427 __ orl(temp, right);
3428 __ testl(temp, compiler::Immediate(kSmiTagMask));
3429 }
3430 __ j(ZERO, deopt);
3431}
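// The combined check above relies on kSmiTag == 0: a Smi has a clear low
// bit, so (left | right) has a clear tag bit only when both operands are
// Smis. testl then sets ZF exactly in that case and the instruction
// deoptimizes, since a Smi/Smi pair should not reach this double operation.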
3432
3433LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3434 const intptr_t kNumInputs = 1;
3435 const intptr_t kNumTemps = 1;
3436 LocationSummary* summary = new (zone) LocationSummary(
3437 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3438 summary->set_in(0, Location::RequiresFpuRegister());
3439 summary->set_temp(0, Location::RequiresRegister());
3440 summary->set_out(0, Location::RequiresRegister());
3441 return summary;
3442}
3443
3444void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3445 Register out_reg = locs()->out(0).reg();
3446 XmmRegister value = locs()->in(0).fpu_reg();
3447
3448 BoxAllocationSlowPath::Allocate(compiler, this,
3449 compiler->BoxClassFor(from_representation()),
3450 out_reg, locs()->temp(0).reg());
3451
3452 switch (from_representation()) {
3453 case kUnboxedDouble:
3454 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), value);
3455 break;
3456 case kUnboxedFloat:
3457 __ cvtss2sd(FpuTMP, value);
3458 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), FpuTMP);
3459 break;
3460 case kUnboxedFloat32x4:
3461 case kUnboxedFloat64x2:
3462 case kUnboxedInt32x4:
3463 __ movups(compiler::FieldAddress(out_reg, ValueOffset()), value);
3464 break;
3465 default:
3466 UNREACHABLE();
3467 break;
3468 }
3469}
3470
3471LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3472 ASSERT(BoxCid() != kSmiCid);
3473 const bool needs_temp =
3474 CanDeoptimize() ||
3475 (CanConvertSmi() && (value()->Type()->ToCid() == kSmiCid));
3476
3477 const intptr_t kNumInputs = 1;
3478 const intptr_t kNumTemps = needs_temp ? 1 : 0;
3479 LocationSummary* summary = new (zone)
3480 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3481 summary->set_in(0, Location::RequiresRegister());
3482 if (needs_temp) {
3483 summary->set_temp(0, Location::RequiresRegister());
3484 }
3485 if (representation() == kUnboxedInt64) {
3486 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
3487 Location::RegisterLocation(EDX)));
3488 } else if (representation() == kUnboxedInt32) {
3489 summary->set_out(0, Location::SameAsFirstInput());
3490 } else {
3491 summary->set_out(0, Location::RequiresFpuRegister());
3492 }
3493 return summary;
3494}
3495
3496void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3497 const Register box = locs()->in(0).reg();
3498
3499 switch (representation()) {
3500 case kUnboxedInt64: {
3501 PairLocation* result = locs()->out(0).AsPairLocation();
3502 ASSERT(result->At(0).reg() != box);
3503 __ movl(result->At(0).reg(), compiler::FieldAddress(box, ValueOffset()));
3504 __ movl(result->At(1).reg(),
3505 compiler::FieldAddress(box, ValueOffset() + kWordSize));
3506 break;
3507 }
3508
3509 case kUnboxedDouble: {
3510 const FpuRegister result = locs()->out(0).fpu_reg();
3511 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3512 break;
3513 }
3514
3515 case kUnboxedFloat: {
3516 const FpuRegister result = locs()->out(0).fpu_reg();
3517 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3518 __ cvtsd2ss(result, result);
3519 break;
3520 }
3521
3522 case kUnboxedFloat32x4:
3523 case kUnboxedFloat64x2:
3524 case kUnboxedInt32x4: {
3525 const FpuRegister result = locs()->out(0).fpu_reg();
3526 __ movups(result, compiler::FieldAddress(box, ValueOffset()));
3527 break;
3528 }
3529
3530 default:
3531 UNREACHABLE();
3532 break;
3533 }
3534}
3535
3536void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3537 const Register box = locs()->in(0).reg();
3538
3539 switch (representation()) {
3540 case kUnboxedInt64: {
3541 PairLocation* result = locs()->out(0).AsPairLocation();
3542 ASSERT(result->At(0).reg() == EAX);
3543 ASSERT(result->At(1).reg() == EDX);
3544 __ movl(EAX, box);
3545 __ SmiUntag(EAX);
3546 __ cdq();
3547 break;
3548 }
3549
3550 case kUnboxedDouble: {
3551 const Register temp = locs()->temp(0).reg();
3552 const FpuRegister result = locs()->out(0).fpu_reg();
3553 __ movl(temp, box);
3554 __ SmiUntag(temp);
3555 __ cvtsi2sd(result, temp);
3556 break;
3557 }
3558
3559 default:
3560 UNREACHABLE();
3561 break;
3562 }
3563}
3564
3565void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3566 const Register value = locs()->in(0).reg();
3567 const Register result = locs()->out(0).reg();
3568 __ LoadInt32FromBoxOrSmi(result, value);
3569}
3570
3571void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3572 const Register box = locs()->in(0).reg();
3573 PairLocation* result = locs()->out(0).AsPairLocation();
3574 ASSERT(result->At(0).reg() != box);
3575 ASSERT(result->At(1).reg() != box);
3576 compiler::Label done;
3577 EmitSmiConversion(compiler); // Leaves CF after SmiUntag.
3578 __ j(NOT_CARRY, &done);
3579 EmitLoadFromBox(compiler);
3580 __ Bind(&done);
3581}
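// SmiUntag shifts the low tag bit into CF: CF is clear for a Smi (tag 0),
// in which case EmitSmiConversion already left the sign-extended value in
// EDX:EAX and the NOT_CARRY jump skips the box load. For a heap object
// (tag 1) CF is set and EmitLoadFromBox reloads both words from the Mint.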
3582
3583LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3584 bool opt) const {
3585 const intptr_t kNumInputs = 1;
3586 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3587 if (ValueFitsSmi()) {
3588 LocationSummary* summary = new (zone)
3589 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3590 // Same regs, can overwrite input.
3591 summary->set_in(0, Location::RequiresRegister());
3592 summary->set_out(0, Location::SameAsFirstInput());
3593 return summary;
3594 } else {
3595 LocationSummary* summary = new (zone) LocationSummary(
3596 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3597 // Guaranteed different regs. In the signed case we are going to use the
3598 // input for sign extension of any Mint.
3599 const bool needs_writable_input = (from_representation() == kUnboxedInt32);
3600 summary->set_in(0, needs_writable_input ? Location::WritableRegister()
3601 : Location::RequiresRegister());
3602 summary->set_temp(0, Location::RequiresRegister());
3603 summary->set_out(0, Location::RequiresRegister());
3604 return summary;
3605 }
3606}
3607
3608void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3609 const Register value = locs()->in(0).reg();
3610 const Register out = locs()->out(0).reg();
3611
3612 if (ValueFitsSmi()) {
3613 ASSERT(value == out);
3614 ASSERT(kSmiTag == 0);
3615 __ shll(out, compiler::Immediate(kSmiTagSize));
3616 return;
3617 }
3618
3619 __ movl(out, value);
3620 __ shll(out, compiler::Immediate(kSmiTagSize));
3621 compiler::Label done;
3622 if (from_representation() == kUnboxedInt32) {
3623 __ j(NO_OVERFLOW, &done);
3624 } else {
3625 ASSERT(value != out); // Value was not overwritten.
3626 __ testl(value, compiler::Immediate(0xC0000000));
3627 __ j(ZERO, &done);
3628 }
3629
3630 // Allocate a Mint.
3631 if (from_representation() == kUnboxedInt32) {
3632 // The value input is a writable register and must be manually preserved
3633 // across the allocation slow path. Add it to the live_registers set,
3634 // which determines which registers to preserve.
3635 locs()->live_registers()->Add(locs()->in(0), kUnboxedInt32);
3636 }
3637 ASSERT(value != out); // We need the value after the allocation.
3638 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
3639 locs()->temp(0).reg());
3640 __ movl(compiler::FieldAddress(out, Mint::value_offset()), value);
3641 if (from_representation() == kUnboxedInt32) {
3642 // In the signed may-overflow case we asked for the input (value) to be
3643 // writable so we can use it as a temp to put the sign extension bits in.
3644 __ sarl(value, compiler::Immediate(31)); // Sign extend the Mint.
3645 __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize),
3646 value);
3647 } else {
3648 __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize),
3649 compiler::Immediate(0)); // Zero extend the Mint.
3650 }
3651 __ Bind(&done);
3652}
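// In the unsigned branch, the testl against 0xC0000000 captures the Smi
// range directly: an ia32 Smi holds 31 bits, so a uint32 needs a Mint box
// exactly when bit 30 or bit 31 is set. For example 0x3FFFFFFF (kMaxSmi) is
// tagged in place, while 0x40000000 takes the allocation path and is stored
// zero-extended in the 64-bit Mint payload.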
3653
3654LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
3655 bool opt) const {
3656 const intptr_t kNumInputs = 1;
3657 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3658 LocationSummary* summary = new (zone)
3659 LocationSummary(zone, kNumInputs, kNumTemps,
3660 ValueFitsSmi() ? LocationSummary::kNoCall
3661 : LocationSummary::kCallOnSlowPath);
3662 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
3663 Location::RequiresRegister()));
3664 if (!ValueFitsSmi()) {
3665 summary->set_temp(0, Location::RequiresRegister());
3666 }
3667 summary->set_out(0, Location::RequiresRegister());
3668 return summary;
3669}
3670
3671void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3672 if (ValueFitsSmi()) {
3673 PairLocation* value_pair = locs()->in(0).AsPairLocation();
3674 Register value_lo = value_pair->At(0).reg();
3675 Register out_reg = locs()->out(0).reg();
3676 __ movl(out_reg, value_lo);
3677 __ SmiTag(out_reg);
3678 return;
3679 }
3680
3681 PairLocation* value_pair = locs()->in(0).AsPairLocation();
3682 Register value_lo = value_pair->At(0).reg();
3683 Register value_hi = value_pair->At(1).reg();
3684 Register out_reg = locs()->out(0).reg();
3685
3686 // Copy value_hi into out_reg as a temporary.
3687 // We modify value_lo but restore it before using it.
3688 __ movl(out_reg, value_hi);
3689
3690 // Unboxed operations produce smis or mint-sized values.
3691 // Check if value fits into a smi.
3692 compiler::Label not_smi, done;
3693
3694 // 1. Compute (x + -kMinSmi) which has to be in the range
3695 // 0 .. -kMinSmi+kMaxSmi for x to fit into a smi.
3696 __ addl(value_lo, compiler::Immediate(0x40000000));
3697 __ adcl(out_reg, compiler::Immediate(0));
3698 // 2. Unsigned compare to -kMinSmi+kMaxSmi.
3699 __ cmpl(value_lo, compiler::Immediate(0x80000000));
3700 __ sbbl(out_reg, compiler::Immediate(0));
3701 __ j(ABOVE_EQUAL, &not_smi);
3702 // 3. Restore lower half if result is a smi.
3703 __ subl(value_lo, compiler::Immediate(0x40000000));
3704 __ movl(out_reg, value_lo);
3705 __ SmiTag(out_reg);
3706 __ jmp(&done);
3707 __ Bind(&not_smi);
3708 // 3. Restore lower half of input before using it.
3709 __ subl(value_lo, compiler::Immediate(0x40000000));
3710
3711 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
3712 out_reg, locs()->temp(0).reg());
3713 __ movl(compiler::FieldAddress(out_reg, Mint::value_offset()), value_lo);
3714 __ movl(compiler::FieldAddress(out_reg, Mint::value_offset() + kWordSize),
3715 value_hi);
3716 __ Bind(&done);
3717}
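// The add/adc + cmp/sbb sequence above is a 64-bit unsigned range check
// built from 32-bit ops. A C sketch of the same predicate (FitsInSmi is an
// illustrative name, not a VM helper):
//
//   bool FitsInSmi(int64_t x) {
//     // kMinSmi is -0x40000000 on ia32, kMaxSmi is 0x3FFFFFFF.
//     return (uint64_t)(x + 0x40000000) < 0x80000000u;
//   }
//
// E.g. x = -1 becomes 0x3FFFFFFF and is tagged as a Smi; x = 0x40000000
// becomes 0x80000000, fails the unsigned compare, and is boxed as a Mint.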
3718
3719LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
3720 bool opt) const {
3721 const intptr_t value_cid = value()->Type()->ToCid();
3722 const intptr_t kNumInputs = 1;
3723 intptr_t kNumTemps = 0;
3724
3725 if (CanDeoptimize()) {
3726 if ((value_cid != kSmiCid) && (value_cid != kMintCid) && !is_truncating()) {
3727 kNumTemps = 2;
3728 } else {
3729 kNumTemps = 1;
3730 }
3731 }
3732
3733 LocationSummary* summary = new (zone)
3734 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3735 summary->set_in(0, Location::RequiresRegister());
3736 for (int i = 0; i < kNumTemps; i++) {
3737 summary->set_temp(i, Location::RequiresRegister());
3738 }
3739 summary->set_out(0, ((value_cid == kSmiCid) || (value_cid != kMintCid))
3740 ? Location::SameAsFirstInput()
3741 : Location::RequiresRegister());
3742 return summary;
3743}
3744
3745static void LoadInt32FromMint(FlowGraphCompiler* compiler,
3746 Register result,
3747 const compiler::Address& lo,
3748 const compiler::Address& hi,
3749 Register temp,
3750 compiler::Label* deopt) {
3751 __ movl(result, lo);
3752 if (deopt != nullptr) {
3753 ASSERT(temp != result);
3754 __ movl(temp, result);
3755 __ sarl(temp, compiler::Immediate(31));
3756 __ cmpl(temp, hi);
3757 __ j(NOT_EQUAL, deopt);
3758 }
3759}
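// LoadInt32FromMint accepts the mint only if its high word is the sign
// extension of its low word. Example: 0x00000000'7FFFFFFF has lo >> 31 == 0
// == hi, so it fits; 0x00000001'00000000 has lo == 0 but hi == 1, so the
// NOT_EQUAL branch deoptimizes (when a deopt label is provided).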
3760
3761void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3762 const intptr_t value_cid = value()->Type()->ToCid();
3763 Register value = locs()->in(0).reg();
3764 const Register result = locs()->out(0).reg();
3765 const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
3766 compiler::Label* deopt = nullptr;
3767 if (CanDeoptimize()) {
3768 deopt = compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger);
3769 }
3770 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
3771
3772 const intptr_t lo_offset = Mint::value_offset();
3773 const intptr_t hi_offset = Mint::value_offset() + kWordSize;
3774
3775 if (value_cid == kSmiCid) {
3776 ASSERT(value == result);
3777 __ SmiUntag(value);
3778 } else if (value_cid == kMintCid) {
3779 ASSERT((value != result) || (out_of_range == nullptr));
3780 LoadInt32FromMint(
3781 compiler, result, compiler::FieldAddress(value, lo_offset),
3782 compiler::FieldAddress(value, hi_offset), temp, out_of_range);
3783 } else if (!CanDeoptimize()) {
3784 ASSERT(value == result);
3785 compiler::Label done;
3786 __ SmiUntag(value);
3787 __ j(NOT_CARRY, &done);
3788 __ movl(value, compiler::Address(value, TIMES_2, lo_offset));
3789 __ Bind(&done);
3790 } else {
3791 ASSERT(value == result);
3792 compiler::Label done;
3793 __ SmiUntagOrCheckClass(value, kMintCid, temp, &done);
3794 __ j(NOT_EQUAL, deopt);
3795 if (out_of_range != nullptr) {
3796 Register value_temp = locs()->temp(1).reg();
3797 __ movl(value_temp, value);
3798 value = value_temp;
3799 }
3800 LoadInt32FromMint(
3801 compiler, result, compiler::Address(value, TIMES_2, lo_offset),
3802 compiler::Address(value, TIMES_2, hi_offset), temp, out_of_range);
3803 __ Bind(&done);
3804 }
3805}
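// In the final branch, SmiUntagOrCheckClass arithmetically shifts the
// tagged value right by one. For a heap pointer (tag bit 1) that halves
// ptr - 1, so scaling back with TIMES_2 reconstructs the untagged address:
// Address(value, TIMES_2, lo_offset) reaches the same slot a FieldAddress
// on the original pointer would.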
3806
3807LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
3808 bool opt) const {
3809 const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
3810 const intptr_t kNumInputs = 2;
3811 const intptr_t kNumTemps = might_box ? 2 : 0;
3812 LocationSummary* summary = new (zone) LocationSummary(
3813 zone, kNumInputs, kNumTemps,
3814 might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
3815 summary->set_in(0, Location::RequiresRegister());
3816 // The smi index is either untagged (element size == 1), or it is left smi
3817 // tagged (for all element sizes > 1).
3818 summary->set_in(1, (index_scale() == 1) ? Location::WritableRegister()
3819 : Location::RequiresRegister());
3820 if (might_box) {
3821 summary->set_temp(0, Location::RequiresRegister());
3822 summary->set_temp(1, Location::RequiresRegister());
3823 }
3824
3825 if (representation() == kUnboxedInt64) {
3826 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
3827 Location::RequiresRegister()));
3828 } else {
3829 ASSERT(representation() == kTagged);
3830 summary->set_out(0, Location::RequiresRegister());
3831 }
3832
3833 return summary;
3834}
3835
3836void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3837 // The string register points to the backing store for external strings.
3838 const Register str = locs()->in(0).reg();
3839 const Location index = locs()->in(1);
3840
3841 bool index_unboxed = false;
3842 if ((index_scale() == 1)) {
3843 __ SmiUntag(index.reg());
3844 index_unboxed = true;
3845 }
3846 compiler::Address element_address =
3847 compiler::Assembler::ElementAddressForRegIndex(
3848 IsExternal(), class_id(), index_scale(), index_unboxed, str,
3849 index.reg());
3850
3851 if (representation() == kUnboxedInt64) {
3852 ASSERT(compiler->is_optimizing());
3853 ASSERT(locs()->out(0).IsPairLocation());
3854 PairLocation* result_pair = locs()->out(0).AsPairLocation();
3855 Register result1 = result_pair->At(0).reg();
3856 Register result2 = result_pair->At(1).reg();
3857
3858 switch (class_id()) {
3859 case kOneByteStringCid:
3860 ASSERT(element_count() == 4);
3861 __ movl(result1, element_address);
3862 __ xorl(result2, result2);
3863 break;
3864 case kTwoByteStringCid:
3865 ASSERT(element_count() == 2);
3866 __ movl(result1, element_address);
3867 __ xorl(result2, result2);
3868 break;
3869 default:
3870 UNREACHABLE();
3871 }
3872 } else {
3873 ASSERT(representation() == kTagged);
3874 Register result = locs()->out(0).reg();
3875 switch (class_id()) {
3876 case kOneByteStringCid:
3877 switch (element_count()) {
3878 case 1:
3879 __ movzxb(result, element_address);
3880 break;
3881 case 2:
3882 __ movzxw(result, element_address);
3883 break;
3884 case 4:
3885 __ movl(result, element_address);
3886 break;
3887 default:
3888 UNREACHABLE();
3889 }
3890 break;
3891 case kTwoByteStringCid:
3892 switch (element_count()) {
3893 case 1:
3894 __ movzxw(result, element_address);
3895 break;
3896 case 2:
3897 __ movl(result, element_address);
3898 break;
3899 default:
3900 UNREACHABLE();
3901 }
3902 break;
3903 default:
3904 UNREACHABLE();
3905 break;
3906 }
3907 if (can_pack_into_smi()) {
3908 __ SmiTag(result);
3909 } else {
3910 // If the value cannot fit in a smi then allocate a mint box for it.
3911 Register temp = locs()->temp(0).reg();
3912 Register temp2 = locs()->temp(1).reg();
3913 // The temp register must be manually preserved across the allocation
3914 // slow path. Add it to the live_registers set, which determines which
3915 // registers to preserve.
3916 locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
3917
3918 ASSERT(temp != result);
3919 __ MoveRegister(temp, result);
3920 __ SmiTag(result);
3921
3922 compiler::Label done;
3923 __ testl(temp, compiler::Immediate(0xC0000000));
3924 __ j(ZERO, &done);
3925 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
3926 result, temp2);
3927 __ movl(compiler::FieldAddress(result, Mint::value_offset()), temp);
3928 __ movl(compiler::FieldAddress(result, Mint::value_offset() + kWordSize),
3929 compiler::Immediate(0));
3930 __ Bind(&done);
3931 }
3932 }
3933}
3934
3935LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
3936 bool opt) const {
3937 const intptr_t kNumInputs = 2;
3938 const intptr_t kNumTemps = 0;
3939 LocationSummary* summary = new (zone)
3940 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3941 summary->set_in(0, Location::RequiresFpuRegister());
3942 summary->set_in(1, Location::RequiresFpuRegister());
3943 summary->set_out(0, Location::SameAsFirstInput());
3944 return summary;
3945}
3946
3947void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3948 XmmRegister left = locs()->in(0).fpu_reg();
3949 XmmRegister right = locs()->in(1).fpu_reg();
3950
3951 ASSERT(locs()->out(0).fpu_reg() == left);
3952
3953 switch (op_kind()) {
3954 case Token::kADD:
3955 __ addsd(left, right);
3956 break;
3957 case Token::kSUB:
3958 __ subsd(left, right);
3959 break;
3960 case Token::kMUL:
3961 __ mulsd(left, right);
3962 break;
3963 case Token::kDIV:
3964 __ divsd(left, right);
3965 break;
3966 default:
3967 UNREACHABLE();
3968 }
3969}
3970
3971LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
3972 bool opt) const {
3973 const intptr_t kNumInputs = 1;
3974 const intptr_t kNumTemps =
3975 op_kind() == MethodRecognizer::kDouble_getIsNegative
3976 ? 2
3977 : (op_kind() == MethodRecognizer::kDouble_getIsInfinite ? 1 : 0);
3978 LocationSummary* summary = new (zone)
3979 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3980 summary->set_in(0, Location::RequiresFpuRegister());
3981 if (kNumTemps > 0) {
3982 summary->set_temp(0, Location::RequiresRegister());
3983 if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
3984 summary->set_temp(1, Location::RequiresFpuRegister());
3985 }
3986 }
3987 summary->set_out(0, Location::RequiresRegister());
3988 return summary;
3989}
3990
3991Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
3992 BranchLabels labels) {
3993 ASSERT(compiler->is_optimizing());
3994 const XmmRegister value = locs()->in(0).fpu_reg();
3995 const bool is_negated = kind() != Token::kEQ;
3996
3997 switch (op_kind()) {
3998 case MethodRecognizer::kDouble_getIsNaN: {
3999 __ comisd(value, value);
4000 return is_negated ? PARITY_ODD : PARITY_EVEN;
4001 }
4002 case MethodRecognizer::kDouble_getIsInfinite: {
4003 const Register temp = locs()->temp(0).reg();
4004 compiler::Label check_upper;
4005 __ AddImmediate(ESP, compiler::Immediate(-kDoubleSize));
4006 __ movsd(compiler::Address(ESP, 0), value);
4007 __ movl(temp, compiler::Address(ESP, 0));
4008 // If the low word isn't zero, then it isn't infinity.
4009 __ cmpl(temp, compiler::Immediate(0));
4010 __ j(EQUAL, &check_upper, compiler::Assembler::kNearJump);
4011 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
4012 __ jmp(is_negated ? labels.true_label : labels.false_label);
4013 __ Bind(&check_upper);
4014 // Check the high word.
4015 __ movl(temp, compiler::Address(ESP, kWordSize));
4016 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
4017 // Mask off sign bit.
4018 __ andl(temp, compiler::Immediate(0x7FFFFFFF));
4019 // Compare with +infinity.
4020 __ cmpl(temp, compiler::Immediate(0x7FF00000));
4021 return is_negated ? NOT_EQUAL : EQUAL;
4022 }
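// Example for the infinity check: +-infinity is 0x7FF00000'00000000 with
// only the sign bit varying, so the low word must be exactly zero and the
// sign-masked high word exactly 0x7FF00000. Any nonzero mantissa bit (a
// NaN payload) makes one of the two comparisons fail.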
4023 case MethodRecognizer::kDouble_getIsNegative: {
4024 const Register temp = locs()->temp(0).reg();
4025 const FpuRegister temp_fpu = locs()->temp(1).fpu_reg();
4026 compiler::Label not_zero;
4027 __ xorpd(temp_fpu, temp_fpu);
4028 __ comisd(value, temp_fpu);
4029 // If it's NaN, it's not negative.
4030 __ j(PARITY_EVEN, is_negated ? labels.true_label : labels.false_label);
4031 // Looking at the sign bit also takes care of signed zero.
4032 __ movmskpd(temp, value);
4033 __ testl(temp, compiler::Immediate(1));
4034 return is_negated ? EQUAL : NOT_EQUAL;
4035 }
4036 default:
4037 UNREACHABLE();
4038 }
4039}
4040
4041// SIMD
4042
4043#define DEFINE_EMIT(Name, Args) \
4044 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
4045 PP_APPLY(PP_UNPACK, Args))
4046
4047#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
4048 V(Float32x4##Name, op##ps) \
4049 V(Float64x2##Name, op##pd)
4050
4051#define SIMD_OP_SIMPLE_BINARY(V) \
4052 SIMD_OP_FLOAT_ARITH(V, Add, add) \
4053 SIMD_OP_FLOAT_ARITH(V, Sub, sub) \
4054 SIMD_OP_FLOAT_ARITH(V, Mul, mul) \
4055 SIMD_OP_FLOAT_ARITH(V, Div, div) \
4056 SIMD_OP_FLOAT_ARITH(V, Min, min) \
4057 SIMD_OP_FLOAT_ARITH(V, Max, max) \
4058 V(Int32x4Add, addpl) \
4059 V(Int32x4Sub, subpl) \
4060 V(Int32x4BitAnd, andps) \
4061 V(Int32x4BitOr, orps) \
4062 V(Int32x4BitXor, xorps) \
4063 V(Float32x4Equal, cmppseq) \
4064 V(Float32x4NotEqual, cmppsneq) \
4065 V(Float32x4LessThan, cmppslt) \
4066 V(Float32x4LessThanOrEqual, cmppsle)
4067
4068DEFINE_EMIT(SimdBinaryOp,
4069 (SameAsFirstInput, XmmRegister left, XmmRegister right)) {
4070 switch (instr->kind()) {
4071#define EMIT(Name, op) \
4072 case SimdOpInstr::k##Name: \
4073 __ op(left, right); \
4074 break;
4075 SIMD_OP_SIMPLE_BINARY(EMIT)
4076#undef EMIT
4077 case SimdOpInstr::kFloat32x4Scale:
4078 __ cvtsd2ss(left, left);
4079 __ shufps(left, left, compiler::Immediate(0x00));
4080 __ mulps(left, right);
4081 break;
4082 case SimdOpInstr::kFloat32x4ShuffleMix:
4083 case SimdOpInstr::kInt32x4ShuffleMix:
4084 __ shufps(left, right, compiler::Immediate(instr->mask()));
4085 break;
4086 case SimdOpInstr::kFloat64x2FromDoubles:
4087 // shufpd mask 0x0 results in:
4088 // Lower 64-bits of left = Lower 64-bits of left.
4089 // Upper 64-bits of left = Lower 64-bits of right.
4090 __ shufpd(left, right, compiler::Immediate(0x0));
4091 break;
4092 case SimdOpInstr::kFloat64x2Scale:
4093 __ shufpd(right, right, compiler::Immediate(0x00));
4094 __ mulpd(left, right);
4095 break;
4096 case SimdOpInstr::kFloat64x2WithX:
4097 case SimdOpInstr::kFloat64x2WithY: {
4098 // TODO(dartbug.com/30949) avoid transfer through memory
4099 COMPILE_ASSERT(SimdOpInstr::kFloat64x2WithY ==
4100 (SimdOpInstr::kFloat64x2WithX + 1));
4101 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat64x2WithX;
4102 ASSERT(0 <= lane_index && lane_index < 2);
4103 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4104 __ movups(compiler::Address(ESP, 0), left);
4105 __ movsd(compiler::Address(ESP, lane_index * kDoubleSize), right);
4106 __ movups(left, compiler::Address(ESP, 0));
4107 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4108 break;
4109 }
4110 case SimdOpInstr::kFloat32x4WithX:
4111 case SimdOpInstr::kFloat32x4WithY:
4112 case SimdOpInstr::kFloat32x4WithZ:
4113 case SimdOpInstr::kFloat32x4WithW: {
4114 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4115 // insertps; with SSE2 these operations can be implemented via a
4116 // combination of shufps/movss/movlhps.
4117 COMPILE_ASSERT(
4118 SimdOpInstr::kFloat32x4WithY == (SimdOpInstr::kFloat32x4WithX + 1) &&
4119 SimdOpInstr::kFloat32x4WithZ == (SimdOpInstr::kFloat32x4WithX + 2) &&
4120 SimdOpInstr::kFloat32x4WithW == (SimdOpInstr::kFloat32x4WithX + 3));
4121 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat32x4WithX;
4122 ASSERT(0 <= lane_index && lane_index < 4);
4123 __ cvtsd2ss(left, left);
4124 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4125 __ movups(compiler::Address(ESP, 0), right);
4126 __ movss(compiler::Address(ESP, lane_index * kFloatSize), left);
4127 __ movups(left, compiler::Address(ESP, 0));
4128 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4129 break;
4130 }
4131 default:
4132 UNREACHABLE();
4133 }
4134}
4135
4136#define SIMD_OP_SIMPLE_UNARY(V) \
4137 SIMD_OP_FLOAT_ARITH(V, Sqrt, sqrt) \
4138 SIMD_OP_FLOAT_ARITH(V, Negate, negate) \
4139 SIMD_OP_FLOAT_ARITH(V, Abs, abs) \
4140 V(Float32x4Reciprocal, reciprocalps) \
4141 V(Float32x4ReciprocalSqrt, rsqrtps)
4142
4143DEFINE_EMIT(SimdUnaryOp, (SameAsFirstInput, XmmRegister value)) {
4144 // TODO(dartbug.com/30949) select better register constraints to avoid
4145 // redundant move of input into a different register because all instructions
4146 // below support two operand forms.
4147 switch (instr->kind()) {
4148#define EMIT(Name, op) \
4149 case SimdOpInstr::k##Name: \
4150 __ op(value); \
4151 break;
4152 SIMD_OP_SIMPLE_UNARY(EMIT)
4153#undef EMIT
4154 case SimdOpInstr::kFloat32x4GetX:
4155 // Shuffle not necessary.
4156 __ cvtss2sd(value, value);
4157 break;
4158 case SimdOpInstr::kFloat32x4GetY:
4159 __ shufps(value, value, compiler::Immediate(0x55));
4160 __ cvtss2sd(value, value);
4161 break;
4162 case SimdOpInstr::kFloat32x4GetZ:
4163 __ shufps(value, value, compiler::Immediate(0xAA));
4164 __ cvtss2sd(value, value);
4165 break;
4166 case SimdOpInstr::kFloat32x4GetW:
4167 __ shufps(value, value, compiler::Immediate(0xFF));
4168 __ cvtss2sd(value, value);
4169 break;
4170 case SimdOpInstr::kFloat32x4Shuffle:
4171 case SimdOpInstr::kInt32x4Shuffle:
4172 __ shufps(value, value, compiler::Immediate(instr->mask()));
4173 break;
4174 case SimdOpInstr::kFloat32x4Splat:
4175 // Convert to Float32.
4176 __ cvtsd2ss(value, value);
4177 // Splat across all lanes.
4178 __ shufps(value, value, compiler::Immediate(0x00));
4179 break;
4180 case SimdOpInstr::kFloat64x2ToFloat32x4:
4181 __ cvtpd2ps(value, value);
4182 break;
4183 case SimdOpInstr::kFloat32x4ToFloat64x2:
4184 __ cvtps2pd(value, value);
4185 break;
4186 case SimdOpInstr::kFloat32x4ToInt32x4:
4187 case SimdOpInstr::kInt32x4ToFloat32x4:
4188 // TODO(dartbug.com/30949) these operations are essentially no-ops and
4189 // should not generate any code. They should be removed from the graph
4190 // before code generation.
4191 break;
4192 case SimdOpInstr::kFloat64x2GetX:
4193 // NOP.
4194 break;
4195 case SimdOpInstr::kFloat64x2GetY:
4196 __ shufpd(value, value, compiler::Immediate(0x33));
4197 break;
4198 case SimdOpInstr::kFloat64x2Splat:
4199 __ shufpd(value, value, compiler::Immediate(0x0));
4200 break;
4201 default:
4202 UNREACHABLE();
4203 }
4204}
4205
4206DEFINE_EMIT(SimdGetSignMask, (Register out, XmmRegister value)) {
4207 switch (instr->kind()) {
4208 case SimdOpInstr::kFloat32x4GetSignMask:
4209 case SimdOpInstr::kInt32x4GetSignMask:
4210 __ movmskps(out, value);
4211 break;
4212 case SimdOpInstr::kFloat64x2GetSignMask:
4213 __ movmskpd(out, value);
4214 break;
4215 default:
4216 UNREACHABLE();
4217 break;
4218 }
4219}
4220
4221DEFINE_EMIT(
4222 Float32x4FromDoubles,
4223 (SameAsFirstInput, XmmRegister v0, XmmRegister, XmmRegister, XmmRegister)) {
4224 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4225 // insertps, with SSE2 this instruction can be implemented through unpcklps.
4226 const XmmRegister out = v0;
4227 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4228 for (intptr_t i = 0; i < 4; i++) {
4229 __ cvtsd2ss(out, instr->locs()->in(i).fpu_reg());
4230 __ movss(compiler::Address(ESP, i * kFloatSize), out);
4231 }
4232 __ movups(out, compiler::Address(ESP, 0));
4233 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4234}
4235
4236DEFINE_EMIT(Float32x4Zero, (XmmRegister out)) {
4237 __ xorps(out, out);
4238}
4239
4240DEFINE_EMIT(Float64x2Zero, (XmmRegister value)) {
4241 __ xorpd(value, value);
4242}
4243
4244DEFINE_EMIT(Float32x4Clamp,
4245 (SameAsFirstInput,
4246 XmmRegister left,
4247 XmmRegister lower,
4248 XmmRegister upper)) {
4249 __ minps(left, upper);
4250 __ maxps(left, lower);
4251}
4252
4253DEFINE_EMIT(Float64x2Clamp,
4254 (SameAsFirstInput,
4255 XmmRegister left,
4256 XmmRegister lower,
4257 XmmRegister upper)) {
4258 __ minpd(left, upper);
4259 __ maxpd(left, lower);
4260}
4261
4262DEFINE_EMIT(Int32x4FromInts,
4263 (Register result, Register, Register, Register, Register)) {
4264 // TODO(dartbug.com/30949) avoid transfer through memory.
4265 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4266 for (intptr_t i = 0; i < 4; i++) {
4267 __ movl(compiler::Address(ESP, i * kInt32Size), instr->locs()->in(i).reg());
4268 }
4269 __ movups(result, compiler::Address(ESP, 0));
4270 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4271}
4272
4273DEFINE_EMIT(Int32x4FromBools,
4274 (Register result, Register, Register, Register, Register)) {
4275 // TODO(dartbug.com/30949) avoid transfer through memory and branches.
4276 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4277 for (intptr_t i = 0; i < 4; i++) {
4278 compiler::Label store_false, done;
4279 __ CompareObject(instr->locs()->in(i).reg(), Bool::True());
4280 __ j(NOT_EQUAL, &store_false);
4281 __ movl(compiler::Address(ESP, kInt32Size * i),
4282 compiler::Immediate(0xFFFFFFFF));
4283 __ jmp(&done);
4284 __ Bind(&store_false);
4285 __ movl(compiler::Address(ESP, kInt32Size * i), compiler::Immediate(0x0));
4286 __ Bind(&done);
4287 }
4288 __ movups(result, compiler::Address(ESP, 0));
4289 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4290}
4291
4292// TODO(dartbug.com/30953) need register with a byte component for setcc.
4293DEFINE_EMIT(Int32x4GetFlag, (Fixed<Register, EDX> result, XmmRegister value)) {
4294 COMPILE_ASSERT(
4295 SimdOpInstr::kInt32x4GetFlagY == (SimdOpInstr::kInt32x4GetFlagX + 1) &&
4296 SimdOpInstr::kInt32x4GetFlagZ == (SimdOpInstr::kInt32x4GetFlagX + 2) &&
4297 SimdOpInstr::kInt32x4GetFlagW == (SimdOpInstr::kInt32x4GetFlagX + 3));
4298 const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4GetFlagX;
4299 ASSERT(0 <= lane_index && lane_index < 4);
4300
4301 // TODO(dartbug.com/30949) avoid transfer through memory.
4302 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4303 __ movups(compiler::Address(ESP, 0), value);
4304 __ movl(EDX, compiler::Address(ESP, lane_index * kInt32Size));
4305 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4306
4307 // EDX = EDX != 0 ? 0 : 1
4308 __ testl(EDX, EDX);
4309 __ setcc(ZERO, DL);
4310 __ movzxb(EDX, DL);
4311
4312 // EDX = EDX == 0 ? Bool::True() : Bool::False()
4313 __ movl(EDX,
4314 compiler::Address(THR, EDX, TIMES_4, Thread::bool_true_offset()));
4315}
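// The final movl replaces a branch with a table lookup off THR: EDX is 0
// for a set lane and 1 for a clear lane, and indexing bool_true_offset()
// by EDX * 4 selects Bool::True() or the adjacent Bool::False() slot,
// relying on the two cached bools being consecutive words in the Thread.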
4316
4317// TODO(dartbug.com/30953) need register with a byte component for setcc.
4318DEFINE_EMIT(Int32x4WithFlag,
4319 (SameAsFirstInput,
4320 XmmRegister mask,
4321 Register flag,
4322 Temp<Fixed<Register, EDX> > temp)) {
4323 COMPILE_ASSERT(
4324 SimdOpInstr::kInt32x4WithFlagY == (SimdOpInstr::kInt32x4WithFlagX + 1) &&
4325 SimdOpInstr::kInt32x4WithFlagZ == (SimdOpInstr::kInt32x4WithFlagX + 2) &&
4326 SimdOpInstr::kInt32x4WithFlagW == (SimdOpInstr::kInt32x4WithFlagX + 3));
4327 const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4WithFlagX;
4328 ASSERT(0 <= lane_index && lane_index < 4);
4329
4330 // TODO(dartbug.com/30949) avoid transfer through memory.
4331 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4332 __ movups(compiler::Address(ESP, 0), mask);
4333
4334 // EDX = flag == true ? -1 : 0
4335 __ xorl(EDX, EDX);
4336 __ CompareObject(flag, Bool::True());
4337 __ setcc(EQUAL, DL);
4338 __ negl(EDX);
4339
4340 __ movl(compiler::Address(ESP, lane_index * kInt32Size), EDX);
4341
4342 // Copy mask back to register.
4343 __ movups(mask, compiler::Address(ESP, 0));
4344 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4345}
4346
4347DEFINE_EMIT(Int32x4Select,
4348 (SameAsFirstInput,
4349 XmmRegister mask,
4350 XmmRegister trueValue,
4351 XmmRegister falseValue,
4352 Temp<XmmRegister> temp)) {
4353 // Copy mask.
4354 __ movaps(temp, mask);
4355 // Invert it.
4356 __ notps(temp);
4357 // mask = mask & trueValue.
4358 __ andps(mask, trueValue);
4359 // temp = temp & falseValue.
4360 __ andps(temp, falseValue);
4361 // out = mask | temp.
4362 __ orps(mask, temp);
4363}
4364
4365// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
4366// format:
4367//
4368// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
4369// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
4370//
4371#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
4372 SIMD_OP_SIMPLE_BINARY(CASE) \
4373 CASE(Float32x4Scale) \
4374 CASE(Float32x4ShuffleMix) \
4375 CASE(Int32x4ShuffleMix) \
4376 CASE(Float64x2FromDoubles) \
4377 CASE(Float64x2Scale) \
4378 CASE(Float64x2WithX) \
4379 CASE(Float64x2WithY) \
4380 CASE(Float32x4WithX) \
4381 CASE(Float32x4WithY) \
4382 CASE(Float32x4WithZ) \
4383 CASE(Float32x4WithW) \
4384 ____(SimdBinaryOp) \
4385 SIMD_OP_SIMPLE_UNARY(CASE) \
4386 CASE(Float32x4GetX) \
4387 CASE(Float32x4GetY) \
4388 CASE(Float32x4GetZ) \
4389 CASE(Float32x4GetW) \
4390 CASE(Float32x4Shuffle) \
4391 CASE(Int32x4Shuffle) \
4392 CASE(Float32x4Splat) \
4393 CASE(Float32x4ToFloat64x2) \
4394 CASE(Float64x2ToFloat32x4) \
4395 CASE(Int32x4ToFloat32x4) \
4396 CASE(Float32x4ToInt32x4) \
4397 CASE(Float64x2GetX) \
4398 CASE(Float64x2GetY) \
4399 CASE(Float64x2Splat) \
4400 ____(SimdUnaryOp) \
4401 CASE(Float32x4GetSignMask) \
4402 CASE(Int32x4GetSignMask) \
4403 CASE(Float64x2GetSignMask) \
4404 ____(SimdGetSignMask) \
4405 SIMPLE(Float32x4FromDoubles) \
4406 SIMPLE(Int32x4FromInts) \
4407 SIMPLE(Int32x4FromBools) \
4408 SIMPLE(Float32x4Zero) \
4409 SIMPLE(Float64x2Zero) \
4410 SIMPLE(Float32x4Clamp) \
4411 SIMPLE(Float64x2Clamp) \
4412 CASE(Int32x4GetFlagX) \
4413 CASE(Int32x4GetFlagY) \
4414 CASE(Int32x4GetFlagZ) \
4415 CASE(Int32x4GetFlagW) \
4416 ____(Int32x4GetFlag) \
4417 CASE(Int32x4WithFlagX) \
4418 CASE(Int32x4WithFlagY) \
4419 CASE(Int32x4WithFlagZ) \
4420 CASE(Int32x4WithFlagW) \
4421 ____(Int32x4WithFlag) \
4422 SIMPLE(Int32x4Select)
4423
4424LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4425 switch (kind()) {
4426#define CASE(Name, ...) case k##Name:
4427#define EMIT(Name) \
4428 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
4429#define SIMPLE(Name) CASE(Name) EMIT(Name)
4430 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4431#undef CASE
4432#undef EMIT
4433#undef SIMPLE
4434 case SimdOpInstr::kFloat32x4GreaterThan:
4435 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4436 case kIllegalSimdOp:
4437 UNREACHABLE();
4438 break;
4439 }
4440 UNREACHABLE();
4441 return nullptr;
4442}
4443
4444void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4445 switch (kind()) {
4446#define CASE(Name, ...) case k##Name:
4447#define EMIT(Name) \
4448 InvokeEmitter(compiler, this, &Emit##Name); \
4449 break;
4450#define SIMPLE(Name) CASE(Name) EMIT(Name)
4451 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4452#undef CASE
4453#undef EMIT
4454#undef SIMPLE
4455 case SimdOpInstr::kFloat32x4GreaterThan:
4456 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4457 case kIllegalSimdOp:
4458 UNREACHABLE();
4459 break;
4460 }
4461}
4462
4463#undef DEFINE_EMIT
4464
4465LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
4466 Zone* zone,
4467 bool opt) const {
4468 const intptr_t kNumTemps = 0;
4469 LocationSummary* summary = new (zone)
4470 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4471 summary->set_in(0, Location::RegisterLocation(EAX));
4472 summary->set_in(1, Location::RegisterLocation(ECX));
4473 summary->set_in(2, Location::RegisterLocation(EDX));
4474 summary->set_in(3, Location::RegisterLocation(EBX));
4475 summary->set_out(0, Location::RegisterLocation(EAX));
4476 return summary;
4477}
4478
4479void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4480 compiler::LeafRuntimeScope rt(compiler->assembler(),
4481 /*frame_size=*/4 * compiler::target::kWordSize,
4482 /*preserve_registers=*/false);
4483 __ movl(compiler::Address(ESP, +0 * kWordSize), locs()->in(0).reg());
4484 __ movl(compiler::Address(ESP, +1 * kWordSize), locs()->in(1).reg());
4485 __ movl(compiler::Address(ESP, +2 * kWordSize), locs()->in(2).reg());
4486 __ movl(compiler::Address(ESP, +3 * kWordSize), locs()->in(3).reg());
4487 rt.Call(TargetFunction(), 4);
4488}
4489
4490LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4491 bool opt) const {
4492 if (result_cid() == kDoubleCid) {
4493 const intptr_t kNumInputs = 2;
4494 const intptr_t kNumTemps = 1;
4495 LocationSummary* summary = new (zone)
4496 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4497 summary->set_in(0, Location::RequiresFpuRegister());
4498 summary->set_in(1, Location::RequiresFpuRegister());
4499 // Reuse the left register so that code can be made shorter.
4500 summary->set_out(0, Location::SameAsFirstInput());
4501 summary->set_temp(0, Location::RequiresRegister());
4502 return summary;
4503 }
4504
4505 ASSERT(result_cid() == kSmiCid);
4506 const intptr_t kNumInputs = 2;
4507 const intptr_t kNumTemps = 0;
4508 LocationSummary* summary = new (zone)
4509 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4510 summary->set_in(0, Location::RequiresRegister());
4511 summary->set_in(1, Location::RequiresRegister());
4512 // Reuse the left register so that code can be made shorter.
4513 summary->set_out(0, Location::SameAsFirstInput());
4514 return summary;
4515}
4516
4517void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4518 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4519 (op_kind() == MethodRecognizer::kMathMax));
4520 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4521 if (result_cid() == kDoubleCid) {
4522 compiler::Label done, returns_nan, are_equal;
4523 XmmRegister left = locs()->in(0).fpu_reg();
4524 XmmRegister right = locs()->in(1).fpu_reg();
4525 XmmRegister result = locs()->out(0).fpu_reg();
4526 Register temp = locs()->temp(0).reg();
4527 __ comisd(left, right);
4528 __ j(PARITY_EVEN, &returns_nan, compiler::Assembler::kNearJump);
4529 __ j(EQUAL, &are_equal, compiler::Assembler::kNearJump);
4530 const Condition double_condition =
4531 is_min ? TokenKindToDoubleCondition(Token::kLT)
4532 : TokenKindToDoubleCondition(Token::kGT);
4533 ASSERT(left == result);
4534 __ j(double_condition, &done, compiler::Assembler::kNearJump);
4535 __ movsd(result, right);
4536 __ jmp(&done, compiler::Assembler::kNearJump);
4537
4538 __ Bind(&returns_nan);
4539 static double kNaN = NAN;
4540 __ movsd(result,
4541 compiler::Address::Absolute(reinterpret_cast<uword>(&kNaN)));
4542 __ jmp(&done, compiler::Assembler::kNearJump);
4543
4544 __ Bind(&are_equal);
4545 compiler::Label left_is_negative;
4546 // Check for negative zero: -0.0 compares equal to 0.0, but min must
4547 // return -0.0 and max must return 0.0.
4548 // Check for a negative left value (get the sign bit):
4549 // - min -> left is negative ? left : right.
4550 // - max -> left is negative ? right : left.
4551 // Check the sign bit.
4552 __ movmskpd(temp, left);
4553 __ testl(temp, compiler::Immediate(1));
4554 ASSERT(left == result);
4555 if (is_min) {
4556 __ j(NOT_ZERO, &done,
4557 compiler::Assembler::kNearJump); // Negative -> return left.
4558 } else {
4559 __ j(ZERO, &done,
4560 compiler::Assembler::kNearJump); // Positive -> return left.
4561 }
4562 __ movsd(result, right);
4563 __ Bind(&done);
4564 return;
4565 }
4566
4567 ASSERT(result_cid() == kSmiCid);
4568 Register left = locs()->in(0).reg();
4569 Register right = locs()->in(1).reg();
4570 Register result = locs()->out(0).reg();
4571 __ cmpl(left, right);
4572 ASSERT(result == left);
4573 if (is_min) {
4574 __ cmovgel(result, right);
4575 } else {
4576 __ cmovlessl(result, right);
4577 }
4578}
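// Worked example of the equal case: for min(-0.0, 0.0), comisd reports
// equality, movmskpd extracts the sign bit of left (-0.0 sets bit 0), and
// the NOT_ZERO jump returns left, yielding -0.0 as required. In the Smi
// path the cmov pair makes the same selection branch-free: for min,
// cmovgel overwrites result with right exactly when left >= right.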
4579
4580LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4581 bool opt) const {
4582 const intptr_t kNumInputs = 1;
4583 return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
4584 LocationSummary::kNoCall);
4585}
4586
4587void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4588 Register value = locs()->in(0).reg();
4589 ASSERT(value == locs()->out(0).reg());
4590 switch (op_kind()) {
4591 case Token::kNEGATE: {
4592 compiler::Label* deopt =
4593 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4594 __ negl(value);
4595 __ j(OVERFLOW, deopt);
4596 break;
4597 }
4598 case Token::kBIT_NOT:
4599 __ notl(value);
4600 __ andl(value,
4601 compiler::Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
4602 break;
4603 default:
4604 UNREACHABLE();
4605 }
4606}
4607
4608LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4609 bool opt) const {
4610 const intptr_t kNumInputs = 1;
4611 const intptr_t kNumTemps = 0;
4612 LocationSummary* summary = new (zone)
4613 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4614 summary->set_in(0, Location::RequiresFpuRegister());
4615 summary->set_out(0, Location::SameAsFirstInput());
4616 return summary;
4617}
4618
4619void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4620 ASSERT(representation() == kUnboxedDouble);
4621 XmmRegister value = locs()->in(0).fpu_reg();
4622 ASSERT(locs()->out(0).fpu_reg() == value);
4623 switch (op_kind()) {
4624 case Token::kNEGATE:
4625 __ DoubleNegate(value);
4626 break;
4627 case Token::kSQRT:
4628 __ sqrtsd(value, value);
4629 break;
4630 case Token::kSQUARE:
4631 __ mulsd(value, value);
4632 break;
4633 case Token::kTRUNCATE:
4634 __ roundsd(value, value, compiler::Assembler::kRoundToZero);
4635 break;
4636 case Token::kFLOOR:
4637 __ roundsd(value, value, compiler::Assembler::kRoundDown);
4638 break;
4639 case Token::kCEILING:
4640 __ roundsd(value, value, compiler::Assembler::kRoundUp);
4641 break;
4642 default:
4643 UNREACHABLE();
4644 }
4645}
4646
4647LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4648 bool opt) const {
4649 const intptr_t kNumInputs = 1;
4650 const intptr_t kNumTemps = 0;
4651 LocationSummary* result = new (zone)
4652 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4653 result->set_in(0, Location::RequiresRegister());
4654 result->set_out(0, Location::RequiresFpuRegister());
4655 return result;
4656}
4657
4658void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4659 Register value = locs()->in(0).reg();
4660 FpuRegister result = locs()->out(0).fpu_reg();
4661 __ cvtsi2sd(result, value);
4662}
4663
4664LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4665 bool opt) const {
4666 const intptr_t kNumInputs = 1;
4667 const intptr_t kNumTemps = 0;
4668 LocationSummary* result = new (zone)
4669 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4670 result->set_in(0, Location::WritableRegister());
4671 result->set_out(0, Location::RequiresFpuRegister());
4672 return result;
4673}
4674
4675void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4676 Register value = locs()->in(0).reg();
4677 FpuRegister result = locs()->out(0).fpu_reg();
4678 __ SmiUntag(value);
4679 __ cvtsi2sd(result, value);
4680}
4681
4682LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4683 bool opt) const {
4684 const intptr_t kNumInputs = 1;
4685 const intptr_t kNumTemps = 0;
4686 LocationSummary* result = new (zone)
4687 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4688 result->set_in(0, Location::Pair(Location::RequiresRegister(),
4689 Location::RequiresRegister()));
4690 result->set_out(0, Location::RequiresFpuRegister());
4691 return result;
4692}
4693
4694void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4695 PairLocation* pair = locs()->in(0).AsPairLocation();
4696 Register in_lo = pair->At(0).reg();
4697 Register in_hi = pair->At(1).reg();
4698
4699 FpuRegister result = locs()->out(0).fpu_reg();
4700
4701 // Push hi.
4702 __ pushl(in_hi);
4703 // Push lo.
4704 __ pushl(in_lo);
4705 // Perform conversion from Mint to double.
4706 __ fildl(compiler::Address(ESP, 0));
4707 // Pop FPU stack onto regular stack.
4708 __ fstpl(compiler::Address(ESP, 0));
4709 // Copy into result.
4710 __ movsd(result, compiler::Address(ESP, 0));
4711 // Pop args.
4712 __ addl(ESP, compiler::Immediate(2 * kWordSize));
4713}
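// Despite the 'l' suffix, fildl here loads a 64-bit signed integer onto
// the x87 stack and fstpl stores it back as a double. The detour through
// x87 is needed because ia32 SSE2 has no int64-to-double conversion; the
// two pushes build the little-endian operand in memory where fildl can
// reach it.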
4714
4715LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4716 bool opt) const {
4717 const intptr_t kNumInputs = 1;
4718 const intptr_t kNumTemps = 0;
4719 LocationSummary* result = new (zone) LocationSummary(
4720 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4721 result->set_in(0, Location::RequiresFpuRegister());
4722 result->set_out(0, Location::RequiresRegister());
4723 return result;
4724}
4725
4726void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4727 ASSERT(recognized_kind() == MethodRecognizer::kDoubleToInteger);
4728 const Register result = locs()->out(0).reg();
4729 const XmmRegister value_double = locs()->in(0).fpu_reg();
4730
4731 DoubleToIntegerSlowPath* slow_path =
4732 new DoubleToIntegerSlowPath(this, value_double);
4733 compiler->AddSlowPathCode(slow_path);
4734
4735 __ cvttsd2si(result, value_double);
4736 // Overflow is signalled with minint.
4737 // Check for overflow and that it fits into Smi.
4738 __ cmpl(result, compiler::Immediate(0xC0000000));
4739 __ j(NEGATIVE, slow_path->entry_label());
4740 __ SmiTag(result);
4741 __ Bind(slow_path->exit_label());
4742}
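// The single cmpl doubles as both checks: cvttsd2si returns 0x80000000 on
// any out-of-range input, and result - 0xC0000000 (== result + 0x40000000
// mod 2^32) is negative exactly when result falls outside the Smi range
// [-0x40000000, 0x3FFFFFFF]. So one NEGATIVE test routes both the overflow
// sentinel and too-large finite values to the slow path.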
4743
4744LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4745 bool opt) const {
4746 const intptr_t kNumInputs = 1;
4747 const intptr_t kNumTemps = 0;
4748 LocationSummary* result = new (zone)
4749 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4750 result->set_in(0, Location::RequiresFpuRegister());
4751 result->set_out(0, Location::RequiresRegister());
4752 return result;
4753}
4754
4755void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4756 compiler::Label* deopt =
4757 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4758 Register result = locs()->out(0).reg();
4759 XmmRegister value = locs()->in(0).fpu_reg();
4760 __ cvttsd2si(result, value);
4761 // Check for overflow and that it fits into Smi.
4762 __ cmpl(result, compiler::Immediate(0xC0000000));
4763 __ j(NEGATIVE, deopt);
4764 __ SmiTag(result);
4765}
4766
4767LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4768 bool opt) const {
4769 const intptr_t kNumInputs = 1;
4770 const intptr_t kNumTemps = 0;
4771 LocationSummary* result = new (zone)
4772 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4773 result->set_in(0, Location::RequiresFpuRegister());
4774 result->set_out(0, Location::SameAsFirstInput());
4775 return result;
4776}
4777
4778void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4779 __ cvtsd2ss(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
4780}
4781
4782LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4783 bool opt) const {
4784 const intptr_t kNumInputs = 1;
4785 const intptr_t kNumTemps = 0;
4786 LocationSummary* result = new (zone)
4787 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4788 result->set_in(0, Location::RequiresFpuRegister());
4789 result->set_out(0, Location::SameAsFirstInput());
4790 return result;
4791}
4792
4793void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4794 __ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
4795}
4796
4797LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4798 bool opt) const {
4799 UNREACHABLE();
4800 return NULL;
4801}
4802
4803void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4804 UNREACHABLE();
4805}
4806
4807LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4808 bool opt) const {
4809 ASSERT((InputCount() == 1) || (InputCount() == 2));
4810 const intptr_t kNumTemps =
4811 (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 4 : 1;
4812 LocationSummary* result = new (zone)
4813 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4814 // EDI is chosen because it is callee saved so we do not need to back it
4815 // up before calling into the runtime.
4816 result->set_temp(0, Location::RegisterLocation(EDI));
4817 result->set_in(0, Location::FpuRegisterLocation(XMM1));
4818 if (InputCount() == 2) {
4819 result->set_in(1, Location::FpuRegisterLocation(XMM2));
4820 }
4821 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4822 // Temp index 1.
4823 result->set_temp(1, Location::RegisterLocation(EAX));
4824 // Temp index 2.
4825 result->set_temp(2, Location::FpuRegisterLocation(XMM4));
4826 // We need to block XMM0 for the floating-point calling convention.
4827 result->set_temp(3, Location::FpuRegisterLocation(XMM0));
4828 }
4829 result->set_out(0, Location::FpuRegisterLocation(XMM3));
4830 return result;
4831}
4832
4833// Pseudo code:
4834// if (exponent == 0.0) return 1.0;
4835// // Speed up simple cases.
4836// if (exponent == 1.0) return base;
4837// if (exponent == 2.0) return base * base;
4838// if (exponent == 3.0) return base * base * base;
4839// if (base == 1.0) return 1.0;
4840// if (base.isNaN || exponent.isNaN) {
4841// return double.NAN;
4842// }
4843// if (base != -Infinity && exponent == 0.5) {
4844// if (base == 0.0) return 0.0;
4845// return sqrt(value);
4846// }
4847// TODO(srdjan): Move into a stub?
4848static void InvokeDoublePow(FlowGraphCompiler* compiler,
4849 InvokeMathCFunctionInstr* instr) {
4850 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
4851 const intptr_t kInputCount = 2;
4852 ASSERT(instr->InputCount() == kInputCount);
4853 LocationSummary* locs = instr->locs();
4854
4855 XmmRegister base = locs->in(0).fpu_reg();
4856 XmmRegister exp = locs->in(1).fpu_reg();
4857 XmmRegister result = locs->out(0).fpu_reg();
4858 Register temp = locs->temp(InvokeMathCFunctionInstr::kObjectTempIndex).reg();
4859 XmmRegister zero_temp =
4860 locs->temp(InvokeMathCFunctionInstr::kDoubleTempIndex).fpu_reg();
4861
4862 __ xorps(zero_temp, zero_temp); // 0.0.
4863 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(1.0)));
4864 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4865
4866 compiler::Label check_base, skip_call;
4867 // exponent == 0.0 -> return 1.0;
4868 __ comisd(exp, zero_temp);
4869 __ j(PARITY_EVEN, &check_base);
4870 __ j(EQUAL, &skip_call); // 'result' is 1.0.
4871
4872 // exponent == 1.0 ?
4873 __ comisd(exp, result);
4874 compiler::Label return_base;
4875 __ j(EQUAL, &return_base, compiler::Assembler::kNearJump);
4876
4877 // exponent == 2.0 ?
4878 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(2.0)));
4879 __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset()));
4880 __ comisd(exp, XMM0);
4881 compiler::Label return_base_times_2;
4882 __ j(EQUAL, &return_base_times_2, compiler::Assembler::kNearJump);
4883
4884 // exponent == 3.0 ?
4885 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(3.0)));
4886 __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset()));
4887 __ comisd(exp, XMM0);
4888 __ j(NOT_EQUAL, &check_base);
4889
4890 // Base times 3.
4891 __ movsd(result, base);
4892 __ mulsd(result, base);
4893 __ mulsd(result, base);
4894 __ jmp(&skip_call);
4895
4896 __ Bind(&return_base);
4897 __ movsd(result, base);
4898 __ jmp(&skip_call);
4899
4900 __ Bind(&return_base_times_2);
4901 __ movsd(result, base);
4902 __ mulsd(result, base);
4903 __ jmp(&skip_call);
4904
4905 __ Bind(&check_base);
4906 // Note: 'exp' could be NaN.
4907
4908 // base == 1.0 -> return 1.0;
4909 __ comisd(base, result);
4910 compiler::Label return_nan;
4911 __ j(PARITY_EVEN, &return_nan, compiler::Assembler::kNearJump);
4912 __ j(EQUAL, &skip_call, compiler::Assembler::kNearJump);
4913 // Note: 'base' could be NaN.
4914 __ comisd(exp, base);
4915 // Neither 'exp' nor 'base' is NaN.
4916 compiler::Label try_sqrt;
4917 __ j(PARITY_ODD, &try_sqrt, compiler::Assembler::kNearJump);
4918 // Return NaN.
4919 __ Bind(&return_nan);
4920 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(NAN)));
4921 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4922 __ jmp(&skip_call);
4923
4924 compiler::Label do_pow, return_zero;
4925 __ Bind(&try_sqrt);
4926 // Before calling pow, check if we could use sqrt instead of pow.
4927 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(kNegInfinity)));
4928 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4929 // base == -Infinity -> call pow;
4930 __ comisd(base, result);
4931 __ j(EQUAL, &do_pow, compiler::Assembler::kNearJump);
4932
4933 // exponent == 0.5 ?
4934 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(0.5)));
4935 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4936 __ comisd(exp, result);
4937 __ j(NOT_EQUAL, &do_pow, compiler::Assembler::kNearJump);
4938
4939 // base == 0 -> return 0;
4940 __ comisd(base, zero_temp);
4941 __ j(EQUAL, &return_zero, compiler::Assembler::kNearJump);
4942
4943 __ sqrtsd(result, base);
4944 __ jmp(&skip_call, compiler::Assembler::kNearJump);
4945
4946 __ Bind(&return_zero);
4947 __ movsd(result, zero_temp);
4948 __ jmp(&skip_call);
4949
4950 __ Bind(&do_pow);
4951 {
4952 compiler::LeafRuntimeScope rt(compiler->assembler(),
4953 /*frame_size=*/kDoubleSize * kInputCount,
4954 /*preserve_registers=*/false);
4955 for (intptr_t i = 0; i < kInputCount; i++) {
4956 __ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg());
4957 }
4958 rt.Call(instr->TargetFunction(), kInputCount);
4959 __ fstpl(compiler::Address(ESP, 0));
4960 __ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0));
4961 }
4962 __ Bind(&skip_call);
4963}
4964
4965void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4966 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4967 InvokeDoublePow(compiler, this);
4968 return;
4969 }
4970
4971 {
4972 compiler::LeafRuntimeScope rt(compiler->assembler(),
4973 /*frame_size=*/kDoubleSize * InputCount(),
4974 /*preserve_registers=*/false);
4975 for (intptr_t i = 0; i < InputCount(); i++) {
4976 __ movsd(compiler::Address(ESP, kDoubleSize * i),
4977 locs()->in(i).fpu_reg());
4978 }
4979 rt.Call(TargetFunction(), InputCount());
4980 __ fstpl(compiler::Address(ESP, 0));
4981 __ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0));
4982 }
4983}
4984
4985LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4986 bool opt) const {
4987 // Only use this instruction in optimized code.
4988 ASSERT(opt);
4989 const intptr_t kNumInputs = 1;
4990 LocationSummary* summary =
4991 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4992 if (representation() == kUnboxedDouble) {
4993 if (index() == 0) {
4994 summary->set_in(
4995 0, Location::Pair(Location::RequiresFpuRegister(), Location::Any()));
4996 } else {
4997 ASSERT(index() == 1);
4998 summary->set_in(
4999 0, Location::Pair(Location::Any(), Location::RequiresFpuRegister()));
5000 }
5001 summary->set_out(0, Location::RequiresFpuRegister());
5002 } else {
5003 ASSERT(representation() == kTagged);
5004 if (index() == 0) {
5005 summary->set_in(
5006 0, Location::Pair(Location::RequiresRegister(), Location::Any()));
5007 } else {
5008 ASSERT(index() == 1);
5009 summary->set_in(
5010 0, Location::Pair(Location::Any(), Location::RequiresRegister()));
5011 }
5012 summary->set_out(0, Location::RequiresRegister());
5013 }
5014 return summary;
5015}
5016
5017void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5018 ASSERT(locs()->in(0).IsPairLocation());
5019 PairLocation* pair = locs()->in(0).AsPairLocation();
5020 Location in_loc = pair->At(index());
5021 if (representation() == kUnboxedDouble) {
5022 XmmRegister out = locs()->out(0).fpu_reg();
5023 XmmRegister in = in_loc.fpu_reg();
5024 __ movaps(out, in);
5025 } else {
5026 ASSERT(representation() == kTagged);
5027 Register out = locs()->out(0).reg();
5028 Register in = in_loc.reg();
5029 __ movl(out, in);
5030 }
5031}
5032
5033LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
5034 bool opt) const {
5035 UNREACHABLE();
5036 return NULL;
5037}
5038
5039void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5040 UNREACHABLE();
5041}
5042
5043LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
5044 bool opt) const {
5045 UNREACHABLE();
5046 return NULL;
5047}
5048
5049void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5050 UNREACHABLE();
5051}
5052
5053LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5054 bool opt) const {
5055 const intptr_t kNumInputs = 2;
5056 const intptr_t kNumTemps = 0;
5057 LocationSummary* summary = new (zone)
5058 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5059 // Both inputs must be writable because they will be untagged.
5060 summary->set_in(0, Location::RegisterLocation(EAX));
5061 summary->set_in(1, Location::WritableRegister());
5062 // Output is a pair of registers.
5063 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
5064 Location::RegisterLocation(EDX)));
5065 return summary;
5066}
5067
5068void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5069 ASSERT(CanDeoptimize());
5070 compiler::Label* deopt =
5071 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5072 Register left = locs()->in(0).reg();
5073 Register right = locs()->in(1).reg();
5074 ASSERT(locs()->out(0).IsPairLocation());
5075 PairLocation* pair = locs()->out(0).AsPairLocation();
5076 Register result1 = pair->At(0).reg();
5077 Register result2 = pair->At(1).reg();
5078 if (RangeUtils::CanBeZero(divisor_range())) {
5079 // Handle divide by zero in runtime.
5080 __ testl(right, right);
5081 __ j(ZERO, deopt);
5082 }
5083 ASSERT(left == EAX);
5084 ASSERT((right != EDX) && (right != EAX));
5085 ASSERT(result1 == EAX);
5086 ASSERT(result2 == EDX);
5087 __ SmiUntag(left);
5088 __ SmiUntag(right);
5089 __ cdq(); // Sign extend EAX -> EDX:EAX.
5090 __ idivl(right); // EAX: quotient, EDX: remainder.
5091 // Check the corner case of dividing MIN_SMI by -1, in which case we
5092 // cannot tag the result.
5093 // TODO(srdjan): We could store instead untagged intermediate results in a
5094 // typed array, but then the load indexed instructions would need to be
5095 // able to deoptimize.
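// Worked example of that corner case: on ia32, MIN_SMI is -0x40000000, and
// MIN_SMI ~/ -1 = 0x40000000, which idivl computes without faulting but
// which exceeds kSmiMax (0x3FFFFFFF) and therefore cannot be tagged.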
5096 __ cmpl(EAX, compiler::Immediate(0x40000000));
5097 __ j(EQUAL, deopt);
5098 // Modulo result (EDX) correction:
5099 // res = left % right;
5100 // if (res < 0) {
5101 // if (right < 0) {
5102 // res = res - right;
5103 // } else {
5104 // res = res + right;
5105 // }
5106 // }
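// E.g. left = -7, right = 2: idivl truncates toward zero, giving EAX = -3
// and EDX = -1; since Dart's % result is non-negative, EDX is adjusted to
// -1 + 2 = 1 below.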
5107 compiler::Label done;
5108 __ cmpl(EDX, compiler::Immediate(0));
5109 __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump);
5110 // Result is negative, adjust it.
5111 if (RangeUtils::Overlaps(divisor_range(), -1, 1)) {
5112 compiler::Label subtract;
5113 __ cmpl(right, compiler::Immediate(0));
5114 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
5115 __ addl(EDX, right);
5116 __ jmp(&done, compiler::Assembler::kNearJump);
5117 __ Bind(&subtract);
5118 __ subl(EDX, right);
5119 } else if (divisor_range()->IsPositive()) {
5120 // Right is positive.
5121 __ addl(EDX, right);
5122 } else {
5123 // Right is negative.
5124 __ subl(EDX, right);
5125 }
5126 __ Bind(&done);
5127
5128 __ SmiTag(EAX);
5129 __ SmiTag(EDX);
5130}
5131
5132// Should be kept in sync with integers.cc Multiply64Hash
5133static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5134 const Register value_lo,
5135 const Register value_hi,
5136 const Register temp) {
5137 __ movl(EDX, compiler::Immediate(0x2d51));
5138 __ mull(EDX); // EAX = lo32(value_lo*0x2d51), EDX = carry(value_lo * 0x2d51)
5139 __ movl(temp, EAX); // save prod_lo32
5140 __ movl(EAX, value_hi); // get saved value_hi
5141 __ movl(value_hi, EDX); // save carry
5142 __ movl(EDX, compiler::Immediate(0x2d51));
5143 __ mull(EDX); // EAX = lo32(value_hi * 0x2d51), EDX = carry(value_hi * 0x2d51)
5144 __ addl(EAX, value_hi); // EAX has prod_hi32, EDX has prod_hi64_lo32
5145
5146 __ xorl(EAX, EDX); // EAX = prod_hi32 ^ prod_hi64_lo32
5147 __ xorl(EAX, temp); // result = prod_hi32 ^ prod_hi64_lo32 ^ prod_lo32
5148 __ andl(EAX, compiler::Immediate(0x3fffffff));
5149}
5150
5151LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5152 bool opt) const {
5153 const intptr_t kNumInputs = 1;
5154 const intptr_t kNumTemps = 4;
5155 LocationSummary* summary = new (zone)
5156 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5157 summary->set_in(0, Location::RequiresFpuRegister());
5158 summary->set_temp(0, Location::RequiresRegister());
5159 summary->set_temp(1, Location::RegisterLocation(EBX));
5160 summary->set_temp(2, Location::RegisterLocation(EDX));
5161 summary->set_temp(3, Location::RequiresFpuRegister());
5162 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
5163 Location::RegisterLocation(EDX)));
5164 return summary;
5165}
5166
5167void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5168 const XmmRegister value = locs()->in(0).fpu_reg();
5169 const Register temp = locs()->temp(0).reg();
5170 ASSERT(locs()->temp(1).reg() == EBX);
5171 ASSERT(locs()->temp(2).reg() == EDX);
5172 const XmmRegister temp_double = locs()->temp(3).fpu_reg();
5173 PairLocation* result_pair = locs()->out(0).AsPairLocation();
5174 ASSERT(result_pair->At(0).reg() == EAX);
5175 ASSERT(result_pair->At(1).reg() == EDX);
5176
5177 // If either NaN or infinity, do hash double.
5178 compiler::Label hash_double, try_convert;
5179
5180 // Extract the high 32 bits of the double value.
5181 if (TargetCPUFeatures::sse4_1_supported()) {
5182 __ pextrd(temp, value, compiler::Immediate(1));
5183 } else {
5184 __ SubImmediate(ESP, compiler::Immediate(kDoubleSize));
5185 __ movsd(compiler::Address(ESP, 0), value);
5186 __ movl(temp, compiler::Address(ESP, kWordSize));
5187 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
5188 }
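// Bits 20..30 of the high word hold the IEEE-754 exponent; when all eleven
// bits are set the value is an infinity or a NaN, which is what the mask
// and compare below detect.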
5189 __ andl(temp, compiler::Immediate(0x7FF00000));
5190 __ cmpl(temp, compiler::Immediate(0x7FF00000));
5191 __ j(EQUAL, &hash_double); // is infinity or nan
5192
5193 compiler::Label slow_path;
5194 __ Bind(&try_convert);
5195 __ cvttsd2si(EAX, value);
5196 // Overflow is signaled with minint.
5197 __ cmpl(EAX, compiler::Immediate(0x80000000));
5198 __ j(EQUAL, &slow_path);
5199 __ cvtsi2sd(temp_double, EAX);
5200 __ comisd(value, temp_double);
5201 __ j(NOT_EQUAL, &hash_double);
5202 __ cdq(); // sign-extend EAX to EDX
5203 __ movl(temp, EDX);
5204
5205 compiler::Label hash_integer, done;
5206 // integer hash for (temp:EAX)
5207 __ Bind(&hash_integer);
5208 EmitHashIntegerCodeSequence(compiler, EAX, temp, EBX);
5209 __ jmp(&done);
5210
5211 __ Bind(&slow_path);
5212 // The double value potentially doesn't fit into the Smi range, so do
5213 // the double->int64->double round trip via a runtime call.
5214 __ StoreUnboxedDouble(value, THR,
5215 compiler::target::Thread::unboxed_runtime_arg_offset());
5216 {
5217 compiler::LeafRuntimeScope rt(
5218 compiler->assembler(),
5219 /*frame_size=*/1 * compiler::target::kWordSize,
5220 /*preserve_registers=*/true);
5221 __ movl(compiler::Address(ESP, 0 * compiler::target::kWordSize), THR);
5222 // Check if double can be represented as int64, load it into (temp:EAX) if
5223 // it can.
5224 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
5225 __ movl(EBX, EAX); // use non-volatile register to carry value out.
5226 }
5227 __ orl(EBX, EBX);
5228 __ j(ZERO, &hash_double);
5229 __ movl(EAX,
5230 compiler::Address(
5231 THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
5232 __ movl(temp,
5233 compiler::Address(
5234 THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
5235 kWordSize));
5236 __ jmp(&hash_integer);
5237
5238 __ Bind(&hash_double);
5239 if (TargetCPUFeatures::sse4_1_supported()) {
5240 __ pextrd(EAX, value, compiler::Immediate(0));
5241 __ pextrd(temp, value, compiler::Immediate(1));
5242 } else {
5243 __ SubImmediate(ESP, compiler::Immediate(kDoubleSize));
5244 __ movsd(compiler::Address(ESP, 0), value);
5245 __ movl(EAX, compiler::Address(ESP, 0));
5246 __ movl(temp, compiler::Address(ESP, kWordSize));
5247 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
5248 }
5249 __ xorl(EAX, temp);
5250 __ andl(EAX, compiler::Immediate(compiler::target::kSmiMax));
5251
5252 __ Bind(&done);
5253 __ xorl(EDX, EDX);
5254}
5255
5256LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5257 bool opt) const {
5258 const intptr_t kNumInputs = 1;
5259 const intptr_t kNumTemps = 3;
5260 LocationSummary* summary = new (zone)
5261 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5262 summary->set_in(0, Location::RegisterLocation(EAX));
5263 summary->set_out(0, Location::SameAsFirstInput());
5264 summary->set_temp(0, Location::RequiresRegister());
5265 summary->set_temp(1, Location::RequiresRegister());
5266 summary->set_temp(2, Location::RegisterLocation(EDX));
5267 return summary;
5268}
5269
5270void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5271 Register value = locs()->in(0).reg();
5272 Register result = locs()->out(0).reg();
5273 Register temp = locs()->temp(0).reg();
5274 Register temp1 = locs()->temp(1).reg();
5275 ASSERT(value == EAX);
5276 ASSERT(result == EAX);
5277
5278 if (smi_) {
5279 __ SmiUntag(EAX);
5280 __ cdq(); // sign-extend EAX to EDX
5281 __ movl(temp, EDX);
5282 } else {
5283 __ LoadFieldFromOffset(temp, EAX,
5284 Mint::value_offset() + compiler::target::kWordSize);
5285 __ LoadFieldFromOffset(EAX, EAX, Mint::value_offset());
5286 }
5287
5288 // value = value_hi << 32 + value_lo
5289 //
5290 // value * 0x2d51 = (value_hi * 0x2d51) << 32 + value_lo * 0x2d51
5291 // prod_lo32 = value_lo * 0x2d51
5292 // prod_hi32 = carry(value_lo * 0x2d51) + value_hi * 0x2d51
5293 // prod_lo64 = prod_hi32 << 32 + prod_lo32
5294 // prod_hi64_lo32 = carry(value_hi * 0x2d51)
5295 // result = prod_lo32 ^ prod_hi32 ^ prod_hi64_lo32
5296 // return result & 0x3fffffff
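// E.g. value = 5 (value_lo = 5, value_hi = 0): prod_lo32 = 5 * 0x2d51 =
// 0xe295 with no carry, and prod_hi32 = prod_hi64_lo32 = 0, so the hash is
// 0xe295 ^ 0 ^ 0 = 0xe295.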
5297
5298 // EAX has value_lo
5299 EmitHashIntegerCodeSequence(compiler, EAX, temp, temp1);
5300 __ SmiTag(EAX);
5301}
5302
5303LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5304 comparison()->InitializeLocationSummary(zone, opt);
5305 // Branches don't produce a result.
5306 comparison()->locs()->set_out(0, Location::NoLocation());
5307 return comparison()->locs();
5308}
5309
5310void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5311 comparison()->EmitBranchCode(compiler, this);
5312}
5313
5314LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5315 bool opt) const {
5316 const intptr_t kNumInputs = 1;
5317 const bool need_mask_temp = IsBitTest();
5318 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5319 LocationSummary* summary = new (zone)
5320 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5321 summary->set_in(0, Location::RequiresRegister());
5322 if (!IsNullCheck()) {
5323 summary->set_temp(0, Location::RequiresRegister());
5324 if (need_mask_temp) {
5325 summary->set_temp(1, Location::RequiresRegister());
5326 }
5327 }
5328 return summary;
5329}
5330
5331void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5332 compiler::Label* deopt) {
5333 const compiler::Immediate& raw_null =
5334 compiler::Immediate(static_cast<intptr_t>(Object::null()));
5335 __ cmpl(locs()->in(0).reg(), raw_null);
5336 ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull());
5337 Condition cond = IsDeoptIfNull() ? EQUAL : NOT_EQUAL;
5338 __ j(cond, deopt);
5339}
5340
5341void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5342 intptr_t min,
5343 intptr_t max,
5344 intptr_t mask,
5345 compiler::Label* deopt) {
5346 Register biased_cid = locs()->temp(0).reg();
5347 __ subl(biased_cid, compiler::Immediate(min));
5348 __ cmpl(biased_cid, compiler::Immediate(max - min));
5349 __ j(ABOVE, deopt);
5350
5351 Register mask_reg = locs()->temp(1).reg();
5352 __ movl(mask_reg, compiler::Immediate(mask));
5353 __ bt(mask_reg, biased_cid);
5354 __ j(NOT_CARRY, deopt);
5355}
5356
5357int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5358 int bias,
5359 intptr_t cid_start,
5360 intptr_t cid_end,
5361 bool is_last,
5362 compiler::Label* is_ok,
5363 compiler::Label* deopt,
5364 bool use_near_jump) {
5365 Register biased_cid = locs()->temp(0).reg();
5366 Condition no_match, match;
5367 if (cid_start == cid_end) {
5368 __ cmpl(biased_cid, compiler::Immediate(cid_start - bias));
5369 no_match = NOT_EQUAL;
5370 match = EQUAL;
5371 } else {
5372 // For class ID ranges use a subtract followed by an unsigned
5373 // comparison to check both ends of the ranges with one comparison.
5374 __ addl(biased_cid, compiler::Immediate(bias - cid_start));
5375 bias = cid_start;
5376 __ cmpl(biased_cid, compiler::Immediate(cid_end - cid_start));
5377 no_match = ABOVE;
5378 match = BELOW_EQUAL;
5379 }
5380
5381 if (is_last) {
5382 __ j(no_match, deopt);
5383 } else {
5384 if (use_near_jump) {
5385 __ j(match, is_ok, compiler::Assembler::kNearJump);
5386 } else {
5387 __ j(match, is_ok);
5388 }
5389 }
5390 return bias;
5391}
5392
5393LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5394 bool opt) const {
5395 const intptr_t kNumInputs = 1;
5396 const intptr_t kNumTemps = 0;
5397 LocationSummary* summary = new (zone)
5398 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5399 summary->set_in(0, Location::RequiresRegister());
5400 return summary;
5401}
5402
5403void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5404 Register value = locs()->in(0).reg();
5405 compiler::Label* deopt =
5406 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5407 __ BranchIfNotSmi(value, deopt);
5408}
5409
5410void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5411 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5412 compiler->AddSlowPathCode(slow_path);
5413
5414 Register value_reg = locs()->in(0).reg();
5415 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5416 // in order to be able to allocate it on register.
5417 __ CompareObject(value_reg, Object::null_object());
5418 __ BranchIf(EQUAL, slow_path->entry_label());
5419}
5420
5421LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5422 bool opt) const {
5423 const intptr_t kNumInputs = 1;
5424 const intptr_t kNumTemps = 0;
5425 LocationSummary* summary = new (zone)
5426 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5427 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5428 : Location::WritableRegister());
5429 return summary;
5430}
5431
5432void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5433 Register value = locs()->in(0).reg();
5434 compiler::Label* deopt =
5435 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5436 if (cids_.IsSingleCid()) {
5437 __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.cid_start)));
5438 __ j(NOT_ZERO, deopt);
5439 } else {
5440 __ AddImmediate(value,
5441 compiler::Immediate(-Smi::RawValue(cids_.cid_start)));
5442 __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.Extent())));
5443 __ j(ABOVE, deopt);
5444 }
5445}
5446
5447// Length: register or constant.
5448// Index: register, constant or stack slot.
5449LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5450 bool opt) const {
5451 const intptr_t kNumInputs = 2;
5452 const intptr_t kNumTemps = 0;
5453 LocationSummary* locs = new (zone)
5454 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5455 if (length()->definition()->IsConstant()) {
5456 locs->set_in(kLengthPos, LocationRegisterOrSmiConstant(length()));
5457 } else {
5458 locs->set_in(kLengthPos, Location::PrefersRegister());
5459 }
5460 locs->set_in(kIndexPos, LocationRegisterOrSmiConstant(index()));
5461 return locs;
5462}
5463
5464void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5465 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5466 compiler::Label* deopt =
5467 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5468
5469 Location length_loc = locs()->in(kLengthPos);
5470 Location index_loc = locs()->in(kIndexPos);
5471
5472 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5473 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5474 Smi::Cast(index_loc.constant()).Value()) ||
5475 (Smi::Cast(index_loc.constant()).Value() < 0));
5476 // Unconditionally deoptimize for constant bounds checks because they
5477 // only occur only when index is out-of-bounds.
5478 __ jmp(deopt);
5479 return;
5480 }
5481
5482 const intptr_t index_cid = index()->Type()->ToCid();
5483 if (length_loc.IsConstant()) {
5484 Register index = index_loc.reg();
5485 if (index_cid != kSmiCid) {
5486 __ BranchIfNotSmi(index, deopt);
5487 }
5488 const Smi& length = Smi::Cast(length_loc.constant());
5489 if (length.Value() == Smi::kMaxValue) {
5490 __ testl(index, index);
5491 __ j(NEGATIVE, deopt);
5492 } else {
5493 __ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.ptr())));
5494 __ j(ABOVE_EQUAL, deopt);
5495 }
5496 } else if (index_loc.IsConstant()) {
5497 const Smi& index = Smi::Cast(index_loc.constant());
5498 if (length_loc.IsStackSlot()) {
5499 const compiler::Address& length = LocationToStackSlotAddress(length_loc);
5500 __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
5501 } else {
5502 Register length = length_loc.reg();
5503 __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
5504 }
5505 __ j(BELOW_EQUAL, deopt);
5506 } else if (length_loc.IsStackSlot()) {
5507 Register index = index_loc.reg();
5508 const compiler::Address& length = LocationToStackSlotAddress(length_loc);
5509 if (index_cid != kSmiCid) {
5510 __ BranchIfNotSmi(index, deopt);
5511 }
5512 __ cmpl(index, length);
5513 __ j(ABOVE_EQUAL, deopt);
5514 } else {
5515 Register index = index_loc.reg();
5516 Register length = length_loc.reg();
5517 if (index_cid != kSmiCid) {
5518 __ BranchIfNotSmi(index, deopt);
5519 }
5520 __ cmpl(length, index);
5521 __ j(BELOW_EQUAL, deopt);
5522 }
5523}
5524
5525LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5526 bool opt) const {
5527 const intptr_t kNumInputs = 1;
5528 const intptr_t kNumTemps = 1;
5529 LocationSummary* locs = new (zone) LocationSummary(
5530 zone, kNumInputs, kNumTemps,
5531 UseSharedSlowPathStub(opt) ? LocationSummary::kCallOnSharedSlowPath
5532 : LocationSummary::kCallOnSlowPath);
5533 locs->set_in(kReceiver, Location::RequiresRegister());
5534 locs->set_temp(0, Location::RequiresRegister());
5535 return locs;
5536}
5537
5538void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5539 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5540 const Register temp = locs()->temp(0).reg();
5541 compiler->AddSlowPathCode(slow_path);
5542 __ movl(temp,
5543 compiler::FieldAddress(locs()->in(0).reg(),
5544 compiler::target::Object::tags_offset()));
5545 __ testl(temp, compiler::Immediate(
5546 1 << compiler::target::UntaggedObject::kImmutableBit));
5547 __ j(NOT_ZERO, slow_path->entry_label());
5548}
5549
5550LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5551 bool opt) const {
5552 const intptr_t kNumInputs = 2;
5553 switch (op_kind()) {
5554 case Token::kBIT_AND:
5555 case Token::kBIT_OR:
5556 case Token::kBIT_XOR:
5557 case Token::kADD:
5558 case Token::kSUB: {
5559 const intptr_t kNumTemps = 0;
5560 LocationSummary* summary = new (zone) LocationSummary(
5561 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5562 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5563 Location::RequiresRegister()));
5564 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
5565 Location::RequiresRegister()));
5566 summary->set_out(0, Location::SameAsFirstInput());
5567 return summary;
5568 }
5569 case Token::kMUL: {
5570 const intptr_t kNumTemps = 1;
5571 LocationSummary* summary = new (zone) LocationSummary(
5572 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5573 summary->set_in(0, Location::Pair(Location::RegisterLocation(EAX),
5574 Location::RegisterLocation(EDX)));
5575 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
5576 Location::RequiresRegister()));
5577 summary->set_out(0, Location::SameAsFirstInput());
5578 summary->set_temp(0, Location::RequiresRegister());
5579 return summary;
5580 }
5581 default:
5582 UNREACHABLE();
5583 return nullptr;
5584 }
5585}
5586
5587void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5588 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5589 Register left_lo = left_pair->At(0).reg();
5590 Register left_hi = left_pair->At(1).reg();
5591 PairLocation* right_pair = locs()->in(1).AsPairLocation();
5592 Register right_lo = right_pair->At(0).reg();
5593 Register right_hi = right_pair->At(1).reg();
5594 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5595 Register out_lo = out_pair->At(0).reg();
5596 Register out_hi = out_pair->At(1).reg();
5597 ASSERT(out_lo == left_lo);
5598 ASSERT(out_hi == left_hi);
5599 ASSERT(!can_overflow());
5600 ASSERT(!CanDeoptimize());
5601
5602 switch (op_kind()) {
5603 case Token::kBIT_AND:
5604 __ andl(left_lo, right_lo);
5605 __ andl(left_hi, right_hi);
5606 break;
5607 case Token::kBIT_OR:
5608 __ orl(left_lo, right_lo);
5609 __ orl(left_hi, right_hi);
5610 break;
5611 case Token::kBIT_XOR:
5612 __ xorl(left_lo, right_lo);
5613 __ xorl(left_hi, right_hi);
5614 break;
5615 case Token::kADD:
5616 case Token::kSUB: {
5617 if (op_kind() == Token::kADD) {
5618 __ addl(left_lo, right_lo);
5619 __ adcl(left_hi, right_hi);
5620 } else {
5621 __ subl(left_lo, right_lo);
5622 __ sbbl(left_hi, right_hi);
5623 }
5624 break;
5625 }
5626 case Token::kMUL: {
5627 // Compute 64-bit a * b as:
5628 // a_l * b_l + (a_h * b_l + a_l * b_h) << 32
5629 // Since we requested EDX:EAX for in and out,
5630 // we can use these as scratch registers once
5631 // input has been consumed.
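// E.g. a = b = 2^20: both cross terms a_h * b_l and a_l * b_h are zero,
// and mull leaves a_l * b_l = 2^40 in EDX:EAX as 0x100:0x00000000.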
5632 Register temp = locs()->temp(0).reg();
5633 __ movl(temp, left_lo);
5634 __ imull(left_hi, right_lo); // a_h * b_l
5635 __ imull(temp, right_hi); // a_l * b_h
5636 __ addl(temp, left_hi); // sum_high
5637 ASSERT(left_lo == EAX);
5638 __ mull(right_lo); // a_l * b_l in EDX:EAX
5639 __ addl(EDX, temp); // add sum_high
5640 ASSERT(out_lo == EAX);
5641 ASSERT(out_hi == EDX);
5642 break;
5643 }
5644 default:
5645 UNREACHABLE();
5646 }
5647}
5648
5649static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5650 Token::Kind op_kind,
5651 Register left_lo,
5652 Register left_hi,
5653 const Object& right) {
5654 const int64_t shift = Integer::Cast(right).AsInt64Value();
5655 ASSERT(shift >= 0);
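// For shifts of 32 or more, each case below first moves one half into the
// other (a shift by exactly 32) and then applies the remaining shift - 32
// bits; e.g. an arithmetic shift right by 40 copies left_hi into left_lo
// and shifts it by 8, sign-filling left_hi.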
5656 switch (op_kind) {
5657 case Token::kSHR: {
5658 if (shift > 31) {
5659 __ movl(left_lo, left_hi); // Shift by 32.
5660 __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi.
5661 if (shift > 32) {
5662 __ sarl(left_lo, compiler::Immediate(shift > 63 ? 31 : shift - 32));
5663 }
5664 } else {
5665 __ shrdl(left_lo, left_hi, compiler::Immediate(shift));
5666 __ sarl(left_hi, compiler::Immediate(shift));
5667 }
5668 break;
5669 }
5670 case Token::kUSHR: {
5671 ASSERT(shift < 64);
5672 if (shift > 31) {
5673 __ movl(left_lo, left_hi); // Shift by 32.
5674 __ xorl(left_hi, left_hi); // Zero extend left hi.
5675 if (shift > 32) {
5676 __ shrl(left_lo, compiler::Immediate(shift - 32));
5677 }
5678 } else {
5679 __ shrdl(left_lo, left_hi, compiler::Immediate(shift));
5680 __ shrl(left_hi, compiler::Immediate(shift));
5681 }
5682 break;
5683 }
5684 case Token::kSHL: {
5685 ASSERT(shift < 64);
5686 if (shift > 31) {
5687 __ movl(left_hi, left_lo); // Shift by 32.
5688 __ xorl(left_lo, left_lo); // Zero left_lo.
5689 if (shift > 32) {
5690 __ shll(left_hi, compiler::Immediate(shift - 32));
5691 }
5692 } else {
5693 __ shldl(left_hi, left_lo, compiler::Immediate(shift));
5694 __ shll(left_lo, compiler::Immediate(shift));
5695 }
5696 break;
5697 }
5698 default:
5699 UNREACHABLE();
5700 }
5701}
5702
5703static void EmitShiftInt64ByECX(FlowGraphCompiler* compiler,
5704 Token::Kind op_kind,
5705 Register left_lo,
5706 Register left_hi) {
5707 // The sarl operation masks the shift count to 5 bits, and shrdl is
5708 // undefined for counts > the operand size (32).
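// E.g. a shift count of 40 takes the large_shift path: the halves are
// swapped first, and the remaining shift by CL relies on the hardware
// masking the count, 40 & 31 == 8.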
5709 compiler::Label done, large_shift;
5710 switch (op_kind) {
5711 case Token::kSHR: {
5712 __ cmpl(ECX, compiler::Immediate(31));
5713 __ j(ABOVE, &large_shift);
5714
5715 __ shrdl(left_lo, left_hi, ECX); // Shift count in CL.
5716 __ sarl(left_hi, ECX); // Shift count in CL.
5717 __ jmp(&done, compiler::Assembler::kNearJump);
5718
5719 __ Bind(&large_shift);
5720 // No need to subtract 32 from CL, only 5 bits used by sarl.
5721 __ movl(left_lo, left_hi); // Shift by 32.
5722 __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi.
5723 __ sarl(left_lo, ECX); // Shift count: CL % 32.
5724 break;
5725 }
5726 case Token::kUSHR: {
5727 __ cmpl(ECX, compiler::Immediate(31));
5728 __ j(ABOVE, &large_shift);
5729
5730 __ shrdl(left_lo, left_hi, ECX); // Shift count in CL.
5731 __ shrl(left_hi, ECX); // Shift count in CL.
5732 __ jmp(&done, compiler::Assembler::kNearJump);
5733
5734 __ Bind(&large_shift);
5735 // No need to subtract 32 from CL, only 5 bits used by shrl.
5736 __ movl(left_lo, left_hi); // Shift by 32.
5737 __ xorl(left_hi, left_hi); // Zero extend left hi.
5738 __ shrl(left_lo, ECX); // Shift count: CL % 32.
5739 break;
5740 }
5741 case Token::kSHL: {
5742 __ cmpl(ECX, compiler::Immediate(31));
5743 __ j(ABOVE, &large_shift);
5744
5745 __ shldl(left_hi, left_lo, ECX); // Shift count in CL.
5746 __ shll(left_lo, ECX); // Shift count in CL.
5747 __ jmp(&done, compiler::Assembler::kNearJump);
5748
5749 __ Bind(&large_shift);
5750 // No need to subtract 32 from CL, only 5 bits used by shll.
5751 __ movl(left_hi, left_lo); // Shift by 32.
5752 __ xorl(left_lo, left_lo); // Zero left_lo.
5753 __ shll(left_hi, ECX); // Shift count: CL % 32.
5754 break;
5755 }
5756 default:
5757 UNREACHABLE();
5758 }
5759 __ Bind(&done);
5760}
5761
5762static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
5763 Token::Kind op_kind,
5764 Register left,
5765 const Object& right) {
5766 const int64_t shift = Integer::Cast(right).AsInt64Value();
5767 if (shift >= 32) {
5768 __ xorl(left, left);
5769 } else {
5770 switch (op_kind) {
5771 case Token::kSHR:
5772 case Token::kUSHR: {
5773 __ shrl(left, compiler::Immediate(shift));
5774 break;
5775 }
5776 case Token::kSHL: {
5777 __ shll(left, compiler::Immediate(shift));
5778 break;
5779 }
5780 default:
5781 UNREACHABLE();
5782 }
5783 }
5784}
5785
5786static void EmitShiftUint32ByECX(FlowGraphCompiler* compiler,
5787 Token::Kind op_kind,
5788 Register left) {
5789 switch (op_kind) {
5790 case Token::kSHR:
5791 case Token::kUSHR: {
5792 __ shrl(left, ECX);
5793 break;
5794 }
5795 case Token::kSHL: {
5796 __ shll(left, ECX);
5797 break;
5798 }
5799 default:
5800 UNREACHABLE();
5801 }
5802}
5803
5804class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
5805 public:
5806 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
5807 : ThrowErrorSlowPathCode(instruction,
5808 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5809
5810 const char* name() override { return "int64 shift"; }
5811
5812 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5813 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
5814 Register right_lo = right_pair->At(0).reg();
5815 Register right_hi = right_pair->At(1).reg();
5816 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
5817 Register out_lo = out_pair->At(0).reg();
5818 Register out_hi = out_pair->At(1).reg();
5819#if defined(DEBUG)
5820 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
5821 Register left_lo = left_pair->At(0).reg();
5822 Register left_hi = left_pair->At(1).reg();
5823 ASSERT(out_lo == left_lo);
5824 ASSERT(out_hi == left_hi);
5825#endif // defined(DEBUG)
5826
5827 compiler::Label throw_error;
5828 __ testl(right_hi, right_hi);
5829 __ j(NEGATIVE, &throw_error);
5830
5831 switch (instruction()->AsShiftInt64Op()->op_kind()) {
5832 case Token::kSHR:
5833 __ sarl(out_hi, compiler::Immediate(31));
5834 __ movl(out_lo, out_hi);
5835 break;
5836 case Token::kUSHR:
5837 case Token::kSHL: {
5838 __ xorl(out_lo, out_lo);
5839 __ xorl(out_hi, out_hi);
5840 break;
5841 }
5842 default:
5843 UNREACHABLE();
5844 }
5845 __ jmp(exit_label());
5846
5847 __ Bind(&throw_error);
5848
5849 // Can't pass unboxed int64 value directly to runtime call, as all
5850 // arguments are expected to be tagged (boxed).
5851 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5852 // TODO(dartbug.com/33549): Clean this up when unboxed values
5853 // could be passed as arguments.
5854 __ movl(compiler::Address(
5855 THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
5856 right_lo);
5857 __ movl(compiler::Address(
5858 THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
5859 kWordSize),
5860 right_hi);
5861 }
5862};
5863
5864LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
5865 bool opt) const {
5866 const intptr_t kNumInputs = 2;
5867 const intptr_t kNumTemps = 0;
5868 LocationSummary* summary = new (zone) LocationSummary(
5869 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5870 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5871 Location::RequiresRegister()));
5872 if (RangeUtils::IsPositive(shift_range()) &&
5873 right()->definition()->IsConstant()) {
5874 ConstantInstr* constant = right()->definition()->AsConstant();
5875 summary->set_in(1, Location::Constant(constant));
5876 } else {
5877 summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
5878 Location::RequiresRegister()));
5879 }
5880 summary->set_out(0, Location::SameAsFirstInput());
5881 return summary;
5882}
5883
5884void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5885 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5886 Register left_lo = left_pair->At(0).reg();
5887 Register left_hi = left_pair->At(1).reg();
5888 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5889 Register out_lo = out_pair->At(0).reg();
5890 Register out_hi = out_pair->At(1).reg();
5891 ASSERT(out_lo == left_lo);
5892 ASSERT(out_hi == left_hi);
5893 ASSERT(!can_overflow());
5894
5895 if (locs()->in(1).IsConstant()) {
5896 EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
5897 locs()->in(1).constant());
5898 } else {
5899 // Code for a variable shift amount (or constant that throws).
5900 ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
5901 Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
5902
5903 // Jump to a slow path if shift count is > 63 or negative.
5904 ShiftInt64OpSlowPath* slow_path = nullptr;
5905 if (!IsShiftCountInRange()) {
5906 slow_path = new (Z) ShiftInt64OpSlowPath(this);
5907 compiler->AddSlowPathCode(slow_path);
5908 __ testl(right_hi, right_hi);
5909 __ j(NOT_ZERO, slow_path->entry_label());
5910 __ cmpl(ECX, compiler::Immediate(kShiftCountLimit));
5911 __ j(ABOVE, slow_path->entry_label());
5912 }
5913
5914 EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
5915
5916 if (slow_path != nullptr) {
5917 __ Bind(slow_path->exit_label());
5918 }
5919 }
5920}
5921
5922LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
5923 Zone* zone,
5924 bool opt) const {
5925 const intptr_t kNumInputs = 2;
5926 const intptr_t kNumTemps = 0;
5927 LocationSummary* summary = new (zone)
5928 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5929 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5930 Location::RequiresRegister()));
5931 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
5932 summary->set_out(0, Location::SameAsFirstInput());
5933 return summary;
5934}
5935
5936void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5937 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5938 Register left_lo = left_pair->At(0).reg();
5939 Register left_hi = left_pair->At(1).reg();
5940 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5941 Register out_lo = out_pair->At(0).reg();
5942 Register out_hi = out_pair->At(1).reg();
5943 ASSERT(out_lo == left_lo);
5944 ASSERT(out_hi == left_hi);
5945 ASSERT(!can_overflow());
5946
5947 if (locs()->in(1).IsConstant()) {
5948 EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
5949 locs()->in(1).constant());
5950 } else {
5951 ASSERT(locs()->in(1).reg() == ECX);
5952 __ SmiUntag(ECX);
5953
5954 // Deoptimize if shift count is > 63 or negative (or not a smi).
5955 if (!IsShiftCountInRange()) {
5956 ASSERT(CanDeoptimize());
5957 compiler::Label* deopt =
5958 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5959 __ cmpl(ECX, compiler::Immediate(kShiftCountLimit));
5960 __ j(ABOVE, deopt);
5961 }
5962
5963 EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
5964 }
5965}
5966
5967class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
5968 public:
5969 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
5970 : ThrowErrorSlowPathCode(instruction,
5971 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5972
5973 const char* name() override { return "uint32 shift"; }
5974
5975 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5976 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
5977 Register right_lo = right_pair->At(0).reg();
5978 Register right_hi = right_pair->At(1).reg();
5979 const Register out = instruction()->locs()->out(0).reg();
5980 ASSERT(out == instruction()->locs()->in(0).reg());
5981
5982 compiler::Label throw_error;
5983 __ testl(right_hi, right_hi);
5984 __ j(NEGATIVE, &throw_error);
5985
5986 __ xorl(out, out);
5987 __ jmp(exit_label());
5988
5989 __ Bind(&throw_error);
5990
5991 // Can't pass unboxed int64 value directly to runtime call, as all
5992 // arguments are expected to be tagged (boxed).
5993 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5994 // TODO(dartbug.com/33549): Clean this up when unboxed values
5995 // could be passed as arguments.
5996 __ movl(compiler::Address(
5997 THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
5998 right_lo);
5999 __ movl(compiler::Address(
6000 THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
6001 kWordSize),
6002 right_hi);
6003 }
6004};
6005
6006LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
6007 bool opt) const {
6008 const intptr_t kNumInputs = 2;
6009 const intptr_t kNumTemps = 0;
6010 LocationSummary* summary = new (zone) LocationSummary(
6011 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
6012 summary->set_in(0, Location::RequiresRegister());
6013 if (RangeUtils::IsPositive(shift_range()) &&
6014 right()->definition()->IsConstant()) {
6015 ConstantInstr* constant = right()->definition()->AsConstant();
6016 summary->set_in(1, Location::Constant(constant));
6017 } else {
6018 summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
6019 Location::RequiresRegister()));
6020 }
6021 summary->set_out(0, Location::SameAsFirstInput());
6022 return summary;
6023}
6024
6025void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6026 Register left = locs()->in(0).reg();
6027 Register out = locs()->out(0).reg();
6028 ASSERT(left == out);
6029
6030 if (locs()->in(1).IsConstant()) {
6031 EmitShiftUint32ByConstant(compiler, op_kind(), left,
6032 locs()->in(1).constant());
6033 } else {
6034 // Code for a variable shift amount (or constant that throws).
6035 ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
6036 Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
6037
6038 // Jump to a slow path if shift count is > 31 or negative.
6039 ShiftUint32OpSlowPath* slow_path = nullptr;
6040 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6041 slow_path = new (Z) ShiftUint32OpSlowPath(this);
6042 compiler->AddSlowPathCode(slow_path);
6043
6044 __ testl(right_hi, right_hi);
6045 __ j(NOT_ZERO, slow_path->entry_label());
6046 __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit));
6047 __ j(ABOVE, slow_path->entry_label());
6048 }
6049
6050 EmitShiftUint32ByECX(compiler, op_kind(), left);
6051
6052 if (slow_path != nullptr) {
6053 __ Bind(slow_path->exit_label());
6054 }
6055 }
6056}
6057
6058LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6059 Zone* zone,
6060 bool opt) const {
6061 const intptr_t kNumInputs = 2;
6062 const intptr_t kNumTemps = 0;
6063 LocationSummary* summary = new (zone)
6064 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6065 summary->set_in(0, Location::RequiresRegister());
6066 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
6067 summary->set_out(0, Location::SameAsFirstInput());
6068 return summary;
6069}
6070
6071void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6072 FlowGraphCompiler* compiler) {
6073 Register left = locs()->in(0).reg();
6074 Register out = locs()->out(0).reg();
6075 ASSERT(left == out);
6076
6077 if (locs()->in(1).IsConstant()) {
6078 EmitShiftUint32ByConstant(compiler, op_kind(), left,
6079 locs()->in(1).constant());
6080 } else {
6081 ASSERT(locs()->in(1).reg() == ECX);
6082 __ SmiUntag(ECX);
6083
6084 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6085 if (!IsShiftCountInRange()) {
6086 // Deoptimize if shift count is negative.
6087 ASSERT(CanDeoptimize());
6088 compiler::Label* deopt =
6089 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6090
6091 __ testl(ECX, ECX);
6092 __ j(LESS, deopt);
6093 }
6094
6095 compiler::Label cont;
6096 __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit));
6097 __ j(LESS_EQUAL, &cont);
6098
6099 __ xorl(left, left);
6100
6101 __ Bind(&cont);
6102 }
6103
6104 EmitShiftUint32ByECX(compiler, op_kind(), left);
6105 }
6106}
6107
6108LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6109 bool opt) const {
6110 const intptr_t kNumInputs = 1;
6111 const intptr_t kNumTemps = 0;
6112 LocationSummary* summary = new (zone)
6113 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6114 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6115 Location::RequiresRegister()));
6116 summary->set_out(0, Location::SameAsFirstInput());
6117 return summary;
6118}
6119
6120void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6121 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6122 Register left_lo = left_pair->At(0).reg();
6123 Register left_hi = left_pair->At(1).reg();
6124 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6125 Register out_lo = out_pair->At(0).reg();
6126 Register out_hi = out_pair->At(1).reg();
6127 ASSERT(out_lo == left_lo);
6128 ASSERT(out_hi == left_hi);
6129 switch (op_kind()) {
6130 case Token::kBIT_NOT:
6131 __ notl(left_lo);
6132 __ notl(left_hi);
6133 break;
6134 case Token::kNEGATE:
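// 64-bit negation: negl sets the carry flag iff left_lo != 0, so the
// adcl/negl pair computes -(left_hi + carry), the borrow-propagated
// two's complement of the high word.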
6135 __ negl(left_lo);
6136 __ adcl(left_hi, compiler::Immediate(0));
6137 __ negl(left_hi);
6138 break;
6139 default:
6140 UNREACHABLE();
6141 }
6142}
6143
6144LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6145 bool opt) const {
6146 const intptr_t kNumInputs = 1;
6147 const intptr_t kNumTemps = 0;
6148 LocationSummary* summary = new (zone)
6149 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6150 summary->set_in(0, Location::RequiresRegister());
6151 summary->set_out(0, Location::SameAsFirstInput());
6152 return summary;
6153}
6154
6155void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6156 Register out = locs()->out(0).reg();
6157 ASSERT(locs()->in(0).reg() == out);
6158
6159 ASSERT(op_kind() == Token::kBIT_NOT);
6160
6161 __ notl(out);
6162}
6163
6164LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6165 bool opt) const {
6166 const intptr_t kNumInputs = 1;
6167 const intptr_t kNumTemps = 0;
6168 LocationSummary* summary = new (zone)
6169 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6170
6171 if (from() == kUntagged || to() == kUntagged) {
6172 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
6173 (from() == kUntagged && to() == kUnboxedUint32) ||
6174 (from() == kUnboxedInt32 && to() == kUntagged) ||
6175 (from() == kUnboxedUint32 && to() == kUntagged));
6176 ASSERT(!CanDeoptimize());
6177 summary->set_in(0, Location::RequiresRegister());
6178 summary->set_out(0, Location::SameAsFirstInput());
6179 } else if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) &&
6180 (to() == kUnboxedInt32 || to() == kUnboxedUint32)) {
6181 summary->set_in(0, Location::RequiresRegister());
6182 summary->set_out(0, Location::SameAsFirstInput());
6183 } else if (from() == kUnboxedInt64) {
6184 summary->set_in(
6185 0, Location::Pair(CanDeoptimize() ? Location::WritableRegister()
6186 : Location::RequiresRegister(),
6187 Location::RequiresRegister()));
6188 summary->set_out(0, Location::RequiresRegister());
6189 } else if (from() == kUnboxedUint32) {
6190 summary->set_in(0, Location::RequiresRegister());
6191 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6192 Location::RequiresRegister()));
6193 } else if (from() == kUnboxedInt32) {
6194 summary->set_in(0, Location::RegisterLocation(EAX));
6195 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
6196 Location::RegisterLocation(EDX)));
6197 }
6198
6199 return summary;
6200}
6201
6202void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6203 const bool is_nop_conversion =
6204 (from() == kUntagged && to() == kUnboxedInt32) ||
6205 (from() == kUntagged && to() == kUnboxedUint32) ||
6206 (from() == kUnboxedInt32 && to() == kUntagged) ||
6207 (from() == kUnboxedUint32 && to() == kUntagged);
6208 if (is_nop_conversion) {
6209 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6210 return;
6211 }
6212
6213 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6214 // Representations are bitwise equivalent.
6215 ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
6216 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6217 // Representations are bitwise equivalent.
6218 ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
6219 if (CanDeoptimize()) {
6220 compiler::Label* deopt =
6221 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6222 __ testl(locs()->out(0).reg(), locs()->out(0).reg());
6223 __ j(NEGATIVE, deopt);
6224 }
6225 } else if (from() == kUnboxedInt64) {
6226 // TODO(vegorov) kUnboxedInt64 -> kInt32 conversion is currently usually
6227 // dominated by a CheckSmi(BoxInt64(val)) which is an artifact of ordering
6228 // of optimization passes and the way we check smi-ness of values.
6229 // Optimize it away.
6230 ASSERT(to() == kUnboxedInt32 || to() == kUnboxedUint32);
6231 PairLocation* in_pair = locs()->in(0).AsPairLocation();
6232 Register in_lo = in_pair->At(0).reg();
6233 Register in_hi = in_pair->At(1).reg();
6234 Register out = locs()->out(0).reg();
6235 // Copy low word.
6236 __ movl(out, in_lo);
6237 if (CanDeoptimize()) {
6238 compiler::Label* deopt =
6239 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6240 __ sarl(in_lo, compiler::Immediate(31));
6241 __ cmpl(in_lo, in_hi);
6242 __ j(NOT_EQUAL, deopt);
6243 }
6244 } else if (from() == kUnboxedUint32) {
6245 ASSERT(to() == kUnboxedInt64);
6246 Register in = locs()->in(0).reg();
6247 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6248 Register out_lo = out_pair->At(0).reg();
6249 Register out_hi = out_pair->At(1).reg();
6250 // Copy low word.
6251 __ movl(out_lo, in);
6252 // Zero upper word.
6253 __ xorl(out_hi, out_hi);
6254 } else if (from() == kUnboxedInt32) {
6255 ASSERT(to() == kUnboxedInt64);
6256 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6257 Register out_lo = out_pair->At(0).reg();
6258 Register out_hi = out_pair->At(1).reg();
6259 ASSERT(locs()->in(0).reg() == EAX);
6260 ASSERT(out_lo == EAX && out_hi == EDX);
6261 __ cdq();
6262 } else {
6263 UNREACHABLE();
6264 }
6265}
6266
6267LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6268 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6269}
6270
6271void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6272 __ Stop(message());
6273}
6274
6275void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6276 BlockEntryInstr* entry = normal_entry();
6277 if (entry != nullptr) {
6278 if (!compiler->CanFallThroughTo(entry)) {
6279 FATAL("Checked function entry must have no offset");
6280 }
6281 } else {
6282 entry = osr_entry();
6283 if (!compiler->CanFallThroughTo(entry)) {
6284 __ jmp(compiler->GetJumpLabel(entry));
6285 }
6286 }
6287}
6288
6289LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6290 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6291}
6292
6293void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6294 if (!compiler->is_optimizing()) {
6295 if (FLAG_reorder_basic_blocks) {
6296 compiler->EmitEdgeCounter(block()->preorder_number());
6297 }
6298 // Add a deoptimization descriptor for deoptimizing instructions that
6299 // may be inserted before this instruction.
6300 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
6301 InstructionSource());
6302 }
6303 if (HasParallelMove()) {
6304 parallel_move()->EmitNativeCode(compiler);
6305 }
6306
6307 // We can fall through if the successor is the next block in the list.
6308 // Otherwise, we need a jump.
6309 if (!compiler->CanFallThroughTo(successor())) {
6310 __ jmp(compiler->GetJumpLabel(successor()));
6311 }
6312}
6313
6314LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
6315 bool opt) const {
6316 const intptr_t kNumInputs = 1;
6317 const intptr_t kNumTemps = 2;
6318
6319 LocationSummary* summary = new (zone)
6320 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6321
6322 summary->set_in(0, Location::RequiresRegister());
6323 summary->set_temp(0, Location::RequiresRegister());
6324 summary->set_temp(1, Location::RequiresRegister());
6325
6326 return summary;
6327}
6328
6329void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6330 Register index_reg = locs()->in(0).reg();
6331 Register target_reg = locs()->temp(0).reg();
6332 Register offset = locs()->temp(1).reg();
6333
6334 ASSERT(RequiredInputRepresentation(0) == kTagged);
6335 __ LoadObject(offset, offsets_);
6336 __ movl(offset, compiler::Assembler::ElementAddressForRegIndex(
6337 /*is_external=*/false, kTypedDataInt32ArrayCid,
6338 /*index_scale=*/4,
6339 /*index_unboxed=*/false, offset, index_reg));
6340
6341 // Load code object from frame.
6342 __ movl(target_reg,
6343 compiler::Address(
6344 EBP, compiler::target::frame_layout.code_from_fp * kWordSize));
6345 // Load instructions object (active_instructions and Code::entry_point() may
6346 // not point to this instructions object any more; see Code::DisableDartCode).
6347 __ movl(target_reg,
6348 compiler::FieldAddress(target_reg, Code::instructions_offset()));
6349 __ addl(target_reg,
6350 compiler::Immediate(Instructions::HeaderSize() - kHeapObjectTag));
6351 __ addl(target_reg, offset);
6352
6353 // Jump to the absolute address.
6354 __ jmp(target_reg);
6355}
6356
6357LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
6358 bool opt) const {
6359 const intptr_t kNumInputs = 2;
6360 const intptr_t kNumTemps = 0;
6361 if (needs_number_check()) {
6362 LocationSummary* locs = new (zone)
6363 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6364 locs->set_in(0, Location::RegisterLocation(EAX));
6365 locs->set_in(1, Location::RegisterLocation(ECX));
6366 locs->set_out(0, Location::RegisterLocation(EAX));
6367 return locs;
6368 }
6369 LocationSummary* locs = new (zone)
6370 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6371 locs->set_in(0, LocationRegisterOrConstant(left()));
6372 // Only one of the inputs can be a constant. Choose register if the first one
6373 // is a constant.
6374 locs->set_in(1, locs->in(0).IsConstant()
6375 ? Location::RequiresRegister()
6376 : LocationRegisterOrConstant(right()));
6377 locs->set_out(0, Location::RequiresRegister());
6378 return locs;
6379}
6380
6381Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
6382 FlowGraphCompiler* compiler,
6383 BranchLabels labels,
6384 Register reg,
6385 const Object& obj) {
6386 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
6387 source(), deopt_id());
6388}
6389
6390// Detect pattern when one value is zero and another is a power of 2.
6391static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
6392 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
6393 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
6394}
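// When one value is zero and the other is 1 << k, IfThenElse below
// materializes the Smi result with a single setcc + shll and no branch:
// e.g. {4, 0} becomes (0 or 1) << (2 + kSmiTagSize), i.e. Smi 4 or Smi 0.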
6395
6396LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
6397 bool opt) const {
6398 comparison()->InitializeLocationSummary(zone, opt);
6399 // TODO(dartbug.com/30953): support byte register constraints in the
6400 // register allocator.
6401 comparison()->locs()->set_out(0, Location::RegisterLocation(EDX));
6402 return comparison()->locs();
6403}
6404
6405void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6406 ASSERT(locs()->out(0).reg() == EDX);
6407
6408 // Clear upper part of the out register. We are going to use setcc on it
6409 // which is a byte move.
6410 __ xorl(EDX, EDX);
6411
6412 // Emit comparison code. This must not overwrite the result register.
6413 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
6414 // the labels or returning an invalid condition.
6415 BranchLabels labels = {nullptr, nullptr, nullptr};
6416 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
6417 ASSERT(true_condition != kInvalidCondition);
6418
6419 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
6420
6421 intptr_t true_value = if_true_;
6422 intptr_t false_value = if_false_;
6423
6424 if (is_power_of_two_kind) {
6425 if (true_value == 0) {
6426 // We need to have zero in EDX on true_condition.
6427 true_condition = InvertCondition(true_condition);
6428 }
6429 } else {
6430 if (true_value == 0) {
6431 // Swap values so that false_value is zero.
6432 intptr_t temp = true_value;
6433 true_value = false_value;
6434 false_value = temp;
6435 } else {
6436 true_condition = InvertCondition(true_condition);
6437 }
6438 }
6439
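// E.g. {if_true: 7, if_false: 2}: the condition was inverted above, so on
// the original true path DL is 0; decl then gives ~0, andl keeps
// Smi(7) - Smi(2) = 10, and adding Smi(2) = 4 yields 14 == Smi(7).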
6440 __ setcc(true_condition, DL);
6441
6442 if (is_power_of_two_kind) {
6443 const intptr_t shift =
6444 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
6445 __ shll(EDX, compiler::Immediate(shift + kSmiTagSize));
6446 } else {
6447 __ decl(EDX);
6448 __ andl(EDX, compiler::Immediate(Smi::RawValue(true_value) -
6449 Smi::RawValue(false_value)));
6450 if (false_value != 0) {
6451 __ addl(EDX, compiler::Immediate(Smi::RawValue(false_value)));
6452 }
6453 }
6454}
6455
6456LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
6457 bool opt) const {
6458 const intptr_t kNumInputs = 1;
6459 const intptr_t kNumTemps = 0;
6460 LocationSummary* summary = new (zone)
6461 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6462 summary->set_in(0, Location::RegisterLocation(FUNCTION_REG)); // Function.
6463 summary->set_out(0, Location::RegisterLocation(EAX));
6464 return summary;
6465}
6466
6467void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6468 // Load arguments descriptor.
6469 const intptr_t argument_count = ArgumentCount(); // Includes type args.
6470 const Array& arguments_descriptor =
6471 Array::ZoneHandle(Z, GetArgumentsDescriptor());
6472 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
6473
6474 // EBX: Entry point (of compiled code or the lazy-compile stub).
6475 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
6476 __ movl(EBX,
6477 compiler::FieldAddress(FUNCTION_REG, Function::entry_point_offset()));
6478
6479 // FUNCTION_REG: Function.
6480 // ARGS_DESC_REG: Arguments descriptor array.
6481 // ECX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
6482 __ xorl(IC_DATA_REG, IC_DATA_REG);
6483 __ call(EBX);
6484 compiler->EmitCallsiteMetadata(source(), deopt_id(),
6485 UntaggedPcDescriptors::kOther, locs(), env());
6486 compiler->EmitDropArguments(argument_count);
6487}
6488
6489LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
6490 bool opt) const {
6491 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
6492 LocationSummary::kNoCall);
6493}
6494
6495void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6496 Register input = locs()->in(0).reg();
6497 Register result = locs()->out(0).reg();
6498 ASSERT(input == result);
6499 __ xorl(result, compiler::Immediate(
6500 compiler::target::ObjectAlignment::kBoolValueMask));
6501}
6502
6503LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
6504 bool opt) const {
6505 UNREACHABLE();
6506 return NULL;
6507}
6508
6509void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6510 UNREACHABLE();
6511}
6512
6513LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
6514 bool opt) const {
6515 UNREACHABLE();
6516 return NULL;
6517}
6518
6519void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6520 UNREACHABLE();
6521}
6522
6523LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
6524 bool opt) const {
6525 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
6526 const intptr_t kNumTemps = 0;
6527 LocationSummary* locs = new (zone)
6528 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6529 if (type_arguments() != nullptr) {
6530 locs->set_in(kTypeArgumentsPos,
6531 Location::RegisterLocation(kAllocationStubTypeArgumentsReg));
6532 }
6533 locs->set_out(0, Location::RegisterLocation(EAX));
6534 return locs;
6535}
6536
6537void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6538 const Code& stub = Code::ZoneHandle(
6539 compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
6540 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6541 locs(), deopt_id(), env());
6542}
6543
6544void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6545#ifdef PRODUCT
6546 UNREACHABLE();
6547#else
6548 ASSERT(!compiler->is_optimizing());
6549 __ Call(StubCode::DebugStepCheck());
6550 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
6551 compiler->RecordSafepoint(locs());
6552#endif
6553}
6554
6555} // namespace dart
6556
6557#undef __
6558
6559#endif // defined(TARGET_ARCH_IA32)
Definition il.h:1986
OsrEntryInstr * osr_entry() const
Definition il.h:1992
const Field & field() const
Definition il.h:6476
Value * value() const
Definition il.h:6474
Value * value() const
Definition il.h:9101
Value * value() const
Definition il.h:9141
ComparisonInstr * comparison() const
Definition il.h:5434
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.h:3789
Value * offset() const
Definition il.h:3811
const AbstractType & type() const
Definition il.h:7245
Environment * env() const
Definition il.h:1209
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition il.h:1207
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition il.h:1196
LocationSummary * locs()
Definition il.h:1186
InstructionSource source() const
Definition il.h:1002
intptr_t deopt_id() const
Definition il.h:987
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
static intptr_t HeaderSize()
Definition object.h:5796
Value * value() const
Definition il.h:9930
Representation to() const
Definition il.h:10993
Representation from() const
Definition il.h:10992
const RuntimeEntry & TargetFunction() const
Definition il.cc:7229
MethodRecognizer::Kind recognized_kind() const
Definition il.h:10209
static constexpr intptr_t kDoubleTempIndex
Definition il.h:10243
static constexpr intptr_t kObjectTempIndex
Definition il.h:10242
intptr_t TargetAddressIndex() const
Definition il.h:6149
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition il.cc:8042
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition il.cc:7964
virtual Representation representation() const
Definition il.h:6865
intptr_t index_scale() const
Definition il.h:6851
Value * index() const
Definition il.h:6849
bool can_pack_into_smi() const
Definition il.h:6858
intptr_t element_count() const
Definition il.h:6856
bool IsExternal() const
Definition il.h:6844
intptr_t class_id() const
Definition il.h:6855
intptr_t class_id() const
Definition il.h:6759
bool IsUntagged() const
Definition il.h:6752
Value * array() const
Definition il.h:6756
intptr_t index_scale() const
Definition il.h:6758
Representation representation() const
Definition il.h:6775
Value * index() const
Definition il.h:6757
const LocalVariable & local() const
Definition il.h:5765
Location temp(intptr_t index) const
Definition locations.h:882
Location out(intptr_t index) const
Definition locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition locations.h:894
void set_out(intptr_t index, Location loc)
Definition locations.cc:232
Location in(intptr_t index) const
Definition locations.h:866
void set_in(intptr_t index, Location loc)
Definition locations.cc:205
static Location StackSlot(intptr_t stack_index, Register base)
Definition locations.h:447
static Location NoLocation()
Definition locations.h:387
static Location SameAsFirstInput()
Definition locations.h:382
static Location Pair(Location first, Location second)
Definition locations.cc:271
Register reg() const
Definition locations.h:404
static Location FpuRegisterLocation(FpuRegister reg)
Definition locations.h:410
static Location WritableRegister()
Definition locations.h:376
static Location RegisterLocation(Register reg)
Definition locations.h:398
static Location PrefersRegister()
Definition locations.h:358
static Location Any()
Definition locations.h:352
static Location RequiresRegister()
Definition locations.h:365
static Location RequiresFpuRegister()
Definition locations.h:369
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition locations.h:294
Value * right() const
Definition il.h:8922
intptr_t result_cid() const
Definition il.h:8924
Value * left() const
Definition il.h:8921
MethodRecognizer::Kind op_kind() const
Definition il.h:8919
Value * length() const
Definition il.h:3193
bool unboxed_inputs() const
Definition il.h:3198
Value * src_start() const
Definition il.h:3191
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition il.h:3192
static intptr_t value_offset()
Definition object.h:10053
virtual Representation representation() const
Definition il.h:3369
Value * value() const
Definition il.h:3359
Location location() const
Definition il.h:3356
static int ComputeArgcTag(const Function &function)
bool is_auto_scope() const
Definition il.h:5977
bool is_bootstrap_native() const
Definition il.h:5976
const Function & function() const
Definition il.h:5974
NativeFunction native_c_function() const
Definition il.h:5975
bool link_lazily() const
Definition il.h:5978
static ObjectPtr null()
Definition object.h:433
static Object & ZoneHandle()
Definition object.h:419
static intptr_t data_offset()
Definition object.h:10533
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool OnlyLessThanOrEqualTo(Range *range, intptr_t value)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr, BinaryIntegerOpInstr, FIELD_LIST) protected bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition il.cc:2103
Range * shift_range() const
Definition il.h:9607
Kind kind() const
Definition il.h:11304
Value * value() const
Definition il.h:9904
static constexpr intptr_t kBits
Definition object.h:9965
static SmiPtr New(intptr_t value)
Definition object.h:9985
static constexpr intptr_t kMaxValue
Definition object.h:9966
static intptr_t RawValue(intptr_t value)
Definition object.h:10001
const char * message() const
Definition il.h:3663
bool ShouldEmitStoreBarrier() const
Definition il.h:7045
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition il.cc:6932
Value * value() const
Definition il.h:7039
Value * array() const
Definition il.h:7037
intptr_t class_id() const
Definition il.h:7042
bool IsUntagged() const
Definition il.h:7076
intptr_t index_scale() const
Definition il.h:7041
Value * index() const
Definition il.h:7038
Value * value() const
Definition il.h:5914
const LocalVariable & local() const
Definition il.h:5913
const Field & field() const
Definition il.h:6685
Value * value() const
Definition il.h:6686
bool needs_number_check() const
Definition il.h:5107
Value * str() const
Definition il.h:6923
static intptr_t length_offset()
Definition object.h:10193
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition stub_code.cc:174
static constexpr int kNullCharCodeSymbolOffset
Definition symbols.h:604
static StringPtr * PredefinedAddress()
Definition symbols.h:771
static bool sse4_1_supported()
Definition cpu_ia32.h:61
intptr_t ArgumentCount() const
Definition il.h:4568
ArrayPtr GetArgumentsDescriptor() const
Definition il.h:4599
virtual intptr_t InputCount() const
Definition il.h:2737
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition il.h:5185
static intptr_t stack_limit_offset()
Definition thread.h:401
static bool IsEqualityOperator(Kind tok)
Definition token.h:236
virtual Representation representation() const
Definition il.h:9793
Value * value() const
Definition il.h:9780
Token::Kind op_kind() const
Definition il.h:9781
Value * value() const
Definition il.h:9192
Token::Kind op_kind() const
Definition il.h:9193
virtual Representation representation() const
Definition il.h:8655
Value * value() const
Definition il.h:8630
bool is_truncating() const
Definition il.h:8724
uword constant_address() const
Definition il.h:4273
virtual Representation representation() const
Definition il.h:4270
bool IsScanFlagsUnboxed() const
Definition il.cc:7188
static T Abs(T x)
Definition utils.h:34
static int32_t Low32Bits(int64_t value)
Definition utils.h:354
static constexpr T Maximum(T x, T y)
Definition utils.h:26
static constexpr int ShiftForPowerOfTwo(T x)
Definition utils.h:66
static T Minimum(T x, T y)
Definition utils.h:21
static T AddWithWrapAround(T a, T b)
Definition utils.h:416
static bool DoublesBitEqual(const double a, const double b)
Definition utils.h:510
static constexpr size_t HighestBit(int64_t v)
Definition utils.h:170
static constexpr bool IsPowerOfTwo(T x)
Definition utils.h:61
bool NeedsWriteBarrier()
Definition il.cc:1388
bool BindsToConstant() const
Definition il.cc:1181
intptr_t BoundSmiConstant() const
Definition il.cc:1210
bool BindsToSmiConstant() const
Definition il.cc:1206
Definition * definition() const
Definition il.h:103
CompileType * Type()
Value(Definition *definition)
Definition il.h:95
intptr_t InputCount() const
Definition il.h:2776
static Address Absolute(const uword addr)
void static bool EmittingComments()
Address ElementAddressForRegIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
static Address VMTagAddress()
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
static bool IsSafeSmi(const Object &object)
static bool IsSafe(const Object &object)
#define UNIMPLEMENTED
#define ASSERT(E)
SkBitmap source
Definition examples.cpp:28
#define FATAL(error)
FlutterSemanticsFlag flag
FlutterSemanticsFlag flags
uint8_t value
GAsyncResult * result
uint32_t * target
const char * name
Definition fuchsia.cc:50
int argument_count
Definition fuchsia.cc:52
static float max(float r, float g, float b)
Definition hsl.cpp:49
static float min(float r, float g, float b)
Definition hsl.cpp:48
#define R(r)
#define CASE(Arity, Mask, Name, Args, Result)
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)
Definition il.h:11813
size_t length
#define DEFINE_BACKEND(Name, Args)
const intptr_t kResultIndex
Definition marshaller.h:28
bool IsSmi(int64_t v)
bool HasIntegerValue(const dart::Object &object, int64_t *value)
Location LocationRegisterOrConstant(Value *value)
Definition locations.cc:289
const intptr_t kSmiBits
Definition globals.h:24
const Register kWriteBarrierSlotReg
const Register THR
uword FindDoubleConstant(double value)
static Condition InvertCondition(Condition c)
static constexpr int kSavedCallerPcSlotFromFp
bool IsTypedDataBaseClassId(intptr_t index)
Definition class_id.h:429
static bool IsSmiValue(Value *val, intptr_t *int_val)
const Register kExceptionObjectReg
const Register kWriteBarrierObjectReg
constexpr int32_t kMinInt32
Definition globals.h:482
const Register kWriteBarrierValueReg
static constexpr bool IsCalleeSavedRegister(Register reg)
Definition constants.h:85
constexpr intptr_t kIntptrMin
Definition globals.h:556
int32_t classid_t
Definition globals.h:524
static const ClassId kLastErrorCid
Definition class_id.h:311
@ kIllegalCid
Definition class_id.h:214
@ kNullCid
Definition class_id.h:252
@ kDynamicCid
Definition class_id.h:253
Representation
Definition locations.h:66
constexpr intptr_t kSimd128Size
Definition globals.h:459
const FpuRegister FpuTMP
@ kHeapObjectTag
static const ClassId kFirstErrorCid
Definition class_id.h:310
uintptr_t uword
Definition globals.h:501
const Register CODE_REG
@ GREATER_EQUAL
@ UNSIGNED_GREATER
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ NOT_CARRY
@ NO_OVERFLOW
@ BELOW_EQUAL
@ PARITY_ODD
@ UNSIGNED_LESS
@ ABOVE_EQUAL
@ PARITY_EVEN
const Register ARGS_DESC_REG
bool IsClampedTypedDataBaseClassId(intptr_t index)
Definition class_id.h:461
@ kNoRegister
Location LocationFixedRegisterOrConstant(Value *value, Register reg)
Definition locations.cc:339
bool IsExternalPayloadClassId(classid_t cid)
Definition class_id.h:472
constexpr intptr_t kInt32Size
Definition globals.h:450
const Register FPREG
static constexpr int kPcMarkerSlotFromFp
const Register FUNCTION_REG
const Register IC_DATA_REG
constexpr int32_t kMaxInt32
Definition globals.h:483
compiler::Address LocationToStackSlotAddress(Location loc)
Definition locations.cc:365
constexpr intptr_t kWordSize
Definition globals.h:509
Location LocationWritableRegisterOrConstant(Value *value)
Definition locations.cc:314
static bool IsConstant(Definition *def, int64_t *val)
Definition loops.cc:123
constexpr intptr_t kFloatSize
Definition globals.h:457
QRegister FpuRegister
const Register kStackTraceObjectReg
static int8_t data[kExtLength]
constexpr intptr_t kDoubleSize
Definition globals.h:456
Location LocationFixedRegisterOrSmiConstant(Value *value, Register reg)
Definition locations.cc:348
static ScaleFactor ToScaleFactor(intptr_t index_scale, bool index_unboxed)
Definition constants.h:95
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition locations.cc:297
constexpr intptr_t kBitsPerInt64
Definition globals.h:467
const Register SPREG
call(args)
Definition dom.py:159
Definition __init__.py:1
dst
Definition cp.py:12
SIN Vec< N, uint16_t > mull(const Vec< N, uint8_t > &x, const Vec< N, uint8_t > &y)
Definition SkVx.h:906
Definition ref_ptr.h:256
const Scalar scale
Point offset
static constexpr Register kResultReg
static constexpr Register kLengthReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Representation NativeRepresentation(Representation rep)
Definition il.h:8456
static constexpr intptr_t kBoolVsNullMask
static constexpr intptr_t kBoolValueMask
static constexpr size_t ValueSize(Representation rep)
Definition locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition locations.h:92
static compiler::OperandSize OperandSize(Representation rep)
Definition locations.cc:16
static constexpr bool IsUnboxed(Representation rep)
Definition locations.h:101
static bool IsUnsignedInteger(Representation rep)
Definition locations.h:126
static Representation RepresentationOfArrayElement(classid_t cid)
Definition locations.cc:79
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE()
Definition thread.h:204
#define kNegInfinity
Definition globals.h:66