il_ia32.cc
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "platform/globals.h"
6#include "vm/globals.h" // Needed here to get TARGET_ARCH_IA32.
7#if defined(TARGET_ARCH_IA32)
8
10
20#include "vm/dart_entry.h"
21#include "vm/instructions.h"
22#include "vm/object_store.h"
23#include "vm/parser.h"
24#include "vm/stack_frame.h"
25#include "vm/stub_code.h"
26#include "vm/symbols.h"
27
28#define __ compiler->assembler()->
29#define Z (compiler->zone())
30
31namespace dart {
32
33// Generic summary for call instructions that have all arguments pushed
34// on the stack and return the result in a fixed register EAX.
35LocationSummary* Instruction::MakeCallSummary(Zone* zone,
36 const Instruction* instr,
37 LocationSummary* locs) {
38 // This is unused on ia32.
39 ASSERT(locs == nullptr);
40 ASSERT(instr->representation() == kTagged);
41 const intptr_t kNumInputs = 0;
42 const intptr_t kNumTemps = 0;
43 LocationSummary* result = new (zone)
44 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
45 result->set_out(0, Location::RegisterLocation(EAX));
46 return result;
47}
48
49DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
50 ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
51 ASSERT(instr->representation() == kTagged);
52 __ movl(out, compiler::Address(instr->base_reg(), index, TIMES_2,
53 instr->offset()));
54
55 ASSERT(kSmiTag == 0);
56 ASSERT(kSmiTagSize == 1);
57}
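// Note on the asserts above: with kSmiTag == 0 and kSmiTagSize == 1, a Smi
// holding value i is stored as 2 * i, so scaling the still-tagged index by
// TIMES_2 yields 4 * i, i.e. a word-sized (4-byte) element offset on ia32
// without having to untag the index first.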
58
59DEFINE_BACKEND(StoreIndexedUnsafe,
60 (NoLocation, Register index, Register value)) {
61 ASSERT(instr->RequiredInputRepresentation(
62 StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
63 __ movl(compiler::Address(instr->base_reg(), index, TIMES_2, instr->offset()),
64 value);
65
66 ASSERT(kSmiTag == 0);
67 ASSERT(kSmiTagSize == 1);
68}
69
70DEFINE_BACKEND(TailCall,
71 (NoLocation,
72 Fixed<Register, ARGS_DESC_REG>,
73 Temp<Register> temp)) {
74 __ LoadObject(CODE_REG, instr->code());
75 __ LeaveFrame(); // The arguments are still on the stack.
76 __ movl(temp, compiler::FieldAddress(CODE_REG, Code::entry_point_offset()));
77 __ jmp(temp);
78}
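// Note: leaving the frame before the jump keeps the pushed arguments on the
// stack for the callee, and jumping (rather than calling) reuses the caller's
// return address, which is what makes this a proper tail call.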
79
80LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
81 bool opt) const {
82 // The compiler must optimize any function that includes a MemoryCopy
83 // instruction that uses typed data cids, since extracting the payload address
84 // from views is done in a compiler pass after all code motion has happened.
85 ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
86 !IsTypedDataBaseClassId(dest_cid_)) ||
87 opt);
88 const bool remove_loop =
90 const intptr_t kNumInputs = 5;
91 const intptr_t kNumTemps = remove_loop ? 1 : 0;
92 LocationSummary* locs = new (zone)
93 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
94 // Unlike other architectures, IA32 doesn't have enough registers to allocate
95 // temps to hold the payload addresses, so these instead use the rep mov input
96 // registers ESI and EDI, respectively... except ESI is THR, so use another
97 // writable register for the input and save/restore ESI internally as needed.
100 const bool needs_writable_inputs =
101 (((element_size_ == 1) && !unboxed_inputs_) ||
102 ((element_size_ == 16) && unboxed_inputs_));
104 needs_writable_inputs
108 needs_writable_inputs
111 if (remove_loop) {
112 locs->set_in(
115 length()->definition()->OriginalDefinition()->AsConstant()));
116 // Needs a valid ByteRegister for single byte moves, and a temp register
117 // for more than one move. We could potentially optimize the 2 and 4 byte
118 // single moves to overwrite the src_reg.
120 } else {
122 }
123 return locs;
124}
125
126static inline intptr_t SizeOfMemoryCopyElements(intptr_t element_size) {
127 return Utils::Minimum<intptr_t>(element_size, compiler::target::kWordSize);
128}
129
131 Register length_reg,
132 compiler::Label* done) {
133 const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
134 // We want to convert the value in length_reg to an unboxed length in
135 // terms of mov_size-sized elements.
136 const intptr_t shift = Utils::ShiftForPowerOfTwo(element_size_) -
137 Utils::ShiftForPowerOfTwo(mov_size) -
138 (unboxed_inputs_ ? 0 : kSmiTagShift);
139 if (shift < 0) {
140 ASSERT_EQUAL(shift, -kSmiTagShift);
141 __ SmiUntag(length_reg);
142 } else if (shift > 0) {
143 __ shll(length_reg, compiler::Immediate(shift));
144 }
145}
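// Note: the shift above rescales a length counted in element_size_ units
// (and possibly still Smi-tagged) into a count of mov_size-sized moves. For
// example, with element_size_ == 8 and a tagged length (mov_size == 4 on
// ia32) the shift is 3 - 2 - 1 == 0: the tagged value 2 * n is already the
// number of 4-byte moves needed for n 8-byte elements.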
146
147void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
148 Register dest_reg,
149 Register src_reg,
150 Register length_reg,
151 compiler::Label* done,
152 compiler::Label* copy_forwards) {
153 const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
154 const bool reversed = copy_forwards != nullptr;
155 if (reversed) {
156 // Avoid doing the extra work to prepare for the rep mov instructions
157 // if the length to copy is zero.
158 __ BranchIfZero(length_reg, done);
159 // Verify that the overlap actually exists by checking to see if
160 // the first element in dest <= the last element in src.
161 const ScaleFactor scale = ToScaleFactor(mov_size, /*index_unboxed=*/true);
162 __ leal(ESI, compiler::Address(src_reg, length_reg, scale, -mov_size));
163 __ CompareRegisters(dest_reg, ESI);
164 __ BranchIf(UNSIGNED_GREATER, copy_forwards,
166 // ESI already has the right address, so we just need to adjust dest_reg
167 // appropriately.
168 __ leal(dest_reg,
169 compiler::Address(dest_reg, length_reg, scale, -mov_size));
170 __ std();
171 } else {
172 // Move the start of the src array into ESI before the string operation.
173 __ movl(ESI, src_reg);
174 }
175 switch (mov_size) {
176 case 1:
177 __ rep_movsb();
178 break;
179 case 2:
180 __ rep_movsw();
181 break;
182 case 4:
183 __ rep_movsd();
184 break;
185 default:
186 UNREACHABLE();
187 }
188 if (reversed) {
189 __ cld();
190 }
191}
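// Note: rep movsb/movsw/movsd copy from [ESI] to [EDI] the number of times
// given by ECX, in the direction selected by the direction flag. For the
// overlapping (reversed) case the code above points both pointers at the
// last element and sets the flag with std so the copy runs backwards; cld
// then restores the default forward direction.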
192
193void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
194 classid_t array_cid,
195 Register array_reg,
196 Register payload_reg,
197 Representation array_rep,
198 Location start_loc) {
199 intptr_t offset = 0;
200 if (array_rep != kTagged) {
201 // Do nothing, array_reg already contains the payload address.
202 } else if (IsTypedDataBaseClassId(array_cid)) {
203 // The incoming array must have been proven to be an internal typed data
204 // object, where the payload is in the object and we can just offset.
205 ASSERT_EQUAL(array_rep, kTagged);
207 } else {
208 ASSERT_EQUAL(array_rep, kTagged);
209 ASSERT(!IsExternalPayloadClassId(array_cid));
210 switch (array_cid) {
211 case kOneByteStringCid:
212 offset =
214 break;
215 case kTwoByteStringCid:
216 offset =
218 break;
219 default:
220 UNREACHABLE();
221 break;
222 }
223 }
224 ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
225 if (start_loc.IsConstant()) {
226 const auto& constant = start_loc.constant();
227 ASSERT(constant.IsInteger());
228 const int64_t start_value = Integer::Cast(constant).AsInt64Value();
229 const intptr_t add_value = Utils::AddWithWrapAround(
230 Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
231 __ leal(payload_reg, compiler::Address(array_reg, add_value));
232 return;
233 }
234 // Note that start_reg must be writable in the special cases below.
235 const Register start_reg = start_loc.reg();
236 bool index_unboxed = unboxed_inputs_;
237 // Both special cases below assume that Smis are only shifted one bit.
238 COMPILE_ASSERT(kSmiTagShift == 1);
239 if (element_size_ == 1 && !index_unboxed) {
240 // Shift the value to the right by untagging it (the index is a Smi).
241 __ SmiUntag(start_reg);
242 index_unboxed = true;
243 } else if (element_size_ == 16 && index_unboxed) {
244 // Can't use TIMES_16 on X86, so instead pre-shift the value to reduce the
245 // scaling needed in the leal instruction.
246 __ SmiTag(start_reg);
247 index_unboxed = false;
248 }
249 auto const scale = ToScaleFactor(element_size_, index_unboxed);
250 __ leal(payload_reg, compiler::Address(array_reg, start_reg, scale, offset));
251}
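// Note: ia32 addressing only supports scale factors of 1, 2, 4 and 8, which
// is what the two special cases above work around: a 1-byte element with a
// still-tagged (Smi) index would need a scale of 1/2, so the index is
// untagged instead, while a 16-byte element with an unboxed index would need
// a scale of 16, so the index is doubled (re-tagged) and scaled by 8.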
252
253LocationSummary* MoveArgumentInstr::MakeLocationSummary(Zone* zone,
254 bool opt) const {
255 const intptr_t kNumInputs = 1;
256 const intptr_t kNumTemps = 0;
257 LocationSummary* locs = new (zone)
258 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
259 ASSERT(representation() == kTagged);
261 return locs;
262}
263
264void MoveArgumentInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
265 ASSERT(compiler->is_optimizing());
266
267 Location value = locs()->in(0);
268 const compiler::Address dst = LocationToStackSlotAddress(location());
269 if (value.IsConstant()) {
270 __ Store(value.constant(), dst);
271 } else {
272 ASSERT(value.IsRegister());
273 __ Store(value.reg(), dst);
274 }
275}
276
277LocationSummary* DartReturnInstr::MakeLocationSummary(Zone* zone,
278 bool opt) const {
279 const intptr_t kNumInputs = 1;
280 const intptr_t kNumTemps = 0;
281 LocationSummary* locs = new (zone)
282 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
283 ASSERT(representation() == kTagged);
285 return locs;
286}
287
288// Attempt optimized compilation at return instruction instead of at the entry.
289 // The entry needs to be patchable; no inlined objects are allowed in the area
290 // that will be overwritten by the patch instruction (a jump).
291void DartReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
292 Register result = locs()->in(0).reg();
293 ASSERT(result == EAX);
294
295 if (compiler->parsed_function().function().IsAsyncFunction() ||
296 compiler->parsed_function().function().IsAsyncGenerator()) {
297 ASSERT(compiler->flow_graph().graph_entry()->NeedsFrame());
298 const Code& stub = GetReturnStub(compiler);
299 compiler->EmitJumpToStub(stub);
300 return;
301 }
302
303 if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
304 __ ret();
305 return;
306 }
307
308#if defined(DEBUG)
309 __ Comment("Stack Check");
310 compiler::Label done;
311 const intptr_t fp_sp_dist =
312 (compiler::target::frame_layout.first_local_from_fp + 1 -
313 compiler->StackSize()) *
314 kWordSize;
315 ASSERT(fp_sp_dist <= 0);
316 __ movl(EDI, ESP);
317 __ subl(EDI, EBP);
318 __ cmpl(EDI, compiler::Immediate(fp_sp_dist));
319 __ j(EQUAL, &done, compiler::Assembler::kNearJump);
320 __ int3();
321 __ Bind(&done);
322#endif
323 __ LeaveDartFrame();
324 __ ret();
325}
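// Note: the DEBUG-only block above verifies that ESP is exactly fp_sp_dist
// below EBP (i.e. the fixed frame is still intact) before the frame is torn
// down, and traps with int3 if it is not.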
326
327// Keep in sync with NativeEntryInstr::EmitNativeCode.
328void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
329 EmitReturnMoves(compiler);
330
331 bool return_in_st0 = false;
332 if (marshaller_.Location(compiler::ffi::kResultIndex)
333 .payload_type()
334 .IsFloat()) {
335 ASSERT(locs()->in(0).IsFpuRegister() && locs()->in(0).fpu_reg() == XMM0);
336 return_in_st0 = true;
337 }
338
339 // EDI is the only sane choice for a temporary register here because:
340 //
341 // EDX is used for large return values.
342 // ESI == THR.
343 // Could be EBX or ECX, but that would make code below confusing.
344 const Register tmp = EDI;
345
346 // Restore the saved VM tag now, while the profiler's stack walker can still
347 // see the InvokeDartCode return address.
348 __ movl(tmp, compiler::Address(EBP, NativeEntryInstr::kVMTagOffsetFromFp));
349 __ movl(compiler::Assembler::VMTagAddress(), tmp);
350
351 __ LeaveDartFrame();
352
353 // Pop dummy return address.
354 __ popl(tmp);
355
356 // Anything besides the return register(s!). Callee-saved registers will be
357 // restored later.
358 const Register vm_tag_reg = EBX;
359 const Register old_exit_frame_reg = ECX;
360 const Register old_exit_through_ffi_reg = tmp;
361
362 __ popl(old_exit_frame_reg);
363 __ popl(vm_tag_reg); /* old_exit_through_ffi, we still need to use tmp. */
364
365 // Restore top_resource.
366 __ popl(tmp);
367 __ movl(
369 tmp);
370
371 __ movl(old_exit_through_ffi_reg, vm_tag_reg);
372 __ popl(vm_tag_reg);
373
374 // Reset the exit frame info to old_exit_frame_reg *before* entering the
375 // safepoint. The trampoline that called us will enter the safepoint on our
376 // behalf.
377 __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
378 old_exit_through_ffi_reg,
379 /*enter_safepoint=*/false);
380
381 // Move XMM0 into ST0 if needed.
382 if (return_in_st0) {
383 if (marshaller_.Location(compiler::ffi::kResultIndex)
384 .payload_type()
385 .SizeInBytes() == 8) {
386 __ movsd(compiler::Address(SPREG, -8), XMM0);
387 __ fldl(compiler::Address(SPREG, -8));
388 } else {
389 __ movss(compiler::Address(SPREG, -4), XMM0);
390 __ flds(compiler::Address(SPREG, -4));
391 }
392 }
393
394 // Restore C++ ABI callee-saved registers.
395 __ popl(EDI);
396 __ popl(ESI);
397 __ popl(EBX);
398
399#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
400#error Unimplemented
401#endif
402
403 // Leave the entry frame.
404 __ LeaveFrame();
405
406 // We deal with `ret 4` for structs in the JIT callback trampolines.
407 __ ret();
408}
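// Note: the ia32 C calling convention returns floating-point values in the
// x87 register ST0 rather than in an XMM register, which is why a float or
// double result is spilled from XMM0 and reloaded with flds/fldl above
// before returning to native code.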
409
410LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
411 bool opt) const {
412 const intptr_t kNumInputs = 0;
413 const intptr_t stack_index =
415 return LocationSummary::Make(zone, kNumInputs,
416 Location::StackSlot(stack_index, FPREG),
418}
419
420void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
421 ASSERT(!compiler->is_optimizing());
422 // Nothing to do.
423}
424
425LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
426 bool opt) const {
427 const intptr_t kNumInputs = 1;
428 return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
430}
431
432void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
433 Register value = locs()->in(0).reg();
434 Register result = locs()->out(0).reg();
435 ASSERT(result == value); // Assert that register assignment is correct.
436 __ movl(compiler::Address(
438 value);
439}
440
441LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
442 bool opt) const {
443 const intptr_t kNumInputs = 0;
444 return LocationSummary::Make(zone, kNumInputs,
446 ? Location::Constant(this)
449}
450
451void ConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
452 // The register allocator drops constant definitions that have no uses.
453 Location out = locs()->out(0);
454 ASSERT(out.IsRegister() || out.IsConstant() || out.IsInvalid());
455 if (out.IsRegister()) {
456 Register result = out.reg();
457 __ LoadObjectSafely(result, value());
458 }
459}
460
461void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
462 const Location& destination,
463 Register tmp,
464 intptr_t pair_index) {
465 if (destination.IsRegister()) {
467 int64_t v;
468 const bool ok = compiler::HasIntegerValue(value_, &v);
470 if (value_.IsSmi() &&
472 // If the value is negative, then the sign bit was preserved during
473 // Smi untagging, which means the resulting value may be unexpected.
474 ASSERT(v >= 0);
475 }
476 __ movl(destination.reg(),
477 compiler::Immediate(pair_index == 0 ? Utils::Low32Bits(v)
478 : Utils::High32Bits(v)));
479 } else {
480 ASSERT(representation() == kTagged);
481 __ LoadObjectSafely(destination.reg(), value_);
482 }
483 } else if (destination.IsFpuRegister()) {
484 switch (representation()) {
485 case kUnboxedFloat:
486 __ LoadSImmediate(destination.fpu_reg(),
487 static_cast<float>(Double::Cast(value_).value()));
488 break;
489 case kUnboxedDouble: {
490 const double value_as_double = Double::Cast(value_).value();
491 uword addr = FindDoubleConstant(value_as_double);
492 if (addr == 0) {
493 __ pushl(EAX);
494 __ LoadObject(EAX, value_);
495 __ movsd(destination.fpu_reg(),
496 compiler::FieldAddress(EAX, Double::value_offset()));
497 __ popl(EAX);
498 } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
499 __ xorps(destination.fpu_reg(), destination.fpu_reg());
500 } else {
501 __ movsd(destination.fpu_reg(), compiler::Address::Absolute(addr));
502 }
503 break;
504 }
505 case kUnboxedFloat64x2:
506 __ LoadQImmediate(destination.fpu_reg(),
507 Float64x2::Cast(value_).value());
508 break;
509 case kUnboxedFloat32x4:
510 __ LoadQImmediate(destination.fpu_reg(),
511 Float32x4::Cast(value_).value());
512 break;
513 case kUnboxedInt32x4:
514 __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
515 break;
516 default:
517 UNREACHABLE();
518 }
519 } else if (destination.IsDoubleStackSlot()) {
520 const double value_as_double = Double::Cast(value_).value();
521 uword addr = FindDoubleConstant(value_as_double);
522 if (addr == 0) {
523 __ pushl(EAX);
524 __ LoadObject(EAX, value_);
525 __ movsd(FpuTMP, compiler::FieldAddress(EAX, Double::value_offset()));
526 __ popl(EAX);
527 } else if (Utils::DoublesBitEqual(value_as_double, 0.0)) {
528 __ xorps(FpuTMP, FpuTMP);
529 } else {
531 }
532 __ movsd(LocationToStackSlotAddress(destination), FpuTMP);
533 } else if (destination.IsQuadStackSlot()) {
534 switch (representation()) {
535 case kUnboxedFloat64x2:
536 __ LoadQImmediate(FpuTMP, Float64x2::Cast(value_).value());
537 break;
538 case kUnboxedFloat32x4:
539 __ LoadQImmediate(FpuTMP, Float32x4::Cast(value_).value());
540 break;
541 case kUnboxedInt32x4:
542 __ LoadQImmediate(FpuTMP, Int32x4::Cast(value_).value());
543 break;
544 default:
545 UNREACHABLE();
546 }
547 __ movups(LocationToStackSlotAddress(destination), FpuTMP);
548 } else {
549 ASSERT(destination.IsStackSlot());
551 int64_t v;
552 const bool ok = compiler::HasIntegerValue(value_, &v);
554 __ movl(LocationToStackSlotAddress(destination),
555 compiler::Immediate(pair_index == 0 ? Utils::Low32Bits(v)
556 : Utils::High32Bits(v)));
557 } else if (representation() == kUnboxedFloat) {
558 int32_t float_bits =
559 bit_cast<int32_t, float>(Double::Cast(value_).value());
560 __ movl(LocationToStackSlotAddress(destination),
561 compiler::Immediate(float_bits));
562 } else {
563 ASSERT(representation() == kTagged);
564 if (compiler::Assembler::IsSafeSmi(value_) || value_.IsNull()) {
565 __ movl(LocationToStackSlotAddress(destination),
566 compiler::Immediate(static_cast<int32_t>(value_.ptr())));
567 } else {
568 __ pushl(EAX);
569 __ LoadObjectSafely(EAX, value_);
570 __ movl(LocationToStackSlotAddress(destination), EAX);
571 __ popl(EAX);
572 }
573 }
574 }
575}
576
577LocationSummary* UnboxedConstantInstr::MakeLocationSummary(Zone* zone,
578 bool opt) const {
579 const bool is_unboxed_int =
583 const intptr_t kNumInputs = 0;
584 const intptr_t kNumTemps =
585 (constant_address() == 0) && !is_unboxed_int ? 1 : 0;
586 LocationSummary* locs = new (zone)
587 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
588 if (representation() == kUnboxedDouble) {
590 } else {
591 ASSERT(is_unboxed_int);
593 }
594 if (kNumTemps == 1) {
596 }
597 return locs;
598}
599
600void UnboxedConstantInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
601 // The register allocator drops constant definitions that have no uses.
602 if (!locs()->out(0).IsInvalid()) {
604 }
605}
606
607LocationSummary* AssertAssignableInstr::MakeLocationSummary(Zone* zone,
608 bool opt) const {
609 const intptr_t kNumInputs = 4;
610 const intptr_t kNumTemps = 0;
611 LocationSummary* summary = new (zone)
612 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
613 summary->set_in(kInstancePos,
617 summary->set_in(
622 summary->set_out(0, Location::SameAsFirstInput());
623 return summary;
624}
625
626void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
627 ASSERT(locs()->always_calls());
628
629 auto object_store = compiler->isolate_group()->object_store();
630 const auto& assert_boolean_stub =
631 Code::ZoneHandle(compiler->zone(), object_store->assert_boolean_stub());
632
633 compiler::Label done;
634 __ testl(
638 compiler->GenerateStubCall(source(), assert_boolean_stub,
639 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
640 deopt_id(), env());
641 __ Bind(&done);
642}
643
644static Condition TokenKindToIntCondition(Token::Kind kind) {
645 switch (kind) {
646 case Token::kEQ:
647 return EQUAL;
648 case Token::kNE:
649 return NOT_EQUAL;
650 case Token::kLT:
651 return LESS;
652 case Token::kGT:
653 return GREATER;
654 case Token::kLTE:
655 return LESS_EQUAL;
656 case Token::kGTE:
657 return GREATER_EQUAL;
658 default:
659 UNREACHABLE();
660 return OVERFLOW;
661 }
662}
663
664LocationSummary* EqualityCompareInstr::MakeLocationSummary(Zone* zone,
665 bool opt) const {
666 const intptr_t kNumInputs = 2;
667 if (operation_cid() == kMintCid) {
668 const intptr_t kNumTemps = 0;
669 LocationSummary* locs = new (zone)
670 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
676 return locs;
677 }
678 if (operation_cid() == kDoubleCid) {
679 const intptr_t kNumTemps = 0;
680 LocationSummary* locs = new (zone)
681 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
685 return locs;
686 }
687 if (operation_cid() == kSmiCid || operation_cid() == kIntegerCid) {
688 const intptr_t kNumTemps = 0;
689 LocationSummary* locs = new (zone)
690 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
692 // Only one input can be a constant operand. The case of two constant
693 // operands should be handled by constant propagation.
694 // Only right can be a stack slot.
695 locs->set_in(1, locs->in(0).IsConstant()
699 return locs;
700 }
701 UNREACHABLE();
702 return nullptr;
703}
704
705static void LoadValueCid(FlowGraphCompiler* compiler,
706 Register value_cid_reg,
707 Register value_reg,
708 compiler::Label* value_is_smi = nullptr) {
709 compiler::Label done;
710 if (value_is_smi == nullptr) {
711 __ movl(value_cid_reg, compiler::Immediate(kSmiCid));
712 }
713 __ testl(value_reg, compiler::Immediate(kSmiTagMask));
714 if (value_is_smi == nullptr) {
715 __ j(ZERO, &done, compiler::Assembler::kNearJump);
716 } else {
717 __ j(ZERO, value_is_smi);
718 }
719 __ LoadClassId(value_cid_reg, value_reg);
720 __ Bind(&done);
721}
722
723static Condition FlipCondition(Condition condition) {
724 switch (condition) {
725 case EQUAL:
726 return EQUAL;
727 case NOT_EQUAL:
728 return NOT_EQUAL;
729 case LESS:
730 return GREATER;
731 case LESS_EQUAL:
732 return GREATER_EQUAL;
733 case GREATER:
734 return LESS;
735 case GREATER_EQUAL:
736 return LESS_EQUAL;
737 case BELOW:
738 return ABOVE;
739 case BELOW_EQUAL:
740 return ABOVE_EQUAL;
741 case ABOVE:
742 return BELOW;
743 case ABOVE_EQUAL:
744 return BELOW_EQUAL;
745 default:
746 UNREACHABLE();
747 return EQUAL;
748 }
749}
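// Note: FlipCondition is for swapping the *operands* of a comparison (for
// example when the constant ends up on the left): a < b is equivalent to
// b > a. It is distinct from InvertCondition, which negates the test and is
// used below when branching to the false label.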
750
751static void EmitBranchOnCondition(
752 FlowGraphCompiler* compiler,
753 Condition true_condition,
754 BranchLabels labels,
755 compiler::Assembler::JumpDistance jump_distance =
756 compiler::Assembler::kFarJump) {
757 if (labels.fall_through == labels.false_label) {
758 // If the next block is the false successor, fall through to it.
759 __ j(true_condition, labels.true_label, jump_distance);
760 } else {
761 // If the next block is not the false successor, branch to it.
762 Condition false_condition = InvertCondition(true_condition);
763 __ j(false_condition, labels.false_label, jump_distance);
764
765 // Fall through or jump to the true successor.
766 if (labels.fall_through != labels.true_label) {
767 __ jmp(labels.true_label, jump_distance);
768 }
769 }
770}
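// Note: at most one conditional jump is emitted here. When the false
// successor is the fall-through block a single jump on true_condition
// suffices; otherwise the condition is inverted so the false successor gets
// the conditional jump, and the unconditional jmp to the true successor is
// emitted only if it does not fall through either.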
771
772static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
773 const LocationSummary& locs,
774 Token::Kind kind,
775 BranchLabels labels) {
776 Location left = locs.in(0);
777 Location right = locs.in(1);
778 ASSERT(!left.IsConstant() || !right.IsConstant());
779
780 Condition true_condition = TokenKindToIntCondition(kind);
781
782 if (left.IsConstant()) {
783 __ CompareObject(right.reg(), left.constant());
784 true_condition = FlipCondition(true_condition);
785 } else if (right.IsConstant()) {
786 __ CompareObject(left.reg(), right.constant());
787 } else if (right.IsStackSlot()) {
788 __ cmpl(left.reg(), LocationToStackSlotAddress(right));
789 } else {
790 __ cmpl(left.reg(), right.reg());
791 }
792 return true_condition;
793}
794
795static Condition EmitWordComparisonOp(FlowGraphCompiler* compiler,
796 const LocationSummary& locs,
797 Token::Kind kind,
798 BranchLabels labels) {
799 Location left = locs.in(0);
800 Location right = locs.in(1);
801 ASSERT(!left.IsConstant() || !right.IsConstant());
802
803 Condition true_condition = TokenKindToIntCondition(kind);
804
805 if (left.IsConstant()) {
806 __ CompareImmediate(
807 right.reg(),
808 static_cast<uword>(Integer::Cast(left.constant()).AsInt64Value()));
809 true_condition = FlipCondition(true_condition);
810 } else if (right.IsConstant()) {
811 __ CompareImmediate(
812 left.reg(),
813 static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
814 } else if (right.IsStackSlot()) {
815 __ cmpl(left.reg(), LocationToStackSlotAddress(right));
816 } else {
817 __ cmpl(left.reg(), right.reg());
818 }
819 return true_condition;
820}
821
822static Condition EmitUnboxedMintEqualityOp(FlowGraphCompiler* compiler,
823 const LocationSummary& locs,
824 Token::Kind kind,
825 BranchLabels labels) {
827 PairLocation* left_pair = locs.in(0).AsPairLocation();
828 Register left1 = left_pair->At(0).reg();
829 Register left2 = left_pair->At(1).reg();
830 PairLocation* right_pair = locs.in(1).AsPairLocation();
831 Register right1 = right_pair->At(0).reg();
832 Register right2 = right_pair->At(1).reg();
833 compiler::Label done;
834 // Compare lower.
835 __ cmpl(left1, right1);
836 __ j(NOT_EQUAL, &done);
837 // Lower is equal, compare upper.
838 __ cmpl(left2, right2);
839 __ Bind(&done);
840 Condition true_condition = TokenKindToIntCondition(kind);
841 return true_condition;
842}
843
844static Condition EmitUnboxedMintComparisonOp(FlowGraphCompiler* compiler,
845 const LocationSummary& locs,
846 Token::Kind kind,
847 BranchLabels labels) {
848 PairLocation* left_pair = locs.in(0).AsPairLocation();
849 Register left1 = left_pair->At(0).reg();
850 Register left2 = left_pair->At(1).reg();
851 PairLocation* right_pair = locs.in(1).AsPairLocation();
852 Register right1 = right_pair->At(0).reg();
853 Register right2 = right_pair->At(1).reg();
854
855 Condition hi_cond = OVERFLOW, lo_cond = OVERFLOW;
856 switch (kind) {
857 case Token::kLT:
858 hi_cond = LESS;
859 lo_cond = BELOW;
860 break;
861 case Token::kGT:
862 hi_cond = GREATER;
863 lo_cond = ABOVE;
864 break;
865 case Token::kLTE:
866 hi_cond = LESS;
867 lo_cond = BELOW_EQUAL;
868 break;
869 case Token::kGTE:
870 hi_cond = GREATER;
871 lo_cond = ABOVE_EQUAL;
872 break;
873 default:
874 break;
875 }
876 ASSERT(hi_cond != OVERFLOW && lo_cond != OVERFLOW);
877 // Compare upper halves first.
878 __ cmpl(left2, right2);
879 __ j(hi_cond, labels.true_label);
880 __ j(FlipCondition(hi_cond), labels.false_label);
881
882 // If upper is equal, compare lower half.
883 __ cmpl(left1, right1);
884 return lo_cond;
885}
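// Note: the 64-bit signed comparison above is a signed compare of the high
// words followed by an unsigned compare of the low words. hi_cond stays
// strict (LESS/GREATER) even for <= and >= because equal high words fall
// through to the low-word compare, where lo_cond decides the result.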
886
887static Condition TokenKindToDoubleCondition(Token::Kind kind) {
888 switch (kind) {
889 case Token::kEQ:
890 return EQUAL;
891 case Token::kNE:
892 return NOT_EQUAL;
893 case Token::kLT:
894 return BELOW;
895 case Token::kGT:
896 return ABOVE;
897 case Token::kLTE:
898 return BELOW_EQUAL;
899 case Token::kGTE:
900 return ABOVE_EQUAL;
901 default:
902 UNREACHABLE();
903 return OVERFLOW;
904 }
905}
906
907static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,
908 const LocationSummary& locs,
909 Token::Kind kind,
910 BranchLabels labels) {
911 XmmRegister left = locs.in(0).fpu_reg();
912 XmmRegister right = locs.in(1).fpu_reg();
913
914 __ comisd(left, right);
915
916 Condition true_condition = TokenKindToDoubleCondition(kind);
917 compiler::Label* nan_result =
918 (true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label;
919 __ j(PARITY_EVEN, nan_result);
920 return true_condition;
921}
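// Note: comisd sets the parity flag when the comparison is unordered (one
// operand is NaN). Any comparison involving NaN must be false except !=, so
// the PARITY_EVEN jump above routes NaN to the false label, or to the true
// label when the condition is NOT_EQUAL.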
922
923Condition EqualityCompareInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
924 BranchLabels labels) {
925 if (is_null_aware()) {
926 // Null-aware EqualityCompare instruction is only used in AOT.
927 UNREACHABLE();
928 }
929 if (operation_cid() == kSmiCid) {
930 return EmitSmiComparisonOp(compiler, *locs(), kind(), labels);
931 } else if (operation_cid() == kMintCid) {
932 return EmitUnboxedMintEqualityOp(compiler, *locs(), kind(), labels);
933 } else if (operation_cid() == kIntegerCid) {
934 return EmitWordComparisonOp(compiler, *locs(), kind(), labels);
935 } else {
936 ASSERT(operation_cid() == kDoubleCid);
937 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
938 }
939}
940
941void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
942 compiler::Label is_true, is_false;
943 BranchLabels labels = {&is_true, &is_false, &is_false};
944 Condition true_condition = EmitComparisonCode(compiler, labels);
945 if (true_condition != kInvalidCondition) {
946 EmitBranchOnCondition(compiler, true_condition, labels,
947 compiler::Assembler::kNearJump);
948 }
949
950 Register result = locs()->out(0).reg();
951 compiler::Label done;
952 __ Bind(&is_false);
953 __ LoadObject(result, Bool::False());
954 __ jmp(&done, compiler::Assembler::kNearJump);
955 __ Bind(&is_true);
956 __ LoadObject(result, Bool::True());
957 __ Bind(&done);
958}
959
960void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
961 BranchInstr* branch) {
962 BranchLabels labels = compiler->CreateBranchLabels(branch);
963 Condition true_condition = EmitComparisonCode(compiler, labels);
964 if (true_condition != kInvalidCondition) {
965 EmitBranchOnCondition(compiler, true_condition, labels);
966 }
967}
968
969LocationSummary* TestIntInstr::MakeLocationSummary(Zone* zone, bool opt) const {
970 RELEASE_ASSERT(representation_ == kTagged);
971 const intptr_t kNumInputs = 2;
972 const intptr_t kNumTemps = 0;
973 LocationSummary* locs = new (zone)
974 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
976 // Only one input can be a constant operand. The case of two constant
977 // operands should be handled by constant propagation.
980 return locs;
981}
982
983Condition TestIntInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
984 BranchLabels labels) {
985 Register left = locs()->in(0).reg();
986 Location right = locs()->in(1);
987 if (right.IsConstant()) {
988 __ testl(left,
989 compiler::Immediate(static_cast<int32_t>(ComputeImmediateMask())));
990 } else {
991 __ testl(left, right.reg());
992 }
993 Condition true_condition = (kind() == Token::kNE) ? NOT_ZERO : ZERO;
994 return true_condition;
995}
996
997LocationSummary* TestCidsInstr::MakeLocationSummary(Zone* zone,
998 bool opt) const {
999 const intptr_t kNumInputs = 1;
1000 const intptr_t kNumTemps = 1;
1001 LocationSummary* locs = new (zone)
1002 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1006 return locs;
1007}
1008
1009Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1010 BranchLabels labels) {
1011 ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
1012 Register val_reg = locs()->in(0).reg();
1013 Register cid_reg = locs()->temp(0).reg();
1014
1015 compiler::Label* deopt =
1017 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptTestCids)
1018 : nullptr;
1019
1020 const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
1021 const ZoneGrowableArray<intptr_t>& data = cid_results();
1022 ASSERT(data[0] == kSmiCid);
1023 bool result = data[1] == true_result;
1024 __ testl(val_reg, compiler::Immediate(kSmiTagMask));
1025 __ j(ZERO, result ? labels.true_label : labels.false_label);
1026 __ LoadClassId(cid_reg, val_reg);
1027 for (intptr_t i = 2; i < data.length(); i += 2) {
1028 const intptr_t test_cid = data[i];
1029 ASSERT(test_cid != kSmiCid);
1030 result = data[i + 1] == true_result;
1031 __ cmpl(cid_reg, compiler::Immediate(test_cid));
1032 __ j(EQUAL, result ? labels.true_label : labels.false_label);
1033 }
1034 // No match found, deoptimize or default action.
1035 if (deopt == nullptr) {
1036 // If the cid is not in the list, jump to the opposite label from the cids
1037 // that are in the list. These must be all the same (see asserts in the
1038 // constructor).
1039 compiler::Label* target = result ? labels.false_label : labels.true_label;
1040 if (target != labels.fall_through) {
1041 __ jmp(target);
1042 }
1043 } else {
1044 __ jmp(deopt);
1045 }
1046 // Return a dummy result, as this method already emitted the jump; there's
1047 // no need for the caller to branch on a condition.
1048 return kInvalidCondition;
1049}
1050
1051LocationSummary* RelationalOpInstr::MakeLocationSummary(Zone* zone,
1052 bool opt) const {
1053 const intptr_t kNumInputs = 2;
1054 const intptr_t kNumTemps = 0;
1055 if (operation_cid() == kMintCid) {
1056 const intptr_t kNumTemps = 0;
1057 LocationSummary* locs = new (zone)
1058 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1064 return locs;
1065 }
1066 if (operation_cid() == kDoubleCid) {
1067 LocationSummary* summary = new (zone)
1068 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1070 summary->set_in(1, Location::RequiresFpuRegister());
1071 summary->set_out(0, Location::RequiresRegister());
1072 return summary;
1073 }
1074 ASSERT(operation_cid() == kSmiCid);
1075 LocationSummary* summary = new (zone)
1076 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1077 summary->set_in(0, LocationRegisterOrConstant(left()));
1078 // Only one input can be a constant operand. The case of two constant
1079 // operands should be handled by constant propagation.
1080 summary->set_in(1, summary->in(0).IsConstant()
1083 summary->set_out(0, Location::RequiresRegister());
1084 return summary;
1085}
1086
1087Condition RelationalOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
1088 BranchLabels labels) {
1089 if (operation_cid() == kSmiCid) {
1090 return EmitSmiComparisonOp(compiler, *locs(), kind(), labels);
1091 } else if (operation_cid() == kMintCid) {
1092 return EmitUnboxedMintComparisonOp(compiler, *locs(), kind(), labels);
1093 } else {
1094 ASSERT(operation_cid() == kDoubleCid);
1095 return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
1096 }
1097}
1098
1099void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1100 SetupNative();
1101 Register result = locs()->out(0).reg();
1102 const intptr_t argc_tag = NativeArguments::ComputeArgcTag(function());
1103
1104 // Pass a pointer to the first argument in EAX.
1105 __ leal(EAX, compiler::Address(ESP, (ArgumentCount() - 1) * kWordSize));
1106
1107 __ movl(EDX, compiler::Immediate(argc_tag));
1108
1109 const Code* stub;
1110
1111 // There is no lazy-linking support on ia32.
1112 ASSERT(!link_lazily());
1113 if (is_bootstrap_native()) {
1114 stub = &StubCode::CallBootstrapNative();
1115 } else if (is_auto_scope()) {
1116 stub = &StubCode::CallAutoScopeNative();
1117 } else {
1118 stub = &StubCode::CallNoScopeNative();
1119 }
1120 const compiler::ExternalLabel label(
1121 reinterpret_cast<uword>(native_c_function()));
1122 __ movl(ECX, compiler::Immediate(label.address()));
1123 // We can never lazy-deopt here because natives are never optimized.
1124 ASSERT(!compiler->is_optimizing());
1125 compiler->GenerateNonLazyDeoptableStubCall(
1126 source(), *stub, UntaggedPcDescriptors::kOther, locs());
1127 __ LoadFromOffset(result, ESP, 0);
1128
1129 compiler->EmitDropArguments(ArgumentCount()); // Drop the arguments.
1130}
1131
1132#define R(r) (1 << r)
1133
1134LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
1135 bool is_optimizing) const {
1138 return MakeLocationSummaryInternal(
1139 zone, is_optimizing,
1142}
1143
1144#undef R
1145
1146void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1147 const Register branch = locs()->in(TargetAddressIndex()).reg();
1148
1149 // The temps are indexed according to their register number.
1150 const Register temp = locs()->temp(0).reg();
1151 // For regular calls, this holds the FP for rebasing the original locations
1152 // during EmitParamMoves.
1153 // For leaf calls, this holds the SP used to restore the pre-aligned SP after
1154 // the call.
1155 const Register saved_fp_or_sp = locs()->temp(2).reg();
1156
1157 // Ensure this is a callee-saved register and is preserved across the call.
1158 ASSERT(IsCalleeSavedRegister(saved_fp_or_sp));
1159 // Other temps don't need to be preserved.
1160
1161 __ movl(saved_fp_or_sp, is_leaf_ ? SPREG : FPREG);
1162
1163 intptr_t stack_required = marshaller_.RequiredStackSpaceInBytes();
1164
1165 if (is_leaf_) {
1166 // For leaf calls we need to leave space at the bottom for the pre-aligned SP.
1167 stack_required += compiler::target::kWordSize;
1168 } else {
1169 // Make a space to put the return address.
1170 __ pushl(compiler::Immediate(0));
1171
1172 // We need to create a dummy "exit frame". It will have a null code object.
1173 __ LoadObject(CODE_REG, Object::null_object());
1174 __ EnterDartFrame(0);
1175 }
1176
1177 // Reserve space for the arguments that go on the stack (if any), then align.
1178 __ ReserveAlignedFrameSpace(stack_required);
1180 UNIMPLEMENTED();
1181 }
1182
1183 EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp,
1184 locs()->temp(1).reg());
1185
1186 if (is_leaf_) {
1187 // We store the pre-align SP at a fixed offset from the final SP.
1188 // Pushing before alignment would mean its placement would vary with how
1189 // much the frame was unaligned.
1190 __ movl(compiler::Address(SPREG, marshaller_.RequiredStackSpaceInBytes()),
1191 saved_fp_or_sp);
1192 }
1193
1195 __ Comment(is_leaf_ ? "Leaf Call" : "Call");
1196 }
1197
1198 if (is_leaf_) {
1199#if !defined(PRODUCT)
1200 // Set the thread object's top_exit_frame_info and VMTag to enable the
1201 // profiler to determine that thread is no longer executing Dart code.
1202 __ movl(compiler::Address(
1204 FPREG);
1205 __ movl(compiler::Assembler::VMTagAddress(), branch);
1206#endif
1207
1208 __ call(branch);
1209
1210#if !defined(PRODUCT)
1211 __ movl(compiler::Assembler::VMTagAddress(),
1212 compiler::Immediate(compiler::target::Thread::vm_tag_dart_id()));
1213 __ movl(compiler::Address(
1215 compiler::Immediate(0));
1216#endif
1217 } else {
1218 // We need to copy a dummy return address up into the dummy stack frame so
1219 // the stack walker will know which safepoint to use. Unlike X64, there's no
1220 // PC-relative 'leaq' available, so we have to do a trick with 'call'.
1221 compiler::Label get_pc;
1222 __ call(&get_pc);
1223 compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
1224 UntaggedPcDescriptors::Kind::kOther, locs(),
1225 env());
1226 __ Bind(&get_pc);
1227 __ popl(temp);
1228 __ movl(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize),
1229 temp);
1230
1232 // We cannot trust that this code will be executable within a safepoint.
1233 // Therefore we delegate the responsibility of entering/exiting the
1234 // safepoint to a stub which is in the VM isolate's heap, which will never lose
1235 // execute permission.
1236 __ movl(temp,
1237 compiler::Address(
1238 THR, compiler::target::Thread::
1239 call_native_through_safepoint_entry_point_offset()));
1240
1241 // Calls EAX within a safepoint and clobbers EBX.
1242 ASSERT(branch == EAX);
1243 __ call(temp);
1244
1245 if (marshaller_.IsHandleCType(compiler::ffi::kResultIndex)) {
1246 __ Comment("Check Dart_Handle for Error.");
1247 compiler::Label not_error;
1248 __ movl(temp,
1249 compiler::Address(CallingConventions::kReturnReg,
1251 __ BranchIfSmi(temp, &not_error);
1252 __ LoadClassId(temp, temp);
1253 __ RangeCheck(temp, kNoRegister, kFirstErrorCid, kLastErrorCid,
1254 compiler::AssemblerBase::kIfNotInRange, &not_error);
1255
1256 // Slow path, use the stub to propagate error, to save on code-size.
1257 __ Comment("Slow path: call Dart_PropagateError through stub.");
1258 __ movl(temp,
1259 compiler::Address(
1260 THR, compiler::target::Thread::
1261 call_native_through_safepoint_entry_point_offset()));
1263 __ movl(EAX, compiler::Address(
1264 THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
1265 __ call(temp);
1266#if defined(DEBUG)
1267 // We should never return with normal control flow from this.
1268 __ int3();
1269#endif
1270
1271 __ Bind(&not_error);
1272 }
1273 }
1274
1275 // Restore the stack when a struct by value is returned into memory pointed
1276 // to by a pointer that is passed into the function.
1278 marshaller_.Location(compiler::ffi::kResultIndex).IsPointerToMemory()) {
1279 // Callee uses `ret 4` instead of `ret` to return.
1280 // See: https://c9x.me/x86/html/file_module_x86_id_280.html
1281 // Caller does `sub esp, 4` immediately after return to balance stack.
1282 __ subl(SPREG, compiler::Immediate(compiler::target::kWordSize));
1283 }
1284
1285 // The x86 calling convention requires floating point values to be returned
1286 // on the "floating-point stack" (aka. register ST0). We don't use the
1287 // floating-point stack in Dart, so we need to move the return value back
1288 // into an XMM register.
1289 if (representation() == kUnboxedDouble) {
1290 __ fstpl(compiler::Address(SPREG, -kDoubleSize));
1291 __ movsd(XMM0, compiler::Address(SPREG, -kDoubleSize));
1292 } else if (representation() == kUnboxedFloat) {
1293 __ fstps(compiler::Address(SPREG, -kFloatSize));
1294 __ movss(XMM0, compiler::Address(SPREG, -kFloatSize));
1295 }
1296
1297 // Pass both registers for use as clobbered temp registers.
1298 EmitReturnMoves(compiler, saved_fp_or_sp, temp);
1299
1300 if (is_leaf_) {
1301 // Restore the pre-aligned SP. It was stored right before the first stack argument.
1302 __ movl(SPREG,
1303 compiler::Address(SPREG, marshaller_.RequiredStackSpaceInBytes()));
1304 } else {
1305 // Leave dummy exit frame.
1306 __ LeaveDartFrame();
1307
1308 // Instead of returning to the "fake" return address, we just pop it.
1309 __ popl(temp);
1310 }
1311}
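// Note: the regular (non-leaf) path above builds a dummy exit frame with a
// null Code object and patches in a return address obtained via a call/pop
// pair, so the stack walker can find the safepoint; entering and leaving the
// safepoint itself is delegated to a stub that never loses execute
// permission. The leaf path skips the exit frame and only records the
// pre-aligned ESP (plus, outside PRODUCT mode, the exit frame info and VM
// tag for the profiler).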
1312
1313// Keep in sync with NativeReturnInstr::EmitNativeCode.
1314void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1315 __ Bind(compiler->GetJumpLabel(this));
1316
1317 // Enter the entry frame. NativeParameterInstr expects this frame has size
1318 // -exit_link_slot_from_entry_fp, verified below.
1319 __ EnterFrame(0);
1320
1321 // Save a space for the code object.
1322 __ xorl(EAX, EAX);
1323 __ pushl(EAX);
1324
1325#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
1326#error Unimplemented
1327#endif
1328
1329 // Save ABI callee-saved registers.
1330 __ pushl(EBX);
1331 __ pushl(ESI);
1332 __ pushl(EDI);
1333
1334 // Save the current VMTag on the stack.
1335 __ movl(ECX, compiler::Assembler::VMTagAddress());
1336 __ pushl(ECX);
1338
1339 // Save top resource.
1340 __ pushl(
1342 __ movl(
1344 compiler::Immediate(0));
1345
1346 __ pushl(compiler::Address(
1348
1349 // Save top exit frame info. Stack walker expects it to be here.
1350 __ pushl(compiler::Address(
1352
1353 // In debug mode, verify that we've pushed the top exit frame info at the
1354 // correct offset from FP.
1355 __ EmitEntryFrameVerification();
1356
1357 // The callback trampoline (caller) has already left the safepoint for us.
1358 __ TransitionNativeToGenerated(EAX, /*exit_safepoint=*/false,
1359 /*ignore_unwind_in_progress=*/false,
1360 /*set_tag=*/false);
1361
1362 // Now that the safepoint has ended, we can hold Dart objects with bare hands.
1363
1364 // Load the code object.
1365 const Function& target_function = marshaller_.dart_signature();
1366 const intptr_t callback_id = target_function.FfiCallbackId();
1367 __ movl(EAX, compiler::Address(
1369 __ movl(EAX, compiler::Address(
1371 __ movl(EAX,
1372 compiler::Address(
1374 __ movl(EAX, compiler::FieldAddress(
1376 __ movl(CODE_REG, compiler::FieldAddress(
1378 callback_id * compiler::target::kWordSize));
1379
1380 // Put the code object in the reserved slot.
1381 __ movl(compiler::Address(FPREG,
1383 CODE_REG);
1384
1385 // Load a GC-safe value for the arguments descriptor (unused but tagged).
1387
1388 // Push a dummy return address which suggests that we are inside of
1389 // InvokeDartCodeStub. This is how the stack walker detects an entry frame.
1390 __ movl(EAX,
1391 compiler::Address(
1393 __ pushl(compiler::FieldAddress(
1395
1396 // Continue with Dart frame setup.
1398
1399 // Delay setting the tag until the profiler's stack walker will see the
1400 // InvokeDartCode return address.
1401 __ movl(compiler::Assembler::VMTagAddress(),
1402 compiler::Immediate(compiler::target::Thread::vm_tag_dart_id()));
1403}
1404
1405#define R(r) (1 << r)
1406
1408 Zone* zone,
1409 bool is_optimizing) const {
1412 static_assert(saved_fp < temp0, "Unexpected ordering of registers in set.");
1413 return MakeLocationSummaryInternal(zone, (R(saved_fp) | R(temp0)));
1414}
1415
1416#undef R
1417
1418void LeafRuntimeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1419 const Register saved_fp = locs()->temp(0).reg();
1420 const Register temp0 = locs()->temp(1).reg();
1421
1422 __ MoveRegister(saved_fp, FPREG);
1423 const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
1424 __ EnterCFrame(frame_space);
1425
1426 EmitParamMoves(compiler, saved_fp, temp0);
1427
1428 const Register target_address = locs()->in(TargetAddressIndex()).reg();
1429 __ movl(compiler::Assembler::VMTagAddress(), target_address);
1430 __ CallCFunction(target_address);
1431 __ movl(compiler::Assembler::VMTagAddress(),
1432 compiler::Immediate(VMTag::kDartTagId));
1433
1434 __ LeaveCFrame();
1435}
1436
1438 Zone* zone,
1439 bool opt) const {
1440 const intptr_t kNumInputs = 1;
1441 // TODO(fschneider): Allow immediate operands for the char code.
1442 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1444}
1445
1447 FlowGraphCompiler* compiler) {
1448 Register char_code = locs()->in(0).reg();
1449 Register result = locs()->out(0).reg();
1450 __ movl(result, compiler::Immediate(
1451 reinterpret_cast<uword>(Symbols::PredefinedAddress())));
1452 __ movl(result,
1453 compiler::Address(result, char_code,
1454 TIMES_HALF_WORD_SIZE, // Char code is a smi.
1456}
1457
1458LocationSummary* StringToCharCodeInstr::MakeLocationSummary(Zone* zone,
1459 bool opt) const {
1460 const intptr_t kNumInputs = 1;
1461 return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
1463}
1464
1465void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1466 ASSERT(cid_ == kOneByteStringCid);
1467 Register str = locs()->in(0).reg();
1468 Register result = locs()->out(0).reg();
1469 compiler::Label is_one, done;
1470 __ movl(result, compiler::FieldAddress(str, String::length_offset()));
1471 __ cmpl(result, compiler::Immediate(Smi::RawValue(1)));
1472 __ j(EQUAL, &is_one, compiler::Assembler::kNearJump);
1473 __ movl(result, compiler::Immediate(Smi::RawValue(-1)));
1474 __ jmp(&done);
1475 __ Bind(&is_one);
1476 __ movzxb(result, compiler::FieldAddress(str, OneByteString::data_offset()));
1477 __ SmiTag(result);
1478 __ Bind(&done);
1479}
1480
1481LocationSummary* Utf8ScanInstr::MakeLocationSummary(Zone* zone,
1482 bool opt) const {
1483 const intptr_t kNumInputs = 5;
1484 const intptr_t kNumTemps = 0;
1485 LocationSummary* summary = new (zone)
1486 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1487 summary->set_in(0, Location::Any()); // decoder
1488 summary->set_in(1, Location::WritableRegister()); // bytes
1489 summary->set_in(2, Location::WritableRegister()); // start
1490 summary->set_in(3, Location::WritableRegister()); // end
1491 summary->set_in(4, Location::RequiresRegister()); // table
1492 summary->set_out(0, Location::RequiresRegister());
1493 return summary;
1494}
1495
1496void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1497 const Register bytes_reg = locs()->in(1).reg();
1498 const Register start_reg = locs()->in(2).reg();
1499 const Register end_reg = locs()->in(3).reg();
1500 const Register table_reg = locs()->in(4).reg();
1501 const Register size_reg = locs()->out(0).reg();
1502
1503 const Register bytes_ptr_reg = start_reg;
1504 const Register flags_reg = end_reg;
1505 const Register temp_reg = bytes_reg;
1506 const XmmRegister vector_reg = FpuTMP;
1507
1508 const intptr_t kBytesEndTempOffset = 1 * compiler::target::kWordSize;
1509 const intptr_t kBytesEndMinus16TempOffset = 0 * compiler::target::kWordSize;
1510
1511 const intptr_t kSizeMask = 0x03;
1512 const intptr_t kFlagsMask = 0x3C;
1513
1514 compiler::Label scan_ascii, ascii_loop, ascii_loop_in, nonascii_loop;
1515 compiler::Label rest, rest_loop, rest_loop_in, done;
1516
1517 // Address of input bytes.
1518 __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
1519
1520 // Pointers to start, end and end-16.
1521 __ leal(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
1522 __ leal(temp_reg, compiler::Address(bytes_reg, end_reg, TIMES_1, 0));
1523 __ pushl(temp_reg);
1524 __ leal(temp_reg, compiler::Address(temp_reg, -16));
1525 __ pushl(temp_reg);
1526
1527 // Initialize size and flags.
1528 __ xorl(size_reg, size_reg);
1529 __ xorl(flags_reg, flags_reg);
1530
1531 __ jmp(&scan_ascii, compiler::Assembler::kNearJump);
1532
1533 // Loop scanning through ASCII bytes one 16-byte vector at a time.
1534 // While scanning, the size register contains the size as it was at the start
1535 // of the current block of ASCII bytes, minus the address of the start of the
1536 // block. After the block, the end address of the block is added to update the
1537 // size to include the bytes in the block.
1538 __ Bind(&ascii_loop);
1539 __ addl(bytes_ptr_reg, compiler::Immediate(16));
1540 __ Bind(&ascii_loop_in);
1541
1542 // Exit vectorized loop when there are less than 16 bytes left.
1543 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndMinus16TempOffset));
1545
1546 // Find next non-ASCII byte within the next 16 bytes.
1547 // Note: In principle, we should use MOVDQU here, since the loaded value is
1548 // used as input to an integer instruction. In practice, according to Agner
1549 // Fog, there is no penalty for using the wrong kind of load.
1550 __ movups(vector_reg, compiler::Address(bytes_ptr_reg, 0));
1551 __ pmovmskb(temp_reg, vector_reg);
1552 __ bsfl(temp_reg, temp_reg);
1553 __ j(EQUAL, &ascii_loop, compiler::Assembler::kNearJump);
1554
1555 // Point to non-ASCII byte and update size.
1556 __ addl(bytes_ptr_reg, temp_reg);
1557 __ addl(size_reg, bytes_ptr_reg);
1558
1559 // Read first non-ASCII byte.
1560 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1561
1562 // Loop over block of non-ASCII bytes.
1563 __ Bind(&nonascii_loop);
1564 __ addl(bytes_ptr_reg, compiler::Immediate(1));
1565
1566 // Update size and flags based on byte value.
1567 __ movzxb(temp_reg, compiler::FieldAddress(
1568 table_reg, temp_reg, TIMES_1,
1570 __ orl(flags_reg, temp_reg);
1571 __ andl(temp_reg, compiler::Immediate(kSizeMask));
1572 __ addl(size_reg, temp_reg);
1573
1574 // Stop if end is reached.
1575 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset));
1577
1578 // Go to ASCII scan if next byte is ASCII, otherwise loop.
1579 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1580 __ testl(temp_reg, compiler::Immediate(0x80));
1581 __ j(NOT_EQUAL, &nonascii_loop, compiler::Assembler::kNearJump);
1582
1583 // Enter the ASCII scanning loop.
1584 __ Bind(&scan_ascii);
1585 __ subl(size_reg, bytes_ptr_reg);
1586 __ jmp(&ascii_loop_in);
1587
1588 // Less than 16 bytes left. Process the remaining bytes individually.
1589 __ Bind(&rest);
1590
1591 // Update size after ASCII scanning loop.
1592 __ addl(size_reg, bytes_ptr_reg);
1593 __ jmp(&rest_loop_in, compiler::Assembler::kNearJump);
1594
1595 __ Bind(&rest_loop);
1596
1597 // Read byte and increment pointer.
1598 __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
1599 __ addl(bytes_ptr_reg, compiler::Immediate(1));
1600
1601 // Update size and flags based on byte value.
1602 __ movzxb(temp_reg, compiler::FieldAddress(
1603 table_reg, temp_reg, TIMES_1,
1605 __ orl(flags_reg, temp_reg);
1606 __ andl(temp_reg, compiler::Immediate(kSizeMask));
1607 __ addl(size_reg, temp_reg);
1608
1609 // Stop if end is reached.
1610 __ Bind(&rest_loop_in);
1611 __ cmpl(bytes_ptr_reg, compiler::Address(ESP, kBytesEndTempOffset));
1613 __ Bind(&done);
1614
1615 // Pop temporaries.
1616 __ addl(ESP, compiler::Immediate(2 * compiler::target::kWordSize));
1617
1618 // Write flags to field.
1619 __ andl(flags_reg, compiler::Immediate(kFlagsMask));
1620 if (!IsScanFlagsUnboxed()) {
1621 __ SmiTag(flags_reg);
1622 }
1623 Register decoder_reg;
1624 const Location decoder_location = locs()->in(0);
1625 if (decoder_location.IsStackSlot()) {
1626 __ movl(temp_reg, LocationToStackSlotAddress(decoder_location));
1627 decoder_reg = temp_reg;
1628 } else {
1629 decoder_reg = decoder_location.reg();
1630 }
1631 const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
1632 __ orl(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
1633 flags_reg);
1634}
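// Note: the scan above alternates between two modes: 16-byte chunks are
// checked for non-ASCII bytes with pmovmskb (which gathers the high bit of
// every byte into a mask), and any non-ASCII run is then walked byte by byte
// through the lookup table, accumulating the decoded size in size_reg and
// the flag bits in flags_reg.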
1635
1636LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
1637 bool opt) const {
1638 // The compiler must optimize any function that includes a LoadIndexed
1639 // instruction that uses typed data cids, since extracting the payload address
1640 // from views is done in a compiler pass after all code motion has happened.
1642
1643 const intptr_t kNumInputs = 2;
1644 const intptr_t kNumTemps = 0;
1645 LocationSummary* locs = new (zone)
1646 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1648 // The index is either untagged (element size == 1) or a smi (for all
1649 // element sizes > 1).
1650 const bool need_writable_index_register = index_scale() == 1;
1651 const bool can_be_constant =
1652 index()->BindsToConstant() &&
1654 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
1655 locs->set_in(
1656 kIndexPos,
1657 can_be_constant
1658 ? Location::Constant(index()->definition()->AsConstant())
1659 : (need_writable_index_register ? Location::WritableRegister()
1661 auto const rep =
1664 if (rep == kUnboxedInt64) {
1667 } else {
1669 }
1670 } else if (RepresentationUtils::IsUnboxed(rep)) {
1672 } else {
1674 }
1675 return locs;
1676}
1677
1678void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1679 // The array register points to the backing store for external arrays.
1680 const Register array = locs()->in(kArrayPos).reg();
1681 const Location index = locs()->in(kIndexPos);
1682
1683 bool index_unboxed = index_unboxed_;
1684 if (index_scale() == 1 && !index_unboxed) {
1685 if (index.IsRegister()) {
1686 __ SmiUntag(index.reg());
1687 index_unboxed = true;
1688 } else {
1689 ASSERT(index.IsConstant());
1690 }
1691 }
1692
1693 compiler::Address element_address =
1696 index_unboxed, array, index.reg())
1697 : compiler::Assembler::ElementAddressForIntIndex(
1699 Smi::Cast(index.constant()).Value());
1700
1701 auto const rep =
1705 if (rep == kUnboxedInt64) {
1706 ASSERT(locs()->out(0).IsPairLocation());
1707 PairLocation* result_pair = locs()->out(0).AsPairLocation();
1708 const Register result_lo = result_pair->At(0).reg();
1709 const Register result_hi = result_pair->At(1).reg();
1710 __ movl(result_lo, element_address);
1711 element_address =
1712 index.IsRegister()
1714 IsUntagged(), class_id(), index_scale(), index_unboxed,
1715 array, index.reg(), kWordSize)
1716 : compiler::Assembler::ElementAddressForIntIndex(
1718 Smi::Cast(index.constant()).Value(), kWordSize);
1719 __ movl(result_hi, element_address);
1720 } else {
1721 Register result = locs()->out(0).reg();
1722 __ Load(result, element_address, RepresentationUtils::OperandSize(rep));
1723 }
1724 } else if (RepresentationUtils::IsUnboxed(rep)) {
1725 XmmRegister result = locs()->out(0).fpu_reg();
1726 if (rep == kUnboxedFloat) {
1727 __ movss(result, element_address);
1728 } else if (rep == kUnboxedDouble) {
1729 __ movsd(result, element_address);
1730 } else {
1731 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
1732 rep == kUnboxedFloat64x2);
1733 __ movups(result, element_address);
1734 }
1735 } else {
1736 const Register result = locs()->out(0).reg();
1737 ASSERT(representation() == kTagged);
1738 ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
1739 (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
1740 __ movl(result, element_address);
1741 }
1742}
1743
1744LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
1745 bool opt) const {
1746 // The compiler must optimize any function that includes a StoreIndexed
1747 // instruction that uses typed data cids, since extracting the payload address
1748 // from views is done in a compiler pass after all code motion has happened.
1750
1751 const intptr_t kNumInputs = 3;
1752 const intptr_t kNumTemps =
1753 class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 2 : 0;
1754 LocationSummary* locs = new (zone)
1755 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
1757 // The index is either untagged (element size == 1) or a smi (for all
1758 // element sizes > 1).
1759 const bool need_writable_index_register = index_scale() == 1;
1760 const bool can_be_constant =
1761 index()->BindsToConstant() &&
1763 index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
1764 locs->set_in(
1765 1, can_be_constant
1766 ? Location::Constant(index()->definition()->AsConstant())
1767 : (need_writable_index_register ? Location::WritableRegister()
1769 auto const rep =
1770 RepresentationUtils::RepresentationOfArrayElement(class_id());
1771 if (RepresentationUtils::IsUnboxedInteger(rep)) {
1772 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
1773 // TODO(fschneider): Add location constraint for byte registers (EAX,
1774 // EBX, ECX, EDX) instead of using a fixed register.
1775 locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), EAX));
1776 } else if (rep == kUnboxedInt64) {
1777 locs->set_in(2, Location::Pair(Location::RequiresRegister(),
1778 Location::RequiresRegister()));
1779 } else {
1780 locs->set_in(2, Location::RequiresRegister());
1781 }
1782 } else if (RepresentationUtils::IsUnboxed(rep)) {
1783 // TODO(srdjan): Support Float64 constants.
1784 locs->set_in(2, Location::RequiresFpuRegister());
1785 } else if (class_id() == kArrayCid) {
1787 if (ShouldEmitStoreBarrier()) {
1792 }
1793 } else {
1794 UNREACHABLE();
1795 }
1796 return locs;
1797}
1798
1799void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1800 // The array register points to the backing store for external arrays.
1801 const Register array = locs()->in(0).reg();
1802 const Location index = locs()->in(1);
1803
1804 bool index_unboxed = index_unboxed_;
1805 if ((index_scale() == 1) && index.IsRegister() && !index_unboxed) {
1806 __ SmiUntag(index.reg());
1807 index_unboxed = true;
1808 }
1809 compiler::Address element_address =
1810 index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
1811 IsUntagged(), class_id(), index_scale(),
1812 index_unboxed, array, index.reg())
1813 : compiler::Assembler::ElementAddressForIntIndex(
1814 IsUntagged(), class_id(), index_scale(), array,
1815 Smi::Cast(index.constant()).Value());
1816
1817 auto const rep =
1818 RepresentationUtils::RepresentationOfArrayElement(class_id());
1819 ASSERT(representation() == Boxing::NativeRepresentation(rep));
1820 if (IsClampedTypedDataBaseClassId(class_id())) {
1821 ASSERT(rep == kUnboxedUint8);
1822 if (locs()->in(2).IsConstant()) {
1823 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1824 intptr_t value = constant.Value();
1825 // Clamp to 0x0 or 0xFF respectively.
1826 if (value > 0xFF) {
1827 value = 0xFF;
1828 } else if (value < 0) {
1829 value = 0;
1830 }
1831 __ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
1832 } else {
1833 ASSERT(locs()->in(2).reg() == EAX);
1834 compiler::Label store_value, store_0xff;
1835 __ cmpl(EAX, compiler::Immediate(0xFF));
1836 __ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
1837 // Clamp to 0x0 or 0xFF respectively.
1838 __ j(GREATER, &store_0xff);
1839 __ xorl(EAX, EAX);
1840 __ jmp(&store_value, compiler::Assembler::kNearJump);
1841 __ Bind(&store_0xff);
1842 __ movl(EAX, compiler::Immediate(0xFF));
1843 __ Bind(&store_value);
1844 __ movb(element_address, AL);
1845 }
1846 } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
1847 if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
1848 if (locs()->in(2).IsConstant()) {
1849 const Smi& constant = Smi::Cast(locs()->in(2).constant());
1850 __ movb(element_address,
1851 compiler::Immediate(static_cast<int8_t>(constant.Value())));
1852 } else {
1853 ASSERT(locs()->in(2).reg() == EAX);
1854 __ movb(element_address, AL);
1855 }
1856 } else if (rep == kUnboxedInt64) {
1857 ASSERT(locs()->in(2).IsPairLocation());
1858 PairLocation* value_pair = locs()->in(2).AsPairLocation();
1859 const Register value_lo = value_pair->At(0).reg();
1860 const Register value_hi = value_pair->At(1).reg();
1861 __ movl(element_address, value_lo);
1862 element_address =
1863 index.IsRegister()
1865 IsUntagged(), class_id(), index_scale(), index_unboxed,
1866 array, index.reg(), kWordSize)
1867 : compiler::Assembler::ElementAddressForIntIndex(
1869 Smi::Cast(index.constant()).Value(), kWordSize);
1870 __ movl(element_address, value_hi);
1871 } else {
1872 Register value = locs()->in(2).reg();
1873 __ Store(value, element_address, RepresentationUtils::OperandSize(rep));
1874 }
1875 } else if (RepresentationUtils::IsUnboxed(rep)) {
1876 if (rep == kUnboxedFloat) {
1877 __ movss(element_address, locs()->in(2).fpu_reg());
1878 } else if (rep == kUnboxedDouble) {
1879 __ movsd(element_address, locs()->in(2).fpu_reg());
1880 } else {
1881 ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
1882 rep == kUnboxedFloat64x2);
1883 __ movups(element_address, locs()->in(2).fpu_reg());
1884 }
1885 } else if (class_id() == kArrayCid) {
1886 ASSERT(rep == kTagged);
1887 if (ShouldEmitStoreBarrier()) {
1888 Register value = locs()->in(2).reg();
1889 Register slot = locs()->temp(0).reg();
1890 Register scratch = locs()->temp(1).reg();
1891 __ leal(slot, element_address);
1892 __ StoreIntoArray(array, slot, value, CanValueBeSmi(), scratch);
1893 } else if (locs()->in(2).IsConstant()) {
1894 const Object& constant = locs()->in(2).constant();
1895 __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
1896 } else {
1897 Register value = locs()->in(2).reg();
1898 __ StoreIntoObjectNoBarrier(array, element_address, value);
1899 }
1900 } else {
1901 UNREACHABLE();
1902 }
1903}
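// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The clamped uint8 path above (constant and register cases alike) implements
// Uint8ClampedList semantics: negative values store 0, values above 255 store
// 255, and anything in range is stored unchanged. In plain C++ terms:
static uint8_t ClampToUint8Sketch(intptr_t value) {
  if (value < 0) return 0;
  if (value > 0xFF) return 0xFF;
  return static_cast<uint8_t>(value);
}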
1904
1905DEFINE_UNIMPLEMENTED_INSTRUCTION(GuardFieldTypeInstr)
1906
1907LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
1908 bool opt) const {
1909 const intptr_t kNumInputs = 1;
1910
1911 const intptr_t value_cid = value()->Type()->ToCid();
1912 const intptr_t field_cid = field().guarded_cid();
1913
1914 const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
1915 const bool needs_value_cid_temp_reg =
1916 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
1917 const bool needs_field_temp_reg = emit_full_guard;
1918
1919 intptr_t num_temps = 0;
1920 if (needs_value_cid_temp_reg) {
1921 num_temps++;
1922 }
1923 if (needs_field_temp_reg) {
1924 num_temps++;
1925 }
1926
1927 LocationSummary* summary = new (zone)
1928 LocationSummary(zone, kNumInputs, num_temps, LocationSummary::kNoCall);
1929 summary->set_in(0, Location::RequiresRegister());
1930
1931 for (intptr_t i = 0; i < num_temps; i++) {
1932 summary->set_temp(i, Location::RequiresRegister());
1933 }
1934
1935 return summary;
1936}
1937
1938void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
1940 ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
1941 ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
1942
1943 const intptr_t value_cid = value()->Type()->ToCid();
1944 const intptr_t field_cid = field().guarded_cid();
1945 const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
1946
1947 if (field_cid == kDynamicCid) {
1948 return; // Nothing to emit.
1949 }
1950
1951 const bool emit_full_guard =
1952 !compiler->is_optimizing() || (field_cid == kIllegalCid);
1953
1954 const bool needs_value_cid_temp_reg =
1955 (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
1956
1957 const bool needs_field_temp_reg = emit_full_guard;
1958
1959 const Register value_reg = locs()->in(0).reg();
1960
1961 const Register value_cid_reg =
1962 needs_value_cid_temp_reg ? locs()->temp(0).reg() : kNoRegister;
1963
1964 const Register field_reg = needs_field_temp_reg
1965 ? locs()->temp(locs()->temp_count() - 1).reg()
1966 : kNoRegister;
1967
1968 compiler::Label ok, fail_label;
1969
1970 compiler::Label* deopt = nullptr;
1971 if (compiler->is_optimizing()) {
1972 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
1973 }
1974
1975 compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
1976
1977 if (emit_full_guard) {
1978 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
1979
1980 compiler::FieldAddress field_cid_operand(field_reg,
1981 Field::guarded_cid_offset());
1982 compiler::FieldAddress field_nullability_operand(
1983 field_reg, Field::is_nullable_offset());
1984
1985 if (value_cid == kDynamicCid) {
1986 LoadValueCid(compiler, value_cid_reg, value_reg);
1987 __ cmpl(value_cid_reg, field_cid_operand);
1988 __ j(EQUAL, &ok);
1989 __ cmpl(value_cid_reg, field_nullability_operand);
1990 } else if (value_cid == kNullCid) {
1991 // Value in graph known to be null.
1992 // Compare with null.
1993 __ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
1994 } else {
1995 // Value in graph known to be non-null.
1996 // Compare class id with guard field class id.
1997 __ cmpl(field_cid_operand, compiler::Immediate(value_cid));
1998 }
1999 __ j(EQUAL, &ok);
2000
2001 // Check if the tracked state of the guarded field can be initialized
2002 // inline. If the field needs a length check, we fall through to the
2003 // runtime, which is responsible for computing the offset of the length
2004 // field based on the class id.
2005 // The length guard will be emitted separately when needed via the
2006 // GuardFieldLength instruction after GuardFieldClass.
2007 if (!field().needs_length_check()) {
2008 // An uninitialized field can be handled inline. Check if the
2009 // field is still uninitialized.
2010 __ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
2011 // Jump to failure path when guard field has been initialized and
2012 // the field and value class ids do not match.
2013 __ j(NOT_EQUAL, fail);
2014
2015 if (value_cid == kDynamicCid) {
2016 // Do not know value's class id.
2017 __ movl(field_cid_operand, value_cid_reg);
2018 __ movl(field_nullability_operand, value_cid_reg);
2019 } else {
2020 ASSERT(field_reg != kNoRegister);
2021 __ movl(field_cid_operand, compiler::Immediate(value_cid));
2022 __ movl(field_nullability_operand, compiler::Immediate(value_cid));
2023 }
2024
2025 __ jmp(&ok);
2026 }
2027
2028 if (deopt == nullptr) {
2029 __ Bind(fail);
2030
2031 __ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
2032 compiler::Immediate(kDynamicCid));
2033 __ j(EQUAL, &ok);
2034
2035 __ pushl(field_reg);
2036 __ pushl(value_reg);
2037 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2038 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2039 __ Drop(2); // Drop the field and the value.
2040 } else {
2041 __ jmp(fail);
2042 }
2043 } else {
2044 ASSERT(compiler->is_optimizing());
2045 ASSERT(deopt != nullptr);
2046 ASSERT(fail == deopt);
2047
2048 // The field's guarded class id has been initialized and is known.
2049 if (value_cid == kDynamicCid) {
2050 // Value's class id is not known.
2051 __ testl(value_reg, compiler::Immediate(kSmiTagMask));
2052
2053 if (field_cid != kSmiCid) {
2054 __ j(ZERO, fail);
2055 __ LoadClassId(value_cid_reg, value_reg);
2056 __ cmpl(value_cid_reg, compiler::Immediate(field_cid));
2057 }
2058
2059 if (field().is_nullable() && (field_cid != kNullCid)) {
2060 __ j(EQUAL, &ok);
2061 if (field_cid != kSmiCid) {
2062 __ cmpl(value_cid_reg, compiler::Immediate(kNullCid));
2063 } else {
2064 const compiler::Immediate& raw_null =
2065 compiler::Immediate(static_cast<intptr_t>(Object::null()));
2066 __ cmpl(value_reg, raw_null);
2067 }
2068 }
2069 __ j(NOT_EQUAL, fail);
2070 } else if (value_cid == field_cid) {
2071 // This would normally be caught by Canonicalize, but RemoveRedefinitions
2072 // may sometimes produce this situation after the last Canonicalize pass.
2073 } else {
2074 // Both value's and field's class id is known.
2075 ASSERT(value_cid != nullability);
2076 __ jmp(fail);
2077 }
2078 }
2079 __ Bind(&ok);
2080}
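// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The guard above maintains a small per-field lattice: kIllegalCid means "no
// value observed yet", a concrete cid means "every value so far had this
// class", and kDynamicCid means "polymorphic, stop guarding". The inline fast
// path mirrors the runtime transition, which behaves roughly like this:
static intptr_t UpdateGuardedCidSketch(intptr_t guarded_cid,
                                       intptr_t value_cid) {
  if (guarded_cid == kIllegalCid) return value_cid;  // First observation.
  if (guarded_cid == value_cid) return guarded_cid;  // Still monomorphic.
  return kDynamicCid;  // Mismatch: the field becomes dynamic.
}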
2081
2082LocationSummary* GuardFieldLengthInstr::MakeLocationSummary(Zone* zone,
2083 bool opt) const {
2084 const intptr_t kNumInputs = 1;
2085 if (!opt || (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2086 const intptr_t kNumTemps = 3;
2087 LocationSummary* summary = new (zone)
2088 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2089 summary->set_in(0, Location::RequiresRegister());
2090 // We need temporaries for field object, length offset and expected length.
2091 summary->set_temp(0, Location::RequiresRegister());
2092 summary->set_temp(1, Location::RequiresRegister());
2093 summary->set_temp(2, Location::RequiresRegister());
2094 return summary;
2095 } else {
2096 LocationSummary* summary = new (zone)
2097 LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
2098 summary->set_in(0, Location::RequiresRegister());
2099 return summary;
2100 }
2101 UNREACHABLE();
2102}
2103
2104void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2105 if (field().guarded_list_length() == Field::kNoFixedLength) {
2106 return; // Nothing to emit.
2107 }
2108
2109 compiler::Label* deopt =
2110 compiler->is_optimizing()
2111 ? compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField)
2112 : nullptr;
2113
2114 const Register value_reg = locs()->in(0).reg();
2115
2116 if (!compiler->is_optimizing() ||
2117 (field().guarded_list_length() == Field::kUnknownFixedLength)) {
2118 const Register field_reg = locs()->temp(0).reg();
2119 const Register offset_reg = locs()->temp(1).reg();
2120 const Register length_reg = locs()->temp(2).reg();
2121
2122 compiler::Label ok;
2123
2124 __ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
2125
2126 __ movsxb(
2127 offset_reg,
2128 compiler::FieldAddress(
2129 field_reg, Field::guarded_list_length_in_object_offset_offset()));
2130 __ movl(length_reg, compiler::FieldAddress(
2131 field_reg, Field::guarded_list_length_offset()));
2132
2133 __ cmpl(offset_reg, compiler::Immediate(0));
2134 __ j(NEGATIVE, &ok);
2135
2136 // Load the length from the value. GuardFieldClass already verified that
2137 // value's class matches guarded class id of the field.
2138 // offset_reg contains the offset already corrected by -kHeapObjectTag,
2139 // which is why we use Address instead of FieldAddress.
2140 __ cmpl(length_reg, compiler::Address(value_reg, offset_reg, TIMES_1, 0));
2141
2142 if (deopt == nullptr) {
2143 __ j(EQUAL, &ok);
2144
2145 __ pushl(field_reg);
2146 __ pushl(value_reg);
2147 ASSERT(!compiler->is_optimizing()); // No deopt info needed.
2148 __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
2149 __ Drop(2); // Drop the field and the value.
2150 } else {
2151 __ j(NOT_EQUAL, deopt);
2152 }
2153
2154 __ Bind(&ok);
2155 } else {
2156 ASSERT(compiler->is_optimizing());
2157 ASSERT(field().guarded_list_length() >= 0);
2158 ASSERT(field().guarded_list_length_in_object_offset() !=
2159 Field::kUnknownLengthOffset);
2160
2161 __ cmpl(compiler::FieldAddress(
2162 value_reg, field().guarded_list_length_in_object_offset()),
2163 compiler::Immediate(Smi::RawValue(field().guarded_list_length())));
2164 __ j(NOT_EQUAL, deopt);
2165 }
2166}
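// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// Once the guarded length and its in-object offset are known, the optimized
// guard above is a single comparison of the object's length slot against the
// expected length encoded as a Smi (Smi::RawValue shifts by the tag size):
static bool LengthGuardPassesSketch(intptr_t in_object_length_raw_smi,
                                    intptr_t guarded_list_length) {
  const intptr_t expected_raw_smi = guarded_list_length << kSmiTagSize;
  return in_object_length_raw_smi == expected_raw_smi;
}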
2167
2168LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
2169 bool opt) const {
2170 LocationSummary* locs =
2171 new (zone) LocationSummary(zone, 1, 1, LocationSummary::kNoCall);
2173 : Location::RequiresRegister());
2174 locs->set_temp(0, Location::RequiresRegister());
2175 return locs;
2176}
2177
2178void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2179 Register value = locs()->in(0).reg();
2180 Register temp = locs()->temp(0).reg();
2181
2182 compiler->used_static_fields().Add(&field());
2183
2184 __ movl(temp,
2185 compiler::Address(
2186 THR,
2187 field().is_shared()
2190 // Note: static field ids won't be changed by hot-reload.
2191 __ movl(
2192 compiler::Address(temp, compiler::target::FieldTable::OffsetOf(field())),
2193 value);
2194}
2195
2196LocationSummary* InstanceOfInstr::MakeLocationSummary(Zone* zone,
2197 bool opt) const {
2198 const intptr_t kNumInputs = 3;
2199 const intptr_t kNumTemps = 0;
2200 LocationSummary* summary = new (zone)
2201 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2202
2203 summary->set_in(0, Location::RegisterLocation(TypeTestABI::kInstanceReg));
2204 summary->set_in(1, Location::RegisterLocation(
2205 TypeTestABI::kInstantiatorTypeArgumentsReg));
2206 summary->set_in(
2207 2, Location::RegisterLocation(TypeTestABI::kFunctionTypeArgumentsReg));
2208 summary->set_out(0, Location::RegisterLocation(EAX));
2209 return summary;
2210}
2211
2212void InstanceOfInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2213 ASSERT(locs()->in(0).reg() == TypeTestABI::kInstanceReg);
2214 ASSERT(locs()->in(1).reg() == TypeTestABI::kInstantiatorTypeArgumentsReg);
2215 ASSERT(locs()->in(2).reg() == TypeTestABI::kFunctionTypeArgumentsReg);
2216
2217 compiler->GenerateInstanceOf(source(), deopt_id(), env(), type(), locs());
2218 ASSERT(locs()->out(0).reg() == EAX);
2219}
2220
2221// TODO(srdjan): In case of constant inputs make CreateArray kNoCall and
2222// use slow path stub.
2223LocationSummary* CreateArrayInstr::MakeLocationSummary(Zone* zone,
2224 bool opt) const {
2225 const intptr_t kNumInputs = 2;
2226 const intptr_t kNumTemps = 0;
2227 LocationSummary* locs = new (zone)
2228 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2234 return locs;
2235}
2236
2237// Inlines array allocation for known constant values.
2238static void InlineArrayAllocation(FlowGraphCompiler* compiler,
2239 intptr_t num_elements,
2240 compiler::Label* slow_path,
2241 compiler::Label* done) {
2242 const int kInlineArraySize = 12; // Same as kInlineInstanceSize.
2243 const intptr_t instance_size = Array::InstanceSize(num_elements);
2244
2245 // Instance in AllocateArrayABI::kResultReg.
2246 // Object end address in EBX.
2247 __ TryAllocateArray(kArrayCid, instance_size, slow_path,
2249 AllocateArrayABI::kResultReg, // instance
2250 EBX, // end address
2251 EDI); // temp
2252
2253 // Store the type argument field.
2254 __ StoreIntoObjectNoBarrier(
2256 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2259
2260 // Set the length field.
2261 __ StoreIntoObjectNoBarrier(
2263 compiler::FieldAddress(AllocateArrayABI::kResultReg,
2266
2267 // Initialize all array elements to raw_null.
2268 // AllocateArrayABI::kResultReg: new object start as a tagged pointer.
2269 // EBX: new object end address.
2270 // EDI: iterator which initially points to the start of the variable
2271 // data area to be initialized.
2272 if (num_elements > 0) {
2273 const intptr_t array_size = instance_size - sizeof(UntaggedArray);
2274 const compiler::Immediate& raw_null =
2275 compiler::Immediate(static_cast<intptr_t>(Object::null()));
2276 __ leal(EDI, compiler::FieldAddress(AllocateArrayABI::kResultReg,
2277 sizeof(UntaggedArray)));
2278 if (array_size < (kInlineArraySize * kWordSize)) {
2279 intptr_t current_offset = 0;
2280 __ movl(EBX, raw_null);
2281 while (current_offset < array_size) {
2282 __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2283 compiler::Address(EDI, current_offset),
2284 EBX);
2285 current_offset += kWordSize;
2286 }
2287 } else {
2288 compiler::Label init_loop;
2289 __ Bind(&init_loop);
2290 __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
2291 compiler::Address(EDI, 0),
2292 Object::null_object());
2293 __ addl(EDI, compiler::Immediate(kWordSize));
2294 __ cmpl(EDI, EBX);
2295 __ j(BELOW, &init_loop, compiler::Assembler::kNearJump);
2296 }
2297 }
2298 __ jmp(done, compiler::Assembler::kNearJump);
2299}
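// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// InlineArrayAllocation above initializes the new backing store with null
// using an unrolled run of stores for small arrays and a compact loop once
// the payload exceeds kInlineArraySize words. A word-level model of that
// fill strategy:
static void FillWithNullSketch(intptr_t* payload,
                               intptr_t num_words,
                               intptr_t raw_null) {
  const intptr_t kUnrollLimit = 12;  // Mirrors kInlineArraySize above.
  if (num_words < kUnrollLimit) {
    for (intptr_t i = 0; i < num_words; i++) {
      payload[i] = raw_null;  // Emitted as straight-line stores.
    }
  } else {
    for (intptr_t* cursor = payload; cursor < payload + num_words; cursor++) {
      *cursor = raw_null;  // Emitted as a small store/add/compare loop.
    }
  }
}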
2300
2301void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2302 compiler::Label slow_path, done;
2303 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2304 if (compiler->is_optimizing() && num_elements()->BindsToConstant() &&
2305 num_elements()->BoundConstant().IsSmi()) {
2306 const intptr_t length =
2307 Smi::Cast(num_elements()->BoundConstant()).Value();
2308 if (Array::IsValidLength(length)) {
2309 InlineArrayAllocation(compiler, length, &slow_path, &done);
2310 }
2311 }
2312 }
2313
2314 __ Bind(&slow_path);
2315 auto object_store = compiler->isolate_group()->object_store();
2316 const auto& allocate_array_stub =
2317 Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
2318 compiler->GenerateStubCall(source(), allocate_array_stub,
2319 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2320 env());
2321 __ Bind(&done);
2322}
2323
2324 LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
2325 Zone* zone,
2326 bool opt) const {
2327 ASSERT(opt);
2328 const intptr_t kNumInputs = 0;
2329 const intptr_t kNumTemps = 2;
2330 LocationSummary* locs = new (zone) LocationSummary(
2331 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2332 locs->set_temp(0, Location::RequiresRegister());
2333 locs->set_temp(1, Location::RequiresRegister());
2334 locs->set_out(0, Location::RegisterLocation(EAX));
2335 return locs;
2336}
2337
2338class AllocateContextSlowPath
2339 : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
2340 public:
2341 explicit AllocateContextSlowPath(
2342 AllocateUninitializedContextInstr* instruction)
2343 : TemplateSlowPathCode(instruction) {}
2344
2345 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2346 __ Comment("AllocateContextSlowPath");
2347 __ Bind(entry_label());
2348
2349 LocationSummary* locs = instruction()->locs();
2350 ASSERT(!locs->live_registers()->Contains(locs->out(0)));
2351
2352 compiler->SaveLiveRegisters(locs);
2353
2354 auto slow_path_env = compiler->SlowPathEnvironmentFor(
2355 instruction(), /*num_slow_path_args=*/0);
2356 ASSERT(slow_path_env != nullptr);
2357
2358 __ movl(EDX, compiler::Immediate(instruction()->num_context_variables()));
2359 compiler->GenerateStubCall(instruction()->source(),
2360 StubCode::AllocateContext(),
2361 UntaggedPcDescriptors::kOther, locs,
2362 instruction()->deopt_id(), slow_path_env);
2363 ASSERT(instruction()->locs()->out(0).reg() == EAX);
2364 compiler->RestoreLiveRegisters(instruction()->locs());
2365 __ jmp(exit_label());
2366 }
2367};
2368
2369 void AllocateUninitializedContextInstr::EmitNativeCode(
2370 FlowGraphCompiler* compiler) {
2371 ASSERT(compiler->is_optimizing());
2372 Register temp = locs()->temp(0).reg();
2373 Register temp2 = locs()->temp(1).reg();
2374 Register result = locs()->out(0).reg();
2375 // Try to allocate the object.
2376 AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
2377 compiler->AddSlowPathCode(slow_path);
2378 intptr_t instance_size = Context::InstanceSize(num_context_variables());
2379
2380 if (!FLAG_use_slow_path && FLAG_inline_alloc) {
2381 __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
2383 result, // instance
2384 temp, // end address
2385 temp2); // temp
2386
2387 // Set up the number-of-context-variables field.
2388 __ movl(compiler::FieldAddress(result, Context::num_variables_offset()),
2389 compiler::Immediate(num_context_variables()));
2390 } else {
2391 __ Jump(slow_path->entry_label());
2392 }
2393
2394 __ Bind(slow_path->exit_label());
2395}
2396
2397LocationSummary* AllocateContextInstr::MakeLocationSummary(Zone* zone,
2398 bool opt) const {
2399 const intptr_t kNumInputs = 0;
2400 const intptr_t kNumTemps = 1;
2401 LocationSummary* locs = new (zone)
2402 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2403 locs->set_temp(0, Location::RegisterLocation(EDX));
2404 locs->set_out(0, Location::RegisterLocation(EAX));
2405 return locs;
2406}
2407
2408void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2409 ASSERT(locs()->temp(0).reg() == EDX);
2410 ASSERT(locs()->out(0).reg() == EAX);
2411
2412 __ movl(EDX, compiler::Immediate(num_context_variables()));
2413 compiler->GenerateStubCall(source(), StubCode::AllocateContext(),
2414 UntaggedPcDescriptors::kOther, locs(), deopt_id(),
2415 env());
2416}
2417
2418LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
2419 bool opt) const {
2420 const intptr_t kNumInputs = 1;
2421 const intptr_t kNumTemps = 0;
2422 LocationSummary* locs = new (zone)
2423 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
2424 locs->set_in(0, Location::RegisterLocation(ECX));
2425 locs->set_out(0, Location::RegisterLocation(EAX));
2426 return locs;
2427}
2428
2429void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2430 ASSERT(locs()->in(0).reg() == ECX);
2431 ASSERT(locs()->out(0).reg() == EAX);
2432
2433 compiler->GenerateStubCall(source(), StubCode::CloneContext(),
2434 /*kind=*/UntaggedPcDescriptors::kOther, locs(),
2435 deopt_id(), env());
2436}
2437
2438LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
2439 bool opt) const {
2440 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kCall);
2441}
2442
2443void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2444 __ Bind(compiler->GetJumpLabel(this));
2445 compiler->AddExceptionHandler(this);
2446 if (HasParallelMove()) {
2448 }
2449
2450 // Restore ESP from EBP as we are coming from a throw and the code for
2451 // popping arguments has not been run.
2452 const intptr_t fp_sp_dist =
2454 compiler->StackSize()) *
2455 kWordSize;
2456 ASSERT(fp_sp_dist <= 0);
2457 __ leal(ESP, compiler::Address(EBP, fp_sp_dist));
2458
2459 if (!compiler->is_optimizing()) {
2460 if (raw_exception_var_ != nullptr) {
2461 __ movl(compiler::Address(EBP,
2463 raw_exception_var_)),
2465 }
2466 if (raw_stacktrace_var_ != nullptr) {
2467 __ movl(compiler::Address(EBP,
2469 raw_stacktrace_var_)),
2471 }
2472 }
2473}
2474
2475LocationSummary* CheckStackOverflowInstr::MakeLocationSummary(Zone* zone,
2476 bool opt) const {
2477 const intptr_t kNumInputs = 0;
2478 const intptr_t kNumTemps = opt ? 0 : 1;
2479 LocationSummary* summary = new (zone) LocationSummary(
2480 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
2481 if (!opt) {
2482 summary->set_temp(0, Location::RequiresRegister());
2483 }
2484 return summary;
2485}
2486
2487class CheckStackOverflowSlowPath
2488 : public TemplateSlowPathCode<CheckStackOverflowInstr> {
2489 public:
2490 static constexpr intptr_t kNumSlowPathArgs = 0;
2491
2492 explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
2493 : TemplateSlowPathCode(instruction) {}
2494
2495 virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
2496 if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
2497 __ Comment("CheckStackOverflowSlowPathOsr");
2498 __ Bind(osr_entry_label());
2499 __ movl(compiler::Address(THR, Thread::stack_overflow_flags_offset()),
2500 compiler::Immediate(Thread::kOsrRequest));
2501 }
2502 __ Comment("CheckStackOverflowSlowPath");
2503 __ Bind(entry_label());
2504 compiler->SaveLiveRegisters(instruction()->locs());
2505 // pending_deoptimization_env_ is needed to generate a runtime call that
2506 // may throw an exception.
2507 ASSERT(compiler->pending_deoptimization_env_ == nullptr);
2508 Environment* env = compiler->SlowPathEnvironmentFor(
2509 instruction(), /*num_slow_path_args=*/0);
2510 compiler->pending_deoptimization_env_ = env;
2511
2512 __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
2513 compiler->EmitCallsiteMetadata(
2514 instruction()->source(), instruction()->deopt_id(),
2515 UntaggedPcDescriptors::kOther, instruction()->locs(), env);
2516
2517 if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
2518 instruction()->in_loop()) {
2519 // In unoptimized code, record loop stack checks as possible OSR entries.
2520 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
2521 instruction()->deopt_id(),
2522 InstructionSource());
2523 }
2524 compiler->pending_deoptimization_env_ = nullptr;
2525 compiler->RestoreLiveRegisters(instruction()->locs());
2526 __ jmp(exit_label());
2527 }
2528
2529 compiler::Label* osr_entry_label() {
2530 ASSERT(IsolateGroup::Current()->use_osr());
2531 return &osr_entry_label_;
2532 }
2533
2534 private:
2535 compiler::Label osr_entry_label_;
2536};
2537
2538void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2539 CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
2540 compiler->AddSlowPathCode(slow_path);
2541
2542 __ cmpl(ESP, compiler::Address(THR, Thread::stack_limit_offset()));
2543 __ j(BELOW_EQUAL, slow_path->entry_label());
2544 if (compiler->CanOSRFunction() && in_loop()) {
2545 // In unoptimized code check the usage counter to trigger OSR at loop
2546 // stack checks. Use progressively higher thresholds for more deeply
2547 // nested loops to attempt to hit outer loops with OSR when possible.
2548 __ LoadObject(EDI, compiler->parsed_function().function());
2549 const intptr_t configured_optimization_counter_threshold =
2550 compiler->thread()->isolate_group()->optimization_counter_threshold();
2551 const int32_t threshold =
2552 configured_optimization_counter_threshold * (loop_depth() + 1);
2553 __ incl(compiler::FieldAddress(EDI, Function::usage_counter_offset()));
2554 __ cmpl(compiler::FieldAddress(EDI, Function::usage_counter_offset()),
2555 compiler::Immediate(threshold));
2556 __ j(GREATER_EQUAL, slow_path->osr_entry_label());
2557 }
2558 if (compiler->ForceSlowPathForStackOverflow()) {
2559 // TODO(turnidge): Implement stack overflow count in assembly to
2560 // make --stacktrace-every and --deoptimize-every faster.
2561 __ jmp(slow_path->entry_label());
2562 }
2563 __ Bind(slow_path->exit_label());
2564}
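// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The OSR trigger above scales the usage-counter threshold with loop depth,
// so more deeply nested loops need proportionally more iterations before
// requesting OSR, which gives outer loops a chance to become the OSR target.
// The emitted comparison reduces to:
static bool ShouldRequestOsrSketch(int32_t usage_counter,
                                   int32_t base_threshold,
                                   intptr_t loop_depth) {
  const int32_t threshold =
      base_threshold * static_cast<int32_t>(loop_depth + 1);
  return usage_counter >= threshold;
}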
2565
2566static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
2567 BinarySmiOpInstr* shift_left) {
2568 const LocationSummary& locs = *shift_left->locs();
2569 Register left = locs.in(0).reg();
2570 Register result = locs.out(0).reg();
2571 ASSERT(left == result);
2572 compiler::Label* deopt =
2573 shift_left->CanDeoptimize()
2574 ? compiler->AddDeoptStub(shift_left->deopt_id(),
2575 ICData::kDeoptBinarySmiOp)
2576 : nullptr;
2577 if (locs.in(1).IsConstant()) {
2578 const Object& constant = locs.in(1).constant();
2579 ASSERT(constant.IsSmi());
2580 // shll operation masks the count to 5 bits.
2581 const intptr_t kCountLimit = 0x1F;
2582 const intptr_t value = Smi::Cast(constant).Value();
2583 ASSERT((0 < value) && (value < kCountLimit));
2584 if (shift_left->can_overflow()) {
2585 if (value == 1) {
2586 // Use overflow flag.
2587 __ shll(left, compiler::Immediate(1));
2588 __ j(OVERFLOW, deopt);
2589 return;
2590 }
2591 // Check for overflow.
2592 Register temp = locs.temp(0).reg();
2593 __ movl(temp, left);
2594 __ shll(left, compiler::Immediate(value));
2595 __ sarl(left, compiler::Immediate(value));
2596 __ cmpl(left, temp);
2597 __ j(NOT_EQUAL, deopt); // Overflow.
2598 }
2599 // Shift for the result now that we know there is no overflow.
2600 __ shll(left, compiler::Immediate(value));
2601 return;
2602 }
2603
2604 // Right (locs.in(1)) is not constant.
2605 Register right = locs.in(1).reg();
2606 Range* right_range = shift_left->right_range();
2607 if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
2608 // TODO(srdjan): Implement code below for can_overflow().
2609 // If left is constant, we know the maximal allowed size for right.
2610 const Object& obj = shift_left->left()->BoundConstant();
2611 if (obj.IsSmi()) {
2612 const intptr_t left_int = Smi::Cast(obj).Value();
2613 if (left_int == 0) {
2614 __ cmpl(right, compiler::Immediate(0));
2615 __ j(NEGATIVE, deopt);
2616 return;
2617 }
2618 const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
2619 const bool right_needs_check =
2620 !RangeUtils::IsWithin(right_range, 0, max_right - 1);
2621 if (right_needs_check) {
2622 __ cmpl(right,
2623 compiler::Immediate(static_cast<int32_t>(Smi::New(max_right))));
2624 __ j(ABOVE_EQUAL, deopt);
2625 }
2626 __ SmiUntag(right);
2627 __ shll(left, right);
2628 }
2629 return;
2630 }
2631
2632 const bool right_needs_check =
2633 !RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
2634 ASSERT(right == ECX); // Count must be in ECX
2635 if (!shift_left->can_overflow()) {
2636 if (right_needs_check) {
2637 if (!RangeUtils::IsPositive(right_range)) {
2638 ASSERT(shift_left->CanDeoptimize());
2639 __ cmpl(right, compiler::Immediate(0));
2640 __ j(NEGATIVE, deopt);
2641 }
2642 compiler::Label done, is_not_zero;
2643 __ cmpl(right,
2644 compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
2645 __ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump);
2646 __ xorl(left, left);
2647 __ jmp(&done, compiler::Assembler::kNearJump);
2648 __ Bind(&is_not_zero);
2649 __ SmiUntag(right);
2650 __ shll(left, right);
2651 __ Bind(&done);
2652 } else {
2653 __ SmiUntag(right);
2654 __ shll(left, right);
2655 }
2656 } else {
2657 if (right_needs_check) {
2658 ASSERT(shift_left->CanDeoptimize());
2659 __ cmpl(right,
2660 compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
2661 __ j(ABOVE_EQUAL, deopt);
2662 }
2663 // Left is not a constant.
2664 Register temp = locs.temp(0).reg();
2665 // Check if the count is too large to handle inline.
2666 __ movl(temp, left);
2667 __ SmiUntag(right);
2668 // Overflow test (preserves temp and right).
2669 __ shll(left, right);
2670 __ sarl(left, right);
2671 __ cmpl(left, temp);
2672 __ j(NOT_EQUAL, deopt); // Overflow.
2673 // Shift for the result now that we know there is no overflow.
2674 __ shll(left, right);
2675 }
2676}
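// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The overflow check used above (and again in EmitInt32ShiftLeft further
// down) relies on the fact that a left shift overflows exactly when shifting
// the result back arithmetically does not reproduce the original value:
static bool ShiftLeftOverflowsSketch(int32_t value, intptr_t count) {
  const int32_t shifted =
      static_cast<int32_t>(static_cast<uint32_t>(value) << count);
  return (shifted >> count) != value;
}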
2677
2678static bool IsSmiValue(const Object& constant, intptr_t value) {
2679 return constant.IsSmi() && (Smi::Cast(constant).Value() == value);
2680}
2681
2682LocationSummary* BinarySmiOpInstr::MakeLocationSummary(Zone* zone,
2683 bool opt) const {
2684 const intptr_t kNumInputs = 2;
2685 if (op_kind() == Token::kTRUNCDIV) {
2686 const intptr_t kNumTemps = 1;
2687 LocationSummary* summary = new (zone)
2688 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2689 if (RightIsPowerOfTwoConstant()) {
2690 summary->set_in(0, Location::RequiresRegister());
2691 ConstantInstr* right_constant = right()->definition()->AsConstant();
2692 // The programmer only controls one bit, so the constant is safe.
2693 summary->set_in(1, Location::Constant(right_constant));
2694 summary->set_temp(0, Location::RequiresRegister());
2695 summary->set_out(0, Location::SameAsFirstInput());
2696 } else {
2697 // Both inputs must be writable because they will be untagged.
2698 summary->set_in(0, Location::RegisterLocation(EAX));
2699 summary->set_in(1, Location::WritableRegister());
2700 summary->set_out(0, Location::SameAsFirstInput());
2701 // Will be used for sign extension and division.
2702 summary->set_temp(0, Location::RegisterLocation(EDX));
2703 }
2704 return summary;
2705 } else if (op_kind() == Token::kMOD) {
2706 const intptr_t kNumTemps = 1;
2707 LocationSummary* summary = new (zone)
2708 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2709 // Both inputs must be writable because they will be untagged.
2710 summary->set_in(0, Location::RegisterLocation(EDX));
2711 summary->set_in(1, Location::WritableRegister());
2712 summary->set_out(0, Location::SameAsFirstInput());
2713 // Will be used for sign extension and division.
2714 summary->set_temp(0, Location::RegisterLocation(EAX));
2715 return summary;
2716 } else if ((op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
2717 const intptr_t kNumTemps = 0;
2718 LocationSummary* summary = new (zone)
2719 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2720 summary->set_in(0, Location::RequiresRegister());
2721 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
2722 summary->set_out(0, Location::SameAsFirstInput());
2723 return summary;
2724 } else if (op_kind() == Token::kSHL) {
2725 ConstantInstr* right_constant = right()->definition()->AsConstant();
2726 // Shift-by-1 overflow checking can use flags, otherwise we need a temp.
2727 const bool shiftBy1 =
2728 (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
2729 const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
2730 LocationSummary* summary = new (zone)
2731 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2732 summary->set_in(0, Location::RequiresRegister());
2733 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
2734 if (kNumTemps == 1) {
2735 summary->set_temp(0, Location::RequiresRegister());
2736 }
2737 summary->set_out(0, Location::SameAsFirstInput());
2738 return summary;
2739 } else {
2740 const intptr_t kNumTemps = 0;
2741 LocationSummary* summary = new (zone)
2742 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
2743 summary->set_in(0, Location::RequiresRegister());
2744 ConstantInstr* constant = right()->definition()->AsConstant();
2745 if (constant != nullptr) {
2746 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
2747 } else {
2748 summary->set_in(1, Location::PrefersRegister());
2749 }
2750 summary->set_out(0, Location::SameAsFirstInput());
2751 return summary;
2752 }
2753}
2754
2755template <typename OperandType>
2756static void EmitIntegerArithmetic(FlowGraphCompiler* compiler,
2757 Token::Kind op_kind,
2758 Register left,
2759 const OperandType& right,
2760 compiler::Label* deopt) {
2761 switch (op_kind) {
2762 case Token::kADD:
2763 __ addl(left, right);
2764 break;
2765 case Token::kSUB:
2766 __ subl(left, right);
2767 break;
2768 case Token::kBIT_AND:
2769 __ andl(left, right);
2770 break;
2771 case Token::kBIT_OR:
2772 __ orl(left, right);
2773 break;
2774 case Token::kBIT_XOR:
2775 __ xorl(left, right);
2776 break;
2777 case Token::kMUL:
2778 __ imull(left, right);
2779 break;
2780 default:
2781 UNREACHABLE();
2782 }
2783 if (deopt != nullptr) __ j(OVERFLOW, deopt);
2784}
2785
2786void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
2787 if (op_kind() == Token::kSHL) {
2788 EmitSmiShiftLeft(compiler, this);
2789 return;
2790 }
2791
2792 Register left = locs()->in(0).reg();
2793 Register result = locs()->out(0).reg();
2794 ASSERT(left == result);
2795 compiler::Label* deopt = nullptr;
2796 if (CanDeoptimize()) {
2797 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
2798 }
2799
2800 if (locs()->in(1).IsConstant()) {
2801 const Object& constant = locs()->in(1).constant();
2802 ASSERT(constant.IsSmi());
2803 const intptr_t value = Smi::Cast(constant).Value();
2804 switch (op_kind()) {
2805 case Token::kADD:
2806 case Token::kSUB:
2807 case Token::kBIT_AND:
2808 case Token::kBIT_OR:
2809 case Token::kBIT_XOR:
2810 case Token::kMUL: {
2811 const intptr_t imm =
2812 (op_kind() == Token::kMUL) ? value : Smi::RawValue(value);
2813 EmitIntegerArithmetic(compiler, op_kind(), left,
2814 compiler::Immediate(imm), deopt);
2815 break;
2816 }
2817
2818 case Token::kTRUNCDIV: {
2819 ASSERT(value != kIntptrMin);
2820 ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
2821 const intptr_t shift_count =
2822 Utils::ShiftForPowerOfTwo(Utils::Abs(value)) + kSmiTagSize;
2823 ASSERT(kSmiTagSize == 1);
2824 Register temp = locs()->temp(0).reg();
2825 __ movl(temp, left);
2826 __ sarl(temp, compiler::Immediate(31));
2827 ASSERT(shift_count > 1); // 1, -1 case handled above.
2828 __ shrl(temp, compiler::Immediate(32 - shift_count));
2829 __ addl(left, temp);
2830 ASSERT(shift_count > 0);
2831 __ sarl(left, compiler::Immediate(shift_count));
2832 if (value < 0) {
2833 __ negl(left);
2834 }
2835 __ SmiTag(left);
2836 break;
2837 }
2838
2839 case Token::kSHR: {
2840 // sarl operation masks the count to 5 bits.
2841 const intptr_t kCountLimit = 0x1F;
2842 __ sarl(left, compiler::Immediate(
2843 Utils::Minimum(value + kSmiTagSize, kCountLimit)));
2844 __ SmiTag(left);
2845 break;
2846 }
2847
2848 case Token::kUSHR: {
2849 ASSERT((value > 0) && (value < 64));
2851 // 64-bit representation of left operand value:
2852 //
2853 // ss...sssss s s xxxxxxxxxxxxx
2854 // | | | | | |
2855 // 63 32 31 30 kSmiBits-1 0
2856 //
2857 // Where 's' is a sign bit.
2858 //
2859 // If left operand is negative (sign bit is set), then
2860 // result will fit into Smi range if and only if
2861 // the shift amount >= 64 - kSmiBits.
2862 //
2863 // If left operand is non-negative, the result always
2864 // fits into Smi range.
2865 //
2866 if (value < (64 - compiler::target::kSmiBits)) {
2867 if (deopt != nullptr) {
2868 __ testl(left, left);
2869 __ j(LESS, deopt);
2870 } else {
2871 // Operation cannot overflow only if left value is always
2872 // non-negative.
2873 ASSERT(!can_overflow());
2874 }
2875 // At this point left operand is non-negative, so unsigned shift
2876 // can't overflow.
2877 if (value >= compiler::target::kSmiBits) {
2878 __ xorl(left, left);
2879 } else {
2880 __ shrl(left, compiler::Immediate(value + kSmiTagSize));
2881 __ SmiTag(left);
2882 }
2883 } else {
2884 // Shift amount > 32, and the result is guaranteed to fit into Smi.
2885 // Low (Smi) part of the left operand is shifted out.
2886 // High part is filled with sign bits.
2887 __ sarl(left, compiler::Immediate(31));
2888 __ shrl(left, compiler::Immediate(value - 32));
2889 __ SmiTag(left);
2890 }
2891 break;
2892 }
2893
2894 default:
2895 UNREACHABLE();
2896 break;
2897 }
2898 return;
2899 } // if locs()->in(1).IsConstant()
2900
2901 if (locs()->in(1).IsStackSlot()) {
2902 const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
2903 if (op_kind() == Token::kMUL) {
2904 __ SmiUntag(left);
2905 }
2906 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
2907 return;
2908 }
2909
2910 // if locs()->in(1).IsRegister.
2911 Register right = locs()->in(1).reg();
2912 switch (op_kind()) {
2913 case Token::kADD:
2914 case Token::kSUB:
2915 case Token::kBIT_AND:
2916 case Token::kBIT_OR:
2917 case Token::kBIT_XOR:
2918 case Token::kMUL:
2919 if (op_kind() == Token::kMUL) {
2920 __ SmiUntag(left);
2921 }
2922 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
2923 break;
2924
2925 case Token::kTRUNCDIV: {
2926 if (RangeUtils::CanBeZero(right_range())) {
2927 // Handle divide by zero in the runtime.
2928 __ testl(right, right);
2929 __ j(ZERO, deopt);
2930 }
2931 ASSERT(left == EAX);
2932 ASSERT((right != EDX) && (right != EAX));
2933 ASSERT(locs()->temp(0).reg() == EDX);
2934 ASSERT(result == EAX);
2935 __ SmiUntag(left);
2936 __ SmiUntag(right);
2937 __ cdq(); // Sign extend EAX -> EDX:EAX.
2938 __ idivl(right); // EAX: quotient, EDX: remainder.
2939 if (RangeUtils::Overlaps(right_range(), -1, -1)) {
2940 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
2941 // case we cannot tag the result.
2942 __ cmpl(result, compiler::Immediate(0x40000000));
2943 __ j(EQUAL, deopt);
2944 }
2945 __ SmiTag(result);
2946 break;
2947 }
2948 case Token::kMOD: {
2949 if (RangeUtils::CanBeZero(right_range())) {
2950 // Handle divide by zero in the runtime.
2951 __ testl(right, right);
2952 __ j(ZERO, deopt);
2953 }
2954 ASSERT(left == EDX);
2955 ASSERT((right != EDX) && (right != EAX));
2956 ASSERT(locs()->temp(0).reg() == EAX);
2957 ASSERT(result == EDX);
2958 __ SmiUntag(left);
2959 __ SmiUntag(right);
2960 __ movl(EAX, EDX);
2961 __ cdq(); // Sign extend EAX -> EDX:EAX.
2962 __ idivl(right); // EAX: quotient, EDX: remainder.
2963 // res = left % right;
2964 // if (res < 0) {
2965 // if (right < 0) {
2966 // res = res - right;
2967 // } else {
2968 // res = res + right;
2969 // }
2970 // }
2971 compiler::Label done;
2972 __ cmpl(result, compiler::Immediate(0));
2973 __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump);
2974 // Result is negative, adjust it.
2975 if (RangeUtils::Overlaps(right_range(), -1, 1)) {
2976 // Right can be positive and negative.
2977 compiler::Label subtract;
2978 __ cmpl(right, compiler::Immediate(0));
2979 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
2980 __ addl(result, right);
2981 __ jmp(&done, compiler::Assembler::kNearJump);
2982 __ Bind(&subtract);
2983 __ subl(result, right);
2984 } else if (right_range()->IsPositive()) {
2985 // Right is positive.
2986 __ addl(result, right);
2987 } else {
2988 // Right is negative.
2989 __ subl(result, right);
2990 }
2991 __ Bind(&done);
2992 __ SmiTag(result);
2993 break;
2994 }
2995 case Token::kSHR: {
2996 if (CanDeoptimize()) {
2997 __ cmpl(right, compiler::Immediate(0));
2998 __ j(LESS, deopt);
2999 }
3000 __ SmiUntag(right);
3001 // sarl operation masks the count to 5 bits.
3002 const intptr_t kCountLimit = 0x1F;
3003 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(), kCountLimit)) {
3004 __ cmpl(right, compiler::Immediate(kCountLimit));
3005 compiler::Label count_ok;
3006 __ j(LESS, &count_ok, compiler::Assembler::kNearJump);
3007 __ movl(right, compiler::Immediate(kCountLimit));
3008 __ Bind(&count_ok);
3009 }
3010 ASSERT(right == ECX); // Count must be in ECX
3011 __ SmiUntag(left);
3012 __ sarl(left, right);
3013 __ SmiTag(left);
3014 break;
3015 }
3016 case Token::kUSHR: {
3017 compiler::Label done;
3018 __ SmiUntag(right);
3019 // 64-bit representation of left operand value:
3020 //
3021 // ss...sssss s s xxxxxxxxxxxxx
3022 // | | | | | |
3023 // 63 32 31 30 kSmiBits-1 0
3024 //
3025 // Where 's' is a sign bit.
3026 //
3027 // If left operand is negative (sign bit is set), then
3028 // result will fit into Smi range if and only if
3029 // the shift amount >= 64 - kSmiBits.
3030 //
3031 // If left operand is non-negative, the result always
3032 // fits into Smi range.
3033 //
3034 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3035 64 - compiler::target::kSmiBits - 1)) {
3036 __ cmpl(right, compiler::Immediate(64 - compiler::target::kSmiBits));
3037 compiler::Label shift_less_34;
3038 __ j(LESS, &shift_less_34, compiler::Assembler::kNearJump);
3039 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3040 kBitsPerInt64 - 1)) {
3041 __ cmpl(right, compiler::Immediate(kBitsPerInt64));
3042 compiler::Label shift_less_64;
3043 __ j(LESS, &shift_less_64, compiler::Assembler::kNearJump);
3044 // Shift amount >= 64. Result is 0.
3045 __ xorl(left, left);
3046 __ jmp(&done, compiler::Assembler::kNearJump);
3047 __ Bind(&shift_less_64);
3048 }
3049 // Shift amount >= 64 - kSmiBits > 32, but < 64.
3050 // Result is guaranteed to fit into Smi range.
3051 // Low (Smi) part of the left operand is shifted out.
3052 // High part is filled with sign bits.
3053 ASSERT(right == ECX); // Count must be in ECX
3054 __ subl(right, compiler::Immediate(32));
3055 __ sarl(left, compiler::Immediate(31));
3056 __ shrl(left, right);
3057 __ SmiTag(left);
3058 __ jmp(&done, compiler::Assembler::kNearJump);
3059 __ Bind(&shift_less_34);
3060 }
3061 // Shift amount < 64 - kSmiBits.
3062 // If left is negative, then result will not fit into Smi range.
3063 // Also deopt in case of negative shift amount.
3064 if (deopt != nullptr) {
3065 __ testl(left, left);
3066 __ j(LESS, deopt);
3067 __ testl(right, right);
3068 __ j(LESS, deopt);
3069 } else {
3070 ASSERT(!can_overflow());
3071 }
3072 // At this point left operand is non-negative, so unsigned shift
3073 // can't overflow.
3074 if (!RangeUtils::OnlyLessThanOrEqualTo(right_range(),
3075 compiler::target::kSmiBits - 1)) {
3076 __ cmpl(right, compiler::Immediate(compiler::target::kSmiBits));
3077 compiler::Label shift_less_30;
3078 __ j(LESS, &shift_less_30, compiler::Assembler::kNearJump);
3079 // Left operand >= 0, shift amount >= kSmiBits. Result is 0.
3080 __ xorl(left, left);
3081 __ jmp(&done, compiler::Assembler::kNearJump);
3082 __ Bind(&shift_less_30);
3083 }
3084 // Left operand >= 0, shift amount < kSmiBits < 32.
3085 ASSERT(right == ECX); // Count must be in ECX
3086 __ SmiUntag(left);
3087 __ shrl(left, right);
3088 __ SmiTag(left);
3089 __ Bind(&done);
3090 break;
3091 }
3092 case Token::kDIV: {
3093 // Dispatches to 'Double./'.
3094 // TODO(srdjan): Implement as conversion to double and double division.
3095 UNREACHABLE();
3096 break;
3097 }
3098 case Token::kOR:
3099 case Token::kAND: {
3100 // Flow graph builder has dissected this operation to guarantee correct
3101 // behavior (short-circuit evaluation).
3102 UNREACHABLE();
3103 break;
3104 }
3105 default:
3106 UNREACHABLE();
3107 break;
3108 }
3109}
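// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The kMOD sequence above fixes up the x86 idiv remainder, which takes the
// sign of the dividend, so that the result is always non-negative as Dart's
// % operator requires. The adjustment is equivalent to adding |right| to a
// negative remainder:
static intptr_t DartModSketch(intptr_t left, intptr_t right) {
  intptr_t res = left % right;  // Truncated remainder, sign of 'left'.
  if (res < 0) {
    res += (right < 0) ? -right : right;  // Add the magnitude of 'right'.
  }
  return res;
}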
3110
3111LocationSummary* BinaryInt32OpInstr::MakeLocationSummary(Zone* zone,
3112 bool opt) const {
3113 const intptr_t kNumInputs = 2;
3114 if (op_kind() == Token::kTRUNCDIV) {
3115 UNREACHABLE();
3116 return nullptr;
3117 } else if (op_kind() == Token::kMOD) {
3118 UNREACHABLE();
3119 return nullptr;
3120 } else if ((op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR)) {
3121 const intptr_t kNumTemps = 0;
3122 LocationSummary* summary = new (zone)
3123 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3124 summary->set_in(0, Location::RequiresRegister());
3125 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
3126 summary->set_out(0, Location::SameAsFirstInput());
3127 return summary;
3128 } else if (op_kind() == Token::kSHL) {
3129 const intptr_t kNumTemps = can_overflow() ? 1 : 0;
3130 LocationSummary* summary = new (zone)
3131 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3132 summary->set_in(0, Location::RequiresRegister());
3133 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
3134 if (can_overflow()) {
3135 summary->set_temp(0, Location::RequiresRegister());
3136 }
3137 summary->set_out(0, Location::SameAsFirstInput());
3138 return summary;
3139 } else {
3140 const intptr_t kNumTemps = 0;
3141 LocationSummary* summary = new (zone)
3142 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3143 summary->set_in(0, Location::RequiresRegister());
3144 ConstantInstr* constant = right()->definition()->AsConstant();
3145 if (constant != nullptr) {
3146 summary->set_in(1, LocationRegisterOrSmiConstant(right()));
3147 } else {
3148 summary->set_in(1, Location::PrefersRegister());
3149 }
3150 summary->set_out(0, Location::SameAsFirstInput());
3151 return summary;
3152 }
3153}
3154
3155static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
3156 BinaryInt32OpInstr* shift_left) {
3157 const LocationSummary& locs = *shift_left->locs();
3158 Register left = locs.in(0).reg();
3159 Register result = locs.out(0).reg();
3160 ASSERT(left == result);
3161 compiler::Label* deopt =
3162 shift_left->CanDeoptimize()
3163 ? compiler->AddDeoptStub(shift_left->deopt_id(),
3164 ICData::kDeoptBinarySmiOp)
3165 : nullptr;
3166 ASSERT(locs.in(1).IsConstant());
3167
3168 const Object& constant = locs.in(1).constant();
3169 ASSERT(constant.IsSmi());
3170 // shll operation masks the count to 5 bits.
3171 const intptr_t kCountLimit = 0x1F;
3172 const intptr_t value = Smi::Cast(constant).Value();
3173 ASSERT((0 < value) && (value < kCountLimit));
3174 if (shift_left->can_overflow()) {
3175 // Check for overflow.
3176 Register temp = locs.temp(0).reg();
3177 __ movl(temp, left);
3178 __ shll(left, compiler::Immediate(value));
3179 __ sarl(left, compiler::Immediate(value));
3180 __ cmpl(left, temp);
3181 __ j(NOT_EQUAL, deopt); // Overflow.
3182 }
3183 // Shift for the result now that we know there is no overflow.
3184 __ shll(left, compiler::Immediate(value));
3185}
3186
3187void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3188 if (op_kind() == Token::kSHL) {
3189 EmitInt32ShiftLeft(compiler, this);
3190 return;
3191 }
3192
3193 Register left = locs()->in(0).reg();
3194 Register result = locs()->out(0).reg();
3195 ASSERT(left == result);
3196 compiler::Label* deopt = nullptr;
3197 if (CanDeoptimize()) {
3198 deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
3199 }
3200
3201 if (locs()->in(1).IsConstant()) {
3202 const Object& constant = locs()->in(1).constant();
3203 ASSERT(constant.IsSmi());
3204 const intptr_t value = Smi::Cast(constant).Value();
3205 switch (op_kind()) {
3206 case Token::kADD:
3207 case Token::kSUB:
3208 case Token::kMUL:
3209 case Token::kBIT_AND:
3210 case Token::kBIT_OR:
3211 case Token::kBIT_XOR:
3212 EmitIntegerArithmetic(compiler, op_kind(), left,
3213 compiler::Immediate(value), deopt);
3214 break;
3215
3216 case Token::kTRUNCDIV: {
3217 UNREACHABLE();
3218 break;
3219 }
3220
3221 case Token::kSHR: {
3222 // sarl operation masks the count to 5 bits.
3223 const intptr_t kCountLimit = 0x1F;
3224 __ sarl(left, compiler::Immediate(Utils::Minimum(value, kCountLimit)));
3225 break;
3226 }
3227
3228 case Token::kUSHR: {
3229 ASSERT((value > 0) && (value < 64));
3230 // 64-bit representation of left operand value:
3231 //
3232 // ss...sssss s xxxxxxxxxxxxx
3233 // | | | | |
3234 // 63 32 31 30 0
3235 //
3236 // Where 's' is a sign bit.
3237 //
3238 // If left operand is negative (sign bit is set), then
3239 // result will fit into Int32 range if and only if
3240 // the shift amount > 32.
3241 //
3242 if (value <= 32) {
3243 if (deopt != nullptr) {
3244 __ testl(left, left);
3245 __ j(LESS, deopt);
3246 } else {
3247 // Operation cannot overflow only if left value is always
3248 // non-negative.
3249 ASSERT(!can_overflow());
3250 }
3251 // At this point left operand is non-negative, so unsigned shift
3252 // can't overflow.
3253 if (value == 32) {
3254 __ xorl(left, left);
3255 } else {
3256 __ shrl(left, compiler::Immediate(value));
3257 }
3258 } else {
3259 // Shift amount > 32.
3260 // Low (Int32) part of the left operand is shifted out.
3261 // Shift high part which is filled with sign bits.
3262 __ sarl(left, compiler::Immediate(31));
3263 __ shrl(left, compiler::Immediate(value - 32));
3264 }
3265 break;
3266 }
3267
3268 default:
3269 UNREACHABLE();
3270 break;
3271 }
3272 return;
3273 } // if locs()->in(1).IsConstant()
3274
3275 if (locs()->in(1).IsStackSlot()) {
3276 const compiler::Address& right = LocationToStackSlotAddress(locs()->in(1));
3277 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
3278 return;
3279 } // if locs()->in(1).IsStackSlot.
3280
3281 // if locs()->in(1).IsRegister.
3282 Register right = locs()->in(1).reg();
3283 switch (op_kind()) {
3284 case Token::kADD:
3285 case Token::kSUB:
3286 case Token::kMUL:
3287 case Token::kBIT_AND:
3288 case Token::kBIT_OR:
3289 case Token::kBIT_XOR:
3290 EmitIntegerArithmetic(compiler, op_kind(), left, right, deopt);
3291 break;
3292
3293 default:
3294 UNREACHABLE();
3295 break;
3296 }
3297}
3298
3299LocationSummary* BinaryUint32OpInstr::MakeLocationSummary(Zone* zone,
3300 bool opt) const {
3301 const intptr_t kNumInputs = 2;
3302 const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
3303 LocationSummary* summary = new (zone)
3304 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3305 if (op_kind() == Token::kMUL) {
3306 summary->set_in(0, Location::RegisterLocation(EAX));
3307 summary->set_temp(0, Location::RegisterLocation(EDX));
3308 } else {
3309 summary->set_in(0, Location::RequiresRegister());
3310 }
3311 summary->set_in(1, Location::RequiresRegister());
3312 summary->set_out(0, Location::SameAsFirstInput());
3313 return summary;
3314}
3315
3316void BinaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3317 Register left = locs()->in(0).reg();
3318 Register right = locs()->in(1).reg();
3319 Register out = locs()->out(0).reg();
3320 ASSERT(out == left);
3321 switch (op_kind()) {
3322 case Token::kBIT_AND:
3323 case Token::kBIT_OR:
3324 case Token::kBIT_XOR:
3325 case Token::kADD:
3326 case Token::kSUB:
3327 EmitIntegerArithmetic(compiler, op_kind(), left, right, nullptr);
3328 return;
3329
3330 case Token::kMUL:
3331 __ mull(right); // Result in EDX:EAX.
3332 ASSERT(out == EAX);
3333 ASSERT(locs()->temp(0).reg() == EDX);
3334 break;
3335 default:
3336 UNREACHABLE();
3337 }
3338}
3339
3340LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary(Zone* zone,
3341 bool opt) const {
3342 intptr_t left_cid = left()->Type()->ToCid();
3343 intptr_t right_cid = right()->Type()->ToCid();
3344 ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
3345 const intptr_t kNumInputs = 2;
3346 const bool need_temp = (left()->definition() != right()->definition()) &&
3347 (left_cid != kSmiCid) && (right_cid != kSmiCid);
3348 const intptr_t kNumTemps = need_temp ? 1 : 0;
3349 LocationSummary* summary = new (zone)
3350 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3351 summary->set_in(0, Location::RequiresRegister());
3352 summary->set_in(1, Location::RequiresRegister());
3353 if (need_temp) summary->set_temp(0, Location::RequiresRegister());
3354 return summary;
3355}
3356
3357void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3358 compiler::Label* deopt =
3359 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryDoubleOp);
3360 intptr_t left_cid = left()->Type()->ToCid();
3361 intptr_t right_cid = right()->Type()->ToCid();
3362 Register left = locs()->in(0).reg();
3363 Register right = locs()->in(1).reg();
3364 if (this->left()->definition() == this->right()->definition()) {
3365 __ testl(left, compiler::Immediate(kSmiTagMask));
3366 } else if (left_cid == kSmiCid) {
3367 __ testl(right, compiler::Immediate(kSmiTagMask));
3368 } else if (right_cid == kSmiCid) {
3369 __ testl(left, compiler::Immediate(kSmiTagMask));
3370 } else {
3371 Register temp = locs()->temp(0).reg();
3372 __ movl(temp, left);
3373 __ orl(temp, right);
3374 __ testl(temp, compiler::Immediate(kSmiTagMask));
3375 }
3376 __ j(ZERO, deopt);
3377}
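// --- Editorial sketch (not part of the original il_ia32.cc source) ---
// The check above exploits kSmiTag == 0: OR-ing both operands and testing the
// tag bit yields zero exactly when both values are Smis, which is the one
// case that must deoptimize before the double variant of the operation is
// used.
static bool BothAreSmisSketch(uintptr_t left, uintptr_t right) {
  const uintptr_t kTagMask = 1;  // kSmiTagMask on ia32.
  return ((left | right) & kTagMask) == 0;
}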
3378
3379LocationSummary* BoxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3380 const intptr_t kNumInputs = 1;
3381 const intptr_t kNumTemps = 1;
3382 LocationSummary* summary = new (zone) LocationSummary(
3383 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3384 summary->set_in(0, Location::RequiresFpuRegister());
3385 summary->set_temp(0, Location::RequiresRegister());
3386 summary->set_out(0, Location::RequiresRegister());
3387 return summary;
3388}
3389
3390void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3391 Register out_reg = locs()->out(0).reg();
3392 XmmRegister value = locs()->in(0).fpu_reg();
3393
3394 BoxAllocationSlowPath::Allocate(compiler, this,
3395 compiler->BoxClassFor(from_representation()),
3396 out_reg, locs()->temp(0).reg());
3397
3398 switch (from_representation()) {
3399 case kUnboxedDouble:
3400 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), value);
3401 break;
3402 case kUnboxedFloat:
3403 __ cvtss2sd(FpuTMP, value);
3404 __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), FpuTMP);
3405 break;
3406 case kUnboxedFloat32x4:
3407 case kUnboxedFloat64x2:
3408 case kUnboxedInt32x4:
3409 __ movups(compiler::FieldAddress(out_reg, ValueOffset()), value);
3410 break;
3411 default:
3412 UNREACHABLE();
3413 break;
3414 }
3415}
3416
3417LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
3418 ASSERT(BoxCid() != kSmiCid);
3419 const bool needs_temp =
3420 CanDeoptimize() ||
3421 (CanConvertSmi() && (value()->Type()->ToCid() == kSmiCid));
3422
3423 const intptr_t kNumInputs = 1;
3424 const intptr_t kNumTemps = needs_temp ? 1 : 0;
3425 LocationSummary* summary = new (zone)
3426 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3427 summary->set_in(0, Location::RequiresRegister());
3428 if (needs_temp) {
3429 summary->set_temp(0, Location::RequiresRegister());
3430 }
3431 if (representation() == kUnboxedInt64) {
3432 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
3433 Location::RegisterLocation(EDX)));
3434 } else if (representation() == kUnboxedInt32) {
3435 summary->set_out(0, Location::SameAsFirstInput());
3436 } else {
3437 summary->set_out(0, Location::RequiresFpuRegister());
3438 }
3439 return summary;
3440}
3441
3442void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
3443 const Register box = locs()->in(0).reg();
3444
3445 switch (representation()) {
3446 case kUnboxedInt64: {
3447 PairLocation* result = locs()->out(0).AsPairLocation();
3448 ASSERT(result->At(0).reg() != box);
3449 __ movl(result->At(0).reg(), compiler::FieldAddress(box, ValueOffset()));
3450 __ movl(result->At(1).reg(),
3451 compiler::FieldAddress(box, ValueOffset() + kWordSize));
3452 break;
3453 }
3454
3455 case kUnboxedDouble: {
3456 const FpuRegister result = locs()->out(0).fpu_reg();
3457 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3458 break;
3459 }
3460
3461 case kUnboxedFloat: {
3462 const FpuRegister result = locs()->out(0).fpu_reg();
3463 __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
3464 __ cvtsd2ss(result, result);
3465 break;
3466 }
3467
3468 case kUnboxedFloat32x4:
3469 case kUnboxedFloat64x2:
3470 case kUnboxedInt32x4: {
3471 const FpuRegister result = locs()->out(0).fpu_reg();
3472 __ movups(result, compiler::FieldAddress(box, ValueOffset()));
3473 break;
3474 }
3475
3476 default:
3477 UNREACHABLE();
3478 break;
3479 }
3480}
3481
3482void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
3483 const Register box = locs()->in(0).reg();
3484
3485 switch (representation()) {
3486 case kUnboxedInt64: {
3487 PairLocation* result = locs()->out(0).AsPairLocation();
3488 ASSERT(result->At(0).reg() == EAX);
3489 ASSERT(result->At(1).reg() == EDX);
3490 __ movl(EAX, box);
3491 __ SmiUntag(EAX);
3492 __ cdq();
3493 break;
3494 }
3495
3496 case kUnboxedDouble: {
3497 const Register temp = locs()->temp(0).reg();
3498 const FpuRegister result = locs()->out(0).fpu_reg();
3499 __ movl(temp, box);
3500 __ SmiUntag(temp);
3501 __ cvtsi2sd(result, temp);
3502 break;
3503 }
3504
3505 default:
3506 UNREACHABLE();
3507 break;
3508 }
3509}
3510
3511void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
3512 const Register value = locs()->in(0).reg();
3513 const Register result = locs()->out(0).reg();
3514 __ LoadInt32FromBoxOrSmi(result, value);
3515}
3516
3517void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
3518 const Register box = locs()->in(0).reg();
3519 PairLocation* result = locs()->out(0).AsPairLocation();
3520 ASSERT(result->At(0).reg() != box);
3521 ASSERT(result->At(1).reg() != box);
3522 compiler::Label done;
3523 EmitSmiConversion(compiler); // Leaves CF after SmiUntag.
3524 __ j(NOT_CARRY, &done, compiler::Assembler::kNearJump);
3525 EmitLoadFromBox(compiler);
3526 __ Bind(&done);
3527}
3528
3529LocationSummary* BoxInteger32Instr::MakeLocationSummary(Zone* zone,
3530 bool opt) const {
3531 const intptr_t kNumInputs = 1;
3532 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3533 if (ValueFitsSmi()) {
3534 LocationSummary* summary = new (zone)
3535 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3536 // Same regs, can overwrite input.
3537 summary->set_in(0, Location::RequiresRegister());
3538 summary->set_out(0, Location::SameAsFirstInput());
3539 return summary;
3540 } else {
3541 LocationSummary* summary = new (zone) LocationSummary(
3542 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
3543 // Guaranteed different regs. In the signed case we are going to use the
3544 // input for sign extension of any Mint.
3545 const bool needs_writable_input = (from_representation() == kUnboxedInt32);
3546 summary->set_in(0, needs_writable_input ? Location::WritableRegister()
3547 : Location::RequiresRegister());
3548 summary->set_temp(0, Location::RequiresRegister());
3549 summary->set_out(0, Location::RequiresRegister());
3550 return summary;
3551 }
3552}
3553
3554void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3555 const Register value = locs()->in(0).reg();
3556 const Register out = locs()->out(0).reg();
3557
3558 if (ValueFitsSmi()) {
3559 ASSERT(value == out);
3560 ASSERT(kSmiTag == 0);
3561 __ shll(out, compiler::Immediate(kSmiTagSize));
3562 return;
3563 }
3564
3565 __ movl(out, value);
3566 __ shll(out, compiler::Immediate(kSmiTagSize));
3567 compiler::Label done;
3568 if (from_representation() == kUnboxedInt32) {
3569 __ j(NO_OVERFLOW, &done);
3570 } else {
3571 ASSERT(value != out); // Value was not overwritten.
3572 __ testl(value, compiler::Immediate(0xC0000000));
3573 __ j(ZERO, &done);
3574 }
3575
3576 // Allocate a Mint.
3577 if (from_representation() == kUnboxedInt32) {
3578 // Value input is a writable register and should be manually preserved
3579 // across allocation slow-path. Add it to live_registers set which
3580 // determines which registers to preserve.
3581 locs()->live_registers()->Add(locs()->in(0), kUnboxedInt32);
3582 }
3583 ASSERT(value != out); // We need the value after the allocation.
3584 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
3585 locs()->temp(0).reg());
3586 __ movl(compiler::FieldAddress(out, Mint::value_offset()), value);
3587 if (from_representation() == kUnboxedInt32) {
3588 // In the signed may-overflow case we asked for the input (value) to be
3589 // writable so we can use it as a temp to put the sign extension bits in.
3590 __ sarl(value, compiler::Immediate(31)); // Sign extend the Mint.
3591 __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize),
3592 value);
3593 } else {
3594 __ movl(compiler::FieldAddress(out, Mint::value_offset() + kWordSize),
3595 compiler::Immediate(0)); // Zero extend the Mint.
3596 }
3597 __ Bind(&done);
3598}
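// Annotation (not part of il_ia32.cc): a minimal standalone sketch of the two
// "fits in a Smi" checks emitted above, assuming 32-bit registers and a
// one-bit Smi tag. The signed (kUnboxedInt32) path relies on the tagging
// shift overflowing exactly when the value lies outside [-2^30, 2^30); the
// unsigned path tests the top two bits, mirroring testl(value, 0xC0000000).
#include <cstdint>

bool SignedFitsSmi(int32_t v) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
  return (tagged >> 1) == v;  // SmiTag (shll by 1) did not overflow.
}

bool UnsignedFitsSmi(uint32_t v) {
  return (v & 0xC0000000u) == 0;  // v < 2^30, so the tagged value fits.
}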
3599
3600LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
3601 bool opt) const {
3602 const intptr_t kNumInputs = 1;
3603 const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
3604 LocationSummary* summary = new (zone)
3605 LocationSummary(zone, kNumInputs, kNumTemps,
3606 ValueFitsSmi() ? LocationSummary::kNoCall
3607 : LocationSummary::kCallOnSlowPath);
3608 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
3609 Location::RequiresRegister()));
3610 if (!ValueFitsSmi()) {
3611 summary->set_temp(0, Location::RequiresRegister());
3612 }
3613 summary->set_out(0, Location::RequiresRegister());
3614 return summary;
3615}
3616
3617void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3618 if (ValueFitsSmi()) {
3619 PairLocation* value_pair = locs()->in(0).AsPairLocation();
3620 Register value_lo = value_pair->At(0).reg();
3621 Register out_reg = locs()->out(0).reg();
3622 __ movl(out_reg, value_lo);
3623 __ SmiTag(out_reg);
3624 return;
3625 }
3626
3627 PairLocation* value_pair = locs()->in(0).AsPairLocation();
3628 Register value_lo = value_pair->At(0).reg();
3629 Register value_hi = value_pair->At(1).reg();
3630 Register out_reg = locs()->out(0).reg();
3631
3632 // Copy value_hi into out_reg as a temporary.
3633 // We modify value_lo but restore it before using it.
3634 __ movl(out_reg, value_hi);
3635
3636 // Unboxed operations produce smis or mint-sized values.
3637 // Check if value fits into a smi.
3638 compiler::Label not_smi, done;
3639
3640 // 1. Compute (x + -kMinSmi) which has to be in the range
3641 // 0 .. -kMinSmi+kMaxSmi for x to fit into a smi.
3642 __ addl(value_lo, compiler::Immediate(0x40000000));
3643 __ adcl(out_reg, compiler::Immediate(0));
3644 // 2. Unsigned compare to -kMinSmi+kMaxSmi.
3645 __ cmpl(value_lo, compiler::Immediate(0x80000000));
3646 __ sbbl(out_reg, compiler::Immediate(0));
3647 __ j(ABOVE_EQUAL, &not_smi);
3648 // 3. Restore lower half if result is a smi.
3649 __ subl(value_lo, compiler::Immediate(0x40000000));
3650 __ movl(out_reg, value_lo);
3651 __ SmiTag(out_reg);
3652 __ jmp(&done);
3653 __ Bind(&not_smi);
3654 // 3. Restore lower half of input before using it.
3655 __ subl(value_lo, compiler::Immediate(0x40000000));
3656
3657 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
3658 out_reg, locs()->temp(0).reg());
3659 __ movl(compiler::FieldAddress(out_reg, Mint::value_offset()), value_lo);
3660 __ movl(compiler::FieldAddress(out_reg, Mint::value_offset() + kWordSize),
3661 value_hi);
3662 __ Bind(&done);
3663}
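// Annotation (not part of il_ia32.cc): a sketch of the biased range check
// above, assuming 31-bit Smis. Adding -kMinSmi (2^30) maps the Smi range
// [-2^30, 2^30) onto [0, 2^31), so one unsigned comparison of the 64-bit sum
// decides whether the value fits; the addl/adcl + cmpl/sbbl pair performs the
// same comparison with 32-bit registers.
#include <cstdint>

bool Int64FitsSmi(int64_t value) {
  uint64_t biased = static_cast<uint64_t>(value) + 0x40000000u;
  return biased < 0x80000000u;
}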
3664
3665LocationSummary* UnboxInteger32Instr::MakeLocationSummary(Zone* zone,
3666 bool opt) const {
3667 const intptr_t value_cid = value()->Type()->ToCid();
3668 const intptr_t kNumInputs = 1;
3669 intptr_t kNumTemps = 0;
3670
3671 if (CanDeoptimize()) {
3672 if ((value_cid != kSmiCid) && (value_cid != kMintCid) && !is_truncating()) {
3673 kNumTemps = 2;
3674 } else {
3675 kNumTemps = 1;
3676 }
3677 }
3678
3679 LocationSummary* summary = new (zone)
3680 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3681 summary->set_in(0, Location::RequiresRegister());
3682 for (int i = 0; i < kNumTemps; i++) {
3683 summary->set_temp(i, Location::RequiresRegister());
3684 }
3685 summary->set_out(0, ((value_cid == kSmiCid) || (value_cid != kMintCid))
3686 ? Location::SameAsFirstInput()
3687 : Location::RequiresRegister());
3688 return summary;
3689}
3690
3691static void LoadInt32FromMint(FlowGraphCompiler* compiler,
3692 Register result,
3693 const compiler::Address& lo,
3694 const compiler::Address& hi,
3695 Register temp,
3696 compiler::Label* deopt) {
3697 __ movl(result, lo);
3698 if (deopt != nullptr) {
3699 ASSERT(temp != result);
3700 __ movl(temp, result);
3701 __ sarl(temp, compiler::Immediate(31));
3702 __ cmpl(temp, hi);
3703 __ j(NOT_EQUAL, deopt);
3704 }
3705}
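// Annotation (not part of il_ia32.cc): the deoptimization test in
// LoadInt32FromMint checks that the Mint's high word is the sign extension of
// its low word. A sketch of the same condition:
#include <cstdint>

bool MintValueFitsInt32(int32_t lo, int32_t hi) {
  return hi == (lo >> 31);  // High word is all copies of lo's sign bit.
}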
3706
3707void UnboxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
3708 const intptr_t value_cid = value()->Type()->ToCid();
3709 Register value = locs()->in(0).reg();
3710 const Register result = locs()->out(0).reg();
3711 const Register temp = CanDeoptimize() ? locs()->temp(0).reg() : kNoRegister;
3712 compiler::Label* deopt = nullptr;
3713 if (CanDeoptimize()) {
3714 deopt = compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnboxInteger);
3715 }
3716 compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;
3717
3718 const intptr_t lo_offset = Mint::value_offset();
3719 const intptr_t hi_offset = Mint::value_offset() + kWordSize;
3720
3721 if (value_cid == kSmiCid) {
3722 ASSERT(value == result);
3723 __ SmiUntag(value);
3724 } else if (value_cid == kMintCid) {
3725 ASSERT((value != result) || (out_of_range == nullptr));
3726 LoadInt32FromMint(
3727 compiler, result, compiler::FieldAddress(value, lo_offset),
3728 compiler::FieldAddress(value, hi_offset), temp, out_of_range);
3729 } else if (!CanDeoptimize()) {
3730 ASSERT(value == result);
3731 compiler::Label done;
3732 __ SmiUntag(value);
3733 __ j(NOT_CARRY, &done);
3734 __ movl(value, compiler::Address(value, TIMES_2, lo_offset));
3735 __ Bind(&done);
3736 } else {
3737 ASSERT(value == result);
3738 compiler::Label done;
3739 __ SmiUntagOrCheckClass(value, kMintCid, temp, &done);
3740 __ j(NOT_EQUAL, deopt);
3741 if (out_of_range != nullptr) {
3742 Register value_temp = locs()->temp(1).reg();
3743 __ movl(value_temp, value);
3744 value = value_temp;
3745 }
3746 LoadInt32FromMint(
3747 compiler, result, compiler::Address(value, TIMES_2, lo_offset),
3748 compiler::Address(value, TIMES_2, hi_offset), temp, out_of_range);
3749 __ Bind(&done);
3750 }
3751}
3752
3753LocationSummary* LoadCodeUnitsInstr::MakeLocationSummary(Zone* zone,
3754 bool opt) const {
3755 const bool might_box = (representation() == kTagged) && !can_pack_into_smi();
3756 const intptr_t kNumInputs = 2;
3757 const intptr_t kNumTemps = might_box ? 2 : 0;
3758 LocationSummary* summary = new (zone) LocationSummary(
3759 zone, kNumInputs, kNumTemps,
3760 might_box ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
3761 summary->set_in(0, Location::RequiresRegister());
3762 // The smi index is either untagged (element size == 1), or it is left smi
3763 // tagged (for all element sizes > 1).
3764 summary->set_in(1, (index_scale() == 1) ? Location::WritableRegister()
3765 : Location::RequiresRegister());
3766 if (might_box) {
3767 summary->set_temp(0, Location::RequiresRegister());
3768 summary->set_temp(1, Location::RequiresRegister());
3769 }
3770
3771 if (representation() == kUnboxedInt64) {
3772 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
3773 Location::RequiresRegister()));
3774 } else {
3775 ASSERT(representation() == kTagged);
3776 summary->set_out(0, Location::RequiresRegister());
3777 }
3778
3779 return summary;
3780}
3781
3782void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3783 // The string register points to the backing store for external strings.
3784 const Register str = locs()->in(0).reg();
3785 const Location index = locs()->in(1);
3786
3787 bool index_unboxed = false;
3788 if ((index_scale() == 1)) {
3789 __ SmiUntag(index.reg());
3790 index_unboxed = true;
3791 }
3792 compiler::Address element_address =
3793 compiler::Assembler::ElementAddressForRegIndex(
3794 IsExternal(), class_id(), index_scale(), index_unboxed, str,
3795 index.reg());
3796
3797 if (representation() == kUnboxedInt64) {
3798 ASSERT(compiler->is_optimizing());
3799 ASSERT(locs()->out(0).IsPairLocation());
3800 PairLocation* result_pair = locs()->out(0).AsPairLocation();
3801 Register result1 = result_pair->At(0).reg();
3802 Register result2 = result_pair->At(1).reg();
3803
3804 switch (class_id()) {
3805 case kOneByteStringCid:
3806 ASSERT(element_count() == 4);
3807 __ movl(result1, element_address);
3808 __ xorl(result2, result2);
3809 break;
3810 case kTwoByteStringCid:
3811 ASSERT(element_count() == 2);
3812 __ movl(result1, element_address);
3813 __ xorl(result2, result2);
3814 break;
3815 default:
3816 UNREACHABLE();
3817 }
3818 } else {
3819 ASSERT(representation() == kTagged);
3820 Register result = locs()->out(0).reg();
3821 switch (class_id()) {
3822 case kOneByteStringCid:
3823 switch (element_count()) {
3824 case 1:
3825 __ movzxb(result, element_address);
3826 break;
3827 case 2:
3828 __ movzxw(result, element_address);
3829 break;
3830 case 4:
3831 __ movl(result, element_address);
3832 break;
3833 default:
3834 UNREACHABLE();
3835 }
3836 break;
3837 case kTwoByteStringCid:
3838 switch (element_count()) {
3839 case 1:
3840 __ movzxw(result, element_address);
3841 break;
3842 case 2:
3843 __ movl(result, element_address);
3844 break;
3845 default:
3846 UNREACHABLE();
3847 }
3848 break;
3849 default:
3850 UNREACHABLE();
3851 break;
3852 }
3853 if (can_pack_into_smi()) {
3854 __ SmiTag(result);
3855 } else {
3856 // If the value cannot fit in a smi then allocate a mint box for it.
3857 Register temp = locs()->temp(0).reg();
3858 Register temp2 = locs()->temp(1).reg();
3859 // Temp register needs to be manually preserved on allocation slow-path.
3860 // Add it to live_registers set which determines which registers to
3861 // preserve.
3862 locs()->live_registers()->Add(locs()->temp(0), kUnboxedInt32);
3863
3864 ASSERT(temp != result);
3865 __ MoveRegister(temp, result);
3866 __ SmiTag(result);
3867
3868 compiler::Label done;
3869 __ testl(temp, compiler::Immediate(0xC0000000));
3870 __ j(ZERO, &done);
3871 BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
3872 result, temp2);
3873 __ movl(compiler::FieldAddress(result, Mint::value_offset()), temp);
3874 __ movl(compiler::FieldAddress(result, Mint::value_offset() + kWordSize),
3875 compiler::Immediate(0));
3876 __ Bind(&done);
3877 }
3878 }
3879}
3880
3881LocationSummary* BinaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
3882 bool opt) const {
3883 const intptr_t kNumInputs = 2;
3884 const intptr_t kNumTemps = 0;
3885 LocationSummary* summary = new (zone)
3886 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3887 summary->set_in(0, Location::RequiresFpuRegister());
3888 summary->set_in(1, Location::RequiresFpuRegister());
3889 summary->set_out(0, Location::SameAsFirstInput());
3890 return summary;
3891}
3892
3893void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
3894 XmmRegister left = locs()->in(0).fpu_reg();
3895 XmmRegister right = locs()->in(1).fpu_reg();
3896
3897 ASSERT(locs()->out(0).fpu_reg() == left);
3898
3899 switch (op_kind()) {
3900 case Token::kADD:
3901 __ addsd(left, right);
3902 break;
3903 case Token::kSUB:
3904 __ subsd(left, right);
3905 break;
3906 case Token::kMUL:
3907 __ mulsd(left, right);
3908 break;
3909 case Token::kDIV:
3910 __ divsd(left, right);
3911 break;
3912 default:
3913 UNREACHABLE();
3914 }
3915}
3916
3917LocationSummary* DoubleTestOpInstr::MakeLocationSummary(Zone* zone,
3918 bool opt) const {
3919 const intptr_t kNumInputs = 1;
3920 const intptr_t kNumTemps =
3921 op_kind() == MethodRecognizer::kDouble_getIsNegative
3922 ? 2
3923 : (op_kind() == MethodRecognizer::kDouble_getIsInfinite ? 1 : 0);
3924 LocationSummary* summary = new (zone)
3925 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
3926 summary->set_in(0, Location::RequiresFpuRegister());
3927 if (kNumTemps > 0) {
3928 summary->set_temp(0, Location::RequiresRegister());
3929 if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
3930 summary->set_temp(1, Location::RequiresFpuRegister());
3931 }
3932 }
3933 summary->set_out(0, Location::RequiresRegister());
3934 return summary;
3935}
3936
3937Condition DoubleTestOpInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
3938 BranchLabels labels) {
3939 ASSERT(compiler->is_optimizing());
3940 const XmmRegister value = locs()->in(0).fpu_reg();
3941 const bool is_negated = kind() != Token::kEQ;
3942
3943 switch (op_kind()) {
3944 case MethodRecognizer::kDouble_getIsNaN: {
3945 __ comisd(value, value);
3946 return is_negated ? PARITY_ODD : PARITY_EVEN;
3947 }
3948 case MethodRecognizer::kDouble_getIsInfinite: {
3949 const Register temp = locs()->temp(0).reg();
3950 compiler::Label check_upper;
3951 __ AddImmediate(ESP, compiler::Immediate(-kDoubleSize));
3952 __ movsd(compiler::Address(ESP, 0), value);
3953 __ movl(temp, compiler::Address(ESP, 0));
3954 // If the low word isn't zero, then it isn't infinity.
3955 __ cmpl(temp, compiler::Immediate(0));
3956 __ j(EQUAL, &check_upper, compiler::Assembler::kNearJump);
3957 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
3958 __ jmp(is_negated ? labels.true_label : labels.false_label);
3959 __ Bind(&check_upper);
3960 // Check the high word.
3961 __ movl(temp, compiler::Address(ESP, kWordSize));
3962 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
3963 // Mask off sign bit.
3964 __ andl(temp, compiler::Immediate(0x7FFFFFFF));
3965 // Compare with +infinity.
3966 __ cmpl(temp, compiler::Immediate(0x7FF00000));
3967 return is_negated ? NOT_EQUAL : EQUAL;
3968 }
3969 case MethodRecognizer::kDouble_getIsNegative: {
3970 const Register temp = locs()->temp(0).reg();
3971 const FpuRegister temp_fpu = locs()->temp(1).fpu_reg();
3972 compiler::Label not_zero;
3973 __ xorpd(temp_fpu, temp_fpu);
3974 __ comisd(value, temp_fpu);
3975 // If it's NaN, it's not negative.
3976 __ j(PARITY_EVEN, is_negated ? labels.true_label : labels.false_label);
3977 // Looking at the sign bit also takes care of signed zero.
3978 __ movmskpd(temp, value);
3979 __ testl(temp, compiler::Immediate(1));
3980 return is_negated ? EQUAL : NOT_EQUAL;
3981 }
3982 default:
3983 UNREACHABLE();
3984 }
3985}
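// Annotation (not part of il_ia32.cc): a sketch of the getIsInfinite test
// above in terms of the IEEE-754 bit layout: a double is +/-infinity exactly
// when its low 32 bits are zero and its high 32 bits, with the sign bit
// masked off, equal 0x7FF00000.
#include <cstdint>
#include <cstring>

bool IsInfiniteSketch(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  const uint32_t lo = static_cast<uint32_t>(bits);
  const uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return lo == 0 && (hi & 0x7FFFFFFFu) == 0x7FF00000u;
}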
3986
3987// SIMD
3988
3989#define DEFINE_EMIT(Name, Args) \
3990 static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr, \
3991 PP_APPLY(PP_UNPACK, Args))
3992
3993#define SIMD_OP_FLOAT_ARITH(V, Name, op) \
3994 V(Float32x4##Name, op##ps) \
3995 V(Float64x2##Name, op##pd)
3996
3997#define SIMD_OP_SIMPLE_BINARY(V) \
3998 SIMD_OP_FLOAT_ARITH(V, Add, add) \
3999 SIMD_OP_FLOAT_ARITH(V, Sub, sub) \
4000 SIMD_OP_FLOAT_ARITH(V, Mul, mul) \
4001 SIMD_OP_FLOAT_ARITH(V, Div, div) \
4002 SIMD_OP_FLOAT_ARITH(V, Min, min) \
4003 SIMD_OP_FLOAT_ARITH(V, Max, max) \
4004 V(Int32x4Add, addpl) \
4005 V(Int32x4Sub, subpl) \
4006 V(Int32x4BitAnd, andps) \
4007 V(Int32x4BitOr, orps) \
4008 V(Int32x4BitXor, xorps) \
4009 V(Float32x4Equal, cmppseq) \
4010 V(Float32x4NotEqual, cmppsneq) \
4011 V(Float32x4LessThan, cmppslt) \
4012 V(Float32x4LessThanOrEqual, cmppsle)
4013
4014DEFINE_EMIT(SimdBinaryOp,
4015 (SameAsFirstInput, XmmRegister left, XmmRegister right)) {
4016 switch (instr->kind()) {
4017#define EMIT(Name, op) \
4018 case SimdOpInstr::k##Name: \
4019 __ op(left, right); \
4020 break;
4021 SIMD_OP_SIMPLE_BINARY(EMIT)
4022#undef EMIT
4023 case SimdOpInstr::kFloat32x4Scale:
4024 __ cvtsd2ss(left, left);
4025 __ shufps(left, left, compiler::Immediate(0x00));
4026 __ mulps(left, right);
4027 break;
4028 case SimdOpInstr::kFloat32x4ShuffleMix:
4029 case SimdOpInstr::kInt32x4ShuffleMix:
4030 __ shufps(left, right, compiler::Immediate(instr->mask()));
4031 break;
4032 case SimdOpInstr::kFloat64x2FromDoubles:
4033 // shufpd mask 0x0 results in:
4034 // Lower 64-bits of left = Lower 64-bits of left.
4035 // Upper 64-bits of left = Lower 64-bits of right.
4036 __ shufpd(left, right, compiler::Immediate(0x0));
4037 break;
4038 case SimdOpInstr::kFloat64x2Scale:
4039 __ shufpd(right, right, compiler::Immediate(0x00));
4040 __ mulpd(left, right);
4041 break;
4042 case SimdOpInstr::kFloat64x2WithX:
4043 case SimdOpInstr::kFloat64x2WithY: {
4044 // TODO(dartbug.com/30949) avoid transfer through memory
4045 COMPILE_ASSERT(SimdOpInstr::kFloat64x2WithY ==
4046 (SimdOpInstr::kFloat64x2WithX + 1));
4047 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat64x2WithX;
4048 ASSERT(0 <= lane_index && lane_index < 2);
4049 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4050 __ movups(compiler::Address(ESP, 0), left);
4051 __ movsd(compiler::Address(ESP, lane_index * kDoubleSize), right);
4052 __ movups(left, compiler::Address(ESP, 0));
4053 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4054 break;
4055 }
4056 case SimdOpInstr::kFloat32x4WithX:
4057 case SimdOpInstr::kFloat32x4WithY:
4058 case SimdOpInstr::kFloat32x4WithZ:
4059 case SimdOpInstr::kFloat32x4WithW: {
4060 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4061 // insertps. SSE2 these instructions can be implemented via a combination
4062 // of shufps/movss/movlhps.
4063 COMPILE_ASSERT(
4064 SimdOpInstr::kFloat32x4WithY == (SimdOpInstr::kFloat32x4WithX + 1) &&
4065 SimdOpInstr::kFloat32x4WithZ == (SimdOpInstr::kFloat32x4WithX + 2) &&
4066 SimdOpInstr::kFloat32x4WithW == (SimdOpInstr::kFloat32x4WithX + 3));
4067 const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat32x4WithX;
4068 ASSERT(0 <= lane_index && lane_index < 4);
4069 __ cvtsd2ss(left, left);
4070 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4071 __ movups(compiler::Address(ESP, 0), right);
4072 __ movss(compiler::Address(ESP, lane_index * kFloatSize), left);
4073 __ movups(left, compiler::Address(ESP, 0));
4074 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4075 break;
4076 }
4077 default:
4078 UNREACHABLE();
4079 }
4080}
4081
4082#define SIMD_OP_SIMPLE_UNARY(V) \
4083 SIMD_OP_FLOAT_ARITH(V, Sqrt, sqrt) \
4084 SIMD_OP_FLOAT_ARITH(V, Negate, negate) \
4085 SIMD_OP_FLOAT_ARITH(V, Abs, abs) \
4086 V(Float32x4Reciprocal, reciprocalps) \
4087 V(Float32x4ReciprocalSqrt, rsqrtps)
4088
4089DEFINE_EMIT(SimdUnaryOp, (SameAsFirstInput, XmmRegister value)) {
4090 // TODO(dartbug.com/30949) select better register constraints to avoid
4091 // redundant move of input into a different register because all instructions
4092 // below support two operand forms.
4093 switch (instr->kind()) {
4094#define EMIT(Name, op) \
4095 case SimdOpInstr::k##Name: \
4096 __ op(value); \
4097 break;
4098 SIMD_OP_SIMPLE_UNARY(EMIT)
4099#undef EMIT
4100 case SimdOpInstr::kFloat32x4GetX:
4101 // Shuffle not necessary.
4102 __ cvtss2sd(value, value);
4103 break;
4104 case SimdOpInstr::kFloat32x4GetY:
4105 __ shufps(value, value, compiler::Immediate(0x55));
4106 __ cvtss2sd(value, value);
4107 break;
4108 case SimdOpInstr::kFloat32x4GetZ:
4109 __ shufps(value, value, compiler::Immediate(0xAA));
4110 __ cvtss2sd(value, value);
4111 break;
4112 case SimdOpInstr::kFloat32x4GetW:
4113 __ shufps(value, value, compiler::Immediate(0xFF));
4114 __ cvtss2sd(value, value);
4115 break;
4116 case SimdOpInstr::kFloat32x4Shuffle:
4117 case SimdOpInstr::kInt32x4Shuffle:
4118 __ shufps(value, value, compiler::Immediate(instr->mask()));
4119 break;
4120 case SimdOpInstr::kFloat32x4Splat:
4121 // Convert to Float32.
4122 __ cvtsd2ss(value, value);
4123 // Splat across all lanes.
4124 __ shufps(value, value, compiler::Immediate(0x00));
4125 break;
4126 case SimdOpInstr::kFloat64x2ToFloat32x4:
4127 __ cvtpd2ps(value, value);
4128 break;
4129 case SimdOpInstr::kFloat32x4ToFloat64x2:
4130 __ cvtps2pd(value, value);
4131 break;
4132 case SimdOpInstr::kFloat32x4ToInt32x4:
4133 case SimdOpInstr::kInt32x4ToFloat32x4:
4134 // TODO(dartbug.com/30949) these operations are essentially nop and should
4135 // not generate any code. They should be removed from the graph before
4136 // code generation.
4137 break;
4138 case SimdOpInstr::kFloat64x2GetX:
4139 // NOP.
4140 break;
4141 case SimdOpInstr::kFloat64x2GetY:
4142 __ shufpd(value, value, compiler::Immediate(0x33));
4143 break;
4144 case SimdOpInstr::kFloat64x2Splat:
4145 __ shufpd(value, value, compiler::Immediate(0x0));
4146 break;
4147 default:
4148 UNREACHABLE();
4149 }
4150}
4151
4152DEFINE_EMIT(SimdGetSignMask, (Register out, XmmRegister value)) {
4153 switch (instr->kind()) {
4154 case SimdOpInstr::kFloat32x4GetSignMask:
4155 case SimdOpInstr::kInt32x4GetSignMask:
4156 __ movmskps(out, value);
4157 break;
4158 case SimdOpInstr::kFloat64x2GetSignMask:
4159 __ movmskpd(out, value);
4160 break;
4161 default:
4162 UNREACHABLE();
4163 break;
4164 }
4165}
4166
4167DEFINE_EMIT(
4168 Float32x4FromDoubles,
4169 (SameAsFirstInput, XmmRegister v0, XmmRegister, XmmRegister, XmmRegister)) {
4170 // TODO(dartbug.com/30949) avoid transfer through memory. SSE4.1 has
4171 // insertps, with SSE2 this instruction can be implemented through unpcklps.
4172 const XmmRegister out = v0;
4173 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4174 for (intptr_t i = 0; i < 4; i++) {
4175 __ cvtsd2ss(out, instr->locs()->in(i).fpu_reg());
4176 __ movss(compiler::Address(ESP, i * kFloatSize), out);
4177 }
4178 __ movups(out, compiler::Address(ESP, 0));
4179 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4180}
4181
4182DEFINE_EMIT(Float32x4Zero, (XmmRegister out)) {
4183 __ xorps(out, out);
4184}
4185
4186DEFINE_EMIT(Float64x2Zero, (XmmRegister value)) {
4187 __ xorpd(value, value);
4188}
4189
4190DEFINE_EMIT(Float32x4Clamp,
4191 (SameAsFirstInput,
4192 XmmRegister left,
4193 XmmRegister lower,
4194 XmmRegister upper)) {
4195 __ minps(left, upper);
4196 __ maxps(left, lower);
4197}
4198
4199DEFINE_EMIT(Float64x2Clamp,
4200 (SameAsFirstInput,
4201 XmmRegister left,
4202 XmmRegister lower,
4203 XmmRegister upper)) {
4204 __ minpd(left, upper);
4205 __ maxpd(left, lower);
4206}
4207
4208DEFINE_EMIT(Int32x4FromInts,
4209 (XmmRegister result, Register, Register, Register, Register)) {
4210 // TODO(dartbug.com/30949) avoid transfer through memory.
4211 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4212 for (intptr_t i = 0; i < 4; i++) {
4213 __ movl(compiler::Address(ESP, i * kInt32Size), instr->locs()->in(i).reg());
4214 }
4215 __ movups(result, compiler::Address(ESP, 0));
4216 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4217}
4218
4219DEFINE_EMIT(Int32x4FromBools,
4220 (XmmRegister result, Register, Register, Register, Register)) {
4221 // TODO(dartbug.com/30949) avoid transfer through memory and branches.
4222 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4223 for (intptr_t i = 0; i < 4; i++) {
4224 compiler::Label store_false, done;
4225 __ CompareObject(instr->locs()->in(i).reg(), Bool::True());
4226 __ j(NOT_EQUAL, &store_false);
4227 __ movl(compiler::Address(ESP, kInt32Size * i),
4228 compiler::Immediate(0xFFFFFFFF));
4229 __ jmp(&done);
4230 __ Bind(&store_false);
4231 __ movl(compiler::Address(ESP, kInt32Size * i), compiler::Immediate(0x0));
4232 __ Bind(&done);
4233 }
4234 __ movups(result, compiler::Address(ESP, 0));
4235 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4236}
4237
4238// TODO(dartbug.com/30953) need register with a byte component for setcc.
4239DEFINE_EMIT(Int32x4GetFlag, (Fixed<Register, EDX> result, XmmRegister value)) {
4240 COMPILE_ASSERT(
4241 SimdOpInstr::kInt32x4GetFlagY == (SimdOpInstr::kInt32x4GetFlagX + 1) &&
4242 SimdOpInstr::kInt32x4GetFlagZ == (SimdOpInstr::kInt32x4GetFlagX + 2) &&
4243 SimdOpInstr::kInt32x4GetFlagW == (SimdOpInstr::kInt32x4GetFlagX + 3));
4244 const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4GetFlagX;
4245 ASSERT(0 <= lane_index && lane_index < 4);
4246
4247 // TODO(dartbug.com/30949) avoid transfer through memory.
4248 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4249 __ movups(compiler::Address(ESP, 0), value);
4250 __ movl(EDX, compiler::Address(ESP, lane_index * kInt32Size));
4251 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4252
4253 // EDX = EDX != 0 ? 0 : 1
4254 __ testl(EDX, EDX);
4255 __ setcc(ZERO, DL);
4256 __ movzxb(EDX, DL);
4257
4259 __ movl(EDX,
4260 compiler::Address(THR, EDX, TIMES_4, Thread::bool_true_offset()));
4261}
4262
4263// TODO(dartbug.com/30953) need register with a byte component for setcc.
4264DEFINE_EMIT(Int32x4WithFlag,
4265 (SameAsFirstInput,
4266 XmmRegister mask,
4267 Register flag,
4268 Temp<Fixed<Register, EDX> > temp)) {
4269 COMPILE_ASSERT(
4270 SimdOpInstr::kInt32x4WithFlagY == (SimdOpInstr::kInt32x4WithFlagX + 1) &&
4271 SimdOpInstr::kInt32x4WithFlagZ == (SimdOpInstr::kInt32x4WithFlagX + 2) &&
4272 SimdOpInstr::kInt32x4WithFlagW == (SimdOpInstr::kInt32x4WithFlagX + 3));
4273 const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4WithFlagX;
4274 ASSERT(0 <= lane_index && lane_index < 4);
4275
4276 // TODO(dartbug.com/30949) avoid transfer through memory.
4277 __ SubImmediate(ESP, compiler::Immediate(kSimd128Size));
4278 __ movups(compiler::Address(ESP, 0), mask);
4279
4280 // EDX = flag == true ? -1 : 0
4281 __ xorl(EDX, EDX);
4282 __ CompareObject(flag, Bool::True());
4283 __ setcc(EQUAL, DL);
4284 __ negl(EDX);
4285
4286 __ movl(compiler::Address(ESP, lane_index * kInt32Size), EDX);
4287
4288 // Copy mask back to register.
4289 __ movups(mask, compiler::Address(ESP, 0));
4290 __ AddImmediate(ESP, compiler::Immediate(kSimd128Size));
4291}
4292
4293DEFINE_EMIT(Int32x4Select,
4294 (SameAsFirstInput,
4295 XmmRegister mask,
4296 XmmRegister trueValue,
4297 XmmRegister falseValue,
4298 Temp<XmmRegister> temp)) {
4299 // Copy mask.
4300 __ movaps(temp, mask);
4301 // Invert it.
4302 __ notps(temp);
4303 // mask = mask & trueValue.
4304 __ andps(mask, trueValue);
4305 // temp = temp & falseValue.
4306 __ andps(temp, falseValue);
4307 // out = mask | temp.
4308 __ orps(mask, temp);
4309}
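// Annotation (not part of il_ia32.cc): the select above is the standard
// bitwise blend, shown here for a single 32-bit lane.
#include <cstdint>

uint32_t SelectLane(uint32_t mask, uint32_t true_value, uint32_t false_value) {
  return (mask & true_value) | (~mask & false_value);
}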
4310
4311// Map SimdOpInstr::Kind-s to corresponding emit functions. Uses the following
4312// format:
4313//
4314// CASE(OpA) CASE(OpB) ____(Emitter) - Emitter is used to emit OpA and OpB.
4315// SIMPLE(OpA) - Emitter with name OpA is used to emit OpA.
4316//
4317#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
4318 SIMD_OP_SIMPLE_BINARY(CASE) \
4319 CASE(Float32x4Scale) \
4320 CASE(Float32x4ShuffleMix) \
4321 CASE(Int32x4ShuffleMix) \
4322 CASE(Float64x2FromDoubles) \
4323 CASE(Float64x2Scale) \
4324 CASE(Float64x2WithX) \
4325 CASE(Float64x2WithY) \
4326 CASE(Float32x4WithX) \
4327 CASE(Float32x4WithY) \
4328 CASE(Float32x4WithZ) \
4329 CASE(Float32x4WithW) \
4330 ____(SimdBinaryOp) \
4331 SIMD_OP_SIMPLE_UNARY(CASE) \
4332 CASE(Float32x4GetX) \
4333 CASE(Float32x4GetY) \
4334 CASE(Float32x4GetZ) \
4335 CASE(Float32x4GetW) \
4336 CASE(Float32x4Shuffle) \
4337 CASE(Int32x4Shuffle) \
4338 CASE(Float32x4Splat) \
4339 CASE(Float32x4ToFloat64x2) \
4340 CASE(Float64x2ToFloat32x4) \
4341 CASE(Int32x4ToFloat32x4) \
4342 CASE(Float32x4ToInt32x4) \
4343 CASE(Float64x2GetX) \
4344 CASE(Float64x2GetY) \
4345 CASE(Float64x2Splat) \
4346 ____(SimdUnaryOp) \
4347 CASE(Float32x4GetSignMask) \
4348 CASE(Int32x4GetSignMask) \
4349 CASE(Float64x2GetSignMask) \
4350 ____(SimdGetSignMask) \
4351 SIMPLE(Float32x4FromDoubles) \
4352 SIMPLE(Int32x4FromInts) \
4353 SIMPLE(Int32x4FromBools) \
4354 SIMPLE(Float32x4Zero) \
4355 SIMPLE(Float64x2Zero) \
4356 SIMPLE(Float32x4Clamp) \
4357 SIMPLE(Float64x2Clamp) \
4358 CASE(Int32x4GetFlagX) \
4359 CASE(Int32x4GetFlagY) \
4360 CASE(Int32x4GetFlagZ) \
4361 CASE(Int32x4GetFlagW) \
4362 ____(Int32x4GetFlag) \
4363 CASE(Int32x4WithFlagX) \
4364 CASE(Int32x4WithFlagY) \
4365 CASE(Int32x4WithFlagZ) \
4366 CASE(Int32x4WithFlagW) \
4367 ____(Int32x4WithFlag) \
4368 SIMPLE(Int32x4Select)
4369
4370LocationSummary* SimdOpInstr::MakeLocationSummary(Zone* zone, bool opt) const {
4371 switch (kind()) {
4372#define CASE(Name, ...) case k##Name:
4373#define EMIT(Name) \
4374 return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
4375#define SIMPLE(Name) CASE(Name) EMIT(Name)
4376 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4377#undef CASE
4378#undef EMIT
4379#undef SIMPLE
4380 case SimdOpInstr::kFloat32x4GreaterThan:
4381 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4382 case kIllegalSimdOp:
4383 UNREACHABLE();
4384 break;
4385 }
4386 UNREACHABLE();
4387 return nullptr;
4388}
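// Annotation (not part of il_ia32.cc): a rough illustration of how the
// SIMD_OP_VARIANTS table drives the switch above. Expanding SIMPLE(...) and a
// CASE/____ group produces cases of approximately this shape:
//
//   case kFloat32x4Zero:
//     return MakeLocationSummaryFromEmitter(zone, this, &EmitFloat32x4Zero);
//   case kFloat32x4Add:
//   case kFloat64x2Add:
//     // ... every CASE listed before ____(SimdBinaryOp) ...
//     return MakeLocationSummaryFromEmitter(zone, this, &EmitSimdBinaryOp);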
4389
4390void SimdOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4391 switch (kind()) {
4392#define CASE(Name, ...) case k##Name:
4393#define EMIT(Name) \
4394 InvokeEmitter(compiler, this, &Emit##Name); \
4395 break;
4396#define SIMPLE(Name) CASE(Name) EMIT(Name)
4397 SIMD_OP_VARIANTS(CASE, EMIT, SIMPLE)
4398#undef CASE
4399#undef EMIT
4400#undef SIMPLE
4401 case SimdOpInstr::kFloat32x4GreaterThan:
4402 case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
4403 case kIllegalSimdOp:
4404 UNREACHABLE();
4405 break;
4406 }
4407}
4408
4409#undef DEFINE_EMIT
4410
4411 LocationSummary* CaseInsensitiveCompareInstr::MakeLocationSummary(
4412 Zone* zone,
4413 bool opt) const {
4414 const intptr_t kNumTemps = 0;
4415 LocationSummary* summary = new (zone)
4416 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4417 summary->set_in(0, Location::RegisterLocation(EAX));
4418 summary->set_in(1, Location::RegisterLocation(ECX));
4419 summary->set_in(2, Location::RegisterLocation(EDX));
4420 summary->set_in(3, Location::RegisterLocation(EBX));
4421 summary->set_out(0, Location::RegisterLocation(EAX));
4422 return summary;
4423}
4424
4425 void CaseInsensitiveCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4426 compiler::LeafRuntimeScope rt(compiler->assembler(),
4427 /*frame_size=*/4 * compiler::target::kWordSize,
4428 /*preserve_registers=*/false);
4429 __ movl(compiler::Address(ESP, +0 * kWordSize), locs()->in(0).reg());
4430 __ movl(compiler::Address(ESP, +1 * kWordSize), locs()->in(1).reg());
4431 __ movl(compiler::Address(ESP, +2 * kWordSize), locs()->in(2).reg());
4432 __ movl(compiler::Address(ESP, +3 * kWordSize), locs()->in(3).reg());
4433 rt.Call(TargetFunction(), 4);
4434}
4435
4436LocationSummary* MathMinMaxInstr::MakeLocationSummary(Zone* zone,
4437 bool opt) const {
4438 if (result_cid() == kDoubleCid) {
4439 const intptr_t kNumInputs = 2;
4440 const intptr_t kNumTemps = 1;
4441 LocationSummary* summary = new (zone)
4442 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4443 summary->set_in(0, Location::RequiresFpuRegister());
4444 summary->set_in(1, Location::RequiresFpuRegister());
4445 // Reuse the left register so that code can be made shorter.
4446 summary->set_out(0, Location::SameAsFirstInput());
4447 summary->set_temp(0, Location::RequiresRegister());
4448 return summary;
4449 }
4450
4451 ASSERT(result_cid() == kSmiCid);
4452 const intptr_t kNumInputs = 2;
4453 const intptr_t kNumTemps = 0;
4454 LocationSummary* summary = new (zone)
4455 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4456 summary->set_in(0, Location::RequiresRegister());
4457 summary->set_in(1, Location::RequiresRegister());
4458 // Reuse the left register so that code can be made shorter.
4459 summary->set_out(0, Location::SameAsFirstInput());
4460 return summary;
4461}
4462
4463void MathMinMaxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4464 ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
4465 (op_kind() == MethodRecognizer::kMathMax));
4466 const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
4467 if (result_cid() == kDoubleCid) {
4468 compiler::Label done, returns_nan, are_equal;
4469 XmmRegister left = locs()->in(0).fpu_reg();
4470 XmmRegister right = locs()->in(1).fpu_reg();
4471 XmmRegister result = locs()->out(0).fpu_reg();
4472 Register temp = locs()->temp(0).reg();
4473 __ comisd(left, right);
4474 __ j(PARITY_EVEN, &returns_nan, compiler::Assembler::kNearJump);
4475 __ j(EQUAL, &are_equal, compiler::Assembler::kNearJump);
4476 const Condition double_condition =
4477 is_min ? TokenKindToDoubleCondition(Token::kLT)
4478 : TokenKindToDoubleCondition(Token::kGT);
4479 ASSERT(left == result);
4480 __ j(double_condition, &done, compiler::Assembler::kNearJump);
4481 __ movsd(result, right);
4482 __ jmp(&done, compiler::Assembler::kNearJump);
4483
4484 __ Bind(&returns_nan);
4485 static double kNaN = NAN;
4486 __ movsd(result,
4487 compiler::Address::Absolute(reinterpret_cast<uword>(&kNaN)));
4488 __ jmp(&done, compiler::Assembler::kNearJump);
4489
4490 __ Bind(&are_equal);
4491 compiler::Label left_is_negative;
4492 // Check for negative zero: -0.0 is equal to 0.0 but min or max must return
4493 // -0.0 or 0.0 respectively.
4494 // Check for negative left value (get the sign bit):
4495 // - min -> left is negative ? left : right.
4496 // - max -> left is negative ? right : left
4497 // Check the sign bit.
4498 __ movmskpd(temp, left);
4499 __ testl(temp, compiler::Immediate(1));
4500 ASSERT(left == result);
4501 if (is_min) {
4502 __ j(NOT_ZERO, &done,
4503 compiler::Assembler::kNearJump); // Negative -> return left.
4504 } else {
4505 __ j(ZERO, &done,
4506 compiler::Assembler::kNearJump); // Positive -> return left.
4507 }
4508 __ movsd(result, right);
4509 __ Bind(&done);
4510 return;
4511 }
4512
4513 ASSERT(result_cid() == kSmiCid);
4514 Register left = locs()->in(0).reg();
4515 Register right = locs()->in(1).reg();
4516 Register result = locs()->out(0).reg();
4517 __ cmpl(left, right);
4518 ASSERT(result == left);
4519 if (is_min) {
4520 __ cmovgel(result, right);
4521 } else {
4522 __ cmovlessl(result, right);
4523 }
4524}
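// Annotation (not part of il_ia32.cc): a scalar sketch of the double min
// semantics implemented above; NaN propagates and -0.0 is considered smaller
// than +0.0. The max case is symmetric.
#include <cmath>

double MinSketch(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return NAN;
  if (left == right) {
    return std::signbit(left) ? left : right;  // Distinguish -0.0 from 0.0.
  }
  return left < right ? left : right;
}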
4525
4526LocationSummary* UnarySmiOpInstr::MakeLocationSummary(Zone* zone,
4527 bool opt) const {
4528 const intptr_t kNumInputs = 1;
4529 return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
4530 LocationSummary::kNoCall);
4531}
4532
4533void UnarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4534 Register value = locs()->in(0).reg();
4535 ASSERT(value == locs()->out(0).reg());
4536 switch (op_kind()) {
4537 case Token::kNEGATE: {
4538 compiler::Label* deopt =
4539 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnaryOp);
4540 __ negl(value);
4541 __ j(OVERFLOW, deopt);
4542 break;
4543 }
4544 case Token::kBIT_NOT:
4545 __ notl(value);
4546 __ andl(value,
4547 compiler::Immediate(~kSmiTagMask)); // Remove inverted smi-tag.
4548 break;
4549 default:
4550 UNREACHABLE();
4551 }
4552}
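// Annotation (not part of il_ia32.cc): why kBIT_NOT only needs to clear one
// bit afterwards, assuming a Smi x is stored as x << 1 with a zero tag bit:
// ~(x << 1) == (~x << 1) | 1, so stripping the inverted tag bit yields the
// tagged representation of ~x.
#include <cstdint>

int32_t TaggedBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~1;  // notl followed by andl(~kSmiTagMask).
}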
4553
4554LocationSummary* UnaryDoubleOpInstr::MakeLocationSummary(Zone* zone,
4555 bool opt) const {
4556 const intptr_t kNumInputs = 1;
4557 const intptr_t kNumTemps = 0;
4558 LocationSummary* summary = new (zone)
4559 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4560 summary->set_in(0, Location::RequiresFpuRegister());
4561 summary->set_out(0, Location::SameAsFirstInput());
4562 return summary;
4563}
4564
4565void UnaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4566 ASSERT(representation() == kUnboxedDouble);
4567 XmmRegister value = locs()->in(0).fpu_reg();
4568 ASSERT(locs()->out(0).fpu_reg() == value);
4569 switch (op_kind()) {
4570 case Token::kNEGATE:
4571 __ DoubleNegate(value);
4572 break;
4573 case Token::kSQRT:
4574 __ sqrtsd(value, value);
4575 break;
4576 case Token::kSQUARE:
4577 __ mulsd(value, value);
4578 break;
4579 case Token::kTRUNCATE:
4580 __ roundsd(value, value, compiler::Assembler::kRoundToZero);
4581 break;
4582 case Token::kFLOOR:
4583 __ roundsd(value, value, compiler::Assembler::kRoundDown);
4584 break;
4585 case Token::kCEILING:
4586 __ roundsd(value, value, compiler::Assembler::kRoundUp);
4587 break;
4588 default:
4589 UNREACHABLE();
4590 }
4591}
4592
4593LocationSummary* Int32ToDoubleInstr::MakeLocationSummary(Zone* zone,
4594 bool opt) const {
4595 const intptr_t kNumInputs = 1;
4596 const intptr_t kNumTemps = 0;
4597 LocationSummary* result = new (zone)
4598 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4599 result->set_in(0, Location::RequiresRegister());
4600 result->set_out(0, Location::RequiresFpuRegister());
4601 return result;
4602}
4603
4604void Int32ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4605 Register value = locs()->in(0).reg();
4606 FpuRegister result = locs()->out(0).fpu_reg();
4607 __ cvtsi2sd(result, value);
4608}
4609
4610LocationSummary* SmiToDoubleInstr::MakeLocationSummary(Zone* zone,
4611 bool opt) const {
4612 const intptr_t kNumInputs = 1;
4613 const intptr_t kNumTemps = 0;
4614 LocationSummary* result = new (zone)
4615 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4616 result->set_in(0, Location::WritableRegister());
4617 result->set_out(0, Location::RequiresFpuRegister());
4618 return result;
4619}
4620
4621void SmiToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4622 Register value = locs()->in(0).reg();
4623 FpuRegister result = locs()->out(0).fpu_reg();
4624 __ SmiUntag(value);
4625 __ cvtsi2sd(result, value);
4626}
4627
4628LocationSummary* Int64ToDoubleInstr::MakeLocationSummary(Zone* zone,
4629 bool opt) const {
4630 const intptr_t kNumInputs = 1;
4631 const intptr_t kNumTemps = 0;
4632 LocationSummary* result = new (zone)
4633 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4634 result->set_in(0, Location::Pair(Location::RequiresRegister(),
4635 Location::RequiresRegister()));
4636 result->set_out(0, Location::RequiresFpuRegister());
4637 return result;
4638}
4639
4640void Int64ToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4641 PairLocation* pair = locs()->in(0).AsPairLocation();
4642 Register in_lo = pair->At(0).reg();
4643 Register in_hi = pair->At(1).reg();
4644
4645 FpuRegister result = locs()->out(0).fpu_reg();
4646
4647 // Push hi.
4648 __ pushl(in_hi);
4649 // Push lo.
4650 __ pushl(in_lo);
4651 // Perform conversion from Mint to double.
4652 __ fildl(compiler::Address(ESP, 0));
4653 // Pop FPU stack onto regular stack.
4654 __ fstpl(compiler::Address(ESP, 0));
4655 // Copy into result.
4656 __ movsd(result, compiler::Address(ESP, 0));
4657 // Pop args.
4658 __ addl(ESP, compiler::Immediate(2 * kWordSize));
4659}
4660
4661LocationSummary* DoubleToIntegerInstr::MakeLocationSummary(Zone* zone,
4662 bool opt) const {
4663 const intptr_t kNumInputs = 1;
4664 const intptr_t kNumTemps = 0;
4665 LocationSummary* result = new (zone) LocationSummary(
4666 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
4667 result->set_in(0, Location::RequiresFpuRegister());
4668 result->set_out(0, Location::RequiresRegister());
4669 return result;
4670}
4671
4672void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4673 ASSERT(recognized_kind() == MethodRecognizer::kDoubleToInteger);
4674 const Register result = locs()->out(0).reg();
4675 const XmmRegister value_double = locs()->in(0).fpu_reg();
4676
4677 DoubleToIntegerSlowPath* slow_path =
4678 new DoubleToIntegerSlowPath(this, value_double);
4679 compiler->AddSlowPathCode(slow_path);
4680
4681 __ cvttsd2si(result, value_double);
4682 // Overflow is signalled with minint.
4683 // Check for overflow and that it fits into Smi.
4684 __ cmpl(result, compiler::Immediate(0xC0000000));
4685 __ j(NEGATIVE, slow_path->entry_label());
4686 __ SmiTag(result);
4687 __ Bind(slow_path->exit_label());
4688}
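// Annotation (not part of il_ia32.cc): a sketch of why the single
// cmpl(result, 0xC0000000) / j(NEGATIVE, ...) test covers both failure modes:
// cvttsd2si signals overflow with INT32_MIN, and both that sentinel and any
// in-range integer outside [-2^30, 2^30) make result + 2^30 negative in
// 32-bit arithmetic.
#include <cstdint>

bool NeedsSlowPath(int32_t conversion_result) {
  uint32_t biased = static_cast<uint32_t>(conversion_result) + 0x40000000u;
  return static_cast<int32_t>(biased) < 0;  // Sign flag of the cmpl above.
}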
4689
4690LocationSummary* DoubleToSmiInstr::MakeLocationSummary(Zone* zone,
4691 bool opt) const {
4692 const intptr_t kNumInputs = 1;
4693 const intptr_t kNumTemps = 0;
4694 LocationSummary* result = new (zone)
4695 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4696 result->set_in(0, Location::RequiresFpuRegister());
4697 result->set_out(0, Location::RequiresRegister());
4698 return result;
4699}
4700
4701void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4702 compiler::Label* deopt =
4703 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptDoubleToSmi);
4704 Register result = locs()->out(0).reg();
4705 XmmRegister value = locs()->in(0).fpu_reg();
4706 __ cvttsd2si(result, value);
4707 // Check for overflow and that it fits into Smi.
4708 __ cmpl(result, compiler::Immediate(0xC0000000));
4709 __ j(NEGATIVE, deopt);
4710 __ SmiTag(result);
4711}
4712
4713LocationSummary* DoubleToFloatInstr::MakeLocationSummary(Zone* zone,
4714 bool opt) const {
4715 const intptr_t kNumInputs = 1;
4716 const intptr_t kNumTemps = 0;
4717 LocationSummary* result = new (zone)
4718 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4719 result->set_in(0, Location::RequiresFpuRegister());
4720 result->set_out(0, Location::SameAsFirstInput());
4721 return result;
4722}
4723
4724void DoubleToFloatInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4725 __ cvtsd2ss(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
4726}
4727
4728LocationSummary* FloatToDoubleInstr::MakeLocationSummary(Zone* zone,
4729 bool opt) const {
4730 const intptr_t kNumInputs = 1;
4731 const intptr_t kNumTemps = 0;
4732 LocationSummary* result = new (zone)
4733 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
4734 result->set_in(0, Location::RequiresFpuRegister());
4735 result->set_out(0, Location::SameAsFirstInput());
4736 return result;
4737}
4738
4739void FloatToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4740 __ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
4741}
4742
4743LocationSummary* FloatCompareInstr::MakeLocationSummary(Zone* zone,
4744 bool opt) const {
4745 UNREACHABLE();
4746 return NULL;
4747}
4748
4749void FloatCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4750 UNREACHABLE();
4751}
4752
4753LocationSummary* InvokeMathCFunctionInstr::MakeLocationSummary(Zone* zone,
4754 bool opt) const {
4755 ASSERT((InputCount() == 1) || (InputCount() == 2));
4756 const intptr_t kNumTemps =
4757 (recognized_kind() == MethodRecognizer::kMathDoublePow) ? 4 : 1;
4758 LocationSummary* result = new (zone)
4759 LocationSummary(zone, InputCount(), kNumTemps, LocationSummary::kCall);
4760 // EDI is chosen because it is callee saved so we do not need to back it
4761 // up before calling into the runtime.
4762 result->set_temp(0, Location::RegisterLocation(EDI));
4764 if (InputCount() == 2) {
4766 }
4767 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4768 // Temp index 1.
4769 result->set_temp(1, Location::RegisterLocation(EAX));
4770 // Temp index 2.
4772 // We need to block XMM0 for the floating-point calling convention.
4774 }
4776 return result;
4777}
4778
4779// Pseudo code:
4780// if (exponent == 0.0) return 1.0;
4781// // Speed up simple cases.
4782// if (exponent == 1.0) return base;
4783// if (exponent == 2.0) return base * base;
4784// if (exponent == 3.0) return base * base * base;
4785// if (base == 1.0) return 1.0;
4786// if (base.isNaN || exponent.isNaN) {
4787// return double.NAN;
4788// }
4789// if (base != -Infinity && exponent == 0.5) {
4790// if (base == 0.0) return 0.0;
4791// return sqrt(value);
4792// }
4793// TODO(srdjan): Move into a stub?
4794static void InvokeDoublePow(FlowGraphCompiler* compiler,
4795 InvokeMathCFunctionInstr* instr) {
4796 ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
4797 const intptr_t kInputCount = 2;
4798 ASSERT(instr->InputCount() == kInputCount);
4799 LocationSummary* locs = instr->locs();
4800
4801 XmmRegister base = locs->in(0).fpu_reg();
4802 XmmRegister exp = locs->in(1).fpu_reg();
4803 XmmRegister result = locs->out(0).fpu_reg();
4804 Register temp = locs->temp(InvokeMathCFunctionInstr::kObjectTempIndex).reg();
4805 XmmRegister zero_temp =
4806 locs->temp(InvokeMathCFunctionInstr::kDoubleTempIndex).fpu_reg();
4807
4808 __ xorps(zero_temp, zero_temp); // 0.0.
4809 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(1.0)));
4810 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4811
4812 compiler::Label check_base, skip_call;
4813 // exponent == 0.0 -> return 1.0;
4814 __ comisd(exp, zero_temp);
4815 __ j(PARITY_EVEN, &check_base);
4816 __ j(EQUAL, &skip_call); // 'result' is 1.0.
4817
4818 // exponent == 1.0 ?
4819 __ comisd(exp, result);
4820 compiler::Label return_base;
4821 __ j(EQUAL, &return_base, compiler::Assembler::kNearJump);
4822
4823 // exponent == 2.0 ?
4824 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(2.0)));
4825 __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset()));
4826 __ comisd(exp, XMM0);
4827 compiler::Label return_base_times_2;
4828 __ j(EQUAL, &return_base_times_2, compiler::Assembler::kNearJump);
4829
4830 // exponent == 3.0 ?
4831 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(3.0)));
4832 __ movsd(XMM0, compiler::FieldAddress(temp, Double::value_offset()));
4833 __ comisd(exp, XMM0);
4834 __ j(NOT_EQUAL, &check_base);
4835
4836 // base ^ 3 (base * base * base).
4837 __ movsd(result, base);
4838 __ mulsd(result, base);
4839 __ mulsd(result, base);
4840 __ jmp(&skip_call);
4841
4842 __ Bind(&return_base);
4843 __ movsd(result, base);
4844 __ jmp(&skip_call);
4845
4846 __ Bind(&return_base_times_2);
4847 __ movsd(result, base);
4848 __ mulsd(result, base);
4849 __ jmp(&skip_call);
4850
4851 __ Bind(&check_base);
4852 // Note: 'exp' could be NaN.
4853
4854 // base == 1.0 -> return 1.0;
4855 __ comisd(base, result);
4856 compiler::Label return_nan;
4857 __ j(PARITY_EVEN, &return_nan, compiler::Assembler::kNearJump);
4858 __ j(EQUAL, &skip_call, compiler::Assembler::kNearJump);
4859 // Note: 'base' could be NaN.
4860 __ comisd(exp, base);
4861 // Neither 'exp' nor 'base' is NaN.
4862 compiler::Label try_sqrt;
4863 __ j(PARITY_ODD, &try_sqrt, compiler::Assembler::kNearJump);
4864 // Return NaN.
4865 __ Bind(&return_nan);
4866 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(NAN)));
4867 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4868 __ jmp(&skip_call);
4869
4870 compiler::Label do_pow, return_zero;
4871 __ Bind(&try_sqrt);
4872 // Before calling pow, check if we could use sqrt instead of pow.
4873 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(kNegInfinity)));
4874 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4875 // base == -Infinity -> call pow;
4876 __ comisd(base, result);
4877 __ j(EQUAL, &do_pow, compiler::Assembler::kNearJump);
4878
4879 // exponent == 0.5 ?
4880 __ LoadObject(temp, Double::ZoneHandle(Double::NewCanonical(0.5)));
4881 __ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
4882 __ comisd(exp, result);
4883 __ j(NOT_EQUAL, &do_pow, compiler::Assembler::kNearJump);
4884
4885 // base == 0 -> return 0;
4886 __ comisd(base, zero_temp);
4887 __ j(EQUAL, &return_zero, compiler::Assembler::kNearJump);
4888
4889 __ sqrtsd(result, base);
4890 __ jmp(&skip_call, compiler::Assembler::kNearJump);
4891
4892 __ Bind(&return_zero);
4893 __ movsd(result, zero_temp);
4894 __ jmp(&skip_call);
4895
4896 __ Bind(&do_pow);
4897 {
4898 compiler::LeafRuntimeScope rt(compiler->assembler(),
4899 /*frame_size=*/kDoubleSize * kInputCount,
4900 /*preserve_registers=*/false);
4901 for (intptr_t i = 0; i < kInputCount; i++) {
4902 __ movsd(compiler::Address(ESP, kDoubleSize * i), locs->in(i).fpu_reg());
4903 }
4904 rt.Call(instr->TargetFunction(), kInputCount);
4905 __ fstpl(compiler::Address(ESP, 0));
4906 __ movsd(locs->out(0).fpu_reg(), compiler::Address(ESP, 0));
4907 }
4908 __ Bind(&skip_call);
4909}
4910
4911void InvokeMathCFunctionInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4912 if (recognized_kind() == MethodRecognizer::kMathDoublePow) {
4913 InvokeDoublePow(compiler, this);
4914 return;
4915 }
4916
4917 {
4918 compiler::LeafRuntimeScope rt(compiler->assembler(),
4919 /*frame_size=*/kDoubleSize * InputCount(),
4920 /*preserve_registers=*/false);
4921 for (intptr_t i = 0; i < InputCount(); i++) {
4922 __ movsd(compiler::Address(ESP, kDoubleSize * i),
4923 locs()->in(i).fpu_reg());
4924 }
4925 rt.Call(TargetFunction(), InputCount());
4926 __ fstpl(compiler::Address(ESP, 0));
4927 __ movsd(locs()->out(0).fpu_reg(), compiler::Address(ESP, 0));
4928 }
4929}
4930
4931LocationSummary* ExtractNthOutputInstr::MakeLocationSummary(Zone* zone,
4932 bool opt) const {
4933 // Only use this instruction in optimized code.
4934 ASSERT(opt);
4935 const intptr_t kNumInputs = 1;
4936 LocationSummary* summary =
4937 new (zone) LocationSummary(zone, kNumInputs, 0, LocationSummary::kNoCall);
4938 if (representation() == kUnboxedDouble) {
4939 if (index() == 0) {
4940 summary->set_in(
4942 } else {
4943 ASSERT(index() == 1);
4944 summary->set_in(
4946 }
4947 summary->set_out(0, Location::RequiresFpuRegister());
4948 } else {
4949 ASSERT(representation() == kTagged);
4950 if (index() == 0) {
4951 summary->set_in(
4953 } else {
4954 ASSERT(index() == 1);
4955 summary->set_in(
4957 }
4958 summary->set_out(0, Location::RequiresRegister());
4959 }
4960 return summary;
4961}
4962
4963void ExtractNthOutputInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4964 ASSERT(locs()->in(0).IsPairLocation());
4965 PairLocation* pair = locs()->in(0).AsPairLocation();
4966 Location in_loc = pair->At(index());
4967 if (representation() == kUnboxedDouble) {
4968 XmmRegister out = locs()->out(0).fpu_reg();
4969 XmmRegister in = in_loc.fpu_reg();
4970 __ movaps(out, in);
4971 } else {
4972 ASSERT(representation() == kTagged);
4973 Register out = locs()->out(0).reg();
4974 Register in = in_loc.reg();
4975 __ movl(out, in);
4976 }
4977}
4978
4979LocationSummary* UnboxLaneInstr::MakeLocationSummary(Zone* zone,
4980 bool opt) const {
4981 UNREACHABLE();
4982 return NULL;
4983}
4984
4985void UnboxLaneInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4986 UNREACHABLE();
4987}
4988
4989LocationSummary* BoxLanesInstr::MakeLocationSummary(Zone* zone,
4990 bool opt) const {
4991 UNREACHABLE();
4992 return NULL;
4993}
4994
4995void BoxLanesInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
4996 UNREACHABLE();
4997}
4998
4999LocationSummary* TruncDivModInstr::MakeLocationSummary(Zone* zone,
5000 bool opt) const {
5001 const intptr_t kNumInputs = 2;
5002 const intptr_t kNumTemps = 0;
5003 LocationSummary* summary = new (zone)
5004 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5005 // Both inputs must be writable because they will be untagged.
5006 summary->set_in(0, Location::RegisterLocation(EAX));
5007 summary->set_in(1, Location::WritableRegister());
5008 // Output is a pair of registers.
5009 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
5010 Location::RegisterLocation(EDX)));
5011 return summary;
5012}
5013
5014void TruncDivModInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5015 ASSERT(CanDeoptimize());
5016 compiler::Label* deopt =
5017 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinarySmiOp);
5018 Register left = locs()->in(0).reg();
5019 Register right = locs()->in(1).reg();
5020 ASSERT(locs()->out(0).IsPairLocation());
5021 PairLocation* pair = locs()->out(0).AsPairLocation();
5022 Register result1 = pair->At(0).reg();
5023 Register result2 = pair->At(1).reg();
5024 if (RangeUtils::CanBeZero(divisor_range())) {
5025 // Handle divide by zero in runtime.
5026 __ testl(right, right);
5027 __ j(ZERO, deopt);
5028 }
5029 ASSERT(left == EAX);
5030 ASSERT((right != EDX) && (right != EAX));
5031 ASSERT(result1 == EAX);
5032 ASSERT(result2 == EDX);
5033 __ SmiUntag(left);
5034 __ SmiUntag(right);
5035 __ cdq(); // Sign extend EAX -> EDX:EAX.
5036 __ idivl(right); // EAX: quotient, EDX: remainder.
5037 // Check the corner case of dividing the 'MIN_SMI' with -1, in which
5038 // case we cannot tag the result.
5039 // TODO(srdjan): We could store instead untagged intermediate results in a
5040 // typed array, but then the load indexed instructions would need to be
5041 // able to deoptimize.
5042 __ cmpl(EAX, compiler::Immediate(0x40000000));
5043 __ j(EQUAL, deopt);
5044 // Modulo result (EDX) correction:
5045 // res = left % right;
5046 // if (res < 0) {
5047 // if (right < 0) {
5048 // res = res - right;
5049 // } else {
5050 // res = res + right;
5051 // }
5052 // }
5053 compiler::Label done;
5054 __ cmpl(EDX, compiler::Immediate(0));
5055 __ j(GREATER_EQUAL, &done, compiler::Assembler::kNearJump);
5056 // Result is negative, adjust it.
5057 if (RangeUtils::Overlaps(divisor_range(), -1, 1)) {
5058 compiler::Label subtract;
5059 __ cmpl(right, compiler::Immediate(0));
5060 __ j(LESS, &subtract, compiler::Assembler::kNearJump);
5061 __ addl(EDX, right);
5062 __ jmp(&done, compiler::Assembler::kNearJump);
5063 __ Bind(&subtract);
5064 __ subl(EDX, right);
5065 } else if (divisor_range()->IsPositive()) {
5066 // Right is positive.
5067 __ addl(EDX, right);
5068 } else {
5069 // Right is negative.
5070 __ subl(EDX, right);
5071 }
5072 __ Bind(&done);
5073
5074 __ SmiTag(EAX);
5075 __ SmiTag(EDX);
5076}
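// Annotation (not part of il_ia32.cc): a sketch of the remainder correction
// described in the comment above. idivl leaves a remainder with the sign of
// the dividend, and the correction makes it non-negative (the divide-by-zero
// and MIN_SMI / -1 cases deoptimize before reaching this point).
#include <cstdint>

int32_t CorrectedMod(int32_t left, int32_t right) {
  int32_t res = left % right;  // Truncated remainder, sign follows 'left'.
  if (res < 0) {
    res = (right < 0) ? res - right : res + right;
  }
  return res;
}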
5077
5078// Should be kept in sync with integers.cc Multiply64Hash
5079static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
5080 const Register value_lo,
5081 const Register value_hi,
5082 const Register temp) {
5083 __ movl(EDX, compiler::Immediate(0x2d51));
5084 __ mull(EDX); // EAX = lo32(value_lo*0x2d51), EDX = carry(value_lo * 0x2d51)
5085 __ movl(temp, EAX); // save prod_lo32
5086 __ movl(EAX, value_hi); // get saved value_hi
5087 __ movl(value_hi, EDX); // save carry
5088 __ movl(EDX, compiler::Immediate(0x2d51));
5089 __ mull(EDX); // EAX = lo32(value_hi * 0x2d51), EDX = carry(value_hi * 0x2d51)
5090 __ addl(EAX, value_hi); // EAX has prod_hi32, EDX has prod_hi64_lo32
5091
5092 __ xorl(EAX, EDX); // EAX = prod_hi32 ^ prod_hi64_lo32
5093 __ xorl(EAX, temp); // result = prod_hi32 ^ prod_hi64_lo32 ^ prod_lo32
5094 __ andl(EAX, compiler::Immediate(0x3fffffff));
5095}
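// Annotation (not part of il_ia32.cc): a standalone transcription of the hash
// the register sequence above computes, following the prod_lo32/prod_hi32/
// prod_hi64_lo32 recipe spelled out in HashIntegerOpInstr below. It is a
// sketch for reference against integers.cc Multiply64Hash, not VM code.
#include <cstdint>

uint32_t Multiply64HashSketch(uint64_t value) {
  const uint32_t value_lo = static_cast<uint32_t>(value);
  const uint32_t value_hi = static_cast<uint32_t>(value >> 32);
  const uint64_t lo_prod = static_cast<uint64_t>(value_lo) * 0x2d51u;
  const uint64_t hi_prod = static_cast<uint64_t>(value_hi) * 0x2d51u;
  const uint32_t prod_lo32 = static_cast<uint32_t>(lo_prod);
  const uint32_t prod_hi32 =
      static_cast<uint32_t>(hi_prod) + static_cast<uint32_t>(lo_prod >> 32);
  const uint32_t prod_hi64_lo32 = static_cast<uint32_t>(hi_prod >> 32);
  return (prod_lo32 ^ prod_hi32 ^ prod_hi64_lo32) & 0x3fffffff;
}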
5096
5097LocationSummary* HashDoubleOpInstr::MakeLocationSummary(Zone* zone,
5098 bool opt) const {
5099 const intptr_t kNumInputs = 1;
5100 const intptr_t kNumTemps = 4;
5101 LocationSummary* summary = new (zone)
5102 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5103 summary->set_in(0, Location::RequiresFpuRegister());
5104 summary->set_temp(0, Location::RequiresRegister());
5105 summary->set_temp(1, Location::RegisterLocation(EBX));
5106 summary->set_temp(2, Location::RegisterLocation(EDX));
5107 summary->set_temp(3, Location::RequiresFpuRegister());
5108 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
5109 Location::RegisterLocation(EDX)));
5110 return summary;
5111}
5112
5113void HashDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5114 const XmmRegister value = locs()->in(0).fpu_reg();
5115 const Register temp = locs()->temp(0).reg();
5116 ASSERT(locs()->temp(1).reg() == EBX);
5117 ASSERT(locs()->temp(2).reg() == EDX);
5118 const XmmRegister temp_double = locs()->temp(3).fpu_reg();
5119 PairLocation* result_pair = locs()->out(0).AsPairLocation();
5120 ASSERT(result_pair->At(0).reg() == EAX);
5121 ASSERT(result_pair->At(1).reg() == EDX);
5122
5123 // If either nan or infinity, do hash double
5124 compiler::Label hash_double, try_convert;
5125
5126 // extract high 32-bits out of double value.
5127 if (TargetCPUFeatures::sse4_1_supported()) {
5128 __ pextrd(temp, value, compiler::Immediate(1));
5129 } else {
5130 __ SubImmediate(ESP, compiler::Immediate(kDoubleSize));
5131 __ movsd(compiler::Address(ESP, 0), value);
5132 __ movl(temp, compiler::Address(ESP, kWordSize));
5133 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
5134 }
5135 __ andl(temp, compiler::Immediate(0x7FF00000));
5136 __ cmpl(temp, compiler::Immediate(0x7FF00000));
5137 __ j(EQUAL, &hash_double); // is infinity or nan
5138
5139 compiler::Label slow_path;
5140 __ Bind(&try_convert);
5141 __ cvttsd2si(EAX, value);
5142 // Overflow is signaled with minint.
5143 __ cmpl(EAX, compiler::Immediate(0x80000000));
5144 __ j(EQUAL, &slow_path);
5145 __ cvtsi2sd(temp_double, EAX);
5146 __ comisd(value, temp_double);
5147 __ j(NOT_EQUAL, &hash_double);
5148 __ cdq(); // sign-extend EAX to EDX
5149 __ movl(temp, EDX);
5150
5151 compiler::Label hash_integer, done;
5152 // integer hash for (temp:EAX)
5153 __ Bind(&hash_integer);
5154 EmitHashIntegerCodeSequence(compiler, EAX, temp, EBX);
5155 __ jmp(&done);
5156
5157 __ Bind(&slow_path);
5158 // The double value potentially doesn't fit into the Smi range, so
5159 // do the double->int64->double via runtime call.
5160 __ StoreUnboxedDouble(value, THR,
5161 compiler::target::Thread::unboxed_runtime_arg_offset());
5162 {
5163 compiler::LeafRuntimeScope rt(
5164 compiler->assembler(),
5165 /*frame_size=*/1 * compiler::target::kWordSize,
5166 /*preserve_registers=*/true);
5167 __ movl(compiler::Address(ESP, 0 * compiler::target::kWordSize), THR);
5168 // Check if double can be represented as int64, load it into (temp:EAX) if
5169 // it can.
5170 rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
5171 __ movl(EBX, EAX); // use non-volatile register to carry value out.
5172 }
5173 __ orl(EBX, EBX);
5174 __ j(ZERO, &hash_double);
5175 __ movl(EAX,
5176 compiler::Address(
5178 __ movl(temp,
5179 compiler::Address(
5181 kWordSize));
5182 __ jmp(&hash_integer);
5183
5184 __ Bind(&hash_double);
5185 if (TargetCPUFeatures::sse4_1_supported()) {
5186 __ pextrd(EAX, value, compiler::Immediate(0));
5187 __ pextrd(temp, value, compiler::Immediate(1));
5188 } else {
5189 __ SubImmediate(ESP, compiler::Immediate(kDoubleSize));
5190 __ movsd(compiler::Address(ESP, 0), value);
5191 __ movl(EAX, compiler::Address(ESP, 0));
5192 __ movl(temp, compiler::Address(ESP, kWordSize));
5193 __ AddImmediate(ESP, compiler::Immediate(kDoubleSize));
5194 }
5195 __ xorl(EAX, temp);
5196 __ andl(EAX, compiler::Immediate(compiler::target::kSmiMax));
5197
5198 __ Bind(&done);
5199 __ xorl(EDX, EDX);
5200}
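// Illustrative sketch (not part of the original source): the overall strategy
// of HashDoubleOpInstr in portable C++, reusing the hypothetical
// ExampleHashInt64 helper sketched above. Doubles holding an exact integer
// value hash like that integer; NaNs, infinities and fractional values hash
// their raw bit pattern, masked to the ia32 Smi range (0x3fffffff).
// Assumes <cstring> for memcpy.
static uint32_t ExampleHashDouble(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  const uint32_t high = static_cast<uint32_t>(bits >> 32);
  const bool is_nan_or_infinity = (high & 0x7FF00000) == 0x7FF00000;
  // Attempt the integer path only when the conversion is exact and the value
  // is representable as an int64.
  if (!is_nan_or_infinity && value >= -9223372036854775808.0 &&
      value < 9223372036854775808.0) {
    const int64_t as_int64 = static_cast<int64_t>(value);
    if (static_cast<double>(as_int64) == value) {
      return ExampleHashInt64(as_int64);
    }
  }
  return static_cast<uint32_t>(bits ^ (bits >> 32)) & 0x3fffffff;
}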
5201
5202LocationSummary* HashIntegerOpInstr::MakeLocationSummary(Zone* zone,
5203 bool opt) const {
5204 const intptr_t kNumInputs = 1;
5205 const intptr_t kNumTemps = 3;
5206 LocationSummary* summary = new (zone)
5207 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5208 summary->set_in(0, Location::RegisterLocation(EAX));
5209 summary->set_out(0, Location::SameAsFirstInput());
5210 summary->set_temp(0, Location::RequiresRegister());
5211 summary->set_temp(1, Location::RequiresRegister());
5212 summary->set_temp(2, Location::RegisterLocation(EDX));
5213 return summary;
5214}
5215
5216void HashIntegerOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5217 Register value = locs()->in(0).reg();
5218 Register result = locs()->out(0).reg();
5219 Register temp = locs()->temp(0).reg();
5220 Register temp1 = locs()->temp(1).reg();
5221 ASSERT(value == EAX);
5222 ASSERT(result == EAX);
5223
5224 if (smi_) {
5225 __ SmiUntag(EAX);
5226 __ cdq(); // sign-extend EAX to EDX
5227 __ movl(temp, EDX);
5228 } else {
5229 __ LoadFieldFromOffset(temp, EAX,
5231 __ LoadFieldFromOffset(EAX, EAX, Mint::value_offset());
5232 }
5233
5234 // value = value_hi << 32 + value_lo
5235 //
5236 // value * 0x2d51 = (value_hi * 0x2d51) << 32 + value_lo * 0x2d51
5237 // prod_lo32 = value_lo * 0x2d51
5238 // prod_hi32 = carry(value_lo * 0x2d51) + value_hi * 0x2d51
5239 // prod_lo64 = prod_hi32 << 32 + prod_lo32
5240 // prod_hi64_lo32 = carry(value_hi * 0x2d51)
5241 // result = prod_lo32 ^ prod_hi32 ^ prod_hi64_lo32
5242 // return result & 0x3fffffff
5243
5244 // EAX has value_lo
5245 EmitHashIntegerCodeSequence(compiler, EAX, temp, temp1);
5246 __ SmiTag(EAX);
5247}
5248
5249LocationSummary* BranchInstr::MakeLocationSummary(Zone* zone, bool opt) const {
5250 comparison()->InitializeLocationSummary(zone, opt);
5251 // Branches don't produce a result.
5252 comparison()->locs()->set_out(0, Location::NoLocation());
5253 return comparison()->locs();
5254}
5255
5256void BranchInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5257 comparison()->EmitBranchCode(compiler, this);
5258}
5259
5260LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
5261 bool opt) const {
5262 const intptr_t kNumInputs = 1;
5263 const bool need_mask_temp = IsBitTest();
5264 const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
5265 LocationSummary* summary = new (zone)
5266 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5267 summary->set_in(0, Location::RequiresRegister());
5268 if (!IsNullCheck()) {
5269 summary->set_temp(0, Location::RequiresRegister());
5270 if (need_mask_temp) {
5271 summary->set_temp(1, Location::RequiresRegister());
5272 }
5273 }
5274 return summary;
5275}
5276
5277void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
5278 compiler::Label* deopt) {
5279 const compiler::Immediate& raw_null =
5280 compiler::Immediate(static_cast<intptr_t>(Object::null()));
5281 __ cmpl(locs()->in(0).reg(), raw_null);
5282 ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull());
5283 Condition cond = IsDeoptIfNull() ? EQUAL : NOT_EQUAL;
5284 __ j(cond, deopt);
5285}
5286
5287void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
5288 intptr_t min,
5289 intptr_t max,
5290 intptr_t mask,
5291 compiler::Label* deopt) {
5292 Register biased_cid = locs()->temp(0).reg();
5293 __ subl(biased_cid, compiler::Immediate(min));
5294 __ cmpl(biased_cid, compiler::Immediate(max - min));
5295 __ j(ABOVE, deopt);
5296
5297 Register mask_reg = locs()->temp(1).reg();
5298 __ movl(mask_reg, compiler::Immediate(mask));
5299 __ bt(mask_reg, biased_cid);
5300 __ j(NOT_CARRY, deopt);
5301}
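// Illustrative sketch (not part of the original source): what the bit-test
// fast path above computes, in plain C++. A set of class ids inside
// [min, max] is encoded as a bit mask biased by min; membership is one
// subtract, one unsigned comparison and one bit test.
static bool ExampleCidInMask(intptr_t cid,
                             intptr_t min,
                             intptr_t max,
                             uintptr_t mask) {
  const uintptr_t biased_cid = static_cast<uintptr_t>(cid - min);
  if (biased_cid > static_cast<uintptr_t>(max - min)) {
    return false;  // Outside the covered range (also catches cid < min).
  }
  return ((mask >> biased_cid) & 1u) != 0;
}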
5302
5303int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
5304 int bias,
5305 intptr_t cid_start,
5306 intptr_t cid_end,
5307 bool is_last,
5308 compiler::Label* is_ok,
5309 compiler::Label* deopt,
5310 bool use_near_jump) {
5311 Register biased_cid = locs()->temp(0).reg();
5312 Condition no_match, match;
5313 if (cid_start == cid_end) {
5314 __ cmpl(biased_cid, compiler::Immediate(cid_start - bias));
5315 no_match = NOT_EQUAL;
5316 match = EQUAL;
5317 } else {
5318 // For class ID ranges use a subtract followed by an unsigned
5319 // comparison to check both ends of the range with one comparison.
5320 __ addl(biased_cid, compiler::Immediate(bias - cid_start));
5321 bias = cid_start;
5322 __ cmpl(biased_cid, compiler::Immediate(cid_end - cid_start));
5323 no_match = ABOVE;
5324 match = BELOW_EQUAL;
5325 }
5326
5327 if (is_last) {
5328 __ j(no_match, deopt);
5329 } else {
5330 if (use_near_jump) {
5331 __ j(match, is_ok, compiler::Assembler::kNearJump);
5332 } else {
5333 __ j(match, is_ok);
5334 }
5335 }
5336 return bias;
5337}
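// Illustrative sketch (not part of the original source): the single-compare
// range check used above. Subtracting the lower bound and comparing unsigned
// against the range length tests both ends at once, because values below
// cid_start wrap around to large unsigned numbers.
static bool ExampleCidInRange(intptr_t cid,
                              intptr_t cid_start,
                              intptr_t cid_end) {
  return static_cast<uintptr_t>(cid - cid_start) <=
         static_cast<uintptr_t>(cid_end - cid_start);
}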
5338
5339LocationSummary* CheckSmiInstr::MakeLocationSummary(Zone* zone,
5340 bool opt) const {
5341 const intptr_t kNumInputs = 1;
5342 const intptr_t kNumTemps = 0;
5343 LocationSummary* summary = new (zone)
5344 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5345 summary->set_in(0, Location::RequiresRegister());
5346 return summary;
5347}
5348
5349void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5350 Register value = locs()->in(0).reg();
5351 compiler::Label* deopt =
5352 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckSmi);
5353 __ BranchIfNotSmi(value, deopt);
5354}
5355
5356void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5357 ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
5358 compiler->AddSlowPathCode(slow_path);
5359
5360 Register value_reg = locs()->in(0).reg();
5361 // TODO(dartbug.com/30480): Consider passing `null` literal as an argument
5362 // in order to be able to allocate it on register.
5363 __ CompareObject(value_reg, Object::null_object());
5364 __ BranchIf(EQUAL, slow_path->entry_label());
5365}
5366
5367LocationSummary* CheckClassIdInstr::MakeLocationSummary(Zone* zone,
5368 bool opt) const {
5369 const intptr_t kNumInputs = 1;
5370 const intptr_t kNumTemps = 0;
5371 LocationSummary* summary = new (zone)
5372 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5373 summary->set_in(0, cids_.IsSingleCid() ? Location::RequiresRegister()
5374 : Location::WritableRegister());
5375 return summary;
5376}
5377
5378void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5379 Register value = locs()->in(0).reg();
5380 compiler::Label* deopt =
5381 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
5382 if (cids_.IsSingleCid()) {
5383 __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.cid_start)));
5384 __ j(NOT_ZERO, deopt);
5385 } else {
5386 __ AddImmediate(value,
5387 compiler::Immediate(-Smi::RawValue(cids_.cid_start)));
5388 __ cmpl(value, compiler::Immediate(Smi::RawValue(cids_.Extent())));
5389 __ j(ABOVE, deopt);
5390 }
5391}
5392
5393// Length: register or constant.
5394// Index: register, constant or stack slot.
5395LocationSummary* CheckArrayBoundInstr::MakeLocationSummary(Zone* zone,
5396 bool opt) const {
5397 const intptr_t kNumInputs = 2;
5398 const intptr_t kNumTemps = 0;
5399 LocationSummary* locs = new (zone)
5400 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5401 if (length()->definition()->IsConstant()) {
5403 } else {
5405 }
5407 return locs;
5408}
5409
5410void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5411 uint32_t flags = generalized_ ? ICData::kGeneralized : 0;
5412 compiler::Label* deopt =
5413 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckArrayBound, flags);
5414
5415 Location length_loc = locs()->in(kLengthPos);
5416 Location index_loc = locs()->in(kIndexPos);
5417
5418 if (length_loc.IsConstant() && index_loc.IsConstant()) {
5419 ASSERT((Smi::Cast(length_loc.constant()).Value() <=
5420 Smi::Cast(index_loc.constant()).Value()) ||
5421 (Smi::Cast(index_loc.constant()).Value() < 0));
5422 // Unconditionally deoptimize for constant bounds checks because they
5423 // only occur when the index is out-of-bounds.
5424 __ jmp(deopt);
5425 return;
5426 }
5427
5428 const intptr_t index_cid = index()->Type()->ToCid();
5429 if (length_loc.IsConstant()) {
5430 Register index = index_loc.reg();
5431 if (index_cid != kSmiCid) {
5432 __ BranchIfNotSmi(index, deopt);
5433 }
5434 const Smi& length = Smi::Cast(length_loc.constant());
5435 if (length.Value() == Smi::kMaxValue) {
5436 __ testl(index, index);
5437 __ j(NEGATIVE, deopt);
5438 } else {
5439 __ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.ptr())));
5440 __ j(ABOVE_EQUAL, deopt);
5441 }
5442 } else if (index_loc.IsConstant()) {
5443 const Smi& index = Smi::Cast(index_loc.constant());
5444 if (length_loc.IsStackSlot()) {
5445 const compiler::Address& length = LocationToStackSlotAddress(length_loc);
5446 __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
5447 } else {
5448 Register length = length_loc.reg();
5449 __ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
5450 }
5451 __ j(BELOW_EQUAL, deopt);
5452 } else if (length_loc.IsStackSlot()) {
5453 Register index = index_loc.reg();
5454 const compiler::Address& length = LocationToStackSlotAddress(length_loc);
5455 if (index_cid != kSmiCid) {
5456 __ BranchIfNotSmi(index, deopt);
5457 }
5458 __ cmpl(index, length);
5459 __ j(ABOVE_EQUAL, deopt);
5460 } else {
5461 Register index = index_loc.reg();
5462 Register length = length_loc.reg();
5463 if (index_cid != kSmiCid) {
5464 __ BranchIfNotSmi(index, deopt);
5465 }
5466 __ cmpl(length, index);
5467 __ j(BELOW_EQUAL, deopt);
5468 }
5469}
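// Illustrative sketch (not part of the original source): why one unsigned
// comparison of the *tagged* values is a complete bounds check. Smi tagging
// on ia32 is value << 1 (tag bit 0), which preserves ordering, and a negative
// index becomes a large unsigned number that fails the same comparison.
static bool ExampleSmiBoundsCheck(intptr_t tagged_index,
                                  intptr_t tagged_length) {
  return static_cast<uintptr_t>(tagged_index) <
         static_cast<uintptr_t>(tagged_length);
}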
5470
5471LocationSummary* CheckWritableInstr::MakeLocationSummary(Zone* zone,
5472 bool opt) const {
5473 const intptr_t kNumInputs = 1;
5474 const intptr_t kNumTemps = 1;
5475 LocationSummary* locs = new (zone) LocationSummary(
5476 zone, kNumInputs, kNumTemps,
5481 return locs;
5482}
5483
5484void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5485 WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
5486 const Register temp = locs()->temp(0).reg();
5487 compiler->AddSlowPathCode(slow_path);
5488 __ movl(temp,
5489 compiler::FieldAddress(locs()->in(0).reg(),
5491 __ testl(temp, compiler::Immediate(
5493 __ j(NOT_ZERO, slow_path->entry_label());
5494}
5495
5496LocationSummary* BinaryInt64OpInstr::MakeLocationSummary(Zone* zone,
5497 bool opt) const {
5498 const intptr_t kNumInputs = 2;
5499 switch (op_kind()) {
5500 case Token::kBIT_AND:
5501 case Token::kBIT_OR:
5502 case Token::kBIT_XOR:
5503 case Token::kADD:
5504 case Token::kSUB: {
5505 const intptr_t kNumTemps = 0;
5506 LocationSummary* summary = new (zone) LocationSummary(
5507 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5508 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5509                                   Location::RequiresRegister()));
5510 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
5511                                   Location::RequiresRegister()));
5512 summary->set_out(0, Location::SameAsFirstInput());
5513 return summary;
5514 }
5515 case Token::kMUL: {
5516 const intptr_t kNumTemps = 1;
5517 LocationSummary* summary = new (zone) LocationSummary(
5518 zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5519 summary->set_in(0, Location::Pair(Location::RegisterLocation(EAX),
5520                                   Location::RegisterLocation(EDX)));
5521 summary->set_in(1, Location::Pair(Location::RequiresRegister(),
5522                                   Location::RequiresRegister()));
5523 summary->set_out(0, Location::SameAsFirstInput());
5524 summary->set_temp(0, Location::RequiresRegister());
5525 return summary;
5526 }
5527 default:
5528 UNREACHABLE();
5529 return nullptr;
5530 }
5531}
5532
5533void BinaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5534 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5535 Register left_lo = left_pair->At(0).reg();
5536 Register left_hi = left_pair->At(1).reg();
5537 PairLocation* right_pair = locs()->in(1).AsPairLocation();
5538 Register right_lo = right_pair->At(0).reg();
5539 Register right_hi = right_pair->At(1).reg();
5540 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5541 Register out_lo = out_pair->At(0).reg();
5542 Register out_hi = out_pair->At(1).reg();
5543 ASSERT(out_lo == left_lo);
5544 ASSERT(out_hi == left_hi);
5545 ASSERT(!can_overflow());
5547
5548 switch (op_kind()) {
5549 case Token::kBIT_AND:
5550 __ andl(left_lo, right_lo);
5551 __ andl(left_hi, right_hi);
5552 break;
5553 case Token::kBIT_OR:
5554 __ orl(left_lo, right_lo);
5555 __ orl(left_hi, right_hi);
5556 break;
5557 case Token::kBIT_XOR:
5558 __ xorl(left_lo, right_lo);
5559 __ xorl(left_hi, right_hi);
5560 break;
5561 case Token::kADD:
5562 case Token::kSUB: {
5563 if (op_kind() == Token::kADD) {
5564 __ addl(left_lo, right_lo);
5565 __ adcl(left_hi, right_hi);
5566 } else {
5567 __ subl(left_lo, right_lo);
5568 __ sbbl(left_hi, right_hi);
5569 }
5570 break;
5571 }
5572 case Token::kMUL: {
5573 // Compute 64-bit a * b as:
5574 // a_l * b_l + (a_h * b_l + a_l * b_h) << 32
5575 // Since we requested EDX:EAX for in and out,
5576 // we can use these as scratch registers once
5577 // input has been consumed.
5578 Register temp = locs()->temp(0).reg();
5579 __ movl(temp, left_lo);
5580 __ imull(left_hi, right_lo); // a_h * b_l
5581 __ imull(temp, right_hi); // a_l * b_h
5582 __ addl(temp, left_hi); // sum_high
5583 ASSERT(left_lo == EAX);
5584 __ mull(right_lo); // a_l * b_l in EDX:EAX
5585 __ addl(EDX, temp); // add sum_high
5586 ASSERT(out_lo == EAX);
5587 ASSERT(out_hi == EDX);
5588 break;
5589 }
5590 default:
5591 UNREACHABLE();
5592 }
5593}
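// Illustrative sketch (not part of the original source): the kMUL
// decomposition used above, expressed with 32-bit limbs. Only the low 64
// bits of the product are kept, so the a_h * b_h term never matters.
static uint64_t ExampleMul64From32(uint32_t a_lo, uint32_t a_hi,
                                   uint32_t b_lo, uint32_t b_hi) {
  const uint64_t low_product = static_cast<uint64_t>(a_lo) * b_lo;      // mull
  const uint32_t sum_high = a_hi * b_lo + a_lo * b_hi;  // two imull, truncated
  return low_product + (static_cast<uint64_t>(sum_high) << 32);
}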
5594
5595static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
5596 Token::Kind op_kind,
5597 Register left_lo,
5598 Register left_hi,
5599 const Object& right) {
5600 const int64_t shift = Integer::Cast(right).AsInt64Value();
5601 ASSERT(shift >= 0);
5602 switch (op_kind) {
5603 case Token::kSHR: {
5604 if (shift > 31) {
5605 __ movl(left_lo, left_hi); // Shift by 32.
5606 __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi.
5607 if (shift > 32) {
5608 __ sarl(left_lo, compiler::Immediate(shift > 63 ? 31 : shift - 32));
5609 }
5610 } else {
5611 __ shrdl(left_lo, left_hi, compiler::Immediate(shift));
5612 __ sarl(left_hi, compiler::Immediate(shift));
5613 }
5614 break;
5615 }
5616 case Token::kUSHR: {
5617 ASSERT(shift < 64);
5618 if (shift > 31) {
5619 __ movl(left_lo, left_hi); // Shift by 32.
5620 __ xorl(left_hi, left_hi); // Zero extend left hi.
5621 if (shift > 32) {
5622 __ shrl(left_lo, compiler::Immediate(shift - 32));
5623 }
5624 } else {
5625 __ shrdl(left_lo, left_hi, compiler::Immediate(shift));
5626 __ shrl(left_hi, compiler::Immediate(shift));
5627 }
5628 break;
5629 }
5630 case Token::kSHL: {
5631 ASSERT(shift < 64);
5632 if (shift > 31) {
5633 __ movl(left_hi, left_lo); // Shift by 32.
5634 __ xorl(left_lo, left_lo); // Zero left_lo.
5635 if (shift > 32) {
5636 __ shll(left_hi, compiler::Immediate(shift - 32));
5637 }
5638 } else {
5639 __ shldl(left_hi, left_lo, compiler::Immediate(shift));
5640 __ shll(left_lo, compiler::Immediate(shift));
5641 }
5642 break;
5643 }
5644 default:
5645 UNREACHABLE();
5646 }
5647}
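// Illustrative sketch (not part of the original source): a 64-bit arithmetic
// right shift built from two 32-bit words, following the same case split as
// the kSHR branch of EmitShiftInt64ByConstant. Shifts of 32 or more first
// move the high word down; shorter shifts combine both words. Relies on
// signed >> behaving as an arithmetic shift, as on the supported compilers.
static void ExampleSar64(uint32_t* lo, uint32_t* hi, int64_t shift) {
  if (shift == 0) return;
  if (shift > 31) {
    const int lo_shift = shift > 63 ? 31 : static_cast<int>(shift) - 32;
    *lo = static_cast<uint32_t>(static_cast<int32_t>(*hi) >> lo_shift);
    *hi = static_cast<uint32_t>(static_cast<int32_t>(*hi) >> 31);  // Sign fill.
  } else {
    *lo = (*lo >> shift) | (*hi << (32 - shift));                    // shrdl
    *hi = static_cast<uint32_t>(static_cast<int32_t>(*hi) >> shift);  // sarl
  }
}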
5648
5649static void EmitShiftInt64ByECX(FlowGraphCompiler* compiler,
5650 Token::Kind op_kind,
5651 Register left_lo,
5652 Register left_hi) {
5653 // The sarl operation masks the shift count to 5 bits, and
5654 // shrdl is undefined for counts larger than the operand size (32).
5655 compiler::Label done, large_shift;
5656 switch (op_kind) {
5657 case Token::kSHR: {
5658 __ cmpl(ECX, compiler::Immediate(31));
5659 __ j(ABOVE, &large_shift);
5660
5661 __ shrdl(left_lo, left_hi, ECX); // Shift count in CL.
5662 __ sarl(left_hi, ECX); // Shift count in CL.
5663 __ jmp(&done, compiler::Assembler::kNearJump);
5664
5665 __ Bind(&large_shift);
5666 // No need to subtract 32 from CL, only 5 bits used by sarl.
5667 __ movl(left_lo, left_hi); // Shift by 32.
5668 __ sarl(left_hi, compiler::Immediate(31)); // Sign extend left hi.
5669 __ sarl(left_lo, ECX); // Shift count: CL % 32.
5670 break;
5671 }
5672 case Token::kUSHR: {
5673 __ cmpl(ECX, compiler::Immediate(31));
5674 __ j(ABOVE, &large_shift);
5675
5676 __ shrdl(left_lo, left_hi, ECX); // Shift count in CL.
5677 __ shrl(left_hi, ECX); // Shift count in CL.
5678 __ jmp(&done, compiler::Assembler::kNearJump);
5679
5680 __ Bind(&large_shift);
5681 // No need to subtract 32 from CL, only 5 bits used by shrl.
5682 __ movl(left_lo, left_hi); // Shift by 32.
5683 __ xorl(left_hi, left_hi); // Zero extend left hi.
5684 __ shrl(left_lo, ECX); // Shift count: CL % 32.
5685 break;
5686 }
5687 case Token::kSHL: {
5688 __ cmpl(ECX, compiler::Immediate(31));
5689 __ j(ABOVE, &large_shift);
5690
5691 __ shldl(left_hi, left_lo, ECX); // Shift count in CL.
5692 __ shll(left_lo, ECX); // Shift count in CL.
5693 __ jmp(&done, compiler::Assembler::kNearJump);
5694
5695 __ Bind(&large_shift);
5696 // No need to subtract 32 from CL, only 5 bits used by shll.
5697 __ movl(left_hi, left_lo); // Shift by 32.
5698 __ xorl(left_lo, left_lo); // Zero left_lo.
5699 __ shll(left_hi, ECX); // Shift count: CL % 32.
5700 break;
5701 }
5702 default:
5703 UNREACHABLE();
5704 }
5705 __ Bind(&done);
5706}
5707
5708static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
5709 Token::Kind op_kind,
5710 Register left,
5711 const Object& right) {
5712 const int64_t shift = Integer::Cast(right).AsInt64Value();
5713 if (shift >= 32) {
5714 __ xorl(left, left);
5715 } else {
5716 switch (op_kind) {
5717 case Token::kSHR:
5718 case Token::kUSHR: {
5719 __ shrl(left, compiler::Immediate(shift));
5720 break;
5721 }
5722 case Token::kSHL: {
5723 __ shll(left, compiler::Immediate(shift));
5724 break;
5725 }
5726 default:
5727 UNREACHABLE();
5728 }
5729 }
5730}
5731
5732static void EmitShiftUint32ByECX(FlowGraphCompiler* compiler,
5733 Token::Kind op_kind,
5734 Register left) {
5735 switch (op_kind) {
5736 case Token::kSHR:
5737 case Token::kUSHR: {
5738 __ shrl(left, ECX);
5739 break;
5740 }
5741 case Token::kSHL: {
5742 __ shll(left, ECX);
5743 break;
5744 }
5745 default:
5746 UNREACHABLE();
5747 }
5748}
5749
5750class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
5751 public:
5752 explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
5753 : ThrowErrorSlowPathCode(instruction,
5754 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5755
5756 const char* name() override { return "int64 shift"; }
5757
5758 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5759 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
5760 Register right_lo = right_pair->At(0).reg();
5761 Register right_hi = right_pair->At(1).reg();
5762 PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
5763 Register out_lo = out_pair->At(0).reg();
5764 Register out_hi = out_pair->At(1).reg();
5765#if defined(DEBUG)
5766 PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
5767 Register left_lo = left_pair->At(0).reg();
5768 Register left_hi = left_pair->At(1).reg();
5769 ASSERT(out_lo == left_lo);
5770 ASSERT(out_hi == left_hi);
5771#endif // defined(DEBUG)
5772
5773 compiler::Label throw_error;
5774 __ testl(right_hi, right_hi);
5775 __ j(NEGATIVE, &throw_error);
5776
5777 switch (instruction()->AsShiftInt64Op()->op_kind()) {
5778 case Token::kSHR:
5779 __ sarl(out_hi, compiler::Immediate(31));
5780 __ movl(out_lo, out_hi);
5781 break;
5782 case Token::kUSHR:
5783 case Token::kSHL: {
5784 __ xorl(out_lo, out_lo);
5785 __ xorl(out_hi, out_hi);
5786 break;
5787 }
5788 default:
5789 UNREACHABLE();
5790 }
5791 __ jmp(exit_label());
5792
5793 __ Bind(&throw_error);
5794
5795 // Can't pass unboxed int64 value directly to runtime call, as all
5796 // arguments are expected to be tagged (boxed).
5797 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5798 // TODO(dartbug.com/33549): Clean this up when unboxed values
5799 // could be passed as arguments.
5800 __ movl(compiler::Address(
5802 right_lo);
5803 __ movl(compiler::Address(
5805 kWordSize),
5806 right_hi);
5807 }
5808};
5809
5810LocationSummary* ShiftInt64OpInstr::MakeLocationSummary(Zone* zone,
5811 bool opt) const {
5812 const intptr_t kNumInputs = 2;
5813 const intptr_t kNumTemps = 0;
5814 LocationSummary* summary = new (zone) LocationSummary(
5815 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5816 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5817                                   Location::RequiresRegister()));
5818 if (RangeUtils::IsPositive(shift_range()) &&
5819 right()->definition()->IsConstant()) {
5820 ConstantInstr* constant = right()->definition()->AsConstant();
5821 summary->set_in(1, Location::Constant(constant));
5822 } else {
5823 summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
5824                                   Location::RequiresRegister()));
5825 }
5826 summary->set_out(0, Location::SameAsFirstInput());
5827 return summary;
5828}
5829
5830void ShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5831 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5832 Register left_lo = left_pair->At(0).reg();
5833 Register left_hi = left_pair->At(1).reg();
5834 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5835 Register out_lo = out_pair->At(0).reg();
5836 Register out_hi = out_pair->At(1).reg();
5837 ASSERT(out_lo == left_lo);
5838 ASSERT(out_hi == left_hi);
5839 ASSERT(!can_overflow());
5840
5841 if (locs()->in(1).IsConstant()) {
5842 EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
5843 locs()->in(1).constant());
5844 } else {
5845 // Code for a variable shift amount (or constant that throws).
5846 ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
5847 Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
5848
5849 // Jump to a slow path if shift count is > 63 or negative.
5850 ShiftInt64OpSlowPath* slow_path = nullptr;
5851 if (!IsShiftCountInRange()) {
5852 slow_path = new (Z) ShiftInt64OpSlowPath(this);
5853 compiler->AddSlowPathCode(slow_path);
5854 __ testl(right_hi, right_hi);
5855 __ j(NOT_ZERO, slow_path->entry_label());
5856 __ cmpl(ECX, compiler::Immediate(kShiftCountLimit));
5857 __ j(ABOVE, slow_path->entry_label());
5858 }
5859
5860 EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
5861
5862 if (slow_path != nullptr) {
5863 __ Bind(slow_path->exit_label());
5864 }
5865 }
5866}
5867
5868LocationSummary* SpeculativeShiftInt64OpInstr::MakeLocationSummary(
5869 Zone* zone,
5870 bool opt) const {
5871 const intptr_t kNumInputs = 2;
5872 const intptr_t kNumTemps = 0;
5873 LocationSummary* summary = new (zone)
5874 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
5875 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
5876                                   Location::RequiresRegister()));
5877 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
5878 summary->set_out(0, Location::SameAsFirstInput());
5879 return summary;
5880}
5881
5882void SpeculativeShiftInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5883 PairLocation* left_pair = locs()->in(0).AsPairLocation();
5884 Register left_lo = left_pair->At(0).reg();
5885 Register left_hi = left_pair->At(1).reg();
5886 PairLocation* out_pair = locs()->out(0).AsPairLocation();
5887 Register out_lo = out_pair->At(0).reg();
5888 Register out_hi = out_pair->At(1).reg();
5889 ASSERT(out_lo == left_lo);
5890 ASSERT(out_hi == left_hi);
5891 ASSERT(!can_overflow());
5892
5893 if (locs()->in(1).IsConstant()) {
5894 EmitShiftInt64ByConstant(compiler, op_kind(), left_lo, left_hi,
5895 locs()->in(1).constant());
5896 } else {
5897 ASSERT(locs()->in(1).reg() == ECX);
5898 __ SmiUntag(ECX);
5899
5900 // Deoptimize if shift count is > 63 or negative (or not a smi).
5901 if (!IsShiftCountInRange()) {
5903 compiler::Label* deopt =
5904 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
5905 __ cmpl(ECX, compiler::Immediate(kShiftCountLimit));
5906 __ j(ABOVE, deopt);
5907 }
5908
5909 EmitShiftInt64ByECX(compiler, op_kind(), left_lo, left_hi);
5910 }
5911}
5912
5913class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
5914 public:
5915 explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
5916 : ThrowErrorSlowPathCode(instruction,
5917 kArgumentErrorUnboxedInt64RuntimeEntry) {}
5918
5919 const char* name() override { return "uint32 shift"; }
5920
5921 void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
5922 PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
5923 Register right_lo = right_pair->At(0).reg();
5924 Register right_hi = right_pair->At(1).reg();
5925 const Register out = instruction()->locs()->out(0).reg();
5926 ASSERT(out == instruction()->locs()->in(0).reg());
5927
5928 compiler::Label throw_error;
5929 __ testl(right_hi, right_hi);
5930 __ j(NEGATIVE, &throw_error);
5931
5932 __ xorl(out, out);
5933 __ jmp(exit_label());
5934
5935 __ Bind(&throw_error);
5936
5937 // Can't pass unboxed int64 value directly to runtime call, as all
5938 // arguments are expected to be tagged (boxed).
5939 // The unboxed int64 argument is passed through a dedicated slot in Thread.
5940 // TODO(dartbug.com/33549): Clean this up when unboxed values
5941 // could be passed as arguments.
5942 __ movl(compiler::Address(
5944 right_lo);
5945 __ movl(compiler::Address(
5947 kWordSize),
5948 right_hi);
5949 }
5950};
5951
5952LocationSummary* ShiftUint32OpInstr::MakeLocationSummary(Zone* zone,
5953 bool opt) const {
5954 const intptr_t kNumInputs = 2;
5955 const intptr_t kNumTemps = 0;
5956 LocationSummary* summary = new (zone) LocationSummary(
5957 zone, kNumInputs, kNumTemps, LocationSummary::kCallOnSlowPath);
5958 summary->set_in(0, Location::RequiresRegister());
5959 if (RangeUtils::IsPositive(shift_range()) &&
5960 right()->definition()->IsConstant()) {
5961 ConstantInstr* constant = right()->definition()->AsConstant();
5962 summary->set_in(1, Location::Constant(constant));
5963 } else {
5964 summary->set_in(1, Location::Pair(Location::RegisterLocation(ECX),
5965                                   Location::RequiresRegister()));
5966 }
5967 summary->set_out(0, Location::SameAsFirstInput());
5968 return summary;
5969}
5970
5971void ShiftUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
5972 Register left = locs()->in(0).reg();
5973 Register out = locs()->out(0).reg();
5974 ASSERT(left == out);
5975
5976 if (locs()->in(1).IsConstant()) {
5977 EmitShiftUint32ByConstant(compiler, op_kind(), left,
5978 locs()->in(1).constant());
5979 } else {
5980 // Code for a variable shift amount (or constant that throws).
5981 ASSERT(locs()->in(1).AsPairLocation()->At(0).reg() == ECX);
5982 Register right_hi = locs()->in(1).AsPairLocation()->At(1).reg();
5983
5984 // Jump to a slow path if shift count is > 31 or negative.
5985 ShiftUint32OpSlowPath* slow_path = nullptr;
5986 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
5987 slow_path = new (Z) ShiftUint32OpSlowPath(this);
5988 compiler->AddSlowPathCode(slow_path);
5989
5990 __ testl(right_hi, right_hi);
5991 __ j(NOT_ZERO, slow_path->entry_label());
5992 __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit));
5993 __ j(ABOVE, slow_path->entry_label());
5994 }
5995
5996 EmitShiftUint32ByECX(compiler, op_kind(), left);
5997
5998 if (slow_path != nullptr) {
5999 __ Bind(slow_path->exit_label());
6000 }
6001 }
6002}
6003
6004LocationSummary* SpeculativeShiftUint32OpInstr::MakeLocationSummary(
6005 Zone* zone,
6006 bool opt) const {
6007 const intptr_t kNumInputs = 2;
6008 const intptr_t kNumTemps = 0;
6009 LocationSummary* summary = new (zone)
6010 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6011 summary->set_in(0, Location::RequiresRegister());
6012 summary->set_in(1, LocationFixedRegisterOrSmiConstant(right(), ECX));
6013 summary->set_out(0, Location::SameAsFirstInput());
6014 return summary;
6015}
6016
6017void SpeculativeShiftUint32OpInstr::EmitNativeCode(
6018 FlowGraphCompiler* compiler) {
6019 Register left = locs()->in(0).reg();
6020 Register out = locs()->out(0).reg();
6021 ASSERT(left == out);
6022
6023 if (locs()->in(1).IsConstant()) {
6024 EmitShiftUint32ByConstant(compiler, op_kind(), left,
6025 locs()->in(1).constant());
6026 } else {
6027 ASSERT(locs()->in(1).reg() == ECX);
6028 __ SmiUntag(ECX);
6029
6030 if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
6031 if (!IsShiftCountInRange()) {
6032 // Deoptimize if shift count is negative.
6034 compiler::Label* deopt =
6035 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptBinaryInt64Op);
6036
6037 __ testl(ECX, ECX);
6038 __ j(LESS, deopt);
6039 }
6040
6041 compiler::Label cont;
6042 __ cmpl(ECX, compiler::Immediate(kUint32ShiftCountLimit));
6043 __ j(LESS_EQUAL, &cont);
6044
6045 __ xorl(left, left);
6046
6047 __ Bind(&cont);
6048 }
6049
6050 EmitShiftUint32ByECX(compiler, op_kind(), left);
6051 }
6052}
6053
6054LocationSummary* UnaryInt64OpInstr::MakeLocationSummary(Zone* zone,
6055 bool opt) const {
6056 const intptr_t kNumInputs = 1;
6057 const intptr_t kNumTemps = 0;
6058 LocationSummary* summary = new (zone)
6059 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6060 summary->set_in(0, Location::Pair(Location::RequiresRegister(),
6061                                   Location::RequiresRegister()));
6062 summary->set_out(0, Location::SameAsFirstInput());
6063 return summary;
6064}
6065
6066void UnaryInt64OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6067 PairLocation* left_pair = locs()->in(0).AsPairLocation();
6068 Register left_lo = left_pair->At(0).reg();
6069 Register left_hi = left_pair->At(1).reg();
6070 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6071 Register out_lo = out_pair->At(0).reg();
6072 Register out_hi = out_pair->At(1).reg();
6073 ASSERT(out_lo == left_lo);
6074 ASSERT(out_hi == left_hi);
6075 switch (op_kind()) {
6076 case Token::kBIT_NOT:
6077 __ notl(left_lo);
6078 __ notl(left_hi);
6079 break;
6080 case Token::kNEGATE:
6081 __ negl(left_lo);
6082 __ adcl(left_hi, compiler::Immediate(0));
6083 __ negl(left_hi);
6084 break;
6085 default:
6086 UNREACHABLE();
6087 }
6088}
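// Illustrative sketch (not part of the original source): the kNEGATE
// sequence above (negl / adcl / negl) in plain C++. Negating the low word
// produces a carry exactly when it was non-zero; that carry is folded into
// the high word before the high word is negated, yielding the 64-bit two's
// complement.
static void ExampleNegate64(uint32_t* lo, uint32_t* hi) {
  const uint32_t carry = (*lo != 0) ? 1 : 0;  // CF produced by negl(left_lo).
  *lo = 0u - *lo;                             // negl(left_lo)
  *hi = 0u - (*hi + carry);  // adcl(left_hi, 0) followed by negl(left_hi)
}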
6089
6090LocationSummary* UnaryUint32OpInstr::MakeLocationSummary(Zone* zone,
6091 bool opt) const {
6092 const intptr_t kNumInputs = 1;
6093 const intptr_t kNumTemps = 0;
6094 LocationSummary* summary = new (zone)
6095 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6096 summary->set_in(0, Location::RequiresRegister());
6097 summary->set_out(0, Location::SameAsFirstInput());
6098 return summary;
6099}
6100
6101void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6102 Register out = locs()->out(0).reg();
6103 ASSERT(locs()->in(0).reg() == out);
6104
6105 ASSERT(op_kind() == Token::kBIT_NOT);
6106
6107 __ notl(out);
6108}
6109
6110LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
6111 bool opt) const {
6112 const intptr_t kNumInputs = 1;
6113 const intptr_t kNumTemps = 0;
6114 LocationSummary* summary = new (zone)
6115 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6116
6117 if (from() == kUntagged || to() == kUntagged) {
6118 ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
6119 (from() == kUntagged && to() == kUnboxedUint32) ||
6120 (from() == kUnboxedInt32 && to() == kUntagged) ||
6121 (from() == kUnboxedUint32 && to() == kUntagged));
6123 summary->set_in(0, Location::RequiresRegister());
6124 summary->set_out(0, Location::SameAsFirstInput());
6125 } else if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) &&
6126 (to() == kUnboxedInt32 || to() == kUnboxedUint32)) {
6127 summary->set_in(0, Location::RequiresRegister());
6128 summary->set_out(0, Location::SameAsFirstInput());
6129 } else if (from() == kUnboxedInt64) {
6130 summary->set_in(
6134 summary->set_out(0, Location::RequiresRegister());
6135 } else if (from() == kUnboxedUint32) {
6136 summary->set_in(0, Location::RequiresRegister());
6137 summary->set_out(0, Location::Pair(Location::RequiresRegister(),
6138                                     Location::RequiresRegister()));
6139 } else if (from() == kUnboxedInt32) {
6140 summary->set_in(0, Location::RegisterLocation(EAX));
6141 summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
6142                                     Location::RegisterLocation(EDX)));
6143 }
6144
6145 return summary;
6146}
6147
6148void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6149 const bool is_nop_conversion =
6150 (from() == kUntagged && to() == kUnboxedInt32) ||
6151 (from() == kUntagged && to() == kUnboxedUint32) ||
6152 (from() == kUnboxedInt32 && to() == kUntagged) ||
6153 (from() == kUnboxedUint32 && to() == kUntagged);
6154 if (is_nop_conversion) {
6155 ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
6156 return;
6157 }
6158
6159 if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
6160 // Representations are bitwise equivalent.
6161 ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
6162 } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
6163 // Representations are bitwise equivalent.
6164 ASSERT(locs()->out(0).reg() == locs()->in(0).reg());
6165 if (CanDeoptimize()) {
6166 compiler::Label* deopt =
6167 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6168 __ testl(locs()->out(0).reg(), locs()->out(0).reg());
6169 __ j(NEGATIVE, deopt);
6170 }
6171 } else if (from() == kUnboxedInt64) {
6172 // TODO(vegorov) kUnboxedInt64 -> kInt32 conversion is currently usually
6173 // dominated by a CheckSmi(BoxInt64(val)) which is an artifact of ordering
6174 // of optimization passes and the way we check smi-ness of values.
6175 // Optimize it away.
6176 ASSERT(to() == kUnboxedInt32 || to() == kUnboxedUint32);
6177 PairLocation* in_pair = locs()->in(0).AsPairLocation();
6178 Register in_lo = in_pair->At(0).reg();
6179 Register in_hi = in_pair->At(1).reg();
6180 Register out = locs()->out(0).reg();
6181 // Copy low word.
6182 __ movl(out, in_lo);
6183 if (CanDeoptimize()) {
6184 compiler::Label* deopt =
6185 compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
6186 __ sarl(in_lo, compiler::Immediate(31));
6187 __ cmpl(in_lo, in_hi);
6188 __ j(NOT_EQUAL, deopt);
6189 }
6190 } else if (from() == kUnboxedUint32) {
6191 ASSERT(to() == kUnboxedInt64);
6192 Register in = locs()->in(0).reg();
6193 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6194 Register out_lo = out_pair->At(0).reg();
6195 Register out_hi = out_pair->At(1).reg();
6196 // Copy low word.
6197 __ movl(out_lo, in);
6198 // Zero upper word.
6199 __ xorl(out_hi, out_hi);
6200 } else if (from() == kUnboxedInt32) {
6201 ASSERT(to() == kUnboxedInt64);
6202 PairLocation* out_pair = locs()->out(0).AsPairLocation();
6203 Register out_lo = out_pair->At(0).reg();
6204 Register out_hi = out_pair->At(1).reg();
6205 ASSERT(locs()->in(0).reg() == EAX);
6206 ASSERT(out_lo == EAX && out_hi == EDX);
6207 __ cdq();
6208 } else {
6209 UNREACHABLE();
6210 }
6211}
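// Illustrative sketch (not part of the original source): the truncation check
// emitted above for kUnboxedInt64 -> 32-bit conversions. The 64-bit value
// fits when its high word equals the sign extension of its low word.
static bool ExampleInt64FitsInInt32(uint32_t lo, uint32_t hi) {
  const uint32_t sign_extension =
      static_cast<uint32_t>(static_cast<int32_t>(lo) >> 31);  // sarl(in_lo, 31)
  return hi == sign_extension;                                // cmpl(in_lo, in_hi)
}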
6212
6213LocationSummary* StopInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6214 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6215}
6216
6217void StopInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6218 __ Stop(message());
6219}
6220
6221void GraphEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6222 BlockEntryInstr* entry = normal_entry();
6223 if (entry != nullptr) {
6224 if (!compiler->CanFallThroughTo(entry)) {
6225 FATAL("Checked function entry must have no offset");
6226 }
6227 } else {
6228 entry = osr_entry();
6229 if (!compiler->CanFallThroughTo(entry)) {
6230 __ jmp(compiler->GetJumpLabel(entry));
6231 }
6232 }
6233}
6234
6235LocationSummary* GotoInstr::MakeLocationSummary(Zone* zone, bool opt) const {
6236 return new (zone) LocationSummary(zone, 0, 0, LocationSummary::kNoCall);
6237}
6238
6239void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6240 if (!compiler->is_optimizing()) {
6241 if (FLAG_reorder_basic_blocks) {
6242 compiler->EmitEdgeCounter(block()->preorder_number());
6243 }
6244 // Add a deoptimization descriptor for deoptimizing instructions that
6245 // may be inserted before this instruction.
6246 compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
6247 InstructionSource());
6248 }
6249 if (HasParallelMove()) {
6251 }
6252
6253 // We can fall through if the successor is the next block in the list.
6254 // Otherwise, we need a jump.
6255 if (!compiler->CanFallThroughTo(successor())) {
6256 __ jmp(compiler->GetJumpLabel(successor()));
6257 }
6258}
6259
6260LocationSummary* IndirectGotoInstr::MakeLocationSummary(Zone* zone,
6261 bool opt) const {
6262 const intptr_t kNumInputs = 1;
6263 const intptr_t kNumTemps = 2;
6264
6265 LocationSummary* summary = new (zone)
6266 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6267
6268 summary->set_in(0, Location::RequiresRegister());
6269 summary->set_temp(0, Location::RequiresRegister());
6270 summary->set_temp(1, Location::RequiresRegister());
6271
6272 return summary;
6273}
6274
6275void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6276 Register index_reg = locs()->in(0).reg();
6277 Register target_reg = locs()->temp(0).reg();
6278 Register offset = locs()->temp(1).reg();
6279
6280 ASSERT(RequiredInputRepresentation(0) == kTagged);
6281 __ LoadObject(offset, offsets_);
6283 /*is_external=*/false, kTypedDataInt32ArrayCid,
6284 /*index_scale=*/4,
6285 /*index_unboxed=*/false, offset, index_reg));
6286
6287 // Load code object from frame.
6288 __ movl(target_reg,
6289 compiler::Address(
6291 // Load instructions object (active_instructions and Code::entry_point() may
6292 // not point to this instructions object any more; see Code::DisableDartCode).
6293 __ movl(target_reg,
6294 compiler::FieldAddress(target_reg, Code::instructions_offset()));
6295 __ addl(target_reg,
6296 compiler::Immediate(Instructions::HeaderSize() - kHeapObjectTag));
6297 __ addl(target_reg, offset);
6298
6299 // Jump to the absolute address.
6300 __ jmp(target_reg);
6301}
6302
6303LocationSummary* StrictCompareInstr::MakeLocationSummary(Zone* zone,
6304 bool opt) const {
6305 const intptr_t kNumInputs = 2;
6306 const intptr_t kNumTemps = 0;
6307 if (needs_number_check()) {
6308 LocationSummary* locs = new (zone)
6309 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6313 return locs;
6314 }
6315 LocationSummary* locs = new (zone)
6316 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
6318 // Only one of the inputs can be a constant. Choose register if the first one
6319 // is a constant.
6320 locs->set_in(1, locs->in(0).IsConstant()
6324 return locs;
6325}
6326
6327Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
6328 FlowGraphCompiler* compiler,
6329 BranchLabels labels,
6330 Register reg,
6331 const Object& obj) {
6332 return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
6333 source(), deopt_id());
6334}
6335
6336// Detect pattern when one value is zero and another is a power of 2.
6337static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
6338 return (Utils::IsPowerOfTwo(v1) && (v2 == 0)) ||
6339 (Utils::IsPowerOfTwo(v2) && (v1 == 0));
6340}
6341
6342LocationSummary* IfThenElseInstr::MakeLocationSummary(Zone* zone,
6343 bool opt) const {
6344 comparison()->InitializeLocationSummary(zone, opt);
6345 // TODO(dartbug.com/30953): support byte register constraints in the
6346 // register allocator.
6347 comparison()->locs()->set_out(0, Location::RegisterLocation(EDX));
6348 return comparison()->locs();
6349}
6350
6351void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6352 ASSERT(locs()->out(0).reg() == EDX);
6353
6354 // Clear upper part of the out register. We are going to use setcc on it
6355 // which is a byte move.
6356 __ xorl(EDX, EDX);
6357
6358 // Emit comparison code. This must not overwrite the result register.
6359 // IfThenElseInstr::Supports() should prevent EmitComparisonCode from using
6360 // the labels or returning an invalid condition.
6361 BranchLabels labels = {nullptr, nullptr, nullptr};
6362 Condition true_condition = comparison()->EmitComparisonCode(compiler, labels);
6363 ASSERT(true_condition != kInvalidCondition);
6364
6365 const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
6366
6367 intptr_t true_value = if_true_;
6368 intptr_t false_value = if_false_;
6369
6370 if (is_power_of_two_kind) {
6371 if (true_value == 0) {
6372 // We need to have zero in EDX on true_condition.
6373 true_condition = InvertCondition(true_condition);
6374 }
6375 } else {
6376 if (true_value == 0) {
6377 // Swap values so that false_value is zero.
6378 intptr_t temp = true_value;
6379 true_value = false_value;
6380 false_value = temp;
6381 } else {
6382 true_condition = InvertCondition(true_condition);
6383 }
6384 }
6385
6386 __ setcc(true_condition, DL);
6387
6388 if (is_power_of_two_kind) {
6389 const intptr_t shift =
6390 Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));
6391 __ shll(EDX, compiler::Immediate(shift + kSmiTagSize));
6392 } else {
6393 __ decl(EDX);
6394 __ andl(EDX, compiler::Immediate(Smi::RawValue(true_value) -
6395 Smi::RawValue(false_value)));
6396 if (false_value != 0) {
6397 __ addl(EDX, compiler::Immediate(Smi::RawValue(false_value)));
6398 }
6399 }
6400}
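// Illustrative sketch (not part of the original source): the branchless
// select emitted above for the general case. `condition_selects_false`
// stands for the 0/1 byte produced by setcc after the possible condition
// inversion; both values are already Smi-tagged. When one value is zero and
// the other a power of two, the code instead shifts the 0/1 flag left to
// form the tagged result directly.
static intptr_t ExampleSelectTagged(bool condition_selects_false,
                                    intptr_t tagged_true,
                                    intptr_t tagged_false) {
  // decl turns the 0/1 flag into an all-zero or all-ones mask.
  const intptr_t mask = static_cast<intptr_t>(condition_selects_false) - 1;
  // The mask keeps either the difference or zero; adding the false value
  // back yields tagged_true or tagged_false without a branch.
  return (mask & (tagged_true - tagged_false)) + tagged_false;
}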
6401
6402LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
6403 bool opt) const {
6404 const intptr_t kNumInputs = 1;
6405 const intptr_t kNumTemps = 0;
6406 LocationSummary* summary = new (zone)
6407 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6408 summary->set_in(0, Location::RegisterLocation(FUNCTION_REG)); // Function.
6409 summary->set_out(0, Location::RegisterLocation(EAX));
6410 return summary;
6411}
6412
6413void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6414 // Load arguments descriptor.
6415 const intptr_t argument_count = ArgumentCount(); // Includes type args.
6416 const Array& arguments_descriptor =
6417     Array::ZoneHandle(Z, GetArgumentsDescriptor());
6418 __ LoadObject(ARGS_DESC_REG, arguments_descriptor);
6419
6420 // EBX: Code (compiled code or lazy compile stub).
6421 ASSERT(locs()->in(0).reg() == FUNCTION_REG);
6422 __ movl(EBX,
6423 compiler::FieldAddress(FUNCTION_REG, Function::entry_point_offset()));
6424
6425 // FUNCTION_REG: Function.
6426 // ARGS_DESC_REG: Arguments descriptor array.
6427 // ECX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
6428 __ xorl(IC_DATA_REG, IC_DATA_REG);
6429 __ call(EBX);
6430 compiler->EmitCallsiteMetadata(source(), deopt_id(),
6431 UntaggedPcDescriptors::kOther, locs(), env());
6432 compiler->EmitDropArguments(argument_count);
6433}
6434
6435LocationSummary* BooleanNegateInstr::MakeLocationSummary(Zone* zone,
6436 bool opt) const {
6437 return LocationSummary::Make(zone, 1, Location::SameAsFirstInput(),
6438                               LocationSummary::kNoCall);
6439}
6440
6441void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6442 Register input = locs()->in(0).reg();
6443 Register result = locs()->out(0).reg();
6444 ASSERT(input == result);
6445 __ xorl(result, compiler::Immediate(
6447}
6448
6449LocationSummary* BoolToIntInstr::MakeLocationSummary(Zone* zone,
6450 bool opt) const {
6451 UNREACHABLE();
6452 return nullptr;
6453}
6454
6455void BoolToIntInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6456 UNREACHABLE();
6457}
6458
6459LocationSummary* IntToBoolInstr::MakeLocationSummary(Zone* zone,
6460 bool opt) const {
6461 UNREACHABLE();
6462 return nullptr;
6463}
6464
6465void IntToBoolInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6466 UNREACHABLE();
6467}
6468
6469LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
6470 bool opt) const {
6471 const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
6472 const intptr_t kNumTemps = 0;
6473 LocationSummary* locs = new (zone)
6474 LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
6475 if (type_arguments() != nullptr) {
6478 }
6480 return locs;
6481}
6482
6483void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6484 const Code& stub = Code::ZoneHandle(
6485     compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
6486 compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
6487 locs(), deopt_id(), env());
6488}
6489
6490void DebugStepCheckInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
6491#ifdef PRODUCT
6492 UNREACHABLE();
6493#else
6494 ASSERT(!compiler->is_optimizing());
6495 __ Call(StubCode::DebugStepCheck());
6496 compiler->AddCurrentDescriptor(stub_kind_, deopt_id_, source());
6497 compiler->RecordSafepoint(locs());
6498#endif
6499}
6500
6501} // namespace dart
6502
6503#undef __
6504
6505#endif // defined(TARGET_ARCH_IA32)
virtual Value * num_elements() const
Definition: il.h:7846
virtual Representation representation() const
Definition: il.h:3501
Definition * OriginalDefinition()
Definition: il.cc:532
Value * value() const
Definition: il.h:9101
MethodRecognizer::Kind op_kind() const
Definition: il.h:9103
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10060
Value * value() const
Definition: il.h:10111
static intptr_t value_offset()
Definition: object.h:10139
static DoublePtr NewCanonical(double d)
Definition: object.cc:23418
bool is_null_aware() const
Definition: il.h:5341
virtual Representation representation() const
Definition: il.h:10337
intptr_t index() const
Definition: il.h:10335
void EmitReturnMoves(FlowGraphCompiler *compiler, const Register temp0, const Register temp1)
Definition: il.cc:7690
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(FfiCallInstr, VariadicDefinition, FIELD_LIST) private void EmitParamMoves(FlowGraphCompiler *compiler, const Register saved_fp, const Register temp0, const Register temp1)
Definition: il.cc:7478
virtual Representation representation() const
Definition: il.cc:8095
static bool CanExecuteGeneratedCodeInSafepoint()
Definition: il.h:6142
intptr_t TargetAddressIndex() const
Definition: il.h:6100
static intptr_t guarded_cid_offset()
Definition: object.h:4669
bool is_nullable() const
Definition: object.cc:11770
@ kUnknownFixedLength
Definition: object.h:4728
@ kUnknownLengthOffset
Definition: object.h:4727
@ kNoFixedLength
Definition: object.h:4729
static intptr_t guarded_list_length_in_object_offset_offset()
Definition: object.h:4693
intptr_t guarded_cid() const
Definition: object.cc:11749
static intptr_t is_nullable_offset()
Definition: object.h:4766
static intptr_t guarded_list_length_offset()
Definition: object.h:4683
static intptr_t entry_point_offset(CodeEntryKind entry_kind=CodeEntryKind::kNormal)
Definition: object.h:3203
ParallelMoveInstr * parallel_move() const
Definition: il.h:3735
BlockEntryInstr * block() const
Definition: il.h:3710
bool HasParallelMove() const
Definition: il.h:3737
JoinEntryInstr * successor() const
Definition: il.h:3713
FunctionEntryInstr * normal_entry() const
Definition: il.h:2001
OsrEntryInstr * osr_entry() const
Definition: il.h:2007
const Field & field() const
Definition: il.h:6520
Value * value() const
Definition: il.h:6518
Value * value() const
Definition: il.h:9149
Value * value() const
Definition: il.h:9189
@ kGeneralized
Definition: object.h:2525
ComparisonInstr * comparison() const
Definition: il.h:5483
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.h:3807
Value * offset() const
Definition: il.h:3829
const AbstractType & type() const
Definition: il.h:7284
intptr_t GetDeoptId() const
Definition: il.h:1409
virtual bool UseSharedSlowPathStub(bool is_optimizing) const
Definition: il.h:1377
Environment * env() const
Definition: il.h:1215
virtual LocationSummary * MakeLocationSummary(Zone *zone, bool is_optimizing) const =0
virtual void EmitNativeCode(FlowGraphCompiler *compiler)
Definition: il.h:1213
void InitializeLocationSummary(Zone *zone, bool optimizing)
Definition: il.h:1202
virtual Representation representation() const
Definition: il.h:1260
bool CanDeoptimize() const
Definition: il.h:1079
friend class BlockEntryInstr
Definition: il.h:1403
LocationSummary * locs()
Definition: il.h:1192
InstructionSource source() const
Definition: il.h:1008
intptr_t deopt_id() const
Definition: il.h:993
const T * Cast() const
Definition: il.h:1186
static LocationSummary * MakeCallSummary(Zone *zone, const Instruction *instr, LocationSummary *locs=nullptr)
static intptr_t HeaderSize()
Definition: object.h:5825
Value * value() const
Definition: il.h:9978
Representation to() const
Definition: il.h:11047
Representation from() const
Definition: il.h:11046
const RuntimeEntry & TargetFunction() const
Definition: il.cc:7223
MethodRecognizer::Kind recognized_kind() const
Definition: il.h:10261
static constexpr intptr_t kDoubleTempIndex
Definition: il.h:10297
static constexpr intptr_t kObjectTempIndex
Definition: il.h:10296
static IsolateGroup * Current()
Definition: isolate.h:539
intptr_t TargetAddressIndex() const
Definition: il.h:6198
void EmitParamMoves(FlowGraphCompiler *compiler, Register saved_fp, Register temp0)
Definition: il.cc:8191
LocationSummary * MakeLocationSummaryInternal(Zone *zone, const RegList temps) const
Definition: il.cc:8113
virtual Representation representation() const
Definition: il.h:6909
intptr_t index_scale() const
Definition: il.h:6895
Value * index() const
Definition: il.h:6893
bool can_pack_into_smi() const
Definition: il.h:6902
intptr_t element_count() const
Definition: il.h:6900
bool IsExternal() const
Definition: il.h:6888
intptr_t class_id() const
Definition: il.h:6899
intptr_t class_id() const
Definition: il.h:6803
bool IsUntagged() const
Definition: il.h:6796
Value * array() const
Definition: il.h:6800
intptr_t index_scale() const
Definition: il.h:6802
Representation representation() const
Definition: il.h:6819
Value * index() const
Definition: il.h:6801
const LocalVariable & local() const
Definition: il.h:5814
Location temp(intptr_t index) const
Definition: locations.h:882
Location out(intptr_t index) const
Definition: locations.h:903
static LocationSummary * Make(Zone *zone, intptr_t input_count, Location out, ContainsCall contains_call)
Definition: locations.cc:187
void set_temp(intptr_t index, Location loc)
Definition: locations.h:894
RegisterSet * live_registers()
Definition: locations.h:941
void set_out(intptr_t index, Location loc)
Definition: locations.cc:232
Location in(intptr_t index) const
Definition: locations.h:866
void set_in(intptr_t index, Location loc)
Definition: locations.cc:205
static Location StackSlot(intptr_t stack_index, Register base)
Definition: locations.h:447
static Location NoLocation()
Definition: locations.h:387
static Location SameAsFirstInput()
Definition: locations.h:382
static Location Pair(Location first, Location second)
Definition: locations.cc:271
Register reg() const
Definition: locations.h:404
static Location FpuRegisterLocation(FpuRegister reg)
Definition: locations.h:410
static Location WritableRegister()
Definition: locations.h:376
bool IsConstant() const
Definition: locations.h:292
static Location RegisterLocation(Register reg)
Definition: locations.h:398
static Location PrefersRegister()
Definition: locations.h:358
static Location Any()
Definition: locations.h:352
PairLocation * AsPairLocation() const
Definition: locations.cc:280
static Location RequiresRegister()
Definition: locations.h:365
static Location RequiresFpuRegister()
Definition: locations.h:369
FpuRegister fpu_reg() const
Definition: locations.h:416
const Object & constant() const
Definition: locations.cc:373
static Location Constant(const ConstantInstr *obj, int pair_index=0)
Definition: locations.h:294
Value * right() const
Definition: il.h:8970
intptr_t result_cid() const
Definition: il.h:8972
Value * left() const
Definition: il.h:8969
MethodRecognizer::Kind op_kind() const
Definition: il.h:8967
Value * length() const
Definition: il.h:3211
bool unboxed_inputs() const
Definition: il.h:3216
Value * src_start() const
Definition: il.h:3209
void EmitLoopCopy(FlowGraphCompiler *compiler, Register dest_reg, Register src_reg, Register length_reg, compiler::Label *done, compiler::Label *copy_forwards=nullptr)
void PrepareLengthRegForLoop(FlowGraphCompiler *compiler, Register length_reg, compiler::Label *done)
Value * dest_start() const
Definition: il.h:3210
static intptr_t value_offset()
Definition: object.h:10074
virtual Representation representation() const
Definition: il.h:3387
Value * value() const
Definition: il.h:3377
Location location() const
Definition: il.h:3374
static int ComputeArgcTag(const Function &function)
void SetupNative()
Definition: il.cc:7347
bool is_auto_scope() const
Definition: il.h:6026
bool is_bootstrap_native() const
Definition: il.h:6025
const Function & function() const
Definition: il.h:6023
NativeFunction native_c_function() const
Definition: il.h:6024
bool link_lazily() const
Definition: il.h:6027
static constexpr intptr_t kVMTagOffsetFromFp
Definition: il.h:2235
static ObjectPtr null()
Definition: object.h:433
static Object & ZoneHandle()
Definition: object.h:419
static intptr_t data_offset()
Definition: object.h:10554
Location At(intptr_t i) const
Definition: locations.h:618
static bool Overlaps(Range *range, intptr_t min, intptr_t max)
static bool OnlyLessThanOrEqualTo(Range *range, intptr_t value)
static bool IsWithin(const Range *range, int64_t min, int64_t max)
static bool IsPositive(Range *range)
static bool CanBeZero(Range *range)
void Add(Location loc, Representation rep=kTagged)
Definition: locations.h:754
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(ShiftIntegerOpInstr, BinaryIntegerOpInstr, FIELD_LIST) protected bool IsShiftCountInRange(int64_t max=kShiftCountLimit) const
Definition: il.cc:2112
Range * shift_range() const
Definition: il.h:9655
Kind kind() const
Definition: il.h:11358
Value * value() const
Definition: il.h:9952
static constexpr intptr_t kBits
Definition: object.h:9986
static SmiPtr New(intptr_t value)
Definition: object.h:10006
static constexpr intptr_t kMaxValue
Definition: object.h:9987
static intptr_t RawValue(intptr_t value)
Definition: object.h:10022
const char * message() const
Definition: il.h:3681
bool ShouldEmitStoreBarrier() const
Definition: il.h:7089
virtual Representation RequiredInputRepresentation(intptr_t idx) const
Definition: il.cc:6925
Value * value() const
Definition: il.h:7083
Value * array() const
Definition: il.h:7081
intptr_t class_id() const
Definition: il.h:7086
bool IsUntagged() const
Definition: il.h:7114
intptr_t index_scale() const
Definition: il.h:7085
Value * index() const
Definition: il.h:7082
Value * value() const
Definition: il.h:5963
const LocalVariable & local() const
Definition: il.h:5962
const Field & field() const
Definition: il.h:6729
Value * value() const
Definition: il.h:6730
bool needs_number_check() const
Definition: il.h:5125
Value * str() const
Definition: il.h:6967
static intptr_t length_offset()
Definition: object.h:10214
static CodePtr GetAllocationStubForClass(const Class &cls)
Definition: stub_code.cc:174
static constexpr int kNullCharCodeSymbolOffset
Definition: symbols.h:605
static StringPtr * PredefinedAddress()
Definition: symbols.h:772
static bool sse4_1_supported()
Definition: cpu_ia32.h:61
intptr_t ArgumentCount() const
Definition: il.h:4586
ArrayPtr GetArgumentsDescriptor() const
Definition: il.h:4617
virtual intptr_t InputCount() const
Definition: il.h:2755
const ZoneGrowableArray< intptr_t > & cid_results() const
Definition: il.h:5234
static intptr_t stack_limit_offset()
Definition: thread.h:402
@ kOsrRequest
Definition: thread.h:425
static intptr_t stack_overflow_flags_offset()
Definition: thread.h:443
static bool IsEqualityOperator(Kind tok)
Definition: token.h:236
virtual Representation representation() const
Definition: il.h:9841
Value * value() const
Definition: il.h:9828
Token::Kind op_kind() const
Definition: il.h:9829
Value * value() const
Definition: il.h:9240
Token::Kind op_kind() const
Definition: il.h:9241
virtual Representation representation() const
Definition: il.h:8703
Value * value() const
Definition: il.h:8678
bool is_truncating() const
Definition: il.h:8772
uword constant_address() const
Definition: il.h:4291
virtual Representation representation() const
Definition: il.h:4288
bool IsScanFlagsUnboxed() const
Definition: il.cc:7181
static T Abs(T x)
Definition: utils.h:49
static int32_t Low32Bits(int64_t value)
Definition: utils.h:369
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static constexpr int ShiftForPowerOfTwo(T x)
Definition: utils.h:81
static int32_t High32Bits(int64_t value)
Definition: utils.h:373
static T Minimum(T x, T y)
Definition: utils.h:36
static T AddWithWrapAround(T a, T b)
Definition: utils.h:431
static bool DoublesBitEqual(const double a, const double b)
Definition: utils.h:525
static constexpr size_t HighestBit(int64_t v)
Definition: utils.h:185
static constexpr bool IsPowerOfTwo(T x)
Definition: utils.h:76
bool NeedsWriteBarrier()
Definition: il.cc:1390
bool BindsToConstant() const
Definition: il.cc:1183
intptr_t BoundSmiConstant() const
Definition: il.cc:1212
bool BindsToSmiConstant() const
Definition: il.cc:1208
Definition * definition() const
Definition: il.h:103
CompileType * Type()
Value(Definition *definition)
Definition: il.h:95
intptr_t InputCount() const
Definition: il.h:2794
static Address Absolute(const uword addr)
static bool EmittingComments()
Address ElementAddressForRegIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
static Address VMTagAddress()
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
static bool IsSafeSmi(const Object &object)
static bool IsSafe(const Object &object)
static word entry_point_offset(CodeEntryKind kind=CodeEntryKind::kNormal)
static word OffsetOf(const dart::Field &field)
static word unboxed_runtime_arg_offset()
static word shared_field_table_values_offset()
static word field_table_values_offset()
static word exit_through_ffi_offset()
static word invoke_dart_code_stub_offset()
static word top_exit_frame_info_offset()
#define UNIMPLEMENTED
#define ASSERT(E)
#define FATAL(error)
FlutterSemanticsFlag flag
FlutterSemanticsFlag flags
uint8_t value
GAsyncResult * result
uint32_t * target
int argument_count
Definition: fuchsia.cc:52
static float max(float r, float g, float b)
Definition: hsl.cpp:49
static float min(float r, float g, float b)
Definition: hsl.cpp:48
#define R(r)
#define CASE(Arity, Mask, Name, Args, Result)
#define DEFINE_UNIMPLEMENTED_INSTRUCTION(Name)
Definition: il.h:11867
size_t length
const intptr_t kResultIndex
Definition: marshaller.h:28
intptr_t FrameOffsetInBytesForVariable(const LocalVariable *variable)
Definition: runtime_api.h:344
static constexpr intptr_t kWordSize
Definition: runtime_api.h:274
bool IsSmi(int64_t v)
Definition: runtime_api.cc:31
constexpr word kSmiMax
Definition: runtime_api.h:305
constexpr intptr_t kSmiBits
Definition: runtime_api.h:301
FrameLayout frame_layout
Definition: stack_frame.cc:76
bool HasIntegerValue(const dart::Object &object, int64_t *value)
Definition: runtime_api.cc:239
Definition: dart_vm.cc:33
Location LocationRegisterOrConstant(Value *value)
Definition: locations.cc:289
const intptr_t kSmiBits
Definition: globals.h:24
const Register kWriteBarrierSlotReg
const Register THR
const char *const name
uword FindDoubleConstant(double value)
static Condition InvertCondition(Condition c)
static constexpr int kSavedCallerPcSlotFromFp
bool IsTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:429
static bool IsSmiValue(Value *val, intptr_t *int_val)
const Register kExceptionObjectReg
const Register kWriteBarrierObjectReg
const Register kWriteBarrierValueReg
static constexpr bool IsCalleeSavedRegister(Register reg)
Definition: constants.h:85
constexpr intptr_t kIntptrMin
Definition: globals.h:556
int32_t classid_t
Definition: globals.h:524
static const ClassId kLastErrorCid
Definition: class_id.h:311
@ kIllegalCid
Definition: class_id.h:214
@ kNullCid
Definition: class_id.h:252
@ kDynamicCid
Definition: class_id.h:253
Representation
Definition: locations.h:66
constexpr intptr_t kSimd128Size
Definition: globals.h:459
const FpuRegister FpuTMP
static const ClassId kFirstErrorCid
Definition: class_id.h:310
uintptr_t uword
Definition: globals.h:501
const Register CODE_REG
@ OVERFLOW
@ GREATER_EQUAL
@ UNSIGNED_GREATER
@ kInvalidCondition
@ UNSIGNED_GREATER_EQUAL
@ NOT_CARRY
Definition: constants_x86.h:35
@ NOT_ZERO
@ NO_OVERFLOW
@ NEGATIVE
Definition: constants_x86.h:32
@ LESS_EQUAL
@ BELOW_EQUAL
Definition: constants_x86.h:19
@ PARITY_ODD
Definition: constants_x86.h:24
@ UNSIGNED_LESS
@ NOT_EQUAL
@ ABOVE_EQUAL
Definition: constants_x86.h:16
@ PARITY_EVEN
Definition: constants_x86.h:23
const Register ARGS_DESC_REG
bool IsClampedTypedDataBaseClassId(intptr_t index)
Definition: class_id.h:461
@ kNoRegister
Definition: constants_arm.h:99
Location LocationFixedRegisterOrConstant(Value *value, Register reg)
Definition: locations.cc:339
bool IsExternalPayloadClassId(classid_t cid)
Definition: class_id.h:472
constexpr intptr_t kInt32Size
Definition: globals.h:450
const Register FPREG
DEFINE_BACKEND(LoadThread, (Register out))
Definition: il.cc:8109
static constexpr int kPcMarkerSlotFromFp
const Register FUNCTION_REG
const Register IC_DATA_REG
compiler::Address LocationToStackSlotAddress(Location loc)
Definition: locations.cc:365
constexpr intptr_t kWordSize
Definition: globals.h:509
Location LocationWritableRegisterOrConstant(Value *value)
Definition: locations.cc:314
static bool IsConstant(Definition *def, int64_t *val)
Definition: loops.cc:123
constexpr intptr_t kFloatSize
Definition: globals.h:457
QRegister FpuRegister
constexpr bool FLAG_target_memory_sanitizer
Definition: flags.h:174
const Register kStackTraceObjectReg
static int8_t data[kExtLength]
constexpr intptr_t kDoubleSize
Definition: globals.h:456
Location LocationFixedRegisterOrSmiConstant(Value *value, Register reg)
Definition: locations.cc:348
@ kSmiTagSize
@ kHeapObjectTag
@ kSmiTagMask
@ kSmiTagShift
static ScaleFactor ToScaleFactor(intptr_t index_scale, bool index_unboxed)
Definition: constants.h:95
Location LocationRegisterOrSmiConstant(Value *value, intptr_t min_value, intptr_t max_value)
Definition: locations.cc:297
constexpr intptr_t kBitsPerInt64
Definition: globals.h:467
const Register SPREG
COMPILE_ASSERT(kUnreachableReference == WeakTable::kNoValue)
static bool Bind(PassBindingsCacheMTL &pass, ShaderStage stage, size_t bind_index, const BufferView &view)
SIN Vec< N, uint16_t > mull(const Vec< N, uint8_t > &x, const Vec< N, uint8_t > &y)
Definition: SkVx.h:906
Definition: ref_ptr.h:256
const Scalar scale
SeparatedVector2 offset
static constexpr Register kResultReg
static constexpr Register kLengthReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kTypeArgumentsReg
static constexpr Register kResultReg
static constexpr Register kObjectReg
static constexpr Representation NativeRepresentation(Representation rep)
Definition: il.h:8504
intptr_t first_local_from_fp
Definition: frame_layout.h:37
intptr_t FrameSlotForVariable(const LocalVariable *variable) const
Definition: stack_frame.cc:83
static constexpr intptr_t kBoolVsNullMask
static constexpr intptr_t kBoolValueMask
static constexpr size_t ValueSize(Representation rep)
Definition: locations.h:112
static constexpr bool IsUnboxedInteger(Representation rep)
Definition: locations.h:92
static compiler::OperandSize OperandSize(Representation rep)
Definition: locations.cc:16
static constexpr bool IsUnboxed(Representation rep)
Definition: locations.h:101
static bool IsUnsignedInteger(Representation rep)
Definition: locations.h:126
static Representation RepresentationOfArrayElement(classid_t cid)
Definition: locations.cc:79
static constexpr Register kDstTypeReg
static constexpr Register kInstanceReg
static constexpr Register kFunctionTypeArgumentsReg
static constexpr Register kInstantiatorTypeArgumentsReg
#define ASSERT_BOOL_FALSE_FOLLOWS_BOOL_TRUE()
Definition: thread.h:204
#define kNegInfinity
Definition: globals.h:66
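The Location and LocationSummary helpers indexed above are the building blocks from which every MakeLocationSummary implementation in this backend is assembled. As a minimal sketch only, assuming a hypothetical HypotheticalBinaryOpInstr that is not part of this file, a typical IA32 register-constraint setup combines them like this:

// Illustrative sketch only: a hypothetical binary-op instruction's register
// constraints on IA32, written with the helpers listed in this index.
LocationSummary* HypotheticalBinaryOpInstr::MakeLocationSummary(Zone* zone,
                                                                bool opt) const {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
  // The left operand must live in a register; the right operand may stay a
  // constant if the assembler can encode it as an immediate.
  summary->set_in(0, Location::RequiresRegister());
  summary->set_in(1, LocationRegisterOrConstant(right()));
  // Most IA32 arithmetic is two-address form: the result overwrites the
  // first input, so the register allocator is told to reuse that location.
  summary->set_out(0, Location::SameAsFirstInput());
  return summary;
}

The SameAsFirstInput output constraint reflects the two-address form of IA32 arithmetic instructions, which is why it recurs throughout this file's summaries.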