#if defined(TARGET_ARCH_X64)
#define __ compiler->assembler()->
#define Z (compiler->zone())
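// Shorthand used throughout this file: `__` expands to the current
// assembler and `Z` to the compiler's zone, matching the other backends.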
                                               const Instruction* instr,
                                               LocationSummary* locs) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
#if defined(DART_COMPRESSED_POINTERS)
    case kUnboxedInt64: {
    case kUnboxedDouble: {
  ASSERT(instr->RequiredInputRepresentation(
#if defined(DART_COMPRESSED_POINTERS)
  __ movsxd(index, index);
  __ movq(compiler::Address(instr->base_reg(), index, TIMES_4, instr->offset()),
DEFINE_BACKEND(TailCall, (NoLocation, Fixed<Register, ARGS_DESC_REG>)) {
  compiler->EmitTailCallToStub(instr->code());
  __ set_constant_pool_allowed(true);
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 2;
  LocationSummary* locs = new (zone)
  const bool needs_writable_inputs =
      (((element_size_ == 1) && !unboxed_inputs_) ||
       ((element_size_ == 16) && unboxed_inputs_));
              needs_writable_inputs
              needs_writable_inputs
      length()->definition()->OriginalDefinition()->AsConstant()));
static inline intptr_t SizeOfMemoryCopyElements(intptr_t element_size) {
                                   compiler::Label* done) {
  const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
    __ SmiUntag(length_reg);
  } else if (shift > 0) {
    __ OBJ(shl)(length_reg, compiler::Immediate(shift));
    __ ExtendNonNegativeSmi(length_reg);
                               compiler::Label* done,
                               compiler::Label* copy_forwards) {
  const intptr_t mov_size = SizeOfMemoryCopyElements(element_size_);
  const bool reversed = copy_forwards != nullptr;
  __ BranchIfZero(length_reg, done);
    __ leaq(TMP, compiler::Address(src_reg, length_reg, scale, -mov_size));
    __ CompareRegisters(dest_reg, TMP);
    __ movq(src_reg, TMP);
            compiler::Address(dest_reg, length_reg, scale, -mov_size));
    __ movq(TMP, length_reg);
    __ MulImmediate(TMP, mov_size);
    __ MsanUnpoison(dest_reg, TMP);
  __ MsanUnpoison(dest_reg, TMP);
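// Computes the address of the first element to copy: for a constant start
// index the scaled offset is folded into a single leaq, otherwise the
// (possibly boxed) start register is untagged/extended as needed and used
// as a scaled index.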
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
  if (array_rep != kTagged) {
    case kOneByteStringCid:
    case kTwoByteStringCid:
  ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
  if (start_loc.IsConstant()) {
    const auto& constant = start_loc.constant();
    ASSERT(constant.IsInteger());
    const int64_t start_value = Integer::Cast(constant).AsInt64Value();
        Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
    __ leaq(payload_reg, compiler::Address(array_reg, add_value));
  const Register start_reg = start_loc.reg();
  bool index_unboxed = unboxed_inputs_;
  if (element_size_ == 1 && !index_unboxed) {
    __ SmiUntag(start_reg);
    index_unboxed = true;
  } else if (element_size_ == 16 && index_unboxed) {
    __ SmiTag(start_reg);
    index_unboxed = false;
  } else if (!index_unboxed) {
    __ ExtendNonNegativeSmi(start_reg);
  __ leaq(payload_reg, compiler::Address(array_reg, start_reg, scale, offset));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (value.IsRegister()) {
  } else if (value.IsConstant()) {
  } else if (value.IsFpuRegister()) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (locs()->in(0).IsRegister()) {
  } else if (locs()->in(0).IsPairLocation()) {
  if (compiler->parsed_function().function().IsAsyncFunction() ||
      compiler->parsed_function().function().IsAsyncGenerator()) {
    const Code& stub = GetReturnStub(compiler);
  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
  __ Comment("Stack Check");
  compiler::Label done;
  const intptr_t fp_sp_dist =
  __ CompareImmediate(RDI, compiler::Immediate(fp_sp_dist));
  __ set_constant_pool_allowed(true);
static const RegisterSet kCalleeSaveRegistersSet(
  __ popq(old_exit_frame_reg);
  __ popq(old_exit_through_ffi_reg);
  __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
                                 old_exit_through_ffi_reg,
  __ PopRegisters(kCalleeSaveRegistersSet);
#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
  __ set_constant_pool_allowed(true);
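// An IfThenElse whose two results differ by a power of two can be
// materialized without branches: setcc produces 0 or 1, which is shifted
// into place and biased by the false value (see the shift and the
// false_value adjustment below).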
static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
  BranchLabels labels = {nullptr, nullptr, nullptr};
  const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
  intptr_t true_value = if_true_;
  intptr_t false_value = if_false_;
  if (is_power_of_two_kind) {
    if (true_value == 0) {
    if (true_value == 0) {
      intptr_t temp = true_value;
      true_value = false_value;
  __ setcc(true_condition, DL);
  if (is_power_of_two_kind) {
    const intptr_t shift =
  if (false_value != 0) {
  const intptr_t kNumInputs = 0;
  const intptr_t stack_index =
  const intptr_t kNumInputs = 1;
  __ movq(compiler::Address(
  const intptr_t kNumInputs = 0;
  if (out.IsRegister()) {
                                     intptr_t pair_index) {
  if (destination.IsRegister()) {
      const int64_t value = Integer::Cast(value_).AsInt64Value();
        __ xorl(destination.reg(), destination.reg());
        __ movq(destination.reg(), compiler::Immediate(value));
      __ LoadObject(destination.reg(), value_);
  } else if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
        __ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
      case kUnboxedFloat64x2:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float64x2::Cast(value_).value());
      case kUnboxedFloat32x4:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float32x4::Cast(value_).value());
      case kUnboxedInt32x4:
        __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
  } else if (destination.IsDoubleStackSlot()) {
  } else if (destination.IsQuadStackSlot()) {
      case kUnboxedFloat64x2:
      case kUnboxedFloat32x4:
      case kUnboxedInt32x4:
    ASSERT(destination.IsStackSlot());
      const int64_t value = Integer::Cast(value_).AsInt64Value();
              compiler::Immediate(value));
              bit_cast<int32_t, float>(Double::Cast(value_).value());
  const bool is_unboxed_int =
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
  LocationSummary* locs = new (zone)
  if (is_unboxed_int) {
  if (!locs()->out(0).IsInvalid()) {
  auto const dst_type_loc =
  const intptr_t kNonChangeableInputRegs =
  const intptr_t kCpuRegistersToPreserve =
  const intptr_t kFpuRegistersToPreserve =
  LocationSummary* summary = new (zone) LocationSummary(
  intptr_t next_temp = 0;
    const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
    if (should_preserve) {
      summary->set_temp(next_temp++,
    const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
    if (should_preserve) {
  auto object_store = compiler->isolate_group()->object_store();
  const auto& assert_boolean_stub =
  compiler::Label done;
                         UntaggedPcDescriptors::kOther, locs(),
  const intptr_t kNumInputs = 2;
  if (operation_cid() == kDoubleCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
      operation_cid() == kIntegerCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
static void LoadValueCid(FlowGraphCompiler* compiler,
                         compiler::Label* value_is_smi = nullptr) {
  compiler::Label done;
  if (value_is_smi == nullptr) {
    __ LoadImmediate(value_cid_reg, compiler::Immediate(kSmiCid));
  if (value_is_smi == nullptr) {
  __ LoadClassId(value_cid_reg, value_reg);
static void EmitBranchOnCondition(
  if (labels.fall_through == labels.false_label) {
    __ j(true_condition, labels.true_label, jump_distance);
    __ j(false_condition, labels.false_label, jump_distance);
    if (labels.fall_through != labels.true_label) {
      __ jmp(labels.true_label, jump_distance);
                                     const LocationSummary& locs,
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    ConstantInstr* constant = nullptr;
    if (left.IsConstant()) {
      constant = left.constant_instruction();
      true_condition = FlipCondition(true_condition);
      constant = right.constant_instruction();
    ASSERT(constant->representation() == kTagged);
  } else if (right.IsStackSlot()) {
  return true_condition;
                                       const LocationSummary& locs,
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    ConstantInstr* constant = nullptr;
    if (left.IsConstant()) {
      constant = left.constant_instruction();
      true_condition = FlipCondition(true_condition);
      constant = right.constant_instruction();
  } else if (right.IsStackSlot()) {
  return true_condition;
                                                const LocationSummary& locs,
                                                BranchLabels labels) {
  ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
  const Condition true_condition = TokenKindToIntCondition(kind);
  compiler::Label* equal_result =
      (true_condition == EQUAL) ? labels.true_label : labels.false_label;
  compiler::Label* not_equal_result =
      (true_condition == EQUAL) ? labels.false_label : labels.true_label;
  __ BranchIfSmi(TMP, not_equal_result);
  __ CompareClassId(left, kMintCid);
  __ CompareClassId(right, kMintCid);
  return true_condition;
                                          const LocationSummary& locs,
                                          BranchLabels labels) {
  __ comisd(left, right);
  Condition true_condition = TokenKindToDoubleCondition(kind);
  compiler::Label* nan_result =
      (true_condition == NOT_EQUAL) ? labels.true_label : labels.false_label;
  return true_condition;
                                                 BranchLabels labels) {
    ASSERT(operation_cid() == kMintCid);
    return EmitNullAwareInt64ComparisonOp(compiler, *locs(), kind(), labels);
  if (operation_cid() == kSmiCid) {
  } else if (operation_cid() == kMintCid || operation_cid() == kIntegerCid) {
    return EmitInt64ComparisonOp(compiler, *locs(), kind());
    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
  compiler::Label is_true, is_false;
  BranchLabels labels = {&is_true, &is_false, &is_false};
    EmitBranchOnCondition(compiler, true_condition, labels,
  compiler::Label done;
                                       BranchInstr* branch) {
  BranchLabels labels = compiler->CreateBranchLabels(branch);
    EmitBranchOnCondition(compiler, true_condition, labels);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
                                                   BranchLabels labels) {
  if (right.IsConstant()) {
    const auto operand_size = representation_ == kTagged
    __ TestImmediate(left_reg, compiler::Immediate(ComputeImmediateMask()),
    if (representation_ == kTagged) {
      __ testq(left_reg, right.reg());
  return true_condition;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
                                                      BranchLabels labels) {
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
  compiler::Label* deopt =
  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
  __ j(ZERO, result ? labels.true_label : labels.false_label);
  __ LoadClassId(cid_reg, val_reg);
  for (intptr_t i = 2; i < data.length(); i += 2) {
    const intptr_t test_cid = data[i];
    ASSERT(test_cid != kSmiCid);
    __ cmpq(cid_reg, compiler::Immediate(test_cid));
    __ j(EQUAL, result ? labels.true_label : labels.false_label);
  if (deopt == nullptr) {
    compiler::Label* target = result ? labels.false_label : labels.true_label;
    if (target != labels.fall_through) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (operation_cid() == kDoubleCid) {
    LocationSummary* summary = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
    LocationSummary* summary = new (zone)
    summary->set_in(1, summary->in(0).IsConstant()
                                               BranchLabels labels) {
  if (operation_cid() == kSmiCid) {
  } else if (operation_cid() == kMintCid) {
    return EmitInt64ComparisonOp(compiler, *locs(), kind());
    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, *locs(), kind(), labels);
  __ LoadImmediate(R10, compiler::Immediate(argc_tag));
    stub = &StubCode::CallBootstrapNative();
  __ LoadNativeEntry(RBX, &label,
      source(), *stub, UntaggedPcDescriptors::kOther, locs(),
    stub = &StubCode::CallBootstrapNative();
    stub = &StubCode::CallAutoScopeNative();
    stub = &StubCode::CallNoScopeNative();
  const compiler::ExternalLabel label(
  __ LoadNativeEntry(RBX, &label,
  compiler->GenerateNonLazyDeoptableStubCall(
      source(), *stub, UntaggedPcDescriptors::kOther, locs(),
#define R(r) (1 << r)
                                                  bool is_optimizing) const {
  return MakeLocationSummaryInternal(
      zone, is_optimizing,
    __ pushq(compiler::Immediate(0));
    __ LoadObject(CODE_REG, Code::null_object());
    __ set_constant_pool_allowed(false);
    __ EnterDartFrame(0, PP);
  intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
  __ ReserveAlignedFrameSpace(stack_space);
    __ PushRegisters(kVolatileRegisterSet);
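    // When running under MSan, the outgoing stack arguments written by
    // generated code must be explicitly unpoisoned before the C call,
    // otherwise the callee would see them as uninitialized memory.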
    __ MsanUnpoison(temp, RAX);
    __ MsanUnpoison(is_leaf_ ? FPREG : saved_fp,
    __ CallCFunction(compiler::Address(
        THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
    __ PopRegisters(kVolatileRegisterSet);
  __ Comment(is_leaf_ ? "Leaf Call" : "Call");
#if !defined(PRODUCT)
    __ movq(compiler::Address(
    if (marshaller_.contains_varargs() &&
    __ CallCFunction(target_address, true);
#if !defined(PRODUCT)
    __ movq(compiler::Address(
            compiler::Immediate(0));
                                   UntaggedPcDescriptors::Kind::kOther, locs(),
    __ movq(temp, compiler::Immediate(
    __ TransitionGeneratedToNative(target_address, FPREG, temp,
    if (marshaller_.contains_varargs() &&
    __ CallCFunction(target_address, true);
    __ TransitionNativeToGenerated(true);
        THR, compiler::target::Thread::
                 call_native_through_safepoint_entry_point_offset()));
    __ movq(RBX, target_address);
    if (marshaller_.contains_varargs() &&
    __ Comment("Check Dart_Handle for Error.");
    compiler::Label not_error;
    __ BranchIfSmi(temp, &not_error);
    __ LoadClassId(temp, temp);
    __ Comment("Slow path: call Dart_PropagateError through stub.");
        THR, compiler::target::Thread::
                 call_native_through_safepoint_entry_point_offset()));
    __ movq(RBX, compiler::Address(
                     THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
  __ LeaveDartFrame();
  if (FLAG_precompiled_mode) {
    __ set_constant_pool_allowed(true);
#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)
  __ PushImmediate(compiler::Immediate(0));
  __ PushImmediate(compiler::Immediate(0));
  __ PushImmediate(compiler::Immediate(0));
  __ PushRegisters(kCalleeSaveRegistersSet);
          compiler::Immediate(0));
  __ pushq(compiler::Address(
  __ pushq(compiler::Address(
  __ EmitEntryFrameVerification();
  __ TransitionNativeToGenerated(false,
  const Function& target_function = marshaller_.dart_signature();
  const intptr_t callback_id = target_function.FfiCallbackId();
  __ movq(RAX, compiler::Address(
  __ movq(RAX, compiler::Address(
          RAX, compiler::FieldAddress(
      compiler::FieldAddress(
  __ movq(compiler::Address(FPREG,
  if (FLAG_precompiled_mode) {
    __ pushq(compiler::FieldAddress(
#define R(r) (1 << r)
                                                  bool is_optimizing) const {
  __ MoveRegister(saved_fp, FPREG);
  const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
  __ EnterCFrame(frame_space);
  __ CallCFunction(target_address);
          compiler::Immediate(VMTag::kDartTagId));
  const intptr_t kNumInputs = 1;
      compiler::Address(THR, Thread::predefined_symbols_address_offset()));
                              TIMES_HALF_WORD_SIZE,
  const intptr_t kNumInputs = 1;
  ASSERT(cid_ == kOneByteStringCid);
  compiler::Label is_one, done;
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  const Register bytes_ptr_reg = start_reg;
  const Register bytes_end_reg = end_reg;
  const Register bytes_end_minus_16_reg = bytes_reg;
  const intptr_t kSizeMask = 0x03;
  const intptr_t kFlagsMask = 0x3C;
  compiler::Label scan_ascii, ascii_loop, ascii_loop_in, nonascii_loop;
  compiler::Label rest, rest_loop, rest_loop_in, done;
  __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
  __ leaq(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
  __ leaq(bytes_end_reg, compiler::Address(bytes_reg, end_reg, TIMES_1, 0));
  __ leaq(bytes_end_minus_16_reg, compiler::Address(bytes_end_reg, -16));
  __ xorq(size_reg, size_reg);
  __ xorq(flags_reg, flags_reg);
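  // Fast path: scan the input 16 bytes at a time with SSE. pmovmskb collects
  // the high bits of the 16 bytes into a scalar mask and bsfq finds the first
  // byte with the high bit set, i.e. the first non-ASCII byte; everything
  // before it contributes its length directly to size_reg.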
  __ addq(bytes_ptr_reg, compiler::Immediate(16));
  __ cmpq(bytes_ptr_reg, bytes_end_minus_16_reg);
  __ movups(vector_reg, compiler::Address(bytes_ptr_reg, 0));
  __ pmovmskb(temp_reg, vector_reg);
  __ bsfq(temp_reg, temp_reg);
  __ addq(bytes_ptr_reg, temp_reg);
  __ addq(size_reg, bytes_ptr_reg);
  __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
  __ addq(bytes_ptr_reg, compiler::Immediate(1));
  __ movzxb(temp_reg, compiler::FieldAddress(
  __ orq(flags_reg, temp_reg);
  __ andq(temp_reg, compiler::Immediate(kSizeMask));
  __ addq(size_reg, temp_reg);
  __ cmpq(bytes_ptr_reg, bytes_end_reg);
  __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
  __ testq(temp_reg, compiler::Immediate(0x80));
  __ subq(size_reg, bytes_ptr_reg);
  __ jmp(&ascii_loop_in);
  __ addq(size_reg, bytes_ptr_reg);
  __ movzxb(temp_reg, compiler::Address(bytes_ptr_reg, 0));
  __ addq(bytes_ptr_reg, compiler::Immediate(1));
  __ movzxb(temp_reg, compiler::FieldAddress(
  __ orq(flags_reg, temp_reg);
  __ andq(temp_reg, compiler::Immediate(kSizeMask));
  __ addq(size_reg, temp_reg);
  __ cmpq(bytes_ptr_reg, bytes_end_reg);
  __ andq(flags_reg, compiler::Immediate(kFlagsMask));
  __ SmiTag(flags_reg);
  if (decoder_location.IsStackSlot()) {
    decoder_reg = temp_reg;
    decoder_reg = decoder_location.reg();
  const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
  __ OBJ(or)(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
  __ orq(compiler::FieldAddress(decoder_reg, scan_flags_field_offset),
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  const bool need_writable_index_register =
  const bool can_be_constant =
  bool index_unboxed = index_unboxed_;
  if (index.IsRegister()) {
    if (index_scale_ == 1 && !index_unboxed) {
      index_unboxed = true;
    } else if (index_scale_ == 16 && index_unboxed) {
      index_unboxed = false;
    } else if (!index_unboxed) {
      __ ExtendNonNegativeSmi(index.reg());
  compiler::Address element_address =
          : compiler::Assembler::ElementAddressForIntIndex(
    if (rep == kUnboxedFloat) {
    } else if (rep == kUnboxedDouble) {
      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
      __ movups(result, element_address);
    __ LoadCompressed(result, element_address);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  bool index_unboxed = false;
    index_unboxed = true;
    __ ExtendNonNegativeSmi(index);
  compiler::Address element_address =
    case kOneByteStringCid:
        __ movzxb(result, element_address);
        __ movzxw(result, element_address);
    case kTwoByteStringCid:
        __ movzxw(result, element_address);
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps =
  LocationSummary* locs = new (zone)
  const bool need_writable_index_register =
  const bool can_be_constant =
  if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
  } else if (class_id() == kArrayCid) {
  bool index_unboxed = index_unboxed_;
  if (index.IsRegister()) {
    if (index_scale_ == 1 && !index_unboxed) {
      index_unboxed = true;
    } else if (index_scale_ == 16 && index_unboxed) {
      index_unboxed = false;
    } else if (!index_unboxed) {
      __ ExtendNonNegativeSmi(index.reg());
  compiler::Address element_address =
          : compiler::Assembler::ElementAddressForIntIndex(
    ASSERT(rep == kUnboxedUint8);
      const Smi& constant = Smi::Cast(locs()->in(2).constant());
      } else if (value < 0) {
      __ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
      compiler::Label store_value, store_0xff;
      __ CompareImmediate(storedValueReg, compiler::Immediate(0xFF));
      __ xorq(storedValueReg, storedValueReg);
      __ LoadImmediate(storedValueReg, compiler::Immediate(0xFF));
  if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
      const Smi& constant = Smi::Cast(locs()->in(2).constant());
      __ movb(element_address,
              compiler::Immediate(static_cast<int8_t>(constant.Value())));
    if (rep == kUnboxedFloat) {
      __ movss(element_address, locs()->in(2).fpu_reg());
    } else if (rep == kUnboxedDouble) {
      __ movsd(element_address, locs()->in(2).fpu_reg());
      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
      __ movups(element_address, locs()->in(2).fpu_reg());
  } else if (class_id() == kArrayCid) {
      __ leaq(slot, element_address);
      __ StoreCompressedIntoArray(array, slot, value, CanValueBeSmi());
      __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
      __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
    __ leaq(TMP, element_address);
    __ MsanUnpoison(TMP, length_in_bytes);
  const intptr_t kNumInputs = 1;
  const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
  const bool needs_value_cid_temp_reg =
      (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;
  intptr_t num_temps = 0;
  if (needs_value_cid_temp_reg) {
  if (needs_field_temp_reg) {
  LocationSummary* summary = new (zone)
  for (intptr_t i = 0; i < num_temps; i++) {
  ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
  ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
  const bool emit_full_guard =
  const bool needs_value_cid_temp_reg =
      (value_cid == kDynamicCid) && (emit_full_guard || (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;
  const Register field_reg = needs_field_temp_reg
  compiler::Label ok, fail_label;
  compiler::Label* deopt = nullptr;
  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
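  // With the full guard, the current guarded_cid_/is_nullable_ values are
  // loaded from the Field object and compared against the incoming value's
  // cid; on a mismatch the guard state is either widened in place or the
  // UpdateFieldCid runtime entry is called. The optimized form only re-checks
  // the already known cid.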
  if (emit_full_guard) {
    compiler::FieldAddress field_cid_operand(field_reg,
    compiler::FieldAddress field_nullability_operand(
      LoadValueCid(compiler, value_cid_reg, value_reg);
      __ cmpl(value_cid_reg, field_cid_operand);
      __ cmpl(value_cid_reg, field_nullability_operand);
    } else if (value_cid == kNullCid) {
      __ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
      __ cmpl(field_cid_operand, compiler::Immediate(value_cid));
      const bool is_complicated_field =
      if (!is_complicated_field) {
        __ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
        __ movl(field_cid_operand, value_cid_reg);
        __ movl(field_nullability_operand, value_cid_reg);
        __ movl(field_cid_operand, compiler::Immediate(value_cid));
        __ movl(field_nullability_operand, compiler::Immediate(value_cid));
    if (deopt == nullptr) {
      __ pushq(field_reg);
      __ pushq(value_reg);
      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
    ASSERT(deopt != nullptr);
    if (field_cid != kSmiCid) {
      __ LoadClassId(value_cid_reg, value_reg);
      __ CompareImmediate(value_cid_reg, compiler::Immediate(field_cid));
      __ CompareObject(value_reg, Object::null_object());
  } else if (value_cid == field_cid) {
    ASSERT(value_cid != nullability);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 3;
  LocationSummary* summary = new (zone)
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
      compiler::FieldAddress(
  __ LoadCompressedSmi(
  __ cmpq(offset_reg, compiler::Immediate(0));
  __ OBJ(cmp)(length_reg,
              compiler::Address(value_reg, offset_reg, TIMES_1, 0));
  if (deopt == nullptr) {
    __ pushq(field_reg);
    __ pushq(value_reg);
    __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
  ASSERT(field().guarded_list_length_in_object_offset() !=
  __ CompareImmediate(
      compiler::FieldAddress(value_reg,
                             field().guarded_list_length_in_object_offset()),
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  ASSERT(field().static_type_exactness_state().IsTracking());
  if (!field().static_type_exactness_state().NeedsFieldGuard()) {
  compiler::Label* deopt =
  const Field& original =
  __ LoadObject(temp, original);
  __ movsxb(temp, compiler::FieldAddress(
  compiler::Label call_runtime;
  if (field().static_type_exactness_state().IsUninitialized()) {
  __ movq(temp, compiler::FieldAddress(value_reg, temp,
                     .GetInstanceTypeArguments(compiler->thread())));
  if (deopt != nullptr) {
    __ PushObject(original);
    __ pushq(value_reg);
    __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
static void InlineArrayAllocation(FlowGraphCompiler* compiler,
                                  intptr_t num_elements,
                                  compiler::Label* slow_path,
                                  compiler::Label* done) {
  const int kInlineArraySize = 12;
  __ TryAllocateArray(kArrayCid, instance_size, slow_path,
  __ StoreCompressedIntoObjectNoBarrier(
  __ StoreCompressedIntoObjectNoBarrier(
  if (num_elements > 0) {
    const intptr_t array_size = instance_size - sizeof(UntaggedArray);
    __ LoadObject(R12, Object::null_object());
                             sizeof(UntaggedArray)));
    intptr_t current_offset = 0;
    while (current_offset < array_size) {
      __ StoreCompressedIntoObjectNoBarrier(
          compiler::Address(RDI, current_offset), R12);
    compiler::Label init_loop;
        compiler::Address(RDI, 0), R12);
  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
    const Class& list_class =
  compiler::Label slow_path, done;
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_array_stub =
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 2;
  LocationSummary* locs = new (zone) LocationSummary(
class AllocateContextSlowPath
    : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
  explicit AllocateContextSlowPath(
      AllocateUninitializedContextInstr* instruction)
      : TemplateSlowPathCode(instruction) {}
  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    __ Comment("AllocateContextSlowPath");
    LocationSummary* locs = instruction()->locs();
    locs->live_registers()->Remove(locs->out(0));
    auto slow_path_env = compiler->SlowPathEnvironmentFor(
    ASSERT(slow_path_env != nullptr);
    auto object_store = compiler->isolate_group()->object_store();
        compiler->zone(), object_store->allocate_context_stub());
        R10, compiler::Immediate(instruction()->num_context_variables()));
    compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
                               UntaggedPcDescriptors::kOther, locs,
                               instruction()->deopt_id(), slow_path_env);
    compiler->RestoreLiveRegisters(instruction()->locs());
    __ jmp(exit_label());
  AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
                        locs()->temp(1).reg());
  __ Jump(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_context_stub =
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& clone_context_stub =
                         UntaggedPcDescriptors::kOther, locs(),
  compiler->AddExceptionHandler(this);
  const intptr_t fp_sp_dist =
  __ leaq(RSP, compiler::Address(RBP, fp_sp_dist));
  if (raw_exception_var_ != nullptr) {
    __ movq(compiler::Address(RBP,
                              raw_exception_var_)),
  if (raw_stacktrace_var_ != nullptr) {
    __ movq(compiler::Address(RBP,
                              raw_stacktrace_var_)),
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,
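// Slow path for CheckStackOverflow: either tails into the shared stack
// overflow stub or saves the live registers and calls the
// InterruptOrStackOverflow runtime entry; it also doubles as the OSR entry
// point when OSR is enabled.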
class CheckStackOverflowSlowPath
    : public TemplateSlowPathCode<CheckStackOverflowInstr> {
  static constexpr intptr_t kNumSlowPathArgs = 0;
  explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
      : TemplateSlowPathCode(instruction) {}
  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
      __ Comment("CheckStackOverflowSlowPathOsr");
      __ Bind(osr_entry_label());
    __ Comment("CheckStackOverflowSlowPath");
    const bool using_shared_stub =
        instruction()->locs()->call_on_shared_slow_path();
    if (!using_shared_stub) {
      compiler->SaveLiveRegisters(instruction()->locs());
        compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
    if (using_shared_stub) {
        ASSERT(__ constant_pool_allowed());
        __ set_constant_pool_allowed(false);
        __ EnterDartFrame(0);
      const uword entry_point_offset =
          instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
      __ call(compiler::Address(THR, entry_point_offset));
      compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
                                     instruction()->deopt_id(),
                                     instruction()->source());
        __ LeaveDartFrame();
        __ set_constant_pool_allowed(true);
      __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
          instruction()->source(), instruction()->deopt_id(),
          UntaggedPcDescriptors::kOther, instruction()->locs(), env);
        instruction()->in_loop()) {
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
                                     instruction()->deopt_id(),
                                     InstructionSource());
    compiler->pending_deoptimization_env_ = nullptr;
    if (!using_shared_stub) {
      compiler->RestoreLiveRegisters(instruction()->locs());
    __ jmp(exit_label());
  compiler::Label* osr_entry_label() {
    return &osr_entry_label_;
  compiler::Label osr_entry_label_;
  CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
    __ LoadObject(temp, compiler->parsed_function().function());
    const intptr_t configured_optimization_counter_threshold =
        compiler->thread()->isolate_group()->optimization_counter_threshold();
    const int32_t threshold =
        configured_optimization_counter_threshold * (loop_depth() + 1);
    __ incl(compiler::FieldAddress(temp, Function::usage_counter_offset()));
    __ cmpl(compiler::FieldAddress(temp, Function::usage_counter_offset()),
            compiler::Immediate(threshold));
  if (compiler->ForceSlowPathForStackOverflow()) {
    __ jmp(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
                             BinarySmiOpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();
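  // Constant shift counts are limited to the Smi bit width (0x3F, or 0x1F
  // with compressed pointers); a non-constant count is range-checked and,
  // when the shift can overflow, the result is checked so we can deoptimize
  // on overflow.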
  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)
  if (locs.in(1).IsConstant()) {
    const Object& constant = locs.in(1).constant();
    ASSERT(constant.IsSmi());
#if !defined(DART_COMPRESSED_POINTERS)
    const intptr_t kCountLimit = 0x3F;
    const intptr_t kCountLimit = 0x1F;
    const intptr_t value = Smi::Cast(constant).Value();
      if (shift_left->can_overflow()) {
        __ OBJ(shl)(left, compiler::Immediate(1));
  Register temp = locs.temp(0).reg();
  Range* right_range = shift_left->right_range();
  if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
    const Object& obj = shift_left->left()->BoundConstant();
      const intptr_t left_int = Smi::Cast(obj).Value();
      if (left_int == 0) {
        __ CompareImmediate(right, compiler::Immediate(0),
    const bool right_needs_check =
    if (right_needs_check) {
  const bool right_needs_check =
  if (!shift_left->can_overflow()) {
    if (right_needs_check) {
      const bool right_may_be_negative =
          (right_range == nullptr) || !right_range->IsPositive();
      if (right_may_be_negative) {
        ASSERT(shift_left->CanDeoptimize());
        __ CompareImmediate(right, compiler::Immediate(0),
      compiler::Label done, is_not_zero;
      __ xorq(left, left);
    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());
  Register temp = locs.temp(0).reg();
  ASSERT(!shift_left->is_truncating());
static bool CanBeImmediate(const Object& constant) {
  return constant.IsSmi() &&
  return constant.IsSmi() && (Smi::Cast(constant).Value() == value);
  const intptr_t kNumInputs = 2;
  if ((right_constant != nullptr) && (op_kind() != Token::kTRUNCDIV) &&
#if defined(DART_COMPRESSED_POINTERS)
      (op_kind() != Token::kUSHR) &&
      CanBeImmediate(right_constant->value())) {
    const intptr_t kNumTemps = 0;
    LocationSummary* summary = new (zone)
  if (op_kind() == Token::kTRUNCDIV) {
    const intptr_t kNumTemps = 1;
    LocationSummary* summary = new (zone)
  } else if (op_kind() == Token::kMOD) {
    const intptr_t kNumTemps = 1;
    LocationSummary* summary = new (zone)
  } else if ((op_kind() == Token::kSHR)
#if !defined(DART_COMPRESSED_POINTERS)
             || (op_kind() == Token::kUSHR)
    const intptr_t kNumTemps = 0;
    LocationSummary* summary = new (zone)
#if defined(DART_COMPRESSED_POINTERS)
  } else if (op_kind() == Token::kUSHR) {
    const intptr_t kNumTemps = 1;
    LocationSummary* summary = new (zone)
    if ((right_constant != nullptr) &&
        CanBeImmediate(right_constant->value())) {
  } else if (op_kind() == Token::kSHL) {
    const bool shiftBy1 =
        (right_constant != nullptr) && IsSmiValue(right_constant->value(), 1);
    const intptr_t kNumTemps = (can_overflow() && !shiftBy1) ? 1 : 0;
    LocationSummary* summary = new (zone)
    if (kNumTemps == 1) {
    const intptr_t kNumTemps = 0;
    LocationSummary* summary = new (zone)
    if (constant != nullptr) {
  if (op_kind() == Token::kSHL) {
  compiler::Label* deopt = nullptr;
    ASSERT(constant.IsSmi());
        const intptr_t value = Smi::Cast(constant).Value();
      case Token::kTRUNCDIV: {
        const intptr_t value = Smi::Cast(constant).Value();
        const intptr_t shift_count =
#if !defined(DART_COMPRESSED_POINTERS)
        __ sarq(temp, compiler::Immediate(63));
        __ sarl(temp, compiler::Immediate(31));
#if !defined(DART_COMPRESSED_POINTERS)
        __ shrq(temp, compiler::Immediate(64 - shift_count));
        __ shrl(temp, compiler::Immediate(32 - shift_count));
        __ OBJ(sar)(left, compiler::Immediate(shift_count));
      case Token::kBIT_AND: {
        __ AndImmediate(left, compiler::Immediate(imm));
      case Token::kBIT_OR: {
        __ OrImmediate(left, compiler::Immediate(imm));
      case Token::kBIT_XOR: {
        __ XorImmediate(left, compiler::Immediate(imm));
#if !defined(DART_COMPRESSED_POINTERS)
        const intptr_t kCountLimit = 0x3F;
        const intptr_t kCountLimit = 0x1F;
        const intptr_t value = Smi::Cast(constant).Value();
      case Token::kUSHR: {
        const intptr_t kCountLimit = 0x3F;
        const intptr_t value = Smi::Cast(constant).Value();
        __ SmiUntagAndSignExtend(left);
          __ shlq(left, compiler::Immediate(1));
          if (deopt != nullptr) {
#if defined(DART_COMPRESSED_POINTERS)
  if (locs()->in(1).IsStackSlot()) {
      case Token::kBIT_AND: {
      case Token::kBIT_OR: {
      case Token::kBIT_XOR: {
    case Token::kBIT_AND: {
    case Token::kBIT_OR: {
    case Token::kBIT_XOR: {
    case Token::kTRUNCDIV: {
      compiler::Label not_32bit, done;
#if !defined(DART_COMPRESSED_POINTERS)
      __ CompareImmediate(result, compiler::Immediate(0x4000000000000000));
      __ cmpl(result, compiler::Immediate(0x40000000));
      compiler::Label not_32bit, div_done;
#if !defined(DART_COMPRESSED_POINTERS)
#if !defined(DART_COMPRESSED_POINTERS)
      compiler::Label all_done;
      __ CompareImmediate(right, compiler::Immediate(0),
#if !defined(DART_COMPRESSED_POINTERS)
      const intptr_t kCountLimit = 0x3F;
      const intptr_t kCountLimit = 0x1F;
      __ CompareImmediate(right, compiler::Immediate(kCountLimit));
      compiler::Label count_ok;
      __ LoadImmediate(right, compiler::Immediate(kCountLimit));
    case Token::kUSHR: {
      if (deopt != nullptr) {
        __ CompareImmediate(right, compiler::Immediate(0),
      const intptr_t kCountLimit = 0x3F;
      compiler::Label done;
      __ CompareImmediate(right, compiler::Immediate(kCountLimit),
      compiler::Label count_ok;
      __ SmiUntagAndSignExtend(left);
        __ shlq(left, compiler::Immediate(1));
        if (deopt != nullptr) {
#if defined(DART_COMPRESSED_POINTERS)
  ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
  const intptr_t kNumInputs = 2;
      (left_cid != kSmiCid) && (right_cid != kSmiCid);
  const intptr_t kNumTemps = need_temp ? 1 : 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  if (this->left()->definition() == this->right()->definition()) {
  } else if (left_cid == kSmiCid) {
  } else if (right_cid == kSmiCid) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone) LocationSummary(
    case kUnboxedDouble:
      __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), value);
    case kUnboxedFloat: {
      __ movsd(compiler::FieldAddress(out_reg, ValueOffset()), FpuTMP);
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      __ movups(compiler::FieldAddress(out_reg, ValueOffset()), value);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  const bool needs_writable_input =
  LocationSummary* summary = new (zone)
#if !defined(DART_COMPRESSED_POINTERS)
void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
    case kUnboxedInt64: {
      __ movq(result, compiler::FieldAddress(box, ValueOffset()));
    case kUnboxedDouble: {
      __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
    case kUnboxedFloat: {
      __ movsd(result, compiler::FieldAddress(box, ValueOffset()));
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4: {
      __ movups(result, compiler::FieldAddress(box, ValueOffset()));
void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
    case kUnboxedInt32: {
    case kUnboxedInt64: {
      __ SmiUntagAndSignExtend(result, box);
    case kUnboxedDouble: {
void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
  const intptr_t kNumInputs = 1;
  LocationSummary* summary = new (zone)
  if (kNumTemps > 0) {
  compiler::Label* deopt =
  if (value_cid == kSmiCid) {
  } else if (value_cid == kMintCid) {
  compiler::Label done;
#if !defined(DART_COMPRESSED_POINTERS)
  compiler::Label not_smi;
  __ SmiUntagAndSignExtend(value);
  compiler::Label done;
#if !defined(DART_COMPRESSED_POINTERS)
  compiler::Label not_smi;
  __ SmiUntagAndSignExtend(value);
  __ CompareClassId(value, kMintCid);
#if !defined(DART_COMPRESSED_POINTERS)
  const bool kMayAllocateMint = false;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,
  if (kMayAllocateMint) {
#if !defined(DART_COMPRESSED_POINTERS)
  compiler::Label done;
  __ TestImmediate(value, compiler::Immediate(0xC0000000LL));
  const intptr_t kNumInputs = 1;
  const bool stubs_in_vm_isolate =
      object_store->allocate_mint_with_fpu_regs_stub()
          ->InVMIsolateHeap() ||
      object_store->allocate_mint_without_fpu_regs_stub()
          ->InVMIsolateHeap();
  const bool shared_slow_path_call =
  LocationSummary* summary = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  } else if (shared_slow_path_call) {
#if !defined(DART_COMPRESSED_POINTERS)
  compiler::Label done;
  compiler::Label done;
  __ sarq(temp, compiler::Immediate(30));
  __ addq(temp, compiler::Immediate(1));
  __ cmpq(temp, compiler::Immediate(2));
                 compiler->intrinsic_slow_path_label(),
  } else if (locs()->call_on_shared_slow_path()) {
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
    auto object_store = compiler->isolate_group()->object_store();
        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
                      : object_store->allocate_mint_without_fpu_regs_stub());
    ASSERT(!locs()->live_registers()->ContainsRegister(
    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
      __ LeaveDartFrame();
      __ set_constant_pool_allowed(true);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps =
      op_kind() == MethodRecognizer::kDouble_getIsNegative
          : (op_kind() == MethodRecognizer::kDouble_getIsInfinite ? 1 : 0);
  LocationSummary* summary = new (zone)
  if (kNumTemps > 0) {
    if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {
                                                 BranchLabels labels) {
  const bool is_negated = kind() != Token::kEQ;
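  // getIsNaN relies on comisd's parity flag (unordered compare), getIsInfinite
  // masks off the sign bit and compares against the infinity bit pattern
  // 0x7FF0000000000000, and getIsNegative compares against zero, handles NaN
  // via the parity flag, and then tests the extracted sign bit.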
    case MethodRecognizer::kDouble_getIsNaN: {
    case MethodRecognizer::kDouble_getIsInfinite: {
      __ movq(temp, compiler::Address(RSP, 0));
      __ AndImmediate(temp, compiler::Immediate(0x7FFFFFFFFFFFFFFFLL));
      __ CompareImmediate(temp, compiler::Immediate(0x7FF0000000000000LL));
    case MethodRecognizer::kDouble_getIsNegative: {
      compiler::Label not_zero;
      __ xorpd(temp_fpu, temp_fpu);
      __ j(PARITY_EVEN, is_negated ? labels.true_label : labels.false_label);
      __ TestImmediate(temp, compiler::Immediate(1));
#define DEFINE_EMIT(Name, Args)                                               \
  static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr,     \
                         PP_APPLY(PP_UNPACK, Args))
#define SIMD_OP_FLOAT_ARITH(V, Name, op)                                      \
  V(Float32x4##Name, op##ps)                                                  \
  V(Float64x2##Name, op##pd)
#define SIMD_OP_SIMPLE_BINARY(V)                                              \
  SIMD_OP_FLOAT_ARITH(V, Add, add)                                            \
  SIMD_OP_FLOAT_ARITH(V, Sub, sub)                                            \
  SIMD_OP_FLOAT_ARITH(V, Mul, mul)                                            \
  SIMD_OP_FLOAT_ARITH(V, Div, div)                                            \
  SIMD_OP_FLOAT_ARITH(V, Min, min)                                            \
  SIMD_OP_FLOAT_ARITH(V, Max, max)                                            \
  V(Int32x4Add, addpl)                                                        \
  V(Int32x4Sub, subpl)                                                        \
  V(Int32x4BitAnd, andps)                                                     \
  V(Int32x4BitOr, orps)                                                       \
  V(Int32x4BitXor, xorps)                                                     \
  V(Float32x4Equal, cmppseq)                                                  \
  V(Float32x4NotEqual, cmppsneq)                                              \
  V(Float32x4LessThan, cmppslt)                                               \
  V(Float32x4LessThanOrEqual, cmppsle)
DEFINE_EMIT(SimdBinaryOp,
  switch (instr->kind()) {
#define EMIT(Name, op)                                                        \
  case SimdOpInstr::k##Name:                                                  \
    __ op(left, right);                                                       \
    SIMD_OP_SIMPLE_BINARY(EMIT)
    case SimdOpInstr::kFloat32x4Scale:
      __ cvtsd2ss(left, left);
      __ shufps(left, left, compiler::Immediate(0x00));
      __ mulps(left, right);
    case SimdOpInstr::kFloat32x4ShuffleMix:
    case SimdOpInstr::kInt32x4ShuffleMix:
      __ shufps(left, right, compiler::Immediate(instr->mask()));
    case SimdOpInstr::kFloat64x2FromDoubles:
      __ shufpd(left, right, compiler::Immediate(0x0));
    case SimdOpInstr::kFloat64x2Scale:
      __ shufpd(right, right, compiler::Immediate(0x00));
      __ mulpd(left, right);
    case SimdOpInstr::kFloat64x2WithX:
    case SimdOpInstr::kFloat64x2WithY: {
             (SimdOpInstr::kFloat64x2WithX + 1));
      const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat64x2WithX;
      ASSERT(0 <= lane_index && lane_index < 2);
      __ movups(compiler::Address(RSP, 0), left);
      __ movups(left, compiler::Address(RSP, 0));
    case SimdOpInstr::kFloat32x4WithX:
    case SimdOpInstr::kFloat32x4WithY:
    case SimdOpInstr::kFloat32x4WithZ:
    case SimdOpInstr::kFloat32x4WithW: {
          SimdOpInstr::kFloat32x4WithY == (SimdOpInstr::kFloat32x4WithX + 1) &&
          SimdOpInstr::kFloat32x4WithZ == (SimdOpInstr::kFloat32x4WithX + 2) &&
          SimdOpInstr::kFloat32x4WithW == (SimdOpInstr::kFloat32x4WithX + 3));
      const intptr_t lane_index = instr->kind() - SimdOpInstr::kFloat32x4WithX;
      ASSERT(0 <= lane_index && lane_index < 4);
      __ cvtsd2ss(left, left);
      __ movups(compiler::Address(RSP, 0), right);
      __ movups(left, compiler::Address(RSP, 0));
#define SIMD_OP_SIMPLE_UNARY(V)                                               \
  SIMD_OP_FLOAT_ARITH(V, Sqrt, sqrt)                                          \
  SIMD_OP_FLOAT_ARITH(V, Negate, negate)                                      \
  SIMD_OP_FLOAT_ARITH(V, Abs, abs)                                            \
  V(Float32x4Reciprocal, rcpps)                                               \
  V(Float32x4ReciprocalSqrt, rsqrtps)
  switch (instr->kind()) {
#define EMIT(Name, op)                                                        \
  case SimdOpInstr::k##Name:                                                  \
    __ op(value, value);                                                      \
    SIMD_OP_SIMPLE_UNARY(EMIT)
    case SimdOpInstr::kFloat32x4GetX:
    case SimdOpInstr::kFloat32x4GetY:
    case SimdOpInstr::kFloat32x4GetZ:
    case SimdOpInstr::kFloat32x4GetW:
    case SimdOpInstr::kFloat32x4Shuffle:
    case SimdOpInstr::kInt32x4Shuffle:
      __ shufps(value, value, compiler::Immediate(instr->mask()));
    case SimdOpInstr::kFloat32x4Splat:
    case SimdOpInstr::kFloat32x4ToFloat64x2:
    case SimdOpInstr::kFloat64x2ToFloat32x4:
    case SimdOpInstr::kInt32x4ToFloat32x4:
    case SimdOpInstr::kFloat32x4ToInt32x4:
    case SimdOpInstr::kFloat64x2GetX:
    case SimdOpInstr::kFloat64x2GetY:
    case SimdOpInstr::kFloat64x2Splat:
  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4GetSignMask:
    case SimdOpInstr::kInt32x4GetSignMask:
    case SimdOpInstr::kFloat64x2GetSignMask:
            Float32x4FromDoubles,
  for (intptr_t i = 0; i < 4; i++) {
    __ cvtsd2ss(out, instr->locs()->in(i).fpu_reg());
  __ movups(out, compiler::Address(RSP, 0));
DEFINE_EMIT(Float32x4Clamp,
DEFINE_EMIT(Float64x2Clamp,
DEFINE_EMIT(Int32x4FromInts,
  for (intptr_t i = 0; i < 4; i++) {
DEFINE_EMIT(Int32x4FromBools,
                        Temp<Register> temp)) {
  for (intptr_t i = 0; i < 4; i++) {
    compiler::Label done, load_false;
    __ xorq(temp, temp);
          compiler::Address(THR, out, TIMES_8, Thread::bool_true_offset()));
DEFINE_EMIT(Int32x4GetFlagZorW,
  if (instr->kind() == SimdOpInstr::kInt32x4GetFlagW) {
    __ shrq(out, compiler::Immediate(32));
  if (instr->kind() == SimdOpInstr::kInt32x4GetFlagY) {
    __ shrq(out, compiler::Immediate(32));
      SimdOpInstr::kInt32x4WithFlagY == (SimdOpInstr::kInt32x4WithFlagX + 1) &&
      SimdOpInstr::kInt32x4WithFlagZ == (SimdOpInstr::kInt32x4WithFlagX + 2) &&
      SimdOpInstr::kInt32x4WithFlagW == (SimdOpInstr::kInt32x4WithFlagX + 3));
  const intptr_t lane_index = instr->kind() - SimdOpInstr::kInt32x4WithFlagX;
  ASSERT(0 <= lane_index && lane_index < 4);
  __ movups(compiler::Address(RSP, 0), mask);
  __ xorq(temp, temp);
  __ movups(mask, compiler::Address(RSP, 0));
DEFINE_EMIT(Int32x4Select,
            Temp<XmmRegister> temp)) {
  __ movaps(temp, mask);
  __ notps(temp, temp);
  __ andps(mask, trueValue);
  __ andps(temp, falseValue);
  __ orps(mask, temp);
#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE)                                  \
  SIMD_OP_SIMPLE_BINARY(CASE)                                                 \
  CASE(Float32x4Scale)                                                        \
  CASE(Float32x4ShuffleMix)                                                   \
  CASE(Int32x4ShuffleMix)                                                     \
  CASE(Float64x2FromDoubles)                                                  \
  CASE(Float64x2Scale)                                                        \
  CASE(Float64x2WithX)                                                        \
  CASE(Float64x2WithY)                                                        \
  CASE(Float32x4WithX)                                                        \
  CASE(Float32x4WithY)                                                        \
  CASE(Float32x4WithZ)                                                        \
  CASE(Float32x4WithW)                                                        \
  ____(SimdBinaryOp)                                                          \
  SIMD_OP_SIMPLE_UNARY(CASE)                                                  \
  CASE(Float32x4GetX)                                                         \
  CASE(Float32x4GetY)                                                         \
  CASE(Float32x4GetZ)                                                         \
  CASE(Float32x4GetW)                                                         \
  CASE(Float32x4Shuffle)                                                      \
  CASE(Int32x4Shuffle)                                                        \
  CASE(Float32x4Splat)                                                        \
  CASE(Float32x4ToFloat64x2)                                                  \
  CASE(Float64x2ToFloat32x4)                                                  \
  CASE(Int32x4ToFloat32x4)                                                    \
  CASE(Float32x4ToInt32x4)                                                    \
  CASE(Float64x2GetX)                                                         \
  CASE(Float64x2GetY)                                                         \
  CASE(Float64x2Splat)                                                        \
  CASE(Float32x4GetSignMask)                                                  \
  CASE(Int32x4GetSignMask)                                                    \
  CASE(Float64x2GetSignMask)                                                  \
  ____(SimdGetSignMask)                                                       \
  SIMPLE(Float32x4FromDoubles)                                                \
  SIMPLE(Int32x4FromInts)                                                     \
  SIMPLE(Int32x4FromBools)                                                    \
  SIMPLE(Float32x4Zero)                                                       \
  SIMPLE(Float64x2Zero)                                                       \
  SIMPLE(Float32x4Clamp)                                                      \
  SIMPLE(Float64x2Clamp)                                                      \
  CASE(Int32x4GetFlagX)                                                       \
  CASE(Int32x4GetFlagY)                                                       \
  ____(Int32x4GetFlagXorY)                                                    \
  CASE(Int32x4GetFlagZ)                                                       \
  CASE(Int32x4GetFlagW)                                                       \
  ____(Int32x4GetFlagZorW)                                                    \
  CASE(Int32x4WithFlagX)                                                      \
  CASE(Int32x4WithFlagY)                                                      \
  CASE(Int32x4WithFlagZ)                                                      \
  CASE(Int32x4WithFlagW)                                                      \
  ____(Int32x4WithFlag)                                                       \
  SIMPLE(Int32x4Select)
#define CASE(Name, ...) case k##Name:
    return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
#define SIMPLE(Name) CASE(Name) EMIT(Name)
    case SimdOpInstr::kFloat32x4GreaterThan:
    case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
#define CASE(Name, ...) case k##Name:
    InvokeEmitter(compiler, this, &Emit##Name);                               \
#define SIMPLE(Name) CASE(Name) EMIT(Name)
    case SimdOpInstr::kFloat32x4GreaterThan:
    case SimdOpInstr::kFloat32x4GreaterThanOrEqual:
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::LeafRuntimeScope rt(compiler->assembler(),
  const intptr_t kNumInputs = 1;
    case Token::kNEGATE: {
      compiler::Label* deopt =
    case Token::kBIT_NOT:
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  if (op_kind() == Token::kSQUARE) {
    case Token::kNEGATE:
    case Token::kSQUARE:
    case Token::kTRUNCATE:
    case Token::kCEILING:
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
         (op_kind() == MethodRecognizer::kMathMax));
  const bool is_min = op_kind() == MethodRecognizer::kMathMin;
      is_min ? TokenKindToDoubleCondition(Token::kLT)
             : TokenKindToDoubleCondition(Token::kGT);
    __ movq(temp, compiler::Address(THR, Thread::double_nan_address_offset()));
    __ movsd(result, compiler::Address(temp, 0));
  compiler::Label left_is_negative;
  __ testq(temp, compiler::Immediate(1));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* result = new (zone) LocationSummary(
  DoubleToIntegerSlowPath* slow_path =
      new DoubleToIntegerSlowPath(this, value_double);
  compiler->AddSlowPathCode(slow_path);
  if (FLAG_use_slow_path) {
    __ jmp(slow_path->entry_label());
    __ Bind(slow_path->exit_label());
                      compiler::Immediate(0));
  __ j(EQUAL, slow_path->entry_label());
    case MethodRecognizer::kDoubleFloorToInt:
    case MethodRecognizer::kDoubleCeilToInt:
  __ OBJ(shl)(temp, compiler::Immediate(1));
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* result = new (zone)
  compiler::Label* deopt =
  compiler::Label do_call, done;
  __ OBJ(shl)(temp, compiler::Immediate(1));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  __ cvtsd2ss(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  __ cvtss2sd(locs()->out(0).fpu_reg(), locs()->in(0).fpu_reg());
  const intptr_t kNumTemps = 4;
  LocationSummary* result = new (zone)
  const intptr_t kNumTemps = 1;
  LocationSummary* result = new (zone)
static void InvokeDoublePow(FlowGraphCompiler* compiler,
                            InvokeMathCFunctionInstr* instr) {
  ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
  const intptr_t kInputCount = 2;
  ASSERT(instr->InputCount() == kInputCount);
  LocationSummary* locs = instr->locs();
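  // Handles common pow() cases inline: small integer exponents reduce to
  // multiplications of the base (note the 2.0 and 3.0 immediates below),
  // there is a sqrt fast path with the usual corner cases for zero and
  // infinity, and NaN/zero operands are filtered out before falling back to
  // the C library pow via a leaf runtime call.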
  __ xorps(zero_temp, zero_temp);
  compiler::Label check_base, skip_call;
  __ comisd(exp, zero_temp);
  compiler::Label return_base;
  __ LoadDImmediate(XMM0, 2.0);
  compiler::Label return_base_times_2;
  __ LoadDImmediate(XMM0, 3.0);
  __ Bind(&return_base_times_2);
  compiler::Label return_nan;
  compiler::Label try_sqrt;
  compiler::Label do_pow, return_zero;
  __ comisd(base, zero_temp);
    compiler::LeafRuntimeScope rt(compiler->assembler(),
    __ movaps(XMM0, locs->in(0).fpu_reg());
    rt.Call(instr->TargetFunction(), kInputCount);
    __ movaps(locs->out(0).fpu_reg(), XMM0);
    compiler::LeafRuntimeScope rt(compiler->assembler(),
  const intptr_t kNumInputs = 1;
  LocationSummary* summary =
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  Register result1 = pair->At(0).reg();
  Register result2 = pair->At(1).reg();
  compiler::Label not_32bit, done;
#if !defined(DART_COMPRESSED_POINTERS)
  __ movsxd(temp, left);
  __ cmpq(temp, left);
  __ movsxd(temp, right);
  __ cmpq(temp, right);
  __ CompareImmediate(RAX, compiler::Immediate(0x4000000000000000));
  __ cmpl(RAX, compiler::Immediate(0x40000000));
  compiler::Label all_done;
  __ cmpq(RDX, compiler::Immediate(0));
  if ((divisor_range() == nullptr) || divisor_range()->Overlaps(-1, 1)) {
    __ cmpq(right, compiler::Immediate(0));
    __ addq(RDX, right);
    __ subq(RDX, right);
  } else if (divisor_range()->IsPositive()) {
    __ addq(RDX, right);
    __ subq(RDX, right);
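// Computes the Dart integer hash for the value in RAX: multiply by the
// constant 0x2d51, combine the high and low words of the product, and mask
// the result to 30 bits (0x3fffffff) so the hash always fits in a Smi.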
5339static void EmitHashIntegerCodeSequence(FlowGraphCompiler*
compiler) {
5340 __ movq(
RDX, compiler::Immediate(0x2d51));
5344 __ shrq(
RDX, compiler::Immediate(32));
5346 __ andq(
RAX, compiler::Immediate(0x3fffffff));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 2;
  LocationSummary* summary = new (zone)
  compiler::Label hash_double;
  __ cvtsi2sdq(temp_fpu_reg, RAX);
  __ comisd(value, temp_fpu_reg);
  EmitHashIntegerCodeSequence(compiler);
  compiler::Label done;
  __ shrq(RDX, compiler::Immediate(32));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  __ SmiUntagAndSignExtend(RAX);
  EmitHashIntegerCodeSequence(compiler);
  const intptr_t kNumInputs = 1;
  const bool need_mask_temp = IsBitTest();
  const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
  LocationSummary* summary = new (zone)
  if (need_mask_temp) {
                                  compiler::Label* deopt) {
  __ CompareObject(locs()->in(0).reg(), Object::null_object());
                                  compiler::Label* deopt) {
  __ subq(biased_cid, compiler::Immediate(min));
  __ cmpq(biased_cid, compiler::Immediate(max - min));
  __ movq(mask_reg, compiler::Immediate(mask));
  __ btq(mask_reg, biased_cid);
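// Dense cid sets are checked with a single bit test: the class id is biased
// by the smallest cid in the set, range-checked against (max - min), and
// then tested with btq against a mask that has one bit set per accepted
// cid.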
int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
                                  compiler::Label* is_ok,
                                  compiler::Label* deopt,
                                  bool use_near_jump) {
  if (cid_start == cid_end) {
    __ cmpl(biased_cid, compiler::Immediate(cid_start - bias));
  __ addl(biased_cid, compiler::Immediate(bias - cid_start));
  __ cmpl(biased_cid, compiler::Immediate(cid_end - cid_start));
  __ j(no_match, deopt);
  if (use_near_jump) {
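// EmitCheckCid appears to keep a running bias on the class-id register so
// consecutive checks can use small immediates: a single cid is matched with
// one cmpl against (cid_start - bias), while a cid range is rebased with
// addl and then compared against (cid_end - cid_start) as an unsigned
// range check.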
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  __ BranchIfNotSmi(value, deopt);
  ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ CompareObject(value_reg, Object::null_object());
  __ BranchIf(EQUAL, slow_path->entry_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  if (cids_.IsSingleCid()) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  compiler::Label* deopt =
  if (length_loc.IsConstant() && index_loc.IsConstant()) {
    ASSERT((Smi::Cast(length_loc.constant()).Value() <=
            Smi::Cast(index_loc.constant()).Value()) ||
           (Smi::Cast(index_loc.constant()).Value() < 0));
  if (index_loc.IsConstant()) {
    const Smi& index = Smi::Cast(index_loc.constant());
  } else if (length_loc.IsConstant()) {
    const Smi& length = Smi::Cast(length_loc.constant());
    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);
  if (index_cid != kSmiCid) {
    __ BranchIfNotSmi(index, deopt);
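// The bound check is specialized on what is constant: when both index and
// length are compile-time constants the check is already known to fail
// (see the ASSERT), so the code simply deoptimizes; when only one of them
// is constant a single compare suffices, preceded by a Smi check on the
// index whenever its class is not already known to be kSmiCid.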
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ movq(TMP, compiler::FieldAddress(locs()->in(0).reg(),
  __ testq(TMP, compiler::Immediate(
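// A minimal reading of this check, assuming the FieldAddress being loaded
// is the object's header tags word: TMP receives the tags, the testq masks
// the immutability bit, and writes to unmodifiable objects are diverted to
// WriteErrorSlowPath.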
class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
  Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
                      Range* divisor_range)
      : ThrowErrorSlowPathCode(instruction,
                               kIntegerDivisionByZeroExceptionRuntimeEntry),
        is_mod_(instruction->op_kind() == Token::kMOD),
        divisor_range_(divisor_range),
        div_by_minus_one_label_(),
        adjust_sign_label_() {}

  void EmitNativeCode(FlowGraphCompiler* compiler) override {
    if (has_divide_by_zero()) {
    __ Comment("slow path %s operation (no throw)", name());
    if (has_divide_by_minus_one()) {
      __ Bind(div_by_minus_one_label());
      __ jmp(exit_label());
    if (has_adjust_sign()) {
      __ Bind(adjust_sign_label());
      __ testq(divisor_, divisor_);
      __ addq(RDX, divisor_);
      __ jmp(exit_label());
      __ subq(RDX, divisor_);
    } else if (divisor_range_->IsPositive()) {
      __ addq(RDX, divisor_);
      __ subq(RDX, divisor_);
      __ jmp(exit_label());

  const char* name() override { return "int64 divide"; }

  bool has_divide_by_minus_one() {
  bool has_adjust_sign() { return is_mod_; }
    return has_divide_by_zero() || has_divide_by_minus_one() ||

  compiler::Label* div_by_minus_one_label() {
    ASSERT(has_divide_by_minus_one());
    return &div_by_minus_one_label_;
  compiler::Label* adjust_sign_label() {
    ASSERT(has_adjust_sign());
    return &adjust_sign_label_;

  Range* divisor_range_;
  compiler::Label div_by_minus_one_label_;
  compiler::Label adjust_sign_label_;
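// This slow path covers the awkward cases of 64-bit division: a zero
// divisor throws through kIntegerDivisionByZeroExceptionRuntimeEntry, a -1
// divisor (where idiv could overflow on kMinInt64) is handled at
// div_by_minus_one_label(), and for Token::kMOD a negative remainder is
// made non-negative at adjust_sign_label() by adding or subtracting the
// divisor depending on its sign.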
static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
                                 BinaryInt64OpInstr* instruction,
  ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
  if (auto c = instruction->right()->definition()->AsConstant()) {
    if (c->value().IsInteger()) {
      const int64_t divisor = Integer::Cast(c->value()).AsInt64Value();
      if (divisor <= -2 || divisor >= 2) {
        compiler::Label pos;
        __ LoadImmediate(RAX, compiler::Immediate(magic));
        if (divisor > 0 && magic < 0) {
        } else if (divisor < 0 && magic > 0) {
        __ sarq(RDX, compiler::Immediate(shift));
        __ shrq(RDX, compiler::Immediate(63));
        if (op_kind == Token::kTRUNCDIV) {
        __ LoadImmediate(TMP, compiler::Immediate(divisor));
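// For a compile-time-constant divisor with |d| >= 2 this looks like the
// standard magic-number strength reduction (Hacker's Delight style): the
// dividend is multiplied by a precomputed 64-bit magic constant, the high
// half of the product is corrected when divisor and magic differ in sign,
// shifted right arithmetically by the precomputed amount, and the sign bit
// (the shrq by 63) is added back so the quotient rounds toward zero. For
// kMOD the divisor is presumably reloaded into TMP to compute
// dividend - quotient * divisor.
//
//   // Minimal sketch, assuming divisor > 0 and a precomputed (magic, shift):
//   int64_t hi = (int64_t)(((__int128)dividend * magic) >> 64);
//   if (magic < 0) hi += dividend;                    // sign-mismatch fix-up
//   int64_t q = (hi >> shift) + (int64_t)((uint64_t)hi >> 63);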
  Range* right_range = instruction->right()->definition()->range();
  Int64DivideSlowPath* slow_path =
      new (Z) Int64DivideSlowPath(instruction, right, right_range);
  if (slow_path->has_divide_by_zero()) {
    __ testq(right, right);
    __ j(EQUAL, slow_path->entry_label());
  if (slow_path->has_divide_by_minus_one()) {
    __ cmpq(right, compiler::Immediate(-1));
    __ j(EQUAL, slow_path->div_by_minus_one_label());
  compiler::Label div_64;
  compiler::Label div_merge;
  __ movsxd(RDX, left);
  __ movsxd(RDX, right);
  __ cmpq(RDX, right);
  if (op_kind == Token::kMOD) {
    __ j(LESS, slow_path->adjust_sign_label());
  if (slow_path->is_needed()) {
    __ Bind(slow_path->exit_label());
    compiler->AddSlowPathCode(slow_path);
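// The movsxd/cmpq pairs around div_64 / div_merge appear to test whether
// both operands already fit in 32 bits; when they do, the cheaper 32-bit
// divide form can be used and the two paths rejoin at div_merge. The
// explicit testq/cmpq guards in front are emitted only when the divisor's
// range cannot rule out 0 or -1.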
template <typename OperandType>
static void EmitInt64Arithmetic(FlowGraphCompiler* compiler,
                                const OperandType& right) {
      __ addq(left, right);
      __ subq(left, right);
    case Token::kBIT_AND:
      __ andq(left, right);
    case Token::kBIT_OR:
      __ orq(left, right);
    case Token::kBIT_XOR:
      __ xorq(left, right);
      __ imulq(left, right);
    case Token::kTRUNCDIV: {
      const intptr_t kNumInputs = 2;
      const intptr_t kNumTemps = 1;
      LocationSummary* summary = new (zone) LocationSummary(
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
                          temp.reg(), out.reg());
  } else if (right.IsConstant()) {
                       compiler::Immediate(value));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
    case Token::kBIT_NOT:
    case Token::kNEGATE:
static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
                                     const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
      __ sarq(left, compiler::Immediate(
      __ shrq(left, compiler::Immediate(shift));
      __ shlq(left, compiler::Immediate(shift));
static void EmitShiftInt64ByRCX(FlowGraphCompiler* compiler,
    case Token::kUSHR: {
static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
                                      const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
    __ xorl(left, left);
    case Token::kUSHR: {
      __ shrl(left, compiler::Immediate(shift));
      __ shll(left, compiler::Immediate(shift));
static void EmitShiftUint32ByRCX(FlowGraphCompiler* compiler,
    case Token::kUSHR: {
class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
  explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "int64 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    const Register out = instruction()->locs()->out(0).reg();
    ASSERT(out == instruction()->locs()->in(0).reg());
    compiler::Label throw_error;
    __ j(LESS, &throw_error);
    switch (instruction()->AsShiftInt64Op()->op_kind()) {
    __ jmp(exit_label());
    __ movq(compiler::Address(
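// Shift counts reach this slow path only when they are out of range: a
// negative count branches to throw_error and raises an argument error
// through kArgumentErrorUnboxedInt64RuntimeEntry (the offending shift
// amount is apparently stashed for the runtime by the movq above), while an
// over-large but non-negative count materializes the saturated result in
// place and returns through exit_label().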
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
                             locs()->in(1).constant());
  ShiftInt64OpSlowPath* slow_path = nullptr;
    slow_path = new (Z) ShiftInt64OpSlowPath(this);
    compiler->AddSlowPathCode(slow_path);
    __ cmpq(RCX, compiler::Immediate(kShiftCountLimit));
    __ j(ABOVE, slow_path->entry_label());
  if (slow_path != nullptr) {
    __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
                             locs()->in(1).constant());
  compiler::Label* deopt =
  __ cmpq(RCX, compiler::Immediate(kShiftCountLimit));
class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
  explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "uint32 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    const Register out = instruction()->locs()->out(0).reg();
    ASSERT(out == instruction()->locs()->in(0).reg());
    compiler::Label throw_error;
    __ j(LESS, &throw_error);
    __ jmp(exit_label());
    __ movq(compiler::Address(
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
                             locs()->in(1).constant());
  ShiftUint32OpSlowPath* slow_path = nullptr;
    slow_path = new (Z) ShiftUint32OpSlowPath(this);
    compiler->AddSlowPathCode(slow_path);
    __ cmpq(RCX, compiler::Immediate(kUint32ShiftCountLimit));
    __ j(ABOVE, slow_path->entry_label());
  if (slow_path != nullptr) {
    __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
                             locs()->in(1).constant());
  compiler::Label* deopt =
  compiler::Label cont;
  __ OBJ(cmp)(RCX, compiler::Immediate(kUint32ShiftCountLimit));
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
template <typename OperandType>
static void EmitIntegerArithmetic(FlowGraphCompiler* compiler,
                                  const OperandType& right) {
      __ addl(left, right);
      __ subl(left, right);
    case Token::kBIT_AND:
      __ andl(left, right);
    case Token::kBIT_OR:
      __ orl(left, right);
    case Token::kBIT_XOR:
      __ xorl(left, right);
      __ imull(left, right);
    case Token::kBIT_AND:
    case Token::kBIT_OR:
    case Token::kBIT_XOR:
  ASSERT(instr->op_kind() == Token::kBIT_NOT);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  if (from() == kUntagged || to() == kUntagged) {
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
  } else if (to() == kUnboxedInt64) {
    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
  const bool is_nop_conversion =
  if (is_nop_conversion) {
  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
    compiler::Label* deopt =
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    compiler::Label* deopt =
  } else if (to() == kUnboxedInt64) {
    if (from() == kUnboxedUint32) {
  if (entry != nullptr) {
    if (!compiler->CanFallThroughTo(entry)) {
      FATAL("Checked function entry must have no offset");
  if (!compiler->CanFallThroughTo(entry)) {
  if (FLAG_reorder_basic_blocks) {
                               InstructionSource());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  __ ExtendNonNegativeSmi(index_reg);
  __ LoadObject(offset_reg, offsets_);
      false, kTypedDataInt32ArrayCid,
      false, offset_reg, index_reg));
  const intptr_t kRIPRelativeLeaqSize = 7;
  const intptr_t entry_to_rip_offset = __ CodeSize() + kRIPRelativeLeaqSize;
  ASSERT(__ CodeSize() == entry_to_rip_offset);
  __ addq(TMP, offset_reg);
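// IndirectGoto dispatch: the target offset is loaded from a typed-data
// table indexed by the (Smi) selector, a 7-byte RIP-relative leaq
// materializes the address of the point just past itself (hence the
// CodeSize() assertion on entry_to_rip_offset), and adding the loaded
// offset to TMP yields the absolute jump target.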
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  LocationSummary* locs = new (zone)
Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
    BranchLabels labels,
    const Object& obj) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const Array& arguments_descriptor =
  if (FLAG_precompiled_mode) {
    __ movq(RCX, compiler::FieldAddress(
    __ movq(RCX, compiler::FieldAddress(
  if (!FLAG_precompiled_mode) {
                           UntaggedPcDescriptors::kOther, locs(), env());
  __ xorq(result, compiler::Immediate(
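// Boolean negation compiles to a single xorq because true and false are the
// only possible inputs; presumably the immediate is the mask of tagged-
// pointer bits in which the two Bool objects differ, so flipping those bits
// turns one into the other.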
  const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
  __ CallPatchable(StubCode::DebugStepCheck());