#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)

#define __ (compiler->assembler())->
#define Z (compiler->zone())
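// Shorthand macros used throughout this file: '__' expands to the current
// assembler and 'Z' to the compiler's zone allocator.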
    const Instruction* instr,
    LocationSummary* locs) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
    case kUnboxedInt64: {
    case kUnboxedDouble: {

  ASSERT(instr->RequiredInputRepresentation(
  __ AddShifted(TMP, instr->base_reg(), index,
  __ sx(value, compiler::Address(TMP, instr->offset()));
                  Fixed<Register, ARGS_DESC_REG>,
                  Temp<Register> temp)) {
  compiler->EmitTailCallToStub(instr->code());

  __ set_constant_pool_allowed(true);
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 2;
  LocationSummary* locs = new (zone)

                          compiler::Label* done) {
  __ BranchIfZero(length_reg, done);
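// Copies exactly |count| bytes from |src_reg| to |dest_reg| and advances both
// pointers by |count| (backwards when |reversed| is set).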
static void CopyBytes(FlowGraphCompiler* compiler,

  const intptr_t offset = (reversed ? -1 : 1) * count;
  const intptr_t initial = reversed ? offset : 0;
  __ lq(TMP, compiler::Address(src_reg, initial));
  __ sq(TMP, compiler::Address(dest_reg, initial));
  __ addi(dest_reg, dest_reg, offset);
  if (count == 4 * (XLEN / 8)) {
    auto const sz = OperandSizeFor(XLEN / 8);
    const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
    const intptr_t initial = reversed ? offset : 0;
    __ LoadFromOffset(TMP, src_reg, initial, sz);
    __ StoreToOffset(TMP, dest_reg, initial, sz);
    __ LoadFromOffset(TMP, src_reg, initial + 2 * offset, sz);
    __ LoadFromOffset(TMP2, src_reg, initial + 3 * offset, sz);
    __ addi(src_reg, src_reg, 4 * offset);
    __ StoreToOffset(TMP, dest_reg, initial + 2 * offset, sz);
    __ StoreToOffset(TMP2, dest_reg, initial + 3 * offset, sz);
    __ addi(dest_reg, dest_reg, 4 * offset);
  if (count == 2 * (XLEN / 8)) {
    auto const sz = OperandSizeFor(XLEN / 8);
    const intptr_t offset = (reversed ? -1 : 1) * (XLEN / 8);
    const intptr_t initial = reversed ? offset : 0;
    __ LoadFromOffset(TMP, src_reg, initial, sz);
    __ addi(src_reg, src_reg, 2 * offset);
    __ StoreToOffset(TMP, dest_reg, initial, sz);
    __ addi(dest_reg, dest_reg, 2 * offset);
  auto const sz = OperandSizeFor(count);
  const intptr_t offset = (reversed ? -1 : 1) * count;
  const intptr_t initial = reversed ? offset : 0;
  __ LoadFromOffset(TMP, src_reg, initial, sz);
  __ StoreToOffset(TMP, dest_reg, initial, sz);
  __ addi(dest_reg, dest_reg, offset);
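// Copies (and clears from length_reg) just enough leading bytes that the
// remaining length becomes a multiple of the word size, testing one length
// bit at a time.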
static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,

                                 compiler::Label* done) {
  const intptr_t base_shift =
  intptr_t tested_bits = 0;

  __ Comment("Copying until region is a multiple of word size");

    const intptr_t bytes = 1 << bit;
    const intptr_t tested_bit = bit + base_shift;
    tested_bits |= 1 << tested_bit;
    compiler::Label skip_copy;
    __ andi(TMP, length_reg, 1 << tested_bit);
    __ beqz(TMP, &skip_copy);
    CopyBytes(compiler, dest_reg, src_reg, bytes, reversed);

  __ andi(length_reg, length_reg, ~tested_bits);
  __ beqz(length_reg, done);
                                   compiler::Label* done,
                                   compiler::Label* copy_forwards) {
  const bool reversed = copy_forwards != nullptr;

    __ add(TMP, src_reg, length_reg);
  } else if (shift < 0) {
    __ srai(TMP, length_reg, -shift);
    __ slli(TMP, length_reg, shift);

  __ CompareRegisters(dest_reg, TMP);

  __ add(dest_reg, dest_reg, TMP);
  __ sub(dest_reg, dest_reg, src_reg);
  __ MoveRegister(src_reg, TMP);

  CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
                       unboxed_inputs_, reversed, done);
  const intptr_t loop_subtract =
      Utils::Maximum<intptr_t>(1, (XLEN / 8) / element_size_)
  __ Comment("Copying by multiples of word size");
  compiler::Label loop;

  switch (element_size_) {
      CopyBytes(compiler, dest_reg, src_reg, 4, reversed);
      CopyBytes(compiler, dest_reg, src_reg, 8, reversed);
      CopyBytes(compiler, dest_reg, src_reg, 16, reversed);

  __ subi(length_reg, length_reg, loop_subtract);
  __ bnez(length_reg, &loop);
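// Computes the start pointer for the copy: the untagged payload address of
// the array plus the start index scaled by the element size (folded into a
// single immediate when the start index is constant).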
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,

  if (array_rep != kTagged) {

    case kOneByteStringCid:
    case kTwoByteStringCid:

  ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
  if (start_loc.IsConstant()) {
    const auto& constant = start_loc.constant();
    ASSERT(constant.IsInteger());
    const int64_t start_value = Integer::Cast(constant).AsInt64Value();
    const intx_t add_value = Utils::AddWithWrapAround<intx_t>(
        Utils::MulWithWrapAround<intx_t>(start_value, element_size_), offset);
    __ AddImmediate(payload_reg, array_reg, add_value);

  const Register start_reg = start_loc.reg();
  __ AddShifted(payload_reg, array_reg, start_reg, shift);
  __ AddImmediate(payload_reg, offset);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  if (constant != nullptr && constant->HasZeroRepresentation()) {
  if (value.IsRegister()) {
  } else if (value.IsPairLocation()) {
    __ StoreToOffset(value.AsPairLocation()->At(1).reg(), SP,
                     location().AsPairLocation()->At(1).stack_index() *
    __ StoreToOffset(value.AsPairLocation()->At(0).reg(), SP,
                     location().AsPairLocation()->At(0).stack_index() *
  } else if (value.IsConstant()) {
      ASSERT(value.constant_instruction()->HasZeroRepresentation());
      ASSERT(value.constant_instruction()->HasZeroRepresentation());
                       location().AsPairLocation()->At(1).stack_index() *
                       location().AsPairLocation()->At(0).stack_index() *
      const Object& constant = value.constant();
      if (constant.IsNull()) {
      } else if (constant.IsSmi() && Smi::Cast(constant).Value() == 0) {
        __ LoadObject(TMP, constant);
      __ StoreToOffset(reg, SP,
  } else if (value.IsFpuRegister()) {
  } else if (value.IsStackSlot()) {
    const intptr_t value_offset = value.ToStackSlotOffset();
    __ LoadFromOffset(TMP, value.base_reg(), value_offset);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (locs()->in(0).IsRegister()) {
  } else if (locs()->in(0).IsPairLocation()) {

  if (compiler->parsed_function().function().IsAsyncFunction() ||
      compiler->parsed_function().function().IsAsyncGenerator()) {
    const Code& stub = GetReturnStub(compiler);

  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {

  const intptr_t fp_sp_dist =
  __ CheckFpSpDist(fp_sp_dist);
  __ LeaveDartFrame(fp_sp_dist);
  __ set_constant_pool_allowed(true);
static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {

  BranchLabels labels = {nullptr, nullptr, nullptr};
  const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);

  intptr_t true_value = if_true_;
  intptr_t false_value = if_false_;

  if (is_power_of_two_kind) {
    if (true_value == 0) {
    if (true_value == 0) {
      intptr_t temp = true_value;
      true_value = false_value;

  if (is_power_of_two_kind) {
    const intptr_t shift =
  if (false_value != 0) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const Array& arguments_descriptor =
  if (FLAG_precompiled_mode) {
    __ LoadFieldFromOffset(A1, T0,
  if (!FLAG_precompiled_mode) {
                                 UntaggedPcDescriptors::kOther, locs(), env());
  if (!locs()->out(0).IsInvalid()) {

                                     intptr_t pair_index) {
  if (destination.IsRegister()) {
      if (value_.IsSmi() &&
        __ LoadImmediate(destination.reg(), pair_index == 0
      __ LoadImmediate(destination.reg(), v);
      __ LoadObject(destination.reg(), value_);
  } else if (destination.IsFpuRegister()) {
      __ LoadSImmediate(dst, Double::Cast(value_).value());
      __ LoadDImmediate(dst, Double::Cast(value_).value());
  } else if (destination.IsDoubleStackSlot()) {
    const intptr_t dest_offset = destination.ToStackSlotOffset();
      __ StoreToOffset(ZR, destination.base_reg(), dest_offset);
      __ LoadDImmediate(FTMP, Double::Cast(value_).value());
      __ StoreDToOffset(FTMP, destination.base_reg(), dest_offset);
    ASSERT(destination.IsStackSlot());
    const intptr_t dest_offset = destination.ToStackSlotOffset();
      int64_t val = Integer::Cast(value_).AsInt64Value();
      __ LoadImmediate(tmp, val);
          bit_cast<int32_t, float>(Double::Cast(value_).value());
      if (value_.IsNull()) {
      } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
        __ LoadObject(tmp, value_);
    __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
  const bool is_unboxed_int =
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
  LocationSummary* locs = new (zone)
  if (is_unboxed_int) {

  if (!locs()->out(0).IsInvalid()) {
  auto const dst_type_loc =
  const intptr_t kNonChangeableInputRegs =
  const intptr_t kNumInputs = 4;
  const intptr_t kCpuRegistersToPreserve =
  const intptr_t kFpuRegistersToPreserve =
  LocationSummary* summary = new (zone) LocationSummary(
  summary->set_in(kInstancePos,
  summary->set_in(kDstTypePos, dst_type_loc);

  intptr_t next_temp = 0;
    const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
    if (should_preserve) {
      summary->set_temp(next_temp++,
    const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
    if (should_preserve) {

  auto object_store = compiler->isolate_group()->object_store();
  const auto& assert_boolean_stub =
  compiler::Label done;
                             UntaggedPcDescriptors::kOther, locs(),
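// Branch helper: when the false label is the fall-through only the true
// branch is emitted; otherwise the inverted condition is used, plus an
// unconditional jump if neither label falls through.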
static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
                                  BranchLabels labels) {
  if (labels.fall_through == labels.false_label) {
    __ BranchIf(true_condition, labels.true_label);
    __ BranchIf(false_condition, labels.false_label);
    if (labels.fall_through != labels.true_label) {
      __ j(labels.true_label);
                                     LocationSummary* locs,
                                     BranchLabels labels) {
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    if (left.IsConstant()) {
      true_condition = FlipCondition(true_condition);
    __ CompareObjectRegisters(left.reg(), right.reg());
  return true_condition;
                                      LocationSummary* locs,
                                      BranchLabels labels) {
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    if (left.IsConstant()) {
      true_condition = FlipCondition(true_condition);
    __ CompareImmediate(
        static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));
  return true_condition;
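// On RV32 an unboxed int64 occupies a register pair, so equality XORs the low
// and high words separately and ordered comparisons decide on the high words
// before falling back to the low words.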
                                       LocationSummary* locs,
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register right_lo = right_pair->At(0).reg();
  Register right_hi = right_pair->At(1).reg();

  __ xor_(TMP, left_lo, right_lo);
  __ xor_(TMP2, left_hi, right_hi);
  __ CompareImmediate(TMP, 0);
  if (kind == Token::kEQ) {
  } else if (kind == Token::kNE) {
                                         LocationSummary* locs,
                                         BranchLabels labels) {
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register right_lo = right_pair->At(0).reg();
  Register right_hi = right_pair->At(1).reg();

      __ bne(left_lo, right_lo, labels.false_label);
      __ CompareRegisters(left_hi, right_hi);
      __ bne(left_lo, right_lo, labels.true_label);
      __ CompareRegisters(left_hi, right_hi);
      __ blt(left_hi, right_hi, labels.true_label);
      __ bgt(left_hi, right_hi, labels.false_label);
      __ CompareRegisters(left_lo, right_lo);
      __ bgt(left_hi, right_hi, labels.true_label);
      __ blt(left_hi, right_hi, labels.false_label);
      __ CompareRegisters(left_lo, right_lo);
      __ blt(left_hi, right_hi, labels.true_label);
      __ bgt(left_hi, right_hi, labels.false_label);
      __ CompareRegisters(left_lo, right_lo);
      __ bgt(left_hi, right_hi, labels.true_label);
      __ blt(left_hi, right_hi, labels.false_label);
      __ CompareRegisters(left_lo, right_lo);
                                        LocationSummary* locs,
                                        BranchLabels labels) {
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    if (left.IsConstant()) {
      constant = left.constant_instruction();
      true_condition = FlipCondition(true_condition);
      constant = right.constant_instruction();

  return true_condition;
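// Null-aware int64 equality: identical references are equal; if either
// operand is a Smi (and they were not identical) or is not a Mint, the
// operands are unequal; otherwise the unboxed Mint values are compared.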
                                                  LocationSummary* locs,
                                                  BranchLabels labels) {
  ASSERT((kind == Token::kEQ) || (kind == Token::kNE));

  const Condition true_condition = TokenKindToIntCondition(kind);
  compiler::Label* equal_result =
      (true_condition == EQ) ? labels.true_label : labels.false_label;
  compiler::Label* not_equal_result =
      (true_condition == EQ) ? labels.false_label : labels.true_label;

  __ CompareObjectRegisters(left, right);
  __ BranchIf(EQ, equal_result);
  __ and_(TMP, left, right);
  __ BranchIfSmi(TMP, not_equal_result);
  __ CompareClassId(left, kMintCid, TMP);
  __ BranchIf(NE, not_equal_result);
  __ CompareClassId(right, kMintCid, TMP);
  __ BranchIf(NE, not_equal_result);

  __ LoadFieldFromOffset(
  __ LoadFieldFromOffset(

  return true_condition;
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (is_null_aware()) {
    LocationSummary* locs = new (zone)
  if (operation_cid() == kMintCid) {
    LocationSummary* locs = new (zone)
  if (operation_cid() == kDoubleCid) {
    LocationSummary* locs = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
      operation_cid() == kIntegerCid) {
    LocationSummary* locs = new (zone)
    if (is_null_aware()) {
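// The RISC-V feq.d/flt.d/fle.d instructions write 0 when either operand is
// NaN, so unordered double comparisons naturally come out as "not taken".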
                                        LocationSummary* locs,
                                        BranchLabels labels,

      __ feqd(TMP, left, right);
      __ CompareImmediate(TMP, 0);
      __ feqd(TMP, left, right);
      __ CompareImmediate(TMP, 0);
      __ fltd(TMP, left, right);
      __ CompareImmediate(TMP, 0);
      __ fltd(TMP, right, left);
      __ CompareImmediate(TMP, 0);
      __ fled(TMP, left, right);
      __ CompareImmediate(TMP, 0);
      __ fled(TMP, right, left);
      __ CompareImmediate(TMP, 0);
                                                   BranchLabels labels) {
  if (is_null_aware()) {
    ASSERT(operation_cid() == kMintCid);
    return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);

  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
  } else if (operation_cid() == kIntegerCid) {
    return EmitWordComparisonOp(compiler, locs(), kind(), labels);
  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintEqualityOp(compiler, locs(), kind());
    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);

  ASSERT(operation_cid() == kDoubleCid);
  return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

                                                BranchLabels labels) {
  if (right.IsConstant()) {
    __ TestImmediate(left, ComputeImmediateMask());
    __ TestRegisters(left, right.reg());
  Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
  return true_condition;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

                                                 BranchLabels labels) {
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
  compiler::Label* deopt =

  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
  const ZoneGrowableArray<intptr_t>& data = cid_results();

  __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
  __ LoadClassId(cid_reg, val_reg);

  for (intptr_t i = 2; i < data.length(); i += 2) {
    const intptr_t test_cid = data[i];
    ASSERT(test_cid != kSmiCid);
    __ CompareImmediate(cid_reg, test_cid);
    __ BranchIf(EQ, result ? labels.true_label : labels.false_label);

  if (deopt == nullptr) {
    compiler::Label* target = result ? labels.false_label : labels.true_label;
    if (target != labels.fall_through) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;

  if (operation_cid() == kMintCid) {
    LocationSummary* locs = new (zone)
  if (operation_cid() == kDoubleCid) {
    LocationSummary* summary = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
    LocationSummary* summary = new (zone)
    summary->set_in(1, summary->in(0).IsConstant()

                                                  BranchLabels labels) {
  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);
    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);

  ASSERT(operation_cid() == kDoubleCid);
  return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
  if (link_lazily()) {
    stub = &StubCode::CallBootstrapNative();
    entry = reinterpret_cast<uword>(native_c_function());
    if (is_bootstrap_native()) {
      stub = &StubCode::CallBootstrapNative();
    } else if (is_auto_scope()) {
      stub = &StubCode::CallAutoScopeNative();
      stub = &StubCode::CallNoScopeNative();

  __ LoadImmediate(T1, argc_tag);
  compiler::ExternalLabel label(entry);
  __ LoadNativeEntry(T5, &label,
                     link_lazily() ? ObjectPool::Patchability::kPatchable
                                   : ObjectPool::Patchability::kNotPatchable);
  if (link_lazily()) {
        source(), *stub, UntaggedPcDescriptors::kOther, locs(),
    compiler->GenerateNonLazyDeoptableStubCall(
        source(), *stub, UntaggedPcDescriptors::kOther, locs(),
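// FfiCallInstr::EmitNativeCode below reserves the aligned outgoing frame and,
// in MSAN builds, unpoisons the outgoing stack area and parameter registers
// before emitting the parameter moves and the native call.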
#define R(r) (1 << r)

                                                  bool is_optimizing) const {
  return MakeLocationSummaryInternal(
      zone, is_optimizing,

  ASSERT(temp1 != saved_fp_or_sp);
  ASSERT(temp2 != saved_fp_or_sp);

  if (FLAG_precompiled_mode) {

  intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
  __ ReserveAlignedFrameSpace(stack_space);

    __ PushRegisters(kVolatileRegisterSet);
    __ LoadImmediate(A1, stack_space);
        compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
    __ mv(A0, is_leaf_ ? FPREG : saved_fp_or_sp);
        compiler::Address(THR, kMsanUnpoisonRuntimeEntry.OffsetFromThread()));
    __ CallCFunction(compiler::Address(
        THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
    __ PopRegisters(kVolatileRegisterSet);

  EmitParamMoves(compiler, is_leaf_ ? FPREG : saved_fp_or_sp, temp1, temp2);

  __ Comment(is_leaf_ ? "Leaf Call" : "Call");
#if !defined(PRODUCT)

#if !defined(PRODUCT)

                           UntaggedPcDescriptors::Kind::kOther, locs(),

  if (CanExecuteGeneratedCodeInSafepoint()) {
    __ TransitionNativeToGenerated(temp1, true);
        THR, compiler::target::Thread::
                 call_native_through_safepoint_entry_point_offset()));

  __ Comment("Check Dart_Handle for Error.");
  compiler::Label not_error;
  __ BranchIfSmi(temp1, &not_error);
  __ LoadClassId(temp1, temp1);

  __ Comment("Slow path: call Dart_PropagateError through stub.");
      THR, compiler::target::Thread::
               call_native_through_safepoint_entry_point_offset()));
      THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));

  __ RestorePinnedRegisters();

  EmitReturnMoves(compiler, temp1, temp2);

  __ LeaveDartFrame();

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
    __ RestorePoolPointer();
    __ set_constant_pool_allowed(true);

  __ LeaveDartFrame();

  const Register old_exit_through_ffi_reg = T4;
  __ PopRegisterPair(old_exit_frame_reg, old_exit_through_ffi_reg);
  __ PopRegisterPair(tmp, vm_tag_reg);
  __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
                                 old_exit_through_ffi_reg,
  __ PopNativeCalleeSavedRegisters();

  __ set_constant_pool_allowed(true);

  __ set_constant_pool_allowed(false);
  __ PushImmediate(0);
  __ PushNativeCalleeSavedRegisters();

#if defined(USING_SHADOW_CALL_STACK)

  __ RestorePinnedRegisters();

  __ PushRegister(A0);
  __ PushRegister(A0);

  __ EmitEntryFrameVerification();

  __ TransitionNativeToGenerated(A0, false,

  const Function& target_function = marshaller_.dart_signature();
  const intptr_t callback_id = target_function.FfiCallbackId();
  __ LoadFromOffset(A0, A0,
  __ LoadFromOffset(A0, A0,
  __ LoadCompressedFieldFromOffset(
  __ LoadCompressedFieldFromOffset(

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
#define R(r) (1 << r)

                                         bool is_optimizing) const {
  static_assert(saved_fp < temp0, "Unexpected ordering of registers in set.");
  LocationSummary* summary =
      MakeLocationSummaryInternal(zone, (R(saved_fp) | R(temp0)));

  __ MoveRegister(saved_fp, FPREG);
  const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
  __ EnterCFrame(frame_space);

  EmitParamMoves(compiler, saved_fp, temp0);

  RELEASE_ASSERT(native_calling_convention_.argument_locations().length() < 4);
  __ sx(target_address,
  __ CallCFunction(target_address);
  __ li(temp0, VMTag::kDartTagId);
  const intptr_t kNumInputs = 1;
      compiler::Address(THR, Thread::predefined_symbols_address_offset()));

  const intptr_t kNumInputs = 1;
  ASSERT(cid_ == kOneByteStringCid);
  compiler::Label is_one, done;
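// Utf8ScanInstr: every input byte indexes a lookup table; the low bits
// (kSizeMask) accumulate the decoded length in size_reg while the bits in
// kFlagsMask accumulate scan flags that are OR-ed into the decoder's
// scan_flags field afterwards.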
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const Register bytes_ptr_reg = start_reg;
  const Register bytes_end_reg = end_reg;
  const Register flags_reg = bytes_reg;
  const Register decoder_temp_reg = start_reg;
  const Register flags_temp_reg = end_reg;
  const intptr_t kSizeMask = 0x03;
  const intptr_t kFlagsMask = 0x3C;
  compiler::Label loop, loop_in;

  __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
      table_reg, table_reg,
  __ add(bytes_ptr_reg, bytes_reg, start_reg);
  __ add(bytes_end_reg, bytes_reg, end_reg);
  __ li(flags_reg, 0);

  __ lbu(temp_reg, compiler::Address(bytes_ptr_reg, 0));
  __ addi(bytes_ptr_reg, bytes_ptr_reg, 1);
  __ add(temp_reg, table_reg, temp_reg);
  __ lbu(temp_reg, compiler::Address(temp_reg));
  __ or_(flags_reg, flags_reg, temp_reg);
  __ andi(temp_reg, temp_reg, kSizeMask);
  __ add(size_reg, size_reg, temp_reg);

  __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
  if (!IsScanFlagsUnboxed()) {
    __ SmiTag(flags_reg);
  if (decoder_location.IsStackSlot()) {
    decoder_reg = decoder_temp_reg;
    decoder_reg = decoder_location.reg();
  const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
  if (scan_flags_field_.is_compressed() && !IsScanFlagsUnboxed()) {
  __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
                         scan_flags_field_offset);
  __ or_(flags_temp_reg, flags_temp_reg, flags_reg);
  __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  const bool can_be_constant =
      index()->BindsToConstant() &&
          index()->BoundConstant(), IsUntagged(), class_id(), index_scale());

  if (rep == kUnboxedInt64) {

  compiler::Address element_address(TMP);
  element_address = index.IsRegister()
                        ? __ ElementAddressForRegIndex(
                              IsUntagged(), class_id(), index_scale(),
                              index_unboxed_, array, index.reg(), TMP)
                        : __ ElementAddressForIntIndex(
                              IsUntagged(), class_id(), index_scale(), array,

  if (rep == kUnboxedInt64) {
      const Register result_lo = result_pair->At(0).reg();
      const Register result_hi = result_pair->At(1).reg();
      __ lw(result_lo, element_address);
      __ lw(result_hi, compiler::Address(element_address.base(),
                                         element_address.offset() + 4));

  if (rep == kUnboxedFloat) {
  } else if (rep == kUnboxedDouble) {
    ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
           rep == kUnboxedFloat64x2);

  ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
         (class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  switch (class_id()) {
    case kOneByteStringCid:
      switch (element_count()) {
    case kTwoByteStringCid:
      switch (element_count()) {

  compiler::Address element_address = __ ElementAddressForRegIndex(
      IsExternal(), class_id(), index_scale(), false, str,

  ASSERT(can_pack_into_smi());

  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  const bool can_be_constant =
      index()->BindsToConstant() &&
          index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
  ASSERT(rep == kUnboxedUint8);
  if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  } else if (rep == kUnboxedInt64) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  if (rep == kUnboxedFloat) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  } else if (rep == kUnboxedDouble) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  } else if (class_id() == kArrayCid) {
    if (ShouldEmitStoreBarrier()) {

  compiler::Address element_address(TMP);
  if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
    if (index.IsRegister()) {
      __ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
                                          index_scale(), index_unboxed_, array,
      __ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
                                          index_scale(), array,
                                          Smi::Cast(index.constant()).Value());
    __ StoreIntoArray(array, temp, value, CanValueBeSmi());

  element_address = index.IsRegister()
                        ? __ ElementAddressForRegIndex(
                              IsUntagged(), class_id(), index_scale(),
                              index_unboxed_, array, index.reg(), temp)
                        : __ ElementAddressForIntIndex(
                              IsUntagged(), class_id(), index_scale(), array,

      const Smi& constant = Smi::Cast(locs()->in(2).constant());
      intptr_t value = constant.Value();
      } else if (value < 0) {
        __ sb(ZR, element_address);
        __ LoadImmediate(TMP, static_cast<int8_t>(value));
        __ sb(TMP, element_address);
      compiler::Label store_zero, store_ff, done;
      __ sb(value, element_address);
      __ sb(TMP, element_address);

  if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
      __ sb(ZR, element_address);
      __ sb(value, element_address);
  } else if (rep == kUnboxedInt64) {
      __ sd(ZR, element_address);
      __ sd(locs()->in(2).reg(), element_address);
    Register value_lo = value_pair->At(0).reg();
    Register value_hi = value_pair->At(1).reg();
    __ sw(value_lo, element_address);
    __ sw(value_hi, compiler::Address(element_address.base(),
                                      element_address.offset() + 4));
    __ Store(locs()->in(2).reg(), element_address,
  if (rep == kUnboxedFloat) {
      __ sw(ZR, element_address);
      __ fsw(locs()->in(2).fpu_reg(), element_address);
  } else if (rep == kUnboxedDouble) {
      __ sd(ZR, element_address);
      __ fsd(locs()->in(2).fpu_reg(), element_address);
      __ fsd(locs()->in(2).fpu_reg(), element_address);
    ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
           rep == kUnboxedFloat64x2);
  } else if (class_id() == kArrayCid) {
    ASSERT(!ShouldEmitStoreBarrier());
    __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
    __ StoreIntoObjectNoBarrier(array, element_address, value);
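// Loads the class id of |value_reg| into |value_cid_reg|; a Smi either yields
// kSmiCid directly or branches to |value_is_smi| when that label is provided.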
static void LoadValueCid(FlowGraphCompiler* compiler,
                         compiler::Label* value_is_smi = nullptr) {
  compiler::Label done;
  if (value_is_smi == nullptr) {
    __ LoadImmediate(value_cid_reg, kSmiCid);
  __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi,
  __ LoadClassId(value_cid_reg, value_reg);
  const intptr_t kNumInputs = 1;
  const intptr_t value_cid = value()->Type()->ToCid();
  const intptr_t field_cid = field().guarded_cid();
  const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;

  intptr_t num_temps = 0;
  if (needs_value_cid_temp_reg) {
  if (needs_field_temp_reg) {

  LocationSummary* summary = new (zone)
  for (intptr_t i = 0; i < num_temps; i++) {

  ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
  ASSERT(sizeof(UntaggedField::is_nullable_) == 4);

  const intptr_t value_cid = value()->Type()->ToCid();
  const intptr_t field_cid = field().guarded_cid();

  const bool emit_full_guard =
  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;

  const Register field_reg = needs_field_temp_reg

  compiler::Label ok, fail_label;
  compiler::Label* deopt =
  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;

  if (emit_full_guard) {
    compiler::FieldAddress field_cid_operand(field_reg,
    compiler::FieldAddress field_nullability_operand(

    LoadValueCid(compiler, value_cid_reg, value_reg);
    compiler::Label skip_length_check;
    __ lw(TMP, field_cid_operand);
    __ CompareRegisters(value_cid_reg, TMP);
    __ lw(TMP, field_nullability_operand);
    __ CompareRegisters(value_cid_reg, TMP);
    } else if (value_cid == kNullCid) {
      __ lw(value_cid_reg, field_nullability_operand);
      __ CompareImmediate(value_cid_reg, value_cid);
      compiler::Label skip_length_check;
      __ lw(value_cid_reg, field_cid_operand);
      __ CompareImmediate(value_cid_reg, value_cid);

    if (!field().needs_length_check()) {
      __ lw(TMP, field_cid_operand);
      __ sw(value_cid_reg, field_cid_operand);
      __ sw(value_cid_reg, field_nullability_operand);
      __ LoadImmediate(TMP, value_cid);
      __ sw(TMP, field_cid_operand);
      __ sw(TMP, field_nullability_operand);

    if (deopt == nullptr) {
      __ PushRegisterPair(value_reg, field_reg);
      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);

    ASSERT(deopt != nullptr);
    if (field_cid != kSmiCid) {
      __ LoadClassId(value_cid_reg, value_reg);
      __ CompareImmediate(value_cid_reg, field_cid);
    if (field().is_nullable() && (field_cid != kNullCid)) {
      __ CompareObject(value_reg, Object::null_object());
  } else if (value_cid == field_cid) {
    ASSERT(value_cid != nullability);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 3;
  LocationSummary* summary = new (zone)

  LocationSummary* summary = new (zone)

  compiler::Label* deopt =
      compiler::FieldAddress(

  __ add(TMP, value_reg, offset_reg);
  __ lx(TMP, compiler::Address(TMP, 0));
  __ CompareObjectRegisters(length_reg, TMP);

  if (deopt == nullptr) {
    __ PushRegisterPair(value_reg, field_reg);
    __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
    __ BranchIf(NE, deopt);

  ASSERT(field().guarded_list_length() >= 0);
  ASSERT(field().guarded_list_length_in_object_offset() !=
  __ lx(TMP, compiler::FieldAddress(
                 value_reg, field().guarded_list_length_in_object_offset()));
  __ BranchIf(NE, deopt);

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  compiler->used_static_fields().Add(&field());
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
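// Inline allocation of a fixed-length array: tries new-space allocation,
// stores the header and type arguments without a barrier, then initializes
// the elements (unrolled for small arrays, with a loop otherwise).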
static void InlineArrayAllocation(FlowGraphCompiler* compiler,
                                  intptr_t num_elements,
                                  compiler::Label* slow_path,
                                  compiler::Label* done) {
  const int kInlineArraySize = 12;
  __ TryAllocateArray(kArrayCid, instance_size, slow_path,
  __ StoreCompressedIntoObjectNoBarrier(
  __ StoreCompressedIntoObjectNoBarrier(

  if (num_elements > 0) {
    const intptr_t array_size = instance_size - sizeof(UntaggedArray);
    intptr_t current_offset = 0;
    while (current_offset < array_size) {
      __ StoreCompressedIntoObjectNoBarrier(
    compiler::Label end_loop, init_loop;
    __ CompareRegisters(T5, T3);
  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
    const Class& list_class =
                           type_arguments()->definition());

  compiler::Label slow_path, done;
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
        num_elements()->BindsToConstant() &&
        num_elements()->BoundConstant().IsSmi()) {
          Smi::Cast(num_elements()->BoundConstant()).Value();

  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_array_stub =

  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 3;
  LocationSummary* locs = new (zone) LocationSummary(
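// Slow path for AllocateUninitializedContextInstr: falls back to the
// allocate-context stub, passing the number of context variables in T1.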
class AllocateContextSlowPath
    : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
  explicit AllocateContextSlowPath(
      AllocateUninitializedContextInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

    __ Comment("AllocateContextSlowPath");
    LocationSummary* locs = instruction()->locs();
    auto slow_path_env = compiler->SlowPathEnvironmentFor(
    ASSERT(slow_path_env != nullptr);
    auto object_store = compiler->isolate_group()->object_store();
        compiler->zone(), object_store->allocate_context_stub());
    __ LoadImmediate(T1, instruction()->num_context_variables());
    compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
                               UntaggedPcDescriptors::kOther, locs,
                               instruction()->deopt_id(), slow_path_env);

  AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
  compiler->AddSlowPathCode(slow_path);

  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
                        temp0, temp1, temp2);
    __ LoadImmediate(temp0, num_context_variables());
    __ Jump(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_context_stub =
  __ LoadImmediate(T1, num_context_variables());

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  auto object_store = compiler->isolate_group()->object_store();
  const auto& clone_context_stub =
                             UntaggedPcDescriptors::kOther, locs(),

  compiler->AddExceptionHandler(this);
  if (HasParallelMove()) {
    parallel_move()->EmitNativeCode(compiler);

  const intptr_t fp_sp_dist =
  __ AddImmediate(SP, FP, fp_sp_dist);

  if (raw_exception_var_ != nullptr) {
  if (raw_stacktrace_var_ != nullptr) {
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,
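// Slow path for CheckStackOverflowInstr: calls the shared stack-overflow stub
// (or the runtime entry when no shared stub is used) and also serves as the
// OSR entry point when on-stack replacement is enabled.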
class CheckStackOverflowSlowPath
    : public TemplateSlowPathCode<CheckStackOverflowInstr> {
  static constexpr intptr_t kNumSlowPathArgs = 0;

  explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

    auto locs = instruction()->locs();
    if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
      __ Comment("CheckStackOverflowSlowPathOsr");
      __ Bind(osr_entry_label());

    __ Comment("CheckStackOverflowSlowPath");
    if (!using_shared_stub) {
        compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);

    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
    if (using_shared_stub) {
        ASSERT(__ constant_pool_allowed());
        __ set_constant_pool_allowed(false);
        __ EnterDartFrame(0);
      auto object_store = compiler->isolate_group()->object_store();
              ? object_store->stack_overflow_stub_with_fpu_regs_stub()
              : object_store->stack_overflow_stub_without_fpu_regs_stub());
      if (compiler->CanPcRelativeCall(stub)) {
        __ GenerateUnRelocatedPcRelativeCall();
        compiler->AddPcRelativeCallStubTarget(stub);
        const uword entry_point_offset =
        __ Call(compiler::Address(THR, entry_point_offset));
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
                                     instruction()->source());
        __ LeaveDartFrame();
        __ set_constant_pool_allowed(true);
      __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
          UntaggedPcDescriptors::kOther, instruction()->locs(), env);

        instruction()->in_loop()) {
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
                                     InstructionSource());
    compiler->pending_deoptimization_env_ = nullptr;
    if (!using_shared_stub) {

  compiler::Label* osr_entry_label() {
    return &osr_entry_label_;

  compiler::Label osr_entry_label_;
  CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
  compiler->AddSlowPathCode(slow_path);

  __ bleu(SP, TMP, slow_path->entry_label());
  if (compiler->CanOSRFunction() && in_loop()) {
    const intptr_t configured_optimization_counter_threshold =
        compiler->thread()->isolate_group()->optimization_counter_threshold();
    const int32_t threshold =
        configured_optimization_counter_threshold * (loop_depth() + 1);
    __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
    __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
    __ CompareImmediate(TMP, threshold);
    __ BranchIf(GE, slow_path->osr_entry_label());
  if (compiler->ForceSlowPathForStackOverflow()) {
    __ j(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
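// Tagged Smi shift-left; when the shift can overflow, the result is shifted
// back and compared with the original operand, deoptimizing on a mismatch.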
static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
                             BinarySmiOpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();
  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)

    ASSERT(constant.IsSmi());
    const intptr_t kCountLimit = XLEN - 1;
    const intptr_t value = Smi::Cast(constant).Value();
    if (shift_left->can_overflow()) {
      __ bne(left, TMP2, deopt);

  Range* right_range = shift_left->right_range();
  if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
    const Object& obj = shift_left->left()->BoundConstant();
      const intptr_t left_int = Smi::Cast(obj).Value();
      if (left_int == 0) {
        __ bltz(right, deopt);
      const intptr_t max_right =
      const bool right_needs_check =
      if (right_needs_check) {
        __ BranchIf(CS, deopt);
      __ SmiUntag(TMP, right);

  const bool right_needs_check =
  if (!shift_left->can_overflow()) {
    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());
      __ bltz(right, deopt);

      compiler::Label done, is_not_zero;
      __ SmiUntag(TMP, right);
      __ SmiUntag(TMP, right);
    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());
      __ BranchIf(CS, deopt);
    __ SmiUntag(TMP, right);
    __ bne(left, TMP, deopt);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps =
      ((op_kind() == Token::kUSHR) || (op_kind() == Token::kMUL)) ? 1 : 0;
  LocationSummary* summary = new (zone)
  if (op_kind() == Token::kTRUNCDIV) {
    if (RightIsPowerOfTwoConstant()) {
  if (op_kind() == Token::kMOD) {
  if (kNumTemps == 1) {
  if (op_kind() == Token::kSHL) {
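// Constant right operand fast path: the Smi constant is used directly as an
// immediate (note imm keeps the Smi tag), with the *BranchOverflow variants
// whenever a deopt target exists.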
  compiler::Label* deopt = nullptr;

    ASSERT(constant.IsSmi());
    const intx_t imm = static_cast<intx_t>(constant.ptr());
    switch (op_kind()) {
        if (deopt == nullptr) {
          __ AddImmediate(result, left, imm);
          __ AddImmediateBranchOverflow(result, left, imm, deopt);
        if (deopt == nullptr) {
          __ AddImmediate(result, left, -imm);
          __ SubtractImmediateBranchOverflow(result, left, imm, deopt);
        const intptr_t value = Smi::Cast(constant).Value();
        if (deopt == nullptr) {
          __ MultiplyImmediateBranchOverflow(result, left, value, deopt);
      case Token::kTRUNCDIV: {
        const intptr_t value = Smi::Cast(constant).Value();
        const intptr_t shift_count =
        __ srai(TMP, left, XLEN - 1);
        __ srli(TMP, TMP, XLEN - shift_count);
        __ add(temp, left, TMP);
        __ srai(result, temp, shift_count);
      case Token::kBIT_AND:
        __ AndImmediate(result, left, imm);
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
        __ XorImmediate(result, left, imm);
        const intptr_t kCountLimit = XLEN - 1;
        intptr_t value = Smi::Cast(constant).Value();
      case Token::kUSHR: {
        if (deopt != nullptr) {
          __ bltz(left, deopt);
        const intptr_t kCountLimit = XLEN - 1;
        intptr_t value = Smi::Cast(constant).Value();
        __ SmiUntag(TMP, left);
        if (deopt != nullptr) {

  switch (op_kind()) {
      if (deopt == nullptr) {
        __ AddBranchOverflow(result, left, right, deopt);
      if (deopt == nullptr) {
        __ SubtractBranchOverflow(result, left, right, deopt);
      __ SmiUntag(temp, left);
      if (deopt == nullptr) {
        __ MultiplyBranchOverflow(result, temp, right, deopt);
    case Token::kBIT_AND: {
    case Token::kBIT_OR: {
    case Token::kBIT_XOR: {
    case Token::kTRUNCDIV: {
        __ beqz(right, deopt);
      __ SmiUntag(TMP, left);
      __ SmiUntag(TMP2, right);
        __ beqz(right, deopt);
      __ SmiUntag(TMP, left);
      __ SmiUntag(TMP2, right);
      compiler::Label done, adjust;
        __ bltz(right, deopt);
      __ SmiUntag(TMP, right);
      const intptr_t kCountLimit = XLEN - 1;
        __ LoadImmediate(TMP2, kCountLimit);
        compiler::Label shift_in_bounds;
        __ Bind(&shift_in_bounds);
    case Token::kUSHR: {
      compiler::Label done;
      __ SmiUntag(TMP, right);
      compiler::Label next;
      if (deopt != nullptr) {
        __ bltz(left, deopt);
        __ bltz(right, deopt);
      __ SmiUntag(temp, left);
        __ bltz(right, deopt);
      __ SmiUntag(TMP, right);
      const intptr_t kCountLimit = XLEN - 1;
      compiler::Label done;
        __ LoadImmediate(TMP2, kCountLimit);
      compiler::Label shift_in_bounds;
      __ Bind(&shift_in_bounds);
      if (deopt != nullptr) {
  intptr_t left_cid = left()->Type()->ToCid();
  intptr_t right_cid = right()->Type()->ToCid();
  ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Label* deopt =
  intptr_t left_cid = left()->Type()->ToCid();
  intptr_t right_cid = right()->Type()->ToCid();
  if (this->left()->definition() == this->right()->definition()) {
    __ BranchIfSmi(left, deopt);
  } else if (left_cid == kSmiCid) {
    __ BranchIfSmi(right, deopt);
  } else if (right_cid == kSmiCid) {
    __ BranchIfSmi(left, deopt);
    __ or_(TMP, left, right);
    __ BranchIfSmi(TMP, deopt);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
      compiler->BoxClassFor(from_representation()),

  switch (from_representation()) {
    case kUnboxedDouble:
      __ StoreDFieldToOffset(value, out_reg, ValueOffset());
      __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
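// Unbox either reads the payload out of the box object or, for Smi inputs,
// untags in registers (sign-extending into the high word on RV32).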
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  const bool is_floating_point =
  LocationSummary* summary = new (zone)
  if (is_floating_point) {

void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
    case kUnboxedInt64: {
      __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
      __ LoadFieldFromOffset(result->At(1).reg(), box,
      __ ld(result, compiler::FieldAddress(box, ValueOffset()));
    case kUnboxedDouble: {
      __ LoadDFieldFromOffset(result, box, ValueOffset());
    case kUnboxedFloat: {
      __ LoadDFieldFromOffset(result, box, ValueOffset());
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4: {

void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
    case kUnboxedInt64: {
      __ SmiUntag(result->At(0).reg(), box);
      __ srai(result->At(1).reg(), box, XLEN - 1);
    case kUnboxedInt64: {
    case kUnboxedFloat: {
      __ SmiUntag(TMP, box);
    case kUnboxedDouble: {
      __ SmiUntag(TMP, box);

void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
  compiler::Label done;
  __ srai(result->At(1).reg(), box, XLEN - 1);
  __ SmiUntag(result->At(0).reg(), box);
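// Boxing an integer first tries to tag it as a Smi; values that do not fit
// allocate a Mint inline, through the shared slow path, or via the
// allocate-mint stubs.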
  ASSERT((from_representation() == kUnboxedInt32) ||
         (from_representation() == kUnboxedUint32));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  const bool kMayAllocateMint = false;
  const bool kMayAllocateMint = !ValueFitsSmi();
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,

  if (from_representation() == kUnboxedInt32) {
    ASSERT(from_representation() == kUnboxedUint32);
  if (ValueFitsSmi()) {
  compiler::Label done;
  if (from_representation() == kUnboxedInt32) {
    ASSERT(from_representation() == kUnboxedUint32);
  if (from_representation() == kUnboxedInt32) {
    __ StoreFieldToOffset(
    ASSERT(from_representation() == kUnboxedUint32);
    __ StoreFieldToOffset(

  const bool stubs_in_vm_isolate =
      object_store->allocate_mint_with_fpu_regs_stub()
          ->InVMIsolateHeap() ||
      object_store->allocate_mint_without_fpu_regs_stub()
          ->InVMIsolateHeap();
  const bool shared_slow_path_call =
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = ValueFitsSmi() ? 0 : 1;
  LocationSummary* summary = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  if (ValueFitsSmi()) {
  } else if (shared_slow_path_call) {

  if (ValueFitsSmi()) {
    Register value_lo = value_pair->At(0).reg();
    __ SmiTag(out_reg, value_lo);

  Register value_lo = value_pair->At(0).reg();
  Register value_hi = value_pair->At(1).reg();
  compiler::Label overflow, done;
  __ SmiTag(out_reg, value_lo);
  __ srai(TMP, out_reg, XLEN - 1);
                  compiler->intrinsic_slow_path_label(),
  } else if (locs()->call_on_shared_slow_path()) {
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
    auto object_store = compiler->isolate_group()->object_store();
        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
                      : object_store->allocate_mint_without_fpu_regs_stub());
    ASSERT(!locs()->live_registers()->ContainsRegister(
    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
      __ LeaveDartFrame();
      __ set_constant_pool_allowed(true);

  __ StoreFieldToOffset(value_lo, out_reg,
  __ StoreFieldToOffset(

  if (ValueFitsSmi()) {
  compiler::Label done;
                  compiler->intrinsic_slow_path_label(),
  } else if (locs()->call_on_shared_slow_path()) {
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
    auto object_store = compiler->isolate_group()->object_store();
        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
                      : object_store->allocate_mint_without_fpu_regs_stub());
    ASSERT(!locs()->live_registers()->ContainsRegister(
    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
      __ LeaveDartFrame();
      __ set_constant_pool_allowed(true);
static void LoadInt32FromMint(FlowGraphCompiler* compiler,
                              compiler::Label* deopt) {
  if (deopt != nullptr) {
  __ LoadFieldFromOffset(

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const intptr_t value_cid = value()->Type()->ToCid();
  compiler::Label* deopt =
  compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;

  if (value_cid == kSmiCid) {
  } else if (value_cid == kMintCid) {
    compiler::Label done;
    compiler::Label done;
    __ BranchIf(NE, deopt);

  const intptr_t value_cid = value()->Type()->ToCid();
  compiler::Label* deopt =
  if (value_cid == kSmiCid) {
  } else if (value_cid == kMintCid) {
    compiler::Label done;
    compiler::Label done;
    __ BranchIf(NE, deopt);

  if (!is_truncating() && (deopt != nullptr)) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  switch (op_kind()) {
  switch (op_kind()) {

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

                                                 BranchLabels labels) {
  if (op_kind() == MethodRecognizer::kDouble_getIsNaN) {
  } else if (op_kind() == MethodRecognizer::kDouble_getIsInfinite) {
  } else if (op_kind() == MethodRecognizer::kDouble_getIsNegative) {

  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::LeafRuntimeScope rt(compiler->assembler(),
  if (locs()->in(3).IsRegister()) {
  } else if (locs()->in(3).IsStackSlot()) {

  if (result_cid() == kDoubleCid) {
    const intptr_t kNumInputs = 2;
    const intptr_t kNumTemps = 0;
    LocationSummary* summary = new (zone)
  ASSERT(result_cid() == kSmiCid);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
         (op_kind() == MethodRecognizer::kMathMax));
  const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
  if (result_cid() == kDoubleCid) {
  ASSERT(result_cid() == kSmiCid);
  compiler::Label choose_right, done;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  switch (op_kind()) {
    case Token::kNEGATE: {
      compiler::Label* deopt =
    case Token::kBIT_NOT:

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  switch (op_kind()) {
    case Token::kNEGATE:
    case Token::kSQUARE:

  switch (op_kind()) {
    case Token::kNEGATE:
    case Token::kRECIPROCAL:
    case Token::kRECIPROCAL_SQRT:
    case Token::kSQUARE:
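// DoubleToIntegerInstr below selects the fcvt rounding mode from the
// recognized method (truncate, floor, or ceil) and branches to
// DoubleToIntegerSlowPath when the fast conversion cannot be used.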
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone) LocationSummary(

  DoubleToIntegerSlowPath* slow_path =
      new DoubleToIntegerSlowPath(this, value_double);
  compiler->AddSlowPathCode(slow_path);

  switch (recognized_kind()) {
    case MethodRecognizer::kDoubleToInteger:
    case MethodRecognizer::kDoubleFloorToInt:
    case MethodRecognizer::kDoubleCeilToInt:

  __ fcvtwd(TMP, value_double, rounding);
  __ fcvtld(TMP, value_double, rounding);
  __ bne(TMP, TMP2, slow_path->entry_label());
  __ Bind(slow_path->exit_label());

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  compiler::Label* deopt =

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  switch (op_kind()) {

  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)

  compiler::LeafRuntimeScope rt(compiler->assembler(),
  const intptr_t kNumInputs = 1;
  LocationSummary* summary =

  Location in_loc = pair->At(index());

  const intptr_t kNumInputs = 1;
  LocationSummary* summary =
    case kUnboxedDouble:

    case kUnboxedDouble:
             compiler::FieldAddress(
                                    lane() * sizeof(double)));
             compiler::FieldAddress(
                                    lane() * sizeof(float)));
                                    lane() * sizeof(int32_t)));

  LocationSummary* summary = new (zone)
  switch (from_representation()) {
    case kUnboxedDouble:

  switch (from_representation()) {
    case kUnboxedDouble:
      for (intptr_t i = 0; i < 2; i++) {
        __ fsd(locs()->in(i).fpu_reg(),
               compiler::FieldAddress(
                                      i * sizeof(double)));
      for (intptr_t i = 0; i < 4; i++) {
        __ fsw(locs()->in(i).fpu_reg(),
               compiler::FieldAddress(
                                      i * sizeof(float)));
      for (intptr_t i = 0; i < 4; i++) {
        compiler::FieldAddress(result,
                               i * sizeof(int32_t)));
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Label* deopt =
  const Register result_div = pair->At(0).reg();
  const Register result_mod = pair->At(1).reg();
    __ beqz(right, deopt);

  __ SmiUntag(TMP, left);
  __ SmiUntag(TMP2, right);

  compiler::Label done, adjust;
    __ sub(result_mod, result_mod, TMP2);
    __ add(result_mod, result_mod, TMP2);
    __ sub(result_mod, result_mod, TMP2);
    __ add(result_mod, result_mod, TMP2);

  __ mv(TMP, result_div);
  __ SmiTag(result_div);
  __ SmiTag(result_mod);
  __ SmiUntag(TMP2, result_div);
  __ SmiTag(result_div);
  __ SmiTag(result_mod);
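// Integer hash helper: multiplies the 64-bit value by 0x2d51 and combines the
// product words into the hash (separate RV32 and RV64 variants).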
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
  __ LoadImmediate(TMP, 0x2d51);
  __ mulhu(value_lo, value_hi, TMP);

static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
  __ LoadImmediate(TMP, 0x2d51);

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 3;
  LocationSummary* summary = new (zone) LocationSummary(

  const Register result_hi = out_pair->At(1).reg();
  compiler::Label hash_double, hash_double_value, hash_integer;
  compiler::Label slow_path, done;

  __ fcvtdw(temp_double, temp1);
  __ fcvtdl(temp_double, temp1);
  __ feqd(temp, value, temp_double);
  __ CompareImmediate(temp, 1);
  __ BranchIf(NE, &hash_double_value);
  __ srai(temp, temp1, XLEN - 1);

  compiler::LeafRuntimeScope rt(compiler->assembler(), 0,
  rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
  __ LoadFromOffset(temp1, THR,
  __ LoadFromOffset(temp, THR,
  __ srli(temp, temp1, 32);
  __ CompareImmediate(TMP, 0);
  __ BranchIf(NE, &hash_integer);
  __ Bind(&hash_double_value);
  __ LoadFromOffset(temp1, THR,
  __ LoadFromOffset(temp, THR,
  __ Bind(&hash_double_value);
  __ srli(temp, temp1, 32);
  __ xor_(result_hi, result_hi, result_hi);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  __ srai(value_hi, value, XLEN - 1);
  __ LoadFieldFromOffset(value_hi, value,

  comparison()->InitializeLocationSummary(zone, opt);
  return comparison()->locs();

  comparison()->EmitBranchCode(compiler, this);
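// CheckClassInstr deoptimizes when the receiver's class id is outside the
// expected set: dense cid ranges use a bias-and-compare, sparse sets use a
// bit-mask test over the biased cid.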
  const intptr_t kNumInputs = 1;
  const bool need_mask_temp = IsBitTest();
  const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
  LocationSummary* summary = new (zone)
  if (!IsNullCheck()) {
    if (need_mask_temp) {
                                  compiler::Label* deopt) {
  if (IsDeoptIfNull()) {
  } else if (IsDeoptIfNotNull()) {
                                 compiler::Label* deopt) {
  __ AddImmediate(biased_cid, -min);
  __ CompareImmediate(biased_cid, max - min);
  __ BranchIf(HI, deopt);
  __ LoadImmediate(bit_reg, 1);
  __ sll(bit_reg, bit_reg, biased_cid);
  __ TestImmediate(bit_reg, mask);
  __ BranchIf(EQ, deopt);
int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
                                  compiler::Label* is_ok,
                                  compiler::Label* deopt,
                                  bool use_near_jump) {
  if (cid_start == cid_end) {
    __ CompareImmediate(biased_cid, cid_start - bias);
    __ AddImmediate(biased_cid, bias - cid_start);
    __ CompareImmediate(biased_cid, cid_end - cid_start);
  __ BranchIf(no_match, deopt);
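// Explanatory note (added for this extract): the class-id checks above use
// two standard tricks. A contiguous cid range is tested with one unsigned
// comparison: bias the cid so the range starts at zero, then a single
// BranchIf(HI, deopt) rejects values both below and above the range. A
// sparse set of cids is tested by shifting a single bit into position
// (1 << biased_cid) and ANDing it against a precomputed mask.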
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  if (cids_.IsSingleCid()) {
    __ BranchIf(NE, deopt);
    __ BranchIf(HI, deopt);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  __ BranchIfNotSmi(value, deopt);
  ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ CompareObject(value_reg, Object::null_object());
  __ BranchIf(EQUAL, slow_path->entry_label());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  compiler::Label* deopt =
  const intptr_t index_cid = index()->Type()->ToCid();
  if (length_loc.IsConstant() && index_loc.IsConstant()) {
    if ((Smi::Cast(length_loc.constant()).Value() >
         Smi::Cast(index_loc.constant()).Value()) &&
        (Smi::Cast(index_loc.constant()).Value() >= 0)) {
    ASSERT((Smi::Cast(length_loc.constant()).Value() <=
            Smi::Cast(index_loc.constant()).Value()) ||
           (Smi::Cast(index_loc.constant()).Value() < 0));
  if (index_loc.IsConstant()) {
    const Smi& index = Smi::Cast(index_loc.constant());
    __ BranchIf(LS, deopt);
  } else if (length_loc.IsConstant()) {
    const Smi& length = Smi::Cast(length_loc.constant());
    const Register index = index_loc.reg();
    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);
    __ bltz(index, deopt);
    __ BranchIf(CS, deopt);
    const Register index = index_loc.reg();
    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);
    __ CompareObjectRegisters(index, length);
    __ BranchIf(CS, deopt);
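// Explanatory note (added for this extract): the register/register bounds
// check compares the tagged index against the tagged length and deopts on
// CS (unsigned higher-or-same). Because a negative Smi looks like a very
// large unsigned value, the single unsigned comparison rejects both
// index < 0 and index >= length in one branch.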
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ lbu(TMP, compiler::FieldAddress(locs()->in(0).reg(),
  __ bnez(TMP, slow_path->entry_label());
class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
  Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
                      Range* divisor_range,
      : ThrowErrorSlowPathCode(instruction,
                               kIntegerDivisionByZeroExceptionRuntimeEntry),
        is_mod_(instruction->op_kind() == Token::kMOD),
        divisor_range_(divisor_range),
        adjust_sign_label_() {}
    if (has_divide_by_zero()) {
    __ Comment("slow path %s operation (no throw)", name());
    if (has_adjust_sign()) {
      __ Bind(adjust_sign_label());
        compiler::Label adjust, done;
        __ sub(out_, out_, divisor_);
        __ add(out_, out_, divisor_);
      } else if (divisor_range_->IsPositive()) {
        __ add(out_, out_, divisor_);
        __ sub(out_, out_, divisor_);
  const char* name() override { return "int64 divide"; }
  bool has_adjust_sign() { return is_mod_; }
  bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }
  compiler::Label* adjust_sign_label() {
    ASSERT(has_adjust_sign());
    return &adjust_sign_label_;
  Range* divisor_range_;
  compiler::Label adjust_sign_label_;
static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
                                 BinaryInt64OpInstr* instruction,
  ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);
  Range* right_range = instruction->right()->definition()->range();
  Int64DivideSlowPath* slow_path =
      new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);
  if (slow_path->has_divide_by_zero()) {
    __ beqz(right, slow_path->entry_label());
  if (op_kind == Token::kMOD) {
    __ rem(out, left, right);
    __ bltz(out, slow_path->adjust_sign_label());
    __ div(out, left, right);
  if (slow_path->is_needed()) {
    __ Bind(slow_path->exit_label());
    compiler->AddSlowPathCode(slow_path);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  switch (op_kind()) {
    case Token::kTRUNCDIV: {
      const intptr_t kNumInputs = 2;
      const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
      LocationSummary* summary = new (zone) LocationSummary(
      if (kNumTemps == 1) {
      const intptr_t kNumInputs = 2;
      const intptr_t kNumTemps = 0;
      LocationSummary* summary = new (zone) LocationSummary(
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register right_lo = right_pair->At(0).reg();
  Register right_hi = right_pair->At(1).reg();
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
  switch (op_kind()) {
    case Token::kBIT_AND: {
      __ and_(out_lo, left_lo, right_lo);
      __ and_(out_hi, left_hi, right_hi);
    case Token::kBIT_OR: {
      __ or_(out_lo, left_lo, right_lo);
      __ or_(out_hi, left_hi, right_hi);
    case Token::kBIT_XOR: {
      __ xor_(out_lo, left_lo, right_lo);
      __ xor_(out_hi, left_hi, right_hi);
      __ add(out_hi, left_hi, right_hi);
      __ add(out_lo, left_lo, right_lo);
      __ sltu(TMP, out_lo, right_lo);
      __ add(out_hi, out_hi, TMP);
      __ sltu(TMP, left_lo, right_lo);
      __ sub(out_hi, left_hi, right_hi);
      __ sub(out_hi, out_hi, TMP);
      __ sub(out_lo, left_lo, right_lo);
      __ mul(out_lo, right_lo, left_hi);
      __ mulhu(out_hi, right_lo, left_lo);
      __ add(out_lo, out_lo, out_hi);
      __ mul(out_hi, right_hi, left_lo);
      __ add(out_hi, out_hi, out_lo);
      __ mul(out_lo, right_lo, left_lo);
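// Explanatory sketch (added for this extract): on RV32 an unboxed int64
// lives in a register pair, so the arithmetic cases above synthesize 64-bit
// operations from 32-bit ones. In plain C terms (uint32_t halves):
//
//   add: out_lo = left_lo + right_lo;
//        carry  = (out_lo < right_lo);          // detected with sltu
//        out_hi = left_hi + right_hi + carry;
//
//   mul (low 64 bits only):
//        out_hi = left_hi * right_lo + left_lo * right_hi
//               + (uint32_t)(((uint64_t)left_lo * right_lo) >> 32);
//        out_lo = left_lo * right_lo;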
  if (op_kind() == Token::kMOD || op_kind() == Token::kTRUNCDIV) {
    EmitInt64ModTruncDiv(compiler, this, op_kind(), left, right.reg(), tmp,
  } else if (op_kind() == Token::kMUL) {
    if (right.IsConstant()) {
      __ mul(out, left, r);
  if (right.IsConstant()) {
    switch (op_kind()) {
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
    switch (op_kind()) {
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
                                     const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
        __ slli(out_lo, left_hi, 32 - shift);
        __ srli(TMP, left_lo, shift);
        __ or_(out_lo, out_lo, TMP);
        __ srai(out_hi, left_hi, shift);
        __ mv(out_lo, left_hi);
      } else if (shift < 64) {
        __ srai(out_lo, left_hi, shift - 32);
        __ srai(out_lo, left_hi, 31);
        __ srai(out_hi, left_hi, 31);
    case Token::kUSHR: {
        __ slli(out_lo, left_hi, 32 - shift);
        __ srli(TMP, left_lo, shift);
        __ or_(out_lo, out_lo, TMP);
        __ srli(out_hi, left_hi, shift);
        __ mv(out_lo, left_hi);
        __ srli(out_lo, left_hi, shift - 32);
        __ srli(out_hi, left_lo, 32 - shift);
        __ slli(TMP, left_hi, shift);
        __ or_(out_hi, out_hi, TMP);
        __ slli(out_lo, left_lo, shift);
        __ mv(out_hi, left_lo);
        __ slli(out_hi, left_lo, shift - 32);
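// Explanatory sketch (added for this extract): a 64-bit shift by a constant
// on RV32 splits into cases on the shift amount. For an arithmetic right
// shift (kSHR shown; kUSHR is the same with logical zero fill):
//
//   shift < 32:   out_lo = (left_lo >> shift) | (left_hi << (32 - shift));
//                 out_hi = left_hi >> shift;            // arithmetic
//   shift == 32:  out_lo = left_hi;  out_hi = sign fill of left_hi;
//   shift > 32:   out_lo = left_hi >> (shift - 32);  out_hi = sign fill;
//
// The left-shift case mirrors this with the two halves swapped.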
static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
                                     const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
      __ srai(out, left, Utils::Minimum<int64_t>(shift, XLEN - 1));
    case Token::kUSHR: {
      __ srli(out, left, shift);
      __ slli(out, left, shift);
static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
      compiler::Label big_shift, done;
      __ srl(out_lo, left_lo, right);
      __ sra(out_hi, left_hi, right);
      __ or_(out_lo, out_lo, TMP2);
      __ sra(out_lo, left_hi, TMP);
      __ srai(out_hi, left_hi, XLEN - 1);
    case Token::kUSHR: {
      compiler::Label big_shift, done;
      __ srl(out_lo, left_lo, right);
      __ srl(out_hi, left_hi, right);
      __ or_(out_lo, out_lo, TMP2);
      __ srl(out_lo, left_hi, TMP);
      compiler::Label big_shift, done;
      __ sll(out_lo, left_lo, right);
      __ sll(out_hi, left_hi, right);
      __ or_(out_hi, out_hi, TMP2);
      __ sll(out_hi, left_lo, TMP);
static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
      __ sra(out, left, right);
    case Token::kUSHR: {
      __ srl(out, left, right);
      __ sll(out, left, right);
static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
                                      const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
      __ srli(out, left, shift);
      __ srliw(out, left, shift);
      __ slli(out, left, shift);
      __ slliw(out, left, shift);
static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
      __ srl(out, left, right);
      __ srlw(out, left, right);
      __ sll(out, left, right);
      __ sllw(out, left, right);
class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
  explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}
  const char* name() override { return "int64 shift"; }
  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
    Register left_hi = left_pair->At(1).reg();
    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
    Register out_lo = out_pair->At(0).reg();
    Register out_hi = out_pair->At(1).reg();
    compiler::Label throw_error;
    __ bltz(right_hi, &throw_error);
    switch (instruction()->AsShiftInt64Op()->op_kind()) {
        __ mv(out_lo, out_hi);
    __ StoreToOffset(right_lo, THR,
    __ StoreToOffset(right_hi, THR,
    const Register left = instruction()->locs()->in(0).reg();
    const Register out = instruction()->locs()->out(0).reg();
    compiler::Label throw_error;
    __ bltz(right, &throw_error);
    switch (instruction()->AsShiftInt64Op()->op_kind()) {
        __ srai(out, left, XLEN - 1);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
      right()->definition()->IsConstant()) {
  LocationSummary* summary = new (zone) LocationSummary(
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
                             left_hi, locs()->in(1).constant());
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    ShiftInt64OpSlowPath* slow_path = nullptr;
    if (!IsShiftCountInRange()) {
      slow_path = new (Z) ShiftInt64OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);
      __ CompareImmediate(right_hi, 0);
      __ BranchIf(NE, slow_path->entry_label());
      __ CompareImmediate(right_lo, kShiftCountLimit);
      __ BranchIf(HI, slow_path->entry_label());
    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
    if (slow_path != nullptr) {
      __ Bind(slow_path->exit_label());
    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
                             locs()->in(1).constant());
    ShiftInt64OpSlowPath* slow_path = nullptr;
    if (!IsShiftCountInRange()) {
      slow_path = new (Z) ShiftInt64OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);
      __ CompareImmediate(shift, kShiftCountLimit);
      __ BranchIf(HI, slow_path->entry_label());
    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
    if (slow_path != nullptr) {
      __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
    EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
                             left_hi, locs()->in(1).constant());
    if (!IsShiftCountInRange()) {
      compiler::Label* deopt =
      __ CompareImmediate(shift, kShiftCountLimit);
      __ BranchIf(HI, deopt);
    EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
    EmitShiftInt64ByConstant(compiler, op_kind(), out, left,
                             locs()->in(1).constant());
    __ SmiUntag(TMP, shift);
    if (!IsShiftCountInRange()) {
      compiler::Label* deopt =
      __ CompareImmediate(shift, kShiftCountLimit);
      __ BranchIf(HI, deopt);
    EmitShiftInt64ByRegister(compiler, op_kind(), out, left, shift);
class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
  explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}
  const char* name() override { return "uint32 shift"; }
  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    Register out = instruction()->locs()->out(0).reg();
    compiler::Label throw_error;
    __ StoreToOffset(right_lo, THR,
    __ StoreToOffset(right_hi, THR,
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
      right()->definition()->IsConstant()) {
    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
                              locs()->in(1).constant());
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    ShiftUint32OpSlowPath* slow_path = nullptr;
    if (!IsShiftCountInRange(kUint32ShiftCountLimit)) {
      slow_path = new (Z) ShiftUint32OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);
      __ CompareImmediate(right_hi, 0);
      __ BranchIf(NE, slow_path->entry_label());
      __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
      __ BranchIf(HI, slow_path->entry_label());
    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right_lo);
    if (slow_path != nullptr) {
      __ Bind(slow_path->exit_label());
    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
                              locs()->in(1).constant());
    const bool shift_count_in_range =
        IsShiftCountInRange(kUint32ShiftCountLimit);
    if (!shift_count_in_range) {
      ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);
      __ bltz(right, slow_path->entry_label());
    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
    if (!shift_count_in_range) {
      compiler::Label done;
      __ CompareImmediate(right, 31);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
    EmitShiftUint32ByConstant(compiler, op_kind(), out, left,
                              locs()->in(1).constant());
    const bool shift_count_in_range =
        IsShiftCountInRange(kUint32ShiftCountLimit);
    __ SmiUntag(TMP, right);
    if (!shift_count_in_range) {
      compiler::Label* deopt =
      __ bltz(right, deopt);
    EmitShiftUint32ByRegister(compiler, op_kind(), out, left, right);
    if (!shift_count_in_range) {
      compiler::Label done;
      __ CompareImmediate(right, 31);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
  switch (op_kind()) {
    case Token::kBIT_NOT:
      __ not_(out_lo, left_lo);
      __ not_(out_hi, left_hi);
    case Token::kNEGATE:
      __ snez(TMP, left_lo);
      __ neg(out_lo, left_lo);
      __ neg(out_hi, left_hi);
      __ sub(out_hi, out_hi, TMP);
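// Explanatory sketch (added for this extract): two-word negation computes
// -x by negating each half and subtracting a borrow that is set whenever
// the low half is non-zero, i.e. (uint32_t halves):
//
//   borrow = (left_lo != 0);        // snez
//   out_lo = -left_lo;
//   out_hi = -left_hi - borrow;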
  switch (op_kind()) {
    case Token::kBIT_NOT:
    case Token::kNEGATE:
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  switch (op_kind()) {
    case Token::kBIT_AND:
      __ and_(out, left, right);
    case Token::kBIT_OR:
      __ or_(out, left, right);
    case Token::kBIT_XOR:
      __ xor_(out, left, right);
      __ add(out, left, right);
      __ addw(out, left, right);
      __ sub(out, left, right);
      __ subw(out, left, right);
      __ mul(out, left, right);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  ASSERT(op_kind() == Token::kBIT_NOT);
static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
                               BinaryInt32OpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();
  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)
  const intptr_t kCountLimit = 0x1F;
  if (shift_left->can_overflow()) {
    __ bne(TMP, left, deopt);
  const intptr_t kNumInputs = 2;
  intptr_t num_temps = 0;
  if (((op_kind() == Token::kSHL) && can_overflow()) ||
      (op_kind() == Token::kSHR) || (op_kind() == Token::kUSHR) ||
      (op_kind() == Token::kMUL)) {
  LocationSummary* summary = new (zone)
  if (num_temps == 1) {
  if (op_kind() == Token::kSHL) {
    EmitInt32ShiftLeft(compiler, this);
  compiler::Label* deopt = nullptr;
  switch (op_kind()) {
      if (deopt == nullptr) {
      if (deopt == nullptr) {
        __ SubtractImmediateBranchOverflow(result, left, value, deopt);
      __ LoadImmediate(right, value);
      if (deopt == nullptr) {
        __ MultiplyBranchOverflow(result, left, right, deopt);
    case Token::kBIT_AND: {
    case Token::kBIT_OR: {
    case Token::kBIT_XOR: {
      const intptr_t kCountLimit = 0x1F;
    case Token::kUSHR: {
  switch (op_kind()) {
      if (deopt == nullptr) {
        __ AddBranchOverflow(result, left, right, deopt);
      if (deopt == nullptr) {
        __ SubtractBranchOverflow(result, left, right, deopt);
      if (deopt == nullptr) {
        __ MultiplyBranchOverflow(result, left, right, deopt);
    case Token::kBIT_AND: {
    case Token::kBIT_OR: {
    case Token::kBIT_XOR: {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  if (from() == kUntagged || to() == kUntagged) {
    ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
           (from() == kUntagged && to() == kUnboxedUint32) ||
           (from() == kUnboxedInt32 && to() == kUntagged) ||
           (from() == kUnboxedUint32 && to() == kUntagged));
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
  } else if (to() == kUnboxedInt64) {
    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  if (from() == kUntagged || to() == kUntagged) {
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
  } else if (to() == kUnboxedInt64) {
    ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
  const bool is_nop_conversion =
      (from() == kUntagged && to() == kUnboxedInt32) ||
      (from() == kUntagged && to() == kUnboxedUint32) ||
      (from() == kUnboxedInt32 && to() == kUntagged) ||
      (from() == kUnboxedUint32 && to() == kUntagged);
  if (is_nop_conversion) {
  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
    compiler::Label* deopt =
    __ bltz(out, deopt);
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    Register in_lo = in_pair->At(0).reg();
    Register in_hi = in_pair->At(1).reg();
    compiler::Label* deopt =
    ASSERT(to() == kUnboxedInt32);
    __ srai(TMP, in_lo, XLEN - 1);
    __ bne(in_hi, TMP, deopt);
  } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
    ASSERT(to() == kUnboxedInt64);
    Register out_lo = out_pair->At(0).reg();
    Register out_hi = out_pair->At(1).reg();
    if (from() == kUnboxedUint32) {
      ASSERT(from() == kUnboxedInt32);
      __ srai(out_hi, in, XLEN - 1);
  const bool is_nop_conversion =
  if (is_nop_conversion) {
  compiler::Label* deopt =
  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
    __ bltz(TMP, deopt);
  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
    __ bltz(TMP, deopt);
  } else if (from() == kUnboxedInt64) {
    if (to() == kUnboxedInt32) {
      if (is_truncating() || out != value) {
      ASSERT(to() == kUnboxedUint32);
      if (is_truncating() || out != value) {
      ASSERT(to() == kUnboxedInt32);
      __ BranchIf(NE, deopt);
  } else if (to() == kUnboxedInt64) {
    if (from() == kUnboxedUint32) {
      ASSERT(from() == kUnboxedInt32);
  LocationSummary* summary =
      new (zone) LocationSummary(zone, InputCount(),
    case kUnboxedDouble:
    case kUnboxedDouble:
    case kUnboxedFloat: {
        case kUnboxedInt32: {
        case kUnboxedInt64: {
          __ fmvxw(dst0, src);
    case kUnboxedDouble: {
      ASSERT(to() == kUnboxedInt64);
      __ fsd(src, compiler::Address(SP, 0));
      __ lw(dst0, compiler::Address(SP, 0));
      __ lw(dst1, compiler::Address(SP, 4));
    case kUnboxedInt64: {
        case kUnboxedDouble: {
          __ sw(src0, compiler::Address(SP, 0));
          __ sw(src1, compiler::Address(SP, 4));
          __ fld(dst, compiler::Address(SP, 0));
    case kUnboxedFloat: {
      __ fmvwx(dst, src0);
    case kUnboxedInt32: {
      ASSERT(to() == kUnboxedFloat);
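// Explanatory note (added for this extract): a 32-bit float/int32 bit cast
// can use the fmv.x.w / fmv.w.x register moves directly, but RV32 has no
// single instruction that moves a 64-bit double into a pair of 32-bit GPRs.
// The double <-> int64 cases above therefore round-trip the bits through a
// small stack slot: fsd followed by two lw loads (or two sw stores followed
// by fld).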
  if (entry != nullptr) {
    if (!compiler->CanFallThroughTo(entry)) {
      FATAL("Checked function entry must have no offset");
    entry = osr_entry();
    if (!compiler->CanFallThroughTo(entry)) {
  if (FLAG_reorder_basic_blocks) {
    compiler->EmitEdgeCounter(block()->preorder_number());
                                 InstructionSource());
  if (HasParallelMove()) {
    parallel_move()->EmitNativeCode(compiler);
  if (!compiler->CanFallThroughTo(successor())) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 2;
  LocationSummary* summary = new (zone)
  __ LoadObject(offset_reg, offsets_);
  const auto element_address = __ ElementAddressForRegIndex(
      false, kTypedDataInt32ArrayCid,
      false, offset_reg, index_reg, TMP);
  __ lw(offset_reg, element_address);
  const intptr_t entry_offset = __ CodeSize();
  intx_t imm = -entry_offset;
  intx_t lo = ImmLo(imm);
  intx_t hi = ImmHi(imm);
  __ auipc(target_address_reg, hi);
  __ add(target_address_reg, target_address_reg, offset_reg);
  __ jr(target_address_reg, lo);
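// Explanatory note (added for this extract): the indirect goto loads a
// 32-bit offset from the jump table and turns it into an absolute target
// with a PC-relative sequence. The immediate -entry_offset is split into
// ImmHi/ImmLo halves so that, roughly:
//
//   auipc target, hi   ; target = pc + hi
//   add   target, target, offset
//   jr    target, lo   ; jump to target + lo = (pc - entry_offset) + offset
//
// which lands on code start + offset regardless of where the code was
// loaded in memory.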
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (needs_number_check()) {
    LocationSummary* locs = new (zone)
  LocationSummary* locs = new (zone)
Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
    BranchLabels labels,
    const Object& obj) {
  return compiler->EmitEqualityRegConstCompare(reg, obj, needs_number_check(),
  compiler::Label is_true, is_false;
  BranchLabels labels = {&is_true, &is_false, &is_false};
  if (is_true.IsLinked() || is_false.IsLinked()) {
    EmitBranchOnCondition(compiler, true_condition, labels);
  compiler::Label done;
  BranchLabels labels = compiler->CreateBranchLabels(branch);
  EmitBranchOnCondition(compiler, true_condition, labels);
  const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (type_arguments() != nullptr) {
  if (type_arguments() != nullptr) {
    TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
    if (type_usage_info != nullptr) {
                                 type_arguments()->definition());
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
  __ JumpAndLinkPatchable(StubCode::DebugStepCheck());