#if defined(TARGET_ARCH_ARM64)

#define __ (compiler->assembler())->
#define Z (compiler->zone())

                                               const Instruction* instr,
                                               LocationSummary* locs) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = (representation() == kUnboxedDouble) ? 1 : 0;
  LocationSummary* locs = new (zone)
    case kUnboxedInt64: {
#if !defined(DART_COMPRESSED_POINTERS)
    case kUnboxedDouble: {
#if !defined(DART_COMPRESSED_POINTERS)
  ASSERT(instr->RequiredInputRepresentation(
#if !defined(DART_COMPRESSED_POINTERS)
  __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 2));
  __ add(TMP, instr->base_reg(), compiler::Operand(index, SXTW, 2));
  __ str(value, compiler::Address(TMP, instr->offset()));
               Fixed<Register, ARGS_DESC_REG>,
               Temp<Register> temp)) {
  compiler->EmitTailCallToStub(instr->code());
  __ set_constant_pool_allowed(true);
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 2;
  LocationSummary* locs = new (zone)
                                 compiler::Label* done) {
  __ BranchIfZero(length_reg, done);

static void CopyUpToMultipleOfChunkSize(FlowGraphCompiler* compiler,
                                        compiler::Label* done) {
  const intptr_t base_shift =
  const intptr_t offset_sign = reversed ? -1 : 1;
  intptr_t tested_bits = 0;
  __ Comment("Copying until region size is a multiple of chunk size");
       bit >= element_shift; bit--) {
    const intptr_t bytes = 1 << bit;
    const intptr_t tested_bit = bit + base_shift;
    tested_bits |= (1 << tested_bit);
    const intptr_t offset = offset_sign * bytes;
    compiler::Label skip_copy;
    __ tbz(&skip_copy, length_reg, tested_bit);
    auto const sz = OperandSizeFor(bytes);
  __ andis(length_reg, length_reg, compiler::Immediate(~tested_bits),
                                      compiler::Label* done,
                                      compiler::Label* copy_forwards) {
  const bool reversed = copy_forwards != nullptr;
    __ PushPair(length_reg, dest_reg);
      __ ExtendNonNegativeSmi(length_reg);
      __ add(TMP, src_reg, compiler::Operand(length_reg, ASR, -shift));
      __ add(TMP, src_reg, compiler::Operand(length_reg, LSL, shift));
    __ CompareRegisters(dest_reg, TMP);
    __ MoveRegister(src_reg, TMP);
      __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));
      __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));
  const intptr_t kChunkSize = 16;
  ASSERT(kChunkSize >= element_size_);
  CopyUpToMultipleOfChunkSize(compiler, dest_reg, src_reg, length_reg,
                              element_size_, unboxed_inputs_, reversed,
  const intptr_t loop_subtract = (kChunkSize / element_size_)
  const intptr_t offset = (reversed ? -1 : 1) * kChunkSize;
  __ Comment("Copying chunks at a time");
  compiler::Label loop;
  __ subs(length_reg, length_reg, compiler::Operand(loop_subtract),
    __ PopPair(length_reg, dest_reg);
      __ ExtendNonNegativeSmi(length_reg);
      __ AsrImmediate(length_reg, length_reg, -shift);
      __ LslImmediate(length_reg, length_reg, shift);
    __ MsanUnpoison(dest_reg, length_reg);
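
// Computes the address of the first element to copy: for a constant start
// index the payload address is array + start * element_size_ + offset,
// folded into a single AddImmediate; for a register start index the index is
// scaled with a shifted-register operand (ASR/SXTW/LSL depending on element
// size and pointer compression) before the remaining offset is added.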
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
  if (array_rep != kTagged) {
    case kOneByteStringCid:
    case kTwoByteStringCid:
  ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
  if (start_loc.IsConstant()) {
    const auto& constant = start_loc.constant();
    ASSERT(constant.IsInteger());
    const int64_t start_value = Integer::Cast(constant).AsInt64Value();
        Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
    __ AddImmediate(payload_reg, array_reg, add_value);
  const Register start_reg = start_loc.reg();
    __ ExtendNonNegativeSmi(start_reg);
    __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));
#if defined(DART_COMPRESSED_POINTERS)
    __ add(payload_reg, array_reg, compiler::Operand(start_reg, SXTW, shift));
    __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));
  __ AddImmediate(payload_reg, offset);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (constant != nullptr && constant->HasZeroRepresentation()) {
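
// Helper for pushing outgoing arguments. It keeps one register value pending
// so that two adjacent stack slots can be written with a single
// StorePairToOffset instead of two separate StoreToOffset instructions.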
class ArgumentsMover : public ValueObject {
      __ StoreToOffset(pending_register_, SP,
      pending_sp_relative_index_ = -1;
  void MoveRegister(FlowGraphCompiler* compiler,
                    intptr_t sp_relative_index,
      ASSERT((sp_relative_index + 1) == pending_sp_relative_index_);
      __ StorePairToOffset(reg, pending_register_, SP,
    pending_register_ = reg;
    pending_sp_relative_index_ = sp_relative_index;
                  "LR should not be allocatable");
                  "TMP should not be allocatable");
    return (pending_register_ == TMP) ? LR : TMP;
  intptr_t pending_sp_relative_index_ = -1;
  ArgumentsMover pusher;
       move_arg = move_arg->next()->AsMoveArgument()) {
    if (value.IsRegister()) {
    } else if (value.IsConstant()) {
      if (value.constant_instruction()->HasZeroRepresentation()) {
        ASSERT(move_arg->representation() == kTagged);
        const Object& constant = value.constant();
        if (constant.IsNull()) {
          reg = pusher.GetFreeTempRegister(compiler);
          __ LoadObject(reg, value.constant());
    } else if (value.IsFpuRegister()) {
                       move_arg->location().stack_index() * kWordSize);
      const intptr_t value_offset = value.ToStackSlotOffset();
      reg = pusher.GetFreeTempRegister(compiler);
      __ LoadFromOffset(reg, value.base_reg(), value_offset);
    pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  if (locs()->in(0).IsRegister()) {
  } else if (locs()->in(0).IsPairLocation()) {
  if (compiler->parsed_function().function().IsAsyncFunction() ||
      compiler->parsed_function().function().IsAsyncGenerator()) {
    const Code& stub = GetReturnStub(compiler);
  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {
  compiler::Label stack_ok;
  __ Comment("Stack Check");
  const intptr_t fp_sp_dist =
  __ sub(R2, SP, compiler::Operand(FP));
  __ CompareImmediate(R2, fp_sp_dist);
  __ set_constant_pool_allowed(true);
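
// When one of the two materialized values is zero and the other is a power
// of two, the condition result can be turned into the value with a shift
// rather than a branch. IsPowerOfTwoKind presumably detects exactly that
// case (its body is not shown here); the swap below keeps zero on one side.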
static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {
  BranchLabels labels = {nullptr, nullptr, nullptr};
  const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);
  intptr_t true_value = if_true_;
  intptr_t false_value = if_false_;
  if (is_power_of_two_kind) {
    if (true_value == 0) {
    if (true_value == 0) {
      intptr_t temp = true_value;
      true_value = false_value;
  if (is_power_of_two_kind) {
    const intptr_t shift =
    if (false_value != 0) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const Array& arguments_descriptor =
  if (FLAG_precompiled_mode) {
    __ LoadFieldFromOffset(R2, R0,
  if (!FLAG_precompiled_mode) {
                                 UntaggedPcDescriptors::kOther, locs(), env());
  if (!locs()->out(0).IsInvalid()) {
                                      intptr_t pair_index) {
  if (destination.IsRegister()) {
      const int64_t value = Integer::Cast(value_).AsInt64Value();
      __ LoadImmediate(destination.reg(), value);
      __ LoadObject(destination.reg(), value_);
  } else if (destination.IsFpuRegister()) {
        __ LoadSImmediate(destination.fpu_reg(), Double::Cast(value_).value());
        __ LoadDImmediate(destination.fpu_reg(), Double::Cast(value_).value());
      case kUnboxedFloat64x2:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float64x2::Cast(value_).value());
      case kUnboxedFloat32x4:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float32x4::Cast(value_).value());
      case kUnboxedInt32x4:
        __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());
  } else if (destination.IsDoubleStackSlot()) {
    __ LoadDImmediate(VTMP, Double::Cast(value_).value());
    const intptr_t dest_offset = destination.ToStackSlotOffset();
    __ StoreDToOffset(VTMP, destination.base_reg(), dest_offset);
  } else if (destination.IsQuadStackSlot()) {
      case kUnboxedFloat64x2:
        __ LoadQImmediate(VTMP, Float64x2::Cast(value_).value());
      case kUnboxedFloat32x4:
        __ LoadQImmediate(VTMP, Float32x4::Cast(value_).value());
      case kUnboxedInt32x4:
        __ LoadQImmediate(VTMP, Int32x4::Cast(value_).value());
    ASSERT(destination.IsStackSlot());
    const intptr_t dest_offset = destination.ToStackSlotOffset();
      const int64_t value = Integer::Cast(value_).AsInt64Value();
          bit_cast<int32_t, float>(Double::Cast(value_).value());
      if (value_.IsNull()) {
      } else if (value_.IsSmi() && Smi::Cast(value_).Value() == 0) {
        __ LoadObject(tmp, value_);
    __ StoreToOffset(tmp, destination.base_reg(), dest_offset, operand_size);
  const bool is_unboxed_int =
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
  LocationSummary* locs = new (zone)
  if (is_unboxed_int) {
  if (!locs()->out(0).IsInvalid()) {
  auto const dst_type_loc =
  const intptr_t kNonChangeableInputRegs =
  const intptr_t kCpuRegistersToPreserve =
  const intptr_t kFpuRegistersToPreserve =
  LocationSummary* summary = new (zone) LocationSummary(
  intptr_t next_temp = 0;
    const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
    if (should_preserve) {
      summary->set_temp(next_temp++,
    const bool should_preserve = ((1l << i) & kFpuRegistersToPreserve) != 0;
    if (should_preserve) {
  auto object_store = compiler->isolate_group()->object_store();
  const auto& assert_boolean_stub =
  compiler::Label done;
                             UntaggedPcDescriptors::kOther, locs(),

static void EmitBranchOnCondition(FlowGraphCompiler* compiler,
                                  BranchLabels labels) {
  if (labels.fall_through == labels.false_label) {
    __ b(labels.true_label, true_condition);
    __ b(labels.false_label, false_condition);
    if (labels.fall_through != labels.true_label) {
      __ b(labels.true_label);

static bool AreLabelsNull(BranchLabels labels) {
  return (labels.true_label == nullptr && labels.false_label == nullptr &&
          labels.fall_through == nullptr);

static bool CanUseCbzTbzForComparison(FlowGraphCompiler* compiler,
                                      BranchLabels labels) {
  return !AreLabelsNull(labels) && __ CanGenerateCbzTbz(rn, cond);
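
// Emits a branch on a comparison of a register against zero using the
// assembler's cbz/tbz support. Callers must first check
// CanUseCbzTbzForComparison, which also requires non-null branch labels.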
static void EmitCbzTbz(Register reg,
                       BranchLabels labels,
  ASSERT(CanUseCbzTbzForComparison(compiler, reg, true_condition, labels));
  if (labels.fall_through == labels.false_label) {
    __ GenerateCbzTbz(reg, true_condition, labels.true_label, sz);
    __ GenerateCbzTbz(reg, false_condition, labels.false_label, sz);
    if (labels.fall_through != labels.true_label) {
      __ b(labels.true_label);
                                     LocationSummary* locs,
                                     BranchLabels labels) {
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    ConstantInstr* constant = nullptr;
    if (left.IsConstant()) {
      constant = left.constant_instruction();
      true_condition = FlipCondition(true_condition);
      constant = right.constant_instruction();
    ASSERT(constant->representation() == kTagged);
        CanUseCbzTbzForComparison(compiler, left.reg(), true_condition,
      EmitCbzTbz(left.reg(), compiler, true_condition, labels,
  __ CompareObjectRegisters(left.reg(), right.reg());
  return true_condition;
                                       LocationSummary* locs,
                                       BranchLabels labels) {
  Condition true_condition = TokenKindToIntCondition(kind);
  if (left.IsConstant() || right.IsConstant()) {
    ConstantInstr* constant = nullptr;
    if (left.IsConstant()) {
      constant = left.constant_instruction();
      true_condition = FlipCondition(true_condition);
      constant = right.constant_instruction();
                                      true_condition, labels)) {
      EmitCbzTbz(left.reg(), compiler, true_condition, labels,
  return true_condition;
                                                LocationSummary* locs,
                                                BranchLabels labels) {
  ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
  const Condition true_condition = TokenKindToIntCondition(kind);
  compiler::Label* equal_result =
      (true_condition == EQ) ? labels.true_label : labels.false_label;
  compiler::Label* not_equal_result =
      (true_condition == EQ) ? labels.false_label : labels.true_label;
  __ CompareObjectRegisters(left, right);
  __ b(equal_result, EQ);
  __ BranchIfSmi(TMP, not_equal_result);
  __ CompareClassId(left, kMintCid);
  __ b(not_equal_result, NE);
  __ CompareClassId(right, kMintCid);
  __ b(not_equal_result, NE);
  return true_condition;
  const intptr_t kNumInputs = 2;
  if (operation_cid() == kDoubleCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid ||
      operation_cid() == kIntegerCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
                                          LocationSummary* locs,
                                          BranchLabels labels,
      __ fcmpd(left, right);
      __ fcmpd(left, right);
      __ fcmpd(right, left);
      __ fcmpd(left, right);
      __ fcmpd(right, left);
      __ fcmpd(left, right);
                                                BranchLabels labels) {
    ASSERT(operation_cid() == kMintCid);
    return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);
  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
  } else if (operation_cid() == kMintCid || operation_cid() == kIntegerCid) {
    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
  ASSERT(operation_cid() == kDoubleCid);
  return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
                                            BranchLabels labels) {
  if (right.IsConstant()) {
    __ TestImmediate(left, ComputeImmediateMask(), operand_size);
    __ tst(left, compiler::Operand(right.reg()), operand_size);
  Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
  return true_condition;

static bool IsSingleBitMask(Location mask, intptr_t* bit) {
  if (!mask.IsConstant()) {
  uint64_t mask_value =
      static_cast<uint64_t>(Integer::Cast(mask.constant()).AsInt64Value());
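
// If the test mask has exactly one bit set, the branch can be lowered to a
// single bit-test branch: depending on which label falls through, the code
// below branches either when the bit is zero or when it is non-zero.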
void TestIntInstr::EmitBranchCode(FlowGraphCompiler* compiler,
                                  BranchInstr* branch) {
  if (IsSingleBitMask(locs()->in(1), &bit_index)) {
    BranchLabels labels = compiler->CreateBranchLabels(branch);
    bool branch_on_zero_bit;
    bool can_fallthrough;
    if (labels.fall_through == labels.true_label) {
      target = labels.false_label;
      branch_on_zero_bit = (kind() == Token::kNE);
      can_fallthrough = true;
      target = labels.true_label;
      branch_on_zero_bit = (kind() == Token::kEQ);
      can_fallthrough = (labels.fall_through == labels.false_label);
    if (representation_ == kTagged) {
    if (branch_on_zero_bit) {
    if (!can_fallthrough) {
      __ b(labels.false_label);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
                                           BranchLabels labels) {
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));
  compiler::Label* deopt =
  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;
  __ BranchIfSmi(val_reg, result ? labels.true_label : labels.false_label);
  __ LoadClassId(cid_reg, val_reg);
  for (intptr_t i = 2; i < data.length(); i += 2) {
    const intptr_t test_cid = data[i];
    ASSERT(test_cid != kSmiCid);
    __ CompareImmediate(cid_reg, test_cid);
    __ b(result ? labels.true_label : labels.false_label, EQ);
  if (deopt == nullptr) {
    compiler::Label* target = result ? labels.false_label : labels.true_label;
    if (target != labels.fall_through) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (operation_cid() == kDoubleCid) {
    LocationSummary* summary = new (zone)
  if (operation_cid() == kSmiCid || operation_cid() == kMintCid) {
    LocationSummary* summary = new (zone)
    summary->set_in(1, summary->in(0).IsConstant()
                                               BranchLabels labels) {
  if (operation_cid() == kSmiCid) {
    return EmitSmiComparisonOp(compiler, locs(), kind(), labels);
  } else if (operation_cid() == kMintCid) {
    return EmitInt64ComparisonOp(compiler, locs(), kind(), labels);
  ASSERT(operation_cid() == kDoubleCid);
  return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
    stub = &StubCode::CallBootstrapNative();
    stub = &StubCode::CallBootstrapNative();
    stub = &StubCode::CallAutoScopeNative();
    stub = &StubCode::CallNoScopeNative();
  __ LoadImmediate(R1, argc_tag);
  compiler::ExternalLabel label(entry);
  __ LoadNativeEntry(R5, &label,
                     link_lazily() ? ObjectPool::Patchability::kPatchable
                                   : ObjectPool::Patchability::kNotPatchable);
        source(), *stub, UntaggedPcDescriptors::kOther, locs(),
    compiler->GenerateNonLazyDeoptableStubCall(
        source(), *stub, UntaggedPcDescriptors::kOther, locs(),

#define R(r) (1 << r)

                                                   bool is_optimizing) const {
  return MakeLocationSummaryInternal(
      zone, is_optimizing,
    __ LoadObject(CODE_REG, Object::null_object());
    __ set_constant_pool_allowed(false);
    __ EnterDartFrame(0, PP);
  intptr_t stack_space = marshaller_.RequiredStackSpaceInBytes();
  __ ReserveAlignedFrameSpace(stack_space);
    SPILLS_LR_TO_FRAME(__ PushRegisters(kVolatileRegisterSet));
    __ sub(R1, is_leaf_ ? FPREG : saved_fp_or_sp, compiler::Operand(temp1));
    __ MsanUnpoison(temp1, R1);
    __ MsanUnpoison(is_leaf_ ? FPREG : saved_fp_or_sp,
    __ CallCFunction(compiler::Address(
        THR, kMsanUnpoisonParamRuntimeEntry.OffsetFromThread()));
    RESTORES_LR_FROM_FRAME(__ PopRegisters(kVolatileRegisterSet));
  __ Comment(is_leaf_ ? "Leaf Call" : "Call");
#if !defined(PRODUCT)
    __ mov(temp_csp, CSP);
    __ mov(CSP, temp_csp);
#if !defined(PRODUCT)
                                   UntaggedPcDescriptors::Kind::kOther, locs(),
                 THR, compiler::target::Thread::
                          call_native_through_safepoint_entry_point_offset()));
    __ Comment("Check Dart_Handle for Error.");
    compiler::Label not_error;
    __ BranchIfSmi(temp1, &not_error);
    __ LoadClassId(temp1, temp1);
    __ Comment("Slow path: call Dart_PropagateError through stub.");
            THR, compiler::target::Thread::
                     call_native_through_safepoint_entry_point_offset()));
    __ ldr(branch, compiler::Address(
                       THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));
    __ RestorePinnedRegisters();
  __ mov(SPREG, saved_fp_or_sp);
  __ LeaveDartFrame();
  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
  __ set_constant_pool_allowed(true);
  __ LeaveDartFrame();
  const Register old_exit_through_ffi_reg = R4;
  __ PopPair(old_exit_frame_reg, old_exit_through_ffi_reg);
  __ PopPair(tmp, vm_tag_reg);
  __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
                                 old_exit_through_ffi_reg,
  __ PopNativeCalleeSavedRegisters();
  __ set_constant_pool_allowed(true);
  __ set_constant_pool_allowed(false);
  __ PushImmediate(0);
  __ PushNativeCalleeSavedRegisters();
  __ SetupCSPFromThread(THR);
#if defined(DART_TARGET_OS_FUCHSIA)
#elif defined(USING_SHADOW_CALL_STACK)
  __ RestorePinnedRegisters();
  __ EmitEntryFrameVerification();
  __ TransitionNativeToGenerated(R0, false,
  const Function& target_function = marshaller_.dart_signature();
  const intptr_t callback_id = target_function.FfiCallbackId();
  __ LoadFromOffset(R0, R0,
  __ LoadFromOffset(R0, R0,
  __ LoadCompressedFieldFromOffset(
  __ LoadCompressedFieldFromOffset(
  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();
  __ LoadFieldFromOffset(LR, LR,

#define R(r) (1 << r)

                                                    bool is_optimizing) const {
  __ MoveRegister(saved_fp, FPREG);
  const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
  __ EnterCFrame(frame_space);
  __ mov(saved_csp, CSP);
  __ str(target_address,
  __ CallCFunction(target_address);
  __ LoadImmediate(temp0, VMTag::kDartTagId);
  __ mov(CSP, saved_csp);
  const intptr_t kNumInputs = 1;
      compiler::Address(THR, Thread::predefined_symbols_address_offset()));
  const intptr_t kNumInputs = 1;
  ASSERT(cid_ == kOneByteStringCid);
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const Register bytes_ptr_reg = start_reg;
  const Register bytes_end_reg = end_reg;
  const Register flags_reg = bytes_reg;
  const Register decoder_temp_reg = start_reg;
  const Register flags_temp_reg = end_reg;
  const intptr_t kSizeMask = 0x03;
  const intptr_t kFlagsMask = 0x3C;
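  // Each table entry encodes the character size in the low two bits
  // (kSizeMask) and the decoding flags in bits 2..5 (kFlagsMask); the loop
  // below ORs the flags together and accumulates the sizes to produce the
  // scanned length.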
  compiler::Label loop, loop_in;
  __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
      table_reg, table_reg,
  __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
  __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));
  __ mov(size_reg, ZR);
  __ mov(flags_reg, ZR);
  __ ldr(temp_reg, compiler::Address(table_reg, temp_reg),
  __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
  __ andi(temp_reg, temp_reg, compiler::Immediate(kSizeMask));
  __ add(size_reg, size_reg, compiler::Operand(temp_reg));
  __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));
  __ AndImmediate(flags_reg, flags_reg, kFlagsMask);
    __ SmiTag(flags_reg);
  if (decoder_location.IsStackSlot()) {
    decoder_reg = decoder_temp_reg;
    decoder_reg = decoder_location.reg();
  const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
    __ LoadCompressedSmiFieldFromOffset(flags_temp_reg, decoder_reg,
                                        scan_flags_field_offset);
    __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg),
    __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset,
    __ LoadFieldFromOffset(flags_temp_reg, decoder_reg,
                           scan_flags_field_offset);
    __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
    __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  const bool can_be_constant =
  compiler::Address element_address(TMP);
  element_address =
      index.IsRegister()
          ? __ ElementAddressForRegIndex(
          : __ ElementAddressForIntIndex(
    if (rep == kUnboxedFloat) {
    } else if (rep == kUnboxedDouble) {
      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
    __ LoadCompressed(result, element_address);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
    case kOneByteStringCid:
    case kTwoByteStringCid:
  compiler::Address element_address = __ ElementAddressForRegIndexWithSize(
  __ ldr(result, element_address, sz);
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  const bool can_be_constant =
    ASSERT(rep == kUnboxedUint8);
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
    if (constant != nullptr && constant->HasZeroRepresentation()) {
  } else if (class_id() == kArrayCid) {
  compiler::Address element_address(TMP);
    if (index.IsRegister()) {
    __ StoreCompressedIntoArray(array, temp, value, CanValueBeSmi());
  element_address =
      index.IsRegister()
          ? __ ElementAddressForRegIndex(
          : __ ElementAddressForIntIndex(
    ASSERT(rep == kUnboxedUint8);
      const Smi& constant = Smi::Cast(locs()->in(2).constant());
      } else if (value < 0) {
      __ LoadImmediate(TMP, static_cast<int8_t>(value));
      __ CompareImmediate(value, 0xFF);
      ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
      __ str(locs()->in(2).reg(), element_address,
    if (rep == kUnboxedFloat) {
        ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
        __ fstrs(locs()->in(2).fpu_reg(), element_address);
    } else if (rep == kUnboxedDouble) {
        ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
        __ fstrd(locs()->in(2).fpu_reg(), element_address);
      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
      __ fstrq(value_reg, element_address);
  } else if (class_id() == kArrayCid) {
      __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
      __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
    if (index.IsRegister()) {
    __ MsanUnpoison(TMP, length_in_bytes);
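
// Loads the class id of value_reg into value_cid_reg. If value_is_smi is
// null, a Smi value simply produces kSmiCid; otherwise control branches to
// value_is_smi for Smi values and the class id is only loaded for heap
// objects.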
static void LoadValueCid(FlowGraphCompiler* compiler,
                         compiler::Label* value_is_smi = nullptr) {
  compiler::Label done;
  if (value_is_smi == nullptr) {
    __ LoadImmediate(value_cid_reg, kSmiCid);
  __ BranchIfSmi(value_reg, value_is_smi == nullptr ? &done : value_is_smi);
  __ LoadClassId(value_cid_reg, value_reg);
  const intptr_t kNumInputs = 1;
  const bool emit_full_guard = !opt || (field_cid == kIllegalCid);
  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;
  intptr_t num_temps = 0;
  if (needs_value_cid_temp_reg) {
  if (needs_field_temp_reg) {
  LocationSummary* summary = new (zone)
  for (intptr_t i = 0; i < num_temps; i++) {
  ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
  ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
  const bool emit_full_guard =
  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));
  const bool needs_field_temp_reg = emit_full_guard;
  const Register field_reg = needs_field_temp_reg
  compiler::Label ok, fail_label;
  compiler::Label* deopt =
  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;
  if (emit_full_guard) {
    compiler::FieldAddress field_cid_operand(field_reg,
    compiler::FieldAddress field_nullability_operand(
      LoadValueCid(compiler, value_cid_reg, value_reg);
      compiler::Label skip_length_check;
      __ CompareRegisters(value_cid_reg, TMP);
      __ CompareRegisters(value_cid_reg, TMP);
    } else if (value_cid == kNullCid) {
      __ ldr(value_cid_reg, field_nullability_operand,
      __ CompareImmediate(value_cid_reg, value_cid);
      compiler::Label skip_length_check;
      __ CompareImmediate(value_cid_reg, value_cid);
      if (!field().needs_length_check()) {
      __ str(value_cid_reg, field_nullability_operand,
      __ LoadImmediate(TMP, value_cid);
    if (deopt == nullptr) {
      __ PushPair(value_reg, field_reg);
      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
    ASSERT(deopt != nullptr);
    if (field_cid != kSmiCid) {
      __ LoadClassId(value_cid_reg, value_reg);
      __ CompareImmediate(value_cid_reg, field_cid);
      __ CompareObject(value_reg, Object::null_object());
  } else if (value_cid == field_cid) {
    ASSERT(value_cid != nullability);
  const intptr_t kNumInputs = 1;
    const intptr_t kNumTemps = 3;
    LocationSummary* summary = new (zone)
    LocationSummary* summary = new (zone)
  compiler::Label* deopt =
        compiler::FieldAddress(
    __ LoadCompressedSmi(
    __ tst(offset_reg, compiler::Operand(offset_reg));
    __ LoadCompressedSmi(TMP, compiler::Address(value_reg, offset_reg));
    __ CompareObjectRegisters(length_reg, TMP);
    if (deopt == nullptr) {
      __ PushPair(value_reg, field_reg);
      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
    ASSERT(field().guarded_list_length_in_object_offset() !=
    __ ldr(TMP, compiler::FieldAddress(
                    value_reg, field().guarded_list_length_in_object_offset()));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

static void InlineArrayAllocation(FlowGraphCompiler* compiler,
                                  intptr_t num_elements,
                                  compiler::Label* slow_path,
                                  compiler::Label* done) {
  const int kInlineArraySize = 12;
  __ TryAllocateArray(kArrayCid, instance_size, slow_path,
  __ StoreCompressedIntoObjectNoBarrier(
  __ StoreCompressedIntoObjectNoBarrier(
  if (num_elements > 0) {
    const intptr_t array_size = instance_size - sizeof(UntaggedArray);
      intptr_t current_offset = 0;
      while (current_offset < array_size) {
        __ StoreCompressedIntoObjectNoBarrier(
      compiler::Label end_loop, init_loop;
      __ CompareRegisters(R8, R3);
      __ b(&end_loop, CS);
  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
    const Class& list_class =
  compiler::Label slow_path, done;
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_array_stub =
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 3;
  LocationSummary* locs = new (zone) LocationSummary(

class AllocateContextSlowPath
    : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {
  explicit AllocateContextSlowPath(
      AllocateUninitializedContextInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    __ Comment("AllocateContextSlowPath");
    LocationSummary* locs = instruction()->locs();
    locs->live_registers()->Remove(locs->out(0));
    auto slow_path_env = compiler->SlowPathEnvironmentFor(
    ASSERT(slow_path_env != nullptr);
    auto object_store = compiler->isolate_group()->object_store();
        compiler->zone(), object_store->allocate_context_stub());
    __ LoadImmediate(R1, instruction()->num_context_variables());
    compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
                               UntaggedPcDescriptors::kOther, locs,
                               instruction()->deopt_id(), slow_path_env);
    ASSERT(instruction()->locs()->out(0).reg() == R0);
    compiler->RestoreLiveRegisters(instruction()->locs());
  AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
                        temp0, temp1, temp2);
    __ Jump(slow_path->entry_label());
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_context_stub =
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
  auto object_store = compiler->isolate_group()->object_store();
  const auto& clone_context_stub =
                             UntaggedPcDescriptors::kOther, locs(),
  compiler->AddExceptionHandler(this);
  const intptr_t fp_sp_dist =
  __ AddImmediate(SP, FP, fp_sp_dist);
  if (raw_exception_var_ != nullptr) {
  if (raw_stacktrace_var_ != nullptr) {
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,

class CheckStackOverflowSlowPath
    : public TemplateSlowPathCode<CheckStackOverflowInstr> {
  static constexpr intptr_t kNumSlowPathArgs = 0;

  explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    auto locs = instruction()->locs();
    if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
      __ Comment("CheckStackOverflowSlowPathOsr");
      __ Bind(osr_entry_label());
    __ Comment("CheckStackOverflowSlowPath");
    const bool using_shared_stub = locs->call_on_shared_slow_path();
    if (!using_shared_stub) {
        compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
    if (using_shared_stub) {
        ASSERT(__ constant_pool_allowed());
        __ set_constant_pool_allowed(false);
        __ EnterDartFrame(0);
      auto object_store = compiler->isolate_group()->object_store();
      const bool live_fpu_regs = locs->live_registers()->FpuRegisterCount() > 0;
              ? object_store->stack_overflow_stub_with_fpu_regs_stub()
              : object_store->stack_overflow_stub_without_fpu_regs_stub());
      if (compiler->CanPcRelativeCall(stub)) {
        __ GenerateUnRelocatedPcRelativeCall();
        compiler->AddPcRelativeCallStubTarget(stub);
        const uword entry_point_offset =
                locs->live_registers()->FpuRegisterCount() > 0);
        __ Call(compiler::Address(THR, entry_point_offset));
      compiler->RecordSafepoint(locs, kNumSlowPathArgs);
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
                                     instruction()->deopt_id(),
                                     instruction()->source());
        __ LeaveDartFrame();
        __ set_constant_pool_allowed(true);
      __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);
          instruction()->source(), instruction()->deopt_id(),
          UntaggedPcDescriptors::kOther, instruction()->locs(), env);
        instruction()->in_loop()) {
      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
                                     instruction()->deopt_id(),
                                     InstructionSource());
    compiler->pending_deoptimization_env_ = nullptr;
    if (!using_shared_stub) {
      compiler->RestoreLiveRegisters(locs);

  compiler::Label* osr_entry_label() {
    return &osr_entry_label_;

  compiler::Label osr_entry_label_;

  CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ ldr(TMP, compiler::Address(
  __ b(slow_path->entry_label(), LS);
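    // Bump the function's usage counter and jump to the slow path's OSR
    // entry once the counter crosses the configured optimization threshold
    // scaled by (loop_depth() + 1).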
    const intptr_t configured_optimization_counter_threshold =
        compiler->thread()->isolate_group()->optimization_counter_threshold();
    const int32_t threshold =
        configured_optimization_counter_threshold * (loop_depth() + 1);
    __ LoadFieldFromOffset(TMP, function, Function::usage_counter_offset(),
    __ add(TMP, TMP, compiler::Operand(1));
    __ StoreFieldToOffset(TMP, function, Function::usage_counter_offset(),
    __ CompareImmediate(TMP, threshold);
    __ b(slow_path->osr_entry_label(), GE);
  if (compiler->ForceSlowPathForStackOverflow()) {
    __ b(slow_path->entry_label());
  __ Bind(slow_path->exit_label());

static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
                             BinarySmiOpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();
  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)
  if (locs.in(1).IsConstant()) {
    const Object& constant = locs.in(1).constant();
    ASSERT(constant.IsSmi());
#if !defined(DART_COMPRESSED_POINTERS)
    const intptr_t kCountLimit = 0x3F;
    const intptr_t kCountLimit = 0x1F;
    const intptr_t value = Smi::Cast(constant).Value();
    if (shift_left->can_overflow()) {
  Range* right_range = shift_left->right_range();
  if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {
    const Object& obj = shift_left->left()->BoundConstant();
      const intptr_t left_int = Smi::Cast(obj).Value();
      if (left_int == 0) {
        __ CompareObjectRegisters(right, ZR);
      const intptr_t max_right =
      const bool right_needs_check =
      if (right_needs_check) {
      __ SmiUntag(TMP, right);
  const bool right_needs_check =
  if (!shift_left->can_overflow()) {
    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());
      __ CompareObjectRegisters(right, ZR);
      __ SmiUntag(TMP, right);
      __ SmiUntag(TMP, right);
    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());
    __ SmiUntag(TMP, right);
    const Register temp = locs.temp(0).reg();
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps =
  LocationSummary* summary = new (zone)
  if (op_kind() == Token::kTRUNCDIV) {
  if (op_kind() == Token::kMOD) {
  if (op_kind() == Token::kSHL) {
  compiler::Label* deopt = nullptr;
    ASSERT(constant.IsSmi());
        if (deopt == nullptr) {
        if (deopt == nullptr) {
        const intptr_t value = Smi::Cast(constant).Value();
#if !defined(DART_COMPRESSED_POINTERS)
        if (deopt != nullptr) {
#if !defined(DART_COMPRESSED_POINTERS)
      case Token::kTRUNCDIV: {
        const intptr_t value = Smi::Cast(constant).Value();
        const intptr_t shift_count =
#if !defined(DART_COMPRESSED_POINTERS)
#if !defined(DART_COMPRESSED_POINTERS)
        __ add(temp, left, compiler::Operand(TMP, LSR, 64 - shift_count));
        __ addw(temp, left, compiler::Operand(TMP, LSR, 32 - shift_count));
      case Token::kBIT_AND:
      case Token::kBIT_OR:
      case Token::kBIT_XOR:
#if !defined(DART_COMPRESSED_POINTERS)
        const intptr_t kCountLimit = 0x3F;
        const intptr_t kCountLimit = 0x1F;
        intptr_t value = Smi::Cast(constant).Value();
      case Token::kUSHR: {
        const intptr_t kCountLimit = 0x3F;
        intptr_t value = Smi::Cast(constant).Value();
        if (deopt != nullptr) {
          __ SmiTagAndBranchIfOverflow(result, deopt);
      if (deopt == nullptr) {
      if (deopt == nullptr) {
#if !defined(DART_COMPRESSED_POINTERS)
      if (deopt != nullptr) {
#if !defined(DART_COMPRESSED_POINTERS)
    case Token::kBIT_AND: {
    case Token::kBIT_OR: {
    case Token::kBIT_XOR: {
    case Token::kTRUNCDIV: {
#if !defined(DART_COMPRESSED_POINTERS)
      __ CompareImmediate(result, 0x4000000000000000LL);
      compiler::Label done;
#if !defined(DART_COMPRESSED_POINTERS)
      const intptr_t kCountLimit = 0x3F;
      const intptr_t kCountLimit = 0x1F;
      __ LoadImmediate(TMP2, kCountLimit);
    case Token::kUSHR: {
      const intptr_t kCountLimit = 0x3F;
      compiler::Label done;
      __ LoadImmediate(TMP2, kCountLimit);
      if (deopt != nullptr) {
        __ SmiTagAndBranchIfOverflow(result, deopt);
  ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  if (this->left()->definition() == this->right()->definition()) {
    __ BranchIfSmi(left, deopt);
  } else if (left_cid == kSmiCid) {
  } else if (right_cid == kSmiCid) {
    __ BranchIfSmi(left, deopt);
    __ BranchIfSmi(TMP, deopt);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone) LocationSummary(
    case kUnboxedDouble:
      __ StoreDFieldToOffset(value, out_reg, ValueOffset());
      __ StoreDFieldToOffset(FpuTMP, out_reg, ValueOffset());
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      __ StoreQFieldToOffset(value, out_reg, ValueOffset());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  const bool is_floating_point =
  LocationSummary* summary = new (zone)

void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
    case kUnboxedInt64: {
      __ ldr(result, compiler::FieldAddress(box, ValueOffset()));
    case kUnboxedDouble: {
      __ LoadDFieldFromOffset(result, box, ValueOffset());
    case kUnboxedFloat: {
      __ LoadDFieldFromOffset(result, box, ValueOffset());
    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4: {
      __ LoadQFieldFromOffset(result, box, ValueOffset());

void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
    case kUnboxedInt64: {
    case kUnboxedDouble: {
      __ SmiUntag(TMP, box);
#if !defined(DART_COMPRESSED_POINTERS)

void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {

void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
#if !defined(DART_COMPRESSED_POINTERS)
  const bool kMayAllocateMint = false;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = kMayAllocateMint ? 1 : 0;
  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,
  if (kMayAllocateMint) {
#if !defined(DART_COMPRESSED_POINTERS)
  compiler::Label done;
  __ TestImmediate(value, 0xC0000000);
  const intptr_t kNumInputs = 1;
  const bool stubs_in_vm_isolate =
      object_store->allocate_mint_with_fpu_regs_stub()
          ->InVMIsolateHeap() ||
      object_store->allocate_mint_without_fpu_regs_stub()
          ->InVMIsolateHeap();
  const bool shared_slow_path_call =
  LocationSummary* summary = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,
  } else if (shared_slow_path_call) {
  compiler::Label done;
#if !defined(DART_COMPRESSED_POINTERS)
  __ adds(out, in, compiler::Operand(in));
                 compiler->intrinsic_slow_path_label(),
  } else if (locs()->call_on_shared_slow_path()) {
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);
    auto object_store = compiler->isolate_group()->object_store();
        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
                      : object_store->allocate_mint_without_fpu_regs_stub());
    ASSERT(!locs()->live_registers()->ContainsRegister(
    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
      __ LeaveDartFrame();
      __ set_constant_pool_allowed(true);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  if (value_cid == kSmiCid) {
  } else if (value_cid == kMintCid) {
    compiler::Label done;
    compiler::Label done;
    __ CompareClassId(value, kMintCid);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = needs_temp ? 1 : 0;
  LocationSummary* summary = new (zone)
                                               BranchLabels labels) {
  const bool is_negated = kind() != Token::kEQ;
    case MethodRecognizer::kDouble_getIsNaN: {
      return is_negated ? VC : VS;
    case MethodRecognizer::kDouble_getIsInfinite: {
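      // Clear the sign bit and compare against the bit pattern of +infinity
      // (exponent all ones, zero mantissa); equality means the value is
      // +/-infinity.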
      __ AndImmediate(temp, temp, 0x7FFFFFFFFFFFFFFFLL);
      __ CompareImmediate(temp, 0x7FF0000000000000LL);
      return is_negated ? NE : EQ;
    case MethodRecognizer::kDouble_getIsNegative: {
      compiler::Label not_zero;
      __ b(is_negated ? labels.true_label : labels.false_label, VS);
      __ CompareImmediate(temp, 0);
      return is_negated ? GE : LT;

#define DEFINE_EMIT(Name, Args)                                               \
  static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr,     \
                         PP_APPLY(PP_UNPACK, Args))

#define SIMD_OP_FLOAT_ARITH(V, Name, op)                                      \
  V(Float32x4##Name, op##s)                                                   \
  V(Float64x2##Name, op##d)

#define SIMD_OP_SIMPLE_BINARY(V)                                              \
  SIMD_OP_FLOAT_ARITH(V, Add, vadd)                                           \
  SIMD_OP_FLOAT_ARITH(V, Sub, vsub)                                           \
  SIMD_OP_FLOAT_ARITH(V, Mul, vmul)                                           \
  SIMD_OP_FLOAT_ARITH(V, Div, vdiv)                                           \
  SIMD_OP_FLOAT_ARITH(V, Min, vmin)                                           \
  SIMD_OP_FLOAT_ARITH(V, Max, vmax)                                           \
  V(Int32x4Add, vaddw)                                                        \
  V(Int32x4Sub, vsubw)                                                        \
  V(Int32x4BitAnd, vand)                                                      \
  V(Int32x4BitOr, vorr)                                                       \
  V(Int32x4BitXor, veor)                                                      \
  V(Float32x4Equal, vceqs)                                                    \
  V(Float32x4GreaterThan, vcgts)                                              \
  V(Float32x4GreaterThanOrEqual, vcges)

  switch (instr->kind()) {
#define EMIT(Name, op)                                                        \
  case SimdOpInstr::k##Name:                                                  \
    __ op(result, left, right);                                               \
    SIMD_OP_SIMPLE_BINARY(EMIT)
    case SimdOpInstr::kFloat32x4ShuffleMix:
    case SimdOpInstr::kInt32x4ShuffleMix: {
      const intptr_t mask = instr->mask();
      __ vinss(result, 0, left, (mask >> 0) & 0x3);
      __ vinss(result, 1, left, (mask >> 2) & 0x3);
      __ vinss(result, 2, right, (mask >> 4) & 0x3);
      __ vinss(result, 3, right, (mask >> 6) & 0x3);
    case SimdOpInstr::kFloat32x4NotEqual:
    case SimdOpInstr::kFloat32x4LessThan:
    case SimdOpInstr::kFloat32x4LessThanOrEqual:
    case SimdOpInstr::kFloat32x4Scale:
    case SimdOpInstr::kFloat64x2FromDoubles:
    case SimdOpInstr::kFloat64x2Scale:
      __ vdupd(VTMP, right, 0);

#define SIMD_OP_SIMPLE_UNARY(V)                                               \
  SIMD_OP_FLOAT_ARITH(V, Sqrt, vsqrt)                                         \
  SIMD_OP_FLOAT_ARITH(V, Negate, vneg)                                        \
  SIMD_OP_FLOAT_ARITH(V, Abs, vabs)                                           \
  V(Float32x4Reciprocal, VRecps)                                              \
  V(Float32x4ReciprocalSqrt, VRSqrts)

  switch (instr->kind()) {
#define EMIT(Name, op)                                                        \
  case SimdOpInstr::k##Name:                                                  \
    __ op(result, value);                                                     \
    SIMD_OP_SIMPLE_UNARY(EMIT)
    case SimdOpInstr::kFloat32x4GetX:
    case SimdOpInstr::kFloat32x4GetY:
    case SimdOpInstr::kFloat32x4GetZ:
    case SimdOpInstr::kFloat32x4GetW:
    case SimdOpInstr::kInt32x4Shuffle:
    case SimdOpInstr::kFloat32x4Shuffle: {
      const intptr_t mask = instr->mask();
      } else if (mask == 0x55) {
      } else if (mask == 0xAA) {
      } else if (mask == 0xFF) {
        for (intptr_t i = 0; i < 4; i++) {
    case SimdOpInstr::kFloat32x4Splat:
    case SimdOpInstr::kFloat64x2GetX:
    case SimdOpInstr::kFloat64x2GetY:
    case SimdOpInstr::kFloat64x2Splat:
    case SimdOpInstr::kFloat64x2ToFloat32x4:
    case SimdOpInstr::kFloat32x4ToFloat64x2:
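
// Builds a lane sign mask: each lane's sign bit (bit 31) is shifted down and
// ORed into consecutive bit positions of the integer result.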
DEFINE_EMIT(Simd32x4GetSignMask,
  __ LsrImmediate(temp, temp, 31);
  __ orr(out, out, compiler::Operand(temp, LSL, 1));
  __ LsrImmediate(temp, temp, 31);
  __ orr(out, out, compiler::Operand(temp, LSL, 2));
  __ LsrImmediate(temp, temp, 31);
  __ orr(out, out, compiler::Operand(temp, LSL, 3));
    Float32x4FromDoubles,

DEFINE_EMIT(Float32x4With,
  __ fcvtsd(VTMP, replacement);
  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4WithX:
    case SimdOpInstr::kFloat32x4WithY:
    case SimdOpInstr::kFloat32x4WithZ:
    case SimdOpInstr::kFloat32x4WithW:

DEFINE_EMIT(Simd32x4ToSimd32x4, (SameAsFirstInput, VRegister value)) {

DEFINE_EMIT(Float64x2With,
  switch (instr->kind()) {
    case SimdOpInstr::kFloat64x2WithX:
      __ vinsd(left, 0, right, 0);
    case SimdOpInstr::kFloat64x2WithY:
      __ vinsd(left, 1, right, 0);

DEFINE_EMIT(Int32x4FromBools,
            Temp<Register> temp)) {
  __ LoadImmediate(temp, 0xffffffff);
  for (intptr_t i = 0; i < 4; i++) {
    __ CompareObjectRegisters(vs[i], TMP2);
  switch (instr->kind()) {
    case SimdOpInstr::kInt32x4GetFlagX:
    case SimdOpInstr::kInt32x4GetFlagY:
    case SimdOpInstr::kInt32x4GetFlagZ:
    case SimdOpInstr::kInt32x4GetFlagW:

DEFINE_EMIT(Int32x4Select,
            Temp<VRegister> temp)) {
  __ vmov(temp, mask);
  __ vnot(temp, temp);
  __ vand(mask, mask, trueValue);
  __ vand(temp, temp, falseValue);
  __ vorr(out, mask, temp);

DEFINE_EMIT(Int32x4WithFlag,
  __ LoadImmediate(TMP, 0xffffffff);
  switch (instr->kind()) {
    case SimdOpInstr::kInt32x4WithFlagX:
    case SimdOpInstr::kInt32x4WithFlagY:
    case SimdOpInstr::kInt32x4WithFlagZ:
    case SimdOpInstr::kInt32x4WithFlagW:

#define SIMD_OP_VARIANTS(CASE, ____)                                          \
  SIMD_OP_SIMPLE_BINARY(CASE)                                                 \
  CASE(Float32x4ShuffleMix)                                                   \
  CASE(Int32x4ShuffleMix)                                                     \
  CASE(Float32x4NotEqual)                                                     \
  CASE(Float32x4LessThan)                                                     \
  CASE(Float32x4LessThanOrEqual)                                              \
  CASE(Float32x4Scale)                                                        \
  CASE(Float64x2FromDoubles)                                                  \
  CASE(Float64x2Scale)                                                        \
  ____(SimdBinaryOp)                                                          \
  SIMD_OP_SIMPLE_UNARY(CASE)                                                  \
  CASE(Float32x4GetX)                                                         \
  CASE(Float32x4GetY)                                                         \
  CASE(Float32x4GetZ)                                                         \
  CASE(Float32x4GetW)                                                         \
  CASE(Int32x4Shuffle)                                                        \
  CASE(Float32x4Shuffle)                                                      \
  CASE(Float32x4Splat)                                                        \
  CASE(Float64x2GetX)                                                         \
  CASE(Float64x2GetY)                                                         \
  CASE(Float64x2Splat)                                                        \
  CASE(Float64x2ToFloat32x4)                                                  \
  CASE(Float32x4ToFloat64x2)                                                  \
  CASE(Float32x4GetSignMask)                                                  \
  CASE(Int32x4GetSignMask)                                                    \
  ____(Simd32x4GetSignMask)                                                   \
  CASE(Float32x4FromDoubles)                                                  \
  ____(Float32x4FromDoubles)                                                  \
  CASE(Float32x4Zero)                                                         \
  CASE(Float64x2Zero)                                                         \
  CASE(Float32x4Clamp)                                                        \
  ____(Float32x4Clamp)                                                        \
  CASE(Float64x2Clamp)                                                        \
  ____(Float64x2Clamp)                                                        \
  CASE(Float32x4WithX)                                                        \
  CASE(Float32x4WithY)                                                        \
  CASE(Float32x4WithZ)                                                        \
  CASE(Float32x4WithW)                                                        \
  ____(Float32x4With)                                                         \
  CASE(Float32x4ToInt32x4)                                                    \
  CASE(Int32x4ToFloat32x4)                                                    \
  ____(Simd32x4ToSimd32x4)                                                    \
  CASE(Float64x2GetSignMask)                                                  \
  ____(Float64x2GetSignMask)                                                  \
  CASE(Float64x2WithX)                                                        \
  CASE(Float64x2WithY)                                                        \
  ____(Float64x2With)                                                         \
  CASE(Int32x4FromInts)                                                       \
  ____(Int32x4FromInts)                                                       \
  CASE(Int32x4FromBools)                                                      \
  ____(Int32x4FromBools)                                                      \
  CASE(Int32x4GetFlagX)                                                       \
  CASE(Int32x4GetFlagY)                                                       \
  CASE(Int32x4GetFlagZ)                                                       \
  CASE(Int32x4GetFlagW)                                                       \
  ____(Int32x4GetFlag)                                                        \
  CASE(Int32x4Select)                                                         \
  ____(Int32x4Select)                                                         \
  CASE(Int32x4WithFlagX)                                                      \
  CASE(Int32x4WithFlagY)                                                      \
  CASE(Int32x4WithFlagZ)                                                      \
  CASE(Int32x4WithFlagW)                                                      \
  ____(Int32x4WithFlag)

#define CASE(Name, ...) case k##Name:
    return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
    SIMD_OP_VARIANTS(CASE, EMIT)
#define CASE(Name, ...) case k##Name:
    InvokeEmitter(compiler, this, &Emit##Name);                               \
    SIMD_OP_VARIANTS(CASE, EMIT)
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::LeafRuntimeScope rt(compiler->assembler(),
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
         (op_kind() == MethodRecognizer::kMathMax));
  const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
    __ b(&returns_nan, VS);
        is_min ? TokenKindToDoubleCondition(Token::kLTE)
               : TokenKindToDoubleCondition(Token::kGTE);
    __ b(&done, double_condition);
    __ CompareImmediate(TMP, 0);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
    case Token::kNEGATE: {
      compiler::Label* deopt =
    case Token::kBIT_NOT:
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
    case Token::kNEGATE:
    case Token::kSQUARE:
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
#if !defined(DART_COMPRESSED_POINTERS)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone) LocationSummary(
  DoubleToIntegerSlowPath* slow_path =
      new DoubleToIntegerSlowPath(this, value_double);
  compiler->AddSlowPathCode(slow_path);
  __ fcmpd(value_double, value_double);
  __ b(slow_path->entry_label(), VS);
    case MethodRecognizer::kDoubleToInteger:
    case MethodRecognizer::kDoubleFloorToInt:
    case MethodRecognizer::kDoubleCeilToInt:
#if !defined(DART_COMPRESSED_POINTERS)
  __ CompareImmediate(result, 0xC000000000000000);
  __ b(slow_path->entry_label(), MI);
  __ b(slow_path->entry_label(), NE);
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  compiler::Label* deopt =
#if !defined(DART_COMPRESSED_POINTERS)
  __ CompareImmediate(result, 0xC000000000000000);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result = new (zone)
  const intptr_t kNumTemps =
  LocationSummary* result = new (zone)

static void InvokeDoublePow(FlowGraphCompiler* compiler,
                            InvokeMathCFunctionInstr* instr) {
  ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
  const intptr_t kInputCount = 2;
  ASSERT(instr->InputCount() == kInputCount);
  LocationSummary* locs = instr->locs();
  const VRegister exp = locs->in(1).fpu_reg();
  const VRegister saved_base = locs->temp(0).fpu_reg();
  compiler::Label skip_call, try_sqrt, check_base, return_nan, do_pow;
  __ fmovdd(saved_base, base);
  __ b(&check_base, VS);
  __ b(&skip_call, EQ);
  compiler::Label return_base;
  __ b(&return_base, EQ);
  __ LoadDImmediate(VTMP, 2.0);
  compiler::Label return_base_times_2;
  __ b(&return_base_times_2, EQ);
  __ LoadDImmediate(VTMP, 3.0);
  __ b(&check_base, NE);
  __ fmuld(result, saved_base, saved_base);
  __ Bind(&return_base_times_2);
  __ fmuld(result, saved_base, saved_base);
  __ b(&return_nan, VS);
  __ b(&skip_call, EQ);
  __ fcmpd(saved_base, exp);
  __ b(&try_sqrt, VC);
  compiler::Label return_zero;
  __ fcmpdz(saved_base);
  __ b(&return_zero, EQ);
  __ fmovdd(base, saved_base);
    compiler::LeafRuntimeScope rt(compiler->assembler(),
    rt.Call(instr->TargetFunction(), kInputCount);
    compiler::LeafRuntimeScope rt(compiler->assembler(),
  const intptr_t kNumInputs = 1;
  LocationSummary* summary =
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  compiler::Label* deopt =
  const Register result_div = pair->At(0).reg();
  const Register result_mod = pair->At(1).reg();
  __ CompareObjectRegisters(right, ZR);
  __ SmiUntag(result_mod, left);
  __ SmiUntag(TMP, right);
#if !defined(DART_COMPRESSED_POINTERS)
  __ sdiv(result_div, result_mod, TMP);
  __ CompareImmediate(result_div, 0x4000000000000000);
  __ sdivw(result_div, result_mod, TMP);
  __ SmiTag(result_div);
  __ SmiTag(result_mod);
  compiler::Label done;
  __ CompareObjectRegisters(result_mod, ZR);
  __ sub(result_mod, result_mod, compiler::Operand(right));
  __ add(result_mod, result_mod, compiler::Operand(right));
  __ CompareObjectRegisters(right, ZR);

static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
  __ LoadImmediate(TMP2, compiler::Immediate(0x2d51));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone)
  compiler::Label done, hash_double;
  __ AndImmediate(TMP, TMP, 0x7FF0000000000000LL);
  __ CompareImmediate(TMP, 0x7FF0000000000000LL);
  __ b(&hash_double, EQ);
  __ scvtfdx(temp_double, TMP);
  __ b(&hash_double, NE);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 1;
  const bool need_mask_temp = IsBitTest();
  const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
  LocationSummary* summary = new (zone)
  if (need_mask_temp) {
                                        compiler::Label* deopt) {
  __ CompareObject(locs()->in(0).reg(), Object::null_object());
                                  compiler::Label* deopt) {
  __ AddImmediate(biased_cid, -min);
  __ CompareImmediate(biased_cid, max - min);
  __ LoadImmediate(bit_reg, 1);
  __ lslv(bit_reg, bit_reg, biased_cid);
  __ TestImmediate(bit_reg, mask);
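
// Emits a class-id check against the range [cid_start, cid_end]: a single
// cid is checked with one compare against the biased cid, while a wider
// range first re-biases the cid and then does a range compare; no_match
// branches to deopt.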
5222int CheckClassInstr::EmitCheckCid(FlowGraphCompiler*
compiler,
5227 compiler::Label* is_ok,
5228 compiler::Label* deopt,
5229 bool use_near_jump) {
5232 if (cid_start == cid_end) {
5233 __ CompareImmediate(biased_cid, cid_start - bias);
5239 __ AddImmediate(biased_cid, bias - cid_start);
5241 __ CompareImmediate(biased_cid, cid_end - cid_start);
5246 __ b(deopt, no_match);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Label* deopt =

  if (cids_.IsSingleCid()) {

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Label* deopt =

  __ BranchIfNotSmi(value, deopt);
  ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);

  __ CompareObject(value_reg, Object::null_object());
  __ BranchIf(EQUAL, slow_path->entry_label());
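// CheckArrayBound: when both length and index are compile-time Smi constants
// the check either folds away entirely or is statically known to fail (the
// ASSERT below documents the failing case). Otherwise the non-constant
// operand is compared against the constant, with a Smi check on the index
// first when its class is not already known to be Smi.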
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  compiler::Label* deopt =

  if (length_loc.IsConstant() && index_loc.IsConstant()) {
    if ((Smi::Cast(length_loc.constant()).Value() >
         Smi::Cast(index_loc.constant()).Value()) &&
        (Smi::Cast(index_loc.constant()).Value() >= 0)) {

    ASSERT((Smi::Cast(length_loc.constant()).Value() <=
            Smi::Cast(index_loc.constant()).Value()) ||
           (Smi::Cast(index_loc.constant()).Value() < 0));

  if (index_loc.IsConstant()) {
    const Smi& index = Smi::Cast(index_loc.constant());

  } else if (length_loc.IsConstant()) {
    const Smi& length = Smi::Cast(length_loc.constant());

    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);

    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);
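// CheckWritable: loads the object's tags word and uses tbnz to branch to a
// shared WriteErrorSlowPath when the immutable bit is set, so writes to
// unmodifiable objects raise an error instead of mutating them.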
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,

  WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);

          compiler::FieldAddress(locs()->in(0).reg(),

  __ tbnz(slow_path->entry_label(), TMP,
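// Slow path shared by unboxed int64 division and modulo: throws
// IntegerDivisionByZeroException when the divisor can be zero, and for kMOD
// adjusts a negative remainder by +/- the divisor (csel picks the corrected
// value based on the divisor's sign) so the remainder is always non-negative.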
class Int64DivideSlowPath : public ThrowErrorSlowPathCode {
 public:
  Int64DivideSlowPath(BinaryInt64OpInstr* instruction,
                      Range* divisor_range,
      : ThrowErrorSlowPathCode(instruction,
                               kIntegerDivisionByZeroExceptionRuntimeEntry),
        is_mod_(instruction->op_kind() == Token::kMOD),
        divisor_range_(divisor_range),
        adjust_sign_label_() {}

  void EmitNativeCode(FlowGraphCompiler* compiler) override {
    if (has_divide_by_zero()) {

      __ Comment("slow path %s operation (no throw)", name());

    if (has_adjust_sign()) {
      __ Bind(adjust_sign_label());

        __ CompareRegisters(divisor_, ZR);
        __ sub(tmp_, out_, compiler::Operand(divisor_));
        __ add(out_, out_, compiler::Operand(divisor_));
        __ csel(out_, tmp_, out_, LT);
      } else if (divisor_range_->IsPositive()) {

        __ add(out_, out_, compiler::Operand(divisor_));

        __ sub(out_, out_, compiler::Operand(divisor_));

  const char* name() override { return "int64 divide"; }

  bool has_adjust_sign() { return is_mod_; }

  bool is_needed() { return has_divide_by_zero() || has_adjust_sign(); }

  compiler::Label* adjust_sign_label() {
    ASSERT(has_adjust_sign());
    return &adjust_sign_label_;
  }

 private:
  Range* divisor_range_;
  compiler::Label adjust_sign_label_;
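// Shared emitter for unboxed int64 kMOD / kTRUNCDIV. For a constant divisor
// with |divisor| >= 2 (at higher optimization levels) it emits a multiply by a
// precomputed "magic" constant instead of sdiv. Otherwise it emits, roughly,
//   tmp = left ~/ right;  out = left - tmp * right   (sdiv + msub)
// for kMOD, or a plain sdiv for kTRUNCDIV, with the division-by-zero and
// remainder-sign cases handled out of line by Int64DivideSlowPath above.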
static void EmitInt64ModTruncDiv(FlowGraphCompiler* compiler,
                                 BinaryInt64OpInstr* instruction,

  ASSERT(op_kind == Token::kMOD || op_kind == Token::kTRUNCDIV);

  if (FLAG_optimization_level <= 2) {

  } else if (auto c = instruction->right()->definition()->AsConstant()) {
    if (c->value().IsInteger()) {
      const int64_t divisor = Integer::Cast(c->value()).AsInt64Value();
      if (divisor <= -2 || divisor >= 2) {

        compiler::Label pos;

        if (divisor > 0 && magic < 0) {

        } else if (divisor < 0 && magic > 0) {

        if (op_kind == Token::kTRUNCDIV) {

        __ LoadImmediate(TMP, divisor);

  Range* right_range = instruction->right()->definition()->range();
  Int64DivideSlowPath* slow_path =
      new (Z) Int64DivideSlowPath(instruction, right, right_range, tmp, out);

  if (slow_path->has_divide_by_zero()) {
    __ cbz(slow_path->entry_label(), right);

  if (op_kind == Token::kMOD) {
    __ sdiv(tmp, left, right);
    __ msub(out, tmp, right, left);

    __ b(slow_path->adjust_sign_label(), LT);

    __ sdiv(out, left, right);

  if (slow_path->is_needed()) {
    __ Bind(slow_path->exit_label());
    compiler->AddSlowPathCode(slow_path);
    case Token::kTRUNCDIV: {
      const intptr_t kNumInputs = 2;
      const intptr_t kNumTemps = (op_kind() == Token::kMOD) ? 1 : 0;
      LocationSummary* summary = new (zone) LocationSummary(

      if (kNumTemps == 1) {

      const intptr_t kNumInputs = 2;
      const intptr_t kNumTemps = 0;
      LocationSummary* summary = new (zone) LocationSummary(

  } else if (op_kind() == Token::kMUL) {

    if (right.IsConstant()) {

    if (right.IsConstant()) {

      case Token::kBIT_AND:

      case Token::kBIT_OR:

      case Token::kBIT_XOR:

    compiler::Operand r = compiler::Operand(right.reg());

      case Token::kBIT_AND:

      case Token::kBIT_OR:

      case Token::kBIT_XOR:
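// Helpers that emit the body of int64 and uint32 shifts, either by a
// compile-time constant amount (AsrImmediate / LsrImmediate / LslImmediate; a
// uint32 shift by 32 or more simply loads 0) or by a register amount
// (asrv / lsrv / lslv and their 32-bit w-forms).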
static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
                                     const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();

      __ AsrImmediate(out, left,

    case Token::kUSHR: {

      __ LsrImmediate(out, left, shift);

      __ LslImmediate(out, left, shift);

static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,

      __ asrv(out, left, right);

    case Token::kUSHR: {
      __ lsrv(out, left, right);

      __ lslv(out, left, right);

static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
                                      const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();

      __ LoadImmediate(out, 0);

static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,

      __ lsrvw(out, left, right);

      __ lslvw(out, left, right);
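// Shift instructions with a non-constant count: when the count is not
// provably in range, an out-of-range or negative count either branches to a
// slow path that throws ArgumentError (unboxed int64 counts) or deoptimizes
// (speculative Smi counts). The limit checked below is kShiftCountLimit;
// uint32 shifts additionally compare the count against 31.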
class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "int64 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    const Register left = instruction()->locs()->in(0).reg();
    const Register out = instruction()->locs()->out(0).reg();

    compiler::Label throw_error;

    switch (instruction()->AsShiftInt64Op()->op_kind()) {
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(

                               locs()->in(1).constant());

    ShiftInt64OpSlowPath* slow_path = nullptr;

      slow_path = new (Z) ShiftInt64OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);
      __ CompareImmediate(shift, kShiftCountLimit);
      __ b(slow_path->entry_label(), HI);

    if (slow_path != nullptr) {
      __ Bind(slow_path->exit_label());

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

                               locs()->in(1).constant());

    __ SmiUntag(TMP, shift);

    compiler::Label* deopt =

    __ CompareImmediate(shift, kShiftCountLimit);
class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "uint32 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(

                               locs()->in(1).constant());

    const bool shift_count_in_range =

    if (!shift_count_in_range) {
      ShiftUint32OpSlowPath* slow_path = new (Z) ShiftUint32OpSlowPath(this);
      compiler->AddSlowPathCode(slow_path);

    if (!shift_count_in_range) {
      __ CompareImmediate(right, 31);

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

                               locs()->in(1).constant());

    const bool shift_count_in_range =

    if (!shift_count_in_range) {

    compiler::Label* deopt =

    if (!shift_count_in_range) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

    case Token::kBIT_NOT:

    case Token::kNEGATE:
      __ sub(out, ZR, compiler::Operand(left));

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Operand r = compiler::Operand(right);

    case Token::kBIT_AND:

    case Token::kBIT_OR:

    case Token::kBIT_XOR:

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  if (from() == kUntagged || to() == kUntagged) {

  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
  } else if (to() == kUnboxedInt64) {

    ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);

  const bool is_nop_conversion =

  if (is_nop_conversion) {

  compiler::Label* deopt =

  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {

  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {

  } else if (from() == kUnboxedInt64) {
    if (to() == kUnboxedInt32) {

  } else if (to() == kUnboxedInt64) {
    if (from() == kUnboxedUint32) {
  LocationSummary* summary =
      new (zone) LocationSummary(zone, InputCount(),

    case kUnboxedDouble:

    case kUnboxedDouble:

    case kUnboxedInt32: {

      __ fmovsr(to_reg, from_reg);

    case kUnboxedFloat: {

      __ fmovrs(to_reg, from_reg);

    case kUnboxedInt64: {

      __ fmovdr(to_reg, from_reg);

    case kUnboxedDouble: {

      __ fmovrd(to_reg, from_reg);
  if (entry != nullptr) {
    if (!compiler->CanFallThroughTo(entry)) {
      FATAL("Checked function entry must have no offset");

    if (!compiler->CanFallThroughTo(entry)) {

  if (FLAG_reorder_basic_blocks) {

                           InstructionSource());
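// IndirectGoto: loads the typed-data array of jump offsets, reads the selected
// 32-bit offset, materializes the current code entry address with adr
// (adjusted by the code size at that point), adds the offset, and performs an
// indirect branch with br.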
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 2;

  LocationSummary* summary = new (zone)

  __ LoadObject(offset_reg, offsets_);
  const auto element_address = __ ElementAddressForRegIndex(
      false, kTypedDataInt32ArrayCid,
      false, offset_reg, index_reg, TMP);

  const intptr_t entry_offset = __ CodeSize();

  __ adr(target_address_reg, compiler::Immediate(-entry_offset));

  __ adr(target_address_reg, compiler::Immediate(0));
  __ AddImmediate(target_address_reg, -entry_offset);

  __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));

  __ br(target_address_reg);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;

  LocationSummary* locs = new (zone)

  LocationSummary* locs = new (zone)
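// StrictCompare (identity comparison): against certain constants the
// comparison can be lowered to cbz/tbz directly (EmitComparisonCodeRegConstant
// consults CanUseCbzTbzForComparison); otherwise the generic path materializes
// true/false through the is_true / is_false labels, or branches directly on
// the condition when the comparison feeds a Branch.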
Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
    BranchLabels labels,
    const Object& obj) {
  Condition orig_cond = (kind() == Token::kEQ_STRICT) ? EQ : NE;

      CanUseCbzTbzForComparison(compiler, reg, orig_cond, labels)) {

  compiler::Label is_true, is_false;
  BranchLabels labels = {&is_true, &is_false, &is_false};

  if (is_true.IsLinked() || is_false.IsLinked()) {

    EmitBranchOnCondition(compiler, true_condition, labels);

    compiler::Label done;

                                     BranchInstr* branch) {
  BranchLabels labels = compiler->CreateBranchLabels(branch);

  EmitBranchOnCondition(compiler, true_condition, labels);
  const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {

  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,

  __ BranchLinkPatchable(StubCode::DebugStepCheck());