#if defined(TARGET_ARCH_ARM)

#define __ compiler->assembler()->
#define Z (compiler->zone())

                                             const Instruction* instr,
                                             LocationSummary* locs) {

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = ((representation() == kUnboxedDouble) ? 1 : 0);
  LocationSummary* locs = new (zone)

    case kUnboxedInt64: {

      __ LoadFromOffset(out_lo, out_hi, offset());

    case kUnboxedDouble: {

  ASSERT(instr->RequiredInputRepresentation(

  __ add(TMP, instr->base_reg(), compiler::Operand(index, LSL, 1));
  __ str(value, compiler::Address(TMP, instr->offset()));

               Fixed<Register, ARGS_DESC_REG>,
               Temp<Register> temp)) {
  compiler->EmitTailCallToStub(instr->code());

  __ set_constant_pool_allowed(true);
static constexpr intptr_t kMaxMemoryCopyElementSize =
    2 * compiler::target::kWordSize;

static constexpr intptr_t kMemoryCopyPayloadTemps = 2;
  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps =
      kMemoryCopyPayloadTemps +
      (element_size_ >= kMaxMemoryCopyElementSize ? 1 : 0);
  LocationSummary* locs = new (zone)

  for (intptr_t i = 0; i < kNumTemps; i++) {

                                     intptr_t num_elements,

  const intptr_t num_bytes = num_elements * element_size_;

  const intptr_t mov_size =
      Utils::Minimum(element_size_, kMaxMemoryCopyElementSize);
  const intptr_t mov_repeat = num_bytes / mov_size;
  ASSERT(num_bytes % mov_size == 0);
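  // The unrolled copy below moves one element per step: at the maximum
  // element size it uses ldm/stm on a register pair, while smaller sizes fall
  // back to ldrb/ldrh/ldr through TMP, indexing from the end of the regions
  // when the copy runs in reverse.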
  if (mov_size == kMaxMemoryCopyElementSize) {

    __ AddImmediate(src_reg, num_bytes);
    __ AddImmediate(dest_reg, num_bytes);

    for (intptr_t i = 0; i < mov_repeat; i++) {
      __ ldm(block_mode, src_reg, temp_regs);
      __ stm(block_mode, dest_reg, temp_regs);
  for (intptr_t i = 0; i < mov_repeat; i++) {
    const intptr_t byte_index =
        (reversed ? mov_repeat - (i + 1) : i) * mov_size;

        __ ldrb(TMP, compiler::Address(src_reg, byte_index));
        __ strb(TMP, compiler::Address(dest_reg, byte_index));

        __ ldrh(TMP, compiler::Address(src_reg, byte_index));
        __ strh(TMP, compiler::Address(dest_reg, byte_index));

        __ ldr(TMP, compiler::Address(src_reg, byte_index));
        __ str(TMP, compiler::Address(dest_reg, byte_index));

                                  compiler::Label* done) {
  __ BranchIfZero(length_reg, done);

static void CopyUpToWordMultiple(FlowGraphCompiler* compiler,

                                 compiler::Label* done) {

  const intptr_t base_shift =

  intptr_t tested_bits = 0;

  __ Comment("Copying until region is a multiple of word size");
    const intptr_t bytes = 1 << bit;
    const intptr_t tested_bit = bit + base_shift;
    tested_bits |= (1 << tested_bit);
    __ tst(length_reg, compiler::Operand(1 << tested_bit));
    auto const sz = OperandSizeFor(bytes);

  __ bics(length_reg, length_reg, compiler::Operand(tested_bits));

                                  compiler::Label* done,
                                  compiler::Label* copy_forwards) {
  const bool reversed = copy_forwards != nullptr;

    __ add(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift));

    __ add(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift));

    __ CompareRegisters(dest_reg, src_reg);
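    // src_reg now points one past the end of the source region; comparing it
    // with dest_reg picks the copy direction for the overlapping case.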
      __ sub(src_reg, src_reg, compiler::Operand(length_reg, ASR, -shift),

      __ sub(src_reg, src_reg, compiler::Operand(length_reg, LSL, shift),

    __ add(dest_reg, dest_reg, compiler::Operand(length_reg, ASR, -shift));

    __ add(dest_reg, dest_reg, compiler::Operand(length_reg, LSL, shift));

  CopyUpToWordMultiple(compiler, dest_reg, src_reg, length_reg, element_size_,
                       unboxed_inputs_, reversed, done);

  const auto load_mode =
      reversed ? compiler::Address::NegPreIndex : compiler::Address::PostIndex;
  const auto load_multiple_mode =
      reversed ? BlockAddressMode::DB_W : BlockAddressMode::IA_W;

  const intptr_t loop_subtract =

  __ Comment("Copying by multiples of word size");
  compiler::Label loop;
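  // Main loop: each iteration moves one word multiple (one, two, or four
  // words depending on element_size_) using post-increment or pre-decrement
  // addressing, then subtracts loop_subtract from the length until it
  // reaches zero.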
  switch (element_size_) {

      __ ldr(TMP, compiler::Address(src_reg, 4, load_mode));
      __ str(TMP, compiler::Address(dest_reg, 4, load_mode));

      __ ldm(load_multiple_mode, src_reg, temp_regs);
      __ stm(load_multiple_mode, dest_reg, temp_regs);

      __ ldm(load_multiple_mode, src_reg, temp_regs);
      __ stm(load_multiple_mode, dest_reg, temp_regs);
      __ ldm(load_multiple_mode, src_reg, temp_regs);
      __ stm(load_multiple_mode, dest_reg, temp_regs);

  __ subs(length_reg, length_reg, compiler::Operand(loop_subtract));

void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,

  if (array_rep != kTagged) {

    case kOneByteStringCid:

    case kTwoByteStringCid:

  ASSERT(start_loc.IsRegister() || start_loc.IsConstant());
  if (start_loc.IsConstant()) {
    const auto& constant = start_loc.constant();
    ASSERT(constant.IsInteger());
    const int64_t start_value = Integer::Cast(constant).AsInt64Value();

        Utils::MulWithWrapAround<intptr_t>(start_value, element_size_), offset);
    __ AddImmediate(payload_reg, array_reg, add_value);

  const Register start_reg = start_loc.reg();

    __ add(payload_reg, array_reg, compiler::Operand(start_reg, ASR, -shift));

    __ add(payload_reg, array_reg, compiler::Operand(start_reg, LSL, shift));

  __ AddImmediate(payload_reg, offset);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)
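// Pushes outgoing arguments onto the stack in batches: consecutive registers
// headed for consecutive stack slots accumulate in pending_regs_ and are
// flushed with a single stm once the batch reaches stack slot 0; a single
// pending register is stored directly instead.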
class ArgumentsMover : public ValueObject {

    if (pending_regs_ != 0) {
      if (is_single_register_) {

            lowest_register_, SP,

      if (lowest_register_sp_relative_index_ == 0) {
        __ stm(IA, SP, pending_regs_);

        if (((1 << reg) & pending_regs_) != 0) {

      is_single_register_ = false;

  void MoveRegister(FlowGraphCompiler* compiler,
                    intptr_t sp_relative_index,

    if (pending_regs_ != 0) {

      if (reg < lowest_register_) {
        ASSERT((sp_relative_index + 1) == lowest_register_sp_relative_index_);
        pending_regs_ |= (1 << reg);
        lowest_register_ = reg;
        is_single_register_ = false;
        lowest_register_sp_relative_index_ = sp_relative_index;

    pending_regs_ = (1 << reg);
    lowest_register_ = reg;
    is_single_register_ = true;
    lowest_register_sp_relative_index_ = sp_relative_index;
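  // A scratch register may only be reused if no later move still reads it,
  // so the scan below walks the remaining ParallelMove and MoveArgument
  // instructions and marks every register they consume as busy.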
                                 Instruction* move_arg) {

    for (Instruction* instr = move_arg;; instr = instr->next()) {

      if (ParallelMoveInstr* parallel_move = instr->AsParallelMove()) {
        for (intptr_t i = 0, n = parallel_move->NumMoves(); i < n; ++i) {
          const auto src_loc = parallel_move->MoveOperandsAt(i)->src();
          if (src_loc.IsRegister()) {
            busy |= (1 << src_loc.reg());
          } else if (src_loc.IsPairLocation()) {
            busy |= (1 << src_loc.AsPairLocation()->At(0).reg());
            busy |= (1 << src_loc.AsPairLocation()->At(1).reg());

        ASSERT(instr->IsMoveArgument() || (instr->ArgumentCount() > 0));
        for (intptr_t i = 0, n = instr->locs()->input_count(); i < n; ++i) {
          const auto in_loc = instr->locs()->in(i);
          if (in_loc.IsRegister()) {
            busy |= (1 << in_loc.reg());
          } else if (in_loc.IsPairLocation()) {
            const auto pair_location = in_loc.AsPairLocation();
            busy |= (1 << pair_location->At(0).reg());
            busy |= (1 << pair_location->At(1).reg());

        if (instr->ArgumentCount() > 0) {

    if (pending_regs_ != 0) {

      Register reg = HighestAvailableRegister(busy, lowest_register_);

                "LR should not be allocatable");

  intptr_t lowest_register_sp_relative_index_ = -1;
  bool is_single_register_ = false;

    for (intptr_t i = upper_bound - 1; i >= 0; --i) {
      if ((busy & (1 << i)) == 0) {
  ArgumentsMover pusher;

       move_arg = move_arg->next()->AsMoveArgument()) {

    if (value.IsRegister()) {
      pusher.MoveRegister(compiler, move_arg->location().stack_index(),
    } else if (value.IsPairLocation()) {

      auto pair = move_arg->location().AsPairLocation();

      pusher.MoveRegister(compiler, pair->At(1).stack_index(),
                          value.AsPairLocation()->At(1).reg());
      pusher.MoveRegister(compiler, pair->At(0).stack_index(),
                          value.AsPairLocation()->At(0).reg());
    } else if (value.IsFpuRegister()) {

      if (value.IsConstant()) {
        __ LoadObject(reg, value.constant());

        const intptr_t value_offset = value.ToStackSlotOffset();
        __ LoadFromOffset(reg, value.base_reg(), value_offset);

      pusher.MoveRegister(compiler, move_arg->location().stack_index(), reg);
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  if (locs()->in(0).IsRegister()) {

  } else if (locs()->in(0).IsPairLocation()) {

  if (compiler->parsed_function().function().IsAsyncFunction() ||
      compiler->parsed_function().function().IsAsyncGenerator()) {

    const Code& stub = GetReturnStub(compiler);

  if (!compiler->flow_graph().graph_entry()->NeedsFrame()) {

  compiler::Label stack_ok;
  __ Comment("Stack Check");
  const intptr_t fp_sp_dist =

  __ sub(R2, SP, compiler::Operand(FP));
  __ CompareImmediate(R2, fp_sp_dist);

  __ LeaveDartFrameAndReturn();

  __ set_constant_pool_allowed(true);
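// If-then-else over two integer constants: when one value is zero and the
// other a power of two, the result is built branch-free from a conditional
// move of 1 followed by a shift; otherwise both constants are materialized
// under opposite conditions.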
static bool IsPowerOfTwoKind(intptr_t v1, intptr_t v2) {

  BranchLabels labels = {nullptr, nullptr, nullptr};

  const bool is_power_of_two_kind = IsPowerOfTwoKind(if_true_, if_false_);

  intptr_t true_value = if_true_;
  intptr_t false_value = if_false_;

  if (is_power_of_two_kind) {
    if (true_value == 0) {

    if (true_value == 0) {

      intptr_t temp = true_value;
      true_value = false_value;

  __ mov(result, compiler::Operand(1), true_condition);

  if (is_power_of_two_kind) {
    const intptr_t shift =
        Utils::ShiftForPowerOfTwo(Utils::Maximum(true_value, false_value));

    if (false_value != 0) {

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const Array& arguments_descriptor =

  if (FLAG_precompiled_mode) {

    __ ldr(R2, compiler::FieldAddress(

           compiler::FieldAddress(

  if (!FLAG_precompiled_mode) {

                           UntaggedPcDescriptors::kOther, locs(), env());

  if (!locs()->out(0).IsInvalid()) {
                                      intptr_t pair_index) {
  if (destination.IsRegister()) {

      if (value_.IsSmi() &&

        __ LoadImmediate(destination.reg(), pair_index == 0

      __ LoadObject(destination.reg(), value_);

  } else if (destination.IsFpuRegister()) {

                           Double::Cast(value_).value());

                          Double::Cast(value_).value(), tmp);

      case kUnboxedFloat64x2:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float64x2::Cast(value_).value());

      case kUnboxedFloat32x4:
        __ LoadQImmediate(destination.fpu_reg(),
                          Float32x4::Cast(value_).value());

      case kUnboxedInt32x4:
        __ LoadQImmediate(destination.fpu_reg(), Int32x4::Cast(value_).value());

  } else if (destination.IsDoubleStackSlot()) {

    __ LoadDImmediate(DTMP, Double::Cast(value_).value(), tmp);
    const intptr_t dest_offset = destination.ToStackSlotOffset();
    __ StoreDToOffset(DTMP, destination.base_reg(), dest_offset);
  } else if (destination.IsQuadStackSlot()) {

      case kUnboxedFloat64x2:
        __ LoadQImmediate(QTMP, Float64x2::Cast(value_).value());

      case kUnboxedFloat32x4:
        __ LoadQImmediate(QTMP, Float32x4::Cast(value_).value());

      case kUnboxedInt32x4:
        __ LoadQImmediate(QTMP, Int32x4::Cast(value_).value());

    const intptr_t dest_offset = destination.ToStackSlotOffset();

    ASSERT(destination.IsStackSlot());

    const intptr_t dest_offset = destination.ToStackSlotOffset();

          bit_cast<int32_t, float>(Double::Cast(value_).value());

      __ LoadObject(tmp, value_);

      __ StoreToOffset(tmp, destination.base_reg(), dest_offset);
  const bool is_unboxed_int =

  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = is_unboxed_int ? 0 : 1;
  LocationSummary* locs = new (zone)

  if (is_unboxed_int) {

    ASSERT(representation_ == kUnboxedDouble);

  if (!locs()->out(0).IsInvalid()) {

  auto const dst_type_loc =

  const intptr_t kNonChangeableInputRegs =

  const intptr_t kCpuRegistersToPreserve =

  const intptr_t kFpuRegistersToPreserve =

  LocationSummary* summary = new (zone) LocationSummary(

  intptr_t next_temp = 0;

    const bool should_preserve = ((1 << i) & kCpuRegistersToPreserve) != 0;
    if (should_preserve) {
      summary->set_temp(next_temp++,

    const bool should_preserve = ((1 << i) & kFpuRegistersToPreserve) != 0;
    if (should_preserve) {

  auto object_store = compiler->isolate_group()->object_store();
  const auto& assert_boolean_stub =

  compiler::Label done;

                             UntaggedPcDescriptors::kOther, locs(),
static bool CanBePairOfImmediateOperands(const dart::Object& constant,
                                         compiler::Operand* low,
                                         compiler::Operand* high) {

static bool CanBePairOfImmediateOperands(Value* value,
                                         compiler::Operand* low,
                                         compiler::Operand* high) {
  if (!value->BindsToConstant()) {

  return CanBePairOfImmediateOperands(value->BoundConstant(), low, high);

  const intptr_t kNumInputs = 2;

  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

  if (operation_cid() == kMintCid) {
    compiler::Operand o;
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)

    if (CanBePairOfImmediateOperands(left(), &o, &o)) {

    } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {

  if (operation_cid() == kDoubleCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)

  if (operation_cid() == kSmiCid || operation_cid() == kIntegerCid) {
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)
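// Loads the class id of value_reg into value_cid_reg, special-casing Smis:
// without a value_is_smi label the Smi case yields kSmiCid directly,
// otherwise control branches to the label.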
static void LoadValueCid(FlowGraphCompiler* compiler,

                         compiler::Label* value_is_smi = nullptr) {
  if (value_is_smi == nullptr) {
    __ mov(value_cid_reg, compiler::Operand(kSmiCid));

  if (value_is_smi == nullptr) {
    __ LoadClassId(value_cid_reg, value_reg, NE);

    __ b(value_is_smi, EQ);
    __ LoadClassId(value_cid_reg, value_reg);

  switch (condition) {

static void EmitBranchOnCondition(FlowGraphCompiler* compiler,

                                  BranchLabels labels) {
  if (labels.fall_through == labels.false_label) {

    __ b(labels.true_label, true_condition);

    __ b(labels.false_label, false_condition);

    if (labels.fall_through != labels.true_label) {
      __ b(labels.true_label);

                                     LocationSummary* locs,

  Condition true_condition = TokenKindToIntCondition(kind);

  if (left.IsConstant()) {

    true_condition = FlipCondition(true_condition);
  } else if (right.IsConstant()) {

    __ cmp(left.reg(), compiler::Operand(right.reg()));

  return true_condition;

                                      LocationSummary* locs,

  Condition true_condition = TokenKindToIntCondition(kind);

  if (left.IsConstant()) {
    __ CompareImmediate(

        static_cast<uword>(Integer::Cast(left.constant()).AsInt64Value()));
    true_condition = FlipCondition(true_condition);
  } else if (right.IsConstant()) {
    __ CompareImmediate(

        static_cast<uword>(Integer::Cast(right.constant()).AsInt64Value()));

    __ cmp(left.reg(), compiler::Operand(right.reg()));

  return true_condition;
                                           LocationSummary* locs,

  PairLocation* left_pair;
  compiler::Operand right_lo, right_hi;
  if (locs->in(0).IsConstant()) {
    const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
                                                 &right_lo, &right_hi);

    left_pair = locs->in(1).AsPairLocation();
  } else if (locs->in(1).IsConstant()) {
    const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
                                                 &right_lo, &right_hi);

    left_pair = locs->in(0).AsPairLocation();

    left_pair = locs->in(0).AsPairLocation();
    PairLocation* right_pair = locs->in(1).AsPairLocation();
    right_lo = compiler::Operand(right_pair->At(0).reg());
    right_hi = compiler::Operand(right_pair->At(1).reg());

  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();

  __ cmp(left_lo, right_lo);

  __ cmp(left_hi, right_hi, EQ);
  return TokenKindToIntCondition(kind);

                                             LocationSummary* locs,

                                             BranchLabels labels) {
  PairLocation* left_pair;
  compiler::Operand right_lo, right_hi;
  Condition true_condition = TokenKindToIntCondition(kind);
  if (locs->in(0).IsConstant()) {
    const bool ok = CanBePairOfImmediateOperands(locs->in(0).constant(),
                                                 &right_lo, &right_hi);

    left_pair = locs->in(1).AsPairLocation();
    true_condition = FlipCondition(true_condition);
  } else if (locs->in(1).IsConstant()) {
    const bool ok = CanBePairOfImmediateOperands(locs->in(1).constant(),
                                                 &right_lo, &right_hi);

    left_pair = locs->in(0).AsPairLocation();

    left_pair = locs->in(0).AsPairLocation();
    PairLocation* right_pair = locs->in(1).AsPairLocation();
    right_lo = compiler::Operand(right_pair->At(0).reg());
    right_hi = compiler::Operand(right_pair->At(1).reg());

  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
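  // Signed 64-bit comparison: decide on the high words first and only
  // compare the low words (unsigned) when the high words are equal.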
  switch (true_condition) {

      hi_cond = lo_cond = VS;

  __ cmp(left_hi, right_hi);
  __ b(labels.true_label, hi_cond);
  __ b(labels.false_label, FlipCondition(hi_cond));

  __ cmp(left_lo, right_lo);

                                               LocationSummary* locs,

                                               BranchLabels labels) {
  ASSERT((kind == Token::kEQ) || (kind == Token::kNE));
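  // Null-aware int64 equality: identical references (including two nulls)
  // compare equal immediately; otherwise both operands must be Mints, whose
  // payload words are compared pairwise.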
  const Register temp = locs->temp(0).reg();
  const Condition true_condition = TokenKindToIntCondition(kind);
  compiler::Label* equal_result =
      (true_condition == EQ) ? labels.true_label : labels.false_label;
  compiler::Label* not_equal_result =
      (true_condition == EQ) ? labels.false_label : labels.true_label;

  __ cmp(left, compiler::Operand(right));
  __ b(equal_result, EQ);
  __ and_(temp, left, compiler::Operand(right));
  __ BranchIfSmi(temp, not_equal_result);
  __ CompareClassId(left, kMintCid, temp);
  __ b(not_equal_result, NE);
  __ CompareClassId(right, kMintCid, temp);
  __ b(not_equal_result, NE);

  __ cmp(temp, compiler::Operand(TMP));
  __ LoadFieldFromOffset(

  __ LoadFieldFromOffset(

  __ cmp(temp, compiler::Operand(TMP), EQ);
  return true_condition;

                                                LocationSummary* locs,
                                                BranchLabels labels,

      __ vcmpd(dleft, dright);

      __ vcmpd(dleft, dright);

      __ vcmpd(dright, dleft);

      __ vcmpd(dleft, dright);

      __ vcmpd(dright, dleft);

      __ vcmpd(dleft, dright);

    BranchLabels labels) {

    ASSERT(operation_cid() == kMintCid);
    return EmitNullAwareInt64ComparisonOp(compiler, locs(), kind(), labels);

  if (operation_cid() == kSmiCid) {

  } else if (operation_cid() == kIntegerCid) {

  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintEqualityOp(compiler, locs(), kind());

    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, locs(), labels, kind());

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

                                           BranchLabels labels) {

  if (right.IsConstant()) {
    __ TestImmediate(left, static_cast<int32_t>(ComputeImmediateMask()));

    __ tst(left, compiler::Operand(right.reg()));

  Condition true_condition = (kind() == Token::kNE) ? NE : EQ;
  return true_condition;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

                                            BranchLabels labels) {
  ASSERT((kind() == Token::kIS) || (kind() == Token::kISNOT));

  compiler::Label* deopt =

  const intptr_t true_result = (kind() == Token::kIS) ? 1 : 0;

  __ b(result ? labels.true_label : labels.false_label, EQ);
  __ LoadClassId(cid_reg, val_reg);

  for (intptr_t i = 2; i < data.length(); i += 2) {
    const intptr_t test_cid = data[i];
    ASSERT(test_cid != kSmiCid);

    __ CompareImmediate(cid_reg, test_cid);
    __ b(result ? labels.true_label : labels.false_label, EQ);

  if (deopt == nullptr) {

    compiler::Label* target = result ? labels.false_label : labels.true_label;
    if (target != labels.fall_through) {

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  if (operation_cid() == kMintCid) {
    compiler::Operand o;
    const intptr_t kNumTemps = 0;
    LocationSummary* locs = new (zone)

    if (CanBePairOfImmediateOperands(left(), &o, &o)) {

    } else if (CanBePairOfImmediateOperands(right(), &o, &o)) {

  if (operation_cid() == kDoubleCid) {
    LocationSummary* summary = new (zone)

  ASSERT(operation_cid() == kSmiCid);
  LocationSummary* summary = new (zone)

  summary->set_in(1, summary->in(0).IsConstant()

                                           BranchLabels labels) {
  if (operation_cid() == kSmiCid) {

  } else if (operation_cid() == kMintCid) {
    return EmitUnboxedMintComparisonOp(compiler, locs(), kind(), labels);

    ASSERT(operation_cid() == kDoubleCid);
    return EmitDoubleComparisonOp(compiler, locs(), labels, kind());
      stub = &StubCode::CallBootstrapNative();

      stub = &StubCode::CallBootstrapNative();

      stub = &StubCode::CallAutoScopeNative();

      stub = &StubCode::CallNoScopeNative();

  __ LoadImmediate(R1, argc_tag);
  compiler::ExternalLabel label(entry);
  __ LoadNativeEntry(R9, &label,

        source(), *stub, UntaggedPcDescriptors::kOther, locs(),

    compiler->GenerateNonLazyDeoptableStubCall(
        source(), *stub, UntaggedPcDescriptors::kOther, locs(),

#define R(r) (1 << r)

    bool is_optimizing) const {

  return MakeLocationSummaryInternal(
      zone, is_optimizing,

  __ mov(saved_fp_or_sp,
         is_leaf_ ? compiler::Operand(SPREG) : compiler::Operand(FPREG));

    __ PushImmediate(0);

    __ LoadObject(CODE_REG, Object::null_object());
    __ set_constant_pool_allowed(false);
    __ EnterDartFrame(0, false);

  __ ReserveAlignedFrameSpace(marshaller_.RequiredStackSpaceInBytes());

  __ Comment(is_leaf_ ? "Leaf Call" : "Call");
#if !defined(PRODUCT)

#if !defined(PRODUCT)

    __ LoadImmediate(temp1, 0);
    __ StoreToOffset(temp1, THR,

    __ mov(temp1, compiler::Operand(PC));

                           UntaggedPcDescriptors::Kind::kOther, locs(),

        THR, compiler::target::Thread::
                 call_native_through_safepoint_entry_point_offset()));

                "NOTFP should be a reserved register");

    __ Comment("Check Dart_Handle for Error.");
    compiler::Label not_error;

    __ BranchIfSmi(temp1, &not_error);
    __ LoadClassId(temp1, temp1);

    __ Comment("Slow path: call Dart_PropagateError through stub.");

        THR, compiler::target::Thread::
                 call_native_through_safepoint_entry_point_offset()));
    __ ldr(branch, compiler::Address(
                       THR, kPropagateErrorRuntimeEntry.OffsetFromThread()));

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();

    __ mov(SPREG, compiler::Operand(saved_fp_or_sp));

    __ LeaveDartFrame();
    __ set_constant_pool_allowed(true);

  __ PopRegister(temp1);

  __ LeaveDartFrame();

  const Register old_exit_through_ffi_reg = R4;

  __ Pop(old_exit_frame_reg);
  __ Pop(old_exit_through_ffi_reg);

  __ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
                                 old_exit_through_ffi_reg, tmp,

  __ PopNativeCalleeSavedRegisters();

#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)

  RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));

  RESTORES_LR_FROM_FRAME(__ LeaveFrame(1 << LR | 1 << FP));

  __ set_constant_pool_allowed(true);

  __ set_constant_pool_allowed(false);
  SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));

  SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));

  __ PushImmediate(0);

#if defined(DART_TARGET_OS_FUCHSIA) && defined(USING_SHADOW_CALL_STACK)

  __ PushNativeCalleeSavedRegisters();

  const intptr_t top_resource_offset =

  __ LoadFromOffset(R0, THR, top_resource_offset);

  __ LoadImmediate(R0, 0);
  __ StoreToOffset(R0, THR, top_resource_offset);

  __ EmitEntryFrameVerification(R0);

  __ TransitionNativeToGenerated(R0, R1,

  const Function& target_function = marshaller_.dart_signature();
  const intptr_t callback_id = target_function.FfiCallbackId();

  __ LoadFromOffset(R0, R0,

  __ LoadFromOffset(R0, R0,

  __ LoadFieldFromOffset(R0, R0,

  if (FLAG_precompiled_mode) {
    __ SetupGlobalPoolAndDispatchTable();

    __ LoadImmediate(PP, 0);

  __ LoadFieldFromOffset(LR, LR,

#define R(r) (1 << r)

    bool is_optimizing) const {

  __ MoveRegister(saved_fp, FPREG);

  const intptr_t frame_space = native_calling_convention_.StackTopInBytes();
  __ EnterCFrame(frame_space);

  __ str(target_address,

  __ CallCFunction(target_address);
  __ LoadImmediate(temp0, VMTag::kDartTagId);
  const intptr_t kNumInputs = 1;

  const intptr_t kNumInputs = 1;

  ASSERT(cid_ == kOneByteStringCid);

  __ ldr(result, compiler::FieldAddress(

         compiler::FieldAddress(

  const intptr_t kNumInputs = 5;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const Register bytes_ptr_reg = start_reg;
  const Register bytes_end_reg = end_reg;
  const Register flags_reg = bytes_reg;

  const Register decoder_temp_reg = start_reg;
  const Register flags_temp_reg = end_reg;

  const intptr_t kSizeMask = 0x03;
  const intptr_t kFlagsMask = 0x3C;

  compiler::Label loop, loop_in;
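  // Scan loop: each input byte indexes a 256-entry table; the entry's low
  // bits (kSizeMask) add to the decoded length and its flag bits (kFlagsMask)
  // are OR-ed into flags_reg, which is finally masked, Smi-tagged, and merged
  // into the decoder's scan_flags field.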
  __ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());

      table_reg, table_reg,

  __ add(bytes_ptr_reg, bytes_reg, compiler::Operand(start_reg));
  __ add(bytes_end_reg, bytes_reg, compiler::Operand(end_reg));

  __ LoadImmediate(size_reg, 0);
  __ LoadImmediate(flags_reg, 0);

  __ ldrb(temp_reg, compiler::Address(table_reg, temp_reg));
  __ orr(flags_reg, flags_reg, compiler::Operand(temp_reg));
  __ and_(temp_reg, temp_reg, compiler::Operand(kSizeMask));
  __ add(size_reg, size_reg, compiler::Operand(temp_reg));

  __ cmp(bytes_ptr_reg, compiler::Operand(bytes_end_reg));

  __ AndImmediate(flags_reg, flags_reg, kFlagsMask);

  __ SmiTag(flags_reg);

    if (decoder_location.IsStackSlot()) {

      decoder_reg = decoder_temp_reg;

      decoder_reg = decoder_location.reg();

    const auto scan_flags_field_offset = scan_flags_field_.offset_in_bytes();
    __ LoadFieldFromOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
    __ orr(flags_temp_reg, flags_temp_reg, compiler::Operand(flags_reg));
    __ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
  const bool directly_addressable = aligned() && rep != kUnboxedInt64;
  const intptr_t kNumInputs = 2;
  intptr_t kNumTemps = 0;
  if (!directly_addressable) {

    if (rep == kUnboxedFloat || rep == kUnboxedDouble) {

  LocationSummary* locs = new (zone)

  const bool can_be_constant =

  if (rep == kUnboxedInt64) {

  if (rep == kUnboxedFloat) {

  if (!directly_addressable) {

    if (rep == kUnboxedFloat || rep == kUnboxedDouble) {

  const bool directly_addressable = aligned() && rep != kUnboxedInt64;

  if (directly_addressable) {

            ? __ ElementAddressForRegIndex(true,

            : __ ElementAddressForIntIndex(

    if (index.IsRegister()) {
      __ LoadElementAddressForRegIndex(address,

      __ LoadElementAddressForIntIndex(

  if (rep == kUnboxedInt64) {
    ASSERT(!directly_addressable);

    const Register result_lo = result_pair->At(0).reg();
    const Register result_hi = result_pair->At(1).reg();

      __ ldr(result_lo, compiler::Address(address));

      __ LoadWordUnaligned(result_lo, address, TMP);

      __ LoadWordUnaligned(result_hi, address, TMP);

      case kUnboxedUint32:

      case kUnboxedUint16:
        __ LoadHalfWordUnsignedUnaligned(result, address, TMP);

    if (rep == kUnboxedFloat) {

    } else if (rep == kUnboxedDouble) {

        __ vldrd(dresult0, element_address);

        __ vmovdr(dresult0, 0, value);
        __ AddImmediate(address, address, 4);

        __ vmovdr(dresult0, 1, value);

      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
      ASSERT(element_address.Equals(compiler::Address(IP)));

      __ vldmd(IA, IP, dresult0, 2);
  const bool directly_addressable =

  const intptr_t kNumInputs = 3;
  LocationSummary* locs;

  intptr_t kNumTemps = 0;
  bool needs_base = false;
  const bool can_be_constant =

  if (can_be_constant) {
    if (!directly_addressable) {

    } else if (needs_base) {

    if (!directly_addressable) {

  for (intptr_t i = 0; i < kNumTemps; i++) {

  if (rep == kUnboxedInt64) {

  } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {

    if (rep == kUnboxedFloat) {

  } else if (class_id() == kArrayCid) {

  const bool directly_addressable =

  if (directly_addressable) {

            ? __ ElementAddressForRegIndex(false,

            : __ ElementAddressForIntIndex(

    if (index.IsRegister()) {
      __ LoadElementAddressForRegIndex(temp,

      __ LoadElementAddressForIntIndex(

    ASSERT(rep == kUnboxedUint8);

      } else if (value < 0) {

        __ LoadImmediate(IP, static_cast<int8_t>(value));
        __ strb(IP, element_address);

      __ LoadImmediate(IP, 0xFF);

      __ mov(IP, compiler::Operand(0), LE);

      __ strb(IP, element_address);

  if (rep == kUnboxedInt64) {
    ASSERT(!directly_addressable);

    Register value_lo = value_pair->At(0).reg();
    Register value_hi = value_pair->At(1).reg();

      __ str(value_lo, compiler::Address(temp));

      __ StoreWordUnaligned(value_lo, temp, temp2);

      __ StoreWordUnaligned(value_hi, temp, temp2);

  } else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {

      __ LoadImmediate(IP,

      __ strb(IP, element_address);

      __ strb(value, element_address);

      case kUnboxedUint32:

        __ StoreWordUnaligned(value, temp, temp2);

      case kUnboxedUint16:

        __ StoreHalfWordUnaligned(value, temp, temp2);

    if (rep == kUnboxedFloat) {

        __ vstrs(value_reg, element_address);

    } else if (rep == kUnboxedDouble) {

        __ vstrd(value_reg, element_address);

        __ AddImmediate(address, address, 4);

      ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
             rep == kUnboxedFloat64x2);
      ASSERT(element_address.Equals(compiler::Address(index.reg())));

      __ vstmd(IA, index.reg(), value_reg, 2);

  } else if (class_id() == kArrayCid) {

      __ StoreIntoArray(array, temp, value, CanValueBeSmi());

      __ StoreObjectIntoObjectNoBarrier(array, compiler::Address(temp),

      __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
  const intptr_t kNumInputs = 1;

  const bool emit_full_guard = !opt || (field_cid == kIllegalCid);

  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));

  const bool needs_field_temp_reg = emit_full_guard;

  intptr_t num_temps = 0;
  if (needs_value_cid_temp_reg) {

  if (needs_field_temp_reg) {

  LocationSummary* summary = new (zone)

  for (intptr_t i = 0; i < num_temps; i++) {

  ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
  ASSERT(sizeof(UntaggedField::is_nullable_) == 4);

  const bool emit_full_guard =

  const bool needs_value_cid_temp_reg =
      emit_full_guard || ((value_cid == kDynamicCid) && (field_cid != kSmiCid));

  const bool needs_field_temp_reg = emit_full_guard;

  const Register field_reg = needs_field_temp_reg

  compiler::Label ok, fail_label;

  compiler::Label* deopt =

  compiler::Label* fail = (deopt != nullptr) ? deopt : &fail_label;

  if (emit_full_guard) {

    compiler::FieldAddress field_cid_operand(

    compiler::FieldAddress field_nullability_operand(

      LoadValueCid(compiler, value_cid_reg, value_reg);
      __ ldr(IP, field_cid_operand);
      __ cmp(value_cid_reg, compiler::Operand(IP));

      __ ldr(IP, field_nullability_operand);
      __ cmp(value_cid_reg, compiler::Operand(IP));
    } else if (value_cid == kNullCid) {
      __ ldr(value_cid_reg, field_nullability_operand);
      __ CompareImmediate(value_cid_reg, value_cid);

      __ ldr(value_cid_reg, field_cid_operand);
      __ CompareImmediate(value_cid_reg, value_cid);

      if (!field().needs_length_check()) {

        __ ldr(IP, field_cid_operand);

        __ str(value_cid_reg, field_cid_operand);
        __ str(value_cid_reg, field_nullability_operand);

        __ LoadImmediate(IP, value_cid);
        __ str(IP, field_cid_operand);
        __ str(IP, field_nullability_operand);

    if (deopt == nullptr) {

      __ ldr(IP, compiler::FieldAddress(

      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);

    ASSERT(deopt != nullptr);

    if (field_cid != kSmiCid) {

      __ LoadClassId(value_cid_reg, value_reg);
      __ CompareImmediate(value_cid_reg, field_cid);

      if (field_cid != kSmiCid) {
        __ CompareImmediate(value_cid_reg, kNullCid);

        __ CompareObject(value_reg, Object::null_object());

  } else if (value_cid == field_cid) {

    ASSERT(value_cid != nullability);
  const intptr_t kNumInputs = 1;

    const intptr_t kNumTemps = 3;
    LocationSummary* summary = new (zone)

    const intptr_t kNumTemps = 1;
    LocationSummary* summary = new (zone)

  compiler::Label* deopt =

    __ ldrsb(offset_reg,
             compiler::FieldAddress(
                 field_reg, compiler::target::Field::
                                guarded_list_length_in_object_offset_offset()));

        compiler::FieldAddress(

    __ tst(offset_reg, compiler::Operand(offset_reg));

    __ ldr(IP, compiler::Address(value_reg, offset_reg));
    __ cmp(length_reg, compiler::Operand(IP));

    if (deopt == nullptr) {

      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);

    ASSERT(field().guarded_list_length_in_object_offset() !=

           compiler::FieldAddress(
               value_reg, field().guarded_list_length_in_object_offset()));
    __ CompareImmediate(

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = might_box ? 2 : 0;
  LocationSummary* summary = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,

  compiler::Address element_address = __ ElementAddressForRegIndex(

    Register result1 = result_pair->At(0).reg();
    Register result2 = result_pair->At(1).reg();

      case kOneByteStringCid:

        __ ldr(result1, element_address);
        __ eor(result2, result2, compiler::Operand(result2));

      case kTwoByteStringCid:

        __ ldr(result1, element_address);
        __ eor(result2, result2, compiler::Operand(result2));

      case kOneByteStringCid:

      case kTwoByteStringCid:

    compiler::Label done;
    __ TestImmediate(value, 0xC0000000);

    __ eor(temp, temp, compiler::Operand(temp));

    __ StoreFieldToOffset(
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

  const intptr_t kNumInputs = 3;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

static void InlineArrayAllocation(FlowGraphCompiler* compiler,
                                  intptr_t num_elements,
                                  compiler::Label* slow_path,
                                  compiler::Label* done) {
  const int kInlineArraySize = 12;

  __ TryAllocateArray(kArrayCid, instance_size, slow_path,

  __ StoreIntoObjectNoBarrier(

  __ StoreIntoObjectNoBarrier(

  if (num_elements > 0) {

    const intptr_t array_size = instance_size - sizeof(UntaggedArray);
    __ LoadObject(R8, Object::null_object());
    if (num_elements >= 2) {
      __ mov(R9, compiler::Operand(R8));

      __ LoadImmediate(R9, 0x1);

    __ InitializeFieldsNoBarrierUnrolled(

  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
    const Class& list_class =

  compiler::Label slow_path, done;
  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    if (compiler->is_optimizing() && !FLAG_precompiled_mode &&

  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_array_stub =

  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 3;
  LocationSummary* locs = new (zone) LocationSummary(
class AllocateContextSlowPath
    : public TemplateSlowPathCode<AllocateUninitializedContextInstr> {

  explicit AllocateContextSlowPath(
      AllocateUninitializedContextInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    __ Comment("AllocateContextSlowPath");

    LocationSummary* locs = instruction()->locs();
    locs->live_registers()->Remove(locs->out(0));

    auto slow_path_env = compiler->SlowPathEnvironmentFor(

    ASSERT(slow_path_env != nullptr);

    auto object_store = compiler->isolate_group()->object_store();

        compiler->zone(), object_store->allocate_context_stub());
    __ LoadImmediate(R1, instruction()->num_context_variables());
    compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
                               UntaggedPcDescriptors::kOther, locs,
                               instruction()->deopt_id(), slow_path_env);
    ASSERT(instruction()->locs()->out(0).reg() == R0);

    compiler->RestoreLiveRegisters(instruction()->locs());

  AllocateContextSlowPath* slow_path = new AllocateContextSlowPath(this);
  compiler->AddSlowPathCode(slow_path);

  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
    __ TryAllocateArray(kContextCid, instance_size, slow_path->entry_label(),
                        temp0, temp1, temp2);

        compiler::FieldAddress(

    __ Jump(slow_path->entry_label());

  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 1;
  LocationSummary* locs = new (zone)

  auto object_store = compiler->isolate_group()->object_store();
  const auto& allocate_context_stub =

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone)

  auto object_store = compiler->isolate_group()->object_store();
  const auto& clone_context_stub =

                             UntaggedPcDescriptors::kOther, locs(),

  compiler->AddExceptionHandler(this);

  const intptr_t fp_sp_dist =

  __ AddImmediate(SP, FP, fp_sp_dist);

  if (raw_exception_var_ != nullptr) {

  if (raw_stacktrace_var_ != nullptr) {

  const intptr_t kNumInputs = 0;
  const intptr_t kNumTemps = 2;

  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,
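// Slow path for interrupt/stack-overflow checks: it either calls a shared
// stub (wrapping the call in a minimal frame when the caller has none) or
// invokes the runtime directly, and at OSR points it additionally records a
// kOsrEntry descriptor so execution can switch to optimized code mid-loop.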
class CheckStackOverflowSlowPath
    : public TemplateSlowPathCode<CheckStackOverflowInstr> {

  static constexpr intptr_t kNumSlowPathArgs = 0;

  explicit CheckStackOverflowSlowPath(CheckStackOverflowInstr* instruction)
      : TemplateSlowPathCode(instruction) {}

  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
    if (compiler->isolate_group()->use_osr() && osr_entry_label()->IsLinked()) {
      const Register value = instruction()->locs()->temp(0).reg();
      __ Comment("CheckStackOverflowSlowPathOsr");
      __ Bind(osr_entry_label());

    __ Comment("CheckStackOverflowSlowPath");

    const bool using_shared_stub =
        instruction()->locs()->call_on_shared_slow_path();
    if (!using_shared_stub) {
      compiler->SaveLiveRegisters(instruction()->locs());

        compiler->SlowPathEnvironmentFor(instruction(), kNumSlowPathArgs);

    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();
    if (using_shared_stub) {

        ASSERT(__ constant_pool_allowed());
        __ set_constant_pool_allowed(false);
        __ EnterDartFrame(0);

          instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
      __ Call(compiler::Address(THR, entry_point_offset));
      compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);

      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOther,
                                     instruction()->deopt_id(),
                                     instruction()->source());

        __ LeaveDartFrame();
        __ set_constant_pool_allowed(true);

      __ CallRuntime(kInterruptOrStackOverflowRuntimeEntry, kNumSlowPathArgs);

          instruction()->source(), instruction()->deopt_id(),
          UntaggedPcDescriptors::kOther, instruction()->locs(), env);

        instruction()->in_loop()) {

      compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
                                     instruction()->deopt_id(),
                                     InstructionSource());

    compiler->pending_deoptimization_env_ = nullptr;
    if (!using_shared_stub) {
      compiler->RestoreLiveRegisters(instruction()->locs());

  compiler::Label* osr_entry_label() {

    return &osr_entry_label_;

  compiler::Label osr_entry_label_;

  __ ldr(IP, compiler::Address(THR,

  __ cmp(SP, compiler::Operand(IP));

  auto object_store = compiler->isolate_group()->object_store();

          ? object_store->stack_overflow_stub_with_fpu_regs_stub()
          : object_store->stack_overflow_stub_without_fpu_regs_stub());

  if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
      compiler->flow_graph().graph_entry()->NeedsFrame()) {
    __ GenerateUnRelocatedPcRelativeCall(LS);
    compiler->AddPcRelativeCallStubTarget(stub);

    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);

                               UntaggedPcDescriptors::kOther, locs(),

  CheckStackOverflowSlowPath* slow_path = new CheckStackOverflowSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ b(slow_path->entry_label(), LS);
    const intptr_t configured_optimization_counter_threshold =
        compiler->thread()->isolate_group()->optimization_counter_threshold();
    const int32_t threshold =
        configured_optimization_counter_threshold * (loop_depth() + 1);

        compiler::FieldAddress(

        compiler::FieldAddress(

    __ CompareImmediate(count, threshold);
    __ b(slow_path->osr_entry_label(), GE);

  if (compiler->ForceSlowPathForStackOverflow()) {
    __ b(slow_path->entry_label());

  __ Bind(slow_path->exit_label());
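// Shift-left of a tagged Smi: a constant shift count is checked for overflow
// by shifting back with ASR and comparing against the original operand, while
// a variable count is untagged first and deopts when negative or above
// kCountLimit.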
static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
                             BinarySmiOpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();

  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)

  if (locs.in(1).IsConstant()) {
    const Object& constant = locs.in(1).constant();

    const intptr_t kCountLimit = 0x1F;

    if (shift_left->can_overflow()) {

      __ Lsl(IP, left, compiler::Operand(value));

  Range* right_range = shift_left->right_range();
  if (shift_left->left()->BindsToConstant() && shift_left->can_overflow()) {

    const Object& obj = shift_left->left()->BoundConstant();

      if (left_int == 0) {
        __ cmp(right, compiler::Operand(0));

        __ mov(result, compiler::Operand(0));

      const intptr_t max_right =

      const bool right_needs_check =

      if (right_needs_check) {

      __ SmiUntag(IP, right);

  const bool right_needs_check =

  if (!shift_left->can_overflow()) {
    if (right_needs_check) {

      ASSERT(shift_left->CanDeoptimize());
      __ cmp(right, compiler::Operand(0));

      __ SmiUntag(IP, right, CC);

      __ SmiUntag(IP, right);

    if (right_needs_check) {
      ASSERT(shift_left->CanDeoptimize());

    __ SmiUntag(IP, right);

    const Register temp = locs.temp(0).reg();
    __ Lsl(temp, left, IP);
    __ cmp(left, compiler::Operand(temp, ASR, IP));
  const intptr_t kNumInputs = 2;

  intptr_t num_temps = 0;
  if (op_kind() == Token::kTRUNCDIV) {

  } else if (op_kind() == Token::kMOD) {

  LocationSummary* summary = new (zone)

  if (op_kind() == Token::kTRUNCDIV) {

  if (op_kind() == Token::kMOD) {

  if (op_kind() == Token::kSHL) {

  compiler::Label* deopt = nullptr;

      if (deopt == nullptr) {

      if (deopt == nullptr) {

      if (deopt == nullptr) {

    case Token::kTRUNCDIV: {

      const intptr_t shift_count =

      __ add(temp, left, compiler::Operand(IP, LSR, 32 - shift_count));

      __ mov(result, compiler::Operand(temp, ASR, shift_count));

    case Token::kBIT_AND: {

      compiler::Operand o;

        __ LoadImmediate(IP, imm);

    case Token::kBIT_OR: {

      compiler::Operand o;

        __ LoadImmediate(IP, imm);

    case Token::kBIT_XOR: {

      compiler::Operand o;

        __ LoadImmediate(IP, imm);

      const intptr_t kCountLimit = 0x1F;

    case Token::kUSHR: {

      if (deopt != nullptr) {
        __ CompareImmediate(left, 0);

      if (deopt == nullptr) {

      if (deopt == nullptr) {

      if (deopt == nullptr) {

    case Token::kBIT_AND: {

    case Token::kBIT_OR: {

    case Token::kBIT_XOR: {

    case Token::kTRUNCDIV: {

        __ cmp(right, compiler::Operand(0));

      __ CompareImmediate(result, 0x40000000);

        __ cmp(right, compiler::Operand(0));

      compiler::Label done;
      __ cmp(result, compiler::Operand(0));

      __ cmp(right, compiler::Operand(0));

        __ CompareImmediate(right, 0);

      const intptr_t kCountLimit = 0x1F;

        __ CompareImmediate(IP, kCountLimit);
        __ LoadImmediate(IP, kCountLimit, GT);

    case Token::kUSHR: {
      compiler::Label done;

        __ sub(IP, IP, compiler::Operand(32), GE);

  if (deopt != nullptr) {
static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
                               BinaryInt32OpInstr* shift_left) {
  const LocationSummary& locs = *shift_left->locs();

  compiler::Label* deopt =
      shift_left->CanDeoptimize()
          ? compiler->AddDeoptStub(shift_left->deopt_id(),
                                   ICData::kDeoptBinarySmiOp)

  ASSERT(locs.in(1).IsConstant());
  const Object& constant = locs.in(1).constant();

  const intptr_t kCountLimit = 0x1F;

  if (shift_left->can_overflow()) {

    __ Lsl(IP, left, compiler::Operand(value));

  const intptr_t kNumInputs = 2;

  intptr_t num_temps = 0;

  LocationSummary* summary = new (zone)

  if (op_kind() == Token::kSHL) {
    EmitInt32ShiftLeft(compiler, this);

  compiler::Label* deopt = nullptr;

      if (deopt == nullptr) {

      if (deopt == nullptr) {

      if (deopt == nullptr) {

    case Token::kBIT_AND: {

      compiler::Operand o;

    case Token::kBIT_OR: {

      compiler::Operand o;

    case Token::kBIT_XOR: {

      compiler::Operand o;

      const intptr_t kCountLimit = 0x1F;

    case Token::kUSHR: {

      if (deopt != nullptr) {

      if (deopt == nullptr) {

      if (deopt == nullptr) {

      if (deopt == nullptr) {

    case Token::kBIT_AND: {

    case Token::kBIT_OR: {

    case Token::kBIT_XOR: {

  ASSERT((left_cid != kDoubleCid) && (right_cid != kDoubleCid));
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  compiler::Label* deopt =

  if (this->left()->definition() == this->right()->definition()) {

  } else if (left_cid == kSmiCid) {

  } else if (right_cid == kSmiCid) {
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary = new (zone) LocationSummary(

                 out_reg, locs()->temp(0).reg());

    case kUnboxedDouble:

    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4:
      __ StoreMultipleDToOffset(value, 2, out_reg,

  ASSERT(BoxCid() != kSmiCid);

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = needs_temp ? 1 : 0;
  LocationSummary* summary = new (zone)

void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {

    case kUnboxedInt64: {

      __ LoadFieldFromOffset(result->At(0).reg(), box, ValueOffset());
      __ LoadFieldFromOffset(result->At(1).reg(), box,

    case kUnboxedDouble: {

    case kUnboxedFloat: {

    case kUnboxedFloat32x4:
    case kUnboxedFloat64x2:
    case kUnboxedInt32x4: {

      __ LoadMultipleDFromOffset(result, 2, box,

void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {

    case kUnboxedInt64: {

      __ SmiUntag(result->At(0).reg(), box);

    case kUnboxedDouble: {

      __ SmiUntag(IP, box);

void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {

void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {

  compiler::Label done;
  __ SignFill(result->At(1).reg(), box);

  const intptr_t kNumInputs = 1;

  LocationSummary* summary = new (zone)
      LocationSummary(zone, kNumInputs, kNumTemps,

  compiler::Label done;

  __ TestImmediate(value, 0xC0000000);

  __ eor(temp, temp, compiler::Operand(temp));

  __ StoreFieldToOffset(

  const intptr_t kNumInputs = 1;

  const bool stubs_in_vm_isolate =
      object_store->allocate_mint_with_fpu_regs_stub()
          ->InVMIsolateHeap() ||
      object_store->allocate_mint_without_fpu_regs_stub()
          ->InVMIsolateHeap();
  const bool shared_slow_path_call =

  LocationSummary* summary = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps,

  } else if (shared_slow_path_call) {

  Register value_lo = value_pair->At(0).reg();

  __ SmiTag(out_reg, value_lo);

  Register value_lo = value_pair->At(0).reg();
  Register value_hi = value_pair->At(1).reg();
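  // Fast path: Smi-tag the low word and verify the tagged value still
  // round-trips (the high word must equal the sign extension of the tagged
  // result); only when that fails is a Mint box allocated on the slow path.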
  compiler::Label done;
  __ SmiTag(out_reg, value_lo);

  __ cmp(value_hi, compiler::Operand(out_reg, ASR, 31), EQ);

                             compiler->intrinsic_slow_path_label(),

  } else if (locs()->call_on_shared_slow_path()) {
    const bool has_frame = compiler->flow_graph().graph_entry()->NeedsFrame();

      ASSERT(__ constant_pool_allowed());
      __ set_constant_pool_allowed(false);
      __ EnterDartFrame(0);

    auto object_store = compiler->isolate_group()->object_store();

        live_fpu_regs ? object_store->allocate_mint_with_fpu_regs_stub()
                      : object_store->allocate_mint_without_fpu_regs_stub());

    ASSERT(!locs()->live_registers()->ContainsRegister(

    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,

      __ LeaveDartFrame();
      __ set_constant_pool_allowed(true);

  __ StoreFieldToOffset(value_lo, out_reg,

  __ StoreFieldToOffset(

static void LoadInt32FromMint(FlowGraphCompiler* compiler,

                              compiler::Label* deopt) {

  if (deopt != nullptr) {
    __ LoadFieldFromOffset(

  const intptr_t kNumInputs = 1;

  LocationSummary* summary = new (zone)

  if (kNumTemps > 0) {

  compiler::Label* deopt =

  compiler::Label* out_of_range = !is_truncating() ? deopt : nullptr;

  if (value_cid == kSmiCid) {

  } else if (value_cid == kMintCid) {

    compiler::Label done;

    compiler::Label done;

    __ CompareClassId(value, kMintCid, temp);

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone)

  const bool needs_temp = op_kind() != MethodRecognizer::kDouble_getIsNaN;
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = needs_temp ? 1 : 0;
  LocationSummary* summary = new (zone)

                                          BranchLabels labels) {

  const bool is_negated = kind() != Token::kEQ;

    case MethodRecognizer::kDouble_getIsNaN: {

      return is_negated ? VC : VS;

    case MethodRecognizer::kDouble_getIsInfinite: {
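      // A double is infinite iff its low word is zero and its high word,
      // with the sign bit masked off, equals the exponent pattern 0x7FF00000.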
      compiler::Label done;

      __ cmp(TMP, compiler::Operand(0));
      __ b(is_negated ? labels.true_label : labels.false_label, NE);

      __ AndImmediate(temp, temp, 0x7FFFFFFF);

      __ CompareImmediate(temp, 0x7FF00000);
      return is_negated ? NE : EQ;

    case MethodRecognizer::kDouble_getIsNegative: {

      __ b(is_negated ? labels.true_label : labels.false_label, VS);

      __ cmp(temp, compiler::Operand(0), ZERO);
      return is_negated ? GE : LT;

#define DEFINE_EMIT(Name, Args)                                                \
  static void Emit##Name(FlowGraphCompiler* compiler, SimdOpInstr* instr,      \
                         PP_APPLY(PP_UNPACK, Args))

DEFINE_EMIT(Simd32x4BinaryOp,

  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4Add:

    case SimdOpInstr::kFloat32x4Sub:

    case SimdOpInstr::kFloat32x4Mul:

    case SimdOpInstr::kFloat32x4Div:

    case SimdOpInstr::kFloat32x4Equal:

    case SimdOpInstr::kFloat32x4NotEqual:

    case SimdOpInstr::kFloat32x4GreaterThan:

    case SimdOpInstr::kFloat32x4GreaterThanOrEqual:

    case SimdOpInstr::kFloat32x4LessThan:

    case SimdOpInstr::kFloat32x4LessThanOrEqual:

    case SimdOpInstr::kFloat32x4Min:

    case SimdOpInstr::kFloat32x4Max:

    case SimdOpInstr::kFloat32x4Scale:

    case SimdOpInstr::kInt32x4BitAnd:

    case SimdOpInstr::kInt32x4BitOr:

    case SimdOpInstr::kInt32x4BitXor:

    case SimdOpInstr::kInt32x4Add:

    case SimdOpInstr::kInt32x4Sub:

DEFINE_EMIT(Float64x2BinaryOp,
            (QRegisterView result, QRegisterView left, QRegisterView right)) {
  switch (instr->kind()) {
    case SimdOpInstr::kFloat64x2Add:

    case SimdOpInstr::kFloat64x2Sub:

    case SimdOpInstr::kFloat64x2Mul:

    case SimdOpInstr::kFloat64x2Div:

DEFINE_EMIT(Simd32x4Shuffle,
            (FixedQRegisterView<Q6> result, FixedQRegisterView<Q5> value)) {

  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4GetX:

    case SimdOpInstr::kFloat32x4GetY:

    case SimdOpInstr::kFloat32x4GetZ:

    case SimdOpInstr::kFloat32x4GetW:

    case SimdOpInstr::kInt32x4Shuffle:
    case SimdOpInstr::kFloat32x4Shuffle: {
      if (instr->mask() == 0x00) {

      } else if (instr->mask() == 0x55) {

      } else if (instr->mask() == 0xAA) {

      } else if (instr->mask() == 0xFF) {

        QRegisterView temp(QTMP);

        for (intptr_t i = 0; i < 4; i++) {
          __ vmovs(result.s(i), temp.s((instr->mask() >> (2 * i)) & 0x3));

DEFINE_EMIT(Simd32x4ShuffleMix,
            (FixedQRegisterView<Q6> result,
             FixedQRegisterView<Q4> left,
             FixedQRegisterView<Q5> right)) {

  __ vmovs(result.s(0), left.s((instr->mask() >> 0) & 0x3));
  __ vmovs(result.s(1), left.s((instr->mask() >> 2) & 0x3));
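// Collects the sign bit of every lane into a small integer mask: each lane's
// sign is shifted down to bit 0 and OR-ed into the result at its lane index.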
DEFINE_EMIT(Simd32x4GetSignMask,

  __ Lsr(out, out, compiler::Operand(31));

  __ Lsr(temp, temp, compiler::Operand(31));
  __ orr(out, out, compiler::Operand(temp, LSL, 1));

  __ Lsr(temp, temp, compiler::Operand(31));
  __ orr(out, out, compiler::Operand(temp, LSL, 2));

  __ Lsr(temp, temp, compiler::Operand(31));
  __ orr(out, out, compiler::Operand(temp, LSL, 3));

DEFINE_EMIT(Float32x4FromDoubles,
            (FixedQRegisterView<Q6> out,

             QRegisterView q3)) {
  __ vcvtsd(out.s(0), q0.d(0));
  __ vcvtsd(out.s(1), q1.d(0));
  __ vcvtsd(out.s(2), q2.d(0));
  __ vcvtsd(out.s(3), q3.d(0));

DEFINE_EMIT(Float32x4Sqrt,

  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4Negate:

    case SimdOpInstr::kFloat32x4Abs:

    case SimdOpInstr::kFloat32x4Reciprocal:

    case SimdOpInstr::kFloat32x4ReciprocalSqrt:
      __ VreciprocalSqrtqs(result, left);

DEFINE_EMIT(Simd32x4ToSimd32x4Conversion, (SameAsFirstInput, QRegister left)) {

DEFINE_EMIT(Float64x2Clamp,

             QRegisterView lower,
             QRegisterView upper)) {
  compiler::Label done0, done1;

  __ vcmpd(left.d(0), upper.d(0));

  __ vcmpd(left.d(1), upper.d(1));

DEFINE_EMIT(Float32x4With,
            (FixedQRegisterView<Q6> result,
             QRegisterView replacement,

  __ vcvtsd(STMP, replacement.d(0));

  switch (instr->kind()) {
    case SimdOpInstr::kFloat32x4WithX:

    case SimdOpInstr::kFloat32x4WithY:

    case SimdOpInstr::kFloat32x4WithZ:

    case SimdOpInstr::kFloat32x4WithW:

DEFINE_EMIT(Simd64x2Shuffle, (QRegisterView result, QRegisterView value)) {
  switch (instr->kind()) {
    case SimdOpInstr::kFloat64x2GetX:

    case SimdOpInstr::kFloat64x2GetY:

DEFINE_EMIT(Float64x2Zero, (QRegister q)) {

DEFINE_EMIT(Float64x2Splat, (QRegisterView result, QRegisterView value)) {

DEFINE_EMIT(Float64x2FromDoubles,
            (QRegisterView r, QRegisterView q0, QRegisterView q1)) {
  __ vmovd(r.d(0), q0.d(0));
  __ vmovd(r.d(1), q1.d(0));

DEFINE_EMIT(Float64x2ToFloat32x4, (FixedQRegisterView<Q6> r, QRegisterView q)) {

  __ vcvtsd(r.s(0), q.d(0));

  __ vcvtsd(r.s(1), q.d(1));

DEFINE_EMIT(Float32x4ToFloat64x2, (QRegisterView r, FixedQRegisterView<Q6> q)) {

  __ vcvtds(r.d(0), q.s(0));

  __ vcvtds(r.d(1), q.s(1));

DEFINE_EMIT(Float64x2GetSignMask,

  __ Lsr(out, out, compiler::Operand(31));

  __ Lsr(TMP, TMP, compiler::Operand(31));

DEFINE_EMIT(Float64x2Unary, (QRegisterView result, QRegisterView value)) {
  switch (instr->kind()) {
    case SimdOpInstr::kFloat64x2Negate:

    case SimdOpInstr::kFloat64x2Abs:

    case SimdOpInstr::kFloat64x2Sqrt:

DEFINE_EMIT(Float64x2Binary,
            (SameAsFirstInput, QRegisterView left, QRegisterView right)) {
  switch (instr->kind()) {
    case SimdOpInstr::kFloat64x2Scale:

    case SimdOpInstr::kFloat64x2WithX:

    case SimdOpInstr::kFloat64x2WithY:

    case SimdOpInstr::kFloat64x2Min: {

    case SimdOpInstr::kFloat64x2Max: {

DEFINE_EMIT(Int32x4FromInts,

DEFINE_EMIT(Int32x4FromBools,

             Temp<Register> temp)) {

  __ LoadImmediate(temp, 0xffffffff);
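  // Builds the lane mask: each boolean input is compared (against the value
  // held in IP) and the corresponding lane is set to all ones or all zeros.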
  __ cmp(v0, compiler::Operand(IP));

  __ cmp(v1, compiler::Operand(IP));

  __ cmp(v2, compiler::Operand(IP));

  __ cmp(v3, compiler::Operand(IP));

  switch (instr->kind()) {
    case SimdOpInstr::kInt32x4GetFlagX:

    case SimdOpInstr::kInt32x4GetFlagY:

    case SimdOpInstr::kInt32x4GetFlagZ:

    case SimdOpInstr::kInt32x4GetFlagW:

DEFINE_EMIT(Int32x4Select,

             Temp<QRegister> temp)) {
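  // Lane-wise select: out = (mask & trueValue) | (~mask & falseValue).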
5184 __ vmovq(temp, mask);
5186 __ vmvnq(temp, temp);
5188 __ vandq(mask, mask, trueValue);
5190 __ vandq(temp, temp, falseValue);
5192 __ vorrq(
out, mask, temp);
DEFINE_EMIT(Int32x4WithFlag, /* ... */) {
  // ...
  __ LoadImmediate(TMP, 0xffffffff, EQ);
  // ...
  switch (instr->kind()) {
    case SimdOpInstr::kInt32x4WithFlagX:
      // ...
    case SimdOpInstr::kInt32x4WithFlagY:
      // ...
    case SimdOpInstr::kInt32x4WithFlagZ:
      // ...
    case SimdOpInstr::kInt32x4WithFlagW:
      // ...
  }
}
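
// SIMD_OP_VARIANTS is an X-macro table: each run of CASE entries lists ops
// that share an emitter, the ____ entry that follows names the shared
// emitter, and SIMPLE marks ops with a dedicated emitter of the same name.
// The table is expanded twice below, with CASE/EMIT/SIMPLE redefined once
// for MakeLocationSummary and once for EmitNativeCode.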
#define SIMD_OP_VARIANTS(CASE, ____, SIMPLE) \
  CASE(Float32x4Add) \
  CASE(Float32x4Sub) \
  CASE(Float32x4Mul) \
  CASE(Float32x4Div) \
  CASE(Float32x4Equal) \
  CASE(Float32x4NotEqual) \
  CASE(Float32x4GreaterThan) \
  CASE(Float32x4GreaterThanOrEqual) \
  CASE(Float32x4LessThan) \
  CASE(Float32x4LessThanOrEqual) \
  CASE(Float32x4Min) \
  CASE(Float32x4Max) \
  CASE(Float32x4Scale) \
  CASE(Int32x4BitAnd) \
  CASE(Int32x4BitOr) \
  CASE(Int32x4BitXor) \
  ____(Simd32x4BinaryOp) \
  CASE(Float64x2Add) \
  CASE(Float64x2Sub) \
  CASE(Float64x2Mul) \
  CASE(Float64x2Div) \
  ____(Float64x2BinaryOp) \
  CASE(Float32x4GetX) \
  CASE(Float32x4GetY) \
  CASE(Float32x4GetZ) \
  CASE(Float32x4GetW) \
  CASE(Int32x4Shuffle) \
  CASE(Float32x4Shuffle) \
  ____(Simd32x4Shuffle) \
  CASE(Float32x4ShuffleMix) \
  CASE(Int32x4ShuffleMix) \
  ____(Simd32x4ShuffleMix) \
  CASE(Float32x4GetSignMask) \
  CASE(Int32x4GetSignMask) \
  ____(Simd32x4GetSignMask) \
  SIMPLE(Float32x4FromDoubles) \
  SIMPLE(Float32x4Zero) \
  SIMPLE(Float32x4Splat) \
  SIMPLE(Float32x4Sqrt) \
  CASE(Float32x4Negate) \
  CASE(Float32x4Abs) \
  CASE(Float32x4Reciprocal) \
  CASE(Float32x4ReciprocalSqrt) \
  ____(Float32x4Unary) \
  CASE(Float32x4ToInt32x4) \
  CASE(Int32x4ToFloat32x4) \
  ____(Simd32x4ToSimd32x4Conversion) \
  SIMPLE(Float32x4Clamp) \
  SIMPLE(Float64x2Clamp) \
  CASE(Float32x4WithX) \
  CASE(Float32x4WithY) \
  CASE(Float32x4WithZ) \
  CASE(Float32x4WithW) \
  ____(Float32x4With) \
  CASE(Float64x2GetX) \
  CASE(Float64x2GetY) \
  ____(Simd64x2Shuffle) \
  SIMPLE(Float64x2Zero) \
  SIMPLE(Float64x2Splat) \
  SIMPLE(Float64x2FromDoubles) \
  SIMPLE(Float64x2ToFloat32x4) \
  SIMPLE(Float32x4ToFloat64x2) \
  SIMPLE(Float64x2GetSignMask) \
  CASE(Float64x2Negate) \
  CASE(Float64x2Abs) \
  CASE(Float64x2Sqrt) \
  ____(Float64x2Unary) \
  CASE(Float64x2Scale) \
  CASE(Float64x2WithX) \
  CASE(Float64x2WithY) \
  CASE(Float64x2Min) \
  CASE(Float64x2Max) \
  ____(Float64x2Binary) \
  SIMPLE(Int32x4FromInts) \
  SIMPLE(Int32x4FromBools) \
  CASE(Int32x4GetFlagX) \
  CASE(Int32x4GetFlagY) \
  CASE(Int32x4GetFlagZ) \
  CASE(Int32x4GetFlagW) \
  ____(Int32x4GetFlag) \
  SIMPLE(Int32x4Select) \
  CASE(Int32x4WithFlagX) \
  CASE(Int32x4WithFlagY) \
  CASE(Int32x4WithFlagZ) \
  CASE(Int32x4WithFlagW) \
  ____(Int32x4WithFlag)
#define CASE(Name) case k##Name:
#define EMIT(Name) \
  return MakeLocationSummaryFromEmitter(zone, this, &Emit##Name);
#define SIMPLE(Name) CASE(Name) EMIT(Name)
// ...
#define CASE(Name) case k##Name:
#define EMIT(Name) \
  InvokeEmitter(compiler, this, &Emit##Name); \
  /* ... */
#define SIMPLE(Name) CASE(Name) EMIT(Name)
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::LeafRuntimeScope rt(compiler->assembler(), /* ... */);
  // ...
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...

  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
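
// MathMinMax: vcmpd sets the FPU flags, VS (unordered) routes NaN operands
// to returns_nan, and the GTE/LTE condition selects which operand survives.
// The trailing cmp against 0 presumably inspects the sign bit to tell -0.0
// apart from 0.0 when the operands compare equal.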
  ASSERT((op_kind() == MethodRecognizer::kMathMin) ||
         (op_kind() == MethodRecognizer::kMathMax));
  const bool is_min = (op_kind() == MethodRecognizer::kMathMin);
  // ...
  __ b(&returns_nan, VS);
  // ...
  const Condition double_condition =
      is_min ? TokenKindToDoubleCondition(Token::kGTE)
             : TokenKindToDoubleCondition(Token::kLTE);
  // ...
  __ LoadDImmediate(result, NAN, temp);
  // ...
  __ cmp(temp, compiler::Operand(0));
  // ...
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  switch (op_kind()) {
    case Token::kNEGATE: {
      compiler::Label* deopt = /* ... */;
      // ...
    }
    case Token::kBIT_NOT:
      // ...
  }
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  switch (op_kind()) {
    case Token::kNEGATE:
      // ...
    case Token::kSQUARE:
      // ...
  }
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...
  DoubleToIntegerSlowPath* slow_path =
      new DoubleToIntegerSlowPath(this, locs()->in(0).fpu_reg());
  compiler->AddSlowPathCode(slow_path);
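
  // Comparing value_double against itself sets VS only for NaN, which is
  // routed to the slow path. After vcvtid, the CompareImmediate against
  // 0xC0000000 with a MI branch catches results outside the 31-bit Smi range
  // (both overflow directions, including the saturated INT32_MIN, land there).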
  __ vcmpd(value_double, value_double);
  // ...
  __ b(slow_path->entry_label(), VS);
  // ...
  __ vcvtid(STMP, value_double);
  // ...
  __ CompareImmediate(result, 0xC0000000);
  __ b(slow_path->entry_label(), MI);
  // ...
  __ Bind(slow_path->exit_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  __ CompareImmediate(result, 0xC0000000);
  // ...
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...

  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
  // ...
  const intptr_t kNumTemps = /* ... */;
  // ...
  LocationSummary* result =
      new (zone) LocationSummary(/* ... */);
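
// InvokeDoublePow peels off common exponents before calling the C library:
// exp == 1 returns the base, exp == 2 and exp == 3 become multiplications,
// exp == 0.5 goes through the sqrt path (with the zero/negative base corner
// cases handled around return_zero), NaN operands fall through the
// return_nan/check_base labels, and only the remaining cases reach rt.Call
// on the pow runtime entry.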
static void InvokeDoublePow(FlowGraphCompiler* compiler,
                            InvokeMathCFunctionInstr* instr) {
  ASSERT(instr->recognized_kind() == MethodRecognizer::kMathDoublePow);
  const intptr_t kInputCount = 2;
  ASSERT(instr->InputCount() == kInputCount);
  LocationSummary* locs = instr->locs();
  // ...
  const Register temp = locs->temp(0).reg();
  // ...
  compiler::Label skip_call, try_sqrt, check_base, return_nan;
  __ vmovd(saved_base, base);
  __ LoadDImmediate(result, 1.0, temp);
  // ...
  __ b(&check_base, VS);
  __ b(&skip_call, EQ);
  // ...
  compiler::Label return_base;
  __ b(&return_base, EQ);
  // ...
  __ LoadDImmediate(DTMP, 2.0, temp);
  // ...
  compiler::Label return_base_times_2;
  __ b(&return_base_times_2, EQ);
  // ...
  __ LoadDImmediate(DTMP, 3.0, temp);
  // ...
  __ b(&check_base, NE);
  // ...
  __ vmuld(result, saved_base, saved_base);
  // ...
  __ Bind(&return_base_times_2);
  __ vmuld(result, saved_base, saved_base);
  // ...
  __ b(&return_nan, VS);
  __ b(&skip_call, EQ);
  // ...
  __ vcmpd(saved_base, exp);
  // ...
  __ b(&try_sqrt, VC);
  // ...
  __ LoadDImmediate(result, NAN, temp);
  // ...
  compiler::Label do_pow, return_zero;
  // ...
  __ LoadDImmediate(result, 0.5, temp);
  // ...
  __ vcmpdz(saved_base);
  // ...
  __ b(&return_zero, EQ);
  // ...
  __ LoadDImmediate(result, 0.0, temp);
  // ...
  __ vmovd(base, saved_base);
  // ...
  ASSERT(instr->TargetFunction().is_leaf());
  compiler::LeafRuntimeScope rt(compiler->assembler(), /* ... */);
  rt.Call(instr->TargetFunction(), kInputCount);
  // ...
}
  compiler::LeafRuntimeScope rt(compiler->assembler(), /* ... */);
  // ...
  rt.Call(instr->TargetFunction(), kInputCount);

  // ...
  compiler::LeafRuntimeScope rt(compiler->assembler(), /* ... */);

  // ...
  compiler::LeafRuntimeScope rt(compiler->assembler(), /* ... */);
  // ...
  const intptr_t kNumInputs = 1;
  LocationSummary* summary = /* ... */;
  // ...
  __ mov(out, compiler::Operand(in));
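
// TruncDivMod produces the quotient and remainder together: IntegerDivide
// yields the truncated quotient, mls computes left - right * quotient as the
// remainder, and the conditional sub/add at the end adjusts a negative
// remainder by |right| to match Dart's % semantics.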
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 2;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  const Register result_div = pair->At(0).reg();
  const Register result_mod = pair->At(1).reg();
  // ...
  __ cmp(right, compiler::Operand(0));
  // ...
  __ SmiUntag(temp, left);
  __ SmiUntag(IP, right);
  __ IntegerDivide(result_div, temp, IP, dtemp, DTMP);
  // ...
  __ CompareImmediate(result_div, 0x40000000);
  // ...
  __ SmiUntag(IP, right);
  // ...
  __ mls(result_mod, IP, result_div, temp);
  __ SmiTag(result_div);
  __ SmiTag(result_mod);
  // ...
  compiler::Label done;
  __ cmp(result_mod, compiler::Operand(0));
  // ...
  __ cmp(right, compiler::Operand(0));
  __ sub(result_mod, result_mod, compiler::Operand(right), LT);
  __ add(result_mod, result_mod, compiler::Operand(right), GE);
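
// EmitHashIntegerCodeSequence folds a 64-bit value into a 32-bit hash:
// umull multiplies the high word by the constant 0x2d51 and the low word is
// added into the product; the elided lines presumably finish mixing and mask
// the result down to a Smi-sized hash.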
static void EmitHashIntegerCodeSequence(FlowGraphCompiler* compiler,
                                        /* ... */) {
  __ LoadImmediate(TMP, compiler::Immediate(0x2d51));
  // ...
  __ umull(TMP, value_hi, value_hi, TMP);
  __ add(TMP, TMP, compiler::Operand(value_lo));
  // ...
}
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 4;
  LocationSummary* summary = new (zone) LocationSummary(
      /* ... */);
  // ...
  ASSERT(out_pair->At(1).reg() == R1);
  // ...
  compiler::Label hash_double, hash_double_value, try_convert;
  // ...
  __ AndImmediate(temp, temp, 0x7FF00000);
  __ CompareImmediate(temp, 0x7FF00000);
  __ b(&hash_double_value, EQ);
  // ...
  compiler::Label slow_path;
  // ...
  __ vmovrs(temp1, STMP);
  // ...
  __ CompareImmediate(temp1, 0xC0000000);
  __ b(&slow_path, MI);
  __ vmovdr(DTMP, 0, temp1);
  __ vcvtdi(temp_double, STMP);
  // ...
  __ b(&hash_double_value, NE);
  // ...
  __ SignFill(temp, temp1);
  // ...
  compiler::Label hash_integer, done;
  // ...
  compiler::LeafRuntimeScope rt(compiler->assembler(), 0, /* ... */);
  __ mov(R0, compiler::Operand(THR));
  // ...
  rt.Call(kTryDoubleAsIntegerRuntimeEntry, 1);
  __ mov(R4, compiler::Operand(R0));
  // ...
  __ LoadFromOffset(temp1, THR, /* ... */);
  __ LoadFromOffset(temp, THR, /* ... */);
  // ...
  __ cmp(R4, compiler::Operand(0));
  __ b(&hash_integer, NE);
  // ...
  __ Bind(&hash_double_value);
  __ vmovrrd(temp, temp1, value);
  // ...
  __ eor(result, temp1, compiler::Operand(temp));
  // ...
  __ mov(R1, compiler::Operand(0));
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  __ LoadFieldFromOffset(temp, value, /* ... */);
  // ...
  const intptr_t kNumInputs = 1;
  const bool need_mask_temp = IsBitTest();
  const intptr_t kNumTemps = !IsNullCheck() ? (need_mask_temp ? 2 : 1) : 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  if (need_mask_temp) {
    // ...
  }
  // ...

void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
                                    compiler::Label* deopt) {
  __ CompareObject(locs()->in(0).reg(), Object::null_object());
  // ...
}
void CheckClassInstr::EmitBitTest(FlowGraphCompiler* compiler,
                                  intptr_t min,
                                  intptr_t max,
                                  intptr_t mask,
                                  compiler::Label* deopt) {
  // ...
  __ AddImmediate(biased_cid, -min);
  __ CompareImmediate(biased_cid, max - min);
  // ...
  __ LoadImmediate(bit_reg, 1);
  __ Lsl(bit_reg, bit_reg, biased_cid);
  __ TestImmediate(bit_reg, mask);
  // ...
}
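
// EmitBitTest above deopts via a class-id bitmap: the cid is biased into
// [0, max - min], a one-bit mask is built with Lsl, and TestImmediate checks
// it against the accepted-cid bitmap. EmitCheckCid below instead handles a
// contiguous cid range, using a single compare when the range is one cid and
// a biased range compare otherwise.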
int CheckClassInstr::EmitCheckCid(FlowGraphCompiler* compiler,
                                  /* ... */
                                  compiler::Label* is_ok,
                                  compiler::Label* deopt,
                                  bool use_near_jump) {
  // ...
  if (cid_start == cid_end) {
    __ CompareImmediate(biased_cid, cid_start - bias);
    // ...
  }
  // ...
  __ AddImmediate(biased_cid, bias - cid_start);
  // ...
  __ CompareImmediate(biased_cid, cid_end - cid_start);
  // ...
  __ b(deopt, no_match);
  // ...
}
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  __ BranchIfNotSmi(value, deopt);
  __ CompareObject(value_reg, Object::null_object());
  // ...
  if (using_shared_stub && compiler->CanPcRelativeCall(stub) &&
      compiler->flow_graph().graph_entry()->NeedsFrame()) {
    __ GenerateUnRelocatedPcRelativeCall(EQUAL);
    compiler->AddPcRelativeCallStubTarget(stub);
    // ...
    auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
    // ...
        UntaggedPcDescriptors::kOther, locs(),
    // ...
  }
  // ...
  ThrowErrorSlowPathCode* slow_path = new NullErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  // ...
  __ BranchIf(EQUAL, slow_path->entry_label());
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  if (cids_.IsSingleCid()) {
    // ...
  }
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  if (length_loc.IsConstant() && index_loc.IsConstant()) {
    // ...
  }
  // ...
  if (index_loc.IsConstant()) {
    // ...
  } else if (length_loc.IsConstant()) {
    // ...
    if (index_cid != kSmiCid) {
      __ BranchIfNotSmi(index, deopt);
    }
    // ...
  }
  // ...
  if (index_cid != kSmiCid) {
    __ BranchIfNotSmi(index, deopt);
  }
  // ...
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs = new (zone) LocationSummary(
      zone, kNumInputs, kNumTemps, /* ... */);
  // ...
  WriteErrorSlowPath* slow_path = new WriteErrorSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ ldrb(TMP, compiler::FieldAddress(locs()->in(0).reg(), /* ... */));
  // ...
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = (op_kind() == Token::kMUL) ? 1 : 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  compiler::Operand o;
  if (CanBePairOfImmediateOperands(right(), &o, &o) && /* ... */) {
    // ...
  }
  // ...
  if (op_kind() == Token::kMUL) {
    // ...
  }
  // ...
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  // ...
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
  // ...
  compiler::Operand right_lo, right_hi;
  // ...
  const bool ok = CanBePairOfImmediateOperands(locs()->in(1).constant(),
                                               &right_lo, &right_hi);
  // ...
  right_lo = compiler::Operand(right_pair->At(0).reg());
  right_hi = compiler::Operand(right_pair->At(1).reg());
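
  // The 64-bit ALU cases below operate on (lo, hi) register pairs: ADD and
  // SUB propagate the carry/borrow with adds/adcs and subs/sbcs, while MUL
  // forms the low 64 bits of the product with umull and folds the two cross
  // terms into the high word with mul/mla.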
  switch (op_kind()) {
    case Token::kBIT_AND: {
      __ and_(out_lo, left_lo, compiler::Operand(right_lo));
      __ and_(out_hi, left_hi, compiler::Operand(right_hi));
      break;
    }
    case Token::kBIT_OR: {
      __ orr(out_lo, left_lo, compiler::Operand(right_lo));
      __ orr(out_hi, left_hi, compiler::Operand(right_hi));
      break;
    }
    case Token::kBIT_XOR: {
      __ eor(out_lo, left_lo, compiler::Operand(right_lo));
      __ eor(out_hi, left_hi, compiler::Operand(right_hi));
      break;
    }
    case Token::kADD: {
      __ adds(out_lo, left_lo, compiler::Operand(right_lo));
      __ adcs(out_hi, left_hi, compiler::Operand(right_hi));
      break;
    }
    case Token::kSUB: {
      __ subs(out_lo, left_lo, compiler::Operand(right_lo));
      __ sbcs(out_hi, left_hi, compiler::Operand(right_hi));
      break;
    }
    case Token::kMUL: {
      Register right_lo_reg = right_pair->At(0).reg();
      Register right_hi_reg = right_pair->At(1).reg();
      // ...
      __ mul(temp, left_lo, right_hi_reg);
      __ mla(out_hi, left_hi, right_lo_reg, temp);
      __ umull(out_lo, temp, left_lo, right_lo_reg);
      __ add(out_hi, out_hi, compiler::Operand(temp));
      break;
    }
    // ...
  }
static void EmitShiftInt64ByConstant(FlowGraphCompiler* compiler,
                                     Token::Kind op_kind,
                                     /* ... */
                                     const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
  // ...
  switch (op_kind) {
    case Token::kSHR: {
      if (shift < 32) {
        __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
        __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
        __ Asr(out_hi, left_hi, compiler::Operand(shift));
      } else {
        if (shift == 32) {
          __ mov(out_lo, compiler::Operand(left_hi));
        } else if (shift < 64) {
          __ Asr(out_lo, left_hi, compiler::Operand(shift - 32));
        } else {
          __ Asr(out_lo, left_hi, compiler::Operand(31));
        }
        __ Asr(out_hi, left_hi, compiler::Operand(31));
      }
      break;
    }
    case Token::kUSHR: {
      if (shift < 32) {
        __ Lsl(out_lo, left_hi, compiler::Operand(32 - shift));
        __ orr(out_lo, out_lo, compiler::Operand(left_lo, LSR, shift));
        __ Lsr(out_hi, left_hi, compiler::Operand(shift));
      } else {
        if (shift == 32) {
          __ mov(out_lo, compiler::Operand(left_hi));
        } else {
          __ Lsr(out_lo, left_hi, compiler::Operand(shift - 32));
        }
        __ mov(out_hi, compiler::Operand(0));
      }
      break;
    }
    case Token::kSHL: {
      if (shift < 32) {
        __ Lsr(out_hi, left_lo, compiler::Operand(32 - shift));
        __ orr(out_hi, out_hi, compiler::Operand(left_hi, LSL, shift));
        __ Lsl(out_lo, left_lo, compiler::Operand(shift));
      } else {
        if (shift == 32) {
          __ mov(out_hi, compiler::Operand(left_lo));
        } else {
          __ Lsl(out_hi, left_lo, compiler::Operand(shift - 32));
        }
        __ mov(out_lo, compiler::Operand(0));
      }
      break;
    }
    // ...
  }
}
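
// EmitShiftInt64ByRegister handles a variable shift count (already checked
// to be below 64) using conditional execution: rsbs computes IP = 32 - count
// and sets MI when the count exceeds 32, in which case IP is recomputed as
// count - 32 and the result comes entirely from the other half; on PL the
// two halves are combined with the complementary shift amounts.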
static void EmitShiftInt64ByRegister(FlowGraphCompiler* compiler,
                                     Token::Kind op_kind,
                                     /* ... */) {
  switch (op_kind) {
    case Token::kSHR: {
      __ rsbs(IP, right, compiler::Operand(32));
      __ sub(IP, right, compiler::Operand(32), MI);
      __ mov(out_lo, compiler::Operand(left_hi, ASR, IP), MI);
      __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
      __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
      __ mov(out_hi, compiler::Operand(left_hi, ASR, right));
      break;
    }
    case Token::kUSHR: {
      __ rsbs(IP, right, compiler::Operand(32));
      __ sub(IP, right, compiler::Operand(32), MI);
      __ mov(out_lo, compiler::Operand(left_hi, LSR, IP), MI);
      __ mov(out_lo, compiler::Operand(left_lo, LSR, right), PL);
      __ orr(out_lo, out_lo, compiler::Operand(left_hi, LSL, IP), PL);
      __ mov(out_hi, compiler::Operand(left_hi, LSR, right));
      break;
    }
    case Token::kSHL: {
      __ rsbs(IP, right, compiler::Operand(32));
      __ sub(IP, right, compiler::Operand(32), MI);
      __ mov(out_hi, compiler::Operand(left_lo, LSL, IP), MI);
      __ mov(out_hi, compiler::Operand(left_hi, LSL, right), PL);
      __ orr(out_hi, out_hi, compiler::Operand(left_lo, LSR, IP), PL);
      __ mov(out_lo, compiler::Operand(left_lo, LSL, right));
      break;
    }
    // ...
  }
}
static void EmitShiftUint32ByConstant(FlowGraphCompiler* compiler,
                                      /* ... */
                                      const Object& right) {
  const int64_t shift = Integer::Cast(right).AsInt64Value();
  // ...
  __ LoadImmediate(out, 0);
  // ...
  __ Lsr(out, left, compiler::Operand(shift));
  // ...
  __ Lsl(out, left, compiler::Operand(shift));
  // ...
}

static void EmitShiftUint32ByRegister(FlowGraphCompiler* compiler,
                                      /* ... */) {
  // ...
  __ Lsr(out, left, right);
  // ...
  __ Lsl(out, left, right);
  // ...
}
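
// ShiftInt64OpSlowPath is taken when the shift count misses the fast path:
// for a non-negative count (GE after comparing right_hi with 0) the result
// of an oversized shift is materialized inline (sign bits for kSHR, zeros
// for the logical shifts) and execution resumes at the exit label, while a
// negative count stores the count into the thread's unboxed runtime-arg
// slots and throws ArgumentError through
// kArgumentErrorUnboxedInt64RuntimeEntry.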
class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit ShiftInt64OpSlowPath(ShiftInt64OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "int64 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    PairLocation* left_pair = instruction()->locs()->in(0).AsPairLocation();
    Register left_hi = left_pair->At(1).reg();
    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    PairLocation* out_pair = instruction()->locs()->out(0).AsPairLocation();
    Register out_lo = out_pair->At(0).reg();
    Register out_hi = out_pair->At(1).reg();
    // ...
    __ CompareImmediate(right_hi, 0);
    // ...
    switch (instruction()->AsShiftInt64Op()->op_kind()) {
      case Token::kSHR:
        __ Asr(out_hi, left_hi, /* ... */);
        __ mov(out_lo, compiler::Operand(out_hi), GE);
        break;
      // ...
        __ LoadImmediate(out_lo, 0, GE);
        __ LoadImmediate(out_hi, 0, GE);
      // ...
    }
    // ...
    __ b(exit_label(), GE);
    // ...
    __ StoreToOffset(right_lo, THR, /* ... */);
    __ StoreToOffset(right_hi, THR, /* ... */);
    // ...
  }
};
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
      /* ... */);
  // ...
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  // ...
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
  // ...
  EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
                           left_hi, locs()->in(1).constant());
  // ...
  Register right_lo = right_pair->At(0).reg();
  Register right_hi = right_pair->At(1).reg();
  // ...
  ShiftInt64OpSlowPath* slow_path = nullptr;
  // ...
  slow_path = new (Z) ShiftInt64OpSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  __ CompareImmediate(right_hi, 0);
  __ b(slow_path->entry_label(), NE);
  __ CompareImmediate(right_lo, kShiftCountLimit);
  __ b(slow_path->entry_label(), HI);
  // ...
  EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
                           /* ... */);
  // ...
  if (slow_path != nullptr) {
    __ Bind(slow_path->exit_label());
  }
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  // ...
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
  // ...
  EmitShiftInt64ByConstant(compiler, op_kind(), out_lo, out_hi, left_lo,
                           left_hi, locs()->in(1).constant());
  // ...
  compiler::Label* deopt = /* ... */;
  // ...
  __ CompareImmediate(shift, kShiftCountLimit);
  // ...
  EmitShiftInt64ByRegister(compiler, op_kind(), out_lo, out_hi, left_lo,
                           /* ... */);
class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
 public:
  explicit ShiftUint32OpSlowPath(ShiftUint32OpInstr* instruction)
      : ThrowErrorSlowPathCode(instruction,
                               kArgumentErrorUnboxedInt64RuntimeEntry) {}

  const char* name() override { return "uint32 shift"; }

  void EmitCodeAtSlowPathEntry(FlowGraphCompiler* compiler) override {
    PairLocation* right_pair = instruction()->locs()->in(1).AsPairLocation();
    Register right_lo = right_pair->At(0).reg();
    Register right_hi = right_pair->At(1).reg();
    Register out = instruction()->locs()->out(0).reg();
    // ...
    __ CompareImmediate(right_hi, 0);
    // ...
    __ b(exit_label(), GE);
    // ...
    __ StoreToOffset(right_lo, THR, /* ... */);
    __ StoreToOffset(right_hi, THR, /* ... */);
    // ...
  }
};
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary = new (zone) LocationSummary(
      /* ... */);
  // ...
  EmitShiftUint32ByConstant(compiler, /* ... */
                            locs()->in(1).constant());
  // ...
  Register right_lo = right_pair->At(0).reg();
  Register right_hi = right_pair->At(1).reg();
  // ...
  ShiftUint32OpSlowPath* slow_path = nullptr;
  // ...
  slow_path = new (Z) ShiftUint32OpSlowPath(this);
  compiler->AddSlowPathCode(slow_path);
  // ...
  __ CompareImmediate(right_hi, 0);
  __ b(slow_path->entry_label(), NE);
  __ CompareImmediate(right_lo, kUint32ShiftCountLimit);
  __ b(slow_path->entry_label(), HI);
  // ...
  if (slow_path != nullptr) {
    __ Bind(slow_path->exit_label());
  }
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 1;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  EmitShiftUint32ByConstant(compiler, /* ... */
                            locs()->in(1).constant());
  // ...
  const bool shift_count_in_range = /* ... */;
  // ...
  if (!shift_count_in_range) {
    // ...
    compiler::Label* deopt = /* ... */;
    // ...
    __ CompareImmediate(right, 0);
    // ...
  }
  // ...
  if (!shift_count_in_range) {
    __ CompareImmediate(right, kUint32ShiftCountLimit);
    // ...
  }
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  Register left_lo = left_pair->At(0).reg();
  Register left_hi = left_pair->At(1).reg();
  // ...
  Register out_lo = out_pair->At(0).reg();
  Register out_hi = out_pair->At(1).reg();
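
  // kNEGATE below computes 0 - value across the pair: rsbs negates the low
  // word and sets the carry flag, sbc of out_hi with itself captures the
  // borrow as 0 or -1, and the final sub subtracts the high word.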
  switch (op_kind()) {
    case Token::kBIT_NOT:
      __ mvn_(out_lo, compiler::Operand(left_lo));
      __ mvn_(out_hi, compiler::Operand(left_hi));
      break;
    case Token::kNEGATE:
      __ rsbs(out_lo, left_lo, compiler::Operand(0));
      __ sbc(out_hi, out_hi, compiler::Operand(out_hi));
      __ sub(out_hi, out_hi, compiler::Operand(left_hi));
      break;
    // ...
  }
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  switch (op_kind()) {
    case Token::kBIT_AND:
      // ...
    case Token::kBIT_OR:
      // ...
    case Token::kBIT_XOR:
      // ...
  }
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  __ mvn_(out, compiler::Operand(left));
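
// IntConverterInstr: conversions between kUntagged and a same-width unboxed
// int32/uint32 are register-level no-ops; narrowing from kUnboxedInt64 takes
// the low word of the input pair (deopting when the value does not fit and
// the conversion is not truncating), and widening to kUnboxedInt64 fills the
// high word, zeroing it via eor-with-self for the unsigned case below.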
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 0;
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  if (from() == kUntagged || to() == kUntagged) {
    ASSERT((from() == kUntagged && to() == kUnboxedInt32) ||
           (from() == kUntagged && to() == kUnboxedUint32) ||
           (from() == kUnboxedInt32 && to() == kUntagged) ||
           (from() == kUnboxedUint32 && to() == kUntagged));
    // ...
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    // ...
  } else if (to() == kUnboxedInt64) {
    // ...
  } else {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    // ...
  }
  // ...

  const bool is_nop_conversion =
      (from() == kUntagged && to() == kUnboxedInt32) ||
      (from() == kUntagged && to() == kUnboxedUint32) ||
      (from() == kUnboxedInt32 && to() == kUntagged) ||
      (from() == kUnboxedUint32 && to() == kUntagged);
  if (is_nop_conversion) {
    // ...
  }
  // ...
  if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
    // ...
  } else if (from() == kUnboxedUint32 && to() == kUnboxedInt32) {
    // ...
    compiler::Label* deopt = /* ... */;
    __ tst(out, compiler::Operand(out));
    // ...
  } else if (from() == kUnboxedInt64) {
    ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
    // ...
    Register in_lo = in_pair->At(0).reg();
    Register in_hi = in_pair->At(1).reg();
    // ...
    __ mov(out, compiler::Operand(in_lo));
    // ...
    compiler::Label* deopt = /* ... */;
    // ...
  } else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
    // ...
    Register out_lo = out_pair->At(0).reg();
    Register out_hi = out_pair->At(1).reg();
    // ...
    __ mov(out_lo, compiler::Operand(in));
    if (from() == kUnboxedUint32) {
      __ eor(out_hi, out_hi, compiler::Operand(out_hi));
    }
    // ...
  }
  LocationSummary* summary =
      new (zone) LocationSummary(zone, InputCount(), /* ... */);
  // ...
  case kUnboxedDouble:
    // ...
  case kUnboxedDouble:
    // ...
  case kUnboxedInt32: {
    // ...
  }
  case kUnboxedFloat: {
    // ...
  }
  case kUnboxedInt64: {
    // ...
  }
  case kUnboxedDouble: {
    // ...
  }
  if (entry != nullptr) {
    if (!compiler->CanFallThroughTo(entry)) {
      FATAL("Checked function entry must have no offset");
    }
    // ...
  }
  // ...
  if (!compiler->CanFallThroughTo(entry)) {
    // ...
  }
  // ...
  if (FLAG_reorder_basic_blocks) {
    // ...
  }
  // ...
      InstructionSource());
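
// IndirectGotoInstr dispatches through a jump table: the offsets_ typed-data
// array is indexed to load a 32-bit offset, the code entry address is
// recovered by reading PC (which on ARM32 reads as the current instruction
// address plus 8) and subtracting the known entry-to-PC distance, and the
// sum of entry and offset is branched to with bx.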
  const intptr_t kNumInputs = 1;
  const intptr_t kNumTemps = 2;
  // ...
  LocationSummary* summary =
      new (zone) LocationSummary(/* ... */);
  // ...
  __ LoadObject(offset_reg, offsets_);
  const auto element_address = __ ElementAddressForRegIndex(
      false, kTypedDataInt32ArrayCid,
      /* ... */
      false, offset_reg, index_reg);
  __ ldr(offset_reg, element_address);
  // ...
  __ mov(target_address_reg, compiler::Operand(PC));
  __ AddImmediate(target_address_reg, -entry_to_pc_offset);
  // ...
  __ add(target_address_reg, target_address_reg, compiler::Operand(offset_reg));
  // ...
  __ bx(target_address_reg);
  const intptr_t kNumInputs = 2;
  const intptr_t kNumTemps = 0;
  // ...
  LocationSummary* locs =
      new (zone) LocationSummary(/* ... */);
  // ...
  LocationSummary* locs =
      new (zone) LocationSummary(/* ... */);
  // ...
  ConstantInstr* constant = left()->definition()->AsConstant();
  if ((constant != nullptr) && !left()->IsSingleUse()) {
    // ...
  }
  // ...
  constant = right()->definition()->AsConstant();
  if ((constant != nullptr) && !right()->IsSingleUse()) {
    // ...
  }
Condition StrictCompareInstr::EmitComparisonCodeRegConstant(
    /* ... */
    BranchLabels labels,
    /* ... */
    const Object& obj) {
  // ...
}

// ...
  compiler::Label is_true, is_false, done;
  BranchLabels labels = {&is_true, &is_false, &is_false};
  // ...
  if (is_false.IsLinked() || is_true.IsLinked()) {
    // ...
    EmitBranchOnCondition(compiler, true_condition, labels);
    // ...
  }

// ... (EmitBranchCode signature head elided in this excerpt)
                                   BranchInstr* branch) {
  BranchLabels labels = compiler->CreateBranchLabels(branch);
  // ...
  EmitBranchOnCondition(compiler, true_condition, labels);
  // ...
  const intptr_t kNumInputs = (type_arguments() != nullptr) ? 1 : 0;
  const intptr_t kNumTemps = 0;
  LocationSummary* locs =
      new (zone) LocationSummary(/* ... */);
  // ...
  TypeUsageInfo* type_usage_info = compiler->thread()->type_usage_info();
  if (type_usage_info != nullptr) {
    // ...
  }
  // ...
  compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
                             /* ... */);
  // ...
  __ BranchLinkPatchable(StubCode::DebugStepCheck());
#endif  // defined(TARGET_ARCH_ARM)