6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
8#define SHOULD_NOT_INCLUDE_RUNTIME
// DEFINE_FLAG fragment: declares the int flag "far_branch_level"
// (default 0) controlling how aggressively the assembler emits
// far-branch sequences (see EmitBranch/EmitJump below: 0 = near,
// 1 = branch+jump, 2 = branch+far-jump reservations).
// NOTE(review): the leading "22" here (and similar numeric prefixes
// throughout this extract) are original-file line numbers fused in by
// extraction, not code.
22DEFINE_FLAG(
    int, far_branch_level, 0,
    "Always use far branches");
// MicroAssembler constructor (fragment — the first parameters and part
// of the initializer list are elided in this extract). Records the
// requested far_branch_level and validates it lies in the supported
// range [0, 2].
27 intptr_t far_branch_level,
29 : AssemblerBase(object_pool_builder),
31 far_branch_level_(far_branch_level) {
  // Only levels 0, 1 and 2 are meaningful (see EmitBranch/EmitJump).
32 ASSERT(far_branch_level >= 0);
33 ASSERT(far_branch_level <= 2);
36MicroAssembler::~MicroAssembler() {}
// MicroAssembler::Bind fragment (the function header, the macro's
// closing lines, and the trailing `#undef` are elided). Binds `label`
// at the current buffer position. The BIND macro walks one singly
// linked chain of unresolved branch sites: each site stores the delta
// back to the previous link, `update` patches the site with its final
// offset and returns the previously stored value, and a returned
// old_offset of 0 terminates the chain. One chain exists per encoding:
// compressed branch (cb), compressed jump (cj), B-type, J-type, and
// auipc+jr far sequences. Alignment asserts use 2-byte granularity
// when the C extension is supported, 4-byte otherwise.
40 intptr_t target_position = Position();
41 intptr_t branch_position;
43#define BIND(head, update) \
44 branch_position = label->head; \
45 while (branch_position >= 0) { \
46 ASSERT(Utils::IsAligned(branch_position, Supports(RV_C) ? 2 : 4)); \
47 intptr_t new_offset = target_position - branch_position; \
48 ASSERT(Utils::IsAligned(new_offset, Supports(RV_C) ? 2 : 4)); \
49 intptr_t old_offset = update(branch_position, new_offset); \
50 if (old_offset == 0) break; \
51 branch_position -= old_offset; \
55 BIND(unresolved_cb_, UpdateCBOffset);
56 BIND(unresolved_cj_, UpdateCJOffset);
57 BIND(unresolved_b_, UpdateBOffset);
58 BIND(unresolved_j_, UpdateJOffset);
59 BIND(unresolved_far_, UpdateFarOffset);
61 label->BindTo(target_position);
// UpdateCBOffset fragment (an assert, the guard condition around the
// FATAL, and the `return old_offset;` are elided). Re-encodes the
// 16-bit compressed conditional branch at branch_position with
// new_offset, and yields the previously encoded offset so Bind can
// follow the unresolved-site chain. The elided guard presumably FATALs
// when new_offset does not fit the CB-type immediate (caller used
// kNearJump incorrectly) — confirm against the full source.
64intptr_t MicroAssembler::UpdateCBOffset(intptr_t branch_position,
65 intptr_t new_offset) {
66 CInstr instr(Read16(branch_position));
68 intptr_t old_offset = instr.b_imm();
70 FATAL(
"Incorrect Assembler::kNearJump");
72 Write16(branch_position,
73 instr.opcode() | EncodeCRs1p(instr.rs1p()) |
EncodeCBImm(new_offset));
// UpdateCJOffset fragment (assert, FATAL guard condition, and return
// elided). Same patching protocol as UpdateCBOffset, but for the
// 16-bit compressed jump: re-encodes the CJ-type immediate with
// new_offset and yields the offset that was previously stored in the
// instruction. The elided guard presumably rejects offsets that do
// not fit the CJ immediate — confirm against the full source.
77intptr_t MicroAssembler::UpdateCJOffset(intptr_t branch_position,
78 intptr_t new_offset) {
79 CInstr instr(Read16(branch_position));
81 intptr_t old_offset = instr.j_imm();
83 FATAL(
"Incorrect Assembler::kNearJump");
85 Write16(branch_position, instr.opcode() |
EncodeCJImm(new_offset));
// UpdateBOffset fragment (assert/guard lines, the final encode term of
// the Write32, and the return are elided). Rewrites the 32-bit B-type
// conditional branch at branch_position, preserving rs1/rs2/funct3/
// opcode; the elided trailing term presumably ORs in the B-type
// encoding of new_offset — confirm. Yields the previously encoded
// offset for Bind's chain walk.
89intptr_t MicroAssembler::UpdateBOffset(intptr_t branch_position,
90 intptr_t new_offset) {
91 Instr instr(Read32(branch_position));
93 intptr_t old_offset = instr.btype_imm();
97 Write32(branch_position, EncodeRs2(instr.rs2()) | EncodeRs1(instr.rs1()) |
98 EncodeFunct3(instr.funct3()) |
99 EncodeOpcode(instr.opcode()) |
// UpdateJOffset fragment (assert/guard lines, the J-type immediate
// encode term, and the return are elided). Rewrites the 32-bit J-type
// jump at branch_position, preserving rd and opcode; the elided
// trailing term presumably ORs in the J-type encoding of new_offset —
// confirm. Yields the previously encoded offset.
104intptr_t MicroAssembler::UpdateJOffset(intptr_t branch_position,
105 intptr_t new_offset) {
106 Instr instr(Read32(branch_position));
108 intptr_t old_offset = instr.jtype_imm();
112 Write32(branch_position, EncodeRd(instr.rd()) | EncodeOpcode(instr.opcode()) |
// UpdateFarOffset fragment (asserts on the auipc/jr pair, the guard
// condition, parts of both Write32 calls, and the return are elided).
// Patches the two-instruction far sequence: an auipc at
// branch_position followed by a jr at branch_position + 4. The old
// offset is reassembled as auipc's U-type immediate plus jr's I-type
// immediate; new_offset is split with ImmLo/ImmHi, and the visible
// FATAL fires (under the elided condition) when the distance exceeds
// the +/-2GB reach of auipc+jr.
117intptr_t MicroAssembler::UpdateFarOffset(intptr_t branch_position,
118 intptr_t new_offset) {
119 Instr auipc_instr(Read32(branch_position));
122 Instr jr_instr(Read32(branch_position + 4));
127 intptr_t old_offset = auipc_instr.utype_imm() + jr_instr.itype_imm();
128 intx_t lo =
ImmLo(new_offset);
129 intx_t hi =
ImmHi(new_offset);
131 FATAL(
"Jump/branch distance exceeds 2GB!");
133 Write32(branch_position,
136 EncodeFunct3(
F3_0) | EncodeRd(
ZR) |
141void MicroAssembler::lui(
Register rd, intptr_t imm) {
147 EmitUType(imm, rd,
LUI);
150void MicroAssembler::lui_fixed(
Register rd, intptr_t imm) {
152 EmitUType(imm, rd,
LUI);
155void MicroAssembler::auipc(
Register rd, intptr_t imm) {
157 EmitUType(imm, rd,
AUIPC);
162 if (Supports(
RV_C) &&
164 (label->IsBound() &&
IsCJImm(label->Position() - Position())))) {
181 if (Supports(
RV_C)) {
186 }
else if (rd ==
RA) {
200void MicroAssembler::beq(
Register rs1,
205 if (Supports(
RV_C) &&
207 (label->IsBound() &&
IsCBImm(label->Position() - Position())))) {
208 if ((rs1 ==
ZR) && IsCRs1p(rs2)) {
211 }
else if ((rs2 ==
ZR) && IsCRs1p(rs1)) {
219void MicroAssembler::bne(
Register rs1,
224 if (Supports(
RV_C) &&
226 (label->IsBound() &&
IsCBImm(label->Position() - Position())))) {
227 if ((rs1 ==
ZR) && IsCRs1p(rs2)) {
230 }
else if ((rs2 ==
ZR) && IsCRs1p(rs1)) {
238void MicroAssembler::blt(
Register rs1,
246void MicroAssembler::bge(
Register rs1,
254void MicroAssembler::bltu(
Register rs1,
262void MicroAssembler::bgeu(
Register rs1,
281 if (Supports(
RV_C)) {
316 if (Supports(
RV_C)) {
331 if (Supports(
RV_C)) {
336 if ((rd == rs1) &&
IsCIImm(imm) && (imm != 0)) {
337 c_addi(rd, rs1, imm);
340 if ((rd ==
SP) && (rs1 ==
SP) &&
IsCI16Imm(imm) && (imm != 0)) {
341 c_addi16sp(rd, rs1, imm);
344 if (IsCRdp(rd) && (rs1 ==
SP) &&
IsCI4SPNImm(imm) && (imm != 0)) {
345 c_addi4spn(rd, rs1, imm);
349 if ((rd ==
ZR) && (rs1 ==
ZR)) {
353 if ((rd !=
ZR) && (rs1 !=
ZR)) {
379 EmitIType(imm, rs1,
ORI, rd,
OPIMM);
384 if (Supports(
RV_C)) {
385 if ((rd == rs1) && IsCRs1p(rs1) &&
IsCIImm(imm)) {
386 c_andi(rd, rs1, imm);
394 ASSERT((shamt > 0) && (shamt < XLEN));
396 if (Supports(
RV_C)) {
397 if ((rd == rs1) && (shamt != 0) &&
IsCIImm(shamt)) {
398 c_slli(rd, rs1, shamt);
406 ASSERT((shamt > 0) && (shamt < XLEN));
408 if (Supports(
RV_C)) {
409 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) &&
IsCIImm(shamt)) {
410 c_srli(rd, rs1, shamt);
418 ASSERT((shamt > 0) && (shamt < XLEN));
420 if (Supports(
RV_C)) {
421 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) &&
IsCIImm(shamt)) {
422 c_srai(rd, rs1, shamt);
431 if (Supports(
RV_C)) {
446 if (Supports(
RV_C)) {
447 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
452 EmitRType(
SUB, rs2, rs1,
ADD, rd,
OP);
472 if (Supports(
RV_C)) {
473 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
477 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
487 EmitRType(
F7_0, rs2, rs1,
SR, rd,
OP);
492 EmitRType(
SRA, rs2, rs1,
SR, rd,
OP);
497 if (Supports(
RV_C)) {
498 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
502 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
507 EmitRType(
F7_0, rs2, rs1,
OR, rd,
OP);
512 if (Supports(
RV_C)) {
513 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
517 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
526 ASSERT((predecessor & kAll) == predecessor);
527 ASSERT((successor & kAll) == successor);
532void MicroAssembler::fencei() {
537void MicroAssembler::ecall() {
541void MicroAssembler::ebreak() {
543 if (Supports(
RV_C)) {
549void MicroAssembler::SimulatorPrintObject(
Register rs1) {
569void MicroAssembler::csrrwi(
Register rd, uint32_t csr, uint32_t imm) {
574void MicroAssembler::csrrsi(
Register rd, uint32_t csr, uint32_t imm) {
579void MicroAssembler::csrrci(
Register rd, uint32_t csr, uint32_t imm) {
584void MicroAssembler::trap() {
586 if (Supports(
RV_C)) {
601 if (Supports(
RV_C)) {
616 if (Supports(
RV_C)) {
631 if (Supports(
RV_C)) {
636 if ((rd == rs1) && (rd !=
ZR) &&
IsCIImm(imm)) {
637 c_addiw(rd, rs1, imm);
645 ASSERT((shamt > 0) && (shamt < 32));
651 ASSERT((shamt > 0) && (shamt < 32));
657 ASSERT((shamt > 0) && (shamt < XLEN));
664 if (Supports(
RV_C)) {
665 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
666 c_addw(rd, rs1, rs2);
669 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
670 c_addw(rd, rs2, rs1);
679 if (Supports(
RV_C)) {
680 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
681 c_subw(rd, rs1, rs2);
770void MicroAssembler::lrw(
Register rd, Address
addr, std::memory_order order) {
775void MicroAssembler::scw(
Register rd,
778 std::memory_order order) {
784void MicroAssembler::amoswapw(
Register rd,
787 std::memory_order order) {
793void MicroAssembler::amoaddw(
Register rd,
796 std::memory_order order) {
802void MicroAssembler::amoxorw(
Register rd,
805 std::memory_order order) {
811void MicroAssembler::amoandw(
Register rd,
814 std::memory_order order) {
820void MicroAssembler::amoorw(
Register rd,
823 std::memory_order order) {
829void MicroAssembler::amominw(
Register rd,
832 std::memory_order order) {
838void MicroAssembler::amomaxw(
Register rd,
841 std::memory_order order) {
847void MicroAssembler::amominuw(
Register rd,
850 std::memory_order order) {
856void MicroAssembler::amomaxuw(
Register rd,
859 std::memory_order order) {
866void MicroAssembler::lrd(
Register rd, Address
addr, std::memory_order order) {
872void MicroAssembler::scd(
Register rd,
875 std::memory_order order) {
881void MicroAssembler::amoswapd(
Register rd,
884 std::memory_order order) {
890void MicroAssembler::amoaddd(
Register rd,
893 std::memory_order order) {
899void MicroAssembler::amoxord(
Register rd,
902 std::memory_order order) {
908void MicroAssembler::amoandd(
Register rd,
911 std::memory_order order) {
917void MicroAssembler::amoord(
Register rd,
920 std::memory_order order) {
926void MicroAssembler::amomind(
Register rd,
929 std::memory_order order) {
935void MicroAssembler::amomaxd(
Register rd,
938 std::memory_order order) {
944void MicroAssembler::amominud(
Register rd,
947 std::memory_order order) {
953void MicroAssembler::amomaxud(
Register rd,
956 std::memory_order order) {
966 if (Supports(
RV_C)) {
983 if (Supports(
RV_C)) {
1003 EmitR4Type(rs3,
F2_S, rs2, rs1, rounding, rd,
FMADD);
1006void MicroAssembler::fmsubs(
FRegister rd,
1012 EmitR4Type(rs3,
F2_S, rs2, rs1, rounding, rd,
FMSUB);
1015void MicroAssembler::fnmsubs(
FRegister rd,
1021 EmitR4Type(rs3,
F2_S, rs2, rs1, rounding, rd,
FNMSUB);
1024void MicroAssembler::fnmadds(
FRegister rd,
1030 EmitR4Type(rs3,
F2_S, rs2, rs1, rounding, rd,
FNMADD);
1038 EmitRType(
FADDS, rs2, rs1, rounding, rd,
OPFP);
1046 EmitRType(
FSUBS, rs2, rs1, rounding, rd,
OPFP);
1054 EmitRType(
FMULS, rs2, rs1, rounding, rd,
OPFP);
1062 EmitRType(
FDIVS, rs2, rs1, rounding, rd,
OPFP);
1065void MicroAssembler::fsqrts(
FRegister rd,
1122void MicroAssembler::fcvtwus(
Register rd,
1134void MicroAssembler::fcvtswu(
FRegister rd,
1157void MicroAssembler::fcvtlus(
Register rd,
1169void MicroAssembler::fcvtslu(
FRegister rd,
1179 if (Supports(
RV_C)) {
1194 if (Supports(
RV_C)) {
1207void MicroAssembler::fmaddd(
FRegister rd,
1213 EmitR4Type(rs3,
F2_D, rs2, rs1, rounding, rd,
FMADD);
1216void MicroAssembler::fmsubd(
FRegister rd,
1222 EmitR4Type(rs3,
F2_D, rs2, rs1, rounding, rd,
FMSUB);
1225void MicroAssembler::fnmsubd(
FRegister rd,
1231 EmitR4Type(rs3,
F2_D, rs2, rs1, rounding, rd,
FNMSUB);
1234void MicroAssembler::fnmaddd(
FRegister rd,
1240 EmitR4Type(rs3,
F2_D, rs2, rs1, rounding, rd,
FNMADD);
1248 EmitRType(
FADDD, rs2, rs1, rounding, rd,
OPFP);
1256 EmitRType(
FSUBD, rs2, rs1, rounding, rd,
OPFP);
1264 EmitRType(
FMULD, rs2, rs1, rounding, rd,
OPFP);
1272 EmitRType(
FDIVD, rs2, rs1, rounding, rd,
OPFP);
1275void MicroAssembler::fsqrtd(
FRegister rd,
1307void MicroAssembler::fcvtsd(
FRegister rd,
1314void MicroAssembler::fcvtds(
FRegister rd,
1346void MicroAssembler::fcvtwud(
Register rd,
1358void MicroAssembler::fcvtdwu(
FRegister rd,
1371void MicroAssembler::fcvtlud(
Register rd,
1388void MicroAssembler::fcvtdlu(
FRegister rd,
1444 ASSERT((shamt > 0) && (shamt < 32));
1452 EmitRType(
SUB, rs2, rs1,
AND, rd,
OP);
1457 EmitRType(
SUB, rs2, rs1,
OR, rd,
OP);
1462 EmitRType(
SUB, rs2, rs1,
XOR, rd,
OP);
1528 EmitRType((
Funct7)0b0000100, 0b00000, rs1,
ZEXT, rd,
OP);
1637void MicroAssembler::lb(
Register rd, Address
addr, std::memory_order order) {
1639 ASSERT((order == std::memory_order_acquire) ||
1640 (order == std::memory_order_acq_rel));
1645void MicroAssembler::lh(
Register rd, Address
addr, std::memory_order order) {
1647 ASSERT((order == std::memory_order_acquire) ||
1648 (order == std::memory_order_acq_rel));
1653void MicroAssembler::lw(
Register rd, Address
addr, std::memory_order order) {
1655 ASSERT((order == std::memory_order_acquire) ||
1656 (order == std::memory_order_acq_rel));
1661void MicroAssembler::sb(
Register rs2, Address
addr, std::memory_order order) {
1663 ASSERT((order == std::memory_order_release) ||
1664 (order == std::memory_order_acq_rel));
1671 ASSERT((order == std::memory_order_release) ||
1672 (order == std::memory_order_acq_rel));
1677void MicroAssembler::sw(
Register rs2, Address
addr, std::memory_order order) {
1679 ASSERT((order == std::memory_order_release) ||
1680 (order == std::memory_order_acq_rel));
1686void MicroAssembler::ld(
Register rd, Address
addr, std::memory_order order) {
1688 ASSERT((order == std::memory_order_acquire) ||
1689 (order == std::memory_order_acq_rel));
1694void MicroAssembler::sd(
Register rs2, Address
addr, std::memory_order order) {
1696 ASSERT((order == std::memory_order_release) ||
1697 (order == std::memory_order_acq_rel));
1703void MicroAssembler::c_lwsp(
Register rd, Address
addr) {
1718void MicroAssembler::c_ldsp(
Register rd, Address
addr) {
1733void MicroAssembler::c_swsp(
Register rs2, Address
addr) {
1747void MicroAssembler::c_sdsp(
Register rs2, Address
addr) {
1762 Emit16(
C_LW | EncodeCRdp(rd) | EncodeCRs1p(
addr.base()) |
1768 Emit16(
C_LD | EncodeCRdp(rd) | EncodeCRs1p(
addr.base()) |
1775 Emit16(
C_FLW | EncodeCFRdp(rd) | EncodeCRs1p(
addr.base()) |
1782 Emit16(
C_FLD | EncodeCFRdp(rd) | EncodeCRs1p(
addr.base()) |
1788 Emit16(
C_SW | EncodeCRs1p(
addr.base()) | EncodeCRs2p(rs2) |
1794 Emit16(
C_SD | EncodeCRs1p(
addr.base()) | EncodeCRs2p(rs2) |
1801 Emit16(
C_FSW | EncodeCRs1p(
addr.base()) | EncodeCFRs2p(rs2) |
1808 Emit16(
C_FSD | EncodeCRs1p(
addr.base()) | EncodeCFRs2p(rs2) |
1812void MicroAssembler::c_j(Label* label) {
1814 EmitCJump(label,
C_J);
1818void MicroAssembler::c_jal(Label* label) {
1820 EmitCJump(label,
C_JAL);
1824void MicroAssembler::c_jr(
Register rs1) {
1827 Emit16(
C_JR | EncodeCRs1(rs1) | EncodeCRs2(
ZR));
1830void MicroAssembler::c_jalr(
Register rs1) {
1832 Emit16(
C_JALR | EncodeCRs1(rs1) | EncodeCRs2(
ZR));
1835void MicroAssembler::c_beqz(
Register rs1p, Label* label) {
1837 EmitCBranch(rs1p, label,
C_BEQZ);
1840void MicroAssembler::c_bnez(
Register rs1p, Label* label) {
1842 EmitCBranch(rs1p, label,
C_BNEZ);
1845void MicroAssembler::c_li(
Register rd, intptr_t imm) {
1851void MicroAssembler::c_lui(
Register rd, uintptr_t imm) {
1916 Emit16(
C_MV | EncodeCRd(rd) | EncodeCRs2(rs2));
1924 Emit16(
C_ADD | EncodeCRd(rd) | EncodeCRs2(rs2));
1930 Emit16(
C_AND | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1935 Emit16(
C_OR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1940 Emit16(
C_XOR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1945 Emit16(
C_SUB | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1951 Emit16(
C_ADDW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1956 Emit16(
C_SUBW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1960void MicroAssembler::c_nop() {
1965void MicroAssembler::c_ebreak() {
// EmitBranch fragment (the offset declaration, the distance tests
// selecting each arm, the near-branch emission, and trailing length
// asserts are elided). Strategy visible from the surviving lines:
// - Bound label: emit a direct B-type branch when the offset fits;
//   otherwise emit an inverted-condition branch (InvertFunct3) that
//   skips over either a J-type jump (kFarBranchLength = 8) or an
//   auipc+jr far sequence (kFarBranchLength = 12); beyond that, FATAL
//   ("Branch distance exceeds 2GB!").
// - Unbound label: choice driven by far_branch_level(): level 0 links
//   a near B-type site (link_b), level 1 reserves branch+jump and
//   links via link_j, otherwise branch+far sequence via link_far.
// The start/end Position() pairs bracket each reserved sequence,
// presumably for elided length assertions — confirm.
1989void MicroAssembler::EmitBranch(
    Register rs1,
1995 if (label->IsBound()) {
1997 offset = label->Position() - Position();
2004 intptr_t
start = Position();
2005 const intptr_t kFarBranchLength = 8;
2006 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func),
BRANCH);
2007 offset = label->Position() - Position();
2009 intptr_t
end = Position();
2014 intptr_t
start = Position();
2015 const intptr_t kFarBranchLength = 12;
2016 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func),
BRANCH);
2017 offset = label->Position() - Position();
2021 FATAL(
"Branch distance exceeds 2GB!");
2025 intptr_t
end = Position();
2032 offset = label->link_b(Position());
2034 FATAL(
"Incorrect Assembler::kNearJump");
2037 }
else if (far_branch_level() == 0) {
2038 offset = label->link_b(Position());
2047 }
else if (far_branch_level() == 1) {
2048 intptr_t
start = Position();
2049 const intptr_t kFarBranchLength = 8;
2050 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func),
BRANCH);
2051 offset = label->link_j(Position());
2053 intptr_t
end = Position();
2056 intptr_t
start = Position();
2057 const intptr_t kFarBranchLength = 12;
2058 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func),
BRANCH);
2059 offset = label->link_far(Position());
2063 FATAL(
"Branch distance exceeds 2GB!");
2067 intptr_t
end = Position();
// EmitJump fragment (offset declaration, the distance tests, the
// actual J-type / far-sequence emissions, and closing braces are
// elided). Bound label: emit a direct J-type jump when it fits,
// otherwise a far sequence, FATALing past +/-2GB. Unbound label:
// kNearJump demands a J-type reach (FATAL "Incorrect
// Assembler::kNearJump" under the elided guard); far_branch_level()
// < 2 links a J-type site (link_j), level 2 links a far auipc+jr site
// (link_far).
2073void MicroAssembler::EmitJump(
    Register rd,
2078 if (label->IsBound()) {
2080 offset = label->Position() - Position();
2089 FATAL(
"Jump distance exceeds 2GB!");
2098 offset = label->link_j(Position());
2100 FATAL(
"Incorrect Assembler::kNearJump");
2103 }
else if (far_branch_level() < 2) {
2104 offset = label->link_j(Position());
2110 offset = label->link_far(Position());
2114 FATAL(
"Jump distance exceeds 2GB!");
// EmitCBranch fragment (offset declaration, the fit guard, and the
// Emit16 of the CB-type instruction are elided). Emits a compressed
// conditional branch (c.beqz/c.bnez per `op`) to `label`: a bound
// label gives the offset directly, an unbound one links this site into
// the unresolved-cb chain; the visible FATAL fires (under the elided
// condition) when the offset cannot be a CB immediate.
2122void MicroAssembler::EmitCBranch(
    Register rs1p, Label* label,
    COpcode op) {
2124 if (label->IsBound()) {
2125 offset = label->Position() - Position();
2127 offset = label->link_cb(Position());
2130 FATAL(
"Incorrect Assembler::kNearJump");
// EmitCJump fragment (offset declaration, fit guard, and the Emit16 of
// the CJ-type instruction are elided). Compressed-jump analogue of
// EmitCBranch: bound labels give the offset directly, unbound ones are
// linked into the unresolved-cj chain; FATAL (under the elided guard)
// when the offset exceeds CJ immediate reach.
2135void MicroAssembler::EmitCJump(Label* label,
    COpcode op) {
2137 if (label->IsBound()) {
2138 offset = label->Position() - Position();
2140 offset = label->link_cj(Position());
2143 FATAL(
"Incorrect Assembler::kNearJump");
// EmitRType overload fragment for atomic (AMO-style) instructions
// (remaining parameters, the switch's per-case bodies, and the
// default's closing lines are elided). Builds funct7 from funct5 << 2
// and — in the elided case bodies — presumably ORs in the aq/rl bits
// selected by the C++ std::memory_order (acq_rel sets both, acquire/
// release one each, relaxed none) before delegating to the plain
// Funct7 overload; confirm against the full source. Any other order
// value is rejected with FATAL("Invalid memory order").
2148void MicroAssembler::EmitRType(
    Funct5 funct5,
2149 std::memory_order order,
2155 intptr_t funct7 = funct5 << 2;
2157 case std::memory_order_acq_rel:
2160 case std::memory_order_acquire:
2163 case std::memory_order_release:
2166 case std::memory_order_relaxed:
2170 FATAL(
"Invalid memory order");
2172 EmitRType((
Funct7)funct7, rs2, rs1, funct3, rd, opcode);
2175void MicroAssembler::EmitRType(
Funct7 funct7,
2182 e |= EncodeFunct7(funct7);
2183 e |= EncodeRs2(rs2);
2184 e |= EncodeRs1(rs1);
2185 e |= EncodeFunct3(funct3);
2187 e |= EncodeOpcode(opcode);
2191void MicroAssembler::EmitRType(
Funct7 funct7,
2198 e |= EncodeFunct7(funct7);
2199 e |= EncodeFRs2(rs2);
2200 e |= EncodeFRs1(rs1);
2201 e |= EncodeFunct3(funct3);
2203 e |= EncodeOpcode(opcode);
2207void MicroAssembler::EmitRType(
Funct7 funct7,
2214 e |= EncodeFunct7(funct7);
2215 e |= EncodeFRs2(rs2);
2216 e |= EncodeFRs1(rs1);
2217 e |= EncodeRoundingMode(
round);
2219 e |= EncodeOpcode(opcode);
2223void MicroAssembler::EmitRType(
Funct7 funct7,
2230 e |= EncodeFunct7(funct7);
2231 e |= EncodeFRs2(rs2);
2232 e |= EncodeRs1(rs1);
2233 e |= EncodeRoundingMode(
round);
2235 e |= EncodeOpcode(opcode);
2239void MicroAssembler::EmitRType(
Funct7 funct7,
2246 e |= EncodeFunct7(funct7);
2247 e |= EncodeFRs2(rs2);
2248 e |= EncodeRs1(rs1);
2249 e |= EncodeFunct3(funct3);
2251 e |= EncodeOpcode(opcode);
2255void MicroAssembler::EmitRType(
Funct7 funct7,
2262 e |= EncodeFunct7(funct7);
2263 e |= EncodeFRs2(rs2);
2264 e |= EncodeFRs1(rs1);
2265 e |= EncodeFunct3(funct3);
2267 e |= EncodeOpcode(opcode);
2271void MicroAssembler::EmitRType(
Funct7 funct7,
2278 e |= EncodeFunct7(funct7);
2279 e |= EncodeFRs2(rs2);
2280 e |= EncodeFRs1(rs1);
2281 e |= EncodeRoundingMode(
round);
2283 e |= EncodeOpcode(opcode);
2287void MicroAssembler::EmitRType(
Funct7 funct7,
2294 e |= EncodeFunct7(funct7);
2295 e |= EncodeShamt(shamt);
2296 e |= EncodeRs1(rs1);
2297 e |= EncodeFunct3(funct3);
2299 e |= EncodeOpcode(opcode);
2303void MicroAssembler::EmitR4Type(
FRegister rs3,
2311 e |= EncodeFRs3(rs3);
2312 e |= EncodeFunct2(funct2);
2313 e |= EncodeFRs2(rs2);
2314 e |= EncodeFRs1(rs1);
2315 e |= EncodeRoundingMode(
round);
2317 e |= EncodeOpcode(opcode);
2321void MicroAssembler::EmitIType(intptr_t imm,
2328 e |= EncodeRs1(rs1);
2329 e |= EncodeFunct3(funct3);
2331 e |= EncodeOpcode(opcode);
2335void MicroAssembler::EmitIType(intptr_t imm,
2342 e |= EncodeRs1(rs1);
2343 e |= EncodeFunct3(funct3);
2345 e |= EncodeOpcode(opcode);
2349void MicroAssembler::EmitSType(intptr_t imm,
2356 e |= EncodeRs2(rs2);
2357 e |= EncodeRs1(rs1);
2358 e |= EncodeFunct3(funct3);
2359 e |= EncodeOpcode(opcode);
2363void MicroAssembler::EmitSType(intptr_t imm,
2370 e |= EncodeFRs2(rs2);
2371 e |= EncodeRs1(rs1);
2372 e |= EncodeFunct3(funct3);
2373 e |= EncodeOpcode(opcode);
2377void MicroAssembler::EmitBType(intptr_t imm,
2384 e |= EncodeRs2(rs2);
2385 e |= EncodeRs1(rs1);
2386 e |= EncodeFunct3(funct3);
2387 e |= EncodeOpcode(opcode);
2391void MicroAssembler::EmitUType(intptr_t imm,
Register rd,
Opcode opcode) {
2395 e |= EncodeOpcode(opcode);
2399void MicroAssembler::EmitJType(intptr_t imm,
Register rd,
Opcode opcode) {
2403 e |= EncodeOpcode(opcode);
2407Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
2408 intptr_t far_branch_level)
2409 : MicroAssembler(object_pool_builder,
2411 FLAG_use_compressed_instructions ?
RV_GC :
RV_G),
2412 constant_pool_allowed_(
false) {
2413 generate_invoke_write_barrier_wrapper_ = [&](
Register reg) {
2416 Address(
THR, target::Thread::write_barrier_wrappers_thread_offset(reg)));
2419 generate_invoke_array_write_barrier_ = [&]() {
2421 Address(
THR, target::Thread::array_write_barrier_entry_point_offset()));
2425void Assembler::PushRegister(
Register r) {
2428 sx(r, Address(
SP, 0));
2430void Assembler::PopRegister(
Register r) {
2432 lx(r, Address(
SP, 0));
2441 sx(r0, Address(
SP, 0));
2448 lx(r0, Address(
SP, 0));
2452void Assembler::PushRegisters(
const RegisterSet& regs) {
2466 if (regs.ContainsFpuRegister(reg)) {
2473 if (regs.ContainsRegister(reg)) {
2481void Assembler::PopRegisters(
const RegisterSet& regs) {
2493 if (regs.ContainsRegister(reg)) {
2500 if (regs.ContainsFpuRegister(reg)) {
2509void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2519void Assembler::PushNativeCalleeSavedRegisters() {
2522 (regs.FpuRegisterCount() *
sizeof(double));
2527 if (regs.ContainsFpuRegister(reg)) {
2529 offset +=
sizeof(double);
2534 if (regs.ContainsRegister(reg)) {
2542void Assembler::PopNativeCalleeSavedRegisters() {
2545 (regs.FpuRegisterCount() *
sizeof(double));
2549 if (regs.ContainsFpuRegister(reg)) {
2551 offset +=
sizeof(double);
2556 if (regs.ContainsRegister(reg)) {
2569 if (rd == rn)
return;
2574 return sextw(rd, rn);
2578 if (rd == rn)
return;
2631void Assembler::Jump(
const Address& address) {
2637 LeafRuntimeScope rt(
this, 0,
true);
2639 rt.Call(kTsanLoadAcquireRuntimeEntry, 1);
2642 LeafRuntimeScope rt(
this, 0,
true);
2644 rt.Call(kTsanStoreReleaseRuntimeEntry, 1);
2648 const Address& address,
2653 Address
addr = PrepareAtomicOffset(address.base(), address.offset());
2657 ld(
dst,
addr, std::memory_order_acquire);
2661 lw(
dst,
addr, std::memory_order_acquire);
2664 lh(
dst,
addr, std::memory_order_acquire);
2667 lb(
dst,
addr, std::memory_order_acquire);
2677 if (FLAG_target_thread_sanitizer) {
2678 if (address.offset() == 0) {
2679 TsanLoadAcquire(address.base());
2681 AddImmediate(
TMP2, address.base(), address.offset());
2682 TsanLoadAcquire(
TMP2);
2688 const Address& address,
2691 Address
addr = PrepareAtomicOffset(address.base(), address.offset());
2695 sd(
src,
addr, std::memory_order_release);
2700 sw(
src,
addr, std::memory_order_release);
2704 sh(
src,
addr, std::memory_order_release);
2708 sb(
src,
addr, std::memory_order_release);
// ReserveAlignedFrameSpace fragment (closing braces and any
// intervening lines are elided). Reserves frame_space bytes on the
// stack (SP grows down, hence the negated addi) and then rounds SP
// down to the 16-byte ABI stack alignment by clearing the low bits
// with andi — so the reservation may end up slightly larger than
// requested, never smaller.
2736void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
2737 if (frame_space != 0) {
2738 addi(
SP,
SP, -frame_space);
2740 const intptr_t kAbiStackAlignment = 16;
2741 andi(
SP,
SP, ~(kAbiStackAlignment - 1));
2747void Assembler::EmitEntryFrameVerification() {
2750 ASSERT(!constant_pool_allowed());
2764 deferred_compare_ = kCompareReg;
2765 deferred_left_ = rn;
2769 CompareRegisters(rn, rm);
2773 deferred_compare_ = kTestReg;
2774 deferred_left_ = rn;
2778void Assembler::BranchIf(
Condition condition,
2783 if (deferred_compare_ == kCompareImm || deferred_compare_ == kCompareReg) {
2786 if (deferred_compare_ == kCompareImm) {
2787 if (deferred_imm_ == 0) {
2790 LoadImmediate(
TMP2, deferred_imm_);
2794 right = deferred_reg_;
2796 switch (condition) {
2829 FATAL(
"Use Add/Subtract/MultiplyBranchOverflow instead.");
2833 }
else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
2834 if (deferred_compare_ == kTestImm) {
2835 AndImmediate(
TMP2, deferred_left_, deferred_imm_);
2837 and_(
TMP2, deferred_left_, deferred_reg_);
2839 switch (condition) {
2852 deferred_compare_ =
kNone;
2858 if (deferred_compare_ == kCompareImm) {
2859 if (deferred_imm_ == 0) {
2860 deferred_compare_ = kCompareReg;
2862 SetIf(condition, rd);
2866 LoadImmediate(
TMP2, deferred_imm_);
2867 deferred_compare_ = kCompareReg;
2868 deferred_reg_ =
TMP2;
2869 SetIf(condition, rd);
2873 intx_t
right = deferred_imm_;
2874 switch (condition) {
2914 }
else if (deferred_compare_ == kCompareReg) {
2917 switch (condition) {
2965 }
else if (deferred_compare_ == kTestImm) {
2966 uintx_t uimm = deferred_imm_;
2967 if (deferred_imm_ == 1) {
2968 switch (condition) {
2970 andi(rd, deferred_left_, 1);
2974 andi(rd, deferred_left_, 1);
2979 }
else if (Supports(
RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
2980 switch (condition) {
2982 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2986 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2992 AndImmediate(rd, deferred_left_, deferred_imm_);
2993 switch (condition) {
3004 }
else if (deferred_compare_ == kTestReg) {
3005 and_(rd, deferred_left_, deferred_reg_);
3006 switch (condition) {
3020 deferred_compare_ =
kNone;
3023void Assembler::BranchIfZero(
Register rn, Label* label, JumpDistance
distance) {
3027void Assembler::BranchIfBit(
Register rn,
3028 intptr_t bit_number,
3033 andi(
TMP2, rn, 1 << bit_number);
3034 if (condition ==
ZERO) {
3036 }
else if (condition ==
NOT_ZERO) {
3043void Assembler::BranchIfNotSmi(
Register reg,
3050void Assembler::BranchIfSmi(
Register reg, Label* label, JumpDistance
distance) {
3056void Assembler::ArithmeticShiftRightImmediate(
Register reg, intptr_t shift) {
3057 srai(reg, reg, shift);
3060void Assembler::CompareWords(
Register reg1,
3068 BranchIfZero(
count,
equals, Assembler::kNearJump);
3069 AddImmediate(
count, -1);
3070 lx(temp, FieldAddress(reg1,
offset));
3074 beq(temp,
TMP, &loop, Assembler::kNearJump);
3077void Assembler::JumpAndLink(intptr_t target_code_pool_index,
3085 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
3086 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
3089void Assembler::JumpAndLink(
3091 ObjectPoolBuilderEntry::Patchability patchable,
3093 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
3094 const intptr_t index = object_pool_builder().FindObject(
3096 JumpAndLink(index, entry_kind);
3099void Assembler::JumpAndLinkWithEquivalence(
const Code&
target,
3100 const Object& equivalence,
3102 const intptr_t index =
3104 JumpAndLink(index, entry_kind);
3107void Assembler::Call(Address
target) {
3122 }
else if (Supports(
RV_Zba) && (shift == 1)) {
3124 }
else if (Supports(
RV_Zba) && (shift == 2)) {
3126 }
else if (Supports(
RV_Zba) && (shift == 3)) {
3128 }
else if (shift < 0) {
3130 srai(
dest, index, -shift);
3133 srai(
TMP2, index, -shift);
3138 slli(
dest, index, shift);
3141 slli(
TMP2, index, shift);
3147void Assembler::AddImmediate(
Register rd,
3151 if ((imm == 0) && (rd == rs1)) {
3158 LoadImmediate(
TMP2, imm);
3163void Assembler::MulImmediate(
Register rd,
3167 if (Utils::IsPowerOfTwo(imm)) {
3168 const intx_t shift = Utils::ShiftForPowerOfTwo(imm);
3172 slliw(rd, rs1, shift);
3174 slli(rd, rs1, shift);
3178 slli(rd, rs1, shift);
3181 LoadImmediate(
TMP, imm);
3196void Assembler::AndImmediate(
Register rd,
3202 MoveRegister(rd, rs1);
3205 }
else if (Supports(
RV_Zbs) && Utils::IsPowerOfTwo(~uimm)) {
3206 bclri(rd, rs1, Utils::ShiftForPowerOfTwo(~uimm));
3207 }
else if (Utils::IsPowerOfTwo(uimm + 1)) {
3208 intptr_t shift = Utils::ShiftForPowerOfTwo(uimm + 1);
3209 if (Supports(
RV_Zbb) && (shift == 16)) {
3212 slli(rd, rs1, XLEN - shift);
3213 srli(rd, rd, XLEN - shift);
3217 LoadImmediate(
TMP2, imm);
3218 and_(rd, rs1,
TMP2);
3221void Assembler::OrImmediate(
Register rd,
3227 MoveRegister(rd, rs1);
3230 }
else if (Supports(
RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3231 bseti(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3234 LoadImmediate(
TMP2, imm);
3238void Assembler::XorImmediate(
Register rd,
3244 MoveRegister(rd, rs1);
3247 }
else if (Supports(
RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3248 binvi(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3251 LoadImmediate(
TMP2, imm);
3252 xor_(rd, rs1,
TMP2);
3258 deferred_compare_ = kTestImm;
3259 deferred_left_ = rn;
3260 deferred_imm_ = imm;
3264 deferred_compare_ = kCompareImm;
3265 deferred_left_ = rn;
3266 deferred_imm_ = imm;
3279 return Address(
TMP2, lo);
3285 return Address(
base, 0);
3288 return Address(
TMP2, 0);
3292 Address
addr = PrepareLargeOffset(address.base(), address.offset());
3322 int32_t payload_offset,
3338void Assembler::LoadFromStack(
Register dst, intptr_t depth) {
3341void Assembler::StoreToStack(
Register src, intptr_t depth) {
3344void Assembler::CompareToStack(
Register src, intptr_t depth) {
3349 Address
addr = PrepareLargeOffset(address.base(), address.offset());
3377void Assembler::StoreBarrier(
Register object,
3379 CanBeSmi can_value_be_smi,
3383 ASSERT(
object != scratch);
3402 if (can_value_be_smi == kValueCanBeSmi) {
3407 BranchIfNotSmi(
value, &passed_check, kNearJump);
3409 Bind(&passed_check);
3412 lbu(scratch, FieldAddress(
object, target::Object::tags_offset()));
3413 lbu(
TMP2, FieldAddress(
value, target::Object::tags_offset()));
3414 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3415 and_(scratch, scratch,
TMP2);
3429 mv(objectForCall,
object);
3435 generate_invoke_write_barrier_wrapper_(objectForCall);
3447void Assembler::ArrayStoreBarrier(
Register object,
3450 CanBeSmi can_value_be_smi,
3453 const bool spill_lr =
true;
3456 ASSERT(
object != scratch);
3479 if (can_value_be_smi == kValueCanBeSmi) {
3484 BranchIfNotSmi(
value, &passed_check, kNearJump);
3486 Bind(&passed_check);
3489 lbu(scratch, FieldAddress(
object, target::Object::tags_offset()));
3490 lbu(
TMP2, FieldAddress(
value, target::Object::tags_offset()));
3491 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3492 and_(scratch, scratch,
TMP2);
3504 generate_invoke_array_write_barrier_();
3511void Assembler::VerifyStoreNeedsNoWriteBarrier(
Register object,
3520 lbu(
TMP2, FieldAddress(
value, target::Object::tags_offset()));
3521 andi(
TMP2,
TMP2, 1 << target::UntaggedObject::kNewOrEvacuationCandidateBit);
3523 lbu(
TMP2, FieldAddress(
object, target::Object::tags_offset()));
3524 andi(
TMP2,
TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
3526 Stop(
"Write barrier is required");
3530void Assembler::StoreObjectIntoObjectNoBarrier(
Register object,
3531 const Address&
dest,
3532 const Object&
value,
3533 MemoryOrder memory_order,
3548 if (memory_order == kRelease) {
3555void Assembler::StoreInternalPointer(
Register object,
3556 const Address&
dest,
3562void Assembler::LoadPoolPointer(
Register pp) {
3564 lx(pp, FieldAddress(
CODE_REG, target::Code::object_pool_offset()));
3573 set_constant_pool_allowed(pp ==
PP);
3576bool Assembler::CanLoadFromObjectPool(
const Object&
object)
const {
3578 if (!constant_pool_allowed()) {
3586void Assembler::LoadNativeEntry(
3588 const ExternalLabel* label,
3589 ObjectPoolBuilderEntry::Patchability patchable) {
3590 const intptr_t index =
3591 object_pool_builder().FindNativeFunction(label, patchable);
3592 LoadWordFromPoolIndex(
dst, index);
3595 lx(
dst, Address(
THR, target::Thread::isolate_offset()));
3598 lx(
dst, Address(
THR, target::Thread::isolate_group_offset()));
// LoadImmediate fragment (the branch conditions, early returns, the
// lui of the high 20 bits, and closing braces are elided). Materializes
// an arbitrary intx_t into `reg`, trying progressively cheaper forms
// for values wider than 32 bits:
//   1. a 32-bit value shifted left by its trailing zeros (li + slli);
//   2. a U-type (lui) immediate shifted into place (lui + slli);
//   3. a load from the object pool when the pool is available;
//   4. otherwise recursively build the high part and add the low part
//      (split via ImmLo / trailing-zero shift).
// The 32-bit tail splits imm into ImmHi/ImmLo and finishes with addiw
// of the low 12 bits — addiw implies this path is RV64-specific;
// presumably an elided #if selects addi on RV32 — confirm.
3601void Assembler::LoadImmediate(
    Register reg, intx_t imm) {
3603 if (!Utils::IsInt(32, imm)) {
3604 int shift = Utils::CountTrailingZeros64(imm);
3606 li(reg, imm >> shift);
3607 slli(reg, reg, shift);
3610 if ((shift >= 12) &&
IsUTypeImm(imm >> (shift - 12))) {
3611 lui(reg, imm >> (shift - 12));
3612 slli(reg, reg, shift - 12);
3616 if (constant_pool_allowed()) {
3617 intptr_t index = object_pool_builder().FindImmediate(imm);
3618 LoadWordFromPoolIndex(reg, index);
3622 intx_t lo =
ImmLo(imm);
3623 intx_t hi = imm - lo;
3624 shift = Utils::CountTrailingZeros64(hi);
3626 LoadImmediate(reg, hi >> shift);
3627 slli(reg, reg, shift);
3635 intx_t lo =
ImmLo(imm);
3636 intx_t hi =
ImmHi(imm);
3645 addiw(reg, reg, lo);
3651void Assembler::LoadSImmediate(
FRegister reg,
float imms) {
3652 int32_t imm = bit_cast<int32_t, float>(imms);
3656 ASSERT(constant_pool_allowed());
3657 intptr_t index = object_pool_builder().FindImmediate(imm);
3658 intptr_t
offset = target::ObjectPool::element_offset(index);
3663void Assembler::LoadDImmediate(
FRegister reg,
double immd) {
3664 int64_t imm = bit_cast<int64_t, double>(immd);
3672 ASSERT(constant_pool_allowed());
3673 intptr_t index = object_pool_builder().FindImmediate64(imm);
3674 intptr_t
offset = target::ObjectPool::element_offset(index);
3679void Assembler::LoadQImmediate(
FRegister reg, simd128_value_t immq) {
3687void Assembler::LoadWordFromPoolIndex(
Register dst,
3690 ASSERT((pp !=
PP) || constant_pool_allowed());
3692 const uint32_t
offset = target::ObjectPool::element_offset(index);
3697 lx(
dst, Address(pp, lo));
3701 lx(
dst, Address(
dst, lo));
3708 ASSERT((pp !=
PP) || constant_pool_allowed());
3710 const uint32_t
offset = target::ObjectPool::element_offset(index);
3715 sx(
src, Address(pp, lo));
3719 sx(
src, Address(
TMP, lo));
3723void Assembler::CompareObject(
Register reg,
const Object&
object) {
3726 CompareObjectRegisters(reg,
NULL_REG);
3730 LoadObject(
TMP,
object);
3731 CompareObjectRegisters(reg,
TMP);
3736 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3737 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3739 srliw(
result, tags, target::UntaggedObject::kClassIdTagPos);
3741 srli(
result, tags, target::UntaggedObject::kClassIdTagPos);
3746 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
3747 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
3748 srli(
result, tags, target::UntaggedObject::kSizeTagPos);
3749 andi(
result,
result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
3754 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3755 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3757 lwu(
result, FieldAddress(
object, target::Object::tags_offset()));
3759 lw(
result, FieldAddress(
object, target::Object::tags_offset()));
3761 srli(
result,
result, target::UntaggedObject::kClassIdTagPos);
3767 const intptr_t table_offset =
3768 target::IsolateGroup::cached_class_table_table_offset();
3770 LoadIsolateGroup(
result);
3775void Assembler::CompareClassId(
Register object,
3779 LoadClassId(scratch,
object);
3780 CompareImmediate(scratch, class_id);
3789 BranchIfSmi(
object, &
done, kNearJump);
3790 LoadClassId(
result,
object);
3794 LoadClassIdMayBeSmi(
result,
object);
3797void Assembler::EnsureHasClassIdInDEBUG(intptr_t
cid,
3802 Comment(
"Check that object in register has cid %" Pd "",
cid);
3804 LoadClassIdMayBeSmi(scratch,
src);
3805 CompareImmediate(scratch,
cid);
3808 CompareImmediate(scratch,
kNullCid);
3816void Assembler::EnterFrame(intptr_t frame_size) {
3824void Assembler::LeaveFrame() {
3833void Assembler::TransitionGeneratedToNative(
Register destination,
3836 bool enter_safepoint) {
3839 Address(
THR, target::Thread::top_exit_frame_info_offset()));
3841 sx(new_exit_through_ffi,
3842 Address(
THR, target::Thread::exit_through_ffi_offset()));
3843 Register tmp = new_exit_through_ffi;
3846 sx(destination, Address(
THR, target::Thread::vm_tag_offset()));
3847 li(tmp, target::Thread::native_execution_state());
3848 sx(tmp, Address(
THR, target::Thread::execution_state_offset()));
3850 if (enter_safepoint) {
3851 EnterFullSafepoint(tmp);
3856 bool exit_safepoint,
3857 bool ignore_unwind_in_progress,
3859 if (exit_safepoint) {
3860 ExitFullSafepoint(
state, ignore_unwind_in_progress);
3863 ASSERT(!ignore_unwind_in_progress);
3866 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
3867 li(
state, target::Thread::full_safepoint_state_acquired());
3868 lx(
RA, Address(
THR, target::Thread::safepoint_state_offset()));
3871 beqz(
RA, &
ok, Assembler::kNearJump);
3879 li(
state, target::Thread::vm_tag_dart_id());
3880 sx(
state, Address(
THR, target::Thread::vm_tag_offset()));
3882 li(
state, target::Thread::generated_execution_state());
3883 sx(
state, Address(
THR, target::Thread::execution_state_offset()));
3886 sx(
ZR, Address(
THR, target::Thread::top_exit_frame_info_offset()));
3887 sx(
ZR, Address(
THR, target::Thread::exit_through_ffi_offset()));
3899 Label slow_path,
done, retry;
3900 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
3901 j(&slow_path, Assembler::kNearJump);
3904 addi(
addr,
THR, target::Thread::safepoint_state_offset());
3907 subi(
state,
state, target::Thread::full_safepoint_state_unacquired());
3908 bnez(
state, &slow_path, Assembler::kNearJump);
3910 li(
state, target::Thread::full_safepoint_state_acquired());
3912 beqz(
state, &
done, Assembler::kNearJump);
3914 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
3915 j(&retry, Assembler::kNearJump);
3919 lx(
addr, Address(
THR, target::Thread::enter_safepoint_stub_offset()));
3920 lx(
addr, FieldAddress(
addr, target::Code::entry_point_offset()));
3927 bool ignore_unwind_in_progress) {
3935 Label slow_path,
done, retry;
3936 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
3937 j(&slow_path, Assembler::kNearJump);
3940 addi(
addr,
THR, target::Thread::safepoint_state_offset());
3943 subi(
state,
state, target::Thread::full_safepoint_state_acquired());
3944 bnez(
state, &slow_path, Assembler::kNearJump);
3946 li(
state, target::Thread::full_safepoint_state_unacquired());
3948 beqz(
state, &
done, Assembler::kNearJump);
3950 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
3951 j(&retry, Assembler::kNearJump);
3955 if (ignore_unwind_in_progress) {
3959 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
3961 lx(
addr, Address(
THR, target::Thread::exit_safepoint_stub_offset()));
3963 lx(
addr, FieldAddress(
addr, target::Code::entry_point_offset()));
3969void Assembler::CheckFpSpDist(intptr_t fp_sp_dist) {
3973 Comment(
"CheckFpSpDist");
3975 CompareImmediate(
TMP, fp_sp_dist);
3976 BranchIf(
EQ, &
ok, compiler::Assembler::kNearJump);
3982void Assembler::CheckCodePointer() {
3984 if (!FLAG_check_code_pointer) {
3987 Comment(
"CheckCodePointer");
3988 Label cid_ok, instructions_ok;
3990 BranchIf(
EQ, &cid_ok, kNearJump);
3994 const intptr_t entry_offset =
3996 intx_t imm = -entry_offset;
3997 intx_t lo =
ImmLo(imm);
3998 intx_t hi =
ImmHi(imm);
4001 lx(
TMP2, FieldAddress(
CODE_REG, target::Code::instructions_offset()));
4002 beq(
TMP,
TMP2, &instructions_ok, kNearJump);
4004 Bind(&instructions_ok);
4008void Assembler::RestoreCodePointer() {
4014void Assembler::RestorePoolPointer() {
4015 if (FLAG_precompiled_mode) {
4016 lx(
PP, Address(
THR, target::Thread::global_object_pool_offset()));
4019 lx(
PP, FieldAddress(
PP, target::Code::object_pool_offset()));
4024void Assembler::RestorePinnedRegisters() {
4026 Address(
THR, target::Thread::write_barrier_mask_offset()));
4027 lx(
NULL_REG, Address(
THR, target::Thread::object_null_offset()));
4053 (target::UntaggedObject::kGenerationalBarrierMask << 1) - 1);
4057 ASSERT(target::UntaggedObject::kGenerationalBarrierMask ==
4058 (target::UntaggedObject::kIncrementalBarrierMask << 1));
4060 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
4061 target::UntaggedObject::kCanonicalBit);
4062 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
4063 target::UntaggedObject::kCardRememberedBit);
4066void Assembler::SetupGlobalPoolAndDispatchTable() {
4067 ASSERT(FLAG_precompiled_mode);
4068 lx(
PP, Address(
THR, target::Thread::global_object_pool_offset()));
4071 Address(
THR, target::Thread::dispatch_table_array_offset()));
4074void Assembler::EnterDartFrame(intptr_t frame_size,
Register new_pp) {
4075 ASSERT(!constant_pool_allowed());
4078 EnterDartFrame(0, new_pp);
4079 AddImmediate(
SP,
SP, -frame_size);
4085 if (FLAG_precompiled_mode) {
4104 set_constant_pool_allowed(
true);
4112void Assembler::EnterOsrFrame(intptr_t extra_size,
Register new_pp) {
4113 ASSERT(!constant_pool_allowed());
4114 Comment(
"EnterOsrFrame");
4115 RestoreCodePointer();
4118 if (extra_size > 0) {
4119 AddImmediate(
SP, -extra_size);
4123void Assembler::LeaveDartFrame() {
4126 if (!FLAG_precompiled_mode) {
4131 set_constant_pool_allowed(
false);
4138void Assembler::LeaveDartFrame(intptr_t fp_sp_dist) {
4139 intptr_t pp_offset =
4142 intptr_t fp_offset =
4145 intptr_t ra_offset =
4155 if (!FLAG_precompiled_mode) {
4156 lx(
PP, Address(
SP, pp_offset));
4159 set_constant_pool_allowed(
false);
4160 lx(
FP, Address(
SP, fp_offset));
4161 lx(
RA, Address(
SP, ra_offset));
4162 addi(
SP,
SP, -fp_sp_dist);
4165void Assembler::CallRuntime(
const RuntimeEntry& entry,
4167 ASSERT(!entry.is_leaf());
4170 lx(
T5, compiler::Address(
THR, entry.OffsetFromThread()));
4172 Call(Address(
THR, target::Thread::call_to_runtime_entry_point_offset()));
4178#define __ assembler_->
4180LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
4181 intptr_t frame_size,
4182 bool preserve_registers)
4183 : assembler_(assembler), preserve_registers_(preserve_registers) {
4193 if (preserve_registers) {
4194 __ PushRegisters(kRuntimeCallSavedRegisters);
4206 __ ReserveAlignedFrameSpace(frame_size);
4209void LeafRuntimeScope::Call(
const RuntimeEntry& entry,
4212 __ lx(
TMP2, compiler::Address(
THR, entry.OffsetFromThread()));
4213 __ sx(
TMP2, compiler::Address(
THR, target::Thread::vm_tag_offset()));
4215 __ LoadImmediate(
TMP2, VMTag::kDartTagId);
4216 __ sx(
TMP2, compiler::Address(
THR, target::Thread::vm_tag_offset()));
4219LeafRuntimeScope::~LeafRuntimeScope() {
4220 if (preserve_registers_) {
4221 const intptr_t kSavedRegistersSize =
4226 __ subi(
SP,
FP, kSavedRegistersSize);
4228 __ PopRegisters(kRuntimeCallSavedRegisters);
4241void Assembler::EnterCFrame(intptr_t frame_space) {
4257 const intptr_t kAbiStackAlignment = 16;
4258 andi(
SP,
SP, ~(kAbiStackAlignment - 1));
4261void Assembler::LeaveCFrame() {
4274void Assembler::MonomorphicCheckedEntryJIT() {
4275 has_monomorphic_entry_ =
true;
4276 const intptr_t saved_far_branch_level = far_branch_level();
4277 set_far_branch_level(0);
4278 const intptr_t
start = CodeSize();
4280 Label immediate, miss;
4282 lx(
TMP, Address(
THR, target::Thread::switchable_call_miss_entry_offset()));
4285 Comment(
"MonomorphicCheckedEntry");
4287 target::Instructions::kMonomorphicEntryOffsetJIT);
4290 const intptr_t cid_offset = target::Array::element_offset(0);
4291 const intptr_t count_offset = target::Array::element_offset(1);
4296 lx(
TMP, FieldAddress(entries_reg, cid_offset));
4297 LoadTaggedClassIdMayBeSmi(
A1,
A0);
4298 bne(
TMP,
A1, &miss, kNearJump);
4300 lx(
TMP, FieldAddress(entries_reg, count_offset));
4302 sx(
TMP, FieldAddress(entries_reg, count_offset));
4308 target::Instructions::kPolymorphicEntryOffsetJIT);
4310 set_far_branch_level(saved_far_branch_level);
4316void Assembler::MonomorphicCheckedEntryAOT() {
4317 has_monomorphic_entry_ =
true;
4318 intptr_t saved_far_branch_level = far_branch_level();
4319 set_far_branch_level(0);
4321 const intptr_t
start = CodeSize();
4323 Label immediate, miss;
4325 lx(
TMP, Address(
THR, target::Thread::switchable_call_miss_entry_offset()));
4328 Comment(
"MonomorphicCheckedEntry");
4330 target::Instructions::kMonomorphicEntryOffsetAOT);
4331 LoadClassId(
TMP,
A0);
4333 bne(
S5,
TMP, &miss, kNearJump);
4337 target::Instructions::kPolymorphicEntryOffsetAOT);
4339 set_far_branch_level(saved_far_branch_level);
4342void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
4343 has_monomorphic_entry_ =
true;
4344 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
4348 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
4358 slliw(other,
hash, 10);
4361 srliw(other,
hash, 6);
4367 slli(other,
hash, 10);
4370 srli(other,
hash, 6);
4375void Assembler::FinalizeHashForSize(intptr_t bit_size,
4385 slliw(scratch,
hash, 3);
4388 srliw(scratch,
hash, 11);
4391 slliw(scratch,
hash, 15);
4395 slli(scratch,
hash, 3);
4398 srli(scratch,
hash, 11);
4401 slli(scratch,
hash, 15);
4406 AndImmediate(
hash,
hash, Utils::NBitMask(bit_size));
4409 seqz(scratch,
hash);
4418 LoadIsolateGroup(temp_reg);
4419 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4422 target::ClassTable::allocation_tracing_state_table_offset()));
4423 add(temp_reg, temp_reg,
cid);
4424 LoadFromOffset(temp_reg, temp_reg,
4425 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
4427 bnez(temp_reg, trace);
4430void Assembler::MaybeTraceAllocation(intptr_t
cid,
4435 LoadIsolateGroup(temp_reg);
4436 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4439 target::ClassTable::allocation_tracing_state_table_offset()));
4440 LoadFromOffset(temp_reg, temp_reg,
4441 target::ClassTable::AllocationTracingStateSlotOffsetFor(
cid),
4443 bnez(temp_reg, trace);
4447void Assembler::TryAllocateObject(intptr_t
cid,
4448 intptr_t instance_size,
4453 ASSERT(failure !=
nullptr);
4454 ASSERT(instance_size != 0);
4455 ASSERT(instance_reg != temp_reg);
4457 ASSERT(Utils::IsAligned(instance_size,
4459 if (FLAG_inline_alloc &&
4466 lx(instance_reg, Address(
THR, target::Thread::top_offset()));
4467 lx(temp_reg, Address(
THR, target::Thread::end_offset()));
4472 AddImmediate(instance_reg, instance_size);
4475 bleu(temp_reg, instance_reg, failure,
distance);
4476 CheckAllocationCanary(instance_reg, temp_reg);
4480 sx(instance_reg, Address(
THR, target::Thread::top_offset()));
4485 LoadImmediate(temp_reg, tags);
4486 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
4492void Assembler::TryAllocateArray(intptr_t
cid,
4493 intptr_t instance_size,
4499 if (FLAG_inline_alloc &&
4506 lx(
instance, Address(
THR, target::Thread::top_offset()));
4507 AddImmediate(end_address,
instance, instance_size);
4508 bltu(end_address,
instance, failure);
4513 lx(temp2, Address(
THR, target::Thread::end_offset()));
4514 bgeu(end_address, temp2, failure);
4515 CheckAllocationCanary(
instance, temp2);
4519 sx(end_address, Address(
THR, target::Thread::top_offset()));
4526 LoadImmediate(temp2, tags);
4527 sx(temp2, FieldAddress(
instance, target::Object::tags_offset()));
4540 lx(temp, Address(
src));
4542 sx(temp, Address(
dst));
4545 bnez(
size, &loop, kNearJump);
4549void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
4551 intx_t lo =
ImmLo(offset_into_target);
4552 intx_t hi =
ImmHi(offset_into_target);
4554 jalr_fixed(
RA,
RA, lo);
4557void Assembler::GenerateUnRelocatedPcRelativeTailCall(
4558 intptr_t offset_into_target) {
4560 intx_t lo =
ImmLo(offset_into_target);
4561 intx_t hi =
ImmHi(offset_into_target);
4563 jalr_fixed(
ZR,
TMP, lo);
4566bool Assembler::AddressCanHoldConstantIndex(
const Object& constant,
4569 intptr_t index_scale) {
4570 if (!IsSafeSmi(constant))
return false;
4572 const int64_t
offset = index * index_scale + HeapDataOffset(is_external,
cid);
4580Address Assembler::ElementAddressForIntIndex(
bool is_external,
4582 intptr_t index_scale,
4584 intptr_t index)
const {
4585 const int64_t
offset = index * index_scale + HeapDataOffset(is_external,
cid);
4587 return Address(array,
static_cast<int32_t
>(
offset));
4589void Assembler::ComputeElementAddressForIntIndex(
Register address,
4592 intptr_t index_scale,
4595 const int64_t
offset = index * index_scale + HeapDataOffset(is_external,
cid);
4596 AddImmediate(address, array,
offset);
4599Address Assembler::ElementAddressForRegIndex(
bool is_external,
4601 intptr_t index_scale,
4607 const intptr_t boxing_shift = index_unboxed ? 0 : -
kSmiTagShift;
4608 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4609 const int32_t
offset = HeapDataOffset(is_external,
cid);
4612 AddShifted(temp, array, index, shift);
4613 return Address(temp,
offset);
4616void Assembler::ComputeElementAddressForRegIndex(
Register address,
4619 intptr_t index_scale,
4624 const intptr_t boxing_shift = index_unboxed ? 0 : -
kSmiTagShift;
4625 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4626 const int32_t
offset = HeapDataOffset(is_external,
cid);
4627 ASSERT(array != address);
4628 ASSERT(index != address);
4629 AddShifted(address, array, index, shift);
4631 AddImmediate(address, address,
offset);
4635void Assembler::LoadStaticFieldAddress(
Register address,
4639 LoadCompressedSmiFieldFromOffset(
4640 scratch, field, target::Field::host_offset_or_field_id_offset());
4641 const intptr_t field_table_offset =
4642 is_shared ? compiler::target::Thread::shared_field_table_values_offset()
4644 LoadMemoryValue(address,
THR,
static_cast<int32_t
>(field_table_offset));
4646 add(address, address, scratch);
4649void Assembler::LoadFieldAddressForRegOffset(
Register address,
4652 AddShifted(address,
instance, offset_in_words_as_smi,
4658void Assembler::LoadObjectHelper(
4660 const Object&
object,
4662 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
4690 const intptr_t index =
4692 ? object_pool_builder().AddObject(
4693 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
4694 : object_pool_builder().FindObject(
4695 object, ObjectPoolBuilderEntry::kNotPatchable,
4697 LoadWordFromPoolIndex(
dst, index);
4700void Assembler::AddImmediateBranchOverflow(
Register rd,
4707 AddImmediate(rd, rs1, imm);
4709 blt(rd,
TMP2, overflow);
4710 }
else if (imm < 0) {
4711 bgt(rd,
TMP2, overflow);
4714 AddImmediate(rd, rs1, imm);
4716 blt(rd, rs1, overflow);
4717 }
else if (imm < 0) {
4718 bgt(rd, rs1, overflow);
4722void Assembler::SubtractImmediateBranchOverflow(
Register rd,
4727 AddImmediateBranchOverflow(rd, rs1, -imm, overflow);
4729void Assembler::MultiplyImmediateBranchOverflow(
Register rd,
4738 LoadImmediate(
TMP2, imm);
4743 srai(
TMP2, rd, XLEN - 1);
4746void Assembler::AddBranchOverflow(
Register rd,
4757 if ((rd == rs1) && (rd == rs2)) {
4762 bltz(
TMP, overflow);
4763 }
else if (rs1 == rs2) {
4768 bltz(
TMP, overflow);
4769 }
else if (rd == rs1) {
4775 }
else if (rd == rs2) {
4789void Assembler::SubtractBranchOverflow(
Register rd,
4800 if ((rd == rs1) && (rd == rs2)) {
4805 bltz(
TMP, overflow);
4806 }
else if (rs1 == rs2) {
4811 bltz(
TMP, overflow);
4812 }
else if (rd == rs1) {
4818 }
else if (rd == rs2) {
4832void Assembler::MultiplyBranchOverflow(
Register rd,
4845 mulh(
TMP, rs1, rs2);
4847 srai(
TMP2, rd, XLEN - 1);
4865 Label l0, l1, l2, l3, l4, l5;
4869 beqz(
TMP, &l0, Assembler::kNearJump);
4875 beqz(
TMP, &l1, Assembler::kNearJump);
4880 beqz(
TMP, &l2, Assembler::kNearJump);
4885 beqz(
TMP, &l3, Assembler::kNearJump);
4890 beqz(
TMP, &l4, Assembler::kNearJump);
4896 beqz(
TMP, &l5, Assembler::kNearJump);
4905 RangeCheckCondition condition,
4907 auto cc = condition == kIfInRange ?
LS :
HI;
4909 AddImmediate(to_check,
value, -low);
4910 CompareImmediate(to_check, high - low);
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
static void round(SkPoint *p)
static bool ok(int result)
static uint32_t hash(const SkShaderBase::GradientInfo &v)
bool equals(SkDrawable *a, SkDrawable *b)
static bool left(const SkPoint &p0, const SkPoint &p1)
static bool right(const SkPoint &p0, const SkPoint &p1)
#define RA(width, name,...)
#define DEBUG_ASSERT(cond)
#define ASSERT_EQUAL(expected, actual)
#define RELEASE_ASSERT(cond)
#define COMPILE_ASSERT(expr)
MicroAssembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level, ExtensionSet extensions)
static float max(float r, float g, float b)
static float min(float r, float g, float b)
uword MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size)
bool CanLoadFromThread(const dart::Object &object, intptr_t *offset)
word ToRawSmi(const dart::Object &a)
word SmiValue(const dart::Object &a)
void BailoutWithBranchOffsetError()
bool IsOriginalObject(const Object &object)
InvalidClass kObjectAlignment
InvalidClass kObjectAlignmentLog2
const Bool & TrueObject()
bool IsInOldSpace(const Object &obj)
bool IsSameObject(const Object &a, const Object &b)
const Bool & FalseObject()
const Object & NullObject()
constexpr OperandSize kWordBytes
const Object & ToObject(const Code &handle)
static constexpr int HeaderSize
uint32_t EncodeBTypeImm(intptr_t imm)
uint32_t EncodeCBImm(intptr_t imm)
const Register kWriteBarrierSlotReg
constexpr bool IsAbiPreservedRegister(Register reg)
bool IsCI16Imm(intptr_t imm)
static constexpr Extension RV_F(3)
uint32_t EncodeUTypeImm(intptr_t imm)
constexpr Register FAR_TMP
static constexpr intptr_t kFalseOffsetFromNull
const Register kWriteBarrierObjectReg
static constexpr Extension RV_Zbc(9)
static constexpr ExtensionSet RV_GC
const Register kWriteBarrierValueReg
static constexpr bool IsCalleeSavedRegister(Register reg)
uint32_t CombineHashes(uint32_t hash, uint32_t other_hash)
uint32_t EncodeCSPStore4Imm(intptr_t imm)
static constexpr intptr_t kTrueOffsetFromNull
bool IsCSPLoad4Imm(intptr_t imm)
bool IsCJImm(intptr_t imm)
uint32_t EncodeCJImm(intptr_t imm)
bool IsCIImm(intptr_t imm)
uint32_t EncodeCSPLoad8Imm(intptr_t imm)
constexpr intptr_t kWordSizeLog2
constexpr intptr_t kBitsPerInt16
const Register ARGS_DESC_REG
static constexpr Extension RV_Zalasr(10)
const Register DISPATCH_TABLE_REG
const int kNumberOfFpuRegisters
const RegList kAbiPreservedCpuRegs
bool IsAllocatableInNewSpace(intptr_t size)
bool IsCSPStore8Imm(intptr_t imm)
uint32_t EncodeCMem8Imm(intptr_t imm)
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
const RegList kDartVolatileCpuRegs
constexpr intptr_t kBitsPerInt32
uint32_t EncodeCUImm(intptr_t imm)
uint32_t EncodeCI4SPNImm(intptr_t imm)
bool IsSTypeImm(intptr_t imm)
bool IsJTypeImm(intptr_t imm)
static constexpr Extension RV_A(2)
uint32_t EncodeCSPLoad4Imm(intptr_t imm)
uint32_t EncodeCIImm(intptr_t imm)
bool IsITypeImm(intptr_t imm)
bool IsCSPStore4Imm(intptr_t imm)
bool IsCUImm(intptr_t imm)
const Register IC_DATA_REG
bool IsUTypeImm(intptr_t imm)
bool IsCMem4Imm(intptr_t imm)
bool IsBTypeImm(intptr_t imm)
constexpr Register WRITE_BARRIER_STATE
static constexpr Extension RV_Zba(6)
constexpr intptr_t kWordSize
static constexpr Extension RV_M(1)
static constexpr Extension RV_C(5)
uint32_t EncodeITypeImm(intptr_t imm)
uint32_t EncodeSTypeImm(intptr_t imm)
constexpr RegList kAbiPreservedFpuRegs
uint32_t EncodeCMem4Imm(intptr_t imm)
uint32_t EncodeCSPStore8Imm(intptr_t imm)
bool IsCSPLoad8Imm(intptr_t imm)
bool IsCMem8Imm(intptr_t imm)
uint32_t EncodeJTypeImm(intptr_t imm)
static constexpr Extension RV_Zbs(8)
bool IsCBImm(intptr_t imm)
const RegList kAbiVolatileFpuRegs
bool IsCI4SPNImm(intptr_t imm)
uint32_t EncodeCI16Imm(intptr_t imm)
static constexpr ExtensionSet RV_G
static constexpr Extension RV_D(4)
constexpr intptr_t kBitsPerInt8
static constexpr Extension RV_I(0)
const int kFpuRegisterSize
static constexpr Extension RV_Zbb(7)
DECLARE_FLAG(bool, show_invisible_frames)
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
static bool Bind(PassBindingsCacheMTL &pass, ShaderStage stage, size_t bind_index, const BufferView &view)
intptr_t saved_caller_pc_from_fp
intptr_t saved_caller_fp_from_fp
intptr_t saved_caller_pp_from_fp
#define NOT_IN_PRODUCT(code)