assembler_riscv.cc
1// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/compiler/assembler/assembler.h"
11
12#include "vm/cpu.h"
13#include "vm/instructions.h"
14#include "vm/simulator.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22DEFINE_FLAG(int, far_branch_level, 0, "Always use far branches");
23
24namespace compiler {
25
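// A minimal usage sketch, not taken from this file (names such as A0 and
// RV_GC are assumed from the surrounding headers):
//
//   MicroAssembler assembler(&object_pool_builder, /*far_branch_level=*/0,
//                            RV_GC);
//   Label done;
//   assembler.beq(A0, ZR, &done, kNearJump);  // Forward branch, patched at Bind.
//   assembler.addi(A0, A0, -1);
//   assembler.Bind(&done);
//   assembler.jalr(ZR, RA, 0);                // Plain return.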
26MicroAssembler::MicroAssembler(ObjectPoolBuilder* object_pool_builder,
27 intptr_t far_branch_level,
28 ExtensionSet extensions)
29 : AssemblerBase(object_pool_builder),
30 extensions_(extensions),
31 far_branch_level_(far_branch_level) {
32 ASSERT(far_branch_level >= 0);
33 ASSERT(far_branch_level <= 2);
34}
35
36MicroAssembler::~MicroAssembler() {}
37
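// Unresolved forward branches to a label are threaded through the
// instruction stream as linked lists: the immediate field of each pending
// branch temporarily holds the distance back to the previous pending branch
// of the same kind (or 0 at the end of the list). Bind() walks each list,
// patches in the real offset to the now-known target, and follows the
// stored deltas to the next pending branch.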
38void MicroAssembler::Bind(Label* label) {
39 ASSERT(!label->IsBound());
40 intptr_t target_position = Position();
41 intptr_t branch_position;
42
43#define BIND(head, update) \
44 branch_position = label->head; \
45 while (branch_position >= 0) { \
46 ASSERT(Utils::IsAligned(branch_position, Supports(RV_C) ? 2 : 4)); \
47 intptr_t new_offset = target_position - branch_position; \
48 ASSERT(Utils::IsAligned(new_offset, Supports(RV_C) ? 2 : 4)); \
49 intptr_t old_offset = update(branch_position, new_offset); \
50 if (old_offset == 0) break; \
51 branch_position -= old_offset; \
52 } \
53 label->head = -1
54
55 BIND(unresolved_cb_, UpdateCBOffset);
56 BIND(unresolved_cj_, UpdateCJOffset);
57 BIND(unresolved_b_, UpdateBOffset);
58 BIND(unresolved_j_, UpdateJOffset);
59 BIND(unresolved_far_, UpdateFarOffset);
60
61 label->BindTo(target_position);
62}
63
64intptr_t MicroAssembler::UpdateCBOffset(intptr_t branch_position,
65 intptr_t new_offset) {
66 CInstr instr(Read16(branch_position));
67 ASSERT((instr.opcode() == C_BEQZ) || (instr.opcode() == C_BNEZ));
68 intptr_t old_offset = instr.b_imm();
69 if (!IsCBImm(new_offset)) {
70 FATAL("Incorrect Assembler::kNearJump");
71 }
72 Write16(branch_position,
73 instr.opcode() | EncodeCRs1p(instr.rs1p()) | EncodeCBImm(new_offset));
74 return old_offset;
75}
76
77intptr_t MicroAssembler::UpdateCJOffset(intptr_t branch_position,
78 intptr_t new_offset) {
79 CInstr instr(Read16(branch_position));
80 ASSERT((instr.opcode() == C_J) || (instr.opcode() == C_JAL));
81 intptr_t old_offset = instr.j_imm();
82 if (!IsCJImm(new_offset)) {
83 FATAL("Incorrect Assembler::kNearJump");
84 }
85 Write16(branch_position, instr.opcode() | EncodeCJImm(new_offset));
86 return old_offset;
87}
88
89intptr_t MicroAssembler::UpdateBOffset(intptr_t branch_position,
90 intptr_t new_offset) {
91 Instr instr(Read32(branch_position));
92 ASSERT(instr.opcode() == BRANCH);
93 intptr_t old_offset = instr.btype_imm();
94 if (!IsBTypeImm(new_offset)) {
95 FATAL("Incorrect Assembler::kNearJump");
96 }
97 Write32(branch_position, EncodeRs2(instr.rs2()) | EncodeRs1(instr.rs1()) |
98 EncodeFunct3(instr.funct3()) |
99 EncodeOpcode(instr.opcode()) |
100 EncodeBTypeImm(new_offset));
101 return old_offset;
102}
103
104intptr_t MicroAssembler::UpdateJOffset(intptr_t branch_position,
105 intptr_t new_offset) {
106 Instr instr(Read32(branch_position));
107 ASSERT(instr.opcode() == JAL);
108 intptr_t old_offset = instr.jtype_imm();
109 if (!IsJTypeImm(new_offset)) {
110 FATAL("Incorrect Assembler::kNearJump");
111 }
112 Write32(branch_position, EncodeRd(instr.rd()) | EncodeOpcode(instr.opcode()) |
113 EncodeJTypeImm(new_offset));
114 return old_offset;
115}
116
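// A "far" branch is an AUIPC+JALR pair. The offset is split with
// ImmHi/ImmLo so that AUIPC's U-type immediate plus JALR's sign-extended
// I-type immediate reconstruct the full distance; offsets beyond +/-2GB
// cannot be encoded and are fatal.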
117intptr_t MicroAssembler::UpdateFarOffset(intptr_t branch_position,
118 intptr_t new_offset) {
119 Instr auipc_instr(Read32(branch_position));
120 ASSERT(auipc_instr.opcode() == AUIPC);
121 ASSERT(auipc_instr.rd() == FAR_TMP);
122 Instr jr_instr(Read32(branch_position + 4));
123 ASSERT(jr_instr.opcode() == JALR);
124 ASSERT(jr_instr.rd() == ZR);
125 ASSERT(jr_instr.funct3() == F3_0);
126 ASSERT(jr_instr.rs1() == FAR_TMP);
127 intptr_t old_offset = auipc_instr.utype_imm() + jr_instr.itype_imm();
128 intx_t lo = ImmLo(new_offset);
129 intx_t hi = ImmHi(new_offset);
130 if (!IsUTypeImm(hi)) {
131 FATAL("Jump/branch distance exceeds 2GB!");
132 }
133 Write32(branch_position,
134 EncodeUTypeImm(hi) | EncodeRd(FAR_TMP) | EncodeOpcode(AUIPC));
135 Write32(branch_position + 4, EncodeITypeImm(lo) | EncodeRs1(FAR_TMP) |
136 EncodeFunct3(F3_0) | EncodeRd(ZR) |
137 EncodeOpcode(JALR));
138 return old_offset;
139}
140
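// The emitters below prefer a 16-bit compressed (RV_C) encoding whenever
// the operands and immediate satisfy that encoding's constraints, and
// otherwise fall back to the full 32-bit instruction.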
141void MicroAssembler::lui(Register rd, intptr_t imm) {
142 ASSERT(Supports(RV_I));
143 if (Supports(RV_C) && (rd != ZR) && (rd != SP) && IsCUImm(imm)) {
144 c_lui(rd, imm);
145 return;
146 }
147 EmitUType(imm, rd, LUI);
148}
149
150void MicroAssembler::lui_fixed(Register rd, intptr_t imm) {
151 ASSERT(Supports(RV_I));
152 EmitUType(imm, rd, LUI);
153}
154
155void MicroAssembler::auipc(Register rd, intptr_t imm) {
156 ASSERT(Supports(RV_I));
157 EmitUType(imm, rd, AUIPC);
158}
159
160void MicroAssembler::jal(Register rd, Label* label, JumpDistance distance) {
161 ASSERT(Supports(RV_I));
162 if (Supports(RV_C) &&
163 ((distance == kNearJump) ||
164 (label->IsBound() && IsCJImm(label->Position() - Position())))) {
165 if (rd == ZR) {
166 c_j(label);
167 return;
168 }
169#if XLEN == 32
170 if (rd == RA) {
171 c_jal(label);
172 return;
173 }
174#endif // XLEN == 32
175 }
176 EmitJump(rd, label, JAL, distance);
177}
178
179void MicroAssembler::jalr(Register rd, Register rs1, intptr_t offset) {
180 ASSERT(Supports(RV_I));
181 if (Supports(RV_C)) {
182 if (rs1 != ZR && offset == 0) {
183 if (rd == ZR) {
184 c_jr(rs1);
185 return;
186 } else if (rd == RA) {
187 c_jalr(rs1);
188 return;
189 }
190 }
191 }
192 EmitIType(offset, rs1, F3_0, rd, JALR);
193}
194
195void MicroAssembler::jalr_fixed(Register rd, Register rs1, intptr_t offset) {
196 ASSERT(Supports(RV_I));
197 EmitIType(offset, rs1, F3_0, rd, JALR);
198}
199
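// A compressed branch is used when the caller requests kNearJump, or when
// the label is already bound and the backward distance fits the compressed
// immediate; compressed compare-and-branch forms exist only against zero
// (c.beqz/c.bnez) with an rs1' register. Other unbound branches reserve a
// full-size encoding so they can be patched in Bind().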
200void MicroAssembler::beq(Register rs1,
201 Register rs2,
202 Label* label,
203 JumpDistance distance) {
204 ASSERT(Supports(RV_I));
205 if (Supports(RV_C) &&
206 ((distance == kNearJump) ||
207 (label->IsBound() && IsCBImm(label->Position() - Position())))) {
208 if ((rs1 == ZR) && IsCRs1p(rs2)) {
209 c_beqz(rs2, label);
210 return;
211 } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
212 c_beqz(rs1, label);
213 return;
214 }
215 }
216 EmitBranch(rs1, rs2, label, BEQ, distance);
217}
218
219void MicroAssembler::bne(Register rs1,
220 Register rs2,
221 Label* label,
222 JumpDistance distance) {
223 ASSERT(Supports(RV_I));
224 if (Supports(RV_C) &&
225 ((distance == kNearJump) ||
226 (label->IsBound() && IsCBImm(label->Position() - Position())))) {
227 if ((rs1 == ZR) && IsCRs1p(rs2)) {
228 c_bnez(rs2, label);
229 return;
230 } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
231 c_bnez(rs1, label);
232 return;
233 }
234 }
235 EmitBranch(rs1, rs2, label, BNE, distance);
236}
237
238void MicroAssembler::blt(Register rs1,
239 Register rs2,
240 Label* label,
241 JumpDistance distance) {
242 ASSERT(Supports(RV_I));
243 EmitBranch(rs1, rs2, label, BLT, distance);
244}
245
246void MicroAssembler::bge(Register rs1,
247 Register rs2,
248 Label* label,
249 JumpDistance distance) {
250 ASSERT(Supports(RV_I));
251 EmitBranch(rs1, rs2, label, BGE, distance);
252}
253
254void MicroAssembler::bltu(Register rs1,
255 Register rs2,
256 Label* label,
257 JumpDistance distance) {
258 ASSERT(Supports(RV_I));
259 EmitBranch(rs1, rs2, label, BLTU, distance);
260}
261
262void MicroAssembler::bgeu(Register rs1,
263 Register rs2,
264 Label* label,
265 JumpDistance distance) {
266 EmitBranch(rs1, rs2, label, BGEU, distance);
267}
268
269void MicroAssembler::lb(Register rd, Address addr) {
270 ASSERT(Supports(RV_I));
271 EmitIType(addr.offset(), addr.base(), LB, rd, LOAD);
272}
273
274void MicroAssembler::lh(Register rd, Address addr) {
275 ASSERT(Supports(RV_I));
276 EmitIType(addr.offset(), addr.base(), LH, rd, LOAD);
277}
278
279void MicroAssembler::lw(Register rd, Address addr) {
280 ASSERT(Supports(RV_I));
281 if (Supports(RV_C)) {
282 if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
283 c_lwsp(rd, addr);
284 return;
285 }
286 if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
287 c_lw(rd, addr);
288 return;
289 }
290 }
291 EmitIType(addr.offset(), addr.base(), LW, rd, LOAD);
292}
293
294void MicroAssembler::lbu(Register rd, Address addr) {
295 ASSERT(Supports(RV_I));
296 EmitIType(addr.offset(), addr.base(), LBU, rd, LOAD);
297}
298
299void MicroAssembler::lhu(Register rd, Address addr) {
300 ASSERT(Supports(RV_I));
301 EmitIType(addr.offset(), addr.base(), LHU, rd, LOAD);
302}
303
304void MicroAssembler::sb(Register rs2, Address addr) {
305 ASSERT(Supports(RV_I));
306 EmitSType(addr.offset(), rs2, addr.base(), SB, STORE);
307}
308
309void MicroAssembler::sh(Register rs2, Address addr) {
310 ASSERT(Supports(RV_I));
311 EmitSType(addr.offset(), rs2, addr.base(), SH, STORE);
312}
313
314void MicroAssembler::sw(Register rs2, Address addr) {
315 ASSERT(Supports(RV_I));
316 if (Supports(RV_C)) {
317 if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
318 c_swsp(rs2, addr);
319 return;
320 }
321 if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
322 c_sw(rs2, addr);
323 return;
324 }
325 }
326 EmitSType(addr.offset(), rs2, addr.base(), SW, STORE);
327}
328
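// addi folds into several compressed forms depending on its operands:
// c.li (rs1 == ZR), c.addi (rd == rs1), c.addi16sp / c.addi4spn for
// SP-relative adjustments, and for imm == 0 it degenerates to c.nop or a
// register move (c.mv).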
329void MicroAssembler::addi(Register rd, Register rs1, intptr_t imm) {
330 ASSERT(Supports(RV_I));
331 if (Supports(RV_C)) {
332 if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
333 c_li(rd, imm);
334 return;
335 }
336 if ((rd == rs1) && IsCIImm(imm) && (imm != 0)) {
337 c_addi(rd, rs1, imm);
338 return;
339 }
340 if ((rd == SP) && (rs1 == SP) && IsCI16Imm(imm) && (imm != 0)) {
341 c_addi16sp(rd, rs1, imm);
342 return;
343 }
344 if (IsCRdp(rd) && (rs1 == SP) && IsCI4SPNImm(imm) && (imm != 0)) {
345 c_addi4spn(rd, rs1, imm);
346 return;
347 }
348 if (imm == 0) {
349 if ((rd == ZR) && (rs1 == ZR)) {
350 c_nop();
351 return;
352 }
353 if ((rd != ZR) && (rs1 != ZR)) {
354 c_mv(rd, rs1);
355 return;
356 }
357 }
358 }
359 EmitIType(imm, rs1, ADDI, rd, OPIMM);
360}
361
362void MicroAssembler::slti(Register rd, Register rs1, intptr_t imm) {
363 ASSERT(Supports(RV_I));
364 EmitIType(imm, rs1, SLTI, rd, OPIMM);
365}
366
367void MicroAssembler::sltiu(Register rd, Register rs1, intptr_t imm) {
368 ASSERT(Supports(RV_I));
369 EmitIType(imm, rs1, SLTIU, rd, OPIMM);
370}
371
372void MicroAssembler::xori(Register rd, Register rs1, intptr_t imm) {
373 ASSERT(Supports(RV_I));
374 EmitIType(imm, rs1, XORI, rd, OPIMM);
375}
376
377void MicroAssembler::ori(Register rd, Register rs1, intptr_t imm) {
378 ASSERT(Supports(RV_I));
379 EmitIType(imm, rs1, ORI, rd, OPIMM);
380}
381
382void MicroAssembler::andi(Register rd, Register rs1, intptr_t imm) {
383 ASSERT(Supports(RV_I));
384 if (Supports(RV_C)) {
385 if ((rd == rs1) && IsCRs1p(rs1) && IsCIImm(imm)) {
386 c_andi(rd, rs1, imm);
387 return;
388 }
389 }
390 EmitIType(imm, rs1, ANDI, rd, OPIMM);
391}
392
393void MicroAssembler::slli(Register rd, Register rs1, intptr_t shamt) {
394 ASSERT((shamt > 0) && (shamt < XLEN));
395 ASSERT(Supports(RV_I));
396 if (Supports(RV_C)) {
397 if ((rd == rs1) && (shamt != 0) && IsCIImm(shamt)) {
398 c_slli(rd, rs1, shamt);
399 return;
400 }
401 }
402 EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM);
403}
404
405void MicroAssembler::srli(Register rd, Register rs1, intptr_t shamt) {
406 ASSERT((shamt > 0) && (shamt < XLEN));
407 ASSERT(Supports(RV_I));
408 if (Supports(RV_C)) {
409 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
410 c_srli(rd, rs1, shamt);
411 return;
412 }
413 }
414 EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM);
415}
416
417void MicroAssembler::srai(Register rd, Register rs1, intptr_t shamt) {
418 ASSERT((shamt > 0) && (shamt < XLEN));
419 ASSERT(Supports(RV_I));
420 if (Supports(RV_C)) {
421 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
422 c_srai(rd, rs1, shamt);
423 return;
424 }
425 }
426 EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM);
427}
428
429void MicroAssembler::add(Register rd, Register rs1, Register rs2) {
430 ASSERT(Supports(RV_I));
431 if (Supports(RV_C)) {
432 if (rd == rs1) {
433 c_add(rd, rs1, rs2);
434 return;
435 }
436 if (rd == rs2) {
437 c_add(rd, rs2, rs1);
438 return;
439 }
440 }
441 EmitRType(F7_0, rs2, rs1, ADD, rd, OP);
442}
443
444void MicroAssembler::sub(Register rd, Register rs1, Register rs2) {
445 ASSERT(Supports(RV_I));
446 if (Supports(RV_C)) {
447 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
448 c_sub(rd, rs1, rs2);
449 return;
450 }
451 }
452 EmitRType(SUB, rs2, rs1, ADD, rd, OP);
453}
454
455void MicroAssembler::sll(Register rd, Register rs1, Register rs2) {
456 ASSERT(Supports(RV_I));
457 EmitRType(F7_0, rs2, rs1, SLL, rd, OP);
458}
459
460void MicroAssembler::slt(Register rd, Register rs1, Register rs2) {
461 ASSERT(Supports(RV_I));
462 EmitRType(F7_0, rs2, rs1, SLT, rd, OP);
463}
464
465void MicroAssembler::sltu(Register rd, Register rs1, Register rs2) {
466 ASSERT(Supports(RV_I));
467 EmitRType(F7_0, rs2, rs1, SLTU, rd, OP);
468}
469
470void MicroAssembler::xor_(Register rd, Register rs1, Register rs2) {
471 ASSERT(Supports(RV_I));
472 if (Supports(RV_C)) {
473 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
474 c_xor(rd, rs1, rs2);
475 return;
476 }
477 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
478 c_xor(rd, rs2, rs1);
479 return;
480 }
481 }
482 EmitRType(F7_0, rs2, rs1, XOR, rd, OP);
483}
484
485void MicroAssembler::srl(Register rd, Register rs1, Register rs2) {
486 ASSERT(Supports(RV_I));
487 EmitRType(F7_0, rs2, rs1, SR, rd, OP);
488}
489
490void MicroAssembler::sra(Register rd, Register rs1, Register rs2) {
491 ASSERT(Supports(RV_I));
492 EmitRType(SRA, rs2, rs1, SR, rd, OP);
493}
494
495void MicroAssembler::or_(Register rd, Register rs1, Register rs2) {
496 ASSERT(Supports(RV_I));
497 if (Supports(RV_C)) {
498 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
499 c_or(rd, rs1, rs2);
500 return;
501 }
502 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
503 c_or(rd, rs2, rs1);
504 return;
505 }
506 }
507 EmitRType(F7_0, rs2, rs1, OR, rd, OP);
508}
509
510void MicroAssembler::and_(Register rd, Register rs1, Register rs2) {
511 ASSERT(Supports(RV_I));
512 if (Supports(RV_C)) {
513 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
514 c_and(rd, rs1, rs2);
515 return;
516 }
517 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
518 c_and(rd, rs2, rs1);
519 return;
520 }
521 }
522 EmitRType(F7_0, rs2, rs1, AND, rd, OP);
523}
524
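// FENCE packs its ordering constraints into the I-type immediate:
// predecessor effects in bits [7:4], successor effects in bits [3:0].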
525void MicroAssembler::fence(HartEffects predecessor, HartEffects successor) {
526 ASSERT((predecessor & kAll) == predecessor);
527 ASSERT((successor & kAll) == successor);
528 ASSERT(Supports(RV_I));
529 EmitIType((predecessor << 4) | successor, ZR, FENCE, ZR, MISCMEM);
530}
531
532void MicroAssembler::fencei() {
533 ASSERT(Supports(RV_I));
534 EmitIType(0, ZR, FENCEI, ZR, MISCMEM);
535}
536
537void MicroAssembler::ecall() {
538 ASSERT(Supports(RV_I));
539 EmitIType(ECALL, ZR, F3_0, ZR, SYSTEM);
540}
541void MicroAssembler::ebreak() {
542 ASSERT(Supports(RV_I));
543 if (Supports(RV_C)) {
544 c_ebreak();
545 return;
546 }
547 EmitIType(EBREAK, ZR, F3_0, ZR, SYSTEM);
548}
549void MicroAssembler::SimulatorPrintObject(Register rs1) {
550 ASSERT(Supports(RV_I));
551 EmitIType(ECALL, rs1, F3_0, ZR, SYSTEM);
552}
553
554void MicroAssembler::csrrw(Register rd, uint32_t csr, Register rs1) {
555 ASSERT(Supports(RV_I));
556 EmitIType(csr, rs1, CSRRW, rd, SYSTEM);
557}
558
559void MicroAssembler::csrrs(Register rd, uint32_t csr, Register rs1) {
560 ASSERT(Supports(RV_I));
561 EmitIType(csr, rs1, CSRRS, rd, SYSTEM);
562}
563
564void MicroAssembler::csrrc(Register rd, uint32_t csr, Register rs1) {
565 ASSERT(Supports(RV_I));
566 EmitIType(csr, rs1, CSRRC, rd, SYSTEM);
567}
568
569void MicroAssembler::csrrwi(Register rd, uint32_t csr, uint32_t imm) {
570 ASSERT(Supports(RV_I));
571 EmitIType(csr, Register(imm), CSRRWI, rd, SYSTEM);
572}
573
574void MicroAssembler::csrrsi(Register rd, uint32_t csr, uint32_t imm) {
575 ASSERT(Supports(RV_I));
576 EmitIType(csr, Register(imm), CSRRSI, rd, SYSTEM);
577}
578
579void MicroAssembler::csrrci(Register rd, uint32_t csr, uint32_t imm) {
580 ASSERT(Supports(RV_I));
581 EmitIType(csr, Register(imm), CSRRCI, rd, SYSTEM);
582}
583
584void MicroAssembler::trap() {
585 ASSERT(Supports(RV_I));
586 if (Supports(RV_C)) {
587 Emit16(0); // Permanently reserved illegal instruction.
588 } else {
589 Emit32(0); // Permanently reserved illegal instruction.
590 }
591}
592
593#if XLEN >= 64
594void MicroAssembler::lwu(Register rd, Address addr) {
595 ASSERT(Supports(RV_I));
596 EmitIType(addr.offset(), addr.base(), LWU, rd, LOAD);
597}
598
599void MicroAssembler::ld(Register rd, Address addr) {
600 ASSERT(Supports(RV_I));
601 if (Supports(RV_C)) {
602 if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
603 c_ldsp(rd, addr);
604 return;
605 }
606 if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
607 c_ld(rd, addr);
608 return;
609 }
610 }
611 EmitIType(addr.offset(), addr.base(), LD, rd, LOAD);
612}
613
614void MicroAssembler::sd(Register rs2, Address addr) {
615 ASSERT(Supports(RV_I));
616 if (Supports(RV_C)) {
617 if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
618 c_sdsp(rs2, addr);
619 return;
620 }
621 if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
622 c_sd(rs2, addr);
623 return;
624 }
625 }
626 EmitSType(addr.offset(), rs2, addr.base(), SD, STORE);
627}
628
629void MicroAssembler::addiw(Register rd, Register rs1, intptr_t imm) {
630 ASSERT(Supports(RV_I));
631 if (Supports(RV_C)) {
632 if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
633 c_li(rd, imm);
634 return;
635 }
636 if ((rd == rs1) && (rd != ZR) && IsCIImm(imm)) {
637 c_addiw(rd, rs1, imm);
638 return;
639 }
640 }
641 EmitIType(imm, rs1, ADDI, rd, OPIMM32);
642}
643
644void MicroAssembler::slliw(Register rd, Register rs1, intptr_t shamt) {
645 ASSERT((shamt > 0) && (shamt < 32));
646 ASSERT(Supports(RV_I));
647 EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM32);
648}
649
650void MicroAssembler::srliw(Register rd, Register rs1, intptr_t shamt) {
651 ASSERT((shamt > 0) && (shamt < 32));
652 ASSERT(Supports(RV_I));
653 EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM32);
654}
655
656void MicroAssembler::sraiw(Register rd, Register rs1, intptr_t shamt) {
657 ASSERT((shamt > 0) && (shamt < 32));
658 ASSERT(Supports(RV_I));
659 EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM32);
660}
661
662void MicroAssembler::addw(Register rd, Register rs1, Register rs2) {
663 ASSERT(Supports(RV_I));
664 if (Supports(RV_C)) {
665 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
666 c_addw(rd, rs1, rs2);
667 return;
668 }
669 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
670 c_addw(rd, rs2, rs1);
671 return;
672 }
673 }
674 EmitRType(F7_0, rs2, rs1, ADD, rd, OP32);
675}
676
677void MicroAssembler::subw(Register rd, Register rs1, Register rs2) {
678 ASSERT(Supports(RV_I));
679 if (Supports(RV_C)) {
680 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
681 c_subw(rd, rs1, rs2);
682 return;
683 }
684 }
685 EmitRType(SUB, rs2, rs1, ADD, rd, OP32);
686}
687
688void MicroAssembler::sllw(Register rd, Register rs1, Register rs2) {
689 ASSERT(Supports(RV_I));
690 EmitRType(F7_0, rs2, rs1, SLL, rd, OP32);
691}
692
693void MicroAssembler::srlw(Register rd, Register rs1, Register rs2) {
694 ASSERT(Supports(RV_I));
695 EmitRType(F7_0, rs2, rs1, SR, rd, OP32);
696}
697void MicroAssembler::sraw(Register rd, Register rs1, Register rs2) {
698 ASSERT(Supports(RV_I));
699 EmitRType(SRA, rs2, rs1, SR, rd, OP32);
700}
701#endif // XLEN >= 64
702
703void MicroAssembler::mul(Register rd, Register rs1, Register rs2) {
704 ASSERT(Supports(RV_M));
705 EmitRType(MULDIV, rs2, rs1, MUL, rd, OP);
706}
707
708void MicroAssembler::mulh(Register rd, Register rs1, Register rs2) {
709 ASSERT(Supports(RV_M));
710 EmitRType(MULDIV, rs2, rs1, MULH, rd, OP);
711}
712
713void MicroAssembler::mulhsu(Register rd, Register rs1, Register rs2) {
714 ASSERT(Supports(RV_M));
715 EmitRType(MULDIV, rs2, rs1, MULHSU, rd, OP);
716}
717
718void MicroAssembler::mulhu(Register rd, Register rs1, Register rs2) {
719 ASSERT(Supports(RV_M));
720 EmitRType(MULDIV, rs2, rs1, MULHU, rd, OP);
721}
722
723void MicroAssembler::div(Register rd, Register rs1, Register rs2) {
724 ASSERT(Supports(RV_M));
725 EmitRType(MULDIV, rs2, rs1, DIV, rd, OP);
726}
727
728void MicroAssembler::divu(Register rd, Register rs1, Register rs2) {
729 ASSERT(Supports(RV_M));
730 EmitRType(MULDIV, rs2, rs1, DIVU, rd, OP);
731}
732
733void MicroAssembler::rem(Register rd, Register rs1, Register rs2) {
734 ASSERT(Supports(RV_M));
735 EmitRType(MULDIV, rs2, rs1, REM, rd, OP);
736}
737
738void MicroAssembler::remu(Register rd, Register rs1, Register rs2) {
739 ASSERT(Supports(RV_M));
740 EmitRType(MULDIV, rs2, rs1, REMU, rd, OP);
741}
742
743#if XLEN >= 64
744void MicroAssembler::mulw(Register rd, Register rs1, Register rs2) {
745 ASSERT(Supports(RV_M));
746 EmitRType(MULDIV, rs2, rs1, MULW, rd, OP32);
747}
748
749void MicroAssembler::divw(Register rd, Register rs1, Register rs2) {
750 ASSERT(Supports(RV_M));
751 EmitRType(MULDIV, rs2, rs1, DIVW, rd, OP32);
752}
753
754void MicroAssembler::divuw(Register rd, Register rs1, Register rs2) {
755 ASSERT(Supports(RV_M));
756 EmitRType(MULDIV, rs2, rs1, DIVUW, rd, OP32);
757}
758
759void MicroAssembler::remw(Register rd, Register rs1, Register rs2) {
760 ASSERT(Supports(RV_M));
761 EmitRType(MULDIV, rs2, rs1, REMW, rd, OP32);
762}
763
764void MicroAssembler::remuw(Register rd, Register rs1, Register rs2) {
765 ASSERT(Supports(RV_M));
766 EmitRType(MULDIV, rs2, rs1, REMUW, rd, OP32);
767}
768#endif // XLEN >= 64
769
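// Atomic memory operations (RV_A). LR/SC and AMOs take a base register with
// zero offset; the std::memory_order argument supplies the aq/rl ordering
// bits of the encoding.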
770void MicroAssembler::lrw(Register rd, Address addr, std::memory_order order) {
771 ASSERT(addr.offset() == 0);
772 ASSERT(Supports(RV_A));
773 EmitRType(LR, order, ZR, addr.base(), WIDTH32, rd, AMO);
774}
775void MicroAssembler::scw(Register rd,
776 Register rs2,
777 Address addr,
778 std::memory_order order) {
779 ASSERT(addr.offset() == 0);
780 ASSERT(Supports(RV_A));
781 EmitRType(SC, order, rs2, addr.base(), WIDTH32, rd, AMO);
782}
783
784void MicroAssembler::amoswapw(Register rd,
785 Register rs2,
786 Address addr,
787 std::memory_order order) {
788 ASSERT(addr.offset() == 0);
789 ASSERT(Supports(RV_A));
790 EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH32, rd, AMO);
791}
792
793void MicroAssembler::amoaddw(Register rd,
794 Register rs2,
795 Address addr,
796 std::memory_order order) {
797 ASSERT(addr.offset() == 0);
798 ASSERT(Supports(RV_A));
799 EmitRType(AMOADD, order, rs2, addr.base(), WIDTH32, rd, AMO);
800}
801
802void MicroAssembler::amoxorw(Register rd,
803 Register rs2,
804 Address addr,
805 std::memory_order order) {
806 ASSERT(addr.offset() == 0);
807 ASSERT(Supports(RV_A));
808 EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
809}
810
811void MicroAssembler::amoandw(Register rd,
812 Register rs2,
813 Address addr,
814 std::memory_order order) {
815 ASSERT(addr.offset() == 0);
816 ASSERT(Supports(RV_A));
817 EmitRType(AMOAND, order, rs2, addr.base(), WIDTH32, rd, AMO);
818}
819
820void MicroAssembler::amoorw(Register rd,
821 Register rs2,
822 Address addr,
823 std::memory_order order) {
824 ASSERT(addr.offset() == 0);
825 ASSERT(Supports(RV_A));
826 EmitRType(AMOOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
827}
828
829void MicroAssembler::amominw(Register rd,
830 Register rs2,
831 Address addr,
832 std::memory_order order) {
833 ASSERT(addr.offset() == 0);
834 ASSERT(Supports(RV_A));
835 EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH32, rd, AMO);
836}
837
838void MicroAssembler::amomaxw(Register rd,
839 Register rs2,
840 Address addr,
841 std::memory_order order) {
842 ASSERT(addr.offset() == 0);
843 ASSERT(Supports(RV_A));
844 EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH32, rd, AMO);
845}
846
847void MicroAssembler::amominuw(Register rd,
848 Register rs2,
849 Address addr,
850 std::memory_order order) {
851 ASSERT(addr.offset() == 0);
852 ASSERT(Supports(RV_A));
853 EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH32, rd, AMO);
854}
855
856void MicroAssembler::amomaxuw(Register rd,
857 Register rs2,
858 Address addr,
859 std::memory_order order) {
860 ASSERT(addr.offset() == 0);
861 ASSERT(Supports(RV_A));
862 EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH32, rd, AMO);
863}
864
865#if XLEN >= 64
866void MicroAssembler::lrd(Register rd, Address addr, std::memory_order order) {
867 ASSERT(addr.offset() == 0);
868 ASSERT(Supports(RV_A));
869 EmitRType(LR, order, ZR, addr.base(), WIDTH64, rd, AMO);
870}
871
872void MicroAssembler::scd(Register rd,
873 Register rs2,
874 Address addr,
875 std::memory_order order) {
876 ASSERT(addr.offset() == 0);
877 ASSERT(Supports(RV_A));
878 EmitRType(SC, order, rs2, addr.base(), WIDTH64, rd, AMO);
879}
880
881void MicroAssembler::amoswapd(Register rd,
882 Register rs2,
883 Address addr,
884 std::memory_order order) {
885 ASSERT(addr.offset() == 0);
886 ASSERT(Supports(RV_A));
887 EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH64, rd, AMO);
888}
889
890void MicroAssembler::amoaddd(Register rd,
891 Register rs2,
892 Address addr,
893 std::memory_order order) {
894 ASSERT(addr.offset() == 0);
895 ASSERT(Supports(RV_A));
896 EmitRType(AMOADD, order, rs2, addr.base(), WIDTH64, rd, AMO);
897}
898
899void MicroAssembler::amoxord(Register rd,
900 Register rs2,
901 Address addr,
902 std::memory_order order) {
903 ASSERT(addr.offset() == 0);
904 ASSERT(Supports(RV_A));
905 EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
906}
907
908void MicroAssembler::amoandd(Register rd,
909 Register rs2,
910 Address addr,
911 std::memory_order order) {
912 ASSERT(addr.offset() == 0);
913 ASSERT(Supports(RV_A));
914 EmitRType(AMOAND, order, rs2, addr.base(), WIDTH64, rd, AMO);
915}
916
917void MicroAssembler::amoord(Register rd,
918 Register rs2,
919 Address addr,
920 std::memory_order order) {
921 ASSERT(addr.offset() == 0);
922 ASSERT(Supports(RV_A));
923 EmitRType(AMOOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
924}
925
926void MicroAssembler::amomind(Register rd,
927 Register rs2,
928 Address addr,
929 std::memory_order order) {
930 ASSERT(addr.offset() == 0);
931 ASSERT(Supports(RV_A));
932 EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH64, rd, AMO);
933}
934
935void MicroAssembler::amomaxd(Register rd,
936 Register rs2,
937 Address addr,
938 std::memory_order order) {
939 ASSERT(addr.offset() == 0);
940 ASSERT(Supports(RV_A));
941 EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH64, rd, AMO);
942}
943
944void MicroAssembler::amominud(Register rd,
945 Register rs2,
946 Address addr,
947 std::memory_order order) {
948 ASSERT(addr.offset() == 0);
949 ASSERT(Supports(RV_A));
950 EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH64, rd, AMO);
951}
952
953void MicroAssembler::amomaxud(Register rd,
954 Register rs2,
955 Address addr,
956 std::memory_order order) {
957 ASSERT(addr.offset() == 0);
958 ASSERT(Supports(RV_A));
959 EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH64, rd, AMO);
960}
961#endif // XLEN >= 64
962
963void MicroAssembler::flw(FRegister rd, Address addr) {
964 ASSERT(Supports(RV_F));
965#if XLEN == 32
966 if (Supports(RV_C)) {
967 if ((addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
968 c_flwsp(rd, addr);
969 return;
970 }
971 if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
972 c_flw(rd, addr);
973 return;
974 }
975 }
976#endif // XLEN == 32
977 EmitIType(addr.offset(), addr.base(), S, rd, LOADFP);
978}
979
980void MicroAssembler::fsw(FRegister rs2, Address addr) {
981 ASSERT(Supports(RV_F));
982#if XLEN == 32
983 if (Supports(RV_C)) {
984 if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
985 c_fswsp(rs2, addr);
986 return;
987 }
988 if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
989 c_fsw(rs2, addr);
990 return;
991 }
992 }
993#endif // XLEN == 32
994 EmitSType(addr.offset(), rs2, addr.base(), S, STOREFP);
995}
996
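// Floating-point (RV_F/RV_D) arithmetic. The RoundingMode argument is
// encoded in the funct3 position of the instruction; comparisons and
// classification write their result to an integer register.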
997void MicroAssembler::fmadds(FRegister rd,
998 FRegister rs1,
999 FRegister rs2,
1000 FRegister rs3,
1001 RoundingMode rounding) {
1002 ASSERT(Supports(RV_F));
1003 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMADD);
1004}
1005
1006void MicroAssembler::fmsubs(FRegister rd,
1007 FRegister rs1,
1008 FRegister rs2,
1009 FRegister rs3,
1010 RoundingMode rounding) {
1011 ASSERT(Supports(RV_F));
1012 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMSUB);
1013}
1014
1015void MicroAssembler::fnmsubs(FRegister rd,
1016 FRegister rs1,
1017 FRegister rs2,
1018 FRegister rs3,
1019 RoundingMode rounding) {
1020 ASSERT(Supports(RV_F));
1021 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMSUB);
1022}
1023
1024void MicroAssembler::fnmadds(FRegister rd,
1025 FRegister rs1,
1026 FRegister rs2,
1027 FRegister rs3,
1028 RoundingMode rounding) {
1029 ASSERT(Supports(RV_F));
1030 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMADD);
1031}
1032
1033void MicroAssembler::fadds(FRegister rd,
1034 FRegister rs1,
1035 FRegister rs2,
1036 RoundingMode rounding) {
1037 ASSERT(Supports(RV_F));
1038 EmitRType(FADDS, rs2, rs1, rounding, rd, OPFP);
1039}
1040
1041void MicroAssembler::fsubs(FRegister rd,
1042 FRegister rs1,
1043 FRegister rs2,
1044 RoundingMode rounding) {
1045 ASSERT(Supports(RV_F));
1046 EmitRType(FSUBS, rs2, rs1, rounding, rd, OPFP);
1047}
1048
1049void MicroAssembler::fmuls(FRegister rd,
1050 FRegister rs1,
1051 FRegister rs2,
1052 RoundingMode rounding) {
1053 ASSERT(Supports(RV_F));
1054 EmitRType(FMULS, rs2, rs1, rounding, rd, OPFP);
1055}
1056
1057void MicroAssembler::fdivs(FRegister rd,
1058 FRegister rs1,
1059 FRegister rs2,
1060 RoundingMode rounding) {
1061 ASSERT(Supports(RV_F));
1062 EmitRType(FDIVS, rs2, rs1, rounding, rd, OPFP);
1063}
1064
1065void MicroAssembler::fsqrts(FRegister rd,
1066 FRegister rs1,
1067 RoundingMode rounding) {
1068 ASSERT(Supports(RV_F));
1069 EmitRType(FSQRTS, FRegister(0), rs1, rounding, rd, OPFP);
1070}
1071
1072void MicroAssembler::fsgnjs(FRegister rd, FRegister rs1, FRegister rs2) {
1073 ASSERT(Supports(RV_F));
1074 EmitRType(FSGNJS, rs2, rs1, J, rd, OPFP);
1075}
1076
1077void MicroAssembler::fsgnjns(FRegister rd, FRegister rs1, FRegister rs2) {
1078 ASSERT(Supports(RV_F));
1079 EmitRType(FSGNJS, rs2, rs1, JN, rd, OPFP);
1080}
1081
1082void MicroAssembler::fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2) {
1083 ASSERT(Supports(RV_F));
1084 EmitRType(FSGNJS, rs2, rs1, JX, rd, OPFP);
1085}
1086
1087void MicroAssembler::fmins(FRegister rd, FRegister rs1, FRegister rs2) {
1088 ASSERT(Supports(RV_F));
1089 EmitRType(FMINMAXS, rs2, rs1, FMIN, rd, OPFP);
1090}
1091
1092void MicroAssembler::fmaxs(FRegister rd, FRegister rs1, FRegister rs2) {
1093 ASSERT(Supports(RV_F));
1094 EmitRType(FMINMAXS, rs2, rs1, FMAX, rd, OPFP);
1095}
1096
1097void MicroAssembler::feqs(Register rd, FRegister rs1, FRegister rs2) {
1098 ASSERT(Supports(RV_F));
1099 EmitRType(FCMPS, rs2, rs1, FEQ, rd, OPFP);
1100}
1101
1102void MicroAssembler::flts(Register rd, FRegister rs1, FRegister rs2) {
1103 ASSERT(Supports(RV_F));
1104 EmitRType(FCMPS, rs2, rs1, FLT, rd, OPFP);
1105}
1106
1107void MicroAssembler::fles(Register rd, FRegister rs1, FRegister rs2) {
1108 ASSERT(Supports(RV_F));
1109 EmitRType(FCMPS, rs2, rs1, FLE, rd, OPFP);
1110}
1111
1112void MicroAssembler::fclasss(Register rd, FRegister rs1) {
1113 ASSERT(Supports(RV_F));
1114 EmitRType(FCLASSS, FRegister(0), rs1, F3_1, rd, OPFP);
1115}
1116
1117void MicroAssembler::fcvtws(Register rd, FRegister rs1, RoundingMode rounding) {
1118 ASSERT(Supports(RV_F));
1119 EmitRType(FCVTintS, FRegister(W), rs1, rounding, rd, OPFP);
1120}
1121
1122void MicroAssembler::fcvtwus(Register rd,
1123 FRegister rs1,
1124 RoundingMode rounding) {
1125 ASSERT(Supports(RV_F));
1126 EmitRType(FCVTintS, FRegister(WU), rs1, rounding, rd, OPFP);
1127}
1128
1129void MicroAssembler::fcvtsw(FRegister rd, Register rs1, RoundingMode rounding) {
1130 ASSERT(Supports(RV_F));
1131 EmitRType(FCVTSint, FRegister(W), rs1, rounding, rd, OPFP);
1132}
1133
1134void MicroAssembler::fcvtswu(FRegister rd,
1135 Register rs1,
1136 RoundingMode rounding) {
1137 ASSERT(Supports(RV_F));
1138 EmitRType(FCVTSint, FRegister(WU), rs1, rounding, rd, OPFP);
1139}
1140
1141void MicroAssembler::fmvxw(Register rd, FRegister rs1) {
1142 ASSERT(Supports(RV_F));
1143 EmitRType(FMVXW, FRegister(0), rs1, F3_0, rd, OPFP);
1144}
1145
1146void MicroAssembler::fmvwx(FRegister rd, Register rs1) {
1147 ASSERT(Supports(RV_F));
1148 EmitRType(FMVWX, FRegister(0), rs1, F3_0, rd, OPFP);
1149}
1150
1151#if XLEN >= 64
1152void MicroAssembler::fcvtls(Register rd, FRegister rs1, RoundingMode rounding) {
1153 ASSERT(Supports(RV_F));
1154 EmitRType(FCVTintS, FRegister(L), rs1, rounding, rd, OPFP);
1155}
1156
1157void MicroAssembler::fcvtlus(Register rd,
1158 FRegister rs1,
1159 RoundingMode rounding) {
1160 ASSERT(Supports(RV_F));
1161 EmitRType(FCVTintS, FRegister(LU), rs1, rounding, rd, OPFP);
1162}
1163
1164void MicroAssembler::fcvtsl(FRegister rd, Register rs1, RoundingMode rounding) {
1165 ASSERT(Supports(RV_F));
1166 EmitRType(FCVTSint, FRegister(L), rs1, rounding, rd, OPFP);
1167}
1168
1169void MicroAssembler::fcvtslu(FRegister rd,
1170 Register rs1,
1171 RoundingMode rounding) {
1172 ASSERT(Supports(RV_F));
1173 EmitRType(FCVTSint, FRegister(LU), rs1, rounding, rd, OPFP);
1174}
1175#endif // XLEN >= 64
1176
1177void MicroAssembler::fld(FRegister rd, Address addr) {
1178 ASSERT(Supports(RV_D));
1179 if (Supports(RV_C)) {
1180 if ((addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
1181 c_fldsp(rd, addr);
1182 return;
1183 }
1184 if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
1185 c_fld(rd, addr);
1186 return;
1187 }
1188 }
1189 EmitIType(addr.offset(), addr.base(), D, rd, LOADFP);
1190}
1191
1192void MicroAssembler::fsd(FRegister rs2, Address addr) {
1193 ASSERT(Supports(RV_D));
1194 if (Supports(RV_C)) {
1195 if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
1196 c_fsdsp(rs2, addr);
1197 return;
1198 }
1199 if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
1200 c_fsd(rs2, addr);
1201 return;
1202 }
1203 }
1204 EmitSType(addr.offset(), rs2, addr.base(), D, STOREFP);
1205}
1206
1207void MicroAssembler::fmaddd(FRegister rd,
1208 FRegister rs1,
1209 FRegister rs2,
1210 FRegister rs3,
1211 RoundingMode rounding) {
1212 ASSERT(Supports(RV_D));
1213 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMADD);
1214}
1215
1216void MicroAssembler::fmsubd(FRegister rd,
1217 FRegister rs1,
1218 FRegister rs2,
1219 FRegister rs3,
1220 RoundingMode rounding) {
1221 ASSERT(Supports(RV_D));
1222 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMSUB);
1223}
1224
1225void MicroAssembler::fnmsubd(FRegister rd,
1226 FRegister rs1,
1227 FRegister rs2,
1228 FRegister rs3,
1229 RoundingMode rounding) {
1230 ASSERT(Supports(RV_D));
1231 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMSUB);
1232}
1233
1234void MicroAssembler::fnmaddd(FRegister rd,
1235 FRegister rs1,
1236 FRegister rs2,
1237 FRegister rs3,
1238 RoundingMode rounding) {
1239 ASSERT(Supports(RV_D));
1240 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMADD);
1241}
1242
1243void MicroAssembler::faddd(FRegister rd,
1244 FRegister rs1,
1245 FRegister rs2,
1246 RoundingMode rounding) {
1247 ASSERT(Supports(RV_D));
1248 EmitRType(FADDD, rs2, rs1, rounding, rd, OPFP);
1249}
1250
1251void MicroAssembler::fsubd(FRegister rd,
1252 FRegister rs1,
1253 FRegister rs2,
1254 RoundingMode rounding) {
1255 ASSERT(Supports(RV_D));
1256 EmitRType(FSUBD, rs2, rs1, rounding, rd, OPFP);
1257}
1258
1259void MicroAssembler::fmuld(FRegister rd,
1260 FRegister rs1,
1261 FRegister rs2,
1262 RoundingMode rounding) {
1263 ASSERT(Supports(RV_D));
1264 EmitRType(FMULD, rs2, rs1, rounding, rd, OPFP);
1265}
1266
1267void MicroAssembler::fdivd(FRegister rd,
1268 FRegister rs1,
1269 FRegister rs2,
1270 RoundingMode rounding) {
1271 ASSERT(Supports(RV_D));
1272 EmitRType(FDIVD, rs2, rs1, rounding, rd, OPFP);
1273}
1274
1275void MicroAssembler::fsqrtd(FRegister rd,
1276 FRegister rs1,
1277 RoundingMode rounding) {
1278 ASSERT(Supports(RV_D));
1279 EmitRType(FSQRTD, FRegister(0), rs1, rounding, rd, OPFP);
1280}
1281
1282void MicroAssembler::fsgnjd(FRegister rd, FRegister rs1, FRegister rs2) {
1283 ASSERT(Supports(RV_D));
1284 EmitRType(FSGNJD, rs2, rs1, J, rd, OPFP);
1285}
1286
1287void MicroAssembler::fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2) {
1288 ASSERT(Supports(RV_D));
1289 EmitRType(FSGNJD, rs2, rs1, JN, rd, OPFP);
1290}
1291
1292void MicroAssembler::fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2) {
1293 ASSERT(Supports(RV_D));
1294 EmitRType(FSGNJD, rs2, rs1, JX, rd, OPFP);
1295}
1296
1297void MicroAssembler::fmind(FRegister rd, FRegister rs1, FRegister rs2) {
1298 ASSERT(Supports(RV_D));
1299 EmitRType(FMINMAXD, rs2, rs1, FMIN, rd, OPFP);
1300}
1301
1302void MicroAssembler::fmaxd(FRegister rd, FRegister rs1, FRegister rs2) {
1303 ASSERT(Supports(RV_D));
1304 EmitRType(FMINMAXD, rs2, rs1, FMAX, rd, OPFP);
1305}
1306
1307void MicroAssembler::fcvtsd(FRegister rd,
1308 FRegister rs1,
1309 RoundingMode rounding) {
1310 ASSERT(Supports(RV_D));
1311 EmitRType(FCVTS, FRegister(1), rs1, rounding, rd, OPFP);
1312}
1313
1314void MicroAssembler::fcvtds(FRegister rd,
1315 FRegister rs1,
1316 RoundingMode rounding) {
1317 ASSERT(Supports(RV_D));
1318 EmitRType(FCVTD, FRegister(0), rs1, rounding, rd, OPFP);
1319}
1320
1321void MicroAssembler::feqd(Register rd, FRegister rs1, FRegister rs2) {
1322 ASSERT(Supports(RV_D));
1323 EmitRType(FCMPD, rs2, rs1, FEQ, rd, OPFP);
1324}
1325
1326void MicroAssembler::fltd(Register rd, FRegister rs1, FRegister rs2) {
1327 ASSERT(Supports(RV_D));
1328 EmitRType(FCMPD, rs2, rs1, FLT, rd, OPFP);
1329}
1330
1331void MicroAssembler::fled(Register rd, FRegister rs1, FRegister rs2) {
1332 ASSERT(Supports(RV_D));
1333 EmitRType(FCMPD, rs2, rs1, FLE, rd, OPFP);
1334}
1335
1336void MicroAssembler::fclassd(Register rd, FRegister rs1) {
1337 ASSERT(Supports(RV_D));
1338 EmitRType(FCLASSD, FRegister(0), rs1, F3_1, rd, OPFP);
1339}
1340
1341void MicroAssembler::fcvtwd(Register rd, FRegister rs1, RoundingMode rounding) {
1342 ASSERT(Supports(RV_D));
1343 EmitRType(FCVTintD, FRegister(W), rs1, rounding, rd, OPFP);
1344}
1345
1346void MicroAssembler::fcvtwud(Register rd,
1347 FRegister rs1,
1348 RoundingMode rounding) {
1349 ASSERT(Supports(RV_D));
1350 EmitRType(FCVTintD, FRegister(WU), rs1, rounding, rd, OPFP);
1351}
1352
1353void MicroAssembler::fcvtdw(FRegister rd, Register rs1, RoundingMode rounding) {
1354 ASSERT(Supports(RV_D));
1355 EmitRType(FCVTDint, FRegister(W), rs1, rounding, rd, OPFP);
1356}
1357
1358void MicroAssembler::fcvtdwu(FRegister rd,
1359 Register rs1,
1360 RoundingMode rounding) {
1361 ASSERT(Supports(RV_D));
1362 EmitRType(FCVTDint, FRegister(WU), rs1, rounding, rd, OPFP);
1363}
1364
1365#if XLEN >= 64
1366void MicroAssembler::fcvtld(Register rd, FRegister rs1, RoundingMode rounding) {
1367 ASSERT(Supports(RV_D));
1368 EmitRType(FCVTintD, FRegister(L), rs1, rounding, rd, OPFP);
1369}
1370
1371void MicroAssembler::fcvtlud(Register rd,
1372 FRegister rs1,
1373 RoundingMode rounding) {
1374 ASSERT(Supports(RV_D));
1375 EmitRType(FCVTintD, FRegister(LU), rs1, rounding, rd, OPFP);
1376}
1377
1378void MicroAssembler::fmvxd(Register rd, FRegister rs1) {
1379 ASSERT(Supports(RV_D));
1380 EmitRType(FMVXD, FRegister(0), rs1, F3_0, rd, OPFP);
1381}
1382
1383void MicroAssembler::fcvtdl(FRegister rd, Register rs1, RoundingMode rounding) {
1384 ASSERT(Supports(RV_D));
1385 EmitRType(FCVTDint, FRegister(L), rs1, rounding, rd, OPFP);
1386}
1387
1388void MicroAssembler::fcvtdlu(FRegister rd,
1389 Register rs1,
1390 RoundingMode rounding) {
1391 ASSERT(Supports(RV_D));
1392 EmitRType(FCVTDint, FRegister(LU), rs1, rounding, rd, OPFP);
1393}
1394
1395void MicroAssembler::fmvdx(FRegister rd, Register rs1) {
1396 ASSERT(Supports(RV_D));
1397 EmitRType(FMVDX, FRegister(0), rs1, F3_0, rd, OPFP);
1398}
1399#endif // XLEN >= 64
1400
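// Zba address generation: shNadd computes (rs1 << N) + rs2; the .uw
// variants first zero-extend the low 32 bits of rs1.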
1401#if XLEN >= 64
1402void MicroAssembler::adduw(Register rd, Register rs1, Register rs2) {
1403 ASSERT(Supports(RV_Zba));
1404 EmitRType(ADDUW, rs2, rs1, F3_0, rd, OP32);
1405}
1406#endif
1407
1408void MicroAssembler::sh1add(Register rd, Register rs1, Register rs2) {
1409 ASSERT(Supports(RV_Zba));
1410 EmitRType(SHADD, rs2, rs1, SH1ADD, rd, OP);
1411}
1412
1413#if XLEN >= 64
1414void MicroAssembler::sh1adduw(Register rd, Register rs1, Register rs2) {
1415 ASSERT(Supports(RV_Zba));
1416 EmitRType(SHADD, rs2, rs1, SH1ADD, rd, OP32);
1417}
1418#endif
1419
1420void MicroAssembler::sh2add(Register rd, Register rs1, Register rs2) {
1421 ASSERT(Supports(RV_Zba));
1422 EmitRType(SHADD, rs2, rs1, SH2ADD, rd, OP);
1423}
1424
1425#if XLEN >= 64
1426void MicroAssembler::sh2adduw(Register rd, Register rs1, Register rs2) {
1427 ASSERT(Supports(RV_Zba));
1428 EmitRType(SHADD, rs2, rs1, SH2ADD, rd, OP32);
1429}
1430#endif
1431
1432void MicroAssembler::sh3add(Register rd, Register rs1, Register rs2) {
1433 ASSERT(Supports(RV_Zba));
1434 EmitRType(SHADD, rs2, rs1, SH3ADD, rd, OP);
1435}
1436
1437#if XLEN >= 64
1438void MicroAssembler::sh3adduw(Register rd, Register rs1, Register rs2) {
1439 ASSERT(Supports(RV_Zba));
1440 EmitRType(SHADD, rs2, rs1, SH3ADD, rd, OP32);
1441}
1442
1443void MicroAssembler::slliuw(Register rd, Register rs1, intx_t shamt) {
1444 ASSERT((shamt > 0) && (shamt < 32));
1445 ASSERT(Supports(RV_Zba));
1446 EmitRType(SLLIUW, shamt, rs1, SLLI, rd, OPIMM32);
1447}
1448#endif
1449
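// Zbb basic bit manipulation: and/or/xor with complemented rs2, leading and
// trailing zero counts, population count, signed/unsigned min and max,
// sign/zero extension, rotates, or-combine (orc.b), and byte reversal.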
1450void MicroAssembler::andn(Register rd, Register rs1, Register rs2) {
1451 ASSERT(Supports(RV_Zbb));
1452 EmitRType(SUB, rs2, rs1, AND, rd, OP);
1453}
1454
1455void MicroAssembler::orn(Register rd, Register rs1, Register rs2) {
1456 ASSERT(Supports(RV_Zbb));
1457 EmitRType(SUB, rs2, rs1, OR, rd, OP);
1458}
1459
1460void MicroAssembler::xnor(Register rd, Register rs1, Register rs2) {
1461 ASSERT(Supports(RV_Zbb));
1462 EmitRType(SUB, rs2, rs1, XOR, rd, OP);
1463}
1464
1465void MicroAssembler::clz(Register rd, Register rs1) {
1466 ASSERT(Supports(RV_Zbb));
1467 EmitRType(COUNT, 0b00000, rs1, F3_COUNT, rd, OPIMM);
1468}
1469
1470void MicroAssembler::clzw(Register rd, Register rs1) {
1471 ASSERT(Supports(RV_Zbb));
1472 EmitRType(COUNT, 0b00000, rs1, F3_COUNT, rd, OPIMM32);
1473}
1474
1475void MicroAssembler::ctz(Register rd, Register rs1) {
1476 ASSERT(Supports(RV_Zbb));
1477 EmitRType(COUNT, 0b00001, rs1, F3_COUNT, rd, OPIMM);
1478}
1479
1480void MicroAssembler::ctzw(Register rd, Register rs1) {
1481 ASSERT(Supports(RV_Zbb));
1482 EmitRType(COUNT, 0b00001, rs1, F3_COUNT, rd, OPIMM32);
1483}
1484
1485void MicroAssembler::cpop(Register rd, Register rs1) {
1486 ASSERT(Supports(RV_Zbb));
1487 EmitRType(COUNT, 0b00010, rs1, F3_COUNT, rd, OPIMM);
1488}
1489
1490void MicroAssembler::cpopw(Register rd, Register rs1) {
1491 ASSERT(Supports(RV_Zbb));
1492 EmitRType(COUNT, 0b00010, rs1, F3_COUNT, rd, OPIMM32);
1493}
1494
1495void MicroAssembler::max(Register rd, Register rs1, Register rs2) {
1496 ASSERT(Supports(RV_Zbb));
1497 EmitRType(MINMAXCLMUL, rs2, rs1, MAX, rd, OP);
1498}
1499
1500void MicroAssembler::maxu(Register rd, Register rs1, Register rs2) {
1501 ASSERT(Supports(RV_Zbb));
1502 EmitRType(MINMAXCLMUL, rs2, rs1, MAXU, rd, OP);
1503}
1504
1505void MicroAssembler::min(Register rd, Register rs1, Register rs2) {
1506 ASSERT(Supports(RV_Zbb));
1507 EmitRType(MINMAXCLMUL, rs2, rs1, MIN, rd, OP);
1508}
1509
1510void MicroAssembler::minu(Register rd, Register rs1, Register rs2) {
1511 ASSERT(Supports(RV_Zbb));
1512 EmitRType(MINMAXCLMUL, rs2, rs1, MINU, rd, OP);
1513}
1514
1515void MicroAssembler::sextb(Register rd, Register rs1) {
1516 ASSERT(Supports(RV_Zbb));
1517 EmitRType((Funct7)0b0110000, 0b00100, rs1, SEXT, rd, OPIMM);
1518}
1519
1520void MicroAssembler::sexth(Register rd, Register rs1) {
1521 ASSERT(Supports(RV_Zbb));
1522 EmitRType((Funct7)0b0110000, 0b00101, rs1, SEXT, rd, OPIMM);
1523}
1524
1525void MicroAssembler::zexth(Register rd, Register rs1) {
1526 ASSERT(Supports(RV_Zbb));
1527#if XLEN == 32
1528 EmitRType((Funct7)0b0000100, 0b00000, rs1, ZEXT, rd, OP);
1529#elif XLEN == 64
1530 EmitRType((Funct7)0b0000100, 0b00000, rs1, ZEXT, rd, OP32);
1531#else
1532 UNIMPLEMENTED();
1533#endif
1534}
1535
1536void MicroAssembler::rol(Register rd, Register rs1, Register rs2) {
1537 ASSERT(Supports(RV_Zbb));
1538 EmitRType(ROTATE, rs2, rs1, ROL, rd, OP);
1539}
1540
1541void MicroAssembler::rolw(Register rd, Register rs1, Register rs2) {
1542 ASSERT(Supports(RV_Zbb));
1543 EmitRType(ROTATE, rs2, rs1, ROL, rd, OP32);
1544}
1545
1546void MicroAssembler::ror(Register rd, Register rs1, Register rs2) {
1547 ASSERT(Supports(RV_Zbb));
1548 EmitRType(ROTATE, rs2, rs1, ROR, rd, OP);
1549}
1550
1551void MicroAssembler::rori(Register rd, Register rs1, intx_t shamt) {
1552 ASSERT(Supports(RV_Zbb));
1553 EmitRType(ROTATE, shamt, rs1, ROR, rd, OPIMM);
1554}
1555
1556void MicroAssembler::roriw(Register rd, Register rs1, intx_t shamt) {
1557 ASSERT(Supports(RV_Zbb));
1558 EmitRType(ROTATE, shamt, rs1, ROR, rd, OPIMM32);
1559}
1560
1561void MicroAssembler::rorw(Register rd, Register rs1, Register rs2) {
1562 ASSERT(Supports(RV_Zbb));
1563 EmitRType(ROTATE, rs2, rs1, ROR, rd, OP32);
1564}
1565
1566void MicroAssembler::orcb(Register rd, Register rs1) {
1567 ASSERT(Supports(RV_Zbb));
1568 EmitRType((Funct7)0b0010100, 0b00111, rs1, (Funct3)0b101, rd, OPIMM);
1569}
1570
1571void MicroAssembler::rev8(Register rd, Register rs1) {
1572 ASSERT(Supports(RV_Zbb));
1573#if XLEN == 32
1574 EmitRType((Funct7)0b0110100, 0b11000, rs1, (Funct3)0b101, rd, OPIMM);
1575#elif XLEN == 64
1576 EmitRType((Funct7)0b0110101, 0b11000, rs1, (Funct3)0b101, rd, OPIMM);
1577#else
1578 UNIMPLEMENTED();
1579#endif
1580}
1581
1582void MicroAssembler::clmul(Register rd, Register rs1, Register rs2) {
1583 ASSERT(Supports(RV_Zbc));
1584 EmitRType(MINMAXCLMUL, rs2, rs1, CLMUL, rd, OP);
1585}
1586
1587void MicroAssembler::clmulh(Register rd, Register rs1, Register rs2) {
1588 ASSERT(Supports(RV_Zbc));
1589 EmitRType(MINMAXCLMUL, rs2, rs1, CLMULH, rd, OP);
1590}
1591
1592void MicroAssembler::clmulr(Register rd, Register rs1, Register rs2) {
1593 ASSERT(Supports(RV_Zbc));
1594 EmitRType(MINMAXCLMUL, rs2, rs1, CLMULR, rd, OP);
1595}
1596
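// Zbs single-bit instructions: clear (bclr), extract (bext), invert (binv),
// and set (bset) the bit of rs1 selected by rs2 or by an immediate shamt.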
1597void MicroAssembler::bclr(Register rd, Register rs1, Register rs2) {
1598 ASSERT(Supports(RV_Zbs));
1599 EmitRType(BCLRBEXT, rs2, rs1, BCLR, rd, OP);
1600}
1601
1602void MicroAssembler::bclri(Register rd, Register rs1, intx_t shamt) {
1603 ASSERT(Supports(RV_Zbs));
1604 EmitRType(BCLRBEXT, shamt, rs1, BCLR, rd, OPIMM);
1605}
1606
1607void MicroAssembler::bext(Register rd, Register rs1, Register rs2) {
1608 ASSERT(Supports(RV_Zbs));
1609 EmitRType(BCLRBEXT, rs2, rs1, BEXT, rd, OP);
1610}
1611
1612void MicroAssembler::bexti(Register rd, Register rs1, intx_t shamt) {
1613 ASSERT(Supports(RV_Zbs));
1614 EmitRType(BCLRBEXT, shamt, rs1, BEXT, rd, OPIMM);
1615}
1616
1617void MicroAssembler::binv(Register rd, Register rs1, Register rs2) {
1618 ASSERT(Supports(RV_Zbs));
1619 EmitRType(BINV, rs2, rs1, F3_BINV, rd, OP);
1620}
1621
1622void MicroAssembler::binvi(Register rd, Register rs1, intx_t shamt) {
1623 ASSERT(Supports(RV_Zbs));
1624 EmitRType(BINV, shamt, rs1, F3_BINV, rd, OPIMM);
1625}
1626
1627void MicroAssembler::bset(Register rd, Register rs1, Register rs2) {
1628 ASSERT(Supports(RV_Zbs));
1629 EmitRType(BSET, rs2, rs1, F3_BSET, rd, OP);
1630}
1631
1632void MicroAssembler::bseti(Register rd, Register rs1, intx_t shamt) {
1633 ASSERT(Supports(RV_Zbs));
1634 EmitRType(BSET, shamt, rs1, F3_BSET, rd, OPIMM);
1635}
1636
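// Zalasr load-acquire / store-release instructions. Loads require acquire
// (or acq_rel) ordering and stores require release (or acq_rel); relaxed
// accesses should use the plain load/store emitters above.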
1637void MicroAssembler::lb(Register rd, Address addr, std::memory_order order) {
1638 ASSERT(addr.offset() == 0);
1639 ASSERT((order == std::memory_order_acquire) ||
1640 (order == std::memory_order_acq_rel));
1641 ASSERT(Supports(RV_Zalasr));
1642 EmitRType(LOADORDERED, order, ZR, addr.base(), WIDTH8, rd, AMO);
1643}
1644
1645void MicroAssembler::lh(Register rd, Address addr, std::memory_order order) {
1646 ASSERT(addr.offset() == 0);
1647 ASSERT((order == std::memory_order_acquire) ||
1648 (order == std::memory_order_acq_rel));
1649 ASSERT(Supports(RV_Zalasr));
1650 EmitRType(LOADORDERED, order, ZR, addr.base(), WIDTH16, rd, AMO);
1651}
1652
1653void MicroAssembler::lw(Register rd, Address addr, std::memory_order order) {
1654 ASSERT(addr.offset() == 0);
1655 ASSERT((order == std::memory_order_acquire) ||
1656 (order == std::memory_order_acq_rel));
1657 ASSERT(Supports(RV_Zalasr));
1658 EmitRType(LOADORDERED, order, ZR, addr.base(), WIDTH32, rd, AMO);
1659}
1660
1661void MicroAssembler::sb(Register rs2, Address addr, std::memory_order order) {
1662 ASSERT(addr.offset() == 0);
1663 ASSERT((order == std::memory_order_release) ||
1664 (order == std::memory_order_acq_rel));
1665 ASSERT(Supports(RV_Zalasr));
1666 EmitRType(STOREORDERED, order, rs2, addr.base(), WIDTH8, ZR, AMO);
1667}
1668
1669void MicroAssembler::sh(Register rs2, Address addr, std::memory_order order) {
1670 ASSERT(addr.offset() == 0);
1671 ASSERT((order == std::memory_order_release) ||
1672 (order == std::memory_order_acq_rel));
1673 ASSERT(Supports(RV_Zalasr));
1674 EmitRType(STOREORDERED, order, rs2, addr.base(), WIDTH16, ZR, AMO);
1675}
1676
1677void MicroAssembler::sw(Register rs2, Address addr, std::memory_order order) {
1678 ASSERT(addr.offset() == 0);
1679 ASSERT((order == std::memory_order_release) ||
1680 (order == std::memory_order_acq_rel));
1681 ASSERT(Supports(RV_Zalasr));
1682 EmitRType(STOREORDERED, order, rs2, addr.base(), WIDTH32, ZR, AMO);
1683}
1684
1685#if XLEN >= 64
1686void MicroAssembler::ld(Register rd, Address addr, std::memory_order order) {
1687 ASSERT(addr.offset() == 0);
1688 ASSERT((order == std::memory_order_acquire) ||
1689 (order == std::memory_order_acq_rel));
1690 ASSERT(Supports(RV_Zalasr));
1691 EmitRType(LOADORDERED, order, ZR, addr.base(), WIDTH64, rd, AMO);
1692}
1693
1694void MicroAssembler::sd(Register rs2, Address addr, std::memory_order order) {
1695 ASSERT(addr.offset() == 0);
1696 ASSERT((order == std::memory_order_release) ||
1697 (order == std::memory_order_acq_rel));
1698 ASSERT(Supports(RV_Zalasr));
1699 EmitRType(STOREORDERED, order, rs2, addr.base(), WIDTH64, ZR, AMO);
1700}
1701#endif
1702
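// Raw 16-bit compressed emitters. Unlike the auto-compressing wrappers
// above, these perform no fallback to 32-bit encodings, so callers must
// already satisfy each form's register-class and immediate constraints.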
1703void MicroAssembler::c_lwsp(Register rd, Address addr) {
1704 ASSERT(rd != ZR);
1705 ASSERT(addr.base() == SP);
1706 ASSERT(Supports(RV_C));
1707 Emit16(C_LWSP | EncodeCRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
1708}
1709
1710#if XLEN == 32
1711void MicroAssembler::c_flwsp(FRegister rd, Address addr) {
1712 ASSERT(addr.base() == SP);
1713 ASSERT(Supports(RV_C));
1714 ASSERT(Supports(RV_F));
1715 Emit16(C_FLWSP | EncodeCFRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
1716}
1717#else
1718void MicroAssembler::c_ldsp(Register rd, Address addr) {
1719 ASSERT(rd != ZR);
1720 ASSERT(addr.base() == SP);
1721 ASSERT(Supports(RV_C));
1722 Emit16(C_LDSP | EncodeCRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
1723}
1724#endif
1725
1726void MicroAssembler::c_fldsp(FRegister rd, Address addr) {
1727 ASSERT(addr.base() == SP);
1728 ASSERT(Supports(RV_C));
1729 ASSERT(Supports(RV_D));
1730 Emit16(C_FLDSP | EncodeCFRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
1731}
1732
1733void MicroAssembler::c_swsp(Register rs2, Address addr) {
1734 ASSERT(addr.base() == SP);
1735 ASSERT(Supports(RV_C));
1736 Emit16(C_SWSP | EncodeCRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
1737}
1738
1739#if XLEN == 32
1740void MicroAssembler::c_fswsp(FRegister rs2, Address addr) {
1741 ASSERT(addr.base() == SP);
1742 ASSERT(Supports(RV_C));
1743 ASSERT(Supports(RV_F));
1744 Emit16(C_FSWSP | EncodeCFRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
1745}
1746#else
1747void MicroAssembler::c_sdsp(Register rs2, Address addr) {
1748 ASSERT(addr.base() == SP);
1749 ASSERT(Supports(RV_C));
1750 Emit16(C_SDSP | EncodeCRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
1751}
1752#endif
1753void MicroAssembler::c_fsdsp(FRegister rs2, Address addr) {
1754 ASSERT(addr.base() == SP);
1755 ASSERT(Supports(RV_C));
1756 ASSERT(Supports(RV_D));
1757 Emit16(C_FSDSP | EncodeCFRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
1758}
1759
1760void MicroAssembler::c_lw(Register rd, Address addr) {
1761 ASSERT(Supports(RV_C));
1762 Emit16(C_LW | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
1763 EncodeCMem4Imm(addr.offset()));
1764}
1765
1766void MicroAssembler::c_ld(Register rd, Address addr) {
1767 ASSERT(Supports(RV_C));
1768 Emit16(C_LD | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
1769 EncodeCMem8Imm(addr.offset()));
1770}
1771
1772void MicroAssembler::c_flw(FRegister rd, Address addr) {
1773 ASSERT(Supports(RV_C));
1774 ASSERT(Supports(RV_F));
1775 Emit16(C_FLW | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
1776 EncodeCMem4Imm(addr.offset()));
1777}
1778
1779void MicroAssembler::c_fld(FRegister rd, Address addr) {
1780 ASSERT(Supports(RV_C));
1781 ASSERT(Supports(RV_D));
1782 Emit16(C_FLD | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
1783 EncodeCMem8Imm(addr.offset()));
1784}
1785
1786void MicroAssembler::c_sw(Register rs2, Address addr) {
1787 ASSERT(Supports(RV_C));
1788 Emit16(C_SW | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
1789 EncodeCMem4Imm(addr.offset()));
1790}
1791
1792void MicroAssembler::c_sd(Register rs2, Address addr) {
1793 ASSERT(Supports(RV_C));
1794 Emit16(C_SD | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
1795 EncodeCMem8Imm(addr.offset()));
1796}
1797
1798void MicroAssembler::c_fsw(FRegister rs2, Address addr) {
1799 ASSERT(Supports(RV_C));
1800 ASSERT(Supports(RV_F));
1801 Emit16(C_FSW | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
1802 EncodeCMem4Imm(addr.offset()));
1803}
1804
1805void MicroAssembler::c_fsd(FRegister rs2, Address addr) {
1806 ASSERT(Supports(RV_C));
1807 ASSERT(Supports(RV_D));
1808 Emit16(C_FSD | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
1809 EncodeCMem8Imm(addr.offset()));
1810}
1811
1812void MicroAssembler::c_j(Label* label) {
1813 ASSERT(Supports(RV_C));
1814 EmitCJump(label, C_J);
1815}
1816
1817#if XLEN == 32
1818void MicroAssembler::c_jal(Label* label) {
1819 ASSERT(Supports(RV_C));
1820 EmitCJump(label, C_JAL);
1821}
1822#endif // XLEN == 32
1823
1824void MicroAssembler::c_jr(Register rs1) {
1825 ASSERT(Supports(RV_C));
1826 ASSERT(rs1 != ZR);
1827 Emit16(C_JR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
1828}
1829
1830void MicroAssembler::c_jalr(Register rs1) {
1831 ASSERT(Supports(RV_C));
1832 Emit16(C_JALR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
1833}
1834
1835void MicroAssembler::c_beqz(Register rs1p, Label* label) {
1836 ASSERT(Supports(RV_C));
1837 EmitCBranch(rs1p, label, C_BEQZ);
1838}
1839
1840void MicroAssembler::c_bnez(Register rs1p, Label* label) {
1841 ASSERT(Supports(RV_C));
1842 EmitCBranch(rs1p, label, C_BNEZ);
1843}
1844
1845void MicroAssembler::c_li(Register rd, intptr_t imm) {
1846 ASSERT(Supports(RV_C));
1847 ASSERT(rd != ZR);
1848 Emit16(C_LI | EncodeCRd(rd) | EncodeCIImm(imm));
1849}
1850
1851void MicroAssembler::c_lui(Register rd, uintptr_t imm) {
1852 ASSERT(Supports(RV_C));
1853 ASSERT(rd != ZR);
1854 ASSERT(rd != SP);
1855 Emit16(C_LUI | EncodeCRd(rd) | EncodeCUImm(imm));
1856}
1857
1858void MicroAssembler::c_addi(Register rd, Register rs1, intptr_t imm) {
1859 ASSERT(Supports(RV_C));
1860 ASSERT(imm != 0);
1861 ASSERT(rd == rs1);
1862 Emit16(C_ADDI | EncodeCRd(rd) | EncodeCIImm(imm));
1863}
1864
1865#if XLEN >= 64
1866void MicroAssembler::c_addiw(Register rd, Register rs1, intptr_t imm) {
1867 ASSERT(Supports(RV_C));
1868 ASSERT(rd == rs1);
1869 Emit16(C_ADDIW | EncodeCRd(rd) | EncodeCIImm(imm));
1870}
1871#endif
1872void MicroAssembler::c_addi16sp(Register rd, Register rs1, intptr_t imm) {
1873 ASSERT(Supports(RV_C));
1874 ASSERT(rd == rs1);
1875 Emit16(C_ADDI16SP | EncodeCRd(rd) | EncodeCI16Imm(imm));
1876}
1877
1878void MicroAssembler::c_addi4spn(Register rdp, Register rs1, intptr_t imm) {
1879 ASSERT(Supports(RV_C));
1880 ASSERT(rs1 == SP);
1881 ASSERT(imm != 0);
1882 Emit16(C_ADDI4SPN | EncodeCRdp(rdp) | EncodeCI4SPNImm(imm));
1883}
1884
1885void MicroAssembler::c_slli(Register rd, Register rs1, intptr_t imm) {
1886 ASSERT(Supports(RV_C));
1887 ASSERT(rd == rs1);
1888 ASSERT(imm != 0);
1889 Emit16(C_SLLI | EncodeCRd(rd) | EncodeCIImm(imm));
1890}
1891
1892void MicroAssembler::c_srli(Register rd, Register rs1, intptr_t imm) {
1893 ASSERT(Supports(RV_C));
1894 ASSERT(rd == rs1);
1895 ASSERT(imm != 0);
1896 Emit16(C_SRLI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1897}
1898
1899void MicroAssembler::c_srai(Register rd, Register rs1, intptr_t imm) {
1900 ASSERT(Supports(RV_C));
1901 ASSERT(rd == rs1);
1902 ASSERT(imm != 0);
1903 Emit16(C_SRAI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1904}
1905
1906void MicroAssembler::c_andi(Register rd, Register rs1, intptr_t imm) {
1907 ASSERT(Supports(RV_C));
1908 ASSERT(rd == rs1);
1909 Emit16(C_ANDI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1910}
1911
1912void MicroAssembler::c_mv(Register rd, Register rs2) {
1913 ASSERT(Supports(RV_C));
1914 ASSERT(rd != ZR);
1915 ASSERT(rs2 != ZR);
1916 Emit16(C_MV | EncodeCRd(rd) | EncodeCRs2(rs2));
1917}
1918
1919void MicroAssembler::c_add(Register rd, Register rs1, Register rs2) {
1920 ASSERT(Supports(RV_C));
1921 ASSERT(rd != ZR);
1922 ASSERT(rd == rs1);
1923 ASSERT(rs2 != ZR);
1924 Emit16(C_ADD | EncodeCRd(rd) | EncodeCRs2(rs2));
1925}
1926
1927void MicroAssembler::c_and(Register rd, Register rs1, Register rs2) {
1928 ASSERT(Supports(RV_C));
1929 ASSERT(rd == rs1);
1930 Emit16(C_AND | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1931}
1932
1933void MicroAssembler::c_or(Register rd, Register rs1, Register rs2) {
1934 ASSERT(Supports(RV_C));
1935 Emit16(C_OR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1936}
1937
1938void MicroAssembler::c_xor(Register rd, Register rs1, Register rs2) {
1939 ASSERT(Supports(RV_C));
1940 Emit16(C_XOR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1941}
1942
1943void MicroAssembler::c_sub(Register rd, Register rs1, Register rs2) {
1944 ASSERT(Supports(RV_C));
1945 Emit16(C_SUB | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1946}
1947
1948#if XLEN >= 64
1949void MicroAssembler::c_addw(Register rd, Register rs1, Register rs2) {
1950 ASSERT(Supports(RV_C));
1951 Emit16(C_ADDW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1952}
1953
1954void MicroAssembler::c_subw(Register rd, Register rs1, Register rs2) {
1955 ASSERT(Supports(RV_C));
1956 Emit16(C_SUBW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1957}
1958#endif // XLEN >= 64
1959
1960void MicroAssembler::c_nop() {
1961 ASSERT(Supports(RV_C));
1962 Emit16(C_NOP);
1963}
1964
1965void MicroAssembler::c_ebreak() {
1966 ASSERT(Supports(RV_C));
1967 Emit16(C_EBREAK);
1968}
1969
1970static Funct3 InvertFunct3(Funct3 func) {
1971 switch (func) {
1972 case BEQ:
1973 return BNE;
1974 case BNE:
1975 return BEQ;
1976 case BGE:
1977 return BLT;
1978 case BGEU:
1979 return BLTU;
1980 case BLT:
1981 return BGE;
1982 case BLTU:
1983 return BGEU;
1984 default:
1985 UNREACHABLE();
1986 }
1987}
1988
1989void MicroAssembler::EmitBranch(Register rs1,
1990 Register rs2,
1991 Label* label,
1992 Funct3 func,
1993 JumpDistance distance) {
1994 intptr_t offset;
1995 if (label->IsBound()) {
1996 // Backward branch: use near or far branch based on actual distance.
1997 offset = label->Position() - Position();
1998 if (IsBTypeImm(offset)) {
1999 EmitBType(offset, rs2, rs1, func, BRANCH);
2000 return;
2001 }
2002
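 // Far branch, 8-byte form: invert the condition so a taken branch skips
 // an unconditional JAL, which has the wider (+/-1 MB) J-type range.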
2003 if (IsJTypeImm(offset + 4)) {
2004 intptr_t start = Position();
2005 const intptr_t kFarBranchLength = 8;
2006 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
2007 offset = label->Position() - Position();
2008 EmitJType(offset, ZR, JAL);
2009 intptr_t end = Position();
2010 ASSERT_EQUAL(end - start, kFarBranchLength);
2011 return;
2012 }
2013
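 // Far branch, 12-byte form: branch over an AUIPC+JALR pair, which can
 // reach any offset within +/-2 GB of the current position.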
2014 intptr_t start = Position();
2015 const intptr_t kFarBranchLength = 12;
2016 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
2017 offset = label->Position() - Position();
2018 intx_t lo = ImmLo(offset);
2019 intx_t hi = ImmHi(offset);
2020 if (!IsUTypeImm(hi)) {
2021 FATAL("Branch distance exceeds 2GB!");
2022 }
2023 EmitUType(hi, FAR_TMP, AUIPC);
2024 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2025 intptr_t end = Position();
2026 ASSERT_EQUAL(end - start, kFarBranchLength);
2027 return;
2028 } else {
2029 // Forward branch: speculatively use near branches and re-assemble with far
2030 // branches if any need greater length.
2031 if (distance == kNearJump) {
2032 offset = label->link_b(Position());
2033 if (!IsBTypeImm(offset)) {
2034 FATAL("Incorrect Assembler::kNearJump");
2035 }
2036 EmitBType(offset, rs2, rs1, func, BRANCH);
2037 } else if (far_branch_level() == 0) {
2038 offset = label->link_b(Position());
2039 if (!IsBTypeImm(offset)) {
2040 // TODO(riscv): This isn't so much because the branch is out of range
2041 // as some previous jump to the same target would be out of B-type
2042 // range... A possible alternative is to have separate lists on Labels
2043 // for pending B-type and J-type instructions.
2045 }
2046 EmitBType(offset, rs2, rs1, func, BRANCH);
2047 } else if (far_branch_level() == 1) {
2048 intptr_t start = Position();
2049 const intptr_t kFarBranchLength = 8;
2050 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
2051 offset = label->link_j(Position());
2052 EmitJType(offset, ZR, JAL);
2053 intptr_t end = Position();
2054 ASSERT_EQUAL(end - start, kFarBranchLength);
2055 } else {
2056 intptr_t start = Position();
2057 const intptr_t kFarBranchLength = 12;
2058 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
2059 offset = label->link_far(Position());
2060 intx_t lo = ImmLo(offset);
2061 intx_t hi = ImmHi(offset);
2062 if (!IsUTypeImm(hi)) {
2063 FATAL("Branch distance exceeds 2GB!");
2064 }
2065 EmitUType(hi, FAR_TMP, AUIPC);
2066 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2067 intptr_t end = Position();
2068 ASSERT_EQUAL(end - start, kFarBranchLength);
2069 }
2070 }
2071}
2072
2073void MicroAssembler::EmitJump(Register rd,
2074 Label* label,
2075 Opcode op,
2076 JumpDistance distance) {
2077 intptr_t offset;
2078 if (label->IsBound()) {
2079 // Backward jump: use near or far jump based on actual distance.
2080 offset = label->Position() - Position();
2081
2082 if (IsJTypeImm(offset)) {
2083 EmitJType(offset, rd, JAL);
2084 return;
2085 }
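 // Beyond J-type range: materialize the upper bits with AUIPC and jump
 // with JALR. ImmLo is sign-extended by JALR, so ImmHi compensates.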
2086 intx_t lo = ImmLo(offset);
2087 intx_t hi = ImmHi(offset);
2088 if (!IsUTypeImm(hi)) {
2089 FATAL("Jump distance exceeds 2GB!");
2090 }
2091 EmitUType(hi, FAR_TMP, AUIPC);
2092 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2093 return;
2094 } else {
2095 // Forward jump: speculatively use near jumps and re-assemble with far
2096 // jumps if any need greater length.
2097 if (distance == kNearJump) {
2098 offset = label->link_j(Position());
2099 if (!IsJTypeImm(offset)) {
2100 FATAL("Incorrect Assembler::kNearJump");
2101 }
2102 EmitJType(offset, rd, JAL);
2103 } else if (far_branch_level() < 2) {
2104 offset = label->link_j(Position());
2105 if (!IsJTypeImm(offset)) {
2106 BailoutWithBranchOffsetError();
2107 }
2108 EmitJType(offset, rd, JAL);
2109 } else {
2110 offset = label->link_far(Position());
2111 intx_t lo = ImmLo(offset);
2112 intx_t hi = ImmHi(offset);
2113 if (!IsUTypeImm(hi)) {
2114 FATAL("Jump distance exceeds 2GB!");
2115 }
2116 EmitUType(hi, FAR_TMP, AUIPC);
2117 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2118 }
2119 }
2120}
2121
2122void MicroAssembler::EmitCBranch(Register rs1p, Label* label, COpcode op) {
2123 intptr_t offset;
2124 if (label->IsBound()) {
2125 offset = label->Position() - Position();
2126 } else {
2127 offset = label->link_cb(Position());
2128 }
2129 if (!IsCBImm(offset)) {
2130 FATAL("Incorrect Assembler::kNearJump");
2131 }
2132 Emit16(op | EncodeCRs1p(rs1p) | EncodeCBImm(offset));
2133}
2134
2135void MicroAssembler::EmitCJump(Label* label, COpcode op) {
2136 intptr_t offset;
2137 if (label->IsBound()) {
2138 offset = label->Position() - Position();
2139 } else {
2140 offset = label->link_cj(Position());
2141 }
2142 if (!IsCJImm(offset)) {
2143 FATAL("Incorrect Assembler::kNearJump");
2144 }
2145 Emit16(op | EncodeCJImm(offset));
2146}
2147
2148void MicroAssembler::EmitRType(Funct5 funct5,
2149 std::memory_order order,
2150 Register rs2,
2151 Register rs1,
2152 Funct3 funct3,
2153 Register rd,
2154 Opcode opcode) {
2155 intptr_t funct7 = funct5 << 2;
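 // For atomic memory operations, the two low bits of funct7 are the
 // aq (acquire) and rl (release) ordering bits.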
2156 switch (order) {
2157 case std::memory_order_acq_rel:
2158 funct7 |= 0b11;
2159 break;
2160 case std::memory_order_acquire:
2161 funct7 |= 0b10;
2162 break;
2163 case std::memory_order_release:
2164 funct7 |= 0b01;
2165 break;
2166 case std::memory_order_relaxed:
2167 funct7 |= 0b00;
2168 break;
2169 default:
2170 FATAL("Invalid memory order");
2171 }
2172 EmitRType((Funct7)funct7, rs2, rs1, funct3, rd, opcode);
2173}
2174
2175void MicroAssembler::EmitRType(Funct7 funct7,
2176 Register rs2,
2177 Register rs1,
2178 Funct3 funct3,
2179 Register rd,
2180 Opcode opcode) {
2181 uint32_t e = 0;
2182 e |= EncodeFunct7(funct7);
2183 e |= EncodeRs2(rs2);
2184 e |= EncodeRs1(rs1);
2185 e |= EncodeFunct3(funct3);
2186 e |= EncodeRd(rd);
2187 e |= EncodeOpcode(opcode);
2188 Emit32(e);
2189}
2190
2191void MicroAssembler::EmitRType(Funct7 funct7,
2192 FRegister rs2,
2193 FRegister rs1,
2194 Funct3 funct3,
2195 FRegister rd,
2196 Opcode opcode) {
2197 uint32_t e = 0;
2198 e |= EncodeFunct7(funct7);
2199 e |= EncodeFRs2(rs2);
2200 e |= EncodeFRs1(rs1);
2201 e |= EncodeFunct3(funct3);
2202 e |= EncodeFRd(rd);
2203 e |= EncodeOpcode(opcode);
2204 Emit32(e);
2205}
2206
2207void MicroAssembler::EmitRType(Funct7 funct7,
2208 FRegister rs2,
2209 FRegister rs1,
2210 RoundingMode round,
2211 FRegister rd,
2212 Opcode opcode) {
2213 uint32_t e = 0;
2214 e |= EncodeFunct7(funct7);
2215 e |= EncodeFRs2(rs2);
2216 e |= EncodeFRs1(rs1);
2217 e |= EncodeRoundingMode(round);
2218 e |= EncodeFRd(rd);
2219 e |= EncodeOpcode(opcode);
2220 Emit32(e);
2221}
2222
2223void MicroAssembler::EmitRType(Funct7 funct7,
2224 FRegister rs2,
2225 Register rs1,
2226 RoundingMode round,
2227 FRegister rd,
2228 Opcode opcode) {
2229 uint32_t e = 0;
2230 e |= EncodeFunct7(funct7);
2231 e |= EncodeFRs2(rs2);
2232 e |= EncodeRs1(rs1);
2233 e |= EncodeRoundingMode(round);
2234 e |= EncodeFRd(rd);
2235 e |= EncodeOpcode(opcode);
2236 Emit32(e);
2237}
2238
2239void MicroAssembler::EmitRType(Funct7 funct7,
2240 FRegister rs2,
2241 Register rs1,
2242 Funct3 funct3,
2243 FRegister rd,
2244 Opcode opcode) {
2245 uint32_t e = 0;
2246 e |= EncodeFunct7(funct7);
2247 e |= EncodeFRs2(rs2);
2248 e |= EncodeRs1(rs1);
2249 e |= EncodeFunct3(funct3);
2250 e |= EncodeFRd(rd);
2251 e |= EncodeOpcode(opcode);
2252 Emit32(e);
2253}
2254
2255void MicroAssembler::EmitRType(Funct7 funct7,
2256 FRegister rs2,
2257 FRegister rs1,
2258 Funct3 funct3,
2259 Register rd,
2260 Opcode opcode) {
2261 uint32_t e = 0;
2262 e |= EncodeFunct7(funct7);
2263 e |= EncodeFRs2(rs2);
2264 e |= EncodeFRs1(rs1);
2265 e |= EncodeFunct3(funct3);
2266 e |= EncodeRd(rd);
2267 e |= EncodeOpcode(opcode);
2268 Emit32(e);
2269}
2270
2271void MicroAssembler::EmitRType(Funct7 funct7,
2272 FRegister rs2,
2273 FRegister rs1,
2274 RoundingMode round,
2275 Register rd,
2276 Opcode opcode) {
2277 uint32_t e = 0;
2278 e |= EncodeFunct7(funct7);
2279 e |= EncodeFRs2(rs2);
2280 e |= EncodeFRs1(rs1);
2281 e |= EncodeRoundingMode(round);
2282 e |= EncodeRd(rd);
2283 e |= EncodeOpcode(opcode);
2284 Emit32(e);
2285}
2286
2287void MicroAssembler::EmitRType(Funct7 funct7,
2288 intptr_t shamt,
2289 Register rs1,
2290 Funct3 funct3,
2291 Register rd,
2292 Opcode opcode) {
2293 uint32_t e = 0;
2294 e |= EncodeFunct7(funct7);
2295 e |= EncodeShamt(shamt);
2296 e |= EncodeRs1(rs1);
2297 e |= EncodeFunct3(funct3);
2298 e |= EncodeRd(rd);
2299 e |= EncodeOpcode(opcode);
2300 Emit32(e);
2301}
2302
2303void MicroAssembler::EmitR4Type(FRegister rs3,
2304 Funct2 funct2,
2305 FRegister rs2,
2306 FRegister rs1,
2307 RoundingMode round,
2308 FRegister rd,
2309 Opcode opcode) {
2310 uint32_t e = 0;
2311 e |= EncodeFRs3(rs3);
2312 e |= EncodeFunct2(funct2);
2313 e |= EncodeFRs2(rs2);
2314 e |= EncodeFRs1(rs1);
2315 e |= EncodeRoundingMode(round);
2316 e |= EncodeFRd(rd);
2317 e |= EncodeOpcode(opcode);
2318 Emit32(e);
2319}
2320
2321void MicroAssembler::EmitIType(intptr_t imm,
2322 Register rs1,
2323 Funct3 funct3,
2324 Register rd,
2325 Opcode opcode) {
2326 uint32_t e = 0;
2327 e |= EncodeITypeImm(imm);
2328 e |= EncodeRs1(rs1);
2329 e |= EncodeFunct3(funct3);
2330 e |= EncodeRd(rd);
2331 e |= EncodeOpcode(opcode);
2332 Emit32(e);
2333}
2334
2335void MicroAssembler::EmitIType(intptr_t imm,
2336 Register rs1,
2337 Funct3 funct3,
2338 FRegister rd,
2339 Opcode opcode) {
2340 uint32_t e = 0;
2341 e |= EncodeITypeImm(imm);
2342 e |= EncodeRs1(rs1);
2343 e |= EncodeFunct3(funct3);
2344 e |= EncodeFRd(rd);
2345 e |= EncodeOpcode(opcode);
2346 Emit32(e);
2347}
2348
2349void MicroAssembler::EmitSType(intptr_t imm,
2350 Register rs2,
2351 Register rs1,
2352 Funct3 funct3,
2353 Opcode opcode) {
2354 uint32_t e = 0;
2355 e |= EncodeSTypeImm(imm);
2356 e |= EncodeRs2(rs2);
2357 e |= EncodeRs1(rs1);
2358 e |= EncodeFunct3(funct3);
2359 e |= EncodeOpcode(opcode);
2360 Emit32(e);
2361}
2362
2363void MicroAssembler::EmitSType(intptr_t imm,
2364 FRegister rs2,
2365 Register rs1,
2366 Funct3 funct3,
2367 Opcode opcode) {
2368 uint32_t e = 0;
2369 e |= EncodeSTypeImm(imm);
2370 e |= EncodeFRs2(rs2);
2371 e |= EncodeRs1(rs1);
2372 e |= EncodeFunct3(funct3);
2373 e |= EncodeOpcode(opcode);
2374 Emit32(e);
2375}
2376
2377void MicroAssembler::EmitBType(intptr_t imm,
2378 Register rs2,
2379 Register rs1,
2380 Funct3 funct3,
2381 Opcode opcode) {
2382 uint32_t e = 0;
2383 e |= EncodeBTypeImm(imm);
2384 e |= EncodeRs2(rs2);
2385 e |= EncodeRs1(rs1);
2386 e |= EncodeFunct3(funct3);
2387 e |= EncodeOpcode(opcode);
2388 Emit32(e);
2389}
2390
2391void MicroAssembler::EmitUType(intptr_t imm, Register rd, Opcode opcode) {
2392 uint32_t e = 0;
2393 e |= EncodeUTypeImm(imm);
2394 e |= EncodeRd(rd);
2395 e |= EncodeOpcode(opcode);
2396 Emit32(e);
2397}
2398
2399void MicroAssembler::EmitJType(intptr_t imm, Register rd, Opcode opcode) {
2400 uint32_t e = 0;
2401 e |= EncodeJTypeImm(imm);
2402 e |= EncodeRd(rd);
2403 e |= EncodeOpcode(opcode);
2404 Emit32(e);
2405}
2406
2407Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
2408 intptr_t far_branch_level)
2409 : MicroAssembler(object_pool_builder,
2410 far_branch_level,
2411 FLAG_use_compressed_instructions ? RV_GC : RV_G),
2412 constant_pool_allowed_(false) {
2413 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
2414 // Note this does not destroy RA.
2415 lx(TMP,
2416 Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)));
2417 jalr(TMP, TMP);
2418 };
2419 generate_invoke_array_write_barrier_ = [&]() {
2420 Call(
2421 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
2422 };
2423}
2424
2425void Assembler::PushRegister(Register r) {
2426 ASSERT(r != SP);
2427 subi(SP, SP, target::kWordSize);
2428 sx(r, Address(SP, 0));
2429}
2430void Assembler::PopRegister(Register r) {
2431 ASSERT(r != SP);
2432 lx(r, Address(SP, 0));
2433 addi(SP, SP, target::kWordSize);
2434}
2435
2436void Assembler::PushRegisterPair(Register r0, Register r1) {
2437 ASSERT(r0 != SP);
2438 ASSERT(r1 != SP);
2439 subi(SP, SP, 2 * target::kWordSize);
2440 sx(r1, Address(SP, target::kWordSize));
2441 sx(r0, Address(SP, 0));
2442}
2443
2444void Assembler::PopRegisterPair(Register r0, Register r1) {
2445 ASSERT(r0 != SP);
2446 ASSERT(r1 != SP);
2447 lx(r1, Address(SP, target::kWordSize));
2448 lx(r0, Address(SP, 0));
2449 addi(SP, SP, 2 * target::kWordSize);
2450}
2451
2452void Assembler::PushRegisters(const RegisterSet& regs) {
2453 // The order in which the registers are pushed must match the order
2454 // in which the registers are encoded in the safepoint's stack map.
2455
2456 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2457 (regs.FpuRegisterCount() * kFpuRegisterSize);
2458 if (size == 0) {
2459 return; // Skip no-op SP update.
2460 }
2461
2462 subi(SP, SP, size);
2463 intptr_t offset = size;
2464 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
2465 FRegister reg = static_cast<FRegister>(i);
2466 if (regs.ContainsFpuRegister(reg)) {
2467 offset -= kFpuRegisterSize;
2468 fsd(reg, Address(SP, offset));
2469 }
2470 }
2471 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
2472 Register reg = static_cast<Register>(i);
2473 if (regs.ContainsRegister(reg)) {
2474 offset -= target::kWordSize;
2475 sx(reg, Address(SP, offset));
2476 }
2477 }
2478 ASSERT(offset == 0);
2479}
2480
2481void Assembler::PopRegisters(const RegisterSet& regs) {
2482 // The order in which the registers are pushed must match the order
2483 // in which the registers are encoded in the safepoint's stack map.
2484
2485 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2486 (regs.FpuRegisterCount() * kFpuRegisterSize);
2487 if (size == 0) {
2488 return; // Skip no-op SP update.
2489 }
2490 intptr_t offset = 0;
2491 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2492 Register reg = static_cast<Register>(i);
2493 if (regs.ContainsRegister(reg)) {
2494 lx(reg, Address(SP, offset));
2495 offset += target::kWordSize;
2496 }
2497 }
2498 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2499 FRegister reg = static_cast<FRegister>(i);
2500 if (regs.ContainsFpuRegister(reg)) {
2501 fld(reg, Address(SP, offset));
2502 offset += kFpuRegisterSize;
2503 }
2504 }
2505 ASSERT(offset == size);
2506 addi(SP, SP, size);
2507}
2508
2509void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2510 intptr_t offset = regs.size() * target::kWordSize;
2511 subi(SP, SP, offset);
2512 for (Register reg : regs) {
2513 ASSERT(reg != SP);
2514 offset -= target::kWordSize;
2515 sx(reg, Address(SP, offset));
2516 }
2517}
2518
2519void Assembler::PushNativeCalleeSavedRegisters() {
2520 RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
2521 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2522 (regs.FpuRegisterCount() * sizeof(double));
2523 subi(SP, SP, size);
2524 intptr_t offset = 0;
2525 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2526 FRegister reg = static_cast<FRegister>(i);
2527 if (regs.ContainsFpuRegister(reg)) {
2528 fsd(reg, Address(SP, offset));
2529 offset += sizeof(double);
2530 }
2531 }
2532 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2533 Register reg = static_cast<Register>(i);
2534 if (regs.ContainsRegister(reg)) {
2535 sx(reg, Address(SP, offset));
2536 offset += target::kWordSize;
2537 }
2538 }
2539 ASSERT(offset == size);
2540}
2541
2542void Assembler::PopNativeCalleeSavedRegisters() {
2543 RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
2544 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2545 (regs.FpuRegisterCount() * sizeof(double));
2546 intptr_t offset = 0;
2547 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2548 FRegister reg = static_cast<FRegister>(i);
2549 if (regs.ContainsFpuRegister(reg)) {
2550 fld(reg, Address(SP, offset));
2551 offset += sizeof(double);
2552 }
2553 }
2554 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2555 Register reg = static_cast<Register>(i);
2556 if (regs.ContainsRegister(reg)) {
2557 lx(reg, Address(SP, offset));
2558 offset += target::kWordSize;
2559 }
2560 }
2561 ASSERT(offset == size);
2562 addi(SP, SP, size);
2563}
2564
2565void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
2566 switch (sz) {
2567#if XLEN == 64
2568 case kEightBytes:
2569 if (rd == rn) return; // No operation needed.
2570 return mv(rd, rn);
2571 case kUnsignedFourBytes:
2572 return UNIMPLEMENTED();
2573 case kFourBytes:
2574 return sextw(rd, rn);
2575#elif XLEN == 32
2576 case kUnsignedFourBytes:
2577 case kFourBytes:
2578 if (rd == rn) return; // No operation needed.
2579 return mv(rd, rn);
2580#endif
2581 case kUnsignedTwoBytes:
2582 case kTwoBytes:
2583 case kUnsignedByte:
2584 case kByte:
2585 default:
2586 UNIMPLEMENTED();
2587 break;
2588 }
2589 UNIMPLEMENTED();
2590}
2591void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
2592 if (sz == kWordBytes) {
2593 SmiTag(rd, rn);
2594 return;
2595 }
2596
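 // Fuse the extension with Smi tagging: shift the value's top bit up to
 // bit XLEN-1, then shift back down leaving kSmiTagShift low zero bits.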
2597 switch (sz) {
2598#if XLEN == 64
2599 case kUnsignedFourBytes:
2600 slli(rd, rn, XLEN - kBitsPerInt32);
2601 srli(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
2602 return;
2603 case kFourBytes:
2604 slli(rd, rn, XLEN - kBitsPerInt32);
2605 srai(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
2606 return;
2607#endif
2608 case kUnsignedTwoBytes:
2609 slli(rd, rn, XLEN - kBitsPerInt16);
2610 srli(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
2611 return;
2612 case kTwoBytes:
2613 slli(rd, rn, XLEN - kBitsPerInt16);
2614 srai(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
2615 return;
2616 case kUnsignedByte:
2617 slli(rd, rn, XLEN - kBitsPerInt8);
2618 srli(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
2619 return;
2620 case kByte:
2621 slli(rd, rn, XLEN - kBitsPerInt8);
2622 srai(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
2623 return;
2624 default:
2625 UNIMPLEMENTED();
2626 break;
2627 }
2628}
2629
2630// Unconditional jump to a given address in memory. Clobbers TMP.
2631void Assembler::Jump(const Address& address) {
2632 lx(TMP2, address);
2633 jr(TMP2);
2634}
2635
2636void Assembler::TsanLoadAcquire(Register addr) {
2637 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2638 MoveRegister(A0, addr);
2639 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
2640}
2641void Assembler::TsanStoreRelease(Register addr) {
2642 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2643 MoveRegister(A0, addr);
2644 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
2645}
2646
2647void Assembler::LoadAcquire(Register dst,
2648 const Address& address,
2649 OperandSize size) {
2650 ASSERT(dst != address.base());
2651
2652 if (Supports(RV_Zalasr)) {
2653 Address addr = PrepareAtomicOffset(address.base(), address.offset());
2654 switch (size) {
2655#if XLEN == 64
2656 case kEightBytes:
2657 ld(dst, addr, std::memory_order_acquire);
2658 break;
2659#endif
2660 case kFourBytes:
2661 lw(dst, addr, std::memory_order_acquire);
2662 break;
2663 case kTwoBytes:
2664 lh(dst, addr, std::memory_order_acquire);
2665 break;
2666 case kByte:
2667 lb(dst, addr, std::memory_order_acquire);
2668 break;
2669 default:
2670 UNREACHABLE();
2671 }
2672 } else {
2673 Load(dst, address, size);
2674 fence(HartEffects::kRead, HartEffects::kMemory);
2675 }
2676
2677 if (FLAG_target_thread_sanitizer) {
2678 if (address.offset() == 0) {
2679 TsanLoadAcquire(address.base());
2680 } else {
2681 AddImmediate(TMP2, address.base(), address.offset());
2682 TsanLoadAcquire(TMP2);
2683 }
2684 }
2685}
2686
2687void Assembler::StoreRelease(Register src,
2688 const Address& address,
2689 OperandSize size) {
2690 if (Supports(RV_Zalasr)) {
2691 Address addr = PrepareAtomicOffset(address.base(), address.offset());
2692 switch (size) {
2693#if XLEN == 64
2694 case kEightBytes:
2695 sd(src, addr, std::memory_order_release);
2696 break;
2697#endif
2698 case kUnsignedFourBytes:
2699 case kFourBytes:
2700 sw(src, addr, std::memory_order_release);
2701 break;
2702 case kUnsignedTwoBytes:
2703 case kTwoBytes:
2704 sh(src, addr, std::memory_order_release);
2705 break;
2706 case kUnsignedByte:
2707 case kByte:
2708 sb(src, addr, std::memory_order_release);
2709 break;
2710 default:
2711 UNREACHABLE();
2712 }
2713 } else {
2714 fence(HartEffects::kMemory, HartEffects::kWrite);
2715 Store(src, address, size);
2716 }
2717}
2718
2719void Assembler::CompareWithMemoryValue(Register value,
2720 Address address,
2721 OperandSize size) {
2722#if XLEN >= 64
2723 ASSERT(size == kFourBytes || size == kEightBytes);
2724 if (size == kFourBytes) {
2725 lw(TMP2, address);
2726 } else {
2727 ld(TMP2, address);
2728 }
2729#else
2730 ASSERT(size == kFourBytes);
2731 lx(TMP2, address);
2732#endif
2733 CompareRegisters(value, TMP2);
2734}
2735
2736void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
2737 if (frame_space != 0) {
2738 addi(SP, SP, -frame_space);
2739 }
2740 const intptr_t kAbiStackAlignment = 16; // For both 32 and 64 bit.
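 // Masking rounds SP down; the stack grows toward lower addresses, so
 // this only ever reserves extra space.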
2741 andi(SP, SP, ~(kAbiStackAlignment - 1));
2742}
2743
2744// In debug mode, this generates code to check that:
2745// FP + kExitLinkSlotFromEntryFp == SP
2746// and triggers a breakpoint otherwise.
2747void Assembler::EmitEntryFrameVerification() {
2748#if defined(DEBUG)
2749 Label done;
2750 ASSERT(!constant_pool_allowed());
2751 LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
2752 target::kWordSize);
2753 add(TMP, TMP, FPREG);
2754 beq(TMP, SPREG, &done, kNearJump);
2755
2756 Breakpoint();
2757
2758 Bind(&done);
2759#endif
2760}
2761
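// RISC-V has no condition flags. CompareRegisters/CompareImmediate and
// TestRegisters/TestImmediate only record their operands; the following
// BranchIf or SetIf consumes the deferred comparison and fuses it into a
// branch or an slt/sltu-style instruction.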
2762void Assembler::CompareRegisters(Register rn, Register rm) {
2763 ASSERT(deferred_compare_ == kNone);
2764 deferred_compare_ = kCompareReg;
2765 deferred_left_ = rn;
2766 deferred_reg_ = rm;
2767}
2768void Assembler::CompareObjectRegisters(Register rn, Register rm) {
2769 CompareRegisters(rn, rm);
2770}
2771void Assembler::TestRegisters(Register rn, Register rm) {
2772 ASSERT(deferred_compare_ == kNone);
2773 deferred_compare_ = kTestReg;
2774 deferred_left_ = rn;
2775 deferred_reg_ = rm;
2776}
2777
2778void Assembler::BranchIf(Condition condition,
2779 Label* label,
2780 JumpDistance distance) {
2781 ASSERT(deferred_compare_ != kNone);
2782
2783 if (deferred_compare_ == kCompareImm || deferred_compare_ == kCompareReg) {
2784 Register left = deferred_left_;
2785 Register right;
2786 if (deferred_compare_ == kCompareImm) {
2787 if (deferred_imm_ == 0) {
2788 right = ZR;
2789 } else {
2790 LoadImmediate(TMP2, deferred_imm_);
2791 right = TMP2;
2792 }
2793 } else {
2794 right = deferred_reg_;
2795 }
2796 switch (condition) {
2797 case EQUAL:
2798 beq(left, right, label, distance);
2799 break;
2800 case NOT_EQUAL:
2801 bne(left, right, label, distance);
2802 break;
2803 case LESS:
2804 blt(left, right, label, distance);
2805 break;
2806 case LESS_EQUAL:
2807 ble(left, right, label, distance);
2808 break;
2809 case GREATER_EQUAL:
2810 bge(left, right, label, distance);
2811 break;
2812 case GREATER:
2813 bgt(left, right, label, distance);
2814 break;
2815 case UNSIGNED_LESS:
2816 bltu(left, right, label, distance);
2817 break;
2818 case UNSIGNED_LESS_EQUAL:
2819 bleu(left, right, label, distance);
2820 break;
2821 case UNSIGNED_GREATER_EQUAL:
2822 bgeu(left, right, label, distance);
2823 break;
2824 case UNSIGNED_GREATER:
2825 bgtu(left, right, label, distance);
2826 break;
2827 case OVERFLOW:
2828 case NO_OVERFLOW:
2829 FATAL("Use Add/Subtract/MultiplyBranchOverflow instead.");
2830 default:
2831 UNREACHABLE();
2832 }
2833 } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
2834 if (deferred_compare_ == kTestImm) {
2835 AndImmediate(TMP2, deferred_left_, deferred_imm_);
2836 } else {
2837 and_(TMP2, deferred_left_, deferred_reg_);
2838 }
2839 switch (condition) {
2840 case ZERO:
2841 beqz(TMP2, label, distance);
2842 break;
2843 case NOT_ZERO:
2844 bnez(TMP2, label, distance);
2845 break;
2846 default:
2847 UNREACHABLE();
2848 }
2849 } else {
2850 UNREACHABLE();
2851 }
2852 deferred_compare_ = kNone; // Consumed.
2853}
2854
2855void Assembler::SetIf(Condition condition, Register rd) {
2856 ASSERT(deferred_compare_ != kNone);
2857
2858 if (deferred_compare_ == kCompareImm) {
2859 if (deferred_imm_ == 0) {
2860 deferred_compare_ = kCompareReg;
2861 deferred_reg_ = ZR;
2862 SetIf(condition, rd);
2863 return;
2864 }
2865 if (!IsITypeImm(deferred_imm_) || !IsITypeImm(deferred_imm_ + 1)) {
2866 LoadImmediate(TMP2, deferred_imm_);
2867 deferred_compare_ = kCompareReg;
2868 deferred_reg_ = TMP2;
2869 SetIf(condition, rd);
2870 return;
2871 }
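 // Both imm and imm+1 fit in 12 bits here (checked above): LESS_EQUAL,
 // GREATER, and their unsigned forms are encoded as slti(u) against
 // right + 1.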
2872 Register left = deferred_left_;
2873 intx_t right = deferred_imm_;
2874 switch (condition) {
2875 case EQUAL:
2876 subi(rd, left, right);
2877 seqz(rd, rd);
2878 break;
2879 case NOT_EQUAL:
2880 subi(rd, left, right);
2881 snez(rd, rd);
2882 break;
2883 case LESS:
2884 slti(rd, left, right);
2885 break;
2886 case LESS_EQUAL:
2887 slti(rd, left, right + 1);
2888 break;
2889 case GREATER_EQUAL:
2890 slti(rd, left, right);
2891 xori(rd, rd, 1);
2892 break;
2893 case GREATER:
2894 slti(rd, left, right + 1);
2895 xori(rd, rd, 1);
2896 break;
2897 case UNSIGNED_LESS:
2898 sltiu(rd, left, right);
2899 break;
2900 case UNSIGNED_LESS_EQUAL:
2901 sltiu(rd, left, right + 1);
2902 break;
2903 case UNSIGNED_GREATER_EQUAL:
2904 sltiu(rd, left, right);
2905 xori(rd, rd, 1);
2906 break;
2907 case UNSIGNED_GREATER:
2908 sltiu(rd, left, right + 1);
2909 xori(rd, rd, 1);
2910 break;
2911 default:
2912 UNREACHABLE();
2913 }
2914 } else if (deferred_compare_ == kCompareReg) {
2915 Register left = deferred_left_;
2916 Register right = deferred_reg_;
2917 switch (condition) {
2918 case EQUAL:
2919 if (right == ZR) {
2920 seqz(rd, left);
2921 } else {
2922 xor_(rd, left, right);
2923 seqz(rd, rd);
2924 }
2925 break;
2926 case NOT_EQUAL:
2927 if (right == ZR) {
2928 snez(rd, left);
2929 } else {
2930 xor_(rd, left, right);
2931 snez(rd, rd);
2932 }
2933 break;
2934 case LESS:
2935 slt(rd, left, right);
2936 break;
2937 case LESS_EQUAL:
2938 slt(rd, right, left);
2939 xori(rd, rd, 1);
2940 break;
2941 case GREATER_EQUAL:
2942 slt(rd, left, right);
2943 xori(rd, rd, 1);
2944 break;
2945 case GREATER:
2946 slt(rd, right, left);
2947 break;
2948 case UNSIGNED_LESS:
2949 sltu(rd, left, right);
2950 break;
2951 case UNSIGNED_LESS_EQUAL:
2952 sltu(rd, right, left);
2953 xori(rd, rd, 1);
2954 break;
2955 case UNSIGNED_GREATER_EQUAL:
2956 sltu(rd, left, right);
2957 xori(rd, rd, 1);
2958 break;
2959 case UNSIGNED_GREATER:
2960 sltu(rd, right, left);
2961 break;
2962 default:
2963 UNREACHABLE();
2964 }
2965 } else if (deferred_compare_ == kTestImm) {
2966 uintx_t uimm = deferred_imm_;
2967 if (deferred_imm_ == 1) {
2968 switch (condition) {
2969 case ZERO:
2970 andi(rd, deferred_left_, 1);
2971 xori(rd, rd, 1);
2972 break;
2973 case NOT_ZERO:
2974 andi(rd, deferred_left_, 1);
2975 break;
2976 default:
2977 UNREACHABLE();
2978 }
2979 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
2980 switch (condition) {
2981 case ZERO:
2982 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2983 xori(rd, rd, 1);
2984 break;
2985 case NOT_ZERO:
2986 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2987 break;
2988 default:
2989 UNREACHABLE();
2990 }
2991 } else {
2992 AndImmediate(rd, deferred_left_, deferred_imm_);
2993 switch (condition) {
2994 case ZERO:
2995 seqz(rd, rd);
2996 break;
2997 case NOT_ZERO:
2998 snez(rd, rd);
2999 break;
3000 default:
3001 UNREACHABLE();
3002 }
3003 }
3004 } else if (deferred_compare_ == kTestReg) {
3005 and_(rd, deferred_left_, deferred_reg_);
3006 switch (condition) {
3007 case ZERO:
3008 seqz(rd, rd);
3009 break;
3010 case NOT_ZERO:
3011 snez(rd, rd);
3012 break;
3013 default:
3014 UNREACHABLE();
3015 }
3016 } else {
3017 UNREACHABLE();
3018 }
3019
3020 deferred_compare_ = kNone; // Consumed.
3021}
3022
3023void Assembler::BranchIfZero(Register rn, Label* label, JumpDistance distance) {
3024 beqz(rn, label, distance);
3025}
3026
3027void Assembler::BranchIfBit(Register rn,
3028 intptr_t bit_number,
3029 Condition condition,
3030 Label* label,
3031 JumpDistance distance) {
3032 ASSERT(rn != TMP2);
3033 andi(TMP2, rn, 1 << bit_number);
3034 if (condition == ZERO) {
3035 beqz(TMP2, label, distance);
3036 } else if (condition == NOT_ZERO) {
3037 bnez(TMP2, label, distance);
3038 } else {
3039 UNREACHABLE();
3040 }
3041}
3042
3043void Assembler::BranchIfNotSmi(Register reg,
3044 Label* label,
3045 JumpDistance distance) {
3046 ASSERT(reg != TMP2);
3047 andi(TMP2, reg, kSmiTagMask);
3048 bnez(TMP2, label, distance);
3049}
3050void Assembler::BranchIfSmi(Register reg, Label* label, JumpDistance distance) {
3051 ASSERT(reg != TMP2);
3052 andi(TMP2, reg, kSmiTagMask);
3053 beqz(TMP2, label, distance);
3054}
3055
3056void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
3057 srai(reg, reg, shift);
3058}
3059
3060void Assembler::CompareWords(Register reg1,
3061 Register reg2,
3062 intptr_t offset,
3063 Register count,
3064 Register temp,
3065 Label* equals) {
3066 Label loop;
3067 Bind(&loop);
3068 BranchIfZero(count, equals, Assembler::kNearJump);
3069 AddImmediate(count, -1);
3070 lx(temp, FieldAddress(reg1, offset));
3071 lx(TMP, FieldAddress(reg2, offset));
3072 addi(reg1, reg1, target::kWordSize);
3073 addi(reg2, reg2, target::kWordSize);
3074 beq(temp, TMP, &loop, Assembler::kNearJump);
3075}
3076
3077void Assembler::JumpAndLink(intptr_t target_code_pool_index,
3078 CodeEntryKind entry_kind) {
3079 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
3080 // We don't actually use CODE_REG in the callee and caller might
3081 // be using CODE_REG for a live value (e.g. a value that is alive
3082 // across invocation of a shared stub like the one we use for
3083 // allocating Mint boxes).
3084 const Register code_reg = FLAG_precompiled_mode ? TMP : CODE_REG;
3085 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
3086 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
3087}
3088
3089void Assembler::JumpAndLink(
3090 const Code& target,
3091 ObjectPoolBuilderEntry::Patchability patchable,
3092 CodeEntryKind entry_kind,
3093 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
3094 const intptr_t index = object_pool_builder().FindObject(
3095 ToObject(target), patchable, snapshot_behavior);
3096 JumpAndLink(index, entry_kind);
3097}
3098
3099void Assembler::JumpAndLinkWithEquivalence(const Code& target,
3100 const Object& equivalence,
3101 CodeEntryKind entry_kind) {
3102 const intptr_t index =
3103 object_pool_builder().FindObject(ToObject(target), equivalence);
3104 JumpAndLink(index, entry_kind);
3105}
3106
3107void Assembler::Call(Address target) {
3108 lx(RA, target);
3109 jalr(RA);
3110}
3111
3112void Assembler::Call(Register target) {
3113 jalr(target);
3114}
3115
3116void Assembler::AddShifted(Register dest,
3117 Register base,
3118 Register index,
3119 intx_t shift) {
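 // Zba's sh1add/sh2add/sh3add fold a 1-3 bit index shift into the add;
 // other shift amounts use an explicit slli/srai, going through TMP2
 // when dest aliases base.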
3120 if (shift == 0) {
3121 add(dest, index, base);
3122 } else if (Supports(RV_Zba) && (shift == 1)) {
3123 sh1add(dest, index, base);
3124 } else if (Supports(RV_Zba) && (shift == 2)) {
3125 sh2add(dest, index, base);
3126 } else if (Supports(RV_Zba) && (shift == 3)) {
3127 sh3add(dest, index, base);
3128 } else if (shift < 0) {
3129 if (base != dest) {
3130 srai(dest, index, -shift);
3131 add(dest, dest, base);
3132 } else {
3133 srai(TMP2, index, -shift);
3134 add(dest, TMP2, base);
3135 }
3136 } else {
3137 if (base != dest) {
3138 slli(dest, index, shift);
3139 add(dest, dest, base);
3140 } else {
3141 slli(TMP2, index, shift);
3142 add(dest, TMP2, base);
3143 }
3144 }
3145}
3146
3147void Assembler::AddImmediate(Register rd,
3148 Register rs1,
3149 intx_t imm,
3150 OperandSize sz) {
3151 if ((imm == 0) && (rd == rs1)) {
3152 return;
3153 }
3154 if (IsITypeImm(imm)) {
3155 addi(rd, rs1, imm);
3156 } else {
3157 ASSERT(rs1 != TMP2);
3158 LoadImmediate(TMP2, imm);
3159 add(rd, rs1, TMP2);
3160 }
3161}
3162
3163void Assembler::MulImmediate(Register rd,
3164 Register rs1,
3165 intx_t imm,
3166 OperandSize sz) {
3167 if (Utils::IsPowerOfTwo(imm)) {
3168 const intx_t shift = Utils::ShiftForPowerOfTwo(imm);
3169#if XLEN >= 64
3170 ASSERT(sz == kFourBytes || sz == kEightBytes);
3171 if (sz == kFourBytes) {
3172 slliw(rd, rs1, shift);
3173 } else {
3174 slli(rd, rs1, shift);
3175 }
3176#else
3177 ASSERT(sz == kFourBytes);
3178 slli(rd, rs1, shift);
3179#endif
3180 } else {
3181 LoadImmediate(TMP, imm);
3182#if XLEN >= 64
3183 ASSERT(sz == kFourBytes || sz == kEightBytes);
3184 if (sz == kFourBytes) {
3185 mulw(rd, rs1, TMP);
3186 } else {
3187 mul(rd, rs1, TMP);
3188 }
3189#else
3190 ASSERT(sz == kFourBytes);
3191 mul(rd, rs1, TMP);
3192#endif
3193 }
3194}
3195
3196void Assembler::AndImmediate(Register rd,
3197 Register rs1,
3198 intx_t imm,
3199 OperandSize sz) {
3200 uintx_t uimm = imm;
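 // Prefer the shortest encoding: andi for 12-bit immediates, Zbs bclri
 // to clear a single bit, and zexth or a slli/srli pair for masks of
 // the low bits.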
3201 if (imm == -1) {
3202 MoveRegister(rd, rs1);
3203 } else if (IsITypeImm(imm)) {
3204 andi(rd, rs1, imm);
3205 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(~uimm)) {
3206 bclri(rd, rs1, Utils::ShiftForPowerOfTwo(~uimm));
3207 } else if (Utils::IsPowerOfTwo(uimm + 1)) {
3208 intptr_t shift = Utils::ShiftForPowerOfTwo(uimm + 1);
3209 if (Supports(RV_Zbb) && (shift == 16)) {
3210 zexth(rd, rs1);
3211 } else {
3212 slli(rd, rs1, XLEN - shift);
3213 srli(rd, rd, XLEN - shift);
3214 }
3215 } else {
3216 ASSERT(rs1 != TMP2);
3217 LoadImmediate(TMP2, imm);
3218 and_(rd, rs1, TMP2);
3219 }
3220}
3221void Assembler::OrImmediate(Register rd,
3222 Register rs1,
3223 intx_t imm,
3224 OperandSize sz) {
3225 uintx_t uimm = imm;
3226 if (imm == 0) {
3227 MoveRegister(rd, rs1);
3228 } else if (IsITypeImm(imm)) {
3229 ori(rd, rs1, imm);
3230 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3231 bseti(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3232 } else {
3233 ASSERT(rs1 != TMP2);
3234 LoadImmediate(TMP2, imm);
3235 or_(rd, rs1, TMP2);
3236 }
3237}
3238void Assembler::XorImmediate(Register rd,
3239 Register rs1,
3240 intx_t imm,
3241 OperandSize sz) {
3242 uintx_t uimm = imm;
3243 if (imm == 0) {
3244 MoveRegister(rd, rs1);
3245 } else if (IsITypeImm(imm)) {
3246 xori(rd, rs1, imm);
3247 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3248 binvi(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3249 } else {
3250 ASSERT(rs1 != TMP2);
3251 LoadImmediate(TMP2, imm);
3252 xor_(rd, rs1, TMP2);
3253 }
3254}
3255
3256void Assembler::TestImmediate(Register rn, intx_t imm, OperandSize sz) {
3257 ASSERT(deferred_compare_ == kNone);
3258 deferred_compare_ = kTestImm;
3259 deferred_left_ = rn;
3260 deferred_imm_ = imm;
3261}
3262void Assembler::CompareImmediate(Register rn, intx_t imm, OperandSize sz) {
3263 ASSERT(deferred_compare_ == kNone);
3264 deferred_compare_ = kCompareImm;
3265 deferred_left_ = rn;
3266 deferred_imm_ = imm;
3267}
3268
3269Address Assembler::PrepareLargeOffset(Register base, int32_t offset) {
3270 ASSERT(base != TMP2);
3271 if (IsITypeImm(offset)) {
3272 return Address(base, offset);
3273 }
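 // Offset is outside the 12-bit I-type range: fold the upper 20 bits
 // into TMP2 with lui+add and keep ImmLo as the access's immediate.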
3274 intx_t lo = ImmLo(offset);
3275 intx_t hi = ImmHi(offset);
3276 ASSERT(hi != 0);
3277 lui(TMP2, hi);
3278 add(TMP2, TMP2, base);
3279 return Address(TMP2, lo);
3280}
3281
3282Address Assembler::PrepareAtomicOffset(Register base, int32_t offset) {
3283 ASSERT(base != TMP2);
3284 if (offset == 0) {
3285 return Address(base, 0);
3286 }
3287 AddImmediate(TMP2, base, offset);
3288 return Address(TMP2, 0);
3289}
3290
3291void Assembler::Load(Register dest, const Address& address, OperandSize sz) {
3292 Address addr = PrepareLargeOffset(address.base(), address.offset());
3293 switch (sz) {
3294#if XLEN == 64
3295 case kEightBytes:
3296 return ld(dest, addr);
3297 case kUnsignedFourBytes:
3298 return lwu(dest, addr);
3299#elif XLEN == 32
3300 case kUnsignedFourBytes:
3301 return lw(dest, addr);
3302#endif
3303 case kFourBytes:
3304 return lw(dest, addr);
3305 case kUnsignedTwoBytes:
3306 return lhu(dest, addr);
3307 case kTwoBytes:
3308 return lh(dest, addr);
3309 case kUnsignedByte:
3310 return lbu(dest, addr);
3311 case kByte:
3312 return lb(dest, addr);
3313 default:
3314 UNREACHABLE();
3315 }
3316}
3317// For loading indexed payloads out of tagged objects like Arrays. If the
3318// payload elements are word-sized, use TIMES_HALF_WORD_SIZE when [index]
3319// holds a Smi and TIMES_WORD_SIZE when it holds an unboxed value.
3320void Assembler::LoadIndexedPayload(Register dest,
3321 Register base,
3322 int32_t payload_offset,
3323 Register index,
3324 ScaleFactor scale,
3325 OperandSize sz) {
3326 AddShifted(TMP, base, index, scale);
3327 LoadFromOffset(dest, TMP, payload_offset - kHeapObjectTag, sz);
3328}
3329
3330void Assembler::LoadSFromOffset(FRegister dest, Register base, int32_t offset) {
3331 flw(dest, PrepareLargeOffset(base, offset));
3332}
3333
3334void Assembler::LoadDFromOffset(FRegister dest, Register base, int32_t offset) {
3335 fld(dest, PrepareLargeOffset(base, offset));
3336}
3337
3338void Assembler::LoadFromStack(Register dst, intptr_t depth) {
3339 LoadFromOffset(dst, SPREG, target::kWordSize * depth);
3340}
3341void Assembler::StoreToStack(Register src, intptr_t depth) {
3342 StoreToOffset(src, SPREG, target::kWordSize * depth);
3343}
3344void Assembler::CompareToStack(Register src, intptr_t depth) {
3345 CompareWithMemoryValue(src, Address(SPREG, target::kWordSize * depth));
3346}
3347
3348void Assembler::Store(Register src, const Address& address, OperandSize sz) {
3349 Address addr = PrepareLargeOffset(address.base(), address.offset());
3350 switch (sz) {
3351#if XLEN == 64
3352 case kEightBytes:
3353 return sd(src, addr);
3354#endif
3355 case kUnsignedFourBytes:
3356 case kFourBytes:
3357 return sw(src, addr);
3358 case kUnsignedTwoBytes:
3359 case kTwoBytes:
3360 return sh(src, addr);
3361 case kUnsignedByte:
3362 case kByte:
3363 return sb(src, addr);
3364 default:
3365 UNREACHABLE();
3366 }
3367}
3368
3369void Assembler::StoreSToOffset(FRegister src, Register base, int32_t offset) {
3370 fsw(src, PrepareLargeOffset(base, offset));
3371}
3372
3373void Assembler::StoreDToOffset(FRegister src, Register base, int32_t offset) {
3374 fsd(src, PrepareLargeOffset(base, offset));
3375}
3376
3377void Assembler::StoreBarrier(Register object,
3378 Register value,
3379 CanBeSmi can_value_be_smi,
3380 Register scratch) {
3381 // x.slot = x. Barrier should have been removed at the IL level.
3382 ASSERT(object != value);
3383 ASSERT(object != scratch);
3384 ASSERT(value != scratch);
3385 ASSERT(object != RA);
3386 ASSERT(value != RA);
3387 ASSERT(scratch != RA);
3388 ASSERT(object != TMP2);
3389 ASSERT(value != TMP2);
3390 ASSERT(scratch != TMP2);
3391 ASSERT(scratch != kNoRegister);
3392
3393 // In parallel, test whether
3394 // - object is old and not remembered and value is new, or
3395 // - object is old and value is old and not marked and concurrent marking is
3396 // in progress
3397 // If so, call the WriteBarrier stub, which will either add object to the
3398 // store buffer (case 1) or add value to the marking stack (case 2).
3399 // See RestorePinnedRegisters for why this can be `ble`.
3400 // Compare UntaggedObject::StorePointer.
3401 Label done;
3402 if (can_value_be_smi == kValueCanBeSmi) {
3403 BranchIfSmi(value, &done, kNearJump);
3404 } else {
3405#if defined(DEBUG)
3406 Label passed_check;
3407 BranchIfNotSmi(value, &passed_check, kNearJump);
3408 Breakpoint();
3409 Bind(&passed_check);
3410#endif
3411 }
3412 lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
3413 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3414 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3415 and_(scratch, scratch, TMP2);
3416 ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);
3417
3418 Register objectForCall = object;
3419 if (value != kWriteBarrierValueReg) {
3420 // Unlikely. Only non-graph intrinsics.
3421 // TODO(rmacnak): Shuffle registers in intrinsics.
3422 if (object != kWriteBarrierValueReg) {
3423 PushRegister(kWriteBarrierValueReg);
3424 } else {
3425 COMPILE_ASSERT(S3 != kWriteBarrierValueReg);
3426 COMPILE_ASSERT(S4 != kWriteBarrierValueReg);
3427 objectForCall = (value == S3) ? S4 : S3;
3428 PushRegisterPair(kWriteBarrierValueReg, objectForCall);
3429 mv(objectForCall, object);
3430 }
3431 mv(kWriteBarrierValueReg, value);
3432 }
3433
3434 // Note this uses TMP as the link register, so RA remains preserved.
3435 generate_invoke_write_barrier_wrapper_(objectForCall);
3436
3437 if (value != kWriteBarrierValueReg) {
3438 if (object != kWriteBarrierValueReg) {
3439 PopRegister(kWriteBarrierValueReg);
3440 } else {
3441 PopRegisterPair(kWriteBarrierValueReg, objectForCall);
3442 }
3443 }
3444 Bind(&done);
3445}
3446
3447void Assembler::ArrayStoreBarrier(Register object,
3448 Register slot,
3450 CanBeSmi can_value_be_smi,
3451 Register scratch) {
3452 // TODO(riscv): Use RA2 to avoid spilling RA inline?
3453 const bool spill_lr = true;
3454 ASSERT(object != slot);
3455 ASSERT(object != value);
3456 ASSERT(object != scratch);
3457 ASSERT(slot != value);
3458 ASSERT(slot != scratch);
3459 ASSERT(value != scratch);
3460 ASSERT(object != RA);
3461 ASSERT(slot != RA);
3462 ASSERT(value != RA);
3463 ASSERT(scratch != RA);
3464 ASSERT(object != TMP2);
3465 ASSERT(slot != TMP2);
3466 ASSERT(value != TMP2);
3467 ASSERT(scratch != TMP2);
3468 ASSERT(scratch != kNoRegister);
3469
3470 // In parallel, test whether
3471 // - object is old and not remembered and value is new, or
3472 // - object is old and value is old and not marked and concurrent marking is
3473 // in progress
3474 // If so, call the WriteBarrier stub, which will either add object to the
3475 // store buffer (case 1) or add value to the marking stack (case 2).
3476 // See RestorePinnedRegisters for why this can be `ble`.
3477 // Compare UntaggedObject::StorePointer.
3478 Label done;
3479 if (can_value_be_smi == kValueCanBeSmi) {
3480 BranchIfSmi(value, &done, kNearJump);
3481 } else {
3482#if defined(DEBUG)
3483 Label passed_check;
3484 BranchIfNotSmi(value, &passed_check, kNearJump);
3485 Breakpoint();
3486 Bind(&passed_check);
3487#endif
3488 }
3489 lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
3490 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3491 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3492 and_(scratch, scratch, TMP2);
3493 ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);
3494 if (spill_lr) {
3495 PushRegister(RA);
3496 }
3497 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
3498 (slot != kWriteBarrierSlotReg)) {
3499 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
3500 // from StoreIndexInstr, which gets these exact registers from the register
3501 // allocator.
3502 UNIMPLEMENTED();
3503 }
3504 generate_invoke_array_write_barrier_();
3505 if (spill_lr) {
3506 PopRegister(RA);
3507 }
3508 Bind(&done);
3509}
3510
3511void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
3512 Register value) {
3513 // We can't assert the incremental barrier is not needed here, only the
3514 // generational barrier. We sometimes omit the write barrier when 'value' is
3515 // a constant, but we don't eagerly mark 'value' and instead assume it is also
3516 // reachable via a constant pool, so it doesn't matter if it is not traced via
3517 // 'object'.
3518 Label done;
3519 BranchIfSmi(value, &done, kNearJump);
3520 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3521 andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewOrEvacuationCandidateBit);
3522 beqz(TMP2, &done, kNearJump);
3523 lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
3524 andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
3525 beqz(TMP2, &done, kNearJump);
3526 Stop("Write barrier is required");
3527 Bind(&done);
3528}
3529
3530void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
3531 const Address& dest,
3532 const Object& value,
3533 MemoryOrder memory_order,
3534 OperandSize size) {
3535 ASSERT(IsOriginalObject(value));
3536 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
3537 // No store buffer update.
3538 Register value_reg;
3539 if (IsSameObject(compiler::NullObject(), value)) {
3540 value_reg = NULL_REG;
3541 } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
3542 value_reg = ZR;
3543 } else {
3544 ASSERT(object != TMP);
3545 LoadObject(TMP, value);
3546 value_reg = TMP;
3547 }
3548 if (memory_order == kRelease) {
3549 fence(HartEffects::kMemory, HartEffects::kWrite);
3550 }
3551 Store(value_reg, dest, size);
3552}
3553
3554// Stores a non-tagged value into a heap object.
3555void Assembler::StoreInternalPointer(Register object,
3556 const Address& dest,
3557 Register value) {
3558 sx(value, dest);
3559}
3560
3561// Object pool, loading from pool, etc.
3562void Assembler::LoadPoolPointer(Register pp) {
3563 CheckCodePointer();
3564 lx(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
3565
3566 // When in the PP register, the pool pointer is untagged. When we
3567 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
3568 // then untags when restoring from the stack. This will make loading from the
3569 // object pool only one instruction for the first 4096 entries. Otherwise,
3570 // because the offset wouldn't be aligned, it would be only one instruction
3571 // for the first 64 entries.
3572 subi(pp, pp, kHeapObjectTag);
3573 set_constant_pool_allowed(pp == PP);
3574}
3575
3576bool Assembler::CanLoadFromObjectPool(const Object& object) const {
3577 ASSERT(IsOriginalObject(object));
3578 if (!constant_pool_allowed()) {
3579 return false;
3580 }
3581
3582 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
3583 ASSERT(IsInOldSpace(object));
3584 return true;
3585}
3586void Assembler::LoadNativeEntry(
3587 Register dst,
3588 const ExternalLabel* label,
3589 ObjectPoolBuilderEntry::Patchability patchable) {
3590 const intptr_t index =
3591 object_pool_builder().FindNativeFunction(label, patchable);
3592 LoadWordFromPoolIndex(dst, index);
3593}
3594void Assembler::LoadIsolate(Register dst) {
3595 lx(dst, Address(THR, target::Thread::isolate_offset()));
3596}
3597void Assembler::LoadIsolateGroup(Register dst) {
3598 lx(dst, Address(THR, target::Thread::isolate_group_offset()));
3599}
3600
3601void Assembler::LoadImmediate(Register reg, intx_t imm) {
3602#if XLEN > 32
3603 if (!Utils::IsInt(32, imm)) {
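 // 64-bit immediate: try a shifted 12-bit or 20-bit pattern first, then
 // the object pool, then recurse on the high bits and add in ImmLo.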
3604 int shift = Utils::CountTrailingZeros64(imm);
3605 if (IsITypeImm(imm >> shift)) {
3606 li(reg, imm >> shift);
3607 slli(reg, reg, shift);
3608 return;
3609 }
3610 if ((shift >= 12) && IsUTypeImm(imm >> (shift - 12))) {
3611 lui(reg, imm >> (shift - 12));
3612 slli(reg, reg, shift - 12);
3613 return;
3614 }
3615
3616 if (constant_pool_allowed()) {
3617 intptr_t index = object_pool_builder().FindImmediate(imm);
3618 LoadWordFromPoolIndex(reg, index);
3619 return;
3620 }
3621
3622 intx_t lo = ImmLo(imm);
3623 intx_t hi = imm - lo;
3624 shift = Utils::CountTrailingZeros64(hi);
3625 ASSERT(shift != 0);
3626 LoadImmediate(reg, hi >> shift);
3627 slli(reg, reg, shift);
3628 if (lo != 0) {
3629 addi(reg, reg, lo);
3630 }
3631 return;
3632 }
3633#endif
3634
3635 intx_t lo = ImmLo(imm);
3636 intx_t hi = ImmHi(imm);
3637 if (hi == 0) {
3638 addi(reg, ZR, lo);
3639 } else {
3640 lui(reg, hi);
3641 if (lo != 0) {
3642#if XLEN == 32
3643 addi(reg, reg, lo);
3644#else
3645 addiw(reg, reg, lo);
3646#endif
3647 }
3648 }
3649}
3650
3651void Assembler::LoadSImmediate(FRegister reg, float imms) {
3652 int32_t imm = bit_cast<int32_t, float>(imms);
3653 if (imm == 0) {
3654 fmvwx(reg, ZR); // bit_cast uint32_t -> float
3655 } else {
3656 ASSERT(constant_pool_allowed());
3657 intptr_t index = object_pool_builder().FindImmediate(imm);
3658 intptr_t offset = target::ObjectPool::element_offset(index);
3659 LoadSFromOffset(reg, PP, offset);
3660 }
3661}
3662
3663void Assembler::LoadDImmediate(FRegister reg, double immd) {
3664 int64_t imm = bit_cast<int64_t, double>(immd);
3665 if (imm == 0) {
3666#if XLEN >= 64
3667 fmvdx(reg, ZR); // bit_cast uint64_t -> double
3668#else
3669 fcvtdwu(reg, ZR); // static_cast uint32_t -> double
3670#endif
3671 } else {
3672 ASSERT(constant_pool_allowed());
3673 intptr_t index = object_pool_builder().FindImmediate64(imm);
3674 intptr_t offset = target::ObjectPool::element_offset(index);
3675 LoadDFromOffset(reg, PP, offset);
3676 }
3677}
3678
3679void Assembler::LoadQImmediate(FRegister reg, simd128_value_t immq) {
3680 UNREACHABLE(); // F registers cannot represent SIMD128.
3681}
3682
3683// Load word from pool from the given offset using encoding that
3684// InstructionPattern::DecodeLoadWordFromPool can decode.
3685//
3686// Note: the function never clobbers TMP, TMP2 scratch registers.
3687void Assembler::LoadWordFromPoolIndex(Register dst,
3688 intptr_t index,
3689 Register pp) {
3690 ASSERT((pp != PP) || constant_pool_allowed());
3691 ASSERT(dst != pp);
3692 const uint32_t offset = target::ObjectPool::element_offset(index);
3693 // PP is untagged.
3694 intx_t lo = ImmLo(offset);
3695 intx_t hi = ImmHi(offset);
3696 if (hi == 0) {
3697 lx(dst, Address(pp, lo));
3698 } else {
3699 lui(dst, hi);
3700 add(dst, dst, pp);
3701 lx(dst, Address(dst, lo));
3702 }
3703}
3704
3705void Assembler::StoreWordToPoolIndex(Register src,
3706 intptr_t index,
3707 Register pp) {
3708 ASSERT((pp != PP) || constant_pool_allowed());
3709 ASSERT(src != pp);
3710 const uint32_t offset = target::ObjectPool::element_offset(index);
3711 // PP is untagged.
3712 intx_t lo = ImmLo(offset);
3713 intx_t hi = ImmHi(offset);
3714 if (hi == 0) {
3715 sx(src, Address(pp, lo));
3716 } else {
3717 lui(TMP, hi);
3718 add(TMP, TMP, pp);
3719 sx(src, Address(TMP, lo));
3720 }
3721}
3722
3723void Assembler::CompareObject(Register reg, const Object& object) {
3724 ASSERT(IsOriginalObject(object));
3725 if (IsSameObject(compiler::NullObject(), object)) {
3726 CompareObjectRegisters(reg, NULL_REG);
3727 } else if (target::IsSmi(object)) {
3728 CompareImmediate(reg, target::ToRawSmi(object), kObjectBytes);
3729 } else {
3730 LoadObject(TMP, object);
3731 CompareObjectRegisters(reg, TMP);
3732 }
3733}
3734
3735void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
3736 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3737 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3738#if XLEN == 64
3739 srliw(result, tags, target::UntaggedObject::kClassIdTagPos);
3740#else
3741 srli(result, tags, target::UntaggedObject::kClassIdTagPos);
3742#endif
3743}
3744
3745void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
3746 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
3747 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
3748 srli(result, tags, target::UntaggedObject::kSizeTagPos);
3749 andi(result, result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
3750 slli(result, result, target::kObjectAlignmentLog2);
3751}
3752
3753void Assembler::LoadClassId(Register result, Register object) {
3754 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3755 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3756#if XLEN == 64
3757 lwu(result, FieldAddress(object, target::Object::tags_offset()));
3758#else
3759 lw(result, FieldAddress(object, target::Object::tags_offset()));
3760#endif
3761 srli(result, result, target::UntaggedObject::kClassIdTagPos);
3762}
3763
3764void Assembler::LoadClassById(Register result, Register class_id) {
3765 ASSERT(result != class_id);
3766
3767 const intptr_t table_offset =
3768 target::IsolateGroup::cached_class_table_table_offset();
3769
3770 LoadIsolateGroup(result);
3771 LoadFromOffset(result, result, table_offset);
3772 AddShifted(result, result, class_id, target::kWordSizeLog2);
3773 lx(result, Address(result, 0));
3774}
3775void Assembler::CompareClassId(Register object,
3776 intptr_t class_id,
3777 Register scratch) {
3778 ASSERT(scratch != kNoRegister);
3779 LoadClassId(scratch, object);
3780 CompareImmediate(scratch, class_id);
3781}
3782// Note: input and output registers must be different.
3783void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
3784 ASSERT(result != object);
3785 ASSERT(result != TMP2);
3786 ASSERT(object != TMP2);
3787 li(result, kSmiCid);
3788 Label done;
3789 BranchIfSmi(object, &done, kNearJump);
3790 LoadClassId(result, object);
3791 Bind(&done);
3792}
3793void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
3794 LoadClassIdMayBeSmi(result, object);
3795 SmiTag(result);
3796}
3797void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
3798 Register src,
3799 Register scratch,
3800 bool can_be_null) {
3801#if defined(DEBUG)
3802 Comment("Check that object in register has cid %" Pd "", cid);
3803 Label matches;
3804 LoadClassIdMayBeSmi(scratch, src);
3805 CompareImmediate(scratch, cid);
3806 BranchIf(EQUAL, &matches, Assembler::kNearJump);
3807 if (can_be_null) {
3808 CompareImmediate(scratch, kNullCid);
3809 BranchIf(EQUAL, &matches, Assembler::kNearJump);
3810 }
3811 trap();
3812 Bind(&matches);
3813#endif
3814}
3815
3816void Assembler::EnterFrame(intptr_t frame_size) {
3817 // N.B. The ordering here is important. We must never write beyond SP or
3818 // it can be clobbered by a signal handler.
3819 subi(SP, SP, frame_size + 2 * target::kWordSize);
3820 sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
3821 sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
3822 addi(FP, SP, frame_size + 2 * target::kWordSize);
3823}
3824void Assembler::LeaveFrame() {
3825 // N.B. The ordering here is important. We must never read beyond SP or
3826 // it may have already been clobbered by a signal handler.
3827 subi(SP, FP, 2 * target::kWordSize);
3828 lx(FP, Address(SP, 0 * target::kWordSize));
3829 lx(RA, Address(SP, 1 * target::kWordSize));
3830 addi(SP, SP, 2 * target::kWordSize);
3831}
3832
3833void Assembler::TransitionGeneratedToNative(Register destination,
3834 Register new_exit_frame,
3835 Register new_exit_through_ffi,
3836 bool enter_safepoint) {
3837 // Save exit frame information to enable stack walking.
3838 sx(new_exit_frame,
3839 Address(THR, target::Thread::top_exit_frame_info_offset()));
3840
3841 sx(new_exit_through_ffi,
3842 Address(THR, target::Thread::exit_through_ffi_offset()));
3843 Register tmp = new_exit_through_ffi;
3844
3845 // Mark that the thread is executing native code.
3846 sx(destination, Address(THR, target::Thread::vm_tag_offset()));
3847 li(tmp, target::Thread::native_execution_state());
3848 sx(tmp, Address(THR, target::Thread::execution_state_offset()));
3849
3850 if (enter_safepoint) {
3851 EnterFullSafepoint(tmp);
3852 }
3853}
3854
3855void Assembler::TransitionNativeToGenerated(Register state,
3856 bool exit_safepoint,
3857 bool ignore_unwind_in_progress,
3858 bool set_tag) {
3859 if (exit_safepoint) {
3860 ExitFullSafepoint(state, ignore_unwind_in_progress);
3861 } else {
3862 // flag only makes sense if we are leaving safepoint
3863 ASSERT(!ignore_unwind_in_progress);
3864#if defined(DEBUG)
3865 // Ensure we've already left the safepoint.
3866 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
3867 li(state, target::Thread::full_safepoint_state_acquired());
3868 lx(RA, Address(THR, target::Thread::safepoint_state_offset()));
3869 and_(RA, RA, state);
3870 Label ok;
3871 beqz(RA, &ok, Assembler::kNearJump);
3872 Breakpoint();
3873 Bind(&ok);
3874#endif
3875 }
3876
3877 // Mark that the thread is executing Dart code.
3878 if (set_tag) {
3879 li(state, target::Thread::vm_tag_dart_id());
3880 sx(state, Address(THR, target::Thread::vm_tag_offset()));
3881 }
3882 li(state, target::Thread::generated_execution_state());
3883 sx(state, Address(THR, target::Thread::execution_state_offset()));
3884
3885 // Reset exit frame information in Isolate's mutator thread structure.
3886 sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
3887 sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
3888}
3889
3890void Assembler::EnterFullSafepoint(Register state) {
3891 // We generate the same number of instructions whether or not the slow-path is
3892 // forced. This simplifies GenerateJitCallbackTrampolines.
3893 // For TSAN, we always go to the runtime so TSAN is aware of the release
3894 // semantics of entering the safepoint.
3895
3896 Register addr = RA;
3897 ASSERT(addr != state);
3898
3899 Label slow_path, done, retry;
3900 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
3901 j(&slow_path, Assembler::kNearJump);
3902 }
3903
3904 addi(addr, THR, target::Thread::safepoint_state_offset());
3905 Bind(&retry);
3906 lr(state, Address(addr, 0));
3907 subi(state, state, target::Thread::full_safepoint_state_unacquired());
3908 bnez(state, &slow_path, Assembler::kNearJump);
3909
3910 li(state, target::Thread::full_safepoint_state_acquired());
3911 sc(state, state, Address(addr, 0));
3912 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
3913
3914 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
3915 j(&retry, Assembler::kNearJump);
3916 }
3917
3918 Bind(&slow_path);
3919 lx(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
3920 lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
3921 jalr(addr);
3922
3923 Bind(&done);
3924}
3925
3926void Assembler::ExitFullSafepoint(Register state,
3927 bool ignore_unwind_in_progress) {
3928 // We generate the same number of instructions whether or not the slow-path is
3929 // forced, for consistency with EnterFullSafepoint.
3930 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
3931 // semantics of leaving the safepoint.
3932 Register addr = RA;
3933 ASSERT(addr != state);
3934
3935 Label slow_path, done, retry;
3936 if (FLAG_use_slow_path || FLAG_target_thread_sanitizer) {
3937 j(&slow_path, Assembler::kNearJump);
3938 }
3939
3940 addi(addr, THR, target::Thread::safepoint_state_offset());
3941 Bind(&retry);
3942 lr(state, Address(addr, 0));
3943 subi(state, state, target::Thread::full_safepoint_state_acquired());
3944 bnez(state, &slow_path, Assembler::kNearJump);
3945
3946 li(state, target::Thread::full_safepoint_state_unacquired());
3947 sc(state, state, Address(addr, 0));
3948 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
3949
3950 if (!FLAG_use_slow_path && !FLAG_target_thread_sanitizer) {
3951 j(&retry, Assembler::kNearJump);
3952 }
3953
3954 Bind(&slow_path);
3955 if (ignore_unwind_in_progress) {
3956 lx(addr,
3957 Address(THR,
3958 target::Thread::
3959 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
3960 } else {
3961 lx(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
3962 }
3963 lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
3964 jalr(addr);
3965
3966 Bind(&done);
3967}
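// Sketch: the lr/sc pairs in EnterFullSafepoint/ExitFullSafepoint form a
// compare-and-swap loop on the safepoint state. In C-like pseudocode for
// the enter case (descriptive names, not the actual runtime fields):
//
//   do {
//     state = load_reserved(&thread->safepoint_state);
//     if (state != kUnacquired) goto slow_path;
//   } while (store_conditional(&thread->safepoint_state, kAcquired) != 0);
//
// sc writes 0 to its destination register on success, hence the beqz to
// 'done'; the exit loop is identical with the two state values swapped.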
3968
3969void Assembler::CheckFpSpDist(intptr_t fp_sp_dist) {
3970 ASSERT(fp_sp_dist <= 0);
3971#if defined(DEBUG)
3972 Label ok;
3973 Comment("CheckFpSpDist");
3974 sub(TMP, SP, FP);
3975 CompareImmediate(TMP, fp_sp_dist);
3976 BranchIf(EQ, &ok, compiler::Assembler::kNearJump);
3977 ebreak();
3978 Bind(&ok);
3979#endif
3980}
3981
3982void Assembler::CheckCodePointer() {
3983#ifdef DEBUG
3984 if (!FLAG_check_code_pointer) {
3985 return;
3986 }
3987 Comment("CheckCodePointer");
3988 Label cid_ok, instructions_ok;
3989 CompareClassId(CODE_REG, kCodeCid, TMP);
3990 BranchIf(EQ, &cid_ok, kNearJump);
3991 ebreak();
3992 Bind(&cid_ok);
3993
3994 const intptr_t entry_offset =
3995 CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
3996 intx_t imm = -entry_offset;
3997 intx_t lo = ImmLo(imm);
3998 intx_t hi = ImmHi(imm);
3999 auipc(TMP, hi);
4000 addi(TMP, TMP, lo);
4001 lx(TMP2, FieldAddress(CODE_REG, target::Code::instructions_offset()));
4002 beq(TMP, TMP2, &instructions_ok, kNearJump);
4003 ebreak();
4004 Bind(&instructions_ok);
4005#endif
4006}
4007
4008void Assembler::RestoreCodePointer() {
4009 lx(CODE_REG,
4010 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
4011 CheckCodePointer();
4012}
4013
4014void Assembler::RestorePoolPointer() {
4015 if (FLAG_precompiled_mode) {
4016 lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
4017 } else {
4018 lx(PP, Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
4019 lx(PP, FieldAddress(PP, target::Code::object_pool_offset()));
4020 }
4021 subi(PP, PP, kHeapObjectTag); // Pool in PP is untagged!
4022}
4023
4024void Assembler::RestorePinnedRegisters() {
4025 lx(WRITE_BARRIER_STATE,
4026 Address(THR, target::Thread::write_barrier_mask_offset()));
4027 lx(NULL_REG, Address(THR, target::Thread::object_null_offset()));
4028
4029 // Our write barrier usually uses mask-and-test,
4030 // 01b6f6b3 and tmp, tmp, mask
4031 // c689 beqz tmp, +10
4032 // but on RISC-V compare-and-branch is shorter,
4033 // 00ddd663 ble tmp, wbs, +12
4034 //
4035 // TMP bit 4+ = 0
4036 // TMP bit 3 = object is old-and-not-remembered AND value is new (genr bit)
4037 // TMP bit 2 = object is old AND value is old-and-not-marked (incr bit)
4038 // TMP bit 1 = garbage
4039 // TMP bit 0 = garbage
4040 //
4041 // Thread::wbm | WRITE_BARRIER_STATE | TMP/combined headers | result
4042 // generational only
4043 //   0b1000    | 0b0111              | 0b11xx               | impossible
4044 //             |                     | 0b10xx               | call stub
4045 //             |                     | 0b01xx               | skip
4046 //             |                     | 0b00xx               | skip
4047 // generational and incremental
4048 //   0b1100    | 0b0011              | 0b11xx               | impossible
4049 //             |                     | 0b10xx               | call stub
4050 //             |                     | 0b01xx               | call stub
4051 //             |                     | 0b00xx               | skip
4052 xori(WRITE_BARRIER_STATE, WRITE_BARRIER_STATE,
4053 (target::UntaggedObject::kGenerationalBarrierMask << 1) - 1);
4054
4055 // Generational bit must be higher than incremental bit, with no other bits
4056 // between.
4057 ASSERT(target::UntaggedObject::kGenerationalBarrierMask ==
4058 (target::UntaggedObject::kIncrementalBarrierMask << 1));
4059 // Other header bits must be lower.
4060 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
4061 target::UntaggedObject::kCanonicalBit);
4062 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
4063 target::UntaggedObject::kCardRememberedBit);
4064}
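// Worked example: assuming kGenerationalBarrierMask == 0b1000 and
// kIncrementalBarrierMask == 0b0100 (which the ASSERTs above imply), the
// xori constant is (0b1000 << 1) - 1 = 0b1111, so
//   wbm 0b1000 (generational only)          -> WRITE_BARRIER_STATE 0b0111
//   wbm 0b1100 (generational + incremental) -> WRITE_BARRIER_STATE 0b0011
// matching the WRITE_BARRIER_STATE column in the table above.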
4065
4066void Assembler::SetupGlobalPoolAndDispatchTable() {
4067 ASSERT(FLAG_precompiled_mode);
4068 lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
4069 subi(PP, PP, kHeapObjectTag); // Pool in PP is untagged!
4070 lx(DISPATCH_TABLE_REG,
4071 Address(THR, target::Thread::dispatch_table_array_offset()));
4072}
4073
4074void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
4075 ASSERT(!constant_pool_allowed());
4076
4077 if (!IsITypeImm(frame_size + 4 * target::kWordSize)) {
4078 EnterDartFrame(0, new_pp);
4079 AddImmediate(SP, SP, -frame_size);
4080 return;
4081 }
4082
4083 // N.B. The ordering here is important. We must never write beyond SP or
4084 // it can be clobbered by a signal handler.
4085 if (FLAG_precompiled_mode) {
4086 subi(SP, SP, frame_size + 2 * target::kWordSize);
4087 sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
4088 sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
4089 addi(FP, SP, frame_size + 2 * target::kWordSize);
4090 } else {
4091 subi(SP, SP, frame_size + 4 * target::kWordSize);
4092 sx(RA, Address(SP, frame_size + 3 * target::kWordSize));
4093 sx(FP, Address(SP, frame_size + 2 * target::kWordSize));
4094 sx(CODE_REG, Address(SP, frame_size + 1 * target::kWordSize));
4095 addi(PP, PP, kHeapObjectTag);
4096 sx(PP, Address(SP, frame_size + 0 * target::kWordSize));
4097 addi(FP, SP, frame_size + 4 * target::kWordSize);
4098 if (new_pp == kNoRegister) {
4099 LoadPoolPointer();
4100 } else {
4101 mv(PP, new_pp);
4102 }
4103 }
4104 set_constant_pool_allowed(true);
4105}
4106
4107// On entry to a function compiled for OSR, the caller's frame pointer, the
4108// stack locals, and any copied parameters are already in place. The frame
4109// pointer is already set up. The PC marker is not correct for the
4110// optimized function and there may be extra space for spill slots to
4111// allocate. We must also set up the pool pointer for the function.
4112void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
4113 ASSERT(!constant_pool_allowed());
4114 Comment("EnterOsrFrame");
4115 RestoreCodePointer();
4116 LoadPoolPointer();
4117
4118 if (extra_size > 0) {
4119 AddImmediate(SP, -extra_size);
4120 }
4121}
4122
4123void Assembler::LeaveDartFrame() {
4124 // N.B. The ordering here is important. We must never read beyond SP or
4125 // it may have already been clobbered by a signal handler.
4126 if (!FLAG_precompiled_mode) {
4127 lx(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
4128 target::kWordSize));
4129 subi(PP, PP, kHeapObjectTag);
4130 }
4131 set_constant_pool_allowed(false);
4132 subi(SP, FP, 2 * target::kWordSize);
4133 lx(FP, Address(SP, 0 * target::kWordSize));
4134 lx(RA, Address(SP, 1 * target::kWordSize));
4135 addi(SP, SP, 2 * target::kWordSize);
4136}
4137
4138void Assembler::LeaveDartFrame(intptr_t fp_sp_dist) {
4139 intptr_t pp_offset =
4140 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize -
4141 fp_sp_dist;
4142 intptr_t fp_offset =
4143 target::frame_layout.saved_caller_fp_from_fp * target::kWordSize -
4144 fp_sp_dist;
4145 intptr_t ra_offset =
4146 target::frame_layout.saved_caller_pc_from_fp * target::kWordSize -
4147 fp_sp_dist;
4148 if (!IsITypeImm(pp_offset) || !IsITypeImm(fp_offset) ||
4149 !IsITypeImm(ra_offset)) {
4150 // Shorter to update SP twice than generate large immediates.
4151 LeaveDartFrame();
4152 return;
4153 }
4154
4155 if (!FLAG_precompiled_mode) {
4156 lx(PP, Address(SP, pp_offset));
4157 subi(PP, PP, kHeapObjectTag);
4158 }
4159 set_constant_pool_allowed(false);
4160 lx(FP, Address(SP, fp_offset));
4161 lx(RA, Address(SP, ra_offset));
4162 addi(SP, SP, -fp_sp_dist);
4163}
4164
4165void Assembler::CallRuntime(const RuntimeEntry& entry,
4166 intptr_t argument_count) {
4167 ASSERT(!entry.is_leaf());
4168 // Argument count is not checked here, but in the runtime entry for a more
4169 // informative error message.
4170 lx(T5, compiler::Address(THR, entry.OffsetFromThread()));
4171 li(T4, argument_count);
4172 Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
4173}
4174
4175static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
4176 kAbiVolatileFpuRegs);
4177
4178#define __ assembler_->
4179
4180LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
4181 intptr_t frame_size,
4182 bool preserve_registers)
4183 : assembler_(assembler), preserve_registers_(preserve_registers) {
4184 // N.B. The ordering here is important. We must never write beyond SP or
4185 // it can be clobbered by a signal handler.
4186 __ subi(SP, SP, 4 * target::kWordSize);
4187 __ sx(RA, Address(SP, 3 * target::kWordSize));
4188 __ sx(FP, Address(SP, 2 * target::kWordSize));
4189 __ sx(CODE_REG, Address(SP, 1 * target::kWordSize));
4190 __ sx(PP, Address(SP, 0 * target::kWordSize));
4191 __ addi(FP, SP, 4 * target::kWordSize);
4192
4193 if (preserve_registers) {
4194 __ PushRegisters(kRuntimeCallSavedRegisters);
4195 } else {
4196 // Or no reason to save above.
4197 COMPILE_ASSERT(!IsAbiPreservedRegister(CODE_REG));
4198 COMPILE_ASSERT(!IsAbiPreservedRegister(PP));
4199 // Or would need to save above.
4200 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
4201 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
4202 COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_STATE));
4203 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
4204 }
4205
4206 __ ReserveAlignedFrameSpace(frame_size);
4207}
4208
4209void LeafRuntimeScope::Call(const RuntimeEntry& entry,
4210 intptr_t argument_count) {
4211 ASSERT(argument_count == entry.argument_count());
4212 __ lx(TMP2, compiler::Address(THR, entry.OffsetFromThread()));
4213 __ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
4214 __ jalr(TMP2);
4215 __ LoadImmediate(TMP2, VMTag::kDartTagId);
4216 __ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
4217}
4218
4219LeafRuntimeScope::~LeafRuntimeScope() {
4220 if (preserve_registers_) {
4221 const intptr_t kSavedRegistersSize =
4222 kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
4223 kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize +
4224 4 * target::kWordSize;
4225
4226 __ subi(SP, FP, kSavedRegistersSize);
4227
4228 __ PopRegisters(kRuntimeCallSavedRegisters);
4229 }
4230
4231 __ subi(SP, FP, 4 * target::kWordSize);
4232 __ lx(PP, Address(SP, 0 * target::kWordSize));
4233 __ lx(CODE_REG, Address(SP, 1 * target::kWordSize));
4234 __ lx(FP, Address(SP, 2 * target::kWordSize));
4235 __ lx(RA, Address(SP, 3 * target::kWordSize));
4236 __ addi(SP, SP, 4 * target::kWordSize);
4237}
4238
4239#undef __
4240
4241void Assembler::EnterCFrame(intptr_t frame_space) {
4242 // Already saved.
4243 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
4244 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
4245 COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_STATE));
4246 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
4247 // Need to save.
4248 COMPILE_ASSERT(!IsCalleeSavedRegister(PP));
4249
4250 // N.B. The ordering here is important. We must never write beyond SP or
4251 // it can be clobbered by a signal handler.
4252 subi(SP, SP, frame_space + 3 * target::kWordSize);
4253 sx(RA, Address(SP, frame_space + 2 * target::kWordSize));
4254 sx(FP, Address(SP, frame_space + 1 * target::kWordSize));
4255 sx(PP, Address(SP, frame_space + 0 * target::kWordSize));
4256 addi(FP, SP, frame_space + 3 * target::kWordSize);
4257 const intptr_t kAbiStackAlignment = 16; // For both 32 and 64 bit.
4258 andi(SP, SP, ~(kAbiStackAlignment - 1));
4259}
4260
4261void Assembler::LeaveCFrame() {
4262 // N.B. The ordering here is important. We must never read beyond SP or
4263 // it may have already been clobbered by a signal handler.
4264 subi(SP, FP, 3 * target::kWordSize);
4265 lx(PP, Address(SP, 0 * target::kWordSize));
4266 lx(FP, Address(SP, 1 * target::kWordSize));
4267 lx(RA, Address(SP, 2 * target::kWordSize));
4268 addi(SP, SP, 3 * target::kWordSize);
4269}
4270
4271// A0: Receiver
4272// S5: ICData entry array
4273// PP: Caller's PP (preserved)
4274void Assembler::MonomorphicCheckedEntryJIT() {
4275 has_monomorphic_entry_ = true;
4276 const intptr_t saved_far_branch_level = far_branch_level();
4277 set_far_branch_level(0);
4278 const intptr_t start = CodeSize();
4279
4280 Label immediate, miss;
4281 Bind(&miss);
4282 lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
4283 jr(TMP);
4284
4285 Comment("MonomorphicCheckedEntry");
4286 ASSERT_EQUAL(CodeSize() - start,
4287 target::Instructions::kMonomorphicEntryOffsetJIT);
4288
4289 Register entries_reg = IC_DATA_REG; // Contains ICData::entries().
4290 const intptr_t cid_offset = target::Array::element_offset(0);
4291 const intptr_t count_offset = target::Array::element_offset(1);
4292 ASSERT(A1 != PP);
4293 ASSERT(A1 != entries_reg);
4294 ASSERT(A1 != CODE_REG);
4295
4296 lx(TMP, FieldAddress(entries_reg, cid_offset));
4297 LoadTaggedClassIdMayBeSmi(A1, A0);
4298 bne(TMP, A1, &miss, kNearJump);
4299
4300 lx(TMP, FieldAddress(entries_reg, count_offset));
4301 addi(TMP, TMP, target::ToRawSmi(1));
4302 sx(TMP, FieldAddress(entries_reg, count_offset));
4303
4304 li(ARGS_DESC_REG, 0); // GC-safe for OptimizeInvokedFunction
4305
4306 // Fall through to unchecked entry.
4307 ASSERT_EQUAL(CodeSize() - start,
4308 target::Instructions::kPolymorphicEntryOffsetJIT);
4309
4310 set_far_branch_level(saved_far_branch_level);
4311}
4312
4313// A0 receiver, S5 guarded cid as Smi.
4314// Preserve S4 (ARGS_DESC_REG); not required today, but it may be later.
4315// PP: Caller's PP (preserved)
4316void Assembler::MonomorphicCheckedEntryAOT() {
4317 has_monomorphic_entry_ = true;
4318 intptr_t saved_far_branch_level = far_branch_level();
4319 set_far_branch_level(0);
4320
4321 const intptr_t start = CodeSize();
4322
4323 Label immediate, miss;
4324 Bind(&miss);
4325 lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
4326 jr(TMP);
4327
4328 Comment("MonomorphicCheckedEntry");
4329 ASSERT_EQUAL(CodeSize() - start,
4330 target::Instructions::kMonomorphicEntryOffsetAOT);
4331 LoadClassId(TMP, A0);
4332 SmiTag(TMP);
4333 bne(S5, TMP, &miss, kNearJump);
4334
4335 // Fall through to unchecked entry.
4336 ASSERT_EQUAL(CodeSize() - start,
4337 target::Instructions::kPolymorphicEntryOffsetAOT);
4338
4339 set_far_branch_level(saved_far_branch_level);
4340}
4341
4342void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
4343 has_monomorphic_entry_ = true;
4344 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
4345 ebreak();
4346 }
4347 j(label);
4348 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
4349 ebreak();
4350 }
4351}
4352
4353void Assembler::CombineHashes(Register hash, Register other) {
4354#if XLEN >= 64
4355 // hash += other_hash
4356 addw(hash, hash, other);
4357 // hash += hash << 10
4358 slliw(other, hash, 10);
4359 addw(hash, hash, other);
4360 // hash ^= hash >> 6
4361 srliw(other, hash, 6);
4362 xor_(hash, hash, other);
4363#else
4364 // hash += other_hash
4365 add(hash, hash, other);
4366 // hash += hash << 10
4367 slli(other, hash, 10);
4368 add(hash, hash, other);
4369 // hash ^= hash >> 6
4370 srli(other, hash, 6);
4371 xor_(hash, hash, other);
4372#endif
4373}
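// Illustration: this is the combining step of a Jenkins-style
// one-at-a-time hash; the equivalent C on uint32_t values is
//
//   hash += other;
//   hash += hash << 10;
//   hash ^= hash >> 6;
//
// On RV64 the 'w' instruction forms keep intermediates truncated to 32
// bits; on RV32 the plain forms already operate on 32-bit registers.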
4374
4375void Assembler::FinalizeHashForSize(intptr_t bit_size,
4376 Register hash,
4377 Register scratch) {
4378 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
4379 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
4380 // reasonably expect that the returned values fill the entire bit space.
4381 ASSERT(bit_size <= kBitsPerInt32);
4382 ASSERT(scratch != kNoRegister);
4383#if XLEN >= 64
4384 // hash += hash << 3;
4385 slliw(scratch, hash, 3);
4386 addw(hash, hash, scratch);
4387 // hash ^= hash >> 11; // Logical shift, unsigned hash.
4388 srliw(scratch, hash, 11);
4389 xor_(hash, hash, scratch);
4390 // hash += hash << 15;
4391 slliw(scratch, hash, 15);
4392 addw(hash, hash, scratch);
4393#else
4394 // hash += hash << 3;
4395 slli(scratch, hash, 3);
4396 add(hash, hash, scratch);
4397 // hash ^= hash >> 11; // Logical shift, unsigned hash.
4398 srli(scratch, hash, 11);
4399 xor_(hash, hash, scratch);
4400 // hash += hash << 15;
4401 slli(scratch, hash, 15);
4402 add(hash, hash, scratch);
4403#endif
4404 // Size to fit.
4405 if (bit_size < kBitsPerInt32) {
4406 AndImmediate(hash, hash, Utils::NBitMask(bit_size));
4407 }
4408 // return (hash == 0) ? 1 : hash;
4409 seqz(scratch, hash);
4410 add(hash, hash, scratch);
4411}
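// Illustration: the equivalent C finalization, for bit_size <= 32:
//
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (bit_size < 32) hash &= (1u << bit_size) - 1;
//   return hash == 0 ? 1 : hash;
//
// The closing seqz/add pair implements the zero check without a branch:
// seqz yields 1 exactly when hash is 0.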
4412
4413#ifndef PRODUCT
4414void Assembler::MaybeTraceAllocation(Register cid,
4415 Label* trace,
4416 Register temp_reg,
4417 JumpDistance distance) {
4418 LoadIsolateGroup(temp_reg);
4419 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4420 lx(temp_reg,
4421 Address(temp_reg,
4422 target::ClassTable::allocation_tracing_state_table_offset()));
4423 add(temp_reg, temp_reg, cid);
4424 LoadFromOffset(temp_reg, temp_reg,
4425 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
4426 kUnsignedByte);
4427 bnez(temp_reg, trace);
4428}
4429
4430void Assembler::MaybeTraceAllocation(intptr_t cid,
4431 Label* trace,
4432 Register temp_reg,
4433 JumpDistance distance) {
4434 ASSERT(cid > 0);
4435 LoadIsolateGroup(temp_reg);
4436 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4437 lx(temp_reg,
4438 Address(temp_reg,
4439 target::ClassTable::allocation_tracing_state_table_offset()));
4440 LoadFromOffset(temp_reg, temp_reg,
4441 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
4442 kUnsignedByte);
4443 bnez(temp_reg, trace);
4444}
4445#endif // !PRODUCT
4446
4447void Assembler::TryAllocateObject(intptr_t cid,
4448 intptr_t instance_size,
4449 Label* failure,
4450 JumpDistance distance,
4451 Register instance_reg,
4452 Register temp_reg) {
4453 ASSERT(failure != nullptr);
4454 ASSERT(instance_size != 0);
4455 ASSERT(instance_reg != temp_reg);
4456 ASSERT(temp_reg != kNoRegister);
4457 ASSERT(Utils::IsAligned(instance_size,
4458 target::ObjectAlignment::kObjectAlignment));
4459 if (FLAG_inline_alloc &&
4460 target::Heap::IsAllocatableInNewSpace(instance_size)) {
4461 // If this allocation is traced, the program will jump to the failure path
4462 // (i.e. the allocation stub) which will allocate the object and trace the
4463 // allocation call site.
4464 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
4465
4466 lx(instance_reg, Address(THR, target::Thread::top_offset()));
4467 lx(temp_reg, Address(THR, target::Thread::end_offset()));
4468 // instance_reg: current top (next object start).
4469 // temp_reg: heap end
4470
4471 // TODO(koda): Protect against unsigned overflow here.
4472 AddImmediate(instance_reg, instance_size);
4473 // instance_reg: potential top (next object start).
4474 // Fail if the heap end is unsigned less-than-or-equal to the new heap top.
4475 bleu(temp_reg, instance_reg, failure, distance);
4476 CheckAllocationCanary(instance_reg, temp_reg);
4477
4478 // Successfully allocated the object, now update top to point to
4479 // next object start and store the class in the class field of object.
4480 sx(instance_reg, Address(THR, target::Thread::top_offset()));
4481 // Move instance_reg back to the start of the object and tag it.
4482 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
4483
4484 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
4485 LoadImmediate(temp_reg, tags);
4486 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
4487 } else {
4488 j(failure, distance);
4489 }
4490}
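// Sketch of the inline bump allocation above, in C-like pseudocode
// (descriptive names; the real cursors are the Thread's top/end fields):
//
//   uword top = thread->top;
//   uword new_top = top + instance_size;
//   if (thread->end <= new_top) goto failure;      // the bleu above
//   thread->top = new_top;
//   object = top + kHeapObjectTag;                 // tagged pointer
//   object->tags = MakeTagWordForNewSpaceObject(cid, instance_size);
//
// instance_reg briefly holds new_top for the store to top_offset and is
// then rewound by -instance_size, so no extra register is needed.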
4491
4492void Assembler::TryAllocateArray(intptr_t cid,
4493 intptr_t instance_size,
4494 Label* failure,
4495 Register instance,
4496 Register end_address,
4497 Register temp1,
4498 Register temp2) {
4499 if (FLAG_inline_alloc &&
4500 target::Heap::IsAllocatableInNewSpace(instance_size)) {
4501 // If this allocation is traced, the program will jump to the failure path
4502 // (i.e. the allocation stub) which will allocate the object and trace the
4503 // allocation call site.
4504 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
4505 // Potential new object start.
4506 lx(instance, Address(THR, target::Thread::top_offset()));
4507 AddImmediate(end_address, instance, instance_size);
4508 bltu(end_address, instance, failure); // Fail on unsigned overflow.
4509
4510 // Check if the allocation fits into the remaining space.
4511 // instance: potential new object start.
4512 // end_address: potential next object start.
4513 lx(temp2, Address(THR, target::Thread::end_offset()));
4514 bgeu(end_address, temp2, failure);
4515 CheckAllocationCanary(instance, temp2);
4516
4517 // Successfully allocated the object(s), now update top to point to
4518 // next object start and initialize the object.
4519 sx(end_address, Address(THR, target::Thread::top_offset()));
4520 addi(instance, instance, kHeapObjectTag);
4521 NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
4522
4523 // Initialize the tags.
4524 // instance: new object start as a tagged pointer.
4525 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
4526 LoadImmediate(temp2, tags);
4527 sx(temp2, FieldAddress(instance, target::Object::tags_offset()));
4528 } else {
4529 j(failure);
4530 }
4531}
4532
4533void Assembler::CopyMemoryWords(Register src,
4534 Register dst,
4535 Register size,
4536 Register temp) {
4537 Label loop, done;
4538 beqz(size, &done, kNearJump);
4539 Bind(&loop);
4540 lx(temp, Address(src));
4541 addi(src, src, target::kWordSize);
4542 sx(temp, Address(dst));
4543 addi(dst, dst, target::kWordSize);
4544 subi(size, size, target::kWordSize);
4545 bnez(size, &loop, kNearJump);
4546 Bind(&done);
4547}
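// Illustration: the equivalent C loop; 'size' is in bytes and is assumed
// to be a non-negative multiple of the word size:
//
//   while (size != 0) {
//     *dst++ = *src++;        // one word at a time, via 'temp'
//     size -= sizeof(uword);
//   }
//
// Note that src, dst, and size are all clobbered.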
4548
4549void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
4550 // JAL only has a +/- 1MB range. AUIPC+JALR has a +/- 2GB range.
4551 intx_t lo = ImmLo(offset_into_target);
4552 intx_t hi = ImmHi(offset_into_target);
4553 auipc(RA, hi);
4554 jalr_fixed(RA, RA, lo);
4555}
4556
4557void Assembler::GenerateUnRelocatedPcRelativeTailCall(
4558 intptr_t offset_into_target) {
4559 // J only has a +/- 1MB range. AUIPC+JR has a +/- 2GB range.
4560 intx_t lo = ImmLo(offset_into_target);
4561 intx_t hi = ImmHi(offset_into_target);
4562 auipc(TMP, hi);
4563 jalr_fixed(ZR, TMP, lo);
4564}
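// Illustration: auipc adds a 20-bit immediate shifted left by 12, and
// jalr adds a sign-extended 12-bit immediate, so the offset must be split
// such that hi + lo == offset despite lo being sign-extended. A common
// formulation (assumed here for ImmLo/ImmHi) is
//
//   lo = sign_extend_12(offset & 0xfff);
//   hi = offset - lo;   // always a multiple of 0x1000
//
// e.g. offset 0x12345ABC gives lo = -0x544 and hi = 0x12346000.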
4565
4566bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
4567 bool is_external,
4568 intptr_t cid,
4569 intptr_t index_scale) {
4570 if (!IsSafeSmi(constant)) return false;
4571 const int64_t index = target::SmiValue(constant);
4572 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4573 if (IsITypeImm(offset)) {
4574 ASSERT(IsSTypeImm(offset));
4575 return true;
4576 }
4577 return false;
4578}
4579
4580Address Assembler::ElementAddressForIntIndex(bool is_external,
4581 intptr_t cid,
4582 intptr_t index_scale,
4583 Register array,
4584 intptr_t index) const {
4585 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4586 ASSERT(Utils::IsInt(32, offset));
4587 return Address(array, static_cast<int32_t>(offset));
4588}
4589void Assembler::ComputeElementAddressForIntIndex(Register address,
4590 bool is_external,
4591 intptr_t cid,
4592 intptr_t index_scale,
4593 Register array,
4594 intptr_t index) {
4595 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4596 AddImmediate(address, array, offset);
4597}
4598
4599Address Assembler::ElementAddressForRegIndex(bool is_external,
4600 intptr_t cid,
4601 intptr_t index_scale,
4602 bool index_unboxed,
4603 Register array,
4604 Register index,
4605 Register temp) {
4606 // Unless unboxed, the index is expected to be smi-tagged (i.e. LSL 1) for all arrays.
4607 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
4608 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4609 const int32_t offset = HeapDataOffset(is_external, cid);
4610 ASSERT(array != temp);
4611 ASSERT(index != temp);
4612 AddShifted(temp, array, index, shift);
4613 return Address(temp, offset);
4614}
4615
4616void Assembler::ComputeElementAddressForRegIndex(Register address,
4617 bool is_external,
4618 intptr_t cid,
4619 intptr_t index_scale,
4620 bool index_unboxed,
4621 Register array,
4622 Register index) {
4623 // If unboxed, index is expected smi-tagged, (i.e, LSL 1) for all arrays.
4624 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
4625 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4626 const int32_t offset = HeapDataOffset(is_external, cid);
4627 ASSERT(array != address);
4628 ASSERT(index != address);
4629 AddShifted(address, array, index, shift);
4630 if (offset != 0) {
4631 AddImmediate(address, address, offset);
4632 }
4633}
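// Worked example: for an element access with index_scale == 8 and a boxed
// (smi-tagged) index, shift = log2(8) - kSmiTagShift = 3 - 1 = 2. A smi
// already equals 2 * the untagged index, so array + (index << 2) equals
// array + untagged_index * 8 and no explicit untagging is needed.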
4634
4635void Assembler::LoadStaticFieldAddress(Register address,
4636 Register field,
4637 Register scratch,
4638 bool is_shared) {
4639 LoadCompressedSmiFieldFromOffset(
4640 scratch, field, target::Field::host_offset_or_field_id_offset());
4641 const intptr_t field_table_offset =
4642 is_shared ? compiler::target::Thread::shared_field_table_values_offset()
4643 : compiler::target::Thread::field_table_values_offset();
4644 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
4645 slli(scratch, scratch, target::kWordSizeLog2 - kSmiTagShift);
4646 add(address, address, scratch);
4647}
4648
4649void Assembler::LoadFieldAddressForRegOffset(Register address,
4650 Register instance,
4651 Register offset_in_words_as_smi) {
4652 AddShifted(address, instance, offset_in_words_as_smi,
4653 target::kWordSizeLog2 - kSmiTagShift);
4654 addi(address, address, -kHeapObjectTag);
4655}
4656
4657// Note: the function never clobbers TMP, TMP2 scratch registers.
4658void Assembler::LoadObjectHelper(
4659 Register dst,
4660 const Object& object,
4661 bool is_unique,
4662 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
4663 ASSERT(IsOriginalObject(object));
4664 // `is_unique == true` effectively means object has to be patchable.
4665 // (even if the object is null)
4666 if (!is_unique) {
4667 if (IsSameObject(compiler::NullObject(), object)) {
4668 mv(dst, NULL_REG);
4669 return;
4670 }
4671 if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
4672 AddImmediate(dst, NULL_REG, kTrueOffsetFromNull);
4673 return;
4674 }
4675 if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
4676 AddImmediate(dst, NULL_REG, kFalseOffsetFromNull);
4677 return;
4678 }
4679 word offset = 0;
4680 if (target::CanLoadFromThread(object, &offset)) {
4681 lx(dst, Address(THR, offset));
4682 return;
4683 }
4684 if (target::IsSmi(object)) {
4685 LoadImmediate(dst, target::ToRawSmi(object));
4686 return;
4687 }
4688 }
4689 RELEASE_ASSERT(CanLoadFromObjectPool(object));
4690 const intptr_t index =
4691 is_unique
4692 ? object_pool_builder().AddObject(
4693 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
4694 : object_pool_builder().FindObject(
4695 object, ObjectPoolBuilderEntry::kNotPatchable,
4696 snapshot_behavior);
4697 LoadWordFromPoolIndex(dst, index);
4698}
4699
4700void Assembler::AddImmediateBranchOverflow(Register rd,
4701 Register rs1,
4702 intx_t imm,
4703 Label* overflow) {
4704 ASSERT(rd != TMP2);
4705 if (rd == rs1) {
4706 mv(TMP2, rs1);
4707 AddImmediate(rd, rs1, imm);
4708 if (imm > 0) {
4709 blt(rd, TMP2, overflow);
4710 } else if (imm < 0) {
4711 bgt(rd, TMP2, overflow);
4712 }
4713 } else {
4714 AddImmediate(rd, rs1, imm);
4715 if (imm > 0) {
4716 blt(rd, rs1, overflow);
4717 } else if (imm < 0) {
4718 bgt(rd, rs1, overflow);
4719 }
4720 }
4721}
4722void Assembler::SubtractImmediateBranchOverflow(Register rd,
4723 Register rs1,
4724 intx_t imm,
4725 Label* overflow) {
4726 // TODO(riscv): Incorrect for MIN_INTX_T!
4727 AddImmediateBranchOverflow(rd, rs1, -imm, overflow);
4728}
4729void Assembler::MultiplyImmediateBranchOverflow(Register rd,
4730 Register rs1,
4731 intx_t imm,
4732 Label* overflow) {
4733 ASSERT(rd != TMP);
4734 ASSERT(rd != TMP2);
4735 ASSERT(rs1 != TMP);
4736 ASSERT(rs1 != TMP2);
4737
4738 LoadImmediate(TMP2, imm);
4739 // Macro-op fusion: when both products are needed, the recommended sequence
4740 // is mulh first.
4741 mulh(TMP, rs1, TMP2);
4742 mul(rd, rs1, TMP2);
4743 srai(TMP2, rd, XLEN - 1);
4744 bne(TMP, TMP2, overflow);
4745}
4746void Assembler::AddBranchOverflow(Register rd,
4747 Register rs1,
4748 Register rs2,
4749 Label* overflow) {
4750 ASSERT(rd != TMP);
4751 ASSERT(rd != TMP2);
4752 ASSERT(rs1 != TMP);
4753 ASSERT(rs1 != TMP2);
4754 ASSERT(rs2 != TMP);
4755 ASSERT(rs2 != TMP2);
4756
4757 if ((rd == rs1) && (rd == rs2)) {
4758 ASSERT(rs1 == rs2);
4759 mv(TMP, rs1);
4760 add(rd, rs1, rs2); // rs1, rs2 destroyed
4761 xor_(TMP, TMP, rd); // TMP negative if sign changed
4762 bltz(TMP, overflow);
4763 } else if (rs1 == rs2) {
4764 ASSERT(rd != rs1);
4765 ASSERT(rd != rs2);
4766 add(rd, rs1, rs2);
4767 xor_(TMP, rd, rs1); // TMP negative if sign changed
4768 bltz(TMP, overflow);
4769 } else if (rd == rs1) {
4770 ASSERT(rs1 != rs2);
4771 slti(TMP, rs1, 0);
4772 add(rd, rs1, rs2); // rs1 destroyed
4773 slt(TMP2, rd, rs2);
4774 bne(TMP, TMP2, overflow);
4775 } else if (rd == rs2) {
4776 ASSERT(rs1 != rs2);
4777 slti(TMP, rs2, 0);
4778 add(rd, rs1, rs2); // rs2 destroyed
4779 slt(TMP2, rd, rs1);
4780 bne(TMP, TMP2, overflow);
4781 } else {
4782 add(rd, rs1, rs2);
4783 slti(TMP, rs2, 0);
4784 slt(TMP2, rd, rs1);
4785 bne(TMP, TMP2, overflow);
4786 }
4787}
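// Illustration: the slti/slt pairs encode the rule that rd = rs1 + rs2
// overflows in signed arithmetic iff (rs2 < 0) != (rd < rs1), i.e. the
// result moved in the wrong direction relative to an operand:
//
//   bool overflow = (rs2 < 0) != (rd < rs1);
//
// The rs1 == rs2 cases instead test whether the sign bit changed, since
// adding a value to itself overflows exactly when the sign flips.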
4788
4789void Assembler::SubtractBranchOverflow(Register rd,
4790 Register rs1,
4791 Register rs2,
4792 Label* overflow) {
4793 ASSERT(rd != TMP);
4794 ASSERT(rd != TMP2);
4795 ASSERT(rs1 != TMP);
4796 ASSERT(rs1 != TMP2);
4797 ASSERT(rs2 != TMP);
4798 ASSERT(rs2 != TMP2);
4799
4800 if ((rd == rs1) && (rd == rs2)) {
4801 ASSERT(rs1 == rs2);
4802 mv(TMP, rs1);
4803 sub(rd, rs1, rs2); // rs1, rs2 destroyed
4804 xor_(TMP, TMP, rd); // TMP negative if sign changed
4805 bltz(TMP, overflow);
4806 } else if (rs1 == rs2) {
4807 ASSERT(rd != rs1);
4808 ASSERT(rd != rs2);
4809 sub(rd, rs1, rs2);
4810 xor_(TMP, rd, rs1); // TMP negative if sign changed
4811 bltz(TMP, overflow);
4812 } else if (rd == rs1) {
4813 ASSERT(rs1 != rs2);
4814 slti(TMP, rs1, 0);
4815 sub(rd, rs1, rs2); // rs1 destroyed
4816 slt(TMP2, rd, rs2);
4817 bne(TMP, TMP2, overflow);
4818 } else if (rd == rs2) {
4819 ASSERT(rs1 != rs2);
4820 slti(TMP, rs2, 0);
4821 sub(rd, rs1, rs2); // rs2 destroyed
4822 slt(TMP2, rd, rs1);
4823 bne(TMP, TMP2, overflow);
4824 } else {
4825 sub(rd, rs1, rs2);
4826 slti(TMP, rs2, 0);
4827 slt(TMP2, rs1, rd);
4828 bne(TMP, TMP2, overflow);
4829 }
4830}
4831
4832void Assembler::MultiplyBranchOverflow(Register rd,
4833 Register rs1,
4834 Register rs2,
4835 Label* overflow) {
4836 ASSERT(rd != TMP);
4837 ASSERT(rd != TMP2);
4838 ASSERT(rs1 != TMP);
4839 ASSERT(rs1 != TMP2);
4840 ASSERT(rs2 != TMP);
4841 ASSERT(rs2 != TMP2);
4842
4843 // Macro-op fusion: when both products are needed, the recommended sequence
4844 // is mulh first.
4845 mulh(TMP, rs1, rs2);
4846 mul(rd, rs1, rs2);
4847 srai(TMP2, rd, XLEN - 1);
4848 bne(TMP, TMP2, overflow);
4849}
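// Illustration: a signed product fits in XLEN bits iff the high half from
// mulh equals the sign-extension of the low half from mul:
//
//   overflow = (high != low >> (XLEN - 1));   // arithmetic shift
//
// For an in-range product the upper XLEN bits are all copies of the low
// half's sign bit, which is exactly what srai by XLEN - 1 reproduces.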
4850
4851void Assembler::CountLeadingZeroes(Register rd, Register rs) {
4852 if (Supports(RV_Zbb)) {
4853 clz(rd, rs);
4854 return;
4855 }
4856
4857 // n = XLEN
4858 // y = x >>32; if (y != 0) { n = n - 32; x = y; }
4859 // y = x >>16; if (y != 0) { n = n - 16; x = y; }
4860 // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
4861 // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
4862 // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
4863 // y = x >> 1; if (y != 0) { return n - 2; }
4864 // return n - x;
4865 Label l0, l1, l2, l3, l4, l5;
4866 li(TMP2, XLEN);
4867#if XLEN == 64
4868 srli(TMP, rs, 32);
4869 beqz(TMP, &l0, Assembler::kNearJump);
4870 subi(TMP2, TMP2, 32);
4871 mv(rs, TMP);
4872 Bind(&l0);
4873#endif
4874 srli(TMP, rs, 16);
4875 beqz(TMP, &l1, Assembler::kNearJump);
4876 subi(TMP2, TMP2, 16);
4877 mv(rs, TMP);
4878 Bind(&l1);
4879 srli(TMP, rs, 8);
4880 beqz(TMP, &l2, Assembler::kNearJump);
4881 subi(TMP2, TMP2, 8);
4882 mv(rs, TMP);
4883 Bind(&l2);
4884 srli(TMP, rs, 4);
4885 beqz(TMP, &l3, Assembler::kNearJump);
4886 subi(TMP2, TMP2, 4);
4887 mv(rs, TMP);
4888 Bind(&l3);
4889 srli(TMP, rs, 2);
4890 beqz(TMP, &l4, Assembler::kNearJump);
4891 subi(TMP2, TMP2, 2);
4892 mv(rs, TMP);
4893 Bind(&l4);
4894 srli(TMP, rs, 1);
4895 sub(rd, TMP2, rs);
4896 beqz(TMP, &l5, Assembler::kNearJump);
4897 subi(rd, TMP2, 2);
4898 Bind(&l5);
4899}
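// Illustration: the fallback is a binary search for the highest set bit,
// as in Hacker's Delight; the equivalent C is
//
//   int n = XLEN;
//   for (int shift = XLEN / 2; shift >= 2; shift /= 2) {
//     uintx_t y = x >> shift;
//     if (y != 0) { n -= shift; x = y; }
//   }
//   return (x >> 1) != 0 ? n - 2 : n - x;   // x is now in 0..3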
4900
4901void Assembler::RangeCheck(Register value,
4902 Register temp,
4903 intptr_t low,
4904 intptr_t high,
4905 RangeCheckCondition condition,
4906 Label* target) {
4907 auto cc = condition == kIfInRange ? LS : HI;
4908 Register to_check = temp != kNoRegister ? temp : value;
4909 AddImmediate(to_check, value, -low);
4910 CompareImmediate(to_check, high - low);
4911 BranchIf(cc, target);
4912}
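// Illustration: subtracting 'low' and comparing unsigned folds both bounds
// into one branch, because for any value v
//
//   (uintx_t)(v - low) <= (uintx_t)(high - low)  iff  low <= v && v <= high
//
// (out-of-range values wrap to large unsigned numbers). LS and HI are the
// unsigned <= and > conditions, so kIfInRange branches when v is inside
// [low, high] and the opposite condition when it is outside.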
4913
4914} // namespace compiler
4915
4916} // namespace dart
4917
4918#endif // defined(TARGET_ARCH_RISCV)