Flutter Engine
assembler_riscv.cc
1// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/globals.h" // NOLINT
6#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
7
8#define SHOULD_NOT_INCLUDE_RUNTIME
9
10#include "vm/compiler/assembler/assembler.h"
11
12#include "vm/cpu.h"
13#include "vm/instructions.h"
14#include "vm/simulator.h"
15#include "vm/tags.h"
16
17namespace dart {
18
19DECLARE_FLAG(bool, check_code_pointer);
20DECLARE_FLAG(bool, precompiled_mode);
21
22DEFINE_FLAG(int, far_branch_level, 0, "Always use far branches");
23
24namespace compiler {
25
26MicroAssembler::MicroAssembler(ObjectPoolBuilder* object_pool_builder,
27 intptr_t far_branch_level,
28 ExtensionSet extensions)
29 : AssemblerBase(object_pool_builder),
30 extensions_(extensions),
31 far_branch_level_(far_branch_level) {
32 ASSERT(far_branch_level >= 0);
33 ASSERT(far_branch_level <= 2);
34}
35
36MicroAssembler::~MicroAssembler() {}
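
// Note (illustrative sketch, not part of the upstream sources): far_branch_level
// controls how speculatively forward branches are emitted. Level 0 tries plain
// 4-byte B-type branches, level 1 an inverted branch over a JAL (8 bytes), and
// level 2 an inverted branch over AUIPC+JALR (12 bytes). A hypothetical driver
// could retry assembly at increasing levels when a branch-offset bailout occurs:
//
//   for (intptr_t level = 0; level <= 2; level++) {
//     MicroAssembler assembler(&object_pool_builder, level, extensions);
//     if (TryAssembleBody(&assembler)) break;  // TryAssembleBody: assumed helper.
//   }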
37
38void MicroAssembler::Bind(Label* label) {
39 ASSERT(!label->IsBound());
40 intptr_t target_position = Position();
41 intptr_t branch_position;
42
43#define BIND(head, update) \
44 branch_position = label->head; \
45 while (branch_position >= 0) { \
46 ASSERT(Utils::IsAligned(branch_position, Supports(RV_C) ? 2 : 4)); \
47 intptr_t new_offset = target_position - branch_position; \
48 ASSERT(Utils::IsAligned(new_offset, Supports(RV_C) ? 2 : 4)); \
49 intptr_t old_offset = update(branch_position, new_offset); \
50 if (old_offset == 0) break; \
51 branch_position -= old_offset; \
52 } \
53 label->head = -1
54
55 BIND(unresolved_cb_, UpdateCBOffset);
56 BIND(unresolved_cj_, UpdateCJOffset);
57 BIND(unresolved_b_, UpdateBOffset);
58 BIND(unresolved_j_, UpdateJOffset);
59 BIND(unresolved_far_, UpdateFarOffset);
60
61 label->BindTo(target_position);
62}
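
// How the fixup chains above work (sketch): each unresolved branch's immediate
// field temporarily stores the distance back to the previous unresolved branch
// on the same label, with 0 marking the end of the chain. Bind() rewrites each
// immediate to the real offset and recovers the next link from the returned old
// value. For two branches at positions 8 and 20 bound to a label at position 40:
//
//   branch at 20: stored link 12 (20 - 8); patched to 20 (40 - 20), then
//                 step back 12 bytes to the next branch
//   branch at 8:  stored link 0 (chain end); patched to 32 (40 - 8), done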
63
64intptr_t MicroAssembler::UpdateCBOffset(intptr_t branch_position,
65 intptr_t new_offset) {
66 CInstr instr(Read16(branch_position));
67 ASSERT((instr.opcode() == C_BEQZ) || (instr.opcode() == C_BNEZ));
68 intptr_t old_offset = instr.b_imm();
69 if (!IsCBImm(new_offset)) {
70 FATAL("Incorrect Assembler::kNearJump");
71 }
72 Write16(branch_position,
73 instr.opcode() | EncodeCRs1p(instr.rs1p()) | EncodeCBImm(new_offset));
74 return old_offset;
75}
76
77intptr_t MicroAssembler::UpdateCJOffset(intptr_t branch_position,
78 intptr_t new_offset) {
79 CInstr instr(Read16(branch_position));
80 ASSERT((instr.opcode() == C_J) || (instr.opcode() == C_JAL));
81 intptr_t old_offset = instr.j_imm();
82 if (!IsCJImm(new_offset)) {
83 FATAL("Incorrect Assembler::kNearJump");
84 }
85 Write16(branch_position, instr.opcode() | EncodeCJImm(new_offset));
86 return old_offset;
87}
88
89intptr_t MicroAssembler::UpdateBOffset(intptr_t branch_position,
90 intptr_t new_offset) {
91 Instr instr(Read32(branch_position));
92 ASSERT(instr.opcode() == BRANCH);
93 intptr_t old_offset = instr.btype_imm();
94 if (!IsBTypeImm(new_offset)) {
95 BailoutWithBranchOffsetError();
96 }
97 Write32(branch_position, EncodeRs2(instr.rs2()) | EncodeRs1(instr.rs1()) |
98 EncodeFunct3(instr.funct3()) |
99 EncodeOpcode(instr.opcode()) |
100 EncodeBTypeImm(new_offset));
101 return old_offset;
102}
103
104intptr_t MicroAssembler::UpdateJOffset(intptr_t branch_position,
105 intptr_t new_offset) {
106 Instr instr(Read32(branch_position));
107 ASSERT(instr.opcode() == JAL);
108 intptr_t old_offset = instr.jtype_imm();
109 if (!IsJTypeImm(new_offset)) {
110 BailoutWithBranchOffsetError();
111 }
112 Write32(branch_position, EncodeRd(instr.rd()) | EncodeOpcode(instr.opcode()) |
113 EncodeJTypeImm(new_offset));
114 return old_offset;
115}
116
117intptr_t MicroAssembler::UpdateFarOffset(intptr_t branch_position,
118 intptr_t new_offset) {
119 Instr auipc_instr(Read32(branch_position));
120 ASSERT(auipc_instr.opcode() == AUIPC);
121 ASSERT(auipc_instr.rd() == FAR_TMP);
122 Instr jr_instr(Read32(branch_position + 4));
123 ASSERT(jr_instr.opcode() == JALR);
124 ASSERT(jr_instr.rd() == ZR);
125 ASSERT(jr_instr.funct3() == F3_0);
126 ASSERT(jr_instr.rs1() == FAR_TMP);
127 intptr_t old_offset = auipc_instr.utype_imm() + jr_instr.itype_imm();
128 intx_t lo = ImmLo(new_offset);
129 intx_t hi = ImmHi(new_offset);
130 if (!IsUTypeImm(hi)) {
131 FATAL("Jump/branch distance exceeds 2GB!");
132 }
133 Write32(branch_position,
134 EncodeUTypeImm(hi) | EncodeRd(FAR_TMP) | EncodeOpcode(AUIPC));
135 Write32(branch_position + 4, EncodeITypeImm(lo) | EncodeRs1(FAR_TMP) |
136 EncodeFunct3(F3_0) | EncodeRd(ZR) |
137 EncodeOpcode(JALR));
138 return old_offset;
139}
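
// Worked example for the AUIPC+JALR pair above (ImmLo/ImmHi behave like the
// standard RISC-V %lo/%hi split): JALR sign-extends its 12-bit immediate, so
// the upper immediate must absorb the borrow. For new_offset = 0x12FFF:
//
//   lo = 0xFFF sign-extended = -1
//   hi = 0x12FFF - (-1) = 0x13000   (AUIPC immediate)
//   hi + lo = 0x12FFF               (full offset reproduced)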
140
141void MicroAssembler::lui(Register rd, intptr_t imm) {
142 ASSERT(Supports(RV_I));
143 if (Supports(RV_C) && (rd != ZR) && (rd != SP) && IsCUImm(imm)) {
144 c_lui(rd, imm);
145 return;
146 }
147 EmitUType(imm, rd, LUI);
148}
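
// Compression note (sketch): c.lui is 2 bytes instead of 4, but cannot target
// ZR or SP and only encodes a non-zero immediate fitting the 6 upper bits,
// hence the guards above. For instance, lui(T0, 0x1000) can compress, while
// lui(SP, 0x1000) must use the full U-type encoding.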
149
150void MicroAssembler::lui_fixed(Register rd, intptr_t imm) {
151 ASSERT(Supports(RV_I));
152 EmitUType(imm, rd, LUI);
153}
154
155void MicroAssembler::auipc(Register rd, intptr_t imm) {
156 ASSERT(Supports(RV_I));
157 EmitUType(imm, rd, AUIPC);
158}
159
160void MicroAssembler::jal(Register rd, Label* label, JumpDistance distance) {
161 ASSERT(Supports(RV_I));
162 if (Supports(RV_C) &&
163 ((distance == kNearJump) ||
164 (label->IsBound() && IsCJImm(label->Position() - Position())))) {
165 if (rd == ZR) {
166 c_j(label);
167 return;
168 }
169#if XLEN == 32
170 if (rd == RA) {
171 c_jal(label);
172 return;
173 }
174#endif // XLEN == 32
175 }
176 EmitJump(rd, label, JAL, distance);
177}
178
179void MicroAssembler::jalr(Register rd, Register rs1, intptr_t offset) {
180 ASSERT(Supports(RV_I));
181 if (Supports(RV_C)) {
182 if (rs1 != ZR && offset == 0) {
183 if (rd == ZR) {
184 c_jr(rs1);
185 return;
186 } else if (rd == RA) {
187 c_jalr(rs1);
188 return;
189 }
190 }
191 }
192 EmitIType(offset, rs1, F3_0, rd, JALR);
193}
194
195void MicroAssembler::jalr_fixed(Register rd, Register rs1, intptr_t offset) {
196 ASSERT(Supports(RV_I));
197 EmitIType(offset, rs1, F3_0, rd, JALR);
198}
199
200void MicroAssembler::beq(Register rs1,
201 Register rs2,
202 Label* label,
203 JumpDistance distance) {
204 ASSERT(Supports(RV_I));
205 if (Supports(RV_C) &&
206 ((distance == kNearJump) ||
207 (label->IsBound() && IsCBImm(label->Position() - Position())))) {
208 if ((rs1 == ZR) && IsCRs1p(rs2)) {
209 c_beqz(rs2, label);
210 return;
211 } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
212 c_beqz(rs1, label);
213 return;
214 }
215 }
216 EmitBranch(rs1, rs2, label, BEQ, distance);
217}
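
// Sketch: comparisons against zero dominate in practice, so beq compresses to
// c.beqz whenever the other operand is one of the eight compressed registers
// (S0, S1, A0-A5) and the target is in c.beqz range; e.g.
// beq(A0, ZR, &is_zero, kNearJump) emits a 2-byte c.beqz.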
218
219void MicroAssembler::bne(Register rs1,
220 Register rs2,
221 Label* label,
222 JumpDistance distance) {
223 ASSERT(Supports(RV_I));
224 if (Supports(RV_C) &&
225 ((distance == kNearJump) ||
226 (label->IsBound() && IsCBImm(label->Position() - Position())))) {
227 if ((rs1 == ZR) && IsCRs1p(rs2)) {
228 c_bnez(rs2, label);
229 return;
230 } else if ((rs2 == ZR) && IsCRs1p(rs1)) {
231 c_bnez(rs1, label);
232 return;
233 }
234 }
235 EmitBranch(rs1, rs2, label, BNE, distance);
236}
237
238void MicroAssembler::blt(Register rs1,
239 Register rs2,
240 Label* label,
241 JumpDistance distance) {
242 ASSERT(Supports(RV_I));
243 EmitBranch(rs1, rs2, label, BLT, distance);
244}
245
246void MicroAssembler::bge(Register rs1,
247 Register rs2,
248 Label* label,
249 JumpDistance distance) {
250 ASSERT(Supports(RV_I));
251 EmitBranch(rs1, rs2, label, BGE, distance);
252}
253
254void MicroAssembler::bltu(Register rs1,
255 Register rs2,
256 Label* label,
257 JumpDistance distance) {
258 ASSERT(Supports(RV_I));
259 EmitBranch(rs1, rs2, label, BLTU, distance);
260}
261
262void MicroAssembler::bgeu(Register rs1,
263 Register rs2,
264 Label* label,
265 JumpDistance distance) {
266 EmitBranch(rs1, rs2, label, BGEU, distance);
267}
268
269void MicroAssembler::lb(Register rd, Address addr) {
270 ASSERT(Supports(RV_I));
271 EmitIType(addr.offset(), addr.base(), LB, rd, LOAD);
272}
273
274void MicroAssembler::lh(Register rd, Address addr) {
275 ASSERT(Supports(RV_I));
276 EmitIType(addr.offset(), addr.base(), LH, rd, LOAD);
277}
278
279void MicroAssembler::lw(Register rd, Address addr) {
280 ASSERT(Supports(RV_I));
281 if (Supports(RV_C)) {
282 if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
283 c_lwsp(rd, addr);
284 return;
285 }
286 if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
287 c_lw(rd, addr);
288 return;
289 }
290 }
291 EmitIType(addr.offset(), addr.base(), LW, rd, LOAD);
292}
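
// Sketch: word loads compress in two shapes, c.lwsp for SP-relative slots (any
// destination except ZR, offset a multiple of 4 up to 252) and c.lw through a
// compressed base register (offset a multiple of 4 up to 124); everything else
// falls back to the 4-byte LW.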
293
294void MicroAssembler::lbu(Register rd, Address addr) {
295 ASSERT(Supports(RV_I));
296 EmitIType(addr.offset(), addr.base(), LBU, rd, LOAD);
297}
298
299void MicroAssembler::lhu(Register rd, Address addr) {
300 ASSERT(Supports(RV_I));
301 EmitIType(addr.offset(), addr.base(), LHU, rd, LOAD);
302}
303
304void MicroAssembler::sb(Register rs2, Address addr) {
305 ASSERT(Supports(RV_I));
306 EmitSType(addr.offset(), rs2, addr.base(), SB, STORE);
307}
308
309void MicroAssembler::sh(Register rs2, Address addr) {
310 ASSERT(Supports(RV_I));
311 EmitSType(addr.offset(), rs2, addr.base(), SH, STORE);
312}
313
314void MicroAssembler::sw(Register rs2, Address addr) {
315 ASSERT(Supports(RV_I));
316 if (Supports(RV_C)) {
317 if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
318 c_swsp(rs2, addr);
319 return;
320 }
321 if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
322 c_sw(rs2, addr);
323 return;
324 }
325 }
326 EmitSType(addr.offset(), rs2, addr.base(), SW, STORE);
327}
328
329void MicroAssembler::addi(Register rd, Register rs1, intptr_t imm) {
330 ASSERT(Supports(RV_I));
331 if (Supports(RV_C)) {
332 if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
333 c_li(rd, imm);
334 return;
335 }
336 if ((rd == rs1) && IsCIImm(imm) && (imm != 0)) {
337 c_addi(rd, rs1, imm);
338 return;
339 }
340 if ((rd == SP) && (rs1 == SP) && IsCI16Imm(imm) && (imm != 0)) {
341 c_addi16sp(rd, rs1, imm);
342 return;
343 }
344 if (IsCRdp(rd) && (rs1 == SP) && IsCI4SPNImm(imm) && (imm != 0)) {
345 c_addi4spn(rd, rs1, imm);
346 return;
347 }
348 if (imm == 0) {
349 if ((rd == ZR) && (rs1 == ZR)) {
350 c_nop();
351 return;
352 }
353 if ((rd != ZR) && (rs1 != ZR)) {
354 c_mv(rd, rs1);
355 return;
356 }
357 }
358 }
359 EmitIType(imm, rs1, ADDI, rd, OPIMM);
360}
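
// The cascade above picks the shortest encoding (summary sketch):
//
//   addi(A0, ZR, 5);    // c.li a0, 5
//   addi(A0, A0, 4);    // c.addi a0, 4
//   addi(SP, SP, -32);  // c.addi16sp sp, -32
//   addi(A0, A1, 0);    // c.mv a0, a1
//   addi(ZR, ZR, 0);    // c.nop
//
// Anything that misses every special case emits the full 4-byte ADDI.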
361
362void MicroAssembler::slti(Register rd, Register rs1, intptr_t imm) {
363 ASSERT(Supports(RV_I));
364 EmitIType(imm, rs1, SLTI, rd, OPIMM);
365}
366
367void MicroAssembler::sltiu(Register rd, Register rs1, intptr_t imm) {
368 ASSERT(Supports(RV_I));
369 EmitIType(imm, rs1, SLTIU, rd, OPIMM);
370}
371
372void MicroAssembler::xori(Register rd, Register rs1, intptr_t imm) {
373 ASSERT(Supports(RV_I));
374 EmitIType(imm, rs1, XORI, rd, OPIMM);
375}
376
377void MicroAssembler::ori(Register rd, Register rs1, intptr_t imm) {
378 ASSERT(Supports(RV_I));
379 EmitIType(imm, rs1, ORI, rd, OPIMM);
380}
381
382void MicroAssembler::andi(Register rd, Register rs1, intptr_t imm) {
383 ASSERT(Supports(RV_I));
384 if (Supports(RV_C)) {
385 if ((rd == rs1) && IsCRs1p(rs1) && IsCIImm(imm)) {
386 c_andi(rd, rs1, imm);
387 return;
388 }
389 }
390 EmitIType(imm, rs1, ANDI, rd, OPIMM);
391}
392
393void MicroAssembler::slli(Register rd, Register rs1, intptr_t shamt) {
394 ASSERT((shamt > 0) && (shamt < XLEN));
395 ASSERT(Supports(RV_I));
396 if (Supports(RV_C)) {
397 if ((rd == rs1) && (shamt != 0) && IsCIImm(shamt)) {
398 c_slli(rd, rs1, shamt);
399 return;
400 }
401 }
402 EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM);
403}
404
405void MicroAssembler::srli(Register rd, Register rs1, intptr_t shamt) {
406 ASSERT((shamt > 0) && (shamt < XLEN));
407 ASSERT(Supports(RV_I));
408 if (Supports(RV_C)) {
409 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
410 c_srli(rd, rs1, shamt);
411 return;
412 }
413 }
414 EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM);
415}
416
417void MicroAssembler::srai(Register rd, Register rs1, intptr_t shamt) {
418 ASSERT((shamt > 0) && (shamt < XLEN));
419 ASSERT(Supports(RV_I));
420 if (Supports(RV_C)) {
421 if ((rd == rs1) && IsCRs1p(rs1) && (shamt != 0) && IsCIImm(shamt)) {
422 c_srai(rd, rs1, shamt);
423 return;
424 }
425 }
426 EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM);
427}
428
429void MicroAssembler::add(Register rd, Register rs1, Register rs2) {
430 ASSERT(Supports(RV_I));
431 if (Supports(RV_C)) {
432 if (rd == rs1) {
433 c_add(rd, rs1, rs2);
434 return;
435 }
436 if (rd == rs2) {
437 c_add(rd, rs2, rs1);
438 return;
439 }
440 }
441 EmitRType(F7_0, rs2, rs1, ADD, rd, OP);
442}
443
444void MicroAssembler::sub(Register rd, Register rs1, Register rs2) {
445 ASSERT(Supports(RV_I));
446 if (Supports(RV_C)) {
447 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
448 c_sub(rd, rs1, rs2);
449 return;
450 }
451 }
452 EmitRType(SUB, rs2, rs1, ADD, rd, OP);
453}
454
455void MicroAssembler::sll(Register rd, Register rs1, Register rs2) {
456 ASSERT(Supports(RV_I));
457 EmitRType(F7_0, rs2, rs1, SLL, rd, OP);
458}
459
460void MicroAssembler::slt(Register rd, Register rs1, Register rs2) {
461 ASSERT(Supports(RV_I));
462 EmitRType(F7_0, rs2, rs1, SLT, rd, OP);
463}
464
465void MicroAssembler::sltu(Register rd, Register rs1, Register rs2) {
466 ASSERT(Supports(RV_I));
467 EmitRType(F7_0, rs2, rs1, SLTU, rd, OP);
468}
469
470void MicroAssembler::xor_(Register rd, Register rs1, Register rs2) {
471 ASSERT(Supports(RV_I));
472 if (Supports(RV_C)) {
473 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
474 c_xor(rd, rs1, rs2);
475 return;
476 }
477 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
478 c_xor(rd, rs2, rs1);
479 return;
480 }
481 }
482 EmitRType(F7_0, rs2, rs1, XOR, rd, OP);
483}
484
485void MicroAssembler::srl(Register rd, Register rs1, Register rs2) {
486 ASSERT(Supports(RV_I));
487 EmitRType(F7_0, rs2, rs1, SR, rd, OP);
488}
489
490void MicroAssembler::sra(Register rd, Register rs1, Register rs2) {
491 ASSERT(Supports(RV_I));
492 EmitRType(SRA, rs2, rs1, SR, rd, OP);
493}
494
495void MicroAssembler::or_(Register rd, Register rs1, Register rs2) {
496 ASSERT(Supports(RV_I));
497 if (Supports(RV_C)) {
498 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
499 c_or(rd, rs1, rs2);
500 return;
501 }
502 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
503 c_or(rd, rs2, rs1);
504 return;
505 }
506 }
507 EmitRType(F7_0, rs2, rs1, OR, rd, OP);
508}
509
510void MicroAssembler::and_(Register rd, Register rs1, Register rs2) {
511 ASSERT(Supports(RV_I));
512 if (Supports(RV_C)) {
513 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
514 c_and(rd, rs1, rs2);
515 return;
516 }
517 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
518 c_and(rd, rs2, rs1);
519 return;
520 }
521 }
522 EmitRType(F7_0, rs2, rs1, AND, rd, OP);
523}
524
525void MicroAssembler::fence(HartEffects predecessor, HartEffects successor) {
526 ASSERT((predecessor & kAll) == predecessor);
527 ASSERT((successor & kAll) == successor);
528 ASSERT(Supports(RV_I));
529 EmitIType((predecessor << 4) | successor, ZR, FENCE, ZR, MISCMEM);
530}
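
// Encoding sketch for fence(): the predecessor set occupies bits 7..4 of the
// FENCE immediate and the successor set bits 3..0 (each set being the I/O/R/W
// flags), which is exactly what (predecessor << 4) | successor assembles.
// A full barrier passes kAll for both sets.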
531
532void MicroAssembler::fencei() {
533 ASSERT(Supports(RV_I));
534 EmitIType(0, ZR, FENCEI, ZR, MISCMEM);
535}
536
537void MicroAssembler::ecall() {
538 ASSERT(Supports(RV_I));
539 EmitIType(ECALL, ZR, F3_0, ZR, SYSTEM);
540}
541void MicroAssembler::ebreak() {
542 ASSERT(Supports(RV_I));
543 if (Supports(RV_C)) {
544 c_ebreak();
545 return;
546 }
547 EmitIType(EBREAK, ZR, F3_0, ZR, SYSTEM);
548}
549void MicroAssembler::SimulatorPrintObject(Register rs1) {
550 ASSERT(Supports(RV_I));
551 EmitIType(ECALL, rs1, F3_0, ZR, SYSTEM);
552}
553
554void MicroAssembler::csrrw(Register rd, uint32_t csr, Register rs1) {
555 ASSERT(Supports(RV_I));
556 EmitIType(csr, rs1, CSRRW, rd, SYSTEM);
557}
558
559void MicroAssembler::csrrs(Register rd, uint32_t csr, Register rs1) {
560 ASSERT(Supports(RV_I));
561 EmitIType(csr, rs1, CSRRS, rd, SYSTEM);
562}
563
564void MicroAssembler::csrrc(Register rd, uint32_t csr, Register rs1) {
565 ASSERT(Supports(RV_I));
566 EmitIType(csr, rs1, CSRRC, rd, SYSTEM);
567}
568
569void MicroAssembler::csrrwi(Register rd, uint32_t csr, uint32_t imm) {
570 ASSERT(Supports(RV_I));
571 EmitIType(csr, Register(imm), CSRRWI, rd, SYSTEM);
572}
573
574void MicroAssembler::csrrsi(Register rd, uint32_t csr, uint32_t imm) {
575 ASSERT(Supports(RV_I));
576 EmitIType(csr, Register(imm), CSRRSI, rd, SYSTEM);
577}
578
579void MicroAssembler::csrrci(Register rd, uint32_t csr, uint32_t imm) {
580 ASSERT(Supports(RV_I));
581 EmitIType(csr, Register(imm), CSRRCI, rd, SYSTEM);
582}
583
584void MicroAssembler::trap() {
585 ASSERT(Supports(RV_I));
586 if (Supports(RV_C)) {
587 Emit16(0); // Permanently reserved illegal instruction.
588 } else {
589 Emit32(0); // Permanently reserved illegal instruction.
590 }
591}
592
593#if XLEN >= 64
594void MicroAssembler::lwu(Register rd, Address addr) {
595 ASSERT(Supports(RV_I));
596 EmitIType(addr.offset(), addr.base(), LWU, rd, LOAD);
597}
598
599void MicroAssembler::ld(Register rd, Address addr) {
600 ASSERT(Supports(RV_I));
601 if (Supports(RV_C)) {
602 if ((rd != ZR) && (addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
603 c_ldsp(rd, addr);
604 return;
605 }
606 if (IsCRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
607 c_ld(rd, addr);
608 return;
609 }
610 }
611 EmitIType(addr.offset(), addr.base(), LD, rd, LOAD);
612}
613
614void MicroAssembler::sd(Register rs2, Address addr) {
615 ASSERT(Supports(RV_I));
616 if (Supports(RV_C)) {
617 if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
618 c_sdsp(rs2, addr);
619 return;
620 }
621 if (IsCRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
622 c_sd(rs2, addr);
623 return;
624 }
625 }
626 EmitSType(addr.offset(), rs2, addr.base(), SD, STORE);
627}
628
629void MicroAssembler::addiw(Register rd, Register rs1, intptr_t imm) {
630 ASSERT(Supports(RV_I));
631 if (Supports(RV_C)) {
632 if ((rd != ZR) && (rs1 == ZR) && IsCIImm(imm)) {
633 c_li(rd, imm);
634 return;
635 }
636 if ((rd == rs1) && (rd != ZR) && IsCIImm(imm)) {
637 c_addiw(rd, rs1, imm);
638 return;
639 }
640 }
641 EmitIType(imm, rs1, ADDI, rd, OPIMM32);
642}
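
// Note: the W-form ops in this XLEN >= 64 block operate on the low 32 bits and
// sign-extend the result. For example, addiw(A0, A0, 0) is the canonical
// sext.w idiom, narrowing a 64-bit value to its sign-extended low word.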
643
644void MicroAssembler::slliw(Register rd, Register rs1, intptr_t shamt) {
645 ASSERT((shamt > 0) && (shamt < 32));
646 ASSERT(Supports(RV_I));
647 EmitRType(F7_0, shamt, rs1, SLLI, rd, OPIMM32);
648}
649
650void MicroAssembler::srliw(Register rd, Register rs1, intptr_t shamt) {
651 ASSERT((shamt > 0) && (shamt < 32));
652 ASSERT(Supports(RV_I));
653 EmitRType(F7_0, shamt, rs1, SRI, rd, OPIMM32);
654}
655
656void MicroAssembler::sraiw(Register rd, Register rs1, intptr_t shamt) {
657 ASSERT((shamt > 0) && (shamt < 32));
658 ASSERT(Supports(RV_I));
659 EmitRType(SRA, shamt, rs1, SRI, rd, OPIMM32);
660}
661
662void MicroAssembler::addw(Register rd, Register rs1, Register rs2) {
663 ASSERT(Supports(RV_I));
664 if (Supports(RV_C)) {
665 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
666 c_addw(rd, rs1, rs2);
667 return;
668 }
669 if ((rd == rs2) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
670 c_addw(rd, rs2, rs1);
671 return;
672 }
673 }
674 EmitRType(F7_0, rs2, rs1, ADD, rd, OP32);
675}
676
677void MicroAssembler::subw(Register rd, Register rs1, Register rs2) {
678 ASSERT(Supports(RV_I));
679 if (Supports(RV_C)) {
680 if ((rd == rs1) && IsCRs1p(rs1) && IsCRs2p(rs2)) {
681 c_subw(rd, rs1, rs2);
682 return;
683 }
684 }
685 EmitRType(SUB, rs2, rs1, ADD, rd, OP32);
686}
687
688void MicroAssembler::sllw(Register rd, Register rs1, Register rs2) {
689 ASSERT(Supports(RV_I));
690 EmitRType(F7_0, rs2, rs1, SLL, rd, OP32);
691}
692
693void MicroAssembler::srlw(Register rd, Register rs1, Register rs2) {
694 ASSERT(Supports(RV_I));
695 EmitRType(F7_0, rs2, rs1, SR, rd, OP32);
696}
697void MicroAssembler::sraw(Register rd, Register rs1, Register rs2) {
698 ASSERT(Supports(RV_I));
699 EmitRType(SRA, rs2, rs1, SR, rd, OP32);
700}
701#endif // XLEN >= 64
702
703void MicroAssembler::mul(Register rd, Register rs1, Register rs2) {
704 ASSERT(Supports(RV_M));
705 EmitRType(MULDIV, rs2, rs1, MUL, rd, OP);
706}
707
708void MicroAssembler::mulh(Register rd, Register rs1, Register rs2) {
709 ASSERT(Supports(RV_M));
710 EmitRType(MULDIV, rs2, rs1, MULH, rd, OP);
711}
712
713void MicroAssembler::mulhsu(Register rd, Register rs1, Register rs2) {
714 ASSERT(Supports(RV_M));
715 EmitRType(MULDIV, rs2, rs1, MULHSU, rd, OP);
716}
717
718void MicroAssembler::mulhu(Register rd, Register rs1, Register rs2) {
719 ASSERT(Supports(RV_M));
720 EmitRType(MULDIV, rs2, rs1, MULHU, rd, OP);
721}
722
723void MicroAssembler::div(Register rd, Register rs1, Register rs2) {
724 ASSERT(Supports(RV_M));
725 EmitRType(MULDIV, rs2, rs1, DIV, rd, OP);
726}
727
728void MicroAssembler::divu(Register rd, Register rs1, Register rs2) {
729 ASSERT(Supports(RV_M));
730 EmitRType(MULDIV, rs2, rs1, DIVU, rd, OP);
731}
732
733void MicroAssembler::rem(Register rd, Register rs1, Register rs2) {
734 ASSERT(Supports(RV_M));
735 EmitRType(MULDIV, rs2, rs1, REM, rd, OP);
736}
737
738void MicroAssembler::remu(Register rd, Register rs1, Register rs2) {
739 ASSERT(Supports(RV_M));
740 EmitRType(MULDIV, rs2, rs1, REMU, rd, OP);
741}
742
743#if XLEN >= 64
744void MicroAssembler::mulw(Register rd, Register rs1, Register rs2) {
745 ASSERT(Supports(RV_M));
746 EmitRType(MULDIV, rs2, rs1, MULW, rd, OP32);
747}
748
749void MicroAssembler::divw(Register rd, Register rs1, Register rs2) {
750 ASSERT(Supports(RV_M));
751 EmitRType(MULDIV, rs2, rs1, DIVW, rd, OP32);
752}
753
754void MicroAssembler::divuw(Register rd, Register rs1, Register rs2) {
755 ASSERT(Supports(RV_M));
756 EmitRType(MULDIV, rs2, rs1, DIVUW, rd, OP32);
757}
758
759void MicroAssembler::remw(Register rd, Register rs1, Register rs2) {
760 ASSERT(Supports(RV_M));
761 EmitRType(MULDIV, rs2, rs1, REMW, rd, OP32);
762}
763
764void MicroAssembler::remuw(Register rd, Register rs1, Register rs2) {
765 ASSERT(Supports(RV_M));
766 EmitRType(MULDIV, rs2, rs1, REMUW, rd, OP32);
767}
768#endif // XLEN >= 64
769
770void MicroAssembler::lrw(Register rd, Address addr, std::memory_order order) {
771 ASSERT(addr.offset() == 0);
772 ASSERT(Supports(RV_A));
773 EmitRType(LR, order, ZR, addr.base(), WIDTH32, rd, AMO);
774}
775void MicroAssembler::scw(Register rd,
776 Register rs2,
777 Address addr,
778 std::memory_order order) {
779 ASSERT(addr.offset() == 0);
780 ASSERT(Supports(RV_A));
781 EmitRType(SC, order, rs2, addr.base(), WIDTH32, rd, AMO);
782}
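
// Usage sketch (assuming the std::memory_order argument maps onto the LR/SC
// aq/rl bits): a compare-and-swap loop built from the two primitives above.
// Registers and labels here are illustrative.
//
//   Label retry, done;
//   Bind(&retry);
//   lrw(T0, Address(A0, 0), std::memory_order_acquire);  // load reserved
//   bne(T0, A1, &done);              // not the expected value: give up
//   scw(T1, A2, Address(A0, 0), std::memory_order_release);
//   bne(T1, ZR, &retry);             // SC wrote nonzero: reservation lost
//   Bind(&done);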
783
784void MicroAssembler::amoswapw(Register rd,
785 Register rs2,
786 Address addr,
787 std::memory_order order) {
788 ASSERT(addr.offset() == 0);
789 ASSERT(Supports(RV_A));
790 EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH32, rd, AMO);
791}
792
793void MicroAssembler::amoaddw(Register rd,
794 Register rs2,
795 Address addr,
796 std::memory_order order) {
797 ASSERT(addr.offset() == 0);
798 ASSERT(Supports(RV_A));
799 EmitRType(AMOADD, order, rs2, addr.base(), WIDTH32, rd, AMO);
800}
801
802void MicroAssembler::amoxorw(Register rd,
803 Register rs2,
804 Address addr,
805 std::memory_order order) {
806 ASSERT(addr.offset() == 0);
807 ASSERT(Supports(RV_A));
808 EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
809}
810
811void MicroAssembler::amoandw(Register rd,
812 Register rs2,
813 Address addr,
814 std::memory_order order) {
815 ASSERT(addr.offset() == 0);
816 ASSERT(Supports(RV_A));
817 EmitRType(AMOAND, order, rs2, addr.base(), WIDTH32, rd, AMO);
818}
819
820void MicroAssembler::amoorw(Register rd,
821 Register rs2,
822 Address addr,
823 std::memory_order order) {
824 ASSERT(addr.offset() == 0);
825 ASSERT(Supports(RV_A));
826 EmitRType(AMOOR, order, rs2, addr.base(), WIDTH32, rd, AMO);
827}
828
829void MicroAssembler::amominw(Register rd,
830 Register rs2,
831 Address addr,
832 std::memory_order order) {
833 ASSERT(addr.offset() == 0);
834 ASSERT(Supports(RV_A));
835 EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH32, rd, AMO);
836}
837
838void MicroAssembler::amomaxw(Register rd,
839 Register rs2,
840 Address addr,
841 std::memory_order order) {
842 ASSERT(addr.offset() == 0);
843 ASSERT(Supports(RV_A));
844 EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH32, rd, AMO);
845}
846
847void MicroAssembler::amominuw(Register rd,
848 Register rs2,
849 Address addr,
850 std::memory_order order) {
851 ASSERT(addr.offset() == 0);
852 ASSERT(Supports(RV_A));
853 EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH32, rd, AMO);
854}
855
856void MicroAssembler::amomaxuw(Register rd,
857 Register rs2,
858 Address addr,
859 std::memory_order order) {
860 ASSERT(addr.offset() == 0);
861 ASSERT(Supports(RV_A));
862 EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH32, rd, AMO);
863}
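
// Usage sketch: the AMO forms perform a read-modify-write in a single
// instruction; e.g. a relaxed fetch-and-add on a 32-bit counter whose address
// is in A0, with the addend in A1:
//
//   amoaddw(T0, A1, Address(A0, 0), std::memory_order_relaxed);
//   // T0 receives the old value; memory holds old value + A1.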
864
865#if XLEN >= 64
866void MicroAssembler::lrd(Register rd, Address addr, std::memory_order order) {
867 ASSERT(addr.offset() == 0);
868 ASSERT(Supports(RV_A));
869 EmitRType(LR, order, ZR, addr.base(), WIDTH64, rd, AMO);
870}
871
872void MicroAssembler::scd(Register rd,
873 Register rs2,
874 Address addr,
875 std::memory_order order) {
876 ASSERT(addr.offset() == 0);
877 ASSERT(Supports(RV_A));
878 EmitRType(SC, order, rs2, addr.base(), WIDTH64, rd, AMO);
879}
880
881void MicroAssembler::amoswapd(Register rd,
882 Register rs2,
883 Address addr,
884 std::memory_order order) {
885 ASSERT(addr.offset() == 0);
886 ASSERT(Supports(RV_A));
887 EmitRType(AMOSWAP, order, rs2, addr.base(), WIDTH64, rd, AMO);
888}
889
890void MicroAssembler::amoaddd(Register rd,
891 Register rs2,
892 Address addr,
893 std::memory_order order) {
894 ASSERT(addr.offset() == 0);
895 ASSERT(Supports(RV_A));
896 EmitRType(AMOADD, order, rs2, addr.base(), WIDTH64, rd, AMO);
897}
898
899void MicroAssembler::amoxord(Register rd,
900 Register rs2,
901 Address addr,
902 std::memory_order order) {
903 ASSERT(addr.offset() == 0);
904 ASSERT(Supports(RV_A));
905 EmitRType(AMOXOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
906}
907
908void MicroAssembler::amoandd(Register rd,
909 Register rs2,
910 Address addr,
911 std::memory_order order) {
912 ASSERT(addr.offset() == 0);
913 ASSERT(Supports(RV_A));
914 EmitRType(AMOAND, order, rs2, addr.base(), WIDTH64, rd, AMO);
915}
916
917void MicroAssembler::amoord(Register rd,
918 Register rs2,
919 Address addr,
920 std::memory_order order) {
921 ASSERT(addr.offset() == 0);
922 ASSERT(Supports(RV_A));
923 EmitRType(AMOOR, order, rs2, addr.base(), WIDTH64, rd, AMO);
924}
925
926void MicroAssembler::amomind(Register rd,
927 Register rs2,
928 Address addr,
929 std::memory_order order) {
930 ASSERT(addr.offset() == 0);
931 ASSERT(Supports(RV_A));
932 EmitRType(AMOMIN, order, rs2, addr.base(), WIDTH64, rd, AMO);
933}
934
935void MicroAssembler::amomaxd(Register rd,
936 Register rs2,
937 Address addr,
938 std::memory_order order) {
939 ASSERT(addr.offset() == 0);
940 ASSERT(Supports(RV_A));
941 EmitRType(AMOMAX, order, rs2, addr.base(), WIDTH64, rd, AMO);
942}
943
944void MicroAssembler::amominud(Register rd,
945 Register rs2,
946 Address addr,
947 std::memory_order order) {
948 ASSERT(addr.offset() == 0);
949 ASSERT(Supports(RV_A));
950 EmitRType(AMOMINU, order, rs2, addr.base(), WIDTH64, rd, AMO);
951}
952
953void MicroAssembler::amomaxud(Register rd,
954 Register rs2,
955 Address addr,
956 std::memory_order order) {
957 ASSERT(addr.offset() == 0);
958 ASSERT(Supports(RV_A));
959 EmitRType(AMOMAXU, order, rs2, addr.base(), WIDTH64, rd, AMO);
960}
961#endif // XLEN >= 64
962
963void MicroAssembler::flw(FRegister rd, Address addr) {
964 ASSERT(Supports(RV_F));
965#if XLEN == 32
966 if (Supports(RV_C)) {
967 if ((addr.base() == SP) && IsCSPLoad4Imm(addr.offset())) {
968 c_flwsp(rd, addr);
969 return;
970 }
971 if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
972 c_flw(rd, addr);
973 return;
974 }
975 }
976#endif // XLEN == 32
977 EmitIType(addr.offset(), addr.base(), S, rd, LOADFP);
978}
979
980void MicroAssembler::fsw(FRegister rs2, Address addr) {
981 ASSERT(Supports(RV_F));
982#if XLEN == 32
983 if (Supports(RV_C)) {
984 if ((addr.base() == SP) && IsCSPStore4Imm(addr.offset())) {
985 c_fswsp(rs2, addr);
986 return;
987 }
988 if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem4Imm(addr.offset())) {
989 c_fsw(rs2, addr);
990 return;
991 }
992 }
993#endif // XLEN == 32
994 EmitSType(addr.offset(), rs2, addr.base(), S, STOREFP);
995}
996
997void MicroAssembler::fmadds(FRegister rd,
998 FRegister rs1,
999 FRegister rs2,
1000 FRegister rs3,
1001 RoundingMode rounding) {
1002 ASSERT(Supports(RV_F));
1003 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMADD);
1004}
1005
1006void MicroAssembler::fmsubs(FRegister rd,
1007 FRegister rs1,
1008 FRegister rs2,
1009 FRegister rs3,
1010 RoundingMode rounding) {
1011 ASSERT(Supports(RV_F));
1012 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FMSUB);
1013}
1014
1015void MicroAssembler::fnmsubs(FRegister rd,
1016 FRegister rs1,
1017 FRegister rs2,
1018 FRegister rs3,
1019 RoundingMode rounding) {
1020 ASSERT(Supports(RV_F));
1021 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMSUB);
1022}
1023
1024void MicroAssembler::fnmadds(FRegister rd,
1025 FRegister rs1,
1026 FRegister rs2,
1027 FRegister rs3,
1028 RoundingMode rounding) {
1029 ASSERT(Supports(RV_F));
1030 EmitR4Type(rs3, F2_S, rs2, rs1, rounding, rd, FNMADD);
1031}
1032
1033void MicroAssembler::fadds(FRegister rd,
1034 FRegister rs1,
1035 FRegister rs2,
1036 RoundingMode rounding) {
1037 ASSERT(Supports(RV_F));
1038 EmitRType(FADDS, rs2, rs1, rounding, rd, OPFP);
1039}
1040
1041void MicroAssembler::fsubs(FRegister rd,
1042 FRegister rs1,
1043 FRegister rs2,
1044 RoundingMode rounding) {
1045 ASSERT(Supports(RV_F));
1046 EmitRType(FSUBS, rs2, rs1, rounding, rd, OPFP);
1047}
1048
1049void MicroAssembler::fmuls(FRegister rd,
1050 FRegister rs1,
1051 FRegister rs2,
1052 RoundingMode rounding) {
1053 ASSERT(Supports(RV_F));
1054 EmitRType(FMULS, rs2, rs1, rounding, rd, OPFP);
1055}
1056
1057void MicroAssembler::fdivs(FRegister rd,
1058 FRegister rs1,
1059 FRegister rs2,
1060 RoundingMode rounding) {
1061 ASSERT(Supports(RV_F));
1062 EmitRType(FDIVS, rs2, rs1, rounding, rd, OPFP);
1063}
1064
1065void MicroAssembler::fsqrts(FRegister rd,
1066 FRegister rs1,
1067 RoundingMode rounding) {
1068 ASSERT(Supports(RV_F));
1069 EmitRType(FSQRTS, FRegister(0), rs1, rounding, rd, OPFP);
1070}
1071
1072void MicroAssembler::fsgnjs(FRegister rd, FRegister rs1, FRegister rs2) {
1073 ASSERT(Supports(RV_F));
1074 EmitRType(FSGNJS, rs2, rs1, J, rd, OPFP);
1075}
1076
1077void MicroAssembler::fsgnjns(FRegister rd, FRegister rs1, FRegister rs2) {
1078 ASSERT(Supports(RV_F));
1079 EmitRType(FSGNJS, rs2, rs1, JN, rd, OPFP);
1080}
1081
1082void MicroAssembler::fsgnjxs(FRegister rd, FRegister rs1, FRegister rs2) {
1083 ASSERT(Supports(RV_F));
1084 EmitRType(FSGNJS, rs2, rs1, JX, rd, OPFP);
1085}
1086
1087void MicroAssembler::fmins(FRegister rd, FRegister rs1, FRegister rs2) {
1088 ASSERT(Supports(RV_F));
1089 EmitRType(FMINMAXS, rs2, rs1, FMIN, rd, OPFP);
1090}
1091
1092void MicroAssembler::fmaxs(FRegister rd, FRegister rs1, FRegister rs2) {
1093 ASSERT(Supports(RV_F));
1094 EmitRType(FMINMAXS, rs2, rs1, FMAX, rd, OPFP);
1095}
1096
1097void MicroAssembler::feqs(Register rd, FRegister rs1, FRegister rs2) {
1098 ASSERT(Supports(RV_F));
1099 EmitRType(FCMPS, rs2, rs1, FEQ, rd, OPFP);
1100}
1101
1102void MicroAssembler::flts(Register rd, FRegister rs1, FRegister rs2) {
1103 ASSERT(Supports(RV_F));
1104 EmitRType(FCMPS, rs2, rs1, FLT, rd, OPFP);
1105}
1106
1107void MicroAssembler::fles(Register rd, FRegister rs1, FRegister rs2) {
1108 ASSERT(Supports(RV_F));
1109 EmitRType(FCMPS, rs2, rs1, FLE, rd, OPFP);
1110}
1111
1112void MicroAssembler::fclasss(Register rd, FRegister rs1) {
1113 ASSERT(Supports(RV_F));
1114 EmitRType(FCLASSS, FRegister(0), rs1, F3_1, rd, OPFP);
1115}
1116
1117void MicroAssembler::fcvtws(Register rd, FRegister rs1, RoundingMode rounding) {
1118 ASSERT(Supports(RV_F));
1119 EmitRType(FCVTintS, FRegister(W), rs1, rounding, rd, OPFP);
1120}
1121
1122void MicroAssembler::fcvtwus(Register rd,
1123 FRegister rs1,
1124 RoundingMode rounding) {
1125 ASSERT(Supports(RV_F));
1126 EmitRType(FCVTintS, FRegister(WU), rs1, rounding, rd, OPFP);
1127}
1128
1129void MicroAssembler::fcvtsw(FRegister rd, Register rs1, RoundingMode rounding) {
1130 ASSERT(Supports(RV_F));
1131 EmitRType(FCVTSint, FRegister(W), rs1, rounding, rd, OPFP);
1132}
1133
1134void MicroAssembler::fcvtswu(FRegister rd,
1135 Register rs1,
1136 RoundingMode rounding) {
1137 ASSERT(Supports(RV_F));
1138 EmitRType(FCVTSint, FRegister(WU), rs1, rounding, rd, OPFP);
1139}
1140
1141void MicroAssembler::fmvxw(Register rd, FRegister rs1) {
1142 ASSERT(Supports(RV_F));
1143 EmitRType(FMVXW, FRegister(0), rs1, F3_0, rd, OPFP);
1144}
1145
1146void MicroAssembler::fmvwx(FRegister rd, Register rs1) {
1147 ASSERT(Supports(RV_F));
1148 EmitRType(FMVWX, FRegister(0), rs1, F3_0, rd, OPFP);
1149}
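
// Sketch: fmvxw/fmvwx transfer raw IEEE-754 bit patterns between the integer
// and float register files without conversion. Assuming an integer-constant
// helper is available (LoadImmediate here is an assumption), 1.0f could be
// materialized as:
//
//   LoadImmediate(T0, 0x3F800000);  // bit pattern of 1.0f
//   fmvwx(FA0, T0);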
1150
1151#if XLEN >= 64
1152void MicroAssembler::fcvtls(Register rd, FRegister rs1, RoundingMode rounding) {
1153 ASSERT(Supports(RV_F));
1154 EmitRType(FCVTintS, FRegister(L), rs1, rounding, rd, OPFP);
1155}
1156
1157void MicroAssembler::fcvtlus(Register rd,
1158 FRegister rs1,
1159 RoundingMode rounding) {
1160 ASSERT(Supports(RV_F));
1161 EmitRType(FCVTintS, FRegister(LU), rs1, rounding, rd, OPFP);
1162}
1163
1164void MicroAssembler::fcvtsl(FRegister rd, Register rs1, RoundingMode rounding) {
1165 ASSERT(Supports(RV_F));
1166 EmitRType(FCVTSint, FRegister(L), rs1, rounding, rd, OPFP);
1167}
1168
1169void MicroAssembler::fcvtslu(FRegister rd,
1170 Register rs1,
1171 RoundingMode rounding) {
1172 ASSERT(Supports(RV_F));
1173 EmitRType(FCVTSint, FRegister(LU), rs1, rounding, rd, OPFP);
1174}
1175#endif // XLEN >= 64
1176
1177void MicroAssembler::fld(FRegister rd, Address addr) {
1178 ASSERT(Supports(RV_D));
1179 if (Supports(RV_C)) {
1180 if ((addr.base() == SP) && IsCSPLoad8Imm(addr.offset())) {
1181 c_fldsp(rd, addr);
1182 return;
1183 }
1184 if (IsCFRdp(rd) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
1185 c_fld(rd, addr);
1186 return;
1187 }
1188 }
1189 EmitIType(addr.offset(), addr.base(), D, rd, LOADFP);
1190}
1191
1192void MicroAssembler::fsd(FRegister rs2, Address addr) {
1193 ASSERT(Supports(RV_D));
1194 if (Supports(RV_C)) {
1195 if ((addr.base() == SP) && IsCSPStore8Imm(addr.offset())) {
1196 c_fsdsp(rs2, addr);
1197 return;
1198 }
1199 if (IsCFRs2p(rs2) && IsCRs1p(addr.base()) && IsCMem8Imm(addr.offset())) {
1200 c_fsd(rs2, addr);
1201 return;
1202 }
1203 }
1204 EmitSType(addr.offset(), rs2, addr.base(), D, STOREFP);
1205}
1206
1207void MicroAssembler::fmaddd(FRegister rd,
1208 FRegister rs1,
1209 FRegister rs2,
1210 FRegister rs3,
1211 RoundingMode rounding) {
1212 ASSERT(Supports(RV_D));
1213 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMADD);
1214}
1215
1216void MicroAssembler::fmsubd(FRegister rd,
1217 FRegister rs1,
1218 FRegister rs2,
1219 FRegister rs3,
1220 RoundingMode rounding) {
1221 ASSERT(Supports(RV_D));
1222 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FMSUB);
1223}
1224
1225void MicroAssembler::fnmsubd(FRegister rd,
1226 FRegister rs1,
1227 FRegister rs2,
1228 FRegister rs3,
1229 RoundingMode rounding) {
1230 ASSERT(Supports(RV_D));
1231 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMSUB);
1232}
1233
1234void MicroAssembler::fnmaddd(FRegister rd,
1235 FRegister rs1,
1236 FRegister rs2,
1237 FRegister rs3,
1238 RoundingMode rounding) {
1239 ASSERT(Supports(RV_D));
1240 EmitR4Type(rs3, F2_D, rs2, rs1, rounding, rd, FNMADD);
1241}
1242
1243void MicroAssembler::faddd(FRegister rd,
1244 FRegister rs1,
1245 FRegister rs2,
1246 RoundingMode rounding) {
1247 ASSERT(Supports(RV_D));
1248 EmitRType(FADDD, rs2, rs1, rounding, rd, OPFP);
1249}
1250
1251void MicroAssembler::fsubd(FRegister rd,
1252 FRegister rs1,
1253 FRegister rs2,
1254 RoundingMode rounding) {
1255 ASSERT(Supports(RV_D));
1256 EmitRType(FSUBD, rs2, rs1, rounding, rd, OPFP);
1257}
1258
1259void MicroAssembler::fmuld(FRegister rd,
1260 FRegister rs1,
1261 FRegister rs2,
1262 RoundingMode rounding) {
1263 ASSERT(Supports(RV_D));
1264 EmitRType(FMULD, rs2, rs1, rounding, rd, OPFP);
1265}
1266
1267void MicroAssembler::fdivd(FRegister rd,
1268 FRegister rs1,
1269 FRegister rs2,
1270 RoundingMode rounding) {
1271 ASSERT(Supports(RV_D));
1272 EmitRType(FDIVD, rs2, rs1, rounding, rd, OPFP);
1273}
1274
1275void MicroAssembler::fsqrtd(FRegister rd,
1276 FRegister rs1,
1277 RoundingMode rounding) {
1278 ASSERT(Supports(RV_D));
1279 EmitRType(FSQRTD, FRegister(0), rs1, rounding, rd, OPFP);
1280}
1281
1282void MicroAssembler::fsgnjd(FRegister rd, FRegister rs1, FRegister rs2) {
1283 ASSERT(Supports(RV_D));
1284 EmitRType(FSGNJD, rs2, rs1, J, rd, OPFP);
1285}
1286
1287void MicroAssembler::fsgnjnd(FRegister rd, FRegister rs1, FRegister rs2) {
1288 ASSERT(Supports(RV_D));
1289 EmitRType(FSGNJD, rs2, rs1, JN, rd, OPFP);
1290}
1291
1292void MicroAssembler::fsgnjxd(FRegister rd, FRegister rs1, FRegister rs2) {
1293 ASSERT(Supports(RV_D));
1294 EmitRType(FSGNJD, rs2, rs1, JX, rd, OPFP);
1295}
1296
1297void MicroAssembler::fmind(FRegister rd, FRegister rs1, FRegister rs2) {
1298 ASSERT(Supports(RV_D));
1299 EmitRType(FMINMAXD, rs2, rs1, FMIN, rd, OPFP);
1300}
1301
1302void MicroAssembler::fmaxd(FRegister rd, FRegister rs1, FRegister rs2) {
1303 ASSERT(Supports(RV_D));
1304 EmitRType(FMINMAXD, rs2, rs1, FMAX, rd, OPFP);
1305}
1306
1307void MicroAssembler::fcvtsd(FRegister rd,
1308 FRegister rs1,
1309 RoundingMode rounding) {
1310 ASSERT(Supports(RV_D));
1311 EmitRType(FCVTS, FRegister(1), rs1, rounding, rd, OPFP);
1312}
1313
1314void MicroAssembler::fcvtds(FRegister rd,
1315 FRegister rs1,
1316 RoundingMode rounding) {
1317 ASSERT(Supports(RV_D));
1318 EmitRType(FCVTD, FRegister(0), rs1, rounding, rd, OPFP);
1319}
1320
1321void MicroAssembler::feqd(Register rd, FRegister rs1, FRegister rs2) {
1322 ASSERT(Supports(RV_D));
1323 EmitRType(FCMPD, rs2, rs1, FEQ, rd, OPFP);
1324}
1325
1326void MicroAssembler::fltd(Register rd, FRegister rs1, FRegister rs2) {
1327 ASSERT(Supports(RV_D));
1328 EmitRType(FCMPD, rs2, rs1, FLT, rd, OPFP);
1329}
1330
1331void MicroAssembler::fled(Register rd, FRegister rs1, FRegister rs2) {
1332 ASSERT(Supports(RV_D));
1333 EmitRType(FCMPD, rs2, rs1, FLE, rd, OPFP);
1334}
1335
1336void MicroAssembler::fclassd(Register rd, FRegister rs1) {
1337 ASSERT(Supports(RV_D));
1338 EmitRType(FCLASSD, FRegister(0), rs1, F3_1, rd, OPFP);
1339}
1340
1341void MicroAssembler::fcvtwd(Register rd, FRegister rs1, RoundingMode rounding) {
1342 ASSERT(Supports(RV_D));
1343 EmitRType(FCVTintD, FRegister(W), rs1, rounding, rd, OPFP);
1344}
1345
1346void MicroAssembler::fcvtwud(Register rd,
1347 FRegister rs1,
1348 RoundingMode rounding) {
1349 ASSERT(Supports(RV_D));
1350 EmitRType(FCVTintD, FRegister(WU), rs1, rounding, rd, OPFP);
1351}
1352
1353void MicroAssembler::fcvtdw(FRegister rd, Register rs1, RoundingMode rounding) {
1354 ASSERT(Supports(RV_D));
1355 EmitRType(FCVTDint, FRegister(W), rs1, rounding, rd, OPFP);
1356}
1357
1358void MicroAssembler::fcvtdwu(FRegister rd,
1359 Register rs1,
1360 RoundingMode rounding) {
1361 ASSERT(Supports(RV_D));
1362 EmitRType(FCVTDint, FRegister(WU), rs1, rounding, rd, OPFP);
1363}
1364
1365#if XLEN >= 64
1366void MicroAssembler::fcvtld(Register rd, FRegister rs1, RoundingMode rounding) {
1367 ASSERT(Supports(RV_D));
1368 EmitRType(FCVTintD, FRegister(L), rs1, rounding, rd, OPFP);
1369}
1370
1371void MicroAssembler::fcvtlud(Register rd,
1372 FRegister rs1,
1373 RoundingMode rounding) {
1374 ASSERT(Supports(RV_D));
1375 EmitRType(FCVTintD, FRegister(LU), rs1, rounding, rd, OPFP);
1376}
1377
1378void MicroAssembler::fmvxd(Register rd, FRegister rs1) {
1379 ASSERT(Supports(RV_D));
1380 EmitRType(FMVXD, FRegister(0), rs1, F3_0, rd, OPFP);
1381}
1382
1383void MicroAssembler::fcvtdl(FRegister rd, Register rs1, RoundingMode rounding) {
1384 ASSERT(Supports(RV_D));
1385 EmitRType(FCVTDint, FRegister(L), rs1, rounding, rd, OPFP);
1386}
1387
1388void MicroAssembler::fcvtdlu(FRegister rd,
1389 Register rs1,
1390 RoundingMode rounding) {
1391 ASSERT(Supports(RV_D));
1392 EmitRType(FCVTDint, FRegister(LU), rs1, rounding, rd, OPFP);
1393}
1394
1395void MicroAssembler::fmvdx(FRegister rd, Register rs1) {
1396 ASSERT(Supports(RV_D));
1397 EmitRType(FMVDX, FRegister(0), rs1, F3_0, rd, OPFP);
1398}
1399#endif // XLEN >= 64
1400
1401#if XLEN >= 64
1402void MicroAssembler::adduw(Register rd, Register rs1, Register rs2) {
1403 ASSERT(Supports(RV_Zba));
1404 EmitRType(ADDUW, rs2, rs1, F3_0, rd, OP32);
1405}
1406#endif
1407
1408void MicroAssembler::sh1add(Register rd, Register rs1, Register rs2) {
1409 ASSERT(Supports(RV_Zba));
1410 EmitRType(SHADD, rs2, rs1, SH1ADD, rd, OP);
1411}
1412
1413#if XLEN >= 64
1414void MicroAssembler::sh1adduw(Register rd, Register rs1, Register rs2) {
1415 ASSERT(Supports(RV_Zba));
1416 EmitRType(SHADD, rs2, rs1, SH1ADD, rd, OP32);
1417}
1418#endif
1419
1420void MicroAssembler::sh2add(Register rd, Register rs1, Register rs2) {
1421 ASSERT(Supports(RV_Zba));
1422 EmitRType(SHADD, rs2, rs1, SH2ADD, rd, OP);
1423}
1424
1425#if XLEN >= 64
1426void MicroAssembler::sh2adduw(Register rd, Register rs1, Register rs2) {
1427 ASSERT(Supports(RV_Zba));
1428 EmitRType(SHADD, rs2, rs1, SH2ADD, rd, OP32);
1429}
1430#endif
1431
1432void MicroAssembler::sh3add(Register rd, Register rs1, Register rs2) {
1433 ASSERT(Supports(RV_Zba));
1434 EmitRType(SHADD, rs2, rs1, SH3ADD, rd, OP);
1435}
1436
1437#if XLEN >= 64
1438void MicroAssembler::sh3adduw(Register rd, Register rs1, Register rs2) {
1439 ASSERT(Supports(RV_Zba));
1440 EmitRType(SHADD, rs2, rs1, SH3ADD, rd, OP32);
1441}
1442
1443void MicroAssembler::slliuw(Register rd, Register rs1, intx_t shamt) {
1444 ASSERT((shamt > 0) && (shamt < 32));
1445 ASSERT(Supports(RV_Zba));
1446 EmitRType(SLLIUW, shamt, rs1, SLLI, rd, OPIMM32);
1447}
1448#endif
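
// Usage sketch for the Zba shift-add forms: scaled indexing in one
// instruction. For a 4-byte element at index T1 of an array based at A0:
//
//   sh2add(T0, T1, A0);      // T0 = A0 + (T1 << 2)
//   lw(A1, Address(T0, 0));  // load element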
1449
1450void MicroAssembler::andn(Register rd, Register rs1, Register rs2) {
1451 ASSERT(Supports(RV_Zbb));
1452 EmitRType(SUB, rs2, rs1, AND, rd, OP);
1453}
1454
1455void MicroAssembler::orn(Register rd, Register rs1, Register rs2) {
1456 ASSERT(Supports(RV_Zbb));
1457 EmitRType(SUB, rs2, rs1, OR, rd, OP);
1458}
1459
1460void MicroAssembler::xnor(Register rd, Register rs1, Register rs2) {
1461 ASSERT(Supports(RV_Zbb));
1462 EmitRType(SUB, rs2, rs1, XOR, rd, OP);
1463}
1464
1465void MicroAssembler::clz(Register rd, Register rs1) {
1466 ASSERT(Supports(RV_Zbb));
1467 EmitRType(COUNT, 0b00000, rs1, F3_COUNT, rd, OPIMM);
1468}
1469
1470void MicroAssembler::clzw(Register rd, Register rs1) {
1471 ASSERT(Supports(RV_Zbb));
1472 EmitRType(COUNT, 0b00000, rs1, F3_COUNT, rd, OPIMM32);
1473}
1474
1475void MicroAssembler::ctz(Register rd, Register rs1) {
1476 ASSERT(Supports(RV_Zbb));
1477 EmitRType(COUNT, 0b00001, rs1, F3_COUNT, rd, OPIMM);
1478}
1479
1480void MicroAssembler::ctzw(Register rd, Register rs1) {
1481 ASSERT(Supports(RV_Zbb));
1482 EmitRType(COUNT, 0b00001, rs1, F3_COUNT, rd, OPIMM32);
1483}
1484
1485void MicroAssembler::cpop(Register rd, Register rs1) {
1486 ASSERT(Supports(RV_Zbb));
1487 EmitRType(COUNT, 0b00010, rs1, F3_COUNT, rd, OPIMM);
1488}
1489
1490void MicroAssembler::cpopw(Register rd, Register rs1) {
1491 ASSERT(Supports(RV_Zbb));
1492 EmitRType(COUNT, 0b00010, rs1, F3_COUNT, rd, OPIMM32);
1493}
1494
1495void MicroAssembler::max(Register rd, Register rs1, Register rs2) {
1496 ASSERT(Supports(RV_Zbb));
1497 EmitRType(MINMAXCLMUL, rs2, rs1, MAX, rd, OP);
1498}
1499
1500void MicroAssembler::maxu(Register rd, Register rs1, Register rs2) {
1501 ASSERT(Supports(RV_Zbb));
1502 EmitRType(MINMAXCLMUL, rs2, rs1, MAXU, rd, OP);
1503}
1504
1505void MicroAssembler::min(Register rd, Register rs1, Register rs2) {
1506 ASSERT(Supports(RV_Zbb));
1507 EmitRType(MINMAXCLMUL, rs2, rs1, MIN, rd, OP);
1508}
1509
1510void MicroAssembler::minu(Register rd, Register rs1, Register rs2) {
1511 ASSERT(Supports(RV_Zbb));
1512 EmitRType(MINMAXCLMUL, rs2, rs1, MINU, rd, OP);
1513}
1514
1515void MicroAssembler::sextb(Register rd, Register rs1) {
1516 ASSERT(Supports(RV_Zbb));
1517 EmitRType((Funct7)0b0110000, 0b00100, rs1, SEXT, rd, OPIMM);
1518}
1519
1520void MicroAssembler::sexth(Register rd, Register rs1) {
1521 ASSERT(Supports(RV_Zbb));
1522 EmitRType((Funct7)0b0110000, 0b00101, rs1, SEXT, rd, OPIMM);
1523}
1524
1525void MicroAssembler::zexth(Register rd, Register rs1) {
1526 ASSERT(Supports(RV_Zbb));
1527#if XLEN == 32
1528 EmitRType((Funct7)0b0000100, 0b00000, rs1, ZEXT, rd, OP);
1529#elif XLEN == 64
1530 EmitRType((Funct7)0b0000100, 0b00000, rs1, ZEXT, rd, OP32);
1531#else
1532 UNIMPLEMENTED();
1533#endif
1534}
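
// Sketch: sextb/sexth/zexth normalize sub-word values in one instruction,
// e.g. zexth(A0, A0) clears everything above bit 15, replacing the
// traditional slli/srli shift pair.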
1535
1536void MicroAssembler::rol(Register rd, Register rs1, Register rs2) {
1537 ASSERT(Supports(RV_Zbb));
1538 EmitRType(ROTATE, rs2, rs1, ROL, rd, OP);
1539}
1540
1541void MicroAssembler::rolw(Register rd, Register rs1, Register rs2) {
1542 ASSERT(Supports(RV_Zbb));
1543 EmitRType(ROTATE, rs2, rs1, ROL, rd, OP32);
1544}
1545
1546void MicroAssembler::ror(Register rd, Register rs1, Register rs2) {
1547 ASSERT(Supports(RV_Zbb));
1548 EmitRType(ROTATE, rs2, rs1, ROR, rd, OP);
1549}
1550
1551void MicroAssembler::rori(Register rd, Register rs1, intx_t shamt) {
1552 ASSERT(Supports(RV_Zbb));
1553 EmitRType(ROTATE, shamt, rs1, ROR, rd, OPIMM);
1554}
1555
1556void MicroAssembler::roriw(Register rd, Register rs1, intx_t shamt) {
1557 ASSERT(Supports(RV_Zbb));
1558 EmitRType(ROTATE, shamt, rs1, ROR, rd, OPIMM32);
1559}
1560
1561void MicroAssembler::rorw(Register rd, Register rs1, Register rs2) {
1562 ASSERT(Supports(RV_Zbb));
1563 EmitRType(ROTATE, rs2, rs1, ROR, rd, OP32);
1564}
1565
1566void MicroAssembler::orcb(Register rd, Register rs1) {
1567 ASSERT(Supports(RV_Zbb));
1568 EmitRType((Funct7)0b0010100, 0b00111, rs1, (Funct3)0b101, rd, OPIMM);
1569}
1570
1571void MicroAssembler::rev8(Register rd, Register rs1) {
1572 ASSERT(Supports(RV_Zbb));
1573#if XLEN == 32
1574 EmitRType((Funct7)0b0110100, 0b11000, rs1, (Funct3)0b101, rd, OPIMM);
1575#elif XLEN == 64
1576 EmitRType((Funct7)0b0110101, 0b11000, rs1, (Funct3)0b101, rd, OPIMM);
1577#else
1578 UNIMPLEMENTED();
1579#endif
1580}
1581
1582void MicroAssembler::clmul(Register rd, Register rs1, Register rs2) {
1583 ASSERT(Supports(RV_Zbc));
1584 EmitRType(MINMAXCLMUL, rs2, rs1, CLMUL, rd, OP);
1585}
1586
1587void MicroAssembler::clmulh(Register rd, Register rs1, Register rs2) {
1588 ASSERT(Supports(RV_Zbc));
1589 EmitRType(MINMAXCLMUL, rs2, rs1, CLMULH, rd, OP);
1590}
1591
1592void MicroAssembler::clmulr(Register rd, Register rs1, Register rs2) {
1593 ASSERT(Supports(RV_Zbc));
1594 EmitRType(MINMAXCLMUL, rs2, rs1, CLMULR, rd, OP);
1595}
1596
1597void MicroAssembler::bclr(Register rd, Register rs1, Register rs2) {
1598 ASSERT(Supports(RV_Zbs));
1599 EmitRType(BCLRBEXT, rs2, rs1, BCLR, rd, OP);
1600}
1601
1602void MicroAssembler::bclri(Register rd, Register rs1, intx_t shamt) {
1603 ASSERT(Supports(RV_Zbs));
1604 EmitRType(BCLRBEXT, shamt, rs1, BCLR, rd, OPIMM);
1605}
1606
1607void MicroAssembler::bext(Register rd, Register rs1, Register rs2) {
1608 ASSERT(Supports(RV_Zbs));
1609 EmitRType(BCLRBEXT, rs2, rs1, BEXT, rd, OP);
1610}
1611
1612void MicroAssembler::bexti(Register rd, Register rs1, intx_t shamt) {
1613 ASSERT(Supports(RV_Zbs));
1614 EmitRType(BCLRBEXT, shamt, rs1, BEXT, rd, OPIMM);
1615}
1616
1617void MicroAssembler::binv(Register rd, Register rs1, Register rs2) {
1618 ASSERT(Supports(RV_Zbs));
1619 EmitRType(BINV, rs2, rs1, F3_BINV, rd, OP);
1620}
1621
1622void MicroAssembler::binvi(Register rd, Register rs1, intx_t shamt) {
1623 ASSERT(Supports(RV_Zbs));
1624 EmitRType(BINV, shamt, rs1, F3_BINV, rd, OPIMM);
1625}
1626
1627void MicroAssembler::bset(Register rd, Register rs1, Register rs2) {
1628 ASSERT(Supports(RV_Zbs));
1629 EmitRType(BSET, rs2, rs1, F3_BSET, rd, OP);
1630}
1631
1632void MicroAssembler::bseti(Register rd, Register rs1, intx_t shamt) {
1633 ASSERT(Supports(RV_Zbs));
1634 EmitRType(BSET, shamt, rs1, F3_BSET, rd, OPIMM);
1635}
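
// Usage sketch for the Zbs single-bit ops: testing and setting bit n without
// materializing a mask register, e.g. for bit 3 of A0:
//
//   bexti(T0, A0, 3);  // T0 = (A0 >> 3) & 1
//   bseti(A0, A0, 3);  // A0 |= (1 << 3)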
1636
1637void MicroAssembler::c_lwsp(Register rd, Address addr) {
1638 ASSERT(rd != ZR);
1639 ASSERT(addr.base() == SP);
1640 ASSERT(Supports(RV_C));
1641 Emit16(C_LWSP | EncodeCRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
1642}
1643
1644#if XLEN == 32
1645void MicroAssembler::c_flwsp(FRegister rd, Address addr) {
1646 ASSERT(addr.base() == SP);
1647 ASSERT(Supports(RV_C));
1648 ASSERT(Supports(RV_F));
1649 Emit16(C_FLWSP | EncodeCFRd(rd) | EncodeCSPLoad4Imm(addr.offset()));
1650}
1651#else
1652void MicroAssembler::c_ldsp(Register rd, Address addr) {
1653 ASSERT(rd != ZR);
1654 ASSERT(addr.base() == SP);
1655 ASSERT(Supports(RV_C));
1656 Emit16(C_LDSP | EncodeCRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
1657}
1658#endif
1659
1660void MicroAssembler::c_fldsp(FRegister rd, Address addr) {
1661 ASSERT(addr.base() == SP);
1662 ASSERT(Supports(RV_C));
1663 ASSERT(Supports(RV_D));
1664 Emit16(C_FLDSP | EncodeCFRd(rd) | EncodeCSPLoad8Imm(addr.offset()));
1665}
1666
1667void MicroAssembler::c_swsp(Register rs2, Address addr) {
1668 ASSERT(addr.base() == SP);
1669 ASSERT(Supports(RV_C));
1670 Emit16(C_SWSP | EncodeCRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
1671}
1672
1673#if XLEN == 32
1674void MicroAssembler::c_fswsp(FRegister rs2, Address addr) {
1675 ASSERT(addr.base() == SP);
1676 ASSERT(Supports(RV_C));
1677 ASSERT(Supports(RV_F));
1678 Emit16(C_FSWSP | EncodeCFRs2(rs2) | EncodeCSPStore4Imm(addr.offset()));
1679}
1680#else
1681void MicroAssembler::c_sdsp(Register rs2, Address addr) {
1682 ASSERT(addr.base() == SP);
1683 ASSERT(Supports(RV_C));
1684 Emit16(C_SDSP | EncodeCRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
1685}
1686#endif
1687void MicroAssembler::c_fsdsp(FRegister rs2, Address addr) {
1688 ASSERT(addr.base() == SP);
1689 ASSERT(Supports(RV_C));
1690 ASSERT(Supports(RV_D));
1691 Emit16(C_FSDSP | EncodeCFRs2(rs2) | EncodeCSPStore8Imm(addr.offset()));
1692}
1693
1694void MicroAssembler::c_lw(Register rd, Address addr) {
1695 ASSERT(Supports(RV_C));
1696 Emit16(C_LW | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
1697 EncodeCMem4Imm(addr.offset()));
1698}
1699
1700void MicroAssembler::c_ld(Register rd, Address addr) {
1701 ASSERT(Supports(RV_C));
1702 Emit16(C_LD | EncodeCRdp(rd) | EncodeCRs1p(addr.base()) |
1703 EncodeCMem8Imm(addr.offset()));
1704}
1705
1706void MicroAssembler::c_flw(FRegister rd, Address addr) {
1707 ASSERT(Supports(RV_C));
1708 ASSERT(Supports(RV_F));
1709 Emit16(C_FLW | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
1710 EncodeCMem4Imm(addr.offset()));
1711}
1712
1713void MicroAssembler::c_fld(FRegister rd, Address addr) {
1714 ASSERT(Supports(RV_C));
1715 ASSERT(Supports(RV_D));
1716 Emit16(C_FLD | EncodeCFRdp(rd) | EncodeCRs1p(addr.base()) |
1717 EncodeCMem8Imm(addr.offset()));
1718}
1719
1720void MicroAssembler::c_sw(Register rs2, Address addr) {
1721 ASSERT(Supports(RV_C));
1722 Emit16(C_SW | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
1723 EncodeCMem4Imm(addr.offset()));
1724}
1725
1726void MicroAssembler::c_sd(Register rs2, Address addr) {
1727 ASSERT(Supports(RV_C));
1728 Emit16(C_SD | EncodeCRs1p(addr.base()) | EncodeCRs2p(rs2) |
1729 EncodeCMem8Imm(addr.offset()));
1730}
1731
1732void MicroAssembler::c_fsw(FRegister rs2, Address addr) {
1733 ASSERT(Supports(RV_C));
1734 ASSERT(Supports(RV_F));
1735 Emit16(C_FSW | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
1736 EncodeCMem4Imm(addr.offset()));
1737}
1738
1739void MicroAssembler::c_fsd(FRegister rs2, Address addr) {
1740 ASSERT(Supports(RV_C));
1741 ASSERT(Supports(RV_D));
1742 Emit16(C_FSD | EncodeCRs1p(addr.base()) | EncodeCFRs2p(rs2) |
1743 EncodeCMem8Imm(addr.offset()));
1744}
1745
1746void MicroAssembler::c_j(Label* label) {
1747 ASSERT(Supports(RV_C));
1748 EmitCJump(label, C_J);
1749}
1750
1751#if XLEN == 32
1752void MicroAssembler::c_jal(Label* label) {
1753 ASSERT(Supports(RV_C));
1754 EmitCJump(label, C_JAL);
1755}
1756#endif // XLEN == 32
1757
1758void MicroAssembler::c_jr(Register rs1) {
1759 ASSERT(Supports(RV_C));
1760 ASSERT(rs1 != ZR);
1761 Emit16(C_JR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
1762}
1763
1764void MicroAssembler::c_jalr(Register rs1) {
1765 ASSERT(Supports(RV_C));
1766 Emit16(C_JALR | EncodeCRs1(rs1) | EncodeCRs2(ZR));
1767}
1768
1769void MicroAssembler::c_beqz(Register rs1p, Label* label) {
1770 ASSERT(Supports(RV_C));
1771 EmitCBranch(rs1p, label, C_BEQZ);
1772}
1773
1774void MicroAssembler::c_bnez(Register rs1p, Label* label) {
1775 ASSERT(Supports(RV_C));
1776 EmitCBranch(rs1p, label, C_BNEZ);
1777}
1778
1779void MicroAssembler::c_li(Register rd, intptr_t imm) {
1780 ASSERT(Supports(RV_C));
1781 ASSERT(rd != ZR);
1782 Emit16(C_LI | EncodeCRd(rd) | EncodeCIImm(imm));
1783}
1784
1785void MicroAssembler::c_lui(Register rd, uintptr_t imm) {
1786 ASSERT(Supports(RV_C));
1787 ASSERT(rd != ZR);
1788 ASSERT(rd != SP);
1789 Emit16(C_LUI | EncodeCRd(rd) | EncodeCUImm(imm));
1790}
1791
1792void MicroAssembler::c_addi(Register rd, Register rs1, intptr_t imm) {
1793 ASSERT(Supports(RV_C));
1794 ASSERT(imm != 0);
1795 ASSERT(rd == rs1);
1796 Emit16(C_ADDI | EncodeCRd(rd) | EncodeCIImm(imm));
1797}
1798
1799#if XLEN >= 64
1800void MicroAssembler::c_addiw(Register rd, Register rs1, intptr_t imm) {
1801 ASSERT(Supports(RV_C));
1802 ASSERT(rd == rs1);
1803 Emit16(C_ADDIW | EncodeCRd(rd) | EncodeCIImm(imm));
1804}
1805#endif
1806void MicroAssembler::c_addi16sp(Register rd, Register rs1, intptr_t imm) {
1807 ASSERT(Supports(RV_C));
1808 ASSERT(rd == rs1);
1809 Emit16(C_ADDI16SP | EncodeCRd(rd) | EncodeCI16Imm(imm));
1810}
1811
1812void MicroAssembler::c_addi4spn(Register rdp, Register rs1, intptr_t imm) {
1813 ASSERT(Supports(RV_C));
1814 ASSERT(rs1 == SP);
1815 ASSERT(imm != 0);
1816 Emit16(C_ADDI4SPN | EncodeCRdp(rdp) | EncodeCI4SPNImm(imm));
1817}
1818
1819void MicroAssembler::c_slli(Register rd, Register rs1, intptr_t imm) {
1820 ASSERT(Supports(RV_C));
1821 ASSERT(rd == rs1);
1822 ASSERT(imm != 0);
1823 Emit16(C_SLLI | EncodeCRd(rd) | EncodeCIImm(imm));
1824}
1825
1826void MicroAssembler::c_srli(Register rd, Register rs1, intptr_t imm) {
1827 ASSERT(Supports(RV_C));
1828 ASSERT(rd == rs1);
1829 ASSERT(imm != 0);
1830 Emit16(C_SRLI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1831}
1832
1833void MicroAssembler::c_srai(Register rd, Register rs1, intptr_t imm) {
1834 ASSERT(Supports(RV_C));
1835 ASSERT(rd == rs1);
1836 ASSERT(imm != 0);
1837 Emit16(C_SRAI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1838}
1839
1840void MicroAssembler::c_andi(Register rd, Register rs1, intptr_t imm) {
1841 ASSERT(Supports(RV_C));
1842 ASSERT(rd == rs1);
1843 Emit16(C_ANDI | EncodeCRs1p(rd) | EncodeCIImm(imm));
1844}
1845
1846void MicroAssembler::c_mv(Register rd, Register rs2) {
1847 ASSERT(Supports(RV_C));
1848 ASSERT(rd != ZR);
1849 ASSERT(rs2 != ZR);
1850 Emit16(C_MV | EncodeCRd(rd) | EncodeCRs2(rs2));
1851}
1852
1853void MicroAssembler::c_add(Register rd, Register rs1, Register rs2) {
1854 ASSERT(Supports(RV_C));
1855 ASSERT(rd != ZR);
1856 ASSERT(rd == rs1);
1857 ASSERT(rs2 != ZR);
1858 Emit16(C_ADD | EncodeCRd(rd) | EncodeCRs2(rs2));
1859}
1860
1861void MicroAssembler::c_and(Register rd, Register rs1, Register rs2) {
1862 ASSERT(Supports(RV_C));
1863 ASSERT(rd == rs1);
1864 Emit16(C_AND | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1865}
1866
1867void MicroAssembler::c_or(Register rd, Register rs1, Register rs2) {
1868 ASSERT(Supports(RV_C));
1869 Emit16(C_OR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1870}
1871
1872void MicroAssembler::c_xor(Register rd, Register rs1, Register rs2) {
1873 ASSERT(Supports(RV_C));
1874 Emit16(C_XOR | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1875}
1876
1877void MicroAssembler::c_sub(Register rd, Register rs1, Register rs2) {
1878 ASSERT(Supports(RV_C));
1879 Emit16(C_SUB | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1880}
1881
1882#if XLEN >= 64
1883void MicroAssembler::c_addw(Register rd, Register rs1, Register rs2) {
1884 ASSERT(Supports(RV_C));
1885 Emit16(C_ADDW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1886}
1887
1888void MicroAssembler::c_subw(Register rd, Register rs1, Register rs2) {
1889 ASSERT(Supports(RV_C));
1890 Emit16(C_SUBW | EncodeCRs1p(rs1) | EncodeCRs2p(rs2));
1891}
1892#endif // XLEN >= 64
1893
1894void MicroAssembler::c_nop() {
1895 ASSERT(Supports(RV_C));
1896 Emit16(C_NOP);
1897}
1898
1899void MicroAssembler::c_ebreak() {
1900 ASSERT(Supports(RV_C));
1901 Emit16(C_EBREAK);
1902}
1903
1904static Funct3 InvertFunct3(Funct3 func) {
1905 switch (func) {
1906 case BEQ:
1907 return BNE;
1908 case BNE:
1909 return BEQ;
1910 case BGE:
1911 return BLT;
1912 case BGEU:
1913 return BLTU;
1914 case BLT:
1915 return BGE;
1916 case BLTU:
1917 return BGEU;
1918 default:
1919 UNREACHABLE();
1920 }
1921}
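
// Why the inversion above matters (sketch): a far conditional branch is
// expanded into a short branch with the opposite condition that skips over an
// unconditional jump with much longer reach, e.g. for beq:
//
//   bne rs1, rs2, +8      ; inverted condition hops over the jump
//   jal zr, far_target    ; +/-1MB J-type reach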
1922
1923void MicroAssembler::EmitBranch(Register rs1,
1924 Register rs2,
1925 Label* label,
1926 Funct3 func,
1927 JumpDistance distance) {
1928 intptr_t offset;
1929 if (label->IsBound()) {
1930 // Backward branch: use near or far branch based on actual distance.
1931 offset = label->Position() - Position();
1932 if (IsBTypeImm(offset)) {
1933 EmitBType(offset, rs2, rs1, func, BRANCH);
1934 return;
1935 }
1936
1937 if (IsJTypeImm(offset + 4)) {
1938 intptr_t start = Position();
1939 const intptr_t kFarBranchLength = 8;
1940 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
1941 offset = label->Position() - Position();
1942 EmitJType(offset, ZR, JAL);
1943 intptr_t end = Position();
1944 ASSERT_EQUAL(end - start, kFarBranchLength);
1945 return;
1946 }
1947
1948 intptr_t start = Position();
1949 const intptr_t kFarBranchLength = 12;
1950 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
1951 offset = label->Position() - Position();
1952 intx_t lo = ImmLo(offset);
1953 intx_t hi = ImmHi(offset);
1954 if (!IsUTypeImm(hi)) {
1955 FATAL("Branch distance exceeds 2GB!");
1956 }
1957 EmitUType(hi, FAR_TMP, AUIPC);
1958 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
1959 intptr_t end = Position();
1960 ASSERT_EQUAL(end - start, kFarBranchLength);
1961 return;
1962 } else {
1963 // Forward branch: speculatively use near branches and re-assemble with far
1964 // branches if any need greater length.
1965 if (distance == kNearJump) {
1966 offset = label->link_b(Position());
1967 if (!IsBTypeImm(offset)) {
1968 FATAL("Incorrect Assembler::kNearJump");
1969 }
1970 EmitBType(offset, rs2, rs1, func, BRANCH);
1971 } else if (far_branch_level() == 0) {
1972 offset = label->link_b(Position());
1973 if (!IsBTypeImm(offset)) {
1974 // TODO(riscv): This isn't so much because the branch is out of range
1975 // as some previous jump to the same target would be out of B-type
1976 // range... A possible alternative is to have separate lists on Labels
1977 // for pending B-type and J-type instructions.
1978 BailoutWithBranchOffsetError();
1979 }
1980 EmitBType(offset, rs2, rs1, func, BRANCH);
1981 } else if (far_branch_level() == 1) {
1982 intptr_t start = Position();
1983 const intptr_t kFarBranchLength = 8;
1984 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
1985 offset = label->link_j(Position());
1986 EmitJType(offset, ZR, JAL);
1987 intptr_t end = Position();
1988 ASSERT_EQUAL(end - start, kFarBranchLength);
1989 } else {
1990 intptr_t start = Position();
1991 const intptr_t kFarBranchLength = 12;
1992 EmitBType(kFarBranchLength, rs2, rs1, InvertFunct3(func), BRANCH);
1993 offset = label->link_far(Position());
1994 intx_t lo = ImmLo(offset);
1995 intx_t hi = ImmHi(offset);
1996 if (!IsUTypeImm(hi)) {
1997 FATAL("Branch distance exceeds 2GB!");
1998 }
1999 EmitUType(hi, FAR_TMP, AUIPC);
2000 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2001 intptr_t end = Position();
2002 ASSERT_EQUAL(end - start, kFarBranchLength);
2003 }
2004 }
2005}
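// For reference, the three shapes EmitBranch emits, matching the lengths
// asserted above (reach is measured from the first instruction):
//   4 bytes:  b<cond>  rs1, rs2, target                  // +-4KB
//   8 bytes:  b<!cond> rs1, rs2, +8; jal zr, target      // +-1MB
//   12 bytes: b<!cond> rs1, rs2, +12
//             auipc    FAR_TMP, ImmHi(offset)
//             jalr     zr, ImmLo(offset)(FAR_TMP)        // +-2GB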
2006
2007void MicroAssembler::EmitJump(Register rd,
2008 Label* label,
2009 Opcode op,
2010 JumpDistance distance) {
2011 intptr_t offset;
2012 if (label->IsBound()) {
2013 // Backward jump: use near or far jump based on actual distance.
2014 offset = label->Position() - Position();
2015
2016 if (IsJTypeImm(offset)) {
2017 EmitJType(offset, rd, JAL);
2018 return;
2019 }
2020 intx_t lo = ImmLo(offset);
2021 intx_t hi = ImmHi(offset);
2022 if (!IsUTypeImm(hi)) {
2023 FATAL("Jump distance exceeds 2GB!");
2024 }
2025 EmitUType(hi, FAR_TMP, AUIPC);
2026 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2027 return;
2028 } else {
2029 // Forward jump: speculatively use near jumps and re-assemble with far
2030 // jumps if any need greater length.
2031 if (distance == kNearJump) {
2032 offset = label->link_j(Position());
2033 if (!IsJTypeImm(offset)) {
2034 FATAL("Incorrect Assembler::kNearJump");
2035 }
2036 EmitJType(offset, rd, JAL);
2037 } else if (far_branch_level() < 2) {
2038 offset = label->link_j(Position());
2039 if (!IsJTypeImm(offset)) {
2040 BailoutWithBranchOffsetError();
2041 }
2042 EmitJType(offset, rd, JAL);
2043 } else {
2044 offset = label->link_far(Position());
2045 intx_t lo = ImmLo(offset);
2046 intx_t hi = ImmHi(offset);
2047 if (!IsUTypeImm(hi)) {
2048 FATAL("Jump distance exceeds 2GB!");
2049 }
2050 EmitUType(hi, FAR_TMP, AUIPC);
2051 EmitIType(lo, FAR_TMP, F3_0, ZR, JALR);
2052 }
2053 }
2054}
2055
2056void MicroAssembler::EmitCBranch(Register rs1p, Label* label, COpcode op) {
2057 intptr_t offset;
2058 if (label->IsBound()) {
2059 offset = label->Position() - Position();
2060 } else {
2061 offset = label->link_cb(Position());
2062 }
2063 if (!IsCBImm(offset)) {
2064 FATAL("Incorrect Assembler::kNearJump");
2065 }
2066 Emit16(op | EncodeCRs1p(rs1p) | EncodeCBImm(offset));
2067}
2068
2069void MicroAssembler::EmitCJump(Label* label, COpcode op) {
2070 intptr_t offset;
2071 if (label->IsBound()) {
2072 offset = label->Position() - Position();
2073 } else {
2074 offset = label->link_cj(Position());
2075 }
2076 if (!IsCJImm(offset)) {
2077 FATAL("Incorrect Assembler::kNearJump");
2078 }
2079 Emit16(op | EncodeCJImm(offset));
2080}
2081
2082void MicroAssembler::EmitRType(Funct5 funct5,
2083 std::memory_order order,
2084 Register rs2,
2085 Register rs1,
2086 Funct3 funct3,
2087 Register rd,
2088 Opcode opcode) {
2089 intptr_t funct7 = funct5 << 2;
2090 switch (order) {
2091 case std::memory_order_acq_rel:
2092 funct7 |= 0b11;
2093 break;
2094 case std::memory_order_acquire:
2095 funct7 |= 0b10;
2096 break;
2097 case std::memory_order_release:
2098 funct7 |= 0b01;
2099 break;
2100 case std::memory_order_relaxed:
2101 funct7 |= 0b00;
2102 break;
2103 default:
2104 FATAL("Invalid memory order");
2105 }
2106 EmitRType((Funct7)funct7, rs2, rs1, funct3, rd, opcode);
2107}
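// A worked example of the ordering bits above, assuming an RV_A amoswap.w
// (mnemonic illustrative): aq and rl occupy the low two bits of funct7, so
// std::memory_order_acq_rel produces funct7 = (funct5 << 2) | 0b11, while
// memory_order_relaxed leaves both bits clear.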
2108
2109void MicroAssembler::EmitRType(Funct7 funct7,
2110 Register rs2,
2111 Register rs1,
2112 Funct3 funct3,
2113 Register rd,
2114 Opcode opcode) {
2115 uint32_t e = 0;
2116 e |= EncodeFunct7(funct7);
2117 e |= EncodeRs2(rs2);
2118 e |= EncodeRs1(rs1);
2119 e |= EncodeFunct3(funct3);
2120 e |= EncodeRd(rd);
2121 e |= EncodeOpcode(opcode);
2122 Emit32(e);
2123}
2124
2125void MicroAssembler::EmitRType(Funct7 funct7,
2126 FRegister rs2,
2127 FRegister rs1,
2128 Funct3 funct3,
2129 FRegister rd,
2130 Opcode opcode) {
2131 uint32_t e = 0;
2132 e |= EncodeFunct7(funct7);
2133 e |= EncodeFRs2(rs2);
2134 e |= EncodeFRs1(rs1);
2135 e |= EncodeFunct3(funct3);
2136 e |= EncodeFRd(rd);
2137 e |= EncodeOpcode(opcode);
2138 Emit32(e);
2139}
2140
2141void MicroAssembler::EmitRType(Funct7 funct7,
2142 FRegister rs2,
2143 FRegister rs1,
2144 RoundingMode round,
2145 FRegister rd,
2146 Opcode opcode) {
2147 uint32_t e = 0;
2148 e |= EncodeFunct7(funct7);
2149 e |= EncodeFRs2(rs2);
2150 e |= EncodeFRs1(rs1);
2151 e |= EncodeRoundingMode(round);
2152 e |= EncodeFRd(rd);
2153 e |= EncodeOpcode(opcode);
2154 Emit32(e);
2155}
2156
2157void MicroAssembler::EmitRType(Funct7 funct7,
2158 FRegister rs2,
2159 Register rs1,
2160 RoundingMode round,
2161 FRegister rd,
2162 Opcode opcode) {
2163 uint32_t e = 0;
2164 e |= EncodeFunct7(funct7);
2165 e |= EncodeFRs2(rs2);
2166 e |= EncodeRs1(rs1);
2167 e |= EncodeRoundingMode(round);
2168 e |= EncodeFRd(rd);
2169 e |= EncodeOpcode(opcode);
2170 Emit32(e);
2171}
2172
2173void MicroAssembler::EmitRType(Funct7 funct7,
2174 FRegister rs2,
2175 Register rs1,
2176 Funct3 funct3,
2177 FRegister rd,
2178 Opcode opcode) {
2179 uint32_t e = 0;
2180 e |= EncodeFunct7(funct7);
2181 e |= EncodeFRs2(rs2);
2182 e |= EncodeRs1(rs1);
2183 e |= EncodeFunct3(funct3);
2184 e |= EncodeFRd(rd);
2185 e |= EncodeOpcode(opcode);
2186 Emit32(e);
2187}
2188
2189void MicroAssembler::EmitRType(Funct7 funct7,
2190 FRegister rs2,
2191 FRegister rs1,
2192 Funct3 funct3,
2193 Register rd,
2194 Opcode opcode) {
2195 uint32_t e = 0;
2196 e |= EncodeFunct7(funct7);
2197 e |= EncodeFRs2(rs2);
2198 e |= EncodeFRs1(rs1);
2199 e |= EncodeFunct3(funct3);
2200 e |= EncodeRd(rd);
2201 e |= EncodeOpcode(opcode);
2202 Emit32(e);
2203}
2204
2205void MicroAssembler::EmitRType(Funct7 funct7,
2206 FRegister rs2,
2207 FRegister rs1,
2208 RoundingMode round,
2209 Register rd,
2210 Opcode opcode) {
2211 uint32_t e = 0;
2212 e |= EncodeFunct7(funct7);
2213 e |= EncodeFRs2(rs2);
2214 e |= EncodeFRs1(rs1);
2215 e |= EncodeRoundingMode(round);
2216 e |= EncodeRd(rd);
2217 e |= EncodeOpcode(opcode);
2218 Emit32(e);
2219}
2220
2221void MicroAssembler::EmitRType(Funct7 funct7,
2222 intptr_t shamt,
2223 Register rs1,
2224 Funct3 funct3,
2225 Register rd,
2226 Opcode opcode) {
2227 uint32_t e = 0;
2228 e |= EncodeFunct7(funct7);
2229 e |= EncodeShamt(shamt);
2230 e |= EncodeRs1(rs1);
2231 e |= EncodeFunct3(funct3);
2232 e |= EncodeRd(rd);
2233 e |= EncodeOpcode(opcode);
2234 Emit32(e);
2235}
2236
2237void MicroAssembler::EmitR4Type(FRegister rs3,
2238 Funct2 funct2,
2239 FRegister rs2,
2240 FRegister rs1,
2241 RoundingMode round,
2242 FRegister rd,
2243 Opcode opcode) {
2244 uint32_t e = 0;
2245 e |= EncodeFRs3(rs3);
2246 e |= EncodeFunct2(funct2);
2247 e |= EncodeFRs2(rs2);
2248 e |= EncodeFRs1(rs1);
2249 e |= EncodeRoundingMode(round);
2250 e |= EncodeFRd(rd);
2251 e |= EncodeOpcode(opcode);
2252 Emit32(e);
2253}
2254
2255void MicroAssembler::EmitIType(intptr_t imm,
2256 Register rs1,
2257 Funct3 funct3,
2258 Register rd,
2259 Opcode opcode) {
2260 uint32_t e = 0;
2261 e |= EncodeITypeImm(imm);
2262 e |= EncodeRs1(rs1);
2263 e |= EncodeFunct3(funct3);
2264 e |= EncodeRd(rd);
2265 e |= EncodeOpcode(opcode);
2266 Emit32(e);
2267}
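// As a sanity check of the field packing above: addi a0, a1, -1 is
// imm 0xFFF | rs1 x11 | funct3 0 | rd x10 | opcode 0x13, which ORs
// together to the well-known encoding 0xFFF58513.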
2268
2269void MicroAssembler::EmitIType(intptr_t imm,
2270 Register rs1,
2271 Funct3 funct3,
2272 FRegister rd,
2273 Opcode opcode) {
2274 uint32_t e = 0;
2275 e |= EncodeITypeImm(imm);
2276 e |= EncodeRs1(rs1);
2277 e |= EncodeFunct3(funct3);
2278 e |= EncodeFRd(rd);
2279 e |= EncodeOpcode(opcode);
2280 Emit32(e);
2281}
2282
2283void MicroAssembler::EmitSType(intptr_t imm,
2284 Register rs2,
2285 Register rs1,
2286 Funct3 funct3,
2287 Opcode opcode) {
2288 uint32_t e = 0;
2289 e |= EncodeSTypeImm(imm);
2290 e |= EncodeRs2(rs2);
2291 e |= EncodeRs1(rs1);
2292 e |= EncodeFunct3(funct3);
2293 e |= EncodeOpcode(opcode);
2294 Emit32(e);
2295}
2296
2297void MicroAssembler::EmitSType(intptr_t imm,
2298 FRegister rs2,
2299 Register rs1,
2300 Funct3 funct3,
2301 Opcode opcode) {
2302 uint32_t e = 0;
2303 e |= EncodeSTypeImm(imm);
2304 e |= EncodeFRs2(rs2);
2305 e |= EncodeRs1(rs1);
2306 e |= EncodeFunct3(funct3);
2307 e |= EncodeOpcode(opcode);
2308 Emit32(e);
2309}
2310
2311void MicroAssembler::EmitBType(intptr_t imm,
2312 Register rs2,
2313 Register rs1,
2314 Funct3 funct3,
2315 Opcode opcode) {
2316 uint32_t e = 0;
2317 e |= EncodeBTypeImm(imm);
2318 e |= EncodeRs2(rs2);
2319 e |= EncodeRs1(rs1);
2320 e |= EncodeFunct3(funct3);
2321 e |= EncodeOpcode(opcode);
2322 Emit32(e);
2323}
2324
2325void MicroAssembler::EmitUType(intptr_t imm, Register rd, Opcode opcode) {
2326 uint32_t e = 0;
2327 e |= EncodeUTypeImm(imm);
2328 e |= EncodeRd(rd);
2329 e |= EncodeOpcode(opcode);
2330 Emit32(e);
2331}
2332
2333void MicroAssembler::EmitJType(intptr_t imm, Register rd, Opcode opcode) {
2334 uint32_t e = 0;
2335 e |= EncodeJTypeImm(imm);
2336 e |= EncodeRd(rd);
2337 e |= EncodeOpcode(opcode);
2338 Emit32(e);
2339}
2340
2341Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
2342 intptr_t far_branch_level)
2343 : MicroAssembler(object_pool_builder,
2344 far_branch_level,
2345 FLAG_use_compressed_instructions ? RV_GC : RV_G),
2346 constant_pool_allowed_(false) {
2347 generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
2348 // Note this does not destroy RA.
2349 lx(TMP,
2350 Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)));
2351 jalr(TMP, TMP);
2352 };
2353 generate_invoke_array_write_barrier_ = [&]() {
2354 Call(
2355 Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
2356 };
2357}
2358
2359void Assembler::PushRegister(Register r) {
2360 ASSERT(r != SP);
2361 subi(SP, SP, target::kWordSize);
2362 sx(r, Address(SP, 0));
2363}
2364void Assembler::PopRegister(Register r) {
2365 ASSERT(r != SP);
2366 lx(r, Address(SP, 0));
2367 addi(SP, SP, target::kWordSize);
2368}
2369
2370void Assembler::PushRegisterPair(Register r0, Register r1) {
2371 ASSERT(r0 != SP);
2372 ASSERT(r1 != SP);
2373 subi(SP, SP, 2 * target::kWordSize);
2374 sx(r1, Address(SP, target::kWordSize));
2375 sx(r0, Address(SP, 0));
2376}
2377
2378void Assembler::PopRegisterPair(Register r0, Register r1) {
2379 ASSERT(r0 != SP);
2380 ASSERT(r1 != SP);
2381 lx(r1, Address(SP, target::kWordSize));
2382 lx(r0, Address(SP, 0));
2383 addi(SP, SP, 2 * target::kWordSize);
2384}
2385
2386void Assembler::PushRegisters(const RegisterSet& regs) {
2387 // The order in which the registers are pushed must match the order
2388 // in which the registers are encoded in the safepoint's stack map.
2389
2390 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2391 (regs.FpuRegisterCount() * kFpuRegisterSize);
2392 if (size == 0) {
2393 return; // Skip no-op SP update.
2394 }
2395
2396 subi(SP, SP, size);
2397 intptr_t offset = size;
2398 for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; i--) {
2399 FRegister reg = static_cast<FRegister>(i);
2400 if (regs.ContainsFpuRegister(reg)) {
2401 offset -= kFpuRegisterSize;
2402 fsd(reg, Address(SP, offset));
2403 }
2404 }
2405 for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; i--) {
2406 Register reg = static_cast<Register>(i);
2407 if (regs.ContainsRegister(reg)) {
2408 offset -= target::kWordSize;
2409 sx(reg, Address(SP, offset));
2410 }
2411 }
2412 ASSERT(offset == 0);
2413}
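// Resulting layout, as a sketch for a set holding A2, A3, and FA4 on RV64
// (register choice illustrative): CPU registers end up below FPU registers,
// lowest-numbered at the lowest address:
//   SP + 0   A2
//   SP + 8   A3
//   SP + 16  FA4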
2414
2415void Assembler::PopRegisters(const RegisterSet& regs) {
2416 // The order in which the registers are pushed must match the order
2417 // in which the registers are encoded in the safepoint's stack map.
2418
2419 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2420 (regs.FpuRegisterCount() * kFpuRegisterSize);
2421 if (size == 0) {
2422 return; // Skip no-op SP update.
2423 }
2424 intptr_t offset = 0;
2425 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2426 Register reg = static_cast<Register>(i);
2427 if (regs.ContainsRegister(reg)) {
2428 lx(reg, Address(SP, offset));
2429 offset += target::kWordSize;
2430 }
2431 }
2432 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2433 FRegister reg = static_cast<FRegister>(i);
2434 if (regs.ContainsFpuRegister(reg)) {
2435 fld(reg, Address(SP, offset));
2436 offset += kFpuRegisterSize;
2437 }
2438 }
2439 ASSERT(offset == size);
2440 addi(SP, SP, size);
2441}
2442
2443void Assembler::PushRegistersInOrder(std::initializer_list<Register> regs) {
2444 intptr_t offset = regs.size() * target::kWordSize;
2445 subi(SP, SP, offset);
2446 for (Register reg : regs) {
2447 ASSERT(reg != SP);
2448 offset -= target::kWordSize;
2449 sx(reg, Address(SP, offset));
2450 }
2451}
2452
2453void Assembler::PushNativeCalleeSavedRegisters() {
2454 RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
2455 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2456 (regs.FpuRegisterCount() * sizeof(double));
2457 subi(SP, SP, size);
2458 intptr_t offset = 0;
2459 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2460 FRegister reg = static_cast<FRegister>(i);
2461 if (regs.ContainsFpuRegister(reg)) {
2462 fsd(reg, Address(SP, offset));
2463 offset += sizeof(double);
2464 }
2465 }
2466 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2467 Register reg = static_cast<Register>(i);
2468 if (regs.ContainsRegister(reg)) {
2469 sx(reg, Address(SP, offset));
2470 offset += target::kWordSize;
2471 }
2472 }
2473 ASSERT(offset == size);
2474}
2475
2476void Assembler::PopNativeCalleeSavedRegisters() {
2477 RegisterSet regs(kAbiPreservedCpuRegs, kAbiPreservedFpuRegs);
2478 intptr_t size = (regs.CpuRegisterCount() * target::kWordSize) +
2479 (regs.FpuRegisterCount() * sizeof(double));
2480 intptr_t offset = 0;
2481 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
2482 FRegister reg = static_cast<FRegister>(i);
2483 if (regs.ContainsFpuRegister(reg)) {
2484 fld(reg, Address(SP, offset));
2485 offset += sizeof(double);
2486 }
2487 }
2488 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
2489 Register reg = static_cast<Register>(i);
2490 if (regs.ContainsRegister(reg)) {
2491 lx(reg, Address(SP, offset));
2492 offset += target::kWordSize;
2493 }
2494 }
2495 ASSERT(offset == size);
2496 addi(SP, SP, size);
2497}
2498
2499void Assembler::ExtendValue(Register rd, Register rn, OperandSize sz) {
2500 switch (sz) {
2501#if XLEN == 64
2502 case kEightBytes:
2503 if (rd == rn) return; // No operation needed.
2504 return mv(rd, rn);
2505 case kUnsignedFourBytes:
2506 return UNIMPLEMENTED();
2507 case kFourBytes:
2508 return sextw(rd, rn);
2509#elif XLEN == 32
2510 case kUnsignedFourBytes:
2511 case kFourBytes:
2512 if (rd == rn) return; // No operation needed.
2513 return mv(rd, rn);
2514#endif
2515 case kUnsignedTwoBytes:
2516 case kTwoBytes:
2517 case kUnsignedByte:
2518 case kByte:
2519 default:
2520 UNIMPLEMENTED();
2521 break;
2522 }
2523 UNIMPLEMENTED();
2524}
2525void Assembler::ExtendAndSmiTagValue(Register rd, Register rn, OperandSize sz) {
2526 if (sz == kWordBytes) {
2527 SmiTag(rd, rn);
2528 return;
2529 }
2530
2531 switch (sz) {
2532#if XLEN == 64
2533 case kUnsignedFourBytes:
2534 slli(rd, rn, XLEN - kBitsPerInt32);
2535 srli(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
2536 return;
2537 case kFourBytes:
2538 slli(rd, rn, XLEN - kBitsPerInt32);
2539 srai(rd, rd, XLEN - kBitsPerInt32 - kSmiTagShift);
2540 return;
2541#endif
2542 case kUnsignedTwoBytes:
2543 slli(rd, rn, XLEN - kBitsPerInt16);
2544 srli(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
2545 return;
2546 case kTwoBytes:
2547 slli(rd, rn, XLEN - kBitsPerInt16);
2548 srai(rd, rd, XLEN - kBitsPerInt16 - kSmiTagShift);
2549 return;
2550 case kUnsignedByte:
2551 slli(rd, rn, XLEN - kBitsPerInt8);
2552 srli(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
2553 return;
2554 case kByte:
2555 slli(rd, rn, XLEN - kBitsPerInt8);
2556 srai(rd, rd, XLEN - kBitsPerInt8 - kSmiTagShift);
2557 return;
2558 default:
2559 UNIMPLEMENTED();
2560 break;
2561 }
2562}
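// Worked example of the fused shift pair, assuming XLEN == 64 and
// kSmiTagShift == 1, with sz == kTwoBytes: slli by 48 moves the 16 payload
// bits to the top, then srai by 47 sign-extends them back down while leaving
// one tag bit, i.e. rd = (intptr_t)(int16_t)rn << 1 with no separate SmiTag.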
2563
2564// Unconditional jump to a given address in memory. Clobbers TMP2.
2565void Assembler::Jump(const Address& address) {
2566 lx(TMP2, address);
2567 jr(TMP2);
2568}
2569
2570#if defined(TARGET_USES_THREAD_SANITIZER)
2571void Assembler::TsanLoadAcquire(Register addr) {
2572 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2573 MoveRegister(A0, addr);
2574 rt.Call(kTsanLoadAcquireRuntimeEntry, /*argument_count=*/1);
2575}
2576void Assembler::TsanStoreRelease(Register addr) {
2577 LeafRuntimeScope rt(this, /*frame_size=*/0, /*preserve_registers=*/true);
2578 MoveRegister(A0, addr);
2579 rt.Call(kTsanStoreReleaseRuntimeEntry, /*argument_count=*/1);
2580}
2581#endif
2582
2583void Assembler::LoadAcquire(Register dst,
2584 const Address& address,
2585 OperandSize size) {
2586 ASSERT(dst != address.base());
2587 Load(dst, address, size);
2588 fence(HartEffects::kRead, HartEffects::kMemory);
2589
2590#if defined(TARGET_USES_THREAD_SANITIZER)
2591 if (address.offset() == 0) {
2592 TsanLoadAcquire(address.base());
2593 } else {
2594 AddImmediate(TMP2, address.base(), address.offset());
2595 TsanLoadAcquire(TMP2);
2596 }
2597#endif
2598}
2599
2600void Assembler::StoreRelease(Register src,
2601 const Address& address,
2602 OperandSize size) {
2603 fence(HartEffects::kMemory, HartEffects::kWrite);
2604 Store(src, address, size);
2605}
2606
2607void Assembler::CompareWithMemoryValue(Register value,
2608 Address address,
2609 OperandSize size) {
2610#if XLEN >= 64
2611 ASSERT(size == kEightBytes || size == kFourBytes);
2612 if (size == kFourBytes) {
2613 lw(TMP2, address);
2614 } else {
2615 ld(TMP2, address);
2616 }
2617#else
2618 ASSERT_EQUAL(size, kFourBytes);
2619 lx(TMP2, address);
2620#endif
2621 CompareRegisters(value, TMP2);
2622}
2623
2624void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
2625 if (frame_space != 0) {
2626 addi(SP, SP, -frame_space);
2627 }
2628 const intptr_t kAbiStackAlignment = 16; // For both 32 and 64 bit.
2629 andi(SP, SP, ~(kAbiStackAlignment - 1));
2630}
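// E.g. ReserveAlignedFrameSpace(20) emits addi sp, sp, -20 followed by
// andi sp, sp, -16, rounding SP down to the next 16-byte boundary; any slack
// bytes above the aligned SP are simply unused.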
2631
2632// In debug mode, this generates code to check that:
2633// FP + kExitLinkSlotFromEntryFp == SP
2634// or triggers breakpoint otherwise.
2635void Assembler::EmitEntryFrameVerification() {
2636#if defined(DEBUG)
2637 Label done;
2638 ASSERT(!constant_pool_allowed());
2639 LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
2640 target::kWordSize);
2641 add(TMP, TMP, FPREG);
2642 beq(TMP, SPREG, &done, kNearJump);
2643
2644 Breakpoint();
2645
2646 Bind(&done);
2647#endif
2648}
2649
2650void Assembler::CompareRegisters(Register rn, Register rm) {
2651 ASSERT(deferred_compare_ == kNone);
2652 deferred_compare_ = kCompareReg;
2653 deferred_left_ = rn;
2654 deferred_reg_ = rm;
2655}
2656void Assembler::CompareObjectRegisters(Register rn, Register rm) {
2657 CompareRegisters(rn, rm);
2658}
2659void Assembler::TestRegisters(Register rn, Register rm) {
2660 ASSERT(deferred_compare_ == kNone);
2661 deferred_compare_ = kTestReg;
2662 deferred_left_ = rn;
2663 deferred_reg_ = rm;
2664}
2665
2666void Assembler::BranchIf(Condition condition,
2667 Label* label,
2668 JumpDistance distance) {
2669 ASSERT(deferred_compare_ != kNone);
2670
2671 if (deferred_compare_ == kCompareImm || deferred_compare_ == kCompareReg) {
2672 Register left = deferred_left_;
2673 Register right;
2674 if (deferred_compare_ == kCompareImm) {
2675 if (deferred_imm_ == 0) {
2676 right = ZR;
2677 } else {
2678 LoadImmediate(TMP2, deferred_imm_);
2679 right = TMP2;
2680 }
2681 } else {
2682 right = deferred_reg_;
2683 }
2684 switch (condition) {
2685 case EQUAL:
2686 beq(left, right, label, distance);
2687 break;
2688 case NOT_EQUAL:
2689 bne(left, right, label, distance);
2690 break;
2691 case LESS:
2692 blt(left, right, label, distance);
2693 break;
2694 case LESS_EQUAL:
2695 ble(left, right, label, distance);
2696 break;
2697 case GREATER_EQUAL:
2698 bge(left, right, label, distance);
2699 break;
2700 case GREATER:
2701 bgt(left, right, label, distance);
2702 break;
2703 case UNSIGNED_LESS:
2704 bltu(left, right, label, distance);
2705 break;
2705 break;
2706 case UNSIGNED_LESS_EQUAL:
2707 bleu(left, right, label, distance);
2708 break;
2709 case UNSIGNED_GREATER_EQUAL:
2710 bgeu(left, right, label, distance);
2711 break;
2712 case UNSIGNED_GREATER:
2713 bgtu(left, right, label, distance);
2714 break;
2715 case OVERFLOW:
2716 case NO_OVERFLOW:
2717 FATAL("Use Add/Subtract/MultiplyBranchOverflow instead.");
2718 default:
2719 UNREACHABLE();
2720 }
2721 } else if (deferred_compare_ == kTestImm || deferred_compare_ == kTestReg) {
2722 if (deferred_compare_ == kTestImm) {
2723 AndImmediate(TMP2, deferred_left_, deferred_imm_);
2724 } else {
2725 and_(TMP2, deferred_left_, deferred_reg_);
2726 }
2727 switch (condition) {
2728 case ZERO:
2729 beqz(TMP2, label, distance);
2730 break;
2731 case NOT_ZERO:
2732 bnez(TMP2, label, distance);
2733 break;
2734 default:
2735 UNREACHABLE();
2736 }
2737 } else {
2738 UNREACHABLE();
2739 }
2740 deferred_compare_ = kNone; // Consumed.
2741}
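// Because RISC-V has no condition flags, the compare is deferred and fused
// into the branch here. An illustrative pairing:
//   CompareImmediate(A0, 42);   // only records the pending comparison
//   BranchIf(EQUAL, &label);    // emits li tmp2, 42; beq a0, tmp2, label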
2742
2743void Assembler::SetIf(Condition condition, Register rd) {
2744 ASSERT(deferred_compare_ != kNone);
2745
2746 if (deferred_compare_ == kCompareImm) {
2747 if (deferred_imm_ == 0) {
2748 deferred_compare_ = kCompareReg;
2749 deferred_reg_ = ZR;
2750 SetIf(condition, rd);
2751 return;
2752 }
2753 if (!IsITypeImm(deferred_imm_) || !IsITypeImm(deferred_imm_ + 1)) {
2754 LoadImmediate(TMP2, deferred_imm_);
2755 deferred_compare_ = kCompareReg;
2756 deferred_reg_ = TMP2;
2757 SetIf(condition, rd);
2758 return;
2759 }
2760 Register left = deferred_left_;
2761 intx_t right = deferred_imm_;
2762 switch (condition) {
2763 case EQUAL:
2764 subi(rd, left, right);
2765 seqz(rd, rd);
2766 break;
2767 case NOT_EQUAL:
2768 subi(rd, left, right);
2769 snez(rd, rd);
2770 break;
2771 case LESS:
2772 slti(rd, left, right);
2773 break;
2774 case LESS_EQUAL:
2775 slti(rd, left, right + 1);
2776 break;
2777 case GREATER_EQUAL:
2778 slti(rd, left, right);
2779 xori(rd, rd, 1);
2780 break;
2781 case GREATER:
2782 slti(rd, left, right + 1);
2783 xori(rd, rd, 1);
2784 break;
2785 case UNSIGNED_LESS:
2786 sltiu(rd, left, right);
2787 break;
2788 case UNSIGNED_LESS_EQUAL:
2789 sltiu(rd, left, right + 1);
2790 break;
2791 case UNSIGNED_GREATER_EQUAL:
2792 sltiu(rd, left, right);
2793 xori(rd, rd, 1);
2794 break;
2795 case UNSIGNED_GREATER:
2796 sltiu(rd, left, right + 1);
2797 xori(rd, rd, 1);
2798 break;
2799 default:
2800 UNREACHABLE();
2801 }
2802 } else if (deferred_compare_ == kCompareReg) {
2803 Register left = deferred_left_;
2804 Register right = deferred_reg_;
2805 switch (condition) {
2806 case EQUAL:
2807 if (right == ZR) {
2808 seqz(rd, left);
2809 } else {
2810 xor_(rd, left, right);
2811 seqz(rd, rd);
2812 }
2813 break;
2814 case NOT_EQUAL:
2815 if (right == ZR) {
2816 snez(rd, left);
2817 } else {
2818 xor_(rd, left, right);
2819 snez(rd, rd);
2820 }
2821 break;
2822 case LESS:
2823 slt(rd, left, right);
2824 break;
2825 case LESS_EQUAL:
2826 slt(rd, right, left);
2827 xori(rd, rd, 1);
2828 break;
2829 case GREATER_EQUAL:
2830 slt(rd, left, right);
2831 xori(rd, rd, 1);
2832 break;
2833 case GREATER:
2834 slt(rd, right, left);
2835 break;
2836 case UNSIGNED_LESS:
2837 sltu(rd, left, right);
2838 break;
2839 case UNSIGNED_LESS_EQUAL:
2840 sltu(rd, right, left);
2841 xori(rd, rd, 1);
2842 break;
2843 case UNSIGNED_GREATER_EQUAL:
2844 sltu(rd, left, right);
2845 xori(rd, rd, 1);
2846 break;
2847 case UNSIGNED_GREATER:
2848 sltu(rd, right, left);
2849 break;
2850 default:
2851 UNREACHABLE();
2852 }
2853 } else if (deferred_compare_ == kTestImm) {
2854 uintx_t uimm = deferred_imm_;
2855 if (deferred_imm_ == 1) {
2856 switch (condition) {
2857 case ZERO:
2858 andi(rd, deferred_left_, 1);
2859 xori(rd, rd, 1);
2860 break;
2861 case NOT_ZERO:
2862 andi(rd, deferred_left_, 1);
2863 break;
2864 default:
2865 UNREACHABLE();
2866 }
2867 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
2868 switch (condition) {
2869 case ZERO:
2870 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2871 xori(rd, rd, 1);
2872 break;
2873 case NOT_ZERO:
2874 bexti(rd, deferred_left_, Utils::ShiftForPowerOfTwo(uimm));
2875 break;
2876 default:
2877 UNREACHABLE();
2878 }
2879 } else {
2880 AndImmediate(rd, deferred_left_, deferred_imm_);
2881 switch (condition) {
2882 case ZERO:
2883 seqz(rd, rd);
2884 break;
2885 case NOT_ZERO:
2886 snez(rd, rd);
2887 break;
2888 default:
2889 UNREACHABLE();
2890 }
2891 }
2892 } else if (deferred_compare_ == kTestReg) {
2893 and_(rd, deferred_left_, deferred_reg_);
2894 switch (condition) {
2895 case ZERO:
2896 seqz(rd, rd);
2897 break;
2898 case NOT_ZERO:
2899 snez(rd, rd);
2900 break;
2901 default:
2902 UNREACHABLE();
2903 }
2904 } else {
2905 UNREACHABLE();
2906 }
2907
2908 deferred_compare_ = kNone; // Consumed.
2909}
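// The IsITypeImm(deferred_imm_ + 1) pre-check above exists because
// LESS_EQUAL and the GREATER cases are synthesized from an incremented
// immediate, e.g. after CompareImmediate(left, 7), SetIf(GREATER, rd) emits
//   slti rd, left, 8
//   xori rd, rd, 1    // rd = !(left < 8), i.e. left > 7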
2910
2911void Assembler::BranchIfZero(Register rn, Label* label, JumpDistance distance) {
2912 beqz(rn, label, distance);
2913}
2914
2915void Assembler::BranchIfBit(Register rn,
2916 intptr_t bit_number,
2917 Condition condition,
2918 Label* label,
2919 JumpDistance distance) {
2920 ASSERT(rn != TMP2);
2921 andi(TMP2, rn, 1 << bit_number);
2922 if (condition == ZERO) {
2923 beqz(TMP2, label, distance);
2924 } else if (condition == NOT_ZERO) {
2925 bnez(TMP2, label, distance);
2926 } else {
2927 UNREACHABLE();
2928 }
2929}
2930
2931void Assembler::BranchIfNotSmi(Register reg,
2932 Label* label,
2933 JumpDistance distance) {
2934 ASSERT(reg != TMP2);
2935 andi(TMP2, reg, kSmiTagMask);
2936 bnez(TMP2, label, distance);
2937}
2938void Assembler::BranchIfSmi(Register reg, Label* label, JumpDistance distance) {
2939 ASSERT(reg != TMP2);
2940 andi(TMP2, reg, kSmiTagMask);
2941 beqz(TMP2, label, distance);
2942}
2943
2944void Assembler::ArithmeticShiftRightImmediate(Register reg, intptr_t shift) {
2945 srai(reg, reg, shift);
2946}
2947
2948void Assembler::CompareWords(Register reg1,
2949 Register reg2,
2950 intptr_t offset,
2951 Register count,
2952 Register temp,
2953 Label* equals) {
2954 Label loop;
2955 Bind(&loop);
2956 BranchIfZero(count, equals, Assembler::kNearJump);
2957 AddImmediate(count, -1);
2958 lx(temp, FieldAddress(reg1, offset));
2959 lx(TMP, FieldAddress(reg2, offset));
2960 addi(reg1, reg1, target::kWordSize);
2961 addi(reg2, reg2, target::kWordSize);
2962 beq(temp, TMP, &loop, Assembler::kNearJump);
2963}
2964
2965void Assembler::JumpAndLink(intptr_t target_code_pool_index,
2966 CodeEntryKind entry_kind) {
2967 // Avoid clobbering CODE_REG when invoking code in precompiled mode.
2968 // We don't actually use CODE_REG in the callee, and the caller might
2969 // be using CODE_REG for a live value (e.g. a value that is alive
2970 // across invocation of a shared stub like the one we use for
2971 // allocating Mint boxes).
2972 const Register code_reg = FLAG_precompiled_mode ? TMP : CODE_REG;
2973 LoadWordFromPoolIndex(code_reg, target_code_pool_index);
2974 Call(FieldAddress(code_reg, target::Code::entry_point_offset(entry_kind)));
2975}
2976
2977void Assembler::JumpAndLink(
2978 const Code& target,
2979 ObjectPoolBuilderEntry::Patchability patchable,
2980 CodeEntryKind entry_kind,
2981 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
2982 const intptr_t index = object_pool_builder().FindObject(
2983 ToObject(target), patchable, snapshot_behavior);
2984 JumpAndLink(index, entry_kind);
2985}
2986
2987void Assembler::JumpAndLinkWithEquivalence(const Code& target,
2988 const Object& equivalence,
2989 CodeEntryKind entry_kind) {
2990 const intptr_t index =
2991 object_pool_builder().FindObject(ToObject(target), equivalence);
2992 JumpAndLink(index, entry_kind);
2993}
2994
2995void Assembler::Call(Address target) {
2996 lx(RA, target);
2997 jalr(RA);
2998}
2999
3000void Assembler::Call(Register target) {
3001 jalr(target);
3002}
3003
3004void Assembler::AddShifted(Register dest,
3005 Register base,
3006 Register index,
3007 intx_t shift) {
3008 if (shift == 0) {
3009 add(dest, index, base);
3010 } else if (Supports(RV_Zba) && (shift == 1)) {
3011 sh1add(dest, index, base);
3012 } else if (Supports(RV_Zba) && (shift == 2)) {
3013 sh2add(dest, index, base);
3014 } else if (Supports(RV_Zba) && (shift == 3)) {
3015 sh3add(dest, index, base);
3016 } else if (shift < 0) {
3017 if (base != dest) {
3018 srai(dest, index, -shift);
3019 add(dest, dest, base);
3020 } else {
3021 srai(TMP2, index, -shift);
3022 add(dest, TMP2, base);
3023 }
3024 } else {
3025 if (base != dest) {
3026 slli(dest, index, shift);
3027 add(dest, dest, base);
3028 } else {
3029 slli(TMP2, index, shift);
3030 add(dest, TMP2, base);
3031 }
3032 }
3033}
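// With Zba available, a scaled index computation collapses into a single
// instruction, e.g. for 8-byte elements (sketch):
//   sh3add dest, index, base   // dest = base + (index << 3)
// versus the two-instruction slli + add fallback.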
3034
3035void Assembler::AddImmediate(Register rd,
3036 Register rs1,
3037 intx_t imm,
3038 OperandSize sz) {
3039 if ((imm == 0) && (rd == rs1)) {
3040 return;
3041 }
3042 if (IsITypeImm(imm)) {
3043 addi(rd, rs1, imm);
3044 } else {
3045 ASSERT(rs1 != TMP2);
3046 LoadImmediate(TMP2, imm);
3047 add(rd, rs1, TMP2);
3048 }
3049}
3050
3051void Assembler::MulImmediate(Register rd,
3052 Register rs1,
3053 intx_t imm,
3054 OperandSize sz) {
3055 if (Utils::IsPowerOfTwo(imm)) {
3056 const intx_t shift = Utils::ShiftForPowerOfTwo(imm);
3057#if XLEN >= 64
3058 ASSERT(sz == kFourBytes || sz == kEightBytes);
3059 if (sz == kFourBytes) {
3060 slliw(rd, rs1, shift);
3061 } else {
3062 slli(rd, rs1, shift);
3063 }
3064#else
3065 ASSERT(sz == kFourBytes);
3066 slli(rd, rs1, shift);
3067#endif
3068 } else {
3069 LoadImmediate(TMP, imm);
3070#if XLEN >= 64
3071 ASSERT(sz == kFourBytes || sz == kEightBytes);
3072 if (sz == kFourBytes) {
3073 mulw(rd, rs1, TMP);
3074 } else {
3075 mul(rd, rs1, TMP);
3076 }
3077#else
3078 ASSERT(sz == kFourBytes);
3079 mul(rd, rs1, TMP);
3080#endif
3081 }
3082}
3083
3084void Assembler::AndImmediate(Register rd,
3085 Register rs1,
3086 intx_t imm,
3087 OperandSize sz) {
3088 uintx_t uimm = imm;
3089 if (imm == -1) {
3090 MoveRegister(rd, rs1);
3091 } else if (IsITypeImm(imm)) {
3092 andi(rd, rs1, imm);
3093 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(~uimm)) {
3094 bclri(rd, rs1, Utils::ShiftForPowerOfTwo(~uimm));
3095 } else if (Utils::IsPowerOfTwo(uimm + 1)) {
3096 intptr_t shift = Utils::ShiftForPowerOfTwo(uimm + 1);
3097 if (Supports(RV_Zbb) && (shift == 16)) {
3098 zexth(rd, rs1);
3099 } else {
3100 slli(rd, rs1, XLEN - shift);
3101 srli(rd, rd, XLEN - shift);
3102 }
3103 } else {
3104 ASSERT(rs1 != TMP2);
3105 LoadImmediate(TMP2, imm);
3106 and_(rd, rs1, TMP2);
3107 }
3108}
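// Illustrative hits for the special cases above: a mask of 0xFFFF takes the
// zexth path on Zbb (or slli/srli by XLEN - 16 otherwise), while clearing a
// single high bit, e.g. imm = ~(1 << 20), becomes bclri rd, rs1, 20 on Zbs.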
3109void Assembler::OrImmediate(Register rd,
3110 Register rs1,
3111 intx_t imm,
3112 OperandSize sz) {
3113 uintx_t uimm = imm;
3114 if (imm == 0) {
3115 MoveRegister(rd, rs1);
3116 } else if (IsITypeImm(imm)) {
3117 ori(rd, rs1, imm);
3118 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3119 bseti(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3120 } else {
3121 ASSERT(rs1 != TMP2);
3122 LoadImmediate(TMP2, imm);
3123 or_(rd, rs1, TMP2);
3124 }
3125}
3126void Assembler::XorImmediate(Register rd,
3127 Register rs1,
3128 intx_t imm,
3129 OperandSize sz) {
3130 uintx_t uimm = imm;
3131 if (imm == 0) {
3132 MoveRegister(rd, rs1);
3133 } else if (IsITypeImm(imm)) {
3134 xori(rd, rs1, imm);
3135 } else if (Supports(RV_Zbs) && Utils::IsPowerOfTwo(uimm)) {
3136 binvi(rd, rs1, Utils::ShiftForPowerOfTwo(uimm));
3137 } else {
3138 ASSERT(rs1 != TMP2);
3139 LoadImmediate(TMP2, imm);
3140 xor_(rd, rs1, TMP2);
3141 }
3142}
3143
3144void Assembler::TestImmediate(Register rn, intx_t imm, OperandSize sz) {
3145 ASSERT(deferred_compare_ == kNone);
3146 deferred_compare_ = kTestImm;
3147 deferred_left_ = rn;
3148 deferred_imm_ = imm;
3149}
3150void Assembler::CompareImmediate(Register rn, intx_t imm, OperandSize sz) {
3151 ASSERT(deferred_compare_ == kNone);
3152 deferred_compare_ = kCompareImm;
3153 deferred_left_ = rn;
3154 deferred_imm_ = imm;
3155}
3156
3157Address Assembler::PrepareLargeOffset(Register base, int32_t offset) {
3158 ASSERT(base != TMP2);
3159 if (IsITypeImm(offset)) {
3160 return Address(base, offset);
3161 }
3162 intx_t lo = ImmLo(offset);
3163 intx_t hi = ImmHi(offset);
3164 ASSERT(hi != 0);
3165 lui(TMP2, hi);
3166 add(TMP2, TMP2, base);
3167 return Address(TMP2, lo);
3168}
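// ImmLo/ImmHi split the offset so that hi plus the sign-extended low 12 bits
// reproduces it exactly (hi rounds up by 0x1000 whenever lo comes out
// negative). The lui/add pair materializes base + hi in TMP2, and lo rides
// in the returned Address to be folded into the final load or store.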
3169
3170void Assembler::Load(Register dest, const Address& address, OperandSize sz) {
3171 Address addr = PrepareLargeOffset(address.base(), address.offset());
3172 switch (sz) {
3173#if XLEN == 64
3174 case kEightBytes:
3175 return ld(dest, addr);
3176 case kUnsignedFourBytes:
3177 return lwu(dest, addr);
3178#elif XLEN == 32
3179 case kUnsignedFourBytes:
3180 return lw(dest, addr);
3181#endif
3182 case kFourBytes:
3183 return lw(dest, addr);
3184 case kUnsignedTwoBytes:
3185 return lhu(dest, addr);
3186 case kTwoBytes:
3187 return lh(dest, addr);
3188 case kUnsignedByte:
3189 return lbu(dest, addr);
3190 case kByte:
3191 return lb(dest, addr);
3192 default:
3193 UNREACHABLE();
3194 }
3195}
3196// For loading indexed payloads out of tagged objects like Arrays. If the
3197// payload elements are word-sized, use TIMES_HALF_WORD_SIZE when the
3198// contents of [index] are a Smi, or TIMES_WORD_SIZE when they are unboxed.
3199void Assembler::LoadIndexedPayload(Register dest,
3200 Register base,
3201 int32_t payload_offset,
3202 Register index,
3203 ScaleFactor scale,
3204 OperandSize sz) {
3205 AddShifted(TMP, base, index, scale);
3206 LoadFromOffset(dest, TMP, payload_offset - kHeapObjectTag, sz);
3207}
3208
3209void Assembler::LoadSFromOffset(FRegister dest, Register base, int32_t offset) {
3210 flw(dest, PrepareLargeOffset(base, offset));
3211}
3212
3213void Assembler::LoadDFromOffset(FRegister dest, Register base, int32_t offset) {
3214 fld(dest, PrepareLargeOffset(base, offset));
3215}
3216
3217void Assembler::LoadFromStack(Register dst, intptr_t depth) {
3218 LoadFromOffset(dst, SPREG, target::kWordSize * depth);
3219}
3220void Assembler::StoreToStack(Register src, intptr_t depth) {
3221 StoreToOffset(src, SPREG, target::kWordSize * depth);
3222}
3223void Assembler::CompareToStack(Register src, intptr_t depth) {
3224 CompareWithMemoryValue(src, Address(SPREG, target::kWordSize * depth));
3225}
3226
3227void Assembler::Store(Register src, const Address& address, OperandSize sz) {
3228 Address addr = PrepareLargeOffset(address.base(), address.offset());
3229 switch (sz) {
3230#if XLEN == 64
3231 case kEightBytes:
3232 return sd(src, addr);
3233#endif
3234 case kUnsignedFourBytes:
3235 case kFourBytes:
3236 return sw(src, addr);
3237 case kUnsignedTwoBytes:
3238 case kTwoBytes:
3239 return sh(src, addr);
3240 case kUnsignedByte:
3241 case kByte:
3242 return sb(src, addr);
3243 default:
3244 UNREACHABLE();
3245 }
3246}
3247
3248void Assembler::StoreSToOffset(FRegister src, Register base, int32_t offset) {
3249 fsw(src, PrepareLargeOffset(base, offset));
3250}
3251
3252void Assembler::StoreDToOffset(FRegister src, Register base, int32_t offset) {
3253 fsd(src, PrepareLargeOffset(base, offset));
3254}
3255
3256void Assembler::StoreBarrier(Register object,
3257 Register value,
3258 CanBeSmi can_value_be_smi,
3259 Register scratch) {
3260 // x.slot = x. The barrier should have been removed at the IL level.
3261 ASSERT(object != value);
3262 ASSERT(object != scratch);
3263 ASSERT(value != scratch);
3264 ASSERT(object != RA);
3265 ASSERT(value != RA);
3266 ASSERT(scratch != RA);
3267 ASSERT(object != TMP2);
3268 ASSERT(value != TMP2);
3269 ASSERT(scratch != TMP2);
3270 ASSERT(scratch != kNoRegister);
3271
3272 // In parallel, test whether
3273 // - object is old and not remembered and value is new, or
3274 // - object is old and value is old and not marked and concurrent marking is
3275 // in progress
3276 // If so, call the WriteBarrier stub, which will either add object to the
3277 // store buffer (case 1) or add value to the marking stack (case 2).
3278 // See RestorePinnedRegisters for why this can be `ble`.
3279 // Compare UntaggedObject::StorePointer.
3280 Label done;
3281 if (can_value_be_smi == kValueCanBeSmi) {
3282 BranchIfSmi(value, &done, kNearJump);
3283 } else {
3284#if defined(DEBUG)
3285 Label passed_check;
3286 BranchIfNotSmi(value, &passed_check, kNearJump);
3287 Breakpoint();
3288 Bind(&passed_check);
3289#endif
3290 }
3291 lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
3292 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3293 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3294 and_(scratch, scratch, TMP2);
3295 ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);
3296
3297 Register objectForCall = object;
3298 if (value != kWriteBarrierValueReg) {
3299 // Unlikely. Only non-graph intrinsics.
3300 // TODO(rmacnak): Shuffle registers in intrinsics.
3301 if (object != kWriteBarrierValueReg) {
3302 PushRegister(kWriteBarrierValueReg);
3303 } else {
3304 COMPILE_ASSERT(S3 != kWriteBarrierValueReg);
3305 COMPILE_ASSERT(S4 != kWriteBarrierValueReg);
3306 objectForCall = (value == S3) ? S4 : S3;
3307 PushRegisterPair(kWriteBarrierValueReg, objectForCall);
3308 mv(objectForCall, object);
3309 }
3310 mv(kWriteBarrierValueReg, value);
3311 }
3312
3313 // Note this uses TMP as the link register, so RA remains preserved.
3314 generate_invoke_write_barrier_wrapper_(objectForCall);
3315
3316 if (value != kWriteBarrierValueReg) {
3317 if (object != kWriteBarrierValueReg) {
3318 PopRegister(kWriteBarrierValueReg);
3319 } else {
3320 PopRegisterPair(kWriteBarrierValueReg, objectForCall);
3321 }
3322 }
3323 Bind(&done);
3324}
3325
3326void Assembler::ArrayStoreBarrier(Register object,
3327 Register slot,
3328 Register value,
3329 CanBeSmi can_value_be_smi,
3330 Register scratch) {
3331 // TODO(riscv): Use RA2 to avoid spilling RA inline?
3332 const bool spill_lr = true;
3333 ASSERT(object != slot);
3334 ASSERT(object != value);
3335 ASSERT(object != scratch);
3336 ASSERT(slot != value);
3337 ASSERT(slot != scratch);
3338 ASSERT(value != scratch);
3339 ASSERT(object != RA);
3340 ASSERT(slot != RA);
3341 ASSERT(value != RA);
3342 ASSERT(scratch != RA);
3343 ASSERT(object != TMP2);
3344 ASSERT(slot != TMP2);
3345 ASSERT(value != TMP2);
3346 ASSERT(scratch != TMP2);
3347 ASSERT(scratch != kNoRegister);
3348
3349 // In parallel, test whether
3350 // - object is old and not remembered and value is new, or
3351 // - object is old and value is old and not marked and concurrent marking is
3352 // in progress
3353 // If so, call the WriteBarrier stub, which will either add object to the
3354 // store buffer (case 1) or add value to the marking stack (case 2).
3355 // See RestorePinnedRegisters for why this can be `ble`.
3356 // Compare UntaggedObject::StorePointer.
3357 Label done;
3358 if (can_value_be_smi == kValueCanBeSmi) {
3359 BranchIfSmi(value, &done, kNearJump);
3360 } else {
3361#if defined(DEBUG)
3362 Label passed_check;
3363 BranchIfNotSmi(value, &passed_check, kNearJump);
3364 Breakpoint();
3365 Bind(&passed_check);
3366#endif
3367 }
3368 lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
3369 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3370 srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
3371 and_(scratch, scratch, TMP2);
3372 ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);
3373 if (spill_lr) {
3374 PushRegister(RA);
3375 }
3376 if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
3377 (slot != kWriteBarrierSlotReg)) {
3378 // Spill and shuffle unimplemented. Currently StoreIntoArray is only used
3379 // from StoreIndexInstr, which gets these exact registers from the register
3380 // allocator.
3381 UNIMPLEMENTED();
3382 }
3383 generate_invoke_array_write_barrier_();
3384 if (spill_lr) {
3385 PopRegister(RA);
3386 }
3387 Bind(&done);
3388}
3389
3390void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
3391 Register value) {
3392 // We can't assert the incremental barrier is not needed here, only the
3393 // generational barrier. We sometimes omit the write barrier when 'value' is
3394 // a constant, but we don't eagerly mark 'value' and instead assume it is also
3395 // reachable via a constant pool, so it doesn't matter if it is not traced via
3396 // 'object'.
3397 Label done;
3398 BranchIfSmi(value, &done, kNearJump);
3399 lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
3400 andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewBit);
3401 beqz(TMP2, &done, kNearJump);
3402 lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
3403 andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
3404 beqz(TMP2, &done, kNearJump);
3405 Stop("Write barrier is required");
3406 Bind(&done);
3407}
3408
3409void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
3410 const Address& dest,
3411 const Object& value,
3412 MemoryOrder memory_order,
3413 OperandSize size) {
3414 ASSERT(IsOriginalObject(value));
3415 DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
3416 // No store buffer update.
3417 Register value_reg;
3418 if (IsSameObject(compiler::NullObject(), value)) {
3419 value_reg = NULL_REG;
3420 } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
3421 value_reg = ZR;
3422 } else {
3423 ASSERT(object != TMP);
3424 LoadObject(TMP, value);
3425 value_reg = TMP;
3426 }
3427 if (memory_order == kRelease) {
3428 fence(HartEffects::kMemory, HartEffects::kWrite);
3429 }
3430 Store(value_reg, dest, size);
3431}
3432
3433// Stores a non-tagged value into a heap object.
3434void Assembler::StoreInternalPointer(Register object,
3435 const Address& dest,
3436 Register value) {
3437 sx(value, dest);
3438}
3439
3440// Object pool, loading from pool, etc.
3441void Assembler::LoadPoolPointer(Register pp) {
3442 CheckCodePointer();
3443 lx(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
3444
3445 // When in the PP register, the pool pointer is untagged. When we
3446 // push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
3447 // then untags when restoring from the stack. This will make loading from the
3448 // object pool only one instruction for the first 4096 entries. Otherwise,
3449 // because the offset wouldn't be aligned, it would be only one instruction
3450 // for the first 64 entries.
3451 subi(pp, pp, kHeapObjectTag);
3452 set_constant_pool_allowed(pp == PP);
3453}
3454
3455bool Assembler::CanLoadFromObjectPool(const Object& object) const {
3456 ASSERT(IsOriginalObject(object));
3457 if (!constant_pool_allowed()) {
3458 return false;
3459 }
3460
3461 DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
3462 ASSERT(IsInOldSpace(object));
3463 return true;
3464}
3465void Assembler::LoadNativeEntry(
3466 Register dst,
3467 const ExternalLabel* label,
3468 ObjectPoolBuilderEntry::Patchability patchable) {
3469 const intptr_t index =
3470 object_pool_builder().FindNativeFunction(label, patchable);
3471 LoadWordFromPoolIndex(dst, index);
3472}
3473void Assembler::LoadIsolate(Register dst) {
3474 lx(dst, Address(THR, target::Thread::isolate_offset()));
3475}
3476void Assembler::LoadIsolateGroup(Register dst) {
3477 lx(dst, Address(THR, target::Thread::isolate_group_offset()));
3478}
3479
3480void Assembler::LoadImmediate(Register reg, intx_t imm) {
3481#if XLEN > 32
3482 if (!Utils::IsInt(32, imm)) {
3483 int shift = Utils::CountTrailingZeros64(imm);
3484 if (IsITypeImm(imm >> shift)) {
3485 li(reg, imm >> shift);
3486 slli(reg, reg, shift);
3487 return;
3488 }
3489 if ((shift >= 12) && IsUTypeImm(imm >> (shift - 12))) {
3490 lui(reg, imm >> (shift - 12));
3491 slli(reg, reg, shift - 12);
3492 return;
3493 }
3494
3495 if (constant_pool_allowed()) {
3496 intptr_t index = object_pool_builder().FindImmediate(imm);
3497 LoadWordFromPoolIndex(reg, index);
3498 return;
3499 }
3500
3501 intx_t lo = ImmLo(imm);
3502 intx_t hi = imm - lo;
3503 shift = Utils::CountTrailingZeros64(hi);
3504 ASSERT(shift != 0);
3505 LoadImmediate(reg, hi >> shift);
3506 slli(reg, reg, shift);
3507 if (lo != 0) {
3508 addi(reg, reg, lo);
3509 }
3510 return;
3511 }
3512#endif
3513
3514 intx_t lo = ImmLo(imm);
3515 intx_t hi = ImmHi(imm);
3516 if (hi == 0) {
3517 addi(reg, ZR, lo);
3518 } else {
3519 lui(reg, hi);
3520 if (lo != 0) {
3521#if XLEN == 32
3522 addi(reg, reg, lo);
3523#else
3524 addiw(reg, reg, lo);
3525#endif
3526 }
3527 }
3528}
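// Examples of the wide-constant paths above (values illustrative): a shifted
// small constant such as 0x7FF00000000 hits the li + slli case as
// li reg, 0x7FF; slli reg, reg, 32, while an arbitrary 64-bit constant with
// no pool available is built recursively: high part first, slli into place,
// then addi of ImmLo.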
3529
3530void Assembler::LoadSImmediate(FRegister reg, float imms) {
3531 int32_t imm = bit_cast<int32_t, float>(imms);
3532 if (imm == 0) {
3533 fmvwx(reg, ZR); // bit_cast uint32_t -> float
3534 } else {
3535 ASSERT(constant_pool_allowed());
3536 intptr_t index = object_pool_builder().FindImmediate(imm);
3537 intptr_t offset = target::ObjectPool::element_offset(index);
3538 LoadSFromOffset(reg, PP, offset);
3539 }
3540}
3541
3542void Assembler::LoadDImmediate(FRegister reg, double immd) {
3543 int64_t imm = bit_cast<int64_t, double>(immd);
3544 if (imm == 0) {
3545#if XLEN >= 64
3546 fmvdx(reg, ZR); // bit_cast uint64_t -> double
3547#else
3548 fcvtdwu(reg, ZR); // static_cast uint32_t -> double
3549#endif
3550 } else {
3551 ASSERT(constant_pool_allowed());
3552 intptr_t index = object_pool_builder().FindImmediate64(imm);
3553 intptr_t offset = target::ObjectPool::element_offset(index);
3554 LoadDFromOffset(reg, PP, offset);
3555 }
3556}
3557
3558void Assembler::LoadQImmediate(FRegister reg, simd128_value_t immq) {
3559 UNREACHABLE(); // F registers cannot represent SIMD128.
3560}
3561
3562// Load word from pool from the given offset using encoding that
3563// InstructionPattern::DecodeLoadWordFromPool can decode.
3564//
3565// Note: the function never clobbers TMP, TMP2 scratch registers.
3566void Assembler::LoadWordFromPoolIndex(Register dst,
3567 intptr_t index,
3568 Register pp) {
3569 ASSERT((pp != PP) || constant_pool_allowed());
3570 ASSERT(dst != pp);
3571 const uint32_t offset = target::ObjectPool::element_offset(index);
3572 // PP is untagged.
3573 intx_t lo = ImmLo(offset);
3574 intx_t hi = ImmHi(offset);
3575 if (hi == 0) {
3576 lx(dst, Address(pp, lo));
3577 } else {
3578 lui(dst, hi);
3579 add(dst, dst, pp);
3580 lx(dst, Address(dst, lo));
3581 }
3582}
3583
3584void Assembler::StoreWordToPoolIndex(Register src,
3585 intptr_t index,
3586 Register pp) {
3587 ASSERT((pp != PP) || constant_pool_allowed());
3588 ASSERT(src != pp);
3589 const uint32_t offset = target::ObjectPool::element_offset(index);
3590 // PP is untagged.
3591 intx_t lo = ImmLo(offset);
3592 intx_t hi = ImmHi(offset);
3593 if (hi == 0) {
3594 sx(src, Address(pp, lo));
3595 } else {
3596 lui(TMP, hi);
3597 add(TMP, TMP, pp);
3598 sx(src, Address(TMP, lo));
3599 }
3600}
3601
3602void Assembler::CompareObject(Register reg, const Object& object) {
3603 ASSERT(IsOriginalObject(object));
3604 if (IsSameObject(compiler::NullObject(), object)) {
3605 CompareObjectRegisters(reg, NULL_REG);
3606 } else if (target::IsSmi(object)) {
3607 CompareImmediate(reg, target::ToRawSmi(object), kObjectBytes);
3608 } else {
3609 LoadObject(TMP, object);
3610 CompareObjectRegisters(reg, TMP);
3611 }
3612}
3613
3614void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
3615 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3616 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3617#if XLEN == 64
3618 srliw(result, tags, target::UntaggedObject::kClassIdTagPos);
3619#else
3620 srli(result, tags, target::UntaggedObject::kClassIdTagPos);
3621#endif
3622}
3623
3624void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
3625 ASSERT(target::UntaggedObject::kSizeTagPos == 8);
3626 ASSERT(target::UntaggedObject::kSizeTagSize == 4);
3627 srli(result, tags, target::UntaggedObject::kSizeTagPos);
3628 andi(result, result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
3629 slli(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
3630}
3631
3632void Assembler::LoadClassId(Register result, Register object) {
3633 ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
3634 ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
3635#if XLEN == 64
3636 lwu(result, FieldAddress(object, target::Object::tags_offset()));
3637#else
3638 lw(result, FieldAddress(object, target::Object::tags_offset()));
3639#endif
3640 srli(result, result, target::UntaggedObject::kClassIdTagPos);
3641}
3642
3643void Assembler::LoadClassById(Register result, Register class_id) {
3644 ASSERT(result != class_id);
3645
3646 const intptr_t table_offset =
3647 target::IsolateGroup::cached_class_table_table_offset();
3648
3649 LoadIsolateGroup(result);
3650 LoadFromOffset(result, result, table_offset);
3651 AddShifted(result, result, class_id, target::kWordSizeLog2);
3652 lx(result, Address(result, 0));
3653}
3654void Assembler::CompareClassId(Register object,
3655 intptr_t class_id,
3656 Register scratch) {
3657 ASSERT(scratch != kNoRegister);
3658 LoadClassId(scratch, object);
3659 CompareImmediate(scratch, class_id);
3660}
3661// Note: input and output registers must be different.
3662void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
3663 ASSERT(result != object);
3664 ASSERT(result != TMP2);
3665 ASSERT(object != TMP2);
3666 li(result, kSmiCid);
3667 Label done;
3668 BranchIfSmi(object, &done, kNearJump);
3669 LoadClassId(result, object);
3670 Bind(&done);
3671}
3672void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
3673 LoadClassIdMayBeSmi(result, object);
3674 SmiTag(result);
3675}
3676void Assembler::EnsureHasClassIdInDEBUG(intptr_t cid,
3677 Register src,
3678 Register scratch,
3679 bool can_be_null) {
3680#if defined(DEBUG)
3681 Comment("Check that object in register has cid %" Pd "", cid);
3682 Label matches;
3683 LoadClassIdMayBeSmi(scratch, src);
3684 CompareImmediate(scratch, cid);
3685 BranchIf(EQUAL, &matches, Assembler::kNearJump);
3686 if (can_be_null) {
3687 CompareImmediate(scratch, kNullCid);
3688 BranchIf(EQUAL, &matches, Assembler::kNearJump);
3689 }
3690 trap();
3691 Bind(&matches);
3692#endif
3693}
3694
3695void Assembler::EnterFrame(intptr_t frame_size) {
3696 // N.B. The ordering here is important. We must never write beyond SP or
3697 // it can be clobbered by a signal handler.
3698 subi(SP, SP, frame_size + 2 * target::kWordSize);
3699 sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
3700 sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
3701 addi(FP, SP, frame_size + 2 * target::kWordSize);
3702}
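// Frame shape produced above (sketch; the stack grows downward):
//   FP - 1 * wordsize                       saved RA
//   FP - 2 * wordsize                       saved caller FP
//   FP - 2 * wordsize - frame_size .. SP    frame_size bytes for locals
// SP is lowered before the stores so nothing is ever written below SP.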
3703void Assembler::LeaveFrame() {
3704 // N.B. The ordering here is important. We must never read beyond SP or
3705 // it may have already been clobbered by a signal handler.
3706 subi(SP, FP, 2 * target::kWordSize);
3707 lx(FP, Address(SP, 0 * target::kWordSize));
3708 lx(RA, Address(SP, 1 * target::kWordSize));
3709 addi(SP, SP, 2 * target::kWordSize);
3710}
3711
3712void Assembler::TransitionGeneratedToNative(Register destination,
3713 Register new_exit_frame,
3714 Register new_exit_through_ffi,
3715 bool enter_safepoint) {
3716 // Save exit frame information to enable stack walking.
3717 sx(new_exit_frame,
3718 Address(THR, target::Thread::top_exit_frame_info_offset()));
3719
3720 sx(new_exit_through_ffi,
3721 Address(THR, target::Thread::exit_through_ffi_offset()));
3722 Register tmp = new_exit_through_ffi;
3723
3724 // Mark that the thread is executing native code.
3725 sx(destination, Address(THR, target::Thread::vm_tag_offset()));
3726 li(tmp, target::Thread::native_execution_state());
3727 sx(tmp, Address(THR, target::Thread::execution_state_offset()));
3728
3729 if (enter_safepoint) {
3730 EnterFullSafepoint(tmp);
3731 }
3732}
3733
3734void Assembler::TransitionNativeToGenerated(Register state,
3735 bool exit_safepoint,
3736 bool ignore_unwind_in_progress) {
3737 if (exit_safepoint) {
3738 ExitFullSafepoint(state, ignore_unwind_in_progress);
3739 } else {
3740 // This flag only makes sense when we are leaving a safepoint.
3741 ASSERT(!ignore_unwind_in_progress);
3742#if defined(DEBUG)
3743 // Ensure we've already left the safepoint.
3744 ASSERT(target::Thread::full_safepoint_state_acquired() != 0);
3745 li(state, target::Thread::full_safepoint_state_acquired());
3746 lx(RA, Address(THR, target::Thread::safepoint_state_offset()));
3747 and_(RA, RA, state);
3748 Label ok;
3749 beqz(RA, &ok, Assembler::kNearJump);
3750 Breakpoint();
3751 Bind(&ok);
3752#endif
3753 }
3754
3755 // Mark that the thread is executing Dart code.
3756 li(state, target::Thread::vm_tag_dart_id());
3757 sx(state, Address(THR, target::Thread::vm_tag_offset()));
3758 li(state, target::Thread::generated_execution_state());
3759 sx(state, Address(THR, target::Thread::execution_state_offset()));
3760
3761 // Reset exit frame information in Isolate's mutator thread structure.
3762 sx(ZR, Address(THR, target::Thread::top_exit_frame_info_offset()));
3763 sx(ZR, Address(THR, target::Thread::exit_through_ffi_offset()));
3764}
3765
3766void Assembler::EnterFullSafepoint(Register state) {
3767 // We generate the same number of instructions whether or not the slow-path is
3768 // forced. This simplifies GenerateJitCallbackTrampolines.
3769 // For TSAN, we always go to the runtime so TSAN is aware of the release
3770 // semantics of entering the safepoint.
3771
3772 Register addr = RA;
3773 ASSERT(addr != state);
3774
3775 Label slow_path, done, retry;
3776 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
3777 j(&slow_path, Assembler::kNearJump);
3778 }
3779
3780 addi(addr, THR, target::Thread::safepoint_state_offset());
3781 Bind(&retry);
3782 lr(state, Address(addr, 0));
3783 subi(state, state, target::Thread::full_safepoint_state_unacquired());
3784 bnez(state, &slow_path, Assembler::kNearJump);
3785
3786 li(state, target::Thread::full_safepoint_state_acquired());
3787 sc(state, state, Address(addr, 0));
3788 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
3789
3790 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
3791 j(&retry, Assembler::kNearJump);
3792 }
3793
3794 Bind(&slow_path);
3795 lx(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
3796 lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
3797 jalr(addr);
3798
3799 Bind(&done);
3800}
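// The lr/sc pair above forms the usual RISC-V atomic compare-and-swap loop:
// sc writes 0 to its destination register on success, so beqz exits the
// loop, while a nonzero result means another hart intervened and (absent
// the forced slow path) the loop retries from lr.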
3801
3802void Assembler::ExitFullSafepoint(Register state,
3803 bool ignore_unwind_in_progress) {
3804 // We generate the same number of instructions whether or not the slow-path is
3805 // forced, for consistency with EnterFullSafepoint.
3806 // For TSAN, we always go to the runtime so TSAN is aware of the acquire
3807 // semantics of leaving the safepoint.
3808 Register addr = RA;
3809 ASSERT(addr != state);
3810
3811 Label slow_path, done, retry;
3812 if (FLAG_use_slow_path || kTargetUsesThreadSanitizer) {
3813 j(&slow_path, Assembler::kNearJump);
3814 }
3815
3816 addi(addr, THR, target::Thread::safepoint_state_offset());
3817 Bind(&retry);
3818 lr(state, Address(addr, 0));
3819 subi(state, state, target::Thread::full_safepoint_state_acquired());
3820 bnez(state, &slow_path, Assembler::kNearJump);
3821
3822 li(state, target::Thread::full_safepoint_state_unacquired());
3823 sc(state, state, Address(addr, 0));
3824 beqz(state, &done, Assembler::kNearJump); // 0 means sc was successful.
3825
3826 if (!FLAG_use_slow_path && !kTargetUsesThreadSanitizer) {
3827 j(&retry, Assembler::kNearJump);
3828 }
3829
3830 Bind(&slow_path);
3831 if (ignore_unwind_in_progress) {
3832 lx(addr,
3833 Address(THR,
3834 target::Thread::
3835 exit_safepoint_ignore_unwind_in_progress_stub_offset()));
3836 } else {
3837 lx(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
3838 }
3839 lx(addr, FieldAddress(addr, target::Code::entry_point_offset()));
3840 jalr(addr);
3841
3842 Bind(&done);
3843}
3844
3845void Assembler::CheckFpSpDist(intptr_t fp_sp_dist) {
3846 ASSERT(fp_sp_dist <= 0);
3847#if defined(DEBUG)
3848 Label ok;
3849 Comment("CheckFpSpDist");
3850 sub(TMP, SP, FP);
3851 CompareImmediate(TMP, fp_sp_dist);
3852 BranchIf(EQ, &ok, compiler::Assembler::kNearJump);
3853 ebreak();
3854 Bind(&ok);
3855#endif
3856}
3857
3858void Assembler::CheckCodePointer() {
3859#ifdef DEBUG
3860 if (!FLAG_check_code_pointer) {
3861 return;
3862 }
3863 Comment("CheckCodePointer");
3864 Label cid_ok, instructions_ok;
3865 CompareClassId(CODE_REG, kCodeCid, TMP);
3866 BranchIf(EQ, &cid_ok, kNearJump);
3867 ebreak();
3868 Bind(&cid_ok);
3869
3870 const intptr_t entry_offset =
3871 CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
3872 intx_t imm = -entry_offset;
3873 intx_t lo = ImmLo(imm);
3874 intx_t hi = ImmHi(imm);
3875 auipc(TMP, hi);
3876 addi(TMP, TMP, lo);
3877 lx(TMP2, FieldAddress(CODE_REG, target::Code::instructions_offset()));
3878 beq(TMP, TMP2, &instructions_ok, kNearJump);
3879 ebreak();
3880 Bind(&instructions_ok);
3881#endif
3882}
3883
3884void Assembler::RestoreCodePointer() {
3885 lx(CODE_REG,
3886 Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
3887 CheckCodePointer();
3888}
3889
3890void Assembler::RestorePoolPointer() {
3891 if (FLAG_precompiled_mode) {
3892 lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
3893 } else {
3894 lx(PP, Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
3895 lx(PP, FieldAddress(PP, target::Code::object_pool_offset()));
3896 }
3897 subi(PP, PP, kHeapObjectTag); // Pool in PP is untagged!
3898}
3899
3900void Assembler::RestorePinnedRegisters() {
3901 lx(WRITE_BARRIER_STATE,
3902 Address(THR, target::Thread::write_barrier_mask_offset()));
3903 lx(NULL_REG, Address(THR, target::Thread::object_null_offset()));
3904
3905 // Our write barrier usually uses mask-and-test,
3906 // 01b6f6b3 and tmp, tmp, mask
3907 // c689 beqz tmp, +10
3908 // but on RISC-V compare-and-branch is shorter,
3909 // 00ddd663 ble tmp, wbs, +12
3910 //
3911 // TMP bit 4+ = 0
3912 // TMP bit 3 = object is old-and-not-remembered AND value is new (genr bit)
3913 // TMP bit 2 = object is old AND value is old-and-not-marked (incr bit)
3914 // TMP bit 1 = garbage
3915 // TMP bit 0 = garbage
3916 //
 3917 // Thread::wbm | WRITE_BARRIER_STATE | TMP/combined headers | result
 3918 // generational only
 3919 //      0b1000 |              0b0111 |               0b11xx | impossible
 3920 //                                                   0b10xx | call stub
 3921 //                                                   0b01xx | skip
 3922 //                                                   0b00xx | skip
 3923 // generational and incremental
 3924 //      0b1100 |              0b0011 |               0b11xx | impossible
 3925 //                                                   0b10xx | call stub
 3926 //                                                   0b01xx | call stub
 3927 //                                                   0b00xx | skip
3928 xori(WRITE_BARRIER_STATE, WRITE_BARRIER_STATE,
3929 (target::UntaggedObject::kGenerationalBarrierMask << 1) - 1);
3930
3931 // Generational bit must be higher than incremental bit, with no other bits
3932 // between.
3933 ASSERT(target::UntaggedObject::kGenerationalBarrierMask ==
3934 (target::UntaggedObject::kIncrementalBarrierMask << 1));
3935 // Other header bits must be lower.
3936 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
3937 target::UntaggedObject::kCanonicalBit);
3938 ASSERT(target::UntaggedObject::kIncrementalBarrierMask >
3939 target::UntaggedObject::kCardRememberedBit);
3940}
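// Worked instance of the XOR above, using the masks from the table: with a
// generational-only barrier, wbm = 0b1000, so WRITE_BARRIER_STATE becomes
// 0b1000 ^ 0b1111 = 0b0111, and "combined headers <= 0b0111" (unsigned)
// holds exactly when the generational bit is clear -> skip the stub. With
// both barriers, wbm = 0b1100 gives 0b1100 ^ 0b1111 = 0b0011, and the
// compare skips only when both barrier bits are clear.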
3941
3942void Assembler::SetupGlobalPoolAndDispatchTable() {
3943 ASSERT(FLAG_precompiled_mode);
3944 lx(PP, Address(THR, target::Thread::global_object_pool_offset()));
3945 subi(PP, PP, kHeapObjectTag); // Pool in PP is untagged!
3946 lx(DISPATCH_TABLE_REG,
3947 Address(THR, target::Thread::dispatch_table_array_offset()));
3948}
3949
3950void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
3951 ASSERT(!constant_pool_allowed());
3952
3953 if (!IsITypeImm(frame_size + 4 * target::kWordSize)) {
3954 EnterDartFrame(0, new_pp);
3955 AddImmediate(SP, SP, -frame_size);
3956 return;
3957 }
3958
3959 // N.B. The ordering here is important. We must never write beyond SP or
3960 // it can be clobbered by a signal handler.
3961 if (FLAG_precompiled_mode) {
3962 subi(SP, SP, frame_size + 2 * target::kWordSize);
3963 sx(RA, Address(SP, frame_size + 1 * target::kWordSize));
3964 sx(FP, Address(SP, frame_size + 0 * target::kWordSize));
3965 addi(FP, SP, frame_size + 2 * target::kWordSize);
3966 } else {
3967 subi(SP, SP, frame_size + 4 * target::kWordSize);
3968 sx(RA, Address(SP, frame_size + 3 * target::kWordSize));
3969 sx(FP, Address(SP, frame_size + 2 * target::kWordSize));
3970 sx(CODE_REG, Address(SP, frame_size + 1 * target::kWordSize));
3971 addi(PP, PP, kHeapObjectTag);
3972 sx(PP, Address(SP, frame_size + 0 * target::kWordSize));
3973 addi(FP, SP, frame_size + 4 * target::kWordSize);
3974 if (new_pp == kNoRegister) {
3975 LoadPoolPointer();
3976 } else {
3977 mv(PP, new_pp);
3978 }
3979 }
3980 set_constant_pool_allowed(true);
3981}
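// Resulting JIT frame, as a sketch (offsets relative to the FP established
// above, which is SP + frame_size + 4 words after the stores):
//
//   FP - 1 word  : saved RA (caller return address)
//   FP - 2 words : saved caller FP
//   FP - 3 words : CODE_REG (PC marker)
//   FP - 4 words : caller PP (re-tagged before the store)
//   down to SP   : frame_size bytes of locals
//
// The precompiled (AOT) frame omits CODE_REG and PP, saving only RA and FP.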
3982
3983// On entry to a function compiled for OSR, the caller's frame pointer, the
3984// stack locals, and any copied parameters are already in place. The frame
3985// pointer is already set up. The PC marker is not correct for the
3986// optimized function and there may be extra space for spill slots to
3987// allocate. We must also set up the pool pointer for the function.
3988void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
3989 ASSERT(!constant_pool_allowed());
3990 Comment("EnterOsrFrame");
3991 RestoreCodePointer();
3992 LoadPoolPointer();
3993
3994 if (extra_size > 0) {
3995 AddImmediate(SP, -extra_size);
3996 }
3997}
3998
3999void Assembler::LeaveDartFrame() {
4000 // N.B. The ordering here is important. We must never read beyond SP or
4001 // it may have already been clobbered by a signal handler.
4002 if (!FLAG_precompiled_mode) {
4003 lx(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
4004 target::kWordSize));
4005 subi(PP, PP, kHeapObjectTag);
4006 }
4007 set_constant_pool_allowed(false);
4008 subi(SP, FP, 2 * target::kWordSize);
4009 lx(FP, Address(SP, 0 * target::kWordSize));
4010 lx(RA, Address(SP, 1 * target::kWordSize));
4011 addi(SP, SP, 2 * target::kWordSize);
4012}
4013
4014void Assembler::LeaveDartFrame(intptr_t fp_sp_dist) {
4015 intptr_t pp_offset =
4016 target::frame_layout.saved_caller_pp_from_fp * target::kWordSize -
4017 fp_sp_dist;
4018 intptr_t fp_offset =
4019 target::frame_layout.saved_caller_fp_from_fp * target::kWordSize -
4020 fp_sp_dist;
4021 intptr_t ra_offset =
4022 target::frame_layout.saved_caller_pc_from_fp * target::kWordSize -
4023 fp_sp_dist;
4024 if (!IsITypeImm(pp_offset) || !IsITypeImm(fp_offset) ||
4025 !IsITypeImm(ra_offset)) {
4026 // Shorter to update SP twice than generate large immediates.
4027 LeaveDartFrame();
4028 return;
4029 }
4030
4031 if (!FLAG_precompiled_mode) {
4032 lx(PP, Address(SP, pp_offset));
4033 subi(PP, PP, kHeapObjectTag);
4034 }
4035 set_constant_pool_allowed(false);
4036 lx(FP, Address(SP, fp_offset));
4037 lx(RA, Address(SP, ra_offset));
4038 addi(SP, SP, -fp_sp_dist);
4039}
4040
4041void Assembler::CallRuntime(const RuntimeEntry& entry,
4042 intptr_t argument_count) {
4043 ASSERT(!entry.is_leaf());
4044 // Argument count is not checked here, but in the runtime entry for a more
4045 // informative error message.
4046 lx(T5, compiler::Address(THR, entry.OffsetFromThread()));
4047 li(T4, argument_count);
4048 Call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
4049}
4050
4051static const RegisterSet kRuntimeCallSavedRegisters(kDartVolatileCpuRegs,
4052 kAbiVolatileFpuRegs);
4053
4054#define __ assembler_->
4055
4056LeafRuntimeScope::LeafRuntimeScope(Assembler* assembler,
4057 intptr_t frame_size,
4058 bool preserve_registers)
4059 : assembler_(assembler), preserve_registers_(preserve_registers) {
4060 // N.B. The ordering here is important. We must never write beyond SP or
4061 // it can be clobbered by a signal handler.
4062 __ subi(SP, SP, 4 * target::kWordSize);
4063 __ sx(RA, Address(SP, 3 * target::kWordSize));
4064 __ sx(FP, Address(SP, 2 * target::kWordSize));
4065 __ sx(CODE_REG, Address(SP, 1 * target::kWordSize));
4066 __ sx(PP, Address(SP, 0 * target::kWordSize));
4067 __ addi(FP, SP, 4 * target::kWordSize);
4068
4069 if (preserve_registers) {
4070 __ PushRegisters(kRuntimeCallSavedRegisters);
4071 } else {
4072 // Or no reason to save above.
4073 COMPILE_ASSERT(!IsCalleeSavedRegister(TMP) && !IsAbiPreservedRegister(TMP));
4074 COMPILE_ASSERT(!IsCalleeSavedRegister(TMP2) && !IsAbiPreservedRegister(TMP2));
4075 // Or would need to save above.
4076 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
4077 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
4078 COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_STATE));
4079 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
4080 }
4081
4082 __ ReserveAlignedFrameSpace(frame_size);
4083}
4084
4085void LeafRuntimeScope::Call(const RuntimeEntry& entry,
4086 intptr_t argument_count) {
4087 ASSERT(argument_count == entry.argument_count());
4088 __ lx(TMP2, compiler::Address(THR, entry.OffsetFromThread()));
4089 __ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
4090 __ jalr(TMP2);
4091 __ LoadImmediate(TMP2, VMTag::kDartTagId);
4092 __ sx(TMP2, compiler::Address(THR, target::Thread::vm_tag_offset()));
4093}
4094
4095LeafRuntimeScope::~LeafRuntimeScope() {
4096 if (preserve_registers_) {
4097 const intptr_t kSavedRegistersSize =
4098 kRuntimeCallSavedRegisters.CpuRegisterCount() * target::kWordSize +
4099 kRuntimeCallSavedRegisters.FpuRegisterCount() * kFpuRegisterSize +
4100 4 * target::kWordSize;
4101
4102 __ subi(SP, FP, kSavedRegistersSize);
4103
4104 __ PopRegisters(kRuntimeCallSavedRegisters);
4105 }
4106
4107 __ subi(SP, FP, 4 * target::kWordSize);
4108 __ lx(PP, Address(SP, 0 * target::kWordSize));
4109 __ lx(CODE_REG, Address(SP, 1 * target::kWordSize));
4110 __ lx(FP, Address(SP, 2 * target::kWordSize));
4111 __ lx(RA, Address(SP, 3 * target::kWordSize));
4112 __ addi(SP, SP, 4 * target::kWordSize);
4113}
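// Typical use of LeafRuntimeScope, as a sketch (the runtime entry name is
// hypothetical; arguments travel in the ABI argument registers):
//
//   {
//     LeafRuntimeScope rt(assembler, /*frame_size=*/0,
//                         /*preserve_registers=*/true);
//     __ mv(A0, receiver_reg);
//     rt.Call(kSomeLeafRuntimeEntry, /*argument_count=*/1);
//     // Result, if any, is in A0; the destructor restores the frame.
//   }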
4114
4115#undef __
4116
4117void Assembler::EnterCFrame(intptr_t frame_space) {
4118 // Already saved.
4119 COMPILE_ASSERT(IsCalleeSavedRegister(THR));
4120 COMPILE_ASSERT(IsCalleeSavedRegister(NULL_REG));
4121 COMPILE_ASSERT(IsCalleeSavedRegister(WRITE_BARRIER_STATE));
4122 COMPILE_ASSERT(IsCalleeSavedRegister(DISPATCH_TABLE_REG));
4123 // Need to save.
4124 COMPILE_ASSERT(!IsCalleeSavedRegister(PP));
4125
4126 // N.B. The ordering here is important. We must never read beyond SP or
4127 // it may have already been clobbered by a signal handler.
4128 subi(SP, SP, frame_space + 3 * target::kWordSize);
4129 sx(RA, Address(SP, frame_space + 2 * target::kWordSize));
4130 sx(FP, Address(SP, frame_space + 1 * target::kWordSize));
4131 sx(PP, Address(SP, frame_space + 0 * target::kWordSize));
4132 addi(FP, SP, frame_space + 3 * target::kWordSize);
4133 const intptr_t kAbiStackAlignment = 16; // For both 32 and 64 bit.
4134 andi(SP, SP, ~(kAbiStackAlignment - 1));
4135}
4136
4137void Assembler::LeaveCFrame() {
4138 // N.B. The ordering here is important. We must never read beyond SP or
4139 // it may have already been clobbered by a signal handler.
4140 subi(SP, FP, 3 * target::kWordSize);
4141 lx(PP, Address(SP, 0 * target::kWordSize));
4142 lx(FP, Address(SP, 1 * target::kWordSize));
4143 lx(RA, Address(SP, 2 * target::kWordSize));
4144 addi(SP, SP, 3 * target::kWordSize);
4145}
4146
4147// A0: Receiver
4148// S5: ICData entry array
4149// PP: Caller's PP (preserved)
4150void Assembler::MonomorphicCheckedEntryJIT() {
4151 has_monomorphic_entry_ = true;
4152 const intptr_t saved_far_branch_level = far_branch_level();
4153 set_far_branch_level(0);
4154 const intptr_t start = CodeSize();
4155
4156 Label immediate, miss;
4157 Bind(&miss);
4158 lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
4159 jr(TMP);
4160
4161 Comment("MonomorphicCheckedEntry");
4162 ASSERT_EQUAL(CodeSize() - start,
4163 target::Instructions::kMonomorphicEntryOffsetJIT);
4164
4165 Register entries_reg = IC_DATA_REG; // Contains ICData::entries().
4166 const intptr_t cid_offset = target::Array::element_offset(0);
4167 const intptr_t count_offset = target::Array::element_offset(1);
4168 ASSERT(A1 != PP);
4169 ASSERT(A1 != entries_reg);
4170 ASSERT(A1 != CODE_REG);
4171
4172 lx(TMP, FieldAddress(entries_reg, cid_offset));
4173 LoadTaggedClassIdMayBeSmi(A1, A0);
4174 bne(TMP, A1, &miss, kNearJump);
4175
4176 lx(TMP, FieldAddress(entries_reg, count_offset));
4177 addi(TMP, TMP, target::ToRawSmi(1));
4178 sx(TMP, FieldAddress(entries_reg, count_offset));
4179
4180 li(ARGS_DESC_REG, 0); // GC-safe for OptimizeInvokedFunction
4181
4182 // Fall through to unchecked entry.
4183 ASSERT_EQUAL(CodeSize() - start,
4184 target::Instructions::kPolymorphicEntryOffsetJIT);
4185
4186 set_far_branch_level(saved_far_branch_level);
4187}
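// Pseudocode for the checked entry generated above (names illustrative):
//
//   if (TaggedClassIdOf(A0) != entries[cid_index]) goto switchable_call_miss;
//   entries[count_index] += ToRawSmi(1);  // Bump the call count.
//   ARGS_DESC_REG = 0;                    // GC-safe placeholder.
//   // ...execution falls through to the unchecked entry.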
4188
4189// A0 receiver, S5 guarded cid as Smi.
4190 // Preserve S4 (ARGS_DESC_REG); not required today, but it may be later.
4191// PP: Caller's PP (preserved)
4192void Assembler::MonomorphicCheckedEntryAOT() {
4193 has_monomorphic_entry_ = true;
4194 intptr_t saved_far_branch_level = far_branch_level();
4195 set_far_branch_level(0);
4196
4197 const intptr_t start = CodeSize();
4198
4199 Label immediate, miss;
4200 Bind(&miss);
4201 lx(TMP, Address(THR, target::Thread::switchable_call_miss_entry_offset()));
4202 jr(TMP);
4203
4204 Comment("MonomorphicCheckedEntry");
4205 ASSERT_EQUAL(CodeSize() - start,
4206 target::Instructions::kMonomorphicEntryOffsetAOT);
4207 LoadClassId(TMP, A0);
4208 SmiTag(TMP);
4209 bne(S5, TMP, &miss, kNearJump);
4210
4211 // Fall through to unchecked entry.
4212 ASSERT_EQUAL(CodeSize() - start,
4213 target::Instructions::kPolymorphicEntryOffsetAOT);
4214
4215 set_far_branch_level(saved_far_branch_level);
4216}
4217
4218void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
4219 has_monomorphic_entry_ = true;
4220 while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
4221 ebreak();
4222 }
4223 j(label);
4224 while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
4225 ebreak();
4226 }
4227}
4228
4229void Assembler::CombineHashes(Register hash, Register other) {
4230#if XLEN >= 64
4231 // hash += other_hash
4232 addw(hash, hash, other);
4233 // hash += hash << 10
4234 slliw(other, hash, 10);
4235 addw(hash, hash, other);
4236 // hash ^= hash >> 6
4237 srliw(other, hash, 6);
4238 xor_(hash, hash, other);
4239#else
4240 // hash += other_hash
4241 add(hash, hash, other);
4242 // hash += hash << 10
4243 slli(other, hash, 10);
4244 add(hash, hash, other);
4245 // hash ^= hash >> 6
4246 srli(other, hash, 6);
4247 xor_(hash, hash, other);
4248#endif
4249}
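// Scalar sketch of the sequence above, assuming 32-bit hash lanes (this
// helper is illustrative and not referenced by the assembler):
//
//   static inline uint32_t CombineHashesScalar(uint32_t hash, uint32_t other) {
//     hash += other;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//     return hash;
//   }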
4250
4251void Assembler::FinalizeHashForSize(intptr_t bit_size,
4252 Register hash,
4253 Register scratch) {
4254 ASSERT(bit_size > 0); // Can't avoid returning 0 if there are no hash bits!
4255 // While any 32-bit hash value fits in X bits, where X > 32, the caller may
4256 // reasonably expect that the returned values fill the entire bit space.
4257 ASSERT(bit_size <= kBitsPerInt32);
4258 ASSERT(scratch != kNoRegister);
4259#if XLEN >= 64
4260 // hash += hash << 3;
4261 slliw(scratch, hash, 3);
4262 addw(hash, hash, scratch);
4263 // hash ^= hash >> 11; // Logical shift, unsigned hash.
4264 srliw(scratch, hash, 11);
4265 xor_(hash, hash, scratch);
4266 // hash += hash << 15;
4267 slliw(scratch, hash, 15);
4268 addw(hash, hash, scratch);
4269#else
4270 // hash += hash << 3;
4271 slli(scratch, hash, 3);
4272 add(hash, hash, scratch);
4273 // hash ^= hash >> 11; // Logical shift, unsigned hash.
4274 srli(scratch, hash, 11);
4275 xor_(hash, hash, scratch);
4276 // hash += hash << 15;
4277 slli(scratch, hash, 15);
4278 add(hash, hash, scratch);
4279#endif
4280 // Size to fit.
4281 if (bit_size < kBitsPerInt32) {
4282 AndImmediate(hash, hash, Utils::NBitMask(bit_size));
4283 }
4284 // return (hash == 0) ? 1 : hash;
4285 seqz(scratch, hash);
4286 add(hash, hash, scratch);
4287}
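// Matching scalar sketch of the finalization (illustrative): the final
// seqz/add maps a zero hash to 1, so 0 can mean "hash not yet computed".
//
//   static inline uint32_t FinalizeHashScalar(uint32_t hash, int bit_size) {
//     hash += hash << 3;
//     hash ^= hash >> 11;  // Logical shift, unsigned hash.
//     hash += hash << 15;
//     if (bit_size < 32) hash &= (1u << bit_size) - 1;
//     return (hash == 0) ? 1 : hash;
//   }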
4288
4289#ifndef PRODUCT
4290void Assembler::MaybeTraceAllocation(Register cid,
4291 Label* trace,
4292 Register temp_reg,
4293 JumpDistance distance) {
4294 LoadIsolateGroup(temp_reg);
4295 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4296 lx(temp_reg,
4297 Address(temp_reg,
4298 target::ClassTable::allocation_tracing_state_table_offset()));
4299 add(temp_reg, temp_reg, cid);
4300 LoadFromOffset(temp_reg, temp_reg,
4301 target::ClassTable::AllocationTracingStateSlotOffsetFor(0),
4302 kUnsignedByte);
4303 bnez(temp_reg, trace);
4304}
4305
4306void Assembler::MaybeTraceAllocation(intptr_t cid,
4307 Label* trace,
4308 Register temp_reg,
4309 JumpDistance distance) {
4310 ASSERT(cid > 0);
4311 LoadIsolateGroup(temp_reg);
4312 lx(temp_reg, Address(temp_reg, target::IsolateGroup::class_table_offset()));
4313 lx(temp_reg,
4314 Address(temp_reg,
4315 target::ClassTable::allocation_tracing_state_table_offset()));
4316 LoadFromOffset(temp_reg, temp_reg,
4317 target::ClassTable::AllocationTracingStateSlotOffsetFor(cid),
4318 kUnsignedByte);
4319 bnez(temp_reg, trace);
4320}
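// Both overloads test one byte of per-class tracing state; in pseudocode
// (accessor names illustrative):
//
//   uint8_t* states = isolate_group->class_table()->allocation_tracing_state();
//   if (states[cid] != 0) goto trace;  // Allocate via the stub and record it.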
4321#endif // !PRODUCT
4322
4323void Assembler::TryAllocateObject(intptr_t cid,
4324 intptr_t instance_size,
4325 Label* failure,
4326 JumpDistance distance,
4327 Register instance_reg,
4328 Register temp_reg) {
4329 ASSERT(failure != nullptr);
4330 ASSERT(instance_size != 0);
4331 ASSERT(instance_reg != temp_reg);
4332 ASSERT(temp_reg != kNoRegister);
4333 ASSERT(Utils::IsAligned(instance_size,
4334 target::ObjectAlignment::kObjectAlignment));
4335 if (FLAG_inline_alloc &&
4336 target::Heap::IsAllocatableInNewSpace(instance_size)) {
4337 // If this allocation is traced, the program will jump to the failure path
4338 // (i.e. the allocation stub), which will allocate the object and trace the
4339 // allocation call site.
4340 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp_reg));
4341
4342 lx(instance_reg, Address(THR, target::Thread::top_offset()));
4343 lx(temp_reg, Address(THR, target::Thread::end_offset()));
4344 // instance_reg: current top (next object start).
4345 // temp_reg: heap end
4346
4347 // TODO(koda): Protect against unsigned overflow here.
4348 AddImmediate(instance_reg, instance_size);
4349 // instance_reg: potential top (next object start).
4350 // Fail if the heap end is unsigned-less-than-or-equal-to the new heap top.
4351 bleu(temp_reg, instance_reg, failure, distance);
4352 CheckAllocationCanary(instance_reg, temp_reg);
4353
4354 // Successfully allocated the object, now update temp to point to
4355 // next object start and store the class in the class field of object.
4356 sx(instance_reg, Address(THR, target::Thread::top_offset()));
4357 // Move instance_reg back to the start of the object and tag it.
4358 AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
4359
4360 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
4361 LoadImmediate(temp_reg, tags);
4362 Store(temp_reg, FieldAddress(instance_reg, target::Object::tags_offset()));
4363 } else {
4364 j(failure, distance);
4365 }
4366}
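// The inline path above is a classic bump allocation against the thread's
// top/end pointers; in pseudocode (field names illustrative):
//
//   uword top = thread->top();
//   uword new_top = top + instance_size;
//   if (thread->end() <= new_top) goto failure;  // Unsigned compare.
//   thread->set_top(new_top);
//   ObjectPtr obj = ObjectPtr(top + kHeapObjectTag);
//   obj->tags_ = MakeTagWordForNewSpaceObject(cid, instance_size);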
4367
4368void Assembler::TryAllocateArray(intptr_t cid,
4369 intptr_t instance_size,
4370 Label* failure,
4371 Register instance,
4372 Register end_address,
4373 Register temp1,
4374 Register temp2) {
4375 if (FLAG_inline_alloc &&
4376 target::Heap::IsAllocatableInNewSpace(instance_size)) {
4377 // If this allocation is traced, the program will jump to the failure path
4378 // (i.e. the allocation stub), which will allocate the object and trace the
4379 // allocation call site.
4380 NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, temp1));
4381 // Potential new object start.
4382 lx(instance, Address(THR, target::Thread::top_offset()));
4383 AddImmediate(end_address, instance, instance_size);
4384 bltu(end_address, instance, failure); // Fail on unsigned overflow.
4385
4386 // Check if the allocation fits into the remaining space.
4387 // instance: potential new object start.
4388 // end_address: potential next object start.
4389 lx(temp2, Address(THR, target::Thread::end_offset()));
4390 bgeu(end_address, temp2, failure);
4391 CheckAllocationCanary(instance, temp2);
4392
4393 // Successfully allocated the object(s), now update top to point to
4394 // next object start and initialize the object.
4395 sx(end_address, Address(THR, target::Thread::top_offset()));
4396 addi(instance, instance, kHeapObjectTag);
4397 NOT_IN_PRODUCT(LoadImmediate(temp2, instance_size));
4398
4399 // Initialize the tags.
4400 // instance: new object start as a tagged pointer.
4401 const uword tags = target::MakeTagWordForNewSpaceObject(cid, instance_size);
4402 LoadImmediate(temp2, tags);
4403 sx(temp2, FieldAddress(instance, target::Object::tags_offset()));
4404 } else {
4405 j(failure);
4406 }
4407}
4408
4409void Assembler::CopyMemoryWords(Register src,
4410 Register dst,
4411 Register size,
4412 Register temp) {
4413 Label loop, done;
4414 beqz(size, &done, kNearJump);
4415 Bind(&loop);
4416 lx(temp, Address(src));
4417 addi(src, src, target::kWordSize);
4418 sx(temp, Address(dst));
4419 addi(dst, dst, target::kWordSize);
4420 subi(size, size, target::kWordSize);
4421 bnez(size, &loop, kNearJump);
4422 Bind(&done);
4423}
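// Equivalent C sketch: size is in bytes and must be a multiple of the word
// size; src, dst, and size are all clobbered (advanced or decremented).
//
//   while (size != 0) {
//     *dst++ = *src++;  // One word per iteration.
//     size -= target::kWordSize;
//   }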
4424
4425void Assembler::GenerateUnRelocatedPcRelativeCall(intptr_t offset_into_target) {
4426 // JAL only has a +/- 1MB range. AUIPC+JALR has a +/- 2GB range.
4427 intx_t lo = ImmLo(offset_into_target);
4428 intx_t hi = ImmHi(offset_into_target);
4429 auipc(RA, hi);
4430 jalr_fixed(RA, RA, lo);
4431}
4432
4433void Assembler::GenerateUnRelocatedPcRelativeTailCall(
4434 intptr_t offset_into_target) {
4435 // J only has a +/- 1MB range. AUIPC+JR has a +/- 2GB range.
4436 intx_t lo = ImmLo(offset_into_target);
4437 intx_t hi = ImmHi(offset_into_target);
4438 auipc(TMP, hi);
4439 jalr_fixed(ZR, TMP, lo);
4440}
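// ImmLo/ImmHi split a 32-bit pc-relative offset so that AUIPC (which adds
// hi << 12 to the PC) followed by the sign-extended 12-bit lo reconstructs
// it exactly: lo is the sign-extended low 12 bits (in [-2048, 2047]) and
// hi = (offset - lo) >> 12. Worked example: offset = 0x1800 gives
// lo = -0x800 and hi = 0x2, and (0x2 << 12) - 0x800 = 0x1800.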
4441
4442bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
4443 bool is_external,
4444 intptr_t cid,
4445 intptr_t index_scale) {
4446 if (!IsSafeSmi(constant)) return false;
4447 const int64_t index = target::SmiValue(constant);
4448 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4449 if (IsITypeImm(offset)) {
4451 return true;
4452 }
4453 return false;
4454}
4455
4456Address Assembler::ElementAddressForIntIndex(bool is_external,
4457 intptr_t cid,
4458 intptr_t index_scale,
4459 Register array,
4460 intptr_t index) const {
4461 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4462 ASSERT(Utils::IsInt(32, offset));
4463 return Address(array, static_cast<int32_t>(offset));
4464}
4465void Assembler::ComputeElementAddressForIntIndex(Register address,
4466 bool is_external,
4467 intptr_t cid,
4468 intptr_t index_scale,
4469 Register array,
4470 intptr_t index) {
4471 const int64_t offset = index * index_scale + HeapDataOffset(is_external, cid);
4472 AddImmediate(address, array, offset);
4473}
4474
4475Address Assembler::ElementAddressForRegIndex(bool is_external,
4476 intptr_t cid,
4477 intptr_t index_scale,
4478 bool index_unboxed,
4479 Register array,
4480 Register index,
4481 Register temp) {
4482 // Unless unboxed, the index is expected smi-tagged (i.e. LSL 1) for all arrays.
4483 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
4484 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4485 const int32_t offset = HeapDataOffset(is_external, cid);
4486 ASSERT(array != temp);
4487 ASSERT(index != temp);
4488 AddShifted(temp, array, index, shift);
4489 return Address(temp, offset);
4490}
4491
4492void Assembler::ComputeElementAddressForRegIndex(Register address,
4493 bool is_external,
4494 intptr_t cid,
4495 intptr_t index_scale,
4496 bool index_unboxed,
4497 Register array,
4498 Register index) {
4499 // Unless unboxed, the index is expected smi-tagged (i.e. LSL 1) for all arrays.
4500 const intptr_t boxing_shift = index_unboxed ? 0 : -kSmiTagShift;
4501 const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) + boxing_shift;
4502 const int32_t offset = HeapDataOffset(is_external, cid);
4503 ASSERT(array != address);
4504 ASSERT(index != address);
4505 AddShifted(address, array, index, shift);
4506 if (offset != 0) {
4507 AddImmediate(address, address, offset);
4508 }
4509}
4510
4511void Assembler::LoadStaticFieldAddress(Register address,
4512 Register field,
4513 Register scratch) {
4514 LoadCompressedSmiFieldFromOffset(
4515 scratch, field, target::Field::host_offset_or_field_id_offset());
4516 const intptr_t field_table_offset =
4517 compiler::target::Thread::field_table_values_offset();
4518 LoadMemoryValue(address, THR, static_cast<int32_t>(field_table_offset));
4519 slli(scratch, scratch, target::kWordSizeLog2 - kSmiTagShift);
4520 add(address, address, scratch);
4521}
4522
4523void Assembler::LoadFieldAddressForRegOffset(Register address,
4524 Register instance,
4525 Register offset_in_words_as_smi) {
4526 AddShifted(address, instance, offset_in_words_as_smi,
4527 target::kWordSizeLog2 - kSmiTagShift);
4528 addi(address, address, -kHeapObjectTag);
4529}
4530
4531 // Note: this function never clobbers the TMP or TMP2 scratch registers.
4532void Assembler::LoadObjectHelper(
4533 Register dst,
4534 const Object& object,
4535 bool is_unique,
4536 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
4537 ASSERT(IsOriginalObject(object));
4538 // `is_unique == true` effectively means the object has to be patchable
4539 // (even if the object is null).
4540 if (!is_unique) {
4541 if (IsSameObject(compiler::NullObject(), object)) {
4542 mv(dst, NULL_REG);
4543 return;
4544 }
4545 if (IsSameObject(CastHandle<Object>(compiler::TrueObject()), object)) {
4546 addi(dst, NULL_REG, kTrueOffsetFromNull);
4547 return;
4548 }
4549 if (IsSameObject(CastHandle<Object>(compiler::FalseObject()), object)) {
4550 addi(dst, NULL_REG, kFalseOffsetFromNull);
4551 return;
4552 }
4553 word offset = 0;
4554 if (target::CanLoadFromThread(object, &offset)) {
4555 lx(dst, Address(THR, offset));
4556 return;
4557 }
4558 if (target::IsSmi(object)) {
4559 LoadImmediate(dst, target::ToRawSmi(object));
4560 return;
4561 }
4562 }
4563 RELEASE_ASSERT(CanLoadFromObjectPool(object));
4564 const intptr_t index =
4565 is_unique
4566 ? object_pool_builder().AddObject(
4567 object, ObjectPoolBuilderEntry::kPatchable, snapshot_behavior)
4568 : object_pool_builder().FindObject(
4569 object, ObjectPoolBuilderEntry::kNotPatchable,
4570 snapshot_behavior);
4571 LoadWordFromPoolIndex(dst, index);
4572}
4573
4574void Assembler::AddImmediateBranchOverflow(Register rd,
4575 Register rs1,
4576 intx_t imm,
4577 Label* overflow) {
4578 ASSERT(rd != TMP2);
4579 if (rd == rs1) {
4580 mv(TMP2, rs1);
4581 AddImmediate(rd, rs1, imm);
4582 if (imm > 0) {
4583 blt(rd, TMP2, overflow);
4584 } else if (imm < 0) {
4585 bgt(rd, TMP2, overflow);
4586 }
4587 } else {
4588 AddImmediate(rd, rs1, imm);
4589 if (imm > 0) {
4590 blt(rd, rs1, overflow);
4591 } else if (imm < 0) {
4592 bgt(rd, rs1, overflow);
4593 }
4594 }
4595}
4596void Assembler::SubtractImmediateBranchOverflow(Register rd,
4597 Register rs1,
4598 intx_t imm,
4599 Label* overflow) {
4600 // TODO(riscv): Incorrect for MIN_INTX_T!
4601 AddImmediateBranchOverflow(rd, rs1, -imm, overflow);
4602}
4603void Assembler::MultiplyImmediateBranchOverflow(Register rd,
4604 Register rs1,
4605 intx_t imm,
4606 Label* overflow) {
4607 ASSERT(rd != TMP);
4608 ASSERT(rd != TMP2);
4609 ASSERT(rs1 != TMP);
4610 ASSERT(rs1 != TMP2);
4611
4612 LoadImmediate(TMP2, imm);
4613 // Macro-op fusion: when both products are needed, the recommended sequence
4614 // is mulh first.
4615 mulh(TMP, rs1, TMP2);
4616 mul(rd, rs1, TMP2);
4617 srai(TMP2, rd, XLEN - 1);
4618 bne(TMP, TMP2, overflow);
4619}
4620void Assembler::AddBranchOverflow(Register rd,
4621 Register rs1,
4622 Register rs2,
4623 Label* overflow) {
4624 ASSERT(rd != TMP);
4625 ASSERT(rd != TMP2);
4626 ASSERT(rs1 != TMP);
4627 ASSERT(rs1 != TMP2);
4628 ASSERT(rs2 != TMP);
4629 ASSERT(rs2 != TMP2);
4630
4631 if ((rd == rs1) && (rd == rs2)) {
4632 ASSERT(rs1 == rs2);
4633 mv(TMP, rs1);
4634 add(rd, rs1, rs2); // rs1, rs2 destroyed
4635 xor_(TMP, TMP, rd); // TMP negative if sign changed
4636 bltz(TMP, overflow);
4637 } else if (rs1 == rs2) {
4638 ASSERT(rd != rs1);
4639 ASSERT(rd != rs2);
4640 add(rd, rs1, rs2);
4641 xor_(TMP, rd, rs1); // TMP negative if sign changed
4642 bltz(TMP, overflow);
4643 } else if (rd == rs1) {
4644 ASSERT(rs1 != rs2);
4645 slti(TMP, rs1, 0);
4646 add(rd, rs1, rs2); // rs1 destroyed
4647 slt(TMP2, rd, rs2);
4648 bne(TMP, TMP2, overflow);
4649 } else if (rd == rs2) {
4650 ASSERT(rs1 != rs2);
4651 slti(TMP, rs2, 0);
4652 add(rd, rs1, rs2); // rs2 destroyed
4653 slt(TMP2, rd, rs1);
4654 bne(TMP, TMP2, overflow);
4655 } else {
4656 add(rd, rs1, rs2);
4657 slti(TMP, rs2, 0);
4658 slt(TMP2, rd, rs1);
4659 bne(TMP, TMP2, overflow);
4660 }
4661}
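// The slti/slt pairs implement the standard signed-overflow test for
// rd = rs1 + rs2: overflow occurred iff (rs2 < 0) != (rd < rs1), since
// adding a negative value must decrease the result and adding a
// non-negative value must not. Scalar sketch (unsigned math avoids UB):
//
//   bool AddOverflows(intptr_t a, intptr_t b, intptr_t* out) {
//     *out = static_cast<intptr_t>(static_cast<uintptr_t>(a) +
//                                  static_cast<uintptr_t>(b));
//     return (b < 0) != (*out < a);
//   }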
4662
4663void Assembler::SubtractBranchOverflow(Register rd,
4664 Register rs1,
4665 Register rs2,
4666 Label* overflow) {
4667 ASSERT(rd != TMP);
4668 ASSERT(rd != TMP2);
4669 ASSERT(rs1 != TMP);
4670 ASSERT(rs1 != TMP2);
4671 ASSERT(rs2 != TMP);
4672 ASSERT(rs2 != TMP2);
4673
4674 if ((rd == rs1) && (rd == rs2)) {
4675 ASSERT(rs1 == rs2);
4676 mv(TMP, rs1);
4677 sub(rd, rs1, rs2); // rs1, rs2 destroyed
4678 xor_(TMP, TMP, rd); // TMP negative if sign changed
4679 bltz(TMP, overflow);
4680 } else if (rs1 == rs2) {
4681 ASSERT(rd != rs1);
4682 ASSERT(rd != rs2);
4683 sub(rd, rs1, rs2);
4684 xor_(TMP, rd, rs1); // TMP negative if sign changed
4685 bltz(TMP, overflow);
4686 } else if (rd == rs1) {
4687 ASSERT(rs1 != rs2);
4688 slti(TMP, rs1, 0);
4689 sub(rd, rs1, rs2); // rs1 destroyed
4690 slt(TMP2, rd, rs2);
4691 bne(TMP, TMP2, overflow);
4692 } else if (rd == rs2) {
4693 ASSERT(rs1 != rs2);
4694 slti(TMP, rs2, 0);
4695 sub(rd, rs1, rs2); // rs2 destroyed
4696 slt(TMP2, rd, rs1);
4697 bne(TMP, TMP2, overflow);
4698 } else {
4699 sub(rd, rs1, rs2);
4700 slti(TMP, rs2, 0);
4701 slt(TMP2, rs1, rd);
4702 bne(TMP, TMP2, overflow);
4703 }
4704}
4705
4706void Assembler::MultiplyBranchOverflow(Register rd,
4707 Register rs1,
4708 Register rs2,
4709 Label* overflow) {
4710 ASSERT(rd != TMP);
4711 ASSERT(rd != TMP2);
4712 ASSERT(rs1 != TMP);
4713 ASSERT(rs1 != TMP2);
4714 ASSERT(rs2 != TMP);
4715 ASSERT(rs2 != TMP2);
4716
4717 // Macro-op fusion: when both products are needed, the recommended sequence
4718 // is mulh first.
4719 mulh(TMP, rs1, rs2);
4720 mul(rd, rs1, rs2);
4721 srai(TMP2, rd, XLEN - 1);
4722 bne(TMP, TMP2, overflow);
4723}
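// The product fits in XLEN bits iff the high half returned by mulh equals
// the sign extension of the low half, which srai reproduces by smearing the
// low half's sign bit across the register. Sketch assuming XLEN == 64 and
// compiler support for __int128:
//
//   bool MulOverflows(int64_t a, int64_t b) {
//     __int128 full = static_cast<__int128>(a) * b;
//     return full != static_cast<int64_t>(full);
//   }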
4724
4725void Assembler::CountLeadingZeroes(Register rd, Register rs) {
4726 if (Supports(RV_Zbb)) {
4727 clz(rd, rs);
4728 return;
4729 }
4730
4731 // n = XLEN
4732 // y = x >> 32; if (y != 0) { n = n - 32; x = y; }
4733 // y = x >> 16; if (y != 0) { n = n - 16; x = y; }
4734 // y = x >> 8; if (y != 0) { n = n - 8; x = y; }
4735 // y = x >> 4; if (y != 0) { n = n - 4; x = y; }
4736 // y = x >> 2; if (y != 0) { n = n - 2; x = y; }
4737 // y = x >> 1; if (y != 0) { return n - 2; }
4738 // return n - x;
4739 Label l0, l1, l2, l3, l4, l5;
4740 li(TMP2, XLEN);
4741#if XLEN == 64
4742 srli(TMP, rs, 32);
4743 beqz(TMP, &l0, Assembler::kNearJump);
4744 subi(TMP2, TMP2, 32);
4745 mv(rs, TMP);
4746 Bind(&l0);
4747#endif
4748 srli(TMP, rs, 16);
4749 beqz(TMP, &l1, Assembler::kNearJump);
4750 subi(TMP2, TMP2, 16);
4751 mv(rs, TMP);
4752 Bind(&l1);
4753 srli(TMP, rs, 8);
4754 beqz(TMP, &l2, Assembler::kNearJump);
4755 subi(TMP2, TMP2, 8);
4756 mv(rs, TMP);
4757 Bind(&l2);
4758 srli(TMP, rs, 4);
4759 beqz(TMP, &l3, Assembler::kNearJump);
4760 subi(TMP2, TMP2, 4);
4761 mv(rs, TMP);
4762 Bind(&l3);
4763 srli(TMP, rs, 2);
4764 beqz(TMP, &l4, Assembler::kNearJump);
4765 subi(TMP2, TMP2, 2);
4766 mv(rs, TMP);
4767 Bind(&l4);
4768 srli(TMP, rs, 1);
4769 sub(rd, TMP2, rs);
4770 beqz(TMP, &l5, Assembler::kNearJump);
4771 subi(rd, TMP2, 2);
4772 Bind(&l5);
4773}
4774
4775void Assembler::RangeCheck(Register value,
4776 Register temp,
4777 intptr_t low,
4778 intptr_t high,
4779 RangeCheckCondition condition,
4780 Label* target) {
4781 auto cc = condition == kIfInRange ? LS : HI;
4782 Register to_check = temp != kNoRegister ? temp : value;
4783 AddImmediate(to_check, value, -low);
4784 CompareImmediate(to_check, high - low);
4785 BranchIf(cc, target);
4786}
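// A single unsigned compare covers both bounds: after t = value - low, the
// unsigned test t <= (high - low) holds iff low <= value <= high, because
// values below `low` wrap around to large unsigned numbers. For example,
// with low = 10 and high = 20, value = 5 yields t = (uword)-5, far above
// 10, so kIfInRange does not branch.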
4787
4788} // namespace compiler
4789
4790} // namespace dart
4791
4792#endif // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)