simulator_riscv.cc
1// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <setjmp.h> // NOLINT
6#include <stdlib.h>
7
8#include "vm/globals.h"
9#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
10
11// Only build the simulator if not compiling for real RISCV hardware.
12#if defined(USING_SIMULATOR)
13
14#include "vm/simulator.h"
15
17#include "vm/constants.h"
18#include "vm/image_snapshot.h"
19#include "vm/native_arguments.h"
20#include "vm/os_thread.h"
21#include "vm/stack_frame.h"
22
23namespace dart {
24
25DEFINE_FLAG(uint64_t,
26 trace_sim_after,
27 ULLONG_MAX,
28 "Trace simulator execution after instruction count reached.");
29DEFINE_FLAG(uint64_t,
30 stop_sim_at,
31 ULLONG_MAX,
32 "Instruction address or instruction count to stop simulator at.");
33
34// SimulatorSetjmpBuffers are linked together, and the last created one
35// is referenced by the Simulator. When an exception is thrown, the exception
36// runtime looks at where to jump and finds the corresponding
37// SimulatorSetjmpBuffer based on the stack pointer of the exception handler.
38// The runtime then does a Longjmp on that buffer to return to the simulator.
39class SimulatorSetjmpBuffer {
40 public:
41 void Longjmp() {
42 // "This" is now the last setjmp buffer.
43 simulator_->set_last_setjmp_buffer(this);
44 longjmp(buffer_, 1);
45 }
46
47 explicit SimulatorSetjmpBuffer(Simulator* sim) {
48 simulator_ = sim;
49 link_ = sim->last_setjmp_buffer();
50 sim->set_last_setjmp_buffer(this);
51 sp_ = static_cast<uword>(sim->get_register(SP));
52 }
53
54 ~SimulatorSetjmpBuffer() {
55 ASSERT(simulator_->last_setjmp_buffer() == this);
56 simulator_->set_last_setjmp_buffer(link_);
57 }
58
59 SimulatorSetjmpBuffer* link() { return link_; }
60
61 uword sp() { return sp_; }
62
63 private:
64 uword sp_;
65 Simulator* simulator_;
66 SimulatorSetjmpBuffer* link_;
67 jmp_buf buffer_;
68
69 friend class Simulator;
70};
71
72// When the generated code calls an external reference we need to catch that in
73// the simulator. The external reference will be a function compiled for the
74// host architecture. We need to call that function instead of trying to
75// execute it with the simulator. We do that by redirecting the external
76// reference to an ecall (environment call) instruction that is handled by
77// the simulator. We write the original destination of the jump at a known
78// offset from the ecall instruction so the simulator knows what to call.
79class Redirection {
80 public:
81 uword address_of_ecall_instruction() {
82 return reinterpret_cast<uword>(&ecall_instruction_);
83 }
84
85 uword external_function() const { return external_function_; }
86
87 Simulator::CallKind call_kind() const { return call_kind_; }
88
89 int argument_count() const { return argument_count_; }
90
91 static Redirection* Get(uword external_function,
92 Simulator::CallKind call_kind,
93 int argument_count) {
94 MutexLocker ml(mutex_);
95
96 Redirection* old_head = list_.load(std::memory_order_relaxed);
97 for (Redirection* current = old_head; current != nullptr;
98 current = current->next_) {
99 if (current->external_function_ == external_function) return current;
100 }
101
102 Redirection* redirection =
103 new Redirection(external_function, call_kind, argument_count);
104 redirection->next_ = old_head;
105
106 // Use a memory fence to ensure all pending writes are written at the time
107 // of updating the list head, so the profiling thread always has a valid
108 // list to look at.
109 list_.store(redirection, std::memory_order_release);
110
111 return redirection;
112 }
113
114 static Redirection* FromECallInstruction(uintx_t ecall_instruction) {
115 char* addr_of_ecall = reinterpret_cast<char*>(ecall_instruction);
116 char* addr_of_redirection =
117 addr_of_ecall - OFFSET_OF(Redirection, ecall_instruction_);
118 return reinterpret_cast<Redirection*>(addr_of_redirection);
119 }
120
121 // Please note that this function is called by the signal handler of the
122 // profiling thread. It can therefore run at any point in time and is not
123 // allowed to hold any locks - which is precisely the reason why the list is
124 // prepend-only and a memory fence is used when writing the list head [list_]!
125 static uword FunctionForRedirect(uword address_of_ecall) {
126 for (Redirection* current = list_.load(std::memory_order_acquire);
127 current != nullptr; current = current->next_) {
128 if (current->address_of_ecall_instruction() == address_of_ecall) {
129 return current->external_function_;
130 }
131 }
132 return 0;
133 }
134
135 private:
136 Redirection(uword external_function,
137 Simulator::CallKind call_kind,
138 int argument_count)
139 : external_function_(external_function),
140 call_kind_(call_kind),
141 argument_count_(argument_count),
142 ecall_instruction_(Instr::kSimulatorRedirectInstruction),
143 next_(nullptr) {}
144
145 uword external_function_;
146 Simulator::CallKind call_kind_;
147 int argument_count_;
148 uint32_t ecall_instruction_;
149 Redirection* next_;
150 static std::atomic<Redirection*> list_;
151 static Mutex* mutex_;
152};
153
154std::atomic<Redirection*> Redirection::list_ = {nullptr};
155Mutex* Redirection::mutex_ = new Mutex();
156
157uword Simulator::RedirectExternalReference(uword function,
158 CallKind call_kind,
159 int argument_count) {
160 Redirection* redirection =
161 Redirection::Get(function, call_kind, argument_count);
162 return redirection->address_of_ecall_instruction();
163}
164
165uword Simulator::FunctionForRedirect(uword redirect) {
166 return Redirection::FunctionForRedirect(redirect);
167}
168
169// Get the active Simulator for the current isolate.
170Simulator* Simulator::Current() {
171 Isolate* isolate = Isolate::Current();
172 Simulator* simulator = isolate->simulator();
173 if (simulator == nullptr) {
174 NoSafepointScope no_safepoint;
175 simulator = new Simulator();
176 isolate->set_simulator(simulator);
177 }
178 return simulator;
179}
180
181void Simulator::Init() {}
182
183Simulator::Simulator()
184 : pc_(0),
185 instret_(0),
186 reserved_address_(0),
187 reserved_value_(0),
188 fcsr_(0),
189 random_(),
190 last_setjmp_buffer_(nullptr) {
191 // Setup simulator support first. Some of this information is needed to
192 // setup the architecture state.
193 // We allocate the stack here, the size is computed as the sum of
194 // the size specified by the user and the buffer space needed for
195 // handling stack overflow exceptions. To be safe in potential
196 // stack underflows we also add some underflow buffer space.
197 stack_ =
198 new char[(OSThread::GetSpecifiedStackSize() +
199 OSThread::kStackSizeBufferMax + kSimulatorStackUnderflowSize)];
200 // Low address.
201 stack_limit_ = reinterpret_cast<uword>(stack_);
202 // Limit for StackOverflowError.
203 overflow_stack_limit_ = stack_limit_ + OSThread::kStackSizeBufferMax;
204 // High address.
205 stack_base_ = overflow_stack_limit_ + OSThread::GetSpecifiedStackSize();
206
207 // Setup architecture state.
208 xregs_[0] = 0;
209 for (intptr_t i = 1; i < kNumberOfCpuRegisters; i++) {
210 xregs_[i] = random_.NextUInt64();
211 }
212 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
213 // TODO(riscv): This generates values that are very wide when printed,
214 // making it hard to read register state. Maybe generate random values in
215 // the unit interval instead?
216 // fregs_[i] = bit_cast<double>(random_.NextUInt64());
217 fregs_[i] = bit_cast<double>(kNaNBox);
218 }
219
220 // The sp is initialized to point to the bottom (high address) of the
221 // allocated stack area.
222 set_xreg(SP, stack_base());
223 // The lr and pc are initialized to a known bad value that will cause an
224 // access violation if the simulator ever tries to execute it.
225 set_xreg(RA, kBadLR);
226 pc_ = kBadLR;
227}
228
229Simulator::~Simulator() {
230 delete[] stack_;
231 Isolate* isolate = Isolate::Current();
232 if (isolate != nullptr) {
233 isolate->set_simulator(nullptr);
234 }
235}
236
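// In DEBUG builds, PrepareCall snapshots every register and then overwrites
// the ABI-volatile ones with junk values, so generated code cannot silently
// rely on volatile registers surviving a call. Release builds do nothing here.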
237void Simulator::PrepareCall(PreservedRegisters* preserved) {
238#if defined(DEBUG)
239 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
240 preserved->xregs[i] = xregs_[i];
241 if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
242 xregs_[i] = random_.NextUInt64();
243 }
244 }
245 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
246 preserved->fregs[i] = fregs_[i];
247 if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
248 // TODO(riscv): This generates values that are very wide when printed,
249 // making it hard to read register state. Maybe generate random values in
250 // the unit interval instead?
251 // fregs_[i] = bit_cast<double>(random_.NextUInt64());
252 fregs_[i] = bit_cast<double>(kNaNBox);
253 }
254 }
255#endif
256}
257
258void Simulator::ClobberVolatileRegisters() {
259#if defined(DEBUG)
260 reserved_address_ = reserved_value_ = 0; // Clear atomic reservation.
261 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
262 if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
263 xregs_[i] = random_.NextUInt64();
264 }
265 }
266 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
267 if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
268 // TODO(riscv): This generates values that are very wide when printed,
269 // making it hard to read register state. Maybe generate random values in
270 // the unit interval instead?
271 // fregs_[i] = bit_cast<double>(random_.NextUInt64());
272 fregs_[i] = bit_cast<double>(kNaNBox);
273 }
274 }
275#endif
276}
277
278void Simulator::SavePreservedRegisters(PreservedRegisters* preserved) {
279#if defined(DEBUG)
280 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
281 preserved->xregs[i] = xregs_[i];
282 }
283 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
284 preserved->fregs[i] = fregs_[i];
285 }
286#endif
287}
288
289void Simulator::CheckPreservedRegisters(PreservedRegisters* preserved) {
290#if defined(DEBUG)
291 if (preserved->xregs[SP] != xregs_[SP]) {
292 PrintRegisters();
293 PrintStack();
294 FATAL("Stack unbalanced");
295 }
296 const intptr_t kPreservedAtCall =
297 kAbiPreservedCpuRegs | (1 << TP) | (1 << GP) | (1 << SP) | (1 << FP);
298 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
299 if ((kPreservedAtCall & (1 << i)) != 0) {
300 if (preserved->xregs[i] != xregs_[i]) {
301 FATAL("%s was not preserved\n", cpu_reg_names[i]);
302 }
303 }
304 }
305 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
306 if ((kAbiVolatileFpuRegs & (1 << i)) == 0) {
307 if (bit_cast<uint64_t>(preserved->fregs[i]) !=
308 bit_cast<uint64_t>(fregs_[i])) {
309 FATAL("%s was not preserved\n", fpu_reg_names[i]);
310 }
311 }
312 }
313#endif
314}
315
316void Simulator::RunCall(intx_t entry, PreservedRegisters* preserved) {
317 pc_ = entry;
318 set_xreg(RA, kEndSimulatingPC);
319 Execute();
320 CheckPreservedRegisters(preserved);
321}
322
323int64_t Simulator::Call(intx_t entry,
324 intx_t parameter0,
325 intx_t parameter1,
326 intx_t parameter2,
327 intx_t parameter3,
328 bool fp_return,
329 bool fp_args) {
330 // Save the SP register before the call so we can restore it.
331 const intptr_t sp_before_call = get_xreg(SP);
332
333 // Setup parameters.
334 if (fp_args) {
335 set_fregd(FA0, parameter0);
336 set_fregd(FA1, parameter1);
337 set_fregd(FA2, parameter2);
338 set_fregd(FA3, parameter3);
339 } else {
340 set_xreg(A0, parameter0);
341 set_xreg(A1, parameter1);
342 set_xreg(A2, parameter2);
343 set_xreg(A3, parameter3);
344 }
345
346 // Make sure the activation frames are properly aligned.
347 intptr_t stack_pointer = sp_before_call;
348 if (OS::ActivationFrameAlignment() > 1) {
349 stack_pointer =
350 Utils::RoundDown(stack_pointer, OS::ActivationFrameAlignment());
351 }
352 set_xreg(SP, stack_pointer);
353
354 // Prepare to execute the code at entry.
355 pc_ = entry;
356 // Put down marker for end of simulation. The simulator will stop simulation
357 // when the PC reaches this value. By saving the "end simulation" value into
358 // the LR the simulation stops when returning to this call point.
359 set_xreg(RA, kEndSimulatingPC);
360
361 // Remember the values of callee-saved registers, and set them up with a
362 // known value so that we are able to check that they are preserved
363 // properly across Dart execution.
364 PreservedRegisters preserved;
365 SavePreservedRegisters(&preserved);
366
367 // Start the simulation.
368 Execute();
369
370 // Check that the callee-saved registers have been preserved,
371 // and restore them with the original value.
372 CheckPreservedRegisters(&preserved);
373
374 // Restore the SP register and return R0.
375 set_xreg(SP, sp_before_call);
376 int64_t return_value;
377 if (fp_return) {
378 return_value = get_fregd(FA0);
379 } else {
380 return_value = get_xreg(A0);
381 }
382 return return_value;
383}
384
385void Simulator::Execute() {
386 if (LIKELY(FLAG_trace_sim_after == ULLONG_MAX)) {
387 ExecuteNoTrace();
388 } else {
389 ExecuteTrace();
390 }
391}
392
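// The fetch loop reads 16-bit parcels: a compressed (RVC) instruction is a
// single parcel, a full instruction is two. Because of RVC, a 32-bit
// instruction may be only 2-byte aligned, hence the unaligned load below.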
393void Simulator::ExecuteNoTrace() {
394 while (pc_ != kEndSimulatingPC) {
395 uint16_t parcel = *reinterpret_cast<uint16_t*>(pc_);
396 if (IsCInstruction(parcel)) {
397 CInstr instr(parcel);
398 Interpret(instr);
399 } else {
400 Instr instr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_)));
401 Interpret(instr);
402 }
403 instret_++;
404 }
405}
406
407void Simulator::ExecuteTrace() {
408 while (pc_ != kEndSimulatingPC) {
409 uint16_t parcel = *reinterpret_cast<uint16_t*>(pc_);
410 if (IsCInstruction(parcel)) {
411 CInstr instr(parcel);
412 if (IsTracingExecution()) {
413 Disassembler::Disassemble(pc_, pc_ + instr.length());
414 }
415 Interpret(instr);
416 } else {
417 Instr instr(LoadUnaligned(reinterpret_cast<uint32_t*>(pc_)));
418 if (IsTracingExecution()) {
419 Disassembler::Disassemble(pc_, pc_ + instr.length());
420 }
421 Interpret(instr);
422 }
423 instret_++;
424 }
425}
426
427bool Simulator::IsTracingExecution() const {
428 return instret_ > FLAG_trace_sim_after;
429}
430
431void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
432 // Walk over all setjmp buffers (simulated --> C++ transitions)
433 // and try to find the setjmp associated with the simulated stack pointer.
434 SimulatorSetjmpBuffer* buf = last_setjmp_buffer();
435 while (buf->link() != nullptr && buf->link()->sp() <= sp) {
436 buf = buf->link();
437 }
438 ASSERT(buf != nullptr);
439
440 // The C++ caller has not cleaned up the stack memory of C++ frames.
441 // Prepare for unwinding frames by destroying all the stack resources
442 // in the previous C++ frames.
443 StackResource::Unwind(thread);
444
445 // Keep the following code in sync with `StubCode::JumpToFrameStub()`.
446
447 // Unwind the C++ stack and continue simulation in the target frame.
448 pc_ = pc;
449 set_xreg(SP, static_cast<uintx_t>(sp));
450 set_xreg(FP, static_cast<uintx_t>(fp));
451 set_xreg(THR, reinterpret_cast<uintx_t>(thread));
452#if defined(DART_TARGET_OS_FUCHSIA) || defined(DART_TARGET_OS_ANDROID)
453 set_xreg(GP, thread->saved_shadow_call_stack());
454#endif
455 // Set the tag.
456 thread->set_vm_tag(VMTag::kDartTagId);
457 // Clear top exit frame.
458 thread->set_top_exit_frame_info(0);
459 // Restore pool pointer.
460 uintx_t code =
461 *reinterpret_cast<uintx_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
462 uintx_t pp = FLAG_precompiled_mode
463 ? static_cast<uintx_t>(thread->global_object_pool())
464 : *reinterpret_cast<uintx_t*>(
465 code + Code::object_pool_offset() - kHeapObjectTag);
466 pp -= kHeapObjectTag; // In the PP register, the pool pointer is untagged.
467 set_xreg(CODE_REG, code);
468 set_xreg(PP, pp);
469 set_xreg(WRITE_BARRIER_STATE,
470 thread->write_barrier_mask() ^
471 ((UntaggedObject::kGenerationalBarrierMask << 1) - 1));
472 set_xreg(NULL_REG, static_cast<uintx_t>(Object::null()));
473 if (FLAG_precompiled_mode) {
474 set_xreg(DISPATCH_TABLE_REG,
475 reinterpret_cast<uintx_t>(thread->dispatch_table_array()));
476 }
477
478 buf->Longjmp();
479}
480
481void Simulator::PrintRegisters() {
482 ASSERT(static_cast<intptr_t>(kNumberOfCpuRegisters) ==
483 static_cast<intptr_t>(kNumberOfFpuRegisters));
484 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
485#if XLEN == 32
486 OS::Print("%4s: %8x %11d", cpu_reg_names[i], xregs_[i], xregs_[i]);
487#elif XLEN == 64
488 OS::Print("%4s: %16" Px64 " %20" Pd64, cpu_reg_names[i], xregs_[i],
489 xregs_[i]);
490#endif
491 OS::Print(" %4s: %lf\n", fpu_reg_names[i], fregs_[i]);
492 }
493#if XLEN == 32
494 OS::Print(" pc: %8x\n", pc_);
495#elif XLEN == 64
496 OS::Print(" pc: %16" Px64 "\n", pc_);
497#endif
498}
499
500void Simulator::PrintStack() {
501 StackFrameIterator frames(get_register(FP), get_register(SP), get_pc(),
502 ValidationPolicy::kDontValidateFrames,
503 Thread::Current(),
504 StackFrameIterator::kNoCrossThreadIteration);
505 StackFrame* frame = frames.NextFrame();
506 while (frame != nullptr) {
507 OS::PrintErr("%s\n", frame->ToCString());
508 frame = frames.NextFrame();
509 }
510}
511
512DART_FORCE_INLINE
513void Simulator::Interpret(Instr instr) {
514 switch (instr.opcode()) {
515 case LUI:
516 InterpretLUI(instr);
517 break;
518 case AUIPC:
519 InterpretAUIPC(instr);
520 break;
521 case JAL:
522 InterpretJAL(instr);
523 break;
524 case JALR:
525 InterpretJALR(instr);
526 break;
527 case BRANCH:
528 InterpretBRANCH(instr);
529 break;
530 case LOAD:
531 InterpretLOAD(instr);
532 break;
533 case STORE:
534 InterpretSTORE(instr);
535 break;
536 case OPIMM:
537 InterpretOPIMM(instr);
538 break;
539 case OPIMM32:
540 InterpretOPIMM32(instr);
541 break;
542 case OP:
543 InterpretOP(instr);
544 break;
545 case OP32:
546 InterpretOP32(instr);
547 break;
548 case MISCMEM:
549 InterpretMISCMEM(instr);
550 break;
551 case SYSTEM:
552 InterpretSYSTEM(instr);
553 break;
554 case AMO:
555 InterpretAMO(instr);
556 break;
557 case LOADFP:
558 InterpretLOADFP(instr);
559 break;
560 case STOREFP:
561 InterpretSTOREFP(instr);
562 break;
563 case FMADD:
564 InterpretFMADD(instr);
565 break;
566 case FMSUB:
567 InterpretFMSUB(instr);
568 break;
569 case FNMADD:
570 InterpretFNMADD(instr);
571 break;
572 case FNMSUB:
573 InterpretFNMSUB(instr);
574 break;
575 case OPFP:
576 InterpretOPFP(instr);
577 break;
578 default:
579 IllegalInstruction(instr);
580 }
581}
582
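// Decode and execute one compressed (RVC) instruction. Cases that transfer
// control return early; all other cases fall through to the shared
// pc_ += instr.length() after the switch.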
583DART_FORCE_INLINE
584void Simulator::Interpret(CInstr instr) {
585 switch (instr.opcode()) {
586 case C_LWSP: {
587 uintx_t addr = get_xreg(SP) + instr.spload4_imm();
588 set_xreg(instr.rd(), MemoryRead<int32_t>(addr, SP));
589 break;
590 }
591#if XLEN == 32
592 case C_FLWSP: {
593 uintx_t addr = get_xreg(SP) + instr.spload4_imm();
594 set_fregs(instr.frd(), MemoryRead<float>(addr, SP));
595 break;
596 }
597#else
598 case C_LDSP: {
599 uintx_t addr = get_xreg(SP) + instr.spload8_imm();
600 set_xreg(instr.rd(), MemoryRead<int64_t>(addr, SP));
601 break;
602 }
603#endif
604 case C_FLDSP: {
605 uintx_t addr = get_xreg(SP) + instr.spload8_imm();
606 set_fregd(instr.frd(), MemoryRead<double>(addr, SP));
607 break;
608 }
609 case C_SWSP: {
610 uintx_t addr = get_xreg(SP) + instr.spstore4_imm();
611 MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2()), SP);
612 break;
613 }
614#if XLEN == 32
615 case C_FSWSP: {
616 uintx_t addr = get_xreg(SP) + instr.spstore4_imm();
617 MemoryWrite<float>(addr, get_fregs(instr.frs2()), SP);
618 break;
619 }
620#else
621 case C_SDSP: {
622 uintx_t addr = get_xreg(SP) + instr.spstore8_imm();
623 MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2()), SP);
624 break;
625 }
626#endif
627 case C_FSDSP: {
628 uintx_t addr = get_xreg(SP) + instr.spstore8_imm();
629 MemoryWrite<double>(addr, get_fregd(instr.frs2()), SP);
630 break;
631 }
632 case C_LW: {
633 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
634 set_xreg(instr.rdp(), MemoryRead<int32_t>(addr, instr.rs1p()));
635 break;
636 }
637#if XLEN == 32
638 case C_FLW: {
639 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
640 set_fregs(instr.frdp(), MemoryRead<float>(addr, instr.rs1p()));
641 break;
642 }
643#else
644 case C_LD: {
645 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
646 set_xreg(instr.rdp(), MemoryRead<int64_t>(addr, instr.rs1p()));
647 break;
648 }
649#endif
650 case C_FLD: {
651 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
652 set_fregd(instr.frdp(), MemoryRead<double>(addr, instr.rs1p()));
653 break;
654 }
655 case C_SW: {
656 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
657 MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2p()), instr.rs1p());
658 break;
659 }
660#if XLEN == 32
661 case C_FSW: {
662 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem4_imm();
663 MemoryWrite<float>(addr, get_fregs(instr.frs2p()), instr.rs1p());
664 break;
665 }
666#else
667 case C_SD: {
668 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
669 MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2p()), instr.rs1p());
670 break;
671 }
672#endif
673 case C_FSD: {
674 uintx_t addr = get_xreg(instr.rs1p()) + instr.mem8_imm();
675 MemoryWrite<double>(addr, get_fregd(instr.frs2p()), instr.rs1p());
676 break;
677 }
678 case C_J: {
679 pc_ += sign_extend((int32_t)instr.j_imm());
680 return;
681 }
682#if XLEN == 32
683 case C_JAL: {
684 set_xreg(RA, pc_ + instr.length());
685 pc_ += sign_extend((int32_t)instr.j_imm());
686 return;
687 }
688#endif
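    // Quadrant-2 register ops share this opcode: bit 12 together with the
    // rd/rs2 fields selects between C.JR, C.MV, C.EBREAK, C.JALR and C.ADD.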
689 case C_JR: {
690 if ((instr.encoding() & (C_JALR ^ C_JR)) != 0) {
691 if ((instr.rs1() == ZR) && (instr.rs2() == ZR)) {
692 InterpretEBREAK(instr);
693 } else if (instr.rs2() == ZR) {
694 // JALR
695 uintx_t target = get_xreg(instr.rs1());
696 set_xreg(RA, pc_ + instr.length());
697 pc_ = target;
698 return;
699 } else {
700 // ADD
701 set_xreg(instr.rd(), get_xreg(instr.rs1()) + get_xreg(instr.rs2()));
702 }
703 } else {
704 if ((instr.rd() != ZR) && (instr.rs2() != ZR)) {
705 // MV
706 set_xreg(instr.rd(), get_xreg(instr.rs2()));
707 } else if (instr.rs2() != ZR) {
708 IllegalInstruction(instr);
709 } else {
710 // JR
711 pc_ = get_xreg(instr.rs1());
712 return;
713 }
714 }
715 break;
716 }
717 case C_BEQZ:
718 if (get_xreg(instr.rs1p()) == 0) {
719 pc_ += instr.b_imm();
720 return;
721 }
722 break;
723 case C_BNEZ:
724 if (get_xreg(instr.rs1p()) != 0) {
725 pc_ += instr.b_imm();
726 return;
727 }
728 break;
729 case C_LI:
730 if (instr.rd() == ZR) {
731 IllegalInstruction(instr);
732 } else {
733 set_xreg(instr.rd(), sign_extend(instr.i_imm()));
734 }
735 break;
736 case C_LUI:
737 if (instr.rd() == SP) {
738 if (instr.i16_imm() == 0) {
739 IllegalInstruction(instr);
740 } else {
741 set_xreg(instr.rd(),
742 get_xreg(instr.rs1()) + sign_extend(instr.i16_imm()));
743 }
744 } else if ((instr.rd() == ZR) || (instr.u_imm() == 0)) {
745 IllegalInstruction(instr);
746 } else {
747 set_xreg(instr.rd(), sign_extend(instr.u_imm()));
748 }
749 break;
750 case C_ADDI:
751 set_xreg(instr.rd(), get_xreg(instr.rs1()) + instr.i_imm());
752 break;
753#if XLEN >= 64
754 case C_ADDIW: {
755 uint32_t a = get_xreg(instr.rs1());
756 uint32_t b = instr.i_imm();
757 set_xreg(instr.rd(), sign_extend(a + b));
758 break;
759 }
760#endif // XLEN >= 64
761 case C_ADDI4SPN:
762 if (instr.i4spn_imm() == 0) {
763 IllegalInstruction(instr);
764 } else {
765 set_xreg(instr.rdp(), get_xreg(SP) + instr.i4spn_imm());
766 }
767 break;
768 case C_SLLI:
769 if (instr.i_imm() == 0) {
770 IllegalInstruction(instr);
771 } else {
772 set_xreg(instr.rd(), get_xreg(instr.rs1())
773 << (instr.i_imm() & (XLEN - 1)));
774 }
775 break;
776 case C_MISCALU:
777 // Note MISCALU has a different notion of rsd′ than other instructions,
778 // so use rs1′ instead.
779 switch (instr.encoding() & C_MISCALU_MASK) {
780 case C_SRLI:
781 if (instr.i_imm() == 0) {
782 IllegalInstruction(instr);
783 } else {
784 set_xreg(instr.rs1p(),
785 get_xreg(instr.rs1p()) >> (instr.i_imm() & (XLEN - 1)));
786 }
787 break;
788 case C_SRAI:
789 if (instr.i_imm() == 0) {
790 IllegalInstruction(instr);
791 } else {
792 set_xreg(instr.rs1p(),
793 static_cast<intx_t>(get_xreg(instr.rs1p())) >>
794 (instr.i_imm() & (XLEN - 1)));
795 }
796 break;
797 case C_ANDI:
798 set_xreg(instr.rs1p(), get_xreg(instr.rs1p()) & instr.i_imm());
799 break;
800 case C_RR:
801 switch (instr.encoding() & C_RR_MASK) {
802 case C_AND:
803 set_xreg(instr.rs1p(),
804 get_xreg(instr.rs1p()) & get_xreg(instr.rs2p()));
805 break;
806 case C_OR:
807 set_xreg(instr.rs1p(),
808 get_xreg(instr.rs1p()) | get_xreg(instr.rs2p()));
809 break;
810 case C_XOR:
811 set_xreg(instr.rs1p(),
812 get_xreg(instr.rs1p()) ^ get_xreg(instr.rs2p()));
813 break;
814 case C_SUB:
815 set_xreg(instr.rs1p(),
816 get_xreg(instr.rs1p()) - get_xreg(instr.rs2p()));
817 break;
818 case C_ADDW: {
819 uint32_t a = get_xreg(instr.rs1p());
820 uint32_t b = get_xreg(instr.rs2p());
821 set_xreg(instr.rs1p(), sign_extend(a + b));
822 break;
823 }
824 case C_SUBW: {
825 uint32_t a = get_xreg(instr.rs1p());
826 uint32_t b = get_xreg(instr.rs2p());
827 set_xreg(instr.rs1p(), sign_extend(a - b));
828 break;
829 }
830 default:
831 IllegalInstruction(instr);
832 }
833 break;
834 default:
835 IllegalInstruction(instr);
836 }
837 break;
838 default:
839 IllegalInstruction(instr);
840 }
841 pc_ += instr.length();
842}
843
844DART_FORCE_INLINE
845void Simulator::InterpretLUI(Instr instr) {
846 set_xreg(instr.rd(), sign_extend(instr.utype_imm()));
847 pc_ += instr.length();
848}
849
850DART_FORCE_INLINE
851void Simulator::InterpretAUIPC(Instr instr) {
852 set_xreg(instr.rd(), pc_ + sign_extend(instr.utype_imm()));
853 pc_ += instr.length();
854}
855
856DART_FORCE_INLINE
857void Simulator::InterpretJAL(Instr instr) {
858 set_xreg(instr.rd(), pc_ + instr.length());
859 pc_ += sign_extend(instr.jtype_imm());
860}
861
862DART_FORCE_INLINE
863void Simulator::InterpretJALR(Instr instr) {
864 uintx_t base = get_xreg(instr.rs1());
865 uintx_t offset = static_cast<uintx_t>(instr.itype_imm());
866 set_xreg(instr.rd(), pc_ + instr.length());
867 pc_ = base + offset;
868}
869
870DART_FORCE_INLINE
871void Simulator::InterpretBRANCH(Instr instr) {
872 switch (instr.funct3()) {
873 case BEQ:
874 if (get_xreg(instr.rs1()) == get_xreg(instr.rs2())) {
875 pc_ += instr.btype_imm();
876 } else {
877 pc_ += instr.length();
878 }
879 break;
880 case BNE:
881 if (get_xreg(instr.rs1()) != get_xreg(instr.rs2())) {
882 pc_ += instr.btype_imm();
883 } else {
884 pc_ += instr.length();
885 }
886 break;
887 case BLT:
888 if (static_cast<intx_t>(get_xreg(instr.rs1())) <
889 static_cast<intx_t>(get_xreg(instr.rs2()))) {
890 pc_ += instr.btype_imm();
891 } else {
892 pc_ += instr.length();
893 }
894 break;
895 case BGE:
896 if (static_cast<intx_t>(get_xreg(instr.rs1())) >=
897 static_cast<intx_t>(get_xreg(instr.rs2()))) {
898 pc_ += instr.btype_imm();
899 } else {
900 pc_ += instr.length();
901 }
902 break;
903 case BLTU:
904 if (static_cast<uintx_t>(get_xreg(instr.rs1())) <
905 static_cast<uintx_t>(get_xreg(instr.rs2()))) {
906 pc_ += instr.btype_imm();
907 } else {
908 pc_ += instr.length();
909 }
910 break;
911 case BGEU:
912 if (static_cast<uintx_t>(get_xreg(instr.rs1())) >=
913 static_cast<uintx_t>(get_xreg(instr.rs2()))) {
914 pc_ += instr.btype_imm();
915 } else {
916 pc_ += instr.length();
917 }
918 break;
919 default:
920 IllegalInstruction(instr);
921 }
922}
923
924DART_FORCE_INLINE
925void Simulator::InterpretLOAD(Instr instr) {
926 uintx_t addr = get_xreg(instr.rs1()) + instr.itype_imm();
927 switch (instr.funct3()) {
928 case LB:
929 set_xreg(instr.rd(), MemoryRead<int8_t>(addr, instr.rs1()));
930 break;
931 case LH:
932 set_xreg(instr.rd(), MemoryRead<int16_t>(addr, instr.rs1()));
933 break;
934 case LW:
935 set_xreg(instr.rd(), MemoryRead<int32_t>(addr, instr.rs1()));
936 break;
937 case LBU:
938 set_xreg(instr.rd(), MemoryRead<uint8_t>(addr, instr.rs1()));
939 break;
940 case LHU:
941 set_xreg(instr.rd(), MemoryRead<uint16_t>(addr, instr.rs1()));
942 break;
943#if XLEN >= 64
944 case LWU:
945 set_xreg(instr.rd(), MemoryRead<uint32_t>(addr, instr.rs1()));
946 break;
947 case LD:
948 set_xreg(instr.rd(), MemoryRead<int64_t>(addr, instr.rs1()));
949 break;
950#endif // XLEN >= 64
951 default:
952 IllegalInstruction(instr);
953 }
954 pc_ += instr.length();
955}
956
957DART_FORCE_INLINE
958void Simulator::InterpretLOADFP(Instr instr) {
959 uintx_t addr = get_xreg(instr.rs1()) + instr.itype_imm();
960 switch (instr.funct3()) {
961 case S:
962 set_fregs(instr.frd(), MemoryRead<float>(addr, instr.rs1()));
963 break;
964 case D:
965 set_fregd(instr.frd(), MemoryRead<double>(addr, instr.rs1()));
966 break;
967 default:
968 IllegalInstruction(instr);
969 }
970 pc_ += instr.length();
971}
972
973DART_FORCE_INLINE
974void Simulator::InterpretSTORE(Instr instr) {
975 uintx_t addr = get_xreg(instr.rs1()) + instr.stype_imm();
976 switch (instr.funct3()) {
977 case SB:
978 MemoryWrite<uint8_t>(addr, get_xreg(instr.rs2()), instr.rs1());
979 break;
980 case SH:
981 MemoryWrite<uint16_t>(addr, get_xreg(instr.rs2()), instr.rs1());
982 break;
983 case SW:
984 MemoryWrite<uint32_t>(addr, get_xreg(instr.rs2()), instr.rs1());
985 break;
986#if XLEN >= 64
987 case SD:
988 MemoryWrite<uint64_t>(addr, get_xreg(instr.rs2()), instr.rs1());
989 break;
990#endif // XLEN >= 64
991 default:
992 IllegalInstruction(instr);
993 }
994 pc_ += instr.length();
995}
996
997DART_FORCE_INLINE
998void Simulator::InterpretSTOREFP(Instr instr) {
999 uintx_t addr = get_xreg(instr.rs1()) + instr.stype_imm();
1000 switch (instr.funct3()) {
1001 case S:
1002 MemoryWrite<float>(addr, get_fregs(instr.frs2()), instr.rs1());
1003 break;
1004 case D:
1005 MemoryWrite<double>(addr, get_fregd(instr.frs2()), instr.rs1());
1006 break;
1007 default:
1008 IllegalInstruction(instr);
1009 }
1010 pc_ += instr.length();
1011}
1012
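// Portable helpers for the Zbb/Zbs/Zbc bit-manipulation extensions, written
// as plain loops and shifts so the simulator does not depend on host
// intrinsics.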
1013static uintx_t clz(uintx_t a) {
1014 for (int bit = XLEN - 1; bit >= 0; bit--) {
1015 if ((a & (static_cast<uintx_t>(1) << bit)) != 0) {
1016 return XLEN - bit - 1;
1017 }
1018 }
1019 return XLEN;
1020}
1021
1022static uintx_t ctz(uintx_t a) {
1023 for (int bit = 0; bit < XLEN; bit++) {
1024 if ((a & (static_cast<uintx_t>(1) << bit)) != 0) {
1025 return bit;
1026 }
1027 }
1028 return XLEN;
1029}
1030
1031static uintx_t cpop(uintx_t a) {
1032 uintx_t count = 0;
1033 for (int bit = 0; bit < XLEN; bit++) {
1034 if ((a & (static_cast<uintx_t>(1) << bit)) != 0) {
1035 count++;
1036 }
1037 }
1038 return count;
1039}
1040
1041static uintx_t clzw(uint32_t a) {
1042 for (int bit = 32 - 1; bit >= 0; bit--) {
1043 if ((a & (static_cast<uint32_t>(1) << bit)) != 0) {
1044 return 32 - bit - 1;
1045 }
1046 }
1047 return 32;
1048}
1049
1050static uintx_t ctzw(uint32_t a) {
1051 for (int bit = 0; bit < 32; bit++) {
1052 if ((a & (static_cast<uint32_t>(1) << bit)) != 0) {
1053 return bit;
1054 }
1055 }
1056 return 32;
1057}
1058
1059static uintx_t cpopw(uint32_t a) {
1060 uintx_t count = 0;
1061 for (int bit = 0; bit < 32; bit++) {
1062 if ((a & (static_cast<uint32_t>(1) << bit)) != 0) {
1063 count++;
1064 }
1065 }
1066 return count;
1067}
1068
1069static intx_t max(intx_t a, intx_t b) {
1070 return a > b ? a : b;
1071}
1072static uintx_t maxu(uintx_t a, uintx_t b) {
1073 return a > b ? a : b;
1074}
1075static intx_t min(intx_t a, intx_t b) {
1076 return a < b ? a : b;
1077}
1078static uintx_t minu(uintx_t a, uintx_t b) {
1079 return a < b ? a : b;
1080}
1081static uintx_t clmul(uintx_t a, uintx_t b) {
1082 uintx_t result = 0;
1083 for (int bit = 0; bit < XLEN; bit++) {
1084 if (((b >> bit) & 1) != 0) {
1085 result ^= a << bit;
1086 }
1087 }
1088 return result;
1089}
1090static uintx_t clmulh(uintx_t a, uintx_t b) {
1091 uintx_t result = 0;
1092 for (int bit = 1; bit < XLEN; bit++) {
1093 if (((b >> bit) & 1) != 0) {
1094 result ^= a >> (XLEN - bit);
1095 }
1096 }
1097 return result;
1098}
1099static uintx_t clmulr(uintx_t a, uintx_t b) {
1100 uintx_t result = 0;
1101 for (int bit = 0; bit < XLEN; bit++) {
1102 if (((b >> bit) & 1) != 0) {
1103 result ^= a >> (XLEN - bit - 1);
1104 }
1105 }
1106 return result;
1107}
1108static uintx_t sextb(uintx_t a) {
1109 return static_cast<intx_t>(a << (XLEN - 8)) >> (XLEN - 8);
1110}
1111static uintx_t sexth(uintx_t a) {
1112 return static_cast<intx_t>(a << (XLEN - 16)) >> (XLEN - 16);
1113}
1114static uintx_t zexth(uintx_t a) {
1115 return a << (XLEN - 16) >> (XLEN - 16);
1116}
1117static uintx_t ror(uintx_t a, uintx_t b) {
1118 uintx_t r = b & (XLEN - 1);
1119 uintx_t l = (XLEN - r) & (XLEN - 1);
1120 return (a << l) | (a >> r);
1121}
1122static uintx_t rol(uintx_t a, uintx_t b) {
1123 uintx_t l = b & (XLEN - 1);
1124 uintx_t r = (XLEN - l) & (XLEN - 1);
1125 return (a << l) | (a >> r);
1126}
1127static uintx_t rorw(uintx_t a, uintx_t b) {
1128 uint32_t r = b & (XLEN - 1);
1129 uint32_t l = (XLEN - r) & (XLEN - 1);
1130 uint32_t x = a;
1131 return sign_extend((x << l) | (x >> r));
1132}
1133static uintx_t rolw(uintx_t a, uintx_t b) {
1134 uint32_t l = b & (XLEN - 1);
1135 uint32_t r = (XLEN - l) & (XLEN - 1);
1136 uint32_t x = a;
1137 return sign_extend((x << l) | (x >> r));
1138}
1139static uintx_t orcb(uintx_t a) {
1140 uintx_t result = 0;
1141 for (int shift = 0; shift < XLEN; shift += 8) {
1142 if (((a >> shift) & 0xFF) != 0) {
1143 result |= static_cast<uintx_t>(0xFF) << shift;
1144 }
1145 }
1146 return result;
1147}
1148static uintx_t rev8(uintx_t a) {
1149 uintx_t result = 0;
1150 for (int shift = 0; shift < XLEN; shift += 8) {
1151 result <<= 8;
1152 result |= (a >> shift) & 0xFF;
1153 }
1154 return result;
1155}
1156static uintx_t bclr(uintx_t a, uintx_t b) {
1157 return a & ~(static_cast<uintx_t>(1) << (b & (XLEN - 1)));
1158}
1159static uintx_t bext(uintx_t a, uintx_t b) {
1160 return (a >> (b & (XLEN - 1))) & 1;
1161}
1162static uintx_t binv(uintx_t a, uintx_t b) {
1163 return a ^ (static_cast<uintx_t>(1) << (b & (XLEN - 1)));
1164}
1165static uintx_t bset(uintx_t a, uintx_t b) {
1166 return a | (static_cast<uintx_t>(1) << (b & (XLEN - 1)));
1167}
1168
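// OPIMM covers the base register-immediate ALU ops plus the Zbb/Zbs immediate
// forms that overload the shift encodings via funct7 (clz/ctz/cpop, sext.b,
// sext.h, rori, orc.b, rev8, bclri/bexti/binvi/bseti).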
1169DART_FORCE_INLINE
1170void Simulator::InterpretOPIMM(Instr instr) {
1171 switch (instr.funct3()) {
1172 case ADDI:
1173 set_xreg(instr.rd(), get_xreg(instr.rs1()) + instr.itype_imm());
1174 break;
1175 case SLTI: {
1176 set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) <
1177 static_cast<intx_t>(instr.itype_imm())
1178 ? 1
1179 : 0);
1180 break;
1181 }
1182 case SLTIU:
1183 set_xreg(instr.rd(), static_cast<uintx_t>(get_xreg(instr.rs1())) <
1184 static_cast<uintx_t>(instr.itype_imm())
1185 ? 1
1186 : 0);
1187 break;
1188 case XORI:
1189 set_xreg(instr.rd(), get_xreg(instr.rs1()) ^ instr.itype_imm());
1190 break;
1191 case ORI:
1192 set_xreg(instr.rd(), get_xreg(instr.rs1()) | instr.itype_imm());
1193 break;
1194 case ANDI:
1195 set_xreg(instr.rd(), get_xreg(instr.rs1()) & instr.itype_imm());
1196 break;
1197 case SLLI:
1198 if (instr.funct7() == COUNT) {
1199 if (instr.shamt() == 0b00000) {
1200 set_xreg(instr.rd(), clz(get_xreg(instr.rs1())));
1201 } else if (instr.shamt() == 0b00001) {
1202 set_xreg(instr.rd(), ctz(get_xreg(instr.rs1())));
1203 } else if (instr.shamt() == 0b00010) {
1204 set_xreg(instr.rd(), cpop(get_xreg(instr.rs1())));
1205 } else if (instr.shamt() == 0b00100) {
1206 set_xreg(instr.rd(), sextb(get_xreg(instr.rs1())));
1207 } else if (instr.shamt() == 0b00101) {
1208 set_xreg(instr.rd(), sexth(get_xreg(instr.rs1())));
1209 } else {
1210 IllegalInstruction(instr);
1211 }
1212 } else if ((instr.funct7() & 0b1111110) == BCLRBEXT) {
1213 set_xreg(instr.rd(), bclr(get_xreg(instr.rs1()), instr.shamt()));
1214 } else if ((instr.funct7() & 0b1111110) == BINV) {
1215 set_xreg(instr.rd(), binv(get_xreg(instr.rs1()), instr.shamt()));
1216 } else if ((instr.funct7() & 0b1111110) == BSET) {
1217 set_xreg(instr.rd(), bset(get_xreg(instr.rs1()), instr.shamt()));
1218 } else {
1219 set_xreg(instr.rd(), get_xreg(instr.rs1()) << instr.shamt());
1220 }
1221 break;
1222 case SRI:
1223 if ((instr.funct7() & 0b1111110) == SRA) {
1224 set_xreg(instr.rd(),
1225 static_cast<intx_t>(get_xreg(instr.rs1())) >> instr.shamt());
1226 } else if ((instr.funct7() & 0b1111110) == ROTATE) {
1227 set_xreg(instr.rd(), ror(get_xreg(instr.rs1()), instr.shamt()));
1228 } else if (instr.funct7() == 0b0010100) {
1229 set_xreg(instr.rd(), orcb(get_xreg(instr.rs1())));
1230#if XLEN == 32
1231 } else if (instr.funct7() == 0b0110100) {
1232#else
1233 } else if (instr.funct7() == 0b0110101) {
1234#endif
1235 set_xreg(instr.rd(), rev8(get_xreg(instr.rs1())));
1236 } else if ((instr.funct7() & 0b1111110) == BCLRBEXT) {
1237 set_xreg(instr.rd(), bext(get_xreg(instr.rs1()), instr.shamt()));
1238 } else {
1239 set_xreg(instr.rd(),
1240 static_cast<uintx_t>(get_xreg(instr.rs1())) >> instr.shamt());
1241 }
1242 break;
1243 default:
1244 IllegalInstruction(instr);
1245 }
1246 pc_ += instr.length();
1247}
1248
1249DART_FORCE_INLINE
1250void Simulator::InterpretOPIMM32(Instr instr) {
1251 switch (instr.funct3()) {
1252 case ADDI: {
1253 uint32_t a = get_xreg(instr.rs1());
1254 uint32_t b = instr.itype_imm();
1255 set_xreg(instr.rd(), sign_extend(a + b));
1256 break;
1257 }
1258 case SLLI: {
1259 if (instr.funct7() == SLLIUW) {
1260 uintx_t a = static_cast<uint32_t>(get_xreg(instr.rs1()));
1261 uintx_t b = instr.shamt();
1262 set_xreg(instr.rd(), a << b);
1263 } else if (instr.funct7() == COUNT) {
1264 if (instr.shamt() == 0b00000) {
1265 set_xreg(instr.rd(), clzw(get_xreg(instr.rs1())));
1266 } else if (instr.shamt() == 0b00001) {
1267 set_xreg(instr.rd(), ctzw(get_xreg(instr.rs1())));
1268 } else if (instr.shamt() == 0b00010) {
1269 set_xreg(instr.rd(), cpopw(get_xreg(instr.rs1())));
1270 } else {
1271 IllegalInstruction(instr);
1272 }
1273 } else {
1274 uint32_t a = get_xreg(instr.rs1());
1275 uint32_t b = instr.shamt();
1276 set_xreg(instr.rd(), sign_extend(a << b));
1277 }
1278 break;
1279 }
1280 case SRI:
1281 if (instr.funct7() == SRA) {
1282 int32_t a = get_xreg(instr.rs1());
1283 int32_t b = instr.shamt();
1284 set_xreg(instr.rd(), sign_extend(a >> b));
1285 } else if (instr.funct7() == ROTATE) {
1286 set_xreg(instr.rd(), rorw(get_xreg(instr.rs1()), instr.shamt()));
1287 } else {
1288 uint32_t a = get_xreg(instr.rs1());
1289 uint32_t b = instr.shamt();
1290 set_xreg(instr.rd(), sign_extend(a >> b));
1291 }
1292 break;
1293 default:
1294 IllegalInstruction(instr);
1295 }
1296 pc_ += instr.length();
1297}
1298
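// Register-register ops are dispatched on funct7 first, separating the base
// group, SUB/SRA, the M extension, and the Zba/Zbb/Zbc/Zbs groups.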
1299DART_FORCE_INLINE
1300void Simulator::InterpretOP(Instr instr) {
1301 switch (instr.funct7()) {
1302 case 0:
1303 InterpretOP_0(instr);
1304 break;
1305 case SUB:
1306 InterpretOP_SUB(instr);
1307 break;
1308 case MULDIV:
1309 InterpretOP_MULDIV(instr);
1310 break;
1311 case SHADD:
1312 InterpretOP_SHADD(instr);
1313 break;
1314 case MINMAXCLMUL:
1315 InterpretOP_MINMAXCLMUL(instr);
1316 break;
1317 case ROTATE:
1318 InterpretOP_ROTATE(instr);
1319 break;
1320 case BCLRBEXT:
1321 InterpretOP_BCLRBEXT(instr);
1322 break;
1323 case BINV:
1324 set_xreg(instr.rd(), binv(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1325 pc_ += instr.length();
1326 break;
1327 case BSET:
1328 set_xreg(instr.rd(), bset(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1329 pc_ += instr.length();
1330 break;
1331#if XLEN == 32
1332 case 0b0000100:
1333 set_xreg(instr.rd(), zexth(get_xreg(instr.rs1())));
1334 pc_ += instr.length();
1335 break;
1336#endif
1337 default:
1338 IllegalInstruction(instr);
1339 }
1340}
1341
1342DART_FORCE_INLINE
1343void Simulator::InterpretOP_0(Instr instr) {
1344 switch (instr.funct3()) {
1345 case ADD:
1346 set_xreg(instr.rd(), get_xreg(instr.rs1()) + get_xreg(instr.rs2()));
1347 break;
1348 case SLL: {
1349 uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
1350 set_xreg(instr.rd(), get_xreg(instr.rs1()) << shamt);
1351 break;
1352 }
1353 case SLT:
1354 set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) <
1355 static_cast<intx_t>(get_xreg(instr.rs2()))
1356 ? 1
1357 : 0);
1358 break;
1359 case SLTU:
1360 set_xreg(instr.rd(), static_cast<uintx_t>(get_xreg(instr.rs1())) <
1361 static_cast<uintx_t>(get_xreg(instr.rs2()))
1362 ? 1
1363 : 0);
1364 break;
1365 case XOR:
1366 set_xreg(instr.rd(), get_xreg(instr.rs1()) ^ get_xreg(instr.rs2()));
1367 break;
1368 case SR: {
1369 uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
1370 set_xreg(instr.rd(),
1371 static_cast<uintx_t>(get_xreg(instr.rs1())) >> shamt);
1372 break;
1373 }
1374 case OR:
1375 set_xreg(instr.rd(), get_xreg(instr.rs1()) | get_xreg(instr.rs2()));
1376 break;
1377 case AND:
1378 set_xreg(instr.rd(), get_xreg(instr.rs1()) & get_xreg(instr.rs2()));
1379 break;
1380 default:
1381 IllegalInstruction(instr);
1382 }
1383 pc_ += instr.length();
1384}
1385
1386static intx_t mul(intx_t a, intx_t b) {
1387 return static_cast<uintx_t>(a) * static_cast<uintx_t>(b);
1388}
1389
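// mulh/mulhsu/mulhu return the upper XLEN bits of the full 2*XLEN-bit
// product. Each operand is split into halves (schoolbook multiplication)
// because no wider integer type is available here.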
1390static intx_t mulh(intx_t a, intx_t b) {
1391 const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
1392 const uintx_t kHiShift = XLEN / 2;
1393
1394 uintx_t a_lo = a & kLoMask;
1395 intx_t a_hi = a >> kHiShift;
1396 uintx_t b_lo = b & kLoMask;
1397 intx_t b_hi = b >> kHiShift;
1398
1399 uintx_t x = a_lo * b_lo;
1400 intx_t y = a_hi * b_lo;
1401 intx_t z = a_lo * b_hi;
1402 intx_t w = a_hi * b_hi;
1403
1404 intx_t r0 = (x >> kHiShift) + y;
1405 intx_t r1 = (r0 & kLoMask) + z;
1406 return w + (r0 >> kHiShift) + (r1 >> kHiShift);
1407}
1408
1409static uintx_t mulhu(uintx_t a, uintx_t b) {
1410 const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
1411 const uintx_t kHiShift = XLEN / 2;
1412
1413 uintx_t a_lo = a & kLoMask;
1414 uintx_t a_hi = a >> kHiShift;
1415 uintx_t b_lo = b & kLoMask;
1416 uintx_t b_hi = b >> kHiShift;
1417
1418 uintx_t x = a_lo * b_lo;
1419 uintx_t y = a_hi * b_lo;
1420 uintx_t z = a_lo * b_hi;
1421 uintx_t w = a_hi * b_hi;
1422
1423 uintx_t r0 = (x >> kHiShift) + y;
1424 uintx_t r1 = (r0 & kLoMask) + z;
1425 return w + (r0 >> kHiShift) + (r1 >> kHiShift);
1426}
1427
1428static uintx_t mulhsu(intx_t a, uintx_t b) {
1429 const uintx_t kLoMask = (static_cast<uintx_t>(1) << (XLEN / 2)) - 1;
1430 const uintx_t kHiShift = XLEN / 2;
1431
1432 uintx_t a_lo = a & kLoMask;
1433 intx_t a_hi = a >> kHiShift;
1434 uintx_t b_lo = b & kLoMask;
1435 uintx_t b_hi = b >> kHiShift;
1436
1437 uintx_t x = a_lo * b_lo;
1438 intx_t y = a_hi * b_lo;
1439 uintx_t z = a_lo * b_hi;
1440 intx_t w = a_hi * b_hi;
1441
1442 intx_t r0 = (x >> kHiShift) + y;
1443 uintx_t r1 = (r0 & kLoMask) + z;
1444 return w + (r0 >> kHiShift) + (r1 >> kHiShift);
1445}
1446
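// RISC-V integer division never traps: division by zero returns -1 (all ones
// for divu) with the dividend as remainder, and kMinIntX / -1 returns the
// dividend with remainder 0.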
1447static intx_t div(intx_t a, intx_t b) {
1448 if (b == 0) {
1449 return -1;
1450 } else if (b == -1 && a == kMinIntX) {
1451 return kMinIntX;
1452 } else {
1453 return a / b;
1454 }
1455}
1456
1457static uintx_t divu(uintx_t a, uintx_t b) {
1458 if (b == 0) {
1459 return kMaxUIntX;
1460 } else {
1461 return a / b;
1462 }
1463}
1464
1465static intx_t rem(intx_t a, intx_t b) {
1466 if (b == 0) {
1467 return a;
1468 } else if (b == -1 && a == kMinIntX) {
1469 return 0;
1470 } else {
1471 return a % b;
1472 }
1473}
1474
1475static uintx_t remu(uintx_t a, uintx_t b) {
1476 if (b == 0) {
1477 return a;
1478 } else {
1479 return a % b;
1480 }
1481}
1482
1483#if XLEN >= 64
1484static int32_t mulw(int32_t a, int32_t b) {
1485 return a * b;
1486}
1487
1488static int32_t divw(int32_t a, int32_t b) {
1489 if (b == 0) {
1490 return -1;
1491 } else if (b == -1 && a == kMinInt32) {
1492 return kMinInt32;
1493 } else {
1494 return a / b;
1495 }
1496}
1497
1498static uint32_t divuw(uint32_t a, uint32_t b) {
1499 if (b == 0) {
1500 return kMaxUint32;
1501 } else {
1502 return a / b;
1503 }
1504}
1505
1506static int32_t remw(int32_t a, int32_t b) {
1507 if (b == 0) {
1508 return a;
1509 } else if (b == -1 && a == kMinInt32) {
1510 return 0;
1511 } else {
1512 return a % b;
1513 }
1514}
1515
1516static uint32_t remuw(uint32_t a, uint32_t b) {
1517 if (b == 0) {
1518 return a;
1519 } else {
1520 return a % b;
1521 }
1522}
1523#endif // XLEN >= 64
1524
1525DART_FORCE_INLINE
1526void Simulator::InterpretOP_MULDIV(Instr instr) {
1527 switch (instr.funct3()) {
1528 case MUL:
1529 set_xreg(instr.rd(), mul(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1530 break;
1531 case MULH:
1532 set_xreg(instr.rd(), mulh(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1533 break;
1534 case MULHSU:
1535 set_xreg(instr.rd(),
1536 mulhsu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1537 break;
1538 case MULHU:
1539 set_xreg(instr.rd(), mulhu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1540 break;
1541 case DIV:
1542 set_xreg(instr.rd(), div(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1543 break;
1544 case DIVU:
1545 set_xreg(instr.rd(), divu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1546 break;
1547 case REM:
1548 set_xreg(instr.rd(), rem(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1549 break;
1550 case REMU:
1551 set_xreg(instr.rd(), remu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1552 break;
1553 default:
1554 IllegalInstruction(instr);
1555 }
1556 pc_ += instr.length();
1557}
1558
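// The SUB funct7 group also encodes SRA and the Zbb logic-with-complement
// ops: andn, orn and xnor.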
1559DART_FORCE_INLINE
1560void Simulator::InterpretOP_SUB(Instr instr) {
1561 switch (instr.funct3()) {
1562 case ADD:
1563 set_xreg(instr.rd(), get_xreg(instr.rs1()) - get_xreg(instr.rs2()));
1564 break;
1565 case SR: {
1566 uintx_t shamt = get_xreg(instr.rs2()) & (XLEN - 1);
1567 set_xreg(instr.rd(), static_cast<intx_t>(get_xreg(instr.rs1())) >> shamt);
1568 break;
1569 }
1570 case AND:
1571 set_xreg(instr.rd(), get_xreg(instr.rs1()) & ~get_xreg(instr.rs2()));
1572 break;
1573 case OR:
1574 set_xreg(instr.rd(), get_xreg(instr.rs1()) | ~get_xreg(instr.rs2()));
1575 break;
1576 case XOR:
1577 set_xreg(instr.rd(), get_xreg(instr.rs1()) ^ ~get_xreg(instr.rs2()));
1578 break;
1579 default:
1580 IllegalInstruction(instr);
1581 }
1582 pc_ += instr.length();
1583}
1584
1585DART_FORCE_INLINE
1586void Simulator::InterpretOP_SHADD(Instr instr) {
1587 switch (instr.funct3()) {
1588 case SH1ADD:
1589 set_xreg(instr.rd(),
1590 (get_xreg(instr.rs1()) << 1) + get_xreg(instr.rs2()));
1591 break;
1592 case SH2ADD:
1593 set_xreg(instr.rd(),
1594 (get_xreg(instr.rs1()) << 2) + get_xreg(instr.rs2()));
1595 break;
1596 case SH3ADD:
1597 set_xreg(instr.rd(),
1598 (get_xreg(instr.rs1()) << 3) + get_xreg(instr.rs2()));
1599 break;
1600 default:
1601 IllegalInstruction(instr);
1602 }
1603 pc_ += instr.length();
1604}
1605
1606DART_FORCE_INLINE
1607void Simulator::InterpretOP_MINMAXCLMUL(Instr instr) {
1608 switch (instr.funct3()) {
1609 case MAX:
1610 set_xreg(instr.rd(), max(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1611 break;
1612 case MAXU:
1613 set_xreg(instr.rd(), maxu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1614 break;
1615 case MIN:
1616 set_xreg(instr.rd(), min(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1617 break;
1618 case MINU:
1619 set_xreg(instr.rd(), minu(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1620 break;
1621 case CLMUL:
1622 set_xreg(instr.rd(), clmul(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1623 break;
1624 case CLMULH:
1625 set_xreg(instr.rd(),
1626 clmulh(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1627 break;
1628 case CLMULR:
1629 set_xreg(instr.rd(),
1630 clmulr(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1631 break;
1632 default:
1633 IllegalInstruction(instr);
1634 }
1635 pc_ += instr.length();
1636}
1637
1638DART_FORCE_INLINE
1639void Simulator::InterpretOP_ROTATE(Instr instr) {
1640 switch (instr.funct3()) {
1641 case ROR:
1642 set_xreg(instr.rd(), ror(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1643 break;
1644 case ROL:
1645 set_xreg(instr.rd(), rol(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1646 break;
1647 default:
1648 IllegalInstruction(instr);
1649 }
1650 pc_ += instr.length();
1651}
1652
1653DART_FORCE_INLINE
1654void Simulator::InterpretOP_BCLRBEXT(Instr instr) {
1655 switch (instr.funct3()) {
1656 case BCLR:
1657 set_xreg(instr.rd(), bclr(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1658 break;
1659 case BEXT:
1660 set_xreg(instr.rd(), bext(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1661 break;
1662 default:
1663 IllegalInstruction(instr);
1664 }
1665 pc_ += instr.length();
1666}
1667
1668DART_FORCE_INLINE
1669void Simulator::InterpretOP32(Instr instr) {
1670 switch (instr.funct7()) {
1671#if XLEN >= 64
1672 case 0:
1673 InterpretOP32_0(instr);
1674 break;
1675 case SUB:
1676 InterpretOP32_SUB(instr);
1677 break;
1678 case MULDIV:
1679 InterpretOP32_MULDIV(instr);
1680 break;
1681 case SHADD:
1682 InterpretOP32_SHADD(instr);
1683 break;
1684 case ADDUW:
1685 InterpretOP32_ADDUW(instr);
1686 break;
1687 case ROTATE:
1688 InterpretOP32_ROTATE(instr);
1689 break;
1690#endif // XLEN >= 64
1691 default:
1692 IllegalInstruction(instr);
1693 }
1694}
1695
1696DART_FORCE_INLINE
1697void Simulator::InterpretOP32_0(Instr instr) {
1698 switch (instr.funct3()) {
1699#if XLEN >= 64
1700 case ADD: {
1701 uint32_t a = get_xreg(instr.rs1());
1702 uint32_t b = get_xreg(instr.rs2());
1703 set_xreg(instr.rd(), sign_extend(a + b));
1704 break;
1705 }
1706 case SLL: {
1707 uint32_t a = get_xreg(instr.rs1());
1708 uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
1709 set_xreg(instr.rd(), sign_extend(a << b));
1710 break;
1711 }
1712 case SR: {
1713 uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
1714 uint32_t a = get_xreg(instr.rs1());
1715 set_xreg(instr.rd(), sign_extend(a >> b));
1716 break;
1717 }
1718#endif // XLEN >= 64
1719 default:
1720 IllegalInstruction(instr);
1721 }
1722 pc_ += instr.length();
1723}
1724
1725DART_FORCE_INLINE
1726void Simulator::InterpretOP32_SUB(Instr instr) {
1727 switch (instr.funct3()) {
1728#if XLEN >= 64
1729 case ADD: {
1730 uint32_t a = get_xreg(instr.rs1());
1731 uint32_t b = get_xreg(instr.rs2());
1732 set_xreg(instr.rd(), sign_extend(a - b));
1733 break;
1734 }
1735 case SR: {
1736 uint32_t b = get_xreg(instr.rs2()) & (32 - 1);
1737 int32_t a = get_xreg(instr.rs1());
1738 set_xreg(instr.rd(), sign_extend(a >> b));
1739 break;
1740 }
1741#endif // XLEN >= 64
1742 default:
1743 IllegalInstruction(instr);
1744 }
1745 pc_ += instr.length();
1746}
1747
1748DART_FORCE_INLINE
1749void Simulator::InterpretOP32_MULDIV(Instr instr) {
1750 switch (instr.funct3()) {
1751#if XLEN >= 64
1752 case MULW:
1753 set_xreg(instr.rd(),
1754 sign_extend(mulw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
1755 break;
1756 case DIVW:
1757 set_xreg(instr.rd(),
1758 sign_extend(divw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
1759 break;
1760 case DIVUW:
1761 set_xreg(instr.rd(), sign_extend(divuw(get_xreg(instr.rs1()),
1762 get_xreg(instr.rs2()))));
1763 break;
1764 case REMW:
1765 set_xreg(instr.rd(),
1766 sign_extend(remw(get_xreg(instr.rs1()), get_xreg(instr.rs2()))));
1767 break;
1768 case REMUW:
1769 set_xreg(instr.rd(), sign_extend(remuw(get_xreg(instr.rs1()),
1770 get_xreg(instr.rs2()))));
1771 break;
1772#endif // XLEN >= 64
1773 default:
1774 IllegalInstruction(instr);
1775 }
1776 pc_ += instr.length();
1777}
1778
1779DART_FORCE_INLINE
1780void Simulator::InterpretOP32_SHADD(Instr instr) {
1781 switch (instr.funct3()) {
1782 case SH1ADD: {
1783 uintx_t a = static_cast<uint32_t>(get_xreg(instr.rs1()));
1784 uintx_t b = get_xreg(instr.rs2());
1785 set_xreg(instr.rd(), (a << 1) + b);
1786 break;
1787 }
1788 case SH2ADD: {
1789 uintx_t a = static_cast<uint32_t>(get_xreg(instr.rs1()));
1790 uintx_t b = get_xreg(instr.rs2());
1791 set_xreg(instr.rd(), (a << 2) + b);
1792 break;
1793 }
1794 case SH3ADD: {
1795 uintx_t a = static_cast<uint32_t>(get_xreg(instr.rs1()));
1796 uintx_t b = get_xreg(instr.rs2());
1797 set_xreg(instr.rd(), (a << 3) + b);
1798 break;
1799 }
1800 default:
1801 IllegalInstruction(instr);
1802 }
1803 pc_ += instr.length();
1804}
1805
1806DART_FORCE_INLINE
1807void Simulator::InterpretOP32_ADDUW(Instr instr) {
1808 switch (instr.funct3()) {
1809#if XLEN >= 64
1810 case F3_0: {
1811 uintx_t a = static_cast<uint32_t>(get_xreg(instr.rs1()));
1812 uintx_t b = get_xreg(instr.rs2());
1813 set_xreg(instr.rd(), a + b);
1814 break;
1815 }
1816 case ZEXT:
1817 set_xreg(instr.rd(), zexth(get_xreg(instr.rs1())));
1818 break;
1819#endif // XLEN >= 64
1820 default:
1821 IllegalInstruction(instr);
1822 }
1823 pc_ += instr.length();
1824}
1825
1826DART_FORCE_INLINE
1827void Simulator::InterpretOP32_ROTATE(Instr instr) {
1828 switch (instr.funct3()) {
1829 case ROR:
1830 set_xreg(instr.rd(), rorw(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1831 break;
1832 case ROL:
1833 set_xreg(instr.rd(), rolw(get_xreg(instr.rs1()), get_xreg(instr.rs2())));
1834 break;
1835 default:
1836 IllegalInstruction(instr);
1837 }
1838 pc_ += instr.length();
1839}
1840
1841void Simulator::InterpretMISCMEM(Instr instr) {
1842 switch (instr.funct3()) {
1843 case FENCE:
1844 std::atomic_thread_fence(std::memory_order_acq_rel);
1845 break;
1846 case FENCEI:
1847 // Nothing to do: simulated instructions are data on the host.
1848 break;
1849 default:
1850 IllegalInstruction(instr);
1851 }
1852 pc_ += instr.length();
1853}
1854
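// SYSTEM covers ECALL/EBREAK and the Zicsr instructions. Per the spec, CSRRW
// with rd=x0 skips the CSR read, while the set/clear forms skip the CSR write
// when rs1 is x0 (or the immediate is zero).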
1855void Simulator::InterpretSYSTEM(Instr instr) {
1856 switch (instr.funct3()) {
1857 case 0:
1858 switch (instr.funct12()) {
1859 case ECALL:
1860 InterpretECALL(instr);
1861 return;
1862 case EBREAK:
1863 InterpretEBREAK(instr);
1864 return;
1865 default:
1866 IllegalInstruction(instr);
1867 }
1868 break;
1869 case CSRRW: {
1870 if (instr.rd() == ZR) {
1871 // No read effect.
1872 CSRWrite(instr.csr(), get_xreg(instr.rs1()));
1873 } else {
1874 intx_t result = CSRRead(instr.csr());
1875 CSRWrite(instr.csr(), get_xreg(instr.rs1()));
1876 set_xreg(instr.rd(), result);
1877 }
1878 break;
1879 }
1880 case CSRRS: {
1881 intx_t result = CSRRead(instr.csr());
1882 if (instr.rs1() == ZR) {
1883 // No write effect.
1884 } else {
1885 CSRSet(instr.csr(), get_xreg(instr.rs1()));
1886 }
1887 set_xreg(instr.rd(), result);
1888 break;
1889 }
1890 case CSRRC: {
1891 intx_t result = CSRRead(instr.csr());
1892 if (instr.rs1() == ZR) {
1893 // No write effect.
1894 } else {
1895 CSRClear(instr.csr(), get_xreg(instr.rs1()));
1896 }
1897 set_xreg(instr.rd(), result);
1898 break;
1899 }
1900 case CSRRWI: {
1901 if (instr.rd() == ZR) {
1902 // No read effect.
1903 CSRWrite(instr.csr(), instr.zimm());
1904 } else {
1905 intx_t result = CSRRead(instr.csr());
1906 CSRWrite(instr.csr(), instr.zimm());
1907 set_xreg(instr.rd(), result);
1908 }
1909 break;
1910 }
1911 case CSRRSI: {
1912 intx_t result = CSRRead(instr.csr());
1913 if (instr.zimm() == 0) {
1914 // No write effect.
1915 } else {
1916 CSRSet(instr.csr(), instr.zimm());
1917 }
1918 set_xreg(instr.rd(), result);
1919 break;
1920 }
1921 case CSRRCI: {
1922 intx_t result = CSRRead(instr.csr());
1923 if (instr.zimm() == 0) {
1924 // No write effect.
1925 } else {
1926 CSRClear(instr.csr(), instr.zimm());
1927 }
1928 set_xreg(instr.rd(), result);
1929 break;
1930 }
1931 default:
1932 IllegalInstruction(instr);
1933 }
1934 pc_ += instr.length();
1935}
1936
1937// Calls into the Dart runtime are based on this interface.
1938typedef void (*SimulatorRuntimeCall)(NativeArguments arguments);
1939
1940// Calls to leaf Dart runtime functions are based on this interface.
1941typedef intx_t (*SimulatorLeafRuntimeCall)(intx_t r0,
1942 intx_t r1,
1943 intx_t r2,
1944 intx_t r3,
1945 intx_t r4,
1946 intx_t r5,
1947 intx_t r6,
1948 intx_t r7);
1949
1950// [target] has several different signatures that differ from
1951// SimulatorLeafRuntimeCall. We can call them all from here only because in
1952// X64's calling conventions a function can be called with extra arguments
1953// and the callee will see the first arguments and won't unbalance the stack.
1954NO_SANITIZE_UNDEFINED("function")
1955static intx_t InvokeLeafRuntime(SimulatorLeafRuntimeCall target,
1956 intx_t r0,
1957 intx_t r1,
1958 intx_t r2,
1959 intx_t r3,
1960 intx_t r4,
1961 intx_t r5,
1962 intx_t r6,
1963 intx_t r7) {
1964 return target(r0, r1, r2, r3, r4, r5, r6, r7);
1965}
1966
1967// Calls to leaf float Dart runtime functions are based on this interface.
1968typedef double (*SimulatorLeafFloatRuntimeCall)(double d0,
1969 double d1,
1970 double d2,
1971 double d3,
1972 double d4,
1973 double d5,
1974 double d6,
1975 double d7);
1976
1977// [target] has several different signatures that differ from
1978// SimulatorLeafFloatRuntimeCall. We can call them all from here only because in
1979// X64's calling conventions a function can be called with extra arguments
1980// and the callee will see the first arguments and won't unbalance the stack.
1981NO_SANITIZE_UNDEFINED("function")
1982static double InvokeFloatLeafRuntime(SimulatorLeafFloatRuntimeCall target,
1983 double d0,
1984 double d1,
1985 double d2,
1986 double d3,
1987 double d4,
1988 double d5,
1989 double d6,
1990 double d7) {
1991 return target(d0, d1, d2, d3, d4, d5, d6, d7);
1992}
1993
1994// Calls to native Dart functions are based on this interface.
1995typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
1996 Dart_NativeFunction target);
1997
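// An ECALL with rs1 == ZR is a redirection to host code: the Redirection
// record recovered from pc_ says which kind of host function to invoke and
// how to pass arguments from the simulated registers. Any other rs1 marks the
// fake "print object" instruction used for debugging.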
1998void Simulator::InterpretECALL(Instr instr) {
1999 if (instr.rs1() != ZR) {
2000 // Fake instruction generated by Assembler::SimulatorPrintObject.
2001 if (true || IsTracingExecution()) {
2002 Object& obj = Object::Handle(
2003 static_cast<ObjectPtr>(static_cast<uword>(get_xreg(instr.rs1()))));
2004 THR_Print("%" Px ": %s = %s\n", static_cast<uword>(pc_),
2005 cpu_reg_names[instr.rs1()], obj.ToCString());
2006 FLAG_trace_sim_after = 1;
2007 }
2008 pc_ += instr.length();
2009 return;
2010 }
2011
2012 // The C ABI stack alignment is 16 for both 32 and 64 bit.
2013 if (!Utils::IsAligned(get_xreg(SP), 16)) {
2014 PrintRegisters();
2015 PrintStack();
2016 FATAL("Stack misaligned at call to C function");
2017 }
2018
2019 SimulatorSetjmpBuffer buffer(this);
2020 if (!setjmp(buffer.buffer_)) {
2021 uintx_t saved_ra = get_xreg(RA);
2022 Redirection* redirection = Redirection::FromECallInstruction(pc_);
2023 uword external = redirection->external_function();
2024 if (IsTracingExecution()) {
 2025      THR_Print("Call to host function at 0x%" Px "\n", external);
2026 }
2027
2028 if (redirection->call_kind() == kRuntimeCall) {
2029 NativeArguments* arguments =
2030 reinterpret_cast<NativeArguments*>(get_register(A0));
2031 SimulatorRuntimeCall target =
2032 reinterpret_cast<SimulatorRuntimeCall>(external);
2033 target(*arguments);
2034 ClobberVolatileRegisters();
2035 } else if (redirection->call_kind() == kLeafRuntimeCall) {
2036 ASSERT((0 <= redirection->argument_count()) &&
2037 (redirection->argument_count() <= 8));
2038 SimulatorLeafRuntimeCall target =
2039 reinterpret_cast<SimulatorLeafRuntimeCall>(external);
2040 const intx_t r0 = get_register(A0);
2041 const intx_t r1 = get_register(A1);
2042 const intx_t r2 = get_register(A2);
2043 const intx_t r3 = get_register(A3);
2044 const intx_t r4 = get_register(A4);
2045 const intx_t r5 = get_register(A5);
2046 const intx_t r6 = get_register(A6);
2047 const intx_t r7 = get_register(A7);
2048 const intx_t res =
2049 InvokeLeafRuntime(target, r0, r1, r2, r3, r4, r5, r6, r7);
2050 ClobberVolatileRegisters();
2051 set_xreg(A0, res); // Set returned result from function.
2052 } else if (redirection->call_kind() == kLeafFloatRuntimeCall) {
2053 ASSERT((0 <= redirection->argument_count()) &&
2054 (redirection->argument_count() <= 8));
2055 SimulatorLeafFloatRuntimeCall target =
2056 reinterpret_cast<SimulatorLeafFloatRuntimeCall>(external);
2057 const double d0 = get_fregd(FA0);
2058 const double d1 = get_fregd(FA1);
2059 const double d2 = get_fregd(FA2);
2060 const double d3 = get_fregd(FA3);
2061 const double d4 = get_fregd(FA4);
2062 const double d5 = get_fregd(FA5);
2063 const double d6 = get_fregd(FA6);
2064 const double d7 = get_fregd(FA7);
2065 const double res =
2066 InvokeFloatLeafRuntime(target, d0, d1, d2, d3, d4, d5, d6, d7);
2067 ClobberVolatileRegisters();
2068 set_fregd(FA0, res);
2069 } else if (redirection->call_kind() == kNativeCallWrapper) {
2070 SimulatorNativeCallWrapper wrapper =
2071 reinterpret_cast<SimulatorNativeCallWrapper>(external);
2072 Dart_NativeArguments arguments =
2073 reinterpret_cast<Dart_NativeArguments>(get_register(A0));
 2074      Dart_NativeFunction target =
 2075          reinterpret_cast<Dart_NativeFunction>(get_register(A1));
2076 wrapper(arguments, target);
2077 ClobberVolatileRegisters();
2078 } else {
2079 UNREACHABLE();
2080 }
2081
2082 // Return.
2083 pc_ = saved_ra;
2084 } else {
2085 // Coming via long jump from a throw. Continue to exception handler.
2086 }
2087}
2088
2089void Simulator::InterpretAMO(Instr instr) {
2090 switch (instr.funct3()) {
2091 case WIDTH8:
2092 InterpretAMO8(instr);
2093 break;
2094 case WIDTH16:
2095 InterpretAMO16(instr);
2096 break;
2097 case WIDTH32:
2098 InterpretAMO32(instr);
2099 break;
2100 case WIDTH64:
2101 InterpretAMO64(instr);
2102 break;
2103 default:
2104 IllegalInstruction(instr);
2105 }
2106}
2107
2108// Note: This implementation does not give full LR/SC semantics because it
2109// suffers from the ABA problem.
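// Sketch of that failure mode under the emulation below: the store-conditional
// succeeds whenever the word still holds the originally loaded value, even if
// another hart changed it and changed it back in between. Real hardware would
// fail the SC in this hypothetical interleaving:
//
//   hart A: lr.w t0, (a0)        // reserves a0, observes value V
//   hart B: sw   t1, 0(a0)       // writes W
//   hart B: sw   t2, 0(a0)       // writes V back again (A-B-A)
//   hart A: sc.w t3, t4, (a0)    // compare_exchange still sees V -> succeeds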
2110
2111template <typename type>
2112void Simulator::InterpretLR(Instr instr) {
2113 uintx_t addr = get_xreg(instr.rs1());
2114 if ((addr & (sizeof(type) - 1)) != 0) {
2115 FATAL("Misaligned atomic memory operation");
2116 }
2117 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2118 reserved_address_ = addr;
2119 reserved_value_ = atomic->load(instr.memory_order());
2120 set_xreg(instr.rd(), reserved_value_);
2121}
2122
2123template <typename type>
2124void Simulator::InterpretSC(Instr instr) {
2125 uintx_t addr = get_xreg(instr.rs1());
2126 if ((addr & (sizeof(type) - 1)) != 0) {
2127 FATAL("Misaligned atomic memory operation");
2128 }
2129 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2130 if (addr != reserved_address_) {
2131 set_xreg(instr.rd(), 1);
2132 return;
2133 }
2134 if ((random_.NextUInt32() % 16) == 0) { // Spurious failure.
2135 set_xreg(instr.rd(), 1);
2136 return;
2137 }
2138 type expected = reserved_value_;
2139 type desired = get_xreg(instr.rs2());
2140 bool success =
2141 atomic->compare_exchange_strong(expected, desired, instr.memory_order());
2142 set_xreg(instr.rd(), success ? 0 : 1);
2143}
2144
2145template <typename type>
2146void Simulator::InterpretAMOSWAP(Instr instr) {
2147 uintx_t addr = get_xreg(instr.rs1());
2148 if ((addr & (sizeof(type) - 1)) != 0) {
2149 FATAL("Misaligned atomic memory operation");
2150 }
2151 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2152 type desired = get_xreg(instr.rs2());
2153 type result = atomic->exchange(desired, instr.memory_order());
2154 set_xreg(instr.rd(), sign_extend(result));
2155}
2156
2157template <typename type>
2158void Simulator::InterpretAMOADD(Instr instr) {
2159 uintx_t addr = get_xreg(instr.rs1());
2160 if ((addr & (sizeof(type) - 1)) != 0) {
2161 FATAL("Misaligned atomic memory operation");
2162 }
2163 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2164 type arg = get_xreg(instr.rs2());
2165 type result = atomic->fetch_add(arg, instr.memory_order());
2166 set_xreg(instr.rd(), sign_extend(result));
2167}
2168
2169template <typename type>
2170void Simulator::InterpretAMOXOR(Instr instr) {
2171 uintx_t addr = get_xreg(instr.rs1());
2172 if ((addr & (sizeof(type) - 1)) != 0) {
2173 FATAL("Misaligned atomic memory operation");
2174 }
2175 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2176 type arg = get_xreg(instr.rs2());
2177 type result = atomic->fetch_xor(arg, instr.memory_order());
2178 set_xreg(instr.rd(), sign_extend(result));
2179}
2180
2181template <typename type>
2182void Simulator::InterpretAMOAND(Instr instr) {
2183 uintx_t addr = get_xreg(instr.rs1());
2184 if ((addr & (sizeof(type) - 1)) != 0) {
2185 FATAL("Misaligned atomic memory operation");
2186 }
2187 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2188 type arg = get_xreg(instr.rs2());
2189 type result = atomic->fetch_and(arg, instr.memory_order());
2190 set_xreg(instr.rd(), sign_extend(result));
2191}
2192
2193template <typename type>
2194void Simulator::InterpretAMOOR(Instr instr) {
2195 uintx_t addr = get_xreg(instr.rs1());
2196 if ((addr & (sizeof(type) - 1)) != 0) {
2197 FATAL("Misaligned atomic memory operation");
2198 }
2199 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2200 type arg = get_xreg(instr.rs2());
2201 type result = atomic->fetch_or(arg, instr.memory_order());
2202 set_xreg(instr.rd(), sign_extend(result));
2203}
2204
2205template <typename type>
2206void Simulator::InterpretAMOMIN(Instr instr) {
2207 uintx_t addr = get_xreg(instr.rs1());
2208 if ((addr & (sizeof(type) - 1)) != 0) {
2209 FATAL("Misaligned atomic memory operation");
2210 }
2211 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2212 type expected = atomic->load(std::memory_order_relaxed);
2213 type compare = get_xreg(instr.rs2());
2214 type desired;
2215 do {
2216 desired = expected < compare ? expected : compare;
2217 } while (
2218 !atomic->compare_exchange_weak(expected, desired, instr.memory_order()));
2219 set_xreg(instr.rd(), sign_extend(expected));
2220}
2221
2222template <typename type>
2223void Simulator::InterpretAMOMAX(Instr instr) {
2224 uintx_t addr = get_xreg(instr.rs1());
2225 if ((addr & (sizeof(type) - 1)) != 0) {
2226 FATAL("Misaligned atomic memory operation");
2227 }
2228 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2229 type expected = atomic->load(std::memory_order_relaxed);
2230 type compare = get_xreg(instr.rs2());
2231 type desired;
2232 do {
2233 desired = expected > compare ? expected : compare;
2234 } while (
2235 !atomic->compare_exchange_weak(expected, desired, instr.memory_order()));
2236 set_xreg(instr.rd(), sign_extend(expected));
2237}
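// AMOMIN/AMOMAX (and the unsigned variants dispatched below) are emulated with
// a compare-exchange loop because std::atomic has no fetch_min/fetch_max
// member function; the shape of the loop is the usual read-modify-CAS idiom:
//
//   T expected = atomic->load(std::memory_order_relaxed);
//   T desired;
//   do {
//     desired = op(expected, arg);  // min or max
//   } while (!atomic->compare_exchange_weak(expected, desired, order));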
2238
2239template <typename type>
2240void Simulator::InterpretLOADORDERED(Instr instr) {
2241 uintx_t addr = get_xreg(instr.rs1());
2242 if ((addr & (sizeof(type) - 1)) != 0) {
2243 FATAL("Misaligned atomic memory operation");
2244 }
2245 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2246 type value = atomic->load(instr.memory_order());
2247 set_xreg(instr.rd(), sign_extend(value));
2248}
2249
2250template <typename type>
2251void Simulator::InterpretSTOREORDERED(Instr instr) {
2252 uintx_t addr = get_xreg(instr.rs1());
2253 if ((addr & (sizeof(type) - 1)) != 0) {
2254 FATAL("Misaligned atomic memory operation");
2255 }
2256 type value = get_xreg(instr.rs2());
2257 std::atomic<type>* atomic = reinterpret_cast<std::atomic<type>*>(addr);
2258 atomic->store(value, instr.memory_order());
2259}
2260
2261void Simulator::InterpretAMO8(Instr instr) {
2262 switch (instr.funct5()) {
2263 case LOADORDERED:
2264 InterpretLOADORDERED<int8_t>(instr);
2265 break;
2266 case STOREORDERED:
2267 InterpretSTOREORDERED<int8_t>(instr);
2268 break;
2269 default:
2270 IllegalInstruction(instr);
2271 }
2272 pc_ += instr.length();
2273}
2274
2275void Simulator::InterpretAMO16(Instr instr) {
2276 switch (instr.funct5()) {
2277 case LOADORDERED:
2278 InterpretLOADORDERED<int16_t>(instr);
2279 break;
2280 case STOREORDERED:
2281 InterpretSTOREORDERED<int16_t>(instr);
2282 break;
2283 default:
2284 IllegalInstruction(instr);
2285 }
2286 pc_ += instr.length();
2287}
2288
2289void Simulator::InterpretAMO32(Instr instr) {
2290 switch (instr.funct5()) {
2291 case LR:
2292 InterpretLR<int32_t>(instr);
2293 break;
2294 case SC:
2295 InterpretSC<int32_t>(instr);
2296 break;
2297 case AMOSWAP:
2298 InterpretAMOSWAP<int32_t>(instr);
2299 break;
2300 case AMOADD:
2301 InterpretAMOADD<int32_t>(instr);
2302 break;
2303 case AMOXOR:
2304 InterpretAMOXOR<int32_t>(instr);
2305 break;
2306 case AMOAND:
2307 InterpretAMOAND<int32_t>(instr);
2308 break;
2309 case AMOOR:
2310 InterpretAMOOR<int32_t>(instr);
2311 break;
2312 case AMOMIN:
2313 InterpretAMOMIN<int32_t>(instr);
2314 break;
2315 case AMOMAX:
2316 InterpretAMOMAX<int32_t>(instr);
2317 break;
2318 case AMOMINU:
2319 InterpretAMOMIN<uint32_t>(instr);
2320 break;
2321 case AMOMAXU:
2322 InterpretAMOMAX<uint32_t>(instr);
2323 break;
2324 case LOADORDERED:
2325 InterpretLOADORDERED<int32_t>(instr);
2326 break;
2327 case STOREORDERED:
2328 InterpretSTOREORDERED<int32_t>(instr);
2329 break;
2330 default:
2331 IllegalInstruction(instr);
2332 }
2333 pc_ += instr.length();
2334}
2335
2336void Simulator::InterpretAMO64(Instr instr) {
2337 switch (instr.funct5()) {
2338#if XLEN >= 64
2339 case LR:
2340 InterpretLR<int64_t>(instr);
2341 break;
2342 case SC:
2343 InterpretSC<int64_t>(instr);
2344 break;
2345 case AMOSWAP:
2346 InterpretAMOSWAP<int64_t>(instr);
2347 break;
2348 case AMOADD:
2349 InterpretAMOADD<int64_t>(instr);
2350 break;
2351 case AMOXOR:
2352 InterpretAMOXOR<int64_t>(instr);
2353 break;
2354 case AMOAND:
2355 InterpretAMOAND<int64_t>(instr);
2356 break;
2357 case AMOOR:
2358 InterpretAMOOR<int64_t>(instr);
2359 break;
2360 case AMOMIN:
2361 InterpretAMOMIN<int64_t>(instr);
2362 break;
2363 case AMOMAX:
2364 InterpretAMOMAX<int64_t>(instr);
2365 break;
2366 case AMOMINU:
2367 InterpretAMOMIN<uint64_t>(instr);
2368 break;
2369 case AMOMAXU:
2370 InterpretAMOMAX<uint64_t>(instr);
2371 break;
2372 case LOADORDERED:
2373 InterpretLOADORDERED<int64_t>(instr);
2374 break;
2375 case STOREORDERED:
2376 InterpretSTOREORDERED<int64_t>(instr);
2377 break;
2378#endif // XLEN >= 64
2379 default:
2380 IllegalInstruction(instr);
2381 }
2382 pc_ += instr.length();
2383}
2384
2385void Simulator::InterpretFMADD(Instr instr) {
2386 switch (instr.funct2()) {
2387 case F2_S: {
2388 float rs1 = get_fregs(instr.frs1());
2389 float rs2 = get_fregs(instr.frs2());
2390 float rs3 = get_fregs(instr.frs3());
2391 set_fregs(instr.frd(), (rs1 * rs2) + rs3);
2392 break;
2393 }
2394 case F2_D: {
2395 double rs1 = get_fregd(instr.frs1());
2396 double rs2 = get_fregd(instr.frs2());
2397 double rs3 = get_fregd(instr.frs3());
2398 set_fregd(instr.frd(), (rs1 * rs2) + rs3);
2399 break;
2400 }
2401 default:
2402 IllegalInstruction(instr);
2403 }
2404 pc_ += instr.length();
2405}
2406
2407void Simulator::InterpretFMSUB(Instr instr) {
2408 switch (instr.funct2()) {
2409 case F2_S: {
2410 float rs1 = get_fregs(instr.frs1());
2411 float rs2 = get_fregs(instr.frs2());
2412 float rs3 = get_fregs(instr.frs3());
2413 set_fregs(instr.frd(), (rs1 * rs2) - rs3);
2414 break;
2415 }
2416 case F2_D: {
2417 double rs1 = get_fregd(instr.frs1());
2418 double rs2 = get_fregd(instr.frs2());
2419 double rs3 = get_fregd(instr.frs3());
2420 set_fregd(instr.frd(), (rs1 * rs2) - rs3);
2421 break;
2422 }
2423 default:
2424 IllegalInstruction(instr);
2425 }
2426 pc_ += instr.length();
2427}
2428
2429void Simulator::InterpretFNMSUB(Instr instr) {
2430 switch (instr.funct2()) {
2431 case F2_S: {
2432 float rs1 = get_fregs(instr.frs1());
2433 float rs2 = get_fregs(instr.frs2());
2434 float rs3 = get_fregs(instr.frs3());
2435 set_fregs(instr.frd(), -(rs1 * rs2) + rs3);
2436 break;
2437 }
2438 case F2_D: {
2439 double rs1 = get_fregd(instr.frs1());
2440 double rs2 = get_fregd(instr.frs2());
2441 double rs3 = get_fregd(instr.frs3());
2442 set_fregd(instr.frd(), -(rs1 * rs2) + rs3);
2443 break;
2444 }
2445 default:
2446 IllegalInstruction(instr);
2447 }
2448 pc_ += instr.length();
2449}
2450
2451void Simulator::InterpretFNMADD(Instr instr) {
2452 switch (instr.funct2()) {
2453 case F2_S: {
2454 float rs1 = get_fregs(instr.frs1());
2455 float rs2 = get_fregs(instr.frs2());
2456 float rs3 = get_fregs(instr.frs3());
2457 set_fregs(instr.frd(), -(rs1 * rs2) - rs3);
2458 break;
2459 }
2460 case F2_D: {
2461 double rs1 = get_fregd(instr.frs1());
2462 double rs2 = get_fregd(instr.frs2());
2463 double rs3 = get_fregd(instr.frs3());
2464 set_fregd(instr.frd(), -(rs1 * rs2) - rs3);
2465 break;
2466 }
2467 default:
2468 IllegalInstruction(instr);
2469 }
2470 pc_ += instr.length();
2471}
2472
2473// "For the purposes of these instructions only, the value −0.0 is considered to
2474// be less than the value +0.0. If both inputs are NaNs, the result is the
2475// canonical NaN. If only one operand is a NaN, the result is the non-NaN
2476// operand."
2477static double rv_fmin(double x, double y) {
2478 if (isnan(x) && isnan(y)) return std::numeric_limits<double>::quiet_NaN();
2479 if (isnan(x)) return y;
2480 if (isnan(y)) return x;
2481 if (x == y) return signbit(x) ? x : y;
2482 return fmin(x, y);
2483}
2484
2485static double rv_fmax(double x, double y) {
2486 if (isnan(x) && isnan(y)) return std::numeric_limits<double>::quiet_NaN();
2487 if (isnan(x)) return y;
2488 if (isnan(y)) return x;
2489 if (x == y) return signbit(x) ? y : x;
2490 return fmax(x, y);
2491}
2492
2493static float rv_fminf(float x, float y) {
2494 if (isnan(x) && isnan(y)) return std::numeric_limits<float>::quiet_NaN();
2495 if (isnan(x)) return y;
2496 if (isnan(y)) return x;
2497 if (x == y) return signbit(x) ? x : y;
2498 return fminf(x, y);
2499}
2500
2501static float rv_fmaxf(float x, float y) {
2502 if (isnan(x) && isnan(y)) return std::numeric_limits<float>::quiet_NaN();
2503 if (isnan(x)) return y;
2504 if (isnan(y)) return x;
2505 if (x == y) return signbit(x) ? y : x;
2506 return fmaxf(x, y);
2507}
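// A few consequences of the quoted semantics (illustrative values only):
//
//   rv_fmin(-0.0, +0.0)  // -0.0: negative zero is treated as the smaller value
//   rv_fmax(-0.0, +0.0)  // +0.0
//   rv_fmin(NAN, 1.0)    // 1.0: the non-NaN operand wins
//   rv_fmax(NAN, NAN)    // the canonical quiet NaN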
2508
2509static bool is_quiet(float x) {
2510 // Warning: This is true on Intel/ARM, but not everywhere.
2511 return (bit_cast<uint32_t>(x) & (static_cast<uint32_t>(1) << 22)) != 0;
2512}
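// Bit 22 is the most significant fraction bit of an IEEE 754 binary32 value;
// on Intel, ARM and RISC-V a set bit marks a quiet NaN. For example:
//
//   is_quiet(bit_cast<float>(0x7FC00000u))  // true: canonical quiet NaN
//   is_quiet(bit_cast<float>(0x7F800001u))  // false: a signaling NaN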
2513
2514static uintx_t fclass(float x) {
2515 ASSERT(!is_quiet(std::numeric_limits<float>::signaling_NaN()));
2516 ASSERT(is_quiet(std::numeric_limits<float>::quiet_NaN()));
2517
2518 switch (fpclassify(x)) {
2519 case FP_INFINITE:
2520 return signbit(x) ? kFClassNegInfinity : kFClassPosInfinity;
2521 case FP_NAN:
2522 return is_quiet(x) ? kFClassQuietNan : kFClassSignallingNan;
2523 case FP_ZERO:
2524 return signbit(x) ? kFClassNegZero : kFClassPosZero;
2525 case FP_SUBNORMAL:
2526 return signbit(x) ? kFClassNegSubnormal : kFClassPosSubnormal;
2527 case FP_NORMAL:
2528 return signbit(x) ? kFClassNegNormal : kFClassPosNormal;
2529 default:
2530 UNREACHABLE();
2531 return 0;
2532 }
2533}
2534
2535static bool is_quiet(double x) {
2536 // Warning: This is true on Intel/ARM, but not everywhere.
2537 return (bit_cast<uint64_t>(x) & (static_cast<uint64_t>(1) << 51)) != 0;
2538}
2539
2540static uintx_t fclass(double x) {
2541 ASSERT(!is_quiet(std::numeric_limits<double>::signaling_NaN()));
2542 ASSERT(is_quiet(std::numeric_limits<double>::quiet_NaN()));
2543
2544 switch (fpclassify(x)) {
2545 case FP_INFINITE:
2546 return signbit(x) ? kFClassNegInfinity : kFClassPosInfinity;
2547 case FP_NAN:
2548 return is_quiet(x) ? kFClassQuietNan : kFClassSignallingNan;
2549 case FP_ZERO:
2550 return signbit(x) ? kFClassNegZero : kFClassPosZero;
2551 case FP_SUBNORMAL:
2552 return signbit(x) ? kFClassNegSubnormal : kFClassPosSubnormal;
2553 case FP_NORMAL:
2554 return signbit(x) ? kFClassNegNormal : kFClassPosNormal;
2555 default:
2556 UNREACHABLE();
2557 return 0;
2558 }
2559}
2560
2561static float roundevenf(float x) {
2562 float rounded = roundf(x);
2563 if (fabsf(x - rounded) == 0.5f) { // Tie
2564 if (fmodf(rounded, 2) != 0) { // Not even
2565 if (rounded > 0.0f) {
2566 rounded -= 1.0f;
2567 } else {
2568 rounded += 1.0f;
2569 }
2570 ASSERT(fmodf(rounded, 2) == 0);
2571 }
2572 }
2573 return rounded;
2574}
2575
2576static double roundeven(double x) {
2577 double rounded = round(x);
 2578  if (fabs(x - rounded) == 0.5) {  // Tie
 2579    if (fmod(rounded, 2) != 0) {   // Not even
 2580      if (rounded > 0.0) {
 2581        rounded -= 1.0;
 2582      } else {
 2583        rounded += 1.0;
2584 }
2585 ASSERT(fmod(rounded, 2) == 0);
2586 }
2587 }
2588 return rounded;
2589}
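// Round-to-nearest-ties-to-even in action (illustrative values only):
//
//   roundeven(0.5)  // 0.0: tie, rounds to the even neighbour
//   roundeven(1.5)  // 2.0
//   roundeven(2.5)  // 2.0: tie again, stays at the even neighbour
//   roundeven(1.4)  // 1.0: not a tie, ordinary nearest rounding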
2590
2591static float Round(float x, RoundingMode rounding) {
2592 switch (rounding) {
2593 case RNE: // Round to Nearest, ties to Even
2594 return roundevenf(x);
2595 case RTZ: // Round towards Zero
2596 return truncf(x);
2597 case RDN: // Round Down (toward negative infinity)
2598 return floorf(x);
2599 case RUP: // Round Up (toward positive infinity)
2600 return ceilf(x);
2601 case RMM: // Round to nearest, ties to Max Magnitude
2602 return roundf(x);
2603 case DYN: // Dynamic rounding mode
2604 UNIMPLEMENTED();
2605 default:
2606 FATAL("Invalid rounding mode");
2607 }
2608}
2609
2610static double Round(double x, RoundingMode rounding) {
2611 switch (rounding) {
2612 case RNE: // Round to Nearest, ties to Even
2613 return roundeven(x);
2614 case RTZ: // Round towards Zero
2615 return trunc(x);
2616 case RDN: // Round Down (toward negative infinity)
2617 return floor(x);
2618 case RUP: // Round Up (toward positive infinity)
2619 return ceil(x);
2620 case RMM: // Round to nearest, ties to Max Magnitude
2621 return round(x);
2622 case DYN: // Dynamic rounding mode
2623 UNIMPLEMENTED();
2624 default:
2625 FATAL("Invalid rounding mode");
2626 }
2627}
2628
2629static int32_t fcvtws(float x, RoundingMode rounding) {
2630 if (x < static_cast<float>(kMinInt32)) {
2631 return kMinInt32; // Negative infinity.
2632 }
2633 if (x < static_cast<float>(kMaxInt32)) {
2634 return static_cast<int32_t>(Round(x, rounding));
2635 }
2636 return kMaxInt32; // Positive infinity, NaN.
2637}
2638
2639static uint32_t fcvtwus(float x, RoundingMode rounding) {
2640 if (x < static_cast<float>(0)) {
2641 return 0; // Negative infinity.
2642 }
2643 if (x < static_cast<float>(kMaxUint32)) {
2644 return static_cast<uint32_t>(Round(x, rounding));
2645 }
2646 return kMaxUint32; // Positive infinity, NaN.
2647}
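// These helpers follow the RISC-V FCVT convention that out-of-range inputs and
// NaN saturate rather than being undefined (illustrative values only):
//
//   fcvtws(3.7f, RTZ)       // 3: rounded toward zero
//   fcvtws(1e10f, RTZ)      // kMaxInt32: too large, saturates
//   fcvtws(-INFINITY, RTZ)  // kMinInt32
//   fcvtws(NAN, RTZ)        // kMaxInt32: NaN maps to the largest value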
2648
2649#if XLEN >= 64
2650static int64_t fcvtls(float x, RoundingMode rounding) {
2651 if (x < static_cast<float>(kMinInt64)) {
2652 return kMinInt64; // Negative infinity.
2653 }
2654 if (x < static_cast<float>(kMaxInt64)) {
2655 return static_cast<int64_t>(Round(x, rounding));
2656 }
2657 return kMaxInt64; // Positive infinity, NaN.
2658}
2659
2660static uint64_t fcvtlus(float x, RoundingMode rounding) {
2661 if (x < static_cast<float>(0.0)) {
2662 return 0; // Negative infinity.
2663 }
2664 if (x < static_cast<float>(kMaxUint64)) {
2665 return static_cast<uint64_t>(Round(x, rounding));
2666 }
2667 return kMaxUint64; // Positive infinity, NaN.
2668}
2669#endif // XLEN >= 64
2670
2671static int32_t fcvtwd(double x, RoundingMode rounding) {
2672 if (x < static_cast<double>(kMinInt32)) {
2673 return kMinInt32; // Negative infinity.
2674 }
2675 if (x < static_cast<double>(kMaxInt32)) {
2676 return static_cast<int32_t>(Round(x, rounding));
2677 }
2678 return kMaxInt32; // Positive infinity, NaN.
2679}
2680
2681static uint32_t fcvtwud(double x, RoundingMode rounding) {
2682 if (x < static_cast<double>(0)) {
2683 return 0; // Negative infinity.
2684 }
2685 if (x < static_cast<double>(kMaxUint32)) {
2686 return static_cast<uint32_t>(Round(x, rounding));
2687 }
2688 return kMaxUint32; // Positive infinity, NaN.
2689}
2690
2691#if XLEN >= 64
2692static int64_t fcvtld(double x, RoundingMode rounding) {
2693 if (x < static_cast<double>(kMinInt64)) {
2694 return kMinInt64; // Negative infinity.
2695 }
2696 if (x < static_cast<double>(kMaxInt64)) {
2697 return static_cast<int64_t>(Round(x, rounding));
2698 }
2699 return kMaxInt64; // Positive infinity, NaN.
2700}
2701
2702static uint64_t fcvtlud(double x, RoundingMode rounding) {
2703 if (x < static_cast<double>(0.0)) {
2704 return 0; // Negative infinity.
2705 }
2706 if (x < static_cast<double>(kMaxUint64)) {
2707 return static_cast<uint64_t>(Round(x, rounding));
2708 }
2709 return kMaxUint64; // Positive infinity, NaN.
2710}
2711#endif // XLEN >= 64
2712
2713void Simulator::InterpretOPFP(Instr instr) {
2714 switch (instr.funct7()) {
2715 case FADDS: {
2716 float rs1 = get_fregs(instr.frs1());
2717 float rs2 = get_fregs(instr.frs2());
2718 set_fregs(instr.frd(), rs1 + rs2);
2719 break;
2720 }
2721 case FSUBS: {
2722 float rs1 = get_fregs(instr.frs1());
2723 float rs2 = get_fregs(instr.frs2());
2724 set_fregs(instr.frd(), rs1 - rs2);
2725 break;
2726 }
2727 case FMULS: {
2728 float rs1 = get_fregs(instr.frs1());
2729 float rs2 = get_fregs(instr.frs2());
2730 set_fregs(instr.frd(), rs1 * rs2);
2731 break;
2732 }
2733 case FDIVS: {
2734 float rs1 = get_fregs(instr.frs1());
2735 float rs2 = get_fregs(instr.frs2());
2736 set_fregs(instr.frd(), rs1 / rs2);
2737 break;
2738 }
2739 case FSQRTS: {
2740 float rs1 = get_fregs(instr.frs1());
2741 set_fregs(instr.frd(), sqrtf(rs1));
2742 break;
2743 }
2744 case FSGNJS: {
2745 const uint32_t kSignMask = static_cast<uint32_t>(1) << 31;
2746 uint32_t rs1 = bit_cast<uint32_t>(get_fregs(instr.frs1()));
2747 uint32_t rs2 = bit_cast<uint32_t>(get_fregs(instr.frs2()));
2748 uint32_t result;
2749 switch (instr.funct3()) {
2750 case J:
2751 result = (rs1 & ~kSignMask) | (rs2 & kSignMask);
2752 break;
2753 case JN:
2754 result = (rs1 & ~kSignMask) | (~rs2 & kSignMask);
2755 break;
2756 case JX:
2757 result = (rs1 & ~kSignMask) | ((rs1 ^ rs2) & kSignMask);
2758 break;
2759 default:
2760 IllegalInstruction(instr);
2761 }
2762 set_fregs(instr.frd(), bit_cast<float>(result));
2763 break;
2764 }
2765 case FMINMAXS: {
2766 float rs1 = get_fregs(instr.frs1());
2767 float rs2 = get_fregs(instr.frs2());
2768 switch (instr.funct3()) {
2769 case FMIN:
2770 set_fregs(instr.frd(), rv_fminf(rs1, rs2));
2771 break;
2772 case FMAX:
2773 set_fregs(instr.frd(), rv_fmaxf(rs1, rs2));
2774 break;
2775 default:
2776 IllegalInstruction(instr);
2777 }
2778 break;
2779 }
2780 case FCMPS: {
2781 float rs1 = get_fregs(instr.frs1());
2782 float rs2 = get_fregs(instr.frs2());
2783 switch (instr.funct3()) {
2784 case FEQ:
2785 set_xreg(instr.rd(), rs1 == rs2 ? 1 : 0);
2786 break;
2787 case FLT:
2788 set_xreg(instr.rd(), rs1 < rs2 ? 1 : 0);
2789 break;
2790 case FLE:
2791 set_xreg(instr.rd(), rs1 <= rs2 ? 1 : 0);
2792 break;
2793 default:
2794 IllegalInstruction(instr);
2795 }
2796 break;
2797 }
2798 case FCLASSS: // = FMVXW
2799 switch (instr.funct3()) {
2800 case 1:
2801 // fclass.s
2802 set_xreg(instr.rd(), fclass(get_fregs(instr.frs1())));
2803 break;
2804 case 0:
2805 // fmv.x.s
2806 set_xreg(instr.rd(),
2807 sign_extend(bit_cast<int32_t>(get_fregs(instr.frs1()))));
2808 break;
2809 default:
2810 IllegalInstruction(instr);
2811 }
2812 break;
2813 case FCVTintS:
2814 switch (static_cast<FcvtRs2>(instr.rs2())) {
2815 case W:
2816 set_xreg(instr.rd(), sign_extend(fcvtws(get_fregs(instr.frs1()),
2817 instr.rounding())));
2818 break;
2819 case WU:
2820 set_xreg(instr.rd(), sign_extend(fcvtwus(get_fregs(instr.frs1()),
2821 instr.rounding())));
2822 break;
2823#if XLEN >= 64
2824 case L:
2825 set_xreg(instr.rd(), sign_extend(fcvtls(get_fregs(instr.frs1()),
2826 instr.rounding())));
2827 break;
2828 case LU:
2829 set_xreg(instr.rd(), sign_extend(fcvtlus(get_fregs(instr.frs1()),
2830 instr.rounding())));
2831 break;
2832#endif // XLEN >= 64
2833 default:
2834 IllegalInstruction(instr);
2835 }
2836 break;
2837 case FCVTSint:
2838 switch (static_cast<FcvtRs2>(instr.rs2())) {
2839 case W:
2840 set_fregs(
2841 instr.frd(),
2842 static_cast<float>(static_cast<int32_t>(get_xreg(instr.rs1()))));
2843 break;
2844 case WU:
2845 set_fregs(
2846 instr.frd(),
2847 static_cast<float>(static_cast<uint32_t>(get_xreg(instr.rs1()))));
2848 break;
2849#if XLEN >= 64
2850 case L:
2851 set_fregs(
2852 instr.frd(),
2853 static_cast<float>(static_cast<int64_t>(get_xreg(instr.rs1()))));
2854 break;
2855 case LU:
2856 set_fregs(
2857 instr.frd(),
2858 static_cast<float>(static_cast<uint64_t>(get_xreg(instr.rs1()))));
2859 break;
2860#endif // XLEN >= 64
2861 default:
2862 IllegalInstruction(instr);
2863 }
2864 break;
2865 case FMVWX:
2866 set_fregs(instr.frd(),
2867 bit_cast<float>(static_cast<int32_t>(get_xreg(instr.rs1()))));
2868 break;
2869 case FADDD: {
2870 double rs1 = get_fregd(instr.frs1());
2871 double rs2 = get_fregd(instr.frs2());
2872 set_fregd(instr.frd(), rs1 + rs2);
2873 break;
2874 }
2875 case FSUBD: {
2876 double rs1 = get_fregd(instr.frs1());
2877 double rs2 = get_fregd(instr.frs2());
2878 set_fregd(instr.frd(), rs1 - rs2);
2879 break;
2880 }
2881 case FMULD: {
2882 double rs1 = get_fregd(instr.frs1());
2883 double rs2 = get_fregd(instr.frs2());
2884 set_fregd(instr.frd(), rs1 * rs2);
2885 break;
2886 }
2887 case FDIVD: {
2888 double rs1 = get_fregd(instr.frs1());
2889 double rs2 = get_fregd(instr.frs2());
2890 set_fregd(instr.frd(), rs1 / rs2);
2891 break;
2892 }
2893 case FSQRTD: {
2894 double rs1 = get_fregd(instr.frs1());
2895 set_fregd(instr.frd(), sqrt(rs1));
2896 break;
2897 }
2898 case FSGNJD: {
2899 const uint64_t kSignMask = static_cast<uint64_t>(1) << 63;
2900 uint64_t rs1 = bit_cast<uint64_t>(get_fregd(instr.frs1()));
2901 uint64_t rs2 = bit_cast<uint64_t>(get_fregd(instr.frs2()));
2902 uint64_t result;
2903 switch (instr.funct3()) {
2904 case J:
2905 result = (rs1 & ~kSignMask) | (rs2 & kSignMask);
2906 break;
2907 case JN:
2908 result = (rs1 & ~kSignMask) | (~rs2 & kSignMask);
2909 break;
2910 case JX:
2911 result = (rs1 & ~kSignMask) | ((rs1 ^ rs2) & kSignMask);
2912 break;
2913 default:
2914 IllegalInstruction(instr);
2915 }
2916 set_fregd(instr.frd(), bit_cast<double>(result));
2917 break;
2918 }
2919 case FMINMAXD: {
2920 double rs1 = get_fregd(instr.frs1());
2921 double rs2 = get_fregd(instr.frs2());
2922 switch (instr.funct3()) {
2923 case FMIN:
2924 set_fregd(instr.frd(), rv_fmin(rs1, rs2));
2925 break;
2926 case FMAX:
2927 set_fregd(instr.frd(), rv_fmax(rs1, rs2));
2928 break;
2929 default:
2930 IllegalInstruction(instr);
2931 }
2932 break;
2933 }
2934 case FCVTS: {
2935 switch (static_cast<FcvtRs2>(instr.rs2())) {
2936 case 1:
2937 set_fregs(instr.frd(), static_cast<float>(get_fregd(instr.frs1())));
2938 break;
2939 default:
2940 IllegalInstruction(instr);
2941 }
2942 break;
2943 }
2944 case FCVTD: {
2945 switch (static_cast<FcvtRs2>(instr.rs2())) {
2946 case 0:
2947 set_fregd(instr.frd(), static_cast<double>(get_fregs(instr.frs1())));
2948 break;
2949 default:
2950 IllegalInstruction(instr);
2951 }
2952 break;
2953 }
2954
2955 case FCMPD: {
2956 double rs1 = get_fregd(instr.frs1());
2957 double rs2 = get_fregd(instr.frs2());
2958 switch (instr.funct3()) {
2959 case FEQ:
2960 set_xreg(instr.rd(), rs1 == rs2 ? 1 : 0);
2961 break;
2962 case FLT:
2963 set_xreg(instr.rd(), rs1 < rs2 ? 1 : 0);
2964 break;
2965 case FLE:
2966 set_xreg(instr.rd(), rs1 <= rs2 ? 1 : 0);
2967 break;
2968 default:
2969 IllegalInstruction(instr);
2970 }
2971 break;
2972 }
2973 case FCLASSD: // = FMVXD
2974 switch (instr.funct3()) {
2975 case 1:
2976 // fclass.d
2977 set_xreg(instr.rd(), fclass(get_fregd(instr.frs1())));
2978 break;
2979#if XLEN >= 64
2980 case 0:
2981 // fmv.x.d
2982 set_xreg(instr.rd(), bit_cast<int64_t>(get_fregd(instr.frs1())));
2983 break;
2984#endif // XLEN >= 64
2985 default:
2986 IllegalInstruction(instr);
2987 }
2988 break;
2989 case FCVTintD:
2990 switch (static_cast<FcvtRs2>(instr.rs2())) {
2991 case W:
2992 set_xreg(instr.rd(), sign_extend(fcvtwd(get_fregd(instr.frs1()),
2993 instr.rounding())));
2994 break;
2995 case WU:
2996 set_xreg(instr.rd(), sign_extend(fcvtwud(get_fregd(instr.frs1()),
2997 instr.rounding())));
2998 break;
2999#if XLEN >= 64
3000 case L:
3001 set_xreg(instr.rd(), sign_extend(fcvtld(get_fregd(instr.frs1()),
3002 instr.rounding())));
3003 break;
3004 case LU:
3005 set_xreg(instr.rd(), sign_extend(fcvtlud(get_fregd(instr.frs1()),
3006 instr.rounding())));
3007 break;
3008#endif // XLEN >= 64
3009 default:
3010 IllegalInstruction(instr);
3011 }
3012 break;
3013 case FCVTDint:
3014 switch (static_cast<FcvtRs2>(instr.rs2())) {
3015 case W:
3016 set_fregd(
3017 instr.frd(),
3018 static_cast<double>(static_cast<int32_t>(get_xreg(instr.rs1()))));
3019 break;
3020 case WU:
3021 set_fregd(instr.frd(), static_cast<double>(static_cast<uint32_t>(
3022 get_xreg(instr.rs1()))));
3023 break;
3024#if XLEN >= 64
3025 case L:
3026 set_fregd(
3027 instr.frd(),
3028 static_cast<double>(static_cast<int64_t>(get_xreg(instr.rs1()))));
3029 break;
3030 case LU:
3031 set_fregd(instr.frd(), static_cast<double>(static_cast<uint64_t>(
3032 get_xreg(instr.rs1()))));
3033 break;
3034#endif // XLEN >= 64
3035 default:
3036 IllegalInstruction(instr);
3037 }
3038 break;
3039#if XLEN >= 64
3040 case FMVDX:
3041 set_fregd(instr.frd(), bit_cast<double>(get_xreg(instr.rs1())));
3042 break;
3043#endif // XLEN >= 64
3044 default:
3045 IllegalInstruction(instr);
3046 }
3047 pc_ += instr.length();
3048}
3049
3050void Simulator::InterpretEBREAK(Instr instr) {
3051 PrintRegisters();
3052 PrintStack();
3053 FATAL("Encountered EBREAK");
3054}
3055
3056void Simulator::InterpretEBREAK(CInstr instr) {
3057 PrintRegisters();
3058 PrintStack();
3059 FATAL("Encountered EBREAK");
3060}
3061
3062void Simulator::IllegalInstruction(Instr instr) {
3063 PrintRegisters();
3064 PrintStack();
3065 FATAL("Illegal instruction: 0x%08x", instr.encoding());
3066}
3067
3068void Simulator::IllegalInstruction(CInstr instr) {
3069 PrintRegisters();
3070 PrintStack();
3071 FATAL("Illegal instruction: 0x%04x", instr.encoding());
3072}
3073
3074template <typename type>
3075type Simulator::MemoryRead(uintx_t addr, Register base) {
3076#if defined(DEBUG)
3077 if ((base == SP) || (base == FP)) {
3078 if ((addr + sizeof(type) > stack_base()) || (addr < get_xreg(SP))) {
3079 PrintRegisters();
3080 PrintStack();
3081 FATAL("Out-of-bounds stack access");
3082 }
3083 } else {
3084 const uintx_t kPageSize = 16 * KB;
3085 if ((addr < kPageSize) || (addr + sizeof(type) >= ~kPageSize)) {
3086 PrintRegisters();
3087 PrintStack();
3088 FATAL("Bad memory access");
3089 }
3090 }
3091#endif
3092 return LoadUnaligned(reinterpret_cast<type*>(addr));
3093}
3094
3095template <typename type>
3096void Simulator::MemoryWrite(uintx_t addr, type value, Register base) {
3097#if defined(DEBUG)
3098 if ((base == SP) || (base == FP)) {
3099 if ((addr + sizeof(type) > stack_base()) || (addr < get_xreg(SP))) {
3100 PrintRegisters();
3101 PrintStack();
3102 FATAL("Out-of-bounds stack access");
3103 }
3104 } else {
3105 const uintx_t kPageSize = 16 * KB;
3106 if ((addr < kPageSize) || (addr + sizeof(type) >= ~kPageSize)) {
3107 PrintRegisters();
3108 PrintStack();
3109 FATAL("Bad memory access");
3110 }
3111 }
3112#endif
3113 StoreUnaligned(reinterpret_cast<type*>(addr), value);
3114}
3115
3116enum ControlStatusRegister {
3117 fflags = 0x001,
3118 frm = 0x002,
3119 fcsr = 0x003,
3120 cycle = 0xC00,
3121 time = 0xC01,
3122 instret = 0xC02,
3123#if XLEN == 32
3124 cycleh = 0xC80,
3125 timeh = 0xC81,
3126 instreth = 0xC82,
3127#endif
3128};
3129
3130intx_t Simulator::CSRRead(uint16_t csr) {
3131 switch (csr) {
3132 case fcsr:
3133 return fcsr_;
3134 case cycle:
3135 return instret_ / 2;
3136 case time:
3137 return 0;
3138 case instret:
3139 return instret_;
3140#if XLEN == 32
3141 case cycleh:
3142 return (instret_ / 2) >> 32;
3143 case timeh:
3144 return 0;
3145 case instreth:
3146 return instret_ >> 32;
3147#endif
3148 default:
3149 FATAL("Unknown CSR: %d", csr);
3150 }
3151}
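// The Zicsr instructions interpreted earlier funnel into these accessors; for
// example the pseudo-instruction
//
//   rdcycle a0   // expands to: csrrs a0, cycle, zero
//
// ends up here as CSRRead(cycle). Note that the simulated cycle count is only
// an approximation: it is reported as half the retired-instruction count.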
3152
3153void Simulator::CSRWrite(uint16_t csr, intx_t value) {
3154 UNIMPLEMENTED();
3155}
3156
3157void Simulator::CSRSet(uint16_t csr, intx_t mask) {
3158 UNIMPLEMENTED();
3159}
3160
3161void Simulator::CSRClear(uint16_t csr, intx_t mask) {
3162 UNIMPLEMENTED();
3163}
3164
3165} // namespace dart
3166
 3167#endif // defined(USING_SIMULATOR)
3168
 3169#endif // defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)