Flutter Engine
The Flutter Engine
simulator_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <setjmp.h> // NOLINT
6#include <stdlib.h>
7
8#include "vm/globals.h"
9#if defined(TARGET_ARCH_ARM64)
10
11// Only build the simulator if not compiling for real ARM hardware.
12#if defined(USING_SIMULATOR)
13
14#include "vm/simulator.h"
15
16#include "vm/compiler/assembler/disassembler.h"
17#include "vm/constants.h"
18#include "vm/image_snapshot.h"
19#include "vm/native_arguments.h"
20#include "vm/os_thread.h"
21#include "vm/stack_frame.h"
22
23namespace dart {
24
25// constants_arm64.h does not define an LR constant to prevent accidental
26// direct use of it during code generation. However, using LR directly is okay
27// in this file because it is a simulator.
28constexpr Register LR = LR_DO_NOT_USE_DIRECTLY;
29
30DEFINE_FLAG(uint64_t,
31 trace_sim_after,
32 ULLONG_MAX,
33 "Trace simulator execution after instruction count reached.");
34DEFINE_FLAG(uint64_t,
35 stop_sim_at,
36 ULLONG_MAX,
37 "Instruction address or instruction count to stop simulator at.");
38
39DEFINE_FLAG(bool,
40 sim_allow_unaligned_accesses,
41 true,
42 "Allow unaligned accesses to Normal memory.");
43
44// This macro provides a platform independent use of sscanf. The reason for
45// SScanF not being implemented in a platform independent way through
46// OS in the same way as SNPrint is that the Windows C Run-Time
47// Library does not provide vsscanf.
48#define SScanF sscanf // NOLINT
49
50// SimulatorSetjmpBuffer are linked together, and the last created one
51// is referenced by the Simulator. When an exception is thrown, the exception
52// runtime looks at where to jump and finds the corresponding
53// SimulatorSetjmpBuffer based on the stack pointer of the exception handler.
54// The runtime then does a Longjmp on that buffer to return to the simulator.
55class SimulatorSetjmpBuffer {
56 public:
57 void Longjmp() {
58 // "This" is now the last setjmp buffer.
59 simulator_->set_last_setjmp_buffer(this);
60 longjmp(buffer_, 1);
61 }
62
63 explicit SimulatorSetjmpBuffer(Simulator* sim) {
64 simulator_ = sim;
65 link_ = sim->last_setjmp_buffer();
66 sim->set_last_setjmp_buffer(this);
67 sp_ = static_cast<uword>(sim->get_register(R31, R31IsSP));
68 }
69
70 ~SimulatorSetjmpBuffer() {
71 ASSERT(simulator_->last_setjmp_buffer() == this);
72 simulator_->set_last_setjmp_buffer(link_);
73 }
74
75 SimulatorSetjmpBuffer* link() { return link_; }
76
77 uword sp() { return sp_; }
78
79 private:
80 uword sp_;
81 Simulator* simulator_;
82 SimulatorSetjmpBuffer* link_;
83 jmp_buf buffer_;
84
85 friend class Simulator;
86};
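// A minimal usage sketch (it mirrors the pattern in DoRedirectedCall() further
// down in this file):
//
//   SimulatorSetjmpBuffer buffer(this);   // links itself in as the last buffer
//   if (!setjmp(buffer.buffer_)) {
//     // ... call out of the simulator; the callee may throw a Dart exception ...
//   } else {
//     // Reached via buffer.Longjmp() from the exception runtime; execution
//     // continues at the Dart exception handler.
//   }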
87
88// The SimulatorDebugger class is used by the simulator while debugging
89// simulated ARM64 code.
90class SimulatorDebugger {
91 public:
92 explicit SimulatorDebugger(Simulator* sim);
93 ~SimulatorDebugger();
94
95 void Stop(Instr* instr, const char* message);
96 void Debug();
97 char* ReadLine(const char* prompt);
98
99 private:
100 Simulator* sim_;
101
102 bool GetValue(char* desc, uint64_t* value);
103 bool GetSValue(char* desc, uint32_t* value);
104 bool GetDValue(char* desc, uint64_t* value);
105 bool GetQValue(char* desc, simd_value_t* value);
106
107 static TokenPosition GetApproximateTokenIndex(const Code& code, uword pc);
108
109 static void PrintDartFrame(uword vm_instructions,
110 uword isolate_instructions,
111 uword pc,
112 uword fp,
113 uword sp,
114 const Function& function,
115 TokenPosition token_pos,
116 bool is_optimized,
117 bool is_inlined);
118 void PrintBacktrace();
119
120 // Set or delete a breakpoint. Returns true if successful.
121 bool SetBreakpoint(Instr* breakpc);
122 bool DeleteBreakpoint(Instr* breakpc);
123
124 // Undo and redo all breakpoints. This is needed to bracket disassembly and
125 // execution to skip past breakpoints when run from the debugger.
126 void UndoBreakpoints();
127 void RedoBreakpoints();
128};
129
130SimulatorDebugger::SimulatorDebugger(Simulator* sim) {
131 sim_ = sim;
132}
133
134SimulatorDebugger::~SimulatorDebugger() {}
135
136void SimulatorDebugger::Stop(Instr* instr, const char* message) {
137 OS::PrintErr("Simulator hit %s\n", message);
138 Debug();
139}
140
141static Register LookupCpuRegisterByName(const char* name) {
142 static const char* const kNames[] = {
143 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
144 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
145 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
146 "r24", "r25", "r26", "r27", "r28", "r29", "r30",
147
148 "ip0", "ip1", "pp", "fp", "lr", "sp", "zr",
149 };
150 static const Register kRegisters[] = {
151 R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10,
152 R11, R12, R13, R14, R15, R16, R17, R18, R19, R20, R21,
153 R22, R23, R24, R25, R26, R27, R28, R29, R30,
154
155 IP0, IP1, PP, FP, LR, R31, ZR,
156 };
157 ASSERT(ARRAY_SIZE(kNames) == ARRAY_SIZE(kRegisters));
158 for (unsigned i = 0; i < ARRAY_SIZE(kNames); i++) {
159 if (strcmp(kNames[i], name) == 0) {
160 return kRegisters[i];
161 }
162 }
163 return kNoRegister;
164}
165
166static VRegister LookupVRegisterByName(const char* name) {
167 int reg_nr = -1;
168 bool ok = SScanF(name, "v%d", &reg_nr);
169 if (ok && (0 <= reg_nr) && (reg_nr < kNumberOfVRegisters)) {
170 return static_cast<VRegister>(reg_nr);
171 }
172 return kNoVRegister;
173}
174
175bool SimulatorDebugger::GetValue(char* desc, uint64_t* value) {
176 Register reg = LookupCpuRegisterByName(desc);
177 if (reg != kNoRegister) {
178 if (reg == ZR) {
179 *value = 0;
180 return true;
181 }
182 *value = sim_->get_register(reg);
183 return true;
184 }
185 if (desc[0] == '*') {
186 uint64_t addr;
187 if (GetValue(desc + 1, &addr)) {
188 if (Simulator::IsIllegalAddress(addr)) {
189 return false;
190 }
191 *value = *(reinterpret_cast<int64_t*>(addr));
192 return true;
193 }
194 }
195 if (strcmp("pc", desc) == 0) {
196 *value = sim_->get_pc();
197 return true;
198 }
199 bool retval = SScanF(desc, "0x%" Px64, value) == 1;
200 if (!retval) {
201 retval = SScanF(desc, "%" Px64, value) == 1;
202 }
203 return retval;
204}
205
206bool SimulatorDebugger::GetSValue(char* desc, uint32_t* value) {
207 VRegister vreg = LookupVRegisterByName(desc);
208 if (vreg != kNoVRegister) {
209 *value = sim_->get_vregisters(vreg, 0);
210 return true;
211 }
212 if (desc[0] == '*') {
213 uint64_t addr;
214 if (GetValue(desc + 1, &addr)) {
215 if (Simulator::IsIllegalAddress(addr)) {
216 return false;
217 }
218 *value = *(reinterpret_cast<uint32_t*>(addr));
219 return true;
220 }
221 }
222 return false;
223}
224
225bool SimulatorDebugger::GetDValue(char* desc, uint64_t* value) {
226 VRegister vreg = LookupVRegisterByName(desc);
227 if (vreg != kNoVRegister) {
228 *value = sim_->get_vregisterd(vreg, 0);
229 return true;
230 }
231 if (desc[0] == '*') {
232 uint64_t addr;
233 if (GetValue(desc + 1, &addr)) {
234 if (Simulator::IsIllegalAddress(addr)) {
235 return false;
236 }
237 *value = *(reinterpret_cast<uint64_t*>(addr));
238 return true;
239 }
240 }
241 return false;
242}
243
244bool SimulatorDebugger::GetQValue(char* desc, simd_value_t* value) {
245 VRegister vreg = LookupVRegisterByName(desc);
246 if (vreg != kNoVRegister) {
247 sim_->get_vregister(vreg, value);
248 return true;
249 }
250 if (desc[0] == '*') {
251 uint64_t addr;
252 if (GetValue(desc + 1, &addr)) {
253 if (Simulator::IsIllegalAddress(addr)) {
254 return false;
255 }
256 *value = *(reinterpret_cast<simd_value_t*>(addr));
257 return true;
258 }
259 }
260 return false;
261}
262
263TokenPosition SimulatorDebugger::GetApproximateTokenIndex(const Code& code,
264 uword pc) {
265 TokenPosition token_pos = TokenPosition::kNoSource;
266 uword pc_offset = pc - code.PayloadStart();
267 const PcDescriptors& descriptors =
268 PcDescriptors::Handle(code.pc_descriptors());
269 PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kAnyKind);
270 while (iter.MoveNext()) {
271 if (iter.PcOffset() == pc_offset) {
272 return iter.TokenPos();
273 } else if (!token_pos.IsReal() && (iter.PcOffset() > pc_offset)) {
274 token_pos = iter.TokenPos();
275 }
276 }
277 return token_pos;
278}
279
280#if defined(DART_PRECOMPILED_RUNTIME)
281static const char* ImageName(uword vm_instructions,
282 uword isolate_instructions,
283 uword pc,
284 intptr_t* offset) {
285 const Image vm_image(vm_instructions);
286 const Image isolate_image(isolate_instructions);
287 if (vm_image.contains(pc)) {
288 *offset = pc - vm_instructions;
289 return kVmSnapshotInstructionsAsmSymbol;
290 } else if (isolate_image.contains(pc)) {
291 *offset = pc - isolate_instructions;
292 return kIsolateSnapshotInstructionsAsmSymbol;
293 } else {
294 *offset = 0;
295 return "<unknown>";
296 }
297}
298#endif
299
300void SimulatorDebugger::PrintDartFrame(uword vm_instructions,
301 uword isolate_instructions,
302 uword pc,
303 uword fp,
304 uword sp,
305 const Function& function,
306 TokenPosition token_pos,
307 bool is_optimized,
308 bool is_inlined) {
309 const Script& script = Script::Handle(function.script());
310 const String& func_name = String::Handle(function.QualifiedScrubbedName());
311 const String& url = String::Handle(script.url());
312 intptr_t line, column;
313 if (script.GetTokenLocation(token_pos, &line, &column)) {
315 "pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s%s (%s:%" Pd ":%" Pd ")", pc,
316 fp, sp, is_optimized ? (is_inlined ? "inlined " : "optimized ") : "",
317 func_name.ToCString(), url.ToCString(), line, column);
318 } else {
319 OS::PrintErr("pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s%s (%s)", pc, fp, sp,
320 is_optimized ? (is_inlined ? "inlined " : "optimized ") : "",
321 func_name.ToCString(), url.ToCString());
322 }
323#if defined(DART_PRECOMPILED_RUNTIME)
324 intptr_t offset;
325 auto const symbol_name =
326 ImageName(vm_instructions, isolate_instructions, pc, &offset);
327 OS::PrintErr(" %s+0x%" Px "", symbol_name, offset);
328#endif
329 OS::PrintErr("\n");
330}
331
332void SimulatorDebugger::PrintBacktrace() {
333 auto const T = Thread::Current();
334 auto const Z = T->zone();
335#if defined(DART_PRECOMPILED_RUNTIME)
336 auto const vm_instructions = reinterpret_cast<uword>(
337 Dart::vm_isolate_group()->source()->snapshot_instructions);
338 auto const isolate_instructions = reinterpret_cast<uword>(
339 T->isolate_group()->source()->snapshot_instructions);
340 OS::PrintErr("vm_instructions=0x%" Px ", isolate_instructions=0x%" Px "\n",
341 vm_instructions, isolate_instructions);
342#else
343 const uword vm_instructions = 0;
344 const uword isolate_instructions = 0;
345#endif
346 StackFrameIterator frames(sim_->get_register(FP), sim_->get_register(SP),
347 sim_->get_pc(),
348 ValidationPolicy::kDontValidateFrames, T,
349 StackFrameIterator::kNoCrossThreadIteration);
350 StackFrame* frame = frames.NextFrame();
351 ASSERT(frame != nullptr);
352 Function& function = Function::Handle(Z);
353 Function& inlined_function = Function::Handle(Z);
354 Code& code = Code::Handle(Z);
355 Code& unoptimized_code = Code::Handle(Z);
356 while (frame != nullptr) {
357 if (frame->IsDartFrame()) {
358 code = frame->LookupDartCode();
359 function = code.function();
360 if (code.is_optimized()) {
361 // For optimized frames, extract all the inlined functions if any
362 // into the stack trace.
363 InlinedFunctionsIterator it(code, frame->pc());
364 while (!it.Done()) {
365 // Print each inlined frame with its pc in the corresponding
366 // unoptimized frame.
367 inlined_function = it.function();
368 unoptimized_code = it.code();
369 uword unoptimized_pc = it.pc();
370 it.Advance();
371 if (!it.Done()) {
372 PrintDartFrame(
373 vm_instructions, isolate_instructions, unoptimized_pc,
374 frame->fp(), frame->sp(), inlined_function,
375 GetApproximateTokenIndex(unoptimized_code, unoptimized_pc),
376 true, true);
377 }
378 }
379 // Print the optimized inlining frame below.
380 }
381 PrintDartFrame(vm_instructions, isolate_instructions, frame->pc(),
382 frame->fp(), frame->sp(), function,
383 GetApproximateTokenIndex(code, frame->pc()),
384 code.is_optimized(), false);
385 } else {
386 OS::PrintErr("pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s frame",
387 frame->pc(), frame->fp(), frame->sp(),
388 frame->IsEntryFrame() ? "entry"
389 : frame->IsExitFrame() ? "exit"
390 : frame->IsStubFrame() ? "stub"
391 : "invalid");
392#if defined(DART_PRECOMPILED_RUNTIME)
393 intptr_t offset;
394 auto const symbol_name = ImageName(vm_instructions, isolate_instructions,
395 frame->pc(), &offset);
396 OS::PrintErr(" %s+0x%" Px "", symbol_name, offset);
397#endif
398 OS::PrintErr("\n");
399 }
400 frame = frames.NextFrame();
401 }
402}
403
404bool SimulatorDebugger::SetBreakpoint(Instr* breakpc) {
405 // Check if a breakpoint can be set. If not, return without any side effects.
406 if (sim_->break_pc_ != nullptr) {
407 return false;
408 }
409
410 // Set the breakpoint.
411 sim_->break_pc_ = breakpc;
412 sim_->break_instr_ = breakpc->InstructionBits();
413 // Not setting the breakpoint instruction in the code itself. It will be set
414 // when the debugger shell continues.
415 return true;
416}
417
418bool SimulatorDebugger::DeleteBreakpoint(Instr* breakpc) {
419 if (sim_->break_pc_ != nullptr) {
420 sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
421 }
422
423 sim_->break_pc_ = nullptr;
424 sim_->break_instr_ = 0;
425 return true;
426}
427
428void SimulatorDebugger::UndoBreakpoints() {
429 if (sim_->break_pc_ != nullptr) {
430 sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
431 }
432}
433
434void SimulatorDebugger::RedoBreakpoints() {
435 if (sim_->break_pc_ != nullptr) {
436 sim_->break_pc_->SetInstructionBits(Instr::kSimulatorBreakpointInstruction);
437 }
438}
439
440void SimulatorDebugger::Debug() {
441 uintptr_t last_pc = -1;
442 bool done = false;
443
444#define COMMAND_SIZE 63
445#define ARG_SIZE 255
446
447#define STR(a) #a
448#define XSTR(a) STR(a)
449
450 char cmd[COMMAND_SIZE + 1];
451 char arg1[ARG_SIZE + 1];
452 char arg2[ARG_SIZE + 1];
453
454 // Make sure to have a proper terminating character if the limit is reached.
455 cmd[COMMAND_SIZE] = 0;
456 arg1[ARG_SIZE] = 0;
457 arg2[ARG_SIZE] = 0;
458
459 // Undo all set breakpoints while running in the debugger shell. This will
460 // make them invisible to all commands.
461 UndoBreakpoints();
462
463 while (!done) {
464 if (last_pc != sim_->get_pc()) {
465 last_pc = sim_->get_pc();
466 if (Simulator::IsIllegalAddress(last_pc)) {
467 OS::PrintErr("pc is out of bounds: 0x%" Px "\n", last_pc);
468 } else {
469 if (FLAG_support_disassembler) {
470 Disassembler::Disassemble(last_pc, last_pc + Instr::kInstrSize);
471 } else {
472 OS::PrintErr("Disassembler not supported in this mode.\n");
473 }
474 }
475 }
476 char* line = ReadLine("sim> ");
477 if (line == nullptr) {
478 FATAL("ReadLine failed");
479 } else {
480 // Use sscanf to parse the individual parts of the command line. At the
481 // moment no command expects more than two parameters.
482 int args = SScanF(line,
483 "%" XSTR(COMMAND_SIZE) "s "
484 "%" XSTR(ARG_SIZE) "s "
485 "%" XSTR(ARG_SIZE) "s",
486 cmd, arg1, arg2);
487 if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
489 "c/cont -- continue execution\n"
490 "disasm -- disassemble instrs at current pc location\n"
491 " other variants are:\n"
492 " disasm <address>\n"
493 " disasm <address> <number_of_instructions>\n"
494 " by default 10 instrs are disassembled\n"
495 "del -- delete breakpoints\n"
496 "flags -- print flag values\n"
497 "gdb -- transfer control to gdb\n"
498 "h/help -- print this help string\n"
499 "break <address> -- set break point at specified address\n"
500 "p/print <reg or icount or value or *addr> -- print integer\n"
501 "pf/printfloat <vreg or *addr> --print float value\n"
502 "pd/printdouble <vreg or *addr> -- print double value\n"
503 "pq/printquad <vreg or *addr> -- print vector register\n"
504 "po/printobject <*reg or *addr> -- print object\n"
505 "si/stepi -- single step an instruction\n"
506 "trace -- toggle execution tracing mode\n"
507 "bt -- print backtrace\n"
508 "unstop -- if current pc is a stop instr make it a nop\n"
509 "q/quit -- Quit the debugger and exit the program\n");
510 } else if ((strcmp(cmd, "quit") == 0) || (strcmp(cmd, "q") == 0)) {
511 OS::PrintErr("Quitting\n");
512 OS::Exit(0);
513 } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
514 sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
515 } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
516 // Execute the one instruction we broke at with breakpoints disabled.
517 sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
518 // Leave the debugger shell.
519 done = true;
520 } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
521 if (args == 2) {
522 uint64_t value;
523 if (strcmp(arg1, "icount") == 0) {
524 value = sim_->get_icount();
525 OS::PrintErr("icount: %" Pu64 " 0x%" Px64 "\n", value, value);
526 } else if (GetValue(arg1, &value)) {
527 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 "\n", arg1, value, value);
528 } else {
529 OS::PrintErr("%s unrecognized\n", arg1);
530 }
531 } else {
532 OS::PrintErr("print <reg or icount or value or *addr>\n");
533 }
534 } else if ((strcmp(cmd, "pf") == 0) || (strcmp(cmd, "printfloat") == 0)) {
535 if (args == 2) {
536 uint32_t value;
537 if (GetSValue(arg1, &value)) {
538 float svalue = bit_cast<float, uint32_t>(value);
539 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, value, value, svalue);
540 } else {
541 OS::PrintErr("%s unrecognized\n", arg1);
542 }
543 } else {
544 OS::PrintErr("printfloat <vreg or *addr>\n");
545 }
546 } else if ((strcmp(cmd, "pd") == 0) ||
547 (strcmp(cmd, "printdouble") == 0)) {
548 if (args == 2) {
549 uint64_t long_value;
550 if (GetDValue(arg1, &long_value)) {
551 double dvalue = bit_cast<double, uint64_t>(long_value);
552 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, long_value,
553 long_value, dvalue);
554 } else {
555 OS::PrintErr("%s unrecognized\n", arg1);
556 }
557 } else {
558 OS::PrintErr("printdouble <vreg or *addr>\n");
559 }
560 } else if ((strcmp(cmd, "pq") == 0) || (strcmp(cmd, "printquad") == 0)) {
561 if (args == 2) {
562 simd_value_t quad_value;
563 if (GetQValue(arg1, &quad_value)) {
564 const int64_t d0 = quad_value.bits.i64[0];
565 const int64_t d1 = quad_value.bits.i64[1];
566 const double dval0 = bit_cast<double, int64_t>(d0);
567 const double dval1 = bit_cast<double, int64_t>(d1);
568 const int32_t s0 = quad_value.bits.i32[0];
569 const int32_t s1 = quad_value.bits.i32[1];
570 const int32_t s2 = quad_value.bits.i32[2];
571 const int32_t s3 = quad_value.bits.i32[3];
572 const float sval0 = bit_cast<float, int32_t>(s0);
573 const float sval1 = bit_cast<float, int32_t>(s1);
574 const float sval2 = bit_cast<float, int32_t>(s2);
575 const float sval3 = bit_cast<float, int32_t>(s3);
576 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, d0, d0,
577 dval0);
578 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, d1, d1,
579 dval1);
580 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s0, s0, sval0);
581 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s1, s1, sval1);
582 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s2, s2, sval2);
583 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s3, s3, sval3);
584 } else {
585 OS::PrintErr("%s unrecognized\n", arg1);
586 }
587 } else {
588 OS::PrintErr("printquad <vreg or *addr>\n");
589 }
590 } else if ((strcmp(cmd, "po") == 0) ||
591 (strcmp(cmd, "printobject") == 0)) {
592 if (args == 2) {
593 uint64_t value;
594 // Make the dereferencing '*' optional.
595 if (((arg1[0] == '*') && GetValue(arg1 + 1, &value)) ||
596 GetValue(arg1, &value)) {
597 if (IsolateGroup::Current()->heap()->Contains(value)) {
598 OS::PrintErr("%s: \n", arg1);
599#if defined(DEBUG)
600 const Object& obj = Object::Handle(
601 static_cast<ObjectPtr>(static_cast<uword>(value)));
602 obj.Print();
603#endif // defined(DEBUG)
604 } else {
605 OS::PrintErr("0x%" Px64 " is not an object reference\n", value);
606 }
607 } else {
608 OS::PrintErr("%s unrecognized\n", arg1);
609 }
610 } else {
611 OS::PrintErr("printobject <*reg or *addr>\n");
612 }
613 } else if (strcmp(cmd, "disasm") == 0) {
614 uint64_t start = 0;
615 uint64_t end = 0;
616 if (args == 1) {
617 start = sim_->get_pc();
618 end = start + (10 * Instr::kInstrSize);
619 } else if (args == 2) {
620 if (GetValue(arg1, &start)) {
621 // No length parameter passed, assume 10 instructions.
622 if (Simulator::IsIllegalAddress(start)) {
623 // If start isn't a valid address, warn and use PC instead.
624 OS::PrintErr("First argument yields invalid address: 0x%" Px64
625 "\n",
626 start);
627 OS::PrintErr("Using PC instead\n");
628 start = sim_->get_pc();
629 }
630 end = start + (10 * Instr::kInstrSize);
631 }
632 } else {
633 uint64_t length;
634 if (GetValue(arg1, &start) && GetValue(arg2, &length)) {
635 if (Simulator::IsIllegalAddress(start)) {
636 // If start isn't a valid address, warn and use PC instead.
637 OS::PrintErr("First argument yields invalid address: 0x%" Px64
638 "\n",
639 start);
640 OS::PrintErr("Using PC instead\n");
641 start = sim_->get_pc();
642 }
643 end = start + (length * Instr::kInstrSize);
644 }
645 }
646 if ((start > 0) && (end > start)) {
647 if (FLAG_support_disassembler) {
648 Disassembler::Disassemble(start, end);
649 } else {
650 OS::PrintErr("Disassembler not supported in this mode.\n");
651 }
652 } else {
653 OS::PrintErr("disasm [<address> [<number_of_instructions>]]\n");
654 }
655 } else if (strcmp(cmd, "gdb") == 0) {
656 OS::PrintErr("relinquishing control to gdb\n");
657 OS::DebugBreak();
658 OS::PrintErr("regaining control from gdb\n");
659 } else if (strcmp(cmd, "break") == 0) {
660 if (args == 2) {
661 uint64_t addr;
662 if (GetValue(arg1, &addr)) {
663 if (!SetBreakpoint(reinterpret_cast<Instr*>(addr))) {
664 OS::PrintErr("setting breakpoint failed\n");
665 }
666 } else {
667 OS::PrintErr("%s unrecognized\n", arg1);
668 }
669 } else {
670 OS::PrintErr("break <addr>\n");
671 }
672 } else if (strcmp(cmd, "del") == 0) {
673 if (!DeleteBreakpoint(nullptr)) {
674 OS::PrintErr("deleting breakpoint failed\n");
675 }
676 } else if (strcmp(cmd, "flags") == 0) {
677 OS::PrintErr("APSR: ");
678 OS::PrintErr("N flag: %d; ", static_cast<int>(sim_->n_flag_));
679 OS::PrintErr("Z flag: %d; ", static_cast<int>(sim_->z_flag_));
680 OS::PrintErr("C flag: %d; ", static_cast<int>(sim_->c_flag_));
681 OS::PrintErr("V flag: %d\n", static_cast<int>(sim_->v_flag_));
682 } else if (strcmp(cmd, "unstop") == 0) {
683 intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
684 Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
685 if (stop_instr->IsExceptionGenOp()) {
686 stop_instr->SetInstructionBits(Instr::kNopInstruction);
687 } else {
688 OS::PrintErr("Not at debugger stop.\n");
689 }
690 } else if (strcmp(cmd, "trace") == 0) {
691 if (FLAG_trace_sim_after == ULLONG_MAX) {
692 FLAG_trace_sim_after = sim_->get_icount();
693 OS::PrintErr("execution tracing on\n");
694 } else {
695 FLAG_trace_sim_after = ULLONG_MAX;
696 OS::PrintErr("execution tracing off\n");
697 }
698 } else if (strcmp(cmd, "bt") == 0) {
699 Thread* thread = reinterpret_cast<Thread*>(sim_->get_register(THR));
700 thread->set_execution_state(Thread::kThreadInVM);
701 PrintBacktrace();
702 thread->set_execution_state(Thread::kThreadInGenerated);
703 } else {
704 OS::PrintErr("Unknown command: %s\n", cmd);
705 }
706 }
707 delete[] line;
708 }
709
710 // Add all the breakpoints back to stop execution and enter the debugger
711 // shell when hit.
712 RedoBreakpoints();
713
714#undef COMMAND_SIZE
715#undef ARG_SIZE
716
717#undef STR
718#undef XSTR
719}
720
721char* SimulatorDebugger::ReadLine(const char* prompt) {
722 char* result = nullptr;
723 char line_buf[256];
724 intptr_t offset = 0;
725 bool keep_going = true;
726 OS::PrintErr("%s", prompt);
727 while (keep_going) {
728 if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
729 // fgets got an error. Just give up.
730 if (result != nullptr) {
731 delete[] result;
732 }
733 return nullptr;
734 }
735 intptr_t len = strlen(line_buf);
736 if (len > 1 && line_buf[len - 2] == '\\' && line_buf[len - 1] == '\n') {
737 // When we read a line that ends with a "\" we remove the escape and
738 // append the remainder.
739 line_buf[len - 2] = '\n';
740 line_buf[len - 1] = 0;
741 len -= 1;
742 } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
743 // Since we read a new line we are done reading the line. This
744 // will exit the loop after copying this buffer into the result.
745 keep_going = false;
746 }
747 if (result == nullptr) {
748 // Allocate the initial result and make room for the terminating '\0'
749 result = new char[len + 1];
750 if (result == nullptr) {
751 // OOM, so cannot readline anymore.
752 return nullptr;
753 }
754 } else {
755 // Allocate a new result with enough room for the new addition.
756 intptr_t new_len = offset + len + 1;
757 char* new_result = new char[new_len];
758 if (new_result == nullptr) {
759 // OOM, free the buffer allocated so far and return nullptr.
760 delete[] result;
761 return nullptr;
762 } else {
763 // Copy the existing input into the new array and set the new
764 // array as the result.
765 memmove(new_result, result, offset);
766 delete[] result;
767 result = new_result;
768 }
769 }
770 // Copy the newly read line into the result.
771 memmove(result + offset, line_buf, len);
772 offset += len;
773 }
774 ASSERT(result != nullptr);
775 result[offset] = '\0';
776 return result;
777}
778
779void Simulator::Init() {}
780
781Simulator::Simulator() : exclusive_access_addr_(0), exclusive_access_value_(0) {
782 // Set up simulator support first. Some of this information is needed to
783 // set up the architecture state.
784 // We allocate the stack here; the size is computed as the sum of
785 // the size specified by the user and the buffer space needed for
786 // handling stack overflow exceptions. To be safe against potential
787 // stack underflows we also add some underflow buffer space.
788 stack_ =
789 new char[(OSThread::GetSpecifiedStackSize() +
790 OSThread::kStackSizeBufferMax + kSimulatorStackUnderflowSize)];
791 // Low address.
792 stack_limit_ = reinterpret_cast<uword>(stack_);
793 // Limit for StackOverflowError.
794 overflow_stack_limit_ = stack_limit_ + OSThread::kStackSizeBufferMax;
795 // High address.
796 stack_base_ = overflow_stack_limit_ + OSThread::GetSpecifiedStackSize();
797
798 pc_modified_ = false;
799 icount_ = 0;
800 break_pc_ = nullptr;
801 break_instr_ = 0;
802 last_setjmp_buffer_ = nullptr;
803
804 // Set up the architecture state.
805 // All registers are initialized to zero to start with.
806 for (int i = 0; i < kNumberOfCpuRegisters; i++) {
807 registers_[i] = 0;
808 }
809 n_flag_ = false;
810 z_flag_ = false;
811 c_flag_ = false;
812 v_flag_ = false;
813
814 for (int i = 0; i < kNumberOfVRegisters; i++) {
815 vregisters_[i].bits.i64[0] = 0;
816 vregisters_[i].bits.i64[1] = 0;
817 }
818
819 // The sp is initialized to point to the bottom (high address) of the
820 // allocated stack area.
821 registers_[R31] = stack_base();
822 // The lr and pc are initialized to a known bad value that will cause an
823 // access violation if the simulator ever tries to execute it.
824 registers_[LR] = kBadLR;
825 pc_ = kBadLR;
826}
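// For reference, the stack allocated above is laid out as follows, from low
// to high addresses:
//
//   stack_limit_          (== stack_)
//     ... OSThread::kStackSizeBufferMax bytes kept free for handling
//         stack-overflow exceptions ...
//   overflow_stack_limit_ (limit used for StackOverflowError checks)
//     ... OSThread::GetSpecifiedStackSize() bytes of usable stack ...
//   stack_base_           (initial value of R31, the simulated stack pointer)
//     ... kSimulatorStackUnderflowSize bytes of underflow slack ...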
827
828Simulator::~Simulator() {
829 delete[] stack_;
830 Isolate* isolate = Isolate::Current();
831 if (isolate != nullptr) {
832 isolate->set_simulator(nullptr);
833 }
834}
835
836// When the generated code calls an external reference we need to catch that in
837// the simulator. The external reference will be a function compiled for the
838// host architecture. We need to call that function instead of trying to
839// execute it with the simulator. We do that by redirecting the external
840// reference to a hlt instruction that is handled by the simulator. We write
841// the original destination of the jump at a known offset from the hlt
842// instruction so the simulator knows what to call.
843class Redirection {
844 public:
845 uword address_of_hlt_instruction() {
846 return reinterpret_cast<uword>(&hlt_instruction_);
847 }
848
849 uword external_function() const { return external_function_; }
850
851 Simulator::CallKind call_kind() const { return call_kind_; }
852
853 int argument_count() const { return argument_count_; }
854
855 static Redirection* Get(uword external_function,
856 Simulator::CallKind call_kind,
857 int argument_count) {
858 MutexLocker ml(mutex_);
859
860 Redirection* old_head = list_.load(std::memory_order_relaxed);
861 for (Redirection* current = old_head; current != nullptr;
862 current = current->next_) {
863 if (current->external_function_ == external_function) return current;
864 }
865
866 Redirection* redirection =
867 new Redirection(external_function, call_kind, argument_count);
868 redirection->next_ = old_head;
869
870 // Use a memory fence to ensure all pending writes are written at the time
871 // of updating the list head, so the profiling thread always has a valid
872 // list to look at.
873 list_.store(redirection, std::memory_order_release);
874
875 return redirection;
876 }
877
878 static Redirection* FromHltInstruction(Instr* hlt_instruction) {
879 char* addr_of_hlt = reinterpret_cast<char*>(hlt_instruction);
880 char* addr_of_redirection =
881 addr_of_hlt - OFFSET_OF(Redirection, hlt_instruction_);
882 return reinterpret_cast<Redirection*>(addr_of_redirection);
883 }
884
885 // Please note that this function is called by the signal handler of the
886 // profiling thread. It can therefore run at any point in time and is not
887 // allowed to hold any locks - which is precisely the reason why the list is
888 // prepend-only and a memory fence is used when writing the list head [list_]!
889 static uword FunctionForRedirect(uword address_of_hlt) {
890 for (Redirection* current = list_.load(std::memory_order_acquire);
891 current != nullptr; current = current->next_) {
892 if (current->address_of_hlt_instruction() == address_of_hlt) {
893 return current->external_function_;
894 }
895 }
896 return 0;
897 }
898
899 private:
900 Redirection(uword external_function,
901 Simulator::CallKind call_kind,
902 int argument_count)
903 : external_function_(external_function),
904 call_kind_(call_kind),
905 argument_count_(argument_count),
906 hlt_instruction_(Instr::kSimulatorRedirectInstruction),
907 next_(nullptr) {}
908
909 uword external_function_;
910 Simulator::CallKind call_kind_;
911 int argument_count_;
912 uint32_t hlt_instruction_;
913 Redirection* next_;
914 static std::atomic<Redirection*> list_;
915 static Mutex* mutex_;
916};
917
918std::atomic<Redirection*> Redirection::list_ = {nullptr};
919Mutex* Redirection::mutex_ = new Mutex();
920
921uword Simulator::RedirectExternalReference(uword function,
922 CallKind call_kind,
923 int argument_count) {
924 Redirection* redirection =
925 Redirection::Get(function, call_kind, argument_count);
926 return redirection->address_of_hlt_instruction();
927}
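// Rough usage sketch of the redirection machinery; the runtime entry point
// named here is only a placeholder:
//
//   uword redirect = Simulator::RedirectExternalReference(
//       reinterpret_cast<uword>(&SomeHostFunction),  // hypothetical target
//       Simulator::kRuntimeCall, /*argument_count=*/1);
//
// Generated code branches to `redirect`, which is the address of the
// Redirection's embedded HLT instruction. Executing that HLT inside the
// simulator traps into DoRedirectedCall(), which recovers the Redirection via
// FromHltInstruction() and invokes the real host function.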
928
929uword Simulator::FunctionForRedirect(uword redirect) {
930 return Redirection::FunctionForRedirect(redirect);
931}
932
933// Get the active Simulator for the current isolate.
934Simulator* Simulator::Current() {
935 Isolate* isolate = Isolate::Current();
936 Simulator* simulator = isolate->simulator();
937 if (simulator == nullptr) {
938 NoSafepointScope no_safepoint;
939 simulator = new Simulator();
940 isolate->set_simulator(simulator);
941 }
942 return simulator;
943}
944
945// Sets the register in the architecture state.
946void Simulator::set_register(Instr* instr,
947 Register reg,
948 int64_t value,
949 R31Type r31t) {
950 // Register is in range.
951 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
952#if !defined(DART_TARGET_OS_FUCHSIA)
953 ASSERT(instr == nullptr || reg != R18); // R18 is globally reserved on iOS.
954#endif
955
956 if ((reg != R31) || (r31t != R31IsZR)) {
957 registers_[reg] = value;
958 // If we're setting CSP, make sure it is 16-byte aligned. In truth, CSP
959 // can store addresses that are not 16-byte aligned, but loads and stores
960 // are not allowed through CSP when it is not aligned. Thus, this check is
961 // more conservative than necessary. However, it will likely be more
962 // useful to find the program locations where CSP is set to a bad value
963 // than to find only the resulting loads/stores that would cause a fault on
964 // hardware.
965 if ((instr != nullptr) && (reg == R31) && !Utils::IsAligned(value, 16)) {
966 UnalignedAccess("CSP set", value, instr);
967 }
968
969#if defined(DEBUG)
970 if (reg == SP) {
971 // Memory below CSP can be written to at any instruction boundary by a
972 // signal handler. Simulate this to ensure we're keeping CSP far enough
973 // ahead of SP to prevent Dart frames from being trashed.
974 uword csp = registers_[R31];
975 WriteX(csp - 1 * kWordSize, icount_, nullptr);
976 WriteX(csp - 2 * kWordSize, icount_, nullptr);
977 WriteX(csp - 3 * kWordSize, icount_, nullptr);
978 WriteX(csp - 4 * kWordSize, icount_, nullptr);
979 }
980#endif
981 }
982}
983
984// Get the register from the architecture state.
985int64_t Simulator::get_register(Register reg, R31Type r31t) const {
986 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
987 if ((reg == R31) && (r31t == R31IsZR)) {
988 return 0;
989 } else {
990 return registers_[reg];
991 }
992}
993
994void Simulator::set_wregister(Register reg, int32_t value, R31Type r31t) {
995 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
996 // When setting in W mode, clear the high bits.
997 if ((reg != R31) || (r31t != R31IsZR)) {
998 registers_[reg] = Utils::LowHighTo64Bits(static_cast<uint32_t>(value), 0);
999 }
1000}
1001
1002// Get the register from the architecture state.
1003int32_t Simulator::get_wregister(Register reg, R31Type r31t) const {
1004 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
1005 if ((reg == R31) && (r31t == R31IsZR)) {
1006 return 0;
1007 } else {
1008 return static_cast<int32_t>(registers_[reg]);
1009 }
1010}
1011
1012int32_t Simulator::get_vregisters(VRegister reg, int idx) const {
1013 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1014 ASSERT((idx >= 0) && (idx <= 3));
1015 return vregisters_[reg].bits.i32[idx];
1016}
1017
1018void Simulator::set_vregisters(VRegister reg, int idx, int32_t value) {
1019 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1020 ASSERT((idx >= 0) && (idx <= 3));
1021 vregisters_[reg].bits.i32[idx] = value;
1022}
1023
1024int64_t Simulator::get_vregisterd(VRegister reg, int idx) const {
1025 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1026 ASSERT((idx == 0) || (idx == 1));
1027 return vregisters_[reg].bits.i64[idx];
1028}
1029
1030void Simulator::set_vregisterd(VRegister reg, int idx, int64_t value) {
1031 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1032 ASSERT((idx == 0) || (idx == 1));
1033 vregisters_[reg].bits.i64[idx] = value;
1034}
1035
1036void Simulator::get_vregister(VRegister reg, simd_value_t* value) const {
1037 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1038 value->bits.i64[0] = vregisters_[reg].bits.i64[0];
1039 value->bits.i64[1] = vregisters_[reg].bits.i64[1];
1040}
1041
1042void Simulator::set_vregister(VRegister reg, const simd_value_t& value) {
1043 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1044 vregisters_[reg].bits.i64[0] = value.bits.i64[0];
1045 vregisters_[reg].bits.i64[1] = value.bits.i64[1];
1046}
1047
1048// Raw access to the PC register.
1049void Simulator::set_pc(uint64_t value) {
1050 pc_modified_ = true;
1051 last_pc_ = pc_;
1052 pc_ = value;
1053}
1054
1055// Raw access to the pc.
1056uint64_t Simulator::get_pc() const {
1057 return pc_;
1058}
1059
1060uint64_t Simulator::get_last_pc() const {
1061 return last_pc_;
1062}
1063
1064void Simulator::HandleIllegalAccess(uword addr, Instr* instr) {
1065 uword fault_pc = get_pc();
1066 uword last_pc = get_last_pc();
1067 char buffer[128];
1068 snprintf(buffer, sizeof(buffer),
1069 "illegal memory access at 0x%" Px ", pc=0x%" Px ", last_pc=0x%" Px
1070 "\n",
1071 addr, fault_pc, last_pc);
1072 SimulatorDebugger dbg(this);
1073 dbg.Stop(instr, buffer);
1074 // The debugger will return control in non-interactive mode.
1075 FATAL("Cannot continue execution after illegal memory access.");
1076}
1077
1078// ARMv8 supports unaligned memory accesses to normal memory without trapping
1079// for all instructions except Load-Exclusive/Store-Exclusive and
1080// Load-Acquire/Store-Release.
1081// See B2.4.2 "Alignment of data accesses" for more information.
1082void Simulator::UnalignedAccess(const char* msg, uword addr, Instr* instr) {
1083 char buffer[128];
1084 snprintf(buffer, sizeof(buffer), "unaligned %s at 0x%" Px ", pc=%p\n", msg,
1085 addr, instr);
1086 SimulatorDebugger dbg(this);
1087 dbg.Stop(instr, buffer);
1088 // The debugger will not be able to single step past this instruction, but
1089 // it will be possible to disassemble the code and inspect registers.
1090 FATAL("Cannot continue execution after unaligned access.");
1091}
1092
1093void Simulator::UnimplementedInstruction(Instr* instr) {
1094 char buffer[128];
1095 snprintf(buffer, sizeof(buffer),
1096 "Unimplemented instruction: at %p, last_pc=0x%" Px64 "\n", instr,
1097 get_last_pc());
1098 SimulatorDebugger dbg(this);
1099 dbg.Stop(instr, buffer);
1100 FATAL("Cannot continue execution after unimplemented instruction.");
1101}
1102
1103bool Simulator::IsTracingExecution() const {
1104 return icount_ > FLAG_trace_sim_after;
1105}
1106
1107intptr_t Simulator::ReadX(uword addr,
1108 Instr* instr,
1109 bool must_be_aligned /* = false */) {
1110 const bool allow_unaligned_access =
1111 FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
1112 if (allow_unaligned_access || (addr & 7) == 0) {
1113 return LoadUnaligned(reinterpret_cast<intptr_t*>(addr));
1114 }
1115 UnalignedAccess("read", addr, instr);
1116 return 0;
1117}
1118
1119void Simulator::WriteX(uword addr, intptr_t value, Instr* instr) {
1120 if (FLAG_sim_allow_unaligned_accesses || (addr & 7) == 0) {
1121 StoreUnaligned(reinterpret_cast<intptr_t*>(addr), value);
1122 return;
1123 }
1124 UnalignedAccess("write", addr, instr);
1125}
1126
1127uint32_t Simulator::ReadWU(uword addr,
1128 Instr* instr,
1129 bool must_be_aligned /* = false */) {
1130 const bool allow_unaligned_access =
1131 FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
1132 if (allow_unaligned_access || (addr & 3) == 0) {
1133 return LoadUnaligned(reinterpret_cast<uint32_t*>(addr));
1134 }
1135 UnalignedAccess("read unsigned single word", addr, instr);
1136 return 0;
1137}
1138
1139int32_t Simulator::ReadW(uword addr, Instr* instr) {
1140 if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
1141 return LoadUnaligned(reinterpret_cast<int32_t*>(addr));
1142 }
1143 UnalignedAccess("read single word", addr, instr);
1144 return 0;
1145}
1146
1147void Simulator::WriteW(uword addr, uint32_t value, Instr* instr) {
1148 if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
1149 StoreUnaligned(reinterpret_cast<uint32_t*>(addr), value);
1150 return;
1151 }
1152 UnalignedAccess("write single word", addr, instr);
1153}
1154
1155uint16_t Simulator::ReadHU(uword addr, Instr* instr) {
1156 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1157 return LoadUnaligned(reinterpret_cast<uint16_t*>(addr));
1158 }
1159 UnalignedAccess("unsigned halfword read", addr, instr);
1160 return 0;
1161}
1162
1163int16_t Simulator::ReadH(uword addr, Instr* instr) {
1164 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1165 return LoadUnaligned(reinterpret_cast<int16_t*>(addr));
1166 }
1167 UnalignedAccess("signed halfword read", addr, instr);
1168 return 0;
1169}
1170
1171void Simulator::WriteH(uword addr, uint16_t value, Instr* instr) {
1172 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1173 StoreUnaligned(reinterpret_cast<uint16_t*>(addr), value);
1174 return;
1175 }
1176 UnalignedAccess("halfword write", addr, instr);
1177}
1178
1179uint8_t Simulator::ReadBU(uword addr) {
1180 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
1181 return *ptr;
1182}
1183
1184int8_t Simulator::ReadB(uword addr) {
1185 int8_t* ptr = reinterpret_cast<int8_t*>(addr);
1186 return *ptr;
1187}
1188
1189void Simulator::WriteB(uword addr, uint8_t value) {
1190 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
1191 *ptr = value;
1192}
1193
1194void Simulator::ClearExclusive() {
1195 exclusive_access_addr_ = 0;
1196 exclusive_access_value_ = 0;
1197}
1198
1199intptr_t Simulator::ReadExclusiveX(uword addr, Instr* instr) {
1200 exclusive_access_addr_ = addr;
1201 exclusive_access_value_ = ReadX(addr, instr, /*must_be_aligned=*/true);
1202 return exclusive_access_value_;
1203}
1204
1205intptr_t Simulator::ReadExclusiveW(uword addr, Instr* instr) {
1206 exclusive_access_addr_ = addr;
1207 exclusive_access_value_ = ReadWU(addr, instr, /*must_be_aligned=*/true);
1208 return exclusive_access_value_;
1209}
1210
1211intptr_t Simulator::WriteExclusiveX(uword addr, intptr_t value, Instr* instr) {
1212 // In well-formed code a store-exclusive instruction should always follow
1213 // a corresponding load-exclusive instruction with the same address.
1214 ASSERT((exclusive_access_addr_ == 0) || (exclusive_access_addr_ == addr));
1215 if (exclusive_access_addr_ != addr) {
1216 return 1; // Failure.
1217 }
1218
1219 int64_t old_value = exclusive_access_value_;
1220 ClearExclusive();
1221
1222 auto atomic_addr = reinterpret_cast<RelaxedAtomic<int64_t>*>(addr);
1223 if (atomic_addr->compare_exchange_weak(old_value, value)) {
1224 return 0; // Success.
1225 }
1226 return 1; // Failure.
1227}
1228
1229intptr_t Simulator::WriteExclusiveW(uword addr, intptr_t value, Instr* instr) {
1230 // In well-formed code a store-exclusive instruction should always follow
1231 // a corresponding load-exclusive instruction with the same address.
1232 ASSERT((exclusive_access_addr_ == 0) || (exclusive_access_addr_ == addr));
1233 if (exclusive_access_addr_ != addr) {
1234 return 1; // Failure.
1235 }
1236
1237 int32_t old_value = static_cast<uint32_t>(exclusive_access_value_);
1238 ClearExclusive();
1239
1240 auto atomic_addr = reinterpret_cast<RelaxedAtomic<int32_t>*>(addr);
1241 if (atomic_addr->compare_exchange_weak(old_value, value)) {
1242 return 0; // Success.
1243 }
1244 return 1; // Failure.
1245}
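// A generated load-exclusive/store-exclusive retry loop might exercise the
// pair above roughly like this (illustrative assembly only):
//
//   retry:
//     ldxr  x1, [x0]       // ReadExclusiveX: records the address and value
//     add   x1, x1, #1
//     stxr  w2, x1, [x0]   // WriteExclusiveX: w2 = 0 on success, 1 on failure
//     cbnz  w2, retry
//
// The store-exclusive only reports success when the address matches the
// recorded reservation and the compare-exchange against the remembered value
// succeeds; otherwise the generated loop retries.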
1246
1247intptr_t Simulator::ReadAcquire(uword addr, Instr* instr) {
1248 // TODO(42074): Once we switch to C++20 we should change this to use
1249 // `std::atomic_ref<T>` which supports performing atomic operations on
1250 // non-atomic data.
1251 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1252 return reinterpret_cast<std::atomic<intptr_t>*>(addr)->load(
1253 std::memory_order_acquire);
1254}
1255
1256uint32_t Simulator::ReadAcquireW(uword addr, Instr* instr) {
1257 // TODO(42074): Once we switch to C++20 we should change this to use
1258 // `std::atomic_ref<T>` which supports performing atomic operations on
1259 // non-atomic data.
1260 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1261 return reinterpret_cast<std::atomic<uint32_t>*>(addr)->load(
1262 std::memory_order_acquire);
1263}
1264
1265void Simulator::WriteRelease(uword addr, intptr_t value, Instr* instr) {
1266 // TODO(42074): Once we switch to C++20 we should change this to use
1267 // `std::atomic_ref<T>` which supports performing atomic operations on
1268 // non-atomic data.
1269 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1270 reinterpret_cast<std::atomic<intptr_t>*>(addr)->store(
1271 value, std::memory_order_release);
1272}
1273
1274void Simulator::WriteReleaseW(uword addr, uint32_t value, Instr* instr) {
1275 // TODO(42074): Once we switch to C++20 we should change this to use
1276 // `std::atomic_ref<T>` which supports performing atomic operations on
1277 // non-atomic data.
1278 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1279 reinterpret_cast<std::atomic<uint32_t>*>(addr)->store(
1280 value, std::memory_order_release);
1281}
1282
1283// Unsupported instructions use Format to print an error and stop execution.
1284void Simulator::Format(Instr* instr, const char* format) {
1285 OS::PrintErr("Simulator found unsupported instruction:\n 0x%p: %s\n", instr,
1286 format);
1287 UNIMPLEMENTED();
1288}
1289
1290// Calculate and set the Negative and Zero flags.
1291void Simulator::SetNZFlagsW(int32_t val) {
1292 n_flag_ = (val < 0);
1293 z_flag_ = (val == 0);
1294}
1295
1296// Calculate C flag value for additions (and subtractions with adjusted args).
1297bool Simulator::CarryFromW(int32_t left, int32_t right, int32_t carry) {
1298 uint64_t uleft = static_cast<uint32_t>(left);
1299 uint64_t uright = static_cast<uint32_t>(right);
1300 uint64_t ucarry = static_cast<uint32_t>(carry);
1301 return ((uleft + uright + ucarry) >> 32) != 0;
1302}
1303
1304// Calculate V flag value for additions (and subtractions with adjusted args).
1305bool Simulator::OverflowFromW(int32_t left, int32_t right, int32_t carry) {
1306 int64_t result = static_cast<int64_t>(left) + right + carry;
1307 return (result >> 31) != (result >> 32);
1308}
1309
1310// Calculate and set the Negative and Zero flags.
1311void Simulator::SetNZFlagsX(int64_t val) {
1312 n_flag_ = (val < 0);
1313 z_flag_ = (val == 0);
1314}
1315
1316// Calculate C flag value for additions and subtractions.
1317bool Simulator::CarryFromX(int64_t alu_out,
1318 int64_t left,
1319 int64_t right,
1320 bool addition) {
1321 if (addition) {
1322 return (((left & right) | ((left | right) & ~alu_out)) >> 63) != 0;
1323 } else {
1324 return (((~left & right) | ((~left | right) & alu_out)) >> 63) == 0;
1325 }
1326}
1327
1328// Calculate V flag value for additions and subtractions.
1329bool Simulator::OverflowFromX(int64_t alu_out,
1330 int64_t left,
1331 int64_t right,
1332 bool addition) {
1333 if (addition) {
1334 return (((alu_out ^ left) & (alu_out ^ right)) >> 63) != 0;
1335 } else {
1336 return (((left ^ right) & (alu_out ^ left)) >> 63) != 0;
1337 }
1338}
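// Worked example for the X-form helpers above: adding
// left = 0x7FFFFFFFFFFFFFFF and right = 1 gives alu_out = 0x8000000000000000;
// CarryFromX(alu_out, left, right, true) is false (no carry out of bit 63),
// while OverflowFromX(alu_out, left, right, true) is true because two
// non-negative operands produced a negative result. For the W forms,
// CarryFromW(0xFFFFFFFF, 1, 0) is true (the unsigned sum needs 33 bits) and
// OverflowFromW(0x7FFFFFFF, 1, 0) is true (bits 31 and 32 of the widened sum
// differ).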
1339
1340// Set the Carry flag.
1341void Simulator::SetCFlag(bool val) {
1342 c_flag_ = val;
1343}
1344
1345// Set the oVerflow flag.
1346void Simulator::SetVFlag(bool val) {
1347 v_flag_ = val;
1348}
1349
1350void Simulator::DecodeMoveWide(Instr* instr) {
1351 const Register rd = instr->RdField();
1352 const int hw = instr->HWField();
1353 const int64_t shift = hw << 4;
1354 const int64_t shifted_imm = static_cast<int64_t>(instr->Imm16Field())
1355 << shift;
1356
1357 if (instr->SFField() != 0) {
1358 if (instr->Bits(29, 2) == 0) {
1359 // Format(instr, "movn'sf 'rd, 'imm16 'hw");
1360 set_register(instr, rd, ~shifted_imm, instr->RdMode());
1361 } else if (instr->Bits(29, 2) == 2) {
1362 // Format(instr, "movz'sf 'rd, 'imm16 'hw");
1363 set_register(instr, rd, shifted_imm, instr->RdMode());
1364 } else if (instr->Bits(29, 2) == 3) {
1365 // Format(instr, "movk'sf 'rd, 'imm16 'hw");
1366 const int64_t rd_val = get_register(rd, instr->RdMode());
1367 const int64_t result = (rd_val & ~(0xffffL << shift)) | shifted_imm;
1368 set_register(instr, rd, result, instr->RdMode());
1369 } else {
1370 UnimplementedInstruction(instr);
1371 }
1372 } else if ((hw & 0x2) == 0) {
1373 if (instr->Bits(29, 2) == 0) {
1374 // Format(instr, "movn'sf 'rd, 'imm16 'hw");
1375 set_wregister(rd, ~shifted_imm & kWRegMask, instr->RdMode());
1376 } else if (instr->Bits(29, 2) == 2) {
1377 // Format(instr, "movz'sf 'rd, 'imm16 'hw");
1378 set_wregister(rd, shifted_imm & kWRegMask, instr->RdMode());
1379 } else if (instr->Bits(29, 2) == 3) {
1380 // Format(instr, "movk'sf 'rd, 'imm16 'hw");
1381 const int32_t rd_val = get_wregister(rd, instr->RdMode());
1382 const int32_t result = (rd_val & ~(0xffffL << shift)) | shifted_imm;
1383 set_wregister(rd, result, instr->RdMode());
1384 } else {
1385 UnimplementedInstruction(instr);
1386 }
1387 } else {
1388 // Dest is 32 bits, but shift is more than 32.
1389 UnimplementedInstruction(instr);
1390 }
1391}
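// Worked example: "movz x0, #0xf00d, lsl #16" has hw = 1, so shift = 16 and
// shifted_imm = 0xf00d0000, which becomes the new value of x0. A following
// "movk x0, #0xbeef" (hw = 0) keeps every halfword of x0 except bits [15:0]
// and merges in the immediate, leaving x0 = 0xf00dbeef. The movn form writes
// the bitwise complement of shifted_imm instead.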
1392
1393void Simulator::DecodeAddSubImm(Instr* instr) {
1394 const bool addition = (instr->Bit(30) == 0);
1395 // Format(instr, "addi'sf's 'rd, 'rn, 'imm12s");
1396 // Format(instr, "subi'sf's 'rd, 'rn, 'imm12s");
1397 const Register rd = instr->RdField();
1398 const Register rn = instr->RnField();
1399 uint32_t imm = (instr->Bit(22) == 1) ? (instr->Imm12Field() << 12)
1400 : (instr->Imm12Field());
1401 if (instr->SFField() != 0) {
1402 // 64-bit add.
1403 const uint64_t rn_val = get_register(rn, instr->RnMode());
1404 const uint64_t alu_out = addition ? (rn_val + imm) : (rn_val - imm);
1405 set_register(instr, rd, alu_out, instr->RdMode());
1406 if (instr->HasS()) {
1407 SetNZFlagsX(alu_out);
1408 SetCFlag(CarryFromX(alu_out, rn_val, imm, addition));
1409 SetVFlag(OverflowFromX(alu_out, rn_val, imm, addition));
1410 }
1411 } else {
1412 // 32-bit add.
1413 const uint32_t rn_val = get_wregister(rn, instr->RnMode());
1414 uint32_t carry_in = 0;
1415 if (!addition) {
1416 carry_in = 1;
1417 imm = ~imm;
1418 }
1419 const uint32_t alu_out = rn_val + imm + carry_in;
1420 set_wregister(rd, alu_out, instr->RdMode());
1421 if (instr->HasS()) {
1422 SetNZFlagsW(alu_out);
1423 SetCFlag(CarryFromW(rn_val, imm, carry_in));
1424 SetVFlag(OverflowFromW(rn_val, imm, carry_in));
1425 }
1426 }
1427}
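// Worked example of the 32-bit subtract path above: for "subs w0, w1, #1"
// with w1 = 0, the immediate is complemented to 0xFFFFFFFE and carry_in is 1,
// so alu_out = 0 + 0xFFFFFFFE + 1 = 0xFFFFFFFF. CarryFromW(0, 0xFFFFFFFE, 1)
// is then false, matching ARM64 semantics: a subtraction that borrows leaves
// the C flag clear.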
1428
1429void Simulator::DecodeBitfield(Instr* instr) {
1430 int bitwidth = instr->SFField() == 0 ? 32 : 64;
1431 unsigned op = instr->Bits(29, 2);
1432 ASSERT(op <= 2);
1433 bool sign_extend = op == 0;
1434 bool zero_extend = op == 2;
1435 ASSERT(instr->NField() == instr->SFField());
1436 const Register rn = instr->RnField();
1437 const Register rd = instr->RdField();
1438 int64_t result = get_register(rn, instr->RnMode());
1439 int r_bit = instr->ImmRField();
1440 int s_bit = instr->ImmSField();
1441 result &= Utils::NBitMask(bitwidth);
1442 ASSERT(s_bit < bitwidth && r_bit < bitwidth);
1443 // See ARM v8 Instruction set overview 5.4.5.
1444 // If s >= r then Rd[s-r:0] := Rn[s:r], else Rd[bitwidth+s-r:bitwidth-r] :=
1445 // Rn[s:0].
1446 uword mask = Utils::NBitMask(s_bit + 1);
1447 if (s_bit >= r_bit) {
1448 mask >>= r_bit;
1449 result >>= r_bit;
1450 } else {
1451 result = static_cast<uint64_t>(result) << (bitwidth - r_bit);
1452 mask <<= bitwidth - r_bit;
1453 }
1454 result &= mask;
1455 if (sign_extend) {
1456 int highest_bit = (s_bit - r_bit) & (bitwidth - 1);
1457 int shift = 64 - highest_bit - 1;
1458 result <<= shift;
1459 result = static_cast<word>(result) >> shift;
1460 } else if (!zero_extend) {
1461 const int64_t rd_val = get_register(rd, instr->RnMode());
1462 result |= rd_val & ~mask;
1463 }
1464 if (bitwidth == 64) {
1465 set_register(instr, rd, result, instr->RdMode());
1466 } else {
1467 set_wregister(rd, result, instr->RdMode());
1468 }
1469}
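// Worked example: "ubfx x0, x1, #8, #8" is the ubfm encoding with r_bit = 8
// and s_bit = 15 (the zero_extend case). Since s_bit >= r_bit, both the value
// and the mask are shifted right by 8, so x0 receives bits [15:8] of x1 in its
// low byte with everything else cleared. The sbfm form additionally
// sign-extends from the highest extracted bit, and the plain bfm form merges
// the extracted field into the existing bits of rd.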
1470
1471void Simulator::DecodeLogicalImm(Instr* instr) {
1472 const int op = instr->Bits(29, 2);
1473 const bool set_flags = op == 3;
1474 const int out_size = ((instr->SFField() == 0) && (instr->NField() == 0))
1475 ? kWRegSizeInBits
1476 : kXRegSizeInBits;
1477 const Register rn = instr->RnField();
1478 const Register rd = instr->RdField();
1479 const int64_t rn_val = get_register(rn, instr->RnMode());
1480 const uint64_t imm = instr->ImmLogical();
1481 if (imm == 0) {
1482 UnimplementedInstruction(instr);
1483 }
1484
1485 int64_t alu_out = 0;
1486 switch (op) {
1487 case 0:
1488 alu_out = rn_val & imm;
1489 break;
1490 case 1:
1491 alu_out = rn_val | imm;
1492 break;
1493 case 2:
1494 alu_out = rn_val ^ imm;
1495 break;
1496 case 3:
1497 alu_out = rn_val & imm;
1498 break;
1499 default:
1500 UNREACHABLE();
1501 break;
1502 }
1503
1504 if (set_flags) {
1505 if (out_size == kXRegSizeInBits) {
1506 SetNZFlagsX(alu_out);
1507 } else {
1508 SetNZFlagsW(alu_out);
1509 }
1510 SetCFlag(false);
1511 SetVFlag(false);
1512 }
1513
1514 if (out_size == kXRegSizeInBits) {
1515 set_register(instr, rd, alu_out, instr->RdMode());
1516 } else {
1517 set_wregister(rd, alu_out, instr->RdMode());
1518 }
1519}
1520
1521void Simulator::DecodePCRel(Instr* instr) {
1522 const int op = instr->Bit(31);
1523 if (op == 0) {
1524 // Format(instr, "adr 'rd, 'pcrel")
1525 const Register rd = instr->RdField();
1526 const uint64_t immhi = instr->SImm19Field();
1527 const uint64_t immlo = instr->Bits(29, 2);
1528 const uint64_t off = (immhi << 2) | immlo;
1529 const uint64_t dest = get_pc() + off;
1530 set_register(instr, rd, dest, instr->RdMode());
1531 } else {
1532 UnimplementedInstruction(instr);
1533 }
1534}
1535
1536void Simulator::DecodeDPImmediate(Instr* instr) {
1537 if (instr->IsMoveWideOp()) {
1538 DecodeMoveWide(instr);
1539 } else if (instr->IsAddSubImmOp()) {
1540 DecodeAddSubImm(instr);
1541 } else if (instr->IsBitfieldOp()) {
1542 DecodeBitfield(instr);
1543 } else if (instr->IsLogicalImmOp()) {
1544 DecodeLogicalImm(instr);
1545 } else if (instr->IsPCRelOp()) {
1546 DecodePCRel(instr);
1547 } else {
1548 UnimplementedInstruction(instr);
1549 }
1550}
1551
1552void Simulator::DecodeCompareAndBranch(Instr* instr) {
1553 const int op = instr->Bit(24);
1554 const Register rt = instr->RtField();
1555 const uint64_t imm19 = instr->SImm19Field();
1556 const uint64_t dest = get_pc() + (imm19 << 2);
1557 const uint64_t mask = instr->SFField() == 1 ? kXRegMask : kWRegMask;
1558 const uint64_t rt_val = get_register(rt, R31IsZR) & mask;
1559 if (op == 0) {
1560 // Format(instr, "cbz'sf 'rt, 'dest19");
1561 if (rt_val == 0) {
1562 set_pc(dest);
1563 }
1564 } else {
1565 // Format(instr, "cbnz'sf 'rt, 'dest19");
1566 if (rt_val != 0) {
1567 set_pc(dest);
1568 }
1569 }
1570}
1571
1572bool Simulator::ConditionallyExecute(Instr* instr) {
1573 Condition cond;
1574 if (instr->IsConditionalSelectOp()) {
1575 cond = instr->SelectConditionField();
1576 } else {
1577 cond = instr->ConditionField();
1578 }
1579 switch (cond) {
1580 case EQ:
1581 return z_flag_;
1582 case NE:
1583 return !z_flag_;
1584 case CS:
1585 return c_flag_;
1586 case CC:
1587 return !c_flag_;
1588 case MI:
1589 return n_flag_;
1590 case PL:
1591 return !n_flag_;
1592 case VS:
1593 return v_flag_;
1594 case VC:
1595 return !v_flag_;
1596 case HI:
1597 return c_flag_ && !z_flag_;
1598 case LS:
1599 return !c_flag_ || z_flag_;
1600 case GE:
1601 return n_flag_ == v_flag_;
1602 case LT:
1603 return n_flag_ != v_flag_;
1604 case GT:
1605 return !z_flag_ && (n_flag_ == v_flag_);
1606 case LE:
1607 return z_flag_ || (n_flag_ != v_flag_);
1608 case AL:
1609 return true;
1610 default:
1611 UNREACHABLE();
1612 }
1613 return false;
1614}
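// For example, after generated code executes "cmp x0, x1" (a flag-setting
// subtract), GE/LT above reflect the signed comparison x0 >= x1 / x0 < x1,
// while CS/CC reflect the corresponding unsigned comparison, matching the
// ARM64 condition-code definitions.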
1615
1616void Simulator::DecodeConditionalBranch(Instr* instr) {
1617 // Format(instr, "b'cond 'dest19");
1618 if ((instr->Bit(24) != 0) || (instr->Bit(4) != 0)) {
1619 UnimplementedInstruction(instr);
1620 }
1621 const uint64_t imm19 = instr->SImm19Field();
1622 const uint64_t dest = get_pc() + (imm19 << 2);
1623 if (ConditionallyExecute(instr)) {
1624 set_pc(dest);
1625 }
1626}
1627
1628// Calls into the Dart runtime are based on this interface.
1629typedef void (*SimulatorRuntimeCall)(NativeArguments arguments);
1630
1631// Calls to leaf Dart runtime functions are based on this interface.
1632typedef int64_t (*SimulatorLeafRuntimeCall)(int64_t r0,
1633 int64_t r1,
1634 int64_t r2,
1635 int64_t r3,
1636 int64_t r4,
1637 int64_t r5,
1638 int64_t r6,
1639 int64_t r7);
1640
1641// [target] has several different signatures that differ from
1642// SimulatorLeafRuntimeCall. We can call them all from here only because in
1643// X64's calling conventions a function can be called with extra arguments
1644// and the callee will see the first arguments and won't unbalance the stack.
1645NO_SANITIZE_UNDEFINED("function")
1646static int64_t InvokeLeafRuntime(SimulatorLeafRuntimeCall target,
1647 int64_t r0,
1648 int64_t r1,
1649 int64_t r2,
1650 int64_t r3,
1651 int64_t r4,
1652 int64_t r5,
1653 int64_t r6,
1654 int64_t r7) {
1655 return target(r0, r1, r2, r3, r4, r5, r6, r7);
1656}
1657
1658// Calls to leaf float Dart runtime functions are based on this interface.
1659typedef double (*SimulatorLeafFloatRuntimeCall)(double d0,
1660 double d1,
1661 double d2,
1662 double d3,
1663 double d4,
1664 double d5,
1665 double d6,
1666 double d7);
1667
1668// [target] has several different signatures that differ from
1669// SimulatorLeafFloatRuntimeCall. We can call them all from here only because in
1670// X64's calling conventions a function can be called with extra arguments
1671// and the callee will see the first arguments and won't unbalance the stack.
1672NO_SANITIZE_UNDEFINED("function")
1673static double InvokeFloatLeafRuntime(SimulatorLeafFloatRuntimeCall target,
1674 double d0,
1675 double d1,
1676 double d2,
1677 double d3,
1678 double d4,
1679 double d5,
1680 double d6,
1681 double d7) {
1682 return target(d0, d1, d2, d3, d4, d5, d6, d7);
1683}
1684
1685// Calls to native Dart functions are based on this interface.
1686typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
1688
1689void Simulator::DoRedirectedCall(Instr* instr) {
1690 SimulatorSetjmpBuffer buffer(this);
1691 if (!setjmp(buffer.buffer_)) {
1692 int64_t saved_lr = get_register(LR);
1693 Redirection* redirection = Redirection::FromHltInstruction(instr);
1694 uword external = redirection->external_function();
1695 if (IsTracingExecution()) {
1696 THR_Print("Call to host function at 0x%" Pd "\n", external);
1697 }
1698
1699 if (redirection->call_kind() == kRuntimeCall) {
1700 NativeArguments* arguments =
1701 reinterpret_cast<NativeArguments*>(get_register(R0));
1702 SimulatorRuntimeCall target =
1703 reinterpret_cast<SimulatorRuntimeCall>(external);
1704 target(*arguments);
1705 ClobberVolatileRegisters();
1706 } else if (redirection->call_kind() == kLeafRuntimeCall) {
1707 ASSERT((0 <= redirection->argument_count()) &&
1708 (redirection->argument_count() <= 8));
1709 SimulatorLeafRuntimeCall target =
1710 reinterpret_cast<SimulatorLeafRuntimeCall>(external);
1711 const int64_t r0 = get_register(R0);
1712 const int64_t r1 = get_register(R1);
1713 const int64_t r2 = get_register(R2);
1714 const int64_t r3 = get_register(R3);
1715 const int64_t r4 = get_register(R4);
1716 const int64_t r5 = get_register(R5);
1717 const int64_t r6 = get_register(R6);
1718 const int64_t r7 = get_register(R7);
1719 const int64_t res =
1720 InvokeLeafRuntime(target, r0, r1, r2, r3, r4, r5, r6, r7);
1721 ClobberVolatileRegisters();
1722 set_register(instr, R0, res); // Set returned result from function.
1723 } else if (redirection->call_kind() == kLeafFloatRuntimeCall) {
1724 ASSERT((0 <= redirection->argument_count()) &&
1725 (redirection->argument_count() <= 8));
1726 SimulatorLeafFloatRuntimeCall target =
1727 reinterpret_cast<SimulatorLeafFloatRuntimeCall>(external);
1728 const double d0 = bit_cast<double, int64_t>(get_vregisterd(V0, 0));
1729 const double d1 = bit_cast<double, int64_t>(get_vregisterd(V1, 0));
1730 const double d2 = bit_cast<double, int64_t>(get_vregisterd(V2, 0));
1731 const double d3 = bit_cast<double, int64_t>(get_vregisterd(V3, 0));
1732 const double d4 = bit_cast<double, int64_t>(get_vregisterd(V4, 0));
1733 const double d5 = bit_cast<double, int64_t>(get_vregisterd(V5, 0));
1734 const double d6 = bit_cast<double, int64_t>(get_vregisterd(V6, 0));
1735 const double d7 = bit_cast<double, int64_t>(get_vregisterd(V7, 0));
1736 const double res =
1737 InvokeFloatLeafRuntime(target, d0, d1, d2, d3, d4, d5, d6, d7);
1738 ClobberVolatileRegisters();
1739 set_vregisterd(V0, 0, bit_cast<int64_t, double>(res));
1740 set_vregisterd(V0, 1, 0);
1741 } else {
1742 ASSERT(redirection->call_kind() == kNativeCallWrapper);
1743 SimulatorNativeCallWrapper wrapper =
1744 reinterpret_cast<SimulatorNativeCallWrapper>(external);
1745 Dart_NativeArguments arguments =
1746 reinterpret_cast<Dart_NativeArguments>(get_register(R0));
1747 Dart_NativeFunction target =
1748 reinterpret_cast<Dart_NativeFunction>(get_register(R1));
1749 wrapper(arguments, target);
1750 ClobberVolatileRegisters();
1751 }
1752
1753 // Return.
1754 set_pc(saved_lr);
1755 } else {
1756 // Coming via long jump from a throw. Continue to exception handler.
1757 }
1758}
1759
1760void Simulator::ClobberVolatileRegisters() {
1761 // Clear atomic reservation.
1762 exclusive_access_addr_ = exclusive_access_value_ = 0;
1763
1764 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1765 if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
1766 registers_[i] = icount_;
1767 }
1768 }
1769
1770 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1771 if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
1772 vregisters_[i].bits.i64[0] = icount_;
1773 vregisters_[i].bits.i64[1] = icount_;
1774 }
1775 }
1776}
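
// Note: the caller-saved registers are deliberately filled with icount_
// rather than left untouched, so simulated code that wrongly assumes a
// volatile register survives a runtime call fails quickly, and with a value
// that points at the instruction count of the clobbering call.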
1777
1778void Simulator::DecodeExceptionGen(Instr* instr) {
1779 if ((instr->Bits(0, 2) == 1) && (instr->Bits(2, 3) == 0) &&
1780 (instr->Bits(21, 3) == 0)) {
1781 // Format(instr, "svc 'imm16");
1782 UnimplementedInstruction(instr);
1783 } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
1784 (instr->Bits(21, 3) == 1)) {
1785 // Format(instr, "brk 'imm16");
1786 SimulatorDebugger dbg(this);
1787 int32_t imm = instr->Imm16Field();
1788 char buffer[32];
1789 snprintf(buffer, sizeof(buffer), "brk #0x%x", imm);
1790 set_pc(get_pc() + Instr::kInstrSize);
1791 dbg.Stop(instr, buffer);
1792 } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
1793 (instr->Bits(21, 3) == 2)) {
1794 // Format(instr, "hlt 'imm16");
1795 uint16_t imm = static_cast<uint16_t>(instr->Imm16Field());
1796 if (imm == Instr::kSimulatorBreakCode) {
1797 SimulatorDebugger dbg(this);
1798 dbg.Stop(instr, "breakpoint");
1799 } else if (imm == Instr::kSimulatorRedirectCode) {
1800 DoRedirectedCall(instr);
1801 } else {
1802 UnimplementedInstruction(instr);
1803 }
1804 } else {
1805 UnimplementedInstruction(instr);
1806 }
1807}
1808
1809void Simulator::DecodeSystem(Instr* instr) {
1810 if (instr->InstructionBits() == CLREX) {
1811 // Format(instr, "clrex");
1812 ClearExclusive();
1813 return;
1814 }
1815
1816 if ((instr->Bits(0, 8) == 0x1f) && (instr->Bits(12, 4) == 2) &&
1817 (instr->Bits(16, 3) == 3) && (instr->Bits(19, 2) == 0) &&
1818 (instr->Bit(21) == 0)) {
1819 if (instr->Bits(8, 4) == 0) {
1820 // Format(instr, "nop");
1821 } else {
1822 UnimplementedInstruction(instr);
1823 }
1824 } else {
1825 UnimplementedInstruction(instr);
1826 }
1827}
1828
1829void Simulator::DecodeTestAndBranch(Instr* instr) {
1830 const int op = instr->Bit(24);
1831 const int bitpos = instr->Bits(19, 5) | (instr->Bit(31) << 5);
1832 const uint64_t imm14 = instr->SImm14Field();
1833 const uint64_t dest = get_pc() + (imm14 << 2);
1834 const Register rt = instr->RtField();
1835 const uint64_t rt_val = get_register(rt, R31IsZR);
1836 if (op == 0) {
1837 // Format(instr, "tbz'sf 'rt, 'bitpos, 'dest14");
1838 if ((rt_val & (1ull << bitpos)) == 0) {
1839 set_pc(dest);
1840 }
1841 } else {
1842 // Format(instr, "tbnz'sf 'rt, 'bitpos, 'dest14");
1843 if ((rt_val & (1ull << bitpos)) != 0) {
1844 set_pc(dest);
1845 }
1846 }
1847}
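
// Sketch of the tbz/tbnz test above with a hypothetical helper: the bit
// position (0..63, with instruction bit 31 supplying the high bit) selects
// one bit of rt, and tbz branches when that bit is clear.
static bool TbzTaken(uint64_t rt_val, int bitpos) {
  return (rt_val & (1ull << bitpos)) == 0;
}
// Example: TbzTaken(0x10, 4) is false, so a tbz on bit 4 falls through.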
1848
1849void Simulator::DecodeUnconditionalBranch(Instr* instr) {
1850 const bool link = instr->Bit(31) == 1;
1851 const uint64_t imm26 = instr->SImm26Field();
1852 const uint64_t dest = get_pc() + (imm26 << 2);
1853 const uint64_t ret = get_pc() + Instr::kInstrSize;
1854 set_pc(dest);
1855 if (link) {
1856 set_register(instr, LR, ret);
1857 }
1858}
1859
1860void Simulator::DecodeUnconditionalBranchReg(Instr* instr) {
1861 if ((instr->Bits(0, 5) == 0) && (instr->Bits(10, 6) == 0) &&
1862 (instr->Bits(16, 5) == 0x1f)) {
1863 switch (instr->Bits(21, 4)) {
1864 case 0: {
1865 // Format(instr, "br 'rn");
1866 const Register rn = instr->RnField();
1867 const int64_t dest = get_register(rn, instr->RnMode());
1868 set_pc(dest);
1869 break;
1870 }
1871 case 1: {
1872 // Format(instr, "blr 'rn");
1873 const Register rn = instr->RnField();
1874 const int64_t dest = get_register(rn, instr->RnMode());
1875 const int64_t ret = get_pc() + Instr::kInstrSize;
1876 set_pc(dest);
1877 set_register(instr, LR, ret);
1878 break;
1879 }
1880 case 2: {
1881 // Format(instr, "ret 'rn");
1882 const Register rn = instr->RnField();
1883 const int64_t rn_val = get_register(rn, instr->RnMode());
1884 set_pc(rn_val);
1885 break;
1886 }
1887 default:
1888 UnimplementedInstruction(instr);
1889 break;
1890 }
1891 } else {
1892 UnimplementedInstruction(instr);
1893 }
1894}
1895
1896DART_FORCE_INLINE
1897void Simulator::DecodeCompareBranch(Instr* instr) {
1898 if (instr->IsCompareAndBranchOp()) {
1899 DecodeCompareAndBranch(instr);
1900 } else if (instr->IsConditionalBranchOp()) {
1901 DecodeConditionalBranch(instr);
1902 } else if (instr->IsExceptionGenOp()) {
1903 DecodeExceptionGen(instr);
1904 } else if (instr->IsSystemOp()) {
1905 DecodeSystem(instr);
1906 } else if (instr->IsTestAndBranchOp()) {
1907 DecodeTestAndBranch(instr);
1908 } else if (instr->IsUnconditionalBranchOp()) {
1909 DecodeUnconditionalBranch(instr);
1910 } else if (instr->IsUnconditionalBranchRegOp()) {
1911 DecodeUnconditionalBranchReg(instr);
1912 } else {
1913 UnimplementedInstruction(instr);
1914 }
1915}
1916
1917void Simulator::DecodeLoadStoreReg(Instr* instr) {
1918 // Calculate the address.
1919 const Register rn = instr->RnField();
1920 const Register rt = instr->RtField();
1921 const VRegister vt = instr->VtField();
1922 const int64_t rn_val = get_register(rn, R31IsSP);
1923 const uint32_t size = (instr->Bit(26) == 1)
1924 ? ((instr->Bit(23) << 2) | instr->SzField())
1925 : instr->SzField();
1926 uword address = 0;
1927 uword wb_address = 0;
1928 bool wb = false;
1929 if (instr->Bit(24) == 1) {
1930 // addr = rn + scaled unsigned 12-bit immediate offset.
1931 const uint32_t imm12 = static_cast<uint32_t>(instr->Imm12Field());
1932 const uint32_t offset = imm12 << size;
1933 address = rn_val + offset;
1934 } else if (instr->Bits(10, 2) == 0) {
1935 // addr = rn + signed 9-bit immediate offset.
1936 wb = false;
1937 const int64_t offset = static_cast<int64_t>(instr->SImm9Field());
1938 address = rn_val + offset;
1939 wb_address = rn_val;
1940 } else if (instr->Bit(10) == 1) {
1941 // addr = rn + signed 9-bit immediate offset.
1942 wb = true;
1943 const int64_t offset = static_cast<int64_t>(instr->SImm9Field());
1944 if (instr->Bit(11) == 1) {
1945 // Pre-index.
1946 address = rn_val + offset;
1947 wb_address = address;
1948 } else {
1949 // Post-index.
1950 address = rn_val;
1951 wb_address = rn_val + offset;
1952 }
1953 } else if (instr->Bits(10, 2) == 2) {
1954 // addr = rn + (rm EXT optionally scaled by operand instruction size).
1955 const Register rm = instr->RmField();
1956 const Extend ext = instr->ExtendTypeField();
1957 const uint8_t scale = (ext == UXTX) && (instr->Bit(12) == 1) ? size : 0;
1958 const int64_t rm_val = get_register(rm, R31IsZR);
1959 const int64_t offset = ExtendOperand(kXRegSizeInBits, rm_val, ext, scale);
1960 address = rn_val + offset;
1961 } else {
1962 UnimplementedInstruction(instr);
1963 return;
1964 }
1965
1966 // Check the address.
1967 if (IsIllegalAddress(address)) {
1968 HandleIllegalAccess(address, instr);
1969 return;
1970 }
1971
1972 // Do access.
1973 if (instr->Bit(26) == 1) {
1974 if (instr->Bit(22) == 0) {
1975 // Format(instr, "fstr'fsz 'vt, 'memop");
1976 const int64_t vt_val = get_vregisterd(vt, 0);
1977 switch (size) {
1978 case 2:
1979 WriteW(address, vt_val & kWRegMask, instr);
1980 break;
1981 case 3:
1982 WriteX(address, vt_val, instr);
1983 break;
1984 case 4: {
1985 simd_value_t val;
1986 get_vregister(vt, &val);
1987 WriteX(address, val.bits.i64[0], instr);
1988 WriteX(address + kWordSize, val.bits.i64[1], instr);
1989 break;
1990 }
1991 default:
1992 UnimplementedInstruction(instr);
1993 return;
1994 }
1995 } else {
1996 // Format(instr, "fldr'fsz 'vt, 'memop");
1997 switch (size) {
1998 case 2:
1999 set_vregisterd(vt, 0, static_cast<int64_t>(ReadWU(address, instr)));
2000 set_vregisterd(vt, 1, 0);
2001 break;
2002 case 3:
2003 set_vregisterd(vt, 0, ReadX(address, instr));
2004 set_vregisterd(vt, 1, 0);
2005 break;
2006 case 4: {
2007 simd_value_t val;
2008 val.bits.i64[0] = ReadX(address, instr);
2009 val.bits.i64[1] = ReadX(address + kWordSize, instr);
2010 set_vregister(vt, val);
2011 break;
2012 }
2013 default:
2014 UnimplementedInstruction(instr);
2015 return;
2016 }
2017 }
2018 } else {
2019 if (instr->Bits(22, 2) == 0) {
2020 // Format(instr, "str'sz 'rt, 'memop");
2021 const int32_t rt_val32 = get_wregister(rt, R31IsZR);
2022 switch (size) {
2023 case 0: {
2024 const uint8_t val = static_cast<uint8_t>(rt_val32);
2025 WriteB(address, val);
2026 break;
2027 }
2028 case 1: {
2029 const uint16_t val = static_cast<uint16_t>(rt_val32);
2030 WriteH(address, val, instr);
2031 break;
2032 }
2033 case 2: {
2034 const uint32_t val = static_cast<uint32_t>(rt_val32);
2035 WriteW(address, val, instr);
2036 break;
2037 }
2038 case 3: {
2039 const int64_t val = get_register(rt, R31IsZR);
2040 WriteX(address, val, instr);
2041 break;
2042 }
2043 default:
2044 UNREACHABLE();
2045 break;
2046 }
2047 } else {
2048 // Format(instr, "ldr'sz 'rt, 'memop");
2049 // Undefined case.
2050 if ((size == 3) && (instr->Bits(22, 2) == 3)) {
2051 UnimplementedInstruction(instr);
2052 return;
2053 }
2054
2055 // Read the value.
2056 const bool is_signed = instr->Bit(23) == 1;
2057 // Write the W register for signed values when size < 2.
2058 // Write the W register for unsigned values when size == 2.
2059 const bool use_w =
2060 (is_signed && (instr->Bit(22) == 1)) || (!is_signed && (size == 2));
2061 int64_t val = 0; // Sign extend into an int64_t.
2062 switch (size) {
2063 case 0: {
2064 if (is_signed) {
2065 val = static_cast<int64_t>(ReadB(address));
2066 } else {
2067 val = static_cast<int64_t>(ReadBU(address));
2068 }
2069 break;
2070 }
2071 case 1: {
2072 if (is_signed) {
2073 val = static_cast<int64_t>(ReadH(address, instr));
2074 } else {
2075 val = static_cast<int64_t>(ReadHU(address, instr));
2076 }
2077 break;
2078 }
2079 case 2: {
2080 if (is_signed) {
2081 val = static_cast<int64_t>(ReadW(address, instr));
2082 } else {
2083 val = static_cast<int64_t>(ReadWU(address, instr));
2084 }
2085 break;
2086 }
2087 case 3:
2088 val = ReadX(address, instr);
2089 break;
2090 default:
2091 UNREACHABLE();
2092 break;
2093 }
2094
2095 // Write to register.
2096 if (use_w) {
2097 set_wregister(rt, static_cast<int32_t>(val), R31IsZR);
2098 } else {
2099 set_register(instr, rt, val, R31IsZR);
2100 }
2101 }
2102 }
2103
2104 // Do writeback.
2105 if (wb) {
2106 set_register(instr, rn, wb_address, R31IsSP);
2107 }
2108}
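
// A compact restatement of the writeback rules handled above, as a
// hypothetical helper: pre-index accesses at base+offset and writes that back,
// post-index accesses the old base and writes back base+offset, and the plain
// immediate/register forms perform no writeback at all.
struct IndexedAddress {
  uword access;     // Address used for the load or store.
  uword writeback;  // Value written back to the base register (if any).
};
static IndexedAddress IndexAddress(uword base, int64_t offset, bool pre_index) {
  const uword updated = base + offset;
  return pre_index ? IndexedAddress{updated, updated}
                   : IndexedAddress{base, updated};
}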
2109
2110void Simulator::DecodeLoadStoreRegPair(Instr* instr) {
2111 const int32_t opc = instr->Bits(23, 3);
2112 const Register rn = instr->RnField();
2113 const int64_t rn_val = get_register(rn, R31IsSP);
2114 const intptr_t shift =
2115 (instr->Bit(26) == 1) ? 2 + instr->SzField() : 2 + instr->SFField();
2116 const intptr_t size = 1 << shift;
2117 const int32_t offset = (static_cast<uint32_t>(instr->SImm7Field()) << shift);
2118 uword address = 0;
2119 uword wb_address = 0;
2120 bool wb = false;
2121
2122 if ((instr->Bits(30, 2) == 3)) {
2123 UnimplementedInstruction(instr);
2124 return;
2125 }
2126
2127 // Calculate address.
2128 switch (opc) {
2129 case 1:
2130 address = rn_val;
2131 wb_address = rn_val + offset;
2132 wb = true;
2133 break;
2134 case 2:
2135 address = rn_val + offset;
2136 break;
2137 case 3:
2138 address = rn_val + offset;
2139 wb_address = address;
2140 wb = true;
2141 break;
2142 default:
2143 UnimplementedInstruction(instr);
2144 return;
2145 }
2146
2147 // Check the address.
2148 if (IsIllegalAddress(address)) {
2149 HandleIllegalAccess(address, instr);
2150 return;
2151 }
2152
2153 // Do access.
2154 if (instr->Bit(26) == 1) {
2155 // SIMD/FP.
2156 const VRegister vt = instr->VtField();
2157 const VRegister vt2 = instr->Vt2Field();
2158 if (instr->Bit(22) != 0) {
2159 // Format(instr, "ldp 'vt, 'vt2, 'memop");
2160 switch (size) {
2161 case 4:
2162 set_vregisterd(vt, 0, static_cast<int64_t>(ReadWU(address, instr)));
2163 set_vregisterd(vt, 1, 0);
2164 set_vregisterd(vt2, 0,
2165 static_cast<int64_t>(ReadWU(address + 4, instr)));
2166 set_vregisterd(vt2, 1, 0);
2167 break;
2168 case 8:
2169 set_vregisterd(vt, 0, ReadX(address, instr));
2170 set_vregisterd(vt, 1, 0);
2171 set_vregisterd(vt2, 0, ReadX(address + 8, instr));
2172 set_vregisterd(vt2, 1, 0);
2173 break;
2174 case 16: {
2175 simd_value_t val;
2176 val.bits.i64[0] = ReadX(address, instr);
2177 val.bits.i64[1] = ReadX(address + 8, instr);
2178 set_vregister(vt, val);
2179 val.bits.i64[0] = ReadX(address + 16, instr);
2180 val.bits.i64[1] = ReadX(address + 24, instr);
2181 set_vregister(vt2, val);
2182 break;
2183 }
2184 default:
2185 UnimplementedInstruction(instr);
2186 return;
2187 }
2188 } else {
2189 // Format(instr, "stp 'vt, 'vt2, 'memop");
2190 switch (size) {
2191 case 4:
2192 WriteW(address, get_vregisterd(vt, 0) & kWRegMask, instr);
2193 WriteW(address + 4, get_vregisterd(vt2, 0) & kWRegMask, instr);
2194 break;
2195 case 8:
2196 WriteX(address, get_vregisterd(vt, 0), instr);
2197 WriteX(address + 8, get_vregisterd(vt2, 0), instr);
2198 break;
2199 case 16: {
2200 simd_value_t val;
2201 get_vregister(vt, &val);
2202 WriteX(address, val.bits.i64[0], instr);
2203 WriteX(address + 8, val.bits.i64[1], instr);
2204 get_vregister(vt2, &val);
2205 WriteX(address + 16, val.bits.i64[0], instr);
2206 WriteX(address + 24, val.bits.i64[1], instr);
2207 break;
2208 }
2209 default:
2210 UnimplementedInstruction(instr);
2211 return;
2212 }
2213 }
2214 } else {
2215 // Integer.
2216 const Register rt = instr->RtField();
2217 const Register rt2 = instr->Rt2Field();
2218 if (instr->Bit(22) != 0) {
2219 // Format(instr, "ldp'sf 'rt, 'rt2, 'memop");
2220 const bool is_signed = instr->Bit(30) == 1;
2221 int64_t val1 = 0; // Sign extend into an int64_t.
2222 int64_t val2 = 0;
2223 if (instr->Bit(31) == 1) {
2224 // 64-bit read.
2225 val1 = ReadX(address, instr);
2226 val2 = ReadX(address + size, instr);
2227 } else {
2228 if (is_signed) {
2229 val1 = static_cast<int64_t>(ReadW(address, instr));
2230 val2 = static_cast<int64_t>(ReadW(address + size, instr));
2231 } else {
2232 val1 = static_cast<int64_t>(ReadWU(address, instr));
2233 val2 = static_cast<int64_t>(ReadWU(address + size, instr));
2234 }
2235 }
2236 // Write to register.
2237 if (instr->Bit(31) == 1) {
2238 set_register(instr, rt, val1, R31IsZR);
2239 set_register(instr, rt2, val2, R31IsZR);
2240 } else {
2241 set_wregister(rt, static_cast<int32_t>(val1), R31IsZR);
2242 set_wregister(rt2, static_cast<int32_t>(val2), R31IsZR);
2243 }
2244 } else {
2245 // Format(instr, "stp'sf 'rt, 'rt2, 'memop");
2246 if (instr->Bit(31) == 1) {
2247 const int64_t val1 = get_register(rt, R31IsZR);
2248 const int64_t val2 = get_register(rt2, R31IsZR);
2249 WriteX(address, val1, instr);
2250 WriteX(address + size, val2, instr);
2251 } else {
2252 const int32_t val1 = get_wregister(rt, R31IsZR);
2253 const int32_t val2 = get_wregister(rt2, R31IsZR);
2254 WriteW(address, val1, instr);
2255 WriteW(address + size, val2, instr);
2256 }
2257 }
2258 }
2259
2260 // Do writeback.
2261 if (wb) {
2262 set_register(instr, rn, wb_address, R31IsSP);
2263 }
2264}
2265
2266void Simulator::DecodeLoadRegLiteral(Instr* instr) {
2267 if ((instr->Bit(31) != 0) || (instr->Bit(29) != 0) ||
2268 (instr->Bits(24, 3) != 0)) {
2269 UnimplementedInstruction(instr);
2270 }
2271
2272 const Register rt = instr->RtField();
2273 const int64_t off = instr->SImm19Field() << 2;
2274 const int64_t pc = reinterpret_cast<int64_t>(instr);
2275 const int64_t address = pc + off;
2276 const int64_t val = ReadX(address, instr);
2277 if (instr->Bit(30) != 0) {
2278 // Format(instr, "ldrx 'rt, 'pcldr");
2279 set_register(instr, rt, val, R31IsZR);
2280 } else {
2281 // Format(instr, "ldrw 'rt, 'pcldr");
2282 set_wregister(rt, static_cast<int32_t>(val), R31IsZR);
2283 }
2284}
2285
2286void Simulator::DecodeLoadStoreExclusive(Instr* instr) {
2287 if (instr->Bit(21) != 0 || instr->Bit(23) != instr->Bit(15)) {
2288 UNIMPLEMENTED();
2289 }
2290 const int32_t size = instr->Bits(30, 2);
2291 if (size != 3 && size != 2) {
2292 UNIMPLEMENTED();
2293 }
2294 const Register rs = instr->RsField();
2295 const Register rn = instr->RnField();
2296 const Register rt = instr->RtField();
2297 ASSERT(instr->Rt2Field() == R31); // Should-Be-One
2298 const bool is_load = instr->Bit(22) == 1;
2299 const bool is_exclusive = instr->Bit(23) == 0;
2300 const bool is_ordered = instr->Bit(15) == 1;
2301 if (is_load) {
2302 const bool is_load_acquire = !is_exclusive && is_ordered;
2303 if (is_load_acquire) {
2304 ASSERT(rs == R31); // Should-Be-One
2305 // Format(instr, "ldar 'rt, 'rn");
2306 const int64_t addr = get_register(rn, R31IsSP);
2307 const intptr_t value =
2308 (size == 3) ? ReadAcquire(addr, instr) : ReadAcquireW(addr, instr);
2309 set_register(instr, rt, value, R31IsZR);
2310 } else {
2311 ASSERT(rs == R31); // Should-Be-One
2312 // Format(instr, "ldxr 'rt, 'rn");
2313 const int64_t addr = get_register(rn, R31IsSP);
2314 const intptr_t value = (size == 3) ? ReadExclusiveX(addr, instr)
2315 : ReadExclusiveW(addr, instr);
2316 set_register(instr, rt, value, R31IsZR);
2317 }
2318 } else {
2319 const bool is_store_release = !is_exclusive && is_ordered;
2320 if (is_store_release) {
2321 ASSERT(rs == R31); // Should-Be-One
2322 // Format(instr, "stlr 'rt, 'rn");
2323 const uword value = get_register(rt, R31IsZR);
2324 const uword addr = get_register(rn, R31IsSP);
2325 if (size == 3) {
2326 WriteRelease(addr, value, instr);
2327 } else {
2328 WriteReleaseW(addr, static_cast<uint32_t>(value), instr);
2329 }
2330 } else {
2331 // Format(instr, "stxr 'rs, 'rt, 'rn");
2332 const uword value = get_register(rt, R31IsZR);
2333 const uword addr = get_register(rn, R31IsSP);
2334 const intptr_t status =
2335 (size == 3)
2336 ? WriteExclusiveX(addr, value, instr)
2337 : WriteExclusiveW(addr, static_cast<uint32_t>(value), instr);
2338 set_register(instr, rs, status, R31IsSP);
2339 }
2340 }
2341}
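
// The ldxr/stxr pair above models ARM's load-exclusive/store-exclusive: the
// store writes 0 to the status register on success and 1 if the reservation
// was lost. A rough host-side analogue of the retry loop such code implements,
// sketched with the same std::atomic facilities the atomic decoder below uses
// (AtomicAddViaLlSc is illustrative, not simulator code):
static uint64_t AtomicAddViaLlSc(std::atomic<uint64_t>* cell, uint64_t delta) {
  uint64_t old_value = cell->load(std::memory_order_relaxed);
  // compare_exchange_weak may fail spuriously, just as stxr may; retry until
  // the exclusive store succeeds.
  while (!cell->compare_exchange_weak(old_value, old_value + delta,
                                      std::memory_order_acq_rel,
                                      std::memory_order_relaxed)) {
  }
  return old_value;
}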
2342
2343void Simulator::DecodeAtomicMemory(Instr* instr) {
2344 const int32_t size = instr->Bits(30, 2);
2345 std::memory_order order;
2346 switch (instr->Bits(22, 2)) {
2347 case 3:
2348 order = std::memory_order_acq_rel;
2349 break;
2350 case 2:
2351 order = std::memory_order_acquire;
2352 break;
2353 case 1:
2354 order = std::memory_order_release;
2355 break;
2356 case 0:
2357 order = std::memory_order_relaxed;
2358 break;
2359 }
2360 const Register rs = instr->RsField();
2361 const Register rn = instr->RnField();
2362 const Register rt = instr->RtField();
2363 const int32_t opc = instr->Bits(12, 3);
2364
2365 if (size == 3) {
2366 uint64_t in = get_register(rs, R31IsZR);
2367 auto addr =
2368 reinterpret_cast<std::atomic<uint64_t>*>(get_register(rn, R31IsSP));
2369 uint64_t out;
2370 switch (opc) {
2371 case 1:
2372 out = addr->fetch_and(~in, order);
2373 break;
2374 case 3:
2375 out = addr->fetch_or(in, order);
2376 break;
2377 default:
2378 UNIMPLEMENTED();
2379 }
2380 set_register(instr, rt, out, R31IsZR);
2381 } else if (size == 2) {
2382 ASSERT(size == 2);
2383 uint32_t in = get_wregister(rs, R31IsZR);
2384 auto addr =
2385 reinterpret_cast<std::atomic<uint32_t>*>(get_register(rn, R31IsSP));
2386 uint32_t out;
2387 switch (opc) {
2388 case 1:
2389 out = addr->fetch_and(~in, order);
2390 break;
2391 case 3:
2392 out = addr->fetch_or(in, order);
2393 break;
2394 default:
2395 UNIMPLEMENTED();
2396 }
2397 set_wregister(rt, out, R31IsZR);
2398 } else {
2399 UNIMPLEMENTED();
2400 }
2401}
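
// The two opcodes handled above correspond to ARM's LDCLR (opc == 1, clear
// the bits set in Rs) and LDSET (opc == 3, set them), both returning the old
// value in Rt. A host-side usage sketch with the same std::atomic calls
// (SetAndClearFlagBit is a hypothetical example, not simulator code):
static uint64_t SetAndClearFlagBit(std::atomic<uint64_t>* flags) {
  const uint64_t kBit = uint64_t{1} << 3;
  const uint64_t before = flags->fetch_or(kBit, std::memory_order_acq_rel);
  flags->fetch_and(~kBit, std::memory_order_acq_rel);  // Like LDCLR with Rs == kBit.
  return before;  // Old value, as LDSET would place in Rt.
}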
2402
2403DART_FORCE_INLINE
2404void Simulator::DecodeLoadStore(Instr* instr) {
2405 if (instr->IsAtomicMemoryOp()) {
2406 DecodeAtomicMemory(instr);
2407 } else if (instr->IsLoadStoreRegOp()) {
2408 DecodeLoadStoreReg(instr);
2409 } else if (instr->IsLoadStoreRegPairOp()) {
2410 DecodeLoadStoreRegPair(instr);
2411 } else if (instr->IsLoadRegLiteralOp()) {
2412 DecodeLoadRegLiteral(instr);
2413 } else if (instr->IsLoadStoreExclusiveOp()) {
2414 DecodeLoadStoreExclusive(instr);
2415 } else {
2416 UnimplementedInstruction(instr);
2417 }
2418}
2419
2420int64_t Simulator::ShiftOperand(uint8_t reg_size,
2421 int64_t value,
2422 Shift shift_type,
2423 uint8_t amount) {
2424 if (amount == 0) {
2425 return value;
2426 }
2427 int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
2428 switch (shift_type) {
2429 case LSL:
2430 return (static_cast<uint64_t>(value) << amount) & mask;
2431 case LSR:
2432 return static_cast<uint64_t>(value) >> amount;
2433 case ASR: {
2434 // Shift used to restore the sign.
2435 uint8_t s_shift = kXRegSizeInBits - reg_size;
2436 // Value with its sign restored.
2437 int64_t s_value = (value << s_shift) >> s_shift;
2438 return (s_value >> amount) & mask;
2439 }
2440 case ROR: {
2441 if (reg_size == kWRegSizeInBits) {
2442 value &= kWRegMask;
2443 }
2444 return (static_cast<uint64_t>(value) >> amount) |
2445 ((static_cast<uint64_t>(value) & ((1ULL << amount) - 1ULL))
2446 << (reg_size - amount));
2447 }
2448 default:
2449 UNIMPLEMENTED();
2450 return 0;
2451 }
2452}
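
// Quick standalone check of the ROR case above in the 32-bit lane, written as
// a hypothetical helper: the bits shifted out on the right reappear on the
// left of the masked value.
static uint32_t RotateRight32(uint32_t value, uint8_t amount) {
  amount &= 31;
  if (amount == 0) return value;
  return (value >> amount) | (value << (32 - amount));
}
// Example: RotateRight32(0x80000001u, 1) == 0xC0000000u.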
2453
2454int64_t Simulator::ExtendOperand(uint8_t reg_size,
2455 int64_t value,
2456 Extend extend_type,
2457 uint8_t amount) {
2458 switch (extend_type) {
2459 case UXTB:
2460 value &= 0xff;
2461 break;
2462 case UXTH:
2463 value &= 0xffff;
2464 break;
2465 case UXTW:
2466 value &= 0xffffffff;
2467 break;
2468 case SXTB:
2469 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 56) >> 56;
2470 break;
2471 case SXTH:
2472 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 48) >> 48;
2473 break;
2474 case SXTW:
2475 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 32) >> 32;
2476 break;
2477 case UXTX:
2478 case SXTX:
2479 break;
2480 default:
2481 UNREACHABLE();
2482 break;
2483 }
2484 int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
2485 return (static_cast<uint64_t>(value) << amount) & mask;
2486}
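
// Worked example of the extend-then-shift rule above (hypothetical helper):
// SXTW sign-extends the low 32 bits, and the result is then shifted left by
// the small immediate amount before being masked to the destination width.
static int64_t SignExtendWordThenShift(uint32_t w, uint8_t amount) {
  const int64_t extended = static_cast<int32_t>(w);  // SXTW.
  return static_cast<int64_t>(static_cast<uint64_t>(extended) << amount);
}
// Example: SignExtendWordThenShift(0xffffffffu, 2) == -4.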
2487
2488int64_t Simulator::DecodeShiftExtendOperand(Instr* instr) {
2489 const Register rm = instr->RmField();
2490 const int64_t rm_val = get_register(rm, R31IsZR);
2491 const uint8_t size =
2492 instr->SFField() != 0 ? kXRegSizeInBits : kWRegSizeInBits;
2493 if (instr->IsShift()) {
2494 const Shift shift_type = instr->ShiftTypeField();
2495 const uint8_t shift_amount = instr->Imm6Field();
2496 return ShiftOperand(size, rm_val, shift_type, shift_amount);
2497 } else {
2498 ASSERT(instr->IsExtend());
2499 const Extend extend_type = instr->ExtendTypeField();
2500 const uint8_t shift_amount = instr->Imm3Field();
2501 return ExtendOperand(size, rm_val, extend_type, shift_amount);
2502 }
2503 UNREACHABLE();
2504 return -1;
2505}
2506
2507void Simulator::DecodeAddSubShiftExt(Instr* instr) {
2508 // Format(instr, "add'sf's 'rd, 'rn, 'shift_op");
2509 // also, sub, cmp, etc.
2510 const bool addition = (instr->Bit(30) == 0);
2511 const Register rd = instr->RdField();
2512 const Register rn = instr->RnField();
2513 const uint64_t rm_val = DecodeShiftExtendOperand(instr);
2514 if (instr->SFField() != 0) {
2515 // 64-bit add.
2516 const uint64_t rn_val = get_register(rn, instr->RnMode());
2517 const uint64_t alu_out = rn_val + (addition ? rm_val : -rm_val);
2518 set_register(instr, rd, alu_out, instr->RdMode());
2519 if (instr->HasS()) {
2520 SetNZFlagsX(alu_out);
2521 SetCFlag(CarryFromX(alu_out, rn_val, rm_val, addition));
2522 SetVFlag(OverflowFromX(alu_out, rn_val, rm_val, addition));
2523 }
2524 } else {
2525 // 32-bit add.
2526 const uint32_t rn_val = get_wregister(rn, instr->RnMode());
2527 uint32_t rm_val32 = static_cast<uint32_t>(rm_val & kWRegMask);
2528 uint32_t carry_in = 0;
2529 if (!addition) {
2530 carry_in = 1;
2531 rm_val32 = ~rm_val32;
2532 }
2533 const uint32_t alu_out = rn_val + rm_val32 + carry_in;
2534 set_wregister(rd, alu_out, instr->RdMode());
2535 if (instr->HasS()) {
2536 SetNZFlagsW(alu_out);
2537 SetCFlag(CarryFromW(rn_val, rm_val32, carry_in));
2538 SetVFlag(OverflowFromW(rn_val, rm_val32, carry_in));
2539 }
2540 }
2541}
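
// The 32-bit path above computes subtraction as rn + ~rm + 1, i.e. addition
// of the two's complement, which is what lets the same carry/overflow helpers
// serve both add and sub. For example, 5 - 3 becomes
// 5 + 0xfffffffc + 1 == 2 (mod 2^32), with the carry out set to indicate
// that no borrow occurred.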
2542
2543void Simulator::DecodeAddSubWithCarry(Instr* instr) {
2544 // Format(instr, "adc'sf's 'rd, 'rn, 'rm");
2545 // Format(instr, "sbc'sf's 'rd, 'rn, 'rm");
2546 const bool addition = (instr->Bit(30) == 0);
2547 const Register rd = instr->RdField();
2548 const Register rn = instr->RnField();
2549 const Register rm = instr->RmField();
2550 const uint64_t rn_val64 = get_register(rn, R31IsZR);
2551 const uint32_t rn_val32 = get_wregister(rn, R31IsZR);
2552 const uint64_t rm_val64 = get_register(rm, R31IsZR);
2553 uint32_t rm_val32 = get_wregister(rm, R31IsZR);
2554 const uint32_t carry_in = c_flag_ ? 1 : 0;
2555 if (instr->SFField() != 0) {
2556 // 64-bit add.
2557 const uint64_t alu_out =
2558 rn_val64 + (addition ? rm_val64 : ~rm_val64) + carry_in;
2559 set_register(instr, rd, alu_out, R31IsZR);
2560 if (instr->HasS()) {
2561 SetNZFlagsX(alu_out);
2562 SetCFlag(CarryFromX(alu_out, rn_val64, rm_val64, addition));
2563 SetVFlag(OverflowFromX(alu_out, rn_val64, rm_val64, addition));
2564 }
2565 } else {
2566 // 32-bit add.
2567 if (!addition) {
2568 rm_val32 = ~rm_val32;
2569 }
2570 const uint32_t alu_out = rn_val32 + rm_val32 + carry_in;
2571 set_wregister(rd, alu_out, R31IsZR);
2572 if (instr->HasS()) {
2573 SetNZFlagsW(alu_out);
2574 SetCFlag(CarryFromW(rn_val32, rm_val32, carry_in));
2575 SetVFlag(OverflowFromW(rn_val32, rm_val32, carry_in));
2576 }
2577 }
2578}
2579
2580void Simulator::DecodeLogicalShift(Instr* instr) {
2581 const int op = (instr->Bits(29, 2) << 1) | instr->Bit(21);
2582 const Register rd = instr->RdField();
2583 const Register rn = instr->RnField();
2584 const int64_t rn_val = get_register(rn, instr->RnMode());
2585 const int64_t rm_val = DecodeShiftExtendOperand(instr);
2586 int64_t alu_out = 0;
2587 switch (op) {
2588 case 0:
2589 // Format(instr, "and'sf 'rd, 'rn, 'shift_op");
2590 alu_out = rn_val & rm_val;
2591 break;
2592 case 1:
2593 // Format(instr, "bic'sf 'rd, 'rn, 'shift_op");
2594 alu_out = rn_val & (~rm_val);
2595 break;
2596 case 2:
2597 // Format(instr, "orr'sf 'rd, 'rn, 'shift_op");
2598 alu_out = rn_val | rm_val;
2599 break;
2600 case 3:
2601 // Format(instr, "orn'sf 'rd, 'rn, 'shift_op");
2602 alu_out = rn_val | (~rm_val);
2603 break;
2604 case 4:
2605 // Format(instr, "eor'sf 'rd, 'rn, 'shift_op");
2606 alu_out = rn_val ^ rm_val;
2607 break;
2608 case 5:
2609 // Format(instr, "eon'sf 'rd, 'rn, 'shift_op");
2610 alu_out = rn_val ^ (~rm_val);
2611 break;
2612 case 6:
2613 // Format(instr, "and'sfs 'rd, 'rn, 'shift_op");
2614 alu_out = rn_val & rm_val;
2615 break;
2616 case 7:
2617 // Format(instr, "bic'sfs 'rd, 'rn, 'shift_op");
2618 alu_out = rn_val & (~rm_val);
2619 break;
2620 default:
2621 UNREACHABLE();
2622 break;
2623 }
2624
2625 // Set flags if ands or bics.
2626 if ((op == 6) || (op == 7)) {
2627 if (instr->SFField() == 1) {
2628 SetNZFlagsX(alu_out);
2629 } else {
2630 SetNZFlagsW(alu_out);
2631 }
2632 SetCFlag(false);
2633 SetVFlag(false);
2634 }
2635
2636 if (instr->SFField() == 1) {
2637 set_register(instr, rd, alu_out, instr->RdMode());
2638 } else {
2639 set_wregister(rd, alu_out & kWRegMask, instr->RdMode());
2640 }
2641}
2642
2643static int64_t divide64(int64_t top, int64_t bottom, bool is_signed) {
2644 // ARM64 does not trap on integer division by zero. The destination register
2645 // is instead set to 0.
2646 if (bottom == 0) {
2647 return 0;
2648 }
2649
2650 if (is_signed) {
2651 // INT_MIN / -1 = INT_MIN.
2652 if ((top == static_cast<int64_t>(0x8000000000000000LL)) &&
2653 (bottom == static_cast<int64_t>(0xffffffffffffffffLL))) {
2654 return static_cast<int64_t>(0x8000000000000000LL);
2655 } else {
2656 return top / bottom;
2657 }
2658 } else {
2659 const uint64_t utop = static_cast<uint64_t>(top);
2660 const uint64_t ubottom = static_cast<uint64_t>(bottom);
2661 return static_cast<int64_t>(utop / ubottom);
2662 }
2663}
2664
2665static int32_t divide32(int32_t top, int32_t bottom, bool is_signed) {
2666 // ARM64 does not trap on integer division by zero. The destination register
2667 // is instead set to 0.
2668 if (bottom == 0) {
2669 return 0;
2670 }
2671
2672 if (is_signed) {
2673 // INT_MIN / -1 = INT_MIN.
2674 if ((top == static_cast<int32_t>(0x80000000)) &&
2675 (bottom == static_cast<int32_t>(0xffffffff))) {
2676 return static_cast<int32_t>(0x80000000);
2677 } else {
2678 return top / bottom;
2679 }
2680 } else {
2681 const uint32_t utop = static_cast<uint32_t>(top);
2682 const uint32_t ubottom = static_cast<uint32_t>(bottom);
2683 return static_cast<int32_t>(utop / ubottom);
2684 }
2685}
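
// Edge cases covered by divide64/divide32 above, mirroring ARM64 sdiv/udiv:
//   divide64(42, 0, /*is_signed=*/true)  == 0   (no trap on divide-by-zero)
//   divide32(static_cast<int32_t>(0x80000000), -1, /*is_signed=*/true)
//       == static_cast<int32_t>(0x80000000)     (INT_MIN / -1 wraps, no UB)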
2686
2687void Simulator::DecodeMiscDP1Source(Instr* instr) {
2688 if (instr->Bit(29) != 0) {
2689 UnimplementedInstruction(instr);
2690 }
2691
2692 const Register rd = instr->RdField();
2693 const Register rn = instr->RnField();
2694 const int op = instr->Bits(10, 10);
2695 const int64_t rn_val64 = get_register(rn, R31IsZR);
2696 const int32_t rn_val32 = get_wregister(rn, R31IsZR);
2697 switch (op) {
2698 case 4: {
2699 // Format(instr, "clz'sf 'rd, 'rn");
2700 if (instr->SFField() == 1) {
2701 const uint64_t rd_val = Utils::CountLeadingZeros64(rn_val64);
2702 set_register(instr, rd, rd_val, R31IsZR);
2703 } else {
2704 const uint32_t rd_val = Utils::CountLeadingZeros32(rn_val32);
2705 set_wregister(rd, rd_val, R31IsZR);
2706 }
2707 break;
2708 }
2709 case 0: {
2710 // Format(instr, "rbit'sf 'rd, 'rn");
2711 if (instr->SFField() == 1) {
2712 const uint64_t rd_val = Utils::ReverseBits64(rn_val64);
2713 set_register(instr, rd, rd_val, R31IsZR);
2714 } else {
2715 const uint32_t rd_val = Utils::ReverseBits32(rn_val32);
2716 set_wregister(rd, rd_val, R31IsZR);
2717 }
2718 break;
2719 }
2720 default:
2721 UnimplementedInstruction(instr);
2722 break;
2723 }
2724}
2725
2726void Simulator::DecodeMiscDP2Source(Instr* instr) {
2727 if (instr->Bit(29) != 0) {
2728 UnimplementedInstruction(instr);
2729 }
2730
2731 const Register rd = instr->RdField();
2732 const Register rn = instr->RnField();
2733 const Register rm = instr->RmField();
2734 const int op = instr->Bits(10, 5);
2735 const int64_t rn_val64 = get_register(rn, R31IsZR);
2736 const int64_t rm_val64 = get_register(rm, R31IsZR);
2737 const int32_t rn_val32 = get_wregister(rn, R31IsZR);
2738 const int32_t rm_val32 = get_wregister(rm, R31IsZR);
2739 switch (op) {
2740 case 2:
2741 case 3: {
2742 // Format(instr, "udiv'sf 'rd, 'rn, 'rm");
2743 // Format(instr, "sdiv'sf 'rd, 'rn, 'rm");
2744 const bool is_signed = instr->Bit(10) == 1;
2745 if (instr->SFField() == 1) {
2746 set_register(instr, rd, divide64(rn_val64, rm_val64, is_signed),
2747 R31IsZR);
2748 } else {
2749 set_wregister(rd, divide32(rn_val32, rm_val32, is_signed), R31IsZR);
2750 }
2751 break;
2752 }
2753 case 8: {
2754 // Format(instr, "lsl'sf 'rd, 'rn, 'rm");
2755 if (instr->SFField() == 1) {
2756 const uint64_t rn_u64 = static_cast<uint64_t>(rn_val64);
2757 const int64_t alu_out = rn_u64 << (rm_val64 & (kXRegSizeInBits - 1));
2758 set_register(instr, rd, alu_out, R31IsZR);
2759 } else {
2760 const uint32_t rn_u32 = static_cast<uint32_t>(rn_val32);
2761 const int32_t alu_out = rn_u32 << (rm_val32 & (kWRegSizeInBits - 1));
2762 set_wregister(rd, alu_out, R31IsZR);
2763 }
2764 break;
2765 }
2766 case 9: {
2767 // Format(instr, "lsr'sf 'rd, 'rn, 'rm");
2768 if (instr->SFField() == 1) {
2769 const uint64_t rn_u64 = static_cast<uint64_t>(rn_val64);
2770 const int64_t alu_out = rn_u64 >> (rm_val64 & (kXRegSizeInBits - 1));
2771 set_register(instr, rd, alu_out, R31IsZR);
2772 } else {
2773 const uint32_t rn_u32 = static_cast<uint32_t>(rn_val32);
2774 const int32_t alu_out = rn_u32 >> (rm_val32 & (kWRegSizeInBits - 1));
2775 set_wregister(rd, alu_out, R31IsZR);
2776 }
2777 break;
2778 }
2779 case 10: {
2780 // Format(instr, "asr'sf 'rd, 'rn, 'rm");
2781 if (instr->SFField() == 1) {
2782 const int64_t alu_out = rn_val64 >> (rm_val64 & (kXRegSizeInBits - 1));
2783 set_register(instr, rd, alu_out, R31IsZR);
2784 } else {
2785 const int32_t alu_out = rn_val32 >> (rm_val32 & (kWRegSizeInBits - 1));
2786 set_wregister(rd, alu_out, R31IsZR);
2787 }
2788 break;
2789 }
2790 default:
2791 UnimplementedInstruction(instr);
2792 break;
2793 }
2794}
2795
2796void Simulator::DecodeMiscDP3Source(Instr* instr) {
2797 const Register rd = instr->RdField();
2798 const Register rn = instr->RnField();
2799 const Register rm = instr->RmField();
2800 const Register ra = instr->RaField();
2801 if ((instr->Bits(29, 2) == 0) && (instr->Bits(21, 3) == 0) &&
2802 (instr->Bit(15) == 0)) {
2803 // Format(instr, "madd'sf 'rd, 'rn, 'rm, 'ra");
2804 if (instr->SFField() == 1) {
2805 const uint64_t rn_val = get_register(rn, R31IsZR);
2806 const uint64_t rm_val = get_register(rm, R31IsZR);
2807 const uint64_t ra_val = get_register(ra, R31IsZR);
2808 const uint64_t alu_out = ra_val + (rn_val * rm_val);
2809 set_register(instr, rd, alu_out, R31IsZR);
2810 } else {
2811 const uint32_t rn_val = get_wregister(rn, R31IsZR);
2812 const uint32_t rm_val = get_wregister(rm, R31IsZR);
2813 const uint32_t ra_val = get_wregister(ra, R31IsZR);
2814 const uint32_t alu_out = ra_val + (rn_val * rm_val);
2815 set_wregister(rd, alu_out, R31IsZR);
2816 }
2817 } else if ((instr->Bits(29, 2) == 0) && (instr->Bits(21, 3) == 0) &&
2818 (instr->Bit(15) == 1)) {
2819 // Format(instr, "msub'sf 'rd, 'rn, 'rm, 'ra");
2820 if (instr->SFField() == 1) {
2821 const uint64_t rn_val = get_register(rn, R31IsZR);
2822 const uint64_t rm_val = get_register(rm, R31IsZR);
2823 const uint64_t ra_val = get_register(ra, R31IsZR);
2824 const uint64_t alu_out = ra_val - (rn_val * rm_val);
2825 set_register(instr, rd, alu_out, R31IsZR);
2826 } else {
2827 const uint32_t rn_val = get_wregister(rn, R31IsZR);
2828 const uint32_t rm_val = get_wregister(rm, R31IsZR);
2829 const uint32_t ra_val = get_wregister(ra, R31IsZR);
2830 const uint32_t alu_out = ra_val - (rn_val * rm_val);
2831 set_wregister(rd, alu_out, R31IsZR);
2832 }
2833 } else if ((instr->Bits(29, 3) == 4) && (instr->Bits(21, 3) == 2) &&
2834 (instr->Bit(15) == 0)) {
2835 ASSERT(ra == R31); // Should-Be-One
2836 // Format(instr, "smulh 'rd, 'rn, 'rm");
2837 const int64_t rn_val = get_register(rn, R31IsZR);
2838 const int64_t rm_val = get_register(rm, R31IsZR);
2839#if defined(DART_HOST_OS_WINDOWS)
2840 // Visual Studio does not support __int128.
2841 int64_t alu_out;
2842 Multiply128(rn_val, rm_val, &alu_out);
2843#else
2844 const __int128 res =
2845 static_cast<__int128>(rn_val) * static_cast<__int128>(rm_val);
2846 const int64_t alu_out = static_cast<int64_t>(res >> 64);
2847#endif // DART_HOST_OS_WINDOWS
2848 set_register(instr, rd, alu_out, R31IsZR);
2849 } else if ((instr->Bits(29, 3) == 4) && (instr->Bits(21, 3) == 6) &&
2850 (instr->Bit(15) == 0)) {
2851 ASSERT(ra == R31); // Should-Be-One
2852 // Format(instr, "umulh 'rd, 'rn, 'rm");
2853 const uint64_t rn_val = get_register(rn, R31IsZR);
2854 const uint64_t rm_val = get_register(rm, R31IsZR);
2855#if defined(DART_HOST_OS_WINDOWS)
2856 // Visual Studio does not support __int128.
2857 uint64_t alu_out;
2858 UnsignedMultiply128(rn_val, rm_val, &alu_out);
2859#else
2860 const unsigned __int128 res = static_cast<unsigned __int128>(rn_val) *
2861 static_cast<unsigned __int128>(rm_val);
2862 const uint64_t alu_out = static_cast<uint64_t>(res >> 64);
2863#endif // DART_HOST_OS_WINDOWS
2864 set_register(instr, rd, alu_out, R31IsZR);
2865 } else if ((instr->Bits(29, 3) == 4) && (instr->Bit(15) == 0)) {
2866 if (instr->Bits(21, 3) == 5) {
2867 // Format(instr, "umaddl 'rd, 'rn, 'rm, 'ra");
2868 const uint64_t rn_val = static_cast<uint32_t>(get_wregister(rn, R31IsZR));
2869 const uint64_t rm_val = static_cast<uint32_t>(get_wregister(rm, R31IsZR));
2870 const uint64_t ra_val = get_register(ra, R31IsZR);
2871 const uint64_t alu_out = ra_val + (rn_val * rm_val);
2872 set_register(instr, rd, alu_out, R31IsZR);
2873 } else {
2874 // Format(instr, "smaddl 'rd, 'rn, 'rm, 'ra");
2875 const int64_t rn_val = static_cast<int32_t>(get_wregister(rn, R31IsZR));
2876 const int64_t rm_val = static_cast<int32_t>(get_wregister(rm, R31IsZR));
2877 const int64_t ra_val = get_register(ra, R31IsZR);
2878 const int64_t alu_out = ra_val + (rn_val * rm_val);
2879 set_register(instr, rd, alu_out, R31IsZR);
2880 }
2881 } else {
2882 UnimplementedInstruction(instr);
2883 }
2884}
2885
2886void Simulator::DecodeConditionalSelect(Instr* instr) {
2887 const Register rd = instr->RdField();
2888 const Register rn = instr->RnField();
2889 const Register rm = instr->RmField();
2890 const int64_t rm_val64 = get_register(rm, R31IsZR);
2891 const int32_t rm_val32 = get_wregister(rm, R31IsZR);
2892 const int64_t rn_val64 = get_register(rn, instr->RnMode());
2893 const int32_t rn_val32 = get_wregister(rn, instr->RnMode());
2894 int64_t result64 = 0;
2895 int32_t result32 = 0;
2896
2897 if ((instr->Bits(29, 2) == 0) && (instr->Bits(10, 2) == 0)) {
2898 // Format(instr, "mov'sf'cond 'rd, 'rn, 'rm");
2899 result64 = rm_val64;
2900 result32 = rm_val32;
2901 if (ConditionallyExecute(instr)) {
2902 result64 = rn_val64;
2903 result32 = rn_val32;
2904 }
2905 } else if ((instr->Bits(29, 2) == 0) && (instr->Bits(10, 2) == 1)) {
2906 // Format(instr, "csinc'sf'cond 'rd, 'rn, 'rm");
2907 result64 = rm_val64 + 1;
2908 result32 = rm_val32 + 1;
2909 if (ConditionallyExecute(instr)) {
2910 result64 = rn_val64;
2911 result32 = rn_val32;
2912 }
2913 } else if ((instr->Bits(29, 2) == 2) && (instr->Bits(10, 2) == 0)) {
2914 // Format(instr, "csinv'sf'cond 'rd, 'rn, 'rm");
2915 result64 = ~rm_val64;
2916 result32 = ~rm_val32;
2917 if (ConditionallyExecute(instr)) {
2918 result64 = rn_val64;
2919 result32 = rn_val32;
2920 }
2921 } else if ((instr->Bits(29, 2) == 2) && (instr->Bits(10, 2) == 1)) {
2922 // Format(instr, "csneg'sf'cond 'rd, 'rn, 'rm");
2923 result64 = -rm_val64;
2924 result32 = -rm_val32;
2925 if (ConditionallyExecute(instr)) {
2926 result64 = rn_val64;
2927 result32 = rn_val32;
2928 }
2929 } else {
2930 UnimplementedInstruction(instr);
2931 return;
2932 }
2933
2934 if (instr->SFField() == 1) {
2935 set_register(instr, rd, result64, instr->RdMode());
2936 } else {
2937 set_wregister(rd, result32, instr->RdMode());
2938 }
2939}
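
// Note: the csinc path above is also how the cset alias behaves. "cset rd,
// cond" is encoded as "csinc rd, zr, zr, invert(cond)", so the code above
// produces rd == 1 (zr + 1) exactly when the original condition holds and
// rd == 0 otherwise.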
2940
2941void Simulator::DecodeDPRegister(Instr* instr) {
2942 if (instr->IsAddSubShiftExtOp()) {
2943 DecodeAddSubShiftExt(instr);
2944 } else if (instr->IsAddSubWithCarryOp()) {
2945 DecodeAddSubWithCarry(instr);
2946 } else if (instr->IsLogicalShiftOp()) {
2947 DecodeLogicalShift(instr);
2948 } else if (instr->IsMiscDP1SourceOp()) {
2949 DecodeMiscDP1Source(instr);
2950 } else if (instr->IsMiscDP2SourceOp()) {
2951 DecodeMiscDP2Source(instr);
2952 } else if (instr->IsMiscDP3SourceOp()) {
2953 DecodeMiscDP3Source(instr);
2954 } else if (instr->IsConditionalSelectOp()) {
2955 DecodeConditionalSelect(instr);
2956 } else {
2957 UnimplementedInstruction(instr);
2958 }
2959}
2960
2961void Simulator::DecodeSIMDCopy(Instr* instr) {
2962 const int32_t Q = instr->Bit(30);
2963 const int32_t op = instr->Bit(29);
2964 const int32_t imm4 = instr->Bits(11, 4);
2965 const int32_t imm5 = instr->Bits(16, 5);
2966
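// imm5 encodes both the element size and the lane index: the position of the
// lowest set bit selects 1-, 2-, 4-, or 8-byte elements, and the bits above
// it form the index.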
2967 int32_t idx4 = -1;
2968 int32_t idx5 = -1;
2969 int32_t element_bytes;
2970 if ((imm5 & 0x1) != 0) {
2971 idx4 = imm4;
2972 idx5 = imm5 >> 1;
2973 element_bytes = 1;
2974 } else if ((imm5 & 0x2) != 0) {
2975 idx4 = imm4 >> 1;
2976 idx5 = imm5 >> 2;
2977 element_bytes = 2;
2978 } else if ((imm5 & 0x4) != 0) {
2979 idx4 = imm4 >> 2;
2980 idx5 = imm5 >> 3;
2981 element_bytes = 4;
2982 } else if ((imm5 & 0x8) != 0) {
2983 idx4 = imm4 >> 3;
2984 idx5 = imm5 >> 4;
2985 element_bytes = 8;
2986 } else {
2987 UnimplementedInstruction(instr);
2988 return;
2989 }
2990 ASSERT((idx4 != -1) && (idx5 != -1));
2991
2992 const VRegister vd = instr->VdField();
2993 const VRegister vn = instr->VnField();
2994 const Register rn = instr->RnField();
2995 const Register rd = instr->RdField();
2996 if ((op == 0) && (imm4 == 7)) {
2997 if (Q == 0) {
2998 // Format(instr, "vmovrs 'rd, 'vn'idx5");
2999 set_wregister(rd, get_vregisters(vn, idx5), R31IsZR);
3000 } else {
3001 // Format(instr, "vmovrd 'rd, 'vn'idx5");
3002 set_register(instr, rd, get_vregisterd(vn, idx5), R31IsZR);
3003 }
3004 } else if ((Q == 1) && (op == 0) && (imm4 == 0)) {
3005 // Format(instr, "vdup'csz 'vd, 'vn'idx5");
3006 if (element_bytes == 4) {
3007 for (int i = 0; i < 4; i++) {
3008 set_vregisters(vd, i, get_vregisters(vn, idx5));
3009 }
3010 } else if (element_bytes == 8) {
3011 for (int i = 0; i < 2; i++) {
3012 set_vregisterd(vd, i, get_vregisterd(vn, idx5));
3013 }
3014 } else {
3015 UnimplementedInstruction(instr);
3016 return;
3017 }
3018 } else if ((Q == 1) && (op == 0) && (imm4 == 3)) {
3019 // Format(instr, "vins'csz 'vd'idx5, 'rn");
3020 if (element_bytes == 4) {
3021 set_vregisters(vd, idx5, get_wregister(rn, R31IsZR));
3022 } else if (element_bytes == 8) {
3023 set_vregisterd(vd, idx5, get_register(rn, R31IsZR));
3024 } else {
3025 UnimplementedInstruction(instr);
3026 }
3027 } else if ((Q == 1) && (op == 0) && (imm4 == 1)) {
3028 // Format(instr, "vdup'csz 'vd, 'rn");
3029 if (element_bytes == 4) {
3030 for (int i = 0; i < 4; i++) {
3031 set_vregisters(vd, i, get_wregister(rn, R31IsZR));
3032 }
3033 } else if (element_bytes == 8) {
3034 for (int i = 0; i < 2; i++) {
3035 set_vregisterd(vd, i, get_register(rn, R31IsZR));
3036 }
3037 } else {
3038 UnimplementedInstruction(instr);
3039 return;
3040 }
3041 } else if ((Q == 1) && (op == 1)) {
3042 // Format(instr, "vins'csz 'vd'idx5, 'vn'idx4");
3043 if (element_bytes == 4) {
3044 set_vregisters(vd, idx5, get_vregisters(vn, idx4));
3045 } else if (element_bytes == 8) {
3046 set_vregisterd(vd, idx5, get_vregisterd(vn, idx4));
3047 } else {
3048 UnimplementedInstruction(instr);
3049 }
3050 } else {
3051 UnimplementedInstruction(instr);
3052 }
3053}
3054
3055static float vminf(float f1, float f2) {
3056 if (f1 == f2) {
3057 // Treat -0.0 as less than +0.0; a plain comparison (like x86 minss) sees them as equal.
3058 return signbit(f1) ? f1 : f2;
3059 }
3060 return f1 > f2 ? f2 : f1;
3061}
3062
3063static float vmaxf(float f1, float f2) {
3064 if (f1 == f2) {
3065 // Treat +0.0 as greater than -0.0; a plain comparison sees them as equal.
3066 return signbit(f1) ? f2 : f1;
3067 }
3068 return f1 < f2 ? f2 : f1;
3069}
3070
3071static double vmind(double f1, double f2) {
3072 if (f1 == f2) {
3073 // Treat -0.0 as less than +0.0; a plain comparison sees them as equal.
3074 return signbit(f1) ? f1 : f2;
3075 }
3076 return f1 > f2 ? f2 : f1;
3077}
3078
3079static double vmaxd(double f1, double f2) {
3080 if (f1 == f2) {
3081 // Treat +0.0 as greater than -0.0; a plain comparison sees them as equal.
3082 return signbit(f1) ? f2 : f1;
3083 }
3084 return f1 < f2 ? f2 : f1;
3085}
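
// The four helpers above exist solely for the signed-zero case, where the C
// comparison reports equality: vminf(-0.0f, 0.0f) returns -0.0f and
// vmaxf(-0.0f, 0.0f) returns +0.0f, matching the ordering the vector min/max
// instructions expect for that input.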
3086
3087void Simulator::DecodeSIMDThreeSame(Instr* instr) {
3088 const int Q = instr->Bit(30);
3089 const int U = instr->Bit(29);
3090 const int opcode = instr->Bits(11, 5);
3091
3092 if (Q == 0) {
3093 UnimplementedInstruction(instr);
3094 return;
3095 }
3096
3097 const VRegister vd = instr->VdField();
3098 const VRegister vn = instr->VnField();
3099 const VRegister vm = instr->VmField();
3100 if (instr->Bit(22) == 0) {
3101 // f32 case.
3102 for (int idx = 0; idx < 4; idx++) {
3103 const int32_t vn_val = get_vregisters(vn, idx);
3104 const int32_t vm_val = get_vregisters(vm, idx);
3105 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3106 const float vm_flt = bit_cast<float, int32_t>(vm_val);
3107 int32_t res = 0;
3108 if ((U == 0) && (opcode == 0x3)) {
3109 if (instr->Bit(23) == 0) {
3110 // Format(instr, "vand 'vd, 'vn, 'vm");
3111 res = vn_val & vm_val;
3112 } else {
3113 // Format(instr, "vorr 'vd, 'vn, 'vm");
3114 res = vn_val | vm_val;
3115 }
3116 } else if ((U == 1) && (opcode == 0x3)) {
3117 // Format(instr, "veor 'vd, 'vn, 'vm");
3118 res = vn_val ^ vm_val;
3119 } else if ((U == 0) && (opcode == 0x10)) {
3120 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3121 res = vn_val + vm_val;
3122 } else if ((U == 1) && (opcode == 0x10)) {
3123 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3124 res = vn_val - vm_val;
3125 } else if ((U == 0) && (opcode == 0x1a)) {
3126 if (instr->Bit(23) == 0) {
3127 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3128 res = bit_cast<int32_t, float>(vn_flt + vm_flt);
3129 } else {
3130 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3131 res = bit_cast<int32_t, float>(vn_flt - vm_flt);
3132 }
3133 } else if ((U == 1) && (opcode == 0x1b)) {
3134 // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
3135 res = bit_cast<int32_t, float>(vn_flt * vm_flt);
3136 } else if ((U == 1) && (opcode == 0x1f)) {
3137 // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
3138 res = bit_cast<int32_t, float>(vn_flt / vm_flt);
3139 } else if ((U == 0) && (opcode == 0x1c)) {
3140 // Format(instr, "vceq'vsz 'vd, 'vn, 'vm");
3141 res = (vn_flt == vm_flt) ? 0xffffffff : 0;
3142 } else if ((U == 1) && (opcode == 0x1c)) {
3143 if (instr->Bit(23) == 1) {
3144 // Format(instr, "vcgt'vsz 'vd, 'vn, 'vm");
3145 res = (vn_flt > vm_flt) ? 0xffffffff : 0;
3146 } else {
3147 // Format(instr, "vcge'vsz 'vd, 'vn, 'vm");
3148 res = (vn_flt >= vm_flt) ? 0xffffffff : 0;
3149 }
3150 } else if ((U == 0) && (opcode == 0x1e)) {
3151 if (instr->Bit(23) == 1) {
3152 // Format(instr, "vmin'vsz 'vd, 'vn, 'vm");
3153 const float m = vminf(vn_flt, vm_flt);
3154 res = bit_cast<int32_t, float>(m);
3155 } else {
3156 // Format(instr, "vmax'vsz 'vd, 'vn, 'vm");
3157 const float m = vmaxf(vn_flt, vm_flt);
3158 res = bit_cast<int32_t, float>(m);
3159 }
3160 } else if ((U == 0) && (opcode == 0x1f)) {
3161 if (instr->Bit(23) == 0) {
3162 // Format(instr, "vrecps'vsz 'vd, 'vn, 'vm");
3163 res = bit_cast<int32_t, float>(2.0 - (vn_flt * vm_flt));
3164 } else {
3165 // Format(instr, "vrsqrt'vsz 'vd, 'vn, 'vm");
3166 res = bit_cast<int32_t, float>((3.0 - vn_flt * vm_flt) / 2.0);
3167 }
3168 } else {
3169 UnimplementedInstruction(instr);
3170 return;
3171 }
3172 set_vregisters(vd, idx, res);
3173 }
3174 } else {
3175 // f64 case.
3176 for (int idx = 0; idx < 2; idx++) {
3177 const int64_t vn_val = get_vregisterd(vn, idx);
3178 const int64_t vm_val = get_vregisterd(vm, idx);
3179 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3180 const double vm_dbl = bit_cast<double, int64_t>(vm_val);
3181 int64_t res = 0;
3182 if ((U == 0) && (opcode == 0x3)) {
3183 if (instr->Bit(23) == 0) {
3184 // Format(instr, "vand 'vd, 'vn, 'vm");
3185 res = vn_val & vm_val;
3186 } else {
3187 // Format(instr, "vorr 'vd, 'vn, 'vm");
3188 res = vn_val | vm_val;
3189 }
3190 } else if ((U == 1) && (opcode == 0x3)) {
3191 // Format(instr, "veor 'vd, 'vn, 'vm");
3192 res = vn_val ^ vm_val;
3193 } else if ((U == 0) && (opcode == 0x10)) {
3194 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3195 res = vn_val + vm_val;
3196 } else if ((U == 1) && (opcode == 0x10)) {
3197 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3198 res = vn_val - vm_val;
3199 } else if ((U == 0) && (opcode == 0x1a)) {
3200 if (instr->Bit(23) == 0) {
3201 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3202 res = bit_cast<int64_t, double>(vn_dbl + vm_dbl);
3203 } else {
3204 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3205 res = bit_cast<int64_t, double>(vn_dbl - vm_dbl);
3206 }
3207 } else if ((U == 1) && (opcode == 0x1b)) {
3208 // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
3209 res = bit_cast<int64_t, double>(vn_dbl * vm_dbl);
3210 } else if ((U == 1) && (opcode == 0x1f)) {
3211 // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
3212 res = bit_cast<int64_t, double>(vn_dbl / vm_dbl);
3213 } else if ((U == 0) && (opcode == 0x1c)) {
3214 // Format(instr, "vceq'vsz 'vd, 'vn, 'vm");
3215 res = (vn_dbl == vm_dbl) ? 0xffffffffffffffffLL : 0;
3216 } else if ((U == 1) && (opcode == 0x1c)) {
3217 if (instr->Bit(23) == 1) {
3218 // Format(instr, "vcgt'vsz 'vd, 'vn, 'vm");
3219 res = (vn_dbl > vm_dbl) ? 0xffffffffffffffffLL : 0;
3220 } else {
3221 // Format(instr, "vcge'vsz 'vd, 'vn, 'vm");
3222 res = (vn_dbl >= vm_dbl) ? 0xffffffffffffffffLL : 0;
3223 }
3224 } else if ((U == 0) && (opcode == 0x1e)) {
3225 if (instr->Bit(23) == 1) {
3226 // Format(instr, "vmin'vsz 'vd, 'vn, 'vm");
3227 const double m = vmind(vn_dbl, vm_dbl);
3228 res = bit_cast<int64_t, double>(m);
3229 } else {
3230 // Format(instr, "vmax'vsz 'vd, 'vn, 'vm");
3231 const double m = vmaxd(vn_dbl, vm_dbl);
3232 res = bit_cast<int64_t, double>(m);
3233 }
3234 } else {
3235 UnimplementedInstruction(instr);
3236 return;
3237 }
3238 set_vregisterd(vd, idx, res);
3239 }
3240 }
3241}
3242
3243static float arm_reciprocal_sqrt_estimate(float a) {
3244 // From the ARM Architecture Reference Manual A2-87.
3245 if (isinf(a) || (fabs(a) >= exp2f(126)))
3246 return 0.0;
3247 else if (a == 0.0)
3248 return kPosInfinity;
3249 else if (isnan(a))
3250 return a;
3251
3252 uint32_t a_bits = bit_cast<uint32_t, float>(a);
3253 uint64_t scaled;
3254 if (((a_bits >> 23) & 1) != 0) {
3255 // scaled = '0 01111111101' : operand<22:0> : Zeros(29)
3256 scaled = (static_cast<uint64_t>(0x3fd) << 52) |
3257 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3258 } else {
3259 // scaled = '0 01111111110' : operand<22:0> : Zeros(29)
3260 scaled = (static_cast<uint64_t>(0x3fe) << 52) |
3261 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3262 }
3263 // result_exp = (380 - UInt(operand<30:23>)) DIV 2;
3264 int32_t result_exp = (380 - ((a_bits >> 23) & 0xff)) / 2;
3265
3266 double scaled_d = bit_cast<double, uint64_t>(scaled);
3267 ASSERT((scaled_d >= 0.25) && (scaled_d < 1.0));
3268
3269 double r;
3270 if (scaled_d < 0.5) {
3271 // range 0.25 <= a < 0.5
3272
3273 // a in units of 1/512 rounded down.
3274 int32_t q0 = static_cast<int32_t>(scaled_d * 512.0);
3275 // reciprocal root r.
3276 r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
3277 } else {
3278 // range 0.5 <= a < 1.0
3279
3280 // a in units of 1/256 rounded down.
3281 int32_t q1 = static_cast<int32_t>(scaled_d * 256.0);
3282 // reciprocal root r.
3283 r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
3284 }
3285 // r in units of 1/256 rounded to nearest.
3286 int32_t s = static_cast<int>(256.0 * r + 0.5);
3287 double estimate = static_cast<double>(s) / 256.0;
3288 ASSERT((estimate >= 1.0) && (estimate <= (511.0 / 256.0)));
3289
3290 // result = 0 : result_exp<7:0> : estimate<51:29>
3291 int32_t result_bits =
3292 ((result_exp & 0xff) << 23) |
3293 ((bit_cast<uint64_t, double>(estimate) >> 29) & 0x7fffff);
3294 return bit_cast<float, int32_t>(result_bits);
3295}
3296
3297static float arm_recip_estimate(float a) {
3298 // From the ARM Architecture Reference Manual A2-85.
3299 if (isinf(a) || (fabs(a) >= exp2f(126)))
3300 return 0.0;
3301 else if (a == 0.0)
3302 return kPosInfinity;
3303 else if (isnan(a))
3304 return a;
3305
3306 uint32_t a_bits = bit_cast<uint32_t, float>(a);
3307 // scaled = '0011 1111 1110' : a<22:0> : Zeros(29)
3308 uint64_t scaled = (static_cast<uint64_t>(0x3fe) << 52) |
3309 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3310 // result_exp = 253 - UInt(a<30:23>)
3311 int32_t result_exp = 253 - ((a_bits >> 23) & 0xff);
3312 ASSERT((result_exp >= 1) && (result_exp <= 252));
3313
3314 double scaled_d = bit_cast<double, uint64_t>(scaled);
3315 ASSERT((scaled_d >= 0.5) && (scaled_d < 1.0));
3316
3317 // a in units of 1/512 rounded down.
3318 int32_t q = static_cast<int32_t>(scaled_d * 512.0);
3319 // reciprocal r.
3320 double r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
3321 // r in units of 1/256 rounded to nearest.
3322 int32_t s = static_cast<int32_t>(256.0 * r + 0.5);
3323 double estimate = static_cast<double>(s) / 256.0;
3324 ASSERT((estimate >= 1.0) && (estimate <= (511.0 / 256.0)));
3325
3326 // result = sign : result_exp<7:0> : estimate<51:29>
3327 int32_t result_bits =
3328 (a_bits & 0x80000000) | ((result_exp & 0xff) << 23) |
3329 ((bit_cast<uint64_t, double>(estimate) >> 29) & 0x7fffff);
3330 return bit_cast<float, int32_t>(result_bits);
3331}
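
// Sketch of how these table-style estimates (roughly 8 significant bits) are
// refined in practice: the vrecps step in DecodeSIMDThreeSame computes
// 2 - a*b, which is one Newton-Raphson correction for 1/x. RefineReciprocal
// is an illustrative helper, not part of the simulator.
static float RefineReciprocal(float x) {
  float r = arm_recip_estimate(x);  // Initial ~8-bit estimate.
  r = r * (2.0f - x * r);           // One vrecps-style step roughly doubles the accuracy.
  return r;
}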
3332
3333void Simulator::DecodeSIMDTwoReg(Instr* instr) {
3334 const int32_t Q = instr->Bit(30);
3335 const int32_t U = instr->Bit(29);
3336 const int32_t op = instr->Bits(12, 5);
3337 const int32_t sz = instr->Bits(22, 2);
3338 const VRegister vd = instr->VdField();
3339 const VRegister vn = instr->VnField();
3340
3341 if (Q != 1) {
3342 UnimplementedInstruction(instr);
3343 return;
3344 }
3345
3346 if ((U == 1) && (op == 5)) {
3347 // Format(instr, "vnot 'vd, 'vn");
3348 for (int i = 0; i < 2; i++) {
3349 set_vregisterd(vd, i, ~get_vregisterd(vn, i));
3350 }
3351 } else if ((U == 0) && (op == 0xf)) {
3352 if (sz == 2) {
3353 // Format(instr, "vabss 'vd, 'vn");
3354 for (int i = 0; i < 4; i++) {
3355 const int32_t vn_val = get_vregisters(vn, i);
3356 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3357 set_vregisters(vd, i, bit_cast<int32_t, float>(fabsf(vn_flt)));
3358 }
3359 } else if (sz == 3) {
3360 // Format(instr, "vabsd 'vd, 'vn");
3361 for (int i = 0; i < 2; i++) {
3362 const int64_t vn_val = get_vregisterd(vn, i);
3363 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3364 set_vregisterd(vd, i, bit_cast<int64_t, double>(fabs(vn_dbl)));
3365 }
3366 } else {
3367 UnimplementedInstruction(instr);
3368 }
3369 } else if ((U == 1) && (op == 0xf)) {
3370 if (sz == 2) {
3371 // Format(instr, "vnegs 'vd, 'vn");
3372 for (int i = 0; i < 4; i++) {
3373 const int32_t vn_val = get_vregisters(vn, i);
3374 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3375 set_vregisters(vd, i, bit_cast<int32_t, float>(-vn_flt));
3376 }
3377 } else if (sz == 3) {
3378 // Format(instr, "vnegd 'vd, 'vn");
3379 for (int i = 0; i < 2; i++) {
3380 const int64_t vn_val = get_vregisterd(vn, i);
3381 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3382 set_vregisterd(vd, i, bit_cast<int64_t, double>(-vn_dbl));
3383 }
3384 } else {
3385 UnimplementedInstruction(instr);
3386 }
3387 } else if ((U == 1) && (op == 0x1f)) {
3388 if (sz == 2) {
3389 // Format(instr, "vsqrts 'vd, 'vn");
3390 for (int i = 0; i < 4; i++) {
3391 const int32_t vn_val = get_vregisters(vn, i);
3392 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3393 set_vregisters(vd, i, bit_cast<int32_t, float>(sqrtf(vn_flt)));
3394 }
3395 } else if (sz == 3) {
3396 // Format(instr, "vsqrtd 'vd, 'vn");
3397 for (int i = 0; i < 2; i++) {
3398 const int64_t vn_val = get_vregisterd(vn, i);
3399 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3400 set_vregisterd(vd, i, bit_cast<int64_t, double>(sqrt(vn_dbl)));
3401 }
3402 } else {
3403 UnimplementedInstruction(instr);
3404 }
3405 } else if ((U == 0) && (op == 0x1d)) {
3406 if (sz != 2) {
3407 UnimplementedInstruction(instr);
3408 return;
3409 }
3410 // Format(instr, "vrecpes 'vd, 'vn");
3411 for (int i = 0; i < 4; i++) {
3412 const int32_t vn_val = get_vregisters(vn, i);
3413 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3414 const float re = arm_recip_estimate(vn_flt);
3415 set_vregisters(vd, i, bit_cast<int32_t, float>(re));
3416 }
3417 } else if ((U == 1) && (op == 0x1d)) {
3418 if (sz != 2) {
3419 UnimplementedInstruction(instr);
3420 return;
3421 }
3422 // Format(instr, "vrsqrtes 'vd, 'vn");
3423 for (int i = 0; i < 4; i++) {
3424 const int32_t vn_val = get_vregisters(vn, i);
3425 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3426 const float re = arm_reciprocal_sqrt_estimate(vn_flt);
3427 set_vregisters(vd, i, bit_cast<int32_t, float>(re));
3428 }
3429 } else {
3430 UnimplementedInstruction(instr);
3431 }
3432}
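// Summary of the (U, op, sz) combinations handled above (Q == 1, i.e. 128-bit
// operands only): U=1/op=0x05 is vnot; op=0x0f is vabs (U=0) or vneg (U=1);
// U=1/op=0x1f is vsqrt; op=0x1d is the reciprocal (U=0) or reciprocal square
// root (U=1) estimate. sz selects four single-precision lanes (sz == 2) or two
// double-precision lanes (sz == 3); the estimates are single-precision only.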
3433
3434void Simulator::DecodeDPSimd1(Instr* instr) {
3435 if (instr->IsSIMDCopyOp()) {
3436 DecodeSIMDCopy(instr);
3437 } else if (instr->IsSIMDThreeSameOp()) {
3438 DecodeSIMDThreeSame(instr);
3439 } else if (instr->IsSIMDTwoRegOp()) {
3440 DecodeSIMDTwoReg(instr);
3441 } else {
3442 UnimplementedInstruction(instr);
3443 }
3444}
3445
3446void Simulator::DecodeFPImm(Instr* instr) {
3447 if ((instr->Bit(31) != 0) || (instr->Bit(29) != 0) || (instr->Bit(23) != 0) ||
3448 (instr->Bits(5, 5) != 0)) {
3449 UnimplementedInstruction(instr);
3450 return;
3451 }
3452 if (instr->Bit(22) == 1) {
3453 // Double.
3454 // Format(instr, "fmovd 'vd, #'immd");
3455 const VRegister vd = instr->VdField();
3456 const int64_t immd = Instr::VFPExpandImm(instr->Imm8Field());
3457 set_vregisterd(vd, 0, immd);
3458 set_vregisterd(vd, 1, 0);
3459 } else {
3460 // Single.
3461 UnimplementedInstruction(instr);
3462 }
3463}
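// Only the double-precision form of FMOV (immediate) is simulated: the 8-bit
// immediate is expanded with Instr::VFPExpandImm and the upper 64 bits of vd
// are cleared. As an illustration (per the ARM VFPExpandImm encoding, which is
// defined elsewhere), imm8 == 0x70 expands to the double 1.0.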
3464
3465void Simulator::DecodeFPIntCvt(Instr* instr) {
3466 const VRegister vd = instr->VdField();
3467 const VRegister vn = instr->VnField();
3468 const Register rd = instr->RdField();
3469 const Register rn = instr->RnField();
3470
3471 if (instr->Bit(29) != 0) {
3472 UnimplementedInstruction(instr);
3473 return;
3474 }
3475
3476 if ((instr->SFField() == 0) && (instr->Bits(22, 2) == 0)) {
3477 if (instr->Bits(16, 5) == 6) {
3478 // Format(instr, "fmovrs'sf 'rd, 'vn");
3479 const int32_t vn_val = get_vregisters(vn, 0);
3480 set_wregister(rd, vn_val, R31IsZR);
3481 } else if (instr->Bits(16, 5) == 7) {
3482 // Format(instr, "fmovsr'sf 'vd, 'rn");
3483 const int32_t rn_val = get_wregister(rn, R31IsZR);
3484 set_vregisters(vd, 0, rn_val);
3485 set_vregisters(vd, 1, 0);
3486 set_vregisters(vd, 2, 0);
3487 set_vregisters(vd, 3, 0);
3488 } else {
3489 UnimplementedInstruction(instr);
3490 }
3491 } else if (instr->Bits(22, 2) == 1) {
3492 if (instr->Bits(16, 5) == 2) {
3493 // Format(instr, "scvtfd'sf 'vd, 'rn");
3494 const int64_t rn_val64 = get_register(rn, instr->RnMode());
3495 const int32_t rn_val32 = get_wregister(rn, instr->RnMode());
3496 const double vn_dbl = (instr->SFField() == 1)
3497 ? static_cast<double>(rn_val64)
3498 : static_cast<double>(rn_val32);
3499 set_vregisterd(vd, 0, bit_cast<int64_t, double>(vn_dbl));
3500 set_vregisterd(vd, 1, 0);
3501 } else if (instr->Bits(16, 5) == 6) {
3502 // Format(instr, "fmovrd'sf 'rd, 'vn");
3503 const int64_t vn_val = get_vregisterd(vn, 0);
3504 set_register(instr, rd, vn_val, R31IsZR);
3505 } else if (instr->Bits(16, 5) == 7) {
3506 // Format(instr, "fmovdr'sf 'vd, 'rn");
3507 const int64_t rn_val = get_register(rn, R31IsZR);
3508 set_vregisterd(vd, 0, rn_val);
3509 set_vregisterd(vd, 1, 0);
3510 } else if ((instr->Bits(16, 5) == 8) || (instr->Bits(16, 5) == 16) ||
3511 (instr->Bits(16, 5) == 24)) {
3512 const intptr_t max = instr->Bit(31) == 1 ? INT64_MAX : INT32_MAX;
3513 const intptr_t min = instr->Bit(31) == 1 ? INT64_MIN : INT32_MIN;
3514 double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3515 switch (instr->Bits(16, 5)) {
3516 case 8:
3517 // Format(instr, "fcvtps'sf 'rd, 'vn");
3518 vn_val = ceil(vn_val);
3519 break;
3520 case 16:
3521 // Format(instr, "fcvtms'sf 'rd, 'vn");
3522 vn_val = floor(vn_val);
3523 break;
3524 case 24:
3525 // Format(instr, "fcvtzs'sf 'rd, 'vn");
3526 break;
3527 }
3528 int64_t result;
3529 if (vn_val >= static_cast<double>(max)) {
3530 result = max;
3531 } else if (vn_val <= static_cast<double>(min)) {
3532 result = min;
3533 } else {
3534 result = static_cast<int64_t>(vn_val);
3535 }
3536 if (instr->Bit(31) == 1) {
3537 set_register(instr, rd, result, instr->RdMode());
3538 } else {
3539 set_register(instr, rd, result & 0xffffffffll, instr->RdMode());
3540 }
3541 } else {
3542 UnimplementedInstruction(instr);
3543 }
3544 } else {
3545 UnimplementedInstruction(instr);
3546 }
3547}
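// The cases above cover moves between general-purpose and FP registers
// (fmovrs/fmovsr for 32-bit, fmovrd/fmovdr for 64-bit), scvtf (integer to
// double), and the fcvtps/fcvtms/fcvtzs conversions. The conversions round
// with ceil, floor, or truncation respectively, and saturate out-of-range
// inputs to INT64_MAX/INT64_MIN (sf == 1) or INT32_MAX/INT32_MIN (sf == 0).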
3548
3549void Simulator::DecodeFPOneSource(Instr* instr) {
3550 const int opc = instr->Bits(15, 6);
3551 const VRegister vd = instr->VdField();
3552 const VRegister vn = instr->VnField();
3553 const int64_t vn_val = get_vregisterd(vn, 0);
3554 const int32_t vn_val32 = vn_val & kWRegMask;
3555 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3556 const float vn_flt = bit_cast<float, int32_t>(vn_val32);
3557
3558 if ((opc != 5) && (instr->Bit(22) != 1)) {
3559 // Source is interpreted as single-precision only if we're doing a
3560 // conversion from single -> double.
3561 UnimplementedInstruction(instr);
3562 return;
3563 }
3564
3565 int64_t res_val = 0;
3566 switch (opc) {
3567 case 0:
3568 // Format("fmovdd 'vd, 'vn");
3569 res_val = get_vregisterd(vn, 0);
3570 break;
3571 case 1:
3572 // Format("fabsd 'vd, 'vn");
3573 res_val = bit_cast<int64_t, double>(fabs(vn_dbl));
3574 break;
3575 case 2:
3576 // Format("fnegd 'vd, 'vn");
3577 res_val = bit_cast<int64_t, double>(-vn_dbl);
3578 break;
3579 case 3:
3580 // Format("fsqrtd 'vd, 'vn");
3581 res_val = bit_cast<int64_t, double>(sqrt(vn_dbl));
3582 break;
3583 case 4: {
3584 // Format(instr, "fcvtsd 'vd, 'vn");
3585 const uint32_t val =
3586 bit_cast<uint32_t, float>(static_cast<float>(vn_dbl));
3587 res_val = static_cast<int64_t>(val);
3588 break;
3589 }
3590 case 5:
3591 // Format(instr, "fcvtds 'vd, 'vn");
3592 res_val = bit_cast<int64_t, double>(static_cast<double>(vn_flt));
3593 break;
3594 default:
3595 UnimplementedInstruction(instr);
3596 break;
3597 }
3598
3599 set_vregisterd(vd, 0, res_val);
3600 set_vregisterd(vd, 1, 0);
3601}
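// opc mapping for the one-source ops above: 0 = fmovdd, 1 = fabsd, 2 = fnegd,
// 3 = fsqrtd, 4 = fcvtsd (double to single, result in the low 32 bits),
// 5 = fcvtds (single to double). Only opc 5 treats its source as single
// precision; for every other opc the guard at the top requires Bit(22) == 1,
// i.e. a double-precision source.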
3602
3603void Simulator::DecodeFPTwoSource(Instr* instr) {
3604 if (instr->Bits(22, 2) != 1) {
3605 UnimplementedInstruction(instr);
3606 return;
3607 }
3608 const VRegister vd = instr->VdField();
3609 const VRegister vn = instr->VnField();
3610 const VRegister vm = instr->VmField();
3611 const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3612 const double vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
3613 const int opc = instr->Bits(12, 4);
3614 double result;
3615
3616 switch (opc) {
3617 case 0:
3618 // Format(instr, "fmuld 'vd, 'vn, 'vm");
3619 result = vn_val * vm_val;
3620 break;
3621 case 1:
3622 // Format(instr, "fdivd 'vd, 'vn, 'vm");
3623 result = vn_val / vm_val;
3624 break;
3625 case 2:
3626 // Format(instr, "faddd 'vd, 'vn, 'vm");
3627 result = vn_val + vm_val;
3628 break;
3629 case 3:
3630 // Format(instr, "fsubd 'vd, 'vn, 'vm");
3631 result = vn_val - vm_val;
3632 break;
3633 default:
3634 UnimplementedInstruction(instr);
3635 return;
3636 }
3637
3638 set_vregisterd(vd, 0, bit_cast<int64_t, double>(result));
3639 set_vregisterd(vd, 1, 0);
3640}
3641
3642void Simulator::DecodeFPCompare(Instr* instr) {
3643 const VRegister vn = instr->VnField();
3644 const VRegister vm = instr->VmField();
3645 const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3646 double vm_val;
3647
3648 if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 0)) {
3649 // Format(instr, "fcmpd 'vn, 'vm");
3650 vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
3651 } else if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 1)) {
3652 if (instr->VmField() == V0) {
3653 // Format(instr, "fcmpd 'vn, #0.0");
3654 vm_val = 0.0;
3655 } else {
3656 UnimplementedInstruction(instr);
3657 return;
3658 }
3659 } else {
3660 UnimplementedInstruction(instr);
3661 return;
3662 }
3663
3664 n_flag_ = false;
3665 z_flag_ = false;
3666 c_flag_ = false;
3667 v_flag_ = false;
3668
3669 if (isnan(vn_val) || isnan(vm_val)) {
3670 c_flag_ = true;
3671 v_flag_ = true;
3672 } else if (vn_val == vm_val) {
3673 z_flag_ = true;
3674 c_flag_ = true;
3675 } else if (vn_val < vm_val) {
3676 n_flag_ = true;
3677 } else {
3678 c_flag_ = true;
3679 }
3680}
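// The flag settings above follow the AArch64 FCMP convention: an unordered
// comparison (either operand NaN) sets C and V, equal sets Z and C, less-than
// sets N, and greater-than sets C only.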
3681
3682void Simulator::DecodeFP(Instr* instr) {
3683 if (instr->IsFPImmOp()) {
3684 DecodeFPImm(instr);
3685 } else if (instr->IsFPIntCvtOp()) {
3686 DecodeFPIntCvt(instr);
3687 } else if (instr->IsFPOneSourceOp()) {
3688 DecodeFPOneSource(instr);
3689 } else if (instr->IsFPTwoSourceOp()) {
3690 DecodeFPTwoSource(instr);
3691 } else if (instr->IsFPCompareOp()) {
3692 DecodeFPCompare(instr);
3693 } else {
3694 UnimplementedInstruction(instr);
3695 }
3696}
3697
3698void Simulator::DecodeDPSimd2(Instr* instr) {
3699 if (instr->IsFPOp()) {
3700 DecodeFP(instr);
3701 } else {
3702 UnimplementedInstruction(instr);
3703 }
3704}
3705
3706// Executes the current instruction.
3707DART_FORCE_INLINE
3708void Simulator::InstructionDecodeImpl(Instr* instr) {
3709 pc_modified_ = false;
3710
3711 if (instr->IsLoadStoreOp()) {
3712 DecodeLoadStore(instr);
3713 } else if (instr->IsDPImmediateOp()) {
3714 DecodeDPImmediate(instr);
3715 } else if (instr->IsCompareBranchOp()) {
3716 DecodeCompareBranch(instr);
3717 } else if (instr->IsDPRegisterOp()) {
3718 DecodeDPRegister(instr);
3719 } else if (instr->IsDPSimd1Op()) {
3720 DecodeDPSimd1(instr);
3721 } else if (instr->IsDPSimd2Op()) {
3722 DecodeDPSimd2(instr);
3723 } else {
3724 UnimplementedInstruction(instr);
3725 }
3726
3727 if (!pc_modified_) {
3728 set_pc(reinterpret_cast<int64_t>(instr) + Instr::kInstrSize);
3729 }
3730}
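// Instructions that redirect control flow (branches, calls, returns, and the
// exception machinery) mark pc_modified_, so the explicit PC advance above is
// applied only to straight-line instructions.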
3731
3732void Simulator::InstructionDecode(Instr* instr) {
3733 if (IsTracingExecution()) {
3734 THR_Print("%" Pu64 " ", icount_);
3735 const uword start = reinterpret_cast<uword>(instr);
3736 const uword end = start + Instr::kInstrSize;
3737 if (FLAG_support_disassembler) {
3738 Disassembler::Disassemble(start, end);
3739 } else {
3740 THR_Print("Disassembler not supported in this mode.\n");
3741 }
3742 }
3743 InstructionDecodeImpl(instr);
3744}
3745
3746void Simulator::Execute() {
3747 if (LIKELY(FLAG_stop_sim_at == ULLONG_MAX &&
3748 FLAG_trace_sim_after == ULLONG_MAX)) {
3749 ExecuteNoTrace();
3750 } else {
3751 ExecuteTrace();
3752 }
3753}
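// With both flags at their defaults, the fast loop in ExecuteNoTrace runs with
// no per-instruction checks. A non-default --stop_sim_at or --trace_sim_after
// routes execution through ExecuteTrace, which checks the instruction-count
// and address stops, handles illegal addresses, and dispatches through the
// tracing InstructionDecode wrapper.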
3754
3755void Simulator::ExecuteNoTrace() {
3756 // Get the PC to simulate. Cannot use the accessor here as we need the
3757 // raw PC value and not the one used as input to arithmetic instructions.
3758 uword program_counter = get_pc();
3759
3760 // Fast version of the dispatch loop without checking whether the simulator
3761 // should be stopping at a particular executed instruction.
3762 while (program_counter != kEndSimulatingPC) {
3763 Instr* instr = reinterpret_cast<Instr*>(program_counter);
3764 icount_++;
3765 InstructionDecodeImpl(instr);
3766 program_counter = get_pc();
3767 }
3768}
3769
3770void Simulator::ExecuteTrace() {
3771 // Get the PC to simulate. Cannot use the accessor here as we need the
3772 // raw PC value and not the one used as input to arithmetic instructions.
3773 uword program_counter = get_pc();
3774
3775 // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
3776 // we reach the particular instruction count or address.
3777 while (program_counter != kEndSimulatingPC) {
3778 Instr* instr = reinterpret_cast<Instr*>(program_counter);
3779 icount_++;
3780 if (icount_ == FLAG_stop_sim_at) {
3781 SimulatorDebugger dbg(this);
3782 dbg.Stop(instr, "Instruction count reached");
3783 } else if (reinterpret_cast<uint64_t>(instr) == FLAG_stop_sim_at) {
3784 SimulatorDebugger dbg(this);
3785 dbg.Stop(instr, "Instruction address reached");
3786 } else if (IsIllegalAddress(program_counter)) {
3787 HandleIllegalAccess(program_counter, instr);
3788 } else {
3789 InstructionDecode(instr);
3790 }
3791 program_counter = get_pc();
3792 }
3793}
3794
3795int64_t Simulator::Call(int64_t entry,
3796 int64_t parameter0,
3797 int64_t parameter1,
3798 int64_t parameter2,
3799 int64_t parameter3,
3800 bool fp_return,
3801 bool fp_args) {
3802 // Save the SP register before the call so we can restore it.
3803 const intptr_t sp_before_call = get_register(R31, R31IsSP);
3804
3805 // Set up parameters.
3806 if (fp_args) {
3807 set_vregisterd(V0, 0, parameter0);
3808 set_vregisterd(V0, 1, 0);
3809 set_vregisterd(V1, 0, parameter1);
3810 set_vregisterd(V1, 1, 0);
3811 set_vregisterd(V2, 0, parameter2);
3812 set_vregisterd(V2, 1, 0);
3813 set_vregisterd(V3, 0, parameter3);
3814 set_vregisterd(V3, 1, 0);
3815 } else {
3816 set_register(nullptr, R0, parameter0);
3817 set_register(nullptr, R1, parameter1);
3818 set_register(nullptr, R2, parameter2);
3819 set_register(nullptr, R3, parameter3);
3820 }
3821
3822 // Make sure the activation frames are properly aligned.
3823 intptr_t stack_pointer = sp_before_call;
3824 if (OS::ActivationFrameAlignment() > 1) {
3825 stack_pointer =
3826 Utils::RoundDown(stack_pointer, OS::ActivationFrameAlignment());
3827 }
3828 set_register(nullptr, R31, stack_pointer, R31IsSP);
3829
3830 // Prepare to execute the code at entry.
3831 set_pc(entry);
3832 // Put down marker for end of simulation. The simulator will stop simulation
3833 // when the PC reaches this value. By saving the "end simulation" value into
3834 // the LR the simulation stops when returning to this call point.
3835 set_register(nullptr, LR, kEndSimulatingPC);
3836
3837 // Remember the values of callee-saved registers, and set them up with a
3838 // known value so that we are able to check that they are preserved
3839 // properly across Dart execution.
3840 int64_t preserved_vals[kAbiPreservedCpuRegCount];
3841 const double dicount = static_cast<double>(icount_);
3842 const int64_t callee_saved_value = bit_cast<int64_t, double>(dicount);
3843 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
3844 const Register r = static_cast<Register>(i);
3845 preserved_vals[i - kAbiFirstPreservedCpuReg] = get_register(r);
3846 set_register(nullptr, r, callee_saved_value);
3847 }
3848
3849 // Only the bottom half of the V registers must be preserved.
3850 int64_t preserved_dvals[kAbiPreservedFpuRegCount];
3851 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
3852 const VRegister r = static_cast<VRegister>(i);
3853 preserved_dvals[i - kAbiFirstPreservedFpuReg] = get_vregisterd(r, 0);
3854 set_vregisterd(r, 0, callee_saved_value);
3855 set_vregisterd(r, 1, 0);
3856 }
3857
3858 // Start the simulation.
3859 Execute();
3860
3861 // Check that the callee-saved registers have been preserved,
3862 // and restore them with the original value.
3863 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
3864 const Register r = static_cast<Register>(i);
3865 ASSERT(callee_saved_value == get_register(r));
3866 set_register(nullptr, r, preserved_vals[i - kAbiFirstPreservedCpuReg]);
3867 }
3868
3869 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
3870 const VRegister r = static_cast<VRegister>(i);
3871 ASSERT(callee_saved_value == get_vregisterd(r, 0));
3872 set_vregisterd(r, 0, preserved_dvals[i - kAbiFirstPreservedFpuReg]);
3873 set_vregisterd(r, 1, 0);
3874 }
3875
3876 // Restore the SP register and return R0.
3877 set_register(nullptr, R31, sp_before_call, R31IsSP);
3878 int64_t return_value;
3879 if (fp_return) {
3880 return_value = get_vregisterd(V0, 0);
3881 } else {
3882 return_value = get_register(R0);
3883 }
3884 return return_value;
3885}
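// A minimal usage sketch (hypothetical arguments; the real callers live in the
// VM's entry and stub code):
//
//   Simulator* sim = Simulator::Current();
//   const int64_t r0 = sim->Call(reinterpret_cast<int64_t>(entry_point),
//                                arg0, arg1, arg2, arg3,
//                                /*fp_return=*/false, /*fp_args=*/false);
//
// With fp_args true the four parameters are placed in V0-V3 instead of R0-R3,
// and with fp_return true the result is read back from the low half of V0.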
3886
3887void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
3888 // Walk over all setjmp buffers (simulated --> C++ transitions)
3889 // and try to find the setjmp associated with the simulated stack pointer.
3890 SimulatorSetjmpBuffer* buf = last_setjmp_buffer();
3891 while (buf->link() != nullptr && buf->link()->sp() <= sp) {
3892 buf = buf->link();
3893 }
3894 ASSERT(buf != nullptr);
3895
3896 // The C++ caller has not cleaned up the stack memory of C++ frames.
3897 // Prepare for unwinding frames by destroying all the stack resources
3898 // in the previous C++ frames.
3899 StackResource::Unwind(thread);
3900
3901 // Keep the following code in sync with `StubCode::JumpToFrameStub()`.
3902
3903 // Unwind the C++ stack and continue simulation in the target frame.
3904 set_pc(static_cast<int64_t>(pc));
3905 set_register(nullptr, SP, static_cast<int64_t>(sp));
3906 set_register(nullptr, FP, static_cast<int64_t>(fp));
3907 set_register(nullptr, THR, reinterpret_cast<int64_t>(thread));
3908 set_register(nullptr, R31, thread->saved_stack_limit() - 4096);
3909#if defined(DART_TARGET_OS_FUCHSIA)
3910 set_register(nullptr, R18, thread->saved_shadow_call_stack());
3911#endif
3912 // Set the tag.
3913 thread->set_vm_tag(VMTag::kDartTagId);
3914 // Clear top exit frame.
3915 thread->set_top_exit_frame_info(0);
3916 // Restore pool pointer.
3917 int64_t code =
3918 *reinterpret_cast<int64_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
3919 int64_t pp = FLAG_precompiled_mode
3920 ? static_cast<int64_t>(thread->global_object_pool())
3921 : *reinterpret_cast<int64_t*>(
3922 code + Code::object_pool_offset() - kHeapObjectTag);
3923 pp -= kHeapObjectTag; // In the PP register, the pool pointer is untagged.
3924 set_register(nullptr, CODE_REG, code);
3925 set_register(nullptr, PP, pp);
3926 set_register(
3927 nullptr, HEAP_BITS,
3928 (thread->write_barrier_mask() << 32) | (thread->heap_base() >> 32));
3929 set_register(nullptr, NULL_REG, static_cast<int64_t>(Object::null()));
3930 if (FLAG_precompiled_mode) {
3931 set_register(nullptr, DISPATCH_TABLE_REG,
3932 reinterpret_cast<int64_t>(thread->dispatch_table_array()));
3933 }
3934
3935 buf->Longjmp();
3936}
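// In short: JumpToFrame locates the SimulatorSetjmpBuffer associated with the
// target frame's simulated stack pointer, unwinds the intervening C++
// StackResources, reloads the Dart ABI registers (SP, FP, THR, PP, CODE_REG,
// NULL_REG, HEAP_BITS, and DISPATCH_TABLE_REG in precompiled mode), and then
// longjmps back into the simulator's dispatch loop at the requested pc.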
3937
3938} // namespace dart
3939
3940#endif  // defined(USING_SIMULATOR)
3941
3942#endif  // defined(TARGET_ARCH_ARM64)