Flutter Engine
simulator_arm64.cc
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <setjmp.h> // NOLINT
6#include <stdlib.h>
7
8#include "vm/globals.h"
9#if defined(TARGET_ARCH_ARM64)
10
11// Only build the simulator if not compiling for real ARM hardware.
12#if defined(USING_SIMULATOR)
13
14#include "vm/simulator.h"
15
16#include "vm/compiler/assembler/disassembler.h"
17#include "vm/constants.h"
18#include "vm/image_snapshot.h"
19#include "vm/native_arguments.h"
20#include "vm/os_thread.h"
21#include "vm/stack_frame.h"
22
23namespace dart {
24
25// constants_arm64.h does not define LR constant to prevent accidental direct
26// use of it during code generation. However using LR directly is okay in this
27// file because it is a simulator.
28constexpr Register LR = LR_DO_NOT_USE_DIRECTLY;
29
30DEFINE_FLAG(uint64_t,
31 trace_sim_after,
32 ULLONG_MAX,
33 "Trace simulator execution after instruction count reached.");
34DEFINE_FLAG(uint64_t,
35 stop_sim_at,
36 ULLONG_MAX,
37 "Instruction address or instruction count to stop simulator at.");
38
39DEFINE_FLAG(bool,
40 sim_allow_unaligned_accesses,
41 true,
42 "Allow unaligned accesses to Normal memory.");
43
44// This macro provides a platform independent use of sscanf. The reason for
45// SScanF not being implemented in a platform independent way through
46// OS in the same way as SNPrint is that the Windows C Run-Time
47// Library does not provide vsscanf.
48#define SScanF sscanf // NOLINT
49
50// SimulatorSetjmpBuffer objects are linked together, and the last created one
51// is referenced by the Simulator. When an exception is thrown, the exception
52// runtime looks at where to jump and finds the corresponding
53// SimulatorSetjmpBuffer based on the stack pointer of the exception handler.
54// The runtime then does a Longjmp on that buffer to return to the simulator.
55class SimulatorSetjmpBuffer {
56 public:
57 void Longjmp() {
58 // "This" is now the last setjmp buffer.
59 simulator_->set_last_setjmp_buffer(this);
60 longjmp(buffer_, 1);
61 }
62
63 explicit SimulatorSetjmpBuffer(Simulator* sim) {
64 simulator_ = sim;
65 link_ = sim->last_setjmp_buffer();
66 sim->set_last_setjmp_buffer(this);
67 sp_ = static_cast<uword>(sim->get_register(R31, R31IsSP));
68 }
69
70 ~SimulatorSetjmpBuffer() {
71 ASSERT(simulator_->last_setjmp_buffer() == this);
72 simulator_->set_last_setjmp_buffer(link_);
73 }
74
75 SimulatorSetjmpBuffer* link() { return link_; }
76
77 uword sp() { return sp_; }
78
79 private:
80 uword sp_;
81 Simulator* simulator_;
82 SimulatorSetjmpBuffer* link_;
83 jmp_buf buffer_;
84
85 friend class Simulator;
86};
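// A minimal sketch (not part of the original file) of the pattern this class
// enables; it mirrors the use in DoRedirectedCall() further below:
//
//   SimulatorSetjmpBuffer buffer(this);   // Links itself as the last buffer.
//   if (setjmp(buffer.buffer_) == 0) {
//     // Run the redirected host call; it may throw a Dart exception.
//   } else {
//     // Reached via buffer.Longjmp(): the exception runtime picked this
//     // buffer by comparing the handler's stack pointer against buffer.sp().
//   }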
87
88// The SimulatorDebugger class is used by the simulator while debugging
89// simulated ARM64 code.
90class SimulatorDebugger {
91 public:
92 explicit SimulatorDebugger(Simulator* sim);
93 ~SimulatorDebugger();
94
95 void Stop(Instr* instr, const char* message);
96 void Debug();
97 char* ReadLine(const char* prompt);
98
99 private:
100 Simulator* sim_;
101
102 bool GetValue(char* desc, uint64_t* value);
103 bool GetSValue(char* desc, uint32_t* value);
104 bool GetDValue(char* desc, uint64_t* value);
105 bool GetQValue(char* desc, simd_value_t* value);
106
107 static TokenPosition GetApproximateTokenIndex(const Code& code, uword pc);
108
109 static void PrintDartFrame(uword vm_instructions,
110 uword isolate_instructions,
111 uword pc,
112 uword fp,
113 uword sp,
114 const Function& function,
115 TokenPosition token_pos,
116 bool is_optimized,
117 bool is_inlined);
118 void PrintBacktrace();
119
120 // Set or delete a breakpoint. Returns true if successful.
121 bool SetBreakpoint(Instr* breakpc);
122 bool DeleteBreakpoint(Instr* breakpc);
123
124 // Undo and redo all breakpoints. This is needed to bracket disassembly and
125 // execution to skip past breakpoints when run from the debugger.
126 void UndoBreakpoints();
127 void RedoBreakpoints();
128};
129
130SimulatorDebugger::SimulatorDebugger(Simulator* sim) {
131 sim_ = sim;
132}
133
134SimulatorDebugger::~SimulatorDebugger() {}
135
136void SimulatorDebugger::Stop(Instr* instr, const char* message) {
137 OS::PrintErr("Simulator hit %s\n", message);
138 Debug();
139}
140
141static Register LookupCpuRegisterByName(const char* name) {
142 static const char* const kNames[] = {
143 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
144 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
145 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
146 "r24", "r25", "r26", "r27", "r28", "r29", "r30",
147
148 "ip0", "ip1", "pp", "fp", "lr", "sp", "zr",
149 };
150 static const Register kRegisters[] = {
151 R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10,
152 R11, R12, R13, R14, R15, R16, R17, R18, R19, R20, R21,
153 R22, R23, R24, R25, R26, R27, R28, R29, R30,
154
155 IP0, IP1, PP, FP, LR, R31, ZR,
156 };
157 ASSERT(ARRAY_SIZE(kNames) == ARRAY_SIZE(kRegisters));
158 for (unsigned i = 0; i < ARRAY_SIZE(kNames); i++) {
159 if (strcmp(kNames[i], name) == 0) {
160 return kRegisters[i];
161 }
162 }
163 return kNoRegister;
164}
165
166static VRegister LookupVRegisterByName(const char* name) {
167 int reg_nr = -1;
168 bool ok = SScanF(name, "v%d", &reg_nr);
169 if (ok && (0 <= reg_nr) && (reg_nr < kNumberOfVRegisters)) {
170 return static_cast<VRegister>(reg_nr);
171 }
172 return kNoVRegister;
173}
174
175bool SimulatorDebugger::GetValue(char* desc, uint64_t* value) {
176 Register reg = LookupCpuRegisterByName(desc);
177 if (reg != kNoRegister) {
178 if (reg == ZR) {
179 *value = 0;
180 return true;
181 }
182 *value = sim_->get_register(reg);
183 return true;
184 }
185 if (desc[0] == '*') {
186 uint64_t addr;
187 if (GetValue(desc + 1, &addr)) {
188 if (Simulator::IsIllegalAddress(addr)) {
189 return false;
190 }
191 *value = *(reinterpret_cast<int64_t*>(addr));
192 return true;
193 }
194 }
195 if (strcmp("pc", desc) == 0) {
196 *value = sim_->get_pc();
197 return true;
198 }
199 bool retval = SScanF(desc, "0x%" Px64, value) == 1;
200 if (!retval) {
201 retval = SScanF(desc, "%" Px64, value) == 1;
202 }
203 return retval;
204}
205
206bool SimulatorDebugger::GetSValue(char* desc, uint32_t* value) {
207 VRegister vreg = LookupVRegisterByName(desc);
208 if (vreg != kNoVRegister) {
209 *value = sim_->get_vregisters(vreg, 0);
210 return true;
211 }
212 if (desc[0] == '*') {
213 uint64_t addr;
214 if (GetValue(desc + 1, &addr)) {
215 if (Simulator::IsIllegalAddress(addr)) {
216 return false;
217 }
218 *value = *(reinterpret_cast<uint32_t*>(addr));
219 return true;
220 }
221 }
222 return false;
223}
224
225bool SimulatorDebugger::GetDValue(char* desc, uint64_t* value) {
226 VRegister vreg = LookupVRegisterByName(desc);
227 if (vreg != kNoVRegister) {
228 *value = sim_->get_vregisterd(vreg, 0);
229 return true;
230 }
231 if (desc[0] == '*') {
232 uint64_t addr;
233 if (GetValue(desc + 1, &addr)) {
234 if (Simulator::IsIllegalAddress(addr)) {
235 return false;
236 }
237 *value = *(reinterpret_cast<uint64_t*>(addr));
238 return true;
239 }
240 }
241 return false;
242}
243
244bool SimulatorDebugger::GetQValue(char* desc, simd_value_t* value) {
245 VRegister vreg = LookupVRegisterByName(desc);
246 if (vreg != kNoVRegister) {
247 sim_->get_vregister(vreg, value);
248 return true;
249 }
250 if (desc[0] == '*') {
251 uint64_t addr;
252 if (GetValue(desc + 1, &addr)) {
253 if (Simulator::IsIllegalAddress(addr)) {
254 return false;
255 }
256 *value = *(reinterpret_cast<simd_value_t*>(addr));
257 return true;
258 }
259 }
260 return false;
261}
262
263TokenPosition SimulatorDebugger::GetApproximateTokenIndex(const Code& code,
264 uword pc) {
265 TokenPosition token_pos = TokenPosition::kNoSource;
266 uword pc_offset = pc - code.PayloadStart();
267 const PcDescriptors& descriptors =
268 PcDescriptors::Handle(code.pc_descriptors());
269 PcDescriptors::Iterator iter(descriptors, UntaggedPcDescriptors::kAnyKind);
270 while (iter.MoveNext()) {
271 if (iter.PcOffset() == pc_offset) {
272 return iter.TokenPos();
273 } else if (!token_pos.IsReal() && (iter.PcOffset() > pc_offset)) {
274 token_pos = iter.TokenPos();
275 }
276 }
277 return token_pos;
278}
279
280#if defined(DART_PRECOMPILED_RUNTIME)
281static const char* ImageName(uword vm_instructions,
282 uword isolate_instructions,
283 uword pc,
284 intptr_t* offset) {
285 const Image vm_image(vm_instructions);
286 const Image isolate_image(isolate_instructions);
287 if (vm_image.contains(pc)) {
288 *offset = pc - vm_instructions;
289 return kVmSnapshotInstructionsAsmSymbol;
290 } else if (isolate_image.contains(pc)) {
291 *offset = pc - isolate_instructions;
292 return kIsolateSnapshotInstructionsAsmSymbol;
293 } else {
294 *offset = 0;
295 return "<unknown>";
296 }
297}
298#endif
299
300void SimulatorDebugger::PrintDartFrame(uword vm_instructions,
301 uword isolate_instructions,
302 uword pc,
303 uword fp,
304 uword sp,
305 const Function& function,
306 TokenPosition token_pos,
307 bool is_optimized,
308 bool is_inlined) {
309 const Script& script = Script::Handle(function.script());
310 const String& func_name = String::Handle(function.QualifiedScrubbedName());
311 const String& url = String::Handle(script.url());
312 intptr_t line, column;
313 if (script.GetTokenLocation(token_pos, &line, &column)) {
314 OS::PrintErr(
315 "pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s%s (%s:%" Pd ":%" Pd ")", pc,
316 fp, sp, is_optimized ? (is_inlined ? "inlined " : "optimized ") : "",
317 func_name.ToCString(), url.ToCString(), line, column);
318 } else {
319 OS::PrintErr("pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s%s (%s)", pc, fp, sp,
320 is_optimized ? (is_inlined ? "inlined " : "optimized ") : "",
321 func_name.ToCString(), url.ToCString());
322 }
323#if defined(DART_PRECOMPILED_RUNTIME)
324 intptr_t offset;
325 auto const symbol_name =
326 ImageName(vm_instructions, isolate_instructions, pc, &offset);
327 OS::PrintErr(" %s+0x%" Px "", symbol_name, offset);
328#endif
329 OS::PrintErr("\n");
330}
331
332void SimulatorDebugger::PrintBacktrace() {
333 auto const T = Thread::Current();
334 auto const Z = T->zone();
335#if defined(DART_PRECOMPILED_RUNTIME)
336 auto const vm_instructions = reinterpret_cast<uword>(
337 Dart::vm_isolate_group()->source()->snapshot_instructions);
338 auto const isolate_instructions = reinterpret_cast<uword>(
339 T->isolate_group()->source()->snapshot_instructions);
340 OS::PrintErr("vm_instructions=0x%" Px ", isolate_instructions=0x%" Px "\n",
341 vm_instructions, isolate_instructions);
342#else
343 const uword vm_instructions = 0;
344 const uword isolate_instructions = 0;
345#endif
346 StackFrameIterator frames(sim_->get_register(FP), sim_->get_register(SP),
347 sim_->get_pc(),
348 ValidationPolicy::kDontValidateFrames, T,
349 StackFrameIterator::kNoCrossThreadIteration);
350 StackFrame* frame = frames.NextFrame();
351 ASSERT(frame != nullptr);
352 Function& function = Function::Handle(Z);
353 Function& inlined_function = Function::Handle(Z);
354 Code& code = Code::Handle(Z);
355 Code& unoptimized_code = Code::Handle(Z);
356 while (frame != nullptr) {
357 if (frame->IsDartFrame()) {
358 code = frame->LookupDartCode();
359 function = code.function();
360 if (code.is_optimized()) {
361 // For optimized frames, extract all the inlined functions if any
362 // into the stack trace.
363 InlinedFunctionsIterator it(code, frame->pc());
364 while (!it.Done()) {
365 // Print each inlined frame with its pc in the corresponding
366 // unoptimized frame.
367 inlined_function = it.function();
368 unoptimized_code = it.code();
369 uword unoptimized_pc = it.pc();
370 it.Advance();
371 if (!it.Done()) {
372 PrintDartFrame(
373 vm_instructions, isolate_instructions, unoptimized_pc,
374 frame->fp(), frame->sp(), inlined_function,
375 GetApproximateTokenIndex(unoptimized_code, unoptimized_pc),
376 true, true);
377 }
378 }
379 // Print the optimized inlining frame below.
380 }
381 PrintDartFrame(vm_instructions, isolate_instructions, frame->pc(),
382 frame->fp(), frame->sp(), function,
383 GetApproximateTokenIndex(code, frame->pc()),
384 code.is_optimized(), false);
385 } else {
386 OS::PrintErr("pc=0x%" Px " fp=0x%" Px " sp=0x%" Px " %s frame",
387 frame->pc(), frame->fp(), frame->sp(),
388 frame->IsEntryFrame() ? "entry"
389 : frame->IsExitFrame() ? "exit"
390 : frame->IsStubFrame() ? "stub"
391 : "invalid");
392#if defined(DART_PRECOMPILED_RUNTIME)
393 intptr_t offset;
394 auto const symbol_name = ImageName(vm_instructions, isolate_instructions,
395 frame->pc(), &offset);
396 OS::PrintErr(" %s+0x%" Px "", symbol_name, offset);
397#endif
398 OS::PrintErr("\n");
399 }
400 frame = frames.NextFrame();
401 }
402}
403
404bool SimulatorDebugger::SetBreakpoint(Instr* breakpc) {
405 // Check if a breakpoint can be set. If not, return without any side effects.
406 if (sim_->break_pc_ != nullptr) {
407 return false;
408 }
409
410 // Set the breakpoint.
411 sim_->break_pc_ = breakpc;
412 sim_->break_instr_ = breakpc->InstructionBits();
413 // Not setting the breakpoint instruction in the code itself. It will be set
414 // when the debugger shell continues.
415 return true;
416}
417
418bool SimulatorDebugger::DeleteBreakpoint(Instr* breakpc) {
419 if (sim_->break_pc_ != nullptr) {
420 sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
421 }
422
423 sim_->break_pc_ = nullptr;
424 sim_->break_instr_ = 0;
425 return true;
426}
427
428void SimulatorDebugger::UndoBreakpoints() {
429 if (sim_->break_pc_ != nullptr) {
430 sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
431 }
432}
433
434void SimulatorDebugger::RedoBreakpoints() {
435 if (sim_->break_pc_ != nullptr) {
436 sim_->break_pc_->SetInstructionBits(Instr::kSimulatorBreakpointInstruction);
437 }
438}
439
440void SimulatorDebugger::Debug() {
441 uintptr_t last_pc = -1;
442 bool done = false;
443
444#define COMMAND_SIZE 63
445#define ARG_SIZE 255
446
447#define STR(a) #a
448#define XSTR(a) STR(a)
449
450 char cmd[COMMAND_SIZE + 1];
451 char arg1[ARG_SIZE + 1];
452 char arg2[ARG_SIZE + 1];
453
454 // make sure to have a proper terminating character if reaching the limit
455 cmd[COMMAND_SIZE] = 0;
456 arg1[ARG_SIZE] = 0;
457 arg2[ARG_SIZE] = 0;
458
459 // Undo all set breakpoints while running in the debugger shell. This will
460 // make them invisible to all commands.
461 UndoBreakpoints();
462
463 while (!done) {
464 if (last_pc != sim_->get_pc()) {
465 last_pc = sim_->get_pc();
466 if (Simulator::IsIllegalAddress(last_pc)) {
467 OS::PrintErr("pc is out of bounds: 0x%" Px "\n", last_pc);
468 } else {
469 if (FLAG_support_disassembler) {
470 Disassembler::Disassemble(last_pc, last_pc + Instr::kInstrSize);
471 } else {
472 OS::PrintErr("Disassembler not supported in this mode.\n");
473 }
474 }
475 }
476 char* line = ReadLine("sim> ");
477 if (line == nullptr) {
478 FATAL("ReadLine failed");
479 } else {
480 // Use sscanf to parse the individual parts of the command line. At the
481 // moment no command expects more than two parameters.
482 int args = SScanF(line,
483 "%" XSTR(COMMAND_SIZE) "s "
484 "%" XSTR(ARG_SIZE) "s "
485 "%" XSTR(ARG_SIZE) "s",
486 cmd, arg1, arg2);
487 if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
488 OS::PrintErr(
489 "c/cont -- continue execution\n"
490 "disasm -- disassemble instrs at current pc location\n"
491 " other variants are:\n"
492 " disasm <address>\n"
493 " disasm <address> <number_of_instructions>\n"
494 " by default 10 instrs are disassembled\n"
495 "del -- delete breakpoints\n"
496 "flags -- print flag values\n"
497 "gdb -- transfer control to gdb\n"
498 "h/help -- print this help string\n"
499 "break <address> -- set break point at specified address\n"
500 "p/print <reg or icount or value or *addr> -- print integer\n"
501 "pf/printfloat <vreg or *addr> --print float value\n"
502 "pd/printdouble <vreg or *addr> -- print double value\n"
503 "pq/printquad <vreg or *addr> -- print vector register\n"
504 "po/printobject <*reg or *addr> -- print object\n"
505 "si/stepi -- single step an instruction\n"
506 "trace -- toggle execution tracing mode\n"
507 "bt -- print backtrace\n"
508 "unstop -- if current pc is a stop instr make it a nop\n"
509 "q/quit -- Quit the debugger and exit the program\n");
510 } else if ((strcmp(cmd, "quit") == 0) || (strcmp(cmd, "q") == 0)) {
511 OS::PrintErr("Quitting\n");
512 OS::Exit(0);
513 } else if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
514 sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
515 } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
516 // Execute the one instruction we broke at with breakpoints disabled.
517 sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
518 // Leave the debugger shell.
519 done = true;
520 } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
521 if (args == 2) {
522 uint64_t value;
523 if (strcmp(arg1, "icount") == 0) {
524 value = sim_->get_icount();
525 OS::PrintErr("icount: %" Pu64 " 0x%" Px64 "\n", value, value);
526 } else if (GetValue(arg1, &value)) {
527 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 "\n", arg1, value, value);
528 } else {
529 OS::PrintErr("%s unrecognized\n", arg1);
530 }
531 } else {
532 OS::PrintErr("print <reg or icount or value or *addr>\n");
533 }
534 } else if ((strcmp(cmd, "pf") == 0) || (strcmp(cmd, "printfloat") == 0)) {
535 if (args == 2) {
536 uint32_t value;
537 if (GetSValue(arg1, &value)) {
538 float svalue = bit_cast<float, uint32_t>(value);
539 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, value, value, svalue);
540 } else {
541 OS::PrintErr("%s unrecognized\n", arg1);
542 }
543 } else {
544 OS::PrintErr("printfloat <vreg or *addr>\n");
545 }
546 } else if ((strcmp(cmd, "pd") == 0) ||
547 (strcmp(cmd, "printdouble") == 0)) {
548 if (args == 2) {
549 uint64_t long_value;
550 if (GetDValue(arg1, &long_value)) {
551 double dvalue = bit_cast<double, uint64_t>(long_value);
552 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, long_value,
553 long_value, dvalue);
554 } else {
555 OS::PrintErr("%s unrecognized\n", arg1);
556 }
557 } else {
558 OS::PrintErr("printdouble <vreg or *addr>\n");
559 }
560 } else if ((strcmp(cmd, "pq") == 0) || (strcmp(cmd, "printquad") == 0)) {
561 if (args == 2) {
562 simd_value_t quad_value;
563 if (GetQValue(arg1, &quad_value)) {
564 const int64_t d0 = quad_value.bits.i64[0];
565 const int64_t d1 = quad_value.bits.i64[1];
566 const double dval0 = bit_cast<double, int64_t>(d0);
567 const double dval1 = bit_cast<double, int64_t>(d1);
568 const int32_t s0 = quad_value.bits.i32[0];
569 const int32_t s1 = quad_value.bits.i32[1];
570 const int32_t s2 = quad_value.bits.i32[2];
571 const int32_t s3 = quad_value.bits.i32[3];
572 const float sval0 = bit_cast<float, int32_t>(s0);
573 const float sval1 = bit_cast<float, int32_t>(s1);
574 const float sval2 = bit_cast<float, int32_t>(s2);
575 const float sval3 = bit_cast<float, int32_t>(s3);
576 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, d0, d0,
577 dval0);
578 OS::PrintErr("%s: %" Pu64 " 0x%" Px64 " %.8g\n", arg1, d1, d1,
579 dval1);
580 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s0, s0, sval0);
581 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s1, s1, sval1);
582 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s2, s2, sval2);
583 OS::PrintErr("%s: %d 0x%x %.8g\n", arg1, s3, s3, sval3);
584 } else {
585 OS::PrintErr("%s unrecognized\n", arg1);
586 }
587 } else {
588 OS::PrintErr("printquad <vreg or *addr>\n");
589 }
590 } else if ((strcmp(cmd, "po") == 0) ||
591 (strcmp(cmd, "printobject") == 0)) {
592 if (args == 2) {
593 uint64_t value;
594 // Make the dereferencing '*' optional.
595 if (((arg1[0] == '*') && GetValue(arg1 + 1, &value)) ||
596 GetValue(arg1, &value)) {
597 if (IsolateGroup::Current()->heap()->Contains(value)) {
598 OS::PrintErr("%s: \n", arg1);
599#if defined(DEBUG)
600 const Object& obj = Object::Handle(
601 static_cast<ObjectPtr>(static_cast<uword>(value)));
602 obj.Print();
603#endif // defined(DEBUG)
604 } else {
605 OS::PrintErr("0x%" Px64 " is not an object reference\n", value);
606 }
607 } else {
608 OS::PrintErr("%s unrecognized\n", arg1);
609 }
610 } else {
611 OS::PrintErr("printobject <*reg or *addr>\n");
612 }
613 } else if (strcmp(cmd, "disasm") == 0) {
614 uint64_t start = 0;
615 uint64_t end = 0;
616 if (args == 1) {
617 start = sim_->get_pc();
618 end = start + (10 * Instr::kInstrSize);
619 } else if (args == 2) {
620 if (GetValue(arg1, &start)) {
621 // No length parameter passed, assume 10 instructions.
622 if (Simulator::IsIllegalAddress(start)) {
623 // If start isn't a valid address, warn and use PC instead.
624 OS::PrintErr("First argument yields invalid address: 0x%" Px64
625 "\n",
626 start);
627 OS::PrintErr("Using PC instead\n");
628 start = sim_->get_pc();
629 }
630 end = start + (10 * Instr::kInstrSize);
631 }
632 } else {
633 uint64_t length;
634 if (GetValue(arg1, &start) && GetValue(arg2, &length)) {
635 if (Simulator::IsIllegalAddress(start)) {
636 // If start isn't a valid address, warn and use PC instead.
637 OS::PrintErr("First argument yields invalid address: 0x%" Px64
638 "\n",
639 start);
640 OS::PrintErr("Using PC instead\n");
641 start = sim_->get_pc();
642 }
643 end = start + (length * Instr::kInstrSize);
644 }
645 }
646 if ((start > 0) && (end > start)) {
647 if (FLAG_support_disassembler) {
648 Disassembler::Disassemble(start, end);
649 } else {
650 OS::PrintErr("Disassembler not supported in this mode.\n");
651 }
652 } else {
653 OS::PrintErr("disasm [<address> [<number_of_instructions>]]\n");
654 }
655 } else if (strcmp(cmd, "gdb") == 0) {
656 OS::PrintErr("relinquishing control to gdb\n");
657 OS::DebugBreak();
658 OS::PrintErr("regaining control from gdb\n");
659 } else if (strcmp(cmd, "break") == 0) {
660 if (args == 2) {
661 uint64_t addr;
662 if (GetValue(arg1, &addr)) {
663 if (!SetBreakpoint(reinterpret_cast<Instr*>(addr))) {
664 OS::PrintErr("setting breakpoint failed\n");
665 }
666 } else {
667 OS::PrintErr("%s unrecognized\n", arg1);
668 }
669 } else {
670 OS::PrintErr("break <addr>\n");
671 }
672 } else if (strcmp(cmd, "del") == 0) {
673 if (!DeleteBreakpoint(nullptr)) {
674 OS::PrintErr("deleting breakpoint failed\n");
675 }
676 } else if (strcmp(cmd, "flags") == 0) {
677 OS::PrintErr("APSR: ");
678 OS::PrintErr("N flag: %d; ", static_cast<int>(sim_->n_flag_));
679 OS::PrintErr("Z flag: %d; ", static_cast<int>(sim_->z_flag_));
680 OS::PrintErr("C flag: %d; ", static_cast<int>(sim_->c_flag_));
681 OS::PrintErr("V flag: %d\n", static_cast<int>(sim_->v_flag_));
682 } else if (strcmp(cmd, "unstop") == 0) {
683 intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
684 Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
685 if (stop_instr->IsExceptionGenOp()) {
686 stop_instr->SetInstructionBits(Instr::kNopInstruction);
687 } else {
688 OS::PrintErr("Not at debugger stop.\n");
689 }
690 } else if (strcmp(cmd, "trace") == 0) {
691 if (FLAG_trace_sim_after == ULLONG_MAX) {
692 FLAG_trace_sim_after = sim_->get_icount();
693 OS::PrintErr("execution tracing on\n");
694 } else {
695 FLAG_trace_sim_after = ULLONG_MAX;
696 OS::PrintErr("execution tracing off\n");
697 }
698 } else if (strcmp(cmd, "bt") == 0) {
699 Thread* thread = reinterpret_cast<Thread*>(sim_->get_register(THR));
700 thread->set_execution_state(Thread::kThreadInVM);
701 PrintBacktrace();
702 thread->set_execution_state(Thread::kThreadInGenerated);
703 } else {
704 OS::PrintErr("Unknown command: %s\n", cmd);
705 }
706 }
707 delete[] line;
708 }
709
710 // Add all the breakpoints back to stop execution and enter the debugger
711 // shell when hit.
712 RedoBreakpoints();
713
714#undef COMMAND_SIZE
715#undef ARG_SIZE
716
717#undef STR
718#undef XSTR
719}
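// Example of a hypothetical interactive session with the shell above (the
// address and register value are made up for illustration):
//
//   sim> break 0x10024a0c0
//   sim> c
//   sim> p r0
//   r0: 42 0x2a
//   sim> disasm 0x10024a0c0 5
//   sim> bt
//   sim> q
//   Quitting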
720
721char* SimulatorDebugger::ReadLine(const char* prompt) {
722 char* result = nullptr;
723 char line_buf[256];
724 intptr_t offset = 0;
725 bool keep_going = true;
726 OS::PrintErr("%s", prompt);
727 while (keep_going) {
728 if (fgets(line_buf, sizeof(line_buf), stdin) == nullptr) {
729 // fgets got an error. Just give up.
730 if (result != nullptr) {
731 delete[] result;
732 }
733 return nullptr;
734 }
735 intptr_t len = strlen(line_buf);
736 if (len > 1 && line_buf[len - 2] == '\\' && line_buf[len - 1] == '\n') {
737 // When we read a line that ends with a "\" we remove the escape and
738 // append the remainder.
739 line_buf[len - 2] = '\n';
740 line_buf[len - 1] = 0;
741 len -= 1;
742 } else if ((len > 0) && (line_buf[len - 1] == '\n')) {
743 // Since we read a new line we are done reading the line. This
744 // will exit the loop after copying this buffer into the result.
745 keep_going = false;
746 }
747 if (result == nullptr) {
748 // Allocate the initial result and make room for the terminating '\0'
749 result = new char[len + 1];
750 if (result == nullptr) {
751 // OOM, so cannot readline anymore.
752 return nullptr;
753 }
754 } else {
755 // Allocate a new result with enough room for the new addition.
756 intptr_t new_len = offset + len + 1;
757 char* new_result = new char[new_len];
758 if (new_result == nullptr) {
759 // OOM, free the buffer allocated so far and return nullptr.
760 delete[] result;
761 return nullptr;
762 } else {
763 // Copy the existing input into the new array and set the new
764 // array as the result.
765 memmove(new_result, result, offset);
766 delete[] result;
767 result = new_result;
768 }
769 }
770 // Copy the newly read line into the result.
771 memmove(result + offset, line_buf, len);
772 offset += len;
773 }
774 ASSERT(result != nullptr);
775 result[offset] = '\0';
776 return result;
777}
778
779void Simulator::Init() {}
780
781Simulator::Simulator() : exclusive_access_addr_(0), exclusive_access_value_(0) {
782 // Setup simulator support first. Some of this information is needed to
783 // setup the architecture state.
784 // We allocate the stack here, the size is computed as the sum of
785 // the size specified by the user and the buffer space needed for
786 // handling stack overflow exceptions. To be safe in potential
787 // stack underflows we also add some underflow buffer space.
788 stack_ =
789 new char[(OSThread::GetSpecifiedStackSize() +
790 OSThread::kStackSizeBufferMax + kSimulatorStackUnderflowSize)];
791 // Low address.
792 stack_limit_ = reinterpret_cast<uword>(stack_);
793 // Limit for StackOverflowError.
794 overflow_stack_limit_ = stack_limit_ + OSThread::kStackSizeBufferMax;
795 // High address.
796 stack_base_ = overflow_stack_limit_ + OSThread::GetSpecifiedStackSize();
797
798 pc_modified_ = false;
799 icount_ = 0;
800 break_pc_ = nullptr;
801 break_instr_ = 0;
802 last_setjmp_buffer_ = nullptr;
803
804 // Setup architecture state.
805 // All registers are initialized to zero to start with.
806 for (int i = 0; i < kNumberOfCpuRegisters; i++) {
807 registers_[i] = 0;
808 }
809 n_flag_ = false;
810 z_flag_ = false;
811 c_flag_ = false;
812 v_flag_ = false;
813
814 for (int i = 0; i < kNumberOfVRegisters; i++) {
815 vregisters_[i].bits.i64[0] = 0;
816 vregisters_[i].bits.i64[1] = 0;
817 }
818
819 // The sp is initialized to point to the bottom (high address) of the
820 // allocated stack area.
821 registers_[R31] = stack_base();
822 // The lr and pc are initialized to a known bad value that will cause an
823 // access violation if the simulator ever tries to execute it.
824 registers_[LR] = kBadLR;
825 pc_ = kBadLR;
826}
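// Resulting stack layout, from the assignments above (lowest address first):
//
//   stack_limit_          = stack_                                // underflow guard
//   overflow_stack_limit_ = stack_limit_ + kStackSizeBufferMax    // StackOverflowError limit
//   stack_base_           = overflow_stack_limit_ + GetSpecifiedStackSize()
//
// R31 (SP) starts at stack_base_ and grows downward toward
// overflow_stack_limit_, where the stack-overflow checks trigger.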
827
828Simulator::~Simulator() {
829 delete[] stack_;
830 Isolate* isolate = Isolate::Current();
831 if (isolate != nullptr) {
832 isolate->set_simulator(nullptr);
833 }
834}
835
836// When the generated code calls an external reference we need to catch that in
837// the simulator. The external reference will be a function compiled for the
838// host architecture. We need to call that function instead of trying to
839// execute it with the simulator. We do that by redirecting the external
840// reference to a hlt instruction that is handled by
841// the simulator. We write the original destination of the jump just at a known
842// offset from the hlt instruction so the simulator knows what to call.
843class Redirection {
844 public:
845 uword address_of_hlt_instruction() {
846 return reinterpret_cast<uword>(&hlt_instruction_);
847 }
848
849 uword external_function() const { return external_function_; }
850
851 Simulator::CallKind call_kind() const { return call_kind_; }
852
853 int argument_count() const { return argument_count_; }
854
855 static Redirection* Get(uword external_function,
856 Simulator::CallKind call_kind,
857 int argument_count) {
858 MutexLocker ml(mutex_);
859
860 Redirection* old_head = list_.load(std::memory_order_relaxed);
861 for (Redirection* current = old_head; current != nullptr;
862 current = current->next_) {
863 if (current->external_function_ == external_function) return current;
864 }
865
866 Redirection* redirection =
867 new Redirection(external_function, call_kind, argument_count);
868 redirection->next_ = old_head;
869
870 // Use a memory fence to ensure all pending writes are written at the time
871 // of updating the list head, so the profiling thread always has a valid
872 // list to look at.
873 list_.store(redirection, std::memory_order_release);
874
875 return redirection;
876 }
877
878 static Redirection* FromHltInstruction(Instr* hlt_instruction) {
879 char* addr_of_hlt = reinterpret_cast<char*>(hlt_instruction);
880 char* addr_of_redirection =
881 addr_of_hlt - OFFSET_OF(Redirection, hlt_instruction_);
882 return reinterpret_cast<Redirection*>(addr_of_redirection);
883 }
884
885 // Please note that this function is called by the signal handler of the
886 // profiling thread. It can therefore run at any point in time and is not
887 // allowed to hold any locks - which is precisely the reason why the list is
888 // prepend-only and a memory fence is used when writing the list head [list_]!
889 static uword FunctionForRedirect(uword address_of_hlt) {
890 for (Redirection* current = list_.load(std::memory_order_acquire);
891 current != nullptr; current = current->next_) {
892 if (current->address_of_hlt_instruction() == address_of_hlt) {
893 return current->external_function_;
894 }
895 }
896 return 0;
897 }
898
899 private:
900 Redirection(uword external_function,
901 Simulator::CallKind call_kind,
902 int argument_count)
903 : external_function_(external_function),
904 call_kind_(call_kind),
905 argument_count_(argument_count),
906 hlt_instruction_(Instr::kSimulatorRedirectInstruction),
907 next_(nullptr) {}
908
909 uword external_function_;
910 Simulator::CallKind call_kind_;
911 int argument_count_;
912 uint32_t hlt_instruction_;
913 Redirection* next_;
914 static std::atomic<Redirection*> list_;
915 static Mutex* mutex_;
916};
917
918std::atomic<Redirection*> Redirection::list_ = {nullptr};
919Mutex* Redirection::mutex_ = new Mutex();
920
921uword Simulator::RedirectExternalReference(uword function,
922 CallKind call_kind,
923 int argument_count) {
924 Redirection* redirection =
925 Redirection::Get(function, call_kind, argument_count);
926 return redirection->address_of_hlt_instruction();
927}
928
929uword Simulator::FunctionForRedirect(uword redirect) {
930 return Redirection::FunctionForRedirect(redirect);
931}
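// Illustrative sketch (not in the original file) of how the redirection
// machinery is used; `HostHelper` is a hypothetical host-compiled function:
//
//   extern "C" int64_t HostHelper(int64_t a, int64_t b);
//
//   uword entry = Simulator::RedirectExternalReference(
//       reinterpret_cast<uword>(&HostHelper),
//       Simulator::kLeafRuntimeCall, /*argument_count=*/2);
//
// Generated ARM64 code branches to `entry`, which points at the Redirection's
// embedded HLT instruction. When the simulator decodes that HLT,
// DoRedirectedCall() recovers the Redirection via FromHltInstruction() and
// invokes HostHelper on the host, passing R0 and R1 as the arguments.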
932
933// Get the active Simulator for the current isolate.
934Simulator* Simulator::Current() {
935 Isolate* isolate = Isolate::Current();
936 Simulator* simulator = isolate->simulator();
937 if (simulator == nullptr) {
938 NoSafepointScope no_safepoint;
939 simulator = new Simulator();
940 isolate->set_simulator(simulator);
941 }
942 return simulator;
943}
944
945// Sets the register in the architecture state.
946void Simulator::set_register(Instr* instr,
947 Register reg,
948 int64_t value,
949 R31Type r31t) {
950 // Register is in range.
951 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
952#if !defined(DART_TARGET_OS_FUCHSIA)
953 ASSERT(instr == nullptr || reg != R18); // R18 is globally reserved on iOS.
954#endif
955
956 if ((reg != R31) || (r31t != R31IsZR)) {
957 registers_[reg] = value;
958 // If we're setting CSP, make sure it is 16-byte aligned. In truth, CSP
959 // can store addresses that are not 16-byte aligned, but loads and stores
960 // are not allowed through CSP when it is not aligned. Thus, this check is
961 // more conservative than necessary. However, it will likely be more
962 // useful to find the program locations where CSP is set to a bad value,
963 // than to find only the resulting loads/stores that would cause a fault on
964 // hardware.
965 if ((instr != nullptr) && (reg == R31) && !Utils::IsAligned(value, 16)) {
966 UnalignedAccess("CSP set", value, instr);
967 }
968
969#if defined(DEBUG)
970 if (reg == SP) {
971 // Memory below CSP can be written to at any instruction boundary by a
972 // signal handler. Simulate this to ensure we're keeping CSP far enough
973 // ahead of SP to prevent Dart frames from being trashed.
974 uword csp = registers_[R31];
975 WriteX(csp - 1 * kWordSize, icount_, nullptr);
976 WriteX(csp - 2 * kWordSize, icount_, nullptr);
977 WriteX(csp - 3 * kWordSize, icount_, nullptr);
978 WriteX(csp - 4 * kWordSize, icount_, nullptr);
979 }
980#endif
981 }
982}
983
984// Get the register from the architecture state.
985int64_t Simulator::get_register(Register reg, R31Type r31t) const {
986 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
987 if ((reg == R31) && (r31t == R31IsZR)) {
988 return 0;
989 } else {
990 return registers_[reg];
991 }
992}
993
994void Simulator::set_wregister(Register reg, int32_t value, R31Type r31t) {
995 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
996 // When setting in W mode, clear the high bits.
997 if ((reg != R31) || (r31t != R31IsZR)) {
998 registers_[reg] = Utils::LowHighTo64Bits(static_cast<uint32_t>(value), 0);
999 }
1000}
1001
1002// Get the register from the architecture state.
1003int32_t Simulator::get_wregister(Register reg, R31Type r31t) const {
1004 ASSERT((reg >= 0) && (reg < kNumberOfCpuRegisters));
1005 if ((reg == R31) && (r31t == R31IsZR)) {
1006 return 0;
1007 } else {
1008 return static_cast<int32_t>(registers_[reg]);
1009 }
1010}
1011
1012int32_t Simulator::get_vregisters(VRegister reg, int idx) const {
1013 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1014 ASSERT((idx >= 0) && (idx <= 3));
1015 return vregisters_[reg].bits.i32[idx];
1016}
1017
1018void Simulator::set_vregisters(VRegister reg, int idx, int32_t value) {
1019 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1020 ASSERT((idx >= 0) && (idx <= 3));
1021 vregisters_[reg].bits.i32[idx] = value;
1022}
1023
1024int64_t Simulator::get_vregisterd(VRegister reg, int idx) const {
1025 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1026 ASSERT((idx == 0) || (idx == 1));
1027 return vregisters_[reg].bits.i64[idx];
1028}
1029
1030void Simulator::set_vregisterd(VRegister reg, int idx, int64_t value) {
1031 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1032 ASSERT((idx == 0) || (idx == 1));
1033 vregisters_[reg].bits.i64[idx] = value;
1034}
1035
1036void Simulator::get_vregister(VRegister reg, simd_value_t* value) const {
1037 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1038 value->bits.i64[0] = vregisters_[reg].bits.i64[0];
1039 value->bits.i64[1] = vregisters_[reg].bits.i64[1];
1040}
1041
1042void Simulator::set_vregister(VRegister reg, const simd_value_t& value) {
1043 ASSERT((reg >= 0) && (reg < kNumberOfVRegisters));
1044 vregisters_[reg].bits.i64[0] = value.bits.i64[0];
1045 vregisters_[reg].bits.i64[1] = value.bits.i64[1];
1046}
1047
1048// Raw access to the PC register.
1049void Simulator::set_pc(uint64_t value) {
1050 pc_modified_ = true;
1051 last_pc_ = pc_;
1052 pc_ = value;
1053}
1054
1055// Raw access to the pc.
1056uint64_t Simulator::get_pc() const {
1057 return pc_;
1058}
1059
1060uint64_t Simulator::get_last_pc() const {
1061 return last_pc_;
1062}
1063
1064void Simulator::HandleIllegalAccess(uword addr, Instr* instr) {
1065 uword fault_pc = get_pc();
1066 uword last_pc = get_last_pc();
1067 char buffer[128];
1068 snprintf(buffer, sizeof(buffer),
1069 "illegal memory access at 0x%" Px ", pc=0x%" Px ", last_pc=0x%" Px
1070 "\n",
1071 addr, fault_pc, last_pc);
1072 SimulatorDebugger dbg(this);
1073 dbg.Stop(instr, buffer);
1074 // The debugger will return control in non-interactive mode.
1075 FATAL("Cannot continue execution after illegal memory access.");
1076}
1077
1078// ARMv8 supports unaligned memory accesses to normal memory without trapping
1079// for all instructions except Load-Exclusive/Store-Exclusive and
1080// Load-Acquire/Store-Release.
1081// See B2.4.2 "Alignment of data accesses" for more information.
1082void Simulator::UnalignedAccess(const char* msg, uword addr, Instr* instr) {
1083 char buffer[128];
1084 snprintf(buffer, sizeof(buffer), "unaligned %s at 0x%" Px ", pc=%p\n", msg,
1085 addr, instr);
1086 SimulatorDebugger dbg(this);
1087 dbg.Stop(instr, buffer);
1088 // The debugger will not be able to single step past this instruction, but
1089 // it will be possible to disassemble the code and inspect registers.
1090 FATAL("Cannot continue execution after unaligned access.");
1091}
1092
1093void Simulator::UnimplementedInstruction(Instr* instr) {
1094 char buffer[128];
1095 snprintf(buffer, sizeof(buffer),
1096 "Unimplemented instruction: at %p, last_pc=0x%" Px64 "\n", instr,
1097 get_last_pc());
1098 SimulatorDebugger dbg(this);
1099 dbg.Stop(instr, buffer);
1100 FATAL("Cannot continue execution after unimplemented instruction.");
1101}
1102
1103bool Simulator::IsTracingExecution() const {
1104 return icount_ > FLAG_trace_sim_after;
1105}
1106
1107intptr_t Simulator::ReadX(uword addr,
1108 Instr* instr,
1109 bool must_be_aligned /* = false */) {
1110 const bool allow_unaligned_access =
1111 FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
1112 if (allow_unaligned_access || (addr & 7) == 0) {
1113 return LoadUnaligned(reinterpret_cast<intptr_t*>(addr));
1114 }
1115 UnalignedAccess("read", addr, instr);
1116 return 0;
1117}
1118
1119void Simulator::WriteX(uword addr, intptr_t value, Instr* instr) {
1120 if (FLAG_sim_allow_unaligned_accesses || (addr & 7) == 0) {
1121 StoreUnaligned(reinterpret_cast<intptr_t*>(addr), value);
1122 return;
1123 }
1124 UnalignedAccess("write", addr, instr);
1125}
1126
1127uint32_t Simulator::ReadWU(uword addr,
1128 Instr* instr,
1129 bool must_be_aligned /* = false */) {
1130 const bool allow_unaligned_access =
1131 FLAG_sim_allow_unaligned_accesses && !must_be_aligned;
1132 if (allow_unaligned_access || (addr & 3) == 0) {
1133 return LoadUnaligned(reinterpret_cast<uint32_t*>(addr));
1134 }
1135 UnalignedAccess("read unsigned single word", addr, instr);
1136 return 0;
1137}
1138
1139int32_t Simulator::ReadW(uword addr, Instr* instr) {
1140 if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
1141 return LoadUnaligned(reinterpret_cast<int32_t*>(addr));
1142 }
1143 UnalignedAccess("read single word", addr, instr);
1144 return 0;
1145}
1146
1147void Simulator::WriteW(uword addr, uint32_t value, Instr* instr) {
1148 if (FLAG_sim_allow_unaligned_accesses || (addr & 3) == 0) {
1149 StoreUnaligned(reinterpret_cast<uint32_t*>(addr), value);
1150 return;
1151 }
1152 UnalignedAccess("write single word", addr, instr);
1153}
1154
1155uint16_t Simulator::ReadHU(uword addr, Instr* instr) {
1156 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1157 return LoadUnaligned(reinterpret_cast<uint16_t*>(addr));
1158 }
1159 UnalignedAccess("unsigned halfword read", addr, instr);
1160 return 0;
1161}
1162
1163int16_t Simulator::ReadH(uword addr, Instr* instr) {
1164 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1165 return LoadUnaligned(reinterpret_cast<int16_t*>(addr));
1166 }
1167 UnalignedAccess("signed halfword read", addr, instr);
1168 return 0;
1169}
1170
1171void Simulator::WriteH(uword addr, uint16_t value, Instr* instr) {
1172 if (FLAG_sim_allow_unaligned_accesses || (addr & 1) == 0) {
1173 StoreUnaligned(reinterpret_cast<uint16_t*>(addr), value);
1174 return;
1175 }
1176 UnalignedAccess("halfword write", addr, instr);
1177}
1178
1179uint8_t Simulator::ReadBU(uword addr) {
1180 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
1181 return *ptr;
1182}
1183
1184int8_t Simulator::ReadB(uword addr) {
1185 int8_t* ptr = reinterpret_cast<int8_t*>(addr);
1186 return *ptr;
1187}
1188
1189void Simulator::WriteB(uword addr, uint8_t value) {
1190 uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
1191 *ptr = value;
1192}
1193
1194void Simulator::ClearExclusive() {
1195 exclusive_access_addr_ = 0;
1196 exclusive_access_value_ = 0;
1197}
1198
1199intptr_t Simulator::ReadExclusiveX(uword addr, Instr* instr) {
1200 exclusive_access_addr_ = addr;
1201 exclusive_access_value_ = ReadX(addr, instr, /*must_be_aligned=*/true);
1202 return exclusive_access_value_;
1203}
1204
1205intptr_t Simulator::ReadExclusiveW(uword addr, Instr* instr) {
1206 exclusive_access_addr_ = addr;
1207 exclusive_access_value_ = ReadWU(addr, instr, /*must_be_aligned=*/true);
1208 return exclusive_access_value_;
1209}
1210
1211intptr_t Simulator::WriteExclusiveX(uword addr, intptr_t value, Instr* instr) {
1212 // In well-formed code a store-exclusive instruction should always follow
1213 // a corresponding load-exclusive instruction with the same address.
1214 ASSERT((exclusive_access_addr_ == 0) || (exclusive_access_addr_ == addr));
1215 if (exclusive_access_addr_ != addr) {
1216 return 1; // Failure.
1217 }
1218
1219 int64_t old_value = exclusive_access_value_;
1220 ClearExclusive();
1221
1222 if ((random_.NextUInt32() % 16) == 0) {
1223 return 1; // Spurious failure.
1224 }
1225
1226 auto atomic_addr = reinterpret_cast<RelaxedAtomic<int64_t>*>(addr);
1227 if (atomic_addr->compare_exchange_weak(old_value, value)) {
1228 return 0; // Success.
1229 }
1230 return 1; // Failure.
1231}
1232
1233intptr_t Simulator::WriteExclusiveW(uword addr, intptr_t value, Instr* instr) {
1234 // In well-formed code a store-exclusive instruction should always follow
1235 // a corresponding load-exclusive instruction with the same address.
1236 ASSERT((exclusive_access_addr_ == 0) || (exclusive_access_addr_ == addr));
1237 if (exclusive_access_addr_ != addr) {
1238 return 1; // Failure.
1239 }
1240
1241 int32_t old_value = static_cast<uint32_t>(exclusive_access_value_);
1242 ClearExclusive();
1243
1244 if ((random_.NextUInt32() % 16) == 0) {
1245 return 1; // Spurious failure.
1246 }
1247
1248 auto atomic_addr = reinterpret_cast<RelaxedAtomic<int32_t>*>(addr);
1249 if (atomic_addr->compare_exchange_weak(old_value, value)) {
1250 return 0; // Success.
1251 }
1252 return 1; // Failure.
1253}
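// The Read/WriteExclusive helpers emulate ARM64 LDXR/STXR. A sketch (not in
// the original file) of the retry loop generated code performs, expressed
// against these helpers; `sim`, `addr` and `instr` are assumed in scope:
//
//   intptr_t observed, failed;
//   do {
//     observed = sim->ReadExclusiveX(addr, instr);               // ldxr
//     failed = sim->WriteExclusiveX(addr, observed + 1, instr);  // stxr
//   } while (failed != 0);  // 0 == success; 1 == failure, including the
//                           // deliberately injected spurious failures above.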
1254
1255intptr_t Simulator::ReadAcquire(uword addr, Instr* instr) {
1256 // TODO(42074): Once we switch to C++20 we should change this to use
1257 // `std::atomic_ref<T>` which supports performing atomic operations on
1258 // non-atomic data.
1259 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1260 return reinterpret_cast<std::atomic<intptr_t>*>(addr)->load(
1261 std::memory_order_acquire);
1262}
1263
1264uint32_t Simulator::ReadAcquireW(uword addr, Instr* instr) {
1265 // TODO(42074): Once we switch to C++20 we should change this to use
1266 // `std::atomic_ref<T>` which supports performing atomic operations on
1267 // non-atomic data.
1268 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1269 return reinterpret_cast<std::atomic<uint32_t>*>(addr)->load(
1270 std::memory_order_acquire);
1271}
1272
1273void Simulator::WriteRelease(uword addr, intptr_t value, Instr* instr) {
1274 // TODO(42074): Once we switch to C++20 we should change this to use
1275 // `std::atomic_ref<T>` which supports performing atomic operations on
1276 // non-atomic data.
1277 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1278 reinterpret_cast<std::atomic<intptr_t>*>(addr)->store(
1279 value, std::memory_order_release);
1280}
1281
1282void Simulator::WriteReleaseW(uword addr, uint32_t value, Instr* instr) {
1283 // TODO(42074): Once we switch to C++20 we should change this to use
1284 // `std::atomic_ref<T>` which supports performing atomic operations on
1285 // non-atomic data.
1286 COMPILE_ASSERT(sizeof(std::atomic<intptr_t>) == sizeof(intptr_t));
1287 reinterpret_cast<std::atomic<uint32_t>*>(addr)->store(
1288 value, std::memory_order_release);
1289}
1290
1291// Unsupported instructions use Format to print an error and stop execution.
1292void Simulator::Format(Instr* instr, const char* format) {
1293 OS::PrintErr("Simulator found unsupported instruction:\n 0x%p: %s\n", instr,
1294 format);
1295 UNIMPLEMENTED();
1296}
1297
1298// Calculate and set the Negative and Zero flags.
1299void Simulator::SetNZFlagsW(int32_t val) {
1300 n_flag_ = (val < 0);
1301 z_flag_ = (val == 0);
1302}
1303
1304// Calculate C flag value for additions (and subtractions with adjusted args).
1305bool Simulator::CarryFromW(int32_t left, int32_t right, int32_t carry) {
1306 uint64_t uleft = static_cast<uint32_t>(left);
1307 uint64_t uright = static_cast<uint32_t>(right);
1308 uint64_t ucarry = static_cast<uint32_t>(carry);
1309 return ((uleft + uright + ucarry) >> 32) != 0;
1310}
1311
1312// Calculate V flag value for additions (and subtractions with adjusted args).
1313bool Simulator::OverflowFromW(int32_t left, int32_t right, int32_t carry) {
1314 int64_t result = static_cast<int64_t>(left) + right + carry;
1315 return (result >> 31) != (result >> 32);
1316}
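// Worked example for the two helpers above, for the 32-bit addition
// 0x7fffffff + 1:
//   CarryFromW(0x7fffffff, 1, 0)    == false  // unsigned sum fits in 32 bits
//   OverflowFromW(0x7fffffff, 1, 0) == true   // signed result wraps to INT32_MIN
// For 0xffffffff + 1 the unsigned sum is 0x100000000, so the carry is set,
// while the signed result is 0 and no overflow is flagged.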
1317
1318// Calculate and set the Negative and Zero flags.
1319void Simulator::SetNZFlagsX(int64_t val) {
1320 n_flag_ = (val < 0);
1321 z_flag_ = (val == 0);
1322}
1323
1324// Calculate C flag value for additions and subtractions.
1325bool Simulator::CarryFromX(int64_t alu_out,
1326 int64_t left,
1327 int64_t right,
1328 bool addition) {
1329 if (addition) {
1330 return (((left & right) | ((left | right) & ~alu_out)) >> 63) != 0;
1331 } else {
1332 return (((~left & right) | ((~left | right) & alu_out)) >> 63) == 0;
1333 }
1334}
1335
1336// Calculate V flag value for additions and subtractions.
1337bool Simulator::OverflowFromX(int64_t alu_out,
1338 int64_t left,
1339 int64_t right,
1340 bool addition) {
1341 if (addition) {
1342 return (((alu_out ^ left) & (alu_out ^ right)) >> 63) != 0;
1343 } else {
1344 return (((left ^ right) & (alu_out ^ left)) >> 63) != 0;
1345 }
1346}
1347
1348// Set the Carry flag.
1349void Simulator::SetCFlag(bool val) {
1350 c_flag_ = val;
1351}
1352
1353// Set the oVerflow flag.
1354void Simulator::SetVFlag(bool val) {
1355 v_flag_ = val;
1356}
1357
1358void Simulator::DecodeMoveWide(Instr* instr) {
1359 const Register rd = instr->RdField();
1360 const int hw = instr->HWField();
1361 const int64_t shift = hw << 4;
1362 const int64_t shifted_imm = static_cast<int64_t>(instr->Imm16Field())
1363 << shift;
1364
1365 if (instr->SFField() != 0) {
1366 if (instr->Bits(29, 2) == 0) {
1367 // Format(instr, "movn'sf 'rd, 'imm16 'hw");
1368 set_register(instr, rd, ~shifted_imm, instr->RdMode());
1369 } else if (instr->Bits(29, 2) == 2) {
1370 // Format(instr, "movz'sf 'rd, 'imm16 'hw");
1371 set_register(instr, rd, shifted_imm, instr->RdMode());
1372 } else if (instr->Bits(29, 2) == 3) {
1373 // Format(instr, "movk'sf 'rd, 'imm16 'hw");
1374 const int64_t rd_val = get_register(rd, instr->RdMode());
1375 const int64_t result = (rd_val & ~(0xffffL << shift)) | shifted_imm;
1376 set_register(instr, rd, result, instr->RdMode());
1377 } else {
1378 UnimplementedInstruction(instr);
1379 }
1380 } else if ((hw & 0x2) == 0) {
1381 if (instr->Bits(29, 2) == 0) {
1382 // Format(instr, "movn'sf 'rd, 'imm16 'hw");
1383 set_wregister(rd, ~shifted_imm & kWRegMask, instr->RdMode());
1384 } else if (instr->Bits(29, 2) == 2) {
1385 // Format(instr, "movz'sf 'rd, 'imm16 'hw");
1386 set_wregister(rd, shifted_imm & kWRegMask, instr->RdMode());
1387 } else if (instr->Bits(29, 2) == 3) {
1388 // Format(instr, "movk'sf 'rd, 'imm16 'hw");
1389 const int32_t rd_val = get_wregister(rd, instr->RdMode());
1390 const int32_t result = (rd_val & ~(0xffffL << shift)) | shifted_imm;
1391 set_wregister(rd, result, instr->RdMode());
1392 } else {
1393 UnimplementedInstruction(instr);
1394 }
1395 } else {
1396 // Dest is 32 bits, but shift is more than 32.
1397 UnimplementedInstruction(instr);
1398 }
1399}
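// Worked example: materializing the constant 0x123456789abcdef0 in x0 uses
// one movz followed by three movk instructions, which this decoder handles as
//   movz x0, #0xdef0            (hw=0)  -> x0 = 0x000000000000def0
//   movk x0, #0x9abc, lsl #16   (hw=1)  -> x0 = 0x000000009abcdef0
//   movk x0, #0x5678, lsl #32   (hw=2)  -> x0 = 0x000056789abcdef0
//   movk x0, #0x1234, lsl #48   (hw=3)  -> x0 = 0x123456789abcdef0
// movz stores shifted_imm directly; movk preserves the other bits of rd and
// replaces only the 16-bit field selected by hw.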
1400
1401void Simulator::DecodeAddSubImm(Instr* instr) {
1402 const bool addition = (instr->Bit(30) == 0);
1403 // Format(instr, "addi'sf's 'rd, 'rn, 'imm12s");
1404 // Format(instr, "subi'sf's 'rd, 'rn, 'imm12s");
1405 const Register rd = instr->RdField();
1406 const Register rn = instr->RnField();
1407 uint32_t imm = (instr->Bit(22) == 1) ? (instr->Imm12Field() << 12)
1408 : (instr->Imm12Field());
1409 if (instr->SFField() != 0) {
1410 // 64-bit add.
1411 const uint64_t rn_val = get_register(rn, instr->RnMode());
1412 const uint64_t alu_out = addition ? (rn_val + imm) : (rn_val - imm);
1413 set_register(instr, rd, alu_out, instr->RdMode());
1414 if (instr->HasS()) {
1415 SetNZFlagsX(alu_out);
1416 SetCFlag(CarryFromX(alu_out, rn_val, imm, addition));
1417 SetVFlag(OverflowFromX(alu_out, rn_val, imm, addition));
1418 }
1419 } else {
1420 // 32-bit add.
1421 const uint32_t rn_val = get_wregister(rn, instr->RnMode());
1422 uint32_t carry_in = 0;
1423 if (!addition) {
1424 carry_in = 1;
1425 imm = ~imm;
1426 }
1427 const uint32_t alu_out = rn_val + imm + carry_in;
1428 set_wregister(rd, alu_out, instr->RdMode());
1429 if (instr->HasS()) {
1430 SetNZFlagsW(alu_out);
1431 SetCFlag(CarryFromW(rn_val, imm, carry_in));
1432 SetVFlag(OverflowFromW(rn_val, imm, carry_in));
1433 }
1434 }
1435}
1436
1437void Simulator::DecodeBitfield(Instr* instr) {
1438 int bitwidth = instr->SFField() == 0 ? 32 : 64;
1439 unsigned op = instr->Bits(29, 2);
1440 ASSERT(op <= 2);
1441 bool sign_extend = op == 0;
1442 bool zero_extend = op == 2;
1443 ASSERT(instr->NField() == instr->SFField());
1444 const Register rn = instr->RnField();
1445 const Register rd = instr->RdField();
1446 int64_t result = get_register(rn, instr->RnMode());
1447 int r_bit = instr->ImmRField();
1448 int s_bit = instr->ImmSField();
1449 result &= Utils::NBitMask(bitwidth);
1450 ASSERT(s_bit < bitwidth && r_bit < bitwidth);
1451 // See ARM v8 Instruction set overview 5.4.5.
1452 // If s >= r then Rd[s-r:0] := Rn[s:r], else Rd[bitwidth+s-r:bitwidth-r] :=
1453 // Rn[s:0].
1454 uword mask = Utils::NBitMask(s_bit + 1);
1455 if (s_bit >= r_bit) {
1456 mask >>= r_bit;
1457 result >>= r_bit;
1458 } else {
1459 result = static_cast<uint64_t>(result) << (bitwidth - r_bit);
1460 mask <<= bitwidth - r_bit;
1461 }
1462 result &= mask;
1463 if (sign_extend) {
1464 int highest_bit = (s_bit - r_bit) & (bitwidth - 1);
1465 int shift = 64 - highest_bit - 1;
1466 result <<= shift;
1467 result = static_cast<word>(result) >> shift;
1468 } else if (!zero_extend) {
1469 const int64_t rd_val = get_register(rd, instr->RnMode());
1470 result |= rd_val & ~mask;
1471 }
1472 if (bitwidth == 64) {
1473 set_register(instr, rd, result, instr->RdMode());
1474 } else {
1475 set_wregister(rd, result, instr->RdMode());
1476 }
1477}
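// Worked example: "ubfx x0, x1, #8, #4" is UBFM with r_bit = 8, s_bit = 11.
// With x1 = 0x0000000000abcd12 the code above computes
//   mask   = 0xfff >> 8            = 0xf
//   result = (0xabcd12 >> 8) & 0xf = 0xd
// so bits [11:8] of Rn land zero-extended in Rd. For a signed form such as
// "sxtb w0, w1" (SBFM, r_bit = 0, s_bit = 7) with w1 = 0x80, the final shift
// pair sign-extends the byte and Rd receives 0xffffff80 (-128).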
1478
1479void Simulator::DecodeLogicalImm(Instr* instr) {
1480 const int op = instr->Bits(29, 2);
1481 const bool set_flags = op == 3;
1482 const int out_size = ((instr->SFField() == 0) && (instr->NField() == 0))
1483 ? kWRegSizeInBits
1484 : kXRegSizeInBits;
1485 const Register rn = instr->RnField();
1486 const Register rd = instr->RdField();
1487 const int64_t rn_val = get_register(rn, instr->RnMode());
1488 const uint64_t imm = instr->ImmLogical();
1489 if (imm == 0) {
1490 UnimplementedInstruction(instr);
1491 }
1492
1493 int64_t alu_out = 0;
1494 switch (op) {
1495 case 0:
1496 alu_out = rn_val & imm;
1497 break;
1498 case 1:
1499 alu_out = rn_val | imm;
1500 break;
1501 case 2:
1502 alu_out = rn_val ^ imm;
1503 break;
1504 case 3:
1505 alu_out = rn_val & imm;
1506 break;
1507 default:
1508 UNREACHABLE();
1509 break;
1510 }
1511
1512 if (set_flags) {
1513 if (out_size == kXRegSizeInBits) {
1514 SetNZFlagsX(alu_out);
1515 } else {
1516 SetNZFlagsW(alu_out);
1517 }
1518 SetCFlag(false);
1519 SetVFlag(false);
1520 }
1521
1522 if (out_size == kXRegSizeInBits) {
1523 set_register(instr, rd, alu_out, instr->RdMode());
1524 } else {
1525 set_wregister(rd, alu_out, instr->RdMode());
1526 }
1527}
1528
1529void Simulator::DecodePCRel(Instr* instr) {
1530 const int op = instr->Bit(31);
1531 if (op == 0) {
1532 // Format(instr, "adr 'rd, 'pcrel")
1533 const Register rd = instr->RdField();
1534 const uint64_t immhi = instr->SImm19Field();
1535 const uint64_t immlo = instr->Bits(29, 2);
1536 const uint64_t off = (immhi << 2) | immlo;
1537 const uint64_t dest = get_pc() + off;
1538 set_register(instr, rd, dest, instr->RdMode());
1539 } else {
1540 UnimplementedInstruction(instr);
1541 }
1542}
1543
1544void Simulator::DecodeDPImmediate(Instr* instr) {
1545 if (instr->IsMoveWideOp()) {
1546 DecodeMoveWide(instr);
1547 } else if (instr->IsAddSubImmOp()) {
1548 DecodeAddSubImm(instr);
1549 } else if (instr->IsBitfieldOp()) {
1550 DecodeBitfield(instr);
1551 } else if (instr->IsLogicalImmOp()) {
1552 DecodeLogicalImm(instr);
1553 } else if (instr->IsPCRelOp()) {
1554 DecodePCRel(instr);
1555 } else {
1556 UnimplementedInstruction(instr);
1557 }
1558}
1559
1560void Simulator::DecodeCompareAndBranch(Instr* instr) {
1561 const int op = instr->Bit(24);
1562 const Register rt = instr->RtField();
1563 const uint64_t imm19 = instr->SImm19Field();
1564 const uint64_t dest = get_pc() + (imm19 << 2);
1565 const uint64_t mask = instr->SFField() == 1 ? kXRegMask : kWRegMask;
1566 const uint64_t rt_val = get_register(rt, R31IsZR) & mask;
1567 if (op == 0) {
1568 // Format(instr, "cbz'sf 'rt, 'dest19");
1569 if (rt_val == 0) {
1570 set_pc(dest);
1571 }
1572 } else {
1573 // Format(instr, "cbnz'sf 'rt, 'dest19");
1574 if (rt_val != 0) {
1575 set_pc(dest);
1576 }
1577 }
1578}
1579
1580bool Simulator::ConditionallyExecute(Instr* instr) {
1581 Condition cond;
1582 if (instr->IsConditionalSelectOp()) {
1583 cond = instr->SelectConditionField();
1584 } else {
1585 cond = instr->ConditionField();
1586 }
1587 switch (cond) {
1588 case EQ:
1589 return z_flag_;
1590 case NE:
1591 return !z_flag_;
1592 case CS:
1593 return c_flag_;
1594 case CC:
1595 return !c_flag_;
1596 case MI:
1597 return n_flag_;
1598 case PL:
1599 return !n_flag_;
1600 case VS:
1601 return v_flag_;
1602 case VC:
1603 return !v_flag_;
1604 case HI:
1605 return c_flag_ && !z_flag_;
1606 case LS:
1607 return !c_flag_ || z_flag_;
1608 case GE:
1609 return n_flag_ == v_flag_;
1610 case LT:
1611 return n_flag_ != v_flag_;
1612 case GT:
1613 return !z_flag_ && (n_flag_ == v_flag_);
1614 case LE:
1615 return z_flag_ || (n_flag_ != v_flag_);
1616 case AL:
1617 return true;
1618 default:
1619 UNREACHABLE();
1620 }
1621 return false;
1622}
1623
1624void Simulator::DecodeConditionalBranch(Instr* instr) {
1625 // Format(instr, "b'cond 'dest19");
1626 if ((instr->Bit(24) != 0) || (instr->Bit(4) != 0)) {
1627 UnimplementedInstruction(instr);
1628 }
1629 const uint64_t imm19 = instr->SImm19Field();
1630 const uint64_t dest = get_pc() + (imm19 << 2);
1631 if (ConditionallyExecute(instr)) {
1632 set_pc(dest);
1633 }
1634}
1635
1636// Calls into the Dart runtime are based on this interface.
1637typedef void (*SimulatorRuntimeCall)(NativeArguments arguments);
1638
1639// Calls to leaf Dart runtime functions are based on this interface.
1640typedef int64_t (*SimulatorLeafRuntimeCall)(int64_t r0,
1641 int64_t r1,
1642 int64_t r2,
1643 int64_t r3,
1644 int64_t r4,
1645 int64_t r5,
1646 int64_t r6,
1647 int64_t r7);
1648
1649// [target] has several different signatures that differ from
1650// SimulatorLeafRuntimeCall. We can call them all from here only because in
1651// X64's calling conventions a function can be called with extra arguments
1652// and the callee will see the first arguments and won't unbalance the stack.
1653NO_SANITIZE_UNDEFINED("function")
1654static int64_t InvokeLeafRuntime(SimulatorLeafRuntimeCall target,
1655 int64_t r0,
1656 int64_t r1,
1657 int64_t r2,
1658 int64_t r3,
1659 int64_t r4,
1660 int64_t r5,
1661 int64_t r6,
1662 int64_t r7) {
1663 return target(r0, r1, r2, r3, r4, r5, r6, r7);
1664}
1665
1666// Calls to leaf float Dart runtime functions are based on this interface.
1667typedef double (*SimulatorLeafFloatRuntimeCall)(double d0,
1668 double d1,
1669 double d2,
1670 double d3,
1671 double d4,
1672 double d5,
1673 double d6,
1674 double d7);
1675
1676// [target] has several different signatures that differ from
1677// SimulatorLeafFloatRuntimeCall. We can call them all from here only because in
1678// X64's calling conventions a function can be called with extra arguments
1679// and the callee will see the first arguments and won't unbalance the stack.
1680NO_SANITIZE_UNDEFINED("function")
1681static double InvokeFloatLeafRuntime(SimulatorLeafFloatRuntimeCall target,
1682 double d0,
1683 double d1,
1684 double d2,
1685 double d3,
1686 double d4,
1687 double d5,
1688 double d6,
1689 double d7) {
1690 return target(d0, d1, d2, d3, d4, d5, d6, d7);
1691}
1692
1693// Calls to native Dart functions are based on this interface.
1694typedef void (*SimulatorNativeCallWrapper)(Dart_NativeArguments arguments,
1695 Dart_NativeFunction target);
1696
1697void Simulator::DoRedirectedCall(Instr* instr) {
1698 SimulatorSetjmpBuffer buffer(this);
1699 if (!setjmp(buffer.buffer_)) {
1700 int64_t saved_lr = get_register(LR);
1701 Redirection* redirection = Redirection::FromHltInstruction(instr);
1702 uword external = redirection->external_function();
1703 if (IsTracingExecution()) {
1704 THR_Print("Call to host function at 0x%" Pd "\n", external);
1705 }
1706
1707 if (redirection->call_kind() == kRuntimeCall) {
1708 NativeArguments* arguments =
1709 reinterpret_cast<NativeArguments*>(get_register(R0));
1710 SimulatorRuntimeCall target =
1711 reinterpret_cast<SimulatorRuntimeCall>(external);
1712 target(*arguments);
1713 ClobberVolatileRegisters();
1714 } else if (redirection->call_kind() == kLeafRuntimeCall) {
1715 ASSERT((0 <= redirection->argument_count()) &&
1716 (redirection->argument_count() <= 8));
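      // Leaf runtime calls take up to 8 integer arguments, read directly from
      // the simulated R0-R7, matching the ARM64 integer argument registers.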
1717 SimulatorLeafRuntimeCall target =
1718 reinterpret_cast<SimulatorLeafRuntimeCall>(external);
1719 const int64_t r0 = get_register(R0);
1720 const int64_t r1 = get_register(R1);
1721 const int64_t r2 = get_register(R2);
1722 const int64_t r3 = get_register(R3);
1723 const int64_t r4 = get_register(R4);
1724 const int64_t r5 = get_register(R5);
1725 const int64_t r6 = get_register(R6);
1726 const int64_t r7 = get_register(R7);
1727 const int64_t res =
1728 InvokeLeafRuntime(target, r0, r1, r2, r3, r4, r5, r6, r7);
1729 ClobberVolatileRegisters();
1730 set_register(instr, R0, res); // Set returned result from function.
1731 } else if (redirection->call_kind() == kLeafFloatRuntimeCall) {
1732 ASSERT((0 <= redirection->argument_count()) &&
1733 (redirection->argument_count() <= 8));
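      // Floating-point leaf calls likewise take up to 8 double arguments from
      // the simulated V0-V7 and return their result in V0.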
1734 SimulatorLeafFloatRuntimeCall target =
1735 reinterpret_cast<SimulatorLeafFloatRuntimeCall>(external);
1736 const double d0 = bit_cast<double, int64_t>(get_vregisterd(V0, 0));
1737 const double d1 = bit_cast<double, int64_t>(get_vregisterd(V1, 0));
1738 const double d2 = bit_cast<double, int64_t>(get_vregisterd(V2, 0));
1739 const double d3 = bit_cast<double, int64_t>(get_vregisterd(V3, 0));
1740 const double d4 = bit_cast<double, int64_t>(get_vregisterd(V4, 0));
1741 const double d5 = bit_cast<double, int64_t>(get_vregisterd(V5, 0));
1742 const double d6 = bit_cast<double, int64_t>(get_vregisterd(V6, 0));
1743 const double d7 = bit_cast<double, int64_t>(get_vregisterd(V7, 0));
1744 const double res =
1745 InvokeFloatLeafRuntime(target, d0, d1, d2, d3, d4, d5, d6, d7);
1746 ClobberVolatileRegisters();
1747 set_vregisterd(V0, 0, bit_cast<int64_t, double>(res));
1748 set_vregisterd(V0, 1, 0);
1749 } else {
1750 ASSERT(redirection->call_kind() == kNativeCallWrapper);
1751 SimulatorNativeCallWrapper wrapper =
1752 reinterpret_cast<SimulatorNativeCallWrapper>(external);
1753 Dart_NativeArguments arguments =
1754 reinterpret_cast<Dart_NativeArguments>(get_register(R0));
1755      Dart_NativeFunction target =
1756          reinterpret_cast<Dart_NativeFunction>(get_register(R1));
1757 wrapper(arguments, target);
1758 ClobberVolatileRegisters();
1759 }
1760
1761 // Return.
1762 set_pc(saved_lr);
1763 } else {
1764 // Coming via long jump from a throw. Continue to exception handler.
1765 }
1766}
1767
1768void Simulator::ClobberVolatileRegisters() {
1769 // Clear atomic reservation.
1770 exclusive_access_addr_ = exclusive_access_value_ = 0;
1771
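  // Fill the caller-saved (volatile) registers with junk so that simulated
  // code which wrongly assumes they survive a runtime call fails quickly:
  // CPU registers get pseudo-random values, FPU registers the current icount.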
1772 for (intptr_t i = 0; i < kNumberOfCpuRegisters; i++) {
1773 if ((kAbiVolatileCpuRegs & (1 << i)) != 0) {
1774 registers_[i] = random_.NextUInt64();
1775 }
1776 }
1777
1778 for (intptr_t i = 0; i < kNumberOfFpuRegisters; i++) {
1779 if ((kAbiVolatileFpuRegs & (1 << i)) != 0) {
1780 vregisters_[i].bits.i64[0] = icount_;
1781 vregisters_[i].bits.i64[1] = icount_;
1782 }
1783 }
1784}
1785
1786void Simulator::DecodeExceptionGen(Instr* instr) {
1787 if ((instr->Bits(0, 2) == 1) && (instr->Bits(2, 3) == 0) &&
1788 (instr->Bits(21, 3) == 0)) {
1789 // Format(instr, "svc 'imm16");
1790 UnimplementedInstruction(instr);
1791 } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
1792 (instr->Bits(21, 3) == 1)) {
1793 // Format(instr, "brk 'imm16");
1794 SimulatorDebugger dbg(this);
1795 int32_t imm = instr->Imm16Field();
1796 char buffer[32];
1797 snprintf(buffer, sizeof(buffer), "brk #0x%x", imm);
1798 set_pc(get_pc() + Instr::kInstrSize);
1799 dbg.Stop(instr, buffer);
1800 } else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
1801 (instr->Bits(21, 3) == 2)) {
1802 // Format(instr, "hlt 'imm16");
1803 uint16_t imm = static_cast<uint16_t>(instr->Imm16Field());
1804 if (imm == Instr::kSimulatorBreakCode) {
1805 SimulatorDebugger dbg(this);
1806 dbg.Stop(instr, "breakpoint");
1807 } else if (imm == Instr::kSimulatorRedirectCode) {
1808 DoRedirectedCall(instr);
1809 } else {
1810 UnimplementedInstruction(instr);
1811 }
1812 } else {
1813 UnimplementedInstruction(instr);
1814 }
1815}
1816
1817void Simulator::DecodeSystem(Instr* instr) {
1818 if (instr->InstructionBits() == CLREX) {
1819 // Format(instr, "clrex");
1820 ClearExclusive();
1821 return;
1822 }
1823
1824 if ((instr->Bits(0, 8) == 0x1f) && (instr->Bits(12, 4) == 2) &&
1825 (instr->Bits(16, 3) == 3) && (instr->Bits(19, 2) == 0) &&
1826 (instr->Bit(21) == 0)) {
1827 if (instr->Bits(8, 4) == 0) {
1828 // Format(instr, "nop");
1829 } else {
1830 UnimplementedInstruction(instr);
1831 }
1832 } else {
1833 UnimplementedInstruction(instr);
1834 }
1835}
1836
1837void Simulator::DecodeTestAndBranch(Instr* instr) {
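  // tbz/tbnz test a single bit of Rt and branch on its value. Bit 31 of the
  // instruction supplies bit 5 of the bit position, allowing positions 32-63.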
1838 const int op = instr->Bit(24);
1839 const int bitpos = instr->Bits(19, 5) | (instr->Bit(31) << 5);
1840 const uint64_t imm14 = instr->SImm14Field();
1841 const uint64_t dest = get_pc() + (imm14 << 2);
1842 const Register rt = instr->RtField();
1843 const uint64_t rt_val = get_register(rt, R31IsZR);
1844 if (op == 0) {
1845 // Format(instr, "tbz'sf 'rt, 'bitpos, 'dest14");
1846 if ((rt_val & (1ull << bitpos)) == 0) {
1847 set_pc(dest);
1848 }
1849 } else {
1850 // Format(instr, "tbnz'sf 'rt, 'bitpos, 'dest14");
1851 if ((rt_val & (1ull << bitpos)) != 0) {
1852 set_pc(dest);
1853 }
1854 }
1855}
1856
1857void Simulator::DecodeUnconditionalBranch(Instr* instr) {
1858 const bool link = instr->Bit(31) == 1;
1859 const uint64_t imm26 = instr->SImm26Field();
1860 const uint64_t dest = get_pc() + (imm26 << 2);
1861 const uint64_t ret = get_pc() + Instr::kInstrSize;
1862 set_pc(dest);
1863 if (link) {
1864 set_register(instr, LR, ret);
1865 }
1866}
1867
1868void Simulator::DecodeUnconditionalBranchReg(Instr* instr) {
1869 if ((instr->Bits(0, 5) == 0) && (instr->Bits(10, 6) == 0) &&
1870 (instr->Bits(16, 5) == 0x1f)) {
1871 switch (instr->Bits(21, 4)) {
1872 case 0: {
1873 // Format(instr, "br 'rn");
1874 const Register rn = instr->RnField();
1875 const int64_t dest = get_register(rn, instr->RnMode());
1876 set_pc(dest);
1877 break;
1878 }
1879 case 1: {
1880 // Format(instr, "blr 'rn");
1881 const Register rn = instr->RnField();
1882 const int64_t dest = get_register(rn, instr->RnMode());
1883 const int64_t ret = get_pc() + Instr::kInstrSize;
1884 set_pc(dest);
1885 set_register(instr, LR, ret);
1886 break;
1887 }
1888 case 2: {
1889 // Format(instr, "ret 'rn");
1890 const Register rn = instr->RnField();
1891 const int64_t rn_val = get_register(rn, instr->RnMode());
1892 set_pc(rn_val);
1893 break;
1894 }
1895 default:
1896 UnimplementedInstruction(instr);
1897 break;
1898 }
1899 } else {
1900 UnimplementedInstruction(instr);
1901 }
1902}
1903
1904DART_FORCE_INLINE
1905void Simulator::DecodeCompareBranch(Instr* instr) {
1906 if (instr->IsCompareAndBranchOp()) {
1907 DecodeCompareAndBranch(instr);
1908 } else if (instr->IsConditionalBranchOp()) {
1909 DecodeConditionalBranch(instr);
1910 } else if (instr->IsExceptionGenOp()) {
1911 DecodeExceptionGen(instr);
1912 } else if (instr->IsSystemOp()) {
1913 DecodeSystem(instr);
1914 } else if (instr->IsTestAndBranchOp()) {
1915 DecodeTestAndBranch(instr);
1916 } else if (instr->IsUnconditionalBranchOp()) {
1917 DecodeUnconditionalBranch(instr);
1918 } else if (instr->IsUnconditionalBranchRegOp()) {
1919 DecodeUnconditionalBranchReg(instr);
1920 } else {
1921 UnimplementedInstruction(instr);
1922 }
1923}
1924
1925void Simulator::DecodeLoadStoreReg(Instr* instr) {
1926 // Calculate the address.
1927 const Register rn = instr->RnField();
1928 const Register rt = instr->RtField();
1929 const VRegister vt = instr->VtField();
1930 const int64_t rn_val = get_register(rn, R31IsSP);
1931 const uint32_t size = (instr->Bit(26) == 1)
1932 ? ((instr->Bit(23) << 2) | instr->SzField())
1933 : instr->SzField();
1934 uword address = 0;
1935 uword wb_address = 0;
1936 bool wb = false;
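  // Bit 24 selects the scaled unsigned-offset form; otherwise bits 10-11
  // distinguish the unscaled-offset, pre/post-index (with writeback) and
  // register-offset (extended) addressing modes decoded below.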
1937 if (instr->Bit(24) == 1) {
1938 // addr = rn + scaled unsigned 12-bit immediate offset.
1939 const uint32_t imm12 = static_cast<uint32_t>(instr->Imm12Field());
1940 const uint32_t offset = imm12 << size;
1941 address = rn_val + offset;
1942 } else if (instr->Bits(10, 2) == 0) {
1943 // addr = rn + signed 9-bit immediate offset.
1944 wb = false;
1945 const int64_t offset = static_cast<int64_t>(instr->SImm9Field());
1946 address = rn_val + offset;
1947 wb_address = rn_val;
1948 } else if (instr->Bit(10) == 1) {
1949 // addr = rn + signed 9-bit immediate offset.
1950 wb = true;
1951 const int64_t offset = static_cast<int64_t>(instr->SImm9Field());
1952 if (instr->Bit(11) == 1) {
1953 // Pre-index.
1954 address = rn_val + offset;
1955 wb_address = address;
1956 } else {
1957 // Post-index.
1958 address = rn_val;
1959 wb_address = rn_val + offset;
1960 }
1961 } else if (instr->Bits(10, 2) == 2) {
1962 // addr = rn + (rm EXT optionally scaled by operand instruction size).
1963 const Register rm = instr->RmField();
1964 const Extend ext = instr->ExtendTypeField();
1965 const uint8_t scale = (ext == UXTX) && (instr->Bit(12) == 1) ? size : 0;
1966 const int64_t rm_val = get_register(rm, R31IsZR);
1967 const int64_t offset = ExtendOperand(kXRegSizeInBits, rm_val, ext, scale);
1968 address = rn_val + offset;
1969 } else {
1970 UnimplementedInstruction(instr);
1971 return;
1972 }
1973
1974 // Check the address.
1975 if (IsIllegalAddress(address)) {
1976 HandleIllegalAccess(address, instr);
1977 return;
1978 }
1979
1980 // Do access.
1981 if (instr->Bit(26) == 1) {
1982 if (instr->Bit(22) == 0) {
1983 // Format(instr, "fstr'fsz 'vt, 'memop");
1984 const int64_t vt_val = get_vregisterd(vt, 0);
1985 switch (size) {
1986 case 2:
1987 WriteW(address, vt_val & kWRegMask, instr);
1988 break;
1989 case 3:
1990 WriteX(address, vt_val, instr);
1991 break;
1992 case 4: {
1993 simd_value_t val;
1994 get_vregister(vt, &val);
1995 WriteX(address, val.bits.i64[0], instr);
1996 WriteX(address + kWordSize, val.bits.i64[1], instr);
1997 break;
1998 }
1999 default:
2000 UnimplementedInstruction(instr);
2001 return;
2002 }
2003 } else {
2004 // Format(instr, "fldr'fsz 'vt, 'memop");
2005 switch (size) {
2006 case 2:
2007 set_vregisterd(vt, 0, static_cast<int64_t>(ReadWU(address, instr)));
2008 set_vregisterd(vt, 1, 0);
2009 break;
2010 case 3:
2011 set_vregisterd(vt, 0, ReadX(address, instr));
2012 set_vregisterd(vt, 1, 0);
2013 break;
2014 case 4: {
2015 simd_value_t val;
2016 val.bits.i64[0] = ReadX(address, instr);
2017 val.bits.i64[1] = ReadX(address + kWordSize, instr);
2018 set_vregister(vt, val);
2019 break;
2020 }
2021 default:
2022 UnimplementedInstruction(instr);
2023 return;
2024 }
2025 }
2026 } else {
2027 if (instr->Bits(22, 2) == 0) {
2028 // Format(instr, "str'sz 'rt, 'memop");
2029 const int32_t rt_val32 = get_wregister(rt, R31IsZR);
2030 switch (size) {
2031 case 0: {
2032 const uint8_t val = static_cast<uint8_t>(rt_val32);
2033 WriteB(address, val);
2034 break;
2035 }
2036 case 1: {
2037 const uint16_t val = static_cast<uint16_t>(rt_val32);
2038 WriteH(address, val, instr);
2039 break;
2040 }
2041 case 2: {
2042 const uint32_t val = static_cast<uint32_t>(rt_val32);
2043 WriteW(address, val, instr);
2044 break;
2045 }
2046 case 3: {
2047 const int64_t val = get_register(rt, R31IsZR);
2048 WriteX(address, val, instr);
2049 break;
2050 }
2051 default:
2052 UNREACHABLE();
2053 break;
2054 }
2055 } else {
2056 // Format(instr, "ldr'sz 'rt, 'memop");
2057 // Undefined case.
2058 if ((size == 3) && (instr->Bits(22, 2) == 3)) {
2059 UnimplementedInstruction(instr);
2060 return;
2061 }
2062
2063 // Read the value.
2064 const bool is_signed = instr->Bit(23) == 1;
2065 // Write the W register for signed values when size < 2.
2066 // Write the W register for unsigned values when size == 2.
2067 const bool use_w =
2068 (is_signed && (instr->Bit(22) == 1)) || (!is_signed && (size == 2));
2069 int64_t val = 0; // Sign extend into an int64_t.
2070 switch (size) {
2071 case 0: {
2072 if (is_signed) {
2073 val = static_cast<int64_t>(ReadB(address));
2074 } else {
2075 val = static_cast<int64_t>(ReadBU(address));
2076 }
2077 break;
2078 }
2079 case 1: {
2080 if (is_signed) {
2081 val = static_cast<int64_t>(ReadH(address, instr));
2082 } else {
2083 val = static_cast<int64_t>(ReadHU(address, instr));
2084 }
2085 break;
2086 }
2087 case 2: {
2088 if (is_signed) {
2089 val = static_cast<int64_t>(ReadW(address, instr));
2090 } else {
2091 val = static_cast<int64_t>(ReadWU(address, instr));
2092 }
2093 break;
2094 }
2095 case 3:
2096 val = ReadX(address, instr);
2097 break;
2098 default:
2099 UNREACHABLE();
2100 break;
2101 }
2102
2103 // Write to register.
2104 if (use_w) {
2105 set_wregister(rt, static_cast<int32_t>(val), R31IsZR);
2106 } else {
2107 set_register(instr, rt, val, R31IsZR);
2108 }
2109 }
2110 }
2111
2112 // Do writeback.
2113 if (wb) {
2114 set_register(instr, rn, wb_address, R31IsSP);
2115 }
2116}
2117
2118void Simulator::DecodeLoadStoreRegPair(Instr* instr) {
2119 const int32_t opc = instr->Bits(23, 3);
2120 const Register rn = instr->RnField();
2121 const int64_t rn_val = get_register(rn, R31IsSP);
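  // The signed 7-bit immediate is scaled by the access size: 4 or 8 bytes for
  // integer pairs (SF bit), 4, 8 or 16 bytes for SIMD/FP pairs (size field).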
2122 const intptr_t shift =
2123 (instr->Bit(26) == 1) ? 2 + instr->SzField() : 2 + instr->SFField();
2124 const intptr_t size = 1 << shift;
2125 const int32_t offset = (static_cast<uint32_t>(instr->SImm7Field()) << shift);
2126 uword address = 0;
2127 uword wb_address = 0;
2128 bool wb = false;
2129
2130 if ((instr->Bits(30, 2) == 3)) {
2131 UnimplementedInstruction(instr);
2132 return;
2133 }
2134
2135 // Calculate address.
2136 switch (opc) {
2137 case 1:
2138 address = rn_val;
2139 wb_address = rn_val + offset;
2140 wb = true;
2141 break;
2142 case 2:
2143 address = rn_val + offset;
2144 break;
2145 case 3:
2146 address = rn_val + offset;
2147 wb_address = address;
2148 wb = true;
2149 break;
2150 default:
2151 UnimplementedInstruction(instr);
2152 return;
2153 }
2154
2155 // Check the address.
2156 if (IsIllegalAddress(address)) {
2157 HandleIllegalAccess(address, instr);
2158 return;
2159 }
2160
2161 // Do access.
2162 if (instr->Bit(26) == 1) {
2163 // SIMD/FP.
2164 const VRegister vt = instr->VtField();
2165 const VRegister vt2 = instr->Vt2Field();
2166 if (instr->Bit(22) != 0) {
2167 // Format(instr, "ldp 'vt, 'vt2, 'memop");
2168 switch (size) {
2169 case 4:
2170 set_vregisterd(vt, 0, static_cast<int64_t>(ReadWU(address, instr)));
2171 set_vregisterd(vt, 1, 0);
2172 set_vregisterd(vt2, 0,
2173 static_cast<int64_t>(ReadWU(address + 4, instr)));
2174 set_vregisterd(vt2, 1, 0);
2175 break;
2176 case 8:
2177 set_vregisterd(vt, 0, ReadX(address, instr));
2178 set_vregisterd(vt, 1, 0);
2179 set_vregisterd(vt2, 0, ReadX(address + 8, instr));
2180 set_vregisterd(vt2, 1, 0);
2181 break;
2182 case 16: {
2183 simd_value_t val;
2184 val.bits.i64[0] = ReadX(address, instr);
2185 val.bits.i64[1] = ReadX(address + 8, instr);
2186 set_vregister(vt, val);
2187 val.bits.i64[0] = ReadX(address + 16, instr);
2188 val.bits.i64[1] = ReadX(address + 24, instr);
2189 set_vregister(vt2, val);
2190 break;
2191 }
2192 default:
2193 UnimplementedInstruction(instr);
2194 return;
2195 }
2196 } else {
2197 // Format(instr, "stp 'vt, 'vt2, 'memop");
2198 switch (size) {
2199 case 4:
2200 WriteW(address, get_vregisterd(vt, 0) & kWRegMask, instr);
2201 WriteW(address + 4, get_vregisterd(vt2, 0) & kWRegMask, instr);
2202 break;
2203 case 8:
2204 WriteX(address, get_vregisterd(vt, 0), instr);
2205 WriteX(address + 8, get_vregisterd(vt2, 0), instr);
2206 break;
2207 case 16: {
2208 simd_value_t val;
2209 get_vregister(vt, &val);
2210 WriteX(address, val.bits.i64[0], instr);
2211 WriteX(address + 8, val.bits.i64[1], instr);
2212 get_vregister(vt2, &val);
2213 WriteX(address + 16, val.bits.i64[0], instr);
2214 WriteX(address + 24, val.bits.i64[1], instr);
2215 break;
2216 }
2217 default:
2218 UnimplementedInstruction(instr);
2219 return;
2220 }
2221 }
2222 } else {
2223 // Integer.
2224 const Register rt = instr->RtField();
2225 const Register rt2 = instr->Rt2Field();
2226 if (instr->Bit(22) != 0) {
2227 // Format(instr, "ldp'sf 'rt, 'rt2, 'memop");
2228 const bool is_signed = instr->Bit(30) == 1;
2229 int64_t val1 = 0; // Sign extend into an int64_t.
2230 int64_t val2 = 0;
2231 if (instr->Bit(31) == 1) {
2232 // 64-bit read.
2233 val1 = ReadX(address, instr);
2234 val2 = ReadX(address + size, instr);
2235 } else {
2236 if (is_signed) {
2237 val1 = static_cast<int64_t>(ReadW(address, instr));
2238 val2 = static_cast<int64_t>(ReadW(address + size, instr));
2239 } else {
2240 val1 = static_cast<int64_t>(ReadWU(address, instr));
2241 val2 = static_cast<int64_t>(ReadWU(address + size, instr));
2242 }
2243 }
2244 // Write to register.
2245 if (instr->Bit(31) == 1) {
2246 set_register(instr, rt, val1, R31IsZR);
2247 set_register(instr, rt2, val2, R31IsZR);
2248 } else {
2249 set_wregister(rt, static_cast<int32_t>(val1), R31IsZR);
2250 set_wregister(rt2, static_cast<int32_t>(val2), R31IsZR);
2251 }
2252 } else {
2253 // Format(instr, "stp'sf 'rt, 'rt2, 'memop");
2254 if (instr->Bit(31) == 1) {
2255 const int64_t val1 = get_register(rt, R31IsZR);
2256 const int64_t val2 = get_register(rt2, R31IsZR);
2257 WriteX(address, val1, instr);
2258 WriteX(address + size, val2, instr);
2259 } else {
2260 const int32_t val1 = get_wregister(rt, R31IsZR);
2261 const int32_t val2 = get_wregister(rt2, R31IsZR);
2262 WriteW(address, val1, instr);
2263 WriteW(address + size, val2, instr);
2264 }
2265 }
2266 }
2267
2268 // Do writeback.
2269 if (wb) {
2270 set_register(instr, rn, wb_address, R31IsSP);
2271 }
2272}
2273
2274void Simulator::DecodeLoadRegLiteral(Instr* instr) {
2275 if ((instr->Bit(31) != 0) || (instr->Bit(29) != 0) ||
2276 (instr->Bits(24, 3) != 0)) {
2277 UnimplementedInstruction(instr);
2278 }
2279
2280 const Register rt = instr->RtField();
2281 const int64_t off = instr->SImm19Field() << 2;
2282 const int64_t pc = reinterpret_cast<int64_t>(instr);
2283 const int64_t address = pc + off;
2284 const int64_t val = ReadX(address, instr);
2285 if (instr->Bit(30) != 0) {
2286 // Format(instr, "ldrx 'rt, 'pcldr");
2287 set_register(instr, rt, val, R31IsZR);
2288 } else {
2289 // Format(instr, "ldrw 'rt, 'pcldr");
2290 set_wregister(rt, static_cast<int32_t>(val), R31IsZR);
2291 }
2292}
2293
2294void Simulator::DecodeLoadStoreExclusive(Instr* instr) {
2295 if (instr->Bit(21) != 0 || instr->Bit(23) != instr->Bit(15)) {
2296 UNIMPLEMENTED();
2297 }
2298 const int32_t size = instr->Bits(30, 2);
2299 if (size != 3 && size != 2) {
2300 UNIMPLEMENTED();
2301 }
2302 const Register rs = instr->RsField();
2303 const Register rn = instr->RnField();
2304 const Register rt = instr->RtField();
2305 ASSERT(instr->Rt2Field() == R31); // Should-Be-One
2306 const bool is_load = instr->Bit(22) == 1;
2307 const bool is_exclusive = instr->Bit(23) == 0;
2308 const bool is_ordered = instr->Bit(15) == 1;
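  // Only two forms reach here: exclusive ldxr/stxr (bits 23 and 15 clear) and
  // ordered, non-exclusive ldar/stlr (bits 23 and 15 set); mixed encodings
  // were rejected above.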
2309 if (is_load) {
2310 const bool is_load_acquire = !is_exclusive && is_ordered;
2311 if (is_load_acquire) {
2312 ASSERT(rs == R31); // Should-Be-One
2313 // Format(instr, "ldar 'rt, 'rn");
2314 const int64_t addr = get_register(rn, R31IsSP);
2315 const intptr_t value =
2316 (size == 3) ? ReadAcquire(addr, instr) : ReadAcquireW(addr, instr);
2317 set_register(instr, rt, value, R31IsZR);
2318 } else {
2319 ASSERT(rs == R31); // Should-Be-One
2320 // Format(instr, "ldxr 'rt, 'rn");
2321 const int64_t addr = get_register(rn, R31IsSP);
2322 const intptr_t value = (size == 3) ? ReadExclusiveX(addr, instr)
2323 : ReadExclusiveW(addr, instr);
2324 set_register(instr, rt, value, R31IsZR);
2325 }
2326 } else {
2327 const bool is_store_release = !is_exclusive && is_ordered;
2328 if (is_store_release) {
2329 ASSERT(rs == R31); // Should-Be-One
2330 // Format(instr, "stlr 'rt, 'rn");
2331 const uword value = get_register(rt, R31IsZR);
2332 const uword addr = get_register(rn, R31IsSP);
2333 if (size == 3) {
2334 WriteRelease(addr, value, instr);
2335 } else {
2336 WriteReleaseW(addr, static_cast<uint32_t>(value), instr);
2337 }
2338 } else {
2339 // Format(instr, "stxr 'rs, 'rt, 'rn");
2340 const uword value = get_register(rt, R31IsZR);
2341 const uword addr = get_register(rn, R31IsSP);
2342 const intptr_t status =
2343 (size == 3)
2344 ? WriteExclusiveX(addr, value, instr)
2345 : WriteExclusiveW(addr, static_cast<uint32_t>(value), instr);
2346 set_register(instr, rs, status, R31IsSP);
2347 }
2348 }
2349}
2350
2351void Simulator::DecodeAtomicMemory(Instr* instr) {
2352 const int32_t size = instr->Bits(30, 2);
2353 std::memory_order order;
2354 switch (instr->Bits(22, 2)) {
2355 case 3:
2356 order = std::memory_order_acq_rel;
2357 break;
2358 case 2:
2359 order = std::memory_order_acquire;
2360 break;
2361 case 1:
2362 order = std::memory_order_release;
2363 break;
2364 case 0:
2365 order = std::memory_order_relaxed;
2366 break;
2367 }
2368 const Register rs = instr->RsField();
2369 const Register rn = instr->RnField();
2370 const Register rt = instr->RtField();
2371 const int32_t opc = instr->Bits(12, 3);
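  // Only opc 1 (ldclr: AND with the complement of Rs) and opc 3 (ldset: OR
  // with Rs) are implemented; the old memory value is returned in Rt.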
2372
2373 if (size == 3) {
2374 uint64_t in = get_register(rs, R31IsZR);
2375 auto addr =
2376 reinterpret_cast<std::atomic<uint64_t>*>(get_register(rn, R31IsSP));
2377 uint64_t out;
2378 switch (opc) {
2379 case 1:
2380 out = addr->fetch_and(~in, order);
2381 break;
2382 case 3:
2383 out = addr->fetch_or(in, order);
2384 break;
2385 default:
2386 UNIMPLEMENTED();
2387 }
2388 set_register(instr, rt, out, R31IsZR);
2389 } else if (size == 2) {
2390 ASSERT(size == 2);
2391 uint32_t in = get_wregister(rs, R31IsZR);
2392 auto addr =
2393 reinterpret_cast<std::atomic<uint32_t>*>(get_register(rn, R31IsSP));
2394 uint32_t out;
2395 switch (opc) {
2396 case 1:
2397 out = addr->fetch_and(~in, order);
2398 break;
2399 case 3:
2400 out = addr->fetch_or(in, order);
2401 break;
2402 default:
2403 UNIMPLEMENTED();
2404 }
2405 set_wregister(rt, out, R31IsZR);
2406 } else {
2407 UNIMPLEMENTED();
2408 }
2409}
2410
2411DART_FORCE_INLINE
2412void Simulator::DecodeLoadStore(Instr* instr) {
2413 if (instr->IsAtomicMemoryOp()) {
2414 DecodeAtomicMemory(instr);
2415 } else if (instr->IsLoadStoreRegOp()) {
2416 DecodeLoadStoreReg(instr);
2417 } else if (instr->IsLoadStoreRegPairOp()) {
2418 DecodeLoadStoreRegPair(instr);
2419 } else if (instr->IsLoadRegLiteralOp()) {
2420 DecodeLoadRegLiteral(instr);
2421 } else if (instr->IsLoadStoreExclusiveOp()) {
2422 DecodeLoadStoreExclusive(instr);
2423 } else {
2424 UnimplementedInstruction(instr);
2425 }
2426}
2427
2428int64_t Simulator::ShiftOperand(uint8_t reg_size,
2429 int64_t value,
2430 Shift shift_type,
2431 uint8_t amount) {
2432 if (amount == 0) {
2433 return value;
2434 }
2435 int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask;
2436 switch (shift_type) {
2437 case LSL:
2438 return (static_cast<uint64_t>(value) << amount) & mask;
2439 case LSR:
2440 return static_cast<uint64_t>(value) >> amount;
2441 case ASR: {
2442 // Shift used to restore the sign.
2443 uint8_t s_shift = kXRegSizeInBits - reg_size;
2444 // Value with its sign restored.
2445 int64_t s_value = (value << s_shift) >> s_shift;
2446 return (s_value >> amount) & mask;
2447 }
2448 case ROR: {
2449 if (reg_size == kWRegSizeInBits) {
2450 value &= kWRegMask;
2451 }
2452 return (static_cast<uint64_t>(value) >> amount) |
2453 ((static_cast<uint64_t>(value) & ((1ULL << amount) - 1ULL))
2454 << (reg_size - amount));
2455 }
2456 default:
2457 UNIMPLEMENTED();
2458 return 0;
2459 }
2460}
2461
2462int64_t Simulator::ExtendOperand(uint8_t reg_size,
2463 int64_t value,
2464 Extend extend_type,
2465 uint8_t amount) {
2466 switch (extend_type) {
2467 case UXTB:
2468 value &= 0xff;
2469 break;
2470 case UXTH:
2471 value &= 0xffff;
2472 break;
2473 case UXTW:
2474 value &= 0xffffffff;
2475 break;
2476 case SXTB:
2477 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 56) >> 56;
2478 break;
2479 case SXTH:
2480 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 48) >> 48;
2481 break;
2482 case SXTW:
2483 value = static_cast<int64_t>(static_cast<uint64_t>(value) << 32) >> 32;
2484 break;
2485 case UXTX:
2486 case SXTX:
2487 break;
2488 default:
2489 UNREACHABLE();
2490 break;
2491 }
2492 int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask;
2493 return (static_cast<uint64_t>(value) << amount) & mask;
2494}
2495
2496int64_t Simulator::DecodeShiftExtendOperand(Instr* instr) {
2497 const Register rm = instr->RmField();
2498 const int64_t rm_val = get_register(rm, R31IsZR);
2499 const uint8_t size =
2500 instr->SFField() != 0 ? kXRegSizeInBits : kWRegSizeInBits;
2501 if (instr->IsShift()) {
2502 const Shift shift_type = instr->ShiftTypeField();
2503 const uint8_t shift_amount = instr->Imm6Field();
2504 return ShiftOperand(size, rm_val, shift_type, shift_amount);
2505 } else {
2506 ASSERT(instr->IsExtend());
2507 const Extend extend_type = instr->ExtendTypeField();
2508 const uint8_t shift_amount = instr->Imm3Field();
2509 return ExtendOperand(size, rm_val, extend_type, shift_amount);
2510 }
2511 UNREACHABLE();
2512 return -1;
2513}
2514
2515void Simulator::DecodeAddSubShiftExt(Instr* instr) {
2516 // Format(instr, "add'sf's 'rd, 'rn, 'shift_op");
2517 // also, sub, cmp, etc.
2518 const bool addition = (instr->Bit(30) == 0);
2519 const Register rd = instr->RdField();
2520 const Register rn = instr->RnField();
2521 const uint64_t rm_val = DecodeShiftExtendOperand(instr);
2522 if (instr->SFField() != 0) {
2523 // 64-bit add.
2524 const uint64_t rn_val = get_register(rn, instr->RnMode());
2525 const uint64_t alu_out = rn_val + (addition ? rm_val : -rm_val);
2526 set_register(instr, rd, alu_out, instr->RdMode());
2527 if (instr->HasS()) {
2528 SetNZFlagsX(alu_out);
2529 SetCFlag(CarryFromX(alu_out, rn_val, rm_val, addition));
2530 SetVFlag(OverflowFromX(alu_out, rn_val, rm_val, addition));
2531 }
2532 } else {
2533 // 32-bit add.
2534 const uint32_t rn_val = get_wregister(rn, instr->RnMode());
2535 uint32_t rm_val32 = static_cast<uint32_t>(rm_val & kWRegMask);
2536 uint32_t carry_in = 0;
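    // Subtraction is performed as rn + ~rm + 1, so the same carry/overflow
    // helpers can be used for both additions and subtractions.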
2537 if (!addition) {
2538 carry_in = 1;
2539 rm_val32 = ~rm_val32;
2540 }
2541 const uint32_t alu_out = rn_val + rm_val32 + carry_in;
2542 set_wregister(rd, alu_out, instr->RdMode());
2543 if (instr->HasS()) {
2544 SetNZFlagsW(alu_out);
2545 SetCFlag(CarryFromW(rn_val, rm_val32, carry_in));
2546 SetVFlag(OverflowFromW(rn_val, rm_val32, carry_in));
2547 }
2548 }
2549}
2550
2551void Simulator::DecodeAddSubWithCarry(Instr* instr) {
2552 // Format(instr, "adc'sf's 'rd, 'rn, 'rm");
2553 // Format(instr, "sbc'sf's 'rd, 'rn, 'rm");
2554 const bool addition = (instr->Bit(30) == 0);
2555 const Register rd = instr->RdField();
2556 const Register rn = instr->RnField();
2557 const Register rm = instr->RmField();
2558 const uint64_t rn_val64 = get_register(rn, R31IsZR);
2559 const uint32_t rn_val32 = get_wregister(rn, R31IsZR);
2560 const uint64_t rm_val64 = get_register(rm, R31IsZR);
2561 uint32_t rm_val32 = get_wregister(rm, R31IsZR);
2562 const uint32_t carry_in = c_flag_ ? 1 : 0;
2563 if (instr->SFField() != 0) {
2564 // 64-bit add.
2565 const uint64_t alu_out =
2566 rn_val64 + (addition ? rm_val64 : ~rm_val64) + carry_in;
2567 set_register(instr, rd, alu_out, R31IsZR);
2568 if (instr->HasS()) {
2569 SetNZFlagsX(alu_out);
2570 SetCFlag(CarryFromX(alu_out, rn_val64, rm_val64, addition));
2571 SetVFlag(OverflowFromX(alu_out, rn_val64, rm_val64, addition));
2572 }
2573 } else {
2574 // 32-bit add.
2575 if (!addition) {
2576 rm_val32 = ~rm_val32;
2577 }
2578 const uint32_t alu_out = rn_val32 + rm_val32 + carry_in;
2579 set_wregister(rd, alu_out, R31IsZR);
2580 if (instr->HasS()) {
2581 SetNZFlagsW(alu_out);
2582 SetCFlag(CarryFromW(rn_val32, rm_val32, carry_in));
2583 SetVFlag(OverflowFromW(rn_val32, rm_val32, carry_in));
2584 }
2585 }
2586}
2587
2588void Simulator::DecodeLogicalShift(Instr* instr) {
2589 const int op = (instr->Bits(29, 2) << 1) | instr->Bit(21);
2590 const Register rd = instr->RdField();
2591 const Register rn = instr->RnField();
2592 const int64_t rn_val = get_register(rn, instr->RnMode());
2593 const int64_t rm_val = DecodeShiftExtendOperand(instr);
2594 int64_t alu_out = 0;
2595 switch (op) {
2596 case 0:
2597 // Format(instr, "and'sf 'rd, 'rn, 'shift_op");
2598 alu_out = rn_val & rm_val;
2599 break;
2600 case 1:
2601 // Format(instr, "bic'sf 'rd, 'rn, 'shift_op");
2602 alu_out = rn_val & (~rm_val);
2603 break;
2604 case 2:
2605 // Format(instr, "orr'sf 'rd, 'rn, 'shift_op");
2606 alu_out = rn_val | rm_val;
2607 break;
2608 case 3:
2609 // Format(instr, "orn'sf 'rd, 'rn, 'shift_op");
2610 alu_out = rn_val | (~rm_val);
2611 break;
2612 case 4:
2613 // Format(instr, "eor'sf 'rd, 'rn, 'shift_op");
2614 alu_out = rn_val ^ rm_val;
2615 break;
2616 case 5:
2617 // Format(instr, "eon'sf 'rd, 'rn, 'shift_op");
2618 alu_out = rn_val ^ (~rm_val);
2619 break;
2620 case 6:
2621 // Format(instr, "and'sfs 'rd, 'rn, 'shift_op");
2622 alu_out = rn_val & rm_val;
2623 break;
2624 case 7:
2625 // Format(instr, "bic'sfs 'rd, 'rn, 'shift_op");
2626 alu_out = rn_val & (~rm_val);
2627 break;
2628 default:
2629 UNREACHABLE();
2630 break;
2631 }
2632
2633 // Set flags if ands or bics.
2634 if ((op == 6) || (op == 7)) {
2635 if (instr->SFField() == 1) {
2636 SetNZFlagsX(alu_out);
2637 } else {
2638 SetNZFlagsW(alu_out);
2639 }
2640 SetCFlag(false);
2641 SetVFlag(false);
2642 }
2643
2644 if (instr->SFField() == 1) {
2645 set_register(instr, rd, alu_out, instr->RdMode());
2646 } else {
2647 set_wregister(rd, alu_out & kWRegMask, instr->RdMode());
2648 }
2649}
2650
2651static int64_t divide64(int64_t top, int64_t bottom, bool is_signed) {
2652 // ARM64 does not trap on integer division by zero. The destination register
2653 // is instead set to 0.
2654 if (bottom == 0) {
2655 return 0;
2656 }
2657
2658 if (is_signed) {
2659 // INT_MIN / -1 = INT_MIN.
2660 if ((top == static_cast<int64_t>(0x8000000000000000LL)) &&
2661 (bottom == static_cast<int64_t>(0xffffffffffffffffLL))) {
2662 return static_cast<int64_t>(0x8000000000000000LL);
2663 } else {
2664 return top / bottom;
2665 }
2666 } else {
2667 const uint64_t utop = static_cast<uint64_t>(top);
2668 const uint64_t ubottom = static_cast<uint64_t>(bottom);
2669 return static_cast<int64_t>(utop / ubottom);
2670 }
2671}
2672
2673static int32_t divide32(int32_t top, int32_t bottom, bool is_signed) {
2674 // ARM64 does not trap on integer division by zero. The destination register
2675 // is instead set to 0.
2676 if (bottom == 0) {
2677 return 0;
2678 }
2679
2680 if (is_signed) {
2681 // INT_MIN / -1 = INT_MIN.
2682 if ((top == static_cast<int32_t>(0x80000000)) &&
2683 (bottom == static_cast<int32_t>(0xffffffff))) {
2684 return static_cast<int32_t>(0x80000000);
2685 } else {
2686 return top / bottom;
2687 }
2688 } else {
2689 const uint32_t utop = static_cast<uint32_t>(top);
2690 const uint32_t ubottom = static_cast<uint32_t>(bottom);
2691 return static_cast<int32_t>(utop / ubottom);
2692 }
2693}
2694
2695void Simulator::DecodeMiscDP1Source(Instr* instr) {
2696 if (instr->Bit(29) != 0) {
2697 UnimplementedInstruction(instr);
2698 }
2699
2700 const Register rd = instr->RdField();
2701 const Register rn = instr->RnField();
2702 const int op = instr->Bits(10, 10);
2703 const int64_t rn_val64 = get_register(rn, R31IsZR);
2704 const int32_t rn_val32 = get_wregister(rn, R31IsZR);
2705 switch (op) {
2706 case 4: {
2707 // Format(instr, "clz'sf 'rd, 'rn");
2708 if (instr->SFField() == 1) {
2709 const uint64_t rd_val = Utils::CountLeadingZeros64(rn_val64);
2710 set_register(instr, rd, rd_val, R31IsZR);
2711 } else {
2712 const uint32_t rd_val = Utils::CountLeadingZeros32(rn_val32);
2713 set_wregister(rd, rd_val, R31IsZR);
2714 }
2715 break;
2716 }
2717 case 0: {
2718 // Format(instr, "rbit'sf 'rd, 'rn");
2719 if (instr->SFField() == 1) {
2720 const uint64_t rd_val = Utils::ReverseBits64(rn_val64);
2721 set_register(instr, rd, rd_val, R31IsZR);
2722 } else {
2723 const uint32_t rd_val = Utils::ReverseBits32(rn_val32);
2724 set_wregister(rd, rd_val, R31IsZR);
2725 }
2726 break;
2727 }
2728 default:
2729 UnimplementedInstruction(instr);
2730 break;
2731 }
2732}
2733
2734void Simulator::DecodeMiscDP2Source(Instr* instr) {
2735 if (instr->Bit(29) != 0) {
2736 UnimplementedInstruction(instr);
2737 }
2738
2739 const Register rd = instr->RdField();
2740 const Register rn = instr->RnField();
2741 const Register rm = instr->RmField();
2742 const int op = instr->Bits(10, 5);
2743 const int64_t rn_val64 = get_register(rn, R31IsZR);
2744 const int64_t rm_val64 = get_register(rm, R31IsZR);
2745 const int32_t rn_val32 = get_wregister(rn, R31IsZR);
2746 const int32_t rm_val32 = get_wregister(rm, R31IsZR);
2747 switch (op) {
2748 case 2:
2749 case 3: {
2750 // Format(instr, "udiv'sf 'rd, 'rn, 'rm");
2751 // Format(instr, "sdiv'sf 'rd, 'rn, 'rm");
2752 const bool is_signed = instr->Bit(10) == 1;
2753 if (instr->SFField() == 1) {
2754 set_register(instr, rd, divide64(rn_val64, rm_val64, is_signed),
2755 R31IsZR);
2756 } else {
2757 set_wregister(rd, divide32(rn_val32, rm_val32, is_signed), R31IsZR);
2758 }
2759 break;
2760 }
2761 case 8: {
2762 // Format(instr, "lsl'sf 'rd, 'rn, 'rm");
2763 if (instr->SFField() == 1) {
2764 const uint64_t rn_u64 = static_cast<uint64_t>(rn_val64);
2765 const int64_t alu_out = rn_u64 << (rm_val64 & (kXRegSizeInBits - 1));
2766 set_register(instr, rd, alu_out, R31IsZR);
2767 } else {
2768 const uint32_t rn_u32 = static_cast<uint32_t>(rn_val32);
2769 const int32_t alu_out = rn_u32 << (rm_val32 & (kXRegSizeInBits - 1));
2770 set_wregister(rd, alu_out, R31IsZR);
2771 }
2772 break;
2773 }
2774 case 9: {
2775 // Format(instr, "lsr'sf 'rd, 'rn, 'rm");
2776 if (instr->SFField() == 1) {
2777 const uint64_t rn_u64 = static_cast<uint64_t>(rn_val64);
2778 const int64_t alu_out = rn_u64 >> (rm_val64 & (kXRegSizeInBits - 1));
2779 set_register(instr, rd, alu_out, R31IsZR);
2780 } else {
2781 const uint32_t rn_u32 = static_cast<uint32_t>(rn_val32);
2782 const int32_t alu_out = rn_u32 >> (rm_val32 & (kXRegSizeInBits - 1));
2783 set_wregister(rd, alu_out, R31IsZR);
2784 }
2785 break;
2786 }
2787 case 10: {
2788 // Format(instr, "asr'sf 'rd, 'rn, 'rm");
2789 if (instr->SFField() == 1) {
2790 const int64_t alu_out = rn_val64 >> (rm_val64 & (kXRegSizeInBits - 1));
2791 set_register(instr, rd, alu_out, R31IsZR);
2792 } else {
2793 const int32_t alu_out = rn_val32 >> (rm_val32 & (kXRegSizeInBits - 1));
2794 set_wregister(rd, alu_out, R31IsZR);
2795 }
2796 break;
2797 }
2798 default:
2799 UnimplementedInstruction(instr);
2800 break;
2801 }
2802}
2803
2804void Simulator::DecodeMiscDP3Source(Instr* instr) {
2805 const Register rd = instr->RdField();
2806 const Register rn = instr->RnField();
2807 const Register rm = instr->RmField();
2808 const Register ra = instr->RaField();
2809 if ((instr->Bits(29, 2) == 0) && (instr->Bits(21, 3) == 0) &&
2810 (instr->Bit(15) == 0)) {
2811 // Format(instr, "madd'sf 'rd, 'rn, 'rm, 'ra");
2812 if (instr->SFField() == 1) {
2813 const uint64_t rn_val = get_register(rn, R31IsZR);
2814 const uint64_t rm_val = get_register(rm, R31IsZR);
2815 const uint64_t ra_val = get_register(ra, R31IsZR);
2816 const uint64_t alu_out = ra_val + (rn_val * rm_val);
2817 set_register(instr, rd, alu_out, R31IsZR);
2818 } else {
2819 const uint32_t rn_val = get_wregister(rn, R31IsZR);
2820 const uint32_t rm_val = get_wregister(rm, R31IsZR);
2821 const uint32_t ra_val = get_wregister(ra, R31IsZR);
2822 const uint32_t alu_out = ra_val + (rn_val * rm_val);
2823 set_wregister(rd, alu_out, R31IsZR);
2824 }
2825 } else if ((instr->Bits(29, 2) == 0) && (instr->Bits(21, 3) == 0) &&
2826 (instr->Bit(15) == 1)) {
2827 // Format(instr, "msub'sf 'rd, 'rn, 'rm, 'ra");
2828 if (instr->SFField() == 1) {
2829 const uint64_t rn_val = get_register(rn, R31IsZR);
2830 const uint64_t rm_val = get_register(rm, R31IsZR);
2831 const uint64_t ra_val = get_register(ra, R31IsZR);
2832 const uint64_t alu_out = ra_val - (rn_val * rm_val);
2833 set_register(instr, rd, alu_out, R31IsZR);
2834 } else {
2835 const uint32_t rn_val = get_wregister(rn, R31IsZR);
2836 const uint32_t rm_val = get_wregister(rm, R31IsZR);
2837 const uint32_t ra_val = get_wregister(ra, R31IsZR);
2838 const uint32_t alu_out = ra_val - (rn_val * rm_val);
2839 set_wregister(rd, alu_out, R31IsZR);
2840 }
2841 } else if ((instr->Bits(29, 3) == 4) && (instr->Bits(21, 3) == 2) &&
2842 (instr->Bit(15) == 0)) {
2843 ASSERT(ra == R31); // Should-Be-One
2844 // Format(instr, "smulh 'rd, 'rn, 'rm");
2845 const int64_t rn_val = get_register(rn, R31IsZR);
2846 const int64_t rm_val = get_register(rm, R31IsZR);
2847#if defined(DART_HOST_OS_WINDOWS)
2848 // Visual Studio does not support __int128.
2849 int64_t alu_out;
2850 Multiply128(rn_val, rm_val, &alu_out);
2851#else
2852 const __int128 res =
2853 static_cast<__int128>(rn_val) * static_cast<__int128>(rm_val);
2854 const int64_t alu_out = static_cast<int64_t>(res >> 64);
2855#endif // DART_HOST_OS_WINDOWS
2856 set_register(instr, rd, alu_out, R31IsZR);
2857 } else if ((instr->Bits(29, 3) == 4) && (instr->Bits(21, 3) == 6) &&
2858 (instr->Bit(15) == 0)) {
2859 ASSERT(ra == R31); // Should-Be-One
2860 // Format(instr, "umulh 'rd, 'rn, 'rm");
2861 const uint64_t rn_val = get_register(rn, R31IsZR);
2862 const uint64_t rm_val = get_register(rm, R31IsZR);
2863#if defined(DART_HOST_OS_WINDOWS)
2864 // Visual Studio does not support __int128.
2865 uint64_t alu_out;
2866 UnsignedMultiply128(rn_val, rm_val, &alu_out);
2867#else
2868 const unsigned __int128 res = static_cast<unsigned __int128>(rn_val) *
2869 static_cast<unsigned __int128>(rm_val);
2870 const uint64_t alu_out = static_cast<uint64_t>(res >> 64);
2871#endif // DART_HOST_OS_WINDOWS
2872 set_register(instr, rd, alu_out, R31IsZR);
2873 } else if ((instr->Bits(29, 3) == 4) && (instr->Bit(15) == 0)) {
2874 if (instr->Bits(21, 3) == 5) {
2875 // Format(instr, "umaddl 'rd, 'rn, 'rm, 'ra");
2876 const uint64_t rn_val = static_cast<uint32_t>(get_wregister(rn, R31IsZR));
2877 const uint64_t rm_val = static_cast<uint32_t>(get_wregister(rm, R31IsZR));
2878 const uint64_t ra_val = get_register(ra, R31IsZR);
2879 const uint64_t alu_out = ra_val + (rn_val * rm_val);
2880 set_register(instr, rd, alu_out, R31IsZR);
2881 } else {
2882 // Format(instr, "smaddl 'rd, 'rn, 'rm, 'ra");
2883 const int64_t rn_val = static_cast<int32_t>(get_wregister(rn, R31IsZR));
2884 const int64_t rm_val = static_cast<int32_t>(get_wregister(rm, R31IsZR));
2885 const int64_t ra_val = get_register(ra, R31IsZR);
2886 const int64_t alu_out = ra_val + (rn_val * rm_val);
2887 set_register(instr, rd, alu_out, R31IsZR);
2888 }
2889 } else {
2890 UnimplementedInstruction(instr);
2891 }
2892}
2893
2894void Simulator::DecodeConditionalSelect(Instr* instr) {
2895 const Register rd = instr->RdField();
2896 const Register rn = instr->RnField();
2897 const Register rm = instr->RmField();
2898 const int64_t rm_val64 = get_register(rm, R31IsZR);
2899 const int32_t rm_val32 = get_wregister(rm, R31IsZR);
2900 const int64_t rn_val64 = get_register(rn, instr->RnMode());
2901 const int32_t rn_val32 = get_wregister(rn, instr->RnMode());
2902 int64_t result64 = 0;
2903 int32_t result32 = 0;
2904
2905 if ((instr->Bits(29, 2) == 0) && (instr->Bits(10, 2) == 0)) {
2906 // Format(instr, "mov'sf'cond 'rd, 'rn, 'rm");
2907 result64 = rm_val64;
2908 result32 = rm_val32;
2909 if (ConditionallyExecute(instr)) {
2910 result64 = rn_val64;
2911 result32 = rn_val32;
2912 }
2913 } else if ((instr->Bits(29, 2) == 0) && (instr->Bits(10, 2) == 1)) {
2914 // Format(instr, "csinc'sf'cond 'rd, 'rn, 'rm");
2915 result64 = rm_val64 + 1;
2916 result32 = rm_val32 + 1;
2917 if (ConditionallyExecute(instr)) {
2918 result64 = rn_val64;
2919 result32 = rn_val32;
2920 }
2921 } else if ((instr->Bits(29, 2) == 2) && (instr->Bits(10, 2) == 0)) {
2922 // Format(instr, "csinv'sf'cond 'rd, 'rn, 'rm");
2923 result64 = ~rm_val64;
2924 result32 = ~rm_val32;
2925 if (ConditionallyExecute(instr)) {
2926 result64 = rn_val64;
2927 result32 = rn_val32;
2928 }
2929 } else if ((instr->Bits(29, 2) == 2) && (instr->Bits(10, 2) == 1)) {
2930 // Format(instr, "csneg'sf'cond 'rd, 'rn, 'rm");
2931 result64 = -rm_val64;
2932 result32 = -rm_val32;
2933 if (ConditionallyExecute(instr)) {
2934 result64 = rn_val64;
2935 result32 = rn_val32;
2936 }
2937 } else {
2938 UnimplementedInstruction(instr);
2939 return;
2940 }
2941
2942 if (instr->SFField() == 1) {
2943 set_register(instr, rd, result64, instr->RdMode());
2944 } else {
2945 set_wregister(rd, result32, instr->RdMode());
2946 }
2947}
2948
2949void Simulator::DecodeDPRegister(Instr* instr) {
2950 if (instr->IsAddSubShiftExtOp()) {
2951 DecodeAddSubShiftExt(instr);
2952 } else if (instr->IsAddSubWithCarryOp()) {
2953 DecodeAddSubWithCarry(instr);
2954 } else if (instr->IsLogicalShiftOp()) {
2955 DecodeLogicalShift(instr);
2956 } else if (instr->IsMiscDP1SourceOp()) {
2957 DecodeMiscDP1Source(instr);
2958 } else if (instr->IsMiscDP2SourceOp()) {
2959 DecodeMiscDP2Source(instr);
2960 } else if (instr->IsMiscDP3SourceOp()) {
2961 DecodeMiscDP3Source(instr);
2962 } else if (instr->IsConditionalSelectOp()) {
2963 DecodeConditionalSelect(instr);
2964 } else {
2965 UnimplementedInstruction(instr);
2966 }
2967}
2968
2969void Simulator::DecodeSIMDCopy(Instr* instr) {
2970 const int32_t Q = instr->Bit(30);
2971 const int32_t op = instr->Bit(29);
2972 const int32_t imm4 = instr->Bits(11, 4);
2973 const int32_t imm5 = instr->Bits(16, 5);
2974
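  // The position of the lowest set bit in imm5 gives the element size; the
  // bits above it in imm5 (and in imm4, for element-to-element moves) give
  // the element indices.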
2975 int32_t idx4 = -1;
2976 int32_t idx5 = -1;
2977 int32_t element_bytes;
2978 if ((imm5 & 0x1) != 0) {
2979 idx4 = imm4;
2980 idx5 = imm5 >> 1;
2981 element_bytes = 1;
2982 } else if ((imm5 & 0x2) != 0) {
2983 idx4 = imm4 >> 1;
2984 idx5 = imm5 >> 2;
2985 element_bytes = 2;
2986 } else if ((imm5 & 0x4) != 0) {
2987 idx4 = imm4 >> 2;
2988 idx5 = imm5 >> 3;
2989 element_bytes = 4;
2990 } else if ((imm5 & 0x8) != 0) {
2991 idx4 = imm4 >> 3;
2992 idx5 = imm5 >> 4;
2993 element_bytes = 8;
2994 } else {
2995 UnimplementedInstruction(instr);
2996 return;
2997 }
2998 ASSERT((idx4 != -1) && (idx5 != -1));
2999
3000 const VRegister vd = instr->VdField();
3001 const VRegister vn = instr->VnField();
3002 const Register rn = instr->RnField();
3003 const Register rd = instr->RdField();
3004 if ((op == 0) && (imm4 == 7)) {
3005 if (Q == 0) {
3006 // Format(instr, "vmovrs 'rd, 'vn'idx5");
3007 set_wregister(rd, get_vregisters(vn, idx5), R31IsZR);
3008 } else {
3009 // Format(instr, "vmovrd 'rd, 'vn'idx5");
3010 set_register(instr, rd, get_vregisterd(vn, idx5), R31IsZR);
3011 }
3012 } else if ((Q == 1) && (op == 0) && (imm4 == 0)) {
3013 // Format(instr, "vdup'csz 'vd, 'vn'idx5");
3014 if (element_bytes == 4) {
3015 for (int i = 0; i < 4; i++) {
3016 set_vregisters(vd, i, get_vregisters(vn, idx5));
3017 }
3018 } else if (element_bytes == 8) {
3019 for (int i = 0; i < 2; i++) {
3020 set_vregisterd(vd, i, get_vregisterd(vn, idx5));
3021 }
3022 } else {
3023 UnimplementedInstruction(instr);
3024 return;
3025 }
3026 } else if ((Q == 1) && (op == 0) && (imm4 == 3)) {
3027 // Format(instr, "vins'csz 'vd'idx5, 'rn");
3028 if (element_bytes == 4) {
3029 set_vregisters(vd, idx5, get_wregister(rn, R31IsZR));
3030 } else if (element_bytes == 8) {
3031 set_vregisterd(vd, idx5, get_register(rn, R31IsZR));
3032 } else {
3033 UnimplementedInstruction(instr);
3034 }
3035 } else if ((Q == 1) && (op == 0) && (imm4 == 1)) {
3036 // Format(instr, "vdup'csz 'vd, 'rn");
3037 if (element_bytes == 4) {
3038 for (int i = 0; i < 4; i++) {
3039 set_vregisters(vd, i, get_wregister(rn, R31IsZR));
3040 }
3041 } else if (element_bytes == 8) {
3042 for (int i = 0; i < 2; i++) {
3043 set_vregisterd(vd, i, get_register(rn, R31IsZR));
3044 }
3045 } else {
3046 UnimplementedInstruction(instr);
3047 return;
3048 }
3049 } else if ((Q == 1) && (op == 1)) {
3050 // Format(instr, "vins'csz 'vd'idx5, 'vn'idx4");
3051 if (element_bytes == 4) {
3052 set_vregisters(vd, idx5, get_vregisters(vn, idx4));
3053 } else if (element_bytes == 8) {
3054 set_vregisterd(vd, idx5, get_vregisterd(vn, idx4));
3055 } else {
3056 UnimplementedInstruction(instr);
3057 }
3058 } else {
3059 UnimplementedInstruction(instr);
3060 }
3061}
3062
3063static float vminf(float f1, float f2) {
3064 if (f1 == f2) {
3065    // -0.0 and 0.0 compare equal; min must still return -0.0 in that case.
3066 return signbit(f1) ? f1 : f2;
3067 }
3068 return f1 > f2 ? f2 : f1;
3069}
3070
3071static float vmaxf(float f1, float f2) {
3072 if (f1 == f2) {
3073    // -0.0 and 0.0 compare equal; max must return +0.0 in that case.
3074 return signbit(f1) ? f2 : f1;
3075 }
3076 return f1 < f2 ? f2 : f1;
3077}
3078
3079static double vmind(double f1, double f2) {
3080 if (f1 == f2) {
3081    // -0.0 and 0.0 compare equal; min must still return -0.0 in that case.
3082 return signbit(f1) ? f1 : f2;
3083 }
3084 return f1 > f2 ? f2 : f1;
3085}
3086
3087static double vmaxd(double f1, double f2) {
3088 if (f1 == f2) {
3089    // -0.0 and 0.0 compare equal; max must return +0.0 in that case.
3090 return signbit(f1) ? f2 : f1;
3091 }
3092 return f1 < f2 ? f2 : f1;
3093}
3094
3095void Simulator::DecodeSIMDThreeSame(Instr* instr) {
3096 const int Q = instr->Bit(30);
3097 const int U = instr->Bit(29);
3098 const int opcode = instr->Bits(11, 5);
3099
3100 if (Q == 0) {
3101 UnimplementedInstruction(instr);
3102 return;
3103 }
3104
3105 const VRegister vd = instr->VdField();
3106 const VRegister vn = instr->VnField();
3107 const VRegister vm = instr->VmField();
3108 if (instr->Bit(22) == 0) {
3109 // f32 case.
3110 for (int idx = 0; idx < 4; idx++) {
3111 const int32_t vn_val = get_vregisters(vn, idx);
3112 const int32_t vm_val = get_vregisters(vm, idx);
3113 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3114 const float vm_flt = bit_cast<float, int32_t>(vm_val);
3115      int32_t res = 0;
3116 if ((U == 0) && (opcode == 0x3)) {
3117 if (instr->Bit(23) == 0) {
3118 // Format(instr, "vand 'vd, 'vn, 'vm");
3119 res = vn_val & vm_val;
3120 } else {
3121 // Format(instr, "vorr 'vd, 'vn, 'vm");
3122 res = vn_val | vm_val;
3123 }
3124 } else if ((U == 1) && (opcode == 0x3)) {
3125 // Format(instr, "veor 'vd, 'vn, 'vm");
3126 res = vn_val ^ vm_val;
3127 } else if ((U == 0) && (opcode == 0x10)) {
3128 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3129 res = vn_val + vm_val;
3130 } else if ((U == 1) && (opcode == 0x10)) {
3131 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3132 res = vn_val - vm_val;
3133 } else if ((U == 0) && (opcode == 0x1a)) {
3134 if (instr->Bit(23) == 0) {
3135 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3136 res = bit_cast<int32_t, float>(vn_flt + vm_flt);
3137 } else {
3138 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3139 res = bit_cast<int32_t, float>(vn_flt - vm_flt);
3140 }
3141 } else if ((U == 1) && (opcode == 0x1b)) {
3142 // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
3143 res = bit_cast<int32_t, float>(vn_flt * vm_flt);
3144 } else if ((U == 1) && (opcode == 0x1f)) {
3145 // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
3146 res = bit_cast<int32_t, float>(vn_flt / vm_flt);
3147 } else if ((U == 0) && (opcode == 0x1c)) {
3148 // Format(instr, "vceq'vsz 'vd, 'vn, 'vm");
3149 res = (vn_flt == vm_flt) ? 0xffffffff : 0;
3150 } else if ((U == 1) && (opcode == 0x1c)) {
3151 if (instr->Bit(23) == 1) {
3152 // Format(instr, "vcgt'vsz 'vd, 'vn, 'vm");
3153 res = (vn_flt > vm_flt) ? 0xffffffff : 0;
3154 } else {
3155 // Format(instr, "vcge'vsz 'vd, 'vn, 'vm");
3156 res = (vn_flt >= vm_flt) ? 0xffffffff : 0;
3157 }
3158 } else if ((U == 0) && (opcode == 0x1e)) {
3159 if (instr->Bit(23) == 1) {
3160 // Format(instr, "vmin'vsz 'vd, 'vn, 'vm");
3161 const float m = vminf(vn_flt, vm_flt);
3162 res = bit_cast<int32_t, float>(m);
3163 } else {
3164 // Format(instr, "vmax'vsz 'vd, 'vn, 'vm");
3165 const float m = vmaxf(vn_flt, vm_flt);
3166 res = bit_cast<int32_t, float>(m);
3167 }
3168 } else if ((U == 0) && (opcode == 0x1f)) {
3169 if (instr->Bit(23) == 0) {
3170 // Format(instr, "vrecps'vsz 'vd, 'vn, 'vm");
3171 res = bit_cast<int32_t, float>(2.0 - (vn_flt * vm_flt));
3172 } else {
3173 // Format(instr, "vrsqrt'vsz 'vd, 'vn, 'vm");
3174 res = bit_cast<int32_t, float>((3.0 - vn_flt * vm_flt) / 2.0);
3175 }
3176 } else {
3177 UnimplementedInstruction(instr);
3178 return;
3179 }
3180 set_vregisters(vd, idx, res);
3181 }
3182 } else {
3183 // f64 case.
3184 for (int idx = 0; idx < 2; idx++) {
3185 const int64_t vn_val = get_vregisterd(vn, idx);
3186 const int64_t vm_val = get_vregisterd(vm, idx);
3187 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3188 const double vm_dbl = bit_cast<double, int64_t>(vm_val);
3189      int64_t res = 0;
3190 if ((U == 0) && (opcode == 0x3)) {
3191 if (instr->Bit(23) == 0) {
3192 // Format(instr, "vand 'vd, 'vn, 'vm");
3193 res = vn_val & vm_val;
3194 } else {
3195 // Format(instr, "vorr 'vd, 'vn, 'vm");
3196 res = vn_val | vm_val;
3197 }
3198 } else if ((U == 1) && (opcode == 0x3)) {
3199 // Format(instr, "veor 'vd, 'vn, 'vm");
3200 res = vn_val ^ vm_val;
3201 } else if ((U == 0) && (opcode == 0x10)) {
3202 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3203 res = vn_val + vm_val;
3204 } else if ((U == 1) && (opcode == 0x10)) {
3205 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3206 res = vn_val - vm_val;
3207 } else if ((U == 0) && (opcode == 0x1a)) {
3208 if (instr->Bit(23) == 0) {
3209 // Format(instr, "vadd'vsz 'vd, 'vn, 'vm");
3210 res = bit_cast<int64_t, double>(vn_dbl + vm_dbl);
3211 } else {
3212 // Format(instr, "vsub'vsz 'vd, 'vn, 'vm");
3213 res = bit_cast<int64_t, double>(vn_dbl - vm_dbl);
3214 }
3215 } else if ((U == 1) && (opcode == 0x1b)) {
3216 // Format(instr, "vmul'vsz 'vd, 'vn, 'vm");
3217 res = bit_cast<int64_t, double>(vn_dbl * vm_dbl);
3218 } else if ((U == 1) && (opcode == 0x1f)) {
3219 // Format(instr, "vdiv'vsz 'vd, 'vn, 'vm");
3220 res = bit_cast<int64_t, double>(vn_dbl / vm_dbl);
3221 } else if ((U == 0) && (opcode == 0x1c)) {
3222 // Format(instr, "vceq'vsz 'vd, 'vn, 'vm");
3223 res = (vn_dbl == vm_dbl) ? 0xffffffffffffffffLL : 0;
3224 } else if ((U == 1) && (opcode == 0x1c)) {
3225 if (instr->Bit(23) == 1) {
3226 // Format(instr, "vcgt'vsz 'vd, 'vn, 'vm");
3227 res = (vn_dbl > vm_dbl) ? 0xffffffffffffffffLL : 0;
3228 } else {
3229 // Format(instr, "vcge'vsz 'vd, 'vn, 'vm");
3230 res = (vn_dbl >= vm_dbl) ? 0xffffffffffffffffLL : 0;
3231 }
3232 } else if ((U == 0) && (opcode == 0x1e)) {
3233 if (instr->Bit(23) == 1) {
3234 // Format(instr, "vmin'vsz 'vd, 'vn, 'vm");
3235 const double m = vmind(vn_dbl, vm_dbl);
3236 res = bit_cast<int64_t, double>(m);
3237 } else {
3238 // Format(instr, "vmax'vsz 'vd, 'vn, 'vm");
3239 const double m = vmaxd(vn_dbl, vm_dbl);
3240 res = bit_cast<int64_t, double>(m);
3241 }
3242 } else {
3243 UnimplementedInstruction(instr);
3244 return;
3245 }
3246 set_vregisterd(vd, idx, res);
3247 }
3248 }
3249}
3250
3251static float arm_reciprocal_sqrt_estimate(float a) {
3252 // From the ARM Architecture Reference Manual A2-87.
3253 if (isinf(a) || (fabs(a) >= exp2f(126)))
3254 return 0.0;
3255 else if (a == 0.0)
3256 return kPosInfinity;
3257 else if (isnan(a))
3258 return a;
3259
3260 uint32_t a_bits = bit_cast<uint32_t, float>(a);
3261 uint64_t scaled;
3262 if (((a_bits >> 23) & 1) != 0) {
3263 // scaled = '0 01111111101' : operand<22:0> : Zeros(29)
3264 scaled = (static_cast<uint64_t>(0x3fd) << 52) |
3265 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3266 } else {
3267 // scaled = '0 01111111110' : operand<22:0> : Zeros(29)
3268 scaled = (static_cast<uint64_t>(0x3fe) << 52) |
3269 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3270 }
3271  // result_exp = (380 - UInt(operand<30:23>)) DIV 2;
3272 int32_t result_exp = (380 - ((a_bits >> 23) & 0xff)) / 2;
3273
3274 double scaled_d = bit_cast<double, uint64_t>(scaled);
3275 ASSERT((scaled_d >= 0.25) && (scaled_d < 1.0));
3276
3277 double r;
3278 if (scaled_d < 0.5) {
3279 // range 0.25 <= a < 0.5
3280
3281 // a in units of 1/512 rounded down.
3282 int32_t q0 = static_cast<int32_t>(scaled_d * 512.0);
3283 // reciprocal root r.
3284 r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
3285 } else {
3286 // range 0.5 <= a < 1.0
3287
3288 // a in units of 1/256 rounded down.
3289 int32_t q1 = static_cast<int32_t>(scaled_d * 256.0);
3290 // reciprocal root r.
3291 r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
3292 }
3293 // r in units of 1/256 rounded to nearest.
3294 int32_t s = static_cast<int>(256.0 * r + 0.5);
3295 double estimate = static_cast<double>(s) / 256.0;
3296 ASSERT((estimate >= 1.0) && (estimate <= (511.0 / 256.0)));
3297
3298 // result = 0 : result_exp<7:0> : estimate<51:29>
3299 int32_t result_bits =
3300 ((result_exp & 0xff) << 23) |
3301 ((bit_cast<uint64_t, double>(estimate) >> 29) & 0x7fffff);
3302 return bit_cast<float, int32_t>(result_bits);
3303}
3304
3305static float arm_recip_estimate(float a) {
3306 // From the ARM Architecture Reference Manual A2-85.
3307 if (isinf(a) || (fabs(a) >= exp2f(126)))
3308 return 0.0;
3309 else if (a == 0.0)
3310 return kPosInfinity;
3311 else if (isnan(a))
3312 return a;
3313
3314 uint32_t a_bits = bit_cast<uint32_t, float>(a);
3315 // scaled = '0011 1111 1110' : a<22:0> : Zeros(29)
3316 uint64_t scaled = (static_cast<uint64_t>(0x3fe) << 52) |
3317 ((static_cast<uint64_t>(a_bits) & 0x7fffff) << 29);
3318 // result_exp = 253 - UInt(a<30:23>)
3319 int32_t result_exp = 253 - ((a_bits >> 23) & 0xff);
3320 ASSERT((result_exp >= 1) && (result_exp <= 252));
3321
3322 double scaled_d = bit_cast<double, uint64_t>(scaled);
3323 ASSERT((scaled_d >= 0.5) && (scaled_d < 1.0));
3324
3325 // a in units of 1/512 rounded down.
3326 int32_t q = static_cast<int32_t>(scaled_d * 512.0);
3327 // reciprocal r.
3328 double r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
3329 // r in units of 1/256 rounded to nearest.
3330 int32_t s = static_cast<int32_t>(256.0 * r + 0.5);
3331 double estimate = static_cast<double>(s) / 256.0;
3332 ASSERT((estimate >= 1.0) && (estimate <= (511.0 / 256.0)));
3333
3334 // result = sign : result_exp<7:0> : estimate<51:29>
3335 int32_t result_bits =
3336 (a_bits & 0x80000000) | ((result_exp & 0xff) << 23) |
3337 ((bit_cast<uint64_t, double>(estimate) >> 29) & 0x7fffff);
3338 return bit_cast<float, int32_t>(result_bits);
3339}
3340
3341void Simulator::DecodeSIMDTwoReg(Instr* instr) {
3342 const int32_t Q = instr->Bit(30);
3343 const int32_t U = instr->Bit(29);
3344 const int32_t op = instr->Bits(12, 5);
3345 const int32_t sz = instr->Bits(22, 2);
3346 const VRegister vd = instr->VdField();
3347 const VRegister vn = instr->VnField();
3348
3349 if (Q != 1) {
3350 UnimplementedInstruction(instr);
3351 return;
3352 }
3353
3354 if ((U == 1) && (op == 5)) {
3355 // Format(instr, "vnot 'vd, 'vn");
3356 for (int i = 0; i < 2; i++) {
3357 set_vregisterd(vd, i, ~get_vregisterd(vn, i));
3358 }
3359 } else if ((U == 0) && (op == 0xf)) {
3360 if (sz == 2) {
3361 // Format(instr, "vabss 'vd, 'vn");
3362 for (int i = 0; i < 4; i++) {
3363 const int32_t vn_val = get_vregisters(vn, i);
3364 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3365 set_vregisters(vd, i, bit_cast<int32_t, float>(fabsf(vn_flt)));
3366 }
3367 } else if (sz == 3) {
3368 // Format(instr, "vabsd 'vd, 'vn");
3369 for (int i = 0; i < 2; i++) {
3370 const int64_t vn_val = get_vregisterd(vn, i);
3371 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3372 set_vregisterd(vd, i, bit_cast<int64_t, double>(fabs(vn_dbl)));
3373 }
3374 } else {
3375 UnimplementedInstruction(instr);
3376 }
3377 } else if ((U == 1) && (op == 0xf)) {
3378 if (sz == 2) {
3379 // Format(instr, "vnegs 'vd, 'vn");
3380 for (int i = 0; i < 4; i++) {
3381 const int32_t vn_val = get_vregisters(vn, i);
3382 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3383 set_vregisters(vd, i, bit_cast<int32_t, float>(-vn_flt));
3384 }
3385 } else if (sz == 3) {
3386 // Format(instr, "vnegd 'vd, 'vn");
3387 for (int i = 0; i < 2; i++) {
3388 const int64_t vn_val = get_vregisterd(vn, i);
3389 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3390 set_vregisterd(vd, i, bit_cast<int64_t, double>(-vn_dbl));
3391 }
3392 } else {
3393 UnimplementedInstruction(instr);
3394 }
3395 } else if ((U == 1) && (op == 0x1f)) {
3396 if (sz == 2) {
3397 // Format(instr, "vsqrts 'vd, 'vn");
3398 for (int i = 0; i < 4; i++) {
3399 const int32_t vn_val = get_vregisters(vn, i);
3400 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3401 set_vregisters(vd, i, bit_cast<int32_t, float>(sqrtf(vn_flt)));
3402 }
3403 } else if (sz == 3) {
3404 // Format(instr, "vsqrtd 'vd, 'vn");
3405 for (int i = 0; i < 2; i++) {
3406 const int64_t vn_val = get_vregisterd(vn, i);
3407 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3408 set_vregisterd(vd, i, bit_cast<int64_t, double>(sqrt(vn_dbl)));
3409 }
3410 } else {
3411 UnimplementedInstruction(instr);
3412 }
3413 } else if ((U == 0) && (op == 0x1d)) {
3414 if (sz != 2) {
3415 UnimplementedInstruction(instr);
3416 return;
3417 }
3418 // Format(instr, "vrecpes 'vd, 'vn");
3419 for (int i = 0; i < 4; i++) {
3420 const int32_t vn_val = get_vregisters(vn, i);
3421 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3422 const float re = arm_recip_estimate(vn_flt);
3423 set_vregisters(vd, i, bit_cast<int32_t, float>(re));
3424 }
3425 } else if ((U == 1) && (op == 0x1d)) {
3426 if (sz != 2) {
3427 UnimplementedInstruction(instr);
3428 return;
3429 }
3430 // Format(instr, "vrsqrtes 'vd, 'vn");
3431 for (int i = 0; i < 4; i++) {
3432 const int32_t vn_val = get_vregisters(vn, i);
3433 const float vn_flt = bit_cast<float, int32_t>(vn_val);
3434 const float re = arm_reciprocal_sqrt_estimate(vn_flt);
3435 set_vregisters(vd, i, bit_cast<int32_t, float>(re));
3436 }
3437 } else {
3438 UnimplementedInstruction(instr);
3439 }
3440}
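
// Lane-layout sketch (standalone; the helper name and arrays are illustrative,
// not from the original file). With Q == 1 the ops above treat the 128-bit V
// register as four 32-bit lanes (single-precision forms) or two 64-bit lanes
// (double-precision forms): each lane's bits are reinterpreted as a float or
// double, the scalar operation is applied, and the result bits are stored back.
#include <cmath>
#include <cstdint>
#include <cstring>

static void LanewiseAbsSingle(const uint32_t in[4], uint32_t out[4]) {
  for (int i = 0; i < 4; i++) {
    float lane;
    memcpy(&lane, &in[i], sizeof(lane));   // lane bits -> float (like bit_cast)
    lane = fabsf(lane);                    // scalar op on the lane value
    memcpy(&out[i], &lane, sizeof(lane));  // float -> lane bits
  }
}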
3441
3442void Simulator::DecodeDPSimd1(Instr* instr) {
3443 if (instr->IsSIMDCopyOp()) {
3444 DecodeSIMDCopy(instr);
3445 } else if (instr->IsSIMDThreeSameOp()) {
3446 DecodeSIMDThreeSame(instr);
3447 } else if (instr->IsSIMDTwoRegOp()) {
3448 DecodeSIMDTwoReg(instr);
3449 } else {
3450 UnimplementedInstruction(instr);
3451 }
3452}
3453
3454void Simulator::DecodeFPImm(Instr* instr) {
3455 if ((instr->Bit(31) != 0) || (instr->Bit(29) != 0) || (instr->Bit(23) != 0) ||
3456 (instr->Bits(5, 5) != 0)) {
3457 UnimplementedInstruction(instr);
3458 return;
3459 }
3460 if (instr->Bit(22) == 1) {
3461 // Double.
3462 // Format(instr, "fmovd 'vd, #'immd");
3463 const VRegister vd = instr->VdField();
3464 const int64_t immd = Instr::VFPExpandImm(instr->Imm8Field());
3465 set_vregisterd(vd, 0, immd);
3466 set_vregisterd(vd, 1, 0);
3467 } else {
3468 // Single.
3469 UnimplementedInstruction(instr);
3470 }
3471}
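
// Sketch of the immediate expansion (assumption: this mirrors the ARM ARM
// VFPExpandImm pseudocode for the 64-bit case; ExpandImm8ToDouble is a
// hypothetical stand-in, not the VM's Instr::VFPExpandImm). imm8 = abcdefgh
// expands to:
//   sign(1)      = a
//   exponent(11) = NOT(b) : b b b b b b b b : c d
//   fraction(52) = e f g h : Zeros(48)
#include <cstdint>
#include <cstring>

static double ExpandImm8ToDouble(uint8_t imm8) {
  const uint64_t a = (imm8 >> 7) & 1;
  const uint64_t b = (imm8 >> 6) & 1;
  const uint64_t cd = (imm8 >> 4) & 3;
  const uint64_t efgh = imm8 & 0xf;
  const uint64_t exp = ((b ^ 1) << 10) | ((b != 0 ? 0xffu : 0x0u) << 2) | cd;
  const uint64_t bits = (a << 63) | (exp << 52) | (efgh << 48);
  double value;
  memcpy(&value, &bits, sizeof(value));
  return value;  // e.g. imm8 == 0x70 gives 1.0, imm8 == 0x00 gives 2.0.
}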
3472
3473void Simulator::DecodeFPIntCvt(Instr* instr) {
3474 const VRegister vd = instr->VdField();
3475 const VRegister vn = instr->VnField();
3476 const Register rd = instr->RdField();
3477 const Register rn = instr->RnField();
3478
3479 if (instr->Bit(29) != 0) {
3480 UnimplementedInstruction(instr);
3481 return;
3482 }
3483
3484 if ((instr->SFField() == 0) && (instr->Bits(22, 2) == 0)) {
3485 if (instr->Bits(16, 5) == 6) {
3486 // Format(instr, "fmovrs'sf 'rd, 'vn");
3487 const int32_t vn_val = get_vregisters(vn, 0);
3488 set_wregister(rd, vn_val, R31IsZR);
3489 } else if (instr->Bits(16, 5) == 7) {
3490 // Format(instr, "fmovsr'sf 'vd, 'rn");
3491 const int32_t rn_val = get_wregister(rn, R31IsZR);
3492 set_vregisters(vd, 0, rn_val);
3493 set_vregisters(vd, 1, 0);
3494 set_vregisters(vd, 2, 0);
3495 set_vregisters(vd, 3, 0);
3496 } else {
3497 UnimplementedInstruction(instr);
3498 }
3499 } else if (instr->Bits(22, 2) == 1) {
3500 if (instr->Bits(16, 5) == 2) {
3501 // Format(instr, "scvtfd'sf 'vd, 'rn");
3502 const int64_t rn_val64 = get_register(rn, instr->RnMode());
3503 const int32_t rn_val32 = get_wregister(rn, instr->RnMode());
3504 const double vn_dbl = (instr->SFField() == 1)
3505 ? static_cast<double>(rn_val64)
3506 : static_cast<double>(rn_val32);
3507 set_vregisterd(vd, 0, bit_cast<int64_t, double>(vn_dbl));
3508 set_vregisterd(vd, 1, 0);
3509 } else if (instr->Bits(16, 5) == 6) {
3510 // Format(instr, "fmovrd'sf 'rd, 'vn");
3511 const int64_t vn_val = get_vregisterd(vn, 0);
3512 set_register(instr, rd, vn_val, R31IsZR);
3513 } else if (instr->Bits(16, 5) == 7) {
3514 // Format(instr, "fmovdr'sf 'vd, 'rn");
3515 const int64_t rn_val = get_register(rn, R31IsZR);
3516 set_vregisterd(vd, 0, rn_val);
3517 set_vregisterd(vd, 1, 0);
3518 } else if ((instr->Bits(16, 5) == 8) || (instr->Bits(16, 5) == 16) ||
3519 (instr->Bits(16, 5) == 24)) {
3520 const intptr_t max = instr->Bit(31) == 1 ? INT64_MAX : INT32_MAX;
3521 const intptr_t min = instr->Bit(31) == 1 ? INT64_MIN : INT32_MIN;
3522 double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3523 switch (instr->Bits(16, 5)) {
3524 case 8:
3525 // Format(instr, "fcvtps'sf 'rd, 'vn");
3526 vn_val = ceil(vn_val);
3527 break;
3528 case 16:
3529 // Format(instr, "fcvtms'sf 'rd, 'vn");
3530 vn_val = floor(vn_val);
3531 break;
3532 case 24:
3533 // Format(instr, "fcvtzs'sf 'rd, 'vn");
3534 break;
3535 }
3536 int64_t result;
3537 if (vn_val >= static_cast<double>(max)) {
3538 result = max;
3539 } else if (vn_val <= static_cast<double>(min)) {
3540 result = min;
3541 } else {
3542 result = static_cast<int64_t>(vn_val);
3543 }
3544 if (instr->Bit(31) == 1) {
3545 set_register(instr, rd, result, instr->RdMode());
3546 } else {
3547 set_register(instr, rd, result & 0xffffffffll, instr->RdMode());
3548 }
3549 } else {
3550 UnimplementedInstruction(instr);
3551 }
3552 } else {
3553 UnimplementedInstruction(instr);
3554 }
3555}
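
// Sketch (standalone, hypothetical helper; not from the original file) of the
// saturating conversion used by the fcvtps/fcvtms/fcvtzs paths above, shown
// for the 64-bit destination. The P/M variants first apply ceil/floor; the
// final conversion truncates toward zero and clamps out-of-range values to
// INT64_MAX / INT64_MIN instead of letting them wrap.
#include <cstdint>
#include <limits>

static int64_t SaturatingConvert(double v) {
  const int64_t max = std::numeric_limits<int64_t>::max();
  const int64_t min = std::numeric_limits<int64_t>::min();
  if (v >= static_cast<double>(max)) return max;  // e.g. 1e30  -> INT64_MAX
  if (v <= static_cast<double>(min)) return min;  // e.g. -1e30 -> INT64_MIN
  return static_cast<int64_t>(v);                 // e.g. -3.7  -> -3
}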
3556
3557void Simulator::DecodeFPOneSource(Instr* instr) {
3558 const int opc = instr->Bits(15, 6);
3559 const VRegister vd = instr->VdField();
3560 const VRegister vn = instr->VnField();
3561 const int64_t vn_val = get_vregisterd(vn, 0);
3562 const int32_t vn_val32 = vn_val & kWRegMask;
3563 const double vn_dbl = bit_cast<double, int64_t>(vn_val);
3564 const float vn_flt = bit_cast<float, int32_t>(vn_val32);
3565
3566 if ((opc != 5) && (instr->Bit(22) != 1)) {
3567 // Source is interpreted as single-precision only if we're doing a
3568 // conversion from single -> double.
3569 UnimplementedInstruction(instr);
3570 return;
3571 }
3572
3573 int64_t res_val = 0;
3574 switch (opc) {
3575 case 0:
3576 // Format("fmovdd 'vd, 'vn");
3577 res_val = get_vregisterd(vn, 0);
3578 break;
3579 case 1:
3580 // Format("fabsd 'vd, 'vn");
3581 res_val = bit_cast<int64_t, double>(fabs(vn_dbl));
3582 break;
3583 case 2:
3584 // Format("fnegd 'vd, 'vn");
3585 res_val = bit_cast<int64_t, double>(-vn_dbl);
3586 break;
3587 case 3:
3588 // Format("fsqrtd 'vd, 'vn");
3589 res_val = bit_cast<int64_t, double>(sqrt(vn_dbl));
3590 break;
3591 case 4: {
3592 // Format(instr, "fcvtsd 'vd, 'vn");
3593 const uint32_t val =
3594 bit_cast<uint32_t, float>(static_cast<float>(vn_dbl));
3595 res_val = static_cast<int64_t>(val);
3596 break;
3597 }
3598 case 5:
3599 // Format(instr, "fcvtds 'vd, 'vn");
3600 res_val = bit_cast<int64_t, double>(static_cast<double>(vn_flt));
3601 break;
3602 default:
3603 UnimplementedInstruction(instr);
3604 break;
3605 }
3606
3607 set_vregisterd(vd, 0, res_val);
3608 set_vregisterd(vd, 1, 0);
3609}
3610
3611void Simulator::DecodeFPTwoSource(Instr* instr) {
3612 if (instr->Bits(22, 2) != 1) {
3613 UnimplementedInstruction(instr);
3614 return;
3615 }
3616 const VRegister vd = instr->VdField();
3617 const VRegister vn = instr->VnField();
3618 const VRegister vm = instr->VmField();
3619 const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3620 const double vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
3621 const int opc = instr->Bits(12, 4);
3622 double result;
3623
3624 switch (opc) {
3625 case 0:
3626 // Format(instr, "fmuld 'vd, 'vn, 'vm");
3627 result = vn_val * vm_val;
3628 break;
3629 case 1:
3630 // Format(instr, "fdivd 'vd, 'vn, 'vm");
3631 result = vn_val / vm_val;
3632 break;
3633 case 2:
3634 // Format(instr, "faddd 'vd, 'vn, 'vm");
3635 result = vn_val + vm_val;
3636 break;
3637 case 3:
3638 // Format(instr, "fsubd 'vd, 'vn, 'vm");
3639 result = vn_val - vm_val;
3640 break;
3641 default:
3642 UnimplementedInstruction(instr);
3643 return;
3644 }
3645
3646 set_vregisterd(vd, 0, bit_cast<int64_t, double>(result));
3647 set_vregisterd(vd, 1, 0);
3648}
3649
3650void Simulator::DecodeFPCompare(Instr* instr) {
3651 const VRegister vn = instr->VnField();
3652 const VRegister vm = instr->VmField();
3653 const double vn_val = bit_cast<double, int64_t>(get_vregisterd(vn, 0));
3654 double vm_val;
3655
3656 if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 0)) {
3657 // Format(instr, "fcmpd 'vn, 'vm");
3658 vm_val = bit_cast<double, int64_t>(get_vregisterd(vm, 0));
3659 } else if ((instr->Bit(22) == 1) && (instr->Bits(3, 2) == 1)) {
3660 if (instr->VmField() == V0) {
3661 // Format(instr, "fcmpd 'vn, #0.0");
3662 vm_val = 0.0;
3663 } else {
3664 UnimplementedInstruction(instr);
3665 return;
3666 }
3667 } else {
3668 UnimplementedInstruction(instr);
3669 return;
3670 }
3671
3672 n_flag_ = false;
3673 z_flag_ = false;
3674 c_flag_ = false;
3675 v_flag_ = false;
3676
3677 if (isnan(vn_val) || isnan(vm_val)) {
3678 c_flag_ = true;
3679 v_flag_ = true;
3680 } else if (vn_val == vm_val) {
3681 z_flag_ = true;
3682 c_flag_ = true;
3683 } else if (vn_val < vm_val) {
3684 n_flag_ = true;
3685 } else {
3686 c_flag_ = true;
3687 }
3688}
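
// Sketch (standalone, hypothetical helper; not from the original file) of the
// NZCV encoding produced by the fcmpd path above, which matches the AArch64
// FCMP definition:
//   unordered (either operand NaN) -> N=0 Z=0 C=1 V=1
//   equal                          -> N=0 Z=1 C=1 V=0
//   less than                      -> N=1 Z=0 C=0 V=0
//   greater than                   -> N=0 Z=0 C=1 V=0
#include <cmath>
#include <cstdint>

static uint8_t FcmpNZCV(double n, double m) {
  if (std::isnan(n) || std::isnan(m)) return 0b0011;
  if (n == m) return 0b0110;
  if (n < m) return 0b1000;
  return 0b0010;
}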
3689
3690void Simulator::DecodeFP(Instr* instr) {
3691 if (instr->IsFPImmOp()) {
3692 DecodeFPImm(instr);
3693 } else if (instr->IsFPIntCvtOp()) {
3694 DecodeFPIntCvt(instr);
3695 } else if (instr->IsFPOneSourceOp()) {
3696 DecodeFPOneSource(instr);
3697 } else if (instr->IsFPTwoSourceOp()) {
3698 DecodeFPTwoSource(instr);
3699 } else if (instr->IsFPCompareOp()) {
3700 DecodeFPCompare(instr);
3701 } else {
3702 UnimplementedInstruction(instr);
3703 }
3704}
3705
3706void Simulator::DecodeDPSimd2(Instr* instr) {
3707 if (instr->IsFPOp()) {
3708 DecodeFP(instr);
3709 } else {
3710 UnimplementedInstruction(instr);
3711 }
3712}
3713
3714// Executes the current instruction.
3715DART_FORCE_INLINE
3716void Simulator::InstructionDecodeImpl(Instr* instr) {
3717 pc_modified_ = false;
3718
3719 if (instr->IsLoadStoreOp()) {
3720 DecodeLoadStore(instr);
3721 } else if (instr->IsDPImmediateOp()) {
3722 DecodeDPImmediate(instr);
3723 } else if (instr->IsCompareBranchOp()) {
3724 DecodeCompareBranch(instr);
3725 } else if (instr->IsDPRegisterOp()) {
3726 DecodeDPRegister(instr);
3727 } else if (instr->IsDPSimd1Op()) {
3728 DecodeDPSimd1(instr);
3729 } else if (instr->IsDPSimd2Op()) {
3730 DecodeDPSimd2(instr);
3731 } else {
3732 UnimplementedInstruction(instr);
3733 }
3734
3735 if (!pc_modified_) {
3736 set_pc(reinterpret_cast<int64_t>(instr) + Instr::kInstrSize);
3737 }
3738}
3739
3740void Simulator::InstructionDecode(Instr* instr) {
3741 if (IsTracingExecution()) {
3742 THR_Print("%" Pu64 " ", icount_);
3743 const uword start = reinterpret_cast<uword>(instr);
3744 const uword end = start + Instr::kInstrSize;
3745 if (FLAG_support_disassembler) {
3746 Disassembler::Disassemble(start, end);
3747 } else {
3748 THR_Print("Disassembler not supported in this mode.\n");
3749 }
3750 }
3751 InstructionDecodeImpl(instr);
3752}
3753
3754void Simulator::Execute() {
3755 if (LIKELY(FLAG_stop_sim_at == ULLONG_MAX &&
3756 FLAG_trace_sim_after == ULLONG_MAX)) {
3757 ExecuteNoTrace();
3758 } else {
3759 ExecuteTrace();
3760 }
3761}
3762
3763void Simulator::ExecuteNoTrace() {
3764 // Get the PC to simulate. Cannot use the accessor here as we need the
3765 // raw PC value and not the one used as input to arithmetic instructions.
3766 uword program_counter = get_pc();
3767
3768 // Fast version of the dispatch loop without checking whether the simulator
3769 // should be stopping at a particular executed instruction.
3770 while (program_counter != kEndSimulatingPC) {
3771 Instr* instr = reinterpret_cast<Instr*>(program_counter);
3772 icount_++;
3773 InstructionDecodeImpl(instr);
3774 program_counter = get_pc();
3775 }
3776}
3777
3778void Simulator::ExecuteTrace() {
3779 // Get the PC to simulate. Cannot use the accessor here as we need the
3780 // raw PC value and not the one used as input to arithmetic instructions.
3781 uword program_counter = get_pc();
3782
3783 // FLAG_stop_sim_at or FLAG_trace_sim_after has a non-default value. Stop in
3784 // the debugger once the requested instruction count or address is reached.
3785 while (program_counter != kEndSimulatingPC) {
3786 Instr* instr = reinterpret_cast<Instr*>(program_counter);
3787 icount_++;
3788 if (icount_ == FLAG_stop_sim_at) {
3789 SimulatorDebugger dbg(this);
3790 dbg.Stop(instr, "Instruction count reached");
3791 } else if (reinterpret_cast<uint64_t>(instr) == FLAG_stop_sim_at) {
3792 SimulatorDebugger dbg(this);
3793 dbg.Stop(instr, "Instruction address reached");
3794 } else if (IsIllegalAddress(program_counter)) {
3795 HandleIllegalAccess(program_counter, instr);
3796 } else {
3797 InstructionDecode(instr);
3798 }
3799 program_counter = get_pc();
3800 }
3801}
3802
3803int64_t Simulator::Call(int64_t entry,
3804 int64_t parameter0,
3805 int64_t parameter1,
3806 int64_t parameter2,
3807 int64_t parameter3,
3808 bool fp_return,
3809 bool fp_args) {
3810 // Save the SP register before the call so we can restore it.
3811 const intptr_t sp_before_call = get_register(R31, R31IsSP);
3812
3813 // Set up parameters.
3814 if (fp_args) {
3815 set_vregisterd(V0, 0, parameter0);
3816 set_vregisterd(V0, 1, 0);
3817 set_vregisterd(V1, 0, parameter1);
3818 set_vregisterd(V1, 1, 0);
3819 set_vregisterd(V2, 0, parameter2);
3820 set_vregisterd(V2, 1, 0);
3821 set_vregisterd(V3, 0, parameter3);
3822 set_vregisterd(V3, 1, 0);
3823 } else {
3824 set_register(nullptr, R0, parameter0);
3825 set_register(nullptr, R1, parameter1);
3826 set_register(nullptr, R2, parameter2);
3827 set_register(nullptr, R3, parameter3);
3828 }
3829
3830 // Make sure the activation frames are properly aligned.
3831 intptr_t stack_pointer = sp_before_call;
3832 if (OS::ActivationFrameAlignment() > 1) {
3833 stack_pointer =
3834 Utils::RoundDown(stack_pointer, OS::ActivationFrameAlignment());
3835 }
3836 set_register(nullptr, R31, stack_pointer, R31IsSP);
3837
3838 // Prepare to execute the code at entry.
3839 set_pc(entry);
3840 // Put down a marker for the end of simulation. The simulator stops
3841 // simulating when the PC reaches this value. By saving the "end simulation"
3842 // value into LR, simulation ends when the called code returns to this point.
3843 set_register(nullptr, LR, kEndSimulatingPC);
3844
3845 // Remember the values of callee-saved registers, and set them up with a
3846 // known value so that we are able to check that they are preserved
3847 // properly across Dart execution.
3848 int64_t preserved_vals[kAbiPreservedCpuRegCount];
3849 const double dicount = static_cast<double>(icount_);
3850 const int64_t callee_saved_value = bit_cast<int64_t, double>(dicount);
3851 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
3852 const Register r = static_cast<Register>(i);
3853 preserved_vals[i - kAbiFirstPreservedCpuReg] = get_register(r);
3854 set_register(nullptr, r, callee_saved_value);
3855 }
3856
3857 // Only the bottom half of the V registers must be preserved.
3858 int64_t preserved_dvals[kAbiPreservedFpuRegCount];
3859 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
3860 const VRegister r = static_cast<VRegister>(i);
3861 preserved_dvals[i - kAbiFirstPreservedFpuReg] = get_vregisterd(r, 0);
3862 set_vregisterd(r, 0, callee_saved_value);
3863 set_vregisterd(r, 1, 0);
3864 }
3865
3866 // Start the simulation.
3867 Execute();
3868
3869 // Check that the callee-saved registers have been preserved,
3870 // and restore them to their original values.
3871 for (int i = kAbiFirstPreservedCpuReg; i <= kAbiLastPreservedCpuReg; i++) {
3872 const Register r = static_cast<Register>(i);
3873 ASSERT(callee_saved_value == get_register(r));
3874 set_register(nullptr, r, preserved_vals[i - kAbiFirstPreservedCpuReg]);
3875 }
3876
3877 for (int i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; i++) {
3878 const VRegister r = static_cast<VRegister>(i);
3879 ASSERT(callee_saved_value == get_vregisterd(r, 0));
3880 set_vregisterd(r, 0, preserved_dvals[i - kAbiFirstPreservedFpuReg]);
3881 set_vregisterd(r, 1, 0);
3882 }
3883
3884 // Restore the SP register and return the result (R0, or V0 for fp_return).
3885 set_register(nullptr, R31, sp_before_call, R31IsSP);
3886 int64_t return_value;
3887 if (fp_return) {
3888 return_value = get_vregisterd(V0, 0);
3889 } else {
3890 return_value = get_register(R0);
3891 }
3892 return return_value;
3893}
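
// Usage sketch (illustrative; CallSketch and entry_point are hypothetical, not
// from the original file). A host-side caller hands Call() the simulated entry
// point plus up to four integer arguments in R0..R3; with fp_args the same four
// slots are written to V0..V3 as raw 64-bit bit patterns (e.g. bit_cast
// doubles), and with fp_return the result is read from V0 instead of R0.
static int64_t CallSketch(Simulator* sim, int64_t entry_point) {
  // Equivalent of invoking int64_t fn(int64_t, int64_t) at entry_point.
  return sim->Call(entry_point, /*parameter0=*/42, /*parameter1=*/7,
                   /*parameter2=*/0, /*parameter3=*/0,
                   /*fp_return=*/false, /*fp_args=*/false);
}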
3894
3895void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
3896 // Walk over all setjmp buffers (simulated --> C++ transitions)
3897 // and try to find the setjmp associated with the simulated stack pointer.
3898 SimulatorSetjmpBuffer* buf = last_setjmp_buffer();
3899 while (buf->link() != nullptr && buf->link()->sp() <= sp) {
3900 buf = buf->link();
3901 }
3902 ASSERT(buf != nullptr);
3903
3904 // The C++ caller has not cleaned up the stack memory of C++ frames.
3905 // Prepare for unwinding frames by destroying all the stack resources
3906 // in the previous C++ frames.
3907 StackResource::Unwind(thread);
3908
3909 // Keep the following code in sync with `StubCode::JumpToFrameStub()`.
3910
3911 // Unwind the C++ stack and continue simulation in the target frame.
3912 set_pc(static_cast<int64_t>(pc));
3913 set_register(nullptr, SP, static_cast<int64_t>(sp));
3914 set_register(nullptr, FP, static_cast<int64_t>(fp));
3915 set_register(nullptr, THR, reinterpret_cast<int64_t>(thread));
3916 set_register(nullptr, R31, thread->saved_stack_limit() - 4096);
3917#if defined(DART_TARGET_OS_FUCHSIA)
3918 set_register(nullptr, R18, thread->saved_shadow_call_stack());
3919#endif
3920 // Set the tag.
3921 thread->set_vm_tag(VMTag::kDartTagId);
3922 // Clear top exit frame.
3923 thread->set_top_exit_frame_info(0);
3924 // Restore pool pointer.
3925 int64_t code =
3926 *reinterpret_cast<int64_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
3927 int64_t pp = FLAG_precompiled_mode
3928 ? static_cast<int64_t>(thread->global_object_pool())
3929 : *reinterpret_cast<int64_t*>(
3930 code + Code::object_pool_offset() - kHeapObjectTag);
3931 pp -= kHeapObjectTag; // In the PP register, the pool pointer is untagged.
3932 set_register(nullptr, CODE_REG, code);
3933 set_register(nullptr, PP, pp);
3934 set_register(
3935 nullptr, HEAP_BITS,
3936 (thread->write_barrier_mask() << 32) | (thread->heap_base() >> 32));
3937 set_register(nullptr, NULL_REG, static_cast<int64_t>(Object::null()));
3938 if (FLAG_precompiled_mode) {
3939 set_register(nullptr, DISPATCH_TABLE_REG,
3940 reinterpret_cast<int64_t>(thread->dispatch_table_array()));
3941 }
3942
3943 buf->Longjmp();
3944}
3945
3946} // namespace dart
3947
3948#endif // defined(USING_SIMULATOR)
3949
3950#endif // defined(TARGET_ARCH_ARM64)