assembler_arm.h
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
13#error Do not include assembler_arm.h directly; use assembler.h instead.
14#endif
15
16#include <functional>
17
18#include "platform/assert.h"
19#include "platform/utils.h"
20#include "vm/code_entry_kind.h"
24#include "vm/constants.h"
25#include "vm/cpu.h"
26#include "vm/hash_map.h"
27#include "vm/simulator.h"
28
29namespace dart {
30
31// Forward declarations.
32class FlowGraphCompiler;
33class RegisterSet;
34class RuntimeEntry;
35
36// Load/store multiple addressing mode.
37enum BlockAddressMode {
38 // bit encoding P U W
39 DA = (0 | 0 | 0) << 21, // decrement after
40 IA = (0 | 4 | 0) << 21, // increment after
41 DB = (8 | 0 | 0) << 21, // decrement before
42 IB = (8 | 4 | 0) << 21, // increment before
43 DA_W = (0 | 0 | 1) << 21, // decrement after with writeback to base
44 IA_W = (0 | 4 | 1) << 21, // increment after with writeback to base
45 DB_W = (8 | 0 | 1) << 21, // decrement before with writeback to base
46 IB_W = (8 | 4 | 1) << 21 // increment before with writeback to base
47};
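// Added note (not part of the original header): the values above pack the P
// (bit 24), U (bit 23) and W (bit 21) fields of the ARM load/store-multiple
// encoding. For example, IA_W = (0 | 4 | 1) << 21 sets U (increment) and W
// (writeback) but not P, i.e. the usual ldmia/stmia form with base-register
// update ('!' in assembly syntax).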
48
49namespace compiler {
50
51class Immediate : public ValueObject {
52 public:
53 explicit Immediate(int32_t value) : value_(value) {}
54
55 int32_t value() const { return value_; }
56
57 private:
58 const int32_t value_;
59};
60
61// Instruction encoding bits.
62enum {
63 H = 1 << 5, // halfword (or byte)
64 L = 1 << 20, // load (or store)
65 S = 1 << 20, // set condition code (or leave unchanged)
66 W = 1 << 21, // writeback base register (or leave unchanged)
67 A = 1 << 21, // accumulate in multiply instruction (or not)
68 B = 1 << 22, // unsigned byte (or word)
69 D = 1 << 22, // high/lo bit of start of s/d register range
70 N = 1 << 22, // long (or short)
71 U = 1 << 23, // positive (or negative) offset/index
72 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
73 I = 1 << 25, // immediate shifter operand (or not)
74
75 B0 = 1,
76 B1 = 1 << 1,
77 B2 = 1 << 2,
78 B3 = 1 << 3,
79 B4 = 1 << 4,
80 B5 = 1 << 5,
81 B6 = 1 << 6,
82 B7 = 1 << 7,
83 B8 = 1 << 8,
84 B9 = 1 << 9,
85 B10 = 1 << 10,
86 B11 = 1 << 11,
87 B12 = 1 << 12,
88 B13 = 1 << 13,
89 B14 = 1 << 14,
90 B15 = 1 << 15,
91 B16 = 1 << 16,
92 B17 = 1 << 17,
93 B18 = 1 << 18,
94 B19 = 1 << 19,
95 B20 = 1 << 20,
96 B21 = 1 << 21,
97 B22 = 1 << 22,
98 B23 = 1 << 23,
99 B24 = 1 << 24,
100 B25 = 1 << 25,
101 B26 = 1 << 26,
102 B27 = 1 << 27,
103};
104
105class ArmEncode : public AllStatic {
106 public:
107 static inline uint32_t Rd(Register rd) {
108 ASSERT(rd < 16);
109 return static_cast<uint32_t>(rd) << kRdShift;
110 }
111
112 static inline uint32_t Rm(Register rm) {
113 ASSERT(rm < 16);
114 return static_cast<uint32_t>(rm) << kRmShift;
115 }
116
117 static inline uint32_t Rn(Register rn) {
118 ASSERT(rn < 16);
119 return static_cast<uint32_t>(rn) << kRnShift;
120 }
121
122 static inline uint32_t Rs(Register rs) {
123 ASSERT(rs < 16);
124 return static_cast<uint32_t>(rs) << kRsShift;
125 }
126};
127
128// Encodes Addressing Mode 1 - Data-processing operands.
129class Operand : public ValueObject {
130 public:
131 // Data-processing operands - Uninitialized.
132 Operand() : type_(-1), encoding_(-1) {}
133
134 // Data-processing operands - Copy constructor.
135 Operand(const Operand& other)
136 : ValueObject(), type_(other.type_), encoding_(other.encoding_) {}
137
138 // Data-processing operands - Assignment operator.
139 Operand& operator=(const Operand& other) {
140 type_ = other.type_;
141 encoding_ = other.encoding_;
142 return *this;
143 }
144
145 // Data-processing operands - Immediate.
146 explicit Operand(uint32_t immediate) {
147 ASSERT(immediate < (1 << kImmed8Bits));
148 type_ = 1;
149 encoding_ = immediate;
150 }
151
152 // Data-processing operands - Rotated immediate.
153 Operand(uint32_t rotate, uint32_t immed8) {
154 ASSERT((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
155 type_ = 1;
156 encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
157 }
158
159 // Data-processing operands - Register.
160 explicit Operand(Register rm) {
161 type_ = 0;
162 encoding_ = static_cast<uint32_t>(rm);
163 }
164
165 // Data-processing operands - Logical shift/rotate by immediate.
166 Operand(Register rm, Shift shift, uint32_t shift_imm) {
167 ASSERT(shift_imm < (1 << kShiftImmBits));
168 type_ = 0;
169 encoding_ = shift_imm << kShiftImmShift |
170 static_cast<uint32_t>(shift) << kShiftShift |
171 static_cast<uint32_t>(rm);
172 }
173
174 // Data-processing operands - Logical shift/rotate by register.
175 Operand(Register rm, Shift shift, Register rs) {
176 type_ = 0;
177 encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
178 static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
179 static_cast<uint32_t>(rm);
180 }
181
182 static bool CanHold(uint32_t immediate, Operand* o) {
183 // Avoid the more expensive test for frequent small immediate values.
184 if (immediate < (1 << kImmed8Bits)) {
185 o->type_ = 1;
186 o->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
187 return true;
188 }
189 // Note that immediate must be unsigned for the test to work correctly.
190 for (int rot = 0; rot < 16; rot++) {
191 uint32_t imm8 = Utils::RotateLeft(immediate, 2 * rot);
192 if (imm8 < (1 << kImmed8Bits)) {
193 o->type_ = 1;
194 o->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
195 return true;
196 }
197 }
198 return false;
199 }
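// Illustrative example (added, not in the original header): CanHold mirrors
// the ARM "rotated 8-bit immediate" rule. 0xFF and 0xFF000000 are encodable
// (the latter via rotate field 4, i.e. 0xFF rotated right by 8), while 0x101
// is not, because its two set bits are 8 positions apart and never fit in one
// 8-bit window; callers are then expected to materialize the value another
// way, e.g. via LoadImmediate.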
200
201 private:
202 bool is_valid() const { return (type_ == 0) || (type_ == 1); }
203
204 uint32_t type() const {
205 ASSERT(is_valid());
206 return type_;
207 }
208
209 uint32_t encoding() const {
210 ASSERT(is_valid());
211 return encoding_;
212 }
213
214 uint32_t type_; // Encodes the type field (bits 27-25) in the instruction.
215 uint32_t encoding_;
216
217 friend class Assembler;
218 friend class Address;
219};
220
221class Address : public ValueObject {
222 public:
223 enum OffsetKind {
224 Immediate,
225 IndexRegister,
226 ScaledIndexRegister,
227 };
228
229 // Memory operand addressing mode
230 enum Mode {
231 kModeMask = (8 | 4 | 1) << 21,
232 // bit encoding P U W
233 Offset = (8 | 4 | 0) << 21, // offset (w/o writeback to base)
234 PreIndex = (8 | 4 | 1) << 21, // pre-indexed addressing with writeback
235 PostIndex = (0 | 4 | 0) << 21, // post-indexed addressing with writeback
236 NegOffset = (8 | 0 | 0) << 21, // negative offset (w/o writeback to base)
237 NegPreIndex = (8 | 0 | 1) << 21, // negative pre-indexed with writeback
238 NegPostIndex = (0 | 0 | 0) << 21 // negative post-indexed with writeback
239 };
240
241 Address(const Address& other)
242 : ValueObject(),
243 encoding_(other.encoding_),
244 kind_(other.kind_),
245 base_(other.base_),
246 offset_(other.offset_) {}
247
248 Address& operator=(const Address& other) {
249 encoding_ = other.encoding_;
250 kind_ = other.kind_;
251 base_ = other.base_;
252 offset_ = other.offset_;
253 return *this;
254 }
255
256 bool Equals(const Address& other) const {
257 return (encoding_ == other.encoding_) && (kind_ == other.kind_);
258 }
259
260 explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
261 kind_ = Immediate;
262 base_ = rn;
263 offset_ = offset;
264 // If the offset can't be encoded in fewer bits, then it'll conflict with
265 // the encoding of the mode and we won't be able to retrieve it later.
266 ASSERT(Utils::MagnitudeIsUint(12, offset));
267 if (offset < 0) {
268 encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign.
269 } else {
270 encoding_ = am | offset;
271 }
272 encoding_ |= ArmEncode::Rn(rn);
273 }
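// Illustrative example (added): Address(R1, 8) encodes mode Offset with the U
// bit set and an immediate of 8, while Address(R1, -8) flips the U bit and
// stores the magnitude 8, which is what the XOR with (1 << kUShift) above
// achieves for negative offsets.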
274
275 // There is no register offset mode unless Mode is Offset, in which case the
276 // shifted register case below should be used.
277 Address(Register rn, Register r, Mode am);
278
279 Address(Register rn,
280 Register rm,
281 Shift shift = LSL,
282 uint32_t shift_imm = 0,
283 Mode am = Offset) {
284 Operand o(rm, shift, shift_imm);
285
286 if ((shift == LSL) && (shift_imm == 0)) {
287 kind_ = IndexRegister;
288 } else {
289 kind_ = ScaledIndexRegister;
290 }
291 encoding_ = o.encoding() | am | ArmEncode::Rn(rn);
292 }
293
294 // There is no shifted register mode with a register shift.
295 Address(Register rn, Register rm, Shift shift, Register r, Mode am = Offset);
296
297 static OperandSize OperandSizeFor(intptr_t cid);
298
299 static bool CanHoldLoadOffset(OperandSize size,
300 int32_t offset,
301 int32_t* offset_mask);
302 static bool CanHoldStoreOffset(OperandSize size,
303 int32_t offset,
304 int32_t* offset_mask);
305 static bool CanHoldImmediateOffset(bool is_load,
306 intptr_t cid,
307 int64_t offset);
308
309 private:
310 Register rn() const {
311 return Instr::At(reinterpret_cast<uword>(&encoding_))->RnField();
312 }
313
314 Register rm() const {
315 return ((kind() == IndexRegister) || (kind() == ScaledIndexRegister))
316 ? Instr::At(reinterpret_cast<uword>(&encoding_))->RmField()
317 : kNoRegister;
318 }
319
320 Mode mode() const { return static_cast<Mode>(encoding_ & kModeMask); }
321
322 bool has_writeback() const {
323 return (mode() == PreIndex) || (mode() == PostIndex) ||
324 (mode() == NegPreIndex) || (mode() == NegPostIndex);
325 }
326
327 static bool has_writeback(BlockAddressMode am) {
328 switch (am) {
329 case DA:
330 case IA:
331 case DB:
332 case IB:
333 return false;
334 case DA_W:
335 case IA_W:
336 case DB_W:
337 case IB_W:
338 return true;
339 default:
340 UNREACHABLE();
341 return false;
342 }
343 }
344
345 uint32_t encoding() const {
346 ASSERT_IMPLIES(kind_ == Immediate, Utils::MagnitudeIsUint(12, offset_));
347 return encoding_;
348 }
349
350 // Encoding for addressing mode 3.
351 uint32_t encoding3() const;
352
353 // Encoding for vfp load/store addressing.
354 uint32_t vencoding() const;
355
356 OffsetKind kind() const { return kind_; }
357 Register base() const { return base_; }
358 int32_t offset() const { return offset_; }
359
360 uint32_t encoding_;
361
362 OffsetKind kind_;
363 Register base_ = kNoRegister;
364 int32_t offset_ = 0;
365
366 friend class Assembler;
367};
368
369class FieldAddress : public Address {
370 public:
371 FieldAddress(Register base, int32_t disp)
372 : Address(base, disp - kHeapObjectTag) {}
373
374 // This addressing mode does not exist.
375 FieldAddress(Register base, Register r);
376
377 FieldAddress(const FieldAddress& other) : Address(other) {}
378
379 FieldAddress& operator=(const FieldAddress& other) {
380 Address::operator=(other);
381 return *this;
382 }
383};
384
385class Assembler : public AssemblerBase {
386 public:
387 explicit Assembler(ObjectPoolBuilder* object_pool_builder,
388 intptr_t far_branch_level = 0);
389 ~Assembler() {}
390
391 void PushRegister(Register r) { Push(r); }
392 void PopRegister(Register r) { Pop(r); }
393
394 // Push two registers to the stack; r0 to lower address location.
395 void PushRegisterPair(Register r0, Register r1) {
396 if ((r0 < r1) && (r0 != SP) && (r1 != SP)) {
397 RegList reg_list = (1 << r0) | (1 << r1);
398 PushList(reg_list);
399 } else {
400 PushRegister(r1);
401 PushRegister(r0);
402 }
403 }
404
405 // Pop two registers from the stack; r0 from lower address location.
406 void PopRegisterPair(Register r0, Register r1) {
407 if ((r0 < r1) && (r0 != SP) && (r1 != SP)) {
408 RegList reg_list = (1 << r0) | (1 << r1);
409 PopList(reg_list);
410 } else {
411 PopRegister(r0);
412 PopRegister(r1);
413 }
414 }
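// Added note: when the pair is ordered (r0 < r1) and neither register is SP,
// the helpers above use a single PushList/PopList, so r0 always ends up at the
// lower stack address; otherwise they fall back to two individual pushes or
// pops emitted in the order that preserves that layout.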
415
417
418 void Bind(Label* label) override;
419 // Unconditional jump to a given label. [distance] is ignored on ARM.
420 void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
421 // Unconditional jump to a given address in register.
422 void Jump(Register target) { bx(target); }
423 // Unconditional jump to a given address in memory.
424 void Jump(const Address& address) { Branch(address); }
425
426 void LoadMemoryValue(Register dst, Register base, int32_t offset) {
427 LoadFromOffset(dst, base, offset);
428 }
429 void StoreMemoryValue(Register src, Register base, int32_t offset) {
430 StoreToOffset(src, base, offset);
431 }
432 void LoadAcquire(Register dst,
433 const Address& address,
434 OperandSize size = kFourBytes) override {
435 Load(dst, address, size);
436 dmb();
437 }
438 void StoreRelease(Register src,
439 const Address& address,
440 OperandSize size = kFourBytes) override {
441 dmb();
442 Store(src, address, size);
443 }
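// Added note: the two overrides above implement acquire/release semantics with
// plain accesses plus data memory barriers: LoadAcquire performs the load and
// then dmb() so later accesses cannot move above it, and StoreRelease issues
// dmb() first so earlier accesses complete before the store becomes visible.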
444
445 void CompareWithMemoryValue(Register value,
446 Address address,
447 OperandSize size = kFourBytes) override {
449 Load(TMP, address, size);
450 cmp(value, Operand(TMP));
451 }
452
453 // Misc. functionality
454 bool use_far_branches() const {
455 return FLAG_use_far_branches || use_far_branches_;
456 }
457
458#if defined(TESTING) || defined(DEBUG)
459 // Used in unit tests and to ensure predictable verification code size in
460 // FlowGraphCompiler::EmitEdgeCounter.
461 void set_use_far_branches(bool b) { use_far_branches_ = b; }
462#endif // TESTING || DEBUG
463
464 // Debugging and bringup support.
465 void Breakpoint() override { bkpt(0); }
466
467 // Data-processing instructions.
468 void and_(Register rd, Register rn, Operand o, Condition cond = AL);
469 void ands(Register rd, Register rn, Operand o, Condition cond = AL);
470
471 void eor(Register rd, Register rn, Operand o, Condition cond = AL);
472
473 void sub(Register rd, Register rn, Operand o, Condition cond = AL);
474 void subs(Register rd, Register rn, Operand o, Condition cond = AL);
475
476 void rsb(Register rd, Register rn, Operand o, Condition cond = AL);
477 void rsbs(Register rd, Register rn, Operand o, Condition cond = AL);
478
479 void add(Register rd, Register rn, Operand o, Condition cond = AL);
480
481 void adds(Register rd, Register rn, Operand o, Condition cond = AL);
482
483 void adc(Register rd, Register rn, Operand o, Condition cond = AL);
484
485 void adcs(Register rd, Register rn, Operand o, Condition cond = AL);
486
487 void sbc(Register rd, Register rn, Operand o, Condition cond = AL);
488
489 void sbcs(Register rd, Register rn, Operand o, Condition cond = AL);
490
491 void rsc(Register rd, Register rn, Operand o, Condition cond = AL);
492
493 void tst(Register rn, Operand o, Condition cond = AL);
494
495 void teq(Register rn, Operand o, Condition cond = AL);
496
497 void cmp(Register rn, Operand o, Condition cond = AL);
498
499 void cmn(Register rn, Operand o, Condition cond = AL);
500
501 void orr(Register rd, Register rn, Operand o, Condition cond = AL);
502 void orrs(Register rd, Register rn, Operand o, Condition cond = AL);
503
504 void mov(Register rd, Operand o, Condition cond = AL);
505 void movs(Register rd, Operand o, Condition cond = AL);
506
507 void bic(Register rd, Register rn, Operand o, Condition cond = AL);
508 void bics(Register rd, Register rn, Operand o, Condition cond = AL);
509
510 void mvn_(Register rd, Operand o, Condition cond = AL);
511 void mvns(Register rd, Operand o, Condition cond = AL);
512
513 // Miscellaneous data-processing instructions.
514 void clz(Register rd, Register rm, Condition cond = AL);
515 void rbit(Register rd, Register rm, Condition cond = AL);
516
517 // Multiply instructions.
518 void mul(Register rd, Register rn, Register rm, Condition cond = AL);
519 void muls(Register rd, Register rn, Register rm, Condition cond = AL);
520 void mla(Register rd,
521 Register rn,
522 Register rm,
523 Register ra,
524 Condition cond = AL);
525 void mls(Register rd,
526 Register rn,
527 Register rm,
528 Register ra,
529 Condition cond = AL);
530 void smull(Register rd_lo,
531 Register rd_hi,
532 Register rn,
533 Register rm,
534 Condition cond = AL);
535 void umull(Register rd_lo,
536 Register rd_hi,
537 Register rn,
538 Register rm,
539 Condition cond = AL);
540 void smlal(Register rd_lo,
541 Register rd_hi,
542 Register rn,
543 Register rm,
544 Condition cond = AL);
545 void umlal(Register rd_lo,
546 Register rd_hi,
547 Register rn,
548 Register rm,
549 Condition cond = AL);
550
551 // Emulation of this instruction uses IP and the condition codes. Therefore,
552 // none of the registers can be IP, and the instruction can only be used
553 // unconditionally.
554 void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm);
555
556 // Division instructions.
557 void sdiv(Register rd, Register rn, Register rm, Condition cond = AL);
558 void udiv(Register rd, Register rn, Register rm, Condition cond = AL);
559
560 // Load/store instructions.
561 void ldr(Register rd, Address ad, Condition cond = AL);
562 void str(Register rd, Address ad, Condition cond = AL);
563
564 void ldrb(Register rd, Address ad, Condition cond = AL);
565 void strb(Register rd, Address ad, Condition cond = AL);
566
567 void ldrh(Register rd, Address ad, Condition cond = AL);
568 void strh(Register rd, Address ad, Condition cond = AL);
569
570 void ldrsb(Register rd, Address ad, Condition cond = AL);
571 void ldrsh(Register rd, Address ad, Condition cond = AL);
572
573 // ldrd and strd actually support the full range of addressing modes, but
574 // we don't use them, so we only support the base + offset mode.
575 // rd must be an even register and rd2 must be rd + 1.
576 void ldrd(Register rd,
577 Register rd2,
578 Register rn,
579 int32_t offset,
580 Condition cond = AL);
581 void strd(Register rd,
582 Register rd2,
583 Register rn,
584 int32_t offset,
585 Condition cond = AL);
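// Illustrative example (added): per the comment above, ldrd/strd here only
// take base + offset, the first register must be even and the second must be
// the next one up, e.g. ldrd(R4, R5, R2, 16) loads R4 from [R2, #16] and R5
// from [R2, #20].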
586
587 void ldm(BlockAddressMode am,
588 Register base,
589 RegList regs,
590 Condition cond = AL);
591 void stm(BlockAddressMode am,
592 Register base,
593 RegList regs,
594 Condition cond = AL);
595
596 void ldrex(Register rd, Register rn, Condition cond = AL);
597 void strex(Register rd, Register rt, Register rn, Condition cond = AL);
598
599 void dmb();
600
601 // Media instructions.
602 void sbfx(Register rd,
603 Register rn,
604 int32_t lsb,
605 int32_t width,
606 Condition cond = AL);
607 void ubfx(Register rd,
608 Register rn,
609 int32_t lsb,
610 int32_t width,
611 Condition cond = AL);
612
613 // Emit code to transition between generated and native modes.
614 //
615 // These require that CSP and SP are equal and aligned and require two scratch
616 // registers (in addition to TMP).
617 void TransitionGeneratedToNative(Register destination_address,
618 Register exit_frame_fp,
619 Register exit_through_ffi,
620 Register scratch0,
621 bool enter_safepoint);
622 void TransitionNativeToGenerated(Register scratch0,
623 Register scratch1,
624 bool exit_safepoint,
625 bool ignore_unwind_in_progress = false);
626 void EnterFullSafepoint(Register scratch0, Register scratch1);
627 void ExitFullSafepoint(Register scratch0,
628 Register scratch1,
629 bool ignore_unwind_in_progress);
630
631 // Miscellaneous instructions.
632 void clrex();
633 void nop(Condition cond = AL);
634
635 // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
636 void bkpt(uint16_t imm16);
637
638 static int32_t BkptEncoding(uint16_t imm16) {
639 // bkpt requires that the cond field is AL.
640 return (AL << kConditionShift) | B24 | B21 | ((imm16 >> 4) << 8) | B6 | B5 |
641 B4 | (imm16 & 0xf);
642 }
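// Illustrative example (added): the 16-bit immediate is split so that its high
// 12 bits land in instruction bits 19..8 and its low 4 bits in bits 3..0;
// BkptEncoding(0) therefore yields 0xe1200070, the "bkpt #0" emitted by
// Breakpoint() above.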
643
644 // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
645 void vmovsr(SRegister sn, Register rt, Condition cond = AL);
646 void vmovrs(Register rt, SRegister sn, Condition cond = AL);
647 void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
648 void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
649 void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
650 void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
651 void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
652 void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
653 void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
654 void vmovq(QRegister qd, QRegister qm);
655
656 // Returns false if the immediate cannot be encoded.
657 bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
658 bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
659
660 void vldrs(SRegister sd, Address ad, Condition cond = AL);
661 void vstrs(SRegister sd, Address ad, Condition cond = AL);
662 void vldrd(DRegister dd, Address ad, Condition cond = AL);
663 void vstrd(DRegister dd, Address ad, Condition cond = AL);
664
665 void vldms(BlockAddressMode am,
666 Register base,
667 SRegister first,
668 SRegister last,
669 Condition cond = AL);
670 void vstms(BlockAddressMode am,
671 Register base,
672 SRegister first,
673 SRegister last,
674 Condition cond = AL);
675
676 void vldmd(BlockAddressMode am,
677 Register base,
678 DRegister first,
679 intptr_t count,
680 Condition cond = AL);
681 void vstmd(BlockAddressMode am,
682 Register base,
683 DRegister first,
684 intptr_t count,
685 Condition cond = AL);
686
687 void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
688 void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
691 void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
692 void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
695 void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
696 void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
701 void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
702 void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
703 void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
704 void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
705 void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
706 void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
713
719
728
729 void vabss(SRegister sd, SRegister sm, Condition cond = AL);
730 void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
732 void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
733 void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
735 void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
736 void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
737
738 void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
739 void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
740 void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
741 void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
742 void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
743 void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
744 void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
745 void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
746 void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
747 void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
748
749 void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
750 void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
751 void vcmpsz(SRegister sd, Condition cond = AL);
752 void vcmpdz(DRegister dd, Condition cond = AL);
753 void vmrs(Register rd, Condition cond = AL);
754 void vmstat(Condition cond = AL);
755
756 // Duplicates the operand of size sz at index idx from dm to all elements of
757 // qd. This is a special case of vtbl.
758 void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx);
759
760 // Each byte of dm is an index into the table of bytes formed by concatenating
761 // a list of 'length' registers starting with dn. The result is placed in dd.
762 void vtbl(DRegister dd, DRegister dn, int length, DRegister dm);
763
764 // The words of qd and qm are interleaved with the low words of the result
765 // in qd and the high words in qm.
766 void vzipqw(QRegister qd, QRegister qm);
767
768 // Branch instructions.
769 void b(Label* label, Condition cond = AL);
770 void bl(Label* label, Condition cond = AL);
771 void bx(Register rm, Condition cond = AL);
772 void blx(Register rm, Condition cond = AL);
773
774 void Branch(const Address& address, Condition cond = AL);
775
776 void BranchLink(const Code& code,
782
783 // Branch and link to an entry address. Call sequence can be patched.
785 const Code& code,
789
790 // Emit a call that shares its object pool entries with other calls
791 // that have the same equivalence marker.
793 const Code& code,
794 const Object& equivalence,
796
797 // Branch and link to [base + offset]. Call sequence is never patched.
798 void BranchLinkOffset(Register base, int32_t offset);
799
800 void Call(Address target, Condition cond = AL) {
801 // CLOBBERS_LR uses __ to access the assembler.
802#define __ this->
803 CLOBBERS_LR({
804 ldr(LR, target, cond);
805 blx(LR, cond);
806 });
807#undef __
808 }
809 void Call(const Code& code) { BranchLink(code); }
810
811 void CallCFunction(Address target) { Call(target); }
812
813 void CallCFunction(Register target, Condition cond = AL) {
814 blx(target, cond);
815 }
816
817 // Add signed immediate value to rd. May clobber IP.
818 void AddImmediate(Register rd, int32_t value, Condition cond = AL) {
819 AddImmediate(rd, rd, value, cond);
820 }
821
822 // Add signed immediate value. May clobber IP.
823 void AddImmediate(Register rd,
824 Register rn,
825 int32_t value,
826 Condition cond = AL);
827 void AddImmediateSetFlags(Register rd,
828 Register rn,
829 int32_t value,
830 Condition cond = AL);
832 add(dest, dest, Operand(src));
833 }
834 // [dest] = [src] << [scale] + [value].
836 Register src,
838 int32_t value) {
839 if (scale == 0) {
840 AddImmediate(dest, src, value);
841 } else {
842 Lsl(dest, src, Operand(scale));
843 AddImmediate(dest, dest, value);
844 }
845 }
846 void SubImmediate(Register rd,
847 Register rn,
848 int32_t value,
849 Condition cond = AL);
851 Register rn,
852 int32_t value,
853 Condition cond = AL);
855 sub(dest, dest, Operand(src));
856 }
857 void MulImmediate(Register reg,
858 int32_t imm,
859 OperandSize width = kFourBytes) override {
861 if (Utils::IsPowerOfTwo(imm)) {
862 LslImmediate(reg, Utils::ShiftForPowerOfTwo(imm));
863 } else {
864 LoadImmediate(TMP, imm);
865 mul(reg, reg, TMP);
866 }
867 }
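// Illustrative example (added): MulImmediate(R0, 8) can be emitted as a single
// logical shift left by 3 through the power-of-two path above, whereas
// MulImmediate(R0, 10) materializes 10 in TMP and falls back to mul.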
868 void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
869 void AndImmediate(Register rd, int32_t imm, Condition cond) {
870 AndImmediate(rd, rd, imm, cond);
871 }
872 void AndImmediate(Register rd, int32_t imm) override {
873 AndImmediate(rd, rd, imm, AL);
874 }
876 Register rn,
877 int32_t value,
878 Condition cond = AL);
880 Register src1,
881 Register src2 = kNoRegister) override {
882 ASSERT(src1 != src2); // Likely a mistake.
883 if (src2 == kNoRegister) {
884 src2 = dst;
885 }
886 and_(dst, src2, Operand(src1));
887 }
888 void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
889 void OrImmediate(Register rd, int32_t imm, Condition cond = AL) {
890 OrImmediate(rd, rd, imm, cond);
891 }
892 void LslImmediate(Register rd, Register rn, int32_t shift) {
893 ASSERT((shift >= 0) && (shift < kBitsPerInt32));
894 Lsl(rd, rn, Operand(shift));
895 }
896 void LslImmediate(Register rd, int32_t shift) { LslImmediate(rd, rd, shift); }
897 void LslRegister(Register dst, Register shift) override {
898 Lsl(dst, dst, shift);
899 }
900 void LsrImmediate(Register rd, Register rn, int32_t shift) {
901 ASSERT((shift >= 0) && (shift < kBitsPerInt32));
902 Lsr(rd, rn, Operand(shift));
903 }
904 void LsrImmediate(Register rd, int32_t shift) override {
905 LsrImmediate(rd, rd, shift);
906 }
907
908 // Test rn and immediate. May clobber IP.
909 void TestImmediate(Register rn, int32_t imm, Condition cond = AL);
910
911 // Compare rn with signed immediate value. May clobber IP.
912 void CompareImmediate(Register rn, int32_t value, Condition cond);
914 int32_t value,
915 OperandSize width = kFourBytes) override {
918 }
919
920 // Signed integer division of left by right. Checks to see if integer
921 // division is supported. If not, uses the FPU for division with
922 // temporary registers tmpl and tmpr. tmpl and tmpr must be different
923 // registers.
927 DRegister tmpl,
928 DRegister tmpr);
929
930 // Load and Store.
931 // These three do not clobber IP.
932 void LoadPatchableImmediate(Register rd, int32_t value, Condition cond = AL);
933 void LoadDecodableImmediate(Register rd, int32_t value, Condition cond = AL);
934 void LoadImmediate(Register rd, Immediate value, Condition cond = AL);
935 void LoadImmediate(Register rd, int32_t value, Condition cond);
936 void LoadImmediate(Register rd, int32_t value) override {
937 LoadImmediate(rd, value, AL);
938 }
939 // These two may clobber IP.
940 void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
942 double value,
943 Register scratch,
944 Condition cond = AL);
946
948
949 void Drop(intptr_t stack_elements);
950
954
957
958 // Load word from pool from the given index using encoding that
959 // InstructionPattern::DecodeLoadWordFromPool can decode.
960 void LoadWordFromPoolIndex(Register rd,
961 intptr_t index,
962 Register pp = PP,
963 Condition cond = AL);
964 // Store word to pool at the given offset.
965 //
966 // Note: clobbers TMP.
967 void StoreWordToPoolIndex(Register value,
968 intptr_t index,
969 Register pp = PP,
970 Condition cond = AL);
971
972 void LoadObject(Register rd, const Object& object, Condition cond = AL);
974 Register rd,
975 const Object& object,
976 Condition cond = AL,
980 const ExternalLabel* label,
982 Condition cond = AL);
983 void PushObject(const Object& object);
984 void PushImmediate(int32_t immediate) {
985 LoadImmediate(TMP, immediate);
986 Push(TMP);
987 }
988 void CompareObject(Register rn, const Object& object);
989
990 void StoreObjectIntoObjectNoBarrier(
991 Register object,
992 const Address& dest,
993 const Object& value,
994 MemoryOrder memory_order = kRelaxedNonAtomic,
995 OperandSize size = kWordBytes) override;
996
997 void StoreBarrier(Register object,
998 Register value,
999 CanBeSmi can_be_smi,
1000 Register scratch) override;
1001 void ArrayStoreBarrier(Register object,
1002 Register slot,
1003 Register value,
1004 CanBeSmi can_be_smi,
1005 Register scratch) override;
1007
1008 // Stores a non-tagged value into a heap object.
1009 void StoreInternalPointer(Register object,
1010 const Address& dest,
1011 Register value);
1012
1013 // Store value_even, value_odd, value_even, ... into the words in the address
1014 // range [begin, end), assumed to be uninitialized fields in object (tagged).
1015 // The stores must not need a generational store barrier (e.g., smi/null),
1016 // and (value_even, value_odd) must be a valid register pair.
1017 // Destroys register 'begin'.
1020 Register end,
1021 Register value_even,
1022 Register value_odd);
1023 // Like above, for the range [base+begin_offset, base+end_offset), unrolled.
1025 Register base,
1026 intptr_t begin_offset,
1027 intptr_t end_offset,
1028 Register value_even,
1029 Register value_odd);
1030
1031 // Stores a Smi value into a heap object field that always contains a Smi.
1032 void StoreIntoSmiField(const Address& dest, Register value);
1033
1034 void ExtractClassIdFromTags(Register result,
1035 Register tags,
1036 Condition cond = AL);
1038
1040 Register temp,
1041 intptr_t low,
1042 intptr_t high,
1043 RangeCheckCondition condition,
1044 Label* target) override;
1045
1048 void CompareClassId(Register object, intptr_t class_id, Register scratch);
1052 Register src,
1053 Register scratch,
1054 bool can_be_null = false) override;
1055
1056 bool CanLoadFromObjectPool(const Object& object) const;
1057
1059 OperandSize sz,
1060 Condition cond);
1062 OperandSize sz,
1063 Condition cond);
1064
1065 void Load(Register reg,
1066 const Address& address,
1067 OperandSize type,
1068 Condition cond);
1069 void Load(Register reg,
1070 const Address& address,
1071 OperandSize type = kFourBytes) override {
1072 Load(reg, address, type, AL);
1073 }
1075 Register base,
1076 int32_t offset,
1077 OperandSize type = kFourBytes) override {
1078 LoadFromOffset(reg, base, offset, type, AL);
1079 }
1081 Register base,
1082 int32_t offset,
1084 Condition cond) {
1085 Load(reg, Address(base, offset), type, cond);
1086 }
1088 Register base,
1089 int32_t offset,
1090 OperandSize type = kFourBytes) override {
1092 }
1094 Register base,
1095 int32_t offset,
1097 Condition cond) {
1098 Load(reg, FieldAddress(base, offset), type, cond);
1099 }
1100 // For loading indexed payloads out of tagged objects like Arrays. If the
1101 // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of
1102 // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed.
1103 void LoadIndexedPayload(Register dst,
1104 Register base,
1105 int32_t payload_start,
1106 Register index,
1107 ScaleFactor scale,
1108 OperandSize type = kFourBytes) override {
1109 add(dst, base, Operand(index, LSL, scale));
1110 LoadFromOffset(dst, dst, payload_start - kHeapObjectTag, type);
1111 }
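// Illustrative example (added): for a tagged array in R2 and a Smi index in
// R3, passing TIMES_HALF_WORD_SIZE as the scale (as the comment above
// suggests) lets the Smi's implicit *2 tag supply half of the 4-byte element
// scaling, and payload_start - kHeapObjectTag converts the tagged base into
// the untagged payload offset.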
1112 void LoadFromStack(Register dst, intptr_t depth);
1113 void StoreToStack(Register src, intptr_t depth);
1114 void CompareToStack(Register src, intptr_t depth);
1115
1116 void Store(Register reg,
1117 const Address& address,
1119 Condition cond);
1120 void Store(Register reg,
1121 const Address& address,
1122 OperandSize type = kFourBytes) override {
1123 Store(reg, address, type, AL);
1124 }
1126 Register base,
1127 int32_t offset,
1128 OperandSize type = kFourBytes) override {
1129 StoreToOffset(reg, base, offset, type, AL);
1130 }
1132 Register base,
1133 int32_t offset,
1135 Condition cond) {
1136 Store(reg, Address(base, offset), type, cond);
1137 }
1139 Register base,
1140 int32_t offset,
1141 OperandSize type = kFourBytes) override {
1143 }
1145 Register base,
1146 int32_t offset,
1148 Condition cond) {
1149 Store(reg, FieldAddress(base, offset), type, cond);
1150 }
1151 void StoreZero(const Address& address, Register temp) {
1152 mov(temp, Operand(0));
1153 Store(temp, address);
1154 }
1156 Register base,
1157 int32_t offset,
1158 Condition cond = AL);
1160 Register base,
1161 int32_t offset,
1162 Condition cond = AL);
1164 Register base,
1165 int32_t offset,
1166 Condition cond = AL);
1168 Register base,
1169 int32_t offset,
1170 Condition cond = AL);
1171
1179 if (src != dst) {
1181 }
1182 }
1183
1185 intptr_t count,
1186 Register base,
1187 int32_t offset);
1189 intptr_t count,
1190 Register base,
1191 int32_t offset);
1192
1200 if (src != dst) {
1201 vmovq(dst, src);
1202 }
1203 }
1204
1205 void Push(Register rd, Condition cond = AL);
1206 void Pop(Register rd, Condition cond = AL);
1207
1208 void PushList(RegList regs, Condition cond = AL);
1209 void PopList(RegList regs, Condition cond = AL);
1210
1211 void PushQuad(FpuRegister rd, Condition cond = AL);
1212 void PopQuad(FpuRegister rd, Condition cond = AL);
1213
1214 void PushRegisters(const RegisterSet& regs);
1215 void PopRegisters(const RegisterSet& regs);
1216
1217 void PushRegistersInOrder(std::initializer_list<Register> regs);
1218
1219 // Push all registers which are callee-saved according to the ARM ABI.
1221
1222 // Pop all registers which are callee-saved according to the ARM ABI.
1224
1225 void CompareRegisters(Register rn, Register rm) { cmp(rn, Operand(rm)); }
1226 void CompareObjectRegisters(Register rn, Register rm) {
1227 CompareRegisters(rn, rm);
1228 }
1229 // Branches to the given label if the condition holds.
1230 // [distance] is ignored on ARM.
1231 void BranchIf(Condition condition,
1232 Label* label,
1233 JumpDistance distance = kFarJump) {
1234 b(label, condition);
1235 }
1236 void BranchIfZero(Register rn,
1237 Label* label,
1238 JumpDistance distance = kFarJump) {
1239 cmp(rn, Operand(0));
1240 b(label, ZERO);
1241 }
1243 intptr_t bit_number,
1244 Condition condition,
1245 Label* label,
1246 JumpDistance distance = kFarJump) {
1247 tst(rn, Operand(1 << bit_number));
1248 b(label, condition);
1249 }
1250
1252 ExtendValue(rd, rm, kFourBytes, cond);
1253 }
1254 void MoveRegister(Register rd, Register rm) override {
1255 MoveRegister(rd, rm, AL);
1256 }
1258 ExtendAndSmiTagValue(rd, rm, kFourBytes, cond);
1259 }
1261 MoveAndSmiTagRegister(rd, rm, AL);
1262 }
1264 void ExtendValue(Register rd, Register rm, OperandSize sz) override {
1265 ExtendValue(rd, rm, sz, AL);
1266 }
1268 Register rm,
1269 OperandSize sz,
1270 Condition cond) {
1271 ExtendValue(rd, rm, sz, cond);
1272 SmiTag(rd, cond);
1273 }
1275 Register rm,
1276 OperandSize sz = kFourBytes) override {
1277 ExtendAndSmiTagValue(rd, rm, sz, AL);
1278 }
1279
1280 // Convenience shift instructions. Use mov instruction with shifter operand
1281 // for variants setting the status flags.
1282 void Lsl(Register rd,
1283 Register rm,
1284 const Operand& shift_imm,
1285 Condition cond = AL);
1286 void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
1287 void Lsr(Register rd,
1288 Register rm,
1289 const Operand& shift_imm,
1290 Condition cond = AL);
1291 void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
1292 void Asr(Register rd,
1293 Register rm,
1294 const Operand& shift_imm,
1295 Condition cond = AL);
1296 void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
1297 void Asrs(Register rd,
1298 Register rm,
1299 const Operand& shift_imm,
1300 Condition cond = AL);
1301 void Ror(Register rd,
1302 Register rm,
1303 const Operand& shift_imm,
1304 Condition cond = AL);
1305 void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
1306 void Rrx(Register rd, Register rm, Condition cond = AL);
1307
1308 // Fill rd with the sign of rm.
1309 void SignFill(Register rd, Register rm, Condition cond = AL);
1310
1313 // If qm must be preserved, then provide a (non-QTMP) temporary.
1316
1317 void SmiTag(Register reg, Condition cond) { SmiTag(reg, reg, cond); }
1318 void SmiTag(Register reg) override { SmiTag(reg, AL); }
1319
1320 void SmiTag(Register dst, Register src, Condition cond = AL) {
1321 Lsl(dst, src, Operand(kSmiTagSize), cond);
1322 }
1323
1324 void SmiUntag(Register reg, Condition cond = AL) { SmiUntag(reg, reg, cond); }
1325
1326 void SmiUntag(Register dst, Register src, Condition cond = AL) {
1327 Asr(dst, src, Operand(kSmiTagSize), cond);
1328 }
1329
1330 // Untag the value in the register assuming it is a smi.
1331 // Untagging shifts tag bit into the carry flag - if carry is clear
1332 // assumption was correct. In this case jump to the is_smi label.
1333 // Otherwise fall-through.
1334 void SmiUntag(Register dst, Register src, Label* is_smi) {
1335 ASSERT(kSmiTagSize == 1);
1336 Asrs(dst, src, Operand(kSmiTagSize));
1337 b(is_smi, CC);
1338 }
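// Illustrative example (added): with kSmiTagSize == 1 a Smi has a clear low
// bit, so Asrs shifts the tag into the carry flag. For src = 6 (the Smi 3) the
// carry ends up clear and the branch to is_smi is taken with dst = 3; for an
// odd, non-Smi value the carry is set and execution falls through.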
1339
1340 // For ARM, the near argument is ignored.
1341 void BranchIfNotSmi(Register reg,
1342 Label* label,
1343 JumpDistance distance = kFarJump) {
1344 tst(reg, Operand(kSmiTagMask));
1345 b(label, NE);
1346 }
1347
1348 // Truncates upper bits.
1349 void LoadInt32FromBoxOrSmi(Register result, Register value) override {
1350 if (result == value) {
1351 ASSERT(TMP != value);
1353 value = TMP;
1354 }
1355 ASSERT(value != result);
1358 LoadFieldFromOffset(result, value, compiler::target::Mint::value_offset());
1359 Bind(&done);
1360 }
1361
1362 // For ARM, the near argument is ignored.
1363 void BranchIfSmi(Register reg,
1364 Label* label,
1365 JumpDistance distance = kFarJump) override {
1366 tst(reg, Operand(kSmiTagMask));
1367 b(label, EQ);
1368 }
1369
1371
1372 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
1374 Register reg2,
1375 intptr_t offset,
1377 Register temp,
1378 Label* equals) override;
1379
1380 // Function frame setup and tear down.
1381 void EnterFrame(RegList regs, intptr_t frame_space);
1382 void LeaveFrame(RegList regs, bool allow_pop_pc = false);
1383 void Ret(Condition cond = AL);
1384
1385 // Sets the return address to [value] as if there was a call.
1386 // On ARM sets LR.
1387 void SetReturnAddress(Register value);
1388
1389 void ReserveAlignedFrameSpace(intptr_t frame_space);
1390
1391 // In debug mode, this generates code to check that:
1392 // FP + kExitLinkSlotFromEntryFp == SP
1393 // or triggers breakpoint otherwise.
1394 //
1395 // Requires a scratch register in addition to the assembler temporary.
1397
1398 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope.
1399 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
1400
1401 // Set up a Dart frame on entry with a frame pointer and PC information to
1402 // enable easy access to the RawInstruction object of code corresponding
1403 // to this frame.
1404 void EnterDartFrame(intptr_t frame_size, bool load_pool_pointer = true);
1405
1406 void LeaveDartFrame();
1407
1408 // Leaves the frame and returns.
1409 //
1410 // The difference to "LeaveDartFrame(); Ret();" is that we return using
1411 //
1412 // ldmia sp!, {fp, pc}
1413 //
1414 // instead of
1415 //
1416 // ldmia sp!, {fp, lr}
1417 // blx lr
1418 //
1419 // This means that our return must go to ARM mode (and not thumb).
1421
1422 // Set up a Dart frame for a function compiled for on-stack replacement.
1423 // The frame layout is a normal Dart frame, but the frame is partially set
1424 // up on entry (it is the frame of the unoptimized code).
1425 void EnterOsrFrame(intptr_t extra_size);
1426
1427 // Set up a stub frame so that the stack traversal code can easily identify
1428 // a stub frame.
1429 void EnterStubFrame();
1430 void LeaveStubFrame();
1431
1432 // Set up a frame for calling a C function.
1433 // Automatically save the pinned registers in Dart which are not callee-
1434 // saved in the native calling convention.
1435 // Use together with CallCFunction.
1436 void EnterCFrame(intptr_t frame_space);
1437 void LeaveCFrame();
1438
1442
1443 void CombineHashes(Register dst, Register other) override;
1444 void FinalizeHashForSize(intptr_t bit_size,
1445 Register dst,
1446 Register scratch = TMP) override;
1447
1448 // The register into which the allocation tracing state table is loaded with
1449 // LoadAllocationTracingStateAddress should be passed to MaybeTraceAllocation.
1450 //
1451 // These are separate assembler macros so we can avoid a dependent load too
1452 // nearby the load of the table address.
1453 void LoadAllocationTracingStateAddress(Register dest, Register cid);
1454 void LoadAllocationTracingStateAddress(Register dest, intptr_t cid);
1455
1456 // If true is returned, then the out parameter [need_base] signifies whether
1457 // a register is needed for storing the array base (which should be passed
1458 // as the [temp] parameter to ElementAddressForIntIndex).
1459 static bool AddressCanHoldConstantIndex(const Object& constant,
1460 bool is_load,
1461 bool is_external,
1462 intptr_t cid,
1463 intptr_t index_scale,
1464 bool* needs_base = nullptr);
1465
1467 bool is_external,
1468 intptr_t cid,
1469 intptr_t index_scale,
1470 Register array,
1471 intptr_t index,
1472 Register temp);
1473
1474 void LoadElementAddressForIntIndex(Register address,
1475 bool is_load,
1476 bool is_external,
1477 intptr_t cid,
1478 intptr_t index_scale,
1479 Register array,
1480 intptr_t index);
1481
1483 bool is_external,
1484 intptr_t cid,
1485 intptr_t index_scale,
1486 bool index_unboxed,
1487 Register array,
1488 Register index);
1489
1490 void LoadElementAddressForRegIndex(Register address,
1491 bool is_load,
1492 bool is_external,
1493 intptr_t cid,
1494 intptr_t index_scale,
1495 bool index_unboxed,
1496 Register array,
1497 Register index);
1498
1499 void LoadStaticFieldAddress(Register address,
1500 Register field,
1501 Register scratch);
1502
1505 Register offset_in_words_as_smi) override;
1506
1509 int32_t offset) override {
1511 }
1512
1518
1519 // If allocation tracing is enabled, will jump to |trace| label,
1520 // which will allocate in the runtime where tracing occurs.
1521 void MaybeTraceAllocation(Register stats_addr_reg, Label* trace);
1522
1523 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
1524 // which will allocate in the runtime where tracing occurs.
1526 Label* trace,
1527 Register temp_reg,
1529
1531 Label* trace,
1532 Register temp_reg,
1534
1535 void TryAllocateObject(intptr_t cid,
1536 intptr_t instance_size,
1537 Label* failure,
1538 JumpDistance distance,
1539 Register instance_reg,
1540 Register temp_reg) override;
1541
1542 void TryAllocateArray(intptr_t cid,
1543 intptr_t instance_size,
1544 Label* failure,
1546 Register end_address,
1547 Register temp1,
1548 Register temp2);
1549
1550 void CheckAllocationCanary(Register top, Register tmp = TMP) {
1551#if defined(DEBUG)
1552 Label okay;
1553 ldr(tmp, Address(top, 0));
1555 b(&okay, EQUAL);
1556 Stop("Allocation canary");
1557 Bind(&okay);
1558#endif
1559 }
1561#if defined(DEBUG)
1562 ASSERT(top != TMP);
1564 str(TMP, Address(top, 0));
1565#endif
1566 }
1567
1568 // Copy [size] bytes from [src] address to [dst] address.
1569 // [size] should be a multiple of word size.
1570 // Clobbers [src], [dst], [size] and [temp] registers.
1572 Register dst,
1573 Register size,
1574 Register temp);
1575
1576 // This emits a PC-relative call of the form "bl.<cond> <offset>". The
1577 // offset is not yet known and needs therefore relocation to the right place
1578 // before the code can be used.
1579 //
1580 // The necessary information for the "linker" (i.e. the relocation
1581 // information) is stored in [UntaggedCode::static_calls_target_table_]: an
1582 // entry of the form
1583 //
1584 // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
1585 //
1586 // will be used during relocation to fix the offset.
1587 //
1588 // The provided [offset_into_target] will be added to calculate the final
1589 // destination. It can be used e.g. for calling into the middle of a
1590 // function.
1591 void GenerateUnRelocatedPcRelativeCall(Condition cond = AL,
1592 intptr_t offset_into_target = 0);
1593
1594 // This emits a PC-relative tail call of the form "b.<cond> <offset>".
1595 //
1596 // See also above for the pc-relative call.
1597 void GenerateUnRelocatedPcRelativeTailCall(Condition cond = AL,
1598 intptr_t offset_into_target = 0);
1599
1600 // Emit data (e.g encoded instruction or immediate) in instruction stream.
1601 void Emit(int32_t value);
1602
1603 // On some other platforms, we draw a distinction between safe and unsafe
1604 // smis.
1605 static bool IsSafe(const Object& object) { return true; }
1606 static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
1607
1608 bool constant_pool_allowed() const { return constant_pool_allowed_; }
1609 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
1610
1611 compiler::LRState lr_state() const { return lr_state_; }
1612 void set_lr_state(compiler::LRState b) { lr_state_ = b; }
1613
1614 // Whether we can branch to a target which is [distance] bytes away from the
1615 // beginning of the branch instruction.
1616 //
1617 // Use this function for testing whether [distance] can be encoded using the
1618 // 24-bit offsets in the branch instructions, which are multiples of 4.
1619 static bool CanEncodeBranchDistance(int32_t distance) {
1620 ASSERT(Utils::IsAligned(distance, 4));
1621 // The distance is off by 8 due to the way the ARM CPUs read PC.
1622 distance -= Instr::kPCReadOffset;
1623 distance >>= 2;
1624 return Utils::IsInt(24, distance);
1625 }
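// Added note: the branch field is a signed 24-bit word offset, so the reach is
// roughly +/-32 MB measured from PC + 8 (Instr::kPCReadOffset accounts for the
// CPU reading PC two instructions ahead); distances outside that range require
// the far-branch sequences selected via use_far_branches().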
1626
1627 static int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
1628 static int32_t DecodeBranchOffset(int32_t inst);
1629
1630 private:
1631 bool use_far_branches_;
1632
1633 bool constant_pool_allowed_;
1634
1635 compiler::LRState lr_state_ = compiler::LRState::OnEntry();
1636
1637 // If you are thinking of using one or both of these instructions directly,
1638 // instead LoadImmediate should probably be used.
1639 void movw(Register rd, uint16_t imm16, Condition cond = AL);
1640 void movt(Register rd, uint16_t imm16, Condition cond = AL);
1641
1642 void BindARMv7(Label* label);
1643
1644 void BranchLink(const ExternalLabel* label);
1645 void BranchLink(intptr_t target_code_pool_index, CodeEntryKind entry_kind);
1646
1647 void LoadObjectHelper(
1648 Register rd,
1649 const Object& object,
1650 Condition cond,
1651 bool is_unique,
1652 Register pp,
1655
1656 void EmitType01(Condition cond,
1657 int type,
1658 Opcode opcode,
1659 int set_cc,
1660 Register rn,
1661 Register rd,
1662 Operand o);
1663
1664 void EmitType5(Condition cond, int32_t offset, bool link);
1665
1666 void EmitMemOp(Condition cond, bool load, bool byte, Register rd, Address ad);
1667
1668 void EmitMemOpAddressMode3(Condition cond,
1669 int32_t mode,
1670 Register rd,
1671 Address ad);
1672
1673 void EmitMultiMemOp(Condition cond,
1675 bool load,
1676 Register base,
1677 RegList regs);
1678
1679 void EmitShiftImmediate(Condition cond,
1680 Shift opcode,
1681 Register rd,
1682 Register rm,
1683 Operand o);
1684
1685 void EmitShiftRegister(Condition cond,
1686 Shift opcode,
1687 Register rd,
1688 Register rm,
1689 Operand o);
1690
1691 void EmitMulOp(Condition cond,
1692 int32_t opcode,
1693 Register rd,
1694 Register rn,
1695 Register rm,
1696 Register rs);
1697
1698 void EmitDivOp(Condition cond,
1699 int32_t opcode,
1700 Register rd,
1701 Register rn,
1702 Register rm);
1703
1704 void EmitMultiVSMemOp(Condition cond,
1706 bool load,
1707 Register base,
1709 uint32_t count);
1710
1711 void EmitMultiVDMemOp(Condition cond,
1713 bool load,
1714 Register base,
1716 int32_t count);
1717
1718 void EmitVFPsss(Condition cond,
1719 int32_t opcode,
1720 SRegister sd,
1721 SRegister sn,
1722 SRegister sm);
1723
1724 void EmitVFPddd(Condition cond,
1725 int32_t opcode,
1726 DRegister dd,
1727 DRegister dn,
1728 DRegister dm);
1729
1730 void EmitVFPsd(Condition cond, int32_t opcode, SRegister sd, DRegister dm);
1731
1732 void EmitVFPds(Condition cond, int32_t opcode, DRegister dd, SRegister sm);
1733
1734 void EmitSIMDqqq(int32_t opcode,
1735 OperandSize sz,
1736 QRegister qd,
1737 QRegister qn,
1738 QRegister qm);
1739
1740 void EmitSIMDddd(int32_t opcode,
1741 OperandSize sz,
1742 DRegister dd,
1743 DRegister dn,
1744 DRegister dm);
1745
1746 void EmitFarBranch(Condition cond, int32_t offset, bool link);
1747 void EmitBranch(Condition cond, Label* label, bool link);
1748 void BailoutIfInvalidBranchOffset(int32_t offset);
1749 int32_t EncodeTstOffset(int32_t offset, int32_t inst);
1750 int32_t DecodeTstOffset(int32_t inst);
1751
1753 std::function<void(Condition, Register)>
1754 generate_invoke_write_barrier_wrapper_;
1755 std::function<void(Condition)> generate_invoke_array_write_barrier_;
1756
1759};
1760
1761} // namespace compiler
1762} // namespace dart
1763
1764#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
void cmp(Register rn, Operand o, Condition cond=AL)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void LoadQImmediate(QRegister dd, simd128_value_t value)
void StoreMemoryValue(Register src, Register base, int32_t offset)
void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void StoreFieldToOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void bic(Register rd, Register rn, Operand o, Condition cond=AL)
Address ElementAddressForRegIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
void smlal(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
Address PrepareLargeLoadOffset(const Address &addr, OperandSize sz, Condition cond)
void StoreToStack(Register src, intptr_t depth)
void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void InitializeFieldsNoBarrierUnrolled(Register object, Register base, intptr_t begin_offset, intptr_t end_offset, Register value_even, Register value_odd)
void vandq(QRegister qd, QRegister qn, QRegister qm)
void nop(Condition cond=AL)
void vsubqs(QRegister qd, QRegister qn, QRegister qm)
void SubRegisters(Register dest, Register src)
void rbit(Register rd, Register rm, Condition cond=AL)
void StoreToOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void StoreInternalPointer(Register object, const Address &dest, Register value)
void vaddqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void MulImmediate(Register reg, int32_t imm, OperandSize width=kFourBytes) override
void AddRegisters(Register dest, Register src)
void rsb(Register rd, Register rn, Operand o, Condition cond=AL)
void Bind(Label *label) override
void ReserveAlignedFrameSpace(intptr_t frame_space)
void vstmd(BlockAddressMode am, Register base, DRegister first, intptr_t count, Condition cond=AL)
void blx(Register rm, Condition cond=AL)
void strd(Register rd, Register rd2, Register rn, int32_t offset, Condition cond=AL)
void vsqrts(SRegister sd, SRegister sm, Condition cond=AL)
void ldrsb(Register rd, Address ad, Condition cond=AL)
void LoadImmediate(Register rd, int32_t value, Condition cond)
void sbc(Register rd, Register rn, Operand o, Condition cond=AL)
void RangeCheck(Register value, Register temp, intptr_t low, intptr_t high, RangeCheckCondition condition, Label *target) override
void Ret(Condition cond=AL)
void vldrd(DRegister dd, Address ad, Condition cond=AL)
void movs(Register rd, Operand o, Condition cond=AL)
void LoadClassId(Register result, Register object, Condition cond=AL)
void sbcs(Register rd, Register rn, Operand o, Condition cond=AL)
void StoreMultipleDToOffset(DRegister first, intptr_t count, Register base, int32_t offset)
void teq(Register rn, Operand o, Condition cond=AL)
void bl(Label *label, Condition cond=AL)
void GenerateUnRelocatedPcRelativeCall(Condition cond=AL, intptr_t offset_into_target=0)
void PushImmediate(int32_t immediate)
void PopRegister(Register r)
void vorrq(QRegister qd, QRegister qn, QRegister qm)
void ArrayStoreBarrier(Register object, Register slot, Register value, CanBeSmi can_be_smi, Register scratch) override
void LslRegister(Register dst, Register shift) override
void strh(Register rd, Address ad, Condition cond=AL)
void add(Register rd, Register rn, Operand o, Condition cond=AL)
void ldrb(Register rd, Address ad, Condition cond=AL)
void StoreRelease(Register src, const Address &address, OperandSize size=kFourBytes) override
void ExitFullSafepoint(Register scratch0, Register scratch1, bool ignore_unwind_in_progress)
void LoadHalfWordUnsignedUnaligned(Register dst, Register addr, Register tmp)
void mul(Register rd, Register rn, Register rm, Condition cond=AL)
void Call(Address target, Condition cond=AL)
void vcvtud(SRegister sd, DRegister dm, Condition cond=AL)
void StoreSToOffset(SRegister reg, Register base, int32_t offset, Condition cond=AL)
void WriteAllocationCanary(Register top)
Address ElementAddressForIntIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, Register array, intptr_t index, Register temp)
void vcvtds(DRegister dd, SRegister sm, Condition cond=AL)
void vmvnq(QRegister qd, QRegister qm)
void CallRuntime(const RuntimeEntry &entry, intptr_t argument_count)
void ExtractInstanceSizeFromTags(Register result, Register tags)
void LoadObject(Register rd, const Object &object, Condition cond=AL)
void Asrs(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void CompareRegisters(Register rn, Register rm)
void AndImmediate(Register rd, int32_t imm, Condition cond)
void ExtendValue(Register rd, Register rm, OperandSize sz) override
bool vmovs(SRegister sd, float s_imm, Condition cond=AL)
void CompareToStack(Register src, intptr_t depth)
void umull(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
void vrecpsqs(QRegister qd, QRegister qn, QRegister qm)
void sbfx(Register rd, Register rn, int32_t lsb, int32_t width, Condition cond=AL)
void SubImmediateSetFlags(Register rd, Register rn, int32_t value, Condition cond=AL)
void LeaveFrame(RegList regs, bool allow_pop_pc=false)
void LoadImmediate(Register rd, Immediate value, Condition cond=AL)
void bkpt(uint16_t imm16)
void VreciprocalSqrtqs(QRegister qd, QRegister qm)
void vceqqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void LoadImmediate(Register rd, int32_t value) override
void Vreciprocalqs(QRegister qd, QRegister qm)
void AddImmediate(Register rd, Register rn, int32_t value, Condition cond=AL)
void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void adcs(Register rd, Register rn, Operand o, Condition cond=AL)
void vcugeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond=AL)
void vsqrtd(DRegister dd, DRegister dm, Condition cond=AL)
void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void LoadFromOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm)
void Jump(const Address &address)
void vnegs(SRegister sd, SRegister sm, Condition cond=AL)
void vsubqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vcvtsd(SRegister sd, DRegister dm, Condition cond=AL)
void adc(Register rd, Register rn, Operand o, Condition cond=AL)
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
void LoadWordFromPoolIndex(Register rd, intptr_t index, Register pp=PP, Condition cond=AL)
void strb(Register rd, Address ad, Condition cond=AL)
void MaybeTraceAllocation(intptr_t cid, Label *trace, Register temp_reg, JumpDistance distance=JumpDistance::kFarJump)
void CallCFunction(Register target, Condition cond=AL)
void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void BranchLinkPatchable(const Code &code, CodeEntryKind entry_kind=CodeEntryKind::kNormal, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void vmulqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vcgeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
static bool IsSafeSmi(const Object &object)
void vcgtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void TransitionGeneratedToNative(Register destination_address, Register exit_frame_fp, Register exit_through_ffi, Register scratch0, bool enter_safepoint)
void and_(Register rd, Register rn, Operand o, Condition cond=AL)
void vabsqs(QRegister qd, QRegister qm)
void SubImmediate(Register rd, Register rn, int32_t value, Condition cond=AL)
void vcvtis(SRegister sd, SRegister sm, Condition cond=AL)
void Drop(intptr_t stack_elements)
void vabsd(DRegister dd, DRegister dm, Condition cond=AL)
void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset)
void EnterCFrame(intptr_t frame_space)
void InitializeFieldsNoBarrier(Register object, Register begin, Register end, Register value_even, Register value_odd)
void StoreBarrier(Register object, Register value, CanBeSmi can_be_smi, Register scratch) override
compiler::LRState lr_state() const
void vcugtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void SmiTag(Register reg, Condition cond)
void AddScaled(Register dest, Register src, ScaleFactor scale, int32_t value)
void AndImmediate(Register rd, int32_t imm) override
void vmovq(QRegister qd, QRegister qm)
void vldrs(SRegister sd, Address ad, Condition cond=AL)
void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void ands(Register rd, Register rn, Operand o, Condition cond=AL)
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override
void udiv(Register rd, Register rn, Register rm, Condition cond=AL)
void CompareWords(Register reg1, Register reg2, intptr_t offset, Register count, Register temp, Label *equals) override
void SmiUntag(Register dst, Register src, Label *is_smi)
void ExtendValue(Register rd, Register rm, OperandSize sz, Condition cond)
void clz(Register rd, Register rm, Condition cond=AL)
void BranchIfNotSmi(Register reg, Label *label, JumpDistance distance=kFarJump)
void MoveAndSmiTagRegister(Register rd, Register rm) override
void ldrh(Register rd, Address ad, Condition cond=AL)
void vmulqs(QRegister qd, QRegister qn, QRegister qm)
void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx)
void vmrs(Register rd, Condition cond=AL)
void CallCFunction(Address target)
void vldmd(BlockAddressMode am, Register base, DRegister first, intptr_t count, Condition cond=AL)
void set_lr_state(compiler::LRState b)
void subs(Register rd, Register rn, Operand o, Condition cond=AL)
void vshlqi(OperandSize sz, QRegister qd, QRegister qm, QRegister qn)
static bool IsSafe(const Object &object)
void BranchIfBit(Register rn, intptr_t bit_number, Condition condition, Label *label, JumpDistance distance=kFarJump)
void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void StoreIntoSmiField(const Address &dest, Register value)
void LoadFromOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp)
void Jump(Register target)
void sdiv(Register rd, Register rn, Register rm, Condition cond=AL)
void mvns(Register rd, Operand o, Condition cond=AL)
void ldrd(Register rd, Register rd2, Register rn, int32_t offset, Condition cond=AL)
void LoadClassIdMayBeSmi(Register result, Register object)
void LoadSImmediate(SRegister sd, float value, Condition cond=AL)
void ldrex(Register rd, Register rn, Condition cond=AL)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void LoadIsolateGroup(Register dst)
void LoadFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi) override
void AndImmediateSetFlags(Register rd, Register rn, int32_t value, Condition cond=AL)
void BranchOnMonomorphicCheckedEntryJIT(Label *label)
void Store(Register reg, const Address &address, OperandSize type, Condition cond)
void SmiUntag(Register reg, Condition cond=AL)
void MoveRegister(Register rd, Register rm, Condition cond)
void sub(Register rd, Register rn, Operand o, Condition cond=AL)
void vnegd(DRegister dd, DRegister dm, Condition cond=AL)
void Emit(int32_t value)
void LoadFieldAddressForOffset(Register address, Register instance, int32_t offset) override
void EmitEntryFrameVerification(Register scratch)
void TryAllocateArray(intptr_t cid, intptr_t instance_size, Label *failure, Register instance, Register end_address, Register temp1, Register temp2)
void CompareImmediate(Register rn, int32_t value, OperandSize width=kFourBytes) override
void EnterFrame(RegList regs, intptr_t frame_space)
void StoreToOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void vrecpeqs(QRegister qd, QRegister qm)
void vmovdr(DRegister dd, int i, Register rt, Condition cond=AL)
void MaybeTraceAllocation(Register cid, Label *trace, Register temp_reg, JumpDistance distance=JumpDistance::kFarJump)
void vrsqrteqs(QRegister qd, QRegister qm)
void Asr(Register rd, Register rm, Register rs, Condition cond=AL)
void FinalizeHashForSize(intptr_t bit_size, Register dst, Register scratch=TMP) override
void strex(Register rd, Register rt, Register rn, Condition cond=AL)
void EnsureHasClassIdInDEBUG(intptr_t cid, Register src, Register scratch, bool can_be_null=false) override
void ExtendAndSmiTagValue(Register rd, Register rm, OperandSize sz, Condition cond)
void LslImmediate(Register rd, int32_t shift)
void rsc(Register rd, Register rn, Operand o, Condition cond=AL)
void LoadIndexedPayload(Register dst, Register base, int32_t payload_start, Register index, ScaleFactor scale, OperandSize type=kFourBytes) override
void StoreDToOffset(DRegister reg, Register base, int32_t offset, Condition cond=AL)
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
void rsbs(Register rd, Register rn, Operand o, Condition cond=AL)
void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond=AL)
void MarkExceptionHandler(Label *label)
void vstms(BlockAddressMode am, Register base, SRegister first, SRegister last, Condition cond=AL)
void LoadDecodableImmediate(Register rd, int32_t value, Condition cond=AL)
void Branch(const Address &address, Condition cond=AL)
void EnterFullSafepoint(Register scratch0, Register scratch1)
static int32_t EncodeBranchOffset(int32_t offset, int32_t inst)
void CopyMemoryWords(Register src, Register dst, Register size, Register temp)
void LoadHalfWordUnaligned(Register dst, Register addr, Register tmp)
void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm)
void StoreZero(const Address &address, Register temp)
Address PrepareLargeStoreOffset(const Address &addr, OperandSize sz, Condition cond)
void MoveAndSmiTagRegister(Register rd, Register rm, Condition cond)
void SmiTag(Register reg) override
void Lsr(Register rd, Register rm, Register rs, Condition cond=AL)
void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond=AL)
void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override
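The members above form the macro-assembler surface the Dart compiler drives. Below is a minimal sketch, not part of this header, of how a few of them (BranchIfNotSmi, SmiUntag, AddImmediate, SmiTag, Bind, Ret) might compose when emitting code inside the VM build; EmitSmiIncrement and the __ shorthand are hypothetical, the register names come from the VM's ARM register constants, and the snippet assumes it is compiled within namespace dart.

#define __ assembler->
// Hypothetical sketch: increment a Smi held in R0, leaving non-Smi values untouched.
static void EmitSmiIncrement(compiler::Assembler* assembler) {
  compiler::Label done;
  __ BranchIfNotSmi(R0, &done);  // skip the arithmetic for non-Smi input
  __ SmiUntag(R1, R0);           // R1 = untagged integer value of R0
  __ AddImmediate(R1, R1, 1);    // R1 += 1
  __ SmiTag(R0, R1);             // retag the result back into R0
  __ Bind(&done);
  __ Ret();
}
#undef __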
FieldAddress & operator=(const FieldAddress &other)
FieldAddress(Register base, Register r)
FieldAddress(const FieldAddress &other)
FieldAddress(Register base, int32_t disp)
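FieldAddress parallels Address but is meant for tagged object pointers; by the usual Dart VM convention (an assumption here, to be confirmed against assembler_arm.cc) it folds the heap-object tag adjustment into the displacement. A hypothetical sketch using the str overload listed above; EmitStoreRawField and kValueOffset are made up.

// Hypothetical sketch: store the untagged word in R1 into a field of the tagged object in R0.
static void EmitStoreRawField(compiler::Assembler* assembler) {
  const int32_t kValueOffset = 8;  // made-up, untagged field offset
  assembler->str(R1, compiler::FieldAddress(R0, kValueOffset));
}

For stores of heap references, the barrier-aware helpers listed above (StoreBarrier, StoreObjectIntoObjectNoBarrier, StoreIntoSmiField) exist so the GC write barrier is not skipped; a raw str like this is only appropriate for Smis and untagged data.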
Operand(uint32_t immediate)
Operand(uint32_t rotate, uint32_t immed8)
static bool CanHold(uint32_t immediate, Operand *o)
Operand(const Operand &other)
Operand(Register rm, Shift shift, Register rs)
Operand & operator=(const Operand &other)
Operand(Register rm, Shift shift, uint32_t shift_imm)
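Operand models the ARM data-processing shifter operand: a plain immediate must be an 8-bit value rotated right by an even amount, which is what the (rotate, immed8) constructor encodes, and CanHold reports whether a 32-bit constant fits that form. A hypothetical sketch, assuming the Dart VM build, that probes encodability with CanHold before falling back to AddImmediate; EmitAddConstant is made up.

// Hypothetical sketch: add a 32-bit constant to R0 with a single ADD when the
// constant fits the rotated-immediate form, e.g. 0xFF000000 (0xFF rotated right
// by 8) fits while 0x12345678 does not.
static void EmitAddConstant(compiler::Assembler* assembler, uint32_t constant) {
  compiler::Operand op(0);  // placeholder; filled in by CanHold on success
  if (compiler::Operand::CanHold(constant, &op)) {
    assembler->add(R0, R0, op);
  } else {
    assembler->AddImmediate(R0, R0, static_cast<int32_t>(constant));
  }
}

In practice the *Immediate helpers above are expected to perform this check themselves; the sketch only makes the encoding constraint visible.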