assembler_arm.h
1// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_
13#error Do not include assembler_arm.h directly; use assembler.h instead.
14#endif
15
16#include <functional>
17
18#include "platform/assert.h"
19#include "platform/utils.h"
20#include "vm/code_entry_kind.h"
21#include "vm/compiler/assembler/assembler_base.h"
22#include "vm/compiler/assembler/object_pool_builder.h"
23#include "vm/compiler/runtime_api.h"
24#include "vm/constants.h"
25#include "vm/cpu.h"
26#include "vm/hash_map.h"
27#include "vm/simulator.h"
28
29namespace dart {
30
31// Forward declarations.
32class FlowGraphCompiler;
33class RegisterSet;
34class RuntimeEntry;
35
36// Load/store multiple addressing mode.
37enum BlockAddressMode {
38 // bit encoding P U W
39 DA = (0 | 0 | 0) << 21, // decrement after
40 IA = (0 | 4 | 0) << 21, // increment after
41 DB = (8 | 0 | 0) << 21, // decrement before
42 IB = (8 | 4 | 0) << 21, // increment before
43 DA_W = (0 | 0 | 1) << 21, // decrement after with writeback to base
44 IA_W = (0 | 4 | 1) << 21, // increment after with writeback to base
45 DB_W = (8 | 0 | 1) << 21, // decrement before with writeback to base
46 IB_W = (8 | 4 | 1) << 21 // increment before with writeback to base
47};
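// Illustrative sketch (not from this file): with SP as the base register,
// stm(DB_W, SP, regs) matches the ARM "stmdb sp!, {...}" push sequence and
// ldm(IA_W, SP, regs) matches "ldmia sp!, {...}" pop, since the P/U/W bits
// select decrement-before / increment-after with writeback to the base.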
48
49namespace compiler {
50
51class Immediate : public ValueObject {
52 public:
53 explicit Immediate(int32_t value) : value_(value) {}
54
55 int32_t value() const { return value_; }
56
57 private:
58 const int32_t value_;
59};
60
61// Instruction encoding bits.
62enum {
63 H = 1 << 5, // halfword (or byte)
64 L = 1 << 20, // load (or store)
65 S = 1 << 20, // set condition code (or leave unchanged)
66 W = 1 << 21, // writeback base register (or leave unchanged)
67 A = 1 << 21, // accumulate in multiply instruction (or not)
68 B = 1 << 22, // unsigned byte (or word)
69 D = 1 << 22, // high/lo bit of start of s/d register range
70 N = 1 << 22, // long (or short)
71 U = 1 << 23, // positive (or negative) offset/index
72 P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
73 I = 1 << 25, // immediate shifter operand (or not)
74
75 B0 = 1,
76 B1 = 1 << 1,
77 B2 = 1 << 2,
78 B3 = 1 << 3,
79 B4 = 1 << 4,
80 B5 = 1 << 5,
81 B6 = 1 << 6,
82 B7 = 1 << 7,
83 B8 = 1 << 8,
84 B9 = 1 << 9,
85 B10 = 1 << 10,
86 B11 = 1 << 11,
87 B12 = 1 << 12,
88 B13 = 1 << 13,
89 B14 = 1 << 14,
90 B15 = 1 << 15,
91 B16 = 1 << 16,
92 B17 = 1 << 17,
93 B18 = 1 << 18,
94 B19 = 1 << 19,
95 B20 = 1 << 20,
96 B21 = 1 << 21,
97 B22 = 1 << 22,
98 B23 = 1 << 23,
99 B24 = 1 << 24,
100 B25 = 1 << 25,
101 B26 = 1 << 26,
102 B27 = 1 << 27,
103};
104
105class ArmEncode : public AllStatic {
106 public:
107 static inline uint32_t Rd(Register rd) {
108 ASSERT(rd < 16);
109 return static_cast<uint32_t>(rd) << kRdShift;
110 }
111
112 static inline uint32_t Rm(Register rm) {
113 ASSERT(rm < 16);
114 return static_cast<uint32_t>(rm) << kRmShift;
115 }
116
117 static inline uint32_t Rn(Register rn) {
118 ASSERT(rn < 16);
119 return static_cast<uint32_t>(rn) << kRnShift;
120 }
121
122 static inline uint32_t Rs(Register rs) {
123 ASSERT(rs < 16);
124 return static_cast<uint32_t>(rs) << kRsShift;
125 }
126};
127
128// Encodes Addressing Mode 1 - Data-processing operands.
129class Operand : public ValueObject {
130 public:
131 // Data-processing operands - Uninitialized.
132 Operand() : type_(-1), encoding_(-1) {}
133
134 // Data-processing operands - Copy constructor.
135 Operand(const Operand& other)
136 : ValueObject(), type_(other.type_), encoding_(other.encoding_) {}
137
138 // Data-processing operands - Assignment operator.
139 Operand& operator=(const Operand& other) {
140 type_ = other.type_;
141 encoding_ = other.encoding_;
142 return *this;
143 }
144
145 // Data-processing operands - Immediate.
146 explicit Operand(uint32_t immediate) {
147 ASSERT(immediate < (1 << kImmed8Bits));
148 type_ = 1;
149 encoding_ = immediate;
150 }
151
152 // Data-processing operands - Rotated immediate.
153 Operand(uint32_t rotate, uint32_t immed8) {
154 ASSERT((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
155 type_ = 1;
156 encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
157 }
158
159 // Data-processing operands - Register.
160 explicit Operand(Register rm) {
161 type_ = 0;
162 encoding_ = static_cast<uint32_t>(rm);
163 }
164
165 // Data-processing operands - Logical shift/rotate by immediate.
166 Operand(Register rm, Shift shift, uint32_t shift_imm) {
167 ASSERT(shift_imm < (1 << kShiftImmBits));
168 type_ = 0;
169 encoding_ = shift_imm << kShiftImmShift |
170 static_cast<uint32_t>(shift) << kShiftShift |
171 static_cast<uint32_t>(rm);
172 }
173
174 // Data-processing operands - Logical shift/rotate by register.
175 Operand(Register rm, Shift shift, Register rs) {
176 type_ = 0;
177 encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
178 static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
179 static_cast<uint32_t>(rm);
180 }
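// Usage sketch (assumed typical call sites, not part of this header): the
// shifter-operand forms above let data-processing instructions fold a shift
// into a single instruction, e.g.
//   add(R0, R1, Operand(R2, LSL, 2));  // r0 = r1 + (r2 << 2)
//   mov(R0, Operand(R1, ASR, R3));     // r0 = r1 >> r3 (arithmetic, by register)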
181
182 static bool CanHold(uint32_t immediate, Operand* o) {
183 // Avoid the more expensive test for frequent small immediate values.
184 if (immediate < (1 << kImmed8Bits)) {
185 o->type_ = 1;
186 o->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
187 return true;
188 }
189 // Note that immediate must be unsigned for the test to work correctly.
190 for (int rot = 0; rot < 16; rot++) {
191 uint32_t imm8 = Utils::RotateLeft(immediate, 2 * rot);
192 if (imm8 < (1 << kImmed8Bits)) {
193 o->type_ = 1;
194 o->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
195 return true;
196 }
197 }
198 return false;
199 }
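// Worked example (illustrative): 0xFF000000 is encodable because rotating it
// left by 8 (rot = 4) yields 0xFF, so it is stored as rotate=4, imm8=0xFF.
// 0x101 spans nine significant bits and fails for every rotation, so CanHold
// returns false and callers must materialize the value some other way.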
200
201 private:
202 bool is_valid() const { return (type_ == 0) || (type_ == 1); }
203
204 uint32_t type() const {
205 ASSERT(is_valid());
206 return type_;
207 }
208
209 uint32_t encoding() const {
210 ASSERT(is_valid());
211 return encoding_;
212 }
213
214 uint32_t type_; // Encodes the type field (bits 27-25) in the instruction.
215 uint32_t encoding_;
216
217 friend class Assembler;
218 friend class Address;
219};
220
221class Address : public ValueObject {
222 public:
223 enum OffsetKind {
224 Immediate,
225 IndexRegister,
226 ScaledIndexRegister,
227 };
228
229 // Memory operand addressing mode
230 enum Mode {
231 kModeMask = (8 | 4 | 1) << 21,
232 // bit encoding P U W
233 Offset = (8 | 4 | 0) << 21, // offset (w/o writeback to base)
234 PreIndex = (8 | 4 | 1) << 21, // pre-indexed addressing with writeback
235 PostIndex = (0 | 4 | 0) << 21, // post-indexed addressing with writeback
236 NegOffset = (8 | 0 | 0) << 21, // negative offset (w/o writeback to base)
237 NegPreIndex = (8 | 0 | 1) << 21, // negative pre-indexed with writeback
238 NegPostIndex = (0 | 0 | 0) << 21 // negative post-indexed with writeback
239 };
240
241 Address(const Address& other)
242 : ValueObject(),
243 encoding_(other.encoding_),
244 kind_(other.kind_),
245 base_(other.base_),
246 offset_(other.offset_) {}
247
248 Address& operator=(const Address& other) {
249 encoding_ = other.encoding_;
250 kind_ = other.kind_;
251 base_ = other.base_;
252 offset_ = other.offset_;
253 return *this;
254 }
255
256 bool Equals(const Address& other) const {
257 return (encoding_ == other.encoding_) && (kind_ == other.kind_);
258 }
259
260 explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
261 kind_ = Immediate;
262 base_ = rn;
263 offset_ = offset;
264 // If the offset can't be encoded in fewer bits, then it'll conflict with
265 // the encoding of the mode and we won't be able to retrieve it later.
266 ASSERT(Utils::MagnitudeIsUint(12, offset));
267 if (offset < 0) {
268 encoding_ = (am ^ (1 << kUShift)) | -offset; // Flip U to adjust sign.
269 } else {
270 encoding_ = am | offset;
271 }
272 encoding_ |= ArmEncode::Rn(rn);
273 }
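// Addressing sketch (illustrative): Address(R1, 8) produces the plain offset
// form [r1, #8]; Address(R1, 8, PreIndex) is [r1, #8]! (base updated before
// the access); Address(R1, 8, PostIndex) is [r1], #8 (base updated after).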
274
275 // There is no register offset mode unless Mode is Offset, in which case the
276 // shifted register case below should be used.
277 Address(Register rn, Register r, Mode am);
278
279 Address(Register rn,
280 Register rm,
281 Shift shift = LSL,
282 uint32_t shift_imm = 0,
283 Mode am = Offset) {
284 Operand o(rm, shift, shift_imm);
285
286 if ((shift == LSL) && (shift_imm == 0)) {
287 kind_ = IndexRegister;
288 } else {
289 kind_ = ScaledIndexRegister;
290 }
291 encoding_ = o.encoding() | am | ArmEncode::Rn(rn);
292 }
293
294 // There is no shifted register mode with a register shift.
295 Address(Register rn, Register rm, Shift shift, Register r, Mode am = Offset);
296
297 static OperandSize OperandSizeFor(intptr_t cid);
298
299 static bool CanHoldLoadOffset(OperandSize size,
300 int32_t offset,
301 int32_t* offset_mask);
302 static bool CanHoldStoreOffset(OperandSize size,
303 int32_t offset,
304 int32_t* offset_mask);
305 static bool CanHoldImmediateOffset(bool is_load,
306 intptr_t cid,
307 int64_t offset);
308
309 private:
310 Register rn() const {
311 return Instr::At(reinterpret_cast<uword>(&encoding_))->RnField();
312 }
313
314 Register rm() const {
315 return ((kind() == IndexRegister) || (kind() == ScaledIndexRegister))
316 ? Instr::At(reinterpret_cast<uword>(&encoding_))->RmField()
317 : kNoRegister;
318 }
319
320 Mode mode() const { return static_cast<Mode>(encoding_ & kModeMask); }
321
322 bool has_writeback() const {
323 return (mode() == PreIndex) || (mode() == PostIndex) ||
324 (mode() == NegPreIndex) || (mode() == NegPostIndex);
325 }
326
327 static bool has_writeback(BlockAddressMode am) {
328 switch (am) {
329 case DA:
330 case IA:
331 case DB:
332 case IB:
333 return false;
334 case DA_W:
335 case IA_W:
336 case DB_W:
337 case IB_W:
338 return true;
339 default:
340 UNREACHABLE();
341 return false;
342 }
343 }
344
345 uint32_t encoding() const {
346 ASSERT_IMPLIES(kind_ == Immediate, Utils::MagnitudeIsUint(12, offset_));
347 return encoding_;
348 }
349
350 // Encoding for addressing mode 3.
351 uint32_t encoding3() const;
352
353 // Encoding for vfp load/store addressing.
354 uint32_t vencoding() const;
355
356 OffsetKind kind() const { return kind_; }
357 Register base() const { return base_; }
358 int32_t offset() const { return offset_; }
359
360 uint32_t encoding_;
361
362 OffsetKind kind_;
363 Register base_ = kNoRegister;
364 int32_t offset_ = 0;
365
366 friend class Assembler;
367};
368
369class FieldAddress : public Address {
370 public:
371 FieldAddress(Register base, int32_t disp)
372 : Address(base, disp - kHeapObjectTag) {}
373
374 // This addressing mode does not exist.
375 FieldAddress(Register base, Register r);
376
377 FieldAddress(const FieldAddress& other) : Address(other) {}
378
379 FieldAddress& operator=(const FieldAddress& other) {
380 Address::operator=(other);
381 return *this;
382 }
383};
384
385class Assembler : public AssemblerBase {
386 public:
387 explicit Assembler(ObjectPoolBuilder* object_pool_builder,
388 intptr_t far_branch_level = 0);
389 ~Assembler() {}
390
391 void PushRegister(Register r) { Push(r); }
392 void PopRegister(Register r) { Pop(r); }
393
394 // Push two registers to the stack; r0 to lower address location.
395 void PushRegisterPair(Register r0, Register r1) {
396 if ((r0 < r1) && (r0 != SP) && (r1 != SP)) {
397 RegList reg_list = (1 << r0) | (1 << r1);
398 PushList(reg_list);
399 } else {
400 PushRegister(r1);
401 PushRegister(r0);
402 }
403 }
404
405 // Pop two registers from the stack; r0 from lower address location.
406 void PopRegisterPair(Register r0, Register r1) {
407 if ((r0 < r1) && (r0 != SP) && (r1 != SP)) {
408 RegList reg_list = (1 << r0) | (1 << r1);
409 PopList(reg_list);
410 } else {
411 PopRegister(r0);
412 PopRegister(r1);
413 }
414 }
415
417
418 void Bind(Label* label) override;
419 // Unconditional jump to a given label. [distance] is ignored on ARM.
420 void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
421 // Unconditional jump to a given address in register.
422 void Jump(Register target) { bx(target); }
423 // Unconditional jump to a given address in memory.
424 void Jump(const Address& address) { Branch(address); }
425
426 void LoadMemoryValue(Register dst, Register base, int32_t offset) {
427 LoadFromOffset(dst, base, offset);
428 }
429 void StoreMemoryValue(Register src, Register base, int32_t offset) {
430 StoreToOffset(src, base, offset);
431 }
432 void LoadAcquire(Register dst,
433 const Address& address,
434 OperandSize size = kFourBytes) override {
435 Load(dst, address, size);
436 dmb();
437 }
438 void StoreRelease(Register src,
439 const Address& address,
440 OperandSize size = kFourBytes) override {
441 dmb();
442 Store(src, address, size);
443 }
444
445 void CompareWithMemoryValue(Register value,
446 Address address,
447 OperandSize size = kFourBytes) override {
449 Load(TMP, address, size);
450 cmp(value, Operand(TMP));
451 }
452
453 // Misc. functionality
454 bool use_far_branches() const {
455 return FLAG_use_far_branches || use_far_branches_;
456 }
457
458#if defined(TESTING) || defined(DEBUG)
459 // Used in unit tests and to ensure predictable verification code size in
460 // FlowGraphCompiler::EmitEdgeCounter.
461 void set_use_far_branches(bool b) { use_far_branches_ = b; }
462#endif // TESTING || DEBUG
463
464 // Debugging and bringup support.
465 void Breakpoint() override { bkpt(0); }
466
467 // Data-processing instructions.
468 void and_(Register rd, Register rn, Operand o, Condition cond = AL);
469 void ands(Register rd, Register rn, Operand o, Condition cond = AL);
470
471 void eor(Register rd, Register rn, Operand o, Condition cond = AL);
472
473 void sub(Register rd, Register rn, Operand o, Condition cond = AL);
474 void subs(Register rd, Register rn, Operand o, Condition cond = AL);
475
476 void rsb(Register rd, Register rn, Operand o, Condition cond = AL);
477 void rsbs(Register rd, Register rn, Operand o, Condition cond = AL);
478
479 void add(Register rd, Register rn, Operand o, Condition cond = AL);
480
481 void adds(Register rd, Register rn, Operand o, Condition cond = AL);
482
483 void adc(Register rd, Register rn, Operand o, Condition cond = AL);
484
485 void adcs(Register rd, Register rn, Operand o, Condition cond = AL);
486
487 void sbc(Register rd, Register rn, Operand o, Condition cond = AL);
488
489 void sbcs(Register rd, Register rn, Operand o, Condition cond = AL);
490
491 void rsc(Register rd, Register rn, Operand o, Condition cond = AL);
492
493 void tst(Register rn, Operand o, Condition cond = AL);
494
495 void teq(Register rn, Operand o, Condition cond = AL);
496
497 void cmp(Register rn, Operand o, Condition cond = AL);
498
499 void cmn(Register rn, Operand o, Condition cond = AL);
500
501 void orr(Register rd, Register rn, Operand o, Condition cond = AL);
502 void orrs(Register rd, Register rn, Operand o, Condition cond = AL);
503
504 void mov(Register rd, Operand o, Condition cond = AL);
505 void movs(Register rd, Operand o, Condition cond = AL);
506
507 void bic(Register rd, Register rn, Operand o, Condition cond = AL);
508 void bics(Register rd, Register rn, Operand o, Condition cond = AL);
509
510 void mvn_(Register rd, Operand o, Condition cond = AL);
511 void mvns(Register rd, Operand o, Condition cond = AL);
512
513 // Miscellaneous data-processing instructions.
514 void clz(Register rd, Register rm, Condition cond = AL);
515 void rbit(Register rd, Register rm, Condition cond = AL);
516
517 // Multiply instructions.
518 void mul(Register rd, Register rn, Register rm, Condition cond = AL);
519 void muls(Register rd, Register rn, Register rm, Condition cond = AL);
520 void mla(Register rd,
521 Register rn,
522 Register rm,
523 Register ra,
524 Condition cond = AL);
525 void mls(Register rd,
526 Register rn,
527 Register rm,
528 Register ra,
529 Condition cond = AL);
530 void smull(Register rd_lo,
531 Register rd_hi,
532 Register rn,
533 Register rm,
534 Condition cond = AL);
535 void umull(Register rd_lo,
536 Register rd_hi,
537 Register rn,
538 Register rm,
539 Condition cond = AL);
540 void smlal(Register rd_lo,
541 Register rd_hi,
542 Register rn,
543 Register rm,
544 Condition cond = AL);
545 void umlal(Register rd_lo,
546 Register rd_hi,
547 Register rn,
548 Register rm,
549 Condition cond = AL);
550
551 // Emulation of this instruction uses IP and the condition codes. Therefore,
552 // none of the registers can be IP, and the instruction can only be used
553 // unconditionally.
554 void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm);
555
556 // Division instructions.
557 void sdiv(Register rd, Register rn, Register rm, Condition cond = AL);
558 void udiv(Register rd, Register rn, Register rm, Condition cond = AL);
559
560 // Load/store instructions.
561 void ldr(Register rd, Address ad, Condition cond = AL);
562 void str(Register rd, Address ad, Condition cond = AL);
563
564 void ldrb(Register rd, Address ad, Condition cond = AL);
565 void strb(Register rd, Address ad, Condition cond = AL);
566
567 void ldrh(Register rd, Address ad, Condition cond = AL);
568 void strh(Register rd, Address ad, Condition cond = AL);
569
570 void ldrsb(Register rd, Address ad, Condition cond = AL);
571 void ldrsh(Register rd, Address ad, Condition cond = AL);
572
573 // ldrd and strd actually support the full range of addressing modes, but
574 // we don't use them, so we only support the base + offset mode.
575 // rd must be an even register and rd2 must be rd + 1.
576 void ldrd(Register rd,
577 Register rd2,
578 Register rn,
579 int32_t offset,
580 Condition cond = AL);
581 void strd(Register rd,
582 Register rd2,
583 Register rn,
584 int32_t offset,
585 Condition cond = AL);
586
587 void ldm(BlockAddressMode am,
588 Register base,
589 RegList regs,
590 Condition cond = AL);
591 void stm(BlockAddressMode am,
592 Register base,
593 RegList regs,
594 Condition cond = AL);
595
596 void ldrex(Register rd, Register rn, Condition cond = AL);
597 void strex(Register rd, Register rt, Register rn, Condition cond = AL);
598
599 void dmb();
600
601 // Media instructions.
602 void sbfx(Register rd,
603 Register rn,
604 int32_t lsb,
605 int32_t width,
606 Condition cond = AL);
607 void ubfx(Register rd,
608 Register rn,
609 int32_t lsb,
610 int32_t width,
611 Condition cond = AL);
612
613 // Emit code to transition between generated and native modes.
614 //
615 // These require that CSP and SP are equal and aligned and require two scratch
616 // registers (in addition to TMP).
617 void TransitionGeneratedToNative(Register destination_address,
618 Register exit_frame_fp,
619 Register exit_through_ffi,
620 Register scratch0,
621 bool enter_safepoint);
622 void TransitionNativeToGenerated(Register scratch0,
623 Register scratch1,
624 bool exit_safepoint,
625 bool ignore_unwind_in_progress = false,
626 bool set_tag = true);
627 void EnterFullSafepoint(Register scratch0, Register scratch1);
628 void ExitFullSafepoint(Register scratch0,
629 Register scratch1,
630 bool ignore_unwind_in_progress);
631
632 // Miscellaneous instructions.
633 void clrex();
634 void nop(Condition cond = AL);
635
636 // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
637 void bkpt(uint16_t imm16);
638
639 static int32_t BkptEncoding(uint16_t imm16) {
640 // bkpt requires that the cond field is AL.
641 return (AL << kConditionShift) | B24 | B21 | ((imm16 >> 4) << 8) | B6 | B5 |
642 B4 | (imm16 & 0xf);
643 }
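// Sanity check (worked out by hand): BkptEncoding(0) is
// AL<<28 | B24 | B21 | B6 | B5 | B4 = 0xE1200070, the canonical "bkpt #0".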
644
645 // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
646 void vmovsr(SRegister sn, Register rt, Condition cond = AL);
647 void vmovrs(Register rt, SRegister sn, Condition cond = AL);
648 void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
649 void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
650 void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
651 void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
652 void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
653 void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
654 void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
655 void vmovq(QRegister qd, QRegister qm);
656
657 // Returns false if the immediate cannot be encoded.
658 bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
659 bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
660
661 void vldrs(SRegister sd, Address ad, Condition cond = AL);
662 void vstrs(SRegister sd, Address ad, Condition cond = AL);
663 void vldrd(DRegister dd, Address ad, Condition cond = AL);
664 void vstrd(DRegister dd, Address ad, Condition cond = AL);
665
666 void vldms(BlockAddressMode am,
667 Register base,
668 SRegister first,
669 SRegister last,
670 Condition cond = AL);
671 void vstms(BlockAddressMode am,
672 Register base,
673 SRegister first,
674 SRegister last,
675 Condition cond = AL);
676
677 void vldmd(BlockAddressMode am,
678 Register base,
679 DRegister first,
680 intptr_t count,
681 Condition cond = AL);
682 void vstmd(BlockAddressMode am,
683 Register base,
684 DRegister first,
685 intptr_t count,
686 Condition cond = AL);
687
688 void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
689 void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
692 void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
693 void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
696 void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
697 void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
702 void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
703 void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
704 void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
705 void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
706 void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
707 void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
714
720
729
730 void vabss(SRegister sd, SRegister sm, Condition cond = AL);
731 void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
733 void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
734 void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
736 void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
737 void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
738
739 void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
740 void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
741 void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
742 void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
743 void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
744 void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
745 void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
746 void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
747 void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
748 void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
749
750 void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
751 void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
752 void vcmpsz(SRegister sd, Condition cond = AL);
753 void vcmpdz(DRegister dd, Condition cond = AL);
754 void vmrs(Register rd, Condition cond = AL);
755 void vmstat(Condition cond = AL);
756
757 // Duplicates the operand of size sz at index idx from dm to all elements of
758 // qd. This is a special case of vtbl.
759 void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx);
760
761 // Each byte of dm is an index into the table of bytes formed by concatenating
762 // a list of 'length' registers starting with dn. The result is placed in dd.
763 void vtbl(DRegister dd, DRegister dn, int length, DRegister dm);
764
765 // The words of qd and qm are interleaved with the low words of the result
766 // in qd and the high words in qm.
767 void vzipqw(QRegister qd, QRegister qm);
768
769 // Branch instructions.
770 void b(Label* label, Condition cond = AL);
771 void bl(Label* label, Condition cond = AL);
772 void bx(Register rm, Condition cond = AL);
773 void blx(Register rm, Condition cond = AL);
774
775 void Branch(const Address& address, Condition cond = AL);
776
777 void BranchLink(const Code& code,
778 ObjectPoolBuilderEntry::Patchability patchable =
779 ObjectPoolBuilderEntry::kNotPatchable,
780 CodeEntryKind entry_kind = CodeEntryKind::kNormal,
781 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
782 ObjectPoolBuilderEntry::kSnapshotable);
783
784 // Branch and link to an entry address. Call sequence can be patched.
785 void BranchLinkPatchable(
786 const Code& code,
787 CodeEntryKind entry_kind = CodeEntryKind::kNormal,
788 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
789 ObjectPoolBuilderEntry::kSnapshotable);
790
791 // Emit a call that shares its object pool entries with other calls
792 // that have the same equivalence marker.
793 void BranchLinkWithEquivalence(
794 const Code& code,
795 const Object& equivalence,
796 CodeEntryKind entry_kind = CodeEntryKind::kNormal);
797
798 // Branch and link to [base + offset]. Call sequence is never patched.
799 void BranchLinkOffset(Register base, int32_t offset);
800
801 void Call(Address target, Condition cond = AL) {
802 // CLOBBERS_LR uses __ to access the assembler.
803#define __ this->
804 CLOBBERS_LR({
805 ldr(LR, target, cond);
806 blx(LR, cond);
807 });
808#undef __
809 }
810 void Call(const Code& code) { BranchLink(code); }
811
811
812 void CallCFunction(Address target) { Call(target); }
813
814 void CallCFunction(Register target, Condition cond = AL) {
815 blx(target, cond);
816 }
817
818 // Add signed immediate value to rd. May clobber IP.
819 void AddImmediate(Register rd, int32_t value, Condition cond = AL) {
820 AddImmediate(rd, rd, value, cond);
821 }
822
823 // Add signed immediate value. May clobber IP.
824 void AddImmediate(Register rd,
825 Register rn,
826 int32_t value,
827 Condition cond = AL);
828 void AddImmediateSetFlags(Register rd,
829 Register rn,
830 int32_t value,
831 Condition cond = AL);
832 void AddRegisters(Register dest, Register src) override {
833 add(dest, dest, Operand(src));
834 }
835 void AddScaled(Register dest,
836 Register base,
837 Register index,
838 ScaleFactor scale,
839 int32_t disp) override {
840 if (base == kNoRegister) {
841 if (scale == TIMES_1) {
842 AddImmediate(dest, index, disp);
843 } else {
844 Lsl(dest, index, Operand(scale));
845 AddImmediate(dest, disp);
846 }
847 } else {
848 add(dest, base, Operand(index, LSL, scale));
849 AddImmediate(dest, disp);
850 }
851 }
852 void SubImmediate(Register rd,
853 Register rn,
854 int32_t value,
855 Condition cond = AL);
856 void SubImmediateSetFlags(Register rd,
857 Register rn,
858 int32_t value,
859 Condition cond = AL);
860 void SubRegisters(Register dest, Register src) override {
861 sub(dest, dest, Operand(src));
862 }
863 void MulImmediate(Register reg,
864 int32_t imm,
865 OperandSize width = kFourBytes) override {
866 ASSERT(width == kFourBytes);
867 if (Utils::IsPowerOfTwo(imm)) {
868 LslImmediate(reg, Utils::ShiftForPowerOfTwo(imm));
869 } else {
870 LoadImmediate(TMP, imm);
871 mul(reg, reg, TMP);
872 }
873 }
874 void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
875 void AndImmediate(Register rd, int32_t imm, Condition cond) {
876 AndImmediate(rd, rd, imm, cond);
877 }
878 void AndImmediate(Register rd, int32_t imm) override {
879 AndImmediate(rd, rd, imm, AL);
880 }
882 Register rn,
883 int32_t value,
884 Condition cond = AL);
885 void AndRegisters(Register dst,
886 Register src1,
887 Register src2 = kNoRegister) override {
888 ASSERT(src1 != src2); // Likely a mistake.
889 if (src2 == kNoRegister) {
890 src2 = dst;
891 }
892 and_(dst, src2, Operand(src1));
893 }
894 void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);
895 void OrImmediate(Register rd, int32_t imm, Condition cond = AL) {
896 OrImmediate(rd, rd, imm, cond);
897 }
898 void LslImmediate(Register rd, Register rn, int32_t shift) {
899 ASSERT((shift >= 0) && (shift < kBitsPerInt32));
900 Lsl(rd, rn, Operand(shift));
901 }
902 void LslImmediate(Register rd, int32_t shift) { LslImmediate(rd, rd, shift); }
903 void LslRegister(Register dst, Register shift) override {
904 Lsl(dst, dst, shift);
905 }
906 void LsrImmediate(Register rd, Register rn, int32_t shift) {
907 ASSERT((shift >= 0) && (shift < kBitsPerInt32));
908 Lsr(rd, rn, Operand(shift));
909 }
910 void LsrImmediate(Register rd, int32_t shift) override {
911 LsrImmediate(rd, rd, shift);
912 }
913
914 // Test rn and immediate. May clobber IP.
915 void TestImmediate(Register rn, int32_t imm, Condition cond = AL);
916
917 // Compare rn with signed immediate value. May clobber IP.
918 void CompareImmediate(Register rn, int32_t value, Condition cond);
919 void CompareImmediate(Register rn,
920 int32_t value,
921 OperandSize width = kFourBytes) override {
922 ASSERT(width == kFourBytes);
923 CompareImmediate(rn, value, AL);
924 }
925
926 // Signed integer division of left by right. Checks to see if integer
927 // division is supported. If not, uses the FPU for division with
928 // temporary registers tmpl and tmpr. tmpl and tmpr must be different
929 // registers.
930 void IntegerDivide(Register result,
931 Register left,
932 Register right,
933 DRegister tmpl,
934 DRegister tmpr);
935
936 // Load and Store.
937 // These three do not clobber IP.
941 void LoadImmediate(Register rd, int32_t value, Condition cond);
942 void LoadImmediate(Register rd, int32_t value) override {
943 LoadImmediate(rd, value, AL);
944 }
945 // These two may clobber IP.
946 void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
947 void LoadDImmediate(DRegister dd,
948 double value,
949 Register scratch,
950 Condition cond = AL);
951 void LoadQImmediate(QRegister dd, simd128_value_t value);
952
954
955 void Drop(intptr_t stack_elements);
956
960
963
964 // Load word from pool from the given index using encoding that
965 // InstructionPattern::DecodeLoadWordFromPool can decode.
966 void LoadWordFromPoolIndex(Register rd,
967 intptr_t index,
968 Register pp = PP,
969 Condition cond = AL);
970 // Store word to pool at the given offset.
971 //
972 // Note: clobbers TMP.
973 void StoreWordToPoolIndex(Register value,
974 intptr_t index,
975 Register pp = PP,
976 Condition cond = AL);
977
978 void LoadObject(Register rd, const Object& object, Condition cond = AL);
979 void LoadUniqueObject(
980 Register rd,
981 const Object& object,
982 Condition cond = AL,
983 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
984 ObjectPoolBuilderEntry::kSnapshotable);
985 void LoadNativeEntry(Register dst,
986 const ExternalLabel* label,
987 ObjectPoolBuilderEntry::Patchability patchable,
988 Condition cond = AL);
989 void PushObject(const Object& object);
990 void PushImmediate(int32_t immediate) {
991 LoadImmediate(TMP, immediate);
992 Push(TMP);
993 }
994 void CompareObject(Register rn, const Object& object);
995
996 void StoreObjectIntoObjectNoBarrier(
997 Register object,
998 const Address& dest,
999 const Object& value,
1000 MemoryOrder memory_order = kRelaxedNonAtomic,
1001 OperandSize size = kWordBytes) override;
1002
1003 void StoreBarrier(Register object,
1004 Register value,
1005 CanBeSmi can_be_smi,
1006 Register scratch) override;
1007 void ArrayStoreBarrier(Register object,
1008 Register slot,
1009 Register value,
1010 CanBeSmi can_be_smi,
1011 Register scratch) override;
1013
1014 // Stores a non-tagged value into a heap object.
1015 void StoreInternalPointer(Register object,
1016 const Address& dest,
1017 Register value);
1018
1019 // Store value_even, value_odd, value_even, ... into the words in the address
1020 // range [begin, end), assumed to be uninitialized fields in object (tagged).
1021 // The stores must not need a generational store barrier (e.g., smi/null),
1022 // and (value_even, value_odd) must be a valid register pair.
1023 // Destroys register 'begin'.
1024 void InitializeFieldsNoBarrier(Register object,
1025 Register begin,
1026 Register end,
1027 Register value_even,
1028 Register value_odd);
1029 // Like above, for the range [base+begin_offset, base+end_offset), unrolled.
1030 void InitializeFieldsNoBarrierUnrolled(Register object,
1031 Register base,
1032 intptr_t begin_offset,
1033 intptr_t end_offset,
1034 Register value_even,
1035 Register value_odd);
1036
1037 // Stores a Smi value into a heap object field that always contains a Smi.
1038 void StoreIntoSmiField(const Address& dest, Register value);
1039
1040 void ExtractClassIdFromTags(Register result,
1041 Register tags,
1042 Condition cond = AL);
1044
1045 void RangeCheck(Register value,
1046 Register temp,
1047 intptr_t low,
1048 intptr_t high,
1049 RangeCheckCondition condition,
1050 Label* target) override;
1051
1052 void LoadClassId(Register result, Register object, Condition cond = AL);
1053 void LoadClassById(Register result, Register class_id);
1054 void CompareClassId(Register object, intptr_t class_id, Register scratch);
1055 void LoadClassIdMayBeSmi(Register result, Register object);
1056 void LoadTaggedClassIdMayBeSmi(Register result, Register object);
1057 void EnsureHasClassIdInDEBUG(intptr_t cid,
1058 Register src,
1059 Register scratch,
1060 bool can_be_null = false) override;
1061
1062 bool CanLoadFromObjectPool(const Object& object) const;
1063
1064 Address PrepareLargeLoadOffset(const Address& addr,
1065 OperandSize sz,
1066 Condition cond);
1067 Address PrepareLargeStoreOffset(const Address& addr,
1068 OperandSize sz,
1069 Condition cond);
1070
1071 void Load(Register reg,
1072 const Address& address,
1073 OperandSize type,
1074 Condition cond);
1075 void Load(Register reg,
1076 const Address& address,
1077 OperandSize type = kFourBytes) override {
1078 Load(reg, address, type, AL);
1079 }
1080 void LoadFromOffset(Register reg,
1081 Register base,
1082 int32_t offset,
1083 OperandSize type = kFourBytes) override {
1084 LoadFromOffset(reg, base, offset, type, AL);
1085 }
1086 void LoadFromOffset(Register reg,
1087 Register base,
1088 int32_t offset,
1089 OperandSize type,
1090 Condition cond) {
1091 Load(reg, Address(base, offset), type, cond);
1092 }
1093 void LoadFieldFromOffset(Register reg,
1094 Register base,
1095 int32_t offset,
1096 OperandSize type = kFourBytes) override {
1097 LoadFieldFromOffset(reg, base, offset, type, AL);
1098 }
1099 void LoadFieldFromOffset(Register reg,
1100 Register base,
1101 int32_t offset,
1102 OperandSize type,
1103 Condition cond) {
1104 Load(reg, FieldAddress(base, offset), type, cond);
1105 }
1106 // For loading indexed payloads out of tagged objects like Arrays. If the
1107 // payload objects are word-sized, use TIMES_HALF_WORD_SIZE if the contents of
1108 // [index] is a Smi, otherwise TIMES_WORD_SIZE if unboxed.
1109 void LoadIndexedPayload(Register dst,
1110 Register base,
1111 int32_t payload_start,
1112 Register index,
1113 ScaleFactor scale,
1114 OperandSize type = kFourBytes) override {
1115 add(dst, base, Operand(index, LSL, scale));
1116 LoadFromOffset(dst, dst, payload_start - kHeapObjectTag, type);
1117 }
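// Scaling sketch (illustrative): for a word-sized payload indexed by a Smi,
// the Smi already carries a factor of two (value << 1), so TIMES_HALF_WORD_SIZE
// recovers index * 4 bytes; an unboxed integer index would use TIMES_WORD_SIZE.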
1118 void LoadFromStack(Register dst, intptr_t depth);
1119 void StoreToStack(Register src, intptr_t depth);
1120 void CompareToStack(Register src, intptr_t depth);
1121
1122 void Store(Register reg,
1123 const Address& address,
1124 OperandSize type,
1125 Condition cond);
1126 void Store(Register reg,
1127 const Address& address,
1128 OperandSize type = kFourBytes) override {
1129 Store(reg, address, type, AL);
1130 }
1131 void StoreToOffset(Register reg,
1132 Register base,
1133 int32_t offset,
1134 OperandSize type = kFourBytes) override {
1135 StoreToOffset(reg, base, offset, type, AL);
1136 }
1137 void StoreToOffset(Register reg,
1138 Register base,
1139 int32_t offset,
1140 OperandSize type,
1141 Condition cond) {
1142 Store(reg, Address(base, offset), type, cond);
1143 }
1144 void StoreFieldToOffset(Register reg,
1145 Register base,
1146 int32_t offset,
1147 OperandSize type = kFourBytes) override {
1148 StoreFieldToOffset(reg, base, offset, type, AL);
1149 }
1150 void StoreFieldToOffset(Register reg,
1151 Register base,
1152 int32_t offset,
1153 OperandSize type,
1154 Condition cond) {
1155 Store(reg, FieldAddress(base, offset), type, cond);
1156 }
1157 void StoreZero(const Address& address, Register temp) {
1158 mov(temp, Operand(0));
1159 Store(temp, address);
1160 }
1161 void LoadSFromOffset(SRegister reg,
1162 Register base,
1163 int32_t offset,
1164 Condition cond = AL);
1165 void StoreSToOffset(SRegister reg,
1166 Register base,
1167 int32_t offset,
1168 Condition cond = AL);
1169 void LoadDFromOffset(DRegister reg,
1170 Register base,
1171 int32_t offset,
1172 Condition cond = AL);
1173 void StoreDToOffset(DRegister reg,
1174 Register base,
1175 int32_t offset,
1176 Condition cond = AL);
1177
1178 void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
1179 LoadDFromOffset(EvenDRegisterOf(dst), base, offset);
1180 }
1181 void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset) {
1182 StoreDToOffset(EvenDRegisterOf(src), base, offset);
1183 }
1184 void MoveUnboxedDouble(FpuRegister dst, FpuRegister src) {
1185 if (src != dst) {
1186 vmovd(EvenDRegisterOf(dst), EvenDRegisterOf(src));
1187 }
1188 }
1189
1190 void LoadMultipleDFromOffset(DRegister first,
1191 intptr_t count,
1192 Register base,
1193 int32_t offset);
1194 void StoreMultipleDToOffset(DRegister first,
1195 intptr_t count,
1196 Register base,
1197 int32_t offset);
1198
1199 void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
1200 LoadMultipleDFromOffset(EvenDRegisterOf(dst), 2, base, offset);
1201 }
1202 void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset) {
1203 StoreMultipleDToOffset(EvenDRegisterOf(src), 2, base, offset);
1204 }
1205 void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
1206 if (src != dst) {
1207 vmovq(dst, src);
1208 }
1209 }
1210
1211 void Push(Register rd, Condition cond = AL);
1212 void Pop(Register rd, Condition cond = AL);
1213
1214 void PushList(RegList regs, Condition cond = AL);
1215 void PopList(RegList regs, Condition cond = AL);
1216
1217 void PushQuad(FpuRegister rd, Condition cond = AL);
1218 void PopQuad(FpuRegister rd, Condition cond = AL);
1219
1220 void PushRegisters(const RegisterSet& regs);
1221 void PopRegisters(const RegisterSet& regs);
1222
1223 void PushRegistersInOrder(std::initializer_list<Register> regs);
1224
1225 // Push all registers which are callee-saved according to the ARM ABI.
1226 void PushNativeCalleeSavedRegisters();
1227
1228 // Pop all registers which are callee-saved according to the ARM ABI.
1229 void PopNativeCalleeSavedRegisters();
1230
1231 void CompareRegisters(Register rn, Register rm) { cmp(rn, Operand(rm)); }
1232 void CompareObjectRegisters(Register rn, Register rm) {
1233 CompareRegisters(rn, rm);
1234 }
1235 // Branches to the given label if the condition holds.
1236 // [distance] is ignored on ARM.
1237 void BranchIf(Condition condition,
1238 Label* label,
1239 JumpDistance distance = kFarJump) {
1240 b(label, condition);
1241 }
1242 void BranchIfZero(Register rn,
1243 Label* label,
1244 JumpDistance distance = kFarJump) {
1245 cmp(rn, Operand(0));
1246 b(label, ZERO);
1247 }
1248 void BranchIfBit(Register rn,
1249 intptr_t bit_number,
1250 Condition condition,
1251 Label* label,
1252 JumpDistance distance = kFarJump) {
1253 tst(rn, Operand(1 << bit_number));
1254 b(label, condition);
1255 }
1256
1257 void MoveRegister(Register rd, Register rm, Condition cond) {
1258 ExtendValue(rd, rm, kFourBytes, cond);
1259 }
1260 void MoveRegister(Register rd, Register rm) override {
1261 MoveRegister(rd, rm, AL);
1262 }
1263 void MoveAndSmiTagRegister(Register rd, Register rm, Condition cond) {
1264 ExtendAndSmiTagValue(rd, rm, kFourBytes, cond);
1265 }
1266 void MoveAndSmiTagRegister(Register rd, Register rm) override {
1267 MoveAndSmiTagRegister(rd, rm, AL);
1268 }
1269 void ExtendValue(Register rd, Register rm, OperandSize sz, Condition cond);
1270 void ExtendValue(Register rd, Register rm, OperandSize sz) override {
1271 ExtendValue(rd, rm, sz, AL);
1272 }
1273 void ExtendAndSmiTagValue(Register rd,
1274 Register rm,
1275 OperandSize sz,
1276 Condition cond) {
1277 ExtendValue(rd, rm, sz, cond);
1278 SmiTag(rd, cond);
1279 }
1280 void ExtendAndSmiTagValue(Register rd,
1281 Register rm,
1282 OperandSize sz = kFourBytes) override {
1283 ExtendAndSmiTagValue(rd, rm, sz, AL);
1284 }
1285
1286 // Convenience shift instructions. Use mov instruction with shifter operand
1287 // for variants setting the status flags.
1288 void Lsl(Register rd,
1289 Register rm,
1290 const Operand& shift_imm,
1291 Condition cond = AL);
1292 void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
1293 void Lsr(Register rd,
1294 Register rm,
1295 const Operand& shift_imm,
1296 Condition cond = AL);
1297 void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
1298 void Asr(Register rd,
1299 Register rm,
1300 const Operand& shift_imm,
1301 Condition cond = AL);
1302 void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
1303 void Asrs(Register rd,
1304 Register rm,
1305 const Operand& shift_imm,
1306 Condition cond = AL);
1307 void Ror(Register rd,
1308 Register rm,
1309 const Operand& shift_imm,
1310 Condition cond = AL);
1311 void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
1312 void Rrx(Register rd, Register rm, Condition cond = AL);
1313
1314 // Fill rd with the sign of rm.
1315 void SignFill(Register rd, Register rm, Condition cond = AL);
1316
1319 // If qm must be preserved, then provide a (non-QTMP) temporary.
1320 void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp);
1321 void Vdivqs(QRegister qd, QRegister qn, QRegister qm);
1322
1323 void SmiTag(Register reg, Condition cond) { SmiTag(reg, reg, cond); }
1324 void SmiTag(Register reg) override { SmiTag(reg, AL); }
1325
1326 void SmiTag(Register dst, Register src, Condition cond = AL) {
1327 Lsl(dst, src, Operand(kSmiTagSize), cond);
1328 }
1329
1330 void SmiUntag(Register reg, Condition cond = AL) { SmiUntag(reg, reg, cond); }
1331
1332 void SmiUntag(Register dst, Register src, Condition cond = AL) {
1333 Asr(dst, src, Operand(kSmiTagSize), cond);
1334 }
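// Tagging sketch (illustrative): with kSmiTagSize == 1 and a zero tag bit,
// SmiTag turns the integer 3 into the Smi 6 (3 << 1) and SmiUntag shifts it
// back with an arithmetic right shift, preserving the sign of negative values.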
1335
1336 // Untag the value in the register assuming it is a smi.
1337 // Untagging shifts tag bit into the carry flag - if carry is clear
1338 // assumption was correct. In this case jump to the is_smi label.
1339 // Otherwise fall-through.
1340 void SmiUntag(Register reg, Label* is_smi) {
1341 ASSERT(kSmiTagSize == 1);
1342 movs(reg, Operand(reg, ASR, kSmiTagSize));
1343 b(is_smi, CC);
1344 }
1345
1346 // For ARM, the near argument is ignored.
1347 void BranchIfNotSmi(Register reg,
1348 Label* label,
1349 JumpDistance distance = kFarJump) {
1350 tst(reg, Operand(kSmiTagMask));
1351 b(label, NE);
1352 }
1353
1354 // Truncates upper bits.
1355 void LoadInt32FromBoxOrSmi(Register result, Register value) override {
1356 if (result == value) {
1357 ASSERT(TMP != value);
1358 MoveRegister(TMP, value);
1359 value = TMP;
1360 }
1361 ASSERT(value != result);
1365 Bind(&done);
1366 }
1367
1368 // For ARM, the near argument is ignored.
1370 Label* label,
1371 JumpDistance distance = kFarJump) override {
1372 tst(reg, Operand(kSmiTagMask));
1373 b(label, EQ);
1374 }
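// Why tst works here (illustrative): Smis keep their low bit clear, so
// "tst reg, #kSmiTagMask" sets Z exactly for Smis; EQ branches on a Smi and
// NE branches on a heap object (see BranchIfNotSmi above).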
1375
1377
1378 void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override;
1379 void CompareWords(Register reg1,
1380 Register reg2,
1381 intptr_t offset,
1382 Register count,
1383 Register temp,
1384 Label* equals) override;
1385
1386 // Function frame setup and tear down.
1387 void EnterFrame(RegList regs, intptr_t frame_space);
1388 void LeaveFrame(RegList regs, bool allow_pop_pc = false);
1389 void Ret(Condition cond = AL);
1390
1391 // Sets the return address to [value] as if there was a call.
1392 // On ARM sets LR.
1393 void SetReturnAddress(Register value);
1394
1395 void ReserveAlignedFrameSpace(intptr_t frame_space);
1396
1397 // In debug mode, this generates code to check that:
1398 // FP + kExitLinkSlotFromEntryFp == SP
1399 // or triggers breakpoint otherwise.
1400 //
1401 // Requires a scratch register in addition to the assembler temporary.
1402 void EmitEntryFrameVerification(Register scratch);
1403
1404 // For non-leaf runtime calls. For leaf runtime calls, use LeafRuntimeScope,
1405 void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
1406
1407 // Set up a Dart frame on entry with a frame pointer and PC information to
1408 // enable easy access to the RawInstruction object of code corresponding
1409 // to this frame.
1410 void EnterDartFrame(intptr_t frame_size, bool load_pool_pointer = true);
1411
1412 void LeaveDartFrame();
1413
1414 // Leaves the frame and returns.
1415 //
1416 // The difference to "LeaveDartFrame(); Ret();" is that we return using
1417 //
1418 // ldmia sp!, {fp, pc}
1419 //
1420 // instead of
1421 //
1422 // ldmia sp!, {fp, lr}
1423 // blx lr
1424 //
1425 // This means that our return must go to ARM mode (and not thumb).
1426 void LeaveDartFrameAndReturn();
1427
1428 // Set up a Dart frame for a function compiled for on-stack replacement.
1429 // The frame layout is a normal Dart frame, but the frame is partially set
1430 // up on entry (it is the frame of the unoptimized code).
1431 void EnterOsrFrame(intptr_t extra_size);
1432
1433 // Set up a stub frame so that the stack traversal code can easily identify
1434 // a stub frame.
1435 void EnterStubFrame();
1436 void LeaveStubFrame();
1437
1438 // Set up a frame for calling a C function.
1439 // Automatically save the pinned registers in Dart which are not callee-
1440 // saved in the native calling convention.
1441 // Use together with CallCFunction.
1442 void EnterCFrame(intptr_t frame_space);
1443 void LeaveCFrame();
1444
1448
1449 void CombineHashes(Register dst, Register other) override;
1450 void FinalizeHashForSize(intptr_t bit_size,
1451 Register dst,
1452 Register scratch = TMP) override;
1453
1454 // The register into which the allocation tracing state table is loaded with
1455 // LoadAllocationTracingStateAddress should be passed to MaybeTraceAllocation.
1456 //
1457 // These are separate assembler macros so we can avoid a dependent load too
1458 // nearby the load of the table address.
1459 void LoadAllocationTracingStateAddress(Register dest, intptr_t cid);
1460 void LoadAllocationTracingStateAddress(Register dest, Register cid);
1461
1462 // If true is returned, then the out parameter [need_base] signifies whether
1463 // a register is needed for storing the array base (which should be passed
1464 // as the [temp] parameter to ElementAddressForIntIndex).
1465 static bool AddressCanHoldConstantIndex(const Object& constant,
1466 bool is_load,
1467 bool is_external,
1468 intptr_t cid,
1469 intptr_t index_scale,
1470 bool* needs_base = nullptr);
1471
1472 Address ElementAddressForIntIndex(bool is_load,
1473 bool is_external,
1474 intptr_t cid,
1475 intptr_t index_scale,
1476 Register array,
1477 intptr_t index,
1478 Register temp);
1479
1480 void LoadElementAddressForIntIndex(Register address,
1481 bool is_load,
1482 bool is_external,
1483 intptr_t cid,
1484 intptr_t index_scale,
1485 Register array,
1486 intptr_t index);
1487
1488 Address ElementAddressForRegIndex(bool is_load,
1489 bool is_external,
1490 intptr_t cid,
1491 intptr_t index_scale,
1492 bool index_unboxed,
1493 Register array,
1494 Register index);
1495
1496 void LoadElementAddressForRegIndex(Register address,
1497 bool is_load,
1498 bool is_external,
1499 intptr_t cid,
1500 intptr_t index_scale,
1501 bool index_unboxed,
1502 Register array,
1503 Register index);
1504
1505 void LoadStaticFieldAddress(Register address,
1506 Register field,
1507 Register scratch,
1508 bool is_shared);
1509
1510 void LoadFieldAddressForRegOffset(Register address,
1511 Register instance,
1512 Register offset_in_words_as_smi) override;
1513
1514 void LoadFieldAddressForOffset(Register address,
1515 Register instance,
1516 int32_t offset) override {
1517 AddImmediate(address, instance, offset - kHeapObjectTag);
1518 }
1519
1525
1526 // If allocation tracing is enabled, will jump to |trace| label,
1527 // which will allocate in the runtime where tracing occurs.
1528 void MaybeTraceAllocation(Register stats_addr_reg, Label* trace);
1529
1530 // If allocation tracing for |cid| is enabled, will jump to |trace| label,
1531 // which will allocate in the runtime where tracing occurs.
1532 void MaybeTraceAllocation(intptr_t cid,
1533 Label* trace,
1534 Register temp_reg,
1535 JumpDistance distance = JumpDistance::kFarJump);
1536
1537 void MaybeTraceAllocation(Register cid,
1538 Label* trace,
1539 Register temp_reg,
1540 JumpDistance distance = JumpDistance::kFarJump);
1541
1542 void TryAllocateObject(intptr_t cid,
1543 intptr_t instance_size,
1544 Label* failure,
1545 JumpDistance distance,
1546 Register instance_reg,
1547 Register temp_reg) override;
1548
1549 void TryAllocateArray(intptr_t cid,
1550 intptr_t instance_size,
1551 Label* failure,
1552 Register instance_reg,
1553 Register end_address,
1554 Register temp1,
1555 Register temp2);
1556
1557 void CheckAllocationCanary(Register top, Register tmp = TMP) {
1558#if defined(DEBUG)
1559 Label okay;
1560 ldr(tmp, Address(top, 0));
1561 CompareImmediate(tmp, kAllocationCanary);
1562 b(&okay, EQUAL);
1563 Stop("Allocation canary");
1564 Bind(&okay);
1565#endif
1566 }
1567 void WriteAllocationCanary(Register top) {
1568#if defined(DEBUG)
1569 ASSERT(top != TMP);
1570 LoadImmediate(TMP, kAllocationCanary);
1571 str(TMP, Address(top, 0));
1572#endif
1573 }
1574
1575 // Copy [size] bytes from [src] address to [dst] address.
1576 // [size] should be a multiple of word size.
1577 // Clobbers [src], [dst], [size] and [temp] registers.
1578 void CopyMemoryWords(Register src,
1579 Register dst,
1580 Register size,
1581 Register temp);
1582
1583 // This emits a PC-relative call of the form "blr.<cond> <offset>". The
1584 // offset is not yet known and therefore needs relocation to the right place
1585 // before the code can be used.
1586 //
1587 // The necessary information for the "linker" (i.e. the relocation
1588 // information) is stored in [UntaggedCode::static_calls_target_table_]: an
1589 // entry of the form
1590 //
1591 // (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
1592 //
1593 // will be used during relocation to fix the offset.
1594 //
1595 // The provided [offset_into_target] will be added to calculate the final
1596 // destination. It can be used e.g. for calling into the middle of a
1597 // function.
1598 void GenerateUnRelocatedPcRelativeCall(Condition cond = AL,
1599 intptr_t offset_into_target = 0);
1600
1601 // This emits a PC-relative tail call of the form "b.<cond> <offset>".
1602 //
1603 // See also above for the pc-relative call.
1604 void GenerateUnRelocatedPcRelativeTailCall(Condition cond = AL,
1605 intptr_t offset_into_target = 0);
1606
1607 // Emit data (e.g encoded instruction or immediate) in instruction stream.
1608 void Emit(int32_t value);
1609
1610 // On some other platforms, we draw a distinction between safe and unsafe
1611 // smis.
1612 static bool IsSafe(const Object& object) { return true; }
1613 static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
1614
1615 bool constant_pool_allowed() const { return constant_pool_allowed_; }
1616 void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
1617
1618 compiler::LRState lr_state() const { return lr_state_; }
1619 void set_lr_state(compiler::LRState b) { lr_state_ = b; }
1620
1621 // Whether we can branch to a target which is [distance] bytes away from the
1622 // beginning of the branch instruction.
1623 //
1624 // Use this function for testing whether [distance] can be encoded using the
1625 // 24-bit offsets in the branch instructions, which are multiples of 4.
1626 static bool CanEncodeBranchDistance(int32_t distance) {
1627 ASSERT(Utils::IsAligned(distance, 4));
1628 // The distance is off by 8 due to the way the ARM CPUs read PC.
1629 distance -= Instr::kPCReadOffset;
1630 distance >>= 2;
1631 return Utils::IsInt(24, distance);
1632 }
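// Range sketch (illustrative): a 24-bit signed word offset reaches roughly
// +/-32 MB (2^25 bytes) around the branch; anything farther must go through
// the far-branch path selected by use_far_branches().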
1633
1634 static int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
1635 static int32_t DecodeBranchOffset(int32_t inst);
1636
1637 private:
1638 bool use_far_branches_;
1639
1640 bool constant_pool_allowed_;
1641
1642 compiler::LRState lr_state_ = compiler::LRState::OnEntry();
1643
1644 // If you are thinking of using one or both of these instructions directly,
1645 // instead LoadImmediate should probably be used.
1646 void movw(Register rd, uint16_t imm16, Condition cond = AL);
1647 void movt(Register rd, uint16_t imm16, Condition cond = AL);
1648
1649 void BindARMv7(Label* label);
1650
1651 void BranchLink(const ExternalLabel* label);
1652 void BranchLink(intptr_t target_code_pool_index, CodeEntryKind entry_kind);
1653
1654 void LoadObjectHelper(
1655 Register rd,
1656 const Object& object,
1657 Condition cond,
1658 bool is_unique,
1659 Register pp,
1660 ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior =
1661 ObjectPoolBuilderEntry::kSnapshotable);
1662
1663 void EmitType01(Condition cond,
1664 int type,
1665 Opcode opcode,
1666 int set_cc,
1667 Register rn,
1668 Register rd,
1669 Operand o);
1670
1671 void EmitType5(Condition cond, int32_t offset, bool link);
1672
1673 void EmitMemOp(Condition cond, bool load, bool byte, Register rd, Address ad);
1674
1675 void EmitMemOpAddressMode3(Condition cond,
1676 int32_t mode,
1677 Register rd,
1678 Address ad);
1679
1680 void EmitMultiMemOp(Condition cond,
1681 BlockAddressMode am,
1682 bool load,
1683 Register base,
1684 RegList regs);
1685
1686 void EmitShiftImmediate(Condition cond,
1687 Shift opcode,
1688 Register rd,
1689 Register rm,
1690 Operand o);
1691
1692 void EmitShiftRegister(Condition cond,
1693 Shift opcode,
1694 Register rd,
1695 Register rm,
1696 Operand o);
1697
1698 void EmitMulOp(Condition cond,
1699 int32_t opcode,
1700 Register rd,
1701 Register rn,
1702 Register rm,
1703 Register rs);
1704
1705 void EmitDivOp(Condition cond,
1706 int32_t opcode,
1707 Register rd,
1708 Register rn,
1709 Register rm);
1710
1711 void EmitMultiVSMemOp(Condition cond,
1712 BlockAddressMode am,
1713 bool load,
1714 Register base,
1715 SRegister start,
1716 uint32_t count);
1717
1718 void EmitMultiVDMemOp(Condition cond,
1719 BlockAddressMode am,
1720 bool load,
1721 Register base,
1722 DRegister start,
1723 int32_t count);
1724
1725 void EmitVFPsss(Condition cond,
1726 int32_t opcode,
1727 SRegister sd,
1728 SRegister sn,
1729 SRegister sm);
1730
1731 void EmitVFPddd(Condition cond,
1732 int32_t opcode,
1733 DRegister dd,
1734 DRegister dn,
1735 DRegister dm);
1736
1737 void EmitVFPsd(Condition cond, int32_t opcode, SRegister sd, DRegister dm);
1738
1739 void EmitVFPds(Condition cond, int32_t opcode, DRegister dd, SRegister sm);
1740
1741 void EmitSIMDqqq(int32_t opcode,
1742 OperandSize sz,
1743 QRegister qd,
1744 QRegister qn,
1745 QRegister qm);
1746
1747 void EmitSIMDddd(int32_t opcode,
1748 OperandSize sz,
1749 DRegister dd,
1750 DRegister dn,
1751 DRegister dm);
1752
1753 void EmitFarBranch(Condition cond, int32_t offset, bool link);
1754 void EmitBranch(Condition cond, Label* label, bool link);
1755 void BailoutIfInvalidBranchOffset(int32_t offset);
1756 int32_t EncodeTstOffset(int32_t offset, int32_t inst);
1757 int32_t DecodeTstOffset(int32_t inst);
1758
1760 std::function<void(Condition, Register)>
1761 generate_invoke_write_barrier_wrapper_;
1762 std::function<void(Condition)> generate_invoke_array_write_barrier_;
1763
1764 DISALLOW_ALLOCATION();
1765 DISALLOW_COPY_AND_ASSIGN(Assembler);
1766};
1767
1768} // namespace compiler
1769} // namespace dart
1770
1771#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
static void done(const char *config, const char *src, const char *srcOptions, const char *name)
Definition: DM.cpp:263
int count
Definition: FontMgrTest.cpp:50
static bool rotate(const SkDCubic &cubic, int zero, int index, SkDCubic &rotPath)
bool equals(SkDrawable *a, SkDrawable *b)
SI T load(const P *ptr)
Definition: Transform_inl.h:98
#define UNREACHABLE()
Definition: assert.h:248
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
#define ASSERT_IMPLIES(antecedent, consequent)
Definition: assert.h:317
GLenum type
static Instr * At(uword pc)
Register RnField() const
Register RmField() const
static bool IsInt(intptr_t N, T value)
Definition: utils.h:313
static bool MagnitudeIsUint(intptr_t N, T value)
Definition: utils.h:352
static constexpr int ShiftForPowerOfTwo(T x)
Definition: utils.h:81
static T RotateLeft(T value, uint8_t rotate)
Definition: utils.h:474
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:92
static constexpr bool IsPowerOfTwo(T x)
Definition: utils.h:76
Address(Register rn, Register r, Mode am)
Address(Register rn, Register rm, Shift shift, Register r, Mode am=Offset)
Address & operator=(const Address &other)
static OperandSize OperandSizeFor(intptr_t cid)
static bool CanHoldStoreOffset(OperandSize size, int32_t offset, int32_t *offset_mask)
Address(Register rn, Register rm, Shift shift=LSL, uint32_t shift_imm=0, Mode am=Offset)
static bool CanHoldImmediateOffset(bool is_load, intptr_t cid, int64_t offset)
Address(Register rn, int32_t offset=0, Mode am=Offset)
bool Equals(const Address &other) const
static bool CanHoldLoadOffset(OperandSize size, int32_t offset, int32_t *offset_mask)
Address(const Address &other)
static uint32_t Rs(Register rs)
static uint32_t Rm(Register rm)
static uint32_t Rd(Register rd)
static uint32_t Rn(Register rn)
void Stop(const char *message)
ObjectPoolBuilder & object_pool_builder()
void vaddqs(QRegister qd, QRegister qn, QRegister qm)
void PushRegistersInOrder(std::initializer_list< Register > regs)
void PopRegisterPair(Register r0, Register r1)
void MoveUnboxedDouble(FpuRegister dst, FpuRegister src)
void PopList(RegList regs, Condition cond=AL)
void LoadPatchableImmediate(Register rd, int32_t value, Condition cond=AL)
void smull(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
void SignFill(Register rd, Register rm, Condition cond=AL)
bool CanLoadFromObjectPool(const Object &object) const
void eor(Register rd, Register rn, Operand o, Condition cond=AL)
void LoadSFromOffset(SRegister reg, Register base, int32_t offset, Condition cond=AL)
void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void StoreUnboxedDouble(FpuRegister src, Register base, int32_t offset)
void bx(Register rm, Condition cond=AL)
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src)
void BranchIfZero(Register rn, Label *label, JumpDistance distance=kFarJump)
void Load(Register reg, const Address &address, OperandSize type=kFourBytes) override
void LoadAllocationTracingStateAddress(Register dest, Register cid)
void CombineHashes(Register dst, Register other) override
void GenerateUnRelocatedPcRelativeTailCall(Condition cond=AL, intptr_t offset_into_target=0)
void orr(Register rd, Register rn, Operand o, Condition cond=AL)
void BranchIfSmi(Register reg, Label *label, JumpDistance distance=kFarJump) override
void LoadTaggedClassIdMayBeSmi(Register result, Register object)
void Call(const Code &code)
void LoadWordUnaligned(Register dst, Register addr, Register tmp)
void BranchLink(const Code &code, ObjectPoolBuilderEntry::Patchability patchable=ObjectPoolBuilderEntry::kNotPatchable, CodeEntryKind entry_kind=CodeEntryKind::kNormal, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void PushList(RegList regs, Condition cond=AL)
static int32_t DecodeBranchOffset(int32_t inst)
void LoadDFromOffset(DRegister reg, Register base, int32_t offset, Condition cond=AL)
void ldr(Register rd, Address ad, Condition cond=AL)
void vmaxqs(QRegister qd, QRegister qn, QRegister qm)
void vcgtqs(QRegister qd, QRegister qn, QRegister qm)
void Load(Register reg, const Address &address, OperandSize type, Condition cond)
void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond=AL)
void PushRegisterPair(Register r0, Register r1)
void CompareClassId(Register object, intptr_t class_id, Register scratch)
void LoadFieldFromOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void LoadIsolate(Register rd)
Assembler(ObjectPoolBuilder *object_pool_builder, intptr_t far_branch_level=0)
void vcvtsu(SRegister sd, SRegister sm, Condition cond=AL)
void vcvtus(SRegister sd, SRegister sm, Condition cond=AL)
void LoadUniqueObject(Register rd, const Object &object, Condition cond=AL, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void StoreHalfWordUnaligned(Register src, Register addr, Register tmp)
void MaybeTraceAllocation(Register stats_addr_reg, Label *trace)
void LoadFieldFromOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void CheckAllocationCanary(Register top, Register tmp=TMP)
void vcvtdu(DRegister dd, SRegister sm, Condition cond=AL)
void vornq(QRegister qd, QRegister qn, QRegister qm)
void mls(Register rd, Register rn, Register rm, Register ra, Condition cond=AL)
void vstrs(SRegister sd, Address ad, Condition cond=AL)
void Lsl(Register rd, Register rm, Register rs, Condition cond=AL)
static bool CanEncodeBranchDistance(int32_t distance)
void veorq(QRegister qd, QRegister qn, QRegister qm)
void vcmps(SRegister sd, SRegister sm, Condition cond=AL)
void TryAllocateObject(intptr_t cid, intptr_t instance_size, Label *failure, JumpDistance distance, Register instance_reg, Register temp_reg) override
void SmiUntag(Register dst, Register src, Condition cond=AL)
void Jump(Label *label, JumpDistance distance=kFarJump)
void StoreFieldToOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void bics(Register rd, Register rn, Operand o, Condition cond=AL)
void CompareObjectRegisters(Register rn, Register rm)
void LoadAcquire(Register dst, const Address &address, OperandSize size=kFourBytes) override
void Pop(Register rd, Condition cond=AL)
void vcmpsz(SRegister sd, Condition cond=AL)
void vcmpd(DRegister dd, DRegister dm, Condition cond=AL)
void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void LoadFromStack(Register dst, intptr_t depth)
static int32_t BkptEncoding(uint16_t imm16)
void LoadAllocationTracingStateAddress(Register dest, intptr_t cid)
void vabss(SRegister sd, SRegister sm, Condition cond=AL)
void orrs(Register rd, Register rn, Operand o, Condition cond=AL)
void vldms(BlockAddressMode am, Register base, SRegister first, SRegister last, Condition cond=AL)
void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void EnterDartFrame(intptr_t frame_size, bool load_pool_pointer=true)
void ubfx(Register rd, Register rn, int32_t lsb, int32_t width, Condition cond=AL)
bool vmovd(DRegister dd, double d_imm, Condition cond=AL)
void TestImmediate(Register rn, int32_t imm, Condition cond=AL)
void CompareImmediate(Register rn, int32_t value, Condition cond)
void CompareWithMemoryValue(Register value, Address address, OperandSize size=kFourBytes) override
void PushRegister(Register r)
void LoadMemoryValue(Register dst, Register base, int32_t offset)
void Ror(Register rd, Register rm, Register rs, Condition cond=AL)
void vstrd(DRegister dd, Address ad, Condition cond=AL)
void vmstat(Condition cond=AL)
void ExtractClassIdFromTags(Register result, Register tags, Condition cond=AL)
void PushQuad(FpuRegister rd, Condition cond=AL)
void b(Label *label, Condition cond=AL)
void vcvtid(SRegister sd, DRegister dm, Condition cond=AL)
void ExtendAndSmiTagValue(Register rd, Register rm, OperandSize sz=kFourBytes) override
void umlal(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
void Lsl(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void set_constant_pool_allowed(bool b)
void vmovd(DRegister dd, DRegister dm, Condition cond=AL)
void LoadDImmediate(DRegister dd, double value, Register scratch, Condition cond=AL)
void vcmpdz(DRegister dd, Condition cond=AL)
void vmovrs(Register rt, SRegister sn, Condition cond=AL)
void PushRegisters(const RegisterSet &regs)
void IntegerDivide(Register result, Register left, Register right, DRegister tmpl, DRegister tmpr)
void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void LslImmediate(Register rd, Register rn, int32_t shift)
void MoveRegister(Register rd, Register rm) override
void SmiTag(Register dst, Register src, Condition cond=AL)
void Lsr(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void vcvtdi(DRegister dd, SRegister sm, Condition cond=AL)
void StoreObjectIntoObjectNoBarrier(Register object, const Address &dest, const Object &value, MemoryOrder memory_order=kRelaxedNonAtomic, OperandSize size=kWordBytes) override
void LoadClassById(Register result, Register class_id)
void StoreWordToPoolIndex(Register value, intptr_t index, Register pp=PP, Condition cond=AL)
void BranchLinkWithEquivalence(const Code &code, const Object &equivalence, CodeEntryKind entry_kind=CodeEntryKind::kNormal)
void PushValueAtOffset(Register base, int32_t offset)
void StoreWordUnaligned(Register src, Register addr, Register tmp)
void adds(Register rd, Register rn, Operand o, Condition cond=AL)
void vmovs(SRegister sd, SRegister sm, Condition cond=AL)
void Vdivqs(QRegister qd, QRegister qn, QRegister qm)
void PopRegisters(const RegisterSet &regs)
void muls(Register rd, Register rn, Register rm, Condition cond=AL)
void AddImmediateSetFlags(Register rd, Register rn, int32_t value, Condition cond=AL)
void cmn(Register rn, Operand o, Condition cond=AL)
void Push(Register rd, Condition cond=AL)
void Ror(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void mla(Register rd, Register rn, Register rm, Register ra, Condition cond=AL)
void LsrImmediate(Register rd, int32_t shift) override
void str(Register rd, Address ad, Condition cond=AL)
void OrImmediate(Register rd, int32_t imm, Condition cond=AL)
void BranchLinkOffset(Register base, int32_t offset)
void mov(Register rd, Operand o, Condition cond=AL)
void SetReturnAddress(Register value)
void vceqqs(QRegister qd, QRegister qn, QRegister qm)
void LoadPoolPointer(Register reg=PP)
void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void tst(Register rn, Operand o, Condition cond=AL)
void LoadInt32FromBoxOrSmi(Register result, Register value) override
void vcgeqs(QRegister qd, QRegister qn, QRegister qm)
void BranchIf(Condition condition, Label *label, JumpDistance distance=kFarJump)
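Control flow is written against Labels that are later bound with Bind, using conditional branches such as BranchIf after a flag-setting comparison. A minimal sketch, with __ abbreviating an Assembler pointer and R0/EQ taken from constants_arm.h:

  Label done;
  __ CompareImmediate(R0, 0);     // sets the condition flags
  __ BranchIf(EQ, &done);         // taken when R0 == 0
  __ AddImmediate(R0, R0, -1);    // otherwise decrement R0
  __ Bind(&done);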
void EnterOsrFrame(intptr_t extra_size)
void vcvtsi(SRegister sd, SRegister sm, Condition cond=AL)
void vshlqu(OperandSize sz, QRegister qd, QRegister qm, QRegister qn)
void stm(BlockAddressMode am, Register base, RegList regs, Condition cond=AL)
void CompareObject(Register rn, const Object &object)
void vzipqw(QRegister qd, QRegister qm)
void vnegqs(QRegister qd, QRegister qm)
void Asr(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void LoadElementAddressForIntIndex(Register address, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, Register array, intptr_t index)
void Store(Register reg, const Address &address, OperandSize type=kFourBytes) override
void vmovsr(SRegister sn, Register rt, Condition cond=AL)
void vminqs(QRegister qd, QRegister qn, QRegister qm)
void PushObject(const Object &object)
void AndRegisters(Register dst, Register src1, Register src2=kNoRegister) override
void PopQuad(FpuRegister rd, Condition cond=AL)
void mvn_(Register rd, Operand o, Condition cond=AL)
void vtbl(DRegister dd, DRegister dn, int length, DRegister dm)
void LoadMultipleDFromOffset(DRegister first, intptr_t count, Register base, int32_t offset)
void ldm(BlockAddressMode am, Register base, RegList regs, Condition cond=AL)
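stm and ldm take a BlockAddressMode (DB_W, IA_W, and so on) plus a RegList bitmask, mirroring the ARM STM/LDM addressing modes. A hedged sketch of a push/pop pair over two callee-saved registers, with __ abbreviating an Assembler pointer and R4/R5/SP taken from constants_arm.h:

  const RegList saved = (1 << R4) | (1 << R5);
  __ stm(DB_W, SP, saved);   // stmdb sp!, {r4, r5}: push both in one instruction
  // ... body that clobbers R4 and R5 ...
  __ ldm(IA_W, SP, saved);   // ldmia sp!, {r4, r5}: the matching pop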
void Rrx(Register rd, Register rm, Condition cond=AL)
void LoadNativeEntry(Register dst, const ExternalLabel *label, ObjectPoolBuilderEntry::Patchability patchable, Condition cond=AL)
void LsrImmediate(Register rd, Register rn, int32_t shift)
void ldrsh(Register rd, Address ad, Condition cond=AL)
void LoadElementAddressForRegIndex(Register address, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
void cmp(Register rn, Operand o, Condition cond=AL)
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset)
void LoadQImmediate(QRegister dd, simd128_value_t value)
void StoreMemoryValue(Register src, Register base, int32_t offset)
void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void StoreFieldToOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void bic(Register rd, Register rn, Operand o, Condition cond=AL)
Address ElementAddressForRegIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool index_unboxed, Register array, Register index)
void smlal(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
Address PrepareLargeLoadOffset(const Address &addr, OperandSize sz, Condition cond)
void StoreToStack(Register src, intptr_t depth)
void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void InitializeFieldsNoBarrierUnrolled(Register object, Register base, intptr_t begin_offset, intptr_t end_offset, Register value_even, Register value_odd)
void vandq(QRegister qd, QRegister qn, QRegister qm)
void AddScaled(Register dest, Register base, Register index, ScaleFactor scale, int32_t disp) override
void nop(Condition cond=AL)
void LoadStaticFieldAddress(Register address, Register field, Register scratch, bool is_shared)
void vsubqs(QRegister qd, QRegister qn, QRegister qm)
void SubRegisters(Register dest, Register src)
void rbit(Register rd, Register rm, Condition cond=AL)
void StoreToOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void StoreInternalPointer(Register object, const Address &dest, Register value)
void vaddqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void MulImmediate(Register reg, int32_t imm, OperandSize width=kFourBytes) override
void AddRegisters(Register dest, Register src)
void rsb(Register rd, Register rn, Operand o, Condition cond=AL)
void Bind(Label *label) override
void ReserveAlignedFrameSpace(intptr_t frame_space)
void vstmd(BlockAddressMode am, Register base, DRegister first, intptr_t count, Condition cond=AL)
void blx(Register rm, Condition cond=AL)
void strd(Register rd, Register rd2, Register rn, int32_t offset, Condition cond=AL)
void vsqrts(SRegister sd, SRegister sm, Condition cond=AL)
void ldrsb(Register rd, Address ad, Condition cond=AL)
void LoadImmediate(Register rd, int32_t value, Condition cond)
void sbc(Register rd, Register rn, Operand o, Condition cond=AL)
void RangeCheck(Register value, Register temp, intptr_t low, intptr_t high, RangeCheckCondition condition, Label *target) override
void Ret(Condition cond=AL)
void vldrd(DRegister dd, Address ad, Condition cond=AL)
void movs(Register rd, Operand o, Condition cond=AL)
void LoadClassId(Register result, Register object, Condition cond=AL)
void sbcs(Register rd, Register rn, Operand o, Condition cond=AL)
void StoreMultipleDToOffset(DRegister first, intptr_t count, Register base, int32_t offset)
void teq(Register rn, Operand o, Condition cond=AL)
void bl(Label *label, Condition cond=AL)
void GenerateUnRelocatedPcRelativeCall(Condition cond=AL, intptr_t offset_into_target=0)
void PushImmediate(int32_t immediate)
bool constant_pool_allowed() const
void PopRegister(Register r)
void vorrq(QRegister qd, QRegister qn, QRegister qm)
void ArrayStoreBarrier(Register object, Register slot, Register value, CanBeSmi can_be_smi, Register scratch) override
void LslRegister(Register dst, Register shift) override
void strh(Register rd, Address ad, Condition cond=AL)
void add(Register rd, Register rn, Operand o, Condition cond=AL)
void ldrb(Register rd, Address ad, Condition cond=AL)
void StoreRelease(Register src, const Address &address, OperandSize size=kFourBytes) override
void ExitFullSafepoint(Register scratch0, Register scratch1, bool ignore_unwind_in_progress)
void LoadHalfWordUnsignedUnaligned(Register dst, Register addr, Register tmp)
void mul(Register rd, Register rn, Register rm, Condition cond=AL)
void Call(Address target, Condition cond=AL)
void vcvtud(SRegister sd, DRegister dm, Condition cond=AL)
void StoreSToOffset(SRegister reg, Register base, int32_t offset, Condition cond=AL)
void WriteAllocationCanary(Register top)
Address ElementAddressForIntIndex(bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, Register array, intptr_t index, Register temp)
void vcvtds(DRegister dd, SRegister sm, Condition cond=AL)
void vmvnq(QRegister qd, QRegister qm)
void CallRuntime(const RuntimeEntry &entry, intptr_t argument_count)
void ExtractInstanceSizeFromTags(Register result, Register tags)
void LoadObject(Register rd, const Object &object, Condition cond=AL)
void Asrs(Register rd, Register rm, const Operand &shift_imm, Condition cond=AL)
void CompareRegisters(Register rn, Register rm)
void AndImmediate(Register rd, int32_t imm, Condition cond)
void ExtendValue(Register rd, Register rm, OperandSize sz) override
bool vmovs(SRegister sd, float s_imm, Condition cond=AL)
void CompareToStack(Register src, intptr_t depth)
void umull(Register rd_lo, Register rd_hi, Register rn, Register rm, Condition cond=AL)
void vrecpsqs(QRegister qd, QRegister qn, QRegister qm)
void sbfx(Register rd, Register rn, int32_t lsb, int32_t width, Condition cond=AL)
void SubImmediateSetFlags(Register rd, Register rn, int32_t value, Condition cond=AL)
void LeaveFrame(RegList regs, bool allow_pop_pc=false)
void LoadImmediate(Register rd, Immediate value, Condition cond=AL)
void bkpt(uint16_t imm16)
void VreciprocalSqrtqs(QRegister qd, QRegister qm)
void vceqqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void LoadImmediate(Register rd, int32_t value) override
void Vreciprocalqs(QRegister qd, QRegister qm)
void AddImmediate(Register rd, Register rn, int32_t value, Condition cond=AL)
void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void adcs(Register rd, Register rn, Operand o, Condition cond=AL)
void vcugeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond=AL)
void vsqrtd(DRegister dd, DRegister dm, Condition cond=AL)
void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void LoadFromOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm)
void Jump(const Address &address)
void vnegs(SRegister sd, SRegister sm, Condition cond=AL)
void vsubqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vcvtsd(SRegister sd, DRegister dm, Condition cond=AL)
void adc(Register rd, Register rn, Operand o, Condition cond=AL)
static bool AddressCanHoldConstantIndex(const Object &constant, bool is_load, bool is_external, intptr_t cid, intptr_t index_scale, bool *needs_base=nullptr)
void LoadWordFromPoolIndex(Register rd, intptr_t index, Register pp=PP, Condition cond=AL)
void strb(Register rd, Address ad, Condition cond=AL)
void MaybeTraceAllocation(intptr_t cid, Label *trace, Register temp_reg, JumpDistance distance=JumpDistance::kFarJump)
void CallCFunction(Register target, Condition cond=AL)
void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond=AL)
void BranchLinkPatchable(const Code &code, CodeEntryKind entry_kind=CodeEntryKind::kNormal, ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior=ObjectPoolBuilderEntry::kSnapshotable)
void vmulqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void vcgeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
static bool IsSafeSmi(const Object &object)
void vcgtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void TransitionGeneratedToNative(Register destination_address, Register exit_frame_fp, Register exit_through_ffi, Register scratch0, bool enter_safepoint)
void and_(Register rd, Register rn, Operand o, Condition cond=AL)
void vabsqs(QRegister qd, QRegister qm)
void SubImmediate(Register rd, Register rn, int32_t value, Condition cond=AL)
void vcvtis(SRegister sd, SRegister sm, Condition cond=AL)
void Drop(intptr_t stack_elements)
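The Push*, Pop*, and Drop helpers manage the stack in word-sized slots. A minimal usage sketch, with __ abbreviating an Assembler pointer and R1 taken from constants_arm.h:

  __ PushRegister(R1);    // one word pushed
  __ PushImmediate(0);    // a constant materialized and pushed
  __ Drop(2);             // discard both slots without reloading them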
void vabsd(DRegister dd, DRegister dm, Condition cond=AL)
void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset)
void EnterCFrame(intptr_t frame_space)
void InitializeFieldsNoBarrier(Register object, Register begin, Register end, Register value_even, Register value_odd)
void StoreBarrier(Register object, Register value, CanBeSmi can_be_smi, Register scratch) override
compiler::LRState lr_state() const
void vcugtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm)
void SmiTag(Register reg, Condition cond)
void AndImmediate(Register rd, int32_t imm) override
void vmovq(QRegister qd, QRegister qm)
void vldrs(SRegister sd, Address ad, Condition cond=AL)
void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void ands(Register rd, Register rn, Operand o, Condition cond=AL)
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override
void udiv(Register rd, Register rn, Register rm, Condition cond=AL)
void CompareWords(Register reg1, Register reg2, intptr_t offset, Register count, Register temp, Label *equals) override
void SmiUntag(Register dst, Register src, Label *is_smi)
void ExtendValue(Register rd, Register rm, OperandSize sz, Condition cond)
void clz(Register rd, Register rm, Condition cond=AL)
void BranchIfNotSmi(Register reg, Label *label, JumpDistance distance=kFarJump)
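SmiTag, SmiUntag, and BranchIfNotSmi rely on Dart's pointer tagging, in which a Smi is the integer shifted left by kSmiTagSize with a clear low bit while boxed heap objects carry kHeapObjectTag. A hedged sketch of the usual guard, with __ abbreviating an Assembler pointer and R0 taken from constants_arm.h:

  Label not_smi;
  __ BranchIfNotSmi(R0, &not_smi);   // low tag bit set means a boxed heap object
  __ SmiUntag(R0);                   // recover the untagged integer value
  // ... fast path on the untagged value ...
  __ Bind(&not_smi);                 // slow path: R0 still holds the boxed object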
void MoveAndSmiTagRegister(Register rd, Register rm) override
void ldrh(Register rd, Address ad, Condition cond=AL)
void vmulqs(QRegister qd, QRegister qn, QRegister qm)
void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx)
void vmrs(Register rd, Condition cond=AL)
void CallCFunction(Address target)
void vldmd(BlockAddressMode am, Register base, DRegister first, intptr_t count, Condition cond=AL)
void set_lr_state(compiler::LRState b)
void subs(Register rd, Register rn, Operand o, Condition cond=AL)
void vshlqi(OperandSize sz, QRegister qd, QRegister qm, QRegister qn)
static bool IsSafe(const Object &object)
void BranchIfBit(Register rn, intptr_t bit_number, Condition condition, Label *label, JumpDistance distance=kFarJump)
void OrImmediate(Register rd, Register rs, int32_t imm, Condition cond=AL)
void StoreIntoSmiField(const Address &dest, Register value)
void LoadFromOffset(Register reg, Register base, int32_t offset, OperandSize type, Condition cond)
void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp)
void Jump(Register target)
void TransitionNativeToGenerated(Register scratch0, Register scratch1, bool exit_safepoint, bool ignore_unwind_in_progress=false, bool set_tag=true)
void sdiv(Register rd, Register rn, Register rm, Condition cond=AL)
void mvns(Register rd, Operand o, Condition cond=AL)
void ldrd(Register rd, Register rd2, Register rn, int32_t offset, Condition cond=AL)
void LoadClassIdMayBeSmi(Register result, Register object)
void LoadSImmediate(SRegister sd, float value, Condition cond=AL)
void ldrex(Register rd, Register rn, Condition cond=AL)
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset)
void LoadIsolateGroup(Register dst)
void LoadFieldAddressForRegOffset(Register address, Register instance, Register offset_in_words_as_smi) override
void AndImmediateSetFlags(Register rd, Register rn, int32_t value, Condition cond=AL)
void BranchOnMonomorphicCheckedEntryJIT(Label *label)
void Store(Register reg, const Address &address, OperandSize type, Condition cond)
void SmiUntag(Register reg, Condition cond=AL)
void MoveRegister(Register rd, Register rm, Condition cond)
void Breakpoint() override
void sub(Register rd, Register rn, Operand o, Condition cond=AL)
void vnegd(DRegister dd, DRegister dm, Condition cond=AL)
void Emit(int32_t value)
void LoadFieldAddressForOffset(Register address, Register instance, int32_t offset) override
void EmitEntryFrameVerification(Register scratch)
void TryAllocateArray(intptr_t cid, intptr_t instance_size, Label *failure, Register instance, Register end_address, Register temp1, Register temp2)
void CompareImmediate(Register rn, int32_t value, OperandSize width=kFourBytes) override
void EnterFrame(RegList regs, intptr_t frame_space)
void StoreToOffset(Register reg, Register base, int32_t offset, OperandSize type=kFourBytes) override
void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond=AL)
void vrecpeqs(QRegister qd, QRegister qm)
void vmovdr(DRegister dd, int i, Register rt, Condition cond=AL)
void MaybeTraceAllocation(Register cid, Label *trace, Register temp_reg, JumpDistance distance=JumpDistance::kFarJump)
void vrsqrteqs(QRegister qd, QRegister qm)
void Asr(Register rd, Register rm, Register rs, Condition cond=AL)
void FinalizeHashForSize(intptr_t bit_size, Register dst, Register scratch=TMP) override
void strex(Register rd, Register rt, Register rn, Condition cond=AL)
void EnsureHasClassIdInDEBUG(intptr_t cid, Register src, Register scratch, bool can_be_null=false) override
void ExtendAndSmiTagValue(Register rd, Register rm, OperandSize sz, Condition cond)
void LslImmediate(Register rd, int32_t shift)
void rsc(Register rd, Register rn, Operand o, Condition cond=AL)
void LoadIndexedPayload(Register dst, Register base, int32_t payload_start, Register index, ScaleFactor scale, OperandSize type=kFourBytes) override
void StoreDToOffset(DRegister reg, Register base, int32_t offset, Condition cond=AL)
void AddImmediate(Register rd, int32_t value, Condition cond=AL)
void rsbs(Register rd, Register rn, Operand o, Condition cond=AL)
void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond=AL)
void MarkExceptionHandler(Label *label)
void vstms(BlockAddressMode am, Register base, SRegister first, SRegister last, Condition cond=AL)
void LoadDecodableImmediate(Register rd, int32_t value, Condition cond=AL)
void Branch(const Address &address, Condition cond=AL)
void EnterFullSafepoint(Register scratch0, Register scratch1)
static int32_t EncodeBranchOffset(int32_t offset, int32_t inst)
void CopyMemoryWords(Register src, Register dst, Register size, Register temp)
void LoadHalfWordUnaligned(Register dst, Register addr, Register tmp)
void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm)
void StoreZero(const Address &address, Register temp)
Address PrepareLargeStoreOffset(const Address &addr, OperandSize sz, Condition cond)
void MoveAndSmiTagRegister(Register rd, Register rm, Condition cond)
void SmiTag(Register reg) override
void Lsr(Register rd, Register rm, Register rs, Condition cond=AL)
void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond=AL)
void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) override
FieldAddress & operator=(const FieldAddress &other)
FieldAddress(Register base, Register r)
FieldAddress(const FieldAddress &other)
FieldAddress(Register base, int32_t disp)
Immediate(int32_t value)
Operand(uint32_t immediate)
Operand(uint32_t rotate, uint32_t immed8)
static bool CanHold(uint32_t immediate, Operand *o)
Operand(const Operand &other)
Operand(Register rm, Shift shift, Register rs)
Operand & operator=(const Operand &other)
Operand(Register rm, Shift shift, uint32_t shift_imm)
Register rm() const
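Operand::CanHold answers whether a 32-bit constant fits ARM's rotated 8-bit immediate (Operand2) form; immediate helpers such as AddImmediate typically wrap the same check-then-fallback pattern. A hedged sketch, assuming Operand is default-constructible as in the VM's own helpers, with __ abbreviating an Assembler pointer, R0/R1/TMP/LSL taken from constants_arm.h, and imm a hypothetical constant:

  const int32_t imm = 0x12345678;            // not expressible as a rotated 8-bit immediate
  Operand op;
  if (Operand::CanHold(imm, &op)) {
    __ add(R0, R1, op);                      // fits Operand2: a single ADD suffices
  } else {
    __ LoadImmediate(TMP, imm);              // materialize via the scratch register first
    __ add(R0, R1, Operand(TMP, LSL, 0));    // then use a register operand
  }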