assembler_base.h
1// Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_BASE_H_
6#define RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_BASE_H_
7
8#if defined(DART_PRECOMPILED_RUNTIME)
9#error "AOT runtime should not use compiler sources (including header files)"
10#endif // defined(DART_PRECOMPILED_RUNTIME)
11
12#include "platform/assert.h"
13#include "platform/unaligned.h"
14#include "vm/allocation.h"
17#include "vm/globals.h"
18#include "vm/growable_array.h"
19#include "vm/hash_map.h"
20
21namespace dart {
22
23#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
24 defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
25DECLARE_FLAG(bool, use_far_branches);
26#endif
27
28class MemoryRegion;
29class Slot;
30
31namespace compiler {
32
33#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
34// On ARM and ARM64 the branch-link family of instructions puts the return
35// address into a dedicated register (LR), which the called code will then
36// preserve manually if needed. To ensure that LR is not clobbered
37// accidentally we discourage direct use of the register and instead require
38// users to wrap their code in one of the macros below, which verify that it
39// is safe to modify LR.
40// We use RELEASE_ASSERT instead of ASSERT because we use the LR state
41// (tracked by the assembler) to generate different code sequences for write
42// barriers, so we would like to ensure that incorrect usage triggers an
43// assertion instead of producing incorrect code.
44
45// Class representing the state of the LR register. In addition to tracking
46// whether LR currently contains the return address or not, it also tracks
47// entered frames - and whether they preserved a return address or not.
48class LRState {
49 public:
50 LRState(const LRState&) = default;
51 LRState& operator=(const LRState&) = default;
52
53 bool LRContainsReturnAddress() const {
54 RELEASE_ASSERT(!IsUnknown());
55 return (state_ & kLRContainsReturnAddressMask) != 0;
56 }
57
58 LRState SetLRContainsReturnAddress(bool v) const {
59 RELEASE_ASSERT(!IsUnknown());
60 return LRState(frames_, v ? (state_ | 1) : (state_ & ~1));
61 }
62
63 // Returns a |LRState| representing a state after pushing current value
64 // of LR on the stack. LR is assumed clobberable in the new state.
65 LRState EnterFrame() const {
66 RELEASE_ASSERT(!IsUnknown());
67 // 1 bit is used for LR state the rest for frame states.
68 constexpr auto kMaxFrames = (sizeof(state_) * kBitsPerByte) - 1;
69 RELEASE_ASSERT(frames_ < kMaxFrames);
70 // LSB will be clear after the shift meaning that LR can be clobbered.
71 return LRState(frames_ + 1, state_ << 1);
72 }
73
74 // Returns a |LRState| representing the state after popping LR from the
75 // stack. Note that for inner frames LR would usually be assumed clobberable
76 // even after leaving a frame. Only the outermost frame restores the return
77 // address into LR.
78 LRState LeaveFrame() const {
79 RELEASE_ASSERT(!IsUnknown());
80 RELEASE_ASSERT(frames_ > 0);
81 return LRState(frames_ - 1, state_ >> 1);
82 }
83
84 bool IsUnknown() const { return *this == Unknown(); }
85
86 static LRState Unknown() { return LRState(kUnknownMarker, kUnknownMarker); }
87
88 static LRState OnEntry() { return LRState(0, 1); }
89
90 static LRState Clobbered() { return LRState(0, 0); }
91
92 bool operator==(const LRState& other) const {
93 return frames_ == other.frames_ && state_ == other.state_;
94 }
95
96 private:
97 LRState(uint8_t frames, uint8_t state) : frames_(frames), state_(state) {}
98
99 // LR state is encoded in the LSB of state_ bitvector.
100 static constexpr uint8_t kLRContainsReturnAddressMask = 1;
101
102 static constexpr uint8_t kUnknownMarker = 0xFF;
103
104 // Number of frames on the stack or kUnknownMarker when representing
105 // Unknown state.
106 uint8_t frames_ = 0;
107
108 // Bit vector with frames_ + 1 bits: LSB represents LR state, other bits
109 // represent state of LR in each entered frame. Normally this value would
110 // just be (1 << frames_).
111 uint8_t state_ = 1;
112};
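// Worked example of the encoding above: OnEntry() is (frames_ = 0,
// state_ = 1), so LR holds the return address. EnterFrame() shifts the state
// left to (frames_ = 1, state_ = 0b10); the LSB is now clear, so LR may be
// clobbered while inside the frame. A matching LeaveFrame() shifts back to
// (frames_ = 0, state_ = 1), i.e. only the outermost frame ends up with the
// return address back in LR.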
113
114// READS_RETURN_ADDRESS_FROM_LR(...) macro verifies that LR contains the
115// return address before allowing it to be used.
116#define READS_RETURN_ADDRESS_FROM_LR(block) \
117 do { \
118 RELEASE_ASSERT(__ lr_state().LRContainsReturnAddress()); \
119 constexpr Register LR = LR_DO_NOT_USE_DIRECTLY; \
120 USE(LR); \
121 block; \
122 } while (0)
123
124// WRITES_RETURN_ADDRESS_TO_LR(...) macro verifies that LR contains the return
125// address before allowing a write into it. LR is considered to still
126// contain the return address after this operation.
127#define WRITES_RETURN_ADDRESS_TO_LR(block) READS_RETURN_ADDRESS_FROM_LR(block)
128
129// CLOBBERS_LR(...) checks that LR does *not* contain return address and it is
130// safe to clobber it.
131#define CLOBBERS_LR(block) \
132 do { \
133 RELEASE_ASSERT(!(__ lr_state().LRContainsReturnAddress())); \
134 constexpr Register LR = LR_DO_NOT_USE_DIRECTLY; \
135 USE(LR); \
136 block; \
137 } while (0)
138
139// SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(...) checks that LR contains the
140// return address, executes |block| and marks that LR can be safely clobbered
141// afterwards (assuming that |block| moved the LR value into another register).
142#define SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(block) \
143 do { \
144 READS_RETURN_ADDRESS_FROM_LR(block); \
145 __ set_lr_state(__ lr_state().SetLRContainsReturnAddress(false)); \
146 } while (0)
147
148// RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(...) checks that LR does not
149// contain return address, executes |block| and marks LR as containing return
150// address (assuming that |block| restored LR value from another register).
151#define RESTORES_RETURN_ADDRESS_FROM_REGISTER_TO_LR(block) \
152 do { \
153 CLOBBERS_LR(block); \
154 __ set_lr_state(__ lr_state().SetLRContainsReturnAddress(true)); \
155 } while (0)
156
157// SPILLS_LR_TO_FRAME(...) executes |block| and updates tracked LR state to
158// record that we entered a frame which preserved LR. LR can be clobbered
159// afterwards.
160#define SPILLS_LR_TO_FRAME(block) \
161 do { \
162 constexpr Register LR = LR_DO_NOT_USE_DIRECTLY; \
163 USE(LR); \
164 block; \
165 __ set_lr_state(__ lr_state().EnterFrame()); \
166 } while (0)
167
168// RESTORES_LR_FROM_FRAME(...) checks that LR does not contain the return
169// address, executes |block| and updates the tracked LR state to record that
170// we exited a frame. Whether LR contains the return address afterwards
171// depends on the frame state (only the outermost frame usually restores LR).
172#define RESTORES_LR_FROM_FRAME(block) \
173 do { \
174 CLOBBERS_LR(block); \
175 __ set_lr_state(__ lr_state().LeaveFrame()); \
176 } while (0)
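// Illustrative sketch of how the macros above are typically combined (the
// instruction mnemonics are placeholders; the real sequences are
// architecture specific):
//
//   SPILLS_LR_TO_FRAME(__ Push(LR));           // Frame entered; LR clobberable.
//   CLOBBERS_LR(__ LoadImmediate(LR, 0));      // OK: return address is on the stack.
//   RESTORES_LR_FROM_FRAME(__ Pop(LR));        // LR holds the return address again.
//   READS_RETURN_ADDRESS_FROM_LR(__ Ret());    // Safe to return through LR.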
177#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
178
179enum OperandSize {
180 // Architecture-independent constants.
181 kByte,
182 kUnsignedByte,
183 kTwoBytes, // Halfword (ARM), w(ord) (Intel)
184 kUnsignedTwoBytes,
185 kFourBytes, // Word (ARM), l(ong) (Intel)
186 kUnsignedFourBytes,
187 kEightBytes, // DoubleWord (ARM), q(uadword) (Intel)
188 // ARM-specific constants.
189 kSWord,
190 kDWord,
191 // 32-bit ARM specific constants.
192 kWordPair,
194 // 64-bit ARM specific constants.
195 kQWord,
196
197#if defined(HAS_SMI_63_BITS)
198 kObjectBytes = kEightBytes,
199#else
200 kObjectBytes = kFourBytes,
201#endif
202};
203
204// For declaring default sizes in AssemblerBase.
205#if defined(TARGET_ARCH_IS_64_BIT)
206constexpr OperandSize kWordBytes = kEightBytes;
207#else
208constexpr OperandSize kWordBytes = kFourBytes;
209#endif
210
211// Forward declarations.
212class Assembler;
213class AssemblerFixup;
214class AssemblerBuffer;
215class Address;
216class FieldAddress;
217
218#if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
219class Label : public ZoneAllocated {
220 public:
221 Label() {}
222 ~Label() {
223 // Assert if label is being destroyed with unresolved branches pending.
224 ASSERT(!IsLinked());
225 }
226
227 intptr_t Position() const {
228 ASSERT(IsBound());
229 return position_;
230 }
231
232 bool IsBound() const { return position_ != -1; }
233 bool IsUnused() const { return !IsBound() && !IsLinked(); }
234 bool IsLinked() const {
235 return unresolved_cb_ != -1 || unresolved_cj_ != -1 ||
236 unresolved_b_ != -1 || unresolved_j_ != -1 || unresolved_far_ != -1;
237 }
238
239 private:
240 int32_t position_ = -1;
241 void BindTo(intptr_t position) {
242 ASSERT(!IsBound());
243 ASSERT(!IsLinked());
244 position_ = position;
245 ASSERT(IsBound());
246 }
247
248 // Linked lists of unresolved forward branches, threaded through the branch
249 // instructions. The offset encoded in each unresolved branch is the delta to
250 // the next instruction in the list, terminated with a 0 delta. Each branch
251 // class has a separate list because the offset range of each is different.
252#define DEFINE_BRANCH_CLASS(name) \
253 int32_t unresolved_##name##_ = -1; \
254 int32_t link_##name(int32_t position) { \
255 ASSERT(position > unresolved_##name##_); \
256 int32_t offset; \
257 if (unresolved_##name##_ == -1) { \
258 offset = 0; \
259 } else { \
260 offset = position - unresolved_##name##_; \
261 ASSERT(offset > 0); \
262 } \
263 unresolved_##name##_ = position; \
264 return offset; \
265 }
266 DEFINE_BRANCH_CLASS(cb);
267 DEFINE_BRANCH_CLASS(cj);
268 DEFINE_BRANCH_CLASS(b);
269 DEFINE_BRANCH_CLASS(j);
270 DEFINE_BRANCH_CLASS(far);
271
272 friend class MicroAssembler;
273 DISALLOW_COPY_AND_ASSIGN(Label);
274};
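// Worked example of the chain above: if unresolved 'b' branches to the same
// unbound label are emitted at positions 8, 24 and 40, link_b() returns 0, 16
// and 16 respectively and unresolved_b_ ends at 40. Each returned delta is
// encoded in the branch itself, so binding the label walks the chain
// backwards from 40 until it hits the terminating 0 delta.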
275#else
276class Label : public ZoneAllocated {
277 public:
278 Label() : position_(0), unresolved_(0) {
279#ifdef DEBUG
280 for (int i = 0; i < kMaxUnresolvedBranches; i++) {
281 unresolved_near_positions_[i] = -1;
282 }
283#endif // DEBUG
284 }
285
286 ~Label() {
287 // Assert if label is being destroyed with unresolved branches pending.
288 ASSERT(!IsLinked());
289 ASSERT(!HasNear());
290 }
291
292 // Returns the position for bound and linked labels. Cannot be used
293 // for unused labels.
294 intptr_t Position() const {
295 ASSERT(!IsUnused());
296 return IsBound() ? -position_ - kBias : position_ - kBias;
297 }
298
299 intptr_t LinkPosition() const {
300 ASSERT(IsLinked());
301 return position_ - kBias;
302 }
303
304 intptr_t NearPosition() {
305 ASSERT(HasNear());
306 return unresolved_near_positions_[--unresolved_];
307 }
308
309 bool IsBound() const { return position_ < 0; }
310 bool IsUnused() const { return position_ == 0 && unresolved_ == 0; }
311 bool IsLinked() const { return position_ > 0; }
312 bool HasNear() const { return unresolved_ != 0; }
313
314 private:
315#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32)
316 static constexpr int kMaxUnresolvedBranches = 20;
317#else
318 static constexpr int kMaxUnresolvedBranches = 1; // Unused on non-Intel.
319#endif
320 // Zero position_ means unused (neither bound nor linked to).
321 // Thus we offset actual positions by the given bias to prevent zero
322 // positions from occurring.
323 // Note: we use target::kWordSize as a bias because on ARM
324 // there are assertions that check that distance is aligned.
325 static constexpr int kBias = 4;
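// Worked example: binding a label at position 8 stores
// position_ = -8 - kBias = -12 (negative, so IsBound() holds) and Position()
// recovers -(-12) - kBias = 8; linking at position 8 instead stores
// position_ = 8 + kBias = 12 and LinkPosition() recovers 12 - kBias = 8.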
326
327 intptr_t position_;
328 intptr_t unresolved_;
329 intptr_t unresolved_near_positions_[kMaxUnresolvedBranches];
330#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
331 // On ARM/ARM64 we track LR state: whether it contains the return address or
332 // whether it can be clobbered. To make sure that our tracking is correct
333 // for non-linear code sequences we additionally verify at labels that
334 // incoming states are compatible.
335 LRState lr_state_ = LRState::Unknown();
336
337 void UpdateLRState(LRState new_state) {
338 if (lr_state_.IsUnknown()) {
339 lr_state_ = new_state;
340 } else {
341 RELEASE_ASSERT(lr_state_ == new_state);
342 }
343 }
344#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
345
346 void Reinitialize() { position_ = 0; }
347
348#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
349 void BindTo(intptr_t position, LRState lr_state)
350#else
351 void BindTo(intptr_t position)
352#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
353 {
354 ASSERT(!IsBound());
355 ASSERT(!HasNear());
356 position_ = -position - kBias;
357 ASSERT(IsBound());
358#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
359 UpdateLRState(lr_state);
360#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
361 }
362
363#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
364 void LinkTo(intptr_t position, LRState lr_state)
365#else
366 void LinkTo(intptr_t position)
367#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
368 {
369 ASSERT(!IsBound());
370 position_ = position + kBias;
371 ASSERT(IsLinked());
372#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
373 UpdateLRState(lr_state);
374#endif // defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
375 }
376
377 void NearLinkTo(intptr_t position) {
378 ASSERT(!IsBound());
379 ASSERT(unresolved_ < kMaxUnresolvedBranches);
380 unresolved_near_positions_[unresolved_++] = position;
381 }
382
383 friend class Assembler;
384 DISALLOW_COPY_AND_ASSIGN(Label);
385};
386#endif
387
388// External labels keep a function pointer to allow them
389// to be called from code generated by the assembler.
390class ExternalLabel : public ValueObject {
391 public:
392 explicit ExternalLabel(uword address) : address_(address) {}
393
394 bool is_resolved() const { return address_ != 0; }
395 uword address() const {
396 ASSERT(is_resolved());
397 return address_;
398 }
399
400 private:
401 const uword address_;
402};
403
404// Assembler fixups are positions in generated code that hold relocation
405// information that needs to be processed before finalizing the code
406// into executable memory.
407class AssemblerFixup : public ZoneAllocated {
408 public:
409 virtual void Process(const MemoryRegion& region, intptr_t position) = 0;
410
411 virtual bool IsPointerOffset() const = 0;
412
413 // It would be ideal if the destructor method could be made private,
414 // but the g++ compiler complains when this is subclassed.
415 virtual ~AssemblerFixup() {}
416
417 private:
418 AssemblerFixup* previous_;
419 intptr_t position_;
420
421 AssemblerFixup* previous() const { return previous_; }
422 void set_previous(AssemblerFixup* previous) { previous_ = previous; }
423
424 intptr_t position() const { return position_; }
425 void set_position(intptr_t position) { position_ = position; }
426
427 friend class AssemblerBuffer;
428};
429
430// Assembler buffers are used to emit binary code. They grow on demand.
431class AssemblerBuffer : public ValueObject {
432 public:
433 AssemblerBuffer();
434 ~AssemblerBuffer();
435
436 // Basic support for emitting, loading, and storing.
437 template <typename T>
438 void Emit(T value) {
439 ASSERT(HasEnsuredCapacity());
440#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
441 defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
442 // Variable-length instructions in ia32/x64 have unaligned immediates.
443 // Instruction parcels in RISC-V are only 2-byte aligned.
444 StoreUnaligned(reinterpret_cast<T*>(cursor_), value);
445#else
446 // Other architectures have aligned, fixed-length instructions.
447 *reinterpret_cast<T*>(cursor_) = value;
448#endif
449 cursor_ += sizeof(T);
450 }
451
452 template <typename T>
453 void Remit() {
454 ASSERT(Size() >= static_cast<intptr_t>(sizeof(T)));
455 cursor_ -= sizeof(T);
456 }
457
458 // Returns the address of the code at |position| bytes.
459 uword Address(intptr_t position) { return contents_ + position; }
460
461 template <typename T>
462 T Load(intptr_t position) {
463 ASSERT(position >= 0 &&
464 position <= (Size() - static_cast<intptr_t>(sizeof(T))));
465#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
466 defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
467 // Variable-length instructions in ia32/x64 have unaligned immediates.
468 // Instruction parcels in RISC-V are only 2-byte aligned.
469 return LoadUnaligned(reinterpret_cast<T*>(contents_ + position));
470#else
471 // Other architectures have aligned, fixed-length instructions.
472 return *reinterpret_cast<T*>(contents_ + position);
473#endif
474 }
475
476 template <typename T>
477 void Store(intptr_t position, T value) {
478 ASSERT(position >= 0 &&
479 position <= (Size() - static_cast<intptr_t>(sizeof(T))));
480#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_X64) || \
481 defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
482 // Variable-length instructions in ia32/x64 have unaligned immediates.
483 // Instruction parcels in RISC-V are only 2-byte aligned.
484 StoreUnaligned(reinterpret_cast<T*>(contents_ + position), value);
485#else
486 // Other architectures have aligned, fixed-length instructions.
487 *reinterpret_cast<T*>(contents_ + position) = value;
488#endif
489 }
490
491 const ZoneGrowableArray<intptr_t>& pointer_offsets() const {
492#if defined(DEBUG)
493 ASSERT(fixups_processed_);
494#endif
495 return *pointer_offsets_;
496 }
497
498#if defined(TARGET_ARCH_IA32)
499 // Emit an object pointer directly in the code.
500 void EmitObject(const Object& object);
501#endif
502
503 // Emit a fixup at the current location.
504 void EmitFixup(AssemblerFixup* fixup) {
505 fixup->set_previous(fixup_);
506 fixup->set_position(Size());
507 fixup_ = fixup;
508 }
509
510 // Count the fixups that produce a pointer offset, without processing
511 // the fixups.
512 intptr_t CountPointerOffsets() const;
513
514 // Get the size of the emitted code.
515 intptr_t Size() const { return cursor_ - contents_; }
516 uword contents() const { return contents_; }
517
518 // Copy the assembled instructions into the specified memory block
519 // and apply all fixups.
520 void FinalizeInstructions(const MemoryRegion& region);
521
522 // To emit an instruction to the assembler buffer, the EnsureCapacity helper
523 // must be used to guarantee that the underlying data area is big enough to
524 // hold the emitted instruction. Usage:
525 //
526 // AssemblerBuffer buffer;
527 // AssemblerBuffer::EnsureCapacity ensured(&buffer);
528 // ... emit bytes for single instruction ...
529
530#if defined(DEBUG)
531 class EnsureCapacity : public ValueObject {
532 public:
533 explicit EnsureCapacity(AssemblerBuffer* buffer);
534 ~EnsureCapacity();
535
536 private:
537 AssemblerBuffer* buffer_;
538 intptr_t gap_;
539
540 intptr_t ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
541 };
542
543 bool has_ensured_capacity_;
544 bool HasEnsuredCapacity() const { return has_ensured_capacity_; }
545#else
546 class EnsureCapacity : public ValueObject {
547 public:
548 explicit EnsureCapacity(AssemblerBuffer* buffer) {
549 if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
550 }
551 };
552
553 // When building the C++ tests, assertion code is enabled. To allow
554 // asserting that the user of the assembler buffer has ensured the
555 // capacity needed for emitting, we add a dummy method in non-debug mode.
556 bool HasEnsuredCapacity() const { return true; }
557#endif
558
559 // Returns the position in the instruction stream.
560 intptr_t GetPosition() const { return cursor_ - contents_; }
561
562 void Reset() { cursor_ = contents_; }
563
564 private:
565 // The limit is set to kMinimumGap bytes before the end of the data area.
566 // This leaves enough space for the longest possible instruction and allows
567 // for a single, fast space check per instruction.
568 static constexpr intptr_t kMinimumGap = 32;
569
570 uword contents_;
571 uword cursor_;
572 uword limit_;
573 AssemblerFixup* fixup_;
574 ZoneGrowableArray<intptr_t>* pointer_offsets_;
575#if defined(DEBUG)
576 bool fixups_processed_;
577#endif
578
579 uword cursor() const { return cursor_; }
580 uword limit() const { return limit_; }
581 intptr_t Capacity() const {
582 ASSERT(limit_ >= contents_);
583 return (limit_ - contents_) + kMinimumGap;
584 }
585
586 // Process the fixup chain.
587 void ProcessFixups(const MemoryRegion& region);
588
589 // Compute the limit based on the data area and the capacity. See
590 // description of kMinimumGap for the reasoning behind the value.
591 static uword ComputeLimit(uword data, intptr_t capacity) {
592 return data + capacity - kMinimumGap;
593 }
594
595 void ExtendCapacity();
596
597 friend class AssemblerFixup;
598};
599
600class AssemblerBase : public StackResource {
601 public:
602 explicit AssemblerBase(ObjectPoolBuilder* object_pool_builder)
603 : StackResource(ThreadState::Current()),
604 object_pool_builder_(object_pool_builder) {}
605 virtual ~AssemblerBase();
606
607 // Used for near/far jumps on IA32/X64, ignored for ARM.
608 enum JumpDistance : bool {
609 kFarJump = false,
610 kNearJump = true,
611 };
612
613 intptr_t CodeSize() const { return buffer_.Size(); }
614
615 uword CodeAddress(intptr_t offset) { return buffer_.Address(offset); }
616
617 bool HasObjectPoolBuilder() const { return object_pool_builder_ != nullptr; }
618 ObjectPoolBuilder& object_pool_builder() { return *object_pool_builder_; }
619
620 intptr_t prologue_offset() const { return prologue_offset_; }
622
623 // Tracks if the resulting code should be aligned by kPreferredLoopAlignment
624 // boundary.
626 bool should_be_aligned() const { return should_be_aligned_; }
627
628 void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
629 static bool EmittingComments();
630
631 virtual void Breakpoint() = 0;
632
633 virtual void SmiTag(Register r) = 0;
634
635 virtual void Bind(Label* label) = 0;
636
637 // If Smis are compressed and the Smi value in dst is non-negative, ensures
638 // the upper bits are cleared. If Smis are not compressed, is a no-op.
639 //
640 // Since this operation only affects the unused upper bits when Smis are
641 // compressed, it can be used on registers not allocated as writable.
642 //
643 // The behavior on the upper bits of signed compressed Smis is undefined.
644#if defined(DART_COMPRESSED_POINTERS)
645 virtual void ExtendNonNegativeSmi(Register dst) {
646 // Default to sign extension and allow architecture-specific assemblers
647 // where an alternative like zero-extension is preferred to override this.
648 ExtendValue(dst, dst, kObjectBytes);
649 }
650#else
651 void ExtendNonNegativeSmi(Register dst) {}
652#endif
653
654 // Extends a value of size sz in src to a value of size kWordBytes in dst.
655 // That is, bits in the source register that are not part of the sz-sized
656 // value are ignored, and if sz is signed, then the value is sign extended.
657 //
658 // Produces no instructions if dst and src are the same and sz is kWordBytes.
659 virtual void ExtendValue(Register dst, Register src, OperandSize sz) = 0;
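// For example, ExtendValue(dst, src, kFourBytes) sign-extends the low 32 bits
// of src into dst, a size such as kUnsignedFourBytes zero-extends them, and
// with sz == kWordBytes and dst == src no instructions are emitted.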
660
661 // Extends a value of size sz in src to a tagged Smi value in dst.
662 // That is, bits in the source register that are not part of the sz-sized
663 // value are ignored, and if sz is signed, then the value is sign extended.
664 virtual void ExtendAndSmiTagValue(Register dst,
665 Register src,
666 OperandSize sz) {
667 ExtendValue(dst, src, sz);
668 SmiTag(dst);
669 }
670
671 // Move the contents of src into dst.
672 //
673 // Produces no instructions if dst and src are the same.
674 virtual void MoveRegister(Register dst, Register src) {
675 ExtendValue(dst, src, kWordBytes);
676 }
677
678 // Move the contents of src into dst and tag the value in dst as a Smi.
679 virtual void MoveAndSmiTagRegister(Register dst, Register src) {
680 ExtendAndSmiTagValue(dst, src, kWordBytes);
681 }
682
683 // Inlined allocation in new space of an instance of an object whose instance
684 // size is known at compile time, with class ID 'cid'. The generated code has
685 // no runtime calls. Jumps to 'failure' if the instance cannot be allocated
686 // here, in which case allocation should be done via a runtime call instead.
687 //
688 // ObjectPtr to allocated instance is returned in 'instance_reg'.
689 //
690 // WARNING: The caller is responsible for initializing all GC-visible fields
691 // of the object other than the tags field, which is initialized here.
692 virtual void TryAllocateObject(intptr_t cid,
693 intptr_t instance_size,
694 Label* failure,
695 JumpDistance distance,
696 Register instance_reg,
697 Register temp) = 0;
698
699 // An alternative version of TryAllocateObject that takes a Class object
700 // and passes the class id and instance size to TryAllocateObject along with
701 // the other arguments.
702 void TryAllocate(const Class& cls,
703 Label* failure,
704 JumpDistance distance,
705 Register instance_reg,
706 Register temp) {
707 TryAllocateObject(target::Class::GetId(cls),
708 target::Class::GetInstanceSize(cls), failure, distance,
709 instance_reg, temp);
710 }
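// Typical usage sketch (register names and the slow-path call are
// illustrative): try the inline fast path and fall back to the runtime on
// failure.
//
//   compiler::Label slow_case, done;
//   __ TryAllocate(cls, &slow_case, Assembler::kFarJump, instance_reg, temp);
//   __ Jump(&done);
//   __ Bind(&slow_case);
//   // ... allocate via a runtime call / stub and put the result in
//   // instance_reg ...
//   __ Bind(&done);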
711
712 virtual void BranchIfSmi(Register reg,
713 Label* label,
714 JumpDistance distance = kFarJump) = 0;
715
716 virtual void ArithmeticShiftRightImmediate(Register reg, intptr_t shift) = 0;
717 virtual void CompareWords(Register reg1,
718 Register reg2,
719 intptr_t offset,
720 Register count,
721 Register temp,
722 Label* equals) = 0;
723
724 void UnrolledMemCopy(Register dst_base,
725 intptr_t dst_offset,
726 Register src_base,
727 intptr_t src_offset,
728 intptr_t size,
729 Register temp);
730 enum CanBeSmi {
731 kValueIsNotSmi,
732 kValueCanBeSmi,
733 };
734
735 enum MemoryOrder {
736 // All previous writes to memory in this thread must be visible to other
737 // threads. Currently, only used for lazily populating hash indices in
738 // shared const maps and sets.
739 kRelease,
740
741 // All other stores.
742 kRelaxedNonAtomic,
743 };
744
745 virtual void LoadFieldAddressForOffset(Register reg,
746 Register base,
747 int32_t offset) = 0;
748 virtual void LoadFieldAddressForRegOffset(
749 Register address,
750 Register instance,
751 Register offset_in_words_as_smi) = 0;
752
753 virtual void LoadAcquire(Register dst,
754 const Address& address,
755 OperandSize size = kWordBytes) = 0;
756 virtual void StoreRelease(Register src,
757 const Address& address,
758 OperandSize size = kWordBytes) = 0;
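// For example, a lazily computed field can be published with
// StoreRelease(value_reg, field_address) and read on another thread with
// LoadAcquire(dst_reg, field_address), so that all writes performed before
// the releasing store are visible to a thread that observes the new value
// (register and address names are illustrative).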
759
760 virtual void Load(Register dst,
761 const Address& address,
762 OperandSize sz = kWordBytes) = 0;
763 // Does not use write barriers, use StoreIntoObject instead for boxed fields.
764 virtual void Store(Register src,
765 const Address& address,
766 OperandSize sz = kWordBytes) = 0;
767
768 // When emitting the write barrier code on IA32, either the caller must
769 // allocate a scratch register or the implementation chooses a register to
770 // save and restore and uses that as a scratch register internally.
771 // Thus, the scratch register is an additional optional argument to
772 // StoreIntoObject, StoreIntoArray, StoreIntoObjectOffset, and StoreBarrier
773 // that defaults to TMP on other architectures. (TMP is kNoRegister on IA32,
774 // so the default value invokes the correct behavior.)
775
776 // Store into a heap object and applies the appropriate write barriers.
777 // (See StoreBarrier for which are applied on a given architecture.)
778 //
779 // All stores into heap objects must pass through this function or,
780 // if the value can be proven either Smi or old-and-premarked, its NoBarrier
781 // variant. Preserves the [object] and [value] registers.
782 void StoreIntoObject(Register object, // Object being stored into.
783 const Address& address, // Offset into object.
784 Register value, // Value being stored.
785 CanBeSmi can_be_smi = kValueCanBeSmi,
786 MemoryOrder memory_order = kRelaxedNonAtomic,
787 Register scratch = TMP,
788 OperandSize size = kWordBytes);
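// Sketch of a typical boxed field store versus its barrier-free variant
// (the offset helper is illustrative):
//
//   __ StoreIntoObjectOffset(obj_reg, target::Foo::bar_offset(), value_reg);
//   __ StoreIntoObjectOffsetNoBarrier(obj_reg, target::Foo::bar_offset(),
//                                     new_smi_reg);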
789
790 void StoreIntoObjectNoBarrier(Register object, // Object being stored into.
791 const Address& address, // Offset into object.
792 Register value, // Value being stored.
793 MemoryOrder memory_order = kRelaxedNonAtomic,
794 OperandSize size = kWordBytes);
795 virtual void StoreObjectIntoObjectNoBarrier(
796 Register object, // Object being stored into.
797 const Address& address, // Offset into object.
798 const Object& value, // Value being stored.
799 MemoryOrder memory_order = kRelaxedNonAtomic,
800 OperandSize size = kWordBytes) = 0;
801
802 virtual void LoadIndexedPayload(Register dst,
803 Register base,
804 int32_t offset,
805 Register index,
806 ScaleFactor scale,
807 OperandSize sz = kWordBytes) = 0;
808
809 // For virtual XOffset methods, the base method implementation creates an
810 // appropriate address from the base register and offset and calls the
811 // corresponding address-taking method. These should be overridden for
812 // architectures where offsets should not be converted to addresses without
813 // additional precautions, for when the ARM-specific Assembler needs
814 // to override with an overloaded version for the Condition argument,
815 // or for when the IA32-specific Assembler needs to override with an
816 // overloaded version for adding a scratch register argument.
817
818 void LoadAcquireFromOffset(Register dst,
819 Register base,
820 int32_t offset = 0,
821 OperandSize size = kWordBytes);
822 void StoreReleaseToOffset(Register src,
823 Register base,
824 int32_t offset = 0,
825 OperandSize size = kWordBytes);
826
827 virtual void LoadFromOffset(Register dst,
828 Register base,
829 int32_t offset,
830 OperandSize sz = kWordBytes);
831 // Does not use write barriers, use StoreIntoObject instead for boxed fields.
832 virtual void StoreToOffset(Register src,
833 Register base,
834 int32_t offset,
835 OperandSize sz = kWordBytes);
836
837 virtual void StoreIntoObjectOffset(
838 Register object, // Object being stored into.
839 int32_t offset, // Offset into object.
840 Register value, // Value being stored.
841 CanBeSmi can_be_smi = kValueCanBeSmi,
842 MemoryOrder memory_order = kRelaxedNonAtomic,
843 Register scratch = TMP,
844 OperandSize size = kWordBytes);
845 virtual void StoreIntoObjectOffsetNoBarrier(
846 Register object, // Object being stored into.
847 int32_t offset, // Offset into object.
848 Register value, // Value being stored.
849 MemoryOrder memory_order = kRelaxedNonAtomic,
850 OperandSize size = kWordBytes);
851 void StoreObjectIntoObjectOffsetNoBarrier(
852 Register object, // Object being stored into.
853 int32_t offset, // Offset into object.
854 const Object& value, // Value being stored.
855 MemoryOrder memory_order = kRelaxedNonAtomic,
856 OperandSize size = kWordBytes);
857
858 void LoadField(Register dst,
859 const FieldAddress& address,
860 OperandSize sz = kWordBytes);
861 virtual void LoadFieldFromOffset(Register dst,
862 Register base,
863 int32_t offset,
864 OperandSize sz = kWordBytes);
865
866 // Does not use write barriers, use StoreIntoObjectOffset instead for
867 // boxed fields.
868 virtual void StoreFieldToOffset(Register src,
869 Register base,
870 int32_t offset,
871 OperandSize sz = kWordBytes);
872
873 // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and
874 // halts if not.
875 void LoadSmi(Register dst, const Address& address) {
876 Load(dst, address);
877 DEBUG_ONLY(VerifySmi(dst));
878 }
879
880 // Loads a Smi field from a Dart object. In DEBUG mode, also checks that the
881 // loaded value is a Smi and halts if not.
882 void LoadSmiField(Register dst, const FieldAddress& address);
883
884 // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and
885 // halts if not.
886 void LoadSmiFromOffset(Register dst, Register base, int32_t offset);
887
888 // Loads a Smi field from a Dart object. In DEBUG mode, also checks that the
889 // loaded value is a Smi and halts if not.
890 void LoadSmiFieldFromOffset(Register dst, Register base, int32_t offset);
891
892#if defined(DART_COMPRESSED_POINTERS)
893 // These are the base methods that all other compressed methods delegate to.
894 //
895 // For the virtual methods, they are only virtual when using compressed
896 // pointers, so the overriding definitions must be guarded with an #ifdef.
897
898 virtual void LoadCompressedFieldAddressForRegOffset(
899 Register address,
900 Register instance,
901 Register offset_in_words_as_smi) = 0;
902
903 virtual void LoadAcquireCompressed(Register dst, const Address& address) = 0;
904
905 virtual void LoadCompressed(Register dst, const Address& address) = 0;
906
907 virtual void LoadIndexedCompressed(Register dst,
908 Register base,
909 int32_t offset,
910 Register index) = 0;
911
912 // Loads a compressed Smi. In DEBUG mode, also checks that the loaded value is
913 // a Smi and halts if not.
914 void LoadCompressedSmi(Register dst, const Address& address) {
915 Load(dst, address, kUnsignedFourBytes); // Zero extension.
916 DEBUG_ONLY(VerifySmi(dst);)
917 }
918#else
919 // These are the base methods that all other compressed methods delegate to.
920 //
921 // The methods are non-virtual and forward to the uncompressed versions.
922
923 void LoadCompressedFieldAddressForRegOffset(Register address,
924 Register instance,
925 Register offset_in_words_as_smi) {
926 LoadFieldAddressForRegOffset(address, instance, offset_in_words_as_smi);
927 }
928
929 void LoadAcquireCompressed(Register dst, const Address& address) {
930 LoadAcquire(dst, address);
931 }
932
933 void LoadCompressed(Register dst, const Address& address) {
934 Load(dst, address);
935 }
936
937 void LoadIndexedCompressed(Register dst,
938 Register base,
939 int32_t offset,
940 Register index) {
941 LoadIndexedPayload(dst, base, offset, index, TIMES_WORD_SIZE, kWordBytes);
942 }
943
944 // Loads a compressed Smi. In DEBUG mode, also checks that the loaded value is
945 // a Smi and halts if not.
946 void LoadCompressedSmi(Register dst, const Address& address) {
947 LoadSmi(dst, address);
948 }
949#endif // defined(DART_COMPRESSED_POINTERS)
950
951 // Compressed store methods are implemented in AssemblerBase, as the only
952 // difference is whether the entire word is stored or just the low bits.
953
954 void StoreReleaseCompressed(Register src, const Address& address) {
955 StoreRelease(src, address, kObjectBytes);
956 }
957 void StoreReleaseCompressedToOffset(Register src,
958 Register base,
959 int32_t offset = 0) {
960 StoreReleaseToOffset(src, base, offset, kObjectBytes);
961 }
962
963 void StoreCompressedIntoObject(
964 Register object, // Object being stored into.
965 const Address& address, // Address to store the value at.
966 Register value, // Value being stored.
967 CanBeSmi can_be_smi = kValueCanBeSmi,
968 MemoryOrder memory_order = kRelaxedNonAtomic,
969 Register scratch = TMP) {
970 StoreIntoObject(object, address, value, can_be_smi, memory_order, TMP,
971 kObjectBytes);
972 }
973 void StoreCompressedIntoObjectNoBarrier(
974 Register object, // Object being stored into.
975 const Address& address, // Address to store the value at.
976 Register value, // Value being stored.
977 MemoryOrder memory_order = kRelaxedNonAtomic) {
978 StoreIntoObjectNoBarrier(object, address, value, memory_order,
979 kObjectBytes);
980 }
981 virtual void StoreCompressedObjectIntoObjectNoBarrier(
982 Register object, // Object being stored into.
983 const Address& address, // Address to store the value at.
984 const Object& value, // Value being stored.
985 MemoryOrder memory_order = kRelaxedNonAtomic) {
986 StoreObjectIntoObjectNoBarrier(object, address, value, memory_order,
987 kObjectBytes);
988 }
989
990 void StoreCompressedIntoObjectOffset(
991 Register object, // Object being stored into.
992 int32_t offset, // Offset into object.
993 Register value, // Value being stored.
994 CanBeSmi can_be_smi = kValueCanBeSmi,
995 MemoryOrder memory_order = kRelaxedNonAtomic,
996 Register scratch = TMP) {
997 StoreIntoObjectOffset(object, offset, value, can_be_smi, memory_order, TMP,
998 kObjectBytes);
999 }
1000 void StoreCompressedIntoObjectOffsetNoBarrier(
1001 Register object, // Object being stored into.
1002 int32_t offset, // Offset into object.
1003 Register value, // Value being stored.
1004 MemoryOrder memory_order = kRelaxedNonAtomic) {
1005 StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order,
1006 kObjectBytes);
1007 }
1008 void StoreCompressedObjectIntoObjectOffsetNoBarrier(
1009 Register object, // Object being stored into.
1010 int32_t offset, // Offset into object.
1011 const Object& value, // Value being stored.
1012 MemoryOrder memory_order = kRelaxedNonAtomic) {
1013 StoreObjectIntoObjectOffsetNoBarrier(object, offset, value, memory_order,
1014 kObjectBytes);
1015 }
1016
1017 void StoreIntoArray(Register object,
1018 Register slot,
1019 Register value,
1020 CanBeSmi can_value_be_smi = kValueCanBeSmi,
1021 Register scratch = TMP,
1022 OperandSize size = kWordBytes);
1023 void StoreCompressedIntoArray(Register object,
1024 Register slot,
1025 Register value,
1026 CanBeSmi can_value_be_smi = kValueCanBeSmi,
1027 Register scratch = TMP) {
1028 StoreIntoArray(object, slot, value, can_value_be_smi, scratch,
1029 kObjectBytes);
1030 }
1031
1032 // These methods just delegate to the non-Field classes, either passing
1033 // along a FieldAddress as the Address or adjusting the offset appropriately.
1034
1035 void LoadCompressedFieldFromOffset(Register dst,
1036 Register base,
1037 int32_t offset);
1038 void LoadCompressedField(Register dst, const FieldAddress& address);
1041 Register base,
1042 int32_t offset);
1043 void LoadCompressedSmiField(Register dst, const FieldAddress& address);
1046 Register base,
1047 int32_t offset);
1048
1049 // There are no StoreCompressedField methods because only Dart objects contain
1050 // compressed pointers and compressed pointers may require write barriers, so
1051 // StoreCompressedIntoObject should be used instead.
1052
1053 void LoadFromSlot(Register dst, Register base, const Slot& slot);
1054 void StoreToSlot(Register src,
1055 Register base,
1056 const Slot& slot,
1057 CanBeSmi can_be_smi,
1058 MemoryOrder memory_order = kRelaxedNonAtomic,
1059 Register scratch = TMP);
1060 void StoreToSlotNoBarrier(Register src,
1061 Register base,
1062 const Slot& slot,
1063 MemoryOrder memory_order = kRelaxedNonAtomic);
1064 // Uses the type information of the Slot to determine whether the field
1065 // can be a Smi or not.
1066 void StoreToSlot(Register src,
1067 Register base,
1068 const Slot& slot,
1069 MemoryOrder memory_order = kRelaxedNonAtomic,
1070 Register scratch = TMP);
1071
1072 // Truncates upper bits.
1073 virtual void LoadInt32FromBoxOrSmi(Register result, Register value) = 0;
1074
1075#if !defined(TARGET_ARCH_IS_32_BIT)
1076 virtual void LoadInt64FromBoxOrSmi(Register result, Register value) = 0;
1077#endif
1078
1079 // Truncates upper bits on 32 bit archs.
1080 void LoadWordFromBoxOrSmi(Register result, Register value) {
1081#if defined(TARGET_ARCH_IS_32_BIT)
1082 LoadInt32FromBoxOrSmi(result, value);
1083#else
1084 LoadInt64FromBoxOrSmi(result, value);
1085#endif
1086 }
1087
1088 // Loads nullability from an AbstractType [type] to [dst].
1089 void LoadAbstractTypeNullability(Register dst, Register type);
1090 // Loads nullability from an AbstractType [type] and compares it
1091 // to [value]. Clobbers [scratch].
1092 void CompareAbstractTypeNullabilityWith(Register type,
1093 /*Nullability*/ int8_t value,
1094 Register scratch);
1095
1096 virtual void LoadImmediate(Register dst, target::word imm) = 0;
1097
1098 virtual void CompareImmediate(Register reg,
1099 target::word imm,
1100 OperandSize width = kWordBytes) = 0;
1101
1102 virtual void CompareWithMemoryValue(Register value,
1103 Address address,
1104 OperandSize size = kWordBytes) = 0;
1105
1106 virtual void AndImmediate(Register dst, target::word imm) = 0;
1107
1108 virtual void LsrImmediate(Register dst, int32_t shift) = 0;
1109
1110 virtual void MulImmediate(Register dst,
1111 target::word imm,
1112 OperandSize = kWordBytes) = 0;
1113
1114 // If src2 == kNoRegister, dst = dst & src1, otherwise dst = src1 & src2.
1115 virtual void AndRegisters(Register dst,
1116 Register src1,
1117 Register src2 = kNoRegister) = 0;
1118
1119 // dst = dst << shift. On some architectures, we must use a specific register
1120 // for the shift, so either the shift register must be that specific register
1121 // or the architecture must define a TMP register, which is clobbered.
1122 virtual void LslRegister(Register dst, Register shift) = 0;
1123
1124 // Performs CombineHashes from runtime/vm/hash.h on the hashes contained in
1125 // dst and other. Puts the result in dst. Clobbers other.
1126 //
1127 // Note: Only uses the lower 32 bits of the hashes and returns a 32 bit hash.
1128 virtual void CombineHashes(Register dst, Register other) = 0;
1129 // Performs FinalizeHash from runtime/vm/hash.h on the hash contained in
1130 // dst. May clobber scratch if provided, otherwise may clobber TMP.
1131 //
1132 // Note: Only uses the lower 32 bits of the hash and returns a 32 bit hash.
1133 void FinalizeHash(Register hash, Register scratch = TMP) {
1134 return FinalizeHashForSize(/*bit_size=*/kBitsPerInt32, hash, scratch);
1135 }
1136 // Performs FinalizeHash from runtime/vm/hash.h on the hash contained in
1137 // dst and returns the result, masked to a maximum of [bit_size] bits.
1138 // May clobber scratch if provided, otherwise may clobber TMP.
1139 //
1140 // Note: Only uses the lower 32 bits of the hash. Since the underlying
1141 // algorithm produces 32-bit values, assumes 0 < [bit_size] <= 32.
1142 virtual void FinalizeHashForSize(intptr_t bit_size,
1143 Register hash,
1144 Register scratch = TMP) = 0;
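// Sketch of combining two already-loaded 32-bit hash values, following the
// CombineHashes/FinalizeHash pattern from runtime/vm/hash.h (register names
// are illustrative):
//
//   __ CombineHashes(hash_reg, other_hash_reg);  // Clobbers other_hash_reg.
//   __ FinalizeHash(hash_reg);                   // 32-bit result in hash_reg.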
1145
1146 void LoadTypeClassId(Register dst, Register src);
1147
1148 virtual void EnsureHasClassIdInDEBUG(intptr_t cid,
1149 Register src,
1150 Register scratch,
1151 bool can_be_null = false) = 0;
1152
1154
1155 void MsanUnpoison(Register base, intptr_t length_in_bytes);
1156 void MsanUnpoison(Register base, Register length_in_bytes);
1157
1158 void Unimplemented(const char* message);
1159 void Untested(const char* message);
1160 void Unreachable(const char* message);
1161 void Stop(const char* message);
1162
1163 void FinalizeInstructions(const MemoryRegion& region) {
1164 buffer_.FinalizeInstructions(region);
1165 }
1166
1167 // Count the fixups that produce a pointer offset, without processing
1168 // the fixups.
1169 intptr_t CountPointerOffsets() const { return buffer_.CountPointerOffsets(); }
1170
1171 const ZoneGrowableArray<intptr_t>& GetPointerOffsets() const {
1172 return buffer_.pointer_offsets();
1173 }
1174
1175 class CodeComment : public ZoneAllocated {
1176 public:
1177 CodeComment(intptr_t pc_offset, const String& comment)
1178 : pc_offset_(pc_offset), comment_(comment) {}
1179
1180 intptr_t pc_offset() const { return pc_offset_; }
1181 const String& comment() const { return comment_; }
1182
1183 private:
1184 intptr_t pc_offset_;
1185 const String& comment_;
1186
1187 DISALLOW_COPY_AND_ASSIGN(CodeComment);
1188 };
1189
1190 const GrowableArray<CodeComment*>& comments() const { return comments_; }
1191
1196
1197 // Returns the offset (from the very beginning of the instructions) to the
1198 // unchecked entry point (incl. prologue/frame setup, etc.).
1200
1205
1206 // Jumps to [target] if [condition] is satisfied.
1207 //
1208 // [low] and [high] are inclusive.
1209 // If [temp] is kNoRegister, then [value] is overwritten.
1210 // Note: Using a valid [temp] register generates an additional
1211 // instruction on x64/ia32.
1212 virtual void RangeCheck(Register value,
1213 Register temp,
1214 intptr_t low,
1215 intptr_t high,
1216 RangeCheckCondition condition,
1217 Label* target) = 0;
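// For example, to jump to |target| when a class id held in cid_reg lies in an
// inclusive range (register names, bounds, and the condition constant are
// illustrative):
//
//   __ RangeCheck(cid_reg, temp_reg, kFirstCid, kLastCid, kIfInRange,
//                 &target);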
1218
1219 // Checks [dst] for a Smi, halting if it does not contain one.
1221 Label done;
1222 BranchIfSmi(dst, &done, kNearJump);
1223 Stop("Expected Smi");
1224 Bind(&done);
1225 }
1226
1227 protected:
1228 AssemblerBuffer buffer_; // Contains position independent code.
1229 int32_t prologue_offset_ = -1;
1232
1234
1235 private:
1236 // Apply the generational write barrier on all architectures and incremental
1237 // write barrier on non-IA32 architectures.
1238 //
1239 // On IA32, since the incremental write barrier is not applied,
1240 // concurrent marking cannot be enabled.
1241 virtual void StoreBarrier(Register object, // Object being stored into.
1242 Register value, // Value being stored.
1243 CanBeSmi can_be_smi,
1244 Register scratch) = 0;
1245
1246 // Apply the generational write barrier on all architectures and incremental
1247 // write barrier on non-IA32 architectures when storing into an array.
1248 //
1249 // On IA32, since the incremental write barrier is not applied,
1250 // concurrent marking cannot be enabled.
1251 virtual void ArrayStoreBarrier(Register object, // Object being stored into.
1252 Register slot, // Slot being stored into.
1253 Register value, // Value being stored.
1254 CanBeSmi can_be_smi,
1255 Register scratch) = 0;
1256
1257 // Checks that storing [value] into [object] does not require a write barrier.
1258 virtual void VerifyStoreNeedsNoWriteBarrier(Register object,
1259 Register value) = 0;
1260
1262 ObjectPoolBuilder* object_pool_builder_;
1263};
1264
1265// For leaf runtime calls. For non-leaf runtime calls, use
1266// Assembler::CallRuntime.
1267class LeafRuntimeScope : public ValueObject {
1268 public:
1269 // Enters a frame, saves registers, and aligns the stack according to the C
1270 // ABI.
1271 //
1272 // If [preserve_registers] is false, only registers normally preserved at a
1273 // Dart call will be preserved (SP, FP, THR, PP, CODE_REG, RA). Suitable for
1274 // use in IL instructions marked with LocationSummary::kCall.
1275 // If [preserve_registers] is true, all registers allocatable by Dart (roughly
1276 // everything but TMP, TMP2) will be preserved. Suitable for non-call IL
1277 // instructions like the write barrier.
1278 LeafRuntimeScope(Assembler* assembler,
1279 intptr_t frame_size,
1280 bool preserve_registers);
1281
1282 // Restores registers and leaves the frame.
1283 ~LeafRuntimeScope();
1284
1285 // Sets the current tag, calls the runtime function, and restores the current
1286 // tag.
1287 void Call(const RuntimeEntry& entry, intptr_t argument_count);
1288
1289 private:
1290 Assembler* const assembler_;
1291 const bool preserve_registers_;
1292};
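// Typical usage sketch (the runtime entry and argument setup are
// illustrative):
//
//   {
//     LeafRuntimeScope rt(assembler, /*frame_size=*/0,
//                         /*preserve_registers=*/false);
//     // Move arguments into the C ABI argument registers here.
//     rt.Call(kSomeLeafRuntimeEntry, /*argument_count=*/2);
//   }  // Registers restored and frame left on scope exit.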
1293
1294} // namespace compiler
1295
1296} // namespace dart
1297
1298#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_BASE_H_