Flutter Engine
assembler_base.cc
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/compiler/assembler/assembler_base.h"

#include "platform/utils.h"
#include "vm/cpu.h"
#include "vm/flags.h"
#include "vm/heap/heap.h"
#include "vm/memory_region.h"
#include "vm/os.h"
#include "vm/zone.h"

namespace dart {

DEFINE_FLAG(bool,
            check_code_pointer,
            false,
            "Verify instructions offset in code object."
            "NOTE: This breaks the profiler.");
#if defined(TARGET_ARCH_ARM)
DEFINE_FLAG(bool, use_far_branches, false, "Enable far branches for ARM.");
#endif

namespace compiler {

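// Loads the value of `slot` from the object at `base` into `dst`, selecting
// the untagged, compressed, Smi, or plain field load that matches the slot's
// representation and instance kind.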
void AssemblerBase::LoadFromSlot(Register dst,
                                 Register base,
                                 const Slot& slot) {
  if (!slot.is_tagged()) {
    // The result cannot be a floating point or SIMD value.
    ASSERT(slot.representation() == kUntagged ||
           RepresentationUtils::IsUnboxedInteger(slot.representation()));
    // Since we only have a single destination register, the result value must
    // fit into a register.
    ASSERT(RepresentationUtils::ValueSize(slot.representation()) <=
           compiler::target::kWordSize);
    auto const sz = RepresentationUtils::OperandSize(slot.representation());
    if (slot.has_untagged_instance()) {
      LoadFromOffset(dst, base, slot.offset_in_bytes(), sz);
    } else {
      LoadFieldFromOffset(dst, base, slot.offset_in_bytes(), sz);
    }
  } else if (slot.has_untagged_instance()) {
    // Non-Dart objects do not contain compressed pointers.
    ASSERT(!slot.is_compressed());
    LoadFromOffset(dst, base, slot.offset_in_bytes());
  } else if (!slot.is_guarded_field() && slot.type().ToCid() == kSmiCid) {
    if (slot.is_compressed()) {
      LoadCompressedSmiFieldFromOffset(dst, base, slot.offset_in_bytes());
    } else {
      LoadSmiFieldFromOffset(dst, base, slot.offset_in_bytes());
    }
  } else {
    if (slot.is_compressed()) {
      LoadCompressedFieldFromOffset(dst, base, slot.offset_in_bytes());
    } else {
      LoadFieldFromOffset(dst, base, slot.offset_in_bytes());
    }
  }
}

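// The StoreToSlot overloads mirror LoadFromSlot for stores: the first derives
// whether the stored value can be a Smi from the slot's static type, and both
// pick between the barrier-free path and the write-barrier path based on the
// slot's tagging, instance kind, and pointer compression.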
void AssemblerBase::StoreToSlot(Register src,
                                Register base,
                                const Slot& slot,
                                MemoryOrder memory_order,
                                Register scratch) {
  auto const can_be_smi =
      slot.type().CanBeSmi() ? kValueCanBeSmi : kValueIsNotSmi;
  StoreToSlot(src, base, slot, can_be_smi, memory_order, scratch);
}

void AssemblerBase::StoreToSlot(Register src,
                                Register base,
                                const Slot& slot,
                                CanBeSmi can_be_smi,
                                MemoryOrder memory_order,
                                Register scratch) {
  if (!slot.is_tagged() || slot.has_untagged_instance()) {
    // Same as the no barrier case.
    StoreToSlotNoBarrier(src, base, slot, memory_order);
  } else if (slot.is_compressed()) {
    StoreCompressedIntoObjectOffset(base, slot.offset_in_bytes(), src,
                                    can_be_smi, memory_order, scratch);
  } else {
    StoreIntoObjectOffset(base, slot.offset_in_bytes(), src, can_be_smi,
                          memory_order, scratch);
  }
}

void AssemblerBase::StoreToSlotNoBarrier(Register src,
                                         Register base,
                                         const Slot& slot,
                                         MemoryOrder memory_order) {
  if (!slot.is_tagged()) {
    // The stored value cannot be a floating point or SIMD value.
    ASSERT(slot.representation() == kUntagged ||
           RepresentationUtils::IsUnboxedInteger(slot.representation()));
    // Since we only have a single source register, the stored value must
    // fit into a register.
    ASSERT(RepresentationUtils::ValueSize(slot.representation()) <=
           compiler::target::kWordSize);
    auto const sz = RepresentationUtils::OperandSize(slot.representation());
    if (slot.has_untagged_instance()) {
      StoreToOffset(src, base, slot.offset_in_bytes(), sz);
    } else {
      StoreFieldToOffset(src, base, slot.offset_in_bytes(), sz);
    }
  } else if (slot.has_untagged_instance()) {
    // Non-Dart objects do not contain compressed pointers.
    ASSERT(!slot.is_compressed());
    StoreToOffset(src, base, slot.offset_in_bytes());
  } else if (slot.is_compressed()) {
    StoreCompressedIntoObjectOffsetNoBarrier(base, slot.offset_in_bytes(), src,
                                             memory_order);
  } else {
    StoreIntoObjectOffsetNoBarrier(base, slot.offset_in_bytes(), src,
                                   memory_order);
  }
}

void AssemblerBase::LoadFromOffset(Register dst,
                                   Register base,
                                   int32_t offset,
                                   OperandSize sz) {
  Load(dst, Address(base, offset), sz);
}

void AssemblerBase::StoreToOffset(Register src,
                                  Register base,
                                  int32_t offset,
                                  OperandSize sz) {
  Store(src, Address(base, offset), sz);
}

void AssemblerBase::LoadField(Register dst,
                              const FieldAddress& address,
                              OperandSize sz) {
  Load(dst, address, sz);
}

void AssemblerBase::LoadFieldFromOffset(Register dst,
                                        Register base,
                                        int32_t offset,
                                        OperandSize sz) {
  Load(dst, FieldAddress(base, offset), sz);
}

void AssemblerBase::StoreFieldToOffset(Register src,
                                       Register base,
                                       int32_t offset,
                                       OperandSize sz) {
  Store(src, FieldAddress(base, offset), sz);
}

void AssemblerBase::LoadSmiField(Register dst, const FieldAddress& address) {
  LoadSmi(dst, address);
}

void AssemblerBase::LoadCompressedField(Register dst,
                                        const FieldAddress& address) {
  LoadCompressed(dst, address);
}

void AssemblerBase::LoadCompressedSmiField(Register dst,
                                           const FieldAddress& address) {
  LoadCompressedSmi(dst, address);
}

void AssemblerBase::LoadAcquireFromOffset(Register dst,
                                          Register base,
                                          int32_t offset,
                                          OperandSize size) {
  LoadAcquire(dst, Address(base, offset), size);
}

void AssemblerBase::StoreReleaseToOffset(Register src,
                                         Register base,
                                         int32_t offset,
                                         OperandSize size) {
  StoreRelease(src, Address(base, offset), size);
}

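// StoreIntoObject performs the store (plain or release) and then emits the
// write barrier via StoreBarrier so the GC is told about the new reference.
// The *NoBarrier variants only perform the store and are correct solely when
// the caller knows no barrier is required.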
void AssemblerBase::StoreIntoObject(Register object,
                                    const Address& address,
                                    Register value,
                                    CanBeSmi can_be_smi,
                                    MemoryOrder memory_order,
                                    Register scratch,
                                    OperandSize size) {
  // A write barrier should never be applied when writing a reference to an
  // object into itself.
  ASSERT(object != value);
  ASSERT(object != scratch);
  ASSERT(value != scratch);
  if (memory_order == kRelease) {
    StoreRelease(value, address, size);
  } else {
    Store(value, address, size);
  }
  StoreBarrier(object, value, can_be_smi, scratch);
}

void AssemblerBase::StoreIntoObjectNoBarrier(Register object,
                                             const Address& address,
                                             Register value,
                                             MemoryOrder memory_order,
                                             OperandSize size) {
  if (memory_order == kRelease) {
    StoreRelease(value, address, size);
  } else {
    Store(value, address, size);
  }
  DEBUG_ONLY(VerifyStoreNeedsNoWriteBarrier(object, value));
}

void AssemblerBase::StoreIntoObjectOffset(Register object,
                                          int32_t offset,
                                          Register value,
                                          CanBeSmi can_be_smi,
                                          MemoryOrder memory_order,
                                          Register scratch,
                                          OperandSize size) {
  StoreIntoObject(object, FieldAddress(object, offset), value, can_be_smi,
                  memory_order, scratch, size);
}

void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object,
                                                   int32_t offset,
                                                   Register value,
                                                   MemoryOrder memory_order,
                                                   OperandSize size) {
  StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
                           memory_order, size);
}

void AssemblerBase::StoreObjectIntoObjectOffsetNoBarrier(
    Register object,
    int32_t offset,
    const Object& value,
    MemoryOrder memory_order,
    OperandSize size) {
  StoreObjectIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
                                 memory_order, size);
}

void AssemblerBase::StoreIntoArray(Register object,
                                   Register slot,
                                   Register value,
                                   CanBeSmi can_be_smi,
                                   Register scratch,
                                   OperandSize size) {
  ASSERT(object != scratch);
  ASSERT(value != object);
  ASSERT(value != scratch);
  ASSERT(slot != object);
  ASSERT(slot != value);
  ASSERT(slot != scratch);
  Store(value, Address(slot, 0), size);
  ArrayStoreBarrier(object, slot, value, can_be_smi, scratch);
}

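// Copies `size` bytes from src_base+src_offset to dst_base+dst_offset with
// straight-line loads and stores through `temp`, taking the widest chunk the
// remaining size allows: for example, a 15-byte copy on a 64-bit target emits
// one 8-byte, one 4-byte, one 2-byte, and one 1-byte move.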
void AssemblerBase::UnrolledMemCopy(Register dst_base,
                                    intptr_t dst_offset,
                                    Register src_base,
                                    intptr_t src_offset,
                                    intptr_t size,
                                    Register temp) {
  intptr_t offset = 0;
  if (target::kWordSize >= 8) {
    while (offset + 8 <= size) {
      LoadFromOffset(temp, src_base, src_offset + offset, kEightBytes);
      StoreToOffset(temp, dst_base, dst_offset + offset, kEightBytes);
      offset += 8;
    }
  }
  while (offset + 4 <= size) {
    LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedFourBytes);
    StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedFourBytes);
    offset += 4;
  }
  while (offset + 2 <= size) {
    LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedTwoBytes);
    StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedTwoBytes);
    offset += 2;
  }
  while (offset + 1 <= size) {
    LoadFromOffset(temp, src_base, src_offset + offset, kUnsignedByte);
    StoreToOffset(temp, dst_base, dst_offset + offset, kUnsignedByte);
    offset += 1;
  }
  ASSERT(offset == size);
}

void AssemblerBase::LoadTypeClassId(Register dst, Register src) {
  if (dst != src) {
    EnsureHasClassIdInDEBUG(kTypeCid, src, dst);
  } else {
#if !defined(TARGET_ARCH_IA32)
    EnsureHasClassIdInDEBUG(kTypeCid, src, TMP);
#else
    // Skip check on IA32 since we don't have TMP.
#endif
  }
  LoadFromSlot(dst, src, Slot::AbstractType_flags());
  LsrImmediate(dst, compiler::target::UntaggedType::kTypeClassIdShift);
}

void AssemblerBase::LoadAbstractTypeNullability(Register dst, Register type) {
  LoadFromSlot(dst, type, Slot::AbstractType_flags());
  AndImmediate(dst, compiler::target::UntaggedAbstractType::kNullabilityMask);
}

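// Pads the instruction stream with breakpoints up to the next target-word
// boundary, then emits the relocation's BSS index (scaled by the word size)
// as data and returns the offset at which that word was written.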
intptr_t AssemblerBase::InsertAlignedRelocation(BSS::Relocation reloc) {
  // We cannot put a relocation at the very start (it's not a valid
  // instruction)!
  ASSERT(CodeSize() != 0);

  // Align to a target word boundary.
  const intptr_t offset =
      Utils::RoundUp(CodeSize(), compiler::target::kWordSize);

  while (CodeSize() < offset) {
    Breakpoint();
  }
  ASSERT(CodeSize() == offset);

  buffer_.Emit<compiler::target::word>(BSS::RelocationIndex(reloc) *
                                       compiler::target::kWordSize);

  ASSERT(CodeSize() == (offset + compiler::target::kWordSize));

  return offset;
}

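// The two MsanUnpoison helpers call the kMsanUnpoisonRuntimeEntry leaf
// runtime entry with (base, length) in the first two argument registers,
// preserving the caller's registers. The second overload takes the length in
// a register and shuffles operands so neither argument is clobbered.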
void AssemblerBase::MsanUnpoison(Register base, intptr_t length_in_bytes) {
  Comment("MsanUnpoison base %s length_in_bytes %" Pd,
          RegisterNames::RegisterName(base), length_in_bytes);
  LeafRuntimeScope rt(static_cast<Assembler*>(this), /*frame_size=*/0,
                      /*preserve_registers=*/true);
  MoveRegister(CallingConventions::ArgumentRegisters[0], base);
  LoadImmediate(CallingConventions::ArgumentRegisters[1], length_in_bytes);
  rt.Call(kMsanUnpoisonRuntimeEntry, /*argument_count=*/2);
}

void AssemblerBase::MsanUnpoison(Register base, Register length_in_bytes) {
  Comment("MsanUnpoison base %s length_in_bytes %s",
          RegisterNames::RegisterName(base),
          RegisterNames::RegisterName(length_in_bytes));
  LeafRuntimeScope rt(static_cast<Assembler*>(this), /*frame_size=*/0,
                      /*preserve_registers=*/true);
  const Register a0 = CallingConventions::ArgumentRegisters[0];
  const Register a1 = CallingConventions::ArgumentRegisters[1];
  if (length_in_bytes == a0) {
    if (base == a1) {
      MoveRegister(TMP, length_in_bytes);
      MoveRegister(a0, base);
      MoveRegister(a1, TMP);
    } else {
      MoveRegister(a1, length_in_bytes);
      MoveRegister(a0, base);
    }
  } else {
    MoveRegister(a0, base);
    MoveRegister(a1, length_in_bytes);
  }
  rt.Call(kMsanUnpoisonRuntimeEntry, /*argument_count=*/2);
}

#if defined(DEBUG)
static void InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64)
  ASSERT(Utils::IsAligned(data, 4));
  ASSERT(Utils::IsAligned(length, 4));
  const uword end = data + length;
  while (data < end) {
    *reinterpret_cast<int32_t*>(data) = Instr::kBreakPointInstruction;
    data += 4;
  }
#else
  memset(reinterpret_cast<void*>(data), Instr::kBreakPointInstruction, length);
#endif
}
#endif

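// Allocates a fresh code buffer of `capacity` bytes in the current thread's
// zone; in DEBUG builds the memory is pre-filled with breakpoint instructions
// so that executing an uninitialized part of the buffer traps immediately.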
static uword NewContents(intptr_t capacity) {
  Zone* zone = Thread::Current()->zone();
  uword result = zone->AllocUnsafe(capacity);
#if defined(DEBUG)
  // Initialize the buffer with kBreakPointInstruction to force a break
  // point if we ever execute an uninitialized part of the code buffer.
  InitializeMemoryWithBreakpoints(result, capacity);
#endif
  return result;
}

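// In DEBUG builds, EnsureCapacity snapshots the remaining gap when emission
// of an instruction begins and checks on destruction that the instruction
// consumed no more than kMinimumGap bytes, so a single emit can never run
// past the buffer limit.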
#if defined(DEBUG)
AssemblerBuffer::EnsureCapacity::EnsureCapacity(AssemblerBuffer* buffer) {
  if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
  // In debug mode, we save the assembler buffer along with the gap
  // size before we start emitting to the buffer. This allows us to
  // check that any single generated instruction doesn't overflow the
  // limit implied by the minimum gap size.
  buffer_ = buffer;
  gap_ = ComputeGap();
  // Make sure that extending the capacity leaves a big enough gap
  // for any kind of instruction.
  ASSERT(gap_ >= kMinimumGap);
  // Mark the buffer as having ensured the capacity.
  ASSERT(!buffer->HasEnsuredCapacity());  // Cannot nest.
  buffer->has_ensured_capacity_ = true;
}

AssemblerBuffer::EnsureCapacity::~EnsureCapacity() {
  // Unmark the buffer, so we cannot emit after this.
  buffer_->has_ensured_capacity_ = false;
  // Make sure the generated instruction doesn't take up more
  // space than the minimum gap.
  intptr_t delta = gap_ - ComputeGap();
  ASSERT(delta <= kMinimumGap);
}
#endif

AssemblerBuffer::AssemblerBuffer()
    : pointer_offsets_(new ZoneGrowableArray<intptr_t>(16)) {
  const intptr_t kInitialBufferCapacity = 4 * KB;
  contents_ = NewContents(kInitialBufferCapacity);
  cursor_ = contents_;
  limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
  fixup_ = nullptr;
#if defined(DEBUG)
  has_ensured_capacity_ = false;
  fixups_processed_ = false;
#endif

  // Verify internal state.
  ASSERT(Capacity() == kInitialBufferCapacity);
  ASSERT(Size() == 0);
}

void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
  AssemblerFixup* fixup = fixup_;
  while (fixup != nullptr) {
    fixup->Process(region, fixup->position());
    fixup = fixup->previous();
  }
}

void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
  instructions.CopyFrom(0, from);

  // Process fixups in the instructions.
  ProcessFixups(instructions);
#if defined(DEBUG)
  fixups_processed_ = true;
#endif
}

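// Grows the buffer geometrically (doubling, but by at most 1 MB per step),
// copies the old contents into the new allocation, and rebases the cursor and
// limit by the relocation delta.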
void AssemblerBuffer::ExtendCapacity() {
  intptr_t old_size = Size();
  intptr_t old_capacity = Capacity();
  intptr_t new_capacity =
      Utils::Minimum(old_capacity * 2, old_capacity + 1 * MB);
  if (new_capacity < old_capacity) {
    FATAL("Unexpected overflow in AssemblerBuffer::ExtendCapacity");
  }

  // Allocate the new data area and copy contents of the old one to it.
  uword new_contents = NewContents(new_capacity);
  memmove(reinterpret_cast<void*>(new_contents),
          reinterpret_cast<void*>(contents_), old_size);

  // Compute the relocation delta and switch to the new contents area.
  intptr_t delta = new_contents - contents_;
  contents_ = new_contents;

  // Update the cursor and recompute the limit.
  cursor_ += delta;
  limit_ = ComputeLimit(new_contents, new_capacity);

  // Verify internal state.
  ASSERT(Capacity() == new_capacity);
  ASSERT(Size() == old_size);
}

class PatchCodeWithHandle : public AssemblerFixup {
 public:
  PatchCodeWithHandle(ZoneGrowableArray<intptr_t>* pointer_offsets,
                      const Object& object)
      : pointer_offsets_(pointer_offsets), object_(object) {}

  void Process(const MemoryRegion& region, intptr_t position) {
    // Patch the handle into the code. Once the instructions are installed into
    // a raw code object and the pointer offsets are setup, the handle is
    // resolved.
    region.StoreUnaligned<const Object*>(position, &object_);
    pointer_offsets_->Add(position);
  }

  virtual bool IsPointerOffset() const { return true; }

 private:
  ZoneGrowableArray<intptr_t>* pointer_offsets_;
  const Object& object_;
};

intptr_t AssemblerBuffer::CountPointerOffsets() const {
  intptr_t count = 0;
  AssemblerFixup* current = fixup_;
  while (current != nullptr) {
    if (current->IsPointerOffset()) ++count;
    current = current->previous_;
  }
  return count;
}

#if defined(TARGET_ARCH_IA32)
void AssemblerBuffer::EmitObject(const Object& object) {
  // Since we are going to store the handle as part of the fixup information
  // the handle needs to be a zone handle.
  DEBUG_ASSERT(IsNotTemporaryScopedHandle(object));
  ASSERT(IsInOldSpace(object));
  EmitFixup(new PatchCodeWithHandle(pointer_offsets_, object));
  cursor_ += target::kWordSize;  // Reserve space for pointer.
}
#endif

// Shared macros are implemented here.
void AssemblerBase::Unimplemented(const char* message) {
  const char* format = "Unimplemented: %s";
  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Untested(const char* message) {
  const char* format = "Untested: %s";
  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Unreachable(const char* message) {
  const char* format = "Unreachable: %s";
  const intptr_t len = Utils::SNPrint(nullptr, 0, format, message);
  char* buffer = reinterpret_cast<char*>(malloc(len + 1));
  Utils::SNPrint(buffer, len + 1, format, message);
  Stop(buffer);
}

void AssemblerBase::Comment(const char* format, ...) {
  if (EmittingComments()) {
    char buffer[1024];

    va_list args;
    va_start(args, format);
    Utils::VSNPrint(buffer, sizeof(buffer), format, args);
    va_end(args);

    comments_.Add(
        new CodeComment(buffer_.GetPosition(), AllocateString(buffer)));
  }
}

bool AssemblerBase::EmittingComments() {
  return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized ||
         FLAG_disassemble_stubs;
}

void AssemblerBase::Stop(const char* message) {
  Comment("Stop: %s", message);
  Breakpoint();
}

uword ObjIndexPair::Hash(Key key) {
  switch (key.type()) {
    case ObjectPoolBuilderEntry::kImmediate128:
      return key.imm128_.int_storage[0] ^ key.imm128_.int_storage[1] ^
             key.imm128_.int_storage[2] ^ key.imm128_.int_storage[3];

#if defined(TARGET_ARCH_IS_32_BIT)
    case ObjectPoolBuilderEntry::kImmediate64:
      return key.imm64_;
#endif

    case ObjectPoolBuilderEntry::kImmediate:
    case ObjectPoolBuilderEntry::kNativeFunction:
      return key.imm_;

    case ObjectPoolBuilderEntry::kTaggedObject:
      return ObjectHash(*key.obj_);
  }

  UNREACHABLE();
}

void ObjectPoolBuilder::Reset() {
  // Null out the handles we've accumulated.
  for (intptr_t i = 0; i < object_pool_.length(); ++i) {
    if (object_pool_[i].type() == ObjectPoolBuilderEntry::kTaggedObject) {
      SetToNull(const_cast<Object*>(object_pool_[i].obj_));
      SetToNull(const_cast<Object*>(object_pool_[i].equivalence_));
    }
  }

  object_pool_.Clear();
  object_pool_index_table_.Clear();
}

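// The ObjectPoolBuilder methods below add objects and immediates to the pool
// being built. Non-patchable entries are also recorded in
// object_pool_index_table_ so later FindObject/FindImmediate calls can reuse
// an existing index instead of growing the pool.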
intptr_t ObjectPoolBuilder::AddObject(
    const Object& obj,
    ObjectPoolBuilderEntry::Patchability patchable,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  DEBUG_ASSERT(IsNotTemporaryScopedHandle(obj));
  return AddObject(ObjectPoolBuilderEntry(&obj, patchable, snapshot_behavior));
}

intptr_t ObjectPoolBuilder::AddImmediate(
    uword imm,
    ObjectPoolBuilderEntry::Patchability patchable,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshotability) {
  return AddObject(ObjectPoolBuilderEntry(
      imm, ObjectPoolBuilderEntry::kImmediate, patchable, snapshotability));
}

intptr_t ObjectPoolBuilder::AddImmediate64(uint64_t imm) {
#if defined(TARGET_ARCH_IS_32_BIT)
  return AddObject(
      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate64,
                             ObjectPoolBuilderEntry::kNotPatchable));
#else
  return AddImmediate(imm);
#endif
}

intptr_t ObjectPoolBuilder::AddObject(ObjectPoolBuilderEntry entry) {
  DEBUG_ASSERT(entry.type() != ObjectPoolBuilderEntry::kTaggedObject ||
               (IsNotTemporaryScopedHandle(*entry.obj_) &&
                (entry.equivalence_ == nullptr ||
                 IsNotTemporaryScopedHandle(*entry.equivalence_))));

  if (entry.type() == ObjectPoolBuilderEntry::kTaggedObject) {
    // If the owner of the object pool wrapper specified a specific zone we
    // should use we'll do so.
    if (zone_ != nullptr) {
      entry.obj_ = &NewZoneHandle(zone_, *entry.obj_);
      if (entry.equivalence_ != nullptr) {
        entry.equivalence_ = &NewZoneHandle(zone_, *entry.equivalence_);
      }
    }
  }

#if defined(TARGET_ARCH_IS_32_BIT)
  if (entry.type() == ObjectPoolBuilderEntry::kImmediate64) {
    uint64_t imm = entry.imm64_;
    intptr_t idx = AddImmediate(Utils::Low32Bits(imm));
    AddImmediate(Utils::High32Bits(imm));
    object_pool_index_table_.Insert(ObjIndexPair(entry, idx));
    return idx;
  }
  if (entry.type() == ObjectPoolBuilderEntry::kImmediate128) {
    intptr_t idx = AddImmediate(entry.imm128_.int_storage[0]);
    AddImmediate(entry.imm128_.int_storage[1]);
    AddImmediate(entry.imm128_.int_storage[2]);
    AddImmediate(entry.imm128_.int_storage[3]);
    object_pool_index_table_.Insert(ObjIndexPair(entry, idx));
    return idx;
  }
#else
  if (entry.type() == ObjectPoolBuilderEntry::kImmediate128) {
    uword lo64 =
        (static_cast<uword>(entry.imm128_.int_storage[0]) & 0xffffffff) |
        (static_cast<uword>(entry.imm128_.int_storage[1]) << 32);
    uword hi64 =
        (static_cast<uword>(entry.imm128_.int_storage[2]) & 0xffffffff) |
        (static_cast<uword>(entry.imm128_.int_storage[3]) << 32);
    intptr_t idx = AddImmediate(lo64);
    AddImmediate(hi64);
    object_pool_index_table_.Insert(ObjIndexPair(entry, idx));
    return idx;
  }
#endif

  const intptr_t idx = base_index_ + object_pool_.length();
  object_pool_.Add(entry);
  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
    // The object isn't patchable. Record the index for fast lookup.
    object_pool_index_table_.Insert(ObjIndexPair(entry, idx));
  }
  return idx;
}

intptr_t ObjectPoolBuilder::FindObject(ObjectPoolBuilderEntry entry) {
  // If the object is not patchable, check if we've already got it in the
  // object pool.
  if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
    // First check in the parent pool if we have one.
    if (parent_ != nullptr) {
      const intptr_t idx = parent_->object_pool_index_table_.LookupValue(entry);
      if (idx != ObjIndexPair::kNoIndex) {
        used_from_parent_.Add(idx);
        return idx;
      }
    }

    const intptr_t idx = object_pool_index_table_.LookupValue(entry);
    if (idx != ObjIndexPair::kNoIndex) {
      return idx;
    }
  }
  return AddObject(entry);
}

intptr_t ObjectPoolBuilder::FindObject(
    const Object& obj,
    ObjectPoolBuilderEntry::Patchability patchable,
    ObjectPoolBuilderEntry::SnapshotBehavior snapshot_behavior) {
  return FindObject(ObjectPoolBuilderEntry(&obj, patchable, snapshot_behavior));
}

intptr_t ObjectPoolBuilder::FindObject(const Object& obj,
                                       const Object& equivalence) {
  return FindObject(ObjectPoolBuilderEntry(
      &obj, &equivalence, ObjectPoolBuilderEntry::kNotPatchable));
}

intptr_t ObjectPoolBuilder::FindImmediate64(uint64_t imm) {
#if defined(TARGET_ARCH_IS_32_BIT)
  return FindObject(
      ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate64,
                             ObjectPoolBuilderEntry::kNotPatchable));
#else
  return FindImmediate(imm);
#endif
}

bool ObjectPoolBuilder::TryCommitToParent() {
  ASSERT(parent_ != nullptr);
  if (parent_->CurrentLength() != base_index_) {
    return false;
  }
  for (intptr_t i = 0; i < object_pool_.length(); i++) {
    intptr_t idx = parent_->AddObject(object_pool_[i]);
    ASSERT(idx == (base_index_ + i));
  }
  return true;
}

}  // namespace compiler

}  // namespace dart