5#ifndef RUNTIME_VM_OBJECT_GRAPH_H_
6#define RUNTIME_VM_OBJECT_GRAPH_H_
20#if defined(DART_ENABLE_HEAP_SNAPSHOT_WRITER)
// Utility for traversing the graph of heap objects (used by the heap
// snapshot writer, per the surrounding DART_ENABLE_HEAP_SNAPSHOT_WRITER guard).
// NOTE(review): this declaration is fragmentary — original file line numbers
// are fused into the text and several lines are elided from this view.
26class ObjectGraph :
public ThreadStackResource {
// Nested StackIterator fragment: returns the object at the current
// traversal-stack slot.
34 ObjectPtr
Get()
const;
// Offset within the parent object of the pointer to the current object.
38 intptr_t OffsetFromParent()
const;
// Constructed only by ObjectGraph::Stack (see friend declaration below).
41 StackIterator(
const Stack* stack, intptr_t index)
42 : stack_(stack), index_(index) {}
45 friend class ObjectGraph::Stack;
// Visitor callback, invoked per visited object; the returned Direction
// presumably steers the traversal (continue/backtrack/abort) — confirm
// against the Direction enum, which is elided here.
61 virtual Direction VisitObject(StackIterator* it) = 0;
// Subclasses override to opt in to visiting weak persistent handles too.
63 virtual bool visit_weak_persistent_handles()
const {
return false; }
// Which kind of GC root reached the current object (default: none).
65 const char* gc_root_type =
nullptr;
66 bool is_traversing =
false;
// Result of RetainingPath(): includes the root kind anchoring the path.
71 const char* gc_root_type;
72 } RetainingPathResult;
74 explicit ObjectGraph(Thread* thread);
// Visit every heap object; the "User" variant presumably filters out
// VM-internal objects — TODO confirm filtering in the definition.
79 void IterateObjects(Visitor* visitor);
80 void IterateUserObjects(Visitor* visitor);
// Visit only objects transitively reachable from `root`.
84 void IterateObjectsFrom(
const Object& root, Visitor* visitor);
// Overload seeded from all instances of `class_id`; trailing parameters
// are elided from this view.
85 void IterateObjectsFrom(intptr_t class_id,
86 HeapIterationScope* iteration,
// Retained size: bytes kept alive solely by `obj`.
// Reachable size: bytes transitively reachable from `obj`.
90 intptr_t SizeRetainedByInstance(
const Object& obj);
91 intptr_t SizeReachableByInstance(
const Object& obj);
// The same two measures aggregated over every instance of a class.
94 intptr_t SizeRetainedByClass(intptr_t class_id);
95 intptr_t SizeReachableByClass(intptr_t class_id);
// Fills `path` with a chain from `obj` back toward a GC root; the result
// also reports the root kind (see RetainingPathResult above).
106 RetainingPathResult RetainingPath(Object* obj,
const Array& path);
// Fills `references` with objects that point at `obj`; returns an
// intptr_t — presumably the number of inbound references found; confirm.
114 intptr_t InboundReferences(Object* obj,
const Array& references);
// Abstract sink that receives the serialized heap snapshot as a sequence of
// buffers ("chunks"). Subclasses route chunks to a file, a callback, or the
// VM service (see classes below).
120class ChunkedWriter :
public ThreadStackResource {
122 explicit ChunkedWriter(Thread* thread) : ThreadStackResource(thread) {}
// Number of bytes a subclass wants reserved at the front of every chunk
// for its own prefix/metadata; the base class reserves nothing.
124 virtual intptr_t ReserveChunkPrefixSize() {
return 0; }
// Delivers one chunk; `last` marks the final chunk of the stream.
// NOTE(review): buffer ownership transfer is not visible in this fragment —
// confirm whether the callee must free `buffer`. Also confirm a virtual
// destructor exists on the elided lines (required for a polymorphic base).
127 virtual void WriteChunk(uint8_t*
buffer, intptr_t size,
bool last) = 0;
// ChunkedWriter that writes snapshot chunks to a file.
130class FileHeapSnapshotWriter :
public ChunkedWriter {
// Opens `filename`; `success`, when non-null, presumably reports whether
// the open succeeded — TODO confirm in the definition.
132 FileHeapSnapshotWriter(Thread* thread,
133 const char* filename,
134 bool* success =
nullptr);
135 ~FileHeapSnapshotWriter();
// Appends one chunk to the open file (see ChunkedWriter::WriteChunk).
137 virtual void WriteChunk(uint8_t*
buffer, intptr_t size,
bool last);
// Opaque file handle owned by this object; presumably closed by the
// destructor declared above — verify in the .cc file.
140 void* file_ =
nullptr;
// ChunkedWriter that forwards each chunk to a user-supplied callback.
// NOTE(review): the callback parameter(s) and member field are on lines
// elided from this view.
144class CallbackHeapSnapshotWriter :
public ChunkedWriter {
146 CallbackHeapSnapshotWriter(Thread* thread,
149 ~CallbackHeapSnapshotWriter();
// Invokes the stored callback with the chunk data.
151 virtual void WriteChunk(uint8_t*
buffer, intptr_t size,
bool last);
// ChunkedWriter that ships chunks over the VM service, reserving room at the
// head of every chunk for service-protocol metadata.
158class VmServiceHeapSnapshotChunkedWriter :
public ChunkedWriter {
160 explicit VmServiceHeapSnapshotChunkedWriter(Thread* thread)
161 : ChunkedWriter(thread) {}
// Ask the snapshot writer to leave a fixed-size prefix in each chunk; the
// metadata is presumably filled in by WriteChunk before sending — confirm.
163 virtual intptr_t ReserveChunkPrefixSize() {
return kMetadataReservation; }
164 virtual void WriteChunk(uint8_t*
buffer, intptr_t size,
bool last);
// 512 bytes reserved per chunk for the service event envelope —
// presumably sized to fit the largest metadata header; confirm.
167 static constexpr intptr_t kMetadataReservation = 512;
// Serializes the heap into the binary heap-snapshot format, streaming the
// encoded bytes to a ChunkedWriter.
// NOTE(review): fragmentary view — original line numbers are fused into the
// text and many lines (closing braces, loop tails) are elided.
172class HeapSnapshotWriter :
public ThreadStackResource {
// `writer` is borrowed, not owned (raw pointer member below).
174 HeapSnapshotWriter(Thread* thread, ChunkedWriter* writer)
175 : ThreadStackResource(thread), writer_(writer) {}
// image_page_ranges_ is a malloc-allocated array (see member below).
176 ~HeapSnapshotWriter() { free(image_page_ranges_); }
// Emits `value` as a signed little-endian base-128 varint (SLEB128-style):
// 7 payload bits per byte; the loop stops once the remaining value is pure
// sign-extension (0 with bit 6 clear, or -1 with bit 6 set).
178 void WriteSigned(int64_t value) {
// Worst case: one output byte per 7 input bits, plus a terminator.
179 EnsureAvailable((
sizeof(value) * kBitsPerByte) / 7 + 1);
181 bool is_last_part =
false;
182 while (!is_last_part) {
183 uint8_t part =
value & 0x7F;
// Terminate when bit 6 of this part already encodes the sign of the
// remaining (shifted-out) value.
185 if ((value == 0 && (part & 0x40) == 0) ||
186 (value ==
static_cast<intptr_t
>(-1) && (part & 0x40) != 0)) {
191 buffer_[size_++] = part;
// Unsigned LEB128-style counterpart of WriteSigned (termination and
// continuation-bit handling are on elided lines).
195 void WriteUnsigned(uintptr_t value) {
196 EnsureAvailable((
sizeof(value) * kBitsPerByte) / 7 + 1);
198 bool is_last_part =
false;
199 while (!is_last_part) {
200 uint8_t part =
value & 0x7F;
207 buffer_[size_++] = part;
// Raw byte copy into the output buffer (memmove tolerates overlap).
211 void WriteBytes(
const void* bytes, intptr_t len) {
212 EnsureAvailable(len);
213 memmove(&buffer_[size_], bytes, len);
// Writes a UTF-8 string after scrubbing: scans backwards for '@' —
// presumably to strip private-name mangling; the mutation itself is on
// elided lines, confirm in the full source.
217 void ScrubAndWriteUtf8(
char* value) {
218 intptr_t
len = strlen(value);
219 for (intptr_t i = len - 1; i >= 0; i--) {
220 if (value[i] ==
'@') {
// Writes a UTF-8 string; only the byte copy is visible — a length
// prefix, if any, would be on the elided line 229.
227 void WriteUtf8(
const char* value) {
228 intptr_t
len = strlen(value);
230 WriteBytes(value, len);
// Object-id table: assign, look up, and reset per-object snapshot ids.
233 void AssignObjectId(ObjectPtr obj);
234 intptr_t GetObjectId(ObjectPtr obj)
const;
235 void ClearObjectIds();
// Counters accumulated during the counting pass (see *_count_ members).
236 void CountReferences(intptr_t
count);
237 void CountExternalProperty();
238 void AddSmi(SmiPtr smi);
// Identity hash used to correlate objects across snapshots.
242 static uint32_t GetHeapSnapshotIdentityHash(Thread* thread, ObjectPtr obj);
245 static uint32_t GetHashHelper(Thread* thread, ObjectPtr obj);
// Target chunk size handed to the ChunkedWriter (~1 MB).
247 static constexpr intptr_t kPreferredChunkSize =
MB;
249 void SetupImagePageBoundaries();
250 void SetupCountingPages();
// True if `obj` lives inside one of the recorded image page ranges.
251 bool OnImagePage(ObjectPtr obj)
const;
252 CountingPage* FindCountingPage(ObjectPtr obj)
const;
// Ensure at least `needed` bytes can be appended to buffer_ (grow/flush).
254 void EnsureAvailable(intptr_t needed);
// Hand the current buffer to writer_; `last` marks end of stream.
255 void Flush(
bool last =
false);
// Output sink — borrowed, never freed here.
257 ChunkedWriter* writer_ =
nullptr;
// Current chunk buffer plus bookkeeping.
259 uint8_t* buffer_ =
nullptr;
261 intptr_t capacity_ = 0;
// Totals gathered before emission (counting pass).
263 intptr_t class_count_ = 0;
264 intptr_t object_count_ = 0;
265 intptr_t reference_count_ = 0;
266 intptr_t external_property_count_ = 0;
// Address span of an image page; sorted with CompareImagePageRanges.
268 struct ImagePageRange {
// qsort-style three-way comparison by range start (body partially elided).
272 static int CompareImagePageRanges(
const ImagePageRange*
a,
273 const ImagePageRange*
b) {
274 if (
a->start <
b->start) {
276 }
else if (
a->start ==
b->start) {
// malloc-allocated array of image page ranges; freed in the destructor.
282 intptr_t image_page_hi_ = 0;
283 ImagePageRange* image_page_ranges_ =
nullptr;
// Smis collected during counting — tracked separately, presumably because
// smis are immediates rather than heap objects; confirm usage in the .cc.
285 MallocGrowableArray<SmiPtr> smis_;
// Visitor that tallies per-class object counts and sizes, split between
// new-space and old-space. The arrays below are presumably indexed by class
// id with length `class_count` — confirm against the constructor definition.
290class CountObjectsVisitor :
public ObjectVisitor,
public HandleVisitor {
292 CountObjectsVisitor(Thread* thread, intptr_t class_count);
293 ~CountObjectsVisitor() {}
// Called for each heap object: bump the count/size for its class.
295 void VisitObject(ObjectPtr obj)
override;
// Called for each weak persistent handle: accounts external size —
// presumably attributed to the referent's class; confirm in the .cc.
296 void VisitHandle(uword addr)
override;
// Per-class tallies for new-space objects.
298 std::unique_ptr<intptr_t[]> new_count_;
299 std::unique_ptr<intptr_t[]> new_size_;
300 std::unique_ptr<intptr_t[]> new_external_size_;
// Per-class tallies for old-space objects.
301 std::unique_ptr<intptr_t[]> old_count_;
302 std::unique_ptr<intptr_t[]> old_size_;
303 std::unique_ptr<intptr_t[]> old_external_size_;
FlKeyEvent uint64_t FlKeyResponderAsyncCallback callback
static const uint8_t buffer[]
const GrXPFactory * Get(SkBlendMode mode)
Visitor(Ts...) -> Visitor< Ts... >
void Flush(SkSurface *surface)