Flutter Engine
The Flutter Engine
object_graph.cc
Go to the documentation of this file.
1// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/object_graph.h"
6
7#include "vm/dart.h"
8#include "vm/dart_api_state.h"
9#include "vm/growable_array.h"
10#include "vm/isolate.h"
11#include "vm/native_symbol.h"
12#include "vm/object.h"
13#include "vm/object_store.h"
14#include "vm/profiler.h"
15#include "vm/raw_object.h"
17#include "vm/reusable_handles.h"
18#include "vm/visitor.h"
19
20namespace dart {
21
22#if defined(DART_ENABLE_HEAP_SNAPSHOT_WRITER)
23
24static bool IsUserClass(intptr_t cid) {
25 if (cid == kContextCid) return true;
26 if (cid == kTypeArgumentsCid) return false;
27 return cid >= kInstanceCid;
28}
29
30// A slot in the fixed-size portion of a heap object.
31//
32// This may be a regular Dart field, an unboxed Dart field, or
33// a slot of any type in a predefined layout.
34struct ObjectSlot {
35 uint16_t offset;
36 bool is_compressed_pointer;
37 const char* name;
38 ObjectSlot(uint16_t offset, bool is_compressed_pointer, const char* name)
39 : offset(offset),
40 is_compressed_pointer(is_compressed_pointer),
41 name(name) {}
42};
43
44class ObjectSlots {
45 public:
46 using ObjectSlotsType = ZoneGrowableArray<ObjectSlot>;
47
48 explicit ObjectSlots(Thread* thread) {
49 auto class_table = thread->isolate_group()->class_table();
50 const intptr_t class_count = class_table->NumCids();
51
52 HANDLESCOPE(thread);
53 auto& cls = Class::Handle(thread->zone());
54 auto& fields = Array::Handle(thread->zone());
55 auto& field = Field::Handle(thread->zone());
56 auto& name = String::Handle(thread->zone());
57
58 cid2object_slots_.FillWith(nullptr, 0, class_count);
59 contains_only_tagged_words_.FillWith(false, 0, class_count);
60
61 for (intptr_t cid = 1; cid < class_count; cid++) {
62 if (!class_table->HasValidClassAt(cid)) continue;
63
64 // Non-finalized classes are abstract, so we will not collect any field
65 // information for them.
66 cls = class_table->At(cid);
67 if (!cls.is_finalized()) continue;
68
69 auto slots = cid2object_slots_[cid] = new ObjectSlotsType();
70 for (const auto& entry : OffsetsTable::offsets_table()) {
71 if (entry.class_id == cid) {
72 slots->Add(ObjectSlot(entry.offset, entry.is_compressed_pointer,
73 entry.field_name));
74 }
75 }
76
77 // The VM doesn't define a layout for the object, so it's a regular Dart
78 // class.
79 if (slots->is_empty()) {
80 // If the class has native fields, the native fields array is the first
81 // field and therefore starts after the `kWordSize` tagging word.
82 if (cls.num_native_fields() > 0) {
83 slots->Add(ObjectSlot(kWordSize, true, "native_fields"));
84 }
85 // If the class or any super class is generic, it will have a type
86 // arguments vector.
87 const auto tav_offset = cls.host_type_arguments_field_offset();
88 if (tav_offset != Class::kNoTypeArguments) {
89 slots->Add(ObjectSlot(tav_offset, true, "type_arguments"));
90 }
91
92 // Add slots for all user-defined instance fields in the hierarchy.
93 while (!cls.IsNull()) {
94 fields = cls.fields();
95 if (!fields.IsNull()) {
96 for (intptr_t i = 0; i < fields.Length(); ++i) {
97 field ^= fields.At(i);
98 if (!field.is_instance()) continue;
99 name = field.name();
100 // If the field is unboxed, we don't know the size of it (may be
101 // multiple words) - but that doesn't matter because
102 // a) we will process instances using the slots we collect
103 // (instead of regular GC visitor);
104 // b) we will not write the value of the field and instead treat
105 // it like a dummy reference to 0 (like we do with Smis).
106 slots->Add(ObjectSlot(field.HostOffset(), !field.is_unboxed(),
107 name.ToCString()));
108 }
109 }
110 cls = cls.SuperClass();
111 }
112 }
113
114 // We sort the slots, so we'll visit them in memory order.
115 slots->Sort([](const ObjectSlot* a, const ObjectSlot* b) {
116 return a->offset - b->offset;
117 });
118
119 // As an optimization, as well as to support variable-length data, we
120 // remember whether this class contains only tagged pointers, in which case
121 // we can safely use the regular GC visitors.
122 bool contains_only_tagged_words = true;
123 for (auto& slot : *slots) {
124 if (!slot.is_compressed_pointer) {
125 contains_only_tagged_words = false;
126 break;
127 }
128 }
129#if defined(DEBUG)
130 // For pure pointer objects, the slots have to start after the tagging word
131 // and be without holes (otherwise, e.g. if a slot was not declared,
132 // the visitors will visit them but we won't emit the field description in
133 // the heap snapshot).
134 if (contains_only_tagged_words) {
135 intptr_t expected_offset = kWordSize;
136 for (auto& slot : *slots) {
137 RELEASE_ASSERT(slot.offset == expected_offset);
138 expected_offset += kCompressedWordSize;
139 }
140 }
141 ASSERT(contains_only_tagged_words ||
142 (cid != kArrayCid && cid != kImmutableArrayCid));
143#endif // defined(DEBUG)
144
145 contains_only_tagged_words_[cid] = contains_only_tagged_words;
146 }
147 }
148
149 const ObjectSlotsType* ObjectSlotsFor(intptr_t cid) const {
150 return cid2object_slots_[cid];
151 }
152
153 // Returns `true` if all fields are tagged (i.e. no unboxed fields).
154 bool ContainsOnlyTaggedPointers(intptr_t cid) {
155 return contains_only_tagged_words_[cid];
156 }
157
158 private:
159 GrowableArray<ObjectSlotsType*> cid2object_slots_;
160 GrowableArray<bool> contains_only_tagged_words_;
161};
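// Illustrative example (annotation, not original source): for a hypothetical
// Dart class `class Point { double x = 0; int hash = 0; }`, the table built
// above would hold one ObjectSlot per instance field, e.g. {offset_of_x,
// is_compressed_pointer=false} if the VM unboxes the double, and
// {offset_of_hash, is_compressed_pointer=true}, sorted by offset. Because of
// the unboxed slot, ContainsOnlyTaggedPointers() would report false and the
// snapshot passes below walk such instances slot by slot instead of using the
// generic pointer visitors. Exact offsets depend on word size and field layout.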
162
163// The state of a pre-order, depth-first traversal of an object graph.
164// When a node is visited, *all* its children are pushed to the stack at once.
165// We insert a sentinel between the node and its children on the stack, to
166// remember that the node has been visited. The node is kept on the stack while
167// its children are processed, to give the visitor a complete chain of parents.
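// Illustrative trace (annotation, not original source): for a root R with
// children A and B, where A has a child C, the stack evolves roughly as
//   [R] -> [R,*,A,B] -> [R,*,A,B,*] -> [R,*,A] -> [R,*,A,*,C]
//       -> [R,*,A,*,C,*] -> [R,*,A,*] -> [R,*] -> []
// where '*' marks a sentinel. While C is on top, Parent() walks down to the
// nearest sentinel below it and finds A, and below that R, so the visitor can
// reconstruct the full chain of parents.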
168//
169// TODO(koda): Potential optimizations:
170// - Use tag bits for compact Node and sentinel representations.
171class ObjectGraph::Stack : public ObjectPointerVisitor {
172 public:
173 explicit Stack(IsolateGroup* isolate_group)
174 : ObjectPointerVisitor(isolate_group),
175 include_vm_objects_(true),
176 data_(kInitialCapacity) {
177 object_ids_ = new WeakTable();
178 }
179 ~Stack() {
180 delete object_ids_;
181 object_ids_ = nullptr;
182 }
183
184 bool trace_values_through_fields() const override { return true; }
185
186 // Marks and pushes. Used to initialize this stack with roots.
187 // We can use ObjectIdTable normally used by serializers because it
188 // won't be in use while handling a service request (ObjectGraph's only use).
189 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
190 for (ObjectPtr* current = first; current <= last; ++current) {
191 Visit(current, *current);
192 }
193 }
194
195#if defined(DART_COMPRESSED_POINTERS)
196 void VisitCompressedPointers(uword heap_base,
197 CompressedObjectPtr* first,
198 CompressedObjectPtr* last) override {
199 for (CompressedObjectPtr* current = first; current <= last; ++current) {
200 Visit(current, current->Decompress(heap_base));
201 }
202 }
203#endif
204
205 void Visit(void* ptr, ObjectPtr obj) {
206 if (obj->IsHeapObject() && !obj->untag()->InVMIsolateHeap() &&
207 object_ids_->GetValueExclusive(obj) == 0) { // not visited yet
208 if (!include_vm_objects_ && !IsUserClass(obj->GetClassId())) {
209 return;
210 }
211 object_ids_->SetValueExclusive(obj, 1);
212 Node node;
213 node.ptr = ptr;
214 node.obj = obj;
215 node.gc_root_type = gc_root_type();
216 data_.Add(node);
217 }
218 }
219
220 // Traverses the object graph from the current state.
221 void TraverseGraph(ObjectGraph::Visitor* visitor) {
222 while (!data_.is_empty()) {
223 Node node = data_.Last();
224 if (node.ptr == kSentinel) {
225 data_.RemoveLast();
226 // The node below the sentinel has already been visited.
227 data_.RemoveLast();
228 continue;
229 }
230 ObjectPtr obj = node.obj;
231 ASSERT(obj->IsHeapObject());
232 Node sentinel;
233 sentinel.ptr = kSentinel;
234 data_.Add(sentinel);
235 StackIterator it(this, data_.length() - 2);
236 visitor->gc_root_type = node.gc_root_type;
237 Visitor::Direction direction = visitor->VisitObject(&it);
238 if (direction == ObjectGraph::Visitor::kAbort) {
239 break;
240 }
241 if (direction == ObjectGraph::Visitor::kProceed) {
242 set_gc_root_type(node.gc_root_type);
243 ASSERT(obj->IsHeapObject());
244 switch (obj->GetClassId()) {
245 case kWeakArrayCid:
246 VisitWeakArray(static_cast<WeakArrayPtr>(obj));
247 break;
248 case kWeakReferenceCid:
249 VisitWeakReference(static_cast<WeakReferencePtr>(obj));
250 break;
251 case kFinalizerEntryCid:
252 VisitFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
253 break;
254 default:
255 obj->untag()->VisitPointers(this);
256 break;
257 }
258 clear_gc_root_type();
259 }
260 }
261 }
262
263 void VisitWeakArray(WeakArrayPtr array) {}
264
265 void VisitWeakReference(WeakReferencePtr ref) {
266#if !defined(DART_COMPRESSED_POINTERS)
267 VisitPointers(&ref->untag()->type_arguments_,
268 &ref->untag()->type_arguments_);
269#else
270 VisitCompressedPointers(ref->heap_base(), &ref->untag()->type_arguments_,
271 &ref->untag()->type_arguments_);
272#endif
273 }
274
275 void VisitFinalizerEntry(FinalizerEntryPtr entry) {
276#if !defined(DART_COMPRESSED_POINTERS)
277 VisitPointers(&entry->untag()->token_, &entry->untag()->token_);
278 VisitPointers(&entry->untag()->next_, &entry->untag()->next_);
279#else
280 VisitCompressedPointers(entry->heap_base(), &entry->untag()->token_,
281 &entry->untag()->token_);
282 VisitCompressedPointers(entry->heap_base(), &entry->untag()->next_,
283 &entry->untag()->next_);
284#endif
285 }
286
287 bool visit_weak_persistent_handles() const override {
288 return visit_weak_persistent_handles_;
289 }
290
291 void set_visit_weak_persistent_handles(bool value) {
292 visit_weak_persistent_handles_ = value;
293 }
294
295 bool include_vm_objects_;
296
297 private:
298 struct Node {
299 void* ptr; // kSentinel for the sentinel node.
300 ObjectPtr obj;
301 const char* gc_root_type;
302 };
303
304 bool visit_weak_persistent_handles_ = false;
305 static ObjectPtr* const kSentinel;
306 static constexpr intptr_t kInitialCapacity = 1024;
307 static constexpr intptr_t kNoParent = -1;
308
309 intptr_t Parent(intptr_t index) const {
310 // The parent is just below the next sentinel.
311 for (intptr_t i = index; i >= 1; --i) {
312 if (data_[i].ptr == kSentinel) {
313 return i - 1;
314 }
315 }
316 return kNoParent;
317 }
318
319 // During the iteration of the heap we are already at a safepoint, so there is
320 // no need to let the GC know about [object_ids_] (i.e. GC cannot run while we
321// use [object_ids_]).
322 WeakTable* object_ids_ = nullptr;
323 GrowableArray<Node> data_;
324 friend class StackIterator;
326};
327
328ObjectPtr* const ObjectGraph::Stack::kSentinel = nullptr;
329
330ObjectPtr ObjectGraph::StackIterator::Get() const {
331 return stack_->data_[index_].obj;
332}
333
334bool ObjectGraph::StackIterator::MoveToParent() {
335 intptr_t parent = stack_->Parent(index_);
336 if (parent == Stack::kNoParent) {
337 return false;
338 } else {
339 index_ = parent;
340 return true;
341 }
342}
343
344intptr_t ObjectGraph::StackIterator::OffsetFromParent() const {
345 intptr_t parent_index = stack_->Parent(index_);
346 if (parent_index == Stack::kNoParent) {
347 return -1;
348 }
349 Stack::Node parent = stack_->data_[parent_index];
350 uword parent_start = UntaggedObject::ToAddr(parent.obj);
351 Stack::Node child = stack_->data_[index_];
352 uword child_ptr_addr = reinterpret_cast<uword>(child.ptr);
353 intptr_t offset = child_ptr_addr - parent_start;
354 if (offset > 0 && offset < parent.obj->untag()->HeapSize()) {
355 return offset;
356 } else {
357 // Some internal VM objects visit pointers not contained within the parent.
358 // For instance, UntaggedCode::VisitCodePointers visits pointers in
359 // instructions.
360 ASSERT(!parent.obj->IsDartInstance());
361 return -1;
362 }
363}
364
365static void IterateUserFields(ObjectPointerVisitor* visitor) {
366 visitor->set_gc_root_type("user global");
367 Thread* thread = Thread::Current();
369 // Scope to prevent handles created here from appearing as stack references.
369 HANDLESCOPE(thread);
370 Zone* zone = thread->zone();
371 const GrowableObjectArray& libraries = GrowableObjectArray::Handle(
372 zone, thread->isolate_group()->object_store()->libraries());
373 Library& library = Library::Handle(zone);
374 Object& entry = Object::Handle(zone);
375 Class& cls = Class::Handle(zone);
376 Array& fields = Array::Handle(zone);
377 Field& field = Field::Handle(zone);
378 for (intptr_t i = 0; i < libraries.Length(); i++) {
379 library ^= libraries.At(i);
380 DictionaryIterator entries(library);
381 while (entries.HasNext()) {
382 entry = entries.GetNext();
383 if (entry.IsClass()) {
384 cls ^= entry.ptr();
385 fields = cls.fields();
386 for (intptr_t j = 0; j < fields.Length(); j++) {
387 field ^= fields.At(j);
388 ObjectPtr ptr = field.ptr();
389 visitor->VisitPointer(&ptr);
390 }
391 } else if (entry.IsField()) {
392 field ^= entry.ptr();
393 ObjectPtr ptr = field.ptr();
394 visitor->VisitPointer(&ptr);
395 }
396 }
397 }
398 visitor->clear_gc_root_type();
399}
400
401ObjectGraph::ObjectGraph(Thread* thread) : ThreadStackResource(thread) {
402 // The VM isolate has all its objects pre-marked, so iterating over it
403 // would be a no-op.
404 ASSERT(thread->isolate() != Dart::vm_isolate());
405}
406
407ObjectGraph::~ObjectGraph() {}
408
409void ObjectGraph::IterateObjects(ObjectGraph::Visitor* visitor) {
410 Stack stack(isolate_group());
411 stack.set_visit_weak_persistent_handles(
412 visitor->visit_weak_persistent_handles());
413 isolate_group()->VisitObjectPointers(&stack,
414 ValidationPolicy::kDontValidateFrames);
415 stack.TraverseGraph(visitor);
416}
417
418void ObjectGraph::IterateUserObjects(ObjectGraph::Visitor* visitor) {
419 Stack stack(isolate_group());
420 stack.set_visit_weak_persistent_handles(
421 visitor->visit_weak_persistent_handles());
422 IterateUserFields(&stack);
423 stack.include_vm_objects_ = false;
424 stack.TraverseGraph(visitor);
425}
426
427void ObjectGraph::IterateObjectsFrom(const Object& root,
428 ObjectGraph::Visitor* visitor) {
429 Stack stack(isolate_group());
430 stack.set_visit_weak_persistent_handles(
431 visitor->visit_weak_persistent_handles());
432 ObjectPtr root_raw = root.ptr();
433 stack.VisitPointer(&root_raw);
434 stack.TraverseGraph(visitor);
435}
436
437class InstanceAccumulator : public ObjectVisitor {
438 public:
439 InstanceAccumulator(ObjectGraph::Stack* stack, intptr_t class_id)
440 : stack_(stack), class_id_(class_id) {}
441
442 void VisitObject(ObjectPtr obj) override {
443 if (obj->GetClassId() == class_id_) {
444 ObjectPtr rawobj = obj;
445 stack_->VisitPointer(&rawobj);
446 }
447 }
448
449 private:
450 ObjectGraph::Stack* stack_;
451 const intptr_t class_id_;
452
453 DISALLOW_COPY_AND_ASSIGN(InstanceAccumulator);
454};
455
456void ObjectGraph::IterateObjectsFrom(intptr_t class_id,
457 HeapIterationScope* iteration,
458 ObjectGraph::Visitor* visitor) {
459 Stack stack(isolate_group());
460
461 InstanceAccumulator accumulator(&stack, class_id);
462 iteration->IterateObjectsNoImagePages(&accumulator);
463
464 stack.TraverseGraph(visitor);
465}
466
467class SizeVisitor : public ObjectGraph::Visitor {
468 public:
469 SizeVisitor() : size_(0) {}
470 intptr_t size() const { return size_; }
471 virtual bool ShouldSkip(ObjectPtr obj) const { return false; }
472 virtual Direction VisitObject(ObjectGraph::StackIterator* it) {
473 ObjectPtr obj = it->Get();
474 if (ShouldSkip(obj)) {
475 return kBacktrack;
476 }
477 size_ += obj->untag()->HeapSize();
478 return kProceed;
479 }
480
481 private:
482 intptr_t size_;
483};
484
485class SizeExcludingObjectVisitor : public SizeVisitor {
486 public:
487 explicit SizeExcludingObjectVisitor(const Object& skip) : skip_(skip) {}
488 virtual bool ShouldSkip(ObjectPtr obj) const { return obj == skip_.ptr(); }
489
490 private:
491 const Object& skip_;
492};
493
494class SizeExcludingClassVisitor : public SizeVisitor {
495 public:
496 explicit SizeExcludingClassVisitor(intptr_t skip) : skip_(skip) {}
497 virtual bool ShouldSkip(ObjectPtr obj) const {
498 return obj->GetClassId() == skip_;
499 }
500
501 private:
502 const intptr_t skip_;
503};
504
505intptr_t ObjectGraph::SizeRetainedByInstance(const Object& obj) {
506 HeapIterationScope iteration_scope(Thread::Current(), true);
507 SizeVisitor total;
508 IterateObjects(&total);
509 intptr_t size_total = total.size();
510 SizeExcludingObjectVisitor excluding_obj(obj);
511 IterateObjects(&excluding_obj);
512 intptr_t size_excluding_obj = excluding_obj.size();
513 return size_total - size_excluding_obj;
514}
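// Note: the retained size is computed indirectly as
//   retained(obj) = reachable-from-roots - reachable-from-roots-skipping-obj
// using two full graph traversals (here and in SizeRetainedByClass below)
// rather than an explicit dominator computation.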
515
516intptr_t ObjectGraph::SizeReachableByInstance(const Object& obj) {
517 HeapIterationScope iteration_scope(Thread::Current(), true);
518 SizeVisitor total;
519 IterateObjectsFrom(obj, &total);
520 return total.size();
521}
522
523intptr_t ObjectGraph::SizeRetainedByClass(intptr_t class_id) {
524 HeapIterationScope iteration_scope(Thread::Current(), true);
525 SizeVisitor total;
526 IterateObjects(&total);
527 intptr_t size_total = total.size();
528 SizeExcludingClassVisitor excluding_class(class_id);
529 IterateObjects(&excluding_class);
530 intptr_t size_excluding_class = excluding_class.size();
531 return size_total - size_excluding_class;
532}
533
534intptr_t ObjectGraph::SizeReachableByClass(intptr_t class_id) {
535 HeapIterationScope iteration_scope(Thread::Current(), true);
536 SizeVisitor total;
537 IterateObjectsFrom(class_id, &iteration_scope, &total);
538 return total.size();
539}
540
541class RetainingPathVisitor : public ObjectGraph::Visitor {
542 public:
543 // We cannot use a GrowableObjectArray, since we must not trigger GC.
544 RetainingPathVisitor(ObjectPtr obj, const Array& path)
545 : thread_(Thread::Current()), obj_(obj), path_(path), length_(0) {}
546
547 intptr_t length() const { return length_; }
548 virtual bool visit_weak_persistent_handles() const { return true; }
549
550 bool ShouldSkip(ObjectPtr obj) {
551 // A retaining path through ICData is never the only retaining path,
552 // and it is less informative than its alternatives.
553 intptr_t cid = obj->GetClassId();
554 switch (cid) {
555 case kICDataCid:
556 return true;
557 default:
558 return false;
559 }
560 }
561
562 bool ShouldStop(ObjectPtr obj) {
563 // A static field is considered a root from a language point of view.
564 if (obj->IsField()) {
565 const Field& field = Field::Handle(static_cast<FieldPtr>(obj));
566 return field.is_static();
567 }
568 return false;
569 }
570
571 void StartList() { was_last_array_ = false; }
572
573 intptr_t HideNDescendant(ObjectPtr obj) {
574 // A GrowableObjectArray overwrites its internal storage.
575 // Keeping both of them in the list is redundant.
576 if (was_last_array_ && obj->IsGrowableObjectArray()) {
577 was_last_array_ = false;
578 return 1;
579 }
581 // A LinkedHashMap overwrites its internal storage.
581 // Keeping both of them in the list is redundant.
582 if (was_last_array_ && obj->IsMap()) {
583 was_last_array_ = false;
584 return 1;
585 }
586 was_last_array_ = obj->IsArray();
587 return 0;
588 }
589
590 virtual Direction VisitObject(ObjectGraph::StackIterator* it) {
591 if (it->Get() != obj_) {
592 if (ShouldSkip(it->Get())) {
593 return kBacktrack;
594 } else {
595 return kProceed;
596 }
597 } else {
598 HANDLESCOPE(thread_);
599 Object& current = Object::Handle();
600 Smi& offset_from_parent = Smi::Handle();
601 StartList();
602 do {
603 // We collapse the backing store of some internal objects.
604 length_ -= HideNDescendant(it->Get());
605 intptr_t obj_index = length_ * 2;
606 intptr_t offset_index = obj_index + 1;
607 if (!path_.IsNull() && offset_index < path_.Length()) {
608 current = it->Get();
609 path_.SetAt(obj_index, current);
610 offset_from_parent = Smi::New(it->OffsetFromParent());
611 path_.SetAt(offset_index, offset_from_parent);
612 }
613 ++length_;
614 } while (!ShouldStop(it->Get()) && it->MoveToParent());
615 return kAbort;
616 }
617 }
618
619 private:
620 Thread* thread_;
621 ObjectPtr obj_;
622 const Array& path_;
623 intptr_t length_;
624 bool was_last_array_;
625};
626
627ObjectGraph::RetainingPathResult ObjectGraph::RetainingPath(Object* obj,
628 const Array& path) {
629 HeapIterationScope iteration_scope(Thread::Current(), true);
630 // To break the trivial path, the handle 'obj' is temporarily cleared during
631 // the search, but restored before returning.
632 ObjectPtr raw = obj->ptr();
633 *obj = Object::null();
634 RetainingPathVisitor visitor(raw, path);
635 IterateUserObjects(&visitor);
636 if (visitor.length() == 0) {
637 IterateObjects(&visitor);
638 }
639 *obj = raw;
640 return {visitor.length(), visitor.gc_root_type};
641}
642
643class InboundReferencesVisitor : public ObjectVisitor,
644 public ObjectPointerVisitor {
645 public:
646 // We cannot use a GrowableObjectArray, since we must not trigger GC.
647 InboundReferencesVisitor(Isolate* isolate,
648 ObjectPtr target,
649 const Array& references,
650 Object* scratch)
651 : ObjectPointerVisitor(isolate->group()),
652 source_(nullptr),
653 target_(target),
654 references_(references),
655 scratch_(scratch),
656 length_(0) {
657 ASSERT(Thread::Current()->no_safepoint_scope_depth() != 0);
658 }
659
660 bool trace_values_through_fields() const override { return true; }
661
662 intptr_t length() const { return length_; }
663
664 void VisitObject(ObjectPtr raw_obj) override {
665 source_ = raw_obj;
666 raw_obj->untag()->VisitPointers(this);
667 }
668
669 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
670 for (ObjectPtr* current_ptr = first; current_ptr <= last; current_ptr++) {
671 ObjectPtr current_obj = *current_ptr;
672 if (current_obj == target_) {
673 intptr_t obj_index = length_ * 2;
674 intptr_t offset_index = obj_index + 1;
675 if (!references_.IsNull() && offset_index < references_.Length()) {
676 *scratch_ = source_;
677 references_.SetAt(obj_index, *scratch_);
678
679 *scratch_ = Smi::New(0);
680 uword source_start = UntaggedObject::ToAddr(source_);
681 uword current_ptr_addr = reinterpret_cast<uword>(current_ptr);
682 intptr_t offset = current_ptr_addr - source_start;
683 if (offset > 0 && offset < source_->untag()->HeapSize()) {
684 *scratch_ = Smi::New(offset);
685 } else {
686 // Some internal VM objects visit pointers not contained within the
687 // parent. For instance, UntaggedCode::VisitCodePointers visits
688 // pointers in instructions.
689 ASSERT(!source_->IsDartInstance());
690 *scratch_ = Smi::New(-1);
691 }
692 references_.SetAt(offset_index, *scratch_);
693 }
694 ++length_;
695 }
696 }
697 }
698
699#if defined(DART_COMPRESSED_POINTERS)
700 void VisitCompressedPointers(uword heap_base,
701 CompressedObjectPtr* first,
702 CompressedObjectPtr* last) override {
703 for (CompressedObjectPtr* current_ptr = first; current_ptr <= last;
704 current_ptr++) {
705 ObjectPtr current_obj = current_ptr->Decompress(heap_base);
706 if (current_obj == target_) {
707 intptr_t obj_index = length_ * 2;
708 intptr_t offset_index = obj_index + 1;
709 if (!references_.IsNull() && offset_index < references_.Length()) {
710 *scratch_ = source_;
711 references_.SetAt(obj_index, *scratch_);
712
713 *scratch_ = Smi::New(0);
714 uword source_start = UntaggedObject::ToAddr(source_);
715 uword current_ptr_addr = reinterpret_cast<uword>(current_ptr);
716 intptr_t offset = current_ptr_addr - source_start;
717 if (offset > 0 && offset < source_->untag()->HeapSize()) {
718 *scratch_ = Smi::New(offset);
719 } else {
720 // Some internal VM objects visit pointers not contained within the
721 // parent. For instance, UntaggedCode::VisitCodePointers visits
722 // pointers in instructions.
723 ASSERT(!source_->IsDartInstance());
724 *scratch_ = Smi::New(-1);
725 }
726 references_.SetAt(offset_index, *scratch_);
727 }
728 ++length_;
729 }
730 }
731 }
732#endif
733
734 private:
735 ObjectPtr source_;
736 ObjectPtr target_;
737 const Array& references_;
738 Object* scratch_;
739 intptr_t length_;
740};
741
742intptr_t ObjectGraph::InboundReferences(Object* obj, const Array& references) {
743 Object& scratch = Object::Handle();
744 HeapIterationScope iteration(Thread::Current());
745 NoSafepointScope no_safepoint;
746 InboundReferencesVisitor visitor(isolate(), obj->ptr(), references, &scratch);
747 iteration.IterateObjects(&visitor);
748 return visitor.length();
749}
750
751// Each Page is divided into blocks of size kBlockSize. Each object belongs
752// to the block containing its header word.
753// When generating a heap snapshot, we assign objects sequential ids in heap
754// iteration order. A bitvector is computed that marks the header word of
755// each object in the block, so the id of any object in the block can be
756// found by adding the number of bits set before it to the block's first id.
757// Compare ForwardingBlock used for heap compaction.
758class CountingBlock {
759 public:
760 void Clear() {
761 base_count_ = 0;
762 count_bitvector_ = 0;
763 }
764
765 intptr_t Lookup(uword addr) const {
766 uword block_offset = addr & ~kBlockMask;
767 intptr_t bitvector_shift = block_offset >> kObjectAlignmentLog2;
768 ASSERT(bitvector_shift < kBitsPerWord);
769 uword preceding_bitmask = (static_cast<uword>(1) << bitvector_shift) - 1;
770 return base_count_ +
771 Utils::CountOneBitsWord(count_bitvector_ & preceding_bitmask);
772 }
773
774 void Record(uword old_addr, intptr_t id) {
775 if (base_count_ == 0) {
776 ASSERT(count_bitvector_ == 0);
777 base_count_ = id; // First object in the block.
778 }
779
780 uword block_offset = old_addr & ~kBlockMask;
781 intptr_t bitvector_shift = block_offset >> kObjectAlignmentLog2;
782 ASSERT(bitvector_shift < kBitsPerWord);
783 count_bitvector_ |= static_cast<uword>(1) << bitvector_shift;
784 }
785
786 private:
787 intptr_t base_count_;
788 uword count_bitvector_;
789 COMPILE_ASSERT(kBitVectorWordsPerBlock == 1);
790
791 DISALLOW_COPY_AND_ASSIGN(CountingBlock);
792};
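// Worked example (annotation, assuming 16-byte object alignment): if the
// objects in a block have their header words at block offsets 0, 32 and 80
// and are recorded with ids 7, 8 and 9, then base_count_ == 7 and bits 0, 2
// and 5 of count_bitvector_ are set. A later Lookup() of the object at offset
// 80 computes shift 5, counts the two set bits below it and returns 7 + 2 == 9.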
793
794class CountingPage {
795 public:
796 void Clear() {
797 for (intptr_t i = 0; i < kBlocksPerPage; i++) {
798 blocks_[i].Clear();
799 }
800 }
801
802 intptr_t Lookup(uword addr) { return BlockFor(addr)->Lookup(addr); }
803 void Record(uword addr, intptr_t id) {
804 return BlockFor(addr)->Record(addr, id);
805 }
806
807 CountingBlock* BlockFor(uword addr) {
808 intptr_t page_offset = addr & ~kPageMask;
809 intptr_t block_number = page_offset / kBlockSize;
810 ASSERT(block_number >= 0);
811 ASSERT(block_number < kBlocksPerPage);
812 return &blocks_[block_number];
813 }
814
815 private:
816 CountingBlock blocks_[kBlocksPerPage];
817
819 DISALLOW_IMPLICIT_CONSTRUCTORS(CountingPage);
820};
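// Note: CountingPages are not allocated separately. The writer reuses each
// old-space Page's forwarding page (normally used during compaction, compare
// ForwardingBlock above) as scratch memory, which is why SetupCountingPages()
// below simply casts forwarding_page() and clears it.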
821
822void HeapSnapshotWriter::EnsureAvailable(intptr_t needed) {
823 intptr_t available = capacity_ - size_;
824 if (available >= needed) {
825 return;
826 }
827
828 if (buffer_ != nullptr) {
829 Flush();
830 }
831 ASSERT(buffer_ == nullptr);
832
833 intptr_t chunk_size = kPreferredChunkSize;
834 const intptr_t reserved_prefix = writer_->ReserveChunkPrefixSize();
835 if (chunk_size < (reserved_prefix + needed)) {
836 chunk_size = reserved_prefix + needed;
837 }
838 buffer_ = reinterpret_cast<uint8_t*>(malloc(chunk_size));
839 size_ = reserved_prefix;
840 capacity_ = chunk_size;
841}
842
843void HeapSnapshotWriter::Flush(bool last) {
844 if (size_ == 0 && !last) {
845 return;
846 }
847
848 writer_->WriteChunk(buffer_, size_, last);
849
850 buffer_ = nullptr;
851 size_ = 0;
852 capacity_ = 0;
853}
854
855void HeapSnapshotWriter::SetupImagePageBoundaries() {
856 MallocGrowableArray<ImagePageRange> ranges(4);
857
858 Page* image_page =
859 Dart::vm_isolate_group()->heap()->old_space()->image_pages_;
860 while (image_page != nullptr) {
861 ImagePageRange range = {image_page->object_start(),
862 image_page->object_end()};
863 ranges.Add(range);
864 image_page = image_page->next();
865 }
866 image_page = isolate_group()->heap()->old_space()->image_pages_;
867 while (image_page != nullptr) {
868 ImagePageRange range = {image_page->object_start(),
869 image_page->object_end()};
870 ranges.Add(range);
871 image_page = image_page->next();
872 }
873
874 ranges.Sort(CompareImagePageRanges);
875 intptr_t image_page_count;
876 ranges.StealBuffer(&image_page_ranges_, &image_page_count);
877 image_page_hi_ = image_page_count - 1;
878}
879
880void HeapSnapshotWriter::SetupCountingPages() {
881 Page* page = isolate_group()->heap()->old_space()->pages_;
882 while (page != nullptr) {
883 CountingPage* counting_page =
884 reinterpret_cast<CountingPage*>(page->forwarding_page());
885 ASSERT(counting_page != nullptr);
886 counting_page->Clear();
887 page = page->next();
888 }
889}
890
891bool HeapSnapshotWriter::OnImagePage(ObjectPtr obj) const {
892 const uword addr = UntaggedObject::ToAddr(obj);
893 intptr_t lo = 0;
894 intptr_t hi = image_page_hi_;
895 while (lo <= hi) {
896 intptr_t mid = (hi - lo + 1) / 2 + lo;
897 ASSERT(mid >= lo);
898 ASSERT(mid <= hi);
899 if (addr < image_page_ranges_[mid].start) {
900 hi = mid - 1;
901 } else if (addr >= image_page_ranges_[mid].end) {
902 lo = mid + 1;
903 } else {
904 return true;
905 }
906 }
907 return false;
908}
909
910CountingPage* HeapSnapshotWriter::FindCountingPage(ObjectPtr obj) const {
911 if (obj->IsOldObject() && !OnImagePage(obj)) {
912 // On a regular or large page.
913 Page* page = Page::Of(obj);
914 return reinterpret_cast<CountingPage*>(page->forwarding_page());
915 }
916
917 // On an image page or in new space.
918 return nullptr;
919}
920
921void HeapSnapshotWriter::AssignObjectId(ObjectPtr obj) {
922 if (!obj->IsHeapObject()) {
923 thread()->heap()->SetObjectId(obj, ++object_count_);
924 return;
925 }
926
927 CountingPage* counting_page = FindCountingPage(obj);
928 if (counting_page != nullptr) {
929 // Likely: object on an ordinary page.
930 counting_page->Record(UntaggedObject::ToAddr(obj), ++object_count_);
931 } else {
932 // Unlikely: new space object, or object on a large or image page.
933 thread()->heap()->SetObjectId(obj, ++object_count_);
934 }
935}
936
937intptr_t HeapSnapshotWriter::GetObjectId(ObjectPtr obj) const {
938 if (!obj->IsHeapObject()) {
939 intptr_t id = thread()->heap()->GetObjectId(obj);
940 ASSERT(id != 0);
941 return id;
942 }
943
944 CountingPage* counting_page = FindCountingPage(obj);
945 intptr_t id;
946 if (counting_page != nullptr) {
947 // Likely: object on an ordinary page.
948 id = counting_page->Lookup(UntaggedObject::ToAddr(obj));
949 } else {
950 // Unlikely: new space object, or object on a large or image page.
951 id = thread()->heap()->GetObjectId(obj);
952 }
953 ASSERT(id != 0);
954 return id;
955}
956
957void HeapSnapshotWriter::ClearObjectIds() {
958 thread()->heap()->ResetObjectIdTable();
959}
960
961void HeapSnapshotWriter::CountReferences(intptr_t count) {
962 reference_count_ += count;
963}
964
965void HeapSnapshotWriter::CountExternalProperty() {
966 external_property_count_ += 1;
967}
968
969void HeapSnapshotWriter::AddSmi(SmiPtr smi) {
970 if (thread()->heap()->GetObjectId(smi) == WeakTable::kNoValue) {
971 thread()->heap()->SetObjectId(smi, -1);
972 smis_.Add(smi);
973 }
974}
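// Note: Smis are immediate values, not heap objects, so they cannot be counted
// via a CountingPage. AddSmi() parks each distinct Smi in smis_ (using the
// heap's object-id table to deduplicate); Write() later assigns them ids after
// all heap objects and emits them as synthetic nodes carrying kIntData.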
975
976class Pass1Visitor : public ObjectVisitor,
977 public ObjectPointerVisitor,
978 public HandleVisitor {
979 public:
980 explicit Pass1Visitor(HeapSnapshotWriter* writer, ObjectSlots* object_slots)
981 : ObjectVisitor(),
982 ObjectPointerVisitor(IsolateGroup::Current()),
983 HandleVisitor(Thread::Current()),
984 writer_(writer),
985 object_slots_(object_slots) {}
986
987 void VisitObject(ObjectPtr obj) override {
988 if (obj->IsPseudoObject()) return;
989
990 writer_->AssignObjectId(obj);
991 const auto cid = obj->GetClassId();
992
993 if (object_slots_->ContainsOnlyTaggedPointers(cid)) {
994 obj->untag()->VisitPointersPrecise(this);
995 } else {
996 for (auto& slot : *object_slots_->ObjectSlotsFor(cid)) {
997 if (slot.is_compressed_pointer) {
998 auto target = reinterpret_cast<CompressedObjectPtr*>(
999 UntaggedObject::ToAddr(obj->untag()) + slot.offset);
1000 VisitCompressedPointers(obj->heap_base(), target, target);
1001 } else {
1002 writer_->CountReferences(1);
1003 }
1004 }
1005 }
1006 }
1007
1008 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1009 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1010 ObjectPtr obj = *ptr;
1011 if (!obj->IsHeapObject()) {
1012 writer_->AddSmi(static_cast<SmiPtr>(obj));
1013 }
1014 writer_->CountReferences(1);
1015 }
1016 }
1017
1018#if defined(DART_COMPRESSED_POINTERS)
1019 void VisitCompressedPointers(uword heap_base,
1020 CompressedObjectPtr* from,
1021 CompressedObjectPtr* to) override {
1022 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
1023 ObjectPtr obj = ptr->Decompress(heap_base);
1024 if (!obj->IsHeapObject()) {
1025 writer_->AddSmi(static_cast<SmiPtr>(obj));
1026 }
1027 writer_->CountReferences(1);
1028 }
1029 }
1030#endif
1031
1032 void VisitHandle(uword addr) override {
1033 FinalizablePersistentHandle* weak_persistent_handle =
1034 reinterpret_cast<FinalizablePersistentHandle*>(addr);
1035 if (!weak_persistent_handle->ptr()->IsHeapObject()) {
1036 return; // Free handle.
1037 }
1038
1039 writer_->CountExternalProperty();
1040 }
1041
1042 private:
1043 HeapSnapshotWriter* const writer_;
1044 ObjectSlots* object_slots_;
1045
1046 DISALLOW_COPY_AND_ASSIGN(Pass1Visitor);
1047};
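// Note: Pass1Visitor only counts. It assigns every object an id and tallies
// the total number of references and external properties, so that Pass2Visitor
// can emit each count before the corresponding data, as the snapshot format
// requires.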
1048
1049class CountImagePageRefs : public ObjectVisitor {
1050 public:
1051 CountImagePageRefs() : ObjectVisitor() {}
1052
1053 void VisitObject(ObjectPtr obj) override {
1054 if (obj->IsPseudoObject()) return;
1055 count_++;
1056 }
1057 intptr_t count() const { return count_; }
1058
1059 private:
1060 intptr_t count_ = 0;
1061
1062 DISALLOW_COPY_AND_ASSIGN(CountImagePageRefs);
1063};
1064
1065class WriteImagePageRefs : public ObjectVisitor {
1066 public:
1067 explicit WriteImagePageRefs(HeapSnapshotWriter* writer)
1068 : ObjectVisitor(), writer_(writer) {}
1069
1070 void VisitObject(ObjectPtr obj) override {
1071 if (obj->IsPseudoObject()) return;
1072#if defined(DEBUG)
1073 count_++;
1074#endif
1075 writer_->WriteUnsigned(writer_->GetObjectId(obj));
1076 }
1077#if defined(DEBUG)
1078 intptr_t count() const { return count_; }
1079#endif
1080
1081 private:
1082 HeapSnapshotWriter* const writer_;
1083#if defined(DEBUG)
1084 intptr_t count_ = 0;
1085#endif
1086
1087 DISALLOW_COPY_AND_ASSIGN(WriteImagePageRefs);
1088};
1089
1090enum NonReferenceDataTags {
1091 kNoData = 0,
1092 kNullData,
1093 kBoolData,
1094 kIntData,
1095 kDoubleData,
1096 kLatin1Data,
1097 kUTF16Data,
1098 kLengthData,
1099 kNameData,
1100};
1101
1102static constexpr intptr_t kMaxStringElements = 128;
1103
1104enum ExtraCids {
1105 kRootExtraCid = 1, // 1-origin
1106 kImagePageExtraCid = 2,
1107 kIsolateExtraCid = 3,
1108
1109 kNumExtraCids = 3,
1110};
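// Note: class ids written to the snapshot are shifted by kNumExtraCids so that
// three synthetic classes (Root, Read-Only Pages, Isolate) occupy ids 1..3.
// Object ids follow a similar pattern: 0 is the invalid/sentinel id, 1 is the
// root node, 2 the image-page node, isolates start at 3, and ordinary heap
// objects follow in iteration order. Each object record carries one of the
// NonReferenceDataTags above, followed by tag-specific payload (a length, a
// truncated string, a name, a numeric value, ...).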
1111
1112class Pass2Visitor : public ObjectVisitor,
1113 public ObjectPointerVisitor,
1114 public HandleVisitor {
1115 public:
1116 explicit Pass2Visitor(HeapSnapshotWriter* writer, ObjectSlots* object_slots)
1117 : ObjectVisitor(),
1118 ObjectPointerVisitor(IsolateGroup::Current()),
1119 HandleVisitor(Thread::Current()),
1120 writer_(writer),
1121 object_slots_(object_slots) {}
1122
1123 void VisitObject(ObjectPtr obj) override {
1124 if (obj->IsPseudoObject()) return;
1125
1126 intptr_t cid = obj->GetClassId();
1127 writer_->WriteUnsigned(cid + kNumExtraCids);
1128 writer_->WriteUnsigned(discount_sizes_ ? 0 : obj->untag()->HeapSize());
1129
1130 if (cid == kNullCid) {
1131 writer_->WriteUnsigned(kNullData);
1132 } else if (cid == kBoolCid) {
1133 writer_->WriteUnsigned(kBoolData);
1134 writer_->WriteUnsigned(
1135 static_cast<uintptr_t>(static_cast<BoolPtr>(obj)->untag()->value_));
1136 } else if (cid == kSentinelCid) {
1137 if (obj == Object::sentinel().ptr()) {
1138 writer_->WriteUnsigned(kNameData);
1139 writer_->WriteUtf8("uninitialized");
1140 } else if (obj == Object::transition_sentinel().ptr()) {
1141 writer_->WriteUnsigned(kNameData);
1142 writer_->WriteUtf8("initializing");
1143 } else {
1144 writer_->WriteUnsigned(kNoData);
1145 }
1146 } else if (cid == kSmiCid) {
1147 UNREACHABLE();
1148 } else if (cid == kMintCid) {
1149 writer_->WriteUnsigned(kIntData);
1150 writer_->WriteSigned(static_cast<MintPtr>(obj)->untag()->value_);
1151 } else if (cid == kDoubleCid) {
1152 writer_->WriteUnsigned(kDoubleData);
1153 writer_->WriteBytes(&(static_cast<DoublePtr>(obj)->untag()->value_),
1154 sizeof(double));
1155 } else if (cid == kOneByteStringCid) {
1156 OneByteStringPtr str = static_cast<OneByteStringPtr>(obj);
1157 intptr_t len = Smi::Value(str->untag()->length());
1158 intptr_t trunc_len = Utils::Minimum(len, kMaxStringElements);
1159 writer_->WriteUnsigned(kLatin1Data);
1160 writer_->WriteUnsigned(len);
1161 writer_->WriteUnsigned(trunc_len);
1162 writer_->WriteBytes(&str->untag()->data()[0], trunc_len);
1163 } else if (cid == kTwoByteStringCid) {
1164 TwoByteStringPtr str = static_cast<TwoByteStringPtr>(obj);
1165 intptr_t len = Smi::Value(str->untag()->length());
1166 intptr_t trunc_len = Utils::Minimum(len, kMaxStringElements);
1167 writer_->WriteUnsigned(kUTF16Data);
1168 writer_->WriteUnsigned(len);
1169 writer_->WriteUnsigned(trunc_len);
1170 writer_->WriteBytes(&str->untag()->data()[0], trunc_len * 2);
1171 } else if (cid == kArrayCid || cid == kImmutableArrayCid) {
1172 writer_->WriteUnsigned(kLengthData);
1173 writer_->WriteUnsigned(
1174 Smi::Value(static_cast<ArrayPtr>(obj)->untag()->length()));
1175 } else if (cid == kGrowableObjectArrayCid) {
1176 writer_->WriteUnsigned(kLengthData);
1177 writer_->WriteUnsigned(Smi::Value(
1178 static_cast<GrowableObjectArrayPtr>(obj)->untag()->length()));
1179 } else if (cid == kMapCid || cid == kConstMapCid) {
1180 writer_->WriteUnsigned(kLengthData);
1181 writer_->WriteUnsigned(
1182 Smi::Value(static_cast<MapPtr>(obj)->untag()->used_data()));
1183 } else if (cid == kSetCid || cid == kConstSetCid) {
1184 writer_->WriteUnsigned(kLengthData);
1185 writer_->WriteUnsigned(
1186 Smi::Value(static_cast<SetPtr>(obj)->untag()->used_data()));
1187 } else if (cid == kObjectPoolCid) {
1188 writer_->WriteUnsigned(kLengthData);
1189 writer_->WriteUnsigned(static_cast<ObjectPoolPtr>(obj)->untag()->length_);
1190 } else if (IsTypedDataClassId(cid)) {
1191 writer_->WriteUnsigned(kLengthData);
1192 writer_->WriteUnsigned(
1193 Smi::Value(static_cast<TypedDataPtr>(obj)->untag()->length()));
1194 } else if (IsExternalTypedDataClassId(cid)) {
1195 writer_->WriteUnsigned(kLengthData);
1196 writer_->WriteUnsigned(Smi::Value(
1197 static_cast<ExternalTypedDataPtr>(obj)->untag()->length()));
1198 } else if (cid == kFunctionCid) {
1199 writer_->WriteUnsigned(kNameData);
1200 ScrubAndWriteUtf8(static_cast<FunctionPtr>(obj)->untag()->name());
1201 } else if (cid == kCodeCid) {
1202 ObjectPtr owner = static_cast<CodePtr>(obj)->untag()->owner_;
1203 if (!owner->IsHeapObject()) {
1204 // Precompiler removed owner object from the snapshot,
1205 // only leaving Smi classId.
1206 writer_->WriteUnsigned(kNoData);
1207 } else if (owner->IsFunction()) {
1208 writer_->WriteUnsigned(kNameData);
1209 ScrubAndWriteUtf8(static_cast<FunctionPtr>(owner)->untag()->name());
1210 } else if (owner->IsClass()) {
1211 writer_->WriteUnsigned(kNameData);
1212 ScrubAndWriteUtf8(static_cast<ClassPtr>(owner)->untag()->name());
1213 } else {
1214 writer_->WriteUnsigned(kNoData);
1215 }
1216 } else if (cid == kFieldCid) {
1217 writer_->WriteUnsigned(kNameData);
1218 ScrubAndWriteUtf8(static_cast<FieldPtr>(obj)->untag()->name());
1219 } else if (cid == kClassCid) {
1220 writer_->WriteUnsigned(kNameData);
1221 ScrubAndWriteUtf8(static_cast<ClassPtr>(obj)->untag()->name());
1222 } else if (cid == kLibraryCid) {
1223 writer_->WriteUnsigned(kNameData);
1224 ScrubAndWriteUtf8(static_cast<LibraryPtr>(obj)->untag()->url());
1225 } else if (cid == kScriptCid) {
1226 writer_->WriteUnsigned(kNameData);
1227 ScrubAndWriteUtf8(static_cast<ScriptPtr>(obj)->untag()->url());
1228 } else if (cid == kTypeArgumentsCid) {
1229 // Handle scope so we do not change the root set.
1230 // We are assuming that TypeArguments::PrintSubvectorName never allocates
1231 // objects or zone handles.
1232 HANDLESCOPE(thread());
1233 const TypeArguments& args =
1234 TypeArguments::Handle(static_cast<TypeArgumentsPtr>(obj));
1235 TextBuffer buffer(128);
1236 args.PrintSubvectorName(0, args.Length(), TypeArguments::kScrubbedName,
1237 &buffer);
1238 writer_->WriteUnsigned(kNameData);
1239 writer_->WriteUtf8(buffer.buffer());
1240 } else {
1241 writer_->WriteUnsigned(kNoData);
1242 }
1243
1244 if (object_slots_->ContainsOnlyTaggedPointers(cid)) {
1245 DoCount();
1246 obj->untag()->VisitPointersPrecise(this);
1247 DoWrite();
1248 obj->untag()->VisitPointersPrecise(this);
1249 } else {
1250 auto slots = object_slots_->ObjectSlotsFor(cid);
1251 DoCount();
1252 counted_ += slots->length();
1253 DoWrite();
1254 for (auto& slot : *slots) {
1255 if (slot.is_compressed_pointer) {
1256 auto target = reinterpret_cast<CompressedObjectPtr*>(
1257 UntaggedObject::ToAddr(obj->untag()) + slot.offset);
1258 VisitCompressedPointers(obj->heap_base(), target, target);
1259 } else {
1260 writer_->WriteUnsigned(0);
1261 }
1262 written_++;
1263 total_++;
1264 }
1265 }
1266 }
1267
1268 void ScrubAndWriteUtf8(StringPtr str) {
1269 if (str == String::null()) {
1270 writer_->WriteUtf8("null");
1271 } else {
1272 String handle;
1273 handle = str;
1274 char* value = handle.ToMallocCString();
1275 writer_->ScrubAndWriteUtf8(value);
1276 free(value);
1277 }
1278 }
1279
1280 void set_discount_sizes(bool value) { discount_sizes_ = value; }
1281
1282 void DoCount() {
1283 writing_ = false;
1284 counted_ = 0;
1285 written_ = 0;
1286 }
1287 void DoWrite() {
1288 writing_ = true;
1289 writer_->WriteUnsigned(counted_);
1290 }
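// Note: each object's pointers are visited twice, first in counting mode
// (after DoCount) to learn how many references it has, then in writing mode
// (after DoWrite) to emit their target ids, because the stream stores the
// reference count immediately before the reference list. CountExtraRefs() and
// WriteExtraRef() follow the same protocol for the synthetic root and isolate
// nodes.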
1291
1292 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1293 if (writing_) {
1294 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1295 ObjectPtr target = *ptr;
1296 written_++;
1297 total_++;
1298 writer_->WriteUnsigned(writer_->GetObjectId(target));
1299 }
1300 } else {
1301 intptr_t count = to - from + 1;
1302 ASSERT(count >= 0);
1303 counted_ += count;
1304 }
1305 }
1306
1307#if defined(DART_COMPRESSED_POINTERS)
1308 void VisitCompressedPointers(uword heap_base,
1309 CompressedObjectPtr* from,
1310 CompressedObjectPtr* to) override {
1311 if (writing_) {
1312 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
1313 ObjectPtr target = ptr->Decompress(heap_base);
1314 written_++;
1315 total_++;
1316 writer_->WriteUnsigned(writer_->GetObjectId(target));
1317 }
1318 } else {
1319 intptr_t count = to - from + 1;
1320 ASSERT(count >= 0);
1321 counted_ += count;
1322 }
1323 }
1324#endif
1325
1326 void VisitHandle(uword addr) override {
1327 FinalizablePersistentHandle* weak_persistent_handle =
1328 reinterpret_cast<FinalizablePersistentHandle*>(addr);
1329 if (!weak_persistent_handle->ptr()->IsHeapObject()) {
1330 return; // Free handle.
1331 }
1332
1333 writer_->WriteUnsigned(writer_->GetObjectId(weak_persistent_handle->ptr()));
1334 writer_->WriteUnsigned(weak_persistent_handle->external_size());
1335 // Attempt to include a native symbol name.
1336 auto const name = NativeSymbolResolver::LookupSymbolName(
1337 reinterpret_cast<uword>(weak_persistent_handle->callback()), nullptr);
1338 writer_->WriteUtf8((name == nullptr) ? "Unknown native function" : name);
1339 if (name != nullptr) {
1340 NativeSymbolResolver::FreeSymbolName(name);
1341 }
1342 }
1343
1344 void CountExtraRefs(intptr_t count) {
1345 ASSERT(!writing_);
1346 counted_ += count;
1347 }
1348 void WriteExtraRef(intptr_t oid) {
1349 ASSERT(writing_);
1350 written_++;
1351 writer_->WriteUnsigned(oid);
1352 }
1353
1354 private:
1355 IsolateGroup* isolate_group_;
1356 HeapSnapshotWriter* const writer_;
1357 ObjectSlots* object_slots_;
1358 bool writing_ = false;
1359 intptr_t counted_ = 0;
1360 intptr_t written_ = 0;
1361 intptr_t total_ = 0;
1362 bool discount_sizes_ = false;
1363
1364 DISALLOW_COPY_AND_ASSIGN(Pass2Visitor);
1365};
1366
1367class Pass3Visitor : public ObjectVisitor {
1368 public:
1369 explicit Pass3Visitor(HeapSnapshotWriter* writer)
1370 : ObjectVisitor(), thread_(Thread::Current()), writer_(writer) {}
1371
1372 void VisitObject(ObjectPtr obj) override {
1373 if (obj->IsPseudoObject()) {
1374 return;
1375 }
1376 writer_->WriteUnsigned(
1377 HeapSnapshotWriter::GetHeapSnapshotIdentityHash(thread_, obj));
1378 }
1379
1380 private:
1381 Thread* thread_;
1382 HeapSnapshotWriter* const writer_;
1383
1384 DISALLOW_COPY_AND_ASSIGN(Pass3Visitor);
1385};
1386
1387class CollectStaticFieldNames : public ObjectVisitor {
1388 public:
1389 CollectStaticFieldNames(intptr_t field_table_size,
1390 const char** field_table_names)
1391 : ObjectVisitor(),
1392 field_table_size_(field_table_size),
1393 field_table_names_(field_table_names),
1394 field_(Field::Handle()) {}
1395
1396 void VisitObject(ObjectPtr obj) override {
1397 if (obj->IsField()) {
1398 field_ ^= obj;
1399 if (field_.is_static()) {
1400 intptr_t id = field_.field_id();
1401 if (id > 0) {
1402 ASSERT(id < field_table_size_);
1403 field_table_names_[id] = field_.UserVisibleNameCString();
1404 }
1405 }
1406 }
1407 }
1408
1409 private:
1410 intptr_t field_table_size_;
1411 const char** field_table_names_;
1412 Field& field_;
1413
1414 DISALLOW_COPY_AND_ASSIGN(CollectStaticFieldNames);
1415};
1416
1417void VmServiceHeapSnapshotChunkedWriter::WriteChunk(uint8_t* buffer,
1418 intptr_t size,
1419 bool last) {
1420 JSONStream js;
1421 {
1422 JSONObject jsobj(&js);
1423 jsobj.AddProperty("jsonrpc", "2.0");
1424 jsobj.AddProperty("method", "streamNotify");
1425 {
1426 JSONObject params(&jsobj, "params");
1427 params.AddProperty("streamId", Service::heapsnapshot_stream.id());
1428 {
1429 JSONObject event(&params, "event");
1430 event.AddProperty("type", "Event");
1431 event.AddProperty("kind", "HeapSnapshot");
1432 event.AddProperty("isolate", thread()->isolate());
1433 event.AddPropertyTimeMillis("timestamp", OS::GetCurrentTimeMillis());
1434 event.AddProperty("last", last);
1435 }
1436 }
1437 }
1438
1439 Service::SendEventWithData(Service::heapsnapshot_stream.id(), "HeapSnapshot",
1440 kMetadataReservation, js.buffer()->buffer(),
1441 js.buffer()->length(), buffer, size);
1442}
1443
1444FileHeapSnapshotWriter::FileHeapSnapshotWriter(Thread* thread,
1445 const char* filename,
1446 bool* success)
1447 : ChunkedWriter(thread), success_(success) {
1448 auto open = Dart::file_open_callback();
1449 auto write = Dart::file_write_callback();
1450 auto close = Dart::file_close_callback();
1451 if (open != nullptr && write != nullptr && close != nullptr) {
1452 file_ = open(filename, /*write=*/true);
1453 }
1454 // If we have open/write/close callbacks we assume it can be done
1455 // successfully. (Those embedder-provided callbacks currently don't allow
1456 // signaling of failure conditions)
1457 if (success_ != nullptr) *success_ = file_ != nullptr;
1458}
1459
1460FileHeapSnapshotWriter::~FileHeapSnapshotWriter() {
1461 if (file_ != nullptr) {
1462 Dart::file_close_callback()(file_);
1463 }
1464}
1465
1466void FileHeapSnapshotWriter::WriteChunk(uint8_t* buffer,
1467 intptr_t size,
1468 bool last) {
1469 if (file_ != nullptr) {
1470 Dart::file_write_callback()(buffer, size, file_);
1471 }
1472 free(buffer);
1473}
1474
1475CallbackHeapSnapshotWriter::CallbackHeapSnapshotWriter(
1476 Thread* thread,
1477 Dart_HeapSnapshotWriteChunkCallback callback,
1478 void* context)
1479 : ChunkedWriter(thread), callback_(callback), context_(context) {}
1480
1481CallbackHeapSnapshotWriter::~CallbackHeapSnapshotWriter() {}
1482
1483void CallbackHeapSnapshotWriter::WriteChunk(uint8_t* buffer,
1484 intptr_t size,
1485 bool last) {
1486 callback_(context_, buffer, size, last);
1487}
1488
1489void HeapSnapshotWriter::Write() {
1490 HeapIterationScope iteration(thread());
1491
1492 WriteBytes("dartheap", 8); // Magic value.
1493 WriteUnsigned(0); // Flags.
1494 WriteUtf8(isolate()->name());
1495 Heap* H = thread()->heap();
1496
1497 {
1498 intptr_t used = H->TotalUsedInWords() << kWordSizeLog2;
1499 intptr_t capacity = H->TotalCapacityInWords() << kWordSizeLog2;
1500 intptr_t external = H->TotalExternalInWords() << kWordSizeLog2;
1501 intptr_t image = H->old_space()->ImageInWords() << kWordSizeLog2;
1502 WriteUnsigned(used + image);
1503 WriteUnsigned(capacity + image);
1504 WriteUnsigned(external);
1505 }
1506
1507 ObjectSlots object_slots(thread());
1508
1509 {
1510 HANDLESCOPE(thread());
1511 ClassTable* class_table = isolate_group()->class_table();
1512 class_count_ = class_table->NumCids() - 1;
1513
1514 Class& cls = Class::Handle();
1515 Library& lib = Library::Handle();
1516 String& str = String::Handle();
1517
1518 intptr_t field_table_size = isolate()->field_table()->NumFieldIds();
1519 const char** field_table_names =
1520 thread()->zone()->Alloc<const char*>(field_table_size);
1521 for (intptr_t i = 0; i < field_table_size; i++) {
1522 field_table_names[i] = nullptr;
1523 }
1524 {
1525 CollectStaticFieldNames visitor(field_table_size, field_table_names);
1526 iteration.IterateObjects(&visitor);
1527 }
1528
1529 WriteUnsigned(class_count_ + kNumExtraCids);
1530 {
1531 ASSERT(kRootExtraCid == 1);
1532 WriteUnsigned(0); // Flags
1533 WriteUtf8("Root"); // Name
1534 WriteUtf8(""); // Library name
1535 WriteUtf8(""); // Library uri
1536 WriteUtf8(""); // Reserved
1537 WriteUnsigned(0); // Field count
1538 }
1539 {
1540 ASSERT(kImagePageExtraCid == 2);
1541 WriteUnsigned(0); // Flags
1542 WriteUtf8("Read-Only Pages"); // Name
1543 WriteUtf8(""); // Library name
1544 WriteUtf8(""); // Library uri
1545 WriteUtf8(""); // Reserved
1546 WriteUnsigned(0); // Field count
1547 }
1548 {
1549 ASSERT(kIsolateExtraCid == 3);
1550 WriteUnsigned(0); // Flags
1551 WriteUtf8("Isolate"); // Name
1552 WriteUtf8(""); // Library name
1553 WriteUtf8(""); // Library uri
1554 WriteUtf8(""); // Reserved
1555
1556 WriteUnsigned(field_table_size); // Field count
1557 for (intptr_t i = 0; i < field_table_size; i++) {
1558 intptr_t flags = 1; // Strong.
1559 WriteUnsigned(flags);
1560 WriteUnsigned(i); // Index.
1561 const char* name = field_table_names[i];
1562 WriteUtf8(name == nullptr ? "" : name);
1563 WriteUtf8(""); // Reserved
1564 }
1565 }
1566
1567 ASSERT(kNumExtraCids == 3);
1568 for (intptr_t cid = 1; cid <= class_count_; cid++) {
1569 if (!class_table->HasValidClassAt(cid)) {
1570 WriteUnsigned(0); // Flags
1571 WriteUtf8(""); // Name
1572 WriteUtf8(""); // Library name
1573 WriteUtf8(""); // Library uri
1574 WriteUtf8(""); // Reserved
1575 WriteUnsigned(0); // Field count
1576 } else {
1577 cls = class_table->At(cid);
1578 WriteUnsigned(0); // Flags
1579 str = cls.Name();
1580 ScrubAndWriteUtf8(const_cast<char*>(str.ToCString()));
1581 lib = cls.library();
1582 if (lib.IsNull()) {
1583 WriteUtf8("");
1584 WriteUtf8("");
1585 } else {
1586 str = lib.name();
1587 ScrubAndWriteUtf8(const_cast<char*>(str.ToCString()));
1588 str = lib.url();
1589 ScrubAndWriteUtf8(const_cast<char*>(str.ToCString()));
1590 }
1591 WriteUtf8(""); // Reserved
1592
1593 if (auto slots = object_slots.ObjectSlotsFor(cid)) {
1594 WriteUnsigned(slots->length());
1595 for (intptr_t index = 0; index < slots->length(); ++index) {
1596 const auto& slot = (*slots)[index];
1597 const intptr_t kStrongFlag = 1;
1598 WriteUnsigned(kStrongFlag);
1599 WriteUnsigned(index);
1600 ScrubAndWriteUtf8(const_cast<char*>(slot.name));
1601 WriteUtf8(""); // Reserved
1602 }
1603 } else {
1604 // May be an abstract class.
1605 ASSERT(!cls.is_finalized());
1606 WriteUnsigned(0);
1607 }
1608 }
1609 }
1610 }
1611
1612 SetupImagePageBoundaries();
1613 SetupCountingPages();
1614
1615 intptr_t num_isolates = 0;
1616 intptr_t num_image_objects = 0;
1617 {
1618 Pass1Visitor visitor(this, &object_slots);
1619
1620 // Root "objects".
1621 {
1622 ++object_count_;
1623 isolate_group()->VisitSharedPointers(&visitor);
1624 }
1625 {
1626 ++object_count_;
1627 CountImagePageRefs visitor;
1628 H->old_space()->VisitObjectsImagePages(&visitor);
1629 num_image_objects = visitor.count();
1630 CountReferences(num_image_objects);
1631 }
1632 {
1633 isolate_group()->ForEachIsolate(
1634 [&](Isolate* isolate) {
1635 ++object_count_;
1636 isolate->VisitObjectPointers(&visitor,
1637 ValidationPolicy::kDontValidateFrames);
1638 isolate->VisitStackPointers(&visitor,
1639 ValidationPolicy::kDontValidateFrames);
1640 ++num_isolates;
1641 },
1642 /*at_safepoint=*/true);
1643 }
1644 CountReferences(1); // Root -> Image Pages
1645 CountReferences(num_isolates); // Root -> Isolate
1646
1647 // Heap objects.
1648 iteration.IterateVMIsolateObjects(&visitor);
1649 iteration.IterateObjects(&visitor);
1650
1651 // External properties.
1652 isolate()->group()->VisitWeakPersistentHandles(&visitor);
1653
1654 // Smis.
1655 for (SmiPtr smi : smis_) {
1656 AssignObjectId(smi);
1657 }
1658 }
1659
1660 {
1661 Pass2Visitor visitor(this, &object_slots);
1662
1663 WriteUnsigned(reference_count_);
1664 WriteUnsigned(object_count_);
1665
1666 // Root "objects".
1667 {
1668 WriteUnsigned(kRootExtraCid);
1669 WriteUnsigned(0); // shallowSize
1670 WriteUnsigned(kNoData);
1671 visitor.DoCount();
1672 isolate_group()->VisitSharedPointers(&visitor);
1673 visitor.CountExtraRefs(num_isolates + 1);
1674 visitor.DoWrite();
1675 isolate_group()->VisitSharedPointers(&visitor);
1676 visitor.WriteExtraRef(2); // Root -> Image Pages
1677 for (intptr_t i = 0; i < num_isolates; i++) {
1678 // 0 = sentinel, 1 = root, 2 = image pages, 2+ = isolates
1679 visitor.WriteExtraRef(i + 3);
1680 }
1681 }
1682 {
1683 WriteUnsigned(kImagePageExtraCid);
1684 WriteUnsigned(0); // shallowSize
1685 WriteUnsigned(kNoData);
1686 WriteUnsigned(num_image_objects);
1687 WriteImagePageRefs visitor(this);
1688 H->old_space()->VisitObjectsImagePages(&visitor);
1689 DEBUG_ASSERT(visitor.count() == num_image_objects);
1690 }
1691 isolate_group()->ForEachIsolate(
1692 [&](Isolate* isolate) {
1693 WriteUnsigned(kIsolateExtraCid);
1694 WriteUnsigned(0); // shallowSize
1695 WriteUnsigned(kNameData);
1696 WriteUtf8(
1697 OS::SCreate(thread()->zone(), "%" Pd64, isolate->main_port()));
1698 visitor.DoCount();
1699 isolate->VisitObjectPointers(&visitor,
1700 ValidationPolicy::kDontValidateFrames);
1701 isolate->VisitStackPointers(&visitor,
1702 ValidationPolicy::kDontValidateFrames);
1703 visitor.DoWrite();
1704 isolate->VisitObjectPointers(&visitor,
1705 ValidationPolicy::kDontValidateFrames);
1706 isolate->VisitStackPointers(&visitor,
1707 ValidationPolicy::kDontValidateFrames);
1708 },
1709 /*at_safepoint=*/true);
1710
1711 // Heap objects.
1712 visitor.set_discount_sizes(true);
1713 iteration.IterateVMIsolateObjects(&visitor);
1714 visitor.set_discount_sizes(false);
1715 iteration.IterateObjects(&visitor);
1716
1717 // Smis.
1718 for (SmiPtr smi : smis_) {
1719 WriteUnsigned(kSmiCid + kNumExtraCids);
1720 WriteUnsigned(0); // Heap size.
1721 WriteUnsigned(kIntData);
1722 WriteUnsigned(Smi::Value(smi));
1723 WriteUnsigned(0); // No slots.
1724 }
1725
1726 // External properties.
1727 WriteUnsigned(external_property_count_);
1728 isolate()->group()->VisitWeakPersistentHandles(&visitor);
1729 }
1730
1731 {
1732 // Identity hash codes
1733 Pass3Visitor visitor(this);
1734
1735 WriteUnsigned(0); // Root fake object.
1736 WriteUnsigned(0); // Image pages fake object.
1737 isolate_group()->ForEachIsolate(
1738 [&](Isolate* isolate) {
1739 WriteUnsigned(0); // Isolate fake object.
1740 },
1741 /*at_safepoint=*/true);
1742
1743 // Visit the rest of the objects.
1744 iteration.IterateVMIsolateObjects(&visitor);
1745 iteration.IterateObjects(&visitor);
1746 for (SmiPtr smi : smis_) {
1747 USE(smi);
1748 WriteUnsigned(0); // No identity hash.
1749 }
1750 }
1751
1752 ClearObjectIds();
1753 Flush(true);
1754}
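// Note: the stream produced by Write() above is laid out as: the "dartheap"
// magic and flags, the isolate name and heap-size summary, the class table
// (three synthetic classes plus one entry per VM class id with its field
// names), the total reference and object counts, one record per object
// (class id, shallow size, non-reference data, reference count, references)
// beginning with the synthetic root/image-page/isolate nodes and ending with
// the collected Smis, then the external property records, and finally one
// identity hash per object.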
1755
1756uint32_t HeapSnapshotWriter::GetHeapSnapshotIdentityHash(Thread* thread,
1757 ObjectPtr obj) {
1758 if (!obj->IsHeapObject()) return 0;
1759 intptr_t cid = obj->GetClassId();
1760 uint32_t hash = 0;
1761 switch (cid) {
1762 case kForwardingCorpse:
1763 case kFreeListElement:
1764 case kSmiCid:
1765 UNREACHABLE();
1766 case kArrayCid:
1767 case kBoolCid:
1768 case kCodeSourceMapCid:
1769 case kCompressedStackMapsCid:
1770 case kDoubleCid:
1771 case kGrowableObjectArrayCid:
1772 case kImmutableArrayCid:
1773 case kConstMapCid:
1774 case kConstSetCid:
1775 case kInstructionsCid:
1776 case kInstructionsSectionCid:
1777 case kInstructionsTableCid:
1778 case kMapCid:
1779 case kSetCid:
1780 case kMintCid:
1781 case kNeverCid:
1782 case kSentinelCid:
1783 case kNullCid:
1784 case kObjectPoolCid:
1785 case kOneByteStringCid:
1786 case kPcDescriptorsCid:
1787 case kTwoByteStringCid:
1788 case kVoidCid:
1789 // Don't provide hash codes for objects with the above CIDs, to avoid
1790 // having to initialize identity hash codes for common primitives and
1791 // types that typically never need them.
1792 break;
1793 default: {
1794 hash = GetHashHelper(thread, obj);
1795 }
1796 }
1797 return hash;
1798}
1799
1800// Generates a random value which can serve as an identity hash.
1801// It must be a non-zero smi value (see also [Object._objectHashCode]).
1802static uint32_t GenerateHash(Random* random) {
1803 uint32_t hash;
1804 do {
1805 hash = random->NextUInt32();
1806 } while (hash == 0 || (kSmiBits < 32 && !Smi::IsValid(hash)));
1807 return hash;
1808}
1809
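// Returns the identity hash for the given object, lazily generating and
// caching a fresh one if the object does not yet have a hash. Objects
// residing in read-only image pages are expected to already carry a hash
// (see the asserts below).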
1810uint32_t HeapSnapshotWriter::GetHashHelper(Thread* thread, ObjectPtr obj) {
1811 uint32_t hash;
1812#if defined(HASH_IN_OBJECT_HEADER)
1813 hash = Object::GetCachedHash(obj);
1814 if (hash == 0) {
1815 ASSERT(!thread->heap()->old_space()->IsObjectFromImagePages(obj));
1816 hash = GenerateHash(thread->random());
1817 Object::SetCachedHashIfNotSet(obj, hash);
1818 }
1819#else
1820 Heap* heap = thread->heap();
1821 hash = heap->GetHash(obj);
1822 if (hash == 0) {
1823 ASSERT(!heap->old_space()->IsObjectFromImagePages(obj));
1824 hash = GenerateHash(thread->random());
1825 heap->SetHashIfNotSet(obj, hash);
1826 }
1827#endif
1828 return hash;
1829}
1830
1831CountObjectsVisitor::CountObjectsVisitor(Thread* thread, intptr_t class_count)
1832 : ObjectVisitor(),
1833 HandleVisitor(thread),
1834 new_count_(new intptr_t[class_count]),
1835 new_size_(new intptr_t[class_count]),
1836 new_external_size_(new intptr_t[class_count]),
1837 old_count_(new intptr_t[class_count]),
1838 old_size_(new intptr_t[class_count]),
1839 old_external_size_(new intptr_t[class_count]) {
1840 memset(new_count_.get(), 0, class_count * sizeof(intptr_t));
1841 memset(new_size_.get(), 0, class_count * sizeof(intptr_t));
1842 memset(new_external_size_.get(), 0, class_count * sizeof(intptr_t));
1843 memset(old_count_.get(), 0, class_count * sizeof(intptr_t));
1844 memset(old_size_.get(), 0, class_count * sizeof(intptr_t));
1845 memset(old_external_size_.get(), 0, class_count * sizeof(intptr_t));
1846}
1847
1848void CountObjectsVisitor::VisitObject(ObjectPtr obj) {
1849 intptr_t cid = obj->GetClassId();
1850 intptr_t size = obj->untag()->HeapSize();
1851 if (obj->IsNewObject()) {
1852 new_count_[cid] += 1;
1853 new_size_[cid] += size;
1854 } else {
1855 old_count_[cid] += 1;
1856 old_size_[cid] += size;
1857 }
1858}
1859
1860void CountObjectsVisitor::VisitHandle(uword addr) {
1861 FinalizablePersistentHandle* handle =
1862 reinterpret_cast<FinalizablePersistentHandle*>(addr);
1863 ObjectPtr obj = handle->ptr();
1864 if (!obj->IsHeapObject()) {
1865 return;
1866 }
1867 intptr_t cid = obj->GetClassId();
1868 intptr_t size = handle->external_size();
1869 if (obj->IsNewObject()) {
1870 new_external_size_[cid] += size;
1871 } else {
1872 old_external_size_[cid] += size;
1873 }
1874}
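// Typical usage (sketch): a CountObjectsVisitor is driven by a full heap
// iteration plus a walk over the weak persistent handles, roughly:
//
//   HeapIterationScope iteration(thread);
//   CountObjectsVisitor visitor(thread, class_table->NumCids());
//   iteration.IterateObjects(&visitor);
//   isolate_group->VisitWeakPersistentHandles(&visitor);
//
// where class_table and isolate_group stand for the caller's locals. The
// visitor's per-class new-/old-space counts, sizes, and external sizes can
// then be read out, e.g. to build an allocation profile.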
1875
1876#endif // defined(DART_ENABLE_HEAP_SNAPSHOT_WRITER)
1877
1878} // namespace dart
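For context, a minimal embedder-side sketch of consuming this writer's output, assuming the Dart_WriteHeapSnapshot entry point and the Dart_HeapSnapshotWriteChunkCallback typedef from dart_api.h; the helper names, file handling, and ownership comments below are illustrative rather than authoritative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "include/dart_api.h"  // Include path varies by embedding.

// Receives the snapshot in chunks as the writer flushes its buffer.
static void WriteChunk(void* context, uint8_t* buffer, intptr_t size,
                       bool is_last) {
  fwrite(buffer, 1, static_cast<size_t>(size), static_cast<FILE*>(context));
  free(buffer);   // Assumption: the callback takes ownership of each chunk.
  (void)is_last;  // No further calls arrive after is_last is true.
}

// Assumption: called on a thread that has entered the isolate of interest.
static bool DumpHeapSnapshot(const char* path) {
  FILE* file = fopen(path, "wb");
  if (file == nullptr) return false;
  char* error = Dart_WriteHeapSnapshot(WriteChunk, file);
  fclose(file);
  if (error != nullptr) {
    fprintf(stderr, "Heap snapshot failed: %s\n", error);
    free(error);  // Assumption: error strings are malloc-allocated.
    return false;
  }
  return true;
}

Concatenating the chunks in arrival order reproduces the snapshot stream that HeapSnapshotWriter::Flush emits above.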