marker.cc
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/marker.h"
6
7#include "platform/assert.h"
8#include "platform/atomic.h"
9#include "vm/allocation.h"
10#include "vm/dart_api_state.h"
11#include "vm/heap/gc_shared.h"
12#include "vm/heap/pages.h"
13#include "vm/heap/pointer_block.h"
14#include "vm/isolate.h"
15#include "vm/log.h"
16#include "vm/object_id_ring.h"
17#include "vm/raw_object.h"
18#include "vm/stack_frame.h"
19#include "vm/tagged_pointer.h"
20#include "vm/thread_barrier.h"
21#include "vm/thread_pool.h"
22#include "vm/timeline.h"
23#include "vm/visitor.h"
24
25namespace dart {
26
27template <bool sync>
28class MarkingVisitorBase : public ObjectPointerVisitor {
29 public:
30 MarkingVisitorBase(IsolateGroup* isolate_group,
31 PageSpace* page_space,
32 MarkingStack* marking_stack,
33 MarkingStack* new_marking_stack,
34 MarkingStack* deferred_marking_stack)
35 : ObjectPointerVisitor(isolate_group),
36 page_space_(page_space),
37 work_list_(marking_stack),
38 new_work_list_(new_marking_stack),
39 deferred_work_list_(deferred_marking_stack),
40 marked_bytes_(0),
41 marked_micros_(0),
42 concurrent_(true) {}
43 ~MarkingVisitorBase() { ASSERT(delayed_.IsEmpty()); }
44
45 uintptr_t marked_bytes() const { return marked_bytes_; }
46 int64_t marked_micros() const { return marked_micros_; }
47 void AddMicros(int64_t micros) { marked_micros_ += micros; }
48 void set_concurrent(bool value) { concurrent_ = value; }
49
50#ifdef DEBUG
51 constexpr static const char* const kName = "Marker";
52#endif
53
54 static bool IsMarked(ObjectPtr raw) {
55 ASSERT(raw->IsHeapObject());
56 return raw->untag()->IsMarked();
57 }
58
59 bool ProcessPendingWeakProperties() {
60 bool more_to_mark = false;
61 WeakPropertyPtr cur_weak = delayed_.weak_properties.Release();
62 while (cur_weak != WeakProperty::null()) {
63 WeakPropertyPtr next_weak =
64 cur_weak->untag()->next_seen_by_gc_.Decompress(cur_weak->heap_base());
65 ObjectPtr raw_key = cur_weak->untag()->key();
66 // Reset the next pointer in the weak property.
67 cur_weak->untag()->next_seen_by_gc_ = WeakProperty::null();
68 if (raw_key->IsImmediateObject() || raw_key->untag()->IsMarked()) {
69 ObjectPtr raw_val = cur_weak->untag()->value();
70 if (!raw_val->IsImmediateObject() && !raw_val->untag()->IsMarked()) {
71 more_to_mark = true;
72 }
73
74 // The key is marked so we make sure to properly visit all pointers
75 // originating from this weak property.
76 cur_weak->untag()->VisitPointersNonvirtual(this);
77 } else {
78 // Requeue this weak property to be handled later.
79 ASSERT(IsMarked(cur_weak));
80 delayed_.weak_properties.Enqueue(cur_weak);
81 }
82 // Advance to next weak property in the queue.
83 cur_weak = next_weak;
84 }
85 return more_to_mark;
86 }
87
88 void DrainMarkingStackWithPauseChecks() {
89 do {
90 ObjectPtr obj;
91 while (work_list_.Pop(&obj)) {
92 if (obj->IsNewObject()) {
93 Page* page = Page::Of(obj);
94 uword top = page->original_top();
95 uword end = page->original_end();
96 uword addr = static_cast<uword>(obj);
97 if (top <= addr && addr < end) {
98 new_work_list_.Push(obj);
99 if (UNLIKELY(page_space_->pause_concurrent_marking())) {
100 work_list_.Flush();
101 new_work_list_.Flush();
102 deferred_work_list_.Flush();
103 page_space_->YieldConcurrentMarking();
104 }
105 continue;
106 }
107 }
108
109 const intptr_t class_id = obj->GetClassId();
110 ASSERT(class_id != kIllegalCid);
111 ASSERT(class_id != kFreeListElement);
112 ASSERT(class_id != kForwardingCorpse);
113
114 intptr_t size;
115 if (class_id == kWeakPropertyCid) {
116 size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(obj));
117 } else if (class_id == kWeakReferenceCid) {
118 size = ProcessWeakReference(static_cast<WeakReferencePtr>(obj));
119 } else if (class_id == kWeakArrayCid) {
120 size = ProcessWeakArray(static_cast<WeakArrayPtr>(obj));
121 } else if (class_id == kFinalizerEntryCid) {
122 size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
123 } else if (sync && concurrent_ && class_id == kSuspendStateCid) {
124 // Shape changing is not compatible with concurrent marking.
125 deferred_work_list_.Push(obj);
126 size = obj->untag()->HeapSize();
127 } else {
128 size = obj->untag()->VisitPointersNonvirtual(this);
129 }
130 if (!obj->IsNewObject()) {
131 marked_bytes_ += size;
132 }
133
134 if (UNLIKELY(page_space_->pause_concurrent_marking())) {
135 work_list_.Flush();
136 new_work_list_.Flush();
137 deferred_work_list_.Flush();
138 page_space_->YieldConcurrentMarking();
139 }
140 }
141 } while (ProcessPendingWeakProperties());
142
143 ASSERT(work_list_.IsLocalEmpty());
144 // In case of scavenge before final marking.
145 new_work_list_.Flush();
146 deferred_work_list_.Flush();
147 }
148
149 void DrainMarkingStack() {
150 while (ProcessMarkingStack(kIntptrMax)) {
151 }
152 }
153
154 void ProcessMarkingStackUntil(int64_t deadline) {
155 // We check the clock *before* starting a batch of work, but we want to
156 // *end* work before the deadline. So we compare to the deadline adjusted
157 // by a conservative estimate of the duration of one batch of work.
158 deadline -= 1500;
159
160 // A 512kB budget is chosen to be large enough that we don't waste too much
161 // time on the overhead of exiting ProcessMarkingStack, querying the clock,
162 // and re-entering, and small enough that a few batches can fit in the idle
163 // time between animation frames. This amount of marking takes ~1ms on a
164 // Pixel phone.
165 constexpr intptr_t kBudget = 512 * KB;
166
167 while ((OS::GetCurrentMonotonicMicros() < deadline) &&
168 ProcessMarkingStack(kBudget)) {
169 }
170 }
171
172 bool ProcessMarkingStack(intptr_t remaining_budget) {
173 do {
174 // First drain the marking stacks.
175 ObjectPtr obj;
176 while (work_list_.Pop(&obj)) {
177 if (sync && concurrent_ && obj->IsNewObject()) {
178 Page* page = Page::Of(obj);
179 uword top = page->original_top();
180 uword end = page->original_end();
181 uword addr = static_cast<uword>(obj);
182 if (top <= addr && addr < end) {
183 new_work_list_.Push(obj);
184 // We did some work routing this object, but didn't look at any of
185 // its slots.
186 intptr_t size = kObjectAlignment;
187 remaining_budget -= size;
188 if (remaining_budget < 0) {
189 return true; // More to mark.
190 }
191 continue;
192 }
193 }
194
195 const intptr_t class_id = obj->GetClassId();
196 ASSERT(class_id != kIllegalCid);
197 ASSERT(class_id != kFreeListElement);
198 ASSERT(class_id != kForwardingCorpse);
199
200 intptr_t size;
201 if (class_id == kWeakPropertyCid) {
202 size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(obj));
203 } else if (class_id == kWeakReferenceCid) {
204 size = ProcessWeakReference(static_cast<WeakReferencePtr>(obj));
205 } else if (class_id == kWeakArrayCid) {
206 size = ProcessWeakArray(static_cast<WeakArrayPtr>(obj));
207 } else if (class_id == kFinalizerEntryCid) {
208 size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
209 } else if (sync && concurrent_ && class_id == kSuspendStateCid) {
210 // Shape changing is not compatible with concurrent marking.
211 deferred_work_list_.Push(obj);
212 size = obj->untag()->HeapSize();
213 } else {
214 if ((class_id == kArrayCid) || (class_id == kImmutableArrayCid)) {
215 size = obj->untag()->HeapSize();
216 if (size > remaining_budget) {
217 work_list_.Push(obj);
218 return true; // More to mark.
219 }
220 }
221 size = obj->untag()->VisitPointersNonvirtual(this);
222 }
223 if (!obj->IsNewObject()) {
224 marked_bytes_ += size;
225 }
226 remaining_budget -= size;
227 if (remaining_budget < 0) {
228 return true; // More to mark.
229 }
230 }
231 // Marking stack is empty.
232 } while (ProcessPendingWeakProperties());
233
234 return false; // No more work.
235 }
236
237 // Races: The concurrent marker is racing with the mutator, but this race is
238 // harmless. The concurrent marker will only visit objects that were created
239 // before the marker started. It will ignore all new-space objects based on
240 // pointer alignment, and it will ignore old-space objects created after the
241 // marker started because old-space objects allocated while marking is in
242 // progress are allocated black (mark bit set). When visiting object slots,
243 // the marker can see either the value it had when marking started (because
244 // spawning the marker task creates acq-rel ordering) or any value later
245 // stored into that slot. Because pointer slots always contain pointers (i.e.,
246 // we don't do any in-place unboxing like V8), any value we read from the slot
247 // is safe.
248 NO_SANITIZE_THREAD
249 ObjectPtr LoadPointerIgnoreRace(ObjectPtr* ptr) { return *ptr; }
250 NO_SANITIZE_THREAD
251 CompressedObjectPtr LoadCompressedPointerIgnoreRace(
252 CompressedObjectPtr* ptr) {
253 return *ptr;
254 }
255
256 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
257 for (ObjectPtr* current = first; current <= last; current++) {
258 MarkObject(LoadPointerIgnoreRace(current));
259 }
260 }
261
262#if defined(DART_COMPRESSED_POINTERS)
263 void VisitCompressedPointers(uword heap_base,
264 CompressedObjectPtr* first,
265 CompressedObjectPtr* last) override {
266 for (CompressedObjectPtr* current = first; current <= last; current++) {
267 MarkObject(
268 LoadCompressedPointerIgnoreRace(current).Decompress(heap_base));
269 }
270 }
271#endif
272
273 intptr_t ProcessWeakProperty(WeakPropertyPtr raw_weak) {
274 // The fate of the weak property is determined by its key.
275 ObjectPtr raw_key =
276 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->key_)
277 .Decompress(raw_weak->heap_base());
278 if (raw_key->IsHeapObject() && !raw_key->untag()->IsMarked()) {
279 // Key was white. Enqueue the weak property.
280 ASSERT(IsMarked(raw_weak));
281 delayed_.weak_properties.Enqueue(raw_weak);
282 return raw_weak->untag()->HeapSize();
283 }
284 // Key is gray or black. Make the weak property black.
285 return raw_weak->untag()->VisitPointersNonvirtual(this);
286 }
287
288 intptr_t ProcessWeakReference(WeakReferencePtr raw_weak) {
289 // The fate of the target field is determined by the target.
290 // The type arguments always stay alive.
291 ObjectPtr raw_target =
292 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->target_)
293 .Decompress(raw_weak->heap_base());
294 if (raw_target->IsHeapObject() && !raw_target->untag()->IsMarked()) {
295 // Target was white. Enqueue the weak reference. It is potentially dead.
296 // It might still be made alive by weak properties in next rounds.
297 ASSERT(IsMarked(raw_weak));
298 delayed_.weak_references.Enqueue(raw_weak);
299 }
300 // Always visit the type argument.
301 ObjectPtr raw_type_arguments =
302 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->type_arguments_)
303 .Decompress(raw_weak->heap_base());
304 MarkObject(raw_type_arguments);
305 return raw_weak->untag()->HeapSize();
306 }
307
308 intptr_t ProcessWeakArray(WeakArrayPtr raw_weak) {
309 delayed_.weak_arrays.Enqueue(raw_weak);
310 return raw_weak->untag()->HeapSize();
311 }
312
313 intptr_t ProcessFinalizerEntry(FinalizerEntryPtr raw_entry) {
314 ASSERT(IsMarked(raw_entry));
315 delayed_.finalizer_entries.Enqueue(raw_entry);
316 // Only visit token and next.
317 MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
318 .Decompress(raw_entry->heap_base()));
319 MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
320 .Decompress(raw_entry->heap_base()));
321 return raw_entry->untag()->HeapSize();
322 }
323
324 void ProcessDeferredMarking() {
325 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessDeferredMarking");
326
327 ObjectPtr obj;
328 while (deferred_work_list_.Pop(&obj)) {
329 ASSERT(obj->IsHeapObject());
330 // We need to scan objects even if they were already scanned via ordinary
331 // marking. An object may have changed since its ordinary scan and been
332 // added to deferred marking stack to compensate for write-barrier
333 // elimination.
334 // A given object may be included in the deferred marking stack multiple
335 // times. It may or may not also be in the ordinary marking stack, so
336 // failing to acquire the mark bit here doesn't reliably indicate the
337 // object was already encountered through the deferred marking stack. Our
338 // processing here is idempotent, so repeated visits only hurt performance
339 // but not correctness. Duplication is expected to be low.
340 // By the absence of a special case, we are treating WeakProperties as
341 // strong references here. This guarantees a WeakProperty will only be
342 // added to the delayed_weak_properties_ list of the worker that
343 // encounters it during ordinary marking. This is in the same spirit as
344 // the eliminated write barrier, which would have added the newly written
345 // key and value to the ordinary marking stack.
346 intptr_t size = obj->untag()->VisitPointersNonvirtual(this);
347 // Add the size only if we win the marking race to prevent
348 // double-counting.
349 if (TryAcquireMarkBit(obj)) {
350 if (!obj->IsNewObject()) {
351 marked_bytes_ += size;
352 }
353 }
354 }
355 }
356
357 // Called when all marking is complete. Any attempt to push to the mark stack
358 // after this will trigger an error.
359 void FinalizeMarking() {
360 work_list_.Finalize();
361 new_work_list_.Finalize();
362 deferred_work_list_.Finalize();
363 MournFinalizerEntries();
364 // MournFinalizerEntries inserts newly discovered dead entries into the
365 // linked list attached to the Finalizer. This might create
366 // cross-generational references which might be added to the store
367 // buffer. Release the store buffer to satisfy the invariant that
368 // thread local store buffer is empty after marking and all references
369 // are processed.
370 Thread::Current()->ReleaseStoreBuffer();
371 }
372
373 void MournWeakProperties() {
374 WeakPropertyPtr current = delayed_.weak_properties.Release();
375 while (current != WeakProperty::null()) {
376 WeakPropertyPtr next = current->untag()->next_seen_by_gc();
377 current->untag()->next_seen_by_gc_ = WeakProperty::null();
378 current->untag()->key_ = Object::null();
379 current->untag()->value_ = Object::null();
380 current = next;
381 }
382 }
383
384 void MournWeakReferences() {
385 WeakReferencePtr current = delayed_.weak_references.Release();
386 while (current != WeakReference::null()) {
387 WeakReferencePtr next = current->untag()->next_seen_by_gc();
388 current->untag()->next_seen_by_gc_ = WeakReference::null();
389 ForwardOrSetNullIfCollected(current, &current->untag()->target_);
390 current = next;
391 }
392 }
393
394 void MournWeakArrays() {
395 WeakArrayPtr current = delayed_.weak_arrays.Release();
396 while (current != WeakArray::null()) {
397 WeakArrayPtr next = current->untag()->next_seen_by_gc();
398 current->untag()->next_seen_by_gc_ = WeakArray::null();
399 intptr_t length = Smi::Value(current->untag()->length());
400 for (intptr_t i = 0; i < length; i++) {
401 ForwardOrSetNullIfCollected(current, &current->untag()->data()[i]);
402 }
403 current = next;
404 }
405 }
406
407 void MournFinalizerEntries() {
408 FinalizerEntryPtr current = delayed_.finalizer_entries.Release();
409 while (current != FinalizerEntry::null()) {
410 FinalizerEntryPtr next = current->untag()->next_seen_by_gc();
411 current->untag()->next_seen_by_gc_ = FinalizerEntry::null();
412 MournFinalizerEntry(this, current);
413 current = next;
414 }
415 }
416
417 // Returns whether the object referred to in `slot` was GCed this GC.
418 static bool ForwardOrSetNullIfCollected(ObjectPtr parent,
419 CompressedObjectPtr* slot) {
420 ObjectPtr target = slot->Decompress(parent->heap_base());
421 if (target->IsImmediateObject()) {
422 // Object not touched during this GC.
423 return false;
424 }
425 if (target->untag()->IsMarked()) {
426 // Object already null (which is permanently marked) or has survived this
427 // GC.
428 return false;
429 }
430 *slot = Object::null();
431 return true;
432 }
433
434 bool WaitForWork(RelaxedAtomic<uintptr_t>* num_busy) {
435 return work_list_.WaitForWork(num_busy);
436 }
437
438 void Flush(GCLinkedLists* global_list) {
439 work_list_.Flush();
440 new_work_list_.Flush();
441 deferred_work_list_.Flush();
442 delayed_.FlushInto(global_list);
443 }
444
445 void Adopt(GCLinkedLists* other) {
446 ASSERT(delayed_.IsEmpty());
447 other->FlushInto(&delayed_);
448 }
449
450 void AbandonWork() {
451 work_list_.AbandonWork();
452 new_work_list_.AbandonWork();
453 deferred_work_list_.AbandonWork();
454 delayed_.Release();
455 }
456
457 void FinalizeIncremental(GCLinkedLists* global_list) {
458 work_list_.Flush();
459 work_list_.Finalize();
460 new_work_list_.Flush();
461 new_work_list_.Finalize();
462 deferred_work_list_.Flush();
463 deferred_work_list_.Finalize();
464 delayed_.FlushInto(global_list);
465 }
466
467 GCLinkedLists* delayed() { return &delayed_; }
468
469 private:
470 void PushMarked(ObjectPtr obj) {
471 ASSERT(obj->IsHeapObject());
472
473 // Push the marked object on the marking stack.
474 ASSERT(obj->untag()->IsMarked());
475 work_list_.Push(obj);
476 }
477
478 static bool TryAcquireMarkBit(ObjectPtr obj) {
479 if (!sync) {
480 obj->untag()->SetMarkBitUnsynchronized();
481 return true;
482 } else {
483 return obj->untag()->TryAcquireMarkBit();
484 }
485 }
486
487 DART_FORCE_INLINE
488 void MarkObject(ObjectPtr obj) {
489 if (obj->IsImmediateObject()) {
490 return;
491 }
492
493 if (sync && concurrent_ && obj->IsNewObject()) {
494 if (TryAcquireMarkBit(obj)) {
495 PushMarked(obj);
496 }
497 return;
498 }
499
500 // While it might seem this is redundant with TryAcquireMarkBit, we must
501 // do this check first to avoid attempting an atomic::fetch_and on the
502 // read-only vm-isolate or image pages, which can fault even if there is no
503 // change in the value.
504 // Doing this before checking for an Instructions object avoids
505 // unnecessary queueing of pre-marked objects.
506 // Race: The concurrent marker may observe a pointer into a heap page that
507 // was allocated after the concurrent marker started. It can read either a
508 // zero or the header of an object allocated black, both of which appear
509 // marked.
510 if (obj->untag()->IsMarkedIgnoreRace()) {
511 return;
512 }
513
514 intptr_t class_id = obj->GetClassId();
515 ASSERT(class_id != kFreeListElement);
516
517 if (sync && UNLIKELY(class_id == kInstructionsCid)) {
518 // If this is the concurrent marker, this object may be non-writable due
519 // to W^X (--write-protect-code).
520 deferred_work_list_.Push(obj);
521 return;
522 }
523
524 if (!TryAcquireMarkBit(obj)) {
525 // Already marked.
526 return;
527 }
528
529 PushMarked(obj);
530 }
531
532 PageSpace* page_space_;
533 MarkerWorkList work_list_;
534 MarkerWorkList new_work_list_;
535 MarkerWorkList deferred_work_list_;
536 GCLinkedLists delayed_;
537 uintptr_t marked_bytes_;
538 int64_t marked_micros_;
539 bool concurrent_;
540
541 DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitorBase);
542};
543
544typedef MarkingVisitorBase<false> UnsyncMarkingVisitor;
545typedef MarkingVisitorBase<true> SyncMarkingVisitor;
546
547static bool IsUnreachable(const ObjectPtr obj) {
548 if (obj->IsImmediateObject()) {
549 return false;
550 }
551 return !obj->untag()->IsMarked();
552}
553
554class MarkingWeakVisitor : public HandleVisitor {
555 public:
556 explicit MarkingWeakVisitor(Thread* thread) : HandleVisitor(thread) {}
557
558 void VisitHandle(uword addr) override {
559 FinalizablePersistentHandle* handle =
560 reinterpret_cast<FinalizablePersistentHandle*>(addr);
561 ObjectPtr obj = handle->ptr();
562 if (IsUnreachable(obj)) {
563 handle->UpdateUnreachable(thread()->isolate_group());
564 }
565 }
566
567 private:
568 DISALLOW_COPY_AND_ASSIGN(MarkingWeakVisitor);
569};
570
571void GCMarker::Prologue() {
572 isolate_group_->ReleaseStoreBuffers();
573 marking_stack_.PushAll(new_marking_stack_.PopAll());
574}
575
576void GCMarker::Epilogue() {}
577
578enum RootSlices {
579 kIsolate = 0,
580 kNumFixedRootSlices = 1,
581};
582
583void GCMarker::ResetSlices() {
584 ASSERT(Thread::Current()->OwnsGCSafepoint());
585
586 root_slices_started_ = 0;
587 root_slices_finished_ = 0;
588 root_slices_count_ = kNumFixedRootSlices;
589
590 weak_slices_started_ = 0;
591}
592
593void GCMarker::IterateRoots(ObjectPointerVisitor* visitor) {
594 for (;;) {
595 intptr_t slice = root_slices_started_.fetch_add(1);
596 if (slice >= root_slices_count_) {
597 break; // No more slices.
598 }
599
600 switch (slice) {
601 case kIsolate: {
603 "ProcessIsolateGroupRoots");
604 isolate_group_->VisitObjectPointers(
605 visitor, ValidationPolicy::kDontValidateFrames);
606 break;
607 }
608 }
609
610 MonitorLocker ml(&root_slices_monitor_);
611 root_slices_finished_++;
612 if (root_slices_finished_ == root_slices_count_) {
613 ml.Notify();
614 }
615 }
616}
617
618enum WeakSlices {
619 kWeakHandles = 0,
620 kWeakTables,
621 kObjectIdRing,
622 kRememberedSet,
623 kNumWeakSlices,
624};
625
626void GCMarker::IterateWeakRoots(Thread* thread) {
627 for (;;) {
628 intptr_t slice = weak_slices_started_.fetch_add(1);
629 if (slice >= kNumWeakSlices) {
630 return; // No more slices.
631 }
632
633 switch (slice) {
634 case kWeakHandles:
635 ProcessWeakHandles(thread);
636 break;
637 case kWeakTables:
638 ProcessWeakTables(thread);
639 break;
640 case kObjectIdRing:
641 ProcessObjectIdTable(thread);
642 break;
643 case kRememberedSet:
644 ProcessRememberedSet(thread);
645 break;
646 default:
647 UNREACHABLE();
648 }
649 }
650}
651
652void GCMarker::ProcessWeakHandles(Thread* thread) {
653 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakHandles");
654 MarkingWeakVisitor visitor(thread);
655 ApiState* state = isolate_group_->api_state();
656 ASSERT(state != nullptr);
657 isolate_group_->VisitWeakPersistentHandles(&visitor);
658}
659
660void GCMarker::ProcessWeakTables(Thread* thread) {
661 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakTables");
662 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
663 Dart_HeapSamplingDeleteCallback cleanup = nullptr;
664#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
665 if (sel == Heap::kHeapSamplingData) {
666 cleanup = HeapProfileSampler::delete_callback();
667 }
668#endif
669 WeakTable* table =
670 heap_->GetWeakTable(Heap::kOld, static_cast<Heap::WeakSelector>(sel));
671 intptr_t size = table->size();
672 for (intptr_t i = 0; i < size; i++) {
673 if (table->IsValidEntryAtExclusive(i)) {
674 // The object has been collected.
675 ObjectPtr obj = table->ObjectAtExclusive(i);
676 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
677 if (cleanup != nullptr) {
678 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
679 }
680 table->InvalidateAtExclusive(i);
681 }
682 }
683 }
684 table =
685 heap_->GetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel));
686 size = table->size();
687 for (intptr_t i = 0; i < size; i++) {
688 if (table->IsValidEntryAtExclusive(i)) {
689 // The object has been collected.
690 ObjectPtr obj = table->ObjectAtExclusive(i);
691 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
692 if (cleanup != nullptr) {
693 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
694 }
695 table->InvalidateAtExclusive(i);
696 }
697 }
698 }
699 }
700}
701
702void GCMarker::ProcessRememberedSet(Thread* thread) {
703 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessRememberedSet");
704 // Filter collected objects from the remembered set.
705 StoreBuffer* store_buffer = isolate_group_->store_buffer();
706 StoreBufferBlock* reading = store_buffer->PopAll();
707 StoreBufferBlock* writing = store_buffer->PopNonFullBlock();
708 while (reading != nullptr) {
709 StoreBufferBlock* next = reading->next();
710 // Generated code appends to store buffers; tell MemorySanitizer.
711 MSAN_UNPOISON(reading, sizeof(*reading));
712 while (!reading->IsEmpty()) {
713 ObjectPtr obj = reading->Pop();
714 ASSERT(!obj->IsForwardingCorpse());
715 ASSERT(obj->untag()->IsRemembered());
716 if (obj->untag()->IsMarked()) {
717 writing->Push(obj);
718 if (writing->IsFull()) {
719 store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
720 writing = store_buffer->PopNonFullBlock();
721 }
722 }
723 }
724 reading->Reset();
725 // Return the emptied block for recycling (no need to check threshold).
726 store_buffer->PushBlock(reading, StoreBuffer::kIgnoreThreshold);
727 reading = next;
728 }
729 store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
730}
731
732class ObjectIdRingClearPointerVisitor : public ObjectPointerVisitor {
733 public:
734 explicit ObjectIdRingClearPointerVisitor(IsolateGroup* isolate_group)
735 : ObjectPointerVisitor(isolate_group) {}
736
737 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
738 for (ObjectPtr* current = first; current <= last; current++) {
739 ObjectPtr obj = *current;
740 ASSERT(obj->IsHeapObject());
741 if (!obj->untag()->IsMarked()) {
742 // Object has become garbage. Replace it with null.
743 *current = Object::null();
744 }
745 }
746 }
747
748#if defined(DART_COMPRESSED_POINTERS)
749 void VisitCompressedPointers(uword heap_base,
750 CompressedObjectPtr* first,
751 CompressedObjectPtr* last) override {
752 UNREACHABLE(); // ObjectIdRing is not compressed.
753 }
754#endif
755};
756
757void GCMarker::ProcessObjectIdTable(Thread* thread) {
758#ifndef PRODUCT
759 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessObjectIdTable");
760 ObjectIdRingClearPointerVisitor visitor(isolate_group_);
761 isolate_group_->VisitObjectIdRingPointers(&visitor);
762#endif // !PRODUCT
763}
764
765class ParallelMarkTask : public ThreadPool::Task {
766 public:
767 ParallelMarkTask(GCMarker* marker,
768 IsolateGroup* isolate_group,
769 MarkingStack* marking_stack,
770 ThreadBarrier* barrier,
771 SyncMarkingVisitor* visitor,
772 RelaxedAtomic<uintptr_t>* num_busy)
773 : marker_(marker),
774 isolate_group_(isolate_group),
775 marking_stack_(marking_stack),
776 barrier_(barrier),
777 visitor_(visitor),
778 num_busy_(num_busy) {}
779
780 virtual void Run() {
781 if (!barrier_->TryEnter()) {
782 barrier_->Release();
783 return;
784 }
785
786 bool result = Thread::EnterIsolateGroupAsHelper(
787 isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
788 ASSERT(result);
789
790 RunEnteredIsolateGroup();
791
792 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
793
794 barrier_->Sync();
795 barrier_->Release();
796 }
797
798 void RunEnteredIsolateGroup() {
799 {
800 Thread* thread = Thread::Current();
801 TIMELINE_FUNCTION_GC_DURATION(thread, "ParallelMark");
802 int64_t start = OS::GetCurrentMonotonicMicros();
803
804 // Phase 1: Iterate over roots and drain marking stack in tasks.
805 num_busy_->fetch_add(1u);
806 visitor_->set_concurrent(false);
807 marker_->IterateRoots(visitor_);
808
809 visitor_->ProcessDeferredMarking();
810
811 bool more_to_mark = false;
812 do {
813 do {
814 visitor_->DrainMarkingStack();
815 } while (visitor_->WaitForWork(num_busy_));
816 // Wait for all markers to stop.
817 barrier_->Sync();
818#if defined(DEBUG)
819 ASSERT(num_busy_->load() == 0);
820 // Caveat: must not allow any marker to continue past the barrier
821 // before we checked num_busy, otherwise one of them might rush
822 // ahead and increment it.
823 barrier_->Sync();
824#endif
825 // Check if we have any pending properties with marked keys.
826 // Those might have been marked by another marker.
827 more_to_mark = visitor_->ProcessPendingWeakProperties();
828 if (more_to_mark) {
829 // We have more work to do. Notify others.
830 num_busy_->fetch_add(1u);
831 }
832
833 // Wait for all other markers to finish processing their pending
834 // weak properties and decide if they need to continue marking.
835 // Caveat: we need two barriers here to make this decision in lock step
836 // between all markers and the main thread.
837 barrier_->Sync();
838 if (!more_to_mark && (num_busy_->load() > 0)) {
839 // All markers continue to mark as long as any single marker has
840 // some work to do.
841 num_busy_->fetch_add(1u);
842 more_to_mark = true;
843 }
844 barrier_->Sync();
845 } while (more_to_mark);
846
847 // Phase 2: deferred marking.
848 visitor_->ProcessDeferredMarking();
849 barrier_->Sync();
850
851 // Phase 3: Weak processing and statistics.
852 visitor_->MournWeakProperties();
853 visitor_->MournWeakReferences();
854 visitor_->MournWeakArrays();
855 // Don't MournFinalizerEntries here, do it on main thread, so that we
856 // don't have to coordinate workers.
857
858 marker_->IterateWeakRoots(thread);
859 int64_t stop = OS::GetCurrentMonotonicMicros();
860 visitor_->AddMicros(stop - start);
861 if (FLAG_log_marker_tasks) {
862 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
863 visitor_->marked_bytes(), visitor_->marked_micros());
864 }
865 }
866 }
867
868 private:
869 GCMarker* marker_;
870 IsolateGroup* isolate_group_;
871 MarkingStack* marking_stack_;
872 ThreadBarrier* barrier_;
873 SyncMarkingVisitor* visitor_;
874 RelaxedAtomic<uintptr_t>* num_busy_;
875
876 DISALLOW_COPY_AND_ASSIGN(ParallelMarkTask);
877};
878
879class ConcurrentMarkTask : public ThreadPool::Task {
880 public:
881 ConcurrentMarkTask(GCMarker* marker,
882 IsolateGroup* isolate_group,
883 PageSpace* page_space,
884 SyncMarkingVisitor* visitor)
885 : marker_(marker),
886 isolate_group_(isolate_group),
887 page_space_(page_space),
888 visitor_(visitor) {
889#if defined(DEBUG)
890 MonitorLocker ml(page_space_->tasks_lock());
891 ASSERT(page_space_->phase() == PageSpace::kMarking);
892#endif
893 }
894
895 virtual void Run() {
896 bool result = Thread::EnterIsolateGroupAsHelper(
897 isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
898 ASSERT(result);
899 {
900 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ConcurrentMark");
901 int64_t start = OS::GetCurrentMonotonicMicros();
902
903 marker_->IterateRoots(visitor_);
904
906 int64_t stop = OS::GetCurrentMonotonicMicros();
907 visitor_->AddMicros(stop - start);
908 if (FLAG_log_marker_tasks) {
909 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
910 visitor_->marked_bytes(), visitor_->marked_micros());
911 }
912 }
913
914 // Exit isolate cleanly *before* notifying it, to avoid shutdown race.
915 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
916 // This marker task is done. Notify the original isolate.
917 {
918 MonitorLocker ml(page_space_->tasks_lock());
919 page_space_->set_tasks(page_space_->tasks() - 1);
920 page_space_->set_concurrent_marker_tasks(
921 page_space_->concurrent_marker_tasks() - 1);
922 page_space_->set_concurrent_marker_tasks_active(
923 page_space_->concurrent_marker_tasks_active() - 1);
924 ASSERT(page_space_->phase() == PageSpace::kMarking);
925 if (page_space_->concurrent_marker_tasks() == 0) {
926 page_space_->set_phase(PageSpace::kAwaitingFinalization);
927 isolate_group_->ScheduleInterrupts(Thread::kVMInterrupt);
928 }
929 ml.NotifyAll();
930 }
931 }
932
933 private:
934 GCMarker* marker_;
935 IsolateGroup* isolate_group_;
936 PageSpace* page_space_;
937 SyncMarkingVisitor* visitor_;
938
939 DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkTask);
940};
941
942intptr_t GCMarker::MarkedWordsPerMicro() const {
943 intptr_t marked_words_per_job_micro;
944 if (marked_micros_ == 0) {
945 marked_words_per_job_micro = marked_words(); // Prevent division by zero.
946 } else {
947 marked_words_per_job_micro = marked_words() / marked_micros_;
948 }
949 if (marked_words_per_job_micro == 0) {
950 marked_words_per_job_micro = 1; // Prevent division by zero.
951 }
952 intptr_t jobs = FLAG_marker_tasks;
953 if (jobs == 0) {
954 jobs = 1; // Marking on main thread is still one job.
955 }
956 return marked_words_per_job_micro * jobs;
957}
958
959GCMarker::GCMarker(IsolateGroup* isolate_group, Heap* heap)
960 : isolate_group_(isolate_group),
961 heap_(heap),
962 marking_stack_(),
963 new_marking_stack_(),
964 deferred_marking_stack_(),
965 global_list_(),
966 visitors_(),
967 marked_bytes_(0),
968 marked_micros_(0) {
969 visitors_ = new SyncMarkingVisitor*[FLAG_marker_tasks];
970 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
971 visitors_[i] = nullptr;
972 }
973}
974
975GCMarker::~GCMarker() {
976 // Cleanup in case isolate shutdown happens after starting the concurrent
977 // marker and before finalizing.
978 if (isolate_group_->marking_stack() != nullptr) {
979 isolate_group_->DisableIncrementalBarrier();
980 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
981 visitors_[i]->AbandonWork();
982 delete visitors_[i];
983 }
984 }
985 delete[] visitors_;
986}
987
988void GCMarker::StartConcurrentMark(PageSpace* page_space) {
989 isolate_group_->EnableIncrementalBarrier(&marking_stack_,
990 &deferred_marking_stack_);
991
992 const intptr_t num_tasks = FLAG_marker_tasks;
993
994 {
995 // Bulk increase task count before starting any task, instead of
996 // incrementing as each task is started, to prevent a task which
997 // races ahead from falsely believing it was the last task to complete.
998 MonitorLocker ml(page_space->tasks_lock());
999 ASSERT(page_space->phase() == PageSpace::kDone);
1000 page_space->set_phase(PageSpace::kMarking);
1001 page_space->set_tasks(page_space->tasks() + num_tasks);
1002 page_space->set_concurrent_marker_tasks(
1003 page_space->concurrent_marker_tasks() + num_tasks);
1004 page_space->set_concurrent_marker_tasks_active(
1005 page_space->concurrent_marker_tasks_active() + num_tasks);
1006 }
1007
1008 ResetSlices();
1009 for (intptr_t i = 0; i < num_tasks; i++) {
1010 ASSERT(visitors_[i] == nullptr);
1011 SyncMarkingVisitor* visitor =
1012 new SyncMarkingVisitor(isolate_group_, page_space, &marking_stack_,
1013 &new_marking_stack_, &deferred_marking_stack_);
1014 visitors_[i] = visitor;
1015
1016 if (i < (num_tasks - 1)) {
1017 // Begin marking on a helper thread.
1018 bool result = Dart::thread_pool()->Run<ConcurrentMarkTask>(
1019 this, isolate_group_, page_space, visitor);
1020 ASSERT(result);
1021 } else {
1022 // For the last visitor, mark roots on the main thread.
1023 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ConcurrentMark");
1024 int64_t start = OS::GetCurrentMonotonicMicros();
1025 IterateRoots(visitor);
1026 int64_t stop = OS::GetCurrentMonotonicMicros();
1027 visitor->AddMicros(stop - start);
1028 if (FLAG_log_marker_tasks) {
1029 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
1030 visitor->marked_bytes(), visitor->marked_micros());
1031 }
1032 // Continue non-root marking concurrently.
1033 bool result = Dart::thread_pool()->Run<ConcurrentMarkTask>(
1034 this, isolate_group_, page_space, visitor);
1035 ASSERT(result);
1036 }
1037 }
1038
1039 isolate_group_->DeferredMarkLiveTemporaries();
1040
1041 // Wait for roots to be marked before exiting safepoint.
1042 MonitorLocker ml(&root_slices_monitor_);
1043 while (root_slices_finished_ != root_slices_count_) {
1044 ml.Wait();
1045 }
1046}
1047
1050 "IncrementalMarkWithUnlimitedBudget");
1051
1052 SyncMarkingVisitor visitor(isolate_group_, page_space, &marking_stack_,
1053 &new_marking_stack_, &deferred_marking_stack_);
1054 int64_t start = OS::GetCurrentMonotonicMicros();
1055 visitor.DrainMarkingStack();
1056 int64_t stop = OS::GetCurrentMonotonicMicros();
1057 visitor.AddMicros(stop - start);
1058 {
1059 MonitorLocker ml(page_space->tasks_lock());
1060 visitor.FinalizeIncremental(&global_list_);
1061 marked_bytes_ += visitor.marked_bytes();
1062 marked_micros_ += visitor.marked_micros();
1063 }
1064}
1065
1066void GCMarker::IncrementalMarkWithSizeBudget(PageSpace* page_space,
1067 intptr_t size) {
1068 // Avoid setup overhead for tiny amounts of marking as the last bits of TLABs
1069 // get filled in.
1070 const intptr_t kMinimumMarkingStep = KB;
1071 if (size < kMinimumMarkingStep) return;
1072
1074 "IncrementalMarkWithSizeBudget");
1075
1076 SyncMarkingVisitor visitor(isolate_group_, page_space, &marking_stack_,
1077 &new_marking_stack_, &deferred_marking_stack_);
1078 int64_t start = OS::GetCurrentMonotonicMicros();
1079 visitor.ProcessMarkingStack(size);
1080 int64_t stop = OS::GetCurrentMonotonicMicros();
1081 visitor.AddMicros(stop - start);
1082 {
1083 MonitorLocker ml(page_space->tasks_lock());
1084 visitor.FinalizeIncremental(&global_list_);
1085 marked_bytes_ += visitor.marked_bytes();
1086 marked_micros_ += visitor.marked_micros();
1087 }
1088}
1089
1090void GCMarker::IncrementalMarkWithTimeBudget(PageSpace* page_space,
1091 int64_t deadline) {
1093 "IncrementalMarkWithTimeBudget");
1094
1095 SyncMarkingVisitor visitor(isolate_group_, page_space, &marking_stack_,
1096 &new_marking_stack_, &deferred_marking_stack_);
1097 int64_t start = OS::GetCurrentMonotonicMicros();
1098 visitor.ProcessMarkingStackUntil(deadline);
1099 int64_t stop = OS::GetCurrentMonotonicMicros();
1100 visitor.AddMicros(stop - start);
1101 {
1102 MonitorLocker ml(page_space->tasks_lock());
1103 visitor.FinalizeIncremental(&global_list_);
1104 marked_bytes_ += visitor.marked_bytes();
1105 marked_micros_ += visitor.marked_micros();
1106 }
1107}
1108
1109class VerifyAfterMarkingVisitor : public ObjectVisitor,
1110 public ObjectPointerVisitor {
1111 public:
1112 VerifyAfterMarkingVisitor()
1113 : ObjectVisitor(), ObjectPointerVisitor(IsolateGroup::Current()) {}
1114
1115 void VisitObject(ObjectPtr obj) override {
1116 if (obj->untag()->IsMarked()) {
1117 current_ = obj;
1118 obj->untag()->VisitPointers(this);
1119 }
1120 }
1121
1122 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1123 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1124 ObjectPtr obj = *ptr;
1125 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
1126 OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
1127 static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
1128 static_cast<uword>(obj));
1129 failed_ = true;
1130 }
1131 }
1132 }
1133
1134#if defined(DART_COMPRESSED_POINTERS)
1135 void VisitCompressedPointers(uword heap_base,
1136 CompressedObjectPtr* from,
1137 CompressedObjectPtr* to) override {
1138 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
1139 ObjectPtr obj = ptr->Decompress(heap_base);
1140 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
1141 OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
1142 static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
1143 static_cast<uword>(obj));
1144 failed_ = true;
1145 }
1146 }
1147 }
1148#endif
1149
1150 bool failed() const { return failed_; }
1151
1152 private:
1153 ObjectPtr current_;
1154 bool failed_ = false;
1155};
1156
1157void GCMarker::MarkObjects(PageSpace* page_space) {
1158 if (isolate_group_->marking_stack() != nullptr) {
1159 isolate_group_->DisableIncrementalBarrier();
1160 }
1161
1162 Prologue();
1163 {
1164 Thread* thread = Thread::Current();
1165 const int num_tasks = FLAG_marker_tasks;
1166 if (num_tasks == 0) {
1167 TIMELINE_FUNCTION_GC_DURATION(thread, "Mark");
1168 int64_t start = OS::GetCurrentMonotonicMicros();
1169 // Mark everything on main thread.
1170 UnsyncMarkingVisitor visitor(isolate_group_, page_space, &marking_stack_,
1171 &new_marking_stack_,
1172 &deferred_marking_stack_);
1173 visitor.set_concurrent(false);
1174 ResetSlices();
1175 IterateRoots(&visitor);
1176 visitor.ProcessDeferredMarking();
1177 visitor.DrainMarkingStack();
1178 visitor.ProcessDeferredMarking();
1179 visitor.FinalizeMarking();
1180 visitor.MournWeakProperties();
1181 visitor.MournWeakReferences();
1182 visitor.MournWeakArrays();
1183 visitor.MournFinalizerEntries();
1184 IterateWeakRoots(thread);
1185 // All marking done; detach code, etc.
1186 int64_t stop = OS::GetCurrentMonotonicMicros();
1187 visitor.AddMicros(stop - start);
1188 marked_bytes_ += visitor.marked_bytes();
1189 marked_micros_ += visitor.marked_micros();
1190 } else {
1191 ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
1192
1193 ResetSlices();
1194 // Used to coordinate draining among tasks; all start out as 'busy'.
1195 RelaxedAtomic<uintptr_t> num_busy = 0;
1196 // Phase 1: Iterate over roots and drain marking stack in tasks.
1197
1198 for (intptr_t i = 0; i < num_tasks; ++i) {
1199 SyncMarkingVisitor* visitor = visitors_[i];
1200 // Visitors may or may not have already been created depending on
1201 // whether we did some concurrent marking.
1202 if (visitor == nullptr) {
1203 visitor = new SyncMarkingVisitor(isolate_group_, page_space,
1204 &marking_stack_, &new_marking_stack_,
1205 &deferred_marking_stack_);
1206 visitors_[i] = visitor;
1207 }
1208
1209 // Move all work from local blocks to the global list. Any given
1210 // visitor might not get to run if it fails to reach TryEnter soon
1211 // enough, and we must not fail to visit objects just because they are
1212 // sitting in such a visitor's local blocks.
1213 visitor->Flush(&global_list_);
1214 // Need to move weak property list too.
1215
1216 if (i < (num_tasks - 1)) {
1217 // Begin marking on a helper thread.
1218 bool result = Dart::thread_pool()->Run<ParallelMarkTask>(
1219 this, isolate_group_, &marking_stack_, barrier, visitor,
1220 &num_busy);
1221 ASSERT(result);
1222 } else {
1223 // Last worker is the main thread.
1224 visitor->Adopt(&global_list_);
1225 ParallelMarkTask task(this, isolate_group_, &marking_stack_, barrier,
1226 visitor, &num_busy);
1227 task.RunEnteredIsolateGroup();
1228 barrier->Sync();
1229 barrier->Release();
1230 }
1231 }
1232
1233 for (intptr_t i = 0; i < num_tasks; i++) {
1234 SyncMarkingVisitor* visitor = visitors_[i];
1235 visitor->FinalizeMarking();
1236 marked_bytes_ += visitor->marked_bytes();
1237 marked_micros_ += visitor->marked_micros();
1238 delete visitor;
1239 visitors_[i] = nullptr;
1240 }
1241
1242 ASSERT(global_list_.IsEmpty());
1243 }
1244 }
1245
1246 // Separate from verify_after_gc because that verification interferes with
1247 // concurrent marking.
1248 if (FLAG_verify_after_marking) {
1249 VerifyAfterMarkingVisitor visitor;
1250 heap_->VisitObjects(&visitor);
1251 if (visitor.failed()) {
1252 FATAL("verify after marking");
1253 }
1254 }
1255
1256 Epilogue();
1257}
1258
1259void GCMarker::PruneWeak(Scavenger* scavenger) {
1260 scavenger->PruneWeak(&global_list_);
1261 for (intptr_t i = 0, n = FLAG_marker_tasks; i < n; i++) {
1262 scavenger->PruneWeak(visitors_[i]->delayed());
1263 }
1264}
1265
1266} // namespace dart
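The lock-step termination protocol in ParallelMarkTask::RunEnteredIsolateGroup above (drain, sync on the barrier, re-check the busy count, sync again) is easier to see in isolation. The following is an illustrative sketch only, not the VM's implementation: SimpleBarrier, the item counter, and the simulated weak-property batch are invented stand-ins for ThreadBarrier, MarkerWorkList, and the delayed weak-property list.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Reusable generation-counting barrier, standing in for the VM's ThreadBarrier.
class SimpleBarrier {
 public:
  explicit SimpleBarrier(int count) : count_(count) {}
  void Sync() {
    std::unique_lock<std::mutex> lock(mutex_);
    int64_t generation = generation_;
    if (++waiting_ == count_) {
      waiting_ = 0;
      generation_++;
      cv_.notify_all();
    } else {
      cv_.wait(lock, [&] { return generation_ != generation; });
    }
  }
 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  const int count_;
  int waiting_ = 0;
  int64_t generation_ = 0;
};

int main() {
  constexpr int kWorkers = 4;
  SimpleBarrier barrier(kWorkers);
  std::atomic<int> num_busy{kWorkers};  // All workers start out 'busy'.
  std::atomic<int> work{1000};          // Pretend marking work items.

  auto marker = [&](int id) {
    // Worker 0 pretends to hold a weak property whose key turns out to be
    // marked, so it re-queues more work after the first drain.
    bool has_pending_weak_work = (id == 0);
    bool more_to_mark = false;
    do {
      // Drain: claim items until none are left, then report idle.
      while (work.fetch_sub(1) > 0) { /* "mark" one object */ }
      work.fetch_add(1);  // Undo the single failed claim.
      num_busy.fetch_sub(1);
      barrier.Sync();  // Every worker has stopped draining.

      // Decide, in lock step, whether anyone discovered new work.
      more_to_mark = false;
      if (has_pending_weak_work) {
        work.fetch_add(100);  // Values kept alive by the newly marked key.
        has_pending_weak_work = false;
        more_to_mark = true;
        num_busy.fetch_add(1);
      }
      barrier.Sync();
      if (!more_to_mark && num_busy.load() > 0) {
        num_busy.fetch_add(1);  // Someone else found work; keep going too.
        more_to_mark = true;
      }
      barrier.Sync();
    } while (more_to_mark);
  };

  std::vector<std::thread> threads;
  for (int i = 0; i < kWorkers; i++) threads.emplace_back(marker, i);
  for (auto& t : threads) t.join();
  std::printf("marking terminated, leftover work = %d\n", work.load());
  return 0;
}

As in the real protocol, no worker leaves the loop until a full barrier round ends with every worker idle and no new work announced, which is what prevents one task from terminating while another still holds undrained weak-property work.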
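ProcessMarkingStackUntil above checks the clock only once per fixed-size batch and subtracts a conservative per-batch estimate from the deadline, so a batch started just before the cutoff still finishes in time. A minimal sketch of that pattern, with invented names (NowMicros, DrainUntil) and a plain deque standing in for the marking stack:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <deque>

int64_t NowMicros() {
  return std::chrono::duration_cast<std::chrono::microseconds>(
             std::chrono::steady_clock::now().time_since_epoch())
      .count();
}

// Returns true if work remains when the deadline is reached.
bool DrainUntil(std::deque<int>* work, int64_t deadline_micros) {
  constexpr int64_t kBatchEstimateMicros = 1500;  // Worst-case cost of a batch.
  constexpr int kBatchBudget = 512;               // Items per clock check.
  deadline_micros -= kBatchEstimateMicros;        // End, not start, in time.
  while (NowMicros() < deadline_micros) {
    for (int i = 0; i < kBatchBudget && !work->empty(); i++) {
      work->pop_front();  // "Mark" one object.
    }
    if (work->empty()) return false;  // Fully drained before the deadline.
  }
  return !work->empty();
}

int main() {
  std::deque<int> work(100000, 0);
  // Give the drain a 2 ms idle slice, roughly the gap between frames.
  bool more_to_mark = DrainUntil(&work, NowMicros() + 2000);
  std::printf("more_to_mark=%d remaining=%zu\n", more_to_mark, work.size());
  return 0;
}

The batch size plays the same role as the 512 KB budget in the VM code: large enough that clock queries and loop-exit overhead stay negligible, small enough that several batches fit in one idle slice.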
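The weak-property handling above (ProcessWeakProperty plus the ProcessPendingWeakProperties retry loop) is a fixpoint computation: a weak property's value is only kept alive once its key has been observed as marked, so delayed properties are revisited after each marking round until a round marks nothing new. A simplified illustration with invented Obj/WeakProp types, not the VM's object model:

#include <cstdio>
#include <unordered_set>
#include <vector>

struct Obj { int id; };
struct WeakProp { Obj* key; Obj* value; };

void MarkWithWeakProperties(const std::vector<Obj*>& roots,
                            const std::vector<WeakProp>& props,
                            std::unordered_set<Obj*>* marked) {
  for (Obj* root : roots) marked->insert(root);
  bool more_to_mark = true;
  while (more_to_mark) {
    more_to_mark = false;
    for (const WeakProp& prop : props) {
      // Only a marked key keeps the value alive; a newly marked value may in
      // turn be the key of another weak property, hence the outer loop.
      if (marked->count(prop.key) != 0 && marked->insert(prop.value).second) {
        more_to_mark = true;
      }
    }
  }
}

int main() {
  Obj a{1}, b{2}, c{3}, d{4};
  std::vector<WeakProp> props = {{&a, &b}, {&b, &c}, {&d, &d}};
  std::unordered_set<Obj*> marked;
  MarkWithWeakProperties({&a}, props, &marked);
  // a, b, c survive; d's property has an unmarked key, so d stays unmarked.
  std::printf("marked %zu objects\n", marked.size());
  return 0;
}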