marker.cc
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/marker.h"
6
7#include "platform/assert.h"
8#include "platform/atomic.h"
9#include "vm/allocation.h"
10#include "vm/dart_api_state.h"
11#include "vm/heap/gc_shared.h"
12#include "vm/heap/pages.h"
14#include "vm/isolate.h"
15#include "vm/log.h"
16#include "vm/object_id_ring.h"
17#include "vm/raw_object.h"
18#include "vm/stack_frame.h"
19#include "vm/tagged_pointer.h"
20#include "vm/thread_barrier.h"
21#include "vm/thread_pool.h"
22#include "vm/timeline.h"
23#include "vm/visitor.h"
24
25namespace dart {
26
27 template <bool sync>
28 class MarkingVisitorBase : public ObjectPointerVisitor {
29 public:
30 MarkingVisitorBase(IsolateGroup* isolate_group,
31 PageSpace* page_space,
32 MarkingStack* old_marking_stack,
33 MarkingStack* new_marking_stack,
34 MarkingStack* tlab_deferred_marking_stack,
35 MarkingStack* deferred_marking_stack)
36 : ObjectPointerVisitor(isolate_group),
37 page_space_(page_space),
38 old_work_list_(old_marking_stack),
39 new_work_list_(new_marking_stack),
40 tlab_deferred_work_list_(tlab_deferred_marking_stack),
41 deferred_work_list_(deferred_marking_stack),
42 marked_bytes_(0),
43 marked_micros_(0),
44 concurrent_(true),
45 has_evacuation_candidate_(false) {}
47
48 uintptr_t marked_bytes() const { return marked_bytes_; }
49 int64_t marked_micros() const { return marked_micros_; }
50 void AddMicros(int64_t micros) { marked_micros_ += micros; }
51 void set_concurrent(bool value) { concurrent_ = value; }
52
53#ifdef DEBUG
54 constexpr static const char* const kName = "Marker";
55#endif
56
57 static bool IsMarked(ObjectPtr raw) {
58 ASSERT(raw->IsHeapObject());
59 return raw->untag()->IsMarked();
60 }
61
62 void FinishedRoots() {
63 // Nothing to remember for roots. Don't carry over to objects.
64 has_evacuation_candidate_ = false;
65 }
66
67 bool ProcessPendingWeakProperties() {
68 bool more_to_mark = false;
69 WeakPropertyPtr cur_weak = delayed_.weak_properties.Release();
70 while (cur_weak != WeakProperty::null()) {
71 WeakPropertyPtr next_weak =
72 cur_weak->untag()->next_seen_by_gc_.Decompress(cur_weak->heap_base());
73 ObjectPtr raw_key = cur_weak->untag()->key();
74 // Reset the next pointer in the weak property.
75 cur_weak->untag()->next_seen_by_gc_ = WeakProperty::null();
76 if (raw_key->IsImmediateObject() || raw_key->untag()->IsMarked()) {
77 ObjectPtr raw_val = cur_weak->untag()->value();
78 if (!raw_val->IsImmediateObject() && !raw_val->untag()->IsMarked()) {
79 more_to_mark = true;
80 }
81
82 // The key is marked so we make sure to properly visit all pointers
83 // originating from this weak property.
84 cur_weak->untag()->VisitPointersNonvirtual(this);
85 if (has_evacuation_candidate_) {
86 has_evacuation_candidate_ = false;
87 if (!cur_weak->untag()->IsCardRemembered()) {
88 if (cur_weak->untag()->TryAcquireRememberedBit()) {
89 Thread::Current()->StoreBufferAddObjectGC(cur_weak);
90 }
91 }
92 }
93
94 } else {
95 // Requeue this weak property to be handled later.
96 ASSERT(IsMarked(cur_weak));
97 delayed_.weak_properties.Enqueue(cur_weak);
98 }
99 // Advance to next weak property in the queue.
100 cur_weak = next_weak;
101 }
102 return more_to_mark;
103 }
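// Illustrative sketch (not part of marker.cc): ProcessPendingWeakProperties()
// above is the ephemeron step of marking -- a WeakProperty's value is kept
// alive only if its key is marked, and newly live values can make further
// keys live, so the pending list is re-scanned until a pass finds nothing
// new. The same fixed-point loop in plain C++; Object and the mark set are
// stand-ins, and values are marked directly instead of being pushed onto a
// work list.
#include <unordered_set>
#include <utility>
#include <vector>

using Object = int;  // stand-in for ObjectPtr

// Marks `value` for every pending (key, value) pair whose key is already
// marked; repeats until a full pass marks nothing new.
inline void MarkEphemeronsToFixedPoint(
    std::vector<std::pair<Object, Object>>* pending,
    std::unordered_set<Object>* marked) {
  bool more_to_mark = true;
  while (more_to_mark) {
    more_to_mark = false;
    std::vector<std::pair<Object, Object>> still_pending;
    for (const auto& prop : *pending) {
      if (marked->count(prop.first) != 0) {
        // Key is live: the value becomes live too, which may in turn make
        // other pending keys live on the next pass.
        if (marked->insert(prop.second).second) more_to_mark = true;
      } else {
        still_pending.push_back(prop);  // requeue, like delayed_.weak_properties
      }
    }
    pending->swap(still_pending);
  }
}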
104
105 DART_NOINLINE
106 void YieldConcurrentMarking() {
107 old_work_list_.Flush();
108 new_work_list_.Flush();
109 tlab_deferred_work_list_.Flush();
110 deferred_work_list_.Flush();
111 Thread* thread = Thread::Current();
112 thread->StoreBufferReleaseGC();
113 page_space_->YieldConcurrentMarking();
114 thread->StoreBufferAcquireGC();
115 }
116
117 void DrainMarkingStackWithPauseChecks() {
118 ASSERT(concurrent_);
119 Thread* thread = Thread::Current();
120 do {
121 ObjectPtr obj;
122 while (MarkerWorkList::Pop(&old_work_list_, &new_work_list_, &obj)) {
123 ASSERT(!has_evacuation_candidate_);
124
125 if (obj->IsNewObject()) {
126 Page* page = Page::Of(obj);
127 uword top = page->original_top();
128 uword end = page->original_end();
129 uword addr = static_cast<uword>(obj);
130 if (top <= addr && addr < end) {
131 // New-space objects still in a TLAB are deferred. This allows the
132 // compiler to remove write barriers for freshly allocated objects.
133 tlab_deferred_work_list_.Push(obj);
134 if (UNLIKELY(page_space_->pause_concurrent_marking())) {
135 YieldConcurrentMarking();
136 }
137 continue;
138 }
139 }
140
141 const intptr_t class_id = obj->GetClassId();
142 ASSERT(class_id != kIllegalCid);
143 ASSERT(class_id != kFreeListElement);
144 ASSERT(class_id != kForwardingCorpse);
145
146 intptr_t size;
147 if (class_id == kWeakPropertyCid) {
148 size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(obj));
149 } else if (class_id == kWeakReferenceCid) {
150 size = ProcessWeakReference(static_cast<WeakReferencePtr>(obj));
151 } else if (class_id == kWeakArrayCid) {
152 size = ProcessWeakArray(static_cast<WeakArrayPtr>(obj));
153 } else if (class_id == kFinalizerEntryCid) {
154 size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
155 } else if (class_id == kSuspendStateCid) {
156 // Shape changing is not compatible with concurrent marking.
157 deferred_work_list_.Push(obj);
158 size = obj->untag()->HeapSize();
159 } else if (obj->untag()->IsCardRemembered()) {
160 ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
161 size = VisitCards(static_cast<ArrayPtr>(obj));
162 } else {
163 size = obj->untag()->VisitPointersNonvirtual(this);
164 }
165 if (has_evacuation_candidate_) {
166 has_evacuation_candidate_ = false;
167 if (!obj->untag()->IsCardRemembered()) {
168 if (obj->untag()->TryAcquireRememberedBit()) {
169 thread->StoreBufferAddObjectGC(obj);
170 }
171 }
172 }
173 if (!obj->IsNewObject()) {
174 marked_bytes_ += size;
175 }
176
177 if (UNLIKELY(page_space_->pause_concurrent_marking())) {
178 YieldConcurrentMarking();
179 }
180 }
181 } while (ProcessPendingWeakProperties());
182
183 ASSERT(old_work_list_.IsLocalEmpty());
184 // In case of scavenge before final marking.
185 new_work_list_.Flush();
186 tlab_deferred_work_list_.Flush();
187 deferred_work_list_.Flush();
188 }
189
190 intptr_t VisitCards(ArrayPtr obj) {
191 ASSERT(obj->IsArray() || obj->IsImmutableArray());
192 ASSERT(obj->untag()->IsCardRemembered());
193 CompressedObjectPtr* obj_from = obj->untag()->from();
194 CompressedObjectPtr* obj_to =
195 obj->untag()->to(Smi::Value(obj->untag()->length()));
196 uword heap_base = obj.heap_base();
197
198 Page* page = Page::Of(obj);
199 for (intptr_t i = 0, n = page->card_table_size(); i < n; i++) {
200 CompressedObjectPtr* card_from =
201 reinterpret_cast<CompressedObjectPtr*>(page) +
202 (i << Page::kSlotsPerCardLog2);
203 CompressedObjectPtr* card_to =
204 reinterpret_cast<CompressedObjectPtr*>(card_from) +
205 (1 << Page::kSlotsPerCardLog2) - 1;
206 // Minus 1 because to is inclusive.
207
208 if (card_from < obj_from) {
209 // First card overlaps with header.
210 card_from = obj_from;
211 }
212 if (card_to > obj_to) {
213 // Last card(s) may extend past the object. Array truncation can make
214 // this happen for more than one card.
215 card_to = obj_to;
216 }
217
218 VisitCompressedPointers(heap_base, card_from, card_to);
219 if (has_evacuation_candidate_) {
220 has_evacuation_candidate_ = false;
221 page->RememberCard(card_from);
222 }
223
224 if (((i + 1) % kCardsPerInterruptCheck) == 0) {
225 if (UNLIKELY(page_space_->pause_concurrent_marking())) {
226 YieldConcurrentMarking();
227 }
228 }
229 }
230
231 return obj->untag()->HeapSize();
232 }
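// Illustrative sketch (not part of marker.cc): the per-card clamping that
// VisitCards() above performs. Each card covers a fixed number of slots; the
// first card can begin before the object's first slot and the last card(s)
// can run past its last slot (for example after array truncation), so both
// ends are clamped, with an inclusive upper bound. CardSlot and
// kSlotsPerCardLog2 here are stand-ins, not VM types.
#include <algorithm>
#include <cstdint>

using CardSlot = void*;
constexpr int kSlotsPerCardLog2 = 6;  // 64 slots per card (illustrative)

// Visits the slots of card `i`, clamped to the object's slot range
// [obj_from, obj_to], where obj_to is inclusive, mirroring the real loop.
template <typename Visitor>
void VisitCardClamped(CardSlot* card_table_base, intptr_t i,
                      CardSlot* obj_from, CardSlot* obj_to, Visitor&& visit) {
  CardSlot* card_from = card_table_base + (i << kSlotsPerCardLog2);
  CardSlot* card_to = card_from + (1 << kSlotsPerCardLog2) - 1;  // inclusive
  card_from = std::max(card_from, obj_from);  // first card may overlap header
  card_to = std::min(card_to, obj_to);        // last card may extend past end
  for (CardSlot* p = card_from; p <= card_to; p++) {
    visit(p);
  }
}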
233
234 void DrainMarkingStack() {
235 ASSERT(!concurrent_);
236 Thread* thread = Thread::Current();
237 do {
238 ObjectPtr obj;
239 while (MarkerWorkList::Pop(&old_work_list_, &new_work_list_, &obj)) {
240 ASSERT(!has_evacuation_candidate_);
241
242 const intptr_t class_id = obj->GetClassId();
243 ASSERT(class_id != kIllegalCid);
244 ASSERT(class_id != kFreeListElement);
245 ASSERT(class_id != kForwardingCorpse);
246
247 intptr_t size;
248 if (class_id == kWeakPropertyCid) {
249 size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(obj));
250 } else if (class_id == kWeakReferenceCid) {
251 size = ProcessWeakReference(static_cast<WeakReferencePtr>(obj));
252 } else if (class_id == kWeakArrayCid) {
253 size = ProcessWeakArray(static_cast<WeakArrayPtr>(obj));
254 } else if (class_id == kFinalizerEntryCid) {
255 size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
256 } else {
257 if (obj->untag()->IsCardRemembered()) {
258 ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
259 size = VisitCards(static_cast<ArrayPtr>(obj));
260 } else {
261 size = obj->untag()->VisitPointersNonvirtual(this);
262 }
263 }
264 if (has_evacuation_candidate_) {
265 has_evacuation_candidate_ = false;
266 if (!obj->untag()->IsCardRemembered() &&
267 obj->untag()->TryAcquireRememberedBit()) {
268 thread->StoreBufferAddObjectGC(obj);
269 }
270 }
271 if (!obj->IsNewObject()) {
272 marked_bytes_ += size;
273 }
274 }
275 } while (ProcessPendingWeakProperties());
276 }
277
278 void ProcessOldMarkingStackUntil(int64_t deadline) {
279 // We check the clock *before* starting a batch of work, but we want to
280 // *end* work before the deadline. So we compare to the deadline adjusted
281 // by a conservative estimate of the duration of one batch of work.
282 deadline -= 1500;
283
284 // A 512kB budget is chosen to be large enough that we don't waste too much
285 // time on the overhead of exiting ProcessMarkingStack, querying the clock,
286 // and re-entering, and small enough that a few batches can fit in the idle
287 // time between animation frames. This amount of marking takes ~1ms on a
288 // Pixel phone.
289 constexpr intptr_t kBudget = 512 * KB;
290
291 while ((OS::GetCurrentMonotonicMicros() < deadline) &&
292 ProcessOldMarkingStack(kBudget)) {
293 }
294 }
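// Illustrative sketch (not part of marker.cc): the idle-time pattern used by
// ProcessOldMarkingStackUntil() above, written against std::chrono instead of
// OS::GetCurrentMonotonicMicros(). process_batch and the per-batch estimate
// are placeholders; the point is that the clock is checked *before* a batch,
// so the deadline is shrunk up front by a conservative estimate of one
// batch's duration to make work end before the real deadline.
#include <chrono>
#include <cstdint>
#include <functional>

// process_batch() returns true while there is more work left.
inline void RunBatchesUntil(int64_t deadline_micros,
                            int64_t batch_estimate_micros,
                            const std::function<bool()>& process_batch) {
  // End *before* the caller's deadline: budget one batch worth of slack.
  deadline_micros -= batch_estimate_micros;
  auto now_micros = [] {
    return std::chrono::duration_cast<std::chrono::microseconds>(
               std::chrono::steady_clock::now().time_since_epoch())
        .count();
  };
  while (now_micros() < deadline_micros && process_batch()) {
  }
}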
295
296 bool ProcessOldMarkingStack(intptr_t remaining_budget) {
297 Thread* thread = Thread::Current();
298 do {
299 // First drain the marking stacks.
300 ObjectPtr obj;
301 while (old_work_list_.Pop(&obj)) {
302 ASSERT(!has_evacuation_candidate_);
303
304 const intptr_t class_id = obj->GetClassId();
305 ASSERT(class_id != kIllegalCid);
306 ASSERT(class_id != kFreeListElement);
307 ASSERT(class_id != kForwardingCorpse);
308
309 intptr_t size;
310 if (class_id == kWeakPropertyCid) {
311 size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(obj));
312 } else if (class_id == kWeakReferenceCid) {
313 size = ProcessWeakReference(static_cast<WeakReferencePtr>(obj));
314 } else if (class_id == kWeakArrayCid) {
315 size = ProcessWeakArray(static_cast<WeakArrayPtr>(obj));
316 } else if (class_id == kFinalizerEntryCid) {
317 size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(obj));
318 } else if (sync && concurrent_ && class_id == kSuspendStateCid) {
319 // Shape changing is not compatible with concurrent marking.
320 deferred_work_list_.Push(obj);
321 size = obj->untag()->HeapSize();
322 } else {
323 if ((class_id == kArrayCid) || (class_id == kImmutableArrayCid)) {
324 size = obj->untag()->HeapSize();
325 if (size > remaining_budget) {
326 old_work_list_.Push(obj);
327 return true; // More to mark.
328 }
329 }
330 if (obj->untag()->IsCardRemembered()) {
331 ASSERT((class_id == kArrayCid) || (class_id == kImmutableArrayCid));
332 size = VisitCards(static_cast<ArrayPtr>(obj));
333 } else {
334 size = obj->untag()->VisitPointersNonvirtual(this);
335 }
336 }
337 if (has_evacuation_candidate_) {
338 has_evacuation_candidate_ = false;
339 if (!obj->untag()->IsCardRemembered() &&
340 obj->untag()->TryAcquireRememberedBit()) {
341 thread->StoreBufferAddObjectGC(obj);
342 }
343 }
344 marked_bytes_ += size;
345 remaining_budget -= size;
346 if (remaining_budget < 0) {
347 return true; // More to mark.
348 }
349 }
350 // Marking stack is empty.
351 } while (ProcessPendingWeakProperties());
352
353 return false; // No more work.
354 }
355
356 // Races: The concurrent marker is racing with the mutator, but this race is
357 // harmless. The concurrent marker will only visit objects that were created
358 // before the marker started. It will ignore all new-space objects based on
359 // pointer alignment, and it will ignore old-space objects created after the
360 // marker started because old-space objects allocated while marking is in
361 // progress are allocated black (mark bit set). When visiting object slots,
362 // the marker can see either the value it had when marking started (because
363 // spawning the marker task creates acq-rel ordering) or any value later
364 // stored into that slot. Because pointer slots always contain pointers (i.e.,
365 // we don't do any in-place unboxing like V8), any value we read from the slot
366 // is safe.
367 NO_SANITIZE_THREAD
368 ObjectPtr LoadPointerIgnoreRace(ObjectPtr* ptr) { return *ptr; }
369 NO_SANITIZE_THREAD
370 CompressedObjectPtr LoadCompressedPointerIgnoreRace(
371 CompressedObjectPtr* ptr) {
372 return *ptr;
373 }
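// Illustrative sketch (not part of marker.cc): the NO_SANITIZE_THREAD loads
// above tell TSan that racing with the mutator on a pointer slot is intended,
// because any value ever stored in the slot is a valid pointer. A portable
// way to express the same "any current value is fine" read is a relaxed
// atomic load, sketched here with std::atomic; RacySlot is a stand-in, not a
// VM type.
#include <atomic>
#include <cstdint>

struct RacySlot {
  std::atomic<uintptr_t> value{0};
};

// The marker only needs *some* value that was stored into the slot; relaxed
// ordering is enough because pointer slots always hold pointers.
inline uintptr_t LoadSlotIgnoringRace(const RacySlot& slot) {
  return slot.value.load(std::memory_order_relaxed);
}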
374
375 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
376 bool has_evacuation_candidate = false;
377 for (ObjectPtr* current = first; current <= last; current++) {
378 has_evacuation_candidate |= MarkObject(LoadPointerIgnoreRace(current));
379 }
380 has_evacuation_candidate_ |= has_evacuation_candidate;
381 }
382
383#if defined(DART_COMPRESSED_POINTERS)
384 void VisitCompressedPointers(uword heap_base,
385 CompressedObjectPtr* first,
386 CompressedObjectPtr* last) override {
387 bool has_evacuation_candidate = false;
388 for (CompressedObjectPtr* current = first; current <= last; current++) {
389 has_evacuation_candidate |= MarkObject(
390 LoadCompressedPointerIgnoreRace(current).Decompress(heap_base));
391 }
392 has_evacuation_candidate_ |= has_evacuation_candidate;
393 }
394#endif
395
396 intptr_t ProcessWeakProperty(WeakPropertyPtr raw_weak) {
397 // The fate of the weak property is determined by its key.
398 ObjectPtr raw_key =
399 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->key_)
400 .Decompress(raw_weak->heap_base());
401 if (raw_key->IsHeapObject() && !raw_key->untag()->IsMarked()) {
402 // Key was white. Enqueue the weak property.
403 ASSERT(IsMarked(raw_weak));
404 delayed_.weak_properties.Enqueue(raw_weak);
405 return raw_weak->untag()->HeapSize();
406 }
407 // Key is gray or black. Make the weak property black.
408 return raw_weak->untag()->VisitPointersNonvirtual(this);
409 }
410
411 intptr_t ProcessWeakReference(WeakReferencePtr raw_weak) {
412 // The fate of the target field is determined by the target.
413 // The type arguments always stay alive.
414 ObjectPtr raw_target =
415 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->target_)
416 .Decompress(raw_weak->heap_base());
417 if (raw_target->IsHeapObject()) {
418 if (!raw_target->untag()->IsMarked()) {
419 // Target was white. Enqueue the weak reference. It is potentially dead.
420 // It might still be made alive by weak properties in next rounds.
421 ASSERT(IsMarked(raw_weak));
422 delayed_.weak_references.Enqueue(raw_weak);
423 } else {
424 if (raw_target->untag()->IsEvacuationCandidate()) {
425 has_evacuation_candidate_ = true;
426 }
427 }
428 }
429 // Always visit the type argument.
430 ObjectPtr raw_type_arguments =
431 LoadCompressedPointerIgnoreRace(&raw_weak->untag()->type_arguments_)
432 .Decompress(raw_weak->heap_base());
433 if (MarkObject(raw_type_arguments)) {
434 has_evacuation_candidate_ = true;
435 }
436 return raw_weak->untag()->HeapSize();
437 }
438
439 intptr_t ProcessWeakArray(WeakArrayPtr raw_weak) {
440 delayed_.weak_arrays.Enqueue(raw_weak);
441 return raw_weak->untag()->HeapSize();
442 }
443
444 intptr_t ProcessFinalizerEntry(FinalizerEntryPtr raw_entry) {
445 ASSERT(IsMarked(raw_entry));
446 delayed_.finalizer_entries.Enqueue(raw_entry);
447 // Only visit token and next.
448 if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->token_)
449 .Decompress(raw_entry->heap_base()))) {
450 has_evacuation_candidate_ = true;
451 }
452 if (MarkObject(LoadCompressedPointerIgnoreRace(&raw_entry->untag()->next_)
453 .Decompress(raw_entry->heap_base()))) {
454 has_evacuation_candidate_ = true;
455 }
456 return raw_entry->untag()->HeapSize();
457 }
458
459 void ProcessDeferredMarking() {
460 Thread* thread = Thread::Current();
461 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessDeferredMarking");
462
463 ObjectPtr obj;
464 while (deferred_work_list_.Pop(&obj)) {
465 ASSERT(!has_evacuation_candidate_);
466 ASSERT(obj->IsHeapObject());
467 // We need to scan objects even if they were already scanned via ordinary
468 // marking. An object may have changed since its ordinary scan and been
469 // added to deferred marking stack to compensate for write-barrier
470 // elimination.
471 // A given object may be included in the deferred marking stack multiple
472 // times. It may or may not also be in the ordinary marking stack, so
473 // failing to acquire the mark bit here doesn't reliably indicate the
474 // object was already encountered through the deferred marking stack. Our
475 // processing here is idempotent, so repeated visits only hurt performance
476 // but not correctness. Duplication is expected to be low.
477 // By the absence of a special case, we are treating WeakProperties as
478 // strong references here. This guarantees a WeakProperty will only be
479 // added to the delayed_weak_properties_ list of the worker that
480 // encounters it during ordinary marking. This is in the same spirit as
481 // the eliminated write barrier, which would have added the newly written
482 // key and value to the ordinary marking stack.
483 intptr_t size = obj->untag()->VisitPointersNonvirtual(this);
484 // Add the size only if we win the marking race to prevent
485 // double-counting.
486 if (TryAcquireMarkBit(obj)) {
487 if (!obj->IsNewObject()) {
488 marked_bytes_ += size;
489 }
490 }
491 if (has_evacuation_candidate_) {
492 has_evacuation_candidate_ = false;
493 if (!obj->untag()->IsCardRemembered() &&
494 obj->untag()->TryAcquireRememberedBit()) {
495 thread->StoreBufferAddObjectGC(obj);
496 }
497 }
498 }
499 }
500
501 // Called when all marking is complete. Any attempt to push to the mark stack
502 // after this will trigger an error.
503 void FinalizeMarking() {
504 old_work_list_.Finalize();
505 new_work_list_.Finalize();
506 tlab_deferred_work_list_.Finalize();
507 deferred_work_list_.Finalize();
508 MournFinalizerEntries();
509 // MournFinalizerEntries inserts newly discovered dead entries into the
510 // linked list attached to the Finalizer. This might create
511 // cross-generational references which might be added to the store
512 // buffer. Release the store buffer to satisfy the invariant that
513 // thread local store buffer is empty after marking and all references
514 // are processed.
515 Thread::Current()->ReleaseStoreBuffer();
516 }
517
518 void MournWeakProperties() {
519 WeakPropertyPtr current = delayed_.weak_properties.Release();
520 while (current != WeakProperty::null()) {
521 WeakPropertyPtr next = current->untag()->next_seen_by_gc();
522 current->untag()->next_seen_by_gc_ = WeakProperty::null();
523 current->untag()->key_ = Object::null();
524 current->untag()->value_ = Object::null();
525 current = next;
526 }
527 }
528
529 void MournWeakReferences() {
530 WeakReferencePtr current = delayed_.weak_references.Release();
531 while (current != WeakReference::null()) {
532 WeakReferencePtr next = current->untag()->next_seen_by_gc();
533 current->untag()->next_seen_by_gc_ = WeakReference::null();
534 ForwardOrSetNullIfCollected(current, &current->untag()->target_);
535 current = next;
536 }
537 }
538
539 void MournWeakArrays() {
540 WeakArrayPtr current = delayed_.weak_arrays.Release();
541 while (current != WeakArray::null()) {
542 WeakArrayPtr next = current->untag()->next_seen_by_gc();
543 current->untag()->next_seen_by_gc_ = WeakArray::null();
544 intptr_t length = Smi::Value(current->untag()->length());
545 for (intptr_t i = 0; i < length; i++) {
546 ForwardOrSetNullIfCollected(current, &current->untag()->data()[i]);
547 }
548 current = next;
549 }
550 }
551
552 void MournFinalizerEntries() {
553 FinalizerEntryPtr current = delayed_.finalizer_entries.Release();
554 while (current != FinalizerEntry::null()) {
555 FinalizerEntryPtr next = current->untag()->next_seen_by_gc();
556 current->untag()->next_seen_by_gc_ = FinalizerEntry::null();
557 MournFinalizerEntry(this, current);
558 current = next;
559 }
560 }
561
562 // Returns whether the object referred to in `slot` was GCed this GC.
563 static bool ForwardOrSetNullIfCollected(ObjectPtr parent,
564 CompressedObjectPtr* slot) {
565 ObjectPtr target = slot->Decompress(parent->heap_base());
566 if (target->IsImmediateObject()) {
567 // Object not touched during this GC.
568 return false;
569 }
570 if (target->untag()->IsMarked()) {
571 // Object already null (which is permanently marked) or has survived this
572 // GC.
573 if (target->untag()->IsEvacuationCandidate()) {
574 if (parent->untag()->IsCardRemembered()) {
575 Page::Of(parent)->RememberCard(slot);
576 } else {
577 if (parent->untag()->TryAcquireRememberedBit()) {
578 Thread::Current()->StoreBufferAddObjectGC(parent);
579 }
580 }
581 }
582 return false;
583 }
584 *slot = Object::null();
585 return true;
586 }
587
588 bool WaitForWork(RelaxedAtomic<uintptr_t>* num_busy) {
589 return old_work_list_.WaitForWork(num_busy);
590 }
591
592 void Flush(GCLinkedLists* global_list) {
593 old_work_list_.Flush();
594 new_work_list_.Flush();
595 tlab_deferred_work_list_.Flush();
596 deferred_work_list_.Flush();
597 delayed_.FlushInto(global_list);
598 }
599
600 void Adopt(GCLinkedLists* other) {
601 ASSERT(delayed_.IsEmpty());
602 other->FlushInto(&delayed_);
603 }
604
605 void AbandonWork() {
606 old_work_list_.AbandonWork();
607 new_work_list_.AbandonWork();
608 tlab_deferred_work_list_.AbandonWork();
609 deferred_work_list_.AbandonWork();
610 delayed_.Release();
611 }
612
613 void FinalizeIncremental(GCLinkedLists* global_list) {
614 old_work_list_.Flush();
615 old_work_list_.Finalize();
616 new_work_list_.Flush();
617 new_work_list_.Finalize();
618 tlab_deferred_work_list_.Flush();
619 tlab_deferred_work_list_.Finalize();
620 deferred_work_list_.Flush();
621 deferred_work_list_.Finalize();
622 delayed_.FlushInto(global_list);
623 }
624
625 GCLinkedLists* delayed() { return &delayed_; }
626
627 private:
628 static bool TryAcquireMarkBit(ObjectPtr obj) {
629 if constexpr (!sync) {
630 if (!obj->untag()->IsMarked()) {
631 obj->untag()->SetMarkBitUnsynchronized();
632 return true;
633 }
634 return false;
635 } else {
636 return obj->untag()->TryAcquireMarkBit();
637 }
638 }
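// Illustrative sketch (not part of marker.cc): what the sync/!sync split in
// TryAcquireMarkBit() above buys. With several markers the bit must be
// claimed with an atomic read-modify-write; with a single marker a plain
// load plus an unsynchronized store is enough and avoids the RMW cost.
// SketchTags and kSketchMarkBit are stand-ins; the real code works on the
// object's header word.
#include <atomic>
#include <cstdint>

struct SketchTags {
  std::atomic<uint32_t> bits{0};
};
constexpr uint32_t kSketchMarkBit = 1u << 0;

template <bool sync>
inline bool TryAcquireMarkBitSketch(SketchTags* tags) {
  if constexpr (!sync) {
    // Single marker: nobody races us for the bit.
    uint32_t old_bits = tags->bits.load(std::memory_order_relaxed);
    if ((old_bits & kSketchMarkBit) != 0) return false;
    tags->bits.store(old_bits | kSketchMarkBit, std::memory_order_relaxed);
    return true;
  } else {
    // Parallel markers: exactly one fetch_or caller sees the bit clear.
    uint32_t old_bits =
        tags->bits.fetch_or(kSketchMarkBit, std::memory_order_relaxed);
    return (old_bits & kSketchMarkBit) == 0;
  }
}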
639
640 DART_FORCE_INLINE
641 bool MarkObject(ObjectPtr obj) {
642 if (obj->IsImmediateObject()) {
643 return false;
644 }
645
646 if (obj->IsNewObject()) {
647 if (TryAcquireMarkBit(obj)) {
648 new_work_list_.Push(obj);
649 }
650 return false;
651 }
652
653 // While it might seem this is redundant with TryAcquireMarkBit, we must
654 // do this check first to avoid attempting an atomic::fetch_and on the
655 // read-only vm-isolate or image pages, which can fault even if there is no
656 // change in the value.
657 // Doing this before checking for an Instructions object avoids
658 // unnecessary queueing of pre-marked objects.
659 // Race: The concurrent marker may observe a pointer into a heap page that
660 // was allocated after the concurrent marker started. It can read either a
661 // zero or the header of an object allocated black, both of which appear
662 // marked.
663 uword tags = obj->untag()->tags_ignore_race();
664 if (UntaggedObject::IsMarked(tags)) {
665 return UntaggedObject::IsEvacuationCandidate(tags);
666 }
667
668 intptr_t class_id = UntaggedObject::ClassIdTag::decode(tags);
669 ASSERT(class_id != kFreeListElement);
670
671 if (sync && UNLIKELY(class_id == kInstructionsCid)) {
672 // If this is the concurrent marker, this object may be non-writable due
673 // to W^X (--write-protect-code).
674 deferred_work_list_.Push(obj);
675 return false;
676 }
677
678 if (TryAcquireMarkBit(obj)) {
679 old_work_list_.Push(obj);
680 }
681
682 return UntaggedObject::IsEvacuationCandidate(tags);
683 }
684
685 PageSpace* page_space_;
686 MarkerWorkList old_work_list_;
687 MarkerWorkList new_work_list_;
688 MarkerWorkList tlab_deferred_work_list_;
689 MarkerWorkList deferred_work_list_;
690 GCLinkedLists delayed_;
691 uintptr_t marked_bytes_;
692 int64_t marked_micros_;
693 bool concurrent_;
694 bool has_evacuation_candidate_;
695
696 DISALLOW_IMPLICIT_CONSTRUCTORS(MarkingVisitorBase);
697};
698
699 typedef MarkingVisitorBase<false> UnsyncMarkingVisitor;
700 typedef MarkingVisitorBase<true> SyncMarkingVisitor;
701
702static bool IsUnreachable(const ObjectPtr obj) {
703 if (obj->IsImmediateObject()) {
704 return false;
705 }
706 return !obj->untag()->IsMarked();
707}
708
709 class MarkingWeakVisitor : public HandleVisitor {
710 public:
711 explicit MarkingWeakVisitor(Thread* thread) : HandleVisitor(thread) {}
712
713 void VisitHandle(uword addr) override {
714 FinalizablePersistentHandle* handle =
715 reinterpret_cast<FinalizablePersistentHandle*>(addr);
716 ObjectPtr obj = handle->ptr();
717 if (IsUnreachable(obj)) {
718 handle->UpdateUnreachable(thread()->isolate_group());
719 }
720 }
721
722 private:
723 DISALLOW_COPY_AND_ASSIGN(MarkingWeakVisitor);
724};
725
726void GCMarker::Prologue() {
727 isolate_group_->ReleaseStoreBuffers();
728 new_marking_stack_.PushAll(tlab_deferred_marking_stack_.PopAll());
729}
730
731void GCMarker::Epilogue() {}
732
733 enum RootSlices {
734 kIsolate = 0,
735 kObjectIdRing = 1,
736 kNumFixedRootSlices = 2,
737};
738
739void GCMarker::ResetSlices() {
740 ASSERT(Thread::Current()->OwnsGCSafepoint());
741
742 root_slices_started_ = 0;
743 root_slices_finished_ = 0;
744 root_slices_count_ = kNumFixedRootSlices;
745
746 weak_slices_started_ = 0;
747}
748
749void GCMarker::IterateRoots(ObjectPointerVisitor* visitor) {
750 for (;;) {
751 intptr_t slice = root_slices_started_.fetch_add(1);
752 if (slice >= root_slices_count_) {
753 break; // No more slices.
754 }
755
756 switch (slice) {
757 case kIsolate: {
758 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
759 "ProcessIsolateGroupRoots");
760 isolate_group_->VisitObjectPointers(
761 visitor, ValidationPolicy::kDontValidateFrames);
762 break;
763 }
764 case kObjectIdRing: {
765 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
766 "ProcessObjectIdTable");
767 isolate_group_->VisitObjectIdRingPointers(visitor);
768 break;
769 }
770 }
771
772 MonitorLocker ml(&root_slices_monitor_);
773 root_slices_finished_++;
774 if (root_slices_finished_ == root_slices_count_) {
775 ml.Notify();
776 }
777 }
778}
779
780 enum WeakSlices {
781 kWeakHandles = 0,
782 kWeakTables,
783 kRememberedSet,
784 kNumWeakSlices,
785};
786
787void GCMarker::IterateWeakRoots(Thread* thread) {
788 for (;;) {
789 intptr_t slice = weak_slices_started_.fetch_add(1);
790 if (slice >= kNumWeakSlices) {
791 return; // No more slices.
792 }
793
794 switch (slice) {
795 case kWeakHandles:
796 ProcessWeakHandles(thread);
797 break;
798 case kWeakTables:
799 ProcessWeakTables(thread);
800 break;
801 case kRememberedSet:
802 ProcessRememberedSet(thread);
803 break;
804 default:
805 UNREACHABLE();
806 }
807 }
808}
809
810void GCMarker::ProcessWeakHandles(Thread* thread) {
811 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakHandles");
812 MarkingWeakVisitor visitor(thread);
813 ApiState* state = isolate_group_->api_state();
814 ASSERT(state != nullptr);
815 isolate_group_->VisitWeakPersistentHandles(&visitor);
816}
817
818void GCMarker::ProcessWeakTables(Thread* thread) {
819 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessWeakTables");
820 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
821 Dart_HeapSamplingDeleteCallback cleanup = nullptr;
822#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
823 if (sel == Heap::kHeapSamplingData) {
824 cleanup = HeapProfileSampler::delete_callback();
825 }
826#endif
827 WeakTable* table =
828 heap_->GetWeakTable(Heap::kOld, static_cast<Heap::WeakSelector>(sel));
829 intptr_t size = table->size();
830 for (intptr_t i = 0; i < size; i++) {
831 if (table->IsValidEntryAtExclusive(i)) {
832 // The object has been collected.
833 ObjectPtr obj = table->ObjectAtExclusive(i);
834 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
835 if (cleanup != nullptr) {
836 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
837 }
838 table->InvalidateAtExclusive(i);
839 }
840 }
841 }
842 table =
843 heap_->GetWeakTable(Heap::kNew, static_cast<Heap::WeakSelector>(sel));
844 size = table->size();
845 for (intptr_t i = 0; i < size; i++) {
846 if (table->IsValidEntryAtExclusive(i)) {
847 // The object has been collected.
848 ObjectPtr obj = table->ObjectAtExclusive(i);
849 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
850 if (cleanup != nullptr) {
851 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
852 }
853 table->InvalidateAtExclusive(i);
854 }
855 }
856 }
857 }
858}
859
860void GCMarker::ProcessRememberedSet(Thread* thread) {
861 TIMELINE_FUNCTION_GC_DURATION(thread, "ProcessRememberedSet");
862 // Filter collected objects from the remembered set.
863 StoreBuffer* store_buffer = isolate_group_->store_buffer();
864 StoreBufferBlock* reading = store_buffer->PopAll();
865 StoreBufferBlock* writing = store_buffer->PopNonFullBlock();
866 while (reading != nullptr) {
867 StoreBufferBlock* next = reading->next();
868 // Generated code appends to store buffers; tell MemorySanitizer.
869 MSAN_UNPOISON(reading, sizeof(*reading));
870 while (!reading->IsEmpty()) {
871 ObjectPtr obj = reading->Pop();
872 ASSERT(!obj->IsForwardingCorpse());
873 ASSERT(obj->untag()->IsRemembered());
874 if (obj->untag()->IsMarked()) {
875 writing->Push(obj);
876 if (writing->IsFull()) {
877 store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
878 writing = store_buffer->PopNonFullBlock();
879 }
880 }
881 }
882 reading->Reset();
883 // Return the emptied block for recycling (no need to check threshold).
884 store_buffer->PushBlock(reading, StoreBuffer::kIgnoreThreshold);
885 reading = next;
886 }
887 store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
888}
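// Illustrative sketch (not part of marker.cc): ProcessRememberedSet() above
// rewrites the store buffer so it only keeps remembered objects that survived
// marking. The same filter over a flat container, with is_marked left as a
// caller-supplied predicate; the real code additionally recycles fixed-size
// store-buffer blocks instead of using a vector.
#include <cstdint>
#include <functional>
#include <vector>

inline void FilterRememberedSet(
    std::vector<uintptr_t>* remembered_set,
    const std::function<bool(uintptr_t)>& is_marked) {
  std::vector<uintptr_t> surviving;
  surviving.reserve(remembered_set->size());
  for (uintptr_t obj : *remembered_set) {
    if (is_marked(obj)) {
      surviving.push_back(obj);  // keep: still a live old->new source
    }
    // else: the object died during this GC; dropping its entry prunes the set.
  }
  remembered_set->swap(surviving);
}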
889
890 class ParallelMarkTask : public ThreadPool::Task {
891 public:
892 ParallelMarkTask(GCMarker* marker,
893 IsolateGroup* isolate_group,
894 MarkingStack* marking_stack,
895 ThreadBarrier* barrier,
896 SyncMarkingVisitor* visitor,
897 RelaxedAtomic<uintptr_t>* num_busy)
898 : marker_(marker),
899 isolate_group_(isolate_group),
900 marking_stack_(marking_stack),
901 barrier_(barrier),
902 visitor_(visitor),
903 num_busy_(num_busy) {}
904
905 virtual void Run() {
906 if (!barrier_->TryEnter()) {
907 barrier_->Release();
908 return;
909 }
910
911 bool result = Thread::EnterIsolateGroupAsHelper(
912 isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
913 ASSERT(result);
914
915 RunEnteredIsolateGroup();
916
917 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
918
919 barrier_->Sync();
920 barrier_->Release();
921 }
922
924 {
925 Thread* thread = Thread::Current();
926 TIMELINE_FUNCTION_GC_DURATION(thread, "ParallelMark");
927 int64_t start = OS::GetCurrentMonotonicMicros();
928
929 // Phase 1: Iterate over roots and drain marking stack in tasks.
930 num_busy_->fetch_add(1u);
931 visitor_->set_concurrent(false);
932 marker_->IterateRoots(visitor_);
933 visitor_->FinishedRoots();
934
935 visitor_->ProcessDeferredMarking();
936
937 bool more_to_mark = false;
938 do {
939 do {
940 visitor_->DrainMarkingStack();
941 } while (visitor_->WaitForWork(num_busy_));
942 // Wait for all markers to stop.
943 barrier_->Sync();
944#if defined(DEBUG)
945 ASSERT(num_busy_->load() == 0);
946 // Caveat: must not allow any marker to continue past the barrier
947 // before we checked num_busy, otherwise one of them might rush
948 // ahead and increment it.
949 barrier_->Sync();
950#endif
951 // Check if we have any pending properties with marked keys.
952 // Those might have been marked by another marker.
953 more_to_mark = visitor_->ProcessPendingWeakProperties();
954 if (more_to_mark) {
955 // We have more work to do. Notify others.
956 num_busy_->fetch_add(1u);
957 }
958
959 // Wait for all other markers to finish processing their pending
960 // weak properties and decide if they need to continue marking.
961 // Caveat: we need two barriers here to make this decision in lock step
962 // between all markers and the main thread.
963 barrier_->Sync();
964 if (!more_to_mark && (num_busy_->load() > 0)) {
965 // All markers continue to mark as long as any single marker has
966 // some work to do.
967 num_busy_->fetch_add(1u);
968 more_to_mark = true;
969 }
970 barrier_->Sync();
971 } while (more_to_mark);
972
973 // Phase 2: deferred marking.
974 visitor_->ProcessDeferredMarking();
975 barrier_->Sync();
976
977 // Phase 3: Weak processing and statistics.
978 visitor_->MournWeakProperties();
979 visitor_->MournWeakReferences();
980 visitor_->MournWeakArrays();
981 // Don't MournFinalizerEntries here, do it on main thread, so that we
982 // don't have to coordinate workers.
983
984 thread->ReleaseStoreBuffer(); // Ahead of IterateWeak
985 barrier_->Sync();
986 marker_->IterateWeakRoots(thread);
987 int64_t stop = OS::GetCurrentMonotonicMicros();
988 visitor_->AddMicros(stop - start);
989 if (FLAG_log_marker_tasks) {
990 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
991 visitor_->marked_bytes(), visitor_->marked_micros());
992 }
993 }
994 }
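// Illustrative sketch (not part of marker.cc): the termination protocol used
// in RunEnteredIsolateGroup() above, reduced to std::barrier and std::atomic
// (assumes C++20). num_busy must start at the number of participating
// workers, and each worker calls this once. The decision to stop is read
// between two barriers so that no worker can race ahead, drain, and change
// num_busy before the others have looked at it.
#include <atomic>
#include <barrier>
#include <cstddef>
#include <functional>

inline void DrainInLockstep(std::barrier<>& barrier,
                            std::atomic<size_t>& num_busy,
                            const std::function<void()>& drain,
                            const std::function<bool()>& find_more_work) {
  bool more_to_mark = false;
  do {
    drain();                    // returns once this worker's stack is empty
    num_busy.fetch_sub(1);      // this worker is idle for now
    barrier.arrive_and_wait();  // wait until every worker has drained

    more_to_mark = find_more_work();  // e.g. pending weak properties
    if (more_to_mark) num_busy.fetch_add(1);

    barrier.arrive_and_wait();  // everyone has reported its own finding
    if (!more_to_mark && num_busy.load() > 0) {
      // Someone else found work; keep marking so that work can be shared.
      num_busy.fetch_add(1);
      more_to_mark = true;
    }
    barrier.arrive_and_wait();  // take the decision in lockstep: without this,
                                // a continuing worker could drain and drop
                                // num_busy to zero before others have decided
  } while (more_to_mark);
}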
995
996 private:
997 GCMarker* marker_;
998 IsolateGroup* isolate_group_;
999 MarkingStack* marking_stack_;
1000 ThreadBarrier* barrier_;
1001 SyncMarkingVisitor* visitor_;
1002 RelaxedAtomic<uintptr_t>* num_busy_;
1003
1004 DISALLOW_COPY_AND_ASSIGN(ParallelMarkTask);
1005};
1006
1007 class ConcurrentMarkTask : public ThreadPool::Task {
1008 public:
1009 ConcurrentMarkTask(GCMarker* marker,
1010 IsolateGroup* isolate_group,
1011 PageSpace* page_space,
1012 SyncMarkingVisitor* visitor)
1013 : marker_(marker),
1014 isolate_group_(isolate_group),
1015 page_space_(page_space),
1016 visitor_(visitor) {
1017#if defined(DEBUG)
1018 MonitorLocker ml(page_space_->tasks_lock());
1019 ASSERT(page_space_->phase() == PageSpace::kMarking);
1020#endif
1021 }
1022
1023 virtual void Run() {
1024 bool result = Thread::EnterIsolateGroupAsHelper(
1025 isolate_group_, Thread::kMarkerTask, /*bypass_safepoint=*/true);
1026 ASSERT(result);
1027 {
1029 int64_t start = OS::GetCurrentMonotonicMicros();
1030
1031 marker_->IterateRoots(visitor_);
1032 visitor_->FinishedRoots();
1033
1034 visitor_->DrainMarkingStackWithPauseChecks();
1035 int64_t stop = OS::GetCurrentMonotonicMicros();
1036 visitor_->AddMicros(stop - start);
1037 if (FLAG_log_marker_tasks) {
1038 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
1039 visitor_->marked_bytes(), visitor_->marked_micros());
1040 }
1041 }
1042
1043 // Exit isolate cleanly *before* notifying it, to avoid shutdown race.
1044 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
1045 // This marker task is done. Notify the original isolate.
1046 {
1047 MonitorLocker ml(page_space_->tasks_lock());
1048 page_space_->set_tasks(page_space_->tasks() - 1);
1049 page_space_->set_concurrent_marker_tasks(
1050 page_space_->concurrent_marker_tasks() - 1);
1051 page_space_->set_concurrent_marker_tasks_active(
1052 page_space_->concurrent_marker_tasks_active() - 1);
1053 ASSERT(page_space_->phase() == PageSpace::kMarking);
1054 if (page_space_->concurrent_marker_tasks() == 0) {
1055 page_space_->set_phase(PageSpace::kAwaitingFinalization);
1056 isolate_group_->ScheduleInterrupts(Thread::kVMInterrupt);
1057 }
1058 ml.NotifyAll();
1059 }
1060 }
1061
1062 private:
1063 GCMarker* marker_;
1064 IsolateGroup* isolate_group_;
1065 PageSpace* page_space_;
1066 SyncMarkingVisitor* visitor_;
1067
1068 DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkTask);
1069};
1070
1071 intptr_t GCMarker::MarkedWordsPerMicro() const {
1072 intptr_t marked_words_per_job_micro;
1073 if (marked_micros_ == 0) {
1074 marked_words_per_job_micro = marked_words(); // Prevent division by zero.
1075 } else {
1076 marked_words_per_job_micro = marked_words() / marked_micros_;
1077 }
1078 if (marked_words_per_job_micro == 0) {
1079 marked_words_per_job_micro = 1; // Prevent division by zero.
1080 }
1081 intptr_t jobs = FLAG_marker_tasks;
1082 if (jobs == 0) {
1083 jobs = 1; // Marking on main thread is still one job.
1084 }
1085 return marked_words_per_job_micro * jobs;
1086}
1087
1088 GCMarker::GCMarker(IsolateGroup* isolate_group, Heap* heap)
1089 : isolate_group_(isolate_group),
1090 heap_(heap),
1091 old_marking_stack_(),
1092 new_marking_stack_(),
1093 tlab_deferred_marking_stack_(),
1094 deferred_marking_stack_(),
1095 global_list_(),
1096 visitors_(),
1097 marked_bytes_(0),
1098 marked_micros_(0) {
1099 visitors_ = new SyncMarkingVisitor*[FLAG_marker_tasks];
1100 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
1101 visitors_[i] = nullptr;
1102 }
1103}
1104
1105 GCMarker::~GCMarker() {
1106 // Cleanup in case isolate shutdown happens after starting the concurrent
1107 // marker and before finalizing.
1108 if (isolate_group_->old_marking_stack() != nullptr) {
1109 isolate_group_->DisableIncrementalBarrier();
1110 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
1111 visitors_[i]->AbandonWork();
1112 delete visitors_[i];
1113 }
1114 }
1115 delete[] visitors_;
1116}
1117
1118 void GCMarker::StartConcurrentMark(PageSpace* page_space) {
1119 isolate_group_->EnableIncrementalBarrier(
1120 &old_marking_stack_, &new_marking_stack_, &deferred_marking_stack_);
1121
1122 const intptr_t num_tasks = FLAG_marker_tasks;
1123
1124 {
1125 // Bulk increase task count before starting any task, instead of
1126 // incrementing as each task is started, to prevent a task which
1127 // races ahead from falsely believing it was the last task to complete.
1128 MonitorLocker ml(page_space->tasks_lock());
1129 ASSERT(page_space->phase() == PageSpace::kDone);
1130 page_space->set_phase(PageSpace::kMarking);
1131 page_space->set_tasks(page_space->tasks() + num_tasks);
1132 page_space->set_concurrent_marker_tasks(
1133 page_space->concurrent_marker_tasks() + num_tasks);
1134 page_space->set_concurrent_marker_tasks_active(
1135 page_space->concurrent_marker_tasks_active() + num_tasks);
1136 }
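// Illustrative sketch (not part of marker.cc): why the block above bumps the
// task counters for *all* workers before spawning any of them. If each worker
// registered itself on startup instead, an early finisher could observe the
// count hitting zero while other workers had not yet registered and wrongly
// run the "last task" step. Names below are illustrative.
#include <atomic>
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

inline void RunWorkers(size_t num_workers,
                       const std::function<void()>& work,
                       const std::function<void()>& on_all_done) {
  std::atomic<size_t> outstanding{num_workers};  // registered up front, in bulk
  std::vector<std::thread> threads;
  for (size_t i = 0; i < num_workers; i++) {
    threads.emplace_back([&] {
      work();
      // fetch_sub returns the previous value; only the true last worker can
      // see it drop to zero, because everyone was counted before any started.
      if (outstanding.fetch_sub(1) == 1) {
        on_all_done();
      }
    });
  }
  for (auto& t : threads) t.join();
}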
1137
1138 ResetSlices();
1139 for (intptr_t i = 0; i < num_tasks; i++) {
1140 ASSERT(visitors_[i] == nullptr);
1141 SyncMarkingVisitor* visitor = new SyncMarkingVisitor(
1142 isolate_group_, page_space, &old_marking_stack_, &new_marking_stack_,
1143 &tlab_deferred_marking_stack_, &deferred_marking_stack_);
1144 visitors_[i] = visitor;
1145
1146 if (i < (num_tasks - 1)) {
1147 // Begin marking on a helper thread.
1148 bool result = Dart::thread_pool()->Run<ConcurrentMarkTask>(
1149 this, isolate_group_, page_space, visitor);
1150 ASSERT(result);
1151 } else {
1152 // For the last visitor, mark roots on the main thread.
1154 int64_t start = OS::GetCurrentMonotonicMicros();
1155 IterateRoots(visitor);
1156 visitor->FinishedRoots();
1157 int64_t stop = OS::GetCurrentMonotonicMicros();
1158 visitor->AddMicros(stop - start);
1159 if (FLAG_log_marker_tasks) {
1160 THR_Print("Task marked %" Pd " bytes in %" Pd64 " micros.\n",
1161 visitor->marked_bytes(), visitor->marked_micros());
1162 }
1163 // Continue non-root marking concurrently.
1164 bool result = Dart::thread_pool()->Run<ConcurrentMarkTask>(
1165 this, isolate_group_, page_space, visitor);
1166 ASSERT(result);
1167 }
1168 }
1169
1170 isolate_group_->DeferredMarkLiveTemporaries();
1171
1172 // Wait for roots to be marked before exiting safepoint.
1173 MonitorLocker ml(&root_slices_monitor_);
1174 while (root_slices_finished_ != root_slices_count_) {
1175 ml.Wait();
1176 }
1177}
1178
1179 void GCMarker::IncrementalMarkWithUnlimitedBudget(PageSpace* page_space) {
1180 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
1181 "IncrementalMarkWithUnlimitedBudget");
1182
1183 SyncMarkingVisitor visitor(isolate_group_, page_space, &old_marking_stack_,
1184 &new_marking_stack_, &tlab_deferred_marking_stack_,
1185 &deferred_marking_stack_);
1186 int64_t start = OS::GetCurrentMonotonicMicros();
1187 visitor.ProcessOldMarkingStack(kIntptrMax);
1188 int64_t stop = OS::GetCurrentMonotonicMicros();
1189 visitor.AddMicros(stop - start);
1190 {
1191 MonitorLocker ml(page_space->tasks_lock());
1192 visitor.FinalizeIncremental(&global_list_);
1193 marked_bytes_ += visitor.marked_bytes();
1194 marked_micros_ += visitor.marked_micros();
1195 }
1196}
1197
1198 void GCMarker::IncrementalMarkWithSizeBudget(PageSpace* page_space,
1199 intptr_t size) {
1200 // Avoid setup overhead for tiny amounts of marking as the last bits of TLABs
1201 // get filled in.
1202 const intptr_t kMinimumMarkingStep = KB;
1203 if (size < kMinimumMarkingStep) return;
1204
1205 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
1206 "IncrementalMarkWithSizeBudget");
1207
1208 SyncMarkingVisitor visitor(isolate_group_, page_space, &old_marking_stack_,
1209 &new_marking_stack_, &tlab_deferred_marking_stack_,
1210 &deferred_marking_stack_);
1211 int64_t start = OS::GetCurrentMonotonicMicros();
1212 visitor.ProcessOldMarkingStack(size);
1213 int64_t stop = OS::GetCurrentMonotonicMicros();
1214 visitor.AddMicros(stop - start);
1215 {
1216 MonitorLocker ml(page_space->tasks_lock());
1217 visitor.FinalizeIncremental(&global_list_);
1218 marked_bytes_ += visitor.marked_bytes();
1219 marked_micros_ += visitor.marked_micros();
1220 }
1221}
1222
1223 void GCMarker::IncrementalMarkWithTimeBudget(PageSpace* page_space,
1224 int64_t deadline) {
1225 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
1226 "IncrementalMarkWithTimeBudget");
1227
1228 SyncMarkingVisitor visitor(isolate_group_, page_space, &old_marking_stack_,
1229 &new_marking_stack_, &tlab_deferred_marking_stack_,
1230 &deferred_marking_stack_);
1231 int64_t start = OS::GetCurrentMonotonicMicros();
1232 visitor.ProcessOldMarkingStackUntil(deadline);
1233 int64_t stop = OS::GetCurrentMonotonicMicros();
1234 visitor.AddMicros(stop - start);
1235 {
1236 MonitorLocker ml(page_space->tasks_lock());
1237 visitor.FinalizeIncremental(&global_list_);
1238 marked_bytes_ += visitor.marked_bytes();
1239 marked_micros_ += visitor.marked_micros();
1240 }
1241}
1242
1243 class VerifyAfterMarkingVisitor : public ObjectVisitor,
1244 public ObjectPointerVisitor {
1245 public:
1248
1249 void VisitObject(ObjectPtr obj) override {
1250 if (obj->untag()->IsMarked()) {
1251 current_ = obj;
1252 obj->untag()->VisitPointers(this);
1253 }
1254 }
1255
1256 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1257 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1258 ObjectPtr obj = *ptr;
1259 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
1260 OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
1261 static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
1262 static_cast<uword>(obj));
1263 failed_ = true;
1264 }
1265 }
1266 }
1267
1268#if defined(DART_COMPRESSED_POINTERS)
1269 void VisitCompressedPointers(uword heap_base,
1270 CompressedObjectPtr* from,
1271 CompressedObjectPtr* to) override {
1272 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
1273 ObjectPtr obj = ptr->Decompress(heap_base);
1274 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
1275 OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
1276 static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
1277 static_cast<uword>(obj));
1278 failed_ = true;
1279 }
1280 }
1281 }
1282#endif
1283
1284 bool failed() const { return failed_; }
1285
1286 private:
1287 ObjectPtr current_;
1288 bool failed_ = false;
1289};
1290
1291 void GCMarker::MarkObjects(PageSpace* page_space) {
1292 if (isolate_group_->old_marking_stack() != nullptr) {
1293 isolate_group_->DisableIncrementalBarrier();
1294 }
1295
1296 Prologue();
1297 {
1298 Thread* thread = Thread::Current();
1299 const int num_tasks = FLAG_marker_tasks;
1300 if (num_tasks == 0) {
1301 TIMELINE_FUNCTION_GC_DURATION(thread, "Mark");
1302 int64_t start = OS::GetCurrentMonotonicMicros();
1303 // Mark everything on main thread.
1304 UnsyncMarkingVisitor visitor(
1305 isolate_group_, page_space, &old_marking_stack_, &new_marking_stack_,
1306 &tlab_deferred_marking_stack_, &deferred_marking_stack_);
1307 visitor.set_concurrent(false);
1308 ResetSlices();
1309 IterateRoots(&visitor);
1310 visitor.FinishedRoots();
1311 visitor.ProcessDeferredMarking();
1312 visitor.DrainMarkingStack();
1313 visitor.ProcessDeferredMarking();
1314 visitor.FinalizeMarking();
1315 visitor.MournWeakProperties();
1316 visitor.MournWeakReferences();
1317 visitor.MournWeakArrays();
1318 visitor.MournFinalizerEntries();
1319 thread->ReleaseStoreBuffer(); // Ahead of IterateWeak
1320 IterateWeakRoots(thread);
1321 // All marking done; detach code, etc.
1322 int64_t stop = OS::GetCurrentMonotonicMicros();
1323 visitor.AddMicros(stop - start);
1324 marked_bytes_ += visitor.marked_bytes();
1325 marked_micros_ += visitor.marked_micros();
1326 } else {
1327 ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
1328
1329 ResetSlices();
1330 // Used to coordinate draining among tasks; all start out as 'busy'.
1331 RelaxedAtomic<uintptr_t> num_busy = 0;
1332 // Phase 1: Iterate over roots and drain marking stack in tasks.
1333
1334 for (intptr_t i = 0; i < num_tasks; ++i) {
1335 SyncMarkingVisitor* visitor = visitors_[i];
1336 // Visitors may or may not have already been created depending on
1337 // whether we did some concurrent marking.
1338 if (visitor == nullptr) {
1339 visitor = new SyncMarkingVisitor(
1340 isolate_group_, page_space, &old_marking_stack_,
1341 &new_marking_stack_, &tlab_deferred_marking_stack_,
1342 &deferred_marking_stack_);
1343 visitors_[i] = visitor;
1344 }
1345
1346 // Move all work from local blocks to the global list. Any given
1347 // visitor might not get to run if it fails to reach TryEnter soon
1348 // enough, and we must not fail to visit objects merely because they are
1349 // sitting in such a visitor's local blocks.
1350 visitor->Flush(&global_list_);
1351 // Need to move weak property list too.
1352
1353 if (i < (num_tasks - 1)) {
1354 // Begin marking on a helper thread.
1355 bool result = Dart::thread_pool()->Run<ParallelMarkTask>(
1356 this, isolate_group_, &old_marking_stack_, barrier, visitor,
1357 &num_busy);
1358 ASSERT(result);
1359 } else {
1360 // Last worker is the main thread.
1361 visitor->Adopt(&global_list_);
1362 ParallelMarkTask task(this, isolate_group_, &old_marking_stack_,
1363 barrier, visitor, &num_busy);
1364 task.RunEnteredIsolateGroup();
1365 barrier->Sync();
1366 barrier->Release();
1367 }
1368 }
1369
1370 for (intptr_t i = 0; i < num_tasks; i++) {
1371 SyncMarkingVisitor* visitor = visitors_[i];
1372 visitor->FinalizeMarking();
1373 marked_bytes_ += visitor->marked_bytes();
1374 marked_micros_ += visitor->marked_micros();
1375 delete visitor;
1376 visitors_[i] = nullptr;
1377 }
1378
1379 ASSERT(global_list_.IsEmpty());
1380 }
1381 }
1382
1383 // Separate from verify_after_gc because that verification interferes with
1384 // concurrent marking.
1385 if (FLAG_verify_after_marking) {
1386 VerifyAfterMarkingVisitor visitor;
1387 heap_->VisitObjects(&visitor);
1388 if (visitor.failed()) {
1389 FATAL("verify after marking");
1390 }
1391 }
1392
1393 Epilogue();
1394}
1395
1396 void GCMarker::PruneWeak(Scavenger* scavenger) {
1397 scavenger->PruneWeak(&global_list_);
1398 for (intptr_t i = 0, n = FLAG_marker_tasks; i < n; i++) {
1399 scavenger->PruneWeak(visitors_[i]->delayed());
1400 }
1401}
1402
1403} // namespace dart