36 page_space_(page_space),
37 work_list_(marking_stack),
38 new_work_list_(new_marking_stack),
39 deferred_work_list_(deferred_marking_stack),
// Accumulates time spent marking (in microseconds) into marked_micros_;
// the total is read back via marked_micros().
47 void AddMicros(int64_t micros) { marked_micros_ += micros; }
51 constexpr static const char*
const kName =
"Marker";
55 ASSERT(raw->IsHeapObject());
60 bool more_to_mark =
false;
61 WeakPropertyPtr cur_weak = delayed_.weak_properties.
Release();
63 WeakPropertyPtr next_weak =
64 cur_weak->untag()->next_seen_by_gc_.Decompress(cur_weak->heap_base());
68 if (raw_key->IsImmediateObject() || raw_key->
untag()->
IsMarked()) {
70 if (!raw_val->IsImmediateObject() && !raw_val->
untag()->
IsMarked()) {
80 delayed_.weak_properties.Enqueue(cur_weak);
91 while (work_list_.
Pop(&obj)) {
92 if (obj->IsNewObject()) {
94 uword top = page->original_top();
97 if (top <= addr && addr <
end) {
98 new_work_list_.
Push(obj);
101 new_work_list_.
Flush();
102 deferred_work_list_.
Flush();
115 if (class_id == kWeakPropertyCid) {
117 }
else if (class_id == kWeakReferenceCid) {
119 }
else if (class_id == kWeakArrayCid) {
121 }
else if (class_id == kFinalizerEntryCid) {
123 }
else if (sync && concurrent_ && class_id == kSuspendStateCid) {
125 deferred_work_list_.
Push(obj);
130 if (!obj->IsNewObject()) {
131 marked_bytes_ += size;
136 new_work_list_.
Flush();
137 deferred_work_list_.
Flush();
145 new_work_list_.
Flush();
146 deferred_work_list_.
Flush();
165 constexpr intptr_t kBudget = 512 *
KB;
176 while (work_list_.
Pop(&obj)) {
177 if (sync && concurrent_ && obj->IsNewObject()) {
179 uword top = page->original_top();
182 if (top <= addr && addr <
end) {
183 new_work_list_.
Push(obj);
187 remaining_budget -= size;
188 if (remaining_budget < 0) {
201 if (class_id == kWeakPropertyCid) {
203 }
else if (class_id == kWeakReferenceCid) {
205 }
else if (class_id == kWeakArrayCid) {
207 }
else if (class_id == kFinalizerEntryCid) {
209 }
else if (sync && concurrent_ && class_id == kSuspendStateCid) {
211 deferred_work_list_.
Push(obj);
214 if ((class_id == kArrayCid) || (class_id == kImmutableArrayCid)) {
216 if (size > remaining_budget) {
217 work_list_.
Push(obj);
223 if (!obj->IsNewObject()) {
224 marked_bytes_ += size;
226 remaining_budget -= size;
227 if (remaining_budget < 0) {
257 for (
ObjectPtr* current = first; current <= last; current++) {
262#if defined(DART_COMPRESSED_POINTERS)
278 if (raw_key->IsHeapObject() && !raw_key->
untag()->
IsMarked()) {
281 delayed_.weak_properties.Enqueue(raw_weak);
282 return raw_weak->untag()->HeapSize();
285 return raw_weak->untag()->VisitPointersNonvirtual(
this);
294 if (raw_target->IsHeapObject() && !raw_target->
untag()->
IsMarked()) {
298 delayed_.weak_references.Enqueue(raw_weak);
304 MarkObject(raw_type_arguments);
305 return raw_weak->untag()->HeapSize();
309 delayed_.weak_arrays.Enqueue(raw_weak);
310 return raw_weak->untag()->HeapSize();
315 delayed_.finalizer_entries.Enqueue(raw_entry);
321 return raw_entry->untag()->HeapSize();
328 while (deferred_work_list_.
Pop(&obj)) {
329 ASSERT(obj->IsHeapObject());
349 if (TryAcquireMarkBit(obj)) {
350 if (!obj->IsNewObject()) {
351 marked_bytes_ += size;
374 WeakPropertyPtr current = delayed_.weak_properties.
Release();
376 WeakPropertyPtr
next = current->untag()->next_seen_by_gc();
385 WeakReferencePtr current = delayed_.weak_references.
Release();
387 WeakReferencePtr
next = current->untag()->next_seen_by_gc();
395 WeakArrayPtr current = delayed_.weak_arrays.
Release();
397 WeakArrayPtr
next = current->untag()->next_seen_by_gc();
400 for (intptr_t i = 0; i <
length; i++) {
408 FinalizerEntryPtr current = delayed_.finalizer_entries.
Release();
410 FinalizerEntryPtr
next = current->untag()->next_seen_by_gc();
421 if (
target->IsImmediateObject()) {
425 if (
target->untag()->IsMarked()) {
440 new_work_list_.
Flush();
441 deferred_work_list_.
Flush();
460 new_work_list_.
Flush();
462 deferred_work_list_.
Flush();
471 ASSERT(obj->IsHeapObject());
475 work_list_.
Push(obj);
// Attempts to claim |obj|'s mark bit; returns true iff this caller marked it.
// NOTE(review): the original lines between these fragments (479, 481-482) are
// elided here — presumably an #if/#else selecting the unsynchronized path
// (single-threaded marking) vs. the atomic TryAcquireMarkBit (parallel
// marking). Confirm against the full source before relying on this.
478 static bool TryAcquireMarkBit(ObjectPtr obj) {
480 obj->untag()->SetMarkBitUnsynchronized();
483 return obj->untag()->TryAcquireMarkBit();
488 void MarkObject(ObjectPtr obj) {
489 if (obj->IsImmediateObject()) {
493 if (sync && concurrent_ && obj->IsNewObject()) {
494 if (TryAcquireMarkBit(obj)) {
510 if (obj->untag()->IsMarkedIgnoreRace()) {
514 intptr_t class_id = obj->GetClassId();
517 if (sync &&
UNLIKELY(class_id == kInstructionsCid)) {
520 deferred_work_list_.
Push(obj);
524 if (!TryAcquireMarkBit(obj)) {
// Data members of the marking visitor (fragmentary view; intervening
// members from the original source are elided).
// Old-generation space whose objects are being marked.
532 PageSpace* page_space_;
// Weak properties/references/arrays and finalizer entries whose processing
// is delayed until their keys'/targets' mark state is known.
536 GCLinkedLists delayed_;
// Running total of bytes marked by this visitor (old-generation objects only,
// per the `if (!obj->IsNewObject()) marked_bytes_ += size;` sites above).
537 uintptr_t marked_bytes_;
// Running total of time this visitor spent marking, via AddMicros().
538 int64_t marked_micros_;
548 if (obj->IsImmediateObject()) {
571void GCMarker::Prologue() {
// Currently a no-op: no post-marking cleanup is performed here.
576void GCMarker::Epilogue() {}
583void GCMarker::ResetSlices() {
586 root_slices_started_ = 0;
587 root_slices_finished_ = 0;
590 weak_slices_started_ = 0;
593void GCMarker::IterateRoots(ObjectPointerVisitor* visitor) {
595 intptr_t slice = root_slices_started_.
fetch_add(1);
596 if (slice >= root_slices_count_) {
603 "ProcessIsolateGroupRoots");
610 MonitorLocker ml(&root_slices_monitor_);
611 root_slices_finished_++;
612 if (root_slices_finished_ == root_slices_count_) {
626void GCMarker::IterateWeakRoots(Thread* thread) {
628 intptr_t slice = weak_slices_started_.
fetch_add(1);
635 ProcessWeakHandles(thread);
638 ProcessWeakTables(thread);
641 ProcessObjectIdTable(thread);
644 ProcessRememberedSet(thread);
652void GCMarker::ProcessWeakHandles(Thread* thread) {
654 MarkingWeakVisitor visitor(thread);
660void GCMarker::ProcessWeakTables(Thread* thread) {
664#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
672 for (intptr_t i = 0; i <
size; i++) {
673 if (
table->IsValidEntryAtExclusive(i)) {
675 ObjectPtr obj =
table->ObjectAtExclusive(i);
676 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
677 if (cleanup !=
nullptr) {
678 cleanup(
reinterpret_cast<void*
>(
table->ValueAtExclusive(i)));
680 table->InvalidateAtExclusive(i);
687 for (intptr_t i = 0; i <
size; i++) {
688 if (
table->IsValidEntryAtExclusive(i)) {
690 ObjectPtr obj =
table->ObjectAtExclusive(i);
691 if (obj->IsHeapObject() && !obj->untag()->IsMarked()) {
692 if (cleanup !=
nullptr) {
693 cleanup(
reinterpret_cast<void*
>(
table->ValueAtExclusive(i)));
695 table->InvalidateAtExclusive(i);
702void GCMarker::ProcessRememberedSet(Thread* thread) {
705 StoreBuffer* store_buffer = isolate_group_->
store_buffer();
708 while (reading !=
nullptr) {
712 while (!reading->IsEmpty()) {
713 ObjectPtr obj = reading->Pop();
714 ASSERT(!obj->IsForwardingCorpse());
715 ASSERT(obj->untag()->IsRemembered());
716 if (obj->untag()->IsMarked()) {
718 if (writing->IsFull()) {
720 writing = store_buffer->PopNonFullBlock();
738 for (
ObjectPtr* current = first; current <= last; current++) {
740 ASSERT(obj->IsHeapObject());
748#if defined(DART_COMPRESSED_POINTERS)
757void GCMarker::ProcessObjectIdTable(Thread* thread) {
760 ObjectIdRingClearPointerVisitor visitor(isolate_group_);
774 isolate_group_(isolate_group),
775 marking_stack_(marking_stack),
778 num_busy_(num_busy) {}
807 marker_->IterateRoots(visitor_);
811 bool more_to_mark =
false;
838 if (!more_to_mark && (num_busy_->
load() > 0)) {
845 }
while (more_to_mark);
858 marker_->IterateWeakRoots(thread);
861 if (FLAG_log_marker_tasks) {
886 isolate_group_(isolate_group),
887 page_space_(page_space),
903 marker_->IterateRoots(visitor_);
908 if (FLAG_log_marker_tasks) {
943 intptr_t marked_words_per_job_micro;
944 if (marked_micros_ == 0) {
947 marked_words_per_job_micro =
marked_words() / marked_micros_;
949 if (marked_words_per_job_micro == 0) {
950 marked_words_per_job_micro = 1;
952 intptr_t jobs = FLAG_marker_tasks;
956 return marked_words_per_job_micro * jobs;
960 : isolate_group_(isolate_group),
963 new_marking_stack_(),
964 deferred_marking_stack_(),
970 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
971 visitors_[i] =
nullptr;
980 for (intptr_t i = 0; i < FLAG_marker_tasks; i++) {
990 &deferred_marking_stack_);
992 const intptr_t num_tasks = FLAG_marker_tasks;
1009 for (intptr_t i = 0; i < num_tasks; i++) {
1010 ASSERT(visitors_[i] ==
nullptr);
1013 &new_marking_stack_, &deferred_marking_stack_);
1014 visitors_[i] = visitor;
1016 if (i < (num_tasks - 1)) {
1019 this, isolate_group_, page_space, visitor);
1025 IterateRoots(visitor);
1028 if (FLAG_log_marker_tasks) {
1034 this, isolate_group_, page_space, visitor);
1043 while (root_slices_finished_ != root_slices_count_) {
1050 "IncrementalMarkWithUnlimitedBudget");
1053 &new_marking_stack_, &deferred_marking_stack_);
1070 const intptr_t kMinimumMarkingStep =
KB;
1071 if (size < kMinimumMarkingStep)
return;
1074 "IncrementalMarkWithSizeBudget");
1077 &new_marking_stack_, &deferred_marking_stack_);
1093 "IncrementalMarkWithTimeBudget");
1096 &new_marking_stack_, &deferred_marking_stack_);
1123 for (
ObjectPtr* ptr = from; ptr <= to; ptr++) {
1127 static_cast<uword>(current_),
reinterpret_cast<uword>(ptr),
1128 static_cast<uword>(obj));
1134#if defined(DART_COMPRESSED_POINTERS)
1142 static_cast<uword>(current_),
reinterpret_cast<uword>(ptr),
1143 static_cast<uword>(obj));
1154 bool failed_ =
false;
1165 const int num_tasks = FLAG_marker_tasks;
1166 if (num_tasks == 0) {
1171 &new_marking_stack_,
1172 &deferred_marking_stack_);
1175 IterateRoots(&visitor);
1184 IterateWeakRoots(thread);
1198 for (intptr_t i = 0; i < num_tasks; ++i) {
1202 if (visitor ==
nullptr) {
1204 &marking_stack_, &new_marking_stack_,
1205 &deferred_marking_stack_);
1206 visitors_[i] = visitor;
1213 visitor->
Flush(&global_list_);
1216 if (i < (num_tasks - 1)) {
1219 this, isolate_group_, &marking_stack_, barrier, visitor,
1224 visitor->
Adopt(&global_list_);
1226 visitor, &num_busy);
1233 for (intptr_t i = 0; i < num_tasks; i++) {
1239 visitors_[i] =
nullptr;
1248 if (FLAG_verify_after_marking) {
1250 heap_->VisitObjects(&visitor);
1252 FATAL(
"verify after marking");
1261 for (intptr_t i = 0, n = FLAG_marker_tasks; i < n; i++) {
1262 scavenger->
PruneWeak(visitors_[i]->delayed());
static float next(float f)
static const char marker[]
static const char kName[]
void PushAll(Block *blocks)
void Push(ObjectPtr raw_obj)
bool Pop(ObjectPtr *object)
bool WaitForWork(RelaxedAtomic< uintptr_t > *num_busy, bool abort=false)
ConcurrentMarkTask(GCMarker *marker, IsolateGroup *isolate_group, PageSpace *page_space, SyncMarkingVisitor *visitor)
static ThreadPool * thread_pool()
void UpdateUnreachable(IsolateGroup *isolate_group)
void IncrementalMarkWithSizeBudget(PageSpace *page_space, intptr_t size)
GCMarker(IsolateGroup *isolate_group, Heap *heap)
intptr_t MarkedWordsPerMicro() const
void IncrementalMarkWithTimeBudget(PageSpace *page_space, int64_t deadline)
void PruneWeak(Scavenger *scavenger)
void StartConcurrentMark(PageSpace *page_space)
intptr_t marked_words() const
void MarkObjects(PageSpace *page_space)
void IncrementalMarkWithUnlimitedBudget(PageSpace *page_space)
static Dart_HeapSamplingDeleteCallback delete_callback()
WeakTable * GetWeakTable(Space space, WeakSelector selector) const
StoreBuffer * store_buffer() const
void ScheduleInterrupts(uword interrupt_bits)
void DisableIncrementalBarrier()
void VisitObjectIdRingPointers(ObjectPointerVisitor *visitor)
ApiState * api_state() const
void VisitObjectPointers(ObjectPointerVisitor *visitor, ValidationPolicy validate_frames)
void EnableIncrementalBarrier(MarkingStack *marking_stack, MarkingStack *deferred_marking_stack)
void ReleaseStoreBuffers()
void VisitWeakPersistentHandles(HandleVisitor *visitor)
void DeferredMarkLiveTemporaries()
MarkingStack * marking_stack() const
intptr_t ProcessWeakProperty(WeakPropertyPtr raw_weak)
void AddMicros(int64_t micros)
void MournFinalizerEntries()
uintptr_t marked_bytes() const
GCLinkedLists * delayed()
static bool ForwardOrSetNullIfCollected(ObjectPtr parent, CompressedObjectPtr *slot)
static bool IsMarked(ObjectPtr raw)
NO_SANITIZE_THREAD ObjectPtr LoadPointerIgnoreRace(ObjectPtr *ptr)
void Flush(GCLinkedLists *global_list)
void VisitPointers(ObjectPtr *first, ObjectPtr *last) override
intptr_t ProcessWeakArray(WeakArrayPtr raw_weak)
void MournWeakProperties()
intptr_t ProcessWeakReference(WeakReferencePtr raw_weak)
MarkingVisitorBase(IsolateGroup *isolate_group, PageSpace *page_space, MarkingStack *marking_stack, MarkingStack *new_marking_stack, MarkingStack *deferred_marking_stack)
void DrainMarkingStackWithPauseChecks()
bool ProcessMarkingStack(intptr_t remaining_budget)
void FinalizeIncremental(GCLinkedLists *global_list)
void ProcessDeferredMarking()
int64_t marked_micros() const
void set_concurrent(bool value)
NO_SANITIZE_THREAD CompressedObjectPtr LoadCompressedPointerIgnoreRace(CompressedObjectPtr *ptr)
void MournWeakReferences()
void Adopt(GCLinkedLists *other)
void ProcessMarkingStackUntil(int64_t deadline)
intptr_t ProcessFinalizerEntry(FinalizerEntryPtr raw_entry)
bool ProcessPendingWeakProperties()
bool WaitForWork(RelaxedAtomic< uintptr_t > *num_busy)
void VisitHandle(uword addr) override
MarkingWeakVisitor(Thread *thread)
Monitor::WaitResult Wait(int64_t millis=Monitor::kNoTimeout)
static int64_t GetCurrentMonotonicMicros()
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
ObjectIdRingClearPointerVisitor(IsolateGroup *isolate_group)
void VisitPointers(ObjectPtr *first, ObjectPtr *last) override
IsolateGroup * isolate_group() const
void VisitCompressedPointers(uword heap_base, CompressedObjectPtr *first, CompressedObjectPtr *last)
ObjectPtr Decompress(uword heap_base) const
UntaggedObject * untag() const
intptr_t GetClassId() const
void set_concurrent_marker_tasks_active(intptr_t val)
intptr_t concurrent_marker_tasks_active() const
bool pause_concurrent_marking() const
void set_tasks(intptr_t val)
void set_concurrent_marker_tasks(intptr_t val)
intptr_t concurrent_marker_tasks() const
void YieldConcurrentMarking()
void set_phase(Phase val)
Monitor * tasks_lock() const
static Page * Of(ObjectPtr obj)
ParallelMarkTask(GCMarker *marker, IsolateGroup *isolate_group, MarkingStack *marking_stack, ThreadBarrier *barrier, SyncMarkingVisitor *visitor, RelaxedAtomic< uintptr_t > *num_busy)
void RunEnteredIsolateGroup()
PointerBlock< Size > * next() const
T load(std::memory_order order=std::memory_order_relaxed) const
T fetch_add(T arg, std::memory_order order=std::memory_order_relaxed)
void PruneWeak(GCLinkedLists *delayed)
bool Run(Args &&... args)
static Thread * Current()
void ReleaseStoreBuffer()
static void ExitIsolateGroupAsHelper(bool bypass_safepoint)
IsolateGroup * isolate_group() const
static bool EnterIsolateGroupAsHelper(IsolateGroup *isolate_group, TaskKind kind, bool bypass_safepoint)
DART_FORCE_INLINE intptr_t VisitPointersNonvirtual(V *visitor)
static bool IsMarked(uword tags)
intptr_t HeapSize() const
intptr_t VisitPointers(ObjectPointerVisitor *visitor)
void VisitPointers(ObjectPtr *from, ObjectPtr *to) override
VerifyAfterMarkingVisitor()
void VisitObject(ObjectPtr obj) override
#define THR_Print(format,...)
void(* Dart_HeapSamplingDeleteCallback)(void *data)
#define MSAN_UNPOISON(ptr, len)
StoreBuffer::Block StoreBufferBlock
static bool IsUnreachable(const ObjectPtr obj)
MarkingVisitorBase< true > SyncMarkingVisitor
void MournFinalizerEntry(GCVisitorType *visitor, FinalizerEntryPtr current_entry)
MarkingVisitorBase< false > UnsyncMarkingVisitor
static constexpr intptr_t kObjectAlignment
BlockWorkList< MarkingStack > MarkerWorkList
constexpr intptr_t kIntptrMax
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
void FlushInto(GCLinkedLists *to)
#define NO_SANITIZE_THREAD
#define TIMELINE_FUNCTION_GC_DURATION(thread, name)