  // GCIncrementalCompactor::Prologue: pick evacuation candidates and prune the
  // free lists; if nothing qualifies, the compactor stays inactive.
  if (!SelectEvacuationCandidates(old_space)) {
    return;
  }
  CheckFreeLists(old_space);

  // GCIncrementalCompactor::Epilogue: if candidates remain, evacuate them,
  // verify, and free the emptied pages.
  if (!HasEvacuationCandidates(old_space)) {
    return false;
  }
  old_space->MakeIterable();
  CheckFreeLists(old_space);
  CheckPreEvacuate(old_space);
  Evacuate(old_space);
  CheckPostEvacuate(old_space);
  CheckFreeLists(old_space);
  FreeEvacuatedPages(old_space);
  VerifyAfterIncrementalCompaction(old_space);
  return true;
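  // Overall flow suggested by the calls above: the prologue selects
  // mostly-empty pages and tags their objects as evacuation candidates; the
  // epilogue, once mark bits are available, copies the surviving objects off
  // those pages, forwards incoming pointers, and frees the emptied pages.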
  // GCIncrementalCompactor::Abort: undo candidate selection when the
  // incremental compaction cannot be carried through.
  switch (old_space->phase()) {
    // ...
  }

  // Clear the page-level flag and the per-object candidate bits so a later GC
  // starts from a clean state.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (!page->is_evacuation_candidate()) continue;
    page->set_evacuation_candidate(false);

    uword current = page->object_start();
    uword end = page->object_end();
    while (current < end) {
      // ... clear each object's evacuation-candidate header bit and advance
      // by its HeapSize().
    }
  }
  // PrologueTask: per-thread worker that tags the objects on candidate pages
  // and prunes free-list elements that live on those pages (constructor
  // initializer list shown in part).
        isolate_group_(isolate_group),
        old_space_(old_space),

  void MarkEvacuationCandidates() {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                  "MarkEvacuationCandidates");

    // Walk each claimed candidate page and tag its objects.
      while (current < end) {
    // Free-list pruning: each "chunk" names one size class of one free list.
      intptr_t list_index = chunk / (FreeList::kNumLists + 1);
      intptr_t size_class_index = chunk % (FreeList::kNumLists + 1);
      FreeList* freelist = &old_space_->freelists_[list_index];
      ASSERT(freelist->top_ == freelist->end_);

      // Detach the list and re-add only elements that are not on an
      // evacuation-candidate page; elements on candidate pages are dropped,
      // since those pages will be freed wholesale after evacuation.
      FreeListElement* current = freelist->free_lists_[size_class_index];
      freelist->free_lists_[size_class_index] = nullptr;
      while (current != nullptr) {
        FreeListElement* next = current->next();
        if (!Page::Of(current)->is_evacuation_candidate()) {
          current->set_next(freelist->free_lists_[size_class_index]);
          freelist->free_lists_[size_class_index] = current;
        }
        current = next;
      }
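      // Example (hypothetical numbers): if FreeList::kNumLists were 8, chunk
      // 20 would map to freelist 20 / 9 = 2 and size class 20 % 9 = 2. The
      // freelist cursor in PrologueState simply hands out chunk indices, so
      // workers can claim individual size classes independently.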
bool GCIncrementalCompactor::SelectEvacuationCandidates(PageSpace* old_space) {
  // A page qualifies only if less than half of it is live data.
  constexpr intptr_t kEvacuationThreshold = kPageSize / 2;
  // Bound the total bytes copied per cycle relative to the new-space size.
  const intptr_t kMaxEvacuatedBytes =
      (old_space->heap_->new_space()->ThresholdInWords() << kWordSizeLog2) / 4;

  // ... (a local `state` collects {page, live_bytes} pairs and the cursors
  // that PrologueTask workers will consume).

  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                "SelectEvacuationCandidates");
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (page->is_never_evacuate()) continue;

    intptr_t live_bytes = page->live_bytes();
    if (live_bytes > kEvacuationThreshold) continue;
    // ... (record this page and its live_bytes in state.pages).
  }
  // Sort candidates by ascending live bytes.
  state.pages.Sort([](const LiveBytes* a, const LiveBytes* b) -> int {
    if (a->live_bytes < b->live_bytes) return -1;
    if (a->live_bytes > b->live_bytes) return 1;
    return 0;
  });
  // Greedily take pages, sparsest first, until the byte budget is exhausted.
  intptr_t num_candidates = 0;
  intptr_t cumulative_live_bytes = 0;
  for (intptr_t i = 0; i < state.pages.length(); i++) {
    intptr_t live_bytes = state.pages[i].live_bytes;
    if (cumulative_live_bytes + live_bytes <= kMaxEvacuatedBytes) {
      num_candidates++;
      cumulative_live_bytes += live_bytes;
      state.pages[i].page->set_evacuation_candidate(true);
    }
  }
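  // Because the list is sorted by ascending live_bytes, the fixed copying
  // budget (kMaxEvacuatedBytes) is spent on the pages that return the most
  // reclaimed space per byte copied.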
#if defined(SUPPORT_TIMELINE)
  tbes.SetNumArguments(2);
  tbes.FormatArgument(0, "cumulative_live_bytes", "%" Pd,
                      cumulative_live_bytes);
  tbes.FormatArgument(1, "num_candidates", "%" Pd, num_candidates);
#endif
  state.page_cursor = 0;
  state.page_limit = num_candidates;
  state.freelist_cursor =
      PageSpace::kDataFreelist * (FreeList::kNumLists + 1);
  state.freelist_limit =
      old_space->num_freelists_ * (FreeList::kNumLists + 1);

  if (num_candidates == 0) return false;
  old_space->ReleaseBumpAllocation();

  // Tag candidate objects and prune the free lists in parallel, using the
  // same worker count as the scavenger.
  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
  for (intptr_t i = 0; i < num_tasks; i++) {
    if (i < (num_tasks - 1)) {
      Dart::thread_pool()->Run<PrologueTask>(barrier, isolate_group, old_space,
                                             &state);
    } else {
      // The last task runs on the current thread.
      PrologueTask task(barrier, isolate_group, old_space, &state);
      task.RunEnteredIsolateGroup();
    }
  }
  // Rebuild each data free list's occupancy bitmap now that elements on
  // candidate pages have been pruned.
  for (intptr_t i = PageSpace::kDataFreelist, n = old_space->num_freelists_;
       i < n; i++) {
    FreeList* freelist = &old_space->freelists_[i];
    ASSERT(freelist->top_ == freelist->end_);
    freelist->free_map_.Reset();
    for (intptr_t j = 0; j < FreeList::kNumLists; j++) {
      freelist->free_map_.Set(j, freelist->free_lists_[j] != nullptr);
    }
  }
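  // The bitmap is what the allocator consults to find a non-empty size class
  // quickly, so it must be kept consistent with the pruned lists before any
  // further old-space allocation.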
// Debug check: after pruning, neither the bump-allocation block nor any
// free-list element may reside on an evacuation-candidate page.
void GCIncrementalCompactor::CheckFreeLists(PageSpace* old_space) {
  for (intptr_t i = 0, n = old_space->num_freelists_; i < n; i++) {
    FreeList* freelist = &old_space->freelists_[i];
    if (freelist->top_ < freelist->end_) {
      // ... assert the bump-allocation remainder is not on a candidate page.
    }
    for (intptr_t j = 0; j <= FreeList::kNumLists; j++) {
      FreeListElement* current = freelist->free_lists_[j];
      while (current != nullptr) {
        // ... assert Page::Of(current) is not an evacuation candidate.
        current = current->next();
      }
    }
  }
}
  // objcpy: word-at-a-time copy used when moving objects. Heap objects are
  // always word-aligned and a whole number of words long.
  uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
  const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
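  // The __restrict qualifiers tell the compiler the source and destination
  // ranges do not overlap, letting it emit a straightforward, vectorizable
  // copy loop without aliasing checks.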
bool GCIncrementalCompactor::HasEvacuationCandidates(PageSpace* old_space) {
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (page->is_evacuation_candidate()) return true;
  }
  return false;
}
void GCIncrementalCompactor::CheckPreEvacuate(PageSpace* old_space) {
  if (!FLAG_verify_before_gc) return;

  // On candidate pages, every marked (live) object must have been tagged as
  // an evacuation candidate.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (page->is_evacuation_candidate()) {
      uword current = page->object_start();
      uword end = page->object_end();
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t size = obj->untag()->HeapSize();
        ASSERT(obj->untag()->IsEvacuationCandidate() ||
               !obj->untag()->IsMarked());
        current += size;
      }
    }
  }

  // On all other pages, no object may carry the evacuation-candidate bit.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (!page->is_evacuation_candidate()) {
      uword current = page->object_start();
      uword end = page->object_end();
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t size = obj->untag()->HeapSize();
        ASSERT(!obj->untag()->IsEvacuationCandidate());
        current += size;
      }
    }
  }
}
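// Taken together with CheckPostEvacuate below, the invariant is: before
// evacuation every live object on a candidate page carries the candidate bit,
// and after evacuation no live object does (survivors have been copied and
// replaced by forwarding corpses).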
    // In PredicateVisitPointers(): rewrite pointers to evacuated objects by
    // following the forwarding corpse left at the old location, and report
    // whether any new-space target was seen.
    bool has_new_target = false;
    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr target = *ptr;
      if (target->IsImmediateObject()) continue;
      if (target->IsNewObject()) {
        has_new_target = true;
        continue;
      }
      if (target->IsForwardingCorpse()) {
        uword addr = UntaggedObject::ToAddr(target->untag());
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        *ptr = forwarder->target();
      }
    }
    return has_new_target;
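    // A ForwardingCorpse is what evacuation writes over the old body of a
    // moved object: a filler recording the object's new address, so any
    // visitor that still finds the old location can redirect its slot in
    // place.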
#if defined(DART_COMPRESSED_POINTERS)
  // Same forwarding loop for compressed (32-bit) pointer slots.
  bool PredicateVisitCompressedPointers(uword heap_base,
                                        CompressedObjectPtr* first,
                                        CompressedObjectPtr* last) {
    bool has_new_target = false;
    for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr target = ptr->Decompress(heap_base);
      if (target->IsImmediateObject()) continue;
      if (target->IsNewObject()) {
        has_new_target = true;
        continue;
      }
      if (target->IsForwardingCorpse()) {
        uword addr = UntaggedObject::ToAddr(target->untag());
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        *ptr = forwarder->target();
      }
    }
    return has_new_target;
  }
#endif  // DART_COMPRESSED_POINTERS
    if (target->IsHeapObject() && target->IsForwardingCorpse()) {
    // In VisitTypedDataViewPointers(): views whose backing store moved need
    // their cached data pointer recomputed once forwarding is complete.
    const bool backing_moved = old_backing != new_backing;
    if (backing_moved) {
      typed_data_views_.Add(view);
    }

    // In CanVisitSuspendStatePointers(): SuspendStates that hold a suspended
    // frame (pc != 0) are deferred and revisited in UpdateSuspendStates().
    if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
      // ...
      suspend_states_.Add(suspend_state);
    // Second pass over the collected views: recompute the interior data
    // pointer of views backed by internal typed data.
    for (intptr_t i = 0; i < typed_data_views_.length(); i++) {
      auto raw_view = typed_data_views_[i];
      const intptr_t cid =
          raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
      // Only internal typed data keeps an interior pointer into a payload
      // that may have moved; external data is unaffected.
      if (IsTypedDataClassId(cid)) {
        raw_view->untag()->RecomputeDataFieldForInternalTypedData();
      }
    }

  void UpdateSuspendStates() {
    can_visit_stack_frames_ = true;
    for (intptr_t i = 0; i < suspend_states_.length(); i++) {
      auto suspend_state = suspend_states_[i];
      suspend_state->untag()->VisitPointers(this);
    }
  }

  bool can_visit_stack_frames_ = false;
  // StoreBufferForwardingVisitor: entries in the store buffer may refer to
  // evacuated objects; follow the forwarding corpse to the new location.
    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr obj = *ptr;
      ASSERT(!obj->IsImmediateOrNewObject());
      if (obj->IsForwardingCorpse()) {
        uword addr = UntaggedObject::ToAddr(obj->untag());
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        obj = forwarder->target();
      }
      // ...
    }

#if defined(DART_COMPRESSED_POINTERS)
  // ... (compressed-pointer variant of the same loop)
#endif

  IncrementalForwardingVisitor* visitor_;
  // EpilogueState: hands out units of epilogue work to the parallel tasks:
  // candidate pages to evacuate, store-buffer blocks and new-space pages to
  // forward, plus one-shot slices (weak handles, weak tables, the object-id
  // ring, roots, progress-bar resets) that exactly one task claims.
      : evac_page_(evac_page),
        block_(block),
        new_page_(new_page),
        pages_lock_(pages_lock) {}

  bool NextEvacPage(Page** page) {
    MutexLocker ml(pages_lock_);
    while (evac_page_ != nullptr) {
      Page* current = evac_page_;
      evac_page_ = current->next();
      if (current->is_evacuation_candidate()) {
        *page = current;
        return true;
      }
    }
    return false;
  }

  bool NextBlock(StoreBufferBlock** block) {
    MutexLocker ml(pages_lock_);
    if (block_ != nullptr) {
      StoreBufferBlock* current = block_;
      block_ = current->next();
      *block = current;
      return true;
    }
    return false;
  }

  bool NextNewPage(Page** page) {
    MutexLocker ml(pages_lock_);
    if (new_page_ != nullptr) {
      Page* current = new_page_;
      new_page_ = current->next();
      *page = current;
      return true;
    }
    return false;
  }

  // A one-shot slice is claimed by swapping its flag to false.
  bool TakeResetProgressBars() {
    return reset_progress_bars_slice_.exchange(false);
  }

  RelaxedAtomic<bool> weak_handles_slice_ = {true};
  RelaxedAtomic<bool> weak_tables_slice_ = {true};
  RelaxedAtomic<bool> id_ring_slice_ = {true};
  RelaxedAtomic<bool> roots_slice_ = {true};
  RelaxedAtomic<bool> reset_progress_bars_slice_ = {true};
  RelaxedAtomic<intptr_t> new_free_size_ = {0};
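  // Note: the slice flags are RelaxedAtomic, presumably because each slice is
  // claimed with a single atomic exchange and the tasks already synchronize
  // through the ThreadBarrier, so no additional ordering is needed here.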
  // EpilogueTask: one per worker thread; evacuates candidate pages, then
  // forwards pointers from the store buffer, new space, remembered cards,
  // roots, and weak structures.
        isolate_group_(isolate_group),
        old_space_(old_space),

    // Forward the object-id ring used by the VM service, if one exists.
    if (ring != nullptr) {
  // Evacuate: copy every surviving (marked) object off its candidate page,
  // leaving a forwarding corpse at the old location.
  bool any_failed = false;
  intptr_t bytes_evacuated = 0;

  Page* page;
  while (state_->NextEvacPage(&page)) {
    bool page_failed = false;
    uword current = page->object_start();
    uword end = page->object_end();
    while (current < end) {
      ObjectPtr obj = UntaggedObject::FromAddr(current);
      intptr_t size = obj->untag()->HeapSize();
      if (obj->untag()->IsMarked()) {
        uword copied = old_space_->TryAllocatePromoLocked(freelist_, size);
        if (copied == 0) {
          // Allocation failed; this page cannot be fully emptied.
          page_failed = true;
        } else {
          bytes_evacuated += size;
          objcpy(reinterpret_cast<void*>(copied),
                 reinterpret_cast<const void*>(current), size);
          ObjectPtr copied_obj = UntaggedObject::FromAddr(copied);
          // Internal typed data carries an interior pointer into its own
          // payload, which changes when the object moves.
          if (IsTypedDataClassId(copied_obj->GetClassId())) {
            static_cast<TypedDataPtr>(copied_obj)->RecomputeDataField();
          }
          // ... clear the candidate bit on the copy and install a
          // ForwardingCorpse over the old body, pointing at copied_obj.
        }
      }
      current += size;
    }
    if (page_failed) {
      any_failed = true;
      // Keep this page: clearing the flag prevents FreeEvacuatedPages() from
      // releasing a page that still holds live objects.
      page->set_evacuation_candidate(false);
    }
  }

#if defined(SUPPORT_TIMELINE)
  tbes.SetNumArguments(1);
  tbes.FormatArgument(0, "bytes_evacuated", "%" Pd, bytes_evacuated);
#endif
  // Forward pointer slots recorded in the remembered cards of large pages.
  for (Page* page = old_space_->large_pages_; page != nullptr;
       page = page->next()) {
    page->VisitRememberedCards(visitor, true);
  }
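  // The epilogue only forwards pointers found in places that can refer to an
  // evacuated object: the store buffer, new-space pages, remembered cards on
  // large pages, roots, and weak structures. This relies on candidate objects
  // being tracked by the remembered set much like new-space objects, so any
  // old object storing a reference to one is already recorded (apparent
  // design; the write-barrier side is not part of this excerpt).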
    // ForwardAndSweepNewPage: live objects have their pointers forwarded;
    // runs of dead objects are coalesced into a single free block.
    while (current < end) {
      // ...
        // Dead object: extend the free run over any following dead objects.
        uword free_end = current + obj_size;
        while (free_end < end) {
          // ...
        }
        obj_size = free_end - current;
        // Zap the reclaimed range so stale pointers into it are easy to spot.
        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
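        // The bytes reclaimed here are accumulated via
        // EpilogueState::AddNewFreeSize() and credited back to the scavenger
        // at the end of Evacuate() through set_freed_in_words(), so the
        // scavenger's accounting reflects the space already recovered.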
void GCIncrementalCompactor::Evacuate(PageSpace* old_space) {
  IsolateGroup* isolate_group = IsolateGroup::Current();
  // Flush per-thread store buffers so the complete set of blocks can be
  // handed out to the epilogue tasks.
  isolate_group->ReleaseStoreBuffers();

  EpilogueState state(
      old_space->pages_, isolate_group->store_buffer()->PopAll(),
      old_space->heap_->new_space()->head(), &old_space->pages_lock_);

  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, num_tasks);
  for (intptr_t i = 0; i < num_tasks; i++) {
    FreeList* freelist = old_space->DataFreeList(i);
    if (i < (num_tasks - 1)) {
      Dart::thread_pool()->Run<EpilogueTask>(barrier, isolate_group, old_space,
                                             freelist, &state);
    } else {
      // The last task runs on the current thread.
      EpilogueTask task(barrier, isolate_group, old_space, freelist, &state);
      task.RunEnteredIsolateGroup();
    }
  }

  // Credit the space freed on new-space pages back to the scavenger.
  old_space->heap_->new_space()->set_freed_in_words(state.NewFreeSize() >>
                                                    kWordSizeLog2);
}
void GCIncrementalCompactor::CheckPostEvacuate(PageSpace* old_space) {
  if (!FLAG_verify_after_gc) return;

  // After evacuation, no marked object may still carry the candidate bit:
  // every surviving candidate must have been copied away.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    uword current = page->object_start();
    uword end = page->object_end();
    while (current < end) {
      ObjectPtr obj = UntaggedObject::FromAddr(current);
      intptr_t size = obj->untag()->HeapSize();
      ASSERT(!obj->untag()->IsEvacuationCandidate() ||
             !obj->untag()->IsMarked());
      current += size;
    }
  }
}
void GCIncrementalCompactor::FreeEvacuatedPages(PageSpace* old_space) {
  Page* prev_page = nullptr;
  Page* page = old_space->pages_;
  while (page != nullptr) {
    Page* next_page = page->next();
    if (page->is_evacuation_candidate()) {
      old_space->FreePage(page, prev_page);
    } else {
      prev_page = page;
    }
    page = next_page;
  }
}
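// Only pages that still carry the candidate flag reach FreePage(): pages
// where any copy failed already had the flag cleared during evacuation, so
// they survive with their remaining live objects intact.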
  // VerifyAfterIncrementalCompactionVisitor: checks that no surviving object
  // still points at a forwarding corpse left behind by evacuation.
  void VisitObject(ObjectPtr obj) override {
    if (obj->IsNewObject()) {
      // ...
    }
    current_ = obj;  // Remember the holder for error reporting.
  }

  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
      ObjectPtr obj = *ptr;
      if (!obj->IsHeapObject()) continue;
      if (obj->IsForwardingCorpse()) {
        // Report the holder, the slot, and the stale target.
        OS::PrintErr(/* message format elided */,
                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
                     static_cast<uword>(obj));
        failed_ = true;
      }
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  // Compressed-pointer variant of the same check:
      if (!obj->IsHeapObject()) continue;
      // ...
        OS::PrintErr(/* message format elided */,
                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
                     static_cast<uword>(obj));
#endif

  bool failed_ = false;
void GCIncrementalCompactor::VerifyAfterIncrementalCompaction(
    PageSpace* old_space) {
  if (!FLAG_verify_after_gc) return;

  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                "VerifyAfterIncrementalCompaction");
  VerifyAfterIncrementalCompactionVisitor visitor;
  old_space->heap_->VisitObjects(&visitor);
  if (visitor.failed()) {
    FATAL("verify after incremental compact");
  }
}