incremental_compactor.cc

// Copyright (c) 2024, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/incremental_compactor.h"

#include "platform/assert.h"
#include "vm/dart_api_state.h"
#include "vm/globals.h"
#include "vm/heap/become.h"
#include "vm/heap/freelist.h"
#include "vm/heap/heap.h"
#include "vm/heap/pages.h"
#include "vm/log.h"
#include "vm/thread_barrier.h"
#include "vm/timeline.h"
#include "vm/visitor.h"

namespace dart {

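// Prologue: runs at a safepoint at the start of an incremental compaction.
// Selects mostly-empty pages as evacuation candidates and prunes their
// entries from the free lists so they receive no further allocation.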
void GCIncrementalCompactor::Prologue(PageSpace* old_space) {
  ASSERT(Thread::Current()->OwnsGCSafepoint());
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "StartIncrementalCompact");
  if (!SelectEvacuationCandidates(old_space)) {
    return;
  }
  CheckFreeLists(old_space);
}

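// Epilogue: runs at a safepoint at the end of the cycle. If evacuation
// candidates remain, evacuates their surviving objects, forwards all
// references to the new copies, and frees the emptied pages. Returns whether
// an evacuation was performed.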
bool GCIncrementalCompactor::Epilogue(PageSpace* old_space) {
  ASSERT(Thread::Current()->OwnsGCSafepoint());
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "FinishIncrementalCompact");
  if (!HasEvacuationCandidates(old_space)) {
    return false;
  }
  old_space->MakeIterable();
  CheckFreeLists(old_space);
  CheckPreEvacuate(old_space);
  Evacuate(old_space);
  CheckPostEvacuate(old_space);
  CheckFreeLists(old_space);
  FreeEvacuatedPages(old_space);
  VerifyAfterIncrementalCompaction(old_space);
  return true;
}

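// Abort: backs out of an in-progress incremental compaction by clearing the
// evacuation-candidate state from the selected pages and from every object on
// them. Concurrent marking is paused while the object bits are cleared.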
void GCIncrementalCompactor::Abort(PageSpace* old_space) {
  ASSERT(Thread::Current()->OwnsGCSafepoint());
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "AbortIncrementalCompact");

  {
    MonitorLocker ml(old_space->tasks_lock());
    switch (old_space->phase()) {
      case PageSpace::kDone:
        return;  // No incremental compact in progress.
      case PageSpace::kSweepingLarge:
      case PageSpace::kSweepingRegular:
        // No incremental compact in progress, the page list is incomplete, and
        // accessing page->next is a data race.
        return;
      case PageSpace::kMarking:
      case PageSpace::kAwaitingFinalization:
        break;  // Incremental compact may be in progress.
      default:
        UNREACHABLE();
    }
  }

  old_space->PauseConcurrentMarking();

  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (!page->is_evacuation_candidate()) continue;

    page->set_evacuation_candidate(false);

    uword start = page->object_start();
    uword end = page->object_end();
    uword current = start;
    while (current < end) {
      ObjectPtr obj = UntaggedObject::FromAddr(current);
      obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
      current += obj->untag()->HeapSize();
    }
  }

  old_space->ResumeConcurrentMarking();
}

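// Work state shared by the prologue tasks: the candidate pages with their
// live byte counts, plus cursors used to hand out pages and free lists to the
// parallel workers.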
struct LiveBytes {
  Page* page;
  intptr_t live_bytes;
};

struct PrologueState {
  MallocGrowableArray<LiveBytes> pages;
  RelaxedAtomic<intptr_t> page_cursor;
  intptr_t page_limit;
  RelaxedAtomic<intptr_t> freelist_cursor;
  intptr_t freelist_limit;
};

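// Parallel prologue worker. Each worker claims candidate pages and sets the
// evacuation-candidate bit on every object on them (skipping free-list
// elements and forwarding corpses), then claims free lists and unlinks any
// element that resides on a candidate page.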
class PrologueTask : public ThreadPool::Task {
 public:
  PrologueTask(ThreadBarrier* barrier,
               IsolateGroup* isolate_group,
               PageSpace* old_space,
               PrologueState* state)
      : barrier_(barrier),
        isolate_group_(isolate_group),
        old_space_(old_space),
        state_(state) {}

  void Run() {
    if (!barrier_->TryEnter()) {
      barrier_->Release();
      return;
    }

    bool result = Thread::EnterIsolateGroupAsHelper(
        isolate_group_, Thread::kIncrementalCompactorTask,
        /*bypass_safepoint=*/true);
    ASSERT(result);

    RunEnteredIsolateGroup();

    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);

    barrier_->Sync();
    barrier_->Release();
  }

  void RunEnteredIsolateGroup() {
    MarkEvacuationCandidates();
    PruneFreeLists();
  }

  void MarkEvacuationCandidates() {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                  "MarkEvacuationCandidates");
    for (;;) {
      intptr_t page_index = state_->page_cursor.fetch_add(1);
      if (page_index >= state_->page_limit) break;
      Page* page = state_->pages[page_index].page;

      // Already set, otherwise a barrier would be needed before moving onto
      // freelists.
      ASSERT(page->is_evacuation_candidate());

      uword start = page->object_start();
      uword end = page->object_end();
      uword current = start;
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t cid = obj->untag()->GetClassId();
        if ((cid != kFreeListElement) && (cid != kForwardingCorpse)) {
          obj->untag()->SetIsEvacuationCandidateUnsynchronized();
        }
        current += obj->untag()->HeapSize();
      }
    }
  }

  void PruneFreeLists() {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "PruneFreeLists");
    for (;;) {
      intptr_t chunk = state_->freelist_cursor.fetch_add(1);
      if (chunk >= state_->freelist_limit) break;
      intptr_t list_index = chunk / (FreeList::kNumLists + 1);
      intptr_t size_class_index = chunk % (FreeList::kNumLists + 1);
      FreeList* freelist = &old_space_->freelists_[list_index];

      // Empty bump-region, no need to prune this.
      ASSERT(freelist->top_ == freelist->end_);

      FreeListElement* current = freelist->free_lists_[size_class_index];
      freelist->free_lists_[size_class_index] = nullptr;
      while (current != nullptr) {
        FreeListElement* next = current->next();
        if (!Page::Of(current)->is_evacuation_candidate()) {
          current->set_next(freelist->free_lists_[size_class_index]);
          freelist->free_lists_[size_class_index] = current;
        }
        current = next;
      }
    }
  }

 private:
  ThreadBarrier* barrier_;
  IsolateGroup* isolate_group_;
  PageSpace* old_space_;
  PrologueState* state_;

  DISALLOW_COPY_AND_ASSIGN(PrologueTask);
};

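// Chooses which pages to evacuate: pages that are at least half empty, taken
// cheapest (least live data) first, until a budget of one quarter of the
// new-space threshold is reached. The chosen pages are then processed in
// parallel by PrologueTasks, after which the per-worker free-list bitmaps are
// rebuilt to reflect the pruned lists.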
bool GCIncrementalCompactor::SelectEvacuationCandidates(PageSpace* old_space) {
  // Only evacuate pages that are at least half empty.
  constexpr intptr_t kEvacuationThreshold = kPageSize / 2;

  // Evacuate no more than this amount of objects. This puts a bound on the
  // stop-the-world evacuate step that is similar to the existing longest
  // stop-the-world step of the scavenger.
  const intptr_t kMaxEvacuatedBytes =
      (old_space->heap_->new_space()->ThresholdInWords() << kWordSizeLog2) / 4;

  PrologueState state;
  {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                  "SelectEvacuationCandidates");
    for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
      if (page->is_never_evacuate()) continue;

      intptr_t live_bytes = page->live_bytes();
      if (live_bytes > kEvacuationThreshold) continue;

      state.pages.Add({page, live_bytes});
    }
    state.pages.Sort([](const LiveBytes* a, const LiveBytes* b) -> int {
      if (a->live_bytes < b->live_bytes) return -1;
      if (a->live_bytes > b->live_bytes) return 1;
      return 0;
    });

    intptr_t num_candidates = 0;
    intptr_t cumulative_live_bytes = 0;
    for (intptr_t i = 0; i < state.pages.length(); i++) {
      intptr_t live_bytes = state.pages[i].live_bytes;
      if (cumulative_live_bytes + live_bytes <= kMaxEvacuatedBytes) {
        num_candidates++;
        cumulative_live_bytes += live_bytes;
        state.pages[i].page->set_evacuation_candidate(true);
      }
    }

#if defined(SUPPORT_TIMELINE)
    tbes.SetNumArguments(2);
    tbes.FormatArgument(0, "cumulative_live_bytes", "%" Pd,
                        cumulative_live_bytes);
    tbes.FormatArgument(1, "num_candidates", "%" Pd, num_candidates);
#endif

    state.page_cursor = 0;
    state.page_limit = num_candidates;
    state.freelist_cursor =
        PageSpace::kDataFreelist * (FreeList::kNumLists + 1);
    state.freelist_limit =
        old_space->num_freelists_ * (FreeList::kNumLists + 1);

    if (num_candidates == 0) return false;
  }

  old_space->ReleaseBumpAllocation();

  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
  RELEASE_ASSERT(num_tasks > 0);
  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
  IsolateGroup* isolate_group = IsolateGroup::Current();
  for (intptr_t i = 0; i < num_tasks; i++) {
    if (i < (num_tasks - 1)) {
      // Begin compacting on a helper thread.
      bool result = Dart::thread_pool()->Run<PrologueTask>(
          barrier, isolate_group, old_space, &state);
      ASSERT(result);
    } else {
      // Last worker is the main thread.
      PrologueTask task(barrier, isolate_group, old_space, &state);
      task.RunEnteredIsolateGroup();
      barrier->Sync();
      barrier->Release();
    }
  }

  for (intptr_t i = PageSpace::kDataFreelist, n = old_space->num_freelists_;
       i < n; i++) {
    FreeList* freelist = &old_space->freelists_[i];
    ASSERT(freelist->top_ == freelist->end_);
    freelist->free_map_.Reset();
    for (intptr_t j = 0; j < FreeList::kNumLists; j++) {
      freelist->free_map_.Set(j, freelist->free_lists_[j] != nullptr);
    }
  }

  return true;
}

// Free lists should not contain any evacuation candidates.
void GCIncrementalCompactor::CheckFreeLists(PageSpace* old_space) {
#if defined(DEBUG)
  for (intptr_t i = 0, n = old_space->num_freelists_; i < n; i++) {
    FreeList* freelist = &old_space->freelists_[i];
    if (freelist->top_ < freelist->end_) {
      Page* page = Page::Of(freelist->top_);
      ASSERT(!page->is_evacuation_candidate());
    }
    for (intptr_t j = 0; j <= FreeList::kNumLists; j++) {
      FreeListElement* current = freelist->free_lists_[j];
      while (current != nullptr) {
        Page* page = Page::Of(reinterpret_cast<uword>(current));
        ASSERT(!page->is_evacuation_candidate());
        current = current->next();
      }
    }
  }
#endif
}

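// Word-granularity copy used when moving objects. Heap objects are word
// aligned and their sizes are multiples of the two-word object alignment, so
// the copy can move two words per iteration with no tail case.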
static void objcpy(void* dst, const void* src, size_t size) {
  uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
  const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
  do {
    uword a = *src_cursor++;
    uword b = *src_cursor++;
    *dst_cursor++ = a;
    *dst_cursor++ = b;
    size -= (2 * sizeof(uword));
  } while (size > 0);
}

bool GCIncrementalCompactor::HasEvacuationCandidates(PageSpace* old_space) {
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (page->is_evacuation_candidate()) return true;
  }
  return false;
}

void GCIncrementalCompactor::CheckPreEvacuate(PageSpace* old_space) {
  if (!FLAG_verify_before_gc) return;

  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "CheckPreEvacuate");

  // Check that evacuation candidate pages contain only evacuation candidate
  // objects or free space, i.e., that we didn't allocate into them after
  // selecting them as evacuation candidates.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (page->is_evacuation_candidate()) {
      uword start = page->object_start();
      uword end = page->object_end();
      uword current = start;
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t size = obj->untag()->HeapSize();
        ASSERT(obj->untag()->IsEvacuationCandidate() ||
               obj->untag()->GetClassId() == kFreeListElement ||
               obj->untag()->GetClassId() == kForwardingCorpse);
        current += size;
      }
    }
  }

  // Check that non-evacuating pages don't contain evacuation candidates.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    if (!page->is_evacuation_candidate()) {
      uword start = page->object_start();
      uword end = page->object_end();
      uword current = start;
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t size = obj->untag()->HeapSize();
        ASSERT(!obj->untag()->IsEvacuationCandidate());
        current += size;
      }
    }
  }
}

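// Rewrites references that still point at evacuated objects: anything that
// points at a forwarding corpse is updated to the corpse's target. Typed data
// views and SuspendStates need extra fixup and are collected for later passes.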
class IncrementalForwardingVisitor : public ObjectPointerVisitor,
                                     public ObjectVisitor,
                                     public HandleVisitor {
 public:
  explicit IncrementalForwardingVisitor(Thread* thread)
      : ObjectPointerVisitor(thread->isolate_group()), HandleVisitor(thread) {}

  void VisitObject(ObjectPtr obj) override {
    if (obj->untag()->IsMarked()) {
      obj->untag()->VisitPointers(this);
    }
  }

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    PredicateVisitPointers(first, last);
  }
  bool PredicateVisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    bool has_new_target = false;
    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr target = *ptr;
      if (target->IsImmediateObject()) continue;
      if (target->IsNewObject()) {
        has_new_target = true;
        continue;
      }

      if (target->IsForwardingCorpse()) {
        ASSERT(!target->untag()->IsMarked());
        ASSERT(!target->untag()->IsEvacuationCandidate());
        uword addr = UntaggedObject::ToAddr(target);
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        *ptr = forwarder->target();
      } else {
        ASSERT(target->untag()->IsMarked());
        ASSERT(!target->untag()->IsEvacuationCandidate());
      }
    }
    return has_new_target;
  }

#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* first,
                               CompressedObjectPtr* last) override {
    PredicateVisitCompressedPointers(heap_base, first, last);
  }
  bool PredicateVisitCompressedPointers(uword heap_base,
                                        CompressedObjectPtr* first,
                                        CompressedObjectPtr* last) override {
    bool has_new_target = false;
    for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr target = ptr->Decompress(heap_base);
      if (target->IsImmediateObject()) continue;
      if (target->IsNewObject()) {
        has_new_target = true;
        continue;
      }

      if (target->IsForwardingCorpse()) {
        ASSERT(!target->untag()->IsMarked());
        ASSERT(!target->untag()->IsEvacuationCandidate());
        uword addr = UntaggedObject::ToAddr(target);
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        *ptr = forwarder->target();
      } else {
        ASSERT(target->untag()->IsMarked());
        ASSERT(!target->untag()->IsEvacuationCandidate());
      }
    }
    return has_new_target;
  }
#endif

  void VisitHandle(uword addr) override {
    FinalizablePersistentHandle* handle =
        reinterpret_cast<FinalizablePersistentHandle*>(addr);
    ObjectPtr target = handle->ptr();
    if (target->IsHeapObject() && target->IsForwardingCorpse()) {
      uword addr = UntaggedObject::ToAddr(target);
      ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
      *handle->ptr_addr() = forwarder->target();
    }
  }

  void VisitTypedDataViewPointers(TypedDataViewPtr view,
                                  CompressedObjectPtr* first,
                                  CompressedObjectPtr* last) override {
    ObjectPtr old_backing = view->untag()->typed_data();
    VisitCompressedPointers(view->heap_base(), first, last);
    ObjectPtr new_backing = view->untag()->typed_data();

    const bool backing_moved = old_backing != new_backing;
    if (backing_moved) {
      typed_data_views_.Add(view);
    }
  }

  bool CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) override {
    if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
      // Visiting pointers of SuspendState objects with copied stack frame
      // needs to query stack map, which can touch other Dart objects
      // (such as GrowableObjectArray of InstructionsTable).
      // Those objects may have an inconsistent state during compaction,
      // so processing of SuspendState objects is postponed to the later
      // stage of compaction.
      suspend_states_.Add(suspend_state);
      return false;
    }
    return true;
  }

  void UpdateViews() {
    const intptr_t length = typed_data_views_.length();
    for (intptr_t i = 0; i < length; ++i) {
      auto raw_view = typed_data_views_[i];
      const classid_t cid =
          raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
      // If we have external typed data we can simply return, since the backing
      // store lives in C-heap and will not move. Otherwise we have to update
      // the inner pointer.
      if (IsTypedDataClassId(cid)) {
        raw_view->untag()->RecomputeDataFieldForInternalTypedData();
      } else {
        ASSERT(IsExternalTypedDataClassId(cid));
      }
    }
  }

  void UpdateSuspendStates() {
    can_visit_stack_frames_ = true;
    const intptr_t length = suspend_states_.length();
    for (intptr_t i = 0; i < length; ++i) {
      auto suspend_state = suspend_states_[i];
      suspend_state->untag()->VisitPointers(this);
    }
  }

 private:
  bool can_visit_stack_frames_ = false;
  MallocGrowableArray<TypedDataViewPtr> typed_data_views_;
  MallocGrowableArray<SuspendStatePtr> suspend_states_;

  DISALLOW_COPY_AND_ASSIGN(IncrementalForwardingVisitor);
};

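// Used on store-buffer blocks: entries that point at forwarding corpses are
// redirected to the new copies, and each remembered object is then handed to
// the main visitor so the pointers it contains get forwarded as well.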
class StoreBufferForwardingVisitor : public ObjectPointerVisitor {
 public:
  StoreBufferForwardingVisitor(IsolateGroup* isolate_group,
                               IncrementalForwardingVisitor* visitor)
      : ObjectPointerVisitor(isolate_group), visitor_(visitor) {}

  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr obj = *ptr;
      ASSERT(!obj->IsImmediateOrNewObject());

      if (obj->IsForwardingCorpse()) {
        ASSERT(!obj->untag()->IsMarked());
        ASSERT(!obj->untag()->IsEvacuationCandidate());
        uword addr = UntaggedObject::ToAddr(obj);
        ForwardingCorpse* forwarder = reinterpret_cast<ForwardingCorpse*>(addr);
        obj = forwarder->target();
        *ptr = obj;
      } else {
        ASSERT(obj->untag()->IsMarked());
        ASSERT(!obj->untag()->IsEvacuationCandidate());
      }

      visitor_->VisitObject(obj);
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* first,
                               CompressedObjectPtr* last) override {
    UNREACHABLE();  // Store buffer blocks are not compressed.
  }
#endif

 private:
  IncrementalForwardingVisitor* visitor_;

  DISALLOW_COPY_AND_ASSIGN(StoreBufferForwardingVisitor);
};

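// Work state shared by the epilogue tasks: iterators over the candidate
// pages, the popped store-buffer blocks, and the new-space pages, plus
// one-shot flags so exactly one worker handles each serial forwarding slice.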
class EpilogueState {
 public:
  EpilogueState(Page* evac_page,
                StoreBufferBlock* block,
                Page* new_page,
                Mutex* pages_lock)
      : evac_page_(evac_page),
        block_(block),
        new_page_(new_page),
        pages_lock_(pages_lock) {}

  bool NextEvacPage(Page** page) {
    // Needs to be the old_space pages lock since evacuation may also allocate
    // new pages and race with page->next_.
    MutexLocker ml(pages_lock_);
    while (evac_page_ != nullptr) {
      Page* current = evac_page_;
      evac_page_ = current->next();
      if (current->is_evacuation_candidate()) {
        *page = current;
        return true;
      }
    }
    return false;
  }

  bool NextBlock(StoreBufferBlock** block) {
    MutexLocker ml(pages_lock_);
    if (block_ != nullptr) {
      StoreBufferBlock* current = block_;
      block_ = current->next();
      current->set_next(nullptr);
      *block = current;
      return true;
    }
    return false;
  }

  bool NextNewPage(Page** page) {
    MutexLocker ml(pages_lock_);
    if (new_page_ != nullptr) {
      Page* current = new_page_;
      new_page_ = current->next();
      *page = current;
      return true;
    }
    return false;
  }

  bool TakeOOM() { return oom_slice_.exchange(false); }
  bool TakeWeakHandles() { return weak_handles_slice_.exchange(false); }
  bool TakeWeakTables() { return weak_tables_slice_.exchange(false); }
  bool TakeIdRing() { return id_ring_slice_.exchange(false); }
  bool TakeRoots() { return roots_slice_.exchange(false); }
  bool TakeResetProgressBars() {
    return reset_progress_bars_slice_.exchange(false);
  }

  void AddNewFreeSize(intptr_t size) { new_free_size_ += size; }
  intptr_t NewFreeSize() { return new_free_size_; }

 private:
  Page* evac_page_;
  StoreBufferBlock* block_;
  Page* new_page_;
  Mutex* pages_lock_;

  RelaxedAtomic<bool> oom_slice_ = {true};
  RelaxedAtomic<bool> weak_handles_slice_ = {true};
  RelaxedAtomic<bool> weak_tables_slice_ = {true};
  RelaxedAtomic<bool> id_ring_slice_ = {true};
  RelaxedAtomic<bool> roots_slice_ = {true};
  RelaxedAtomic<bool> reset_progress_bars_slice_ = {true};
  RelaxedAtomic<intptr_t> new_free_size_ = {0};
};

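// Parallel epilogue worker. Evacuates live objects off the candidate pages,
// then forwards references from the store buffer, remembered cards, new
// space, global roots, weak handles, and weak tables to the relocated copies.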
class EpilogueTask : public ThreadPool::Task {
 public:
  EpilogueTask(ThreadBarrier* barrier,
               IsolateGroup* isolate_group,
               PageSpace* old_space,
               FreeList* freelist,
               EpilogueState* state)
      : barrier_(barrier),
        isolate_group_(isolate_group),
        old_space_(old_space),
        freelist_(freelist),
        state_(state) {}

  void Run() {
    bool result = Thread::EnterIsolateGroupAsHelper(
        isolate_group_, Thread::kIncrementalCompactorTask,
        /*bypass_safepoint=*/true);
    ASSERT(result);

    RunEnteredIsolateGroup();

    Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);

    barrier_->Sync();
    barrier_->Release();
  }

  void RunEnteredIsolateGroup() {
    Thread* thread = Thread::Current();

    Evacuate();

    barrier_->Sync();

    IncrementalForwardingVisitor visitor(thread);
    if (state_->TakeOOM()) {
      old_space_->VisitRoots(&visitor);  // OOM reservation.
    }
    ForwardStoreBuffer(&visitor);
    ForwardRememberedCards(&visitor);
    ForwardNewSpace(&visitor);
    if (state_->TakeWeakHandles()) {
      TIMELINE_FUNCTION_GC_DURATION(thread, "WeakPersistentHandles");
      isolate_group_->VisitWeakPersistentHandles(&visitor);
    }
    if (state_->TakeWeakTables()) {
      TIMELINE_FUNCTION_GC_DURATION(thread, "WeakTables");
      isolate_group_->heap()->ForwardWeakTables(&visitor);
    }
#ifndef PRODUCT
    if (state_->TakeIdRing()) {
      TIMELINE_FUNCTION_GC_DURATION(thread, "IdRing");
      isolate_group_->ForEachIsolate(
          [&](Isolate* isolate) {
            ObjectIdRing* ring = isolate->object_id_ring();
            if (ring != nullptr) {
              ring->VisitPointers(&visitor);
            }
          },
          /*at_safepoint=*/true);
    }
#endif  // !PRODUCT

    barrier_->Sync();

    {
      // After forwarding the heap, because this visits each view's underlying
      // buffer.
      TIMELINE_FUNCTION_GC_DURATION(thread, "Views");
      visitor.UpdateViews();
    }

    if (state_->TakeRoots()) {
      // After forwarding the heap because visiting the stack requires stackmaps
      // to already be forwarded.
      TIMELINE_FUNCTION_GC_DURATION(thread, "Roots");
      isolate_group_->VisitObjectPointers(
          &visitor, ValidationPolicy::kDontValidateFrames);
    }

    barrier_->Sync();

    {
      // After processing the object store because of the dependency on
      // canonicalized_stack_map_entries.
      TIMELINE_FUNCTION_GC_DURATION(thread, "SuspendStates");
      visitor.UpdateSuspendStates();
    }

    if (state_->TakeResetProgressBars()) {
      // After ForwardRememberedCards.
      old_space_->ResetProgressBars();
    }
  }

  void Evacuate() {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Evacuate");

    old_space_->AcquireLock(freelist_);

    bool any_failed = false;
    intptr_t bytes_evacuated = 0;
    Page* page;
    while (state_->NextEvacPage(&page)) {
      ASSERT(page->is_evacuation_candidate());

      bool page_failed = false;
      uword start = page->object_start();
      uword end = page->object_end();
      uword current = start;
      while (current < end) {
        ObjectPtr obj = UntaggedObject::FromAddr(current);
        intptr_t size = obj->untag()->HeapSize();

        if (obj->untag()->IsMarked()) {
          uword copied = old_space_->TryAllocatePromoLocked(freelist_, size);
          if (copied == 0) {
            obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
            page_failed = true;
            any_failed = true;
          } else {
            ASSERT(!Page::Of(copied)->is_evacuation_candidate());
            bytes_evacuated += size;
            objcpy(reinterpret_cast<void*>(copied),
                   reinterpret_cast<const void*>(current), size);
            ObjectPtr copied_obj = UntaggedObject::FromAddr(copied);

            copied_obj->untag()->ClearIsEvacuationCandidateUnsynchronized();
            if (IsTypedDataClassId(copied_obj->GetClassId())) {
              static_cast<TypedDataPtr>(copied_obj)
                  ->untag()
                  ->RecomputeDataField();
            }

            ForwardingCorpse::AsForwarder(current, size)
                ->set_target(copied_obj);
          }
        }

        current += size;
      }

      if (page_failed) {
        page->set_evacuation_candidate(false);
      }
    }

    old_space_->ReleaseLock(freelist_);
    old_space_->usage_.used_in_words -= (bytes_evacuated >> kWordSizeLog2);
#if defined(SUPPORT_TIMELINE)
    tbes.SetNumArguments(1);
    tbes.FormatArgument(0, "bytes_evacuated", "%" Pd, bytes_evacuated);
#endif

    if (any_failed) {
      OS::PrintErr("evacuation failed\n");
    }
  }

  void ForwardStoreBuffer(IncrementalForwardingVisitor* visitor) {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardStoreBuffer");

    StoreBufferForwardingVisitor store_visitor(isolate_group_, visitor);
    StoreBuffer* store_buffer = isolate_group_->store_buffer();
    StoreBufferBlock* block;
    while (state_->NextBlock(&block)) {
      // Generated code appends to store buffers; tell MemorySanitizer.
      MSAN_UNPOISON(block, sizeof(*block));

      block->VisitObjectPointers(&store_visitor);

      store_buffer->PushBlock(block, StoreBuffer::kIgnoreThreshold);
    }
  }

  void ForwardRememberedCards(IncrementalForwardingVisitor* visitor) {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardRememberedCards");
    for (Page* page = old_space_->large_pages_; page != nullptr;
         page = page->next()) {
      page->VisitRememberedCards(visitor, /*only_marked*/ true);
    }
  }

  void ForwardNewSpace(IncrementalForwardingVisitor* visitor) {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ForwardNewSpace");
    Page* page;
    while (state_->NextNewPage(&page)) {
      intptr_t free = ForwardAndSweepNewPage(visitor, page);
      state_->AddNewFreeSize(free);
    }
  }

  DART_NOINLINE
  intptr_t ForwardAndSweepNewPage(IncrementalForwardingVisitor* visitor,
                                  Page* page) {
    ASSERT(!page->is_image());
    ASSERT(!page->is_old());
    ASSERT(!page->is_executable());

    uword start = page->object_start();
    uword end = page->object_end();
    uword current = start;
    intptr_t free = 0;
    while (current < end) {
      ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
      ASSERT(Page::Of(raw_obj) == page);
      uword tags = raw_obj->untag()->tags();
      intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
      if (UntaggedObject::IsMarked(tags)) {
        raw_obj->untag()->ClearMarkBitUnsynchronized();
        raw_obj->untag()->VisitPointers(visitor);
      } else {
        uword free_end = current + obj_size;
        while (free_end < end) {
          ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
          tags = next_obj->untag()->tags();
          if (UntaggedObject::IsMarked(tags)) {
            // Reached the end of the free block.
            break;
          }
          // Expand the free block by the size of this object.
          free_end += next_obj->untag()->HeapSize(tags);
        }
        obj_size = free_end - current;
#if defined(DEBUG)
        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
        FreeListElement::AsElementNew(current, obj_size);
        free += obj_size;
      }
      current += obj_size;
    }
    return free;
  }

 private:
  ThreadBarrier* barrier_;
  IsolateGroup* isolate_group_;
  PageSpace* old_space_;
  FreeList* freelist_;
  EpilogueState* state_;
};

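// Runs the stop-the-world evacuation step on FLAG_scavenger_tasks workers,
// reusing the scavenger's per-worker data free lists for the promotion
// allocations, then records how much new-space memory the sweep freed.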
void GCIncrementalCompactor::Evacuate(PageSpace* old_space) {
  IsolateGroup* isolate_group = IsolateGroup::Current();
  isolate_group->ReleaseStoreBuffers();
  EpilogueState state(
      old_space->pages_, isolate_group->store_buffer()->PopAll(),
      old_space->heap_->new_space()->head(), &old_space->pages_lock_);

  // This must use FLAG_scavenger_tasks because that determines the number of
  // freelists available for workers.
  const intptr_t num_tasks = Utils::Maximum(1, FLAG_scavenger_tasks);
  RELEASE_ASSERT(num_tasks > 0);
  ThreadBarrier* barrier = new ThreadBarrier(num_tasks, num_tasks);
  for (intptr_t i = 0; i < num_tasks; i++) {
    // Begin compacting on a helper thread.
    FreeList* freelist = old_space->DataFreeList(i);
    if (i < (num_tasks - 1)) {
      bool result = Dart::thread_pool()->Run<EpilogueTask>(
          barrier, isolate_group, old_space, freelist, &state);
      ASSERT(result);
    } else {
      // Last worker is the main thread.
      EpilogueTask task(barrier, isolate_group, old_space, freelist, &state);
      task.RunEnteredIsolateGroup();
      barrier->Sync();
      barrier->Release();
    }
  }

  old_space->heap_->new_space()->set_freed_in_words(state.NewFreeSize() >>
                                                    kWordSizeLog2);
}

void GCIncrementalCompactor::CheckPostEvacuate(PageSpace* old_space) {
  if (!FLAG_verify_after_gc) return;

  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "CheckPostEvacuate");

  // Check that no marked evacuation candidates remain.
  for (Page* page = old_space->pages_; page != nullptr; page = page->next()) {
    uword start = page->object_start();
    uword end = page->object_end();
    uword current = start;
    while (current < end) {
      ObjectPtr obj = UntaggedObject::FromAddr(current);
      intptr_t size = obj->untag()->HeapSize();
      ASSERT(!obj->untag()->IsEvacuationCandidate() ||
             !obj->untag()->IsMarked());
      current += size;
    }
  }
}

void GCIncrementalCompactor::FreeEvacuatedPages(PageSpace* old_space) {
  Page* prev_page = nullptr;
  Page* page = old_space->pages_;
  while (page != nullptr) {
    Page* next_page = page->next();
    if (page->is_evacuation_candidate()) {
      old_space->FreePage(page, prev_page);
    } else {
      prev_page = page;
    }
    page = next_page;
  }
}

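// Debug check (FLAG_verify_after_gc): no live object may still reference a
// forwarding corpse, a free-list element, or an unmarked old-space object.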
class VerifyAfterIncrementalCompactionVisitor : public ObjectVisitor,
                                                public ObjectPointerVisitor {
 public:
  VerifyAfterIncrementalCompactionVisitor()
      : ObjectPointerVisitor(IsolateGroup::Current()) {}

  void VisitObject(ObjectPtr obj) override {
    // New-space has been swept, but old-space has not.
    if (obj->IsNewObject()) {
      if (obj->untag()->GetClassId() != kFreeListElement) {
        current_ = obj;
        obj->untag()->VisitPointers(this);
      }
    } else {
      if (obj->untag()->IsMarked()) {
        current_ = obj;
        obj->untag()->VisitPointers(this);
      }
    }
  }

  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
      ObjectPtr obj = *ptr;
      if (!obj->IsHeapObject()) continue;
      if (obj->IsForwardingCorpse() || obj->IsFreeListElement() ||
          (obj->IsOldObject() && !obj->untag()->IsMarked())) {
        OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
                     static_cast<uword>(obj));
        failed_ = true;
      }
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* from,
                               CompressedObjectPtr* to) override {
    for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
      ObjectPtr obj = ptr->Decompress(heap_base);
      if (!obj->IsHeapObject()) continue;
      if (obj->IsForwardingCorpse() || obj->IsFreeListElement() ||
          (obj->IsOldObject() && !obj->untag()->IsMarked())) {
        OS::PrintErr("object=0x%" Px ", slot=0x%" Px ", value=0x%" Px "\n",
                     static_cast<uword>(current_), reinterpret_cast<uword>(ptr),
                     static_cast<uword>(obj));
        failed_ = true;
      }
    }
  }
#endif

  bool failed() const { return failed_; }

 private:
  ObjectPtr current_;
  bool failed_ = false;

  DISALLOW_COPY_AND_ASSIGN(VerifyAfterIncrementalCompactionVisitor);
};

void GCIncrementalCompactor::VerifyAfterIncrementalCompaction(
    PageSpace* old_space) {
  if (!FLAG_verify_after_gc) return;
  TIMELINE_FUNCTION_GC_DURATION(Thread::Current(),
                                "VerifyAfterIncrementalCompaction");
  VerifyAfterIncrementalCompactionVisitor visitor;
  old_space->heap_->VisitObjects(&visitor);
  if (visitor.failed()) {
    FATAL("verify after incremental compact");
  }
}

}  // namespace dart