20 "Force compaction to move every movable object");
// ForwardingBlock::Lookup: maps an old address to its new address within the block.
uword Lookup(uword old_addr) const {
  uword block_offset = old_addr & ~kBlockMask;
  intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
  uword preceding_live_bitmask =
      (static_cast<uword>(1) << first_unit_position) - 1;
  uword preceding_live_bitset = live_bitvector_ & preceding_live_bitmask;
  uword preceding_live_bytes = Utils::CountOneBitsWord(preceding_live_bitset)
                               << kObjectAlignmentLog2;
  return new_address_ + preceding_live_bytes;
}

void RecordLive(uword old_addr, intptr_t size) {
  intptr_t size_in_units = size >> kObjectAlignmentLog2;
  if (size_in_units >= kBitsPerWord) size_in_units = kBitsPerWord - 1;
  uword block_offset = old_addr & ~kBlockMask;
  intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
  live_bitvector_ |= ((static_cast<uword>(1) << size_in_units) - 1)
                     << first_unit_position;
}

bool IsLive(uword old_addr) const {
  uword block_offset = old_addr & ~kBlockMask;
  intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
  return (live_bitvector_ & (static_cast<uword>(1) << first_unit_position)) !=
         0;
}

uword new_address_;
uword live_bitvector_;

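// A minimal, standalone sketch of the forwarding-block idea above. It does
// not use the VM's types; the 16-byte allocation unit and 64-unit block are
// illustrative assumptions. Each bit of a block's bitvector marks one live
// allocation unit; an object's new address is the block's new base plus the
// bytes of live units preceding it, computed with a popcount.
#include <bit>
#include <cassert>
#include <cstdint>

constexpr uint64_t kToyUnit = 16;  // bytes per allocation unit (assumed)

struct ToyForwardingBlock {
  uint64_t new_base = 0;   // where this block's live objects will start
  uint64_t live_bits = 0;  // bit i set => unit i of the block is live

  void RecordLive(uint64_t offset_in_block, uint64_t size) {
    uint64_t first = offset_in_block / kToyUnit;
    uint64_t units = size / kToyUnit;
    uint64_t mask =
        (units >= 64) ? ~uint64_t{0} : ((uint64_t{1} << units) - 1);
    live_bits |= mask << first;
  }

  uint64_t Lookup(uint64_t offset_in_block) const {
    uint64_t first = offset_in_block / kToyUnit;
    uint64_t preceding = live_bits & ((uint64_t{1} << first) - 1);
    return new_base + std::popcount(preceding) * kToyUnit;
  }
};

int main() {
  ToyForwardingBlock block;
  block.new_base = 0x5000;
  block.RecordLive(/*offset_in_block=*/0, /*size=*/32);    // live object A
  block.RecordLive(/*offset_in_block=*/128, /*size=*/16);  // live object B
  assert(block.Lookup(0) == 0x5000);         // A slides to the new base
  assert(block.Lookup(128) == 0x5000 + 32);  // B lands right after A
  return 0;
}
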
ForwardingBlock* BlockFor(uword old_addr) {
  intptr_t page_offset = old_addr & ~kPageMask;
  intptr_t block_number = page_offset / kBlockSize;
  ASSERT(block_number >= 0);
  return &blocks_[block_number];
}

 private:
  ForwardingBlock blocks_[kBlocksPerPage];

  DISALLOW_ALLOCATION();

void Page::AllocateForwardingPage() {
  ASSERT(forwarding_page_ == nullptr);
  // ...
}

CompactorTask(IsolateGroup* isolate_group, GCCompactor* compactor,
              ThreadBarrier* barrier,
              RelaxedAtomic<intptr_t>* next_planning_task,
              RelaxedAtomic<intptr_t>* next_setup_task,
              RelaxedAtomic<intptr_t>* next_sliding_task,
              RelaxedAtomic<intptr_t>* next_forwarding_task,
              intptr_t num_tasks, Partition* partitions, FreeList* freelist)
    : isolate_group_(isolate_group),
      compactor_(compactor),
      barrier_(barrier),
      next_planning_task_(next_planning_task),
      next_setup_task_(next_setup_task),
      next_sliding_task_(next_sliding_task),
      next_forwarding_task_(next_forwarding_task),
      num_tasks_(num_tasks),
      partitions_(partitions),
      freelist_(freelist) {}

void PlanMoveToContiguousSize(intptr_t size);

void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
  SetupImagePageBoundaries();

  Page* fixed_head = nullptr;
  Page* fixed_tail = nullptr;

  // Divide the heap among the tasks, setting aside the never-evacuate pages.
  intptr_t num_pages = 0;
  Page* prev = nullptr;
  Page* page = pages;
  while (page != nullptr) {
    Page* next = page->next();
    if (page->is_never_evacuate()) {
      // Unlink the page and push it onto the fixed-page list.
      if (prev != nullptr) {
        prev->set_next(next);
      } else {
        pages = next;
      }
      if (fixed_tail == nullptr) {
        fixed_tail = page;
      }
      page->set_next(fixed_head);
      fixed_head = page;
    } else {
      prev = page;
      num_pages++;
    }
    page = next;
  }
  fixed_pages_ = fixed_head;

  intptr_t num_tasks = FLAG_compactor_tasks;
  RELEASE_ASSERT(num_tasks >= 1);
  if (num_pages < num_tasks) {
    num_tasks = num_pages;
  }
  if (num_tasks == 0) {
    // Nothing to slide: hand the never-evacuate pages back to the sweeper.
    // ...
    heap_->old_space()->pages_tail_ = nullptr;
    heap_->old_space()->sweep_regular_ = fixed_head;
    // ...
    return;
  }

  {
    // Partition the pages evenly among the tasks.
    const intptr_t pages_per_task = num_pages / num_tasks;
    intptr_t task_index = 0;
    intptr_t page_index = 0;
    Page* page = pages;
    Page* prev = nullptr;
    while (task_index < num_tasks) {
      if (page_index % pages_per_task == 0) {
        partitions[task_index].head = page;
        partitions[task_index].tail = nullptr;
        if (prev != nullptr) {
          prev->set_next(nullptr);
        }
        task_index++;
      }
      prev = page;
      page = page->next();
      page_index++;
    }
    ASSERT(page_index <= num_pages);
    ASSERT(task_index == num_tasks);
  }

  if (FLAG_force_evacuation) {
    // Inject empty pages at the head of each task's partition so that every
    // movable object is forced to move.
    bool oom = false;
    for (intptr_t task_index = 0; task_index < num_tasks && !oom;
         task_index++) {
      const intptr_t pages_per_task = num_pages / num_tasks;
      for (intptr_t j = 0; j < pages_per_task; j++) {
        // ... (allocate a fresh, empty page into `page`; elided) ...
        if (page == nullptr) {
          oom = true;
          break;
        }
        FreeListElement::AsElement(page->object_start(),
                                   page->object_end() - page->object_start());
        page->set_next(partitions[task_index].head);
        partitions[task_index].head = page;
      }
    }
  }

  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    if (task_index < (num_tasks - 1)) {
      // Begin compacting on a helper thread.
      Dart::thread_pool()->Run<CompactorTask>(
          thread()->isolate_group(), this, barrier, &next_planning_task,
          &next_setup_task, &next_sliding_task, &next_forwarding_task,
          num_tasks, partitions, freelist);
    } else {
      // The last worker is the main thread itself.
      CompactorTask task(thread()->isolate_group(), this, barrier,
                         &next_planning_task, &next_setup_task,
                         &next_sliding_task, &next_forwarding_task, num_tasks,
                         partitions, freelist);
      task.RunEnteredIsolateGroup();
    }
  }

326 "ForwardTypedDataViewInternalPointers");
329 auto raw_view = typed_data_views_[
i];
331 raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
337 raw_view->untag()->RecomputeDataFieldForInternalTypedData();
  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    ASSERT(partitions[task_index].tail != nullptr);
  }

  ForwardStackPointers();

355 "ForwardPostponedSuspendStatePointers");
358 can_visit_stack_frames_ =
true;
359 const intptr_t
length = postponed_suspend_states_.
length();
361 auto suspend_state = postponed_suspend_states_[
i];
362 suspend_state->untag()->VisitPointers(
this);
  // Free the fully-evacuated pages left at the tail of each partition.
  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    Page* page = partitions[task_index].tail->next();
    while (page != nullptr) {
      Page* next = page->next();
      // ... (return the page's memory; elided) ...
      page = next;
    }
  }

  // Re-link the compacted partitions (and the fixed pages) into the old
  // space's page list.
  for (intptr_t task_index = 0; task_index < num_tasks - 1; task_index++) {
    partitions[task_index].tail->set_next(partitions[task_index + 1].head);
  }
  heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
  if (fixed_head != nullptr) {
    // ... (splice the never-evacuate pages back into the list; elided) ...
  }

void CompactorTask::RunEnteredIsolateGroup() {
#ifdef SUPPORT_TIMELINE
  Thread* thread = Thread::Current();
#endif

  // Planning phase: each task claims a partition index and computes the
  // forwarding addresses for the pages in that partition.
  while (true) {
    intptr_t planning_task = next_planning_task_->fetch_add(1u);
    if (planning_task >= num_tasks_) break;

    Page* head = partitions_[planning_task].head;
    free_page_ = head;
    free_current_ = head->object_start();
    free_end_ = head->object_end();
    for (Page* page = head; page != nullptr; page = page->next()) {
      PlanPage(page);
    }
  }

  // Exactly one task sets up forwarding for the large pages.
  if (next_setup_task_->fetch_add(1u) == 0) {
    compactor_->SetupLargePages();
  }

  // Sliding phase: each task claims a partition and moves its live objects
  // down to their planned addresses.
  while (true) {
    intptr_t sliding_task = next_sliding_task_->fetch_add(1u);
    if (sliding_task >= num_tasks_) break;

    Page* head = partitions_[sliding_task].head;
    free_page_ = head;
    free_current_ = head->object_start();
    free_end_ = head->object_end();
    for (Page* page = head; page != nullptr; page = page->next()) {
      SlidePage(page);
    }

    // Add any leftover space in the last used page to the freelist.
    intptr_t free_remaining = free_end_ - free_current_;
    if (free_remaining != 0) {
      freelist_->Free(free_current_, free_remaining);
    }

    ASSERT(free_page_ != nullptr);
    partitions_[sliding_task].tail = free_page_;  // Last live page.
  }

  // Participate in forwarding the pointers within large pages.
  compactor_->ForwardLargePages();

  // Forwarding phase: a fixed list of root-forwarding jobs, each claimed by
  // whichever task gets to it first.
  bool more_forwarding_tasks = true;
  while (more_forwarding_tasks) {
    intptr_t forwarding_task = next_forwarding_task_->fetch_add(1u);
    switch (forwarding_task) {
      // ... (cases forwarding new-space objects, the remembered set, weak
      //      tables, and the other root sets; elided) ...
      // One of the cases forwards the service protocol's object id ring:
      //   ObjectIdRing* ring = isolate->object_id_ring();
      //   if (ring != nullptr) {
      //     ring->VisitPointers(compactor_);
      //   }
      default:
        more_forwarding_tasks = false;
    }
  }

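// The planning, sliding and forwarding loops above all distribute work the
// same way: each task does fetch_add(1) on a shared counter and keeps the
// index it receives, so every partition (or forwarding job) is claimed by
// exactly one task without further locking. A standalone sketch of that
// pattern with std::atomic (names here are illustrative, not the VM's):
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

void ToyWorker(std::atomic<int>* next_job, int num_jobs, int worker_id) {
  while (true) {
    int job = next_job->fetch_add(1);  // claim the next unclaimed job index
    if (job >= num_jobs) break;        // all jobs handed out; stop
    std::printf("worker %d runs job %d\n", worker_id, job);
  }
}

int main() {
  std::atomic<int> next_job{0};
  constexpr int kNumJobs = 8;
  std::vector<std::thread> workers;
  for (int i = 0; i < 3; i++) {
    workers.emplace_back(ToyWorker, &next_job, kNumJobs, i);
  }
  for (auto& t : workers) t.join();
  return 0;
}
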
void CompactorTask::PlanPage(Page* page) {
  uword current = page->object_start();
  uword end = page->object_end();

  ForwardingPage* forwarding_page = page->forwarding_page();
  ASSERT(forwarding_page != nullptr);
  forwarding_page->Clear();
  while (current < end) {
    current = PlanBlock(current, forwarding_page);
  }
}

void CompactorTask::SlidePage(Page* page) {
  uword current = page->object_start();
  uword end = page->object_end();

  ForwardingPage* forwarding_page = page->forwarding_page();
  ASSERT(forwarding_page != nullptr);
  while (current < end) {
    current = SlideBlock(current, forwarding_page);
  }
}

uword CompactorTask::PlanBlock(uword first_object,
                               ForwardingPage* forwarding_page) {
  uword block_start = first_object & kBlockMask;
  uword block_end = block_start + kBlockSize;
  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  // 1. Compute the bitvector of surviving allocation units in the block.
  intptr_t block_live_size = 0;
  uword current = first_object;
  while (current < block_end) {
    ObjectPtr obj = UntaggedObject::FromAddr(current);
    intptr_t size = obj->untag()->HeapSize();
    if (obj->untag()->IsMarked()) {
      forwarding_block->RecordLive(current, size);
      ASSERT(static_cast<intptr_t>(forwarding_block->Lookup(current)) ==
             block_live_size);
      block_live_size += size;
    }
    current += size;
  }

  // 2. Find the next contiguous space that can fit the live objects that
  //    start in this block, and record it as the block's new address.
  PlanMoveToContiguousSize(block_live_size);
  forwarding_block->set_new_address(free_current_);
  free_current_ += block_live_size;

  return block_end;  // First object in the next block.
}

uword CompactorTask::SlideBlock(uword first_object,
                                ForwardingPage* forwarding_page) {
  uword block_start = first_object & kBlockMask;
  uword block_end = block_start + kBlockSize;
  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  uword old_addr = first_object;
  while (old_addr < block_end) {
    ObjectPtr old_obj = UntaggedObject::FromAddr(old_addr);
    intptr_t size = old_obj->untag()->HeapSize();
    if (old_obj->untag()->IsMarked()) {
      uword new_addr = forwarding_block->Lookup(old_addr);
      if (new_addr != free_current_) {
        // The cursor and the planned address can only disagree when moving on
        // to a new page: release the remainder of the current page and
        // advance the free cursor.
        intptr_t free_remaining = free_end_ - free_current_;
        if (free_remaining > 0) {
          freelist_->Free(free_current_, free_remaining);
        }
        free_page_ = free_page_->next();
        ASSERT(free_page_ != nullptr);
        free_current_ = free_page_->object_start();
        free_end_ = free_page_->object_end();
        ASSERT(free_current_ == new_addr);
      }
      ObjectPtr new_obj = UntaggedObject::FromAddr(new_addr);

      // Fast path for no movement: there is often a long run of objects at
      // the start of a page that do not move at all.
      if (new_addr != old_addr) {
        // Slide the object down.
        memmove(reinterpret_cast<void*>(new_addr),
                reinterpret_cast<void*>(old_addr), size);
        if (IsTypedDataClassId(new_obj->GetClassId())) {
          static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();
        }
      }
      new_obj->untag()->ClearMarkBit();
      new_obj->untag()->VisitPointers(compactor_);

      ASSERT(free_current_ == new_addr);
      free_current_ += size;
    } else {
      ASSERT(!forwarding_block->IsLive(old_addr));
    }
    old_addr += size;
  }

  return block_end;  // First object in the next block.
}

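// A deliberately simplified, standalone sketch of the plan-then-slide scheme
// implemented by PlanBlock/SlideBlock: fixed-size slots instead of variable
// sized objects, a plain forwarding array instead of per-block bitvectors,
// and no pointer rewriting (the real compactor also forwards every reference
// through ForwardPointer). All names are illustrative.
#include <cassert>
#include <cstring>
#include <vector>

struct ToyObject {
  bool marked;  // survived marking
  int payload;
};

void ToyCompact(std::vector<ToyObject>& heap) {
  // Pass 1 (plan): assign each live object the slot it will slide down to.
  std::vector<size_t> forwarding(heap.size());
  size_t free_index = 0;
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked) forwarding[i] = free_index++;
  }
  // Pass 2 (slide): destinations never exceed sources, so in-order moves
  // cannot overwrite a live object that has not moved yet.
  for (size_t i = 0; i < heap.size(); i++) {
    if (heap[i].marked && forwarding[i] != i) {
      std::memmove(&heap[forwarding[i]], &heap[i], sizeof(ToyObject));
    }
  }
  heap.resize(free_index);                    // reclaim the tail
  for (auto& obj : heap) obj.marked = false;  // analogous to ClearMarkBit()
}

int main() {
  std::vector<ToyObject> heap = {
      {true, 1}, {false, 2}, {true, 3}, {false, 4}, {true, 5}};
  ToyCompact(heap);
  assert(heap.size() == 3);
  assert(heap[0].payload == 1 && heap[1].payload == 3 && heap[2].payload == 5);
  return 0;
}
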
void CompactorTask::PlanMoveToContiguousSize(intptr_t size) {
  // Move the free cursor to ensure 'size' bytes of contiguous space.
  ASSERT(size <= kPageSize);

  // Check if the current free page has enough space.
  intptr_t free_remaining = free_end_ - free_current_;
  if (free_remaining < size) {
    // Not enough; advance to the next free page.
    free_page_ = free_page_->next();
    ASSERT(free_page_ != nullptr);
    free_current_ = free_page_->object_start();
    free_end_ = free_page_->object_end();
    free_remaining = free_end_ - free_current_;
    ASSERT(free_remaining >= size);
  }
}

void GCCompactor::SetupImagePageBoundaries() {
  MallocGrowableArray<ImagePageRange> ranges(4);

  Page* image_page =
      Dart::vm_isolate_group()->heap()->old_space()->image_pages_;
  while (image_page != nullptr) {
    ImagePageRange range = {image_page->object_start(),
                            image_page->object_end()};
    ranges.Add(range);
    image_page = image_page->next();
  }
  image_page = heap_->old_space()->image_pages_;
  while (image_page != nullptr) {
    ImagePageRange range = {image_page->object_start(),
                            image_page->object_end()};
    ranges.Add(range);
    image_page = image_page->next();
  }

  ranges.Sort(CompareImagePageRanges);
  intptr_t image_page_count;
  ranges.StealBuffer(&image_page_ranges_, &image_page_count);
  image_page_hi_ = image_page_count - 1;
}

void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
  ObjectPtr old_target = *ptr;
  if (old_target->IsImmediateOrNewObject()) {
    return;  // Not moved.
  }

  // Objects in image pages never move: binary search the sorted image page
  // ranges and bail out on a hit.
  uword old_addr = UntaggedObject::ToAddr(old_target);
  intptr_t lo = 0;
  intptr_t hi = image_page_hi_;
  while (lo <= hi) {
    intptr_t mid = (hi - lo + 1) / 2 + lo;
    if (old_addr < image_page_ranges_[mid].start) {
      hi = mid - 1;
    } else if (old_addr >= image_page_ranges_[mid].end) {
      lo = mid + 1;
    } else {
      return;  // Image page: not moved.
    }
  }

  Page* page = Page::Of(old_target);
  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {
    return;  // Not moved (VM isolate, large page, code page).
  }
  if (page->is_never_evacuate()) {
    return;  // Not moved.
  }

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
  ASSERT(!new_target->IsImmediateOrNewObject());
  *ptr = new_target;
}

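// ForwardPointer's fast path asks "does this address fall inside an image
// page?" with a binary search over the sorted, disjoint [start, end) ranges
// built by SetupImagePageBoundaries. A standalone sketch of the same search
// (illustrative types, not the VM's ImagePageRange):
#include <cassert>
#include <cstdint>
#include <vector>

struct ToyRange {
  uintptr_t start;  // inclusive
  uintptr_t end;    // exclusive
};

bool InAnyRange(const std::vector<ToyRange>& ranges, uintptr_t addr) {
  intptr_t lo = 0;
  intptr_t hi = static_cast<intptr_t>(ranges.size()) - 1;
  while (lo <= hi) {
    intptr_t mid = lo + (hi - lo + 1) / 2;
    if (addr < ranges[mid].start) {
      hi = mid - 1;  // addr is below this range
    } else if (addr >= ranges[mid].end) {
      lo = mid + 1;  // addr is above this range
    } else {
      return true;  // start <= addr < end
    }
  }
  return false;
}

int main() {
  std::vector<ToyRange> ranges = {{0x1000, 0x2000}, {0x8000, 0x9000}};
  assert(InAnyRange(ranges, 0x1800));
  assert(!InAnyRange(ranges, 0x3000));
  assert(InAnyRange(ranges, 0x8000));
  assert(!InAnyRange(ranges, 0x9000));
  return 0;
}
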
void GCCompactor::ForwardCompressedPointer(uword heap_base,
                                           CompressedObjectPtr* ptr) {
  ObjectPtr old_target = ptr->Decompress(heap_base);
  if (old_target->IsImmediateOrNewObject()) {
    return;  // Not moved.
  }

  uword old_addr = UntaggedObject::ToAddr(old_target);
  intptr_t lo = 0;
  intptr_t hi = image_page_hi_;
  while (lo <= hi) {
    intptr_t mid = (hi - lo + 1) / 2 + lo;
    if (old_addr < image_page_ranges_[mid].start) {
      hi = mid - 1;
    } else if (old_addr >= image_page_ranges_[mid].end) {
      lo = mid + 1;
    } else {
      return;  // Image page: not moved.
    }
  }

  Page* page = Page::Of(old_target);
  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {
    return;  // Not moved (VM isolate, large page, code page).
  }
  if (page->is_never_evacuate()) {
    return;  // Not moved.
  }

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
  ASSERT(!new_target->IsImmediateOrNewObject());
  *ptr = new_target;
}

void GCCompactor::VisitTypedDataViewPointers(TypedDataViewPtr view,
                                             CompressedObjectPtr* first,
                                             CompressedObjectPtr* last) {
  // Forward the view's fields, then check whether the backing store moved.
  ObjectPtr old_backing = view->untag()->typed_data();
  VisitCompressedPointers(view->heap_base(), first, last);
  ObjectPtr new_backing = view->untag()->typed_data();

  const bool backing_moved = old_backing != new_backing;
  if (backing_moved) {
    // Whether the inner data_ pointer must be updated depends on whether the
    // backing store is internal or external, which cannot be decided until
    // sliding finishes, so queue the view for a later fix-up.
    MutexLocker ml(&typed_data_view_mutex_);
    typed_data_views_.Add(view);
  } else {
    // The backing store did not move, so the inner pointer stays valid.
    if (view->untag()->data_ == nullptr) {
      // ... (debug-only consistency checks for null views; elided) ...
    }
  }
}

void GCCompactor::VisitPointers(ObjectPtr* first, ObjectPtr* last) {
  for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
    ForwardPointer(ptr);
  }
}

#if defined(DART_COMPRESSED_POINTERS)
void GCCompactor::VisitCompressedPointers(uword heap_base,
                                          CompressedObjectPtr* first,
                                          CompressedObjectPtr* last) {
  for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
    ForwardCompressedPointer(heap_base, ptr);
  }
}
#endif

bool GCCompactor::CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) {
  if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
    // Visiting a SuspendState with a copied stack frame needs the stack maps,
    // which may be inconsistent while sliding is still in progress, so such
    // objects are queued and revisited after sliding completes.
    MutexLocker ml(&postponed_suspend_states_mutex_);
    postponed_suspend_states_.Add(suspend_state);
    return false;
  }
  return true;
}

void GCCompactor::VisitHandle(uword addr) {
  FinalizablePersistentHandle* handle =
      reinterpret_cast<FinalizablePersistentHandle*>(addr);
  ForwardPointer(handle->ptr_addr());
}

void GCCompactor::SetupLargePages() {
  large_pages_ = heap_->old_space()->large_pages_;
}

void GCCompactor::ForwardLargePages() {
  MutexLocker ml(&large_pages_mutex_);
  while (large_pages_ != nullptr) {
    Page* page = large_pages_;
    large_pages_ = page->next();
    ml.Unlock();
    page->VisitObjectPointers(this);
    ml.Lock();
  }
  while (fixed_pages_ != nullptr) {
    Page* page = fixed_pages_;
    fixed_pages_ = page->next();
    ml.Unlock();

    // Sweep the never-evacuate page, then forward the pointers in its
    // surviving objects.
    GCSweeper sweeper;
    FreeList* freelist = heap_->old_space()->DataFreeList(0);
    bool page_in_use;
    {
      MutexLocker ml(freelist->mutex());
      page_in_use = sweeper.SweepPage(page, freelist);
    }
    ASSERT(page_in_use);

    page->VisitObjectPointers(this);

    ml.Lock();
  }
}

void GCCompactor::ForwardStackPointers() {
  // ...
}