20 "Force compaction to move every movable object");
  uword Lookup(uword old_addr) const {
    uword block_offset = old_addr & ~kBlockMask;
    intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
    uword preceding_live_bitmask =
        (static_cast<uword>(1) << first_unit_position) - 1;
    uword preceding_live_bitset = live_bitvector_ & preceding_live_bitmask;
    uword preceding_live_bytes = Utils::CountOneBitsWord(preceding_live_bitset)
                                 << kObjectAlignmentLog2;
    return new_address_ + preceding_live_bytes;
  }
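
  // RecordLive sets one bit in live_bitvector_ for each kObjectAlignment-sized
  // unit covered by a marked object starting at old_addr; size_in_units and
  // first_unit_position are the object's size and starting offset expressed
  // in allocation units.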
    uword block_offset = old_addr & ~kBlockMask;
    live_bitvector_ |= ((static_cast<uword>(1) << size_in_units) - 1)
                       << first_unit_position;
    uword block_offset = old_addr & ~kBlockMask;
    return (live_bitvector_ & (static_cast<uword>(1) << first_unit_position)) !=
           0;
  uword live_bitvector_;
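
// Because live_bitvector_ is a single word, one ForwardingBlock covers
// kBitsPerWord allocation units, i.e. kObjectAlignment * kBitsPerWord bytes
// per block.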
    intptr_t page_offset = old_addr & ~kPageMask;
    intptr_t block_number = page_offset / kBlockSize;
    ASSERT(block_number >= 0);
    return &blocks_[block_number];
  DISALLOW_ALLOCATION();
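
// A ForwardingPage is a side table with one ForwardingBlock per kBlockSize
// slice of its page; BlockFor() above indexes it by the offset of old_addr
// within the page.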
  ASSERT(forwarding_page_ == nullptr);
    : isolate_group_(isolate_group),
      compactor_(compactor),
      next_planning_task_(next_planning_task),
      next_setup_task_(next_setup_task),
      next_sliding_task_(next_sliding_task),
      next_forwarding_task_(next_forwarding_task),
      num_tasks_(num_tasks),
      partitions_(partitions),
  void PlanPage(Page* page);
  void SlidePage(Page* page);
  void PlanMoveToContiguousSize(intptr_t size);
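
// GCCompactor::Compact splits the old-space page list into num_tasks
// partitions of roughly pages_per_task consecutive pages. Each partition is
// planned and slid independently, so objects never move between partitions;
// the partitions are stitched back into one page list at the end.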
  SetupImagePageBoundaries();

  intptr_t num_pages = 0;
  for (Page* page = pages; page != nullptr; page = page->next()) {

  intptr_t num_tasks = FLAG_compactor_tasks;
  if (num_pages < num_tasks) {
    num_tasks = num_pages;

  const intptr_t pages_per_task = num_pages / num_tasks;
  intptr_t task_index = 0;
  intptr_t page_index = 0;
  while (task_index < num_tasks) {
    if (page_index % pages_per_task == 0) {
      partitions[task_index].head = page;
      partitions[task_index].tail = nullptr;
      if (prev != nullptr) {

  ASSERT(page_index <= num_pages);
  ASSERT(task_index == num_tasks);
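
  // With --force_evacuation, freshly allocated empty pages are prepended to
  // the head of each partition so that sliding cannot leave any object at
  // its old address; every movable object is forced to move.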
  if (FLAG_force_evacuation) {
    for (intptr_t task_index = 0; task_index < num_tasks && !oom;
         task_index++) {
      const intptr_t pages_per_task = num_pages / num_tasks;
      for (intptr_t j = 0; j < pages_per_task; j++) {
        if (page == nullptr) {

            page->object_end() - page->object_start());

        page->set_next(partitions[task_index].head);
        partitions[task_index].head = page;
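
  // One CompactorTask per partition; all tasks share the atomic phase
  // counters (planning, setup, sliding, forwarding), and the last task runs
  // on the current thread instead of the thread pool.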
  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    if (task_index < (num_tasks - 1)) {

          &next_setup_task, &next_sliding_task, &next_forwarding_task,
          num_tasks, partitions, freelist);

          &next_planning_task, &next_setup_task, &next_sliding_task,
          &next_forwarding_task, num_tasks, partitions, freelist);
291 "ForwardTypedDataViewInternalPointers");
293 for (intptr_t i = 0; i <
length; ++i) {
294 auto raw_view = typed_data_views_[i];
296 raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
302 raw_view->untag()->RecomputeDataFieldForInternalTypedData();
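
  // Internal typed data views hold an interior pointer (data_) into their
  // backing store. Once every backing store has slid, the field is
  // recomputed from the view's forwarded typed_data() and offset.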
  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    ASSERT(partitions[task_index].tail != nullptr);

  ForwardStackPointers();
320 "ForwardPostponedSuspendStatePointers");
323 can_visit_stack_frames_ =
true;
324 const intptr_t
length = postponed_suspend_states_.
length();
325 for (intptr_t i = 0; i <
length; ++i) {
326 auto suspend_state = postponed_suspend_states_[i];
327 suspend_state->untag()->VisitPointers(
this);
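
  // SuspendState frames could not be visited while stack pointers were still
  // unforwarded (see CanVisitSuspendStatePointers below); those objects were
  // queued and their pointers are forwarded here, after ForwardStackPointers.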
  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {

    while (page != nullptr) {

  for (intptr_t task_index = 0; task_index < num_tasks - 1; task_index++) {
    partitions[task_index].tail->set_next(partitions[task_index + 1].head);

  heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
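
  // Finally the per-task partitions are chained back together into a single
  // page list, and the old space's pages_tail_ is pointed at the last
  // partition's tail.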
#ifdef SUPPORT_TIMELINE
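
  // CompactorTask::RunEnteredIsolateGroup claims work by bumping the shared
  // atomic counters: first a planning pass over whole partitions, then a
  // one-time large-page setup, then a sliding pass, and finally the
  // individual pointer-forwarding jobs dispatched through the switch below.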
      intptr_t planning_task = next_planning_task_->fetch_add(1u);
      if (planning_task >= num_tasks_) break;

      Page* head = partitions_[planning_task].head;

      free_current_ = head->object_start();
      free_end_ = head->object_end();

      for (Page* page = head; page != nullptr; page = page->next()) {

    if (next_setup_task_->fetch_add(1u) == 0) {
      compactor_->SetupLargePages();

      intptr_t sliding_task = next_sliding_task_->fetch_add(1u);
      if (sliding_task >= num_tasks_) break;

      Page* head = partitions_[sliding_task].head;

      free_current_ = head->object_start();
      free_end_ = head->object_end();

      for (Page* page = head; page != nullptr; page = page->next()) {

      intptr_t free_remaining = free_end_ - free_current_;
      if (free_remaining != 0) {
        freelist_->Free(free_current_, free_remaining);

      ASSERT(free_page_ != nullptr);
      partitions_[sliding_task].tail = free_page_;

    compactor_->ForwardLargePages();

    bool more_forwarding_tasks = true;
    while (more_forwarding_tasks) {
      intptr_t forwarding_task = next_forwarding_task_->fetch_add(1u);
      switch (forwarding_task) {

          if (ring != nullptr) {

          more_forwarding_tasks = false;
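
// PlanPage walks one page block by block, clearing the page's forwarding
// page and filling in each block's live bits and target address.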
void CompactorTask::PlanPage(Page* page) {
  uword current = page->object_start();

  ASSERT(forwarding_page != nullptr);
  forwarding_page->Clear();
  while (current < end) {
    current = PlanBlock(current, forwarding_page);
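
// SlidePage replays the plan for one page: every block's marked objects are
// moved to the addresses recorded in the forwarding page.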
void CompactorTask::SlidePage(Page* page) {

  ForwardingPage* forwarding_page = page->forwarding_page();
  ASSERT(forwarding_page != nullptr);
  while (current < end) {
    current = SlideBlock(current, forwarding_page);
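
// PlanBlock records the live bits of every marked object in one block and
// sums their sizes; the block's survivors are assigned a contiguous
// destination starting at free_current_ (skipping to the next target page if
// they do not fit), and that destination becomes the block's new_address.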
uword CompactorTask::PlanBlock(uword first_object,
                               ForwardingPage* forwarding_page) {

  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  intptr_t block_live_size = 0;
  uword current = first_object;
  while (current < block_end) {

    intptr_t size = obj->untag()->HeapSize();
    if (obj->untag()->IsMarked()) {
      forwarding_block->RecordLive(current, size);
      ASSERT(static_cast<intptr_t>(forwarding_block->Lookup(current)) ==
             block_live_size);
      block_live_size += size;

  PlanMoveToContiguousSize(block_live_size);
  forwarding_block->set_new_address(free_current_);
  free_current_ += block_live_size;
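
// SlideBlock moves each marked object to the address computed during
// planning: the object is memmove'd, TypedData objects get their interior
// data pointer recomputed, the mark bit is cleared, and the compactor visits
// the moved object's pointers. When the current target page fills up, its
// remainder goes onto the freelist and sliding continues on the next page.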
uword CompactorTask::SlideBlock(uword first_object,
                                ForwardingPage* forwarding_page) {

  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  uword old_addr = first_object;
  while (old_addr < block_end) {

    intptr_t size = old_obj->untag()->HeapSize();
    if (old_obj->untag()->IsMarked()) {
      uword new_addr = forwarding_block->Lookup(old_addr);
      if (new_addr != free_current_) {

        intptr_t free_remaining = free_end_ - free_current_;

        if (free_remaining > 0) {
          freelist_->Free(free_current_, free_remaining);

        free_page_ = free_page_->next();
        ASSERT(free_page_ != nullptr);

        ASSERT(free_current_ == new_addr);

      if (new_addr != old_addr) {

        memmove(reinterpret_cast<void*>(new_addr),
                reinterpret_cast<void*>(old_addr), size);

          static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();

      new_obj->untag()->ClearMarkBit();
      new_obj->untag()->VisitPointers(compactor_);

      ASSERT(free_current_ == new_addr);
      free_current_ += size;

      ASSERT(!forwarding_block->IsLive(old_addr));
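
// PlanMoveToContiguousSize keeps a block's live objects together: if the
// space left in the current target page is smaller than the block's live
// size, planning advances to the next page; the skipped tail is released to
// the freelist during sliding.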
void CompactorTask::PlanMoveToContiguousSize(intptr_t size) {

  intptr_t free_remaining = free_end_ - free_current_;
  if (free_remaining < size) {

    free_page_ = free_page_->next();
    ASSERT(free_page_ != nullptr);

    free_remaining = free_end_ - free_current_;
    ASSERT(free_remaining >= size);
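
// SetupImagePageBoundaries records the address ranges of image pages (the VM
// isolate group's and this isolate group's) in a sorted array so that the
// forwarding code can binary-search them; image pages are not compacted, so
// pointers into them are left untouched.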
void GCCompactor::SetupImagePageBoundaries() {
  MallocGrowableArray<ImagePageRange> ranges(4);

  while (image_page != nullptr) {

                            image_page->object_end()};

    image_page = image_page->next();

  image_page = heap_->old_space()->image_pages_;
  while (image_page != nullptr) {

                            image_page->object_end()};

    image_page = image_page->next();

  ranges.Sort(CompareImagePageRanges);
  intptr_t image_page_count;
  ranges.StealBuffer(&image_page_ranges_, &image_page_count);
  image_page_hi_ = image_page_count - 1;
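
// ForwardPointer rewrites one pointer slot: immediates and new-space objects
// are skipped, addresses that fall inside an image-page range (found by
// binary search over image_page_ranges_) are skipped, and anything else is
// redirected through its page's forwarding page.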
void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
  ObjectPtr old_target = *ptr;
  if (old_target->IsImmediateOrNewObject()) {

  intptr_t hi = image_page_hi_;

    intptr_t mid = (hi - lo + 1) / 2 + lo;

    if (old_addr < image_page_ranges_[mid].start) {

    } else if (old_addr >= image_page_ranges_[mid].end) {

  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));

  ASSERT(!new_target->IsImmediateOrNewObject());
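
// ForwardCompressedPointer is the same operation for compressed pointers,
// which are decompressed relative to heap_base before the lookup and
// reassigned afterwards.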
void GCCompactor::ForwardCompressedPointer(uword heap_base,
                                           CompressedObjectPtr* ptr) {
  ObjectPtr old_target = ptr->Decompress(heap_base);
  if (old_target->IsImmediateOrNewObject()) {

  intptr_t hi = image_page_hi_;

    intptr_t mid = (hi - lo + 1) / 2 + lo;

    if (old_addr < image_page_ranges_[mid].start) {

    } else if (old_addr >= image_page_ranges_[mid].end) {

  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));

  ASSERT(!new_target->IsImmediateOrNewObject());
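
// VisitTypedDataViewPointers forwards a view's pointers and, when the view's
// backing store has moved, queues the view so its interior data_ pointer can
// be recomputed later (in ForwardTypedDataViewInternalPointers above).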
  const bool backing_moved = old_backing != new_backing;

    typed_data_views_.Add(view);

  if (view->untag()->data_ == nullptr) {

  for (ObjectPtr* ptr = first; ptr <= last; ptr++) {

#if defined(DART_COMPRESSED_POINTERS)

    ForwardCompressedPointer(heap_base, ptr);

  if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {

    postponed_suspend_states_.Add(suspend_state);
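
// Large pages are never moved; tasks take them one at a time under
// large_pages_mutex_ and only forward the pointers inside them.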
void GCCompactor::SetupLargePages() {
  large_pages_ = heap_->old_space()->large_pages_;
}

void GCCompactor::ForwardLargePages() {
  MutexLocker ml(&large_pages_mutex_);
  while (large_pages_ != nullptr) {
    Page* page = large_pages_;
    large_pages_ = page->next();

    page->VisitObjectPointers(this);
void GCCompactor::ForwardStackPointers() {