DEFINE_FLAG(int,
            old_gen_growth_space_ratio,
            20,
            "The desired maximum percentage of free space after old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_time_ratio,
            3,
            "The desired maximum percentage of time spent in old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_rate,
            280,
            "The max number of pages the old generation can grow at a time");
DEFINE_FLAG(bool,
            print_free_list_before_gc,
            false,
            "Print free list statistics before a GC");
DEFINE_FLAG(bool,
            print_free_list_after_gc,
            false,
            "Print free list statistics after a GC");
DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions.");
PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
    : heap_(heap),
      num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1),
      freelists_(new FreeList[num_freelists_]),
      // …
      max_capacity_in_words_(max_capacity_in_words),
      // …
      allocated_black_in_words_(0),
      // …
      concurrent_marker_tasks_(0),
      concurrent_marker_tasks_active_(0),
      pause_concurrent_marking_(0),
      // …
      iterating_thread_(nullptr),
      // …
      page_space_controller_(heap,
                             FLAG_old_gen_growth_space_ratio,
                             FLAG_old_gen_growth_rate,
                             FLAG_old_gen_growth_time_ratio),
      // …
      enable_concurrent_mark_(FLAG_concurrent_mark) {
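  // Freelist sharding: Utils::Maximum(FLAG_scavenger_tasks, 1) data freelists
  // plus one executable-code freelist, indexed through kDataFreelist and
  // kExecutableFreelist later in this file (the exact index layout lives in
  // page_space.h). Per-task data shards let parallel scavenger workers
  // promote objects without contending on a single freelist mutex.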
  // …
  for (intptr_t i = 0; i < num_freelists_; i++) {
    // …
  }
  // …
PageSpace::~PageSpace() {
  // …
  FreePages(exec_pages_);
  FreePages(large_pages_);
  FreePages(image_pages_);
  ASSERT(marker_ == nullptr);
  // …
}
intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
  // …
}
void PageSpace::AddPageLocked(Page* page) {
  if (pages_ == nullptr) {
    pages_ = page;
  } else {
    pages_tail_->set_next(page);
  }
  pages_tail_ = page;
}
void PageSpace::AddLargePageLocked(Page* page) {
  if (large_pages_ == nullptr) {
    large_pages_ = page;
  } else {
    large_pages_tail_->set_next(page);
  }
  large_pages_tail_ = page;
}
void PageSpace::AddExecPageLocked(Page* page) {
  if (exec_pages_ == nullptr) {
    exec_pages_ = page;
  } else {
    if (FLAG_write_protect_code) {
      exec_pages_tail_->WriteProtect(false);
    }
    exec_pages_tail_->set_next(page);
    if (FLAG_write_protect_code) {
      exec_pages_tail_->WriteProtect(true);
    }
  }
  exec_pages_tail_ = page;
}
void PageSpace::RemovePageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    pages_ = page->next();
  }
  if (page == pages_tail_) {
    pages_tail_ = previous_page;
  }
}
void PageSpace::RemoveLargePageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    large_pages_ = page->next();
  }
  if (page == large_pages_tail_) {
    large_pages_tail_ = previous_page;
  }
}
void PageSpace::RemoveExecPageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    exec_pages_ = page->next();
  }
  if (page == exec_pages_tail_) {
    exec_pages_tail_ = previous_page;
  }
}
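// All three Remove*Locked helpers above splice a page out of a singly linked
// list: point previous_page->next (or the list head when previous_page is
// nullptr) past the removed page, and pull the tail pointer back when the
// removed page was the tail. The *Locked suffix marks the pages_lock_
// requirement, which callers like FreePage below satisfy.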
Page* PageSpace::AllocatePage(bool is_exec, bool link) {
  {
    MutexLocker ml(&pages_lock_);
    // …
  }
  // …
  if (page == nullptr) {
    // …
  }
  // …
  {
    MutexLocker ml(&pages_lock_);
    // …
    if (is_exec) {
      AddExecPageLocked(page);
    }
    // …
  }
  // …
  page->set_object_end(page->memory_->end());
  if (!is_exec && (heap_ != nullptr) && !heap_->is_vm_isolate()) {
    page->AllocateForwardingPage();
  }
  // …
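// The two separate MutexLocker scopes above reflect the locking discipline:
// capacity is reserved under pages_lock_, the lock is dropped while the
// page's virtual memory is actually allocated (elided), and it is retaken to
// link the page into the right list. Non-executable pages also get their
// forwarding page allocated up front for the compactor, except in the VM
// isolate's heap.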
Page* PageSpace::AllocateLargePage(intptr_t size, bool is_exec) {
  const intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
  {
    MutexLocker ml(&pages_lock_);
    if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) {
      // …
    }
    // …
  }
  // …
  MutexLocker ml(&pages_lock_);
  if (page == nullptr) {
    // …
  }
  // …
  if (actual_size_in_words != page_size_in_words) {
    // …
  }
  if (is_exec) {
    AddExecPageLocked(page);
  } else {
    AddLargePageLocked(page);
  }
  // …
void PageSpace::TruncateLargePage(Page* page,
                                  intptr_t new_object_size_in_bytes) {
  const intptr_t old_object_size_in_bytes =
      page->object_end() - page->object_start();
  ASSERT(new_object_size_in_bytes <= old_object_size_in_bytes);
  // …
  const intptr_t new_page_size_in_words =
      LargePageSizeInWordsFor(new_object_size_in_bytes);
  VirtualMemory* memory = page->memory_;
  const intptr_t old_page_size_in_words = (memory->size() >> kWordSizeLog2);
  if (new_page_size_in_words < old_page_size_in_words) {
    // …
    page->set_object_end(page->object_start() + new_object_size_in_bytes);
  }
}
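// TruncateLargePage shrinks a large page in place: when the (single) object
// on the page now needs fewer OS pages, the tail of the VirtualMemory region
// can be released (elided above) and object_end is pulled back to the new
// size. Nothing moves, so the object's address stays stable.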
void PageSpace::FreePage(Page* page, Page* previous_page) {
  bool is_exec = page->is_executable();
  {
    MutexLocker ml(&pages_lock_);
    // …
    if (is_exec) {
      RemoveExecPageLocked(page, previous_page);
    } else {
      RemovePageLocked(page, previous_page);
    }
  }
  // …
  if (is_exec && !page->is_image()) {
    // …
  }
  // …
void PageSpace::FreeLargePage(Page* page, Page* previous_page) {
  // …
  MutexLocker ml(&pages_lock_);
  // …
  RemoveLargePageLocked(page, previous_page);
  // …
void PageSpace::FreePages(Page* pages) {
  Page* page = pages;
  while (page != nullptr) {
    // …
    if (page->is_executable() && !page->is_image()) {
      // …
    }
    // …
  }
}
uword PageSpace::TryAllocateInFreshPage(intptr_t size,
                                        FreeList* freelist,
                                        bool is_exec,
                                        GrowthPolicy growth_policy,
                                        bool is_locked) {
  // …
  Page* page = AllocatePage(is_exec);
  if (page == nullptr) {
    // …
  }
  // …
  intptr_t free_size = page->object_end() - free_start;
  // …
  if (is_locked) {
    freelist->FreeLocked(free_start, free_size);
  } else {
    freelist->Free(free_start, free_size);
  }
  // …
uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
                                             bool is_exec,
                                             GrowthPolicy growth_policy) {
  // …
  intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
  // …
  after_allocation.capacity_in_words += page_size_in_words;
  // …
  Page* page = AllocateLargePage(size, is_exec);
  if (page != nullptr) {
    // …
  }
  // …
uword PageSpace::TryAllocateInternal(intptr_t size,
                                     FreeList* freelist,
                                     bool is_exec,
                                     GrowthPolicy growth_policy,
                                     bool is_protected,
                                     bool is_locked) {
  // …
  if (is_locked) {
    result = freelist->TryAllocateLocked(size, is_protected);
  } else {
    result = freelist->TryAllocate(size, is_protected);
  }
  // …
  result = TryAllocateInFreshPage(size, freelist, is_exec, growth_policy,
                                  is_locked);
  // …
  result = TryAllocateInFreshLargePage(size, is_exec, growth_policy);
  // …
void PageSpace::AcquireLock(FreeList* freelist) {
  freelist->mutex()->Lock();
}

void PageSpace::ReleaseLock(FreeList* freelist) {
  // …
  freelist->mutex()->Unlock();
}
void PageSpace::PauseConcurrentMarking() {
  // …
  ASSERT(pause_concurrent_marking_.load() == 0);
  pause_concurrent_marking_.store(1);
  while (concurrent_marker_tasks_active_ != 0) {
    // …
  }
}

void PageSpace::ResumeConcurrentMarking() {
  // …
  ASSERT(pause_concurrent_marking_.load() != 0);
  pause_concurrent_marking_.store(0);
  // …
}

void PageSpace::YieldConcurrentMarking() {
  // …
  if (pause_concurrent_marking_.load() != 0) {
    // …
    concurrent_marker_tasks_active_--;
    if (concurrent_marker_tasks_active_ == 0) {
      // …
    }
    while (pause_concurrent_marking_.load() != 0) {
      // …
    }
    concurrent_marker_tasks_active_++;
  }
}
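// The three routines above form a cooperative pause handshake:
//   PauseConcurrentMarking  - publishes the pause flag, then waits until all
//                             active marker tasks have parked.
//   YieldConcurrentMarking  - called periodically by marker tasks; when the
//                             flag is set, a task deregisters itself (waking
//                             the pauser when the active count hits zero),
//                             waits for the flag to clear, and re-registers.
//   ResumeConcurrentMarking - clears the flag and releases the parked tasks.
//
// A minimal standalone sketch of the same pattern (illustrative only; the
// real code synchronizes with the VM's Monitor, not std::mutex):
//
//   std::mutex m;
//   std::condition_variable cv;
//   std::atomic<int> pause{0};
//   int active = 0;
//
//   void Yield() {  // worker side
//     if (pause.load() != 0) {
//       std::unique_lock<std::mutex> l(m);
//       if (--active == 0) cv.notify_all();  // last worker wakes the pauser
//       cv.wait(l, [&] { return pause.load() == 0; });
//       ++active;
//     }
//   }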
  // …
  if (page_ == nullptr) {
    // …
  }
  if (page_ == nullptr) {
    // …
  }
  if (page_ == nullptr) {
    // …
  }
  // …
  explicit ExclusiveCodePageIterator(const PageSpace* space)
      : space_(space), ml_(&space->pages_lock_) {
    space_->MakeIterable();
    page_ = space_->exec_pages_;
  }
  // …
  bool Done() const { return page_ == nullptr; }
  // …
    page_ = page_->next();
  // …
void PageSpace::MakeIterable() const {
  // …
  for (intptr_t i = 0; i < num_freelists_; i++) {
    // …
  }
}

// …
  for (intptr_t i = 0; i < num_freelists_; i++) {
    // …
  }
// …
  isolate_group->GetHeapOldCapacityMaxMetric()->SetValue(
      /* … */);
  // …
    if (it.page()->Contains(addr)) {
      // …
    }
  // …
    if (it.page()->Contains(addr)) {
      // …
    }
  // …
    if (it.page()->Contains(addr)) {
      // …
    }
  // …
    if (!it.page()->is_executable() && it.page()->Contains(addr)) {
      // …
    }
  // …
void PageSpace::AddRegionsToObjectSet(ObjectSet* set) const {
  ASSERT((pages_ != nullptr) || (exec_pages_ != nullptr) ||
         (large_pages_ != nullptr));
  // …
    set->AddRegion(it.page()->object_start(), it.page()->object_end());
  // …
}
  // …
    it.page()->VisitObjects(visitor);
  // …
    if (!it.page()->is_image()) {
      it.page()->VisitObjects(visitor);
    }
  // …
    if (it.page()->is_image()) {
      it.page()->VisitObjects(visitor);
    }
  // …
    it.page()->VisitObjectsUnsafe(visitor);
  // …
    it.page()->VisitObjectPointers(visitor);
  // …
    tail = large_pages_tail_;
  // …
  while (page != nullptr) {
    page->VisitRememberedCards(visitor);
    // …
  }
  // …
    page->ResetProgressBar();
    if (!it.page()->is_image()) {
      it.page()->WriteProtect(read_only);
    }
void PageSpace::PrintToJSONObject(JSONObject* object) const {
  // …
  ASSERT(isolate_group != nullptr);
  // …
  int64_t run_time = isolate_group->UptimeMicros();
  // …
  if (collections() > 0) {
    double avg_time_between_collections =
        run_time_millis / static_cast<double>(collections());
    space.AddProperty("avgCollectionPeriodMillis",
                      avg_time_between_collections);
  } else {
    space.AddProperty("avgCollectionPeriodMillis", 0.0);
  }
  // …
  JSONObject class_list(&heap_map, "classList");
  // …
    JSONObject page_container(&all_pages);
    page_container.AddPropertyF("objectStart", "0x%" Px "",
                                page->object_start());
    JSONArray page_map(&page_container, "objects");
    HeapMapAsJSONVisitor printer(&page_map);
    page->VisitObjects(&printer);
  // …
    JSONObject page_container(&all_pages);
    page_container.AddPropertyF("objectStart", "0x%" Px "",
                                page->object_start());
    JSONArray page_map(&page_container, "objects");
    HeapMapAsJSONVisitor printer(&page_map);
    page->VisitObjects(&printer);
  // …
void PageSpace::WriteProtectCode(bool read_only) {
  if (FLAG_write_protect_code) {
    // …
    while (page != nullptr) {
      // …
      page->WriteProtect(read_only);
      // …
    }
    // …
    while (page != nullptr) {
      if (page->is_executable()) {
        page->WriteProtect(read_only);
      }
      // …
    }
  }
}
bool PageSpace::ShouldStartIdleMarkSweep(int64_t deadline) {
  // …
  int64_t estimated_mark_completion =
      /* … */;
  return estimated_mark_completion <= deadline;
}

bool PageSpace::ShouldPerformIdleMarkCompact(int64_t deadline) {
  // …
  if (FLAG_use_incremental_compactor) {
    // …
  }
  // …
  const intptr_t excess_in_words =
      /* … */;
  const double excess_ratio = static_cast<double>(excess_in_words) /
      /* … */;
  const bool fragmented = excess_ratio > 0.05;
  // …
  intptr_t mark_compact_words_per_micro = mark_words_per_micro_ / 2;
  if (mark_compact_words_per_micro == 0) {
    mark_compact_words_per_micro = 1;  // Prevent division by zero.
  }
  // …
  int64_t estimated_mark_compact_completion =
      /* … */;
  return estimated_mark_compact_completion <= deadline;
}
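// Both idle predicates reduce to the same arithmetic: estimated completion =
// now + remaining_words / words_per_microsecond, answering yes only when that
// lands before the caller's idle deadline. For mark-compact the marking speed
// is halved (a conservative estimate of compaction's extra cost per word)
// with a floor of 1 word/us so the division can never be by zero, and the
// excess_ratio > 0.05 test only proposes compaction when at least ~5% of
// capacity appears to be fragmented free space.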
void PageSpace::IncrementalMarkWithSizeBudget(intptr_t size) {
  if (marker_ != nullptr) {
    // …
  }
}

void PageSpace::IncrementalMarkWithTimeBudget(int64_t deadline) {
  if (marker_ != nullptr) {
    // …
  }
}
void PageSpace::TryReleaseReservation() {
  // …
  if (oom_reservation_ == nullptr) return;
  // …
  oom_reservation_ = nullptr;
  // …
}

// …
  if (oom_reservation_ == nullptr) {
    // …
  }
// …
  if (oom_reservation_ == nullptr) {
    // …
  }
// …
  if (oom_reservation_ != nullptr) {
    // …
  }
// …
// …
  if (FLAG_marker_tasks == 0) return;
// …

void PageSpace::CollectGarbage(Thread* thread, bool compact, bool finalize) {
  // …
  while (tasks() > 0) {
    // …
  }
  // …
  CollectGarbageHelper(thread, compact, finalize);
  // …
}
void PageSpace::CollectGarbageHelper(Thread* thread,
                                     bool compact,
                                     bool finalize) {
  // …
  isolate_group->class_table_allocator()->FreePending();
  isolate_group->ForEachIsolate(
      /* … */);
  // …
  NoSafepointScope no_safepoints(thread);

  if (FLAG_print_free_list_before_gc) {
    for (intptr_t i = 0; i < num_freelists_; i++) {
      // …
    }
  }

  if (FLAG_verify_before_gc) {
    heap_->VerifyGC("Verifying before marking", kForbidMarked);
  }
  // …
  if (marker_ == nullptr) {
    // …
    marker_ = new GCMarker(isolate_group, heap_);
    if (FLAG_use_incremental_compactor) {
      // …
    }
  }
  // …
  allocated_black_in_words_ = 0;
  // …
  if (FLAG_verify_store_buffer) {
    VerifyStoreBuffers("Verifying remembered set after marking");
  }

  if (FLAG_verify_before_gc) {
    heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
  }
  bool new_space_is_swept = false;
  if (FLAG_use_incremental_compactor) {
    // …
  }
  // …
  for (intptr_t i = 0; i < num_freelists_; i++) {
    // …
  }
  // …
  {
    // Executable pages are swept eagerly here rather than handed to the
    // concurrent sweeper.
    Page* prev_page = nullptr;
    Page* page = exec_pages_;
    FreeList* freelist = &freelists_[kExecutableFreelist];
    MutexLocker ml(freelist->mutex());
    while (page != nullptr) {
      Page* next_page = page->next();
      bool page_in_use = sweeper.SweepPage(page, freelist);
      if (page_in_use) {
        prev_page = page;
      } else {
        FreePage(page, prev_page);
      }
      page = next_page;
    }
  }
  {
    MutexLocker ml(&pages_lock_);
    ASSERT(sweep_large_ == nullptr);
    sweep_large_ = large_pages_;
    large_pages_ = large_pages_tail_ = nullptr;
    ASSERT(sweep_regular_ == nullptr);
    // …
    sweep_regular_ = pages_;
    pages_ = pages_tail_ = nullptr;
  }
  if (!new_space_is_swept) {
    // …
  }
  // …
  bool is_concurrent_sweep_running = false;
  // …
    is_concurrent_sweep_running = true;
  } else if (FLAG_concurrent_sweep && has_reservation) {
    ConcurrentSweep(isolate_group);
    is_concurrent_sweep_running = true;
  }
  // …
  if (FLAG_verify_after_gc && !is_concurrent_sweep_running) {
    // …
  }
  // …
  if (FLAG_print_free_list_after_gc) {
    for (intptr_t i = 0; i < num_freelists_; i++) {
      // …
    }
  }
  // …
  if (heap_ != nullptr) {
    // …
  }
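// Shape of a full old-gen collection as laid out above: pending class-table
// frees and per-isolate bookkeeping, optional verification, marking (reusing
// an in-flight concurrent marker_ when one exists), verification of the
// remembered set, eager sweeping of executable pages, then hand-off of data
// and large pages to either the compactor, the concurrent sweeper, or an
// inline sweep, followed by post-GC free-list statistics and heap accounting.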
  // …
        in_store_buffer_(in_store_buffer),
        // …

  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
      // …
      in_store_buffer_->Add(obj);
      // …
    }
  }

#if defined(DART_COMPRESSED_POINTERS)
  // …
#endif

  // …
  ObjectSet* const in_store_buffer_;
  // …
  // …
        in_store_buffer_(in_store_buffer),
        // …

  void VisitObject(ObjectPtr obj) override {
    // …
    if (is_card_remembered_) {
      // …
    }
    // …
  }

  void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
      // …
      if (is_card_remembered_) {
        if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
          FATAL("%s: Old object %#" Px " references new object %#" Px
                ", but the "
                "slot's card is not remembered. Consider using rr to watch the "
                "slot %p and reverse-continue to find the store with a missing "
                "barrier.",
                msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
                ptr);
        }
      } else if (!is_remembered_) {
        FATAL("%s: Old object %#" Px " references new object %#" Px
              ", but it is "
              "not in any store buffer. Consider using rr to watch the "
              "slot %p and reverse-continue to find the store with a missing "
              "barrier.",
              msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
              ptr);
      }
    }
  }
#if defined(DART_COMPRESSED_POINTERS)
  void VisitCompressedPointers(uword heap_base,
                               CompressedObjectPtr* first,
                               CompressedObjectPtr* last) override {
    for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
      ObjectPtr obj = ptr->Decompress(heap_base);
      if (obj->IsHeapObject() && obj->IsNewObject()) {
        if (is_card_remembered_) {
          if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
            FATAL("%s: Old object %#" Px " references new object %#" Px
                  ", but the "
                  "slot's card is not remembered. Consider using rr to watch "
                  "the slot %p and reverse-continue to find the store with a "
                  "missing barrier.",
                  msg_, static_cast<uword>(visiting_),
                  static_cast<uword>(obj), ptr);
          }
        } else if (!is_remembered_) {
          FATAL("%s: Old object %#" Px " references new object %#" Px
                ", but it is "
                "not in any store buffer. Consider using rr to watch the "
                "slot %p and reverse-continue to find the store with a "
                "missing barrier.",
                msg_, static_cast<uword>(visiting_),
                static_cast<uword>(obj), ptr);
        }
      }
    }
  }
#endif
  const ObjectSet* const in_store_buffer_;
  ObjectPtr visiting_;
  bool is_remembered_;
  bool is_card_remembered_;
};
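// Store-buffer verification is a two-pass protocol over one ObjectSet: the
// Collect visitor records every object currently sitting in a store buffer,
// then the Check visitor walks old-space objects and requires that any
// old->new pointer is covered either by a remembered bit (is_remembered_) or,
// for card-remembered pages, by a remembered card for the slot. The FATAL
// messages intentionally point at rr's reverse-continue as the way to find
// the store that skipped its write barrier.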
void PageSpace::VerifyStoreBuffers(const char* msg) {
  // …
  StackZone stack_zone(thread);
  Zone* zone = stack_zone.GetZone();

  ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
  heap_->AddRegionsToObjectSet(in_store_buffer);
  {
    CollectStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
    // …
  }
  {
    CheckStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
    // …
  }
}
void PageSpace::SweepNew() {
  // …
    free += sweeper.SweepNewPage(page);
  // …
}
void PageSpace::SweepLarge() {
  // …
  MutexLocker ml(&pages_lock_);
  while (sweep_large_ != nullptr) {
    Page* page = sweep_large_;
    sweep_large_ = page->next();
    page->set_next(nullptr);
    // …
    intptr_t words_to_end = sweeper.SweepLargePage(page);
    if (words_to_end == 0) {
      // …
    } else {
      // …
      AddLargePageLocked(page);
    }
    // …
  }
  // …
}
void PageSpace::Sweep(bool exclusive) {
  // …
  const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
  // …
  for (intptr_t i = 0; i < num_shards; i++) {
    // …
  }
  // …
  MutexLocker ml(&pages_lock_);
  while (sweep_regular_ != nullptr) {
    Page* page = sweep_regular_;
    sweep_regular_ = page->next();
    page->set_next(nullptr);
    // …
    freelist->mutex()->Lock();
    // …
    bool page_in_use = sweeper.SweepPage(page, freelist);
    // …
    freelist->mutex()->Unlock();
    // …
      AddPageLocked(page);
    // …
  }
  // …
  for (intptr_t i = 0; i < num_shards; i++) {
    // …
  }
}
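// Data-page sweeping is sharded like scavenging: num_shards mirrors
// FLAG_scavenger_tasks, and each page's reclaimed runs are freed into one of
// the per-shard data freelists, each taken under its own mutex above. Pages
// still holding live objects are re-linked via AddPageLocked; the elided path
// presumably frees pages that come back entirely empty, mirroring the
// executable-page loop earlier.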
void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) {
  // …
}
void PageSpace::Compact(Thread* thread) {
  // …
  compactor.Compact(pages_, &freelists_[kDataFreelist], &pages_lock_);
  // …
  if (FLAG_verify_after_gc) {
    heap_->VerifyGC("Verifying after compacting", kForbidMarked);
  }
}
uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
  // …
  intptr_t remaining = freelist->end() - freelist->top();
  // …
  FreeListElement* block = freelist->TryAllocateLargeLocked(size);
  if (block == nullptr) {
    // …
    return TryAllocateInFreshPage(size, freelist, false /* exec */,
                                  /* … */);
  }
  intptr_t block_size = block->HeapSize();
  if (remaining > 0) {
    // …
    freelist->FreeLocked(freelist->top(), remaining);
  }
  freelist->set_top(reinterpret_cast<uword>(block));
  freelist->set_end(freelist->top() + block_size);
  // …
  remaining = block_size;
  // …
  if (freelist->top() < freelist->end()) {
    // …
    *reinterpret_cast<uword*>(freelist->top()) = 0;
  }
  // …
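// Bump allocation over a free-list block: take one large FreeListElement,
// return the unused tail of the previous bump region to the freelist, then
// serve subsequent allocations by bumping top toward end. The trailing store
// of 0 re-terminates the region so heap walkers can still parse it. E.g. for
// a block at address 0x10000 with HeapSize() == 4096: top = 0x10000,
// end = 0x11000, and a 64-byte allocation returns 0x10000 and bumps top to
// 0x10040.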
uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
  // …
  freelist->AddUnaccountedSize(size);
  // …
  return TryAllocateDataBumpLocked(freelist, size);
}
uword PageSpace::AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size) {
  // …
}
void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
  // …
  pointer = reinterpret_cast<void*>(reinterpret_cast<uword>(pointer) - offset);
  // …
  VirtualMemory* memory = VirtualMemory::ForImagePage(pointer, size);
  ASSERT(memory != nullptr);
  // …
  if (is_executable) {
    // …
  }
  // …
  page->memory_ = memory;
  page->next_ = nullptr;
  page->forwarding_page_ = nullptr;
  page->card_table_ = nullptr;
  page->progress_bar_ = 0;
  page->owner_ = nullptr;
  // …
  page->survivor_end_ = 0;
  page->resolved_top_ = 0;
  page->live_bytes_ = 0;
  // …
  page->next_ = image_pages_;
  image_pages_ = page;
}
bool PageSpace::IsObjectFromImagePages(ObjectPtr object) {
  // …
  Page* image_page = image_pages_;
  while (image_page != nullptr) {
    if (image_page->Contains(object_addr)) {
      return true;
    }
    image_page = image_page->next();
  }
  return false;
}
PageSpaceController::PageSpaceController(Heap* heap,
                                         int heap_growth_ratio,
                                         int heap_growth_max,
                                         int garbage_collection_time_ratio)
    : heap_(heap),
      heap_growth_ratio_(heap_growth_ratio),
      desired_utilization_((100.0 - heap_growth_ratio) / 100.0),
      heap_growth_max_(heap_growth_max),
      garbage_collection_time_ratio_(garbage_collection_time_ratio),
      idle_gc_threshold_in_words_(0) {
  const intptr_t growth_in_pages = heap_growth_max / 2;
  RecordUpdate(last_usage_, last_usage_, growth_in_pages, "initial");
}
// Each of the three threshold predicates bails out early when
// heap_growth_ratio_ is 100, i.e. when growth is unconstrained:
bool PageSpaceController::ReachedHardThreshold(SpaceUsage after) const {
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  // …
}

bool PageSpaceController::ReachedSoftThreshold(SpaceUsage after) const {
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  // …
}

bool PageSpaceController::ReachedIdleThreshold(SpaceUsage current) const {
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  // …
}
void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before,
                                                    SpaceUsage after,
                                                    int64_t start,
                                                    int64_t end) {
  // …
  const intptr_t allocated_since_previous_gc =
      /* … */;
  // …
  if (allocated_since_previous_gc > 0) {
    // …
    const double k = Utils::Minimum(
        1.0, garbage / static_cast<double>(allocated_since_previous_gc));
    const int garbage_ratio = static_cast<int>(k * 100);
    // …
    // A GC is "worthwhile" if at least fraction t of the heap is garbage.
    double t = 1.0 - desired_utilization_;
    // Demand more free space if recent GCs exceeded their time budget.
    if (gc_time_fraction > garbage_collection_time_ratio_) {
      t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0;
    }
    // …
    const intptr_t grow_pages =
        (static_cast<intptr_t>(after.CombinedUsedInWords() /
                               desired_utilization_) -
         after.CombinedUsedInWords()) /
        kPageSizeInWords;
    if (garbage_ratio == 0) {
      // …
      grow_heap =
          Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
    } else if (garbage_collection_time_ratio_ == 0) {
      // …
      grow_heap =
          Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
    } else {
      // Binary-search for the smallest growth at which the next GC is still
      // projected to be worthwhile.
      intptr_t max = heap_growth_max_;
      intptr_t min = 0;
      intptr_t local_grow_heap = 0;
      while (min < max) {
        local_grow_heap = (max + min) / 2;
        const intptr_t limit =
            /* … */;
        const intptr_t allocated_before_next_gc =
            /* … */;
        const double estimated_garbage = k * allocated_before_next_gc;
        if (t <= estimated_garbage / limit) {
          max = local_grow_heap - 1;
        } else {
          min = local_grow_heap + 1;
        }
      }
      local_grow_heap = (max + min) / 2;
      grow_heap = local_grow_heap;
    }
  // …
  if (grow_heap >= heap_growth_max_) {
    // …
  }
  // …
  last_usage_ = after;
  // …
  intptr_t max_capacity_in_words = heap_->old_space()->max_capacity_in_words_;
  if (max_capacity_in_words != 0) {
    // …
    const double f =
        /* … */ / static_cast<double>(max_capacity_in_words);
    // …
    grow_heap = static_cast<intptr_t>(grow_heap * f);
    // …
  }
  // …
  RecordUpdate(before, after, grow_heap, "gc");
}
void PageSpaceController::EvaluateAfterLoading(SpaceUsage after) {
  // …
  intptr_t growth_in_pages;
  if (desired_utilization_ == 0.0) {
    growth_in_pages = heap_growth_max_;
  } else {
    growth_in_pages =
        (static_cast<intptr_t>(after.CombinedUsedInWords() /
                               desired_utilization_) -
         after.CombinedUsedInWords()) /
        kPageSizeInWords;
  }
  // …
  growth_in_pages =
      Utils::Minimum(static_cast<intptr_t>(heap_growth_max_), growth_in_pages);
  // …
  RecordUpdate(after, after, growth_in_pages, "loaded");
}
void PageSpaceController::RecordUpdate(SpaceUsage before,
                                       SpaceUsage after,
                                       intptr_t growth_in_pages,
                                       const char* reason) {
  // …
  intptr_t threshold =
      /* … */;
  // …
  bool concurrent_mark = FLAG_concurrent_mark && (FLAG_marker_tasks != 0);
  if (concurrent_mark) {
    soft_gc_threshold_in_words_ = threshold;
    // …
  } else {
    hard_gc_threshold_in_words_ = threshold;
    // …
  }
  // …
  idle_gc_threshold_in_words_ =
      /* … */;

#if defined(SUPPORT_TIMELINE)
  // …
  if (thread != nullptr) {
    // …
    tbes.SetNumArguments(6);
    tbes.CopyArgument(0, "Reason", reason);
    tbes.FormatArgument(1, "Before.CombinedUsed (kB)", "%" Pd "",
                        /* … */);
    tbes.FormatArgument(2, "After.CombinedUsed (kB)", "%" Pd "",
                        /* … */);
    tbes.FormatArgument(3, "Hard Threshold (kB)", "%" Pd "",
                        /* … */);
    tbes.FormatArgument(4, "Soft Threshold (kB)", "%" Pd "",
                        /* … */);
    tbes.FormatArgument(5, "Idle Threshold (kB)", "%" Pd "",
                        /* … */);
  }
#endif

  if (FLAG_log_growth || FLAG_verbose_gc) {
    THR_Print("%s: hard_threshold=%" Pd "MB, soft_threshold=%" Pd
              "MB, idle_threshold=%" Pd "MB, reason=%s\n",
              /* … */);
  }
}
void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start,
                                                                 int64_t end) {
  Entry entry;
  entry.start = start;
  entry.end = end;
  history_.Add(entry);
}

int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() {
  int64_t gc_time = 0;
  int64_t total_time = 0;
  for (int i = 0; i < history_.Size() - 1; i++) {
    Entry current = history_.Get(i);
    Entry previous = history_.Get(i + 1);
    gc_time += current.end - current.start;
    total_time += current.end - previous.end;
  }
  if (total_time == 0) {
    return 0;
  }
  ASSERT(total_time >= gc_time);
  int result = static_cast<int>(
      (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100);
  return result;
}
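// Worked example (hypothetical numbers): with two history entries whose GCs
// ended at t=100ms and t=200ms, the newer one having taken 3ms, the single
// loop iteration gives gc_time = 3ms over total_time = 100ms between the two
// ends, i.e. a 3% fraction. The result feeds back into the controller as
// gc_time_fraction in EvaluateGarbageCollection above.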