DEFINE_FLAG(int,
            old_gen_growth_space_ratio,
            20,
            "The desired maximum percentage of free space after old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_time_ratio,
            3,
            "The desired maximum percentage of time spent in old gen GC");
DEFINE_FLAG(int,
            old_gen_growth_rate,
            280,
            "The max number of pages the old generation can grow at a time");
DEFINE_FLAG(bool,
            print_free_list_before_gc,
            false,
            "Print free list statistics before a GC");
DEFINE_FLAG(bool,
            print_free_list_after_gc,
            false,
            "Print free list statistics after a GC");
DEFINE_FLAG(bool,
            log_growth,
            false,
            "Log PageSpace growth policy decisions.");
PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
    : heap_(heap),
      num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1),
      freelists_(new FreeList[num_freelists_]),
      max_capacity_in_words_(max_capacity_in_words),
      allocated_black_in_words_(0),
      concurrent_marker_tasks_(0),
      concurrent_marker_tasks_active_(0),
      pause_concurrent_marking_(0),
      iterating_thread_(nullptr),
      page_space_controller_(heap,
                             FLAG_old_gen_growth_space_ratio,
                             FLAG_old_gen_growth_rate,
                             FLAG_old_gen_growth_time_ratio),
      enable_concurrent_mark_(FLAG_concurrent_mark) {
  // ...
  for (intptr_t i = 0; i < num_freelists_; i++) {
    freelists_[i].Reset();
  }
  // ...
}

PageSpace::~PageSpace() {
  // ...
  FreePages(exec_pages_);
  FreePages(large_pages_);
  FreePages(image_pages_);
  ASSERT(marker_ == nullptr);
  // ...
}
intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
  // ...
}

void PageSpace::AddPageLocked(Page* page) {
  if (pages_ == nullptr) {
    pages_ = page;
  } else {
    pages_tail_->set_next(page);
  }
  pages_tail_ = page;
}

void PageSpace::AddLargePageLocked(Page* page) {
  if (large_pages_ == nullptr) {
    large_pages_ = page;
  } else {
    large_pages_tail_->set_next(page);
  }
  large_pages_tail_ = page;
}

void PageSpace::AddExecPageLocked(Page* page) {
  if (exec_pages_ == nullptr) {
    exec_pages_ = page;
  } else {
    if (FLAG_write_protect_code) {
      exec_pages_tail_->WriteProtect(false);
    }
    exec_pages_tail_->set_next(page);
    if (FLAG_write_protect_code) {
      exec_pages_tail_->WriteProtect(true);
    }
  }
  exec_pages_tail_ = page;
}

void PageSpace::RemovePageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    pages_ = page->next();
  }
  if (page == pages_tail_) {
    pages_tail_ = previous_page;
  }
}

void PageSpace::RemoveLargePageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    large_pages_ = page->next();
  }
  if (page == large_pages_tail_) {
    large_pages_tail_ = previous_page;
  }
}

void PageSpace::RemoveExecPageLocked(Page* page, Page* previous_page) {
  if (previous_page != nullptr) {
    previous_page->set_next(page->next());
  } else {
    exec_pages_ = page->next();
  }
  if (page == exec_pages_tail_) {
    exec_pages_tail_ = previous_page;
  }
}
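// The Add*/Remove*PageLocked helpers above maintain three singly linked page
// lists (pages_, exec_pages_, large_pages_), each with a tail pointer so that
// appending stays O(1). The "Locked" suffix indicates that callers must hold
// pages_lock_, as the allocation paths below do via MutexLocker.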
Page* PageSpace::AllocatePage(bool is_exec, bool link) {
  {
    MutexLocker ml(&pages_lock_);
    // ...
  }
  // ...
  if (page == nullptr) {
    // ...
  }
  // ...
  {
    MutexLocker ml(&pages_lock_);
    // ...
    AddExecPageLocked(page);
    // ...
  }
  // ...
  page->set_object_end(page->memory_->end());
  if (!is_exec && (heap_ != nullptr) && !heap_->is_vm_isolate()) {
    page->AllocateForwardingPage();
  }
  // ...
}

Page* PageSpace::AllocateLargePage(intptr_t size, bool is_exec) {
  const intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
  {
    MutexLocker ml(&pages_lock_);
    if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) {
      // ...
    }
    // ...
  }
  // ...
  {
    MutexLocker ml(&pages_lock_);
    if (page == nullptr) {
      // ...
    }
    // ...
    if (actual_size_in_words != page_size_in_words) {
      // ...
    }
    if (is_exec) {
      AddExecPageLocked(page);
    } else {
      AddLargePageLocked(page);
    }
  }
  // ...
  page->set_object_end(page->object_start() + size);
  // ...
}

void PageSpace::TruncateLargePage(Page* page,
                                  intptr_t new_object_size_in_bytes) {
  const intptr_t old_object_size_in_bytes =
      page->object_end() - page->object_start();
  ASSERT(new_object_size_in_bytes <= old_object_size_in_bytes);
  const intptr_t new_page_size_in_words =
      LargePageSizeInWordsFor(new_object_size_in_bytes);
  VirtualMemory* memory = page->memory_;
  const intptr_t old_page_size_in_words = (memory->size() >> kWordSizeLog2);
  if (new_page_size_in_words < old_page_size_in_words) {
    // ...
    page->set_object_end(page->object_start() + new_object_size_in_bytes);
  }
}
void PageSpace::FreePage(Page* page, Page* previous_page) {
  bool is_exec = page->is_executable();
  {
    MutexLocker ml(&pages_lock_);
    // ...
    if (is_exec) {
      RemoveExecPageLocked(page, previous_page);
    } else {
      RemovePageLocked(page, previous_page);
    }
  }
  // ...
  if (is_exec && !page->is_image()) {
    // ...
  }
  // ...
}

void PageSpace::FreeLargePage(Page* page, Page* previous_page) {
  // ...
  MutexLocker ml(&pages_lock_);
  // ...
  RemoveLargePageLocked(page, previous_page);
  // ...
}

void PageSpace::FreePages(Page* pages) {
  Page* page = pages;
  while (page != nullptr) {
    // ...
    if (page->is_executable() && !page->is_image()) {
      // ...
    }
    // ...
  }
}
uword PageSpace::TryAllocateInFreshPage(intptr_t size,
                                        FreeList* freelist,
                                        bool is_exec,
                                        GrowthPolicy growth_policy,
                                        bool is_locked) {
  // ...
  Page* page = AllocatePage(is_exec);
  if (page == nullptr) {
    // ...
  }
  // ...
  intptr_t free_size = page->object_end() - free_start;
  // ...
  freelist->FreeLocked(free_start, free_size);
  // ...
  freelist->Free(free_start, free_size);
  // ...
}

uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
                                             bool is_exec,
                                             GrowthPolicy growth_policy) {
  // ...
  intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
  // ...
  after_allocation.capacity_in_words += page_size_in_words;
  // ...
  Page* page = AllocateLargePage(size, is_exec);
  if (page != nullptr) {
    // ...
  }
  // ...
}

uword PageSpace::TryAllocateInternal(intptr_t size,
                                     FreeList* freelist,
                                     bool is_exec,
                                     GrowthPolicy growth_policy,
                                     bool is_protected,
                                     bool is_locked) {
  // ...
  result = freelist->TryAllocateLocked(size, is_protected);
  // ...
  result = freelist->TryAllocate(size, is_protected);
  // ...
  result = TryAllocateInFreshPage(size, freelist, is_exec, growth_policy,
                                  is_locked);
  // ...
  result = TryAllocateInFreshLargePage(size, is_exec, growth_policy);
  // ...
}

void PageSpace::AcquireLock(FreeList* freelist) {
  freelist->mutex()->Lock();
}

void PageSpace::ReleaseLock(FreeList* freelist) {
  // ...
  freelist->mutex()->Unlock();
}
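// The allocation path visible above falls back in stages: requests that fit
// the free lists try the (possibly pre-locked) free list and then a fresh
// regular page, while oversized requests go straight to a fresh large page.
// A rough sketch of the shape, using only names that appear in the fragments
// above (illustrative, not the verbatim implementation):
//
//   uword result = 0;
//   if (IsAllocatableViaFreeLists(size)) {
//     result = is_locked ? freelist->TryAllocateLocked(size, is_protected)
//                        : freelist->TryAllocate(size, is_protected);
//     if (result == 0) {
//       result = TryAllocateInFreshPage(size, freelist, is_exec,
//                                       growth_policy, is_locked);
//     }
//   } else {
//     result = TryAllocateInFreshLargePage(size, is_exec, growth_policy);
//   }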
void PageSpace::PauseConcurrentMarking() {
  // ...
  ASSERT(pause_concurrent_marking_.load() == 0);
  pause_concurrent_marking_.store(1);
  while (concurrent_marker_tasks_active_ != 0) {
    // ...
  }
}

void PageSpace::ResumeConcurrentMarking() {
  // ...
  ASSERT(pause_concurrent_marking_.load() != 0);
  pause_concurrent_marking_.store(0);
  // ...
}

void PageSpace::YieldConcurrentMarking() {
  // ...
  if (pause_concurrent_marking_.load() != 0) {
    concurrent_marker_tasks_active_--;
    if (concurrent_marker_tasks_active_ == 0) {
      // ...
    }
    while (pause_concurrent_marking_.load() != 0) {
      // ...
    }
    concurrent_marker_tasks_active_++;
  }
}
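// Pause/Resume/Yield form a simple handshake: the pausing thread sets
// pause_concurrent_marking_ and waits for concurrent_marker_tasks_active_ to
// drop to zero; each marker task that observes the flag in
// YieldConcurrentMarking deregisters itself (notifying when it is the last
// one), waits for the flag to clear, and then re-registers as active.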
// BasePageIterator: when the current page list is exhausted, fall through to
// the next list.
  if (page_ == nullptr) {
    // ...
  }
  if (page_ == nullptr) {
    // ...
  }
  if (page_ == nullptr) {
    // ...
  }

// ExclusiveCodePageIterator: iterates the executable pages while holding the
// pages lock.
  explicit ExclusiveCodePageIterator(const PageSpace* space)
      : space_(space), ml_(&space->pages_lock_) {
    space_->MakeIterable();
    page_ = space_->exec_pages_;
  }
  bool Done() const { return page_ == nullptr; }
  void Advance() {
    // ...
    page_ = page_->next();
  }

void PageSpace::MakeIterable() const {
  // ...
  for (intptr_t i = 0; i < num_freelists_; i++) {
    freelists_[i].MakeIterable();
  }
}

// The bump-allocation release helper walks every free list in the same way:
  for (intptr_t i = 0; i < num_freelists_; i++) {
    // ...
  }
void PageSpace::UpdateMaxCapacityLocked() {
  // ...
  isolate_group->GetHeapOldCapacityMaxMetric()->SetValue(
      /* ... */);
}

// Contains / CodeContains / ContainsUnsafe: walk the pages with an exclusive
// page iterator and test whether any page covers the address.
  if (it.page()->Contains(addr)) { /* ... */ }
  if (it.page()->Contains(addr)) { /* ... */ }
  if (it.page()->Contains(addr)) { /* ... */ }

// DataContains: the same walk, skipping executable pages.
  if (!it.page()->is_executable() && it.page()->Contains(addr)) { /* ... */ }

void PageSpace::AddRegionsToObjectSet(ObjectSet* set) const {
  ASSERT((pages_ != nullptr) || (exec_pages_ != nullptr) ||
         (large_pages_ != nullptr));
  // ...
  set->AddRegion(it.page()->object_start(), it.page()->object_end());
}

// VisitObjects / VisitObjectsNoImagePages / VisitObjectsImagePages /
// VisitObjectsUnsafe / VisitObjectPointers: iterate the pages and forward the
// visitor to each one.
  it.page()->VisitObjects(visitor);
  if (!it.page()->is_image()) {
    it.page()->VisitObjects(visitor);
  }
  if (it.page()->is_image()) {
    it.page()->VisitObjects(visitor);
  }
  it.page()->VisitObjectsUnsafe(visitor);
  it.page()->VisitObjectPointers(visitor);
void PageSpace::VisitRememberedCards(ObjectPointerVisitor* visitor) const {
  // ...
  tail = large_pages_tail_;
  // ...
  while (page != nullptr) {
    page->VisitRememberedCards(visitor);
    if (page == tail) break;
    // ...
  }
}

void PageSpace::ResetProgressBars() const {
  for (Page* page = large_pages_; page != nullptr; page = page->next()) {
    page->ResetProgressBar();
  }
}

void PageSpace::WriteProtect(bool read_only) {
  // ...
  if (!it.page()->is_image()) {
    it.page()->WriteProtect(read_only);
  }
  // ...
}

void PageSpace::PrintToJSONObject(JSONObject* object) const {
  // ...
  ASSERT(isolate_group != nullptr);
  // ...
  if (collections() > 0) {
    int64_t run_time = isolate_group->UptimeMicros();
    // ...
    double avg_time_between_collections =
        run_time_millis / static_cast<double>(collections());
    space.AddProperty("avgCollectionPeriodMillis",
                      avg_time_between_collections);
  } else {
    space.AddProperty("avgCollectionPeriodMillis", 0.0);
  }
  // ...
}

void PageSpace::PrintHeapMapToJSONStream(IsolateGroup* isolate_group,
                                         JSONStream* stream) const {
  // ...
  JSONObject class_list(&heap_map, "classList");
  // ...
  for (Page* page = pages_; page != nullptr; page = page->next()) {
    // ...
    page_container.AddPropertyF(/* ... */,
                                page->object_start());
    JSONArray page_map(&page_container, "objects");
    HeapMapAsJSONVisitor printer(&page_map);
    page->VisitObjects(&printer);
  }
  for (Page* page = exec_pages_; page != nullptr; page = page->next()) {
    // ...
    page_container.AddPropertyF(/* ... */,
                                page->object_start());
    JSONArray page_map(&page_container, "objects");
    HeapMapAsJSONVisitor printer(&page_map);
    page->VisitObjects(&printer);
  }
}
void PageSpace::WriteProtectCode(bool read_only) {
  if (FLAG_write_protect_code) {
    // ...
    Page* page = exec_pages_;
    while (page != nullptr) {
      ASSERT(page->is_executable());
      page->WriteProtect(read_only);
      page = page->next();
    }
    page = large_pages_;
    while (page != nullptr) {
      if (page->is_executable()) {
        page->WriteProtect(read_only);
      }
      page = page->next();
    }
  }
}
bool PageSpace::ShouldStartIdleMarkSweep(int64_t deadline) {
  // ...
  int64_t estimated_mark_completion =
      /* ... */;
  return estimated_mark_completion <= deadline;
}

bool PageSpace::ShouldPerformIdleMarkCompact(int64_t deadline) {
  // ...
  const intptr_t excess_in_words =
      /* ... */;
  const double excess_ratio = static_cast<double>(excess_in_words) /
                              /* ... */;
  const bool fragmented = excess_ratio > 0.05;
  // ...
  intptr_t mark_compact_words_per_micro = mark_words_per_micro_ / 2;
  if (mark_compact_words_per_micro == 0) {
    mark_compact_words_per_micro = 1;  // Avoid division by zero.
  }
  // ...
  int64_t estimated_mark_compact_completion =
      /* ... */;
  return estimated_mark_compact_completion <= deadline;
}

// ...
  if (marker_ != nullptr) { /* ... */ }
  // ...
  if (marker_ != nullptr) { /* ... */ }
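// Both idle-GC predicates appear to reduce to the same test: estimate when
// the work would finish at the current marking speed (mark-compact is taken
// to be roughly half as fast, hence mark_words_per_micro_ / 2 above) and
// start the collection only if that estimate is no later than the supplied
// deadline. ShouldPerformIdleMarkCompact also computes a fragmentation
// signal, treating more than 5% excess as fragmented.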
void PageSpace::TryReleaseReservation() {
  // ...
  if (oom_reservation_ == nullptr) return;
  uword addr = reinterpret_cast<uword>(oom_reservation_);
  intptr_t size = oom_reservation_->HeapSize();
  oom_reservation_ = nullptr;
  freelists_[kDataFreelist].Free(addr, size);
}

// The remaining reservation helpers only act when a reservation is (or is
// not) present:
  if (oom_reservation_ == nullptr) {
    // ...
  }
  // ...
  if (oom_reservation_ == nullptr) {
    // ...
  }
  // ...
  if (oom_reservation_ != nullptr) {
    // ...
  }

// ...
  if (FLAG_marker_tasks == 0) return;
  // ...

// Wait for outstanding helper tasks before proceeding with the collection.
  while (tasks() > 0) {
    // ...
  }
  CollectGarbageHelper(thread, compact, finalize);
  // ...

void PageSpace::CollectGarbageHelper(Thread* thread,
                                     bool compact,
                                     bool finalize) {
  // ...
  isolate_group->class_table_allocator()->FreePending();
  isolate_group->ForEachIsolate(
      /* ... */);
  NoSafepointScope no_safepoints(thread);

  if (FLAG_print_free_list_before_gc) {
    for (intptr_t i = 0; i < num_freelists_; i++) {
      // ...
      freelists_[i].Print();
    }
  }

  if (FLAG_verify_before_gc) {
    heap_->VerifyGC("Verifying before marking",
                    /* ... */);
  }
  // ...
  if (marker_ == nullptr) {
    // ...
    marker_ = new GCMarker(isolate_group, heap_);
  }
  // ...
  allocated_black_in_words_ = 0;
  // ...
  for (intptr_t i = 0; i < num_freelists_; i++) {
    freelists_[i].Reset();
  }
  // ...
  if (FLAG_verify_before_gc) {
    heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
  }

  // Sweep the executable pages eagerly, freeing the ones that are empty.
  {
    Page* prev_page = nullptr;
    Page* page = exec_pages_;
    FreeList* freelist = &freelists_[kExecutableFreelist];
    MutexLocker ml(freelist->mutex());
    while (page != nullptr) {
      Page* next_page = page->next();
      bool page_in_use = sweeper.SweepPage(page, freelist);
      if (page_in_use) {
        prev_page = page;
      } else {
        FreePage(page, prev_page);
      }
      page = next_page;
    }
  }

  // Hand the large and regular page lists over to the sweeper.
  {
    MutexLocker ml(&pages_lock_);
    ASSERT(sweep_large_ == nullptr);
    sweep_large_ = large_pages_;
    large_pages_ = large_pages_tail_ = nullptr;
    ASSERT(sweep_regular_ == nullptr);
    // ...
    sweep_regular_ = pages_;
    pages_ = pages_tail_ = nullptr;
  }

  if (compact) {
    Compact(thread);
    // ...
  } else if (FLAG_concurrent_sweep && has_reservation) {
    ConcurrentSweep(isolate_group);
  } else {
    // ...
  }
  // ...
  if (FLAG_verify_after_gc && can_verify) {
    // ...
  }
  // ...
  if (FLAG_print_free_list_after_gc) {
    for (intptr_t i = 0; i < num_freelists_; i++) {
      // ...
      freelists_[i].Print();
    }
  }
  // ...
  if (heap_ != nullptr) {
    // ...
  }
}
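// Order of operations visible in CollectGarbageHelper: optional pre-GC
// verification and free-list printing, marking (the GCMarker is created
// lazily on first use), an eager sweep of the executable pages, handing the
// regular and large page lists to the sweeper (concurrently when
// FLAG_concurrent_sweep allows and a reservation is available), and finally
// optional post-GC verification and free-list printing.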
void PageSpace::SweepNew() {
  // ...
  free += sweeper.SweepNewPage(page);
  // ...
}

void PageSpace::SweepLarge() {
  // ...
  MutexLocker ml(&pages_lock_);
  while (sweep_large_ != nullptr) {
    Page* page = sweep_large_;
    sweep_large_ = page->next();
    page->set_next(nullptr);
    // ...
    intptr_t words_to_end = sweeper.SweepLargePage(page);
    if (words_to_end == 0) {
      // ...
    } else {
      // ...
      AddLargePageLocked(page);
    }
  }
}

void PageSpace::Sweep(bool exclusive) {
  // ...
  const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
  for (intptr_t i = 0; i < num_shards; i++) {
    // ...
  }
  // ...
  MutexLocker ml(&pages_lock_);
  while (sweep_regular_ != nullptr) {
    Page* page = sweep_regular_;
    sweep_regular_ = page->next();
    page->set_next(nullptr);
    // ...
    freelist->mutex()->Lock();
    bool page_in_use = sweeper.SweepPage(page, freelist);
    // ...
    freelist->mutex()->Unlock();
    // ...
    if (page_in_use) {
      AddPageLocked(page);
    }
    // ...
  }
  // ...
  for (intptr_t i = 0; i < num_shards; i++) {
    // ...
  }
}
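// Sweep() appears to distribute the regular pages across num_shards =
// Utils::Maximum(FLAG_scavenger_tasks, 1) data free lists, matching the
// per-shard free lists allocated in the constructor, and takes each free
// list's mutex only around the SweepPage call that refills it.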
void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) {
  // ...
}

void PageSpace::Compact(Thread* thread) {
  // ...
  compactor.Compact(pages_, &freelists_[kDataFreelist], &pages_lock_);
  // ...
  if (FLAG_verify_after_gc) {
    heap_->VerifyGC("Verifying after compacting", kForbidMarked);
  }
}
uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
  // ...
  if (!IsAllocatableViaFreeLists(size)) {
    return TryAllocateDataLocked(freelist, size, kForceGrowth);
  }

  intptr_t remaining = freelist->end() - freelist->top();
  if (remaining < size) {
    FreeListElement* block = freelist->TryAllocateLargeLocked(size);
    if (block == nullptr) {
      // ...
      return TryAllocateInFreshPage(size, freelist, false /* exec */,
                                    kForceGrowth, true /* is_locked */);
    }
    intptr_t block_size = block->HeapSize();
    if (remaining > 0) {
      freelist->FreeLocked(freelist->top(), remaining);
    }
    freelist->set_top(reinterpret_cast<uword>(block));
    freelist->set_end(freelist->top() + block_size);
    remaining = block_size;
  }
  ASSERT(remaining >= size);
  uword result = freelist->top();
  freelist->set_top(result + size);
  // ...
  if (freelist->top() < freelist->end()) {
    // ...
    *reinterpret_cast<uword*>(freelist->top()) = 0;
  }
  // ...
}

uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
  uword result = freelist->TryAllocateSmallLocked(size);
  if (result != 0) {
    freelist->AddUnaccountedSize(size);
    return result;
  }
  return TryAllocateDataBumpLocked(freelist, size);
}

uword PageSpace::AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size) {
  uword result = TryAllocateDataBumpLocked(freelist, size);
  // ...
  return result;
}
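// TryAllocateDataBumpLocked above keeps a bump-allocation window [top, end)
// inside the free list: when the window is too small it grabs a large
// free-list element (or falls back to a fresh page), returns any unused
// remainder of the old window to the free list, and then serves requests by
// advancing top. The zero word written at the new top appears to poison the
// start of the untouched remainder so that a stray heap walk fails fast
// instead of misparsing it.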
void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
  // ...
  pointer = reinterpret_cast<void*>(reinterpret_cast<uword>(pointer) - offset);
  // ...
  ASSERT(memory != nullptr);
  // ...
  if (is_executable) {
    // ...
  }
  // ...
  page->flags_ = flags;
  page->memory_ = memory;
  page->next_ = nullptr;
  page->forwarding_page_ = nullptr;
  page->card_table_ = nullptr;
  page->progress_bar_ = 0;
  page->owner_ = nullptr;
  page->top_ = memory->end();
  page->end_ = memory->end();
  page->survivor_end_ = 0;
  page->resolved_top_ = 0;
  // ...
  page->next_ = image_pages_;
  image_pages_ = page;
}

bool PageSpace::IsObjectFromImagePages(ObjectPtr object) {
  uword object_addr = UntaggedObject::ToAddr(object.untag());
  Page* image_page = image_pages_;
  while (image_page != nullptr) {
    if (image_page->Contains(object_addr)) {
      return true;
    }
    image_page = image_page->next();
  }
  return false;
}
PageSpaceController::PageSpaceController(Heap* heap,
                                         int heap_growth_ratio,
                                         int heap_growth_max,
                                         int garbage_collection_time_ratio)
    : heap_(heap),
      heap_growth_ratio_(heap_growth_ratio),
      desired_utilization_((100.0 - heap_growth_ratio) / 100.0),
      heap_growth_max_(heap_growth_max),
      garbage_collection_time_ratio_(garbage_collection_time_ratio),
      idle_gc_threshold_in_words_(0) {
  const intptr_t growth_in_pages = heap_growth_max / 2;
  RecordUpdate(last_usage_, last_usage_, growth_in_pages, "initial");
}

// ReachedHardThreshold / ReachedSoftThreshold / ReachedIdleThreshold: a
// heap_growth_ratio_ of 100 disables the corresponding trigger.
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  // ...
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  // ...
  if (heap_growth_ratio_ == 100) {
    return false;
  }
void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before,
                                                    SpaceUsage after,
                                                    int64_t start,
                                                    int64_t end) {
  // ...
  const intptr_t allocated_since_previous_gc =
      before.CombinedUsedInWords() - last_usage_.CombinedUsedInWords();
  intptr_t grow_heap;
  if (allocated_since_previous_gc > 0) {
    // ...
    // Estimate the fraction k of newly allocated memory that became garbage.
    const double k = Utils::Minimum(
        1.0, garbage / static_cast<double>(allocated_since_previous_gc));
    const int garbage_ratio = static_cast<int>(k * 100);

    // A GC is worthwhile if at least fraction t of the heap is garbage.
    double t = 1.0 - desired_utilization_;
    // If we spend too much time in GC, strive for even more free space.
    if (gc_time_fraction > garbage_collection_time_ratio_) {
      t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0;
    }

    // Number of pages we can allocate and still be within the desired usage.
    const intptr_t grow_pages =
        (static_cast<intptr_t>(after.CombinedUsedInWords() /
                               desired_utilization_) -
         after.CombinedUsedInWords()) /
        kPageSizeInWords;
    if (garbage_ratio == 0) {
      // No garbage in the previous cycle, so fall back to the growth-ratio
      // heuristic.
      grow_heap =
          Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
    } else if (garbage_collection_time_ratio_ == 0) {
      grow_heap =
          Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
    } else {
      // Binary-search for the smallest growth (in pages) such that, once the
      // new pages are filled, the next GC is expected to be worthwhile.
      intptr_t max = heap_growth_max_;
      intptr_t min = 0;
      intptr_t local_grow_heap = 0;
      while (min < max) {
        local_grow_heap = (max + min) / 2;
        const intptr_t limit =
            after.CombinedUsedInWords() + (local_grow_heap * kPageSizeInWords);
        const intptr_t allocated_before_next_gc =
            limit - after.CombinedUsedInWords();
        const double estimated_garbage = k * allocated_before_next_gc;
        if (t <= estimated_garbage / limit) {
          max = local_grow_heap - 1;
        } else {
          min = local_grow_heap + 1;
        }
      }
      local_grow_heap = (max + min) / 2;
      grow_heap = local_grow_heap;
      // ...
      if (grow_heap >= heap_growth_max_) {
        // ...
      }
    }
  }
  // ...
  last_usage_ = after;

  intptr_t max_capacity_in_words = heap_->old_space()->max_capacity_in_words_;
  if (max_capacity_in_words != 0) {
    // ...
    double f = static_cast<double>(/* ... */) /
               static_cast<double>(max_capacity_in_words);
    // ...
    grow_heap = static_cast<intptr_t>(grow_heap * f);
  }
  // ...
  RecordUpdate(before, after, grow_heap, "gc");
}
void PageSpaceController::EvaluateAfterLoading(SpaceUsage after) {
  // Number of pages we can allocate and still be within the desired usage.
  intptr_t growth_in_pages;
  if (desired_utilization_ == 0.0) {
    growth_in_pages = heap_growth_max_;
  } else {
    growth_in_pages = (static_cast<intptr_t>(after.CombinedUsedInWords() /
                                             desired_utilization_) -
                       after.CombinedUsedInWords()) /
                      kPageSizeInWords;
  }

  // Apply the growth cap.
  growth_in_pages =
      Utils::Minimum(static_cast<intptr_t>(heap_growth_max_), growth_in_pages);

  RecordUpdate(after, after, growth_in_pages, "loaded");
}
void PageSpaceController::RecordUpdate(SpaceUsage before,
                                       SpaceUsage after,
                                       intptr_t growth_in_pages,
                                       const char* reason) {
  // Save the threshold that will trigger the next collection.
  intptr_t threshold =
      after.CombinedUsedInWords() + (kPageSizeInWords * growth_in_pages);
  bool concurrent_mark = FLAG_concurrent_mark && (FLAG_marker_tasks != 0);
  if (concurrent_mark) {
    soft_gc_threshold_in_words_ = threshold;
    // ...
  } else {
    hard_gc_threshold_in_words_ = threshold;
    // ...
  }
  // ...
  idle_gc_threshold_in_words_ =
      /* ... */;

#if defined(SUPPORT_TIMELINE)
  // ...
  if (thread != nullptr) {
    // ...
    tbes.SetNumArguments(6);
    tbes.CopyArgument(0, "Reason", reason);
    tbes.FormatArgument(1, "Before.CombinedUsed (kB)", "%" Pd "",
                        /* ... */);
    tbes.FormatArgument(2, "After.CombinedUsed (kB)", "%" Pd "",
                        /* ... */);
    tbes.FormatArgument(3, "Hard Threshold (kB)", "%" Pd "",
                        /* ... */);
    tbes.FormatArgument(4, "Soft Threshold (kB)", "%" Pd "",
                        /* ... */);
    tbes.FormatArgument(5, "Idle Threshold (kB)", "%" Pd "",
                        /* ... */);
  }
#endif

  if (FLAG_log_growth || FLAG_verbose_gc) {
    THR_Print("%s: hard_threshold=%" Pd "MB, soft_threshold=%" Pd
              "MB, idle_threshold=%" Pd "MB, reason=%s\n",
              /* ... */);
  }
}
void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start,
                                                                 int64_t end) {
  Entry entry;
  entry.start = start;
  entry.end = end;
  history_.Add(entry);
}

int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() {
  int64_t gc_time = 0;
  int64_t total_time = 0;
  for (int i = 0; i < history_.Size() - 1; i++) {
    Entry current = history_.Get(i);
    Entry previous = history_.Get(i + 1);
    gc_time += current.end - current.start;
    total_time += current.end - previous.end;
  }
  if (total_time == 0) {
    return 0;
  } else {
    ASSERT(total_time >= gc_time);
    int result = static_cast<int>(
        (static_cast<double>(gc_time) / static_cast<double>(total_time)) *
        100);
    return result;
  }
}
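// Example of the fraction computed above (illustrative numbers): if the
// previous collection ended at t = 100 ms and the current one ran from
// t = 190 ms to t = 200 ms, this pair contributes 10 ms of gc_time against
// 100 ms of total_time, i.e. 10%. EvaluateGarbageCollection compares this
// percentage against garbage_collection_time_ratio_ and raises the
// free-space target t when the GC share is too high.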