#ifndef RUNTIME_VM_HEAP_PAGES_H_
#define RUNTIME_VM_HEAP_PAGES_H_
  static constexpr intptr_t kHistoryLength = 4;
  RingBuffer<Entry, kHistoryLength> history_;

  DISALLOW_ALLOCATION();
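  // Interpretive note (not in the original): with kHistoryLength == 4, the
  // GC-time fraction computed from history_ reflects only the four most
  // recent collections; older pauses age out of the ring buffer.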
                      int heap_growth_ratio,
                      int garbage_collection_time_ratio);
                      intptr_t growth_in_pages,
  const int heap_growth_ratio_;
  const double desired_utilization_;
  const int heap_growth_max_;
  const int garbage_collection_time_ratio_;
  intptr_t hard_gc_threshold_in_words_;
  intptr_t soft_gc_threshold_in_words_;
  intptr_t idle_gc_threshold_in_words_;
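  // Interpretive note (not in the original): these thresholds back
  // ReachedHardThreshold(), ReachedSoftThreshold(), and
  // ReachedIdleThreshold(). The usual reading is: hard = stop and collect
  // immediately, soft = start a (possibly concurrent) collection at the next
  // convenient point, idle = collect only while the isolate is otherwise idle.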
  uword TryAllocate(intptr_t size,
                    bool is_executable = false,
                    GrowthPolicy growth_policy = kControlGrowth) {
    bool is_protected = is_executable && FLAG_write_protect_code;
    bool is_locked = false;
    return TryAllocateInternal(
        size, &freelists_[is_executable ? kExecutableFreelist : kDataFreelist],
        is_executable, growth_policy, is_protected, is_locked);
  }
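  // Usage sketch (hypothetical caller, not from this header): a failed
  // allocation returns 0 and the caller is expected to react, e.g. by
  // collecting garbage and retrying.
  //
  //   uword addr = old_space->TryAllocate(size);  // "old_space" assumed
  //   if (addr == 0) {
  //     // Trigger a GC or grow the heap, then retry or report OOM.
  //   }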
  DART_FORCE_INLINE
  uword TryAllocatePromoLocked(FreeList* freelist, intptr_t size) {
    // Fast bump-allocation path elided in this excerpt.
    return TryAllocatePromoLockedSlow(freelist, size);
  }

  DART_FORCE_INLINE
  uword AllocateSnapshotLocked(FreeList* freelist, intptr_t size) {
    // Fast bump-allocation path elided in this excerpt.
    return AllocateSnapshotLockedSlow(freelist, size);
  }
    for (Page* page = image_pages_; page != nullptr; page = page->next()) {
      size += page->memory_->size();
    }
  void AddGCTime(int64_t micros) { gc_time_micros_ += micros; }
      desired = expected + size_in_words;
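  // Hedged sketch (loop shape assumed, not shown in this excerpt): the line
  // above is the update step of a compare-exchange loop that atomically grows
  // the external-allocation counter, along the lines of:
  //
  //   intptr_t expected = usage_.external_in_words.load();  // "usage_" assumed
  //   intptr_t desired;
  //   do {
  //     desired = expected + size_in_words;
  //   } while (!usage_.external_in_words.compare_exchange_weak(expected,
  //                                                            desired));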
    return &freelists_[kDataFreelist + i];
  void PushDependencyToConcurrentMarking() {
    // A no-op read-modify-write: the value is unchanged, but the
    // acquire-release ordering publishes this thread's prior writes to marker
    // threads that subsequently load pause_concurrent_marking_.
    pause_concurrent_marking_.fetch_or(0);
  }
  intptr_t tasks() const { return tasks_; }
  intptr_t concurrent_marker_tasks() const {
    return concurrent_marker_tasks_;
  }
  void set_concurrent_marker_tasks(intptr_t val) {
    concurrent_marker_tasks_ = val;
  }
  intptr_t concurrent_marker_tasks_active() const {
    return concurrent_marker_tasks_active_;
  }
  void set_concurrent_marker_tasks_active(intptr_t val) {
    concurrent_marker_tasks_active_ = val;
  }
  bool pause_concurrent_marking() const {
    return pause_concurrent_marking_.load() != 0;
  }
    kConcurrentSweep = 0,
    kSweepLargePages = 5,
  uword TryAllocateDataLocked(FreeList* freelist,
                              intptr_t size,
                              GrowthPolicy growth_policy) {
    bool is_executable = false;
    bool is_protected = false;
    bool is_locked = true;
    return TryAllocateInternal(size, freelist, is_executable, growth_policy,
                               is_protected, is_locked);
  }
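  // Observation (comment added, not original): unlike TryAllocate() above,
  // the *Locked variants pass is_locked == true because the caller already
  // holds the freelist's lock (see AcquireLock/ReleaseLock), so the internal
  // path must not re-acquire it.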
  uword TryAllocateInternal(intptr_t size,
                            FreeList* freelist,
                            bool is_executable,
                            GrowthPolicy growth_policy,
                            bool is_protected,
                            bool is_locked);
  uword TryAllocateInFreshPage(intptr_t size,
                               FreeList* freelist,
                               bool is_executable,
                               GrowthPolicy growth_policy,
                               bool is_locked);
  uword TryAllocateInFreshLargePage(intptr_t size,
                                    bool is_executable,
                                    GrowthPolicy growth_policy);
  uword TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size);
  uword TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size);
  uword AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size);
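  // Orientation note (inferred from the declarations above, not original
  // commentary): public entry points such as TryAllocate() funnel into
  // TryAllocateInternal(), which tries the given freelist first and falls
  // back to TryAllocateInFreshPage() or, for objects too large for an
  // ordinary page, TryAllocateInFreshLargePage().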
  void MakeIterable() const;
  void AddPageLocked(Page* page);
  void AddLargePageLocked(Page* page);
  void AddExecPageLocked(Page* page);
  void RemovePageLocked(Page* page, Page* previous_page);
  void RemoveLargePageLocked(Page* page, Page* previous_page);
  void RemoveExecPageLocked(Page* page, Page* previous_page);
  Page* AllocatePage(bool is_executable, bool link = true);
  Page* AllocateLargePage(intptr_t size, bool is_executable);

  void TruncateLargePage(Page* page, intptr_t new_object_size_in_bytes);
  void FreePage(Page* page, Page* previous_page);
  void FreeLargePage(Page* page, Page* previous_page);
  void FreePages(Page* pages);
  void CollectGarbageHelper(Thread* thread, bool compact, bool finalize);

  void Sweep(bool exclusive);
  void ConcurrentSweep(IsolateGroup* isolate_group);
  void Compact(Thread* thread);
  static intptr_t LargePageSizeInWordsFor(intptr_t size);
  bool CanIncreaseCapacityInWordsLocked(intptr_t increase_in_words) {
    if (max_capacity_in_words_ == 0) {
      // Unlimited.
      return true;
    }
    intptr_t free_capacity_in_words =
        max_capacity_in_words_ - CapacityInWords();
    return ((free_capacity_in_words > 0) &&
            (increase_in_words <= free_capacity_in_words));
  }
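  // Worked example (illustrative numbers only): with max_capacity_in_words_
  // == 1024 and a current capacity of 1000 words, free capacity is 24 words;
  // an increase of 16 words is allowed, an increase of 32 words is not. A
  // max_capacity_in_words_ of 0 disables the cap entirely.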
  const intptr_t num_freelists_;
  enum {
    kExecutableFreelist = 0,
    kDataFreelist,  // DataFreeList(i) returns &freelists_[kDataFreelist + i].
  };
  FreeList* freelists_;

  static constexpr intptr_t kOOMReservationSize = 32 * KB;
  FreeListElement* oom_reservation_ = nullptr;
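  // Interpretive note (not in the original): oom_reservation_ appears to hold
  // a 32 KB free-list element kept in reserve; the class's
  // TryReleaseReservation() presumably returns it to the freelist under
  // memory pressure so that out-of-memory handling can still allocate.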
  mutable Mutex pages_lock_;
  Page* pages_ = nullptr;
  Page* pages_tail_ = nullptr;
  Page* exec_pages_ = nullptr;
  Page* exec_pages_tail_ = nullptr;
  Page* large_pages_ = nullptr;
  Page* large_pages_tail_ = nullptr;
  Page* image_pages_ = nullptr;
  Page* sweep_regular_ = nullptr;
  Page* sweep_large_ = nullptr;
  intptr_t max_capacity_in_words_;

  RelaxedAtomic<intptr_t> allocated_black_in_words_;

  mutable Monitor tasks_lock_;
  intptr_t concurrent_marker_tasks_;
  intptr_t concurrent_marker_tasks_active_;
  AcqRelAtomic<uword> pause_concurrent_marking_;

  Thread* iterating_thread_;

  int64_t gc_time_micros_;
  intptr_t collections_;
  intptr_t mark_words_per_micro_;

  bool enable_concurrent_mark_;
#endif  // RUNTIME_VM_HEAP_PAGES_H_