#ifndef RUNTIME_VM_HEAP_SCAVENGER_H_
#define RUNTIME_VM_HEAP_SCAVENGER_H_
template <bool parallel>
class ScavengerVisitorBase;
template <typename Type, typename PtrType>
    for (const Page* p = head_; p != nullptr; p = p->next()) {
  intptr_t capacity_in_words_ = 0;
  intptr_t gc_threshold_in_words_;
  Page* head_ = nullptr;
  Page* tail_ = nullptr;
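  // The semispace is kept as a singly linked list of Pages from head_ to
  // tail_; used_in_words()/capacity_in_words() walk this list, and, judging
  // from TryAllocatePageLocked, growth stops once capacity reaches
  // gc_threshold_in_words_.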
                intptr_t promo_candidates_in_words,
                intptr_t promoted_in_words,
                intptr_t abandoned_in_words)
      : start_micros_(start_micros),
        end_micros_(end_micros),
        promo_candidates_in_words_(promo_candidates_in_words),
        promoted_in_words_(promoted_in_words),
        abandoned_in_words_(abandoned_in_words) {}
    double work =
        after_.used_in_words + promoted_in_words_ + abandoned_in_words_;
    return 1.0 - (work / old_threshold_in_words);
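    // Illustrative numbers (not from the source): with old_threshold_in_words
    // = 2048, after_.used_in_words = 256, promoted_in_words_ = 128 and
    // abandoned_in_words_ = 64, work = 448 and the expected garbage fraction
    // is 1.0 - 448.0 / 2048.0 = 0.78125, i.e. roughly 78% of the space that
    // was in use at the threshold turned out to be garbage.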
    return promo_candidates_in_words_ > 0
               ? promoted_in_words_ /
                     static_cast<double>(promo_candidates_in_words_)
  int64_t start_micros_;
  intptr_t promo_candidates_in_words_;
  intptr_t promoted_in_words_;
  intptr_t abandoned_in_words_;
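  // Hypothetical usage, only to show the intended units (all sizes are in
  // words):
  //   ScavengeStats stats(start_us, end_us, before, after,
  //                       /*promo_candidates_in_words=*/1024,
  //                       /*promoted_in_words=*/512,
  //                       /*abandoned_in_words=*/0);
  //   stats.PromoCandidatesSuccessFraction();  // 512 / 1024.0 == 0.5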
  // Size of the thread-local allocation buffer (TLAB) handed out to each
  // mutator thread.
  static constexpr intptr_t kTLABSize = 512 * KB;
    TryAllocateNewTLAB(thread, size, /*can_safepoint=*/true);
    return TryAllocateFromTLAB(thread, size);
    TryAllocateNewTLAB(thread, size, /*can_safepoint=*/false);
    return TryAllocateFromTLAB(thread, size);
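  // The two allocation paths above differ only in the can_safepoint argument
  // passed to TryAllocateNewTLAB: the first may reach a safepoint (and so may
  // trigger a scavenge) while refilling the TLAB, whereas the second must
  // not, so it may simply fail (return 0) where the first would have waited
  // for a GC.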
  void AddGCTime(int64_t micros) { gc_time_micros_ += micros; }
    intptr_t expected = external_size_.load();
      intptr_t next_external_size_in_words =
      if (next_external_size_in_words < 0 ||
        desired = expected + size;
    external_size_ -= size;
    ASSERT(external_size_ >= 0);
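  // AllocatedExternal above follows the usual lock-free accounting pattern on
  // the RelaxedAtomic external_size_: compute desired = expected + size,
  // reject the update if the resulting size in words would be negative or
  // exceed the address space, and retry until a compare-and-swap of expected
  // -> desired succeeds. A minimal sketch of that loop (not the literal
  // source):
  //
  //   intptr_t expected = external_size_.load();
  //   intptr_t desired;
  //   do {
  //     desired = expected + size;
  //     if (/* desired, converted to words, is out of range */) return false;
  //   } while (!external_size_.compare_exchange_weak(expected, desired));
  //   return true;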
    const intptr_t max_parallel_tlab_usage =
        (FLAG_new_gen_semi_max_size * MB) / Scavenger::kTLABSize;
    const intptr_t max_pool_size = max_parallel_tlab_usage / 4;
    return max_pool_size > 0 ? max_pool_size : 1;
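  // Example, assuming a 16 MB semispace limit: max_parallel_tlab_usage =
  // (16 * MB) / (512 * KB) = 32 TLABs, so MaxMutatorThreadCount() returns
  // 32 / 4 = 8.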
  template <typename Type, typename PtrType>
    kDummyScavengeTime = 0,
    kVisitIsolateRoots = 2,
    kIterateStoreBuffers = 3,
  uword TryAllocateFromTLAB(Thread* thread, intptr_t size) {
    const intptr_t remaining = static_cast<intptr_t>(thread->end()) - result;
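    // 'result' is the thread's current TLAB top and 'remaining' the space
    // left before thread->end(); presumably the allocation succeeds only when
    // remaining >= size, in which case the top pointer is bumped by size and
    // result is returned, and otherwise 0 is returned so the caller refills
    // the TLAB via TryAllocateNewTLAB.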
  void TryAllocateNewTLAB(Thread* thread, intptr_t size, bool can_safepoint);
  SemiSpace* Prologue(GCReason reason);
  intptr_t ParallelScavenge(SemiSpace* from);
  intptr_t SerialScavenge(SemiSpace* from);
  void ReverseScavenge(SemiSpace** from);
  void IterateIsolateRoots(ObjectPointerVisitor* visitor);
  template <bool parallel>
  void IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor);
  template <bool parallel>
  void IterateRememberedCards(ScavengerVisitorBase<parallel>* visitor);
  void IterateObjectIdTable(ObjectPointerVisitor* visitor);
  template <bool parallel>
  void IterateRoots(ScavengerVisitorBase<parallel>* visitor);
  void MournWeakHandles();
  void MournWeakTables();
  void Epilogue(SemiSpace* from);
  void VerifyStoreBuffers(const char* msg);
  void UpdateMaxHeapCapacity();
  void UpdateMaxHeapUsage();
  intptr_t NewSizeInWords(intptr_t old_size_in_words, GCReason reason) const;
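  // Taken together, these declarations outline a scavenge: Prologue() flips
  // to a fresh to-space, SerialScavenge()/ParallelScavenge() copy reachable
  // objects while IterateRoots(), IterateStoreBuffers() and
  // IterateRememberedCards() feed the visitors, MournWeakHandles() and
  // MournWeakTables() clear references to dead objects, and Epilogue()
  // releases the old from-space. ReverseScavenge() is the bail-out path that
  // undoes the copying when the scavenge must be aborted (cf. abort_ below).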
  PromotionStack promotion_stack_;
  intptr_t max_semi_capacity_in_words_;
  bool scavenging_ = false;
  bool early_tenure_ = false;
  RelaxedAtomic<intptr_t> root_slices_started_ = {0};
  RelaxedAtomic<intptr_t> weak_slices_started_ = {0};
  int64_t gc_time_micros_ = 0;
  intptr_t collections_ = 0;
  static constexpr int kStatsHistoryCapacity = 4;
  RingBuffer<ScavengeStats, kStatsHistoryCapacity> stats_history_;
  intptr_t scavenge_words_per_micro_;
  intptr_t idle_scavenge_threshold_in_words_ = 0;
  RelaxedAtomic<intptr_t> external_size_ = {0};
  intptr_t freed_in_words_ = 0;
  RelaxedAtomic<bool> failed_to_promote_ = {false};
  RelaxedAtomic<bool> abort_ = {false};
  mutable Mutex space_lock_;
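  // The statistics above (stats_history_, scavenge_words_per_micro_,
  // idle_scavenge_threshold_in_words_) track how fast recent scavenges ran;
  // ShouldPerformIdleScavenge(deadline) can use them to estimate whether
  // another scavenge fits into an idle period before the given deadline.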