DEFINE_FLAG(bool,
            write_protect_vm_isolate,
            true,
            "Write protect vm_isolate.");
DEFINE_FLAG(bool,
            disable_heap_verification,
            false,
            "Explicitly disable heap verification.");
Heap::Heap(IsolateGroup* isolate_group,
           bool is_vm_isolate,
           intptr_t max_new_gen_semi_words,
           intptr_t max_old_gen_words)
    : isolate_group_(isolate_group),
      is_vm_isolate_(is_vm_isolate),
      new_space_(this, max_new_gen_semi_words),
      old_space_(this, max_old_gen_words),
      assume_scavenge_will_fail_(false),
      gc_on_nth_allocation_(kNoForcedGarbageCollection) {
  UpdateGlobalMaxUsed();
  for (int sel = 0; sel < kNumWeakSelectors; sel++) {
    new_weak_tables_[sel] = new WeakTable();
    old_weak_tables_[sel] = new WeakTable();
  }
}
Heap::~Heap() {
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  Dart_HeapSamplingDeleteCallback cleanup =
      HeapProfileSampler::delete_callback();
  if (cleanup != nullptr) {
    new_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
    old_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
  }
#endif

  for (int sel = 0; sel < kNumWeakSelectors; sel++) {
    delete new_weak_tables_[sel];
    delete old_weak_tables_[sel];
  }
}
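// Allocation tries the new-space (scavenger) fast path first and falls back
// to old space only when a scavenge cannot make enough room.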
uword Heap::AllocateNew(Thread* thread, intptr_t size) {
  ASSERT(thread->no_safepoint_scope_depth() == 0);
  CollectForDebugging(thread);
  uword addr = new_space_.TryAllocate(thread, size);
  if (LIKELY(addr != 0)) {
    return addr;
  }
  if (!assume_scavenge_will_fail_ && !thread->force_growth()) {
    GcSafepointOperationScope safepoint_operation(thread);
    // Another thread may have won the race to the safepoint and already
    // performed a GC; retry before collecting again.
    addr = new_space_.TryAllocate(thread, size);
    if (addr != 0) {
      return addr;
    }
    CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
    addr = new_space_.TryAllocate(thread, size);
    if (addr != 0) {
      return addr;
    }
  }
  // A scavenge may not free enough space; fall back to old space.
  return AllocateOld(thread, size, /*is_exec=*/false);
}
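// Old-space allocation escalates through increasingly expensive remedies:
// a plain attempt, waiting for concurrent sweepers, retrying under a
// safepoint, a mark-sweep, forced growth, and finally a mark-compact.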
uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
  ASSERT(thread->no_safepoint_scope_depth() == 0);

#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
  if (HeapProfileSampler::enabled()) {
    thread->heap_sampler().SampleOldSpaceAllocation(size);
  }
#endif

  if (!thread->force_growth()) {
    CollectForDebugging(thread);
    uword addr = old_space_.TryAllocate(size, is_exec);
    if (addr != 0) {
      return addr;
    }
    // Wait for concurrent sweeping to make more room available.
    WaitForSweeperTasks(thread);
    addr = old_space_.TryAllocate(size, is_exec);
    if (addr != 0) {
      return addr;
    }
    GcSafepointOperationScope safepoint_operation(thread);
    // Another thread may have won the race to the safepoint and already
    // performed a GC; retry before collecting again.
    addr = old_space_.TryAllocate(size, is_exec);
    if (addr != 0) {
      return addr;
    }
    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kOldSpace);
    addr = old_space_.TryAllocate(size, is_exec);
    if (addr != 0) {
      return addr;
    }
    WaitForSweeperTasksAtSafepoint(thread);
    addr = old_space_.TryAllocate(size, is_exec);
    if (addr != 0) {
      return addr;
    }
    // Grow beyond the soft limit before resorting to compaction.
    addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
    if (addr != 0) {
      return addr;
    }
    // Before giving up, try a compacting collection.
    CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kOldSpace);
    WaitForSweeperTasksAtSafepoint(thread);
  }
  uword addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
  if (addr != 0) {
    return addr;
  }

  if (!thread->force_growth()) {
    WaitForSweeperTasks(thread);
    old_space_.TryReleaseReservation();
  }

  // Give up: the caller will report an out-of-memory error.
  OS::PrintErr("Exhausted heap space, trying to allocate %" Pd " bytes.\n",
               size);
  return 0;
}
bool Heap::AllocatedExternal(intptr_t size, Space space) {
  if (space == kNew) {
    if (!new_space_.AllocatedExternal(size)) {
      return false;
    }
  } else {
    ASSERT(space == kOld);
    if (!old_space_.AllocatedExternal(size)) {
      return false;
    }
  }

  Thread* thread = Thread::Current();
  if ((thread->no_callback_scope_depth() == 0) && !thread->force_growth()) {
    CheckExternalGC(thread);
  }
  return true;
}

void Heap::FreedExternal(intptr_t size, Space space) {
  if (space == kNew) {
    new_space_.FreedExternal(size);
  } else {
    ASSERT(space == kOld);
    old_space_.FreedExternal(size);
  }
}

void Heap::PromotedExternal(intptr_t size) {
  new_space_.FreedExternal(size);
  old_space_.AllocatedExternal(size);
}
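// External size changes can push either space over its limits without any
// Dart allocation happening, so they get a dedicated GC-trigger check.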
void Heap::CheckExternalGC(Thread* thread) {
  if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
    // Attempt to free some external allocation by a scavenge. (If the total
    // remains above the limit, the next external allocation will trigger
    // another.)
    CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
    // Promotion may have pushed old space over its limit; fall through.
  }

  if (old_space_.ReachedHardThreshold()) {
    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
  } else {
    CheckConcurrentMarking(thread, GCReason::kExternal, 0);
  }
}
bool Heap::Contains(uword addr) const {
  return new_space_.Contains(addr) || old_space_.Contains(addr);
}

bool Heap::NewContains(uword addr) const {
  return new_space_.Contains(addr);
}

bool Heap::OldContains(uword addr) const {
  return old_space_.Contains(addr);
}

bool Heap::CodeContains(uword addr) const {
  return old_space_.CodeContains(addr);
}

bool Heap::DataContains(uword addr) const {
  return old_space_.DataContains(addr);
}

void Heap::VisitObjects(ObjectVisitor* visitor) {
  new_space_.VisitObjects(visitor);
  old_space_.VisitObjects(visitor);
}

void Heap::VisitObjectsNoImagePages(ObjectVisitor* visitor) {
  new_space_.VisitObjects(visitor);
  old_space_.VisitObjectsNoImagePages(visitor);
}

void Heap::VisitObjectsImagePages(ObjectVisitor* visitor) const {
  old_space_.VisitObjectsImagePages(visitor);
}
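// HeapIterationScope brings the heap into a state where it is safe to walk:
// it waits for concurrent old-space GC tasks to finish, registers the current
// thread as the sole iterator, and optionally makes code pages writable for
// the duration of the scope.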
HeapIterationScope::HeapIterationScope(Thread* thread, bool writable)
    : ThreadStackResource(thread),
      heap_(isolate_group()->heap()),
      old_space_(heap_->old_space()),
      writable_(writable) {
  {
    // It is not safe to iterate old space while concurrent marking or
    // sweeping is in progress, so wait for any such tasks to complete first.
    MonitorLocker ml(old_space_->tasks_lock());
    while ((old_space_->tasks() > 0) ||
           (old_space_->phase() != PageSpace::kDone)) {
      old_space_->AssistTasks(&ml);
      if (old_space_->phase() == PageSpace::kAwaitingFinalization) {
        ml.Exit();
        heap_->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                                      GCReason::kFinalize);
        ml.Enter();
      }
      while (old_space_->tasks() > 0) {
        ml.Wait();
      }
    }
    ASSERT(old_space_->iterating_thread_ == nullptr);
    old_space_->iterating_thread_ = thread;
    old_space_->set_tasks(1);
  }
  if (writable_) {
    heap_->WriteProtectCode(false);
  }
}

HeapIterationScope::~HeapIterationScope() {
  if (writable_) {
    heap_->WriteProtectCode(true);
  }
  {
    MonitorLocker ml(old_space_->tasks_lock());
    old_space_->iterating_thread_ = nullptr;
    old_space_->set_tasks(0);
    ml.NotifyAll();
  }
}

void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const {
  heap_->VisitObjects(visitor);
}
void Heap::NotifyIdle(int64_t deadline) {
  Thread* thread = Thread::Current();
  // Sample the concurrent old-space GC phase under the tasks lock.
  PageSpace::Phase phase;
  {
    MonitorLocker ml(old_space_.tasks_lock());
    phase = old_space_.phase();
  }
  if (phase == PageSpace::kAwaitingFinalization) {
    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
  }
  if (FLAG_mark_when_idle) {
    old_space_.IncrementalMarkWithTimeBudget(deadline);
  }
}
void Heap::CollectNewSpaceGarbage(Thread* thread,
                                  GCType type,
                                  GCReason reason) {
  GcSafepointOperationScope safepoint_operation(thread);
  RecordBeforeGC(type, reason);
  {
    VMTagScope tagScope(thread, reason == GCReason::kIdle
                                    ? VMTag::kGCIdleTagId
                                    : VMTag::kGCNewSpaceTagId);
    TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
    new_space_.Scavenge(thread, type, reason);
    RecordAfterGC(type);
    PrintStats();
#if defined(SUPPORT_TIMELINE)
    PrintStatsToTimeline(&tbes, reason);
#endif
  }
}
void Heap::CollectOldSpaceGarbage(Thread* thread,
                                  GCType type,
                                  GCReason reason) {
  NoActiveIsolateScope no_active_isolate_scope(thread);
  if (FLAG_use_compactor) {
    type = GCType::kMarkCompact;
  }
  GcSafepointOperationScope safepoint_operation(thread);
  thread->isolate_group()->ForEachIsolate(
      [&](Isolate* isolate) {
        // Discard regexp backtracking stacks to further reduce memory usage.
        isolate->CacheRegexpBacktrackStack(nullptr);
      },
      /*at_safepoint=*/true);
  RecordBeforeGC(type, reason);
  {
    VMTagScope tagScope(thread, reason == GCReason::kIdle
                                    ? VMTag::kGCIdleTagId
                                    : VMTag::kGCOldSpaceTagId);
    TIMELINE_FUNCTION_GC_DURATION(thread, "CollectOldGeneration");
    old_space_.CollectGarbage(thread, /*compact=*/type == GCType::kMarkCompact,
                              /*finalize=*/true);
    RecordAfterGC(type);
    PrintStats();
#if defined(SUPPORT_TIMELINE)
    PrintStatsToTimeline(&tbes, reason);
#endif
  }
  // Some Code objects may have been collected, so invalidate handler caches.
  thread->isolate_group()->ForEachIsolate(
      [&](Isolate* isolate) {
        isolate->handler_info_cache()->Clear();
        isolate->catch_entry_moves_cache()->Clear();
      },
      /*at_safepoint=*/true);
  assume_scavenge_will_fail_ = false;
}
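// CollectGarbage dispatches on GCType so callers can request a collection of
// either generation through a single entry point.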
void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) {
  switch (type) {
    case GCType::kScavenge:
    case GCType::kEvacuate:
      CollectNewSpaceGarbage(thread, type, reason);
      break;
    case GCType::kMarkSweep:
    case GCType::kMarkCompact:
      CollectOldSpaceGarbage(thread, type, reason);
      break;
    default:
      UNREACHABLE();
  }
}

void Heap::CollectAllGarbage(GCReason reason, bool compact) {
  Thread* thread = Thread::Current();
  // New space is evacuated so this GC will collect all dead objects
  // kept alive by a cross-generational pointer.
  CollectNewSpaceGarbage(thread, GCType::kEvacuate, reason);
  CollectOldSpaceGarbage(
      thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
}

void Heap::CheckCatchUp(Thread* thread) {
  if (old_space()->ReachedHardThreshold()) {
    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kCatchUp);
  } else {
    CheckConcurrentMarking(thread, GCReason::kCatchUp, 0);
  }
}
void Heap::CheckConcurrentMarking(Thread* thread,
                                  GCReason reason,
                                  intptr_t size) {
  PageSpace::Phase phase;
  {
    MonitorLocker ml(old_space_.tasks_lock());
    phase = old_space_.phase();
  }
  switch (phase) {
    case PageSpace::kMarking:
      old_space_.IncrementalMarkWithSizeBudget(size);
      return;
    case PageSpace::kSweepingLarge:
    case PageSpace::kSweepingRegular:
      return;  // Busy.
    case PageSpace::kAwaitingFinalization:
      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
      return;
    case PageSpace::kDone:
      if (old_space_.ReachedSoftThreshold()) {
        StartConcurrentMarking(thread, reason);
      }
      return;
    default:
      UNREACHABLE();
  }
}

void Heap::CheckFinalizeMarking(Thread* thread) {
  PageSpace::Phase phase;
  {
    MonitorLocker ml(old_space_.tasks_lock());
    phase = old_space_.phase();
  }
  if (phase == PageSpace::kAwaitingFinalization) {
    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
  }
}

void Heap::StartConcurrentMarking(Thread* thread, GCReason reason) {
  GcSafepointOperationScope safepoint_operation(thread);
  RecordBeforeGC(GCType::kStartConcurrentMark, reason);
  VMTagScope tagScope(thread, reason == GCReason::kIdle
                                  ? VMTag::kGCIdleTagId
                                  : VMTag::kGCOldSpaceTagId);
  TIMELINE_FUNCTION_GC_DURATION(thread, "StartConcurrentMarking");
  old_space_.CollectGarbage(thread, /*compact=*/false, /*finalize=*/false);
  RecordAfterGC(GCType::kStartConcurrentMark);
  PrintStats();
#if defined(SUPPORT_TIMELINE)
  PrintStatsToTimeline(&tbes, reason);
#endif
}
void Heap::UpdateGlobalMaxUsed() {
  ASSERT(isolate_group_ != nullptr);
  // We read the used counts for both spaces without synchronizing; the value
  // of this metric is approximate.
  isolate_group_->GetHeapGlobalUsedMaxMetric()->SetValue(
      (UsedInWords(Heap::kNew) * kWordSize) +
      (UsedInWords(Heap::kOld) * kWordSize));
}

void Heap::WriteProtect(bool read_only) {
  read_only_ = read_only;
  new_space_.WriteProtect(read_only);
  old_space_.WriteProtect(read_only);
}

void Heap::Init(IsolateGroup* isolate_group,
                bool is_vm_isolate,
                intptr_t max_new_gen_words,
                intptr_t max_old_gen_words) {
  ASSERT(isolate_group->heap() == nullptr);
  std::unique_ptr<Heap> heap(new Heap(isolate_group, is_vm_isolate,
                                      max_new_gen_words, max_old_gen_words));
  isolate_group->set_heap(std::move(heap));
}

void Heap::CollectOnNthAllocation(intptr_t num_allocations) {
  // Prevent generated code from using the TLAB fast path on next allocation.
  new_space_.AbandonRemainingTLABForDebugging(Thread::Current());
  gc_on_nth_allocation_ = num_allocations;
}
void Heap::CollectForDebugging(Thread* thread) {
  if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return;
  if (thread->OwnsGCSafepoint()) {
    // CollectAllGarbage is not supported when we are at a safepoint, and
    // allocating at a safepoint is not a common case.
    return;
  }
  gc_on_nth_allocation_--;
  if (gc_on_nth_allocation_ == 0) {
    CollectAllGarbage(GCReason::kDebugging);
    gc_on_nth_allocation_ = kNoForcedGarbageCollection;
  } else {
    // Prevent generated code from using the TLAB fast path on next allocation.
    new_space_.AbandonRemainingTLABForDebugging(thread);
  }
}
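// CollectOnNthAllocation/CollectForDebugging form a test hook that forces a
// full GC on the Nth subsequent allocation.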
ObjectSet* Heap::CreateAllocatedObjectSet(Zone* zone,
                                          MarkExpectation mark_expectation) {
  ObjectSet* allocated_set = new (zone) ObjectSet(zone);

  this->AddRegionsToObjectSet(allocated_set);
  Isolate* vm_isolate = Dart::vm_isolate();
  vm_isolate->group()->heap()->AddRegionsToObjectSet(allocated_set);

  {
    VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
                                       mark_expectation);
    this->VisitObjectsNoImagePages(&object_visitor);
  }
  {
    VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
                                       kRequireMarked);
    this->VisitObjectsImagePages(&object_visitor);
  }
  {
    // The VM isolate heap is premarked.
    VerifyObjectVisitor vm_object_visitor(isolate_group(), allocated_set,
                                          kRequireMarked);
    vm_isolate->group()->heap()->VisitObjects(&vm_object_visitor);
  }

  return allocated_set;
}
bool Heap::Verify(const char* msg, MarkExpectation mark_expectation) {
  if (FLAG_disable_heap_verification) {
    return true;
  }
  HeapIterationScope heap_iteration_scope(Thread::Current());
  return VerifyGC(msg, mark_expectation);
}

bool Heap::VerifyGC(const char* msg, MarkExpectation mark_expectation) {
  ASSERT(msg != nullptr);
  auto thread = Thread::Current();
  StackZone stack_zone(thread);

  ObjectSet* allocated_set =
      CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
  VerifyPointersVisitor visitor(isolate_group(), allocated_set, msg);
  VisitObjectPointers(&visitor);

  return true;
}

void Heap::PrintSizes() const {
  OS::PrintErr(
      "New space (%" Pd "k of %" Pd
      "k) "
      "Old space (%" Pd "k of %" Pd "k)\n",
      (UsedInWords(kNew) / KBInWords), (CapacityInWords(kNew) / KBInWords),
      (UsedInWords(kOld) / KBInWords), (CapacityInWords(kOld) / KBInWords));
}
const char* Heap::GCTypeToString(GCType type) {
  switch (type) {
    // ... other GC types elided ...
    case GCType::kMarkCompact:
      return "MarkCompact";
    // ...
  }
}

const char* Heap::GCReasonToString(GCReason gc_reason) {
  switch (gc_reason) {
    // ... other reasons elided ...
    case GCReason::kStoreBuffer:
      return "store buffer";
    // ...
  }
}
intptr_t Heap::GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const {
  if (raw_obj->IsImmediateOrOldObject()) {
    return old_weak_tables_[sel]->GetValue(raw_obj);
  }
  return new_weak_tables_[sel]->GetValue(raw_obj);
}

void Heap::SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val) {
  if (raw_obj->IsImmediateOrOldObject()) {
    old_weak_tables_[sel]->SetValue(raw_obj, val);
  } else {
    new_weak_tables_[sel]->SetValue(raw_obj, val);
  }
}
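// Weak tables are segregated by generation, so a scavenge only needs to
// visit entries whose keys live in new space.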
intptr_t Heap::SetWeakEntryIfNonExistent(ObjectPtr raw_obj,
                                         WeakSelector sel,
                                         intptr_t val) {
  if (raw_obj->IsImmediateOrOldObject()) {
    return old_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
  }
  return new_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
}

void Heap::ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object) {
  const auto before_space =
      before_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;
  const auto after_space =
      after_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;

  for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
    const auto selector = static_cast<Heap::WeakSelector>(sel);
    auto before_table = GetWeakTable(before_space, selector);
    intptr_t entry = before_table->RemoveValueExclusive(before_object);
    if (entry != 0) {
      auto after_table = GetWeakTable(after_space, selector);
      after_table->SetValueExclusive(after_object, entry);
    }
  }

  isolate_group()->ForEachIsolate(
      [&](Isolate* isolate) {
        auto before_table = before_object->IsImmediateOrOldObject()
                                ? isolate->forward_table_old()
                                : isolate->forward_table_new();
        if (before_table != nullptr) {
          intptr_t entry = before_table->RemoveValueExclusive(before_object);
          if (entry != 0) {
            auto after_table = after_object->IsImmediateOrOldObject()
                                   ? isolate->forward_table_old()
                                   : isolate->forward_table_new();
            ASSERT(after_table != nullptr);
            after_table->SetValueExclusive(after_object, entry);
          }
        }
      },
      /*at_safepoint=*/true);
}
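// ForwardWeakEntries moves any weak-table state attached to before_object
// over to after_object (e.g. when one object is replaced by another during
// become); per-isolate forwarding tables are updated as well.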
void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) {
  for (int sel = 0; sel < kNumWeakSelectors; sel++) {
    const auto selector = static_cast<Heap::WeakSelector>(sel);
    GetWeakTable(Heap::kNew, selector)->Forward(visitor);
    GetWeakTable(Heap::kOld, selector)->Forward(visitor);
  }
  // Isolates might have forwarding tables (used during become).
  isolate_group()->ForEachIsolate([&](Isolate* isolate) {
    auto table_old = isolate->forward_table_old();
    if (table_old != nullptr) table_old->Forward(visitor);
    auto table_new = isolate->forward_table_new();
    if (table_new != nullptr) table_new->Forward(visitor);
  });
}
void Heap::RecordBeforeGC(GCType type, GCReason reason) {
  stats_.num_++;
  stats_.type_ = type;
  stats_.reason_ = reason;
  stats_.before_.micros_ = OS::GetCurrentMonotonicMicros();
  stats_.before_.new_ = new_space_.GetCurrentUsage();
  stats_.before_.old_ = old_space_.GetCurrentUsage();
  stats_.before_.store_buffer_ = isolate_group_->store_buffer()->Size();
}

void Heap::RecordAfterGC(GCType type) {
  stats_.after_.micros_ = OS::GetCurrentMonotonicMicros();
  int64_t delta = stats_.after_.micros_ - stats_.before_.micros_;
  if (stats_.type_ == GCType::kScavenge) {
    new_space_.AddGCTime(delta);
    new_space_.IncrementCollections();
  } else {
    old_space_.AddGCTime(delta);
    old_space_.IncrementCollections();
  }
  stats_.after_.new_ = new_space_.GetCurrentUsage();
  stats_.after_.old_ = old_space_.GetCurrentUsage();
#ifndef PRODUCT
  // Notify the VM service of the GC event.
  if (Service::gc_stream.enabled() &&
      !Isolate::IsSystemIsolate(Isolate::Current())) {
    isolate_group_->ForEachIsolate([&](Isolate* isolate) {
      ServiceEvent event(isolate, ServiceEvent::kGC);
      event.set_gc_stats(&stats_);
      Service::HandleEvent(&event);
    });
  }
#endif  // !PRODUCT
}
void Heap::PrintStats() {
  if (!FLAG_verbose_gc) return;

  if ((FLAG_verbose_gc_hdr != 0) &&
      (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) {
    OS::PrintErr(
        "[ | | | | "
        "| new gen | new gen | new gen | old gen | old gen | old "
        "gen | store | delta used ]\n"
        "[ GC isolate | space (reason) | GC# | start | time | used "
        "(MB) | capacity MB | external| used (MB) | capacity (MB) | "
        "external MB | buffer | new | old ]\n"
        "[ | | | (s) | (ms) "
        "|before| after|before| after| b4 |aftr| before| after | before| after "
        "|before| after| b4 |aftr| (MB) | (MB) ]\n");
  }

  OS::PrintErr(
      "[ %-13.13s, %11s(%12s), "
      "%4" Pd ", %6.2f, %5.1f, "
      "%5.1f, %5.1f, %5.1f, %5.1f, %3.1f, %3.1f, "
      "%6.1f, %6.1f, %6.1f, %6.1f, %5.1f, %5.1f, "
      "%3" Pd ", %3" Pd ", "
      "%5.1f, %6.1f ]\n",
      isolate_group()->source()->name, GCTypeToString(stats_.type_),
      GCReasonToString(stats_.reason_), stats_.num_,
      MicrosecondsToSeconds(isolate_group_->UptimeMicros()),
      MicrosecondsToMilliseconds(stats_.after_.micros_ -
                                 stats_.before_.micros_),
      WordsToMB(stats_.before_.new_.used_in_words),
      WordsToMB(stats_.after_.new_.used_in_words),
      WordsToMB(stats_.before_.new_.capacity_in_words),
      WordsToMB(stats_.after_.new_.capacity_in_words),
      WordsToMB(stats_.before_.new_.external_in_words),
      WordsToMB(stats_.after_.new_.external_in_words),
      WordsToMB(stats_.before_.old_.used_in_words),
      WordsToMB(stats_.after_.old_.used_in_words),
      WordsToMB(stats_.before_.old_.capacity_in_words),
      WordsToMB(stats_.after_.old_.capacity_in_words),
      WordsToMB(stats_.before_.old_.external_in_words),
      WordsToMB(stats_.after_.old_.external_in_words),
      stats_.before_.store_buffer_, stats_.after_.store_buffer_,
      WordsToMB(stats_.after_.new_.used_in_words -
                stats_.before_.new_.used_in_words),
      WordsToMB(stats_.after_.old_.used_in_words -
                stats_.before_.old_.used_in_words));
}
void Heap::PrintStatsToTimeline(TimelineEventScope* event, GCReason reason) {
#if defined(SUPPORT_TIMELINE)
  if ((event == nullptr) || !event->enabled()) {
    return;
  }
  intptr_t arguments = event->GetNumArguments();
  event->SetNumArguments(arguments + 13);
  event->CopyArgument(arguments + 0, "Reason", GCReasonToString(reason));
  event->FormatArgument(arguments + 1, "Before.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.used_in_words));
  event->FormatArgument(arguments + 2, "After.New.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.used_in_words));
  event->FormatArgument(arguments + 3, "Before.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.used_in_words));
  event->FormatArgument(arguments + 4, "After.Old.Used (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.used_in_words));
  event->FormatArgument(arguments + 5, "Before.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.capacity_in_words));
  event->FormatArgument(arguments + 6, "After.New.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.capacity_in_words));
  event->FormatArgument(arguments + 7, "Before.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.capacity_in_words));
  event->FormatArgument(arguments + 8, "After.Old.Capacity (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.capacity_in_words));
  event->FormatArgument(arguments + 9, "Before.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.new_.external_in_words));
  event->FormatArgument(arguments + 10, "After.New.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.new_.external_in_words));
  event->FormatArgument(arguments + 11, "Before.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.before_.old_.external_in_words));
  event->FormatArgument(arguments + 12, "After.Old.External (kB)", "%" Pd "",
                        RoundWordsToKB(stats_.after_.old_.external_in_words));
#endif  // defined(SUPPORT_TIMELINE)
}
Heap::Space Heap::SpaceForExternal(intptr_t size) const {
  // If 'size' would be a significant fraction of new space, then use old.
  const int kExtNewRatio = 16;
  if (size > (CapacityInWords(Heap::kNew) * kWordSize) / kExtNewRatio) {
    return Heap::kOld;
  }
  return Heap::kNew;
}

WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread)
    : ThreadStackResource(thread) {
  if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
    Dart::vm_isolate_group()->heap()->WriteProtect(false);
  }
}

WritableVMIsolateScope::~WritableVMIsolateScope() {
  if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
    Dart::vm_isolate_group()->heap()->WriteProtect(true);
  }
}
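// Usage sketch (hypothetical caller): code that must patch objects in the VM
// isolate wraps the mutation in a WritableVMIsolateScope so its pages are
// writable only for the duration of the scope:
//
//   {
//     WritableVMIsolateScope scope(Thread::Current());
//     // ... mutate VM-isolate objects ...
//   }  // Pages are write-protected again here.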