heap.cc
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <memory>
6#include <utility>
7
8#include "vm/heap/heap.h"
9
10#include "platform/assert.h"
11#include "platform/utils.h"
13#include "vm/dart.h"
14#include "vm/flags.h"
16#include "vm/heap/pages.h"
17#include "vm/heap/safepoint.h"
18#include "vm/heap/scavenger.h"
19#include "vm/heap/verifier.h"
20#include "vm/heap/weak_table.h"
21#include "vm/isolate.h"
22#include "vm/lockers.h"
23#include "vm/object.h"
24#include "vm/object_set.h"
25#include "vm/os.h"
26#include "vm/raw_object.h"
27#include "vm/service.h"
28#include "vm/service_event.h"
29#include "vm/service_isolate.h"
30#include "vm/stack_frame.h"
31#include "vm/tags.h"
32#include "vm/thread_pool.h"
33#include "vm/timeline.h"
34#include "vm/virtual_memory.h"
35
36namespace dart {
37
38DEFINE_FLAG(bool, write_protect_vm_isolate, true, "Write protect vm_isolate.");
39DEFINE_FLAG(bool,
40 disable_heap_verification,
41 false,
42 "Explicitly disable heap verification.");
43
44Heap::Heap(IsolateGroup* isolate_group,
45 bool is_vm_isolate,
46 intptr_t max_new_gen_semi_words,
47 intptr_t max_old_gen_words)
48 : isolate_group_(isolate_group),
49 is_vm_isolate_(is_vm_isolate),
50 new_space_(this, max_new_gen_semi_words),
51 old_space_(this, max_old_gen_words),
52 read_only_(false),
53 assume_scavenge_will_fail_(false),
54 gc_on_nth_allocation_(kNoForcedGarbageCollection) {
55 UpdateGlobalMaxUsed();
56 for (int sel = 0; sel < kNumWeakSelectors; sel++) {
57 new_weak_tables_[sel] = new WeakTable();
58 old_weak_tables_[sel] = new WeakTable();
59 }
60 stats_.num_ = 0;
61}
62
63Heap::~Heap() {
64#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
65 Dart_HeapSamplingDeleteCallback cleanup =
66 HeapProfileSampler::delete_callback();
67 if (cleanup != nullptr) {
68 new_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
69 old_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
70 }
71#endif
72
73 for (int sel = 0; sel < kNumWeakSelectors; sel++) {
74 delete new_weak_tables_[sel];
75 delete old_weak_tables_[sel];
76 }
77}
78
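// Summary: AllocateNew() is the runtime slow path taken when the generated
// code's TLAB fast path fails. It retries under a safepoint (another thread
// may already have scavenged), performs a scavenge, and finally falls back
// to old-space allocation below.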
79uword Heap::AllocateNew(Thread* thread, intptr_t size) {
80 ASSERT(thread->no_safepoint_scope_depth() == 0);
81 CollectForDebugging(thread);
82 uword addr = new_space_.TryAllocate(thread, size);
83 if (LIKELY(addr != 0)) {
84 return addr;
85 }
86 if (!assume_scavenge_will_fail_ && !thread->force_growth()) {
87 GcSafepointOperationScope safepoint_operation(thread);
88
89 // Another thread may have won the race to the safepoint and performed a GC
90 // before this thread acquired the safepoint. Retry the allocation under the
91 // safepoint to avoid back-to-back GC.
92 addr = new_space_.TryAllocate(thread, size);
93 if (addr != 0) {
94 return addr;
95 }
96
97 CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
98
99 addr = new_space_.TryAllocate(thread, size);
100 if (LIKELY(addr != 0)) {
101 return addr;
102 }
103 }
104
105 // It is possible a GC doesn't clear enough space.
106 // In that case, we must fall through and allocate into old space.
107 return AllocateOld(thread, size, /*exec*/ false);
108}
109
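// Summary: AllocateOld() escalates gradually: a plain attempt, waiting for
// concurrent sweepers, a retry under a safepoint, a mark-sweep, a
// forced-growth attempt, a mark-compact, and only then an out-of-memory
// report (returning 0).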
110uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
111 ASSERT(thread->no_safepoint_scope_depth() == 0);
112
113#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
114 if (HeapProfileSampler::enabled()) {
115 thread->heap_sampler().SampleOldSpaceAllocation(size);
116 }
117#endif
118
119 if (!thread->force_growth()) {
120 CollectForDebugging(thread);
121 uword addr = old_space_.TryAllocate(size, is_exec);
122 if (addr != 0) {
123 return addr;
124 }
125 // Wait for any GC tasks that are in progress.
126 WaitForSweeperTasks(thread);
127 addr = old_space_.TryAllocate(size, is_exec);
128 if (addr != 0) {
129 return addr;
130 }
131 GcSafepointOperationScope safepoint_operation(thread);
132 // Another thread may have won the race to the safepoint and performed a GC
133 // before this thread acquired the safepoint. Retry the allocation under the
134 // safepoint to avoid back-to-back GC.
135 addr = old_space_.TryAllocate(size, is_exec);
136 if (addr != 0) {
137 return addr;
138 }
139 // All GC tasks finished without allocating successfully. Collect both
140 // generations.
141 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kOldSpace);
142 addr = old_space_.TryAllocate(size, is_exec);
143 if (addr != 0) {
144 return addr;
145 }
146 // Wait for all of the concurrent tasks to finish before giving up.
147 WaitForSweeperTasksAtSafepoint(thread);
148 addr = old_space_.TryAllocate(size, is_exec);
149 if (addr != 0) {
150 return addr;
151 }
152 // Force growth before attempting another synchronous GC.
153 addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
154 if (addr != 0) {
155 return addr;
156 }
157 // Before throwing an out-of-memory error try a synchronous GC.
158 CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kOldSpace);
159 WaitForSweeperTasksAtSafepoint(thread);
160 }
161 uword addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
162 if (addr != 0) {
163 return addr;
164 }
165
166 if (!thread->force_growth()) {
167 WaitForSweeperTasks(thread);
168 old_space_.TryReleaseReservation();
169 } else {
170 // We may or may not be at a safepoint, so we don't know how to wait for the
171 // sweeper.
172 }
173
174 // Give up allocating this object.
175 OS::PrintErr("Exhausted heap space, trying to allocate %" Pd " bytes.\n",
176 size);
177 return 0;
178}
179
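// Summary: AllocatedExternal()/FreedExternal() only track memory owned
// outside the Dart heap (e.g. external typed data); they adjust the
// per-space accounting and may trigger a GC via CheckExternalGC() when
// callbacks and growth are currently allowed.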
180bool Heap::AllocatedExternal(intptr_t size, Space space) {
181 if (space == kNew) {
182 if (!new_space_.AllocatedExternal(size)) {
183 return false;
184 }
185 } else {
186 ASSERT(space == kOld);
187 if (!old_space_.AllocatedExternal(size)) {
188 return false;
189 }
190 }
191
192 Thread* thread = Thread::Current();
193 if ((thread->no_callback_scope_depth() == 0) && !thread->force_growth()) {
194 CheckExternalGC(thread);
195 } else {
196 // Check delayed until Dart_TypedDataRelease/~ForceGrowthScope.
197 }
198 return true;
199}
200
201void Heap::FreedExternal(intptr_t size, Space space) {
202 if (space == kNew) {
203 new_space_.FreedExternal(size);
204 } else {
205 ASSERT(space == kOld);
206 old_space_.FreedExternal(size);
207 }
208}
209
210void Heap::PromotedExternal(intptr_t size) {
211 new_space_.FreedExternal(size);
212 old_space_.AllocatedExternal(size);
213}
214
215void Heap::CheckExternalGC(Thread* thread) {
216 ASSERT(thread->no_safepoint_scope_depth() == 0);
217 ASSERT(thread->no_callback_scope_depth() == 0);
218 ASSERT(!thread->force_growth());
219
220 if (mode_ == Dart_PerformanceMode_Latency) {
221 return;
222 }
223
224 if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
225 // Attempt to free some external allocation by a scavenge. (If the total
226 // remains above the limit, next external alloc will trigger another.)
227 CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
228 // Promotion may have pushed old space over its limit. Fall through for old
229 // space GC check.
230 }
231
232 if (old_space_.ReachedHardThreshold()) {
233 CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
234 } else {
235 CheckConcurrentMarking(thread, GCReason::kExternal, 0);
236 }
237}
238
239bool Heap::Contains(uword addr) const {
240 return new_space_.Contains(addr) || old_space_.Contains(addr);
241}
242
243bool Heap::NewContains(uword addr) const {
244 return new_space_.Contains(addr);
245}
246
247bool Heap::OldContains(uword addr) const {
248 return old_space_.Contains(addr);
249}
250
251bool Heap::CodeContains(uword addr) const {
252 return old_space_.CodeContains(addr);
253}
254
255bool Heap::DataContains(uword addr) const {
256 return old_space_.DataContains(addr);
257}
258
259void Heap::VisitObjects(ObjectVisitor* visitor) {
260 new_space_.VisitObjects(visitor);
261 old_space_.VisitObjects(visitor);
262}
263
264void Heap::VisitObjectsNoImagePages(ObjectVisitor* visitor) {
265 new_space_.VisitObjects(visitor);
266 old_space_.VisitObjectsNoImagePages(visitor);
267}
268
269void Heap::VisitObjectsImagePages(ObjectVisitor* visitor) const {
270 old_space_.VisitObjectsImagePages(visitor);
271}
272
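// Summary: HeapIterationScope safepoints all mutator threads and claims
// exclusive ownership of old-space iteration (tasks() == 1, phase kDone), so
// no concurrent marking or sweeping can run while a visitor walks the heap;
// the destructor releases the claim and resumes the threads.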
273HeapIterationScope::HeapIterationScope(Thread* thread, bool writable)
274 : ThreadStackResource(thread),
275 heap_(isolate_group()->heap()),
276 old_space_(heap_->old_space()),
277 writable_(writable) {
278 isolate_group()->safepoint_handler()->SafepointThreads(thread,
279 SafepointLevel::kGC);
280
281 {
282 // It's not safe to iterate over old space when concurrent marking or
283 // sweeping is in progress, or another thread is iterating the heap, so wait
284 // for any such task to complete first.
285 MonitorLocker ml(old_space_->tasks_lock());
286#if defined(DEBUG)
287 // We currently don't support nesting of HeapIterationScopes.
288 ASSERT(old_space_->iterating_thread_ != thread);
289#endif
290 while ((old_space_->tasks() > 0) ||
291 (old_space_->phase() != PageSpace::kDone)) {
292 old_space_->AssistTasks(&ml);
293 if (old_space_->phase() == PageSpace::kAwaitingFinalization) {
294 ml.Exit();
295 heap_->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
296 GCReason::kFinalize);
297 ml.Enter();
298 }
299 while (old_space_->tasks() > 0) {
300 ml.Wait();
301 }
302 }
303#if defined(DEBUG)
304 ASSERT(old_space_->iterating_thread_ == nullptr);
305 old_space_->iterating_thread_ = thread;
306#endif
307 old_space_->set_tasks(1);
308 }
309
310 if (writable_) {
311 heap_->WriteProtectCode(false);
312 }
313}
314
315HeapIterationScope::~HeapIterationScope() {
316 if (writable_) {
317 heap_->WriteProtectCode(true);
318 }
319
320 {
321 MonitorLocker ml(old_space_->tasks_lock());
322#if defined(DEBUG)
323 ASSERT(old_space_->iterating_thread_ == thread());
324 old_space_->iterating_thread_ = nullptr;
325#endif
326 ASSERT(old_space_->tasks() == 1);
327 old_space_->set_tasks(0);
328 ml.NotifyAll();
329 }
330
331 isolate_group()->safepoint_handler()->ResumeThreads(thread(),
332 SafepointLevel::kGC);
333}
334
335void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const {
336 heap_->VisitObjects(visitor);
337}
338
339void HeapIterationScope::IterateObjectsNoImagePages(
340 ObjectVisitor* visitor) const {
341 heap_->new_space()->VisitObjects(visitor);
342 heap_->old_space()->VisitObjectsNoImagePages(visitor);
343}
344
345void HeapIterationScope::IterateOldObjects(ObjectVisitor* visitor) const {
346 old_space_->VisitObjects(visitor);
347}
348
349void HeapIterationScope::IterateOldObjectsNoImagePages(
350 ObjectVisitor* visitor) const {
351 old_space_->VisitObjectsNoImagePages(visitor);
352}
353
354void HeapIterationScope::IterateVMIsolateObjects(ObjectVisitor* visitor) const {
355 Dart::vm_isolate_group()->heap()->VisitObjects(visitor);
356}
357
358void HeapIterationScope::IterateObjectPointers(
359 ObjectPointerVisitor* visitor,
360 ValidationPolicy validate_frames) {
361 isolate_group()->VisitObjectPointers(visitor, validate_frames);
362}
363
364void HeapIterationScope::IterateStackPointers(
365 ObjectPointerVisitor* visitor,
366 ValidationPolicy validate_frames) {
367 isolate_group()->VisitStackPointers(visitor, validate_frames);
368}
369
370void Heap::VisitObjectPointers(ObjectPointerVisitor* visitor) {
371 new_space_.VisitObjectPointers(visitor);
372 old_space_.VisitObjectPointers(visitor);
373}
374
375void Heap::NotifyIdle(int64_t deadline) {
376 Thread* thread = Thread::Current();
377 TIMELINE_FUNCTION_GC_DURATION(thread, "NotifyIdle");
378 {
379 GcSafepointOperationScope safepoint_operation(thread);
380
381 // Check if we want to collect new-space first, because if we want to
382 // collect both new-space and old-space, the new-space collection should run
383 // first to shrink the root set (make old-space GC faster) and avoid
384 // intergenerational garbage (make old-space GC free more memory).
385 if (new_space_.ShouldPerformIdleScavenge(deadline)) {
386 CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kIdle);
387 }
388
389 // Check if we want to collect old-space, in decreasing order of cost.
390 // Because we use a deadline instead of a timeout, we automatically take any
391 // time used up by a scavenge into account when deciding if we can complete
392 // a mark-sweep on time.
393 if (old_space_.ShouldPerformIdleMarkCompact(deadline)) {
394 // We prefer mark-compact over other old space GCs if we have enough time,
395 // since it removes old space fragmentation and frees up most memory.
396 // Blocks for O(heap), roughly twice as costly as mark-sweep.
397 CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
398 } else if (old_space_.ReachedHardThreshold()) {
399 // Even though the following GC may exceed our idle deadline, we need to
400 // ensure that promotions during idle scavenges do not lead to
401 // unbounded growth of old space. If a program is allocating only in new
402 // space and all scavenges happen during idle time, then NotifyIdle will
403 // be the only place that checks the old space allocation limit.
404 // Compare the tail end of Heap::CollectNewSpaceGarbage.
405 // Blocks for O(heap).
406 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kIdle);
407 } else if (old_space_.ShouldStartIdleMarkSweep(deadline) ||
408 old_space_.ReachedSoftThreshold()) {
409 // If we have both work to do and enough time, start or finish GC.
410 // If we have crossed the soft threshold, ignore time; the next old-space
411 // allocation will trigger this work anyway, so we try to pay at least
412 // some of that cost with idle time.
413 // Blocks for O(roots).
414 PageSpace::Phase phase;
415 {
416 MonitorLocker ml(old_space_.tasks_lock());
417 phase = old_space_.phase();
418 }
419 if (phase == PageSpace::kAwaitingFinalization) {
420 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
421 } else if (phase == PageSpace::kDone) {
422 StartConcurrentMarking(thread, GCReason::kIdle);
423 }
424 }
425 }
426
427 if (FLAG_mark_when_idle) {
428 old_space_.IncrementalMarkWithTimeBudget(deadline);
429 }
430
431 if (OS::GetCurrentMonotonicMicros() < deadline) {
432 Page::ClearCache();
433 }
434}
435
436void Heap::NotifyDestroyed() {
437 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "NotifyDestroyed");
438 CollectAllGarbage(GCReason::kDestroyed, /*compact=*/true);
439 Page::ClearCache();
440}
441
442Dart_PerformanceMode Heap::SetMode(Dart_PerformanceMode new_mode) {
443 Dart_PerformanceMode old_mode = mode_.exchange(new_mode);
444 if ((old_mode == Dart_PerformanceMode_Latency) &&
445 (new_mode == Dart_PerformanceMode_Default)) {
446 CheckCatchUp(Thread::Current());
447 }
448 return old_mode;
449}
450
451void Heap::CollectNewSpaceGarbage(Thread* thread,
452 GCType type,
453 GCReason reason) {
454 NoActiveIsolateScope no_active_isolate_scope(thread);
455 ASSERT(reason != GCReason::kPromotion);
456 ASSERT(reason != GCReason::kFinalize);
457 if (thread->isolate_group() == Dart::vm_isolate_group()) {
458 // The vm isolate cannot safely collect garbage due to unvisited read-only
459 // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
460 // trigger a nice out-of-memory message instead of a crash in the middle of
461 // visiting pointers.
462 return;
463 }
464 {
465 GcSafepointOperationScope safepoint_operation(thread);
466 RecordBeforeGC(type, reason);
467 {
468 VMTagScope tagScope(thread, reason == GCReason::kIdle
469 ? VMTag::kGCIdleTagId
470 : VMTag::kGCNewSpaceTagId);
471 if (reason == GCReason::kStoreBuffer) {
472 // The remembered set may become too full, increasing the time of
473 // stop-the-world phases, if new-space or to-be-evacuated objects are
474 // pointed to by too many objects. This is resolved by evacuating
475 // new-space (so there are no old->new pointers) and aborting an
476 // incremental compaction (so there are no old->to-be-evacuated
477 // pointers). If we had separate remembered sets, we could do these
478 // actions separately.
479 GCIncrementalCompactor::Abort(&old_space_);
480 }
481 TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
482 new_space_.Scavenge(thread, type, reason);
483 RecordAfterGC(type);
484 PrintStats();
485#if defined(SUPPORT_TIMELINE)
486 PrintStatsToTimeline(&tbes, reason);
487#endif
488 }
489 if (type == GCType::kScavenge && reason == GCReason::kNewSpace) {
490 if (old_space_.ReachedHardThreshold()) {
491 CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
492 GCReason::kPromotion);
493 } else {
494 CheckConcurrentMarking(thread, GCReason::kPromotion, 0);
495 }
496 }
497 }
498}
499
500void Heap::CollectOldSpaceGarbage(Thread* thread,
501 GCType type,
502 GCReason reason) {
503 NoActiveIsolateScope no_active_isolate_scope(thread);
504
505 ASSERT(type != GCType::kScavenge);
506 ASSERT(reason != GCReason::kNewSpace);
507 ASSERT(reason != GCReason::kStoreBuffer);
508 if (FLAG_use_compactor) {
509 type = GCType::kMarkCompact;
510 }
511 if (thread->isolate_group() == Dart::vm_isolate_group()) {
512 // The vm isolate cannot safely collect garbage due to unvisited read-only
513 // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
514 // trigger a nice out-of-memory message instead of a crash in the middle of
515 // visiting pointers.
516 return;
517 }
518 {
519 GcSafepointOperationScope safepoint_operation(thread);
520 if (reason == GCReason::kFinalize) {
521 MonitorLocker ml(old_space_.tasks_lock());
522 if (old_space_.phase() != PageSpace::kAwaitingFinalization) {
523 return; // Lost race.
524 }
525 }
526
527 thread->isolate_group()->ForEachIsolate(
528 [&](Isolate* isolate) {
529 // Discard regexp backtracking stacks to further reduce memory usage.
530 isolate->CacheRegexpBacktrackStack(nullptr);
531 },
532 /*at_safepoint=*/true);
533
534 RecordBeforeGC(type, reason);
535 VMTagScope tagScope(thread, reason == GCReason::kIdle
536 ? VMTag::kGCIdleTagId
537 : VMTag::kGCOldSpaceTagId);
538 TIMELINE_FUNCTION_GC_DURATION(thread, "CollectOldGeneration");
539 old_space_.CollectGarbage(thread, /*compact=*/type == GCType::kMarkCompact,
540 /*finalize=*/true);
541 RecordAfterGC(type);
542 PrintStats();
543#if defined(SUPPORT_TIMELINE)
544 PrintStatsToTimeline(&tbes, reason);
545#endif
546
547 // Some Code objects may have been collected so invalidate handler cache.
548 thread->isolate_group()->ForEachIsolate(
549 [&](Isolate* isolate) {
550 isolate->handler_info_cache()->Clear();
551 isolate->catch_entry_moves_cache()->Clear();
552 },
553 /*at_safepoint=*/true);
554 assume_scavenge_will_fail_ = false;
555 }
556}
557
558void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) {
559 switch (type) {
560 case GCType::kScavenge:
561 case GCType::kEvacuate:
562 CollectNewSpaceGarbage(thread, type, reason);
563 break;
564 case GCType::kMarkSweep:
565 case GCType::kMarkCompact:
566 CollectOldSpaceGarbage(thread, type, reason);
567 break;
568 default:
569 UNREACHABLE();
570 }
571}
572
573void Heap::CollectAllGarbage(GCReason reason, bool compact) {
574 Thread* thread = Thread::Current();
575 if (thread->is_marking()) {
576 // If incremental marking is happening, we need to finish the GC cycle
577 // and perform a follow-up GC to purge any "floating garbage" that may be
578 // retained by the incremental barrier.
579 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, reason);
580 }
581 CollectOldSpaceGarbage(
582 thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
583}
584
585void Heap::CheckCatchUp(Thread* thread) {
586 ASSERT(!thread->force_growth());
587 if (old_space()->ReachedHardThreshold()) {
588 CollectGarbage(thread, GCType::kMarkSweep, GCReason::kCatchUp);
589 } else {
590 CheckConcurrentMarking(thread, GCReason::kCatchUp, 0);
591 }
592}
593
594void Heap::CheckConcurrentMarking(Thread* thread,
595 GCReason reason,
596 intptr_t size) {
597 ASSERT(!thread->force_growth());
598
599 PageSpace::Phase phase;
600 {
601 MonitorLocker ml(old_space_.tasks_lock());
602 phase = old_space_.phase();
603 }
604
605 switch (phase) {
606 case PageSpace::kMarking:
607 if (mode_ != Dart_PerformanceMode_Latency) {
608 old_space_.IncrementalMarkWithSizeBudget(size);
609 }
610 return;
611 case PageSpace::kSweepingLarge:
612 case PageSpace::kSweepingRegular:
613 return; // Busy.
614 case PageSpace::kAwaitingFinalization:
615 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
616 return;
617 case PageSpace::kDone:
618 if (old_space_.ReachedSoftThreshold()) {
619 StartConcurrentMarking(thread, reason);
620 }
621 return;
622 default:
623 UNREACHABLE();
624 }
625}
626
627void Heap::CheckFinalizeMarking(Thread* thread) {
628 ASSERT(!thread->force_growth());
629
630 PageSpace::Phase phase;
631 {
632 MonitorLocker ml(old_space_.tasks_lock());
633 phase = old_space_.phase();
634 }
635
636 if (phase == PageSpace::kAwaitingFinalization) {
637 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
638 }
639}
640
641void Heap::StartConcurrentMarking(Thread* thread, GCReason reason) {
642 GcSafepointOperationScope safepoint_operation(thread);
643 RecordBeforeGC(GCType::kStartConcurrentMark, reason);
644 VMTagScope tagScope(thread, reason == GCReason::kIdle
645 ? VMTag::kGCIdleTagId
646 : VMTag::kGCOldSpaceTagId);
647 TIMELINE_FUNCTION_GC_DURATION(thread, "StartConcurrentMarking");
648 old_space_.CollectGarbage(thread, /*compact=*/false, /*finalize=*/false);
649 RecordAfterGC(GCType::kStartConcurrentMark);
650 PrintStats();
651#if defined(SUPPORT_TIMELINE)
652 PrintStatsToTimeline(&tbes, reason);
653#endif
654}
655
656void Heap::WaitForMarkerTasks(Thread* thread) {
657 MonitorLocker ml(old_space_.tasks_lock());
658 while ((old_space_.phase() == PageSpace::kMarking) ||
659 (old_space_.phase() == PageSpace::kAwaitingFinalization)) {
660 while (old_space_.phase() == PageSpace::kMarking) {
661 ml.WaitWithSafepointCheck(thread);
662 }
663 if (old_space_.phase() == PageSpace::kAwaitingFinalization) {
664 ml.Exit();
665 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
666 ml.Enter();
667 }
668 }
669}
670
671void Heap::WaitForSweeperTasks(Thread* thread) {
672 ASSERT(!thread->OwnsGCSafepoint());
673 MonitorLocker ml(old_space_.tasks_lock());
674 while ((old_space_.phase() == PageSpace::kSweepingLarge) ||
675 (old_space_.phase() == PageSpace::kSweepingRegular)) {
676 ml.WaitWithSafepointCheck(thread);
677 }
678}
679
680void Heap::WaitForSweeperTasksAtSafepoint(Thread* thread) {
681 ASSERT(thread->OwnsGCSafepoint());
682 MonitorLocker ml(old_space_.tasks_lock());
683 while ((old_space_.phase() == PageSpace::kSweepingLarge) ||
684 (old_space_.phase() == PageSpace::kSweepingRegular)) {
685 ml.Wait();
686 }
687}
688
689void Heap::UpdateGlobalMaxUsed() {
690 ASSERT(isolate_group_ != nullptr);
691 // We are accessing the used in words count for both new and old space
692 // without synchronizing. The value of this metric is approximate.
693 isolate_group_->GetHeapGlobalUsedMaxMetric()->SetValue(
694 (UsedInWords(Heap::kNew) * kWordSize) +
695 (UsedInWords(Heap::kOld) * kWordSize));
696}
697
698void Heap::WriteProtect(bool read_only) {
699 read_only_ = read_only;
700 new_space_.WriteProtect(read_only);
701 old_space_.WriteProtect(read_only);
702}
703
704void Heap::Init(IsolateGroup* isolate_group,
705 bool is_vm_isolate,
706 intptr_t max_new_gen_words,
707 intptr_t max_old_gen_words) {
708 ASSERT(isolate_group->heap() == nullptr);
709 std::unique_ptr<Heap> heap(new Heap(isolate_group, is_vm_isolate,
710 max_new_gen_words, max_old_gen_words));
711 isolate_group->set_heap(std::move(heap));
712}
713
714void Heap::AddRegionsToObjectSet(ObjectSet* set) const {
715 new_space_.AddRegionsToObjectSet(set);
716 old_space_.AddRegionsToObjectSet(set);
717 set->SortRegions();
718}
719
720void Heap::CollectOnNthAllocation(intptr_t num_allocations) {
721 // Prevent generated code from using the TLAB fast path on next allocation.
722 new_space_.AbandonRemainingTLABForDebugging(Thread::Current());
723 gc_on_nth_allocation_ = num_allocations;
724}
725
726void Heap::CollectForDebugging(Thread* thread) {
727 if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return;
728 if (thread->OwnsGCSafepoint()) {
729 // CollectAllGarbage is not supported when we are at a safepoint.
730 // Allocating when at a safepoint is not a common case.
731 return;
732 }
733 gc_on_nth_allocation_--;
734 if (gc_on_nth_allocation_ == 0) {
735 CollectAllGarbage(GCReason::kDebugging);
736 gc_on_nth_allocation_ = kNoForcedGarbageCollection;
737 } else {
738 // Prevent generated code from using the TLAB fast path on next allocation.
739 new_space_.AbandonRemainingTLABForDebugging(thread);
740 }
741}
742
743ObjectSet* Heap::CreateAllocatedObjectSet(Zone* zone,
744 MarkExpectation mark_expectation) {
745 ObjectSet* allocated_set = new (zone) ObjectSet(zone);
746
747 this->AddRegionsToObjectSet(allocated_set);
748 Isolate* vm_isolate = Dart::vm_isolate();
749 vm_isolate->group()->heap()->AddRegionsToObjectSet(allocated_set);
750
751 {
752 VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
753 mark_expectation);
754 this->VisitObjectsNoImagePages(&object_visitor);
755 }
756 {
757 VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
758 kRequireMarked);
759 this->VisitObjectsImagePages(&object_visitor);
760 }
761 {
762 // VM isolate heap is premarked.
763 VerifyObjectVisitor vm_object_visitor(isolate_group(), allocated_set,
764 kRequireMarked);
765 vm_isolate->group()->heap()->VisitObjects(&vm_object_visitor);
766 }
767
768 return allocated_set;
769}
770
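// Summary: Verify()/VerifyGC() build an ObjectSet describing every region of
// this heap and of the VM isolate's heap, then check that each visited object
// pointer refers to an address within that set (see VerifyPointersVisitor).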
771bool Heap::Verify(const char* msg, MarkExpectation mark_expectation) {
772 if (FLAG_disable_heap_verification) {
773 return true;
774 }
775 HeapIterationScope heap_iteration_scope(Thread::Current());
776 return VerifyGC(msg, mark_expectation);
777}
778
779bool Heap::VerifyGC(const char* msg, MarkExpectation mark_expectation) {
780 ASSERT(msg != nullptr);
781 auto thread = Thread::Current();
782 StackZone stack_zone(thread);
783
784 ObjectSet* allocated_set =
785 CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
786 VerifyPointersVisitor visitor(isolate_group(), allocated_set, msg);
787 VisitObjectPointers(&visitor);
788
789 // Only returning a value so that Heap::Verify can be called from an ASSERT.
790 return true;
791}
792
793void Heap::PrintSizes() const {
794 OS::PrintErr(
795 "New space (%" Pd "k of %" Pd
796 "k) "
797 "Old space (%" Pd "k of %" Pd "k)\n",
798 (UsedInWords(kNew) / KBInWords), (CapacityInWords(kNew) / KBInWords),
799 (UsedInWords(kOld) / KBInWords), (CapacityInWords(kOld) / KBInWords));
800}
801
802intptr_t Heap::UsedInWords(Space space) const {
803 return space == kNew ? new_space_.UsedInWords() : old_space_.UsedInWords();
804}
805
806intptr_t Heap::CapacityInWords(Space space) const {
807 return space == kNew ? new_space_.CapacityInWords()
808 : old_space_.CapacityInWords();
809}
810
811intptr_t Heap::ExternalInWords(Space space) const {
812 return space == kNew ? new_space_.ExternalInWords()
813 : old_space_.ExternalInWords();
814}
815
816intptr_t Heap::TotalUsedInWords() const {
817 return UsedInWords(kNew) + UsedInWords(kOld);
818}
819
820intptr_t Heap::TotalCapacityInWords() const {
821 return CapacityInWords(kNew) + CapacityInWords(kOld);
822}
823
824intptr_t Heap::TotalExternalInWords() const {
825 return ExternalInWords(kNew) + ExternalInWords(kOld);
826}
827
828int64_t Heap::GCTimeInMicros(Space space) const {
829 if (space == kNew) {
830 return new_space_.gc_time_micros();
831 }
832 return old_space_.gc_time_micros();
833}
834
835intptr_t Heap::Collections(Space space) const {
836 if (space == kNew) {
837 return new_space_.collections();
838 }
839 return old_space_.collections();
840}
841
842const char* Heap::GCTypeToString(GCType type) {
843 switch (type) {
844 case GCType::kScavenge:
845 return "Scavenge";
846 case GCType::kEvacuate:
847 return "Evacuate";
848 case GCType::kStartConcurrentMark:
849 return "StartCMark";
850 case GCType::kMarkSweep:
851 return "MarkSweep";
852 case GCType::kMarkCompact:
853 return "MarkCompact";
854 default:
855 UNREACHABLE();
856 return "";
857 }
858}
859
860const char* Heap::GCReasonToString(GCReason gc_reason) {
861 switch (gc_reason) {
862 case GCReason::kNewSpace:
863 return "new space";
864 case GCReason::kStoreBuffer:
865 return "store buffer";
866 case GCReason::kPromotion:
867 return "promotion";
868 case GCReason::kOldSpace:
869 return "old space";
870 case GCReason::kFinalize:
871 return "finalize";
872 case GCReason::kFull:
873 return "full";
874 case GCReason::kExternal:
875 return "external";
876 case GCReason::kIdle:
877 return "idle";
878 case GCReason::kDestroyed:
879 return "destroyed";
880 case GCReason::kDebugging:
881 return "debugging";
882 case GCReason::kCatchUp:
883 return "catch-up";
884 default:
885 UNREACHABLE();
886 return "";
887 }
888}
889
890int64_t Heap::PeerCount() const {
891 return new_weak_tables_[kPeers]->count() + old_weak_tables_[kPeers]->count();
892}
893
894void Heap::ResetCanonicalHashTable() {
895 new_weak_tables_[kCanonicalHashes]->Reset();
896 old_weak_tables_[kCanonicalHashes]->Reset();
897}
898
899void Heap::ResetObjectIdTable() {
900 new_weak_tables_[kObjectIds]->Reset();
901 old_weak_tables_[kObjectIds]->Reset();
902}
903
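// Summary: Weak-table entries (peers, hashes, object ids, ...) are keyed by
// the object's current address, so separate tables are kept for new and old
// space; entries are moved by ForwardWeakEntries()/ForwardWeakTables() when
// objects are promoted or compacted.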
904intptr_t Heap::GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const {
905 if (raw_obj->IsImmediateOrOldObject()) {
906 return old_weak_tables_[sel]->GetValue(raw_obj);
907 } else {
908 return new_weak_tables_[sel]->GetValue(raw_obj);
909 }
910}
911
912void Heap::SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val) {
913 if (raw_obj->IsImmediateOrOldObject()) {
914 old_weak_tables_[sel]->SetValue(raw_obj, val);
915 } else {
916 new_weak_tables_[sel]->SetValue(raw_obj, val);
917 }
918}
919
920intptr_t Heap::SetWeakEntryIfNonExistent(ObjectPtr raw_obj,
921 WeakSelector sel,
922 intptr_t val) {
923 if (raw_obj->IsImmediateOrOldObject()) {
924 return old_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
925 } else {
926 return new_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
927 }
928}
929
930void Heap::ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object) {
931 const auto before_space =
932 before_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;
933 const auto after_space =
934 after_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;
935
936 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
937 const auto selector = static_cast<Heap::WeakSelector>(sel);
938 auto before_table = GetWeakTable(before_space, selector);
939 intptr_t entry = before_table->RemoveValueExclusive(before_object);
940 if (entry != 0) {
941 auto after_table = GetWeakTable(after_space, selector);
942 after_table->SetValueExclusive(after_object, entry);
943 }
944 }
945
946 isolate_group()->ForEachIsolate(
947 [&](Isolate* isolate) {
948 auto before_table = before_object->IsImmediateOrOldObject()
949 ? isolate->forward_table_old()
950 : isolate->forward_table_new();
951 if (before_table != nullptr) {
952 intptr_t entry = before_table->RemoveValueExclusive(before_object);
953 if (entry != 0) {
954 auto after_table = after_object->IsImmediateOrOldObject()
955 ? isolate->forward_table_old()
956 : isolate->forward_table_new();
957 ASSERT(after_table != nullptr);
958 after_table->SetValueExclusive(after_object, entry);
959 }
960 }
961 },
962 /*at_safepoint=*/true);
963}
964
965void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) {
966 // NOTE: This method is only used by the compactor, so there is no need to
967 // process the `Heap::kNew` tables.
968 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
969 WeakSelector selector = static_cast<Heap::WeakSelector>(sel);
970 GetWeakTable(Heap::kOld, selector)->Forward(visitor);
971 }
972
973 // Isolates might have forwarding tables (used during snapshotting for
974 // isolate communication).
975 isolate_group()->ForEachIsolate(
976 [&](Isolate* isolate) {
977 auto table_old = isolate->forward_table_old();
978 if (table_old != nullptr) table_old->Forward(visitor);
979 },
980 /*at_safepoint=*/true);
981}
982
983#ifndef PRODUCT
984void Heap::PrintToJSONObject(Space space, JSONObject* object) const {
985 if (space == kNew) {
986 new_space_.PrintToJSONObject(object);
987 } else {
988 old_space_.PrintToJSONObject(object);
989 }
990}
991
992void Heap::PrintMemoryUsageJSON(JSONStream* stream) const {
993 JSONObject obj(stream);
994 PrintMemoryUsageJSON(&obj);
995}
996
997void Heap::PrintMemoryUsageJSON(JSONObject* jsobj) const {
998 jsobj->AddProperty("type", "MemoryUsage");
999 jsobj->AddProperty64("heapUsage", TotalUsedInWords() * kWordSize);
1000 jsobj->AddProperty64("heapCapacity", TotalCapacityInWords() * kWordSize);
1001 jsobj->AddProperty64("externalUsage", TotalExternalInWords() * kWordSize);
1002}
1003#endif // PRODUCT
1004
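// Summary: RecordBeforeGC()/RecordAfterGC() snapshot space usage and
// store-buffer size around every collection; the deltas feed PrintStats(),
// PrintStatsToTimeline(), and (in non-PRODUCT builds) the service GC events.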
1005void Heap::RecordBeforeGC(GCType type, GCReason reason) {
1006 stats_.num_++;
1007 stats_.type_ = type;
1008 stats_.reason_ = reason;
1009 stats_.before_.micros_ = OS::GetCurrentMonotonicMicros();
1010 stats_.before_.new_ = new_space_.GetCurrentUsage();
1011 stats_.before_.old_ = old_space_.GetCurrentUsage();
1012 stats_.before_.store_buffer_ = isolate_group_->store_buffer()->Size();
1013}
1014
1015void Heap::RecordAfterGC(GCType type) {
1016 stats_.after_.micros_ = OS::GetCurrentMonotonicMicros();
1017 int64_t delta = stats_.after_.micros_ - stats_.before_.micros_;
1018 if (stats_.type_ == GCType::kScavenge) {
1019 new_space_.AddGCTime(delta);
1020 new_space_.IncrementCollections();
1021 } else {
1022 old_space_.AddGCTime(delta);
1023 old_space_.IncrementCollections();
1024 }
1025 stats_.after_.new_ = new_space_.GetCurrentUsage();
1026 stats_.after_.old_ = old_space_.GetCurrentUsage();
1027 stats_.after_.store_buffer_ = isolate_group_->store_buffer()->Size();
1028#ifndef PRODUCT
1029 // For now we'll emit the same GC events on all isolates.
1030 if (Service::gc_stream.enabled()) {
1031 isolate_group_->ForEachIsolate(
1032 [&](Isolate* isolate) {
1033 if (!Isolate::IsSystemIsolate(isolate)) {
1034 ServiceEvent event(isolate, ServiceEvent::kGC);
1035 event.set_gc_stats(&stats_);
1036 Service::HandleEvent(&event, /*enter_safepoint*/ false);
1037 }
1038 },
1039 /*at_safepoint=*/true);
1040 }
1041#endif // !PRODUCT
1042}
1043
1044void Heap::PrintStats() {
1045 if (!FLAG_verbose_gc) return;
1046
1047 if ((FLAG_verbose_gc_hdr != 0) &&
1048 (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) {
1049 OS::PrintErr(
1050 "[ | | | | | new "
1051 "gen | new gen | new gen | old gen | old gen | old "
1052 "gen | store | delta used ]\n"
1053 "[ GC isolate | space (reason) | GC# | start | time | used "
1054 "(MB) | capacity MB | external| used (MB) | capacity (MB) | "
1055 "external MB | buffer | new | old ]\n"
1056 "[ | | | (s) | (ms) "
1057 "|before| after|before| after| b4 |aftr| before| after | before| after "
1058 "|before| after| b4 |aftr| (MB) | (MB) ]\n");
1059 }
1060
1061 // clang-format off
1062 OS::PrintErr(
1063 "[ %-13.13s, %11s(%12s), " // GC(isolate-group), type(reason)
1064 "%4" Pd ", " // count
1065 "%6.2f, " // start time
1066 "%5.1f, " // total time
1067 "%5.1f, %5.1f, " // new gen: in use before/after
1068 "%5.1f, %5.1f, " // new gen: capacity before/after
1069 "%3.1f, %3.1f, " // new gen: external before/after
1070 "%6.1f, %6.1f, " // old gen: in use before/after
1071 "%6.1f, %6.1f, " // old gen: capacity before/after
1072 "%5.1f, %5.1f, " // old gen: external before/after
1073 "%3" Pd ", %3" Pd ", " // store buffer: before/after
1074 "%5.1f, %6.1f, " // delta used: new gen/old gen
1075 "]\n", // End with a comma to make it easier to import in spreadsheets.
1076 isolate_group()->source()->name,
1077 GCTypeToString(stats_.type_),
1078 GCReasonToString(stats_.reason_),
1079 stats_.num_,
1080 MicrosecondsToSeconds(isolate_group_->UptimeMicros()),
1081 MicrosecondsToMilliseconds(stats_.after_.micros_ -
1082 stats_.before_.micros_),
1083 WordsToMB(stats_.before_.new_.used_in_words),
1084 WordsToMB(stats_.after_.new_.used_in_words),
1085 WordsToMB(stats_.before_.new_.capacity_in_words),
1086 WordsToMB(stats_.after_.new_.capacity_in_words),
1087 WordsToMB(stats_.before_.new_.external_in_words),
1088 WordsToMB(stats_.after_.new_.external_in_words),
1089 WordsToMB(stats_.before_.old_.used_in_words),
1090 WordsToMB(stats_.after_.old_.used_in_words),
1091 WordsToMB(stats_.before_.old_.capacity_in_words),
1092 WordsToMB(stats_.after_.old_.capacity_in_words),
1093 WordsToMB(stats_.before_.old_.external_in_words),
1094 WordsToMB(stats_.after_.old_.external_in_words),
1095 stats_.before_.store_buffer_,
1096 stats_.after_.store_buffer_,
1097 WordsToMB(stats_.after_.new_.used_in_words -
1098 stats_.before_.new_.used_in_words),
1099 WordsToMB(stats_.after_.old_.used_in_words -
1100 stats_.before_.old_.used_in_words));
1101 // clang-format on
1102}
1103
1104void Heap::PrintStatsToTimeline(TimelineEventScope* event, GCReason reason) {
1105#if defined(SUPPORT_TIMELINE)
1106 if ((event == nullptr) || !event->enabled()) {
1107 return;
1108 }
1109 intptr_t arguments = event->GetNumArguments();
1110 event->SetNumArguments(arguments + 13);
1111 event->CopyArgument(arguments + 0, "Reason", GCReasonToString(reason));
1112 event->FormatArgument(arguments + 1, "Before.New.Used (kB)", "%" Pd "",
1113 RoundWordsToKB(stats_.before_.new_.used_in_words));
1114 event->FormatArgument(arguments + 2, "After.New.Used (kB)", "%" Pd "",
1115 RoundWordsToKB(stats_.after_.new_.used_in_words));
1116 event->FormatArgument(arguments + 3, "Before.Old.Used (kB)", "%" Pd "",
1117 RoundWordsToKB(stats_.before_.old_.used_in_words));
1118 event->FormatArgument(arguments + 4, "After.Old.Used (kB)", "%" Pd "",
1119 RoundWordsToKB(stats_.after_.old_.used_in_words));
1120
1121 event->FormatArgument(arguments + 5, "Before.New.Capacity (kB)", "%" Pd "",
1122 RoundWordsToKB(stats_.before_.new_.capacity_in_words));
1123 event->FormatArgument(arguments + 6, "After.New.Capacity (kB)", "%" Pd "",
1124 RoundWordsToKB(stats_.after_.new_.capacity_in_words));
1125 event->FormatArgument(arguments + 7, "Before.Old.Capacity (kB)", "%" Pd "",
1126 RoundWordsToKB(stats_.before_.old_.capacity_in_words));
1127 event->FormatArgument(arguments + 8, "After.Old.Capacity (kB)", "%" Pd "",
1128 RoundWordsToKB(stats_.after_.old_.capacity_in_words));
1129
1130 event->FormatArgument(arguments + 9, "Before.New.External (kB)", "%" Pd "",
1131 RoundWordsToKB(stats_.before_.new_.external_in_words));
1132 event->FormatArgument(arguments + 10, "After.New.External (kB)", "%" Pd "",
1133 RoundWordsToKB(stats_.after_.new_.external_in_words));
1134 event->FormatArgument(arguments + 11, "Before.Old.External (kB)", "%" Pd "",
1135 RoundWordsToKB(stats_.before_.old_.external_in_words));
1136 event->FormatArgument(arguments + 12, "After.Old.External (kB)", "%" Pd "",
1137 RoundWordsToKB(stats_.after_.old_.external_in_words));
1138#endif // defined(SUPPORT_TIMELINE)
1139}
1140
1141Heap::Space Heap::SpaceForExternal(intptr_t size) const {
1142 // If 'size' would be a significant fraction of new space, then use old.
1143 const int kExtNewRatio = 16;
1144 if (size > (new_space_.ThresholdInWords() * kWordSize) / kExtNewRatio) {
1145 return Heap::kOld;
1146 } else {
1147 return Heap::kNew;
1148 }
1149}
1150
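// Summary: While a ForceGrowthScope is active, Thread::force_growth() is
// true, so the allocation and external-allocation paths above grow the heap
// instead of triggering a GC; the deferred external-GC check happens later
// (see the comment in AllocatedExternal above).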
1151ForceGrowthScope::ForceGrowthScope(Thread* thread)
1152 : ThreadStackResource(thread) {
1153 thread->IncrementForceGrowthScopeDepth();
1154}
1155
1156ForceGrowthScope::~ForceGrowthScope() {
1157 thread()->DecrementForceGrowthScopeDepth();
1158}
1159
1160WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread)
1161 : ThreadStackResource(thread) {
1162 if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
1163 Dart::vm_isolate_group()->heap()->WriteProtect(false);
1164 }
1165}
1166
1167WritableVMIsolateScope::~WritableVMIsolateScope() {
1168 ASSERT(Dart::vm_isolate_group()->heap()->UsedInWords(Heap::kNew) == 0);
1169 if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
1170 Dart::vm_isolate_group()->heap()->WriteProtect(true);
1171 }
1172}
1173
1174WritableCodePages::WritableCodePages(Thread* thread,
1175 IsolateGroup* isolate_group)
1176 : StackResource(thread), isolate_group_(isolate_group) {
1177 isolate_group_->heap()->WriteProtectCode(false);
1178}
1179
1180WritableCodePages::~WritableCodePages() {
1181 isolate_group_->heap()->WriteProtectCode(true);
1182}
1183
1184} // namespace dart