Flutter Engine
heap.cc
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include <memory>
6#include <utility>
7
8#include "vm/heap/heap.h"
9
10#include "platform/assert.h"
11#include "platform/utils.h"
13#include "vm/dart.h"
14#include "vm/flags.h"
15#include "vm/heap/pages.h"
16#include "vm/heap/safepoint.h"
17#include "vm/heap/scavenger.h"
18#include "vm/heap/verifier.h"
19#include "vm/heap/weak_table.h"
20#include "vm/isolate.h"
21#include "vm/lockers.h"
22#include "vm/object.h"
23#include "vm/object_set.h"
24#include "vm/os.h"
25#include "vm/raw_object.h"
26#include "vm/service.h"
27#include "vm/service_event.h"
28#include "vm/service_isolate.h"
29#include "vm/stack_frame.h"
30#include "vm/tags.h"
31#include "vm/thread_pool.h"
32#include "vm/timeline.h"
33#include "vm/virtual_memory.h"
34
35namespace dart {
36
37DEFINE_FLAG(bool, write_protect_vm_isolate, true, "Write protect vm_isolate.");
38DEFINE_FLAG(bool,
39 disable_heap_verification,
40 false,
41 "Explicitly disable heap verification.");
42
43Heap::Heap(IsolateGroup* isolate_group,
44 bool is_vm_isolate,
45 intptr_t max_new_gen_semi_words,
46 intptr_t max_old_gen_words)
47 : isolate_group_(isolate_group),
48 is_vm_isolate_(is_vm_isolate),
49 new_space_(this, max_new_gen_semi_words),
50 old_space_(this, max_old_gen_words),
51 read_only_(false),
52 assume_scavenge_will_fail_(false),
53 gc_on_nth_allocation_(kNoForcedGarbageCollection) {
54 UpdateGlobalMaxUsed();
55 for (int sel = 0; sel < kNumWeakSelectors; sel++) {
56 new_weak_tables_[sel] = new WeakTable();
57 old_weak_tables_[sel] = new WeakTable();
58 }
59 stats_.num_ = 0;
60}
61
62Heap::~Heap() {
63#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
64 Dart_HeapSamplingDeleteCallback cleanup =
65 HeapProfileSampler::delete_callback();
66 if (cleanup != nullptr) {
67 new_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
68 old_weak_tables_[kHeapSamplingData]->CleanupValues(cleanup);
69 }
70#endif
71
72 for (int sel = 0; sel < kNumWeakSelectors; sel++) {
73 delete new_weak_tables_[sel];
74 delete old_weak_tables_[sel];
75 }
76}
77
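// New-space allocation: try the bump/TLAB fast path first, then retry under
// a safepoint (another thread may already have collected), scavenge and retry
// once more, and finally fall back to old-space allocation.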
78uword Heap::AllocateNew(Thread* thread, intptr_t size) {
79 ASSERT(thread->no_safepoint_scope_depth() == 0);
80 CollectForDebugging(thread);
81 uword addr = new_space_.TryAllocate(thread, size);
82 if (LIKELY(addr != 0)) {
83 return addr;
84 }
85 if (!assume_scavenge_will_fail_ && !thread->force_growth()) {
86 GcSafepointOperationScope safepoint_operation(thread);
87
88 // Another thread may have won the race to the safepoint and performed a GC
89 // before this thread acquired the safepoint. Retry the allocation under the
90 // safepoint to avoid back-to-back GC.
91 addr = new_space_.TryAllocate(thread, size);
92 if (addr != 0) {
93 return addr;
94 }
95
96 CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
97
98 addr = new_space_.TryAllocate(thread, size);
99 if (LIKELY(addr != 0)) {
100 return addr;
101 }
102 }
103
104 // It is possible a GC doesn't clear enough space.
105 // In that case, we must fall through and allocate into old space.
106 return AllocateOld(thread, size, /*exec*/ false);
107}
108
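// Old-space allocation escalates through increasingly expensive steps: a
// plain attempt, waiting for concurrent sweepers, retrying under a safepoint,
// a mark-sweep, forced growth, and a mark-compact, before returning 0 to
// signal that the heap is exhausted.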
109uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
110 ASSERT(thread->no_safepoint_scope_depth() == 0);
111
112#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
113 if (HeapProfileSampler::enabled()) {
114 thread->heap_sampler().SampleOldSpaceAllocation(size);
115 }
116#endif
117
118 if (!thread->force_growth()) {
119 CollectForDebugging(thread);
120 uword addr = old_space_.TryAllocate(size, is_exec);
121 if (addr != 0) {
122 return addr;
123 }
124 // Wait for any GC tasks that are in progress.
125 WaitForSweeperTasks(thread);
126 addr = old_space_.TryAllocate(size, is_exec);
127 if (addr != 0) {
128 return addr;
129 }
130 GcSafepointOperationScope safepoint_operation(thread);
131 // Another thread may have won the race to the safepoint and performed a GC
132 // before this thread acquired the safepoint. Retry the allocation under the
133 // safepoint to avoid back-to-back GC.
134 addr = old_space_.TryAllocate(size, is_exec);
135 if (addr != 0) {
136 return addr;
137 }
138 // All GC tasks finished without allocating successfully. Collect both
139 // generations.
140 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kOldSpace);
141 addr = old_space_.TryAllocate(size, is_exec);
142 if (addr != 0) {
143 return addr;
144 }
145 // Wait for all of the concurrent tasks to finish before giving up.
146 WaitForSweeperTasksAtSafepoint(thread);
147 addr = old_space_.TryAllocate(size, is_exec);
148 if (addr != 0) {
149 return addr;
150 }
151 // Force growth before attempting another synchronous GC.
152 addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
153 if (addr != 0) {
154 return addr;
155 }
156 // Before throwing an out-of-memory error try a synchronous GC.
157 CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kOldSpace);
158 WaitForSweeperTasksAtSafepoint(thread);
159 }
160 uword addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
161 if (addr != 0) {
162 return addr;
163 }
164
165 if (!thread->force_growth()) {
166 WaitForSweeperTasks(thread);
167 old_space_.TryReleaseReservation();
168 } else {
169 // We may or may not be at a safepoint, so we don't know how to wait for the
170 // sweeper.
171 }
172
173 // Give up allocating this object.
174 OS::PrintErr("Exhausted heap space, trying to allocate %" Pd " bytes.\n",
175 size);
176 return 0;
177}
178
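// Accounts external (native) memory against the given space. Returns false
// if the space refuses the additional external size; otherwise the allocation
// may trigger a GC via CheckExternalGC unless growth is currently forced or
// callbacks are disallowed.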
179bool Heap::AllocatedExternal(intptr_t size, Space space) {
180 if (space == kNew) {
181 if (!new_space_.AllocatedExternal(size)) {
182 return false;
183 }
184 } else {
185 ASSERT(space == kOld);
186 if (!old_space_.AllocatedExternal(size)) {
187 return false;
188 }
189 }
190
191 Thread* thread = Thread::Current();
192 if ((thread->no_callback_scope_depth() == 0) && !thread->force_growth()) {
193 CheckExternalGC(thread);
194 } else {
195 // Check delayed until Dart_TypedDataRelease/~ForceGrowthScope.
196 }
197 return true;
198}
199
200void Heap::FreedExternal(intptr_t size, Space space) {
201 if (space == kNew) {
202 new_space_.FreedExternal(size);
203 } else {
204 ASSERT(space == kOld);
205 old_space_.FreedExternal(size);
206 }
207}
208
209void Heap::PromotedExternal(intptr_t size) {
210 new_space_.FreedExternal(size);
211 old_space_.AllocatedExternal(size);
212}
213
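// Responds to external allocation pressure: does nothing in latency mode,
// scavenges when external data dwarfs new-space capacity, and either collects
// old space or checks concurrent marking depending on its thresholds.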
214void Heap::CheckExternalGC(Thread* thread) {
215 ASSERT(thread->no_safepoint_scope_depth() == 0);
216 ASSERT(thread->no_callback_scope_depth() == 0);
217 ASSERT(!thread->force_growth());
218
219 if (mode_ == Dart_PerformanceMode_Latency) {
220 return;
221 }
222
223 if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
224 // Attempt to free some external allocation by a scavenge. (If the total
225 // remains above the limit, next external alloc will trigger another.)
226 CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
227 // Promotion may have pushed old space over its limit. Fall through for old
228 // space GC check.
229 }
230
231 if (old_space_.ReachedHardThreshold()) {
232 CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
233 } else {
234 CheckConcurrentMarking(thread, GCReason::kExternal, 0);
235 }
236}
237
238bool Heap::Contains(uword addr) const {
239 return new_space_.Contains(addr) || old_space_.Contains(addr);
240}
241
242bool Heap::NewContains(uword addr) const {
243 return new_space_.Contains(addr);
244}
245
246bool Heap::OldContains(uword addr) const {
247 return old_space_.Contains(addr);
248}
249
250bool Heap::CodeContains(uword addr) const {
251 return old_space_.CodeContains(addr);
252}
253
254bool Heap::DataContains(uword addr) const {
255 return old_space_.DataContains(addr);
256}
257
258void Heap::VisitObjects(ObjectVisitor* visitor) {
259 new_space_.VisitObjects(visitor);
260 old_space_.VisitObjects(visitor);
261}
262
263void Heap::VisitObjectsNoImagePages(ObjectVisitor* visitor) {
264 new_space_.VisitObjects(visitor);
265 old_space_.VisitObjectsNoImagePages(visitor);
266}
267
268void Heap::VisitObjectsImagePages(ObjectVisitor* visitor) const {
269 old_space_.VisitObjectsImagePages(visitor);
270}
271
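// HeapIterationScope safepoints all mutators and takes exclusive ownership of
// old-space iteration (waiting out any concurrent marking or sweeping), so
// visitors observe a stable heap. It can also make code pages writable for
// the duration of the scope.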
272HeapIterationScope::HeapIterationScope(Thread* thread, bool writable)
273 : ThreadStackResource(thread),
274 heap_(isolate_group()->heap()),
275 old_space_(heap_->old_space()),
276 writable_(writable) {
277 isolate_group()->safepoint_handler()->SafepointThreads(thread,
278 SafepointLevel::kGC);
279
280 {
281 // It's not safe to iterate over old space when concurrent marking or
282 // sweeping is in progress, or another thread is iterating the heap, so wait
283 // for any such task to complete first.
284 MonitorLocker ml(old_space_->tasks_lock());
285#if defined(DEBUG)
286 // We currently don't support nesting of HeapIterationScopes.
287 ASSERT(old_space_->iterating_thread_ != thread);
288#endif
289 while ((old_space_->tasks() > 0) ||
290 (old_space_->phase() != PageSpace::kDone)) {
291 old_space_->AssistTasks(&ml);
292 if (old_space_->phase() == PageSpace::kAwaitingFinalization) {
293 ml.Exit();
294 heap_->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
295 GCReason::kFinalize);
296 ml.Enter();
297 }
298 while (old_space_->tasks() > 0) {
299 ml.Wait();
300 }
301 }
302#if defined(DEBUG)
303 ASSERT(old_space_->iterating_thread_ == nullptr);
304 old_space_->iterating_thread_ = thread;
305#endif
306 old_space_->set_tasks(1);
307 }
308
309 if (writable_) {
310 heap_->WriteProtectCode(false);
311 }
312}
313
314HeapIterationScope::~HeapIterationScope() {
315 if (writable_) {
316 heap_->WriteProtectCode(true);
317 }
318
319 {
320 MonitorLocker ml(old_space_->tasks_lock());
321#if defined(DEBUG)
322 ASSERT(old_space_->iterating_thread_ == thread());
323 old_space_->iterating_thread_ = nullptr;
324#endif
325 ASSERT(old_space_->tasks() == 1);
326 old_space_->set_tasks(0);
327 ml.NotifyAll();
328 }
329
330 isolate_group()->safepoint_handler()->ResumeThreads(thread(),
331 SafepointLevel::kGC);
332}
333
334void HeapIterationScope::IterateObjects(ObjectVisitor* visitor) const {
335 heap_->VisitObjects(visitor);
336}
337
338void HeapIterationScope::IterateObjectsNoImagePages(
339 ObjectVisitor* visitor) const {
340 heap_->new_space()->VisitObjects(visitor);
341 heap_->old_space()->VisitObjectsNoImagePages(visitor);
342}
343
344void HeapIterationScope::IterateOldObjects(ObjectVisitor* visitor) const {
345 old_space_->VisitObjects(visitor);
346}
347
348void HeapIterationScope::IterateOldObjectsNoImagePages(
349 ObjectVisitor* visitor) const {
350 old_space_->VisitObjectsNoImagePages(visitor);
351}
352
353void HeapIterationScope::IterateVMIsolateObjects(ObjectVisitor* visitor) const {
354 Dart::vm_isolate_group()->heap()->VisitObjects(visitor);
355}
356
357void HeapIterationScope::IterateObjectPointers(
358 ObjectPointerVisitor* visitor,
359 ValidationPolicy validate_frames) {
360 isolate_group()->VisitObjectPointers(visitor, validate_frames);
361}
362
363void HeapIterationScope::IterateStackPointers(
364 ObjectPointerVisitor* visitor,
365 ValidationPolicy validate_frames) {
366 isolate_group()->VisitStackPointers(visitor, validate_frames);
367}
368
369void Heap::VisitObjectPointers(ObjectPointerVisitor* visitor) {
370 new_space_.VisitObjectPointers(visitor);
371 old_space_.VisitObjectPointers(visitor);
372}
373
374void Heap::NotifyIdle(int64_t deadline) {
375 Thread* thread = Thread::Current();
376 TIMELINE_FUNCTION_GC_DURATION(thread, "NotifyIdle");
377 {
378 GcSafepointOperationScope safepoint_operation(thread);
379
380 // Check if we want to collect new-space first, because if we want to
381 // collect both new-space and old-space, the new-space collection should run
382 // first to shrink the root set (make old-space GC faster) and avoid
383 // intergenerational garbage (make old-space GC free more memory).
384 if (new_space_.ShouldPerformIdleScavenge(deadline)) {
385 CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kIdle);
386 }
387
388 // Check if we want to collect old-space, in decreasing order of cost.
389 // Because we use a deadline instead of a timeout, we automatically take any
390 // time used up by a scavenge into account when deciding if we can complete
391 // a mark-sweep on time.
392 if (old_space_.ShouldPerformIdleMarkCompact(deadline)) {
393 // We prefer mark-compact over other old space GCs if we have enough time,
394 // since it removes old space fragmentation and frees up most memory.
395 // Blocks for O(heap), roughly twice as costly as mark-sweep.
396 CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
397 } else if (old_space_.ReachedHardThreshold()) {
398 // Even though the following GC may exceed our idle deadline, we need to
399 // ensure that promotions during idle scavenges do not lead to
400 // unbounded growth of old space. If a program is allocating only in new
401 // space and all scavenges happen during idle time, then NotifyIdle will
402 // be the only place that checks the old space allocation limit.
403 // Compare the tail end of Heap::CollectNewSpaceGarbage.
404 // Blocks for O(heap).
405 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kIdle);
406 } else if (old_space_.ShouldStartIdleMarkSweep(deadline) ||
407 old_space_.ReachedSoftThreshold()) {
408 // If we have both work to do and enough time, start or finish GC.
409 // If we have crossed the soft threshold, ignore time; the next old-space
410 // allocation will trigger this work anyway, so we try to pay at least
411 // some of that cost with idle time.
412 // Blocks for O(roots).
413 PageSpace::Phase phase;
414 {
415 MonitorLocker ml(old_space_.tasks_lock());
416 phase = old_space_.phase();
417 }
418 if (phase == PageSpace::kAwaitingFinalization) {
419 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
420 } else if (phase == PageSpace::kDone) {
421 StartConcurrentMarking(thread, GCReason::kIdle);
422 }
423 }
424 }
425
426 if (FLAG_mark_when_idle) {
427 old_space_.IncrementalMarkWithTimeBudget(deadline);
428 }
429
430 if (OS::GetCurrentMonotonicMicros() < deadline) {
431 Page::ClearCache();
432 }
433}
434
435void Heap::NotifyDestroyed() {
436 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "NotifyDestroyed");
437 CollectAllGarbage(GCReason::kDestroyed, /*compact=*/true);
438 Dart::thread_pool()->WaitForWorkers();
439}
440
441Dart_PerformanceMode Heap::SetMode(Dart_PerformanceMode new_mode) {
442 Dart_PerformanceMode old_mode = mode_.exchange(new_mode);
443 if ((old_mode == Dart_PerformanceMode_Latency) &&
444 (new_mode == Dart_PerformanceMode_Default)) {
445 CheckCatchUp(Thread::Current());
446 }
447 return old_mode;
448}
449
450void Heap::CollectNewSpaceGarbage(Thread* thread,
451 GCType type,
452 GCReason reason) {
453 NoActiveIsolateScope no_active_isolate_scope(thread);
454 ASSERT(reason != GCReason::kPromotion);
455 ASSERT(reason != GCReason::kFinalize);
456 if (thread->isolate_group() == Dart::vm_isolate_group()) {
457 // The vm isolate cannot safely collect garbage due to unvisited read-only
458 // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
459 // trigger a nice out-of-memory message instead of a crash in the middle of
460 // visiting pointers.
461 return;
462 }
463 {
464 GcSafepointOperationScope safepoint_operation(thread);
465 RecordBeforeGC(type, reason);
466 {
467 VMTagScope tagScope(thread, reason == GCReason::kIdle
468 ? VMTag::kGCIdleTagId
469 : VMTag::kGCNewSpaceTagId);
470 TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
471 new_space_.Scavenge(thread, type, reason);
472 RecordAfterGC(type);
473 PrintStats();
474#if defined(SUPPORT_TIMELINE)
475 PrintStatsToTimeline(&tbes, reason);
476#endif
477 }
478 if (type == GCType::kScavenge && reason == GCReason::kNewSpace) {
479 if (old_space_.ReachedHardThreshold()) {
480 CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
481 GCReason::kPromotion);
482 } else {
483 CheckConcurrentMarking(thread, GCReason::kPromotion, 0);
484 }
485 }
486 }
487}
488
489void Heap::CollectOldSpaceGarbage(Thread* thread,
490 GCType type,
491 GCReason reason) {
492 NoActiveIsolateScope no_active_isolate_scope(thread);
493
494 ASSERT(type != GCType::kScavenge);
495 ASSERT(reason != GCReason::kNewSpace);
496 ASSERT(reason != GCReason::kStoreBuffer);
497 if (FLAG_use_compactor) {
498 type = GCType::kMarkCompact;
499 }
500 if (thread->isolate_group() == Dart::vm_isolate_group()) {
501 // The vm isolate cannot safely collect garbage due to unvisited read-only
502 // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
503 // trigger a nice out-of-memory message instead of a crash in the middle of
504 // visiting pointers.
505 return;
506 }
507 {
508 GcSafepointOperationScope safepoint_operation(thread);
509 if (reason == GCReason::kFinalize) {
510 MonitorLocker ml(old_space_.tasks_lock());
511 if (old_space_.phase() != PageSpace::kAwaitingFinalization) {
512 return; // Lost race.
513 }
514 }
515
516 thread->isolate_group()->ForEachIsolate(
517 [&](Isolate* isolate) {
518 // Discard regexp backtracking stacks to further reduce memory usage.
519 isolate->CacheRegexpBacktrackStack(nullptr);
520 },
521 /*at_safepoint=*/true);
522
523 RecordBeforeGC(type, reason);
524 VMTagScope tagScope(thread, reason == GCReason::kIdle
525 ? VMTag::kGCIdleTagId
526 : VMTag::kGCOldSpaceTagId);
527 TIMELINE_FUNCTION_GC_DURATION(thread, "CollectOldGeneration");
528 old_space_.CollectGarbage(thread, /*compact=*/type == GCType::kMarkCompact,
529 /*finalize=*/true);
530 RecordAfterGC(type);
531 PrintStats();
532#if defined(SUPPORT_TIMELINE)
533 PrintStatsToTimeline(&tbes, reason);
534#endif
535
536 // Some Code objects may have been collected so invalidate handler cache.
537 thread->isolate_group()->ForEachIsolate(
538 [&](Isolate* isolate) {
539 isolate->handler_info_cache()->Clear();
540 isolate->catch_entry_moves_cache()->Clear();
541 },
542 /*at_safepoint=*/true);
543 assume_scavenge_will_fail_ = false;
544 }
545}
546
547void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) {
548 switch (type) {
549 case GCType::kScavenge:
550 case GCType::kEvacuate:
551 CollectNewSpaceGarbage(thread, type, reason);
552 break;
553 case GCType::kMarkSweep:
554 case GCType::kMarkCompact:
555 CollectOldSpaceGarbage(thread, type, reason);
556 break;
557 default:
558 UNREACHABLE();
559 }
560}
561
562void Heap::CollectAllGarbage(GCReason reason, bool compact) {
563 Thread* thread = Thread::Current();
564 if (thread->is_marking()) {
565 // If incremental marking is happening, we need to finish the GC cycle
566 // and perform a follow-up GC to purge any "floating garbage" that may be
567 // retained by the incremental barrier.
568 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, reason);
569 }
570 CollectOldSpaceGarbage(
571 thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
572}
573
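// Catches up on old-space work that was deferred, e.g. while the heap was in
// latency mode: a full mark-sweep if the hard threshold has been reached,
// otherwise a concurrent-marking check.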
574void Heap::CheckCatchUp(Thread* thread) {
575 ASSERT(!thread->force_growth());
576 if (old_space()->ReachedHardThreshold()) {
577 CollectGarbage(thread, GCType::kMarkSweep, GCReason::kCatchUp);
578 } else {
579 CheckConcurrentMarking(thread, GCReason::kCatchUp, 0);
580 }
581}
582
583void Heap::CheckConcurrentMarking(Thread* thread,
584 GCReason reason,
585 intptr_t size) {
586 ASSERT(!thread->force_growth());
587
588 PageSpace::Phase phase;
589 {
590 MonitorLocker ml(old_space_.tasks_lock());
591 phase = old_space_.phase();
592 }
593
594 switch (phase) {
595 case PageSpace::kMarking:
596 if (mode_ != Dart_PerformanceMode_Latency) {
597 old_space_.IncrementalMarkWithSizeBudget(size);
598 }
599 return;
600 case PageSpace::kSweepingLarge:
601 case PageSpace::kSweepingRegular:
602 return; // Busy.
603 case PageSpace::kAwaitingFinalization:
604 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
605 return;
606 case PageSpace::kDone:
607 if (old_space_.ReachedSoftThreshold()) {
608 StartConcurrentMarking(thread, reason);
609 }
610 return;
611 default:
612 UNREACHABLE();
613 }
614}
615
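// If the concurrent marker has finished its concurrent phase and is awaiting
// finalization, finish the collection now.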
616void Heap::CheckFinalizeMarking(Thread* thread) {
617 ASSERT(!thread->force_growth());
618
619 PageSpace::Phase phase;
620 {
621 MonitorLocker ml(old_space_.tasks_lock());
622 phase = old_space_.phase();
623 }
624
625 if (phase == PageSpace::kAwaitingFinalization) {
626 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
627 }
628}
629
630void Heap::StartConcurrentMarking(Thread* thread, GCReason reason) {
631 GcSafepointOperationScope safepoint_operation(thread);
632 RecordBeforeGC(GCType::kStartConcurrentMark, reason);
633 VMTagScope tagScope(thread, reason == GCReason::kIdle
634 ? VMTag::kGCIdleTagId
635 : VMTag::kGCOldSpaceTagId);
636 TIMELINE_FUNCTION_GC_DURATION(thread, "StartConcurrentMarking");
637 old_space_.CollectGarbage(thread, /*compact=*/false, /*finalize=*/false);
638 RecordAfterGC(GCType::kStartConcurrentMark);
639 PrintStats();
640#if defined(SUPPORT_TIMELINE)
641 PrintStatsToTimeline(&tbes, reason);
642#endif
643}
644
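// Blocks until concurrent marking is no longer in progress; if marking
// finishes while waiting, this thread performs the finalizing mark-sweep
// itself.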
645void Heap::WaitForMarkerTasks(Thread* thread) {
646 MonitorLocker ml(old_space_.tasks_lock());
647 while ((old_space_.phase() == PageSpace::kMarking) ||
648 (old_space_.phase() == PageSpace::kAwaitingFinalization)) {
649 while (old_space_.phase() == PageSpace::kMarking) {
650 ml.WaitWithSafepointCheck(thread);
651 }
652 if (old_space_.phase() == PageSpace::kAwaitingFinalization) {
653 ml.Exit();
654 CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
655 ml.Enter();
656 }
657 }
658}
659
660void Heap::WaitForSweeperTasks(Thread* thread) {
661 ASSERT(!thread->OwnsGCSafepoint());
662 MonitorLocker ml(old_space_.tasks_lock());
663 while ((old_space_.phase() == PageSpace::kSweepingLarge) ||
664 (old_space_.phase() == PageSpace::kSweepingRegular)) {
665 ml.WaitWithSafepointCheck(thread);
666 }
667}
668
669void Heap::WaitForSweeperTasksAtSafepoint(Thread* thread) {
670 ASSERT(thread->OwnsGCSafepoint());
671 MonitorLocker ml(old_space_.tasks_lock());
672 while ((old_space_.phase() == PageSpace::kSweepingLarge) ||
673 (old_space_.phase() == PageSpace::kSweepingRegular)) {
674 ml.Wait();
675 }
676}
677
678void Heap::UpdateGlobalMaxUsed() {
679 ASSERT(isolate_group_ != nullptr);
680 // We are accessing the used in words count for both new and old space
681 // without synchronizing. The value of this metric is approximate.
682 isolate_group_->GetHeapGlobalUsedMaxMetric()->SetValue(
683 (UsedInWords(Heap::kNew) * kWordSize) +
684 (UsedInWords(Heap::kOld) * kWordSize));
685}
686
687void Heap::WriteProtect(bool read_only) {
688 read_only_ = read_only;
689 new_space_.WriteProtect(read_only);
690 old_space_.WriteProtect(read_only);
691}
692
693void Heap::Init(IsolateGroup* isolate_group,
694 bool is_vm_isolate,
695 intptr_t max_new_gen_words,
696 intptr_t max_old_gen_words) {
697 ASSERT(isolate_group->heap() == nullptr);
698 std::unique_ptr<Heap> heap(new Heap(isolate_group, is_vm_isolate,
699 max_new_gen_words, max_old_gen_words));
700 isolate_group->set_heap(std::move(heap));
701}
702
703void Heap::AddRegionsToObjectSet(ObjectSet* set) const {
704 new_space_.AddRegionsToObjectSet(set);
705 old_space_.AddRegionsToObjectSet(set);
706 set->SortRegions();
707}
708
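// Debugging hook: forces a full collection once the next 'num_allocations'
// allocations have occurred. The remaining TLAB is abandoned so generated
// code takes the slow allocation path and CollectForDebugging runs.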
709void Heap::CollectOnNthAllocation(intptr_t num_allocations) {
710 // Prevent generated code from using the TLAB fast path on next allocation.
711 new_space_.AbandonRemainingTLABForDebugging(Thread::Current());
712 gc_on_nth_allocation_ = num_allocations;
713}
714
715void Heap::CollectForDebugging(Thread* thread) {
716 if (gc_on_nth_allocation_ == kNoForcedGarbageCollection) return;
717 if (thread->OwnsGCSafepoint()) {
718 // CollectAllGarbage is not supported when we are at a safepoint.
719 // Allocating when at a safepoint is not a common case.
720 return;
721 }
722 gc_on_nth_allocation_--;
723 if (gc_on_nth_allocation_ == 0) {
724 CollectAllGarbage(GCReason::kDebugging);
725 gc_on_nth_allocation_ = kNoForcedGarbageCollection;
726 } else {
727 // Prevent generated code from using the TLAB fast path on next allocation.
728 new_space_.AbandonRemainingTLABForDebugging(thread);
729 }
730}
731
732ObjectSet* Heap::CreateAllocatedObjectSet(Zone* zone,
733 MarkExpectation mark_expectation) {
734 ObjectSet* allocated_set = new (zone) ObjectSet(zone);
735
736 this->AddRegionsToObjectSet(allocated_set);
737 Isolate* vm_isolate = Dart::vm_isolate();
738 vm_isolate->group()->heap()->AddRegionsToObjectSet(allocated_set);
739
740 {
741 VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
742 mark_expectation);
743 this->VisitObjectsNoImagePages(&object_visitor);
744 }
745 {
746 VerifyObjectVisitor object_visitor(isolate_group(), allocated_set,
747 kRequireMarked);
748 this->VisitObjectsImagePages(&object_visitor);
749 }
750 {
751 // VM isolate heap is premarked.
752 VerifyObjectVisitor vm_object_visitor(isolate_group(), allocated_set,
753 kRequireMarked);
754 vm_isolate->group()->heap()->VisitObjects(&vm_object_visitor);
755 }
756
757 return allocated_set;
758}
759
760bool Heap::Verify(const char* msg, MarkExpectation mark_expectation) {
761 if (FLAG_disable_heap_verification) {
762 return true;
763 }
764 HeapIterationScope heap_iteration_scope(Thread::Current());
765 return VerifyGC(msg, mark_expectation);
766}
767
768bool Heap::VerifyGC(const char* msg, MarkExpectation mark_expectation) {
769 ASSERT(msg != nullptr);
770 auto thread = Thread::Current();
771 StackZone stack_zone(thread);
772
773 ObjectSet* allocated_set =
774 CreateAllocatedObjectSet(stack_zone.GetZone(), mark_expectation);
775 VerifyPointersVisitor visitor(isolate_group(), allocated_set, msg);
776 VisitObjectPointers(&visitor);
777
778 // Only returning a value so that Heap::Verify can be called from an ASSERT.
779 return true;
780}
781
782void Heap::PrintSizes() const {
784 "New space (%" Pd "k of %" Pd
785 "k) "
786 "Old space (%" Pd "k of %" Pd "k)\n",
787 (UsedInWords(kNew) / KBInWords), (CapacityInWords(kNew) / KBInWords),
788 (UsedInWords(kOld) / KBInWords), (CapacityInWords(kOld) / KBInWords));
789}
790
791intptr_t Heap::UsedInWords(Space space) const {
792 return space == kNew ? new_space_.UsedInWords() : old_space_.UsedInWords();
793}
794
795intptr_t Heap::CapacityInWords(Space space) const {
796 return space == kNew ? new_space_.CapacityInWords()
797 : old_space_.CapacityInWords();
798}
799
800intptr_t Heap::ExternalInWords(Space space) const {
801 return space == kNew ? new_space_.ExternalInWords()
802 : old_space_.ExternalInWords();
803}
804
805intptr_t Heap::TotalUsedInWords() const {
806 return UsedInWords(kNew) + UsedInWords(kOld);
807}
808
809intptr_t Heap::TotalCapacityInWords() const {
810 return CapacityInWords(kNew) + CapacityInWords(kOld);
811}
812
813intptr_t Heap::TotalExternalInWords() const {
814 return ExternalInWords(kNew) + ExternalInWords(kOld);
815}
816
817int64_t Heap::GCTimeInMicros(Space space) const {
818 if (space == kNew) {
819 return new_space_.gc_time_micros();
820 }
821 return old_space_.gc_time_micros();
822}
823
824intptr_t Heap::Collections(Space space) const {
825 if (space == kNew) {
826 return new_space_.collections();
827 }
828 return old_space_.collections();
829}
830
831const char* Heap::GCTypeToString(GCType type) {
832 switch (type) {
833 case GCType::kScavenge:
834 return "Scavenge";
835 case GCType::kEvacuate:
836 return "Evacuate";
837 case GCType::kStartConcurrentMark:
838 return "StartCMark";
839 case GCType::kMarkSweep:
840 return "MarkSweep";
841 case GCType::kMarkCompact:
842 return "MarkCompact";
843 default:
844 UNREACHABLE();
845 return "";
846 }
847}
848
849const char* Heap::GCReasonToString(GCReason gc_reason) {
850 switch (gc_reason) {
851 case GCReason::kNewSpace:
852 return "new space";
853 case GCReason::kStoreBuffer:
854 return "store buffer";
855 case GCReason::kPromotion:
856 return "promotion";
857 case GCReason::kOldSpace:
858 return "old space";
859 case GCReason::kFinalize:
860 return "finalize";
861 case GCReason::kFull:
862 return "full";
863 case GCReason::kExternal:
864 return "external";
865 case GCReason::kIdle:
866 return "idle";
867 case GCReason::kDestroyed:
868 return "destroyed";
869 case GCReason::kDebugging:
870 return "debugging";
871 case GCReason::kCatchUp:
872 return "catch-up";
873 default:
874 UNREACHABLE();
875 return "";
876 }
877}
878
879int64_t Heap::PeerCount() const {
880 return new_weak_tables_[kPeers]->count() + old_weak_tables_[kPeers]->count();
881}
882
883void Heap::ResetCanonicalHashTable() {
884 new_weak_tables_[kCanonicalHashes]->Reset();
885 old_weak_tables_[kCanonicalHashes]->Reset();
886}
887
888void Heap::ResetObjectIdTable() {
889 new_weak_tables_[kObjectIds]->Reset();
890 old_weak_tables_[kObjectIds]->Reset();
891}
892
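// The weak tables associate out-of-band values (peers, object ids, canonical
// hashes, heap-sampling data) with individual objects. New-space and
// old-space objects are tracked in separate tables; immediates are treated as
// old.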
893intptr_t Heap::GetWeakEntry(ObjectPtr raw_obj, WeakSelector sel) const {
894 if (raw_obj->IsImmediateOrOldObject()) {
895 return old_weak_tables_[sel]->GetValue(raw_obj);
896 } else {
897 return new_weak_tables_[sel]->GetValue(raw_obj);
898 }
899}
900
901void Heap::SetWeakEntry(ObjectPtr raw_obj, WeakSelector sel, intptr_t val) {
902 if (raw_obj->IsImmediateOrOldObject()) {
903 old_weak_tables_[sel]->SetValue(raw_obj, val);
904 } else {
905 new_weak_tables_[sel]->SetValue(raw_obj, val);
906 }
907}
908
909intptr_t Heap::SetWeakEntryIfNonExistent(ObjectPtr raw_obj,
910 WeakSelector sel,
911 intptr_t val) {
912 if (raw_obj->IsImmediateOrOldObject()) {
913 return old_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
914 } else {
915 return new_weak_tables_[sel]->SetValueIfNonExistent(raw_obj, val);
916 }
917}
918
919void Heap::ForwardWeakEntries(ObjectPtr before_object, ObjectPtr after_object) {
920 const auto before_space =
921 before_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;
922 const auto after_space =
923 after_object->IsImmediateOrOldObject() ? Heap::kOld : Heap::kNew;
924
925 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
926 const auto selector = static_cast<Heap::WeakSelector>(sel);
927 auto before_table = GetWeakTable(before_space, selector);
928 intptr_t entry = before_table->RemoveValueExclusive(before_object);
929 if (entry != 0) {
930 auto after_table = GetWeakTable(after_space, selector);
931 after_table->SetValueExclusive(after_object, entry);
932 }
933 }
934
936 [&](Isolate* isolate) {
937 auto before_table = before_object->IsImmediateOrOldObject()
938 ? isolate->forward_table_old()
939 : isolate->forward_table_new();
940 if (before_table != nullptr) {
941 intptr_t entry = before_table->RemoveValueExclusive(before_object);
942 if (entry != 0) {
943 auto after_table = after_object->IsImmediateOrOldObject()
944 ? isolate->forward_table_old()
945 : isolate->forward_table_new();
946 ASSERT(after_table != nullptr);
947 after_table->SetValueExclusive(after_object, entry);
948 }
949 }
950 },
951 /*at_safepoint=*/true);
952}
953
954void Heap::ForwardWeakTables(ObjectPointerVisitor* visitor) {
955 // NOTE: This method is only used by the compactor, so there is no need to
956 // process the `Heap::kNew` tables.
957 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
958 WeakSelector selector = static_cast<Heap::WeakSelector>(sel);
959 GetWeakTable(Heap::kOld, selector)->Forward(visitor);
960 }
961
962 // Isolates might have forwarding tables (used during snapshotting for
963 // isolate communication).
964 isolate_group()->ForEachIsolate(
965 [&](Isolate* isolate) {
966 auto table_old = isolate->forward_table_old();
967 if (table_old != nullptr) table_old->Forward(visitor);
968 },
969 /*at_safepoint=*/true);
970}
971
972#ifndef PRODUCT
973void Heap::PrintToJSONObject(Space space, JSONObject* object) const {
974 if (space == kNew) {
975 new_space_.PrintToJSONObject(object);
976 } else {
977 old_space_.PrintToJSONObject(object);
978 }
979}
980
981void Heap::PrintMemoryUsageJSON(JSONStream* stream) const {
982 JSONObject obj(stream);
983 PrintMemoryUsageJSON(&obj);
984}
985
986void Heap::PrintMemoryUsageJSON(JSONObject* jsobj) const {
987 jsobj->AddProperty("type", "MemoryUsage");
988 jsobj->AddProperty64("heapUsage", TotalUsedInWords() * kWordSize);
989 jsobj->AddProperty64("heapCapacity", TotalCapacityInWords() * kWordSize);
990 jsobj->AddProperty64("externalUsage", TotalExternalInWords() * kWordSize);
991}
992#endif // PRODUCT
993
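// RecordBeforeGC/RecordAfterGC bracket every collection: they snapshot space
// usage and store-buffer size into stats_, accumulate per-space GC time, and
// feed the verbose-gc log, timeline events, and service GC events.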
994void Heap::RecordBeforeGC(GCType type, GCReason reason) {
995 stats_.num_++;
996 stats_.type_ = type;
997 stats_.reason_ = reason;
998 stats_.before_.micros_ = OS::GetCurrentMonotonicMicros();
999 stats_.before_.new_ = new_space_.GetCurrentUsage();
1000 stats_.before_.old_ = old_space_.GetCurrentUsage();
1001 stats_.before_.store_buffer_ = isolate_group_->store_buffer()->Size();
1002}
1003
1004void Heap::RecordAfterGC(GCType type) {
1005 stats_.after_.micros_ = OS::GetCurrentMonotonicMicros();
1006 int64_t delta = stats_.after_.micros_ - stats_.before_.micros_;
1007 if (stats_.type_ == GCType::kScavenge) {
1008 new_space_.AddGCTime(delta);
1009 new_space_.IncrementCollections();
1010 } else {
1011 old_space_.AddGCTime(delta);
1012 old_space_.IncrementCollections();
1013 }
1014 stats_.after_.new_ = new_space_.GetCurrentUsage();
1015 stats_.after_.old_ = old_space_.GetCurrentUsage();
1016 stats_.after_.store_buffer_ = isolate_group_->store_buffer()->Size();
1017#ifndef PRODUCT
1018 // For now we'll emit the same GC events on all isolates.
1019 if (Service::gc_stream.enabled()) {
1020 isolate_group_->ForEachIsolate(
1021 [&](Isolate* isolate) {
1022 if (!Isolate::IsSystemIsolate(isolate)) {
1023 ServiceEvent event(isolate, ServiceEvent::kGC);
1024 event.set_gc_stats(&stats_);
1025 Service::HandleEvent(&event, /*enter_safepoint*/ false);
1026 }
1027 },
1028 /*at_safepoint=*/true);
1029 }
1030#endif // !PRODUCT
1031}
1032
1033void Heap::PrintStats() {
1034 if (!FLAG_verbose_gc) return;
1035
1036 if ((FLAG_verbose_gc_hdr != 0) &&
1037 (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) {
1038 OS::PrintErr(
1039 "[ | | | | | new "
1040 "gen | new gen | new gen | old gen | old gen | old "
1041 "gen | store | delta used ]\n"
1042 "[ GC isolate | space (reason) | GC# | start | time | used "
1043 "(MB) | capacity MB | external| used (MB) | capacity (MB) | "
1044 "external MB | buffer | new | old ]\n"
1045 "[ | | | (s) | (ms) "
1046 "|before| after|before| after| b4 |aftr| before| after | before| after "
1047 "|before| after| b4 |aftr| (MB) | (MB) ]\n");
1048 }
1049
1050 // clang-format off
1051 OS::PrintErr(
1052 "[ %-13.13s, %11s(%12s), " // GC(isolate-group), type(reason)
1053 "%4" Pd ", " // count
1054 "%6.2f, " // start time
1055 "%5.1f, " // total time
1056 "%5.1f, %5.1f, " // new gen: in use before/after
1057 "%5.1f, %5.1f, " // new gen: capacity before/after
1058 "%3.1f, %3.1f, " // new gen: external before/after
1059 "%6.1f, %6.1f, " // old gen: in use before/after
1060 "%6.1f, %6.1f, " // old gen: capacity before/after
1061 "%5.1f, %5.1f, " // old gen: external before/after
1062 "%3" Pd ", %3" Pd ", " // store buffer: before/after
1063 "%5.1f, %6.1f, " // delta used: new gen/old gen
1064 "]\n", // End with a comma to make it easier to import in spreadsheets.
1065 isolate_group_->source()->name,
1066 GCTypeToString(stats_.type_),
1067 GCReasonToString(stats_.reason_),
1068 stats_.num_,
1069 MicrosecondsToSeconds(isolate_group_->UptimeMicros()),
1070 MicrosecondsToMilliseconds(stats_.after_.micros_ -
1071 stats_.before_.micros_),
1072 WordsToMB(stats_.before_.new_.used_in_words),
1073 WordsToMB(stats_.after_.new_.used_in_words),
1074 WordsToMB(stats_.before_.new_.capacity_in_words),
1075 WordsToMB(stats_.after_.new_.capacity_in_words),
1076 WordsToMB(stats_.before_.new_.external_in_words),
1077 WordsToMB(stats_.after_.new_.external_in_words),
1078 WordsToMB(stats_.before_.old_.used_in_words),
1079 WordsToMB(stats_.after_.old_.used_in_words),
1080 WordsToMB(stats_.before_.old_.capacity_in_words),
1081 WordsToMB(stats_.after_.old_.capacity_in_words),
1082 WordsToMB(stats_.before_.old_.external_in_words),
1083 WordsToMB(stats_.after_.old_.external_in_words),
1084 stats_.before_.store_buffer_,
1085 stats_.after_.store_buffer_,
1086 WordsToMB(stats_.after_.new_.used_in_words -
1087 stats_.before_.new_.used_in_words),
1088 WordsToMB(stats_.after_.old_.used_in_words -
1089 stats_.before_.old_.used_in_words));
1090 // clang-format on
1091}
1092
1093void Heap::PrintStatsToTimeline(TimelineEventScope* event, GCReason reason) {
1094#if defined(SUPPORT_TIMELINE)
1095 if ((event == nullptr) || !event->enabled()) {
1096 return;
1097 }
1098 intptr_t arguments = event->GetNumArguments();
1099 event->SetNumArguments(arguments + 13);
1100 event->CopyArgument(arguments + 0, "Reason", GCReasonToString(reason));
1101 event->FormatArgument(arguments + 1, "Before.New.Used (kB)", "%" Pd "",
1102 RoundWordsToKB(stats_.before_.new_.used_in_words));
1103 event->FormatArgument(arguments + 2, "After.New.Used (kB)", "%" Pd "",
1104 RoundWordsToKB(stats_.after_.new_.used_in_words));
1105 event->FormatArgument(arguments + 3, "Before.Old.Used (kB)", "%" Pd "",
1106 RoundWordsToKB(stats_.before_.old_.used_in_words));
1107 event->FormatArgument(arguments + 4, "After.Old.Used (kB)", "%" Pd "",
1108 RoundWordsToKB(stats_.after_.old_.used_in_words));
1109
1110 event->FormatArgument(arguments + 5, "Before.New.Capacity (kB)", "%" Pd "",
1111 RoundWordsToKB(stats_.before_.new_.capacity_in_words));
1112 event->FormatArgument(arguments + 6, "After.New.Capacity (kB)", "%" Pd "",
1113 RoundWordsToKB(stats_.after_.new_.capacity_in_words));
1114 event->FormatArgument(arguments + 7, "Before.Old.Capacity (kB)", "%" Pd "",
1115 RoundWordsToKB(stats_.before_.old_.capacity_in_words));
1116 event->FormatArgument(arguments + 8, "After.Old.Capacity (kB)", "%" Pd "",
1117 RoundWordsToKB(stats_.after_.old_.capacity_in_words));
1118
1119 event->FormatArgument(arguments + 9, "Before.New.External (kB)", "%" Pd "",
1120 RoundWordsToKB(stats_.before_.new_.external_in_words));
1121 event->FormatArgument(arguments + 10, "After.New.External (kB)", "%" Pd "",
1122 RoundWordsToKB(stats_.after_.new_.external_in_words));
1123 event->FormatArgument(arguments + 11, "Before.Old.External (kB)", "%" Pd "",
1124 RoundWordsToKB(stats_.before_.old_.external_in_words));
1125 event->FormatArgument(arguments + 12, "After.Old.External (kB)", "%" Pd "",
1126 RoundWordsToKB(stats_.after_.old_.external_in_words));
1127#endif // defined(SUPPORT_TIMELINE)
1128}
1129
1130Heap::Space Heap::SpaceForExternal(intptr_t size) const {
1131 // If 'size' would be a significant fraction of new space, then use old.
1132 const int kExtNewRatio = 16;
1133 if (size > (new_space_.ThresholdInWords() * kWordSize) / kExtNewRatio) {
1134 return Heap::kOld;
1135 } else {
1136 return Heap::kNew;
1137 }
1138}
1139
1140ForceGrowthScope::ForceGrowthScope(Thread* thread)
1141 : ThreadStackResource(thread) {
1142 thread->IncrementForceGrowthScopeDepth();
1143}
1144
1145ForceGrowthScope::~ForceGrowthScope() {
1146 thread()->DecrementForceGrowthScopeDepth();
1147}
1148
1149WritableVMIsolateScope::WritableVMIsolateScope(Thread* thread)
1150 : ThreadStackResource(thread) {
1151 if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
1152 Dart::vm_isolate_group()->heap()->WriteProtect(false);
1153 }
1154}
1155
1156WritableVMIsolateScope::~WritableVMIsolateScope() {
1157 ASSERT(Dart::vm_isolate_group()->heap()->UsedInWords(Heap::kNew) == 0);
1158 if (FLAG_write_protect_code && FLAG_write_protect_vm_isolate) {
1159 Dart::vm_isolate_group()->heap()->WriteProtect(true);
1160 }
1161}
1162
1163WritableCodePages::WritableCodePages(Thread* thread,
1164 IsolateGroup* isolate_group)
1165 : StackResource(thread), isolate_group_(isolate_group) {
1166 isolate_group_->heap()->WriteProtectCode(false);
1167}
1168
1169WritableCodePages::~WritableCodePages() {
1170 isolate_group_->heap()->WriteProtectCode(true);
1171}
1172
1173} // namespace dart