scavenger.cc
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/scavenger.h"
6
7#include "platform/assert.h"
9#include "vm/class_id.h"
11#include "vm/dart.h"
12#include "vm/dart_api_state.h"
13#include "vm/flag_list.h"
14#include "vm/flags.h"
15#include "vm/heap/become.h"
16#include "vm/heap/gc_shared.h"
17#include "vm/heap/marker.h"
18#include "vm/heap/pages.h"
20#include "vm/heap/safepoint.h"
21#include "vm/heap/verifier.h"
22#include "vm/heap/weak_table.h"
23#include "vm/isolate.h"
24#include "vm/lockers.h"
25#include "vm/log.h"
26#include "vm/longjump.h"
27#include "vm/object.h"
28#include "vm/object_id_ring.h"
29#include "vm/object_set.h"
30#include "vm/port.h"
31#include "vm/stack_frame.h"
32#include "vm/tagged_pointer.h"
33#include "vm/thread_barrier.h"
34#include "vm/timeline.h"
35#include "vm/visitor.h"
36
37namespace dart {
38
39DEFINE_FLAG(int,
40 early_tenuring_threshold,
41 66,
42 "When more than this percentage of promotion candidates survive, "
43 "promote all survivors of next scavenge.");
44DEFINE_FLAG(int,
45 new_gen_garbage_threshold,
46 90,
47 "Grow new gen when less than this percentage is garbage.");
48DEFINE_FLAG(int, new_gen_growth_factor, 2, "Grow new gen by this factor.");
49
50// Scavenger uses the kCardRememberedBit to distinguish forwarded and
51// non-forwarded objects. We must choose a bit that is clear for all new-space
52// object headers, and which doesn't intersect with the target address because
53// of object alignment.
54enum {
55 kForwardingMask = 1 << UntaggedObject::kCardRememberedBit,
56 kNotForwarded = 0,
57 kForwarded = kForwardingMask,
58};
59
60// If the forwarded bit and pointer tag bit are the same, we can avoid a few
61// conversions.
62COMPILE_ASSERT(static_cast<uword>(kForwarded) ==
63 static_cast<uword>(kHeapObjectTag));
64
65DART_FORCE_INLINE
66static bool IsForwarding(uword header) {
67 uword bits = header & kForwardingMask;
68 ASSERT((bits == kNotForwarded) || (bits == kForwarded));
69 return bits == kForwarded;
70}
71
72DART_FORCE_INLINE
73static ObjectPtr ForwardedObj(uword header) {
74 ASSERT(IsForwarding(header));
75 return static_cast<ObjectPtr>(header);
76}
77
78DART_FORCE_INLINE
79static uword ForwardingHeader(ObjectPtr target) {
80 uword result = static_cast<uword>(target);
81 ASSERT(IsForwarding(result));
82 return result;
83}
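
As an aside on the encoding used by IsForwarding/ForwardedObj/ForwardingHeader above: a forwarding header is simply the tagged pointer to the object's new location, distinguishable from a real header because the chosen bit is never set in an ordinary new-space header. A minimal standalone sketch of the same low-bit tagging trick follows; the names are illustrative, not the VM's types, and it assumes object addresses keep the low bit clear by alignment.

#include <cassert>
#include <cstdint>

constexpr uintptr_t kSketchTagBit = 1;  // plays the role of kForwarded / kHeapObjectTag

inline bool SketchIsForwarding(uintptr_t header) {
  return (header & kSketchTagBit) != 0;
}

inline uintptr_t SketchForwardingHeader(const void* target) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(target);
  assert((addr & kSketchTagBit) == 0);  // alignment keeps the tag bit clear
  return addr | kSketchTagBit;
}

inline void* SketchForwardedTarget(uintptr_t header) {
  assert(SketchIsForwarding(header));
  return reinterpret_cast<void*>(header & ~kSketchTagBit);
}

int main() {
  alignas(8) static int object;                        // stand-in for a copied object
  uintptr_t header = SketchForwardingHeader(&object);  // "install" the forwarding word
  assert(SketchIsForwarding(header));
  assert(SketchForwardedTarget(header) == &object);
  return 0;
}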
84
85// Races: The first word in the copied region is a header word that may be
86// updated by the scavenger worker in another thread, so we might copy either
87// the original object header or an installed forwarding pointer. This race is
88// harmless because if we copy the installed forwarding pointer, the scavenge
89// worker in the current thread will abandon this copy. We do not mark the loads
90// here as relaxed so the C++ compiler still has the freedom to reorder them.
92static void objcpy(void* dst, const void* src, size_t size) {
93 // A mem copy specialized for objects. We can assume:
94 // - dst and src do not overlap
95 ASSERT(
96 (reinterpret_cast<uword>(dst) + size <= reinterpret_cast<uword>(src)) ||
97 (reinterpret_cast<uword>(src) + size <= reinterpret_cast<uword>(dst)));
98 // - dst and src are word aligned
99 ASSERT(Utils::IsAligned(reinterpret_cast<uword>(dst), sizeof(uword)));
100 ASSERT(Utils::IsAligned(reinterpret_cast<uword>(src), sizeof(uword)));
101 // - size is strictly positive
102 ASSERT(size > 0);
103 // - size is a multiple of double words
104 ASSERT(Utils::IsAligned(size, 2 * sizeof(uword)));
105
106 uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
107 const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
108 do {
109 uword a = *src_cursor++;
110 uword b = *src_cursor++;
111 *dst_cursor++ = a;
112 *dst_cursor++ = b;
113 size -= (2 * sizeof(uword));
114 } while (size > 0);
115}
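
For reference, a hedged standalone sketch that exercises a copy loop with the same shape and preconditions as objcpy (word-aligned, non-overlapping buffers whose size is a positive multiple of two words). The buffers and names here are illustrative, not VM code.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

int main() {
  alignas(sizeof(uintptr_t)) uintptr_t src[4] = {1, 2, 3, 4};
  alignas(sizeof(uintptr_t)) uintptr_t dst[4] = {0, 0, 0, 0};
  size_t size = sizeof(src);      // four words: a multiple of 2 * sizeof(word)
  uintptr_t* d = dst;
  const uintptr_t* s = src;
  do {                            // same two-words-per-iteration structure
    uintptr_t a = *s++;
    uintptr_t b = *s++;
    *d++ = a;
    *d++ = b;
    size -= 2 * sizeof(uintptr_t);
  } while (size > 0);
  assert(std::memcmp(dst, src, sizeof(src)) == 0);
  return 0;
}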
116
117DART_FORCE_INLINE
118static uword ReadHeaderRelaxed(ObjectPtr obj) {
119 return reinterpret_cast<std::atomic<uword>*>(UntaggedObject::ToAddr(obj))
120 ->load(std::memory_order_relaxed);
121}
122
123DART_FORCE_INLINE
124static void WriteHeaderRelaxed(ObjectPtr obj, uword header) {
125 reinterpret_cast<std::atomic<uword>*>(UntaggedObject::ToAddr(obj))
126 ->store(header, std::memory_order_relaxed);
127}
128
129template <bool parallel>
131 public:
133 Scavenger* scavenger,
134 SemiSpace* from,
135 FreeList* freelist,
136 PromotionStack* promotion_stack)
138 thread_(nullptr),
139 scavenger_(scavenger),
140 from_(from),
141 page_space_(scavenger->heap_->old_space()),
142 freelist_(freelist),
143 bytes_promoted_(0),
144 visiting_old_object_(nullptr),
145 promoted_list_(promotion_stack) {}
147
148#ifdef DEBUG
149 constexpr static const char* const kName = "Scavenger";
150#endif
151
152 void VisitTypedDataViewPointers(TypedDataViewPtr view,
153 CompressedObjectPtr* first,
154 CompressedObjectPtr* last) override {
155 // TypedDataViews require extra processing to update their
156 // PointerBase::data_ pointer. If the underlying typed data is external, no
157 // update is needed. If the underlying typed data is internal, the pointer
158 // must be updated if the typed data was copied or promoted. We cannot
159 // safely dereference the underlying typed data to make this distinction.
160 // It may have been forwarded by a different scavenger worker, so the access
161 // could have a data race. Rather than checking the CID of the underlying
162 // typed data, which requires dereferencing the copied/promoted header, we
163 // compare the view's internal pointer to what it should be if the
164 // underlying typed data was internal, and assume that external typed data
165 // never points into the Dart heap. We must do this before VisitPointers
166 // because we want to compare the old pointer and old typed data.
167 const bool is_external =
168 view->untag()->data_ != view->untag()->DataFieldForInternalTypedData();
169
170 // Forward all fields of the typed data view.
171 VisitCompressedPointers(view->heap_base(), first, last);
172
173 if (view->untag()->data_ == nullptr) {
174 ASSERT(RawSmiValue(view->untag()->offset_in_bytes()) == 0 &&
175 RawSmiValue(view->untag()->length()) == 0);
176 ASSERT(is_external);
177 return;
178 }
179
180 // Explicit ifdefs because the compiler does not eliminate the unused
181 // relaxed load.
182#if defined(DEBUG)
183 // Validate 'this' is a typed data view.
184 const uword view_header = ReadHeaderRelaxed(view);
185 ASSERT(!IsForwarding(view_header) || view->IsOldObject());
186 ASSERT(IsTypedDataViewClassId(view->GetClassIdMayBeSmi()) ||
187 IsUnmodifiableTypedDataViewClassId(view->GetClassIdMayBeSmi()));
188
189 // Validate that the backing store is not a forwarding word. There is a data
190 // race reading the backing store's header unless there is only one worker.
191 TypedDataBasePtr td = view->untag()->typed_data();
192 ASSERT(td->IsHeapObject());
193 if (!parallel) {
194 const uword td_header = ReadHeaderRelaxed(td);
195 ASSERT(!IsForwarding(td_header) || td->IsOldObject());
196 if (td != Object::null()) {
197 // Fast object copy temporarily stores null in the typed_data field of
198 // views. This can cause the RecomputeDataFieldForInternalTypedData to
199 // run inappropriately, but when the object copy continues it will fix
200 // the data_ pointer.
201 ASSERT_EQUAL(IsExternalTypedDataClassId(td->GetClassId()), is_external);
202 }
203 }
204#endif
205
206 // If we have external typed data we can simply return since the backing
207 // store lives in C-heap and will not move.
208 if (is_external) {
209 return;
210 }
211
212 // Now we update the inner pointer.
213#if defined(DEBUG)
214 if (!parallel) {
215 ASSERT(IsTypedDataClassId(td->GetClassId()));
216 }
217#endif
218 view->untag()->RecomputeDataFieldForInternalTypedData();
219 }
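
The distinction above (an internal view's data_ aliases its backing store's payload, while an external view points outside the GC heap) can be pictured with a hedged, hypothetical sketch; FakeTypedData and FakeView are illustrative stand-ins, not the VM's classes.

#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for the VM's typed-data classes.
struct FakeTypedData {
  uint8_t payload[64];  // the backing store's in-heap payload
};

struct FakeView {
  uint8_t* data;           // interior pointer: backing->payload + offset
  FakeTypedData* backing;  // may reference data living outside the GC heap
  size_t offset_in_bytes;

  // Mirrors the is_external test above: an internal view's data pointer is
  // exactly the backing payload plus the offset.
  bool LooksInternal() const {
    return backing != nullptr && data == backing->payload + offset_in_bytes;
  }

  // Mirrors RecomputeDataFieldForInternalTypedData: after the backing store
  // has been copied or promoted, re-derive the interior pointer from its new
  // address.
  void RecomputeAfterMove(FakeTypedData* moved_backing) {
    backing = moved_backing;
    data = moved_backing->payload + offset_in_bytes;
  }
};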
220
221 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
222 ASSERT(Utils::IsAligned(first, sizeof(*first)));
223 ASSERT(Utils::IsAligned(last, sizeof(*last)));
224 for (ObjectPtr* current = first; current <= last; current++) {
225 ScavengePointer(current);
226 }
227 }
228
229#if defined(DART_COMPRESSED_POINTERS)
230 void VisitCompressedPointers(uword heap_base,
231 CompressedObjectPtr* first,
232 CompressedObjectPtr* last) override {
233 ASSERT(Utils::IsAligned(first, sizeof(*first)));
234 ASSERT(Utils::IsAligned(last, sizeof(*last)));
235 for (CompressedObjectPtr* current = first; current <= last; current++) {
236 ScavengeCompressedPointer(heap_base, current);
237 }
238 }
239#endif
240
242 ASSERT((obj == nullptr) || obj->IsOldObject());
243 visiting_old_object_ = obj;
244 if (obj != nullptr) {
245 // Card update happens in Page::VisitRememberedCards.
246 ASSERT(!obj->untag()->IsCardRemembered());
247 }
248 }
249 DART_FORCE_INLINE intptr_t ProcessObject(ObjectPtr obj);
250
251 intptr_t bytes_promoted() const { return bytes_promoted_; }
252
254 thread_ = Thread::Current();
255 page_space_->AcquireLock(freelist_);
256
257 LongJumpScope jump(thread_);
258 if (setjmp(*jump.Set()) == 0) {
259 scavenger_->IterateRoots(this);
260 } else {
261 ASSERT(scavenger_->abort_);
262 }
263 }
264
266 LongJumpScope jump(thread_);
267 if (setjmp(*jump.Set()) == 0) {
268 // Iterate until all work has been drained.
269 do {
270 ProcessToSpace();
271 ProcessPromotedList();
272 } while (HasWork());
273 } else {
274 ASSERT(scavenger_->abort_);
275 }
276 }
277
278 void ProcessAll() {
279 TIMELINE_FUNCTION_GC_DURATION(thread_, "ProcessToSpace");
280 LongJumpScope jump(thread_);
281 if (setjmp(*jump.Set()) == 0) {
282 do {
283 do {
284 ProcessToSpace();
285 ProcessPromotedList();
286 } while (HasWork());
287 ProcessWeakPropertiesScoped();
288 } while (HasWork());
289 } else {
290 ASSERT(scavenger_->abort_);
291 }
292 }
293
295 LongJumpScope jump(thread_);
296 if (setjmp(*jump.Set()) == 0) {
297 ProcessWeakPropertiesScoped();
298 } else {
299 ASSERT(scavenger_->abort_);
300 }
301 }
302
303 bool HasWork() {
304 if (scavenger_->abort_) return false;
305 return (scan_ != tail_) || (scan_ != nullptr && !scan_->IsResolved()) ||
306 !promoted_list_.IsEmpty();
307 }
308
310 return promoted_list_.WaitForWork(num_busy, scavenger_->abort_);
311 }
312
313 void ProcessWeak() {
314 if (!scavenger_->abort_) {
315 ASSERT(!HasWork());
316
317 for (Page* page = head_; page != nullptr; page = page->next()) {
318 ASSERT(page->IsResolved());
319 page->RecordSurvivors();
320 }
321
322 MournWeakProperties();
323 MournWeakReferences();
324 MournWeakArrays();
325 MournFinalizerEntries();
326 scavenger_->IterateWeak();
327 }
328 page_space_->ReleaseLock(freelist_);
329 thread_ = nullptr;
330 }
331
332 void Finalize() {
333 if (!scavenger_->abort_) {
334 promoted_list_.Finalize();
335 weak_array_list_.Finalize();
336 weak_property_list_.Finalize();
337 weak_reference_list_.Finalize();
338 finalizer_entry_list_.Finalize();
339 } else {
340 promoted_list_.AbandonWork();
341 weak_array_list_.AbandonWork();
342 weak_property_list_.AbandonWork();
343 weak_reference_list_.AbandonWork();
344 finalizer_entry_list_.AbandonWork();
345 }
346 }
347
348 Page* head() const { return head_; }
349 Page* tail() const { return tail_; }
350
351 static bool ForwardOrSetNullIfCollected(ObjectPtr parent,
352 CompressedObjectPtr* ptr_address);
353
354 private:
355 DART_FORCE_INLINE
356 void ScavengePointer(ObjectPtr* p) {
357 // ScavengePointer cannot be called recursively.
358 ObjectPtr obj = *p;
359
360 if (obj->IsImmediateOrOldObject()) {
361 return;
362 }
363
364 ObjectPtr new_obj = ScavengeObject(obj);
365
366 // Update the reference.
367 *p = new_obj;
368 if (new_obj->IsNewObject()) {
369 // Update the store buffer as needed.
370 ObjectPtr visiting_object = visiting_old_object_;
371 if (visiting_object != nullptr &&
372 visiting_object->untag()->TryAcquireRememberedBit()) {
373 thread_->StoreBufferAddObjectGC(visiting_object);
374 }
375 }
376 }
377
378 DART_FORCE_INLINE
379 void ScavengeCompressedPointer(uword heap_base, CompressedObjectPtr* p) {
380 // ScavengePointer cannot be called recursively.
381 ObjectPtr obj = p->Decompress(heap_base);
382
383 // Could be tested without decompression.
384 if (obj->IsImmediateOrOldObject()) {
385 return;
386 }
387
388 ObjectPtr new_obj = ScavengeObject(obj);
389
390 // Update the reference.
391 *p = new_obj;
392 if (new_obj->IsNewObject()) {
393 // Update the store buffer as needed.
394 ObjectPtr visiting_object = visiting_old_object_;
395 if (visiting_object != nullptr &&
396 visiting_object->untag()->TryAcquireRememberedBit()) {
397 thread_->StoreBufferAddObjectGC(visiting_object);
398 }
399 }
400 }
401
402 DART_FORCE_INLINE
403 ObjectPtr ScavengeObject(ObjectPtr obj) {
404 // Fragmentation might cause the scavenge to fail. Ensure we always have
405 // somewhere to bail out to.
406 ASSERT(thread_->long_jump_base() != nullptr);
407
408 uword raw_addr = UntaggedObject::ToAddr(obj);
409 // The scavenger only expects objects located in the from space.
410 ASSERT(from_->Contains(raw_addr));
411 // Read the header word of the object and determine if the object has
412 // already been copied.
414 ObjectPtr new_obj;
415 if (IsForwarding(header)) {
416 // Get the new location of the object.
417 new_obj = ForwardedObj(header);
418 } else {
419 intptr_t size = obj->untag()->HeapSize(header);
421 uword new_addr = 0;
422 // Check whether object should be promoted.
423 if (!Page::Of(obj)->IsSurvivor(raw_addr)) {
424 // Not a survivor of a previous scavenge. Just copy the object into the
425 // to space.
426 new_addr = TryAllocateCopy(size);
427 }
428 if (new_addr == 0) {
429 // This object is a survivor of a previous scavenge. Attempt to promote
430 // the object. (Or, unlikely, to-space was exhausted by fragmentation.)
431 new_addr = page_space_->TryAllocatePromoLocked(freelist_, size);
432 if (UNLIKELY(new_addr == 0)) {
433 // Promotion did not succeed. Copy into the to space instead.
434 scavenger_->failed_to_promote_ = true;
435 new_addr = TryAllocateCopy(size);
436 // To-space was exhausted by fragmentation and old-space could not
437 // grow.
438 if (UNLIKELY(new_addr == 0)) {
439 AbortScavenge();
440 }
441 }
442 }
443 ASSERT(new_addr != 0);
444 // Copy the object to the new location.
445 objcpy(reinterpret_cast<void*>(new_addr),
446 reinterpret_cast<void*>(raw_addr), size);
447
448 new_obj = UntaggedObject::FromAddr(new_addr);
449 if (new_obj->IsOldObject()) {
450 // Promoted: update age/barrier tags.
451 uword tags = static_cast<uword>(header);
453 tags = UntaggedObject::NewBit::update(false, tags);
454 new_obj->untag()->tags_.store(tags, std::memory_order_relaxed);
455 }
456
458 if (IsTypedDataClassId(cid)) {
459 static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();
460 }
461
462 // Try to install forwarding address.
463 uword forwarding_header = ForwardingHeader(new_obj);
464 if (InstallForwardingPointer(raw_addr, &header, forwarding_header)) {
465 if (new_obj->IsOldObject()) {
466 // If promotion succeeded then we need to remember it so that it can
467 // be traversed later.
468 promoted_list_.Push(new_obj);
469 bytes_promoted_ += size;
470 }
471 } else {
473 if (new_obj->IsOldObject()) {
474 // Abandon as a free list element.
475 FreeListElement::AsElement(new_addr, size);
476 bytes_promoted_ -= size;
477 } else {
478 // Undo to-space allocation.
479 tail_->Unallocate(new_addr, size);
480 }
481 // Use the winner's forwarding target.
482 new_obj = ForwardedObj(header);
483 }
484 }
485
486 return new_obj;
487 }
488
489 DART_FORCE_INLINE
490 bool InstallForwardingPointer(uword addr,
491 uword* old_header,
492 uword new_header) {
493 if (parallel) {
494 return reinterpret_cast<std::atomic<uword>*>(addr)
495 ->compare_exchange_strong(*old_header, new_header,
496 std::memory_order_relaxed);
497 } else {
498 *reinterpret_cast<uword*>(addr) = new_header;
499 return true;
500 }
501 }
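
A hedged standalone sketch of the parallel branch above: two installs racing through a relaxed compare-exchange, where exactly one wins and the loser reads the winner's forwarding word back out of its 'expected' value. The header and forwarding values are made up for illustration.

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uintptr_t> header{0x40};  // pretend original (non-forwarded) header word
  uintptr_t expected_a = 0x40;
  uintptr_t expected_b = 0x40;
  bool a_won = header.compare_exchange_strong(expected_a, 0x1001,
                                              std::memory_order_relaxed);
  bool b_won = header.compare_exchange_strong(expected_b, 0x2001,
                                              std::memory_order_relaxed);
  assert(a_won && !b_won);
  assert(expected_b == 0x1001);  // the loser observes the winner's forwarding word
  (void)a_won;
  (void)b_won;
  return 0;
}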
502
503 DART_FORCE_INLINE
504 uword TryAllocateCopy(intptr_t size) {
506 // TODO(rmacnak): Allocate one to start?
507 if (tail_ != nullptr) {
508 uword result = tail_->top_;
510 uword new_top = result + size;
511 if (LIKELY(new_top <= tail_->end_)) {
512 tail_->top_ = new_top;
513 return result;
514 }
515 }
516 return TryAllocateCopySlow(size);
517 }
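
A hedged sketch of the bump-pointer fast path above, using a hypothetical FakePage in place of the VM's Page: the copy succeeds while top + size still fits under end, otherwise the caller falls back to the slow path (new page, promotion, or abort).

#include <cstdint>

// Hypothetical page, standing in for the VM's Page with its top_/end_ fields.
struct FakePage {
  uintptr_t top;
  uintptr_t end;
};

// Bump-pointer allocation: claim [result, result + size) if it fits, else
// return 0 so the caller takes the slow path.
inline uintptr_t TryBumpAllocate(FakePage* page, intptr_t size) {
  uintptr_t result = page->top;
  uintptr_t new_top = result + size;
  if (new_top <= page->end) {
    page->top = new_top;
    return result;
  }
  return 0;
}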
518
519 DART_NOINLINE uword TryAllocateCopySlow(intptr_t size);
520
521 DART_NOINLINE DART_NORETURN void AbortScavenge() {
522 if (FLAG_verbose_gc) {
523 OS::PrintErr("Aborting scavenge\n");
524 }
525 scavenger_->abort_ = true;
526 // N.B. We must not set the sticky error, which may be a data race if
527 // that root slot was processed by a different worker.
528 thread_->long_jump_base()->Jump(1);
529 }
530
531 void ProcessToSpace();
532 void ProcessPromotedList();
533 void ProcessWeakPropertiesScoped();
534
535 void MournWeakProperties() {
536 weak_property_list_.Process([](WeakPropertyPtr weak_property) {
537 weak_property->untag()->key_ = Object::null();
538 weak_property->untag()->value_ = Object::null();
539 });
540 }
541
542 void MournWeakReferences() {
543 weak_reference_list_.Process([](WeakReferencePtr weak_reference) {
544 ForwardOrSetNullIfCollected(weak_reference,
545 &weak_reference->untag()->target_);
546 });
547 }
548
549 void MournWeakArrays() {
550 weak_array_list_.Process([](WeakArrayPtr weak_array) {
551 intptr_t length = Smi::Value(weak_array->untag()->length());
552 for (intptr_t i = 0; i < length; i++) {
554 &(weak_array->untag()->data()[i]));
555 }
556 });
557 }
558
559 void MournFinalizerEntries() {
560 finalizer_entry_list_.Process([&](FinalizerEntryPtr finalizer_entry) {
561 MournFinalizerEntry(this, finalizer_entry);
562 });
563 }
564
565 Thread* thread_;
566 Scavenger* scavenger_;
567 SemiSpace* from_;
568 PageSpace* page_space_;
569 FreeList* freelist_;
570 intptr_t bytes_promoted_;
571 ObjectPtr visiting_old_object_;
572 PromotionWorkList promoted_list_;
573 LocalBlockWorkList<64, WeakArrayPtr> weak_array_list_;
574 LocalBlockWorkList<64, WeakPropertyPtr> weak_property_list_;
575 LocalBlockWorkList<64, WeakReferencePtr> weak_reference_list_;
576 LocalBlockWorkList<64, FinalizerEntryPtr> finalizer_entry_list_;
577
578 Page* head_ = nullptr;
579 Page* tail_ = nullptr; // Allocating from here.
580 Page* scan_ = nullptr; // Resolving from here.
581
582 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitorBase);
583};
584
585typedef ScavengerVisitorBase<false> SerialScavengerVisitor;
586typedef ScavengerVisitorBase<true> ParallelScavengerVisitor;
587
588static bool IsUnreachable(ObjectPtr* ptr) {
589 ObjectPtr obj = *ptr;
590 if (obj->IsImmediateOrOldObject()) {
591 return false;
592 }
593 uword raw_addr = UntaggedObject::ToAddr(obj);
594 uword header = *reinterpret_cast<uword*>(raw_addr);
595 if (IsForwarding(header)) {
596 *ptr = ForwardedObj(header);
597 return false;
598 }
599 return true;
600}
601
603 public:
605
606 void VisitHandle(uword addr) override {
608 reinterpret_cast<FinalizablePersistentHandle*>(addr);
609 ObjectPtr* p = handle->ptr_addr();
610 if (IsUnreachable(p)) {
612 } else {
614 }
615 }
616
617 private:
619};
620
622 public:
624 ThreadBarrier* barrier,
626 RelaxedAtomic<uintptr_t>* num_busy)
627 : isolate_group_(isolate_group),
628 barrier_(barrier),
629 visitor_(visitor),
630 num_busy_(num_busy) {}
631
632 virtual void Run() {
633 if (!barrier_->TryEnter()) {
634 barrier_->Release();
635 return;
636 }
637
639 isolate_group_, Thread::kScavengerTask, /*bypass_safepoint=*/true);
640 ASSERT(result);
641
643
644 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
645
646 barrier_->Sync();
647 barrier_->Release();
648 }
649
651 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ParallelScavenge");
652
653 num_busy_->fetch_add(1u);
654 visitor_->ProcessRoots();
655
656 // Phase 1: Copying.
657 bool more_to_scavenge = false;
658 do {
659 do {
660 visitor_->ProcessSurvivors();
661 } while (visitor_->WaitForWork(num_busy_));
662 // Wait for all scavengers to stop.
663 barrier_->Sync();
664#if defined(DEBUG)
665 ASSERT(num_busy_->load() == 0);
666 // Caveat: must not allow any marker to continue past the barrier
667 // before we checked num_busy, otherwise one of them might rush
668 // ahead and increment it.
669 barrier_->Sync();
670#endif
671 // Check if we have any pending properties with marked keys.
672 // Those might have been marked by another marker.
673 visitor_->ProcessWeakProperties();
674 more_to_scavenge = visitor_->HasWork();
675 if (more_to_scavenge) {
676 // We have more work to do. Notify others.
677 num_busy_->fetch_add(1u);
678 }
679
680 // Wait for all other scavengers to finish processing their pending
681 // weak properties and decide if they need to continue marking.
682 // Caveat: we need two barriers here to make this decision in lock step
683 // between all scavengers and the main thread.
684 barrier_->Sync();
685 if (!more_to_scavenge && (num_busy_->load() > 0)) {
686 // All scavengers continue to mark as long as any single marker has
687 // some work to do.
688 num_busy_->fetch_add(1u);
689 more_to_scavenge = true;
690 }
691 barrier_->Sync();
692 } while (more_to_scavenge);
693
694 ASSERT(!visitor_->HasWork());
695
696 // Phase 2: Weak processing, statistics.
697 visitor_->ProcessWeak();
698 }
699
700 private:
701 IsolateGroup* isolate_group_;
702 ThreadBarrier* barrier_;
703 ParallelScavengerVisitor* visitor_;
704 RelaxedAtomic<uintptr_t>* num_busy_;
705
707};
708
709SemiSpace::SemiSpace(intptr_t gc_threshold_in_words)
710 : gc_threshold_in_words_(gc_threshold_in_words) {}
711
713 Page* page = head_;
714 while (page != nullptr) {
715 Page* next = page->next();
716 page->Deallocate();
717 page = next;
718 }
719}
720
722 if (capacity_in_words_ >= gc_threshold_in_words_) {
723 return nullptr; // Full.
724 }
725 Page* page = Page::Allocate(kPageSize, Page::kNew);
726 if (page == nullptr) {
727 return nullptr; // Out of memory;
728 }
729 capacity_in_words_ += kPageSizeInWords;
730 if (link) {
731 if (head_ == nullptr) {
732 head_ = tail_ = page;
733 } else {
734 tail_->set_next(page);
735 tail_ = page;
736 }
737 }
738 return page;
739}
740
741bool SemiSpace::Contains(uword addr) const {
742 for (Page* page = head_; page != nullptr; page = page->next()) {
743 if (page->Contains(addr)) return true;
744 }
745 return false;
746}
747
748void SemiSpace::WriteProtect(bool read_only) {
749 for (Page* page = head_; page != nullptr; page = page->next()) {
750 page->WriteProtect(read_only);
751 }
752}
753
754void SemiSpace::AddList(Page* head, Page* tail) {
755 if (head == nullptr) {
756 return;
757 }
758 if (head_ == nullptr) {
759 head_ = head;
760 tail_ = tail;
761 return;
762 }
763 tail_->set_next(head);
764 tail_ = tail;
765}
766
767// The initial estimate of how many words we can scavenge per microsecond (usage
768// before / scavenge time). This is a conservative value observed running
769// Flutter on a Nexus 4. After the first scavenge, we instead use a value based
770// on the device's actual speed.
771static constexpr intptr_t kConservativeInitialScavengeSpeed = 40;
772
773Scavenger::Scavenger(Heap* heap, intptr_t max_semi_capacity_in_words)
774 : heap_(heap),
775 max_semi_capacity_in_words_(max_semi_capacity_in_words),
776 scavenge_words_per_micro_(kConservativeInitialScavengeSpeed) {
777 ASSERT(heap != nullptr);
778
779 // Verify assumptions about the first word in objects which the scavenger is
780 // going to use for forwarding pointers.
782
783 // Set initial semi space size in words.
784 const intptr_t initial_semi_capacity_in_words = Utils::Minimum(
785 max_semi_capacity_in_words, FLAG_new_gen_semi_initial_size * MBInWords);
786
787 to_ = new SemiSpace(initial_semi_capacity_in_words);
788 idle_scavenge_threshold_in_words_ = initial_semi_capacity_in_words;
789
790 UpdateMaxHeapCapacity();
791 UpdateMaxHeapUsage();
792}
793
795 ASSERT(!scavenging_);
796 delete to_;
797 ASSERT(blocks_ == nullptr);
798}
799
800intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words,
801 GCReason reason) const {
802 bool grow = false;
803 if (2 * heap_->isolate_group()->MutatorCount() >
804 (old_size_in_words / kPageSizeInWords)) {
805 // Not enough TLABs to give two to each mutator.
806 grow = true;
807 }
808
809 if (reason == GCReason::kNewSpace) {
810 // If we GC for a reason other than new-space being full (i.e., full
811 // collection for old-space or store-buffer overflow), that's not an
812 // indication that new-space is too small.
813 if (stats_history_.Size() != 0) {
814 double garbage =
815 stats_history_.Get(0).ExpectedGarbageFraction(old_size_in_words);
816 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) {
817 // Too much survived last time; grow new-space in the hope that a
818 // greater fraction of objects will become unreachable before new-space
819 // becomes full.
820 grow = true;
821 }
822 }
823 }
824
825 if (grow) {
826 return Utils::Minimum(max_semi_capacity_in_words_,
827 old_size_in_words * FLAG_new_gen_growth_factor);
828 }
829 return old_size_in_words;
830}
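
To make the growth policy above concrete, here is a hedged sketch of the same arithmetic with the flag defaults written as constants (new_gen_garbage_threshold = 90, new_gen_growth_factor = 2). SketchNewSizeInWords is an illustrative helper, not the VM's API.

#include <algorithm>
#include <cstdint>

inline intptr_t SketchNewSizeInWords(intptr_t old_size_in_words,
                                     double expected_garbage_fraction,
                                     intptr_t max_semi_capacity_in_words) {
  const double kGarbageThreshold = 90 / 100.0;  // FLAG_new_gen_garbage_threshold
  const intptr_t kGrowthFactor = 2;             // FLAG_new_gen_growth_factor
  // Too much survived last cycle: grow, capped at the maximum semi-space size.
  if (expected_garbage_fraction < kGarbageThreshold) {
    return std::min(max_semi_capacity_in_words,
                    old_size_in_words * kGrowthFactor);
  }
  return old_size_in_words;
}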
831
833 public:
834 CollectStoreBufferVisitor(ObjectSet* in_store_buffer, const char* msg)
836 in_store_buffer_(in_store_buffer),
837 msg_(msg) {}
838
839 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
840 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
841 ObjectPtr obj = *ptr;
843 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
844
846 if (obj.GetClassId() == kArrayCid) {
847 const uword length =
848 Smi::Value(static_cast<UntaggedArray*>(obj.untag())->length());
850 msg_);
851 }
852 in_store_buffer_->Add(obj);
853 }
854 }
855
856#if defined(DART_COMPRESSED_POINTERS)
857 void VisitCompressedPointers(uword heap_base,
859 CompressedObjectPtr* to) override {
860 UNREACHABLE(); // Store buffer blocks are not compressed.
861 }
862#endif
863
864 private:
865 ObjectSet* const in_store_buffer_;
866 const char* msg_;
867};
868
870 public ObjectPointerVisitor {
871 public:
873 const SemiSpace* to,
874 const char* msg)
875 : ObjectVisitor(),
877 in_store_buffer_(in_store_buffer),
878 to_(to),
879 msg_(msg) {}
880
881 void VisitObject(ObjectPtr obj) override {
882 if (obj->IsPseudoObject()) return;
883 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
884
886 obj->untag()->IsRemembered() == in_store_buffer_->Contains(obj), msg_);
887
888 visiting_ = obj;
889 is_remembered_ = obj->untag()->IsRemembered();
890 is_card_remembered_ = obj->untag()->IsCardRemembered();
891 if (is_card_remembered_) {
892 RELEASE_ASSERT_WITH_MSG(!is_remembered_, msg_);
893 RELEASE_ASSERT_WITH_MSG(Page::Of(obj)->progress_bar_ == 0, msg_);
894 }
895 obj->untag()->VisitPointers(this);
896 }
897
898 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
899 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
900 ObjectPtr obj = *ptr;
901 if (obj->IsHeapObject() && obj->IsNewObject()) {
902 if (is_card_remembered_) {
903 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
904 FATAL(
905 "%s: Old object %#" Px " references new object %#" Px
906 ", but the "
907 "slot's card is not remembered. Consider using rr to watch the "
908 "slot %p and reverse-continue to find the store with a missing "
909 "barrier.\n",
910 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
911 ptr);
912 }
913 } else if (!is_remembered_) {
914 FATAL("%s: Old object %#" Px " references new object %#" Px
915 ", but it is "
916 "not in any store buffer. Consider using rr to watch the "
917 "slot %p and reverse-continue to find the store with a missing "
918 "barrier.\n",
919 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
920 ptr);
921 }
923 msg_);
924 }
925 }
926 }
927
928#if defined(DART_COMPRESSED_POINTERS)
929 void VisitCompressedPointers(uword heap_base,
931 CompressedObjectPtr* to) override {
932 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
933 ObjectPtr obj = ptr->Decompress(heap_base);
934 if (obj->IsHeapObject() && obj->IsNewObject()) {
935 if (is_card_remembered_) {
936 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
937 FATAL(
938 "%s: Old object %#" Px " references new object %#" Px
939 ", but the "
940 "slot's card is not remembered. Consider using rr to watch the "
941 "slot %p and reverse-continue to find the store with a missing "
942 "barrier.\n",
943 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
944 ptr);
945 }
946 } else if (!is_remembered_) {
947 FATAL("%s: Old object %#" Px " references new object %#" Px
948 ", but it is "
949 "not in any store buffer. Consider using rr to watch the "
950 "slot %p and reverse-continue to find the store with a missing "
951 "barrier.\n",
952 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
953 ptr);
954 }
956 msg_);
957 }
958 }
959 }
960#endif
961
962 private:
963 const ObjectSet* const in_store_buffer_;
964 const SemiSpace* const to_;
965 ObjectPtr visiting_;
966 bool is_remembered_;
967 bool is_card_remembered_;
968 const char* msg_;
969};
970
971void Scavenger::VerifyStoreBuffers(const char* msg) {
972 ASSERT(msg != nullptr);
973 Thread* thread = Thread::Current();
974 StackZone stack_zone(thread);
975 Zone* zone = stack_zone.GetZone();
976
977 ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
978 heap_->AddRegionsToObjectSet(in_store_buffer);
979
980 {
981 CollectStoreBufferVisitor visitor(in_store_buffer, msg);
982 heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
983 }
984
985 {
986 CheckStoreBufferVisitor visitor(in_store_buffer, to_, msg);
987 heap_->old_space()->VisitObjects(&visitor);
988 }
989}
990
991SemiSpace* Scavenger::Prologue(GCReason reason) {
993
996
997 if (FLAG_verify_store_buffer) {
999 VerifyStoreBuffers("Verifying remembered set before Scavenge");
1000 }
1001
1002 // Need to stash the old remembered set before any worker begins adding to the
1003 // new remembered set.
1004 blocks_ = heap_->isolate_group()->store_buffer()->PopAll();
1005 GCMarker* marker = heap_->old_space()->marker();
1006 if (marker != nullptr) {
1007 mark_blocks_ = marker->marking_stack_.PopAll();
1008 new_blocks_ = marker->new_marking_stack_.PopAll();
1009 deferred_blocks_ = marker->deferred_marking_stack_.PopAll();
1010 }
1011
1012 UpdateMaxHeapCapacity();
1013
1014 // Flip the two semi-spaces so that to_ is always the space for allocating
1015 // objects.
1016 SemiSpace* from;
1017 {
1018 MutexLocker ml(&space_lock_);
1019 from = to_;
1020 to_ = new SemiSpace(NewSizeInWords(from->gc_threshold_in_words(), reason));
1021 }
1022
1023 return from;
1024}
1025
1026void Scavenger::Epilogue(SemiSpace* from) {
1028
1029 // All objects in the to space have been copied from the from space at this
1030 // moment.
1031
1032 // Ensure the mutator thread will fail the next allocation. This will force
1033 // the mutator to allocate a new TLAB.
1034#if defined(DEBUG)
1035 heap_->isolate_group()->ForEachIsolate(
1036 [&](Isolate* isolate) {
1037 Thread* mutator_thread = isolate->mutator_thread();
1038 ASSERT(mutator_thread == nullptr || mutator_thread->top() == 0);
1039 },
1040 /*at_safepoint=*/true);
1041#endif // DEBUG
1042
1043 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
1044 if (stats_history_.Size() >= 2) {
1045 // Previous scavenge is only given half as much weight.
1046 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction();
1047 avg_frac /= 1.0 + 0.5; // Normalize.
1048 }
1049
1050 early_tenure_ = avg_frac >= (FLAG_early_tenuring_threshold / 100.0);
1051
1052 // Update estimate of scavenger speed. This statistic assumes survivorship
1053 // rates don't change much.
1054 intptr_t history_used = 0;
1055 intptr_t history_micros = 0;
1056 ASSERT(stats_history_.Size() > 0);
1057 for (intptr_t i = 0; i < stats_history_.Size(); i++) {
1058 history_used += stats_history_.Get(i).UsedBeforeInWords();
1059 history_micros += stats_history_.Get(i).DurationMicros();
1060 }
1061 if (history_micros == 0) {
1062 history_micros = 1;
1063 }
1064 scavenge_words_per_micro_ = history_used / history_micros;
1065 if (scavenge_words_per_micro_ == 0) {
1066 scavenge_words_per_micro_ = 1;
1067 }
1068
1069 // Update amount of new-space we must allocate before performing an idle
1070 // scavenge. This is based on the amount of work we expect to be able to
1071 // complete in a typical idle period.
1072 intptr_t average_idle_task_micros = 6000;
1073 idle_scavenge_threshold_in_words_ =
1074 scavenge_words_per_micro_ * average_idle_task_micros;
1075 // Even if the scavenge speed is slow, make sure we don't scavenge too
1076 // frequently, which just wastes power and falsely increases the promotion
1077 // rate.
1078 intptr_t lower_bound = 512 * KBInWords;
1079 if (idle_scavenge_threshold_in_words_ < lower_bound) {
1080 idle_scavenge_threshold_in_words_ = lower_bound;
1081 }
1082 // Even if the scavenge speed is very high, make sure we start considering
1083 // idle scavenges before new space is full to avoid requiring a scavenge in
1084 // the middle of a frame.
1085 intptr_t upper_bound = 8 * ThresholdInWords() / 10;
1086 if (idle_scavenge_threshold_in_words_ > upper_bound) {
1087 idle_scavenge_threshold_in_words_ = upper_bound;
1088 }
1089
1090 if (FLAG_verify_store_buffer) {
1091 // Scavenging will insert into the store buffer block on the current
1092 // thread (or, with a parallel scavenge, the worker threads). We need to
1093 // flush this thread-local block to the isolate group or we will incorrectly
1094 // report some objects as absent from the store buffer. This might cause
1095 // a program to hit a store buffer overflow a bit sooner than it might
1096 // otherwise, since overflow is measured in blocks. Store buffer overflows
1097 // are very rare.
1099
1101 VerifyStoreBuffers("Verifying remembered set after Scavenge");
1102 }
1103
1104 delete from;
1105 UpdateMaxHeapUsage();
1106 if (heap_ != nullptr) {
1107 heap_->UpdateGlobalMaxUsed();
1108 }
1109}
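
A hedged sketch of the idle-threshold bookkeeping done in Epilogue above: derive words scavenged per microsecond from the stats history, scale by a typical idle slice, then clamp between the 512 KB lower bound and 80% of the new-space threshold. The helper name and parameters are illustrative, not VM code.

#include <algorithm>
#include <cstdint>

inline intptr_t SketchIdleThresholdInWords(intptr_t history_used_words,
                                           int64_t history_micros,
                                           intptr_t new_space_threshold_in_words) {
  const intptr_t kKBInWords = 1024 / static_cast<intptr_t>(sizeof(void*));
  const intptr_t kAverageIdleTaskMicros = 6000;
  if (history_micros == 0) history_micros = 1;
  intptr_t words_per_micro =
      static_cast<intptr_t>(history_used_words / history_micros);
  if (words_per_micro == 0) words_per_micro = 1;
  intptr_t idle_threshold = words_per_micro * kAverageIdleTaskMicros;
  // Lower bound: don't scavenge too frequently on slow devices.
  idle_threshold = std::max(idle_threshold, 512 * kKBInWords);
  // Upper bound: start idle scavenges before new-space is completely full.
  idle_threshold = std::min(idle_threshold, 8 * new_space_threshold_in_words / 10);
  return idle_threshold;
}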
1110
1112 // To make a consistent decision, we should not yield for a safepoint in the
1113 // middle of deciding whether to perform an idle GC.
1114 NoSafepointScope no_safepoint;
1115
1116 // TODO(rmacnak): Investigate collecting a history of idle period durations.
1117 intptr_t used_in_words = UsedInWords() + freed_in_words_;
1118 intptr_t external_in_words = ExternalInWords();
1119 // Normal reason: new space is getting full.
1120 bool for_new_space = (used_in_words >= idle_scavenge_threshold_in_words_) ||
1121 (external_in_words >= idle_scavenge_threshold_in_words_);
1122 if (!for_new_space) {
1123 return false;
1124 }
1125
1126 int64_t estimated_scavenge_completion =
1128 used_in_words / scavenge_words_per_micro_;
1129 return estimated_scavenge_completion <= deadline;
1130}
1131
1132void Scavenger::IterateIsolateRoots(ObjectPointerVisitor* visitor) {
1133 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateIsolateRoots");
1136}
1137
1138template <bool parallel>
1139void Scavenger::IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor) {
1140 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateStoreBuffers");
1141
1142 // Iterating through the store buffers.
1143 // Grab the deduplication sets out of the isolate's consolidated store buffer.
1144 StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
1145 StoreBufferBlock* pending = blocks_;
1146 while (pending != nullptr) {
1147 StoreBufferBlock* next = pending->next();
1148 // Generated code appends to store buffers; tell MemorySanitizer.
1149 MSAN_UNPOISON(pending, sizeof(*pending));
1150 while (!pending->IsEmpty()) {
1151 ObjectPtr obj = pending->Pop();
1152 ASSERT(!obj->IsForwardingCorpse());
1153 ASSERT(obj->untag()->IsRemembered());
1154 obj->untag()->ClearRememberedBit();
1155 visitor->VisitingOldObject(obj);
1156 visitor->ProcessObject(obj);
1157 }
1158 pending->Reset();
1159 // Return the emptied block for recycling (no need to check threshold).
1160 store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
1161 blocks_ = pending = next;
1162 }
1163 // Done iterating through old objects remembered in the store buffers.
1164}
1165
1166template <bool parallel>
1167void Scavenger::IterateRememberedCards(
1168 ScavengerVisitorBase<parallel>* visitor) {
1169 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateRememberedCards");
1170 heap_->old_space()->VisitRememberedCards(visitor);
1171}
1172
1173void Scavenger::IterateObjectIdTable(ObjectPointerVisitor* visitor) {
1174#ifndef PRODUCT
1175 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateObjectIdTable");
1176 heap_->isolate_group()->VisitObjectIdRingPointers(visitor);
1177#endif // !PRODUCT
1178}
1179
1180enum RootSlices {
1181 kIsolate = 0,
1182 kObjectIdRing,
1183 kStoreBuffer,
1184 kNumRootSlices,
1185};
1186
1187template <bool parallel>
1188void Scavenger::IterateRoots(ScavengerVisitorBase<parallel>* visitor) {
1189 for (;;) {
1190 intptr_t slice = root_slices_started_.fetch_add(1);
1191 if (slice >= kNumRootSlices) {
1192 break; // No more slices.
1193 }
1194
1195 switch (slice) {
1196 case kIsolate:
1197 IterateIsolateRoots(visitor);
1198 break;
1199 case kObjectIdRing:
1200 IterateObjectIdTable(visitor);
1201 break;
1202 case kStoreBuffer:
1203 IterateStoreBuffers(visitor);
1204 break;
1205 default:
1206 UNREACHABLE();
1207 }
1208 }
1209
1210 IterateRememberedCards(visitor);
1211}
1212
1221
1222void Scavenger::IterateWeak() {
1223 for (;;) {
1224 intptr_t slice = weak_slices_started_.fetch_add(1);
1225 if (slice >= kNumWeakSlices) {
1226 break; // No more slices.
1227 }
1228
1229 switch (slice) {
1230 case kWeakHandles:
1231 MournWeakHandles();
1232 break;
1233 case kWeakTables:
1234 MournWeakTables();
1235 break;
1236 case kProgressBars:
1237 heap_->old_space()->ResetProgressBars();
1238 break;
1240 // Restore write-barrier assumptions.
1242 break;
1243 case kPruneWeak: {
1244 GCMarker* marker = heap_->old_space()->marker();
1245 if (marker != nullptr) {
1246 marker->PruneWeak(this);
1247 }
1248 } break;
1249 default:
1250 UNREACHABLE();
1251 }
1252 }
1253
1254 GCMarker* marker = heap_->old_space()->marker();
1255 if (marker != nullptr) {
1256 Prune(&mark_blocks_, &marker->marking_stack_);
1257 Prune(&new_blocks_, &marker->marking_stack_);
1258 Prune(&deferred_blocks_, &marker->deferred_marking_stack_);
1259 }
1260}
1261
1262void Scavenger::MournWeakHandles() {
1263 Thread* thread = Thread::Current();
1264 TIMELINE_FUNCTION_GC_DURATION(thread, "MournWeakHandles");
1265 ScavengerWeakVisitor weak_visitor(thread);
1266 heap_->isolate_group()->VisitWeakPersistentHandles(&weak_visitor);
1267}
1268
1269template <bool parallel>
1270void ScavengerVisitorBase<parallel>::ProcessToSpace() {
1271 VisitingOldObject(nullptr);
1272 while (scan_ != nullptr) {
1273 uword resolved_top = scan_->resolved_top_;
1274 while (resolved_top < scan_->top_) {
1275 ObjectPtr obj = UntaggedObject::FromAddr(resolved_top);
1276 resolved_top += ProcessObject(obj);
1277 }
1278 scan_->resolved_top_ = resolved_top;
1279
1280 Page* next = scan_->next();
1281 if (next == nullptr) {
1282 // Don't update scan_. More objects may yet be copied to this TLAB.
1283 return;
1284 }
1285 scan_ = next;
1286 }
1287}
1288
1289template <bool parallel>
1290void ScavengerVisitorBase<parallel>::ProcessPromotedList() {
1291 ObjectPtr obj;
1292 while (promoted_list_.Pop(&obj)) {
1293 VisitingOldObject(obj);
1294 ProcessObject(obj);
1295 // Black allocation.
1296 if (thread_->is_marking() && obj->untag()->TryAcquireMarkBit()) {
1297 thread_->MarkingStackAddObject(obj);
1298 }
1299 }
1300}
1301
1302template <bool parallel>
1303void ScavengerVisitorBase<parallel>::ProcessWeakPropertiesScoped() {
1304 if (scavenger_->abort_) return;
1305
1306 // Finished this round of scavenging. Process the pending weak properties
1307 // for which the keys have become reachable. Potentially this adds more
1308 // objects to the to space.
1309 weak_property_list_.Process([&](WeakPropertyPtr weak_property) {
1310 ObjectPtr key = weak_property->untag()->key();
1311 ASSERT(key->IsHeapObject());
1312 ASSERT(key->IsNewObject());
1313 ASSERT(from_->Contains(UntaggedObject::ToAddr(key)));
1314
1316 if (IsForwarding(header)) {
1317 VisitingOldObject(weak_property->IsOldObject() ? weak_property : nullptr);
1318 weak_property->untag()->VisitPointersNonvirtual(this);
1319 } else {
1320 weak_property_list_.Push(weak_property);
1321 }
1322 });
1323}
1324
1325void Scavenger::UpdateMaxHeapCapacity() {
1326 ASSERT(to_ != nullptr);
1327 ASSERT(heap_ != nullptr);
1328 auto isolate_group = heap_->isolate_group();
1329 ASSERT(isolate_group != nullptr);
1330 isolate_group->GetHeapNewCapacityMaxMetric()->SetValue(
1331 to_->capacity_in_words() * kWordSize);
1332}
1333
1334void Scavenger::UpdateMaxHeapUsage() {
1335 ASSERT(to_ != nullptr);
1336 ASSERT(heap_ != nullptr);
1337 auto isolate_group = heap_->isolate_group();
1338 ASSERT(isolate_group != nullptr);
1339 isolate_group->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
1340}
1341
1343 if (obj->IsImmediateOrOldObject()) return true;
1344 return IsForwarding(ReadHeaderRelaxed(obj));
1345}
1346
1347template <bool parallel>
1349#if defined(DEBUG)
1350 if (obj->IsNewObject()) {
1351 ASSERT(visiting_old_object_ == nullptr);
1352 } else {
1353 ASSERT(visiting_old_object_ == obj);
1354 ASSERT(!obj->untag()->IsRemembered());
1355 }
1356#endif
1357
1358 intptr_t cid = obj->GetClassId();
1359 if (UNLIKELY(cid == kWeakPropertyCid)) {
1360 WeakPropertyPtr weak_property = static_cast<WeakPropertyPtr>(obj);
1361 if (!IsScavengeSurvivor(weak_property->untag()->key())) {
1362 weak_property_list_.Push(weak_property);
1364 }
1365 } else if (UNLIKELY(cid == kWeakReferenceCid)) {
1366 WeakReferencePtr weak_reference = static_cast<WeakReferencePtr>(obj);
1367 if (!IsScavengeSurvivor(weak_reference->untag()->target())) {
1368#if !defined(DART_COMPRESSED_POINTERS)
1369 ScavengePointer(&weak_reference->untag()->type_arguments_);
1370#else
1371 ScavengeCompressedPointer(weak_reference->heap_base(),
1372 &weak_reference->untag()->type_arguments_);
1373#endif
1374 weak_reference_list_.Push(weak_reference);
1376 }
1377 } else if (UNLIKELY(cid == kWeakArrayCid)) {
1378 WeakArrayPtr weak_array = static_cast<WeakArrayPtr>(obj);
1379 weak_array_list_.Push(weak_array);
1380 return WeakArray::InstanceSize(Smi::Value(weak_array->untag()->length()));
1381 } else if (UNLIKELY(cid == kFinalizerEntryCid)) {
1382 FinalizerEntryPtr finalizer_entry = static_cast<FinalizerEntryPtr>(obj);
1383#if !defined(DART_COMPRESSED_POINTERS)
1384 ScavengePointer(&finalizer_entry->untag()->token_);
1385 ScavengePointer(&finalizer_entry->untag()->next_);
1386#else
1387 ScavengeCompressedPointer(finalizer_entry->heap_base(),
1388 &finalizer_entry->untag()->token_);
1389 ScavengeCompressedPointer(finalizer_entry->heap_base(),
1390 &finalizer_entry->untag()->next_);
1391#endif
1392 finalizer_entry_list_.Push(finalizer_entry);
1394 }
1395 return obj->untag()->VisitPointersNonvirtual(this);
1396}
1397
1398void Scavenger::MournWeakTables() {
1399 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "MournWeakTables");
1400
1401 auto rehash_weak_table = [](WeakTable* table, WeakTable* replacement_new,
1402 WeakTable* replacement_old,
1404 intptr_t size = table->size();
1405 for (intptr_t i = 0; i < size; i++) {
1406 if (table->IsValidEntryAtExclusive(i)) {
1407 ObjectPtr obj = table->ObjectAtExclusive(i);
1408 ASSERT(obj->IsHeapObject());
1409 uword raw_addr = UntaggedObject::ToAddr(obj);
1410 uword header = *reinterpret_cast<uword*>(raw_addr);
1411 if (IsForwarding(header)) {
1412 // The object has survived. Preserve its record.
1413 obj = ForwardedObj(header);
1414 auto replacement =
1415 obj->IsNewObject() ? replacement_new : replacement_old;
1416 replacement->SetValueExclusive(obj, table->ValueAtExclusive(i));
1417 } else {
1418 // The object has been collected.
1419 if (cleanup != nullptr) {
1420 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
1421 }
1422 }
1423 }
1424 }
1425 };
1426
1427 // Rehash the weak tables now that we know which objects survive this cycle.
1428 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
1429 const auto selector = static_cast<Heap::WeakSelector>(sel);
1430 auto table = heap_->GetWeakTable(Heap::kNew, selector);
1431 auto table_old = heap_->GetWeakTable(Heap::kOld, selector);
1432
1433 // Create a new weak table for the new-space.
1434 auto table_new = WeakTable::NewFrom(table);
1435
1436 Dart_HeapSamplingDeleteCallback cleanup = nullptr;
1437#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1438 if (sel == Heap::kHeapSamplingData) {
1440 }
1441#endif
1442 rehash_weak_table(table, table_new, table_old, cleanup);
1443 heap_->SetWeakTable(Heap::kNew, selector, table_new);
1444
1445 // Remove the old table as it has been replaced with the newly allocated
1446 // table above.
1447 delete table;
1448 }
1449
1450 // Each isolate might have a weak table used for fast snapshot writing (i.e.
1451 // isolate communication). Rehash those tables if need be.
1452 heap_->isolate_group()->ForEachIsolate(
1453 [&](Isolate* isolate) {
1454 auto table = isolate->forward_table_new();
1455 if (table != nullptr) {
1456 auto replacement = WeakTable::NewFrom(table);
1457 rehash_weak_table(table, replacement, isolate->forward_table_old(),
1458 nullptr);
1459 isolate->set_forward_table_new(replacement);
1460 }
1461 },
1462 /*at_safepoint=*/true);
1463}
1464
1465void Scavenger::Forward(MarkingStack* marking_stack) {
1466 ASSERT(abort_);
1467
1468 class ReverseMarkStack : public ObjectPointerVisitor {
1469 public:
1470 explicit ReverseMarkStack(IsolateGroup* group)
1471 : ObjectPointerVisitor(group) {}
1472
1473 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
1474 for (ObjectPtr* p = first; p <= last; p++) {
1475 ObjectPtr obj = *p;
1476#if defined(DEBUG)
1477 if (obj->IsNewObject()) {
1480 }
1481#endif
1482 if (obj->IsForwardingCorpse()) {
1483 // Promoted object was pushed to the mark list, but the promotion was reversed.
1484 *p = reinterpret_cast<ForwardingCorpse*>(UntaggedObject::ToAddr(obj))
1485 ->target();
1486 }
1487 }
1488 }
1489#if defined(DART_COMPRESSED_POINTERS)
1490 void VisitCompressedPointers(uword heap_base,
1491 CompressedObjectPtr* first,
1492 CompressedObjectPtr* last) override {
1493 UNREACHABLE();
1494 }
1495#endif
1496 };
1497
1498 ReverseMarkStack visitor(heap_->isolate_group());
1499 marking_stack->VisitObjectPointers(&visitor);
1500}
1501
1503 ASSERT(!abort_);
1504 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "PruneMarkingStack");
1505 MarkingStackBlock* reading;
1506 MarkingStackBlock* writing = marking_stack->PopNonFullBlock();
1507 for (;;) {
1508 {
1509 MutexLocker ml(&space_lock_);
1510 reading = *source;
1511 if (reading == nullptr) break;
1512 *source = reading->next();
1513 }
1514 // Generated code appends to marking stacks; tell MemorySanitizer.
1515 MSAN_UNPOISON(reading, sizeof(*reading));
1516 while (!reading->IsEmpty()) {
1517 ObjectPtr obj = reading->Pop();
1518 ASSERT(obj->IsHeapObject());
1519 if (obj->IsNewObject()) {
1521 if (!IsForwarding(header)) continue;
1522 obj = ForwardedObj(header);
1523 }
1524 ASSERT(!obj->IsForwardingCorpse());
1525 ASSERT(!obj->IsFreeListElement());
1526 writing->Push(obj);
1527 if (writing->IsFull()) {
1528 marking_stack->PushBlock(writing);
1529 writing = marking_stack->PopNonFullBlock();
1530 }
1531 }
1532 reading->Reset();
1533 marking_stack->PushBlock(reading);
1534 }
1535 marking_stack->PushBlock(writing);
1536}
1537
1539 ASSERT(!abort_);
1541 PruneWeak(&deferred->weak_properties);
1542 PruneWeak(&deferred->weak_references);
1543 PruneWeak(&deferred->weak_arrays);
1544 PruneWeak(&deferred->finalizer_entries);
1545}
1546
1547template <typename Type, typename PtrType>
1549 PtrType weak = list->Release();
1550 while (weak != Object::null()) {
1551 PtrType next;
1552 if (weak->IsOldObject()) {
1553 ASSERT(weak->GetClassId() == Type::kClassId);
1554 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1555 weak->untag()->next_seen_by_gc_ = Type::null();
1556 list->Enqueue(weak);
1557 } else {
1559 if (IsForwarding(header)) {
1560 weak = static_cast<PtrType>(ForwardedObj(header));
1561 ASSERT(weak->GetClassId() == Type::kClassId);
1562 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1563 weak->untag()->next_seen_by_gc_ = Type::null();
1564 list->Enqueue(weak);
1565 } else {
1566 // Collected in this scavenge.
1567 ASSERT(weak->GetClassId() == Type::kClassId);
1568 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1569 }
1570 }
1571
1572 weak = next;
1573 }
1574}
1575
1576// Returns whether the object referred to in `slot` was GCed this GC.
1577template <bool parallel>
1579 ObjectPtr parent,
1580 CompressedObjectPtr* slot) {
1581 ObjectPtr target = slot->Decompress(parent->heap_base());
1582 if (target->IsImmediateOrOldObject()) {
1583 // Object already null (which is old) or not touched during this GC.
1584 return false;
1585 }
1587 if (IsForwarding(header)) {
1588 // Get the new location of the object.
1590 *slot = target;
1591 if (target->IsNewObject() && parent->IsOldObject() &&
1592 parent->untag()->TryAcquireRememberedBit()) {
1594 }
1595 return false;
1596 }
1597 ASSERT(target->IsHeapObject());
1598 ASSERT(target->IsNewObject());
1599 *slot = Object::null();
1600 return true;
1601}
1602
1604 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
1605 (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
1606 (Thread::Current()->task_kind() == Thread::kCompactorTask));
1607 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1608 page->VisitObjectPointers(visitor);
1609 }
1610}
1611
1613 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
1614 (Thread::Current()->task_kind() == Thread::kMarkerTask));
1615 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1616 page->VisitObjects(visitor);
1617 }
1618}
1619
1621 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1622 set->AddRegion(page->start(), page->end());
1623 }
1624}
1625
1626void Scavenger::TryAllocateNewTLAB(Thread* thread,
1627 intptr_t min_size,
1628 bool can_safepoint) {
1629 ASSERT(heap_ != Dart::vm_isolate_group()->heap());
1630 ASSERT(!scavenging_);
1631
1632#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1633 // Find the remaining space available in the TLAB before abandoning it so we
1634 // can reset the heap sampling offset in the new TLAB.
1635 intptr_t remaining = thread->true_end() - thread->top();
1636 const bool heap_sampling_enabled = thread->end() != thread->true_end();
1637 const bool is_first_tlab = thread->true_end() == 0;
1638 if (heap_sampling_enabled && remaining > min_size) {
1639 // This is a sampling point and the TLAB isn't actually full.
1640 thread->heap_sampler().SampleNewSpaceAllocation(min_size);
1641 return;
1642 }
1643#endif
1644
1645 intptr_t allocated = AbandonRemainingTLAB(thread);
1646 if (can_safepoint && !thread->force_growth()) {
1647 ASSERT(thread->no_safepoint_scope_depth() == 0);
1648 heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace, allocated);
1649 }
1650
1651 MutexLocker ml(&space_lock_);
1652 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1653 if (page->owner() != nullptr) continue;
1654 intptr_t available =
1655 (page->end() - kAllocationRedZoneSize) - page->object_end();
1656 if (available >= min_size) {
1657 page->Acquire(thread);
1658#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1659 thread->heap_sampler().HandleNewTLAB(remaining, /*is_first_tlab=*/false);
1660#endif
1661 return;
1662 }
1663 }
1664
1665 Page* page = to_->TryAllocatePageLocked(true);
1666 if (page == nullptr) {
1667 return;
1668 }
1669 page->Acquire(thread);
1670#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1671 thread->heap_sampler().HandleNewTLAB(remaining, is_first_tlab);
1672#endif
1673
1675}
1676
1678 // Allocate any remaining space so the TLAB won't be reused. Write a filler
1679 // object so it remains iterable.
1680 uword top = thread->top();
1681 intptr_t size = thread->end() - thread->top();
1682 if (size > 0) {
1683 thread->set_top(top + size);
1685 }
1686
1687 AbandonRemainingTLAB(thread);
1688}
1689
1691 if (thread->top() == 0) return 0;
1692
1693 Page* page = Page::Of(thread->top() - 1);
1694 intptr_t allocated;
1695 {
1696 if (thread->is_marking()) {
1698 }
1699 MutexLocker ml(&space_lock_);
1700 allocated = page->Release(thread);
1701 }
1702 ASSERT(thread->top() == 0);
1703 return allocated;
1704}
1705
1706template <bool parallel>
1708 Page* page;
1709 {
1710 MutexLocker ml(&scavenger_->space_lock_);
1711 page = scavenger_->to_->TryAllocatePageLocked(false);
1712 }
1713 if (page == nullptr) {
1714 return 0;
1715 }
1716
1717 if (head_ == nullptr) {
1718 head_ = scan_ = page;
1719 } else {
1720 ASSERT(scan_ != nullptr);
1721 tail_->set_next(page);
1722 }
1723 tail_ = page;
1724
1725 return tail_->TryAllocateGC(size);
1726}
1727
1730
1731 ASSERT(thread->OwnsGCSafepoint());
1732
1733 // Scavenging is not reentrant. Make sure that is the case.
1734 ASSERT(!scavenging_);
1735 scavenging_ = true;
1736
1737 if (type == GCType::kEvacuate) {
1738 // Forces the next scavenge to promote all the objects in the new space.
1739 early_tenure_ = true;
1740 }
1741
1742 if (FLAG_verify_before_gc) {
1743 heap_->WaitForSweeperTasksAtSafepoint(thread);
1744 heap_->VerifyGC("Verifying before Scavenge",
1745 thread->is_marking() ? kAllowMarked : kForbidMarked);
1746 }
1747
1748 // Prepare for a scavenge.
1749 failed_to_promote_ = false;
1750 abort_ = false;
1751 root_slices_started_ = 0;
1752 weak_slices_started_ = 0;
1753 freed_in_words_ = 0;
1754 intptr_t abandoned_bytes = 0; // TODO(rmacnak): Count fragmentation?
1755 SpaceUsage usage_before = GetCurrentUsage();
1756 intptr_t promo_candidate_words = 0;
1757 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1758 page->Release();
1759 if (early_tenure_) {
1760 page->EarlyTenure();
1761 }
1762 promo_candidate_words += page->promo_candidate_words();
1763 }
1765 SemiSpace* from = Prologue(reason);
1766
1767 intptr_t bytes_promoted;
1768 if (FLAG_scavenger_tasks == 0) {
1769 bytes_promoted = SerialScavenge(from);
1770 } else {
1771 bytes_promoted = ParallelScavenge(from);
1772 }
1773 if (abort_) {
1774 ReverseScavenge(&from);
1775 bytes_promoted = 0;
1776 } else {
1777 if ((ThresholdInWords() - UsedInWords()) < KBInWords) {
1778 // Don't scavenge again until the next old-space GC has occurred. Prevents
1779 // performing one scavenge per allocation as the heap limit is approached.
1780 heap_->assume_scavenge_will_fail_ = true;
1781 }
1782 }
1783 ASSERT(promotion_stack_.IsEmpty());
1785
1786 // Scavenge finished. Run accounting.
1788 stats_history_.Add(ScavengeStats(
1789 start, end, usage_before, GetCurrentUsage(), promo_candidate_words,
1790 bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2));
1791 Epilogue(from);
1792
1793 if (FLAG_verify_after_gc) {
1794 heap_->WaitForSweeperTasksAtSafepoint(thread);
1795 heap_->VerifyGC("Verifying after Scavenge...",
1796 thread->is_marking() ? kAllowMarked : kForbidMarked);
1797 }
1798
1799 // Done scavenging. Reset the marker.
1800 ASSERT(scavenging_);
1801 scavenging_ = false;
1802
1803 // It is possible for objects to stay in the new space
1804 // if the VM cannot create more pages for these objects.
1805 ASSERT((type != GCType::kEvacuate) || (UsedInWords() == 0) ||
1806 failed_to_promote_);
1807}
1808
1809intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
1810 FreeList* freelist = heap_->old_space()->DataFreeList(0);
1811 SerialScavengerVisitor visitor(heap_->isolate_group(), this, from, freelist,
1812 &promotion_stack_);
1813 visitor.ProcessRoots();
1814 visitor.ProcessAll();
1815 visitor.ProcessWeak();
1816 visitor.Finalize();
1817 to_->AddList(visitor.head(), visitor.tail());
1818 return visitor.bytes_promoted();
1819}
1820
1821intptr_t Scavenger::ParallelScavenge(SemiSpace* from) {
1822 intptr_t bytes_promoted = 0;
1823 const intptr_t num_tasks = FLAG_scavenger_tasks;
1824 ASSERT(num_tasks > 0);
1825
1826 ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
1827 RelaxedAtomic<uintptr_t> num_busy = 0;
1828
1829 ParallelScavengerVisitor** visitors =
1830 new ParallelScavengerVisitor*[num_tasks];
1831 for (intptr_t i = 0; i < num_tasks; i++) {
1832 FreeList* freelist = heap_->old_space()->DataFreeList(i);
1833 visitors[i] = new ParallelScavengerVisitor(
1834 heap_->isolate_group(), this, from, freelist, &promotion_stack_);
1835 if (i < (num_tasks - 1)) {
1836 // Begin scavenging on a helper thread.
1837 bool result = Dart::thread_pool()->Run<ParallelScavengerTask>(
1838 heap_->isolate_group(), barrier, visitors[i], &num_busy);
1839 ASSERT(result);
1840 } else {
1841 // Last worker is the main thread.
1842 ParallelScavengerTask task(heap_->isolate_group(), barrier, visitors[i],
1843 &num_busy);
1844 task.RunEnteredIsolateGroup();
1845 barrier->Sync();
1846 barrier->Release();
1847 }
1848 }
1849
1850 for (intptr_t i = 0; i < num_tasks; i++) {
1851 ParallelScavengerVisitor* visitor = visitors[i];
1852 visitor->Finalize();
1853 to_->AddList(visitor->head(), visitor->tail());
1854 bytes_promoted += visitor->bytes_promoted();
1855 delete visitor;
1856 }
1857
1858 delete[] visitors;
1859 return bytes_promoted;
1860}
1861
1862void Scavenger::ReverseScavenge(SemiSpace** from) {
1863 Thread* thread = Thread::Current();
1864 TIMELINE_FUNCTION_GC_DURATION(thread, "ReverseScavenge");
1865
1866 class ReverseFromForwardingVisitor : public ObjectVisitor {
1867 void VisitObject(ObjectPtr from_obj) override {
1868 uword from_header = ReadHeaderRelaxed(from_obj);
1869 if (IsForwarding(from_header)) {
1870 ObjectPtr to_obj = ForwardedObj(from_header);
1871 uword to_header = ReadHeaderRelaxed(to_obj);
1872 intptr_t size = to_obj->untag()->HeapSize();
1873
1874 // Reset the age bits in case this was a promotion.
1875 uword from_header = static_cast<uword>(to_header);
1876 from_header =
1877 UntaggedObject::OldAndNotRememberedBit::update(false, from_header);
1878 from_header = UntaggedObject::NewBit::update(true, from_header);
1879
1880 WriteHeaderRelaxed(from_obj, from_header);
1881
1882 ForwardingCorpse::AsForwarder(UntaggedObject::ToAddr(to_obj), size)
1883 ->set_target(from_obj);
1884 }
1885 }
1886 };
1887
1888 ReverseFromForwardingVisitor visitor;
1889 for (Page* page = (*from)->head(); page != nullptr; page = page->next()) {
1890 page->VisitObjects(&visitor);
1891 }
1892
1893 // Swap from-space and to-space. The abandoned to-space will be deleted in
1894 // the epilogue.
1895 {
1896 MutexLocker ml(&space_lock_);
1897 SemiSpace* temp = to_;
1898 to_ = *from;
1899 *from = temp;
1900 }
1901
1902 // Release any remaining part of the promotion worklist that wasn't completed.
1903 promotion_stack_.Reset();
1904
1905 // Release any remaining part of the remembered set that wasn't completed.
1906 StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
1907 StoreBufferBlock* pending = blocks_;
1908 while (pending != nullptr) {
1909 StoreBufferBlock* next = pending->next();
1910 pending->Reset();
1911 // Return the emptied block for recycling (no need to check threshold).
1912 store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
1913 pending = next;
1914 }
1915 blocks_ = nullptr;
1916
1917 // Reverse the partial forwarding from the aborted scavenge. This also
1918 // rebuilds the remembered set.
1919 heap_->WaitForSweeperTasksAtSafepoint(thread);
1920 Become::FollowForwardingPointers(thread);
1921
1922 heap_->old_space()->ResetProgressBars();
1923
1924 GCMarker* marker = heap_->old_space()->marker();
1925 if (marker != nullptr) {
1926 marker->marking_stack_.PushAll(mark_blocks_);
1927 mark_blocks_ = nullptr;
1928 marker->marking_stack_.PushAll(new_blocks_);
1929 new_blocks_ = nullptr;
1930 marker->deferred_marking_stack_.PushAll(deferred_blocks_);
1931 deferred_blocks_ = nullptr;
1932 // Not redundant with the flush at the beginning of the scavenge because
1933 // the scavenge workers may add promoted objects to the mark stack.
1934 heap_->isolate_group()->FlushMarkingStacks();
1935
1936 Forward(&marker->marking_stack_);
1937 ASSERT(marker->new_marking_stack_.IsEmpty());
1938 Forward(&marker->deferred_marking_stack_);
1939 }
1940
1941 // Restore write-barrier assumptions. Must occur after mark list fixups.
1942 heap_->isolate_group()->RememberLiveTemporaries();
1943
1944 // Don't scavenge again until the next old-space GC has occurred. Prevents
1945 // performing one scavenge per allocation as the heap limit is approached.
1946 heap_->assume_scavenge_will_fail_ = true;
1947}
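Part of aborting a scavenge is handing the unprocessed store-buffer blocks back to their pool, as in the loop over blocks_ in ReverseScavenge above. A small sketch of that drain-and-recycle pattern, with invented types rather than the VM's StoreBuffer API:

#include <cstddef>

struct ToyBlock {
  ToyBlock* next = nullptr;
  std::size_t count = 0;     // number of pending entries in this block
  void Reset() { count = 0; }
};

struct ToyBlockPool {
  ToyBlock* free_list = nullptr;
  void PushBlock(ToyBlock* block) {  // recycle without any threshold check
    block->next = free_list;
    free_list = block;
  }
};

// Walk the singly linked list of pending blocks, empty each one, return it to
// the pool, and clear the head pointer so nothing is processed twice.
void DrainPending(ToyBlock*& pending_head, ToyBlockPool* pool) {
  ToyBlock* pending = pending_head;
  while (pending != nullptr) {
    ToyBlock* next = pending->next;
    pending->Reset();
    pool->PushBlock(pending, /* no threshold check in this toy */);
    pending = next;
  }
  pending_head = nullptr;
}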
1948
1949void Scavenger::WriteProtect(bool read_only) {
1950 ASSERT(!scavenging_);
1951 to_->WriteProtect(read_only);
1952}
1953
1954#ifndef PRODUCT
1955void Scavenger::PrintToJSONObject(JSONObject* object) const {
1956 auto isolate_group = IsolateGroup::Current();
1957 ASSERT(isolate_group != nullptr);
1958 JSONObject space(object, "new");
1959 space.AddProperty("type", "HeapSpace");
1960 space.AddProperty("name", "new");
1961 space.AddProperty("vmName", "Scavenger");
1962 space.AddProperty("collections", collections());
1963 if (collections() > 0) {
1964 int64_t run_time = isolate_group->UptimeMicros();
1965 run_time = Utils::Maximum(run_time, static_cast<int64_t>(0));
1966 double run_time_millis = MicrosecondsToMilliseconds(run_time);
1967 double avg_time_between_collections =
1968 run_time_millis / static_cast<double>(collections());
1969 space.AddProperty("avgCollectionPeriodMillis",
1970 avg_time_between_collections);
1971 } else {
1972 space.AddProperty("avgCollectionPeriodMillis", 0.0);
1973 }
1974 space.AddProperty64("used", UsedInWords() * kWordSize);
1975 space.AddProperty64("capacity", CapacityInWords() * kWordSize);
1976 space.AddProperty64("external", ExternalInWords() * kWordSize);
1978}
1979#endif // !PRODUCT
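For reference, the "new" entry that PrintToJSONObject adds to the enclosing JSON object looks roughly like the following. The property names come from the code above; the values are made up for illustration ("used", "capacity", and "external" are bytes, i.e. words * kWordSize, and "time" is seconds via MicrosecondsToSeconds).

"new": {
  "type": "HeapSpace",
  "name": "new",
  "vmName": "Scavenger",
  "collections": 42,
  "avgCollectionPeriodMillis": 183.2,
  "used": 524288,
  "capacity": 1048576,
  "external": 8192,
  "time": 0.137
}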
1980
1981} // namespace dart