Flutter Engine
The Flutter Engine
scavenger.cc
Go to the documentation of this file.
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/scavenger.h"
6
7#include "platform/assert.h"
9#include "vm/class_id.h"
11#include "vm/dart.h"
12#include "vm/dart_api_state.h"
13#include "vm/flag_list.h"
14#include "vm/flags.h"
15#include "vm/heap/become.h"
16#include "vm/heap/gc_shared.h"
17#include "vm/heap/marker.h"
18#include "vm/heap/pages.h"
20#include "vm/heap/safepoint.h"
21#include "vm/heap/verifier.h"
22#include "vm/heap/weak_table.h"
23#include "vm/isolate.h"
24#include "vm/lockers.h"
25#include "vm/log.h"
26#include "vm/longjump.h"
27#include "vm/object.h"
28#include "vm/object_id_ring.h"
29#include "vm/object_set.h"
30#include "vm/port.h"
31#include "vm/stack_frame.h"
32#include "vm/tagged_pointer.h"
33#include "vm/thread_barrier.h"
34#include "vm/timeline.h"
35#include "vm/visitor.h"
36
37namespace dart {
38
40 early_tenuring_threshold,
41 66,
42 "When more than this percentage of promotion candidates survive, "
43 "promote all survivors of next scavenge.");
45 new_gen_garbage_threshold,
46 90,
47 "Grow new gen when less than this percentage is garbage.");
48DEFINE_FLAG(int, new_gen_growth_factor, 2, "Grow new gen by this factor.");
49
50// Scavenger uses the kCardRememberedBit to distinguish forwarded and
51// non-forwarded objects. We must choose a bit that is clear for all new-space
52// object headers, and which doesn't intersect with the target address because
53// of object alignment.
54enum {
58};
59
60// If the forwarded bit and pointer tag bit are the same, we can avoid a few
61// conversions.
63 static_cast<uword>(kHeapObjectTag));
64
65DART_FORCE_INLINE
69 return bits == kForwarded;
70}
71
72DART_FORCE_INLINE
75 return static_cast<ObjectPtr>(header);
76}
77
78DART_FORCE_INLINE
80 uword result = static_cast<uword>(target);
82 return result;
83}
84
85// Races: The first word in the copied region is a header word that may be
86// updated by the scavenger worker in another thread, so we might copy either
87// the original object header or an installed forwarding pointer. This race is
88// harmless because if we copy the installed forwarding pointer, the scavenge
89// worker in the current thread will abandon this copy. We do not mark the loads
90// here as relaxed so the C++ compiler still has the freedom to reorder them.
92static void objcpy(void* dst, const void* src, size_t size) {
93 // A mem copy specialized for objects. We can assume:
94 // - dst and src do not overlap
95 ASSERT(
96 (reinterpret_cast<uword>(dst) + size <= reinterpret_cast<uword>(src)) ||
97 (reinterpret_cast<uword>(src) + size <= reinterpret_cast<uword>(dst)));
98 // - dst and src are word aligned
99 ASSERT(Utils::IsAligned(reinterpret_cast<uword>(dst), sizeof(uword)));
100 ASSERT(Utils::IsAligned(reinterpret_cast<uword>(src), sizeof(uword)));
101 // - size is strictly positive
102 ASSERT(size > 0);
103 // - size is a multiple of double words
104 ASSERT(Utils::IsAligned(size, 2 * sizeof(uword)));
105
106 uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
107 const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
108 do {
109 uword a = *src_cursor++;
110 uword b = *src_cursor++;
111 *dst_cursor++ = a;
112 *dst_cursor++ = b;
113 size -= (2 * sizeof(uword));
114 } while (size > 0);
115}
116
117DART_FORCE_INLINE
119 return reinterpret_cast<std::atomic<uword>*>(UntaggedObject::ToAddr(obj))
120 ->load(std::memory_order_relaxed);
121}
122
123DART_FORCE_INLINE
125 reinterpret_cast<std::atomic<uword>*>(UntaggedObject::ToAddr(obj))
126 ->store(header, std::memory_order_relaxed);
127}
128
129template <bool parallel>
132 public:
134 Scavenger* scavenger,
135 SemiSpace* from,
136 FreeList* freelist,
137 PromotionStack* promotion_stack)
139 thread_(nullptr),
140 scavenger_(scavenger),
141 from_(from),
142 page_space_(scavenger->heap_->old_space()),
143 freelist_(freelist),
144 bytes_promoted_(0),
145 visiting_old_object_(nullptr),
146 pending_(nullptr),
147 promoted_list_(promotion_stack) {}
148 ~ScavengerVisitorBase() { ASSERT(pending_ == nullptr); }
149
150#ifdef DEBUG
151 constexpr static const char* const kName = "Scavenger";
152#endif
153
154 void VisitTypedDataViewPointers(TypedDataViewPtr view,
155 CompressedObjectPtr* first,
156 CompressedObjectPtr* last) override {
157 // TypedDataViews require extra processing to update their
158 // PointerBase::data_ pointer. If the underlying typed data is external, no
159 // update is needed. If the underlying typed data is internal, the pointer
160 // must be updated if the typed data was copied or promoted. We cannot
161 // safely dereference the underlying typed data to make this distinction.
162 // It may have been forwarded by a different scavenger worker, so the access
163 // could have a data race. Rather than checking the CID of the underlying
164 // typed data, which requires dereferencing the copied/promoted header, we
165 // compare the view's internal pointer to what it should be if the
166 // underlying typed data was internal, and assume that external typed data
167 // never points into the Dart heap. We must do this before VisitPointers
168 // because we want to compare the old pointer and old typed data.
169 const bool is_external =
170 view->untag()->data_ != view->untag()->DataFieldForInternalTypedData();
171
172 // Forward all fields of the typed data view.
173 VisitCompressedPointers(view->heap_base(), first, last);
174
175 if (view->untag()->data_ == nullptr) {
176 ASSERT(RawSmiValue(view->untag()->offset_in_bytes()) == 0 &&
177 RawSmiValue(view->untag()->length()) == 0);
178 ASSERT(is_external);
179 return;
180 }
181
182 // Explicit ifdefs because the compiler does not eliminate the unused
183 // relaxed load.
184#if defined(DEBUG)
185 // Validate 'this' is a typed data view.
186 const uword view_header = ReadHeaderRelaxed(view);
187 ASSERT(!IsForwarding(view_header) || view->IsOldObject());
188 ASSERT(IsTypedDataViewClassId(view->GetClassIdMayBeSmi()) ||
189 IsUnmodifiableTypedDataViewClassId(view->GetClassIdMayBeSmi()));
190
191 // Validate that the backing store is not a forwarding word. There is a data
192 // race reader the backing store's header unless there is only one worker.
193 TypedDataBasePtr td = view->untag()->typed_data();
194 ASSERT(td->IsHeapObject());
195 if (!parallel) {
196 const uword td_header = ReadHeaderRelaxed(td);
197 ASSERT(!IsForwarding(td_header) || td->IsOldObject());
198 if (td != Object::null()) {
199 // Fast object copy temporarily stores null in the typed_data field of
200 // views. This can cause the RecomputeDataFieldForInternalTypedData to
201 // run inappropriately, but when the object copy continues it will fix
202 // the data_ pointer.
203 ASSERT_EQUAL(IsExternalTypedDataClassId(td->GetClassId()), is_external);
204 }
205 }
206#endif
207
208 // If we have external typed data we can simply return since the backing
209 // store lives in C-heap and will not move.
210 if (is_external) {
211 return;
212 }
213
214 // Now we update the inner pointer.
215#if defined(DEBUG)
216 if (!parallel) {
217 ASSERT(IsTypedDataClassId(td->GetClassId()));
218 }
219#endif
220 view->untag()->RecomputeDataFieldForInternalTypedData();
221 }
222
223 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
224#if !defined(TARGET_ARCH_IA32)
225 // Pointers embedded in Instructions are not aligned.
226 ASSERT(Utils::IsAligned(first, sizeof(*first)));
227 ASSERT(Utils::IsAligned(last, sizeof(*last)));
228#endif
229 for (ObjectPtr* current = first; current <= last; current++) {
230 ScavengePointer(current);
231 }
232 }
233
234 bool PredicateVisitPointers(ObjectPtr* first, ObjectPtr* last) override {
235 bool has_new_target = false;
236 for (ObjectPtr* current = first; current <= last; current++) {
237 has_new_target |= PredicateScavengePointer(current);
238 }
239 return has_new_target;
240 }
241
242#if defined(DART_COMPRESSED_POINTERS)
244 CompressedObjectPtr* first,
245 CompressedObjectPtr* last) override {
246 bool has_new_target = false;
247 for (CompressedObjectPtr* current = first; current <= last; current++) {
248 has_new_target |= PredicateScavengeCompressedPointer(heap_base, current);
249 }
250 return has_new_target;
251 }
252
253 void VisitCompressedPointers(uword heap_base,
254 CompressedObjectPtr* first,
255 CompressedObjectPtr* last) override {
256 if (PredicateVisitCompressedPointers(heap_base, first, last)) {
257 // Update the store buffer as needed.
258 ObjectPtr visiting_object = visiting_old_object_;
259 if (visiting_object != nullptr &&
260 visiting_object->untag()->TryAcquireRememberedBit()) {
261 thread_->StoreBufferAddObjectGC(visiting_object);
262 }
263 }
264 }
265#endif
266
268 ASSERT((obj == nullptr) || obj->IsOldObject());
269 visiting_old_object_ = obj;
270 if (obj != nullptr) {
271 // Card update happens in Page::VisitRememberedCards.
272 ASSERT(!obj->untag()->IsCardRemembered());
273 }
274 }
275 DART_FORCE_INLINE intptr_t ProcessObject(ObjectPtr obj);
276
277 intptr_t bytes_promoted() const { return bytes_promoted_; }
278
280 thread_ = Thread::Current();
281 page_space_->AcquireLock(freelist_);
282
283 LongJumpScope jump(thread_);
284 if (setjmp(*jump.Set()) == 0) {
285 scavenger_->IterateRoots(this);
286 } else {
287 ASSERT(scavenger_->abort_);
288 }
289 }
290
292 LongJumpScope jump(thread_);
293 if (setjmp(*jump.Set()) == 0) {
294 // Iterate until all work has been drained.
295 do {
296 ProcessToSpace();
297 ProcessPromotedList();
298 } while (HasWork());
299 } else {
300 ASSERT(scavenger_->abort_);
301 }
302 }
303
304 void ProcessAll() {
305 TIMELINE_FUNCTION_GC_DURATION(thread_, "ProcessToSpace");
306 LongJumpScope jump(thread_);
307 if (setjmp(*jump.Set()) == 0) {
308 do {
309 do {
310 ProcessToSpace();
311 ProcessPromotedList();
312 } while (HasWork());
313 ProcessWeakPropertiesScoped();
314 } while (HasWork());
315 } else {
316 ASSERT(scavenger_->abort_);
317 }
318 }
319
321 LongJumpScope jump(thread_);
322 if (setjmp(*jump.Set()) == 0) {
323 ProcessWeakPropertiesScoped();
324 } else {
325 ASSERT(scavenger_->abort_);
326 }
327 }
328
329 bool HasWork() {
330 if (scavenger_->abort_) return false;
331 return (scan_ != tail_) || (scan_ != nullptr && !scan_->IsResolved()) ||
332 !promoted_list_.IsEmpty();
333 }
334
336 return promoted_list_.WaitForWork(num_busy, scavenger_->abort_);
337 }
338
339 void ProcessWeak() {
340 if (!scavenger_->abort_) {
341 ASSERT(!HasWork());
342
343 for (Page* page = head_; page != nullptr; page = page->next()) {
344 ASSERT(page->IsResolved());
345 page->RecordSurvivors();
346 }
347
348 MournWeakProperties();
349 MournWeakReferences();
350 MournWeakArrays();
351 MournFinalizerEntries();
352 scavenger_->IterateWeak();
353 }
354 page_space_->ReleaseLock(freelist_);
355 thread_ = nullptr;
356 }
357
358 void Finalize(StoreBuffer* store_buffer) {
359 if (!scavenger_->abort_) {
360 promoted_list_.Finalize();
361 weak_array_list_.Finalize();
362 weak_property_list_.Finalize();
363 weak_reference_list_.Finalize();
364 finalizer_entry_list_.Finalize();
365 ASSERT(pending_ == nullptr);
366 } else {
367 promoted_list_.AbandonWork();
368 weak_array_list_.AbandonWork();
369 weak_property_list_.AbandonWork();
370 weak_reference_list_.AbandonWork();
371 finalizer_entry_list_.AbandonWork();
372 if (pending_ != nullptr) {
373 pending_->Reset();
374 store_buffer->PushBlock(pending_, StoreBuffer::kIgnoreThreshold);
375 pending_ = nullptr;
376 }
377 }
378 }
379
380 Page* head() const { return head_; }
381 Page* tail() const { return tail_; }
382 void set_pending(StoreBufferBlock* pending) { pending_ = pending; }
383
384 static bool ForwardOrSetNullIfCollected(ObjectPtr parent,
385 CompressedObjectPtr* ptr_address);
386
387 private:
388 DART_FORCE_INLINE
389 bool PredicateScavengePointer(ObjectPtr* p) {
390 // ScavengePointer cannot be called recursively.
391 ObjectPtr obj = *p;
392
393 if (obj->IsImmediateObject()) {
394 return false;
395 }
396 if (obj->IsOldObject()) {
397 return obj->untag()->IsEvacuationCandidate();
398 }
399
400 ObjectPtr new_obj = ScavengeObject(obj);
401
402 // Update the reference.
403 *p = new_obj;
404 return new_obj->IsNewObject();
405 }
406
407 DART_FORCE_INLINE
408 void ScavengePointer(ObjectPtr* p) {
409 if (PredicateScavengePointer(p)) {
410 // Update the store buffer as needed.
411 ObjectPtr visiting_object = visiting_old_object_;
412 if (visiting_object != nullptr &&
413 visiting_object->untag()->TryAcquireRememberedBit()) {
414 thread_->StoreBufferAddObjectGC(visiting_object);
415 }
416 }
417 }
418
419 DART_FORCE_INLINE
420 bool PredicateScavengeCompressedPointer(uword heap_base,
422 // ScavengePointer cannot be called recursively.
423 ObjectPtr obj = p->Decompress(heap_base);
424
425 if (obj->IsImmediateObject()) {
426 return false;
427 }
428 if (obj->IsOldObject()) {
429 return obj->untag()->IsEvacuationCandidate();
430 }
431
432 ObjectPtr new_obj = ScavengeObject(obj);
433
434 // Update the reference.
435 *p = new_obj;
436 return new_obj->IsNewObject();
437 }
438
439 DART_FORCE_INLINE
440 void ScavengeCompressedPointer(uword heap_base, CompressedObjectPtr* p) {
441 if (PredicateScavengeCompressedPointer(heap_base, p)) {
442 // Update the store buffer as needed.
443 ObjectPtr visiting_object = visiting_old_object_;
444 if (visiting_object != nullptr &&
445 visiting_object->untag()->TryAcquireRememberedBit()) {
446 thread_->StoreBufferAddObjectGC(visiting_object);
447 }
448 }
449 }
450
451 DART_FORCE_INLINE
452 ObjectPtr ScavengeObject(ObjectPtr obj) {
453 // Fragmentation might cause the scavenge to fail. Ensure we always have
454 // somewhere to bail out to.
455 ASSERT(thread_->long_jump_base() != nullptr);
456
457 uword raw_addr = UntaggedObject::ToAddr(obj);
458 // The scavenger is only expects objects located in the from space.
459 ASSERT(from_->Contains(raw_addr));
460 // Read the header word of the object and determine if the object has
461 // already been copied.
463 ObjectPtr new_obj;
464 if (IsForwarding(header)) {
465 // Get the new location of the object.
466 new_obj = ForwardedObj(header);
467 } else {
468 intptr_t size = obj->untag()->HeapSize(header);
470 uword new_addr = 0;
471 // Check whether object should be promoted.
472 if (!Page::Of(obj)->IsSurvivor(raw_addr)) {
473 // Not a survivor of a previous scavenge. Just copy the object into the
474 // to space.
475 new_addr = TryAllocateCopy(size);
476 }
477 if (new_addr == 0) {
478 // This object is a survivor of a previous scavenge. Attempt to promote
479 // the object. (Or, unlikely, to-space was exhausted by fragmentation.)
480 new_addr = page_space_->TryAllocatePromoLocked(freelist_, size);
481 if (UNLIKELY(new_addr == 0)) {
482 // Promotion did not succeed. Copy into the to space instead.
483 scavenger_->failed_to_promote_ = true;
484 new_addr = TryAllocateCopy(size);
485 // To-space was exhausted by fragmentation and old-space could not
486 // grow.
487 if (UNLIKELY(new_addr == 0)) {
488 AbortScavenge();
489 }
490 }
491 }
492 ASSERT(new_addr != 0);
493 // Copy the object to the new location.
494 objcpy(reinterpret_cast<void*>(new_addr),
495 reinterpret_cast<void*>(raw_addr), size);
496
497 new_obj = UntaggedObject::FromAddr(new_addr);
498 if (new_obj->IsOldObject()) {
499 // Promoted: update age/barrier tags.
500 uword tags = static_cast<uword>(header);
503 new_obj->untag()->tags_.store(tags, std::memory_order_relaxed);
504 }
505
507 if (IsTypedDataClassId(cid)) {
508 static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();
509 }
510
511 // Try to install forwarding address.
512 uword forwarding_header = ForwardingHeader(new_obj);
513 if (InstallForwardingPointer(raw_addr, &header, forwarding_header)) {
514 if (new_obj->IsOldObject()) {
515 // If promotion succeeded then we need to remember it so that it can
516 // be traversed later.
517 promoted_list_.Push(new_obj);
518 bytes_promoted_ += size;
519 }
520 } else {
522 if (new_obj->IsOldObject()) {
523 // Abandon as a free list element.
525 Page::Of(new_addr)->sub_live_bytes(size);
526 bytes_promoted_ -= size;
527 } else {
528 // Undo to-space allocation.
529 tail_->Unallocate(new_addr, size);
530 }
531 // Use the winner's forwarding target.
532 new_obj = ForwardedObj(header);
533 }
534 }
535
536 return new_obj;
537 }
538
539 DART_FORCE_INLINE
540 bool InstallForwardingPointer(uword addr,
541 uword* old_header,
542 uword new_header) {
543 if (parallel) {
544 return reinterpret_cast<std::atomic<uword>*>(addr)
545 ->compare_exchange_strong(*old_header, new_header,
546 std::memory_order_relaxed);
547 } else {
548 *reinterpret_cast<uword*>(addr) = new_header;
549 return true;
550 }
551 }
552
553 DART_FORCE_INLINE
554 uword TryAllocateCopy(intptr_t size) {
556 // TODO(rmacnak): Allocate one to start?
557 if (tail_ != nullptr) {
558 uword result = tail_->top_;
560 uword new_top = result + size;
561 if (LIKELY(new_top <= tail_->end_)) {
562 tail_->top_ = new_top;
563 return result;
564 }
565 }
566 return TryAllocateCopySlow(size);
567 }
568
569 DART_NOINLINE uword TryAllocateCopySlow(intptr_t size);
570
571 DART_NOINLINE DART_NORETURN void AbortScavenge() {
572 if (FLAG_verbose_gc) {
573 OS::PrintErr("Aborting scavenge\n");
574 }
575 scavenger_->abort_ = true;
576 // N.B. We must not set the sticky error, which may be a data race if
577 // that root slot was processed by a different worker.
578 thread_->long_jump_base()->Jump(1);
579 }
580
581 void ProcessToSpace();
582 void ProcessPromotedList();
583 void ProcessWeakPropertiesScoped();
584
585 void MournWeakProperties() {
586 weak_property_list_.Process([](WeakPropertyPtr weak_property) {
587 weak_property->untag()->key_ = Object::null();
588 weak_property->untag()->value_ = Object::null();
589 });
590 }
591
592 void MournWeakReferences() {
593 weak_reference_list_.Process([](WeakReferencePtr weak_reference) {
594 ForwardOrSetNullIfCollected(weak_reference,
595 &weak_reference->untag()->target_);
596 });
597 }
598
599 void MournWeakArrays() {
600 weak_array_list_.Process([](WeakArrayPtr weak_array) {
601 intptr_t length = Smi::Value(weak_array->untag()->length());
602 for (intptr_t i = 0; i < length; i++) {
604 &(weak_array->untag()->data()[i]));
605 }
606 });
607 }
608
609 void MournFinalizerEntries() {
610 finalizer_entry_list_.Process([&](FinalizerEntryPtr finalizer_entry) {
611 MournFinalizerEntry(this, finalizer_entry);
612 });
613 }
614
615 Thread* thread_;
616 Scavenger* scavenger_;
617 SemiSpace* from_;
618 PageSpace* page_space_;
619 FreeList* freelist_;
620 intptr_t bytes_promoted_;
621 ObjectPtr visiting_old_object_;
622 StoreBufferBlock* pending_;
623 PromotionWorkList promoted_list_;
624 LocalBlockWorkList<64, WeakArrayPtr> weak_array_list_;
625 LocalBlockWorkList<64, WeakPropertyPtr> weak_property_list_;
626 LocalBlockWorkList<64, WeakReferencePtr> weak_reference_list_;
627 LocalBlockWorkList<64, FinalizerEntryPtr> finalizer_entry_list_;
628
629 Page* head_ = nullptr;
630 Page* tail_ = nullptr; // Allocating from here.
631 Page* scan_ = nullptr; // Resolving from here.
632
633 DISALLOW_COPY_AND_ASSIGN(ScavengerVisitorBase);
634};
635
638
639static bool IsUnreachable(ObjectPtr* ptr) {
640 ObjectPtr obj = *ptr;
641 if (obj->IsImmediateOrOldObject()) {
642 return false;
643 }
644 uword raw_addr = UntaggedObject::ToAddr(obj);
645 uword header = *reinterpret_cast<uword*>(raw_addr);
646 if (IsForwarding(header)) {
647 *ptr = ForwardedObj(header);
648 return false;
649 }
650 return true;
651}
652
654 public:
656
657 void VisitHandle(uword addr) override {
659 reinterpret_cast<FinalizablePersistentHandle*>(addr);
660 ObjectPtr* p = handle->ptr_addr();
661 if (IsUnreachable(p)) {
663 } else {
665 }
666 }
667
668 private:
669 DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor);
670};
671
673 public:
675 ThreadBarrier* barrier,
677 RelaxedAtomic<uintptr_t>* num_busy)
678 : isolate_group_(isolate_group),
679 barrier_(barrier),
680 visitor_(visitor),
681 num_busy_(num_busy) {}
682
683 virtual void Run() {
684 if (!barrier_->TryEnter()) {
685 barrier_->Release();
686 return;
687 }
688
690 isolate_group_, Thread::kScavengerTask, /*bypass_safepoint=*/true);
691 ASSERT(result);
692
694
695 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
696
697 barrier_->Sync();
698 barrier_->Release();
699 }
700
702 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ParallelScavenge");
703
704 num_busy_->fetch_add(1u);
705 visitor_->ProcessRoots();
706
707 // Phase 1: Copying.
708 bool more_to_scavenge = false;
709 do {
710 do {
711 visitor_->ProcessSurvivors();
712 } while (visitor_->WaitForWork(num_busy_));
713 // Wait for all scavengers to stop.
714 barrier_->Sync();
715#if defined(DEBUG)
716 ASSERT(num_busy_->load() == 0);
717 // Caveat: must not allow any marker to continue past the barrier
718 // before we checked num_busy, otherwise one of them might rush
719 // ahead and increment it.
720 barrier_->Sync();
721#endif
722 // Check if we have any pending properties with marked keys.
723 // Those might have been marked by another marker.
724 visitor_->ProcessWeakProperties();
725 more_to_scavenge = visitor_->HasWork();
726 if (more_to_scavenge) {
727 // We have more work to do. Notify others.
728 num_busy_->fetch_add(1u);
729 }
730
731 // Wait for all other scavengers to finish processing their pending
732 // weak properties and decide if they need to continue marking.
733 // Caveat: we need two barriers here to make this decision in lock step
734 // between all scavengers and the main thread.
735 barrier_->Sync();
736 if (!more_to_scavenge && (num_busy_->load() > 0)) {
737 // All scavengers continue to mark as long as any single marker has
738 // some work to do.
739 num_busy_->fetch_add(1u);
740 more_to_scavenge = true;
741 }
742 barrier_->Sync();
743 } while (more_to_scavenge);
744
745 ASSERT(!visitor_->HasWork());
746
747 // Phase 2: Weak processing, statistics.
748 visitor_->ProcessWeak();
749 }
750
751 private:
752 IsolateGroup* isolate_group_;
753 ThreadBarrier* barrier_;
754 ParallelScavengerVisitor* visitor_;
755 RelaxedAtomic<uintptr_t>* num_busy_;
756
757 DISALLOW_COPY_AND_ASSIGN(ParallelScavengerTask);
758};
759
760SemiSpace::SemiSpace(intptr_t gc_threshold_in_words)
761 : gc_threshold_in_words_(gc_threshold_in_words) {}
762
764 Page* page = head_;
765 while (page != nullptr) {
766 Page* next = page->next();
767 page->Deallocate();
768 page = next;
769 }
770}
771
773 if (capacity_in_words_ >= gc_threshold_in_words_) {
774 return nullptr; // Full.
775 }
776 Page* page = Page::Allocate(kPageSize, Page::kNew);
777 if (page == nullptr) {
778 return nullptr; // Out of memory;
779 }
780 capacity_in_words_ += kPageSizeInWords;
781 if (link) {
782 if (head_ == nullptr) {
783 head_ = tail_ = page;
784 } else {
785 tail_->set_next(page);
786 tail_ = page;
787 }
788 }
789 return page;
790}
791
793 for (Page* page = head_; page != nullptr; page = page->next()) {
794 if (page->Contains(addr)) return true;
795 }
796 return false;
797}
798
799void SemiSpace::WriteProtect(bool read_only) {
800 for (Page* page = head_; page != nullptr; page = page->next()) {
801 page->WriteProtect(read_only);
802 }
803}
804
806 if (head == nullptr) {
807 return;
808 }
809 if (head_ == nullptr) {
810 head_ = head;
811 tail_ = tail;
812 return;
813 }
814 tail_->set_next(head);
815 tail_ = tail;
816}
817
818// The initial estimate of how many words we can scavenge per microsecond (usage
819// before / scavenge time). This is a conservative value observed running
820// Flutter on a Nexus 4. After the first scavenge, we instead use a value based
821// on the device's actual speed.
822static constexpr intptr_t kConservativeInitialScavengeSpeed = 40;
823
824Scavenger::Scavenger(Heap* heap, intptr_t max_semi_capacity_in_words)
825 : heap_(heap),
826 max_semi_capacity_in_words_(max_semi_capacity_in_words),
827 scavenge_words_per_micro_(kConservativeInitialScavengeSpeed) {
828 ASSERT(heap != nullptr);
829
830 // Verify assumptions about the first word in objects which the scavenger is
831 // going to use for forwarding pointers.
833
834 // Set initial semi space size in words.
835 const intptr_t initial_semi_capacity_in_words = Utils::Minimum(
836 max_semi_capacity_in_words, FLAG_new_gen_semi_initial_size * MBInWords);
837
838 to_ = new SemiSpace(initial_semi_capacity_in_words);
839 idle_scavenge_threshold_in_words_ = initial_semi_capacity_in_words;
840
841 UpdateMaxHeapCapacity();
842 UpdateMaxHeapUsage();
843}
844
846 ASSERT(!scavenging_);
847 delete to_;
848 ASSERT(blocks_ == nullptr);
849}
850
851intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words,
852 GCReason reason) const {
853 bool grow = false;
854 if (2 * heap_->isolate_group()->MutatorCount() >
855 (old_size_in_words / kPageSizeInWords)) {
856 // Not enough TLABs to give two to each mutator.
857 grow = true;
858 }
859
860 if (reason == GCReason::kNewSpace) {
861 // If we GC for a reason other than new-space being full (i.e., full
862 // collection for old-space or store-buffer overflow), that's not an
863 // indication that new-space is too small.
864 if (stats_history_.Size() != 0) {
865 double garbage =
866 stats_history_.Get(0).ExpectedGarbageFraction(old_size_in_words);
867 if (garbage < (FLAG_new_gen_garbage_threshold / 100.0)) {
868 // Too much survived last time; grow new-space in the hope that a
869 // greater fraction of objects will become unreachable before new-space
870 // becomes full.
871 grow = true;
872 }
873 }
874 }
875
876 if (grow) {
877 return Utils::Minimum(max_semi_capacity_in_words_,
878 old_size_in_words * FLAG_new_gen_growth_factor);
879 }
880 return old_size_in_words;
881}
882
884 public:
885 CollectStoreBufferScavengeVisitor(ObjectSet* in_store_buffer, const char* msg)
887 in_store_buffer_(in_store_buffer),
888 msg_(msg) {}
889
890 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
891 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
892 ObjectPtr obj = *ptr;
894 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
895
897 if (obj.GetClassId() == kArrayCid) {
898 const uword length =
899 Smi::Value(static_cast<UntaggedArray*>(obj.untag())->length());
901 msg_);
902 }
903 in_store_buffer_->Add(obj);
904 }
905 }
906
907#if defined(DART_COMPRESSED_POINTERS)
908 void VisitCompressedPointers(uword heap_base,
910 CompressedObjectPtr* to) override {
911 UNREACHABLE(); // Store buffer blocks are not compressed.
912 }
913#endif
914
915 private:
916 ObjectSet* const in_store_buffer_;
917 const char* msg_;
918
919 DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferScavengeVisitor);
920};
921
923 public ObjectPointerVisitor {
924 public:
926 const SemiSpace* to,
927 const char* msg)
928 : ObjectVisitor(),
930 in_store_buffer_(in_store_buffer),
931 to_(to),
932 msg_(msg) {}
933
934 void VisitObject(ObjectPtr obj) override {
935 if (obj->IsPseudoObject()) return;
936 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
937
938 if (obj->untag()->IsRemembered()) {
939 RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
940 } else {
941 RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
942 }
943
944 visiting_ = obj;
945 is_remembered_ = obj->untag()->IsRemembered();
946 is_card_remembered_ = obj->untag()->IsCardRemembered();
947 if (is_card_remembered_) {
948 RELEASE_ASSERT_WITH_MSG(!is_remembered_, msg_);
949 RELEASE_ASSERT_WITH_MSG(Page::Of(obj)->progress_bar_ == 0, msg_);
950 }
951 obj->untag()->VisitPointers(this);
952 }
953
954 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
955 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
956 ObjectPtr obj = *ptr;
957 if (obj->IsHeapObject() && obj->IsNewObject()) {
958 if (is_card_remembered_) {
959 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
960 FATAL(
961 "%s: Old object %#" Px " references new object %#" Px
962 ", but the "
963 "slot's card is not remembered. Consider using rr to watch the "
964 "slot %p and reverse-continue to find the store with a missing "
965 "barrier.\n",
966 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
967 ptr);
968 }
969 } else if (!is_remembered_) {
970 FATAL("%s: Old object %#" Px " references new object %#" Px
971 ", but it is "
972 "not in any store buffer. Consider using rr to watch the "
973 "slot %p and reverse-continue to find the store with a missing "
974 "barrier.\n",
975 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
976 ptr);
977 }
979 msg_);
980 }
981 }
982 }
983
984#if defined(DART_COMPRESSED_POINTERS)
985 void VisitCompressedPointers(uword heap_base,
987 CompressedObjectPtr* to) override {
988 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
989 ObjectPtr obj = ptr->Decompress(heap_base);
990 if (obj->IsHeapObject() && obj->IsNewObject()) {
991 if (is_card_remembered_) {
992 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
993 FATAL(
994 "%s: Old object %#" Px " references new object %#" Px
995 ", but the "
996 "slot's card is not remembered. Consider using rr to watch the "
997 "slot %p and reverse-continue to find the store with a missing "
998 "barrier.\n",
999 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1000 ptr);
1001 }
1002 } else if (!is_remembered_) {
1003 FATAL("%s: Old object %#" Px " references new object %#" Px
1004 ", but it is "
1005 "not in any store buffer. Consider using rr to watch the "
1006 "slot %p and reverse-continue to find the store with a missing "
1007 "barrier.\n",
1008 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1009 ptr);
1010 }
1012 msg_);
1013 }
1014 }
1015 }
1016#endif
1017
1018 private:
1019 const ObjectSet* const in_store_buffer_;
1020 const SemiSpace* const to_;
1021 ObjectPtr visiting_;
1022 bool is_remembered_;
1023 bool is_card_remembered_;
1024 const char* msg_;
1025
1026 DISALLOW_COPY_AND_ASSIGN(CheckStoreBufferScavengeVisitor);
1027};
1028
1029void Scavenger::VerifyStoreBuffers(const char* msg) {
1030 ASSERT(msg != nullptr);
1031 Thread* thread = Thread::Current();
1032 StackZone stack_zone(thread);
1033 Zone* zone = stack_zone.GetZone();
1034
1035 ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
1036 heap_->AddRegionsToObjectSet(in_store_buffer);
1037
1038 {
1039 CollectStoreBufferScavengeVisitor visitor(in_store_buffer, msg);
1040 heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
1041 }
1042
1043 {
1044 CheckStoreBufferScavengeVisitor visitor(in_store_buffer, to_, msg);
1045 heap_->old_space()->VisitObjects(&visitor);
1046 }
1047}
1048
1049SemiSpace* Scavenger::Prologue(GCReason reason) {
1051
1054
1055 if (FLAG_verify_store_buffer) {
1057 VerifyStoreBuffers("Verifying remembered set before Scavenge");
1058 }
1059
1060 // Need to stash the old remembered set before any worker begins adding to the
1061 // new remembered set.
1062 blocks_ = heap_->isolate_group()->store_buffer()->PopAll();
1063 GCMarker* marker = heap_->old_space()->marker();
1064 if (marker != nullptr) {
1065 marker->new_marking_stack_.PushAll(
1066 marker->tlab_deferred_marking_stack_.PopAll());
1067 new_blocks_ = marker->new_marking_stack_.PopAll();
1068 deferred_blocks_ = marker->deferred_marking_stack_.PopAll();
1069 }
1070
1071 UpdateMaxHeapCapacity();
1072
1073 // Flip the two semi-spaces so that to_ is always the space for allocating
1074 // objects.
1075 SemiSpace* from;
1076 {
1077 MutexLocker ml(&space_lock_);
1078 from = to_;
1079 to_ = new SemiSpace(NewSizeInWords(from->gc_threshold_in_words(), reason));
1080 }
1081
1082 return from;
1083}
1084
1085void Scavenger::Epilogue(SemiSpace* from) {
1087
1088 // All objects in the to space have been copied from the from space at this
1089 // moment.
1090
1091 // Ensure the mutator thread will fail the next allocation. This will force
1092 // mutator to allocate a new TLAB
1093#if defined(DEBUG)
1094 heap_->isolate_group()->ForEachIsolate(
1095 [&](Isolate* isolate) {
1096 Thread* mutator_thread = isolate->mutator_thread();
1097 ASSERT(mutator_thread == nullptr || mutator_thread->top() == 0);
1098 },
1099 /*at_safepoint=*/true);
1100#endif // DEBUG
1101
1102 double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
1103 if (stats_history_.Size() >= 2) {
1104 // Previous scavenge is only given half as much weight.
1105 avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction();
1106 avg_frac /= 1.0 + 0.5; // Normalize.
1107 }
1108
1109 early_tenure_ = avg_frac >= (FLAG_early_tenuring_threshold / 100.0);
1110
1111 // Update estimate of scavenger speed. This statistic assumes survivorship
1112 // rates don't change much.
1113 intptr_t history_used = 0;
1114 intptr_t history_micros = 0;
1115 ASSERT(stats_history_.Size() > 0);
1116 for (intptr_t i = 0; i < stats_history_.Size(); i++) {
1117 history_used += stats_history_.Get(i).UsedBeforeInWords();
1118 history_micros += stats_history_.Get(i).DurationMicros();
1119 }
1120 if (history_micros == 0) {
1121 history_micros = 1;
1122 }
1123 scavenge_words_per_micro_ = history_used / history_micros;
1124 if (scavenge_words_per_micro_ == 0) {
1125 scavenge_words_per_micro_ = 1;
1126 }
1127
1128 // Update amount of new-space we must allocate before performing an idle
1129 // scavenge. This is based on the amount of work we expect to be able to
1130 // complete in a typical idle period.
1131 intptr_t average_idle_task_micros = 6000;
1132 idle_scavenge_threshold_in_words_ =
1133 scavenge_words_per_micro_ * average_idle_task_micros;
1134 // Even if the scavenge speed is slow, make sure we don't scavenge too
1135 // frequently, which just wastes power and falsely increases the promotion
1136 // rate.
1137 intptr_t lower_bound = 512 * KBInWords;
1138 if (idle_scavenge_threshold_in_words_ < lower_bound) {
1139 idle_scavenge_threshold_in_words_ = lower_bound;
1140 }
1141 // Even if the scavenge speed is very high, make sure we start considering
1142 // idle scavenges before new space is full to avoid requiring a scavenge in
1143 // the middle of a frame.
1144 intptr_t upper_bound = 8 * ThresholdInWords() / 10;
1145 if (idle_scavenge_threshold_in_words_ > upper_bound) {
1146 idle_scavenge_threshold_in_words_ = upper_bound;
1147 }
1148
1149 if (FLAG_verify_store_buffer) {
1150 // Scavenging will insert into the store buffer block on the current
1151 // thread (later will parallel scavenge, the worker's threads). We need to
1152 // flush this thread-local block to the isolate group or we will incorrectly
1153 // report some objects as absent from the store buffer. This might cause
1154 // a program to hit a store buffer overflow a bit sooner than it might
1155 // otherwise, since overflow is measured in blocks. Store buffer overflows
1156 // are very rare.
1158
1160 VerifyStoreBuffers("Verifying remembered set after Scavenge");
1161 }
1162
1163 delete from;
1164 UpdateMaxHeapUsage();
1165 if (heap_ != nullptr) {
1166 heap_->UpdateGlobalMaxUsed();
1167 }
1168}
1169
1171 // To make a consistent decision, we should not yield for a safepoint in the
1172 // middle of deciding whether to perform an idle GC.
1173 NoSafepointScope no_safepoint;
1174
1175 // TODO(rmacnak): Investigate collecting a history of idle period durations.
1176 intptr_t used_in_words = UsedInWords() + freed_in_words_;
1177 intptr_t external_in_words = ExternalInWords();
1178 // Normal reason: new space is getting full.
1179 bool for_new_space = (used_in_words >= idle_scavenge_threshold_in_words_) ||
1180 (external_in_words >= idle_scavenge_threshold_in_words_);
1181 if (!for_new_space) {
1182 return false;
1183 }
1184
1185 int64_t estimated_scavenge_completion =
1187 used_in_words / scavenge_words_per_micro_;
1188 return estimated_scavenge_completion <= deadline;
1189}
1190
1191void Scavenger::IterateIsolateRoots(ObjectPointerVisitor* visitor) {
1192 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateIsolateRoots");
1195}
1196
1197template <bool parallel>
1198void Scavenger::IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor) {
1199 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateStoreBuffers");
1200
1201 StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
1202 StoreBufferBlock* pending;
1203 for (;;) {
1204 {
1205 MutexLocker ml(&space_lock_);
1206 pending = blocks_;
1207 if (pending == nullptr) break;
1208 blocks_ = pending->next();
1209 }
1210 // Ensure the block is freed in case of scavenger abort.
1211 visitor->set_pending(pending);
1212 // Generated code appends to store buffers; tell MemorySanitizer.
1213 MSAN_UNPOISON(pending, sizeof(*pending));
1214 while (!pending->IsEmpty()) {
1215 ObjectPtr obj = pending->Pop();
1216 ASSERT(!obj->IsForwardingCorpse());
1217 ASSERT(obj->untag()->IsRemembered());
1218 obj->untag()->ClearRememberedBit();
1219 visitor->VisitingOldObject(obj);
1220 visitor->ProcessObject(obj);
1221 }
1222 pending->Reset();
1223 // Return the emptied block for recycling (no need to check threshold).
1224 store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
1225 visitor->set_pending(nullptr);
1226 }
1227}
1228
1229template <bool parallel>
1230void Scavenger::IterateRememberedCards(
1231 ScavengerVisitorBase<parallel>* visitor) {
1232 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateRememberedCards");
1233 heap_->old_space()->VisitRememberedCards(visitor);
1234}
1235
1236void Scavenger::IterateObjectIdTable(ObjectPointerVisitor* visitor) {
1237#ifndef PRODUCT
1238 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateObjectIdTable");
1239 heap_->isolate_group()->VisitObjectIdRingPointers(visitor);
1240#endif // !PRODUCT
1241}
1242
1244 kIsolate = 0,
1247};
1248
1249template <bool parallel>
1250void Scavenger::IterateRoots(ScavengerVisitorBase<parallel>* visitor) {
1251 for (;;) {
1252 intptr_t slice = root_slices_started_.fetch_add(1);
1253 if (slice >= kNumRootSlices) {
1254 break; // No more slices.
1255 }
1256
1257 switch (slice) {
1258 case kIsolate:
1259 IterateIsolateRoots(visitor);
1260 break;
1261 case kObjectIdRing:
1262 IterateObjectIdTable(visitor);
1263 break;
1264 default:
1265 UNREACHABLE();
1266 }
1267 }
1268
1269 IterateStoreBuffers(visitor);
1270 IterateRememberedCards(visitor);
1271}
1272
1274 kWeakHandles = 0,
1280};
1281
1282void Scavenger::IterateWeak() {
1283 for (;;) {
1284 intptr_t slice = weak_slices_started_.fetch_add(1);
1285 if (slice >= kNumWeakSlices) {
1286 break; // No more slices.
1287 }
1288
1289 switch (slice) {
1290 case kWeakHandles:
1291 MournWeakHandles();
1292 break;
1293 case kWeakTables:
1294 MournWeakTables();
1295 break;
1296 case kProgressBars:
1297 heap_->old_space()->ResetProgressBars();
1298 break;
1300 // Restore write-barrier assumptions.
1302 break;
1303 case kPruneWeak: {
1304 GCMarker* marker = heap_->old_space()->marker();
1305 if (marker != nullptr) {
1306 marker->PruneWeak(this);
1307 }
1308 } break;
1309 default:
1310 UNREACHABLE();
1311 }
1312 }
1313
1314 GCMarker* marker = heap_->old_space()->marker();
1315 if (marker != nullptr) {
1316 PruneNew();
1317 PruneDeferred();
1318 }
1319}
1320
1321void Scavenger::MournWeakHandles() {
1322 Thread* thread = Thread::Current();
1323 TIMELINE_FUNCTION_GC_DURATION(thread, "MournWeakHandles");
1324 ScavengerWeakVisitor weak_visitor(thread);
1325 heap_->isolate_group()->VisitWeakPersistentHandles(&weak_visitor);
1326}
1327
1328template <bool parallel>
1329void ScavengerVisitorBase<parallel>::ProcessToSpace() {
1330 VisitingOldObject(nullptr);
1331 while (scan_ != nullptr) {
1332 uword resolved_top = scan_->resolved_top_;
1333 while (resolved_top < scan_->top_) {
1334 ObjectPtr obj = UntaggedObject::FromAddr(resolved_top);
1335 resolved_top += ProcessObject(obj);
1336 }
1337 scan_->resolved_top_ = resolved_top;
1338
1339 Page* next = scan_->next();
1340 if (next == nullptr) {
1341 // Don't update scan_. More objects may yet be copied to this TLAB.
1342 return;
1343 }
1344 scan_ = next;
1345 }
1346}
1347
1348template <bool parallel>
1349void ScavengerVisitorBase<parallel>::ProcessPromotedList() {
1350 ObjectPtr obj;
1351 while (promoted_list_.Pop(&obj)) {
1352 VisitingOldObject(obj);
1353 ProcessObject(obj);
1354 // Black allocation.
1355 if (thread_->is_marking() && obj->untag()->TryAcquireMarkBit()) {
1356 thread_->MarkingStackAddObject(obj);
1357 }
1358 }
1359}
1360
1361template <bool parallel>
1362void ScavengerVisitorBase<parallel>::ProcessWeakPropertiesScoped() {
1363 if (scavenger_->abort_) return;
1364
1365 // Finished this round of scavenging. Process the pending weak properties
1366 // for which the keys have become reachable. Potentially this adds more
1367 // objects to the to space.
1368 weak_property_list_.Process([&](WeakPropertyPtr weak_property) {
1369 ObjectPtr key = weak_property->untag()->key();
1370 ASSERT(key->IsHeapObject());
1371 ASSERT(key->IsNewObject());
1372 ASSERT(from_->Contains(UntaggedObject::ToAddr(key)));
1373
1375 if (IsForwarding(header)) {
1376 VisitingOldObject(weak_property->IsOldObject() ? weak_property : nullptr);
1377 weak_property->untag()->VisitPointersNonvirtual(this);
1378 } else {
1379 weak_property_list_.Push(weak_property);
1380 }
1381 });
1382}
1383
1384void Scavenger::UpdateMaxHeapCapacity() {
1385 ASSERT(to_ != nullptr);
1386 ASSERT(heap_ != nullptr);
1387 auto isolate_group = heap_->isolate_group();
1388 ASSERT(isolate_group != nullptr);
1389 isolate_group->GetHeapNewCapacityMaxMetric()->SetValue(
1390 to_->capacity_in_words() * kWordSize);
1391}
1392
1393void Scavenger::UpdateMaxHeapUsage() {
1394 ASSERT(to_ != nullptr);
1395 ASSERT(heap_ != nullptr);
1396 auto isolate_group = heap_->isolate_group();
1397 ASSERT(isolate_group != nullptr);
1398 isolate_group->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
1399}
1400
1402 if (obj->IsImmediateOrOldObject()) return true;
1403 return IsForwarding(ReadHeaderRelaxed(obj));
1404}
1405
1406template <bool parallel>
1408#if defined(DEBUG)
1409 if (obj->IsNewObject()) {
1410 ASSERT(visiting_old_object_ == nullptr);
1411 } else {
1412 ASSERT(visiting_old_object_ == obj);
1413 ASSERT(!obj->untag()->IsRemembered());
1414 }
1415#endif
1416
1417 intptr_t cid = obj->GetClassId();
1418 if (UNLIKELY(cid == kWeakPropertyCid)) {
1419 WeakPropertyPtr weak_property = static_cast<WeakPropertyPtr>(obj);
1420 if (!IsScavengeSurvivor(weak_property->untag()->key())) {
1421 weak_property_list_.Push(weak_property);
1423 }
1424 } else if (UNLIKELY(cid == kWeakReferenceCid)) {
1425 WeakReferencePtr weak_reference = static_cast<WeakReferencePtr>(obj);
1426 if (!IsScavengeSurvivor(weak_reference->untag()->target())) {
1427#if !defined(DART_COMPRESSED_POINTERS)
1428 ScavengePointer(&weak_reference->untag()->type_arguments_);
1429#else
1430 ScavengeCompressedPointer(weak_reference->heap_base(),
1431 &weak_reference->untag()->type_arguments_);
1432#endif
1433 weak_reference_list_.Push(weak_reference);
1435 }
1436 } else if (UNLIKELY(cid == kWeakArrayCid)) {
1437 WeakArrayPtr weak_array = static_cast<WeakArrayPtr>(obj);
1438 weak_array_list_.Push(weak_array);
1439 return WeakArray::InstanceSize(Smi::Value(weak_array->untag()->length()));
1440 } else if (UNLIKELY(cid == kFinalizerEntryCid)) {
1441 FinalizerEntryPtr finalizer_entry = static_cast<FinalizerEntryPtr>(obj);
1442#if !defined(DART_COMPRESSED_POINTERS)
1443 ScavengePointer(&finalizer_entry->untag()->token_);
1444 ScavengePointer(&finalizer_entry->untag()->next_);
1445#else
1446 ScavengeCompressedPointer(finalizer_entry->heap_base(),
1447 &finalizer_entry->untag()->token_);
1448 ScavengeCompressedPointer(finalizer_entry->heap_base(),
1449 &finalizer_entry->untag()->next_);
1450#endif
1451 finalizer_entry_list_.Push(finalizer_entry);
1453 }
1454 return obj->untag()->VisitPointersNonvirtual(this);
1455}
1456
1457void Scavenger::MournWeakTables() {
1458 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "MournWeakTables");
1459
1460 auto rehash_weak_table = [](WeakTable* table, WeakTable* replacement_new,
1461 WeakTable* replacement_old,
1463 intptr_t size = table->size();
1464 for (intptr_t i = 0; i < size; i++) {
1465 if (table->IsValidEntryAtExclusive(i)) {
1466 ObjectPtr obj = table->ObjectAtExclusive(i);
1467 ASSERT(obj->IsHeapObject());
1468 uword raw_addr = UntaggedObject::ToAddr(obj);
1469 uword header = *reinterpret_cast<uword*>(raw_addr);
1470 if (IsForwarding(header)) {
1471 // The object has survived. Preserve its record.
1472 obj = ForwardedObj(header);
1473 auto replacement =
1474 obj->IsNewObject() ? replacement_new : replacement_old;
1475 replacement->SetValueExclusive(obj, table->ValueAtExclusive(i));
1476 } else {
1477 // The object has been collected.
1478 if (cleanup != nullptr) {
1479 cleanup(reinterpret_cast<void*>(table->ValueAtExclusive(i)));
1480 }
1481 }
1482 }
1483 }
1484 };
1485
1486 // Rehash the weak tables now that we know which objects survive this cycle.
1487 for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
1488 const auto selector = static_cast<Heap::WeakSelector>(sel);
1489 auto table = heap_->GetWeakTable(Heap::kNew, selector);
1490 auto table_old = heap_->GetWeakTable(Heap::kOld, selector);
1491
1492 // Create a new weak table for the new-space.
1493 auto table_new = WeakTable::NewFrom(table);
1494
1495 Dart_HeapSamplingDeleteCallback cleanup = nullptr;
1496#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1497 if (sel == Heap::kHeapSamplingData) {
1499 }
1500#endif
1501 rehash_weak_table(table, table_new, table_old, cleanup);
1502 heap_->SetWeakTable(Heap::kNew, selector, table_new);
1503
1504 // Remove the old table as it has been replaced with the newly allocated
1505 // table above.
1506 delete table;
1507 }
1508
1509 // Each isolate might have a weak table used for fast snapshot writing (i.e.
1510 // isolate communication). Rehash those tables if need be.
1511 heap_->isolate_group()->ForEachIsolate(
1512 [&](Isolate* isolate) {
1513 auto table = isolate->forward_table_new();
1514 if (table != nullptr) {
1515 auto replacement = WeakTable::NewFrom(table);
1516 rehash_weak_table(table, replacement, isolate->forward_table_old(),
1517 nullptr);
1518 isolate->set_forward_table_new(replacement);
1519 }
1520 },
1521 /*at_safepoint=*/true);
1522}
1523
1525 ASSERT(abort_);
1526
1527 GCMarker* marker = heap_->old_space()->marker();
1528 MarkingStack* old_marking_stack = &marker->old_marking_stack_;
1529 MarkingStack* new_marking_stack = &marker->new_marking_stack_;
1530 MarkingStackBlock* old_writing = old_marking_stack->PopNonFullBlock();
1531 MarkingStackBlock* new_writing = new_marking_stack->PopNonFullBlock();
1532 while (reading != nullptr) {
1533 // Generated code appends to marking stacks; tell MemorySanitizer.
1534 MSAN_UNPOISON(reading, sizeof(*reading));
1535 while (!reading->IsEmpty()) {
1536 ObjectPtr obj = reading->Pop();
1537 ASSERT(obj->IsHeapObject());
1538#if defined(DEBUG)
1539 if (obj->IsNewObject()) {
1542 }
1543#endif
1544 if (obj->IsForwardingCorpse()) {
1545 // Promoted object was pushed to mark list but reversed.
1546 obj = reinterpret_cast<ForwardingCorpse*>(UntaggedObject::ToAddr(obj))
1547 ->target();
1548 }
1549 ASSERT(!obj->IsForwardingCorpse());
1550 ASSERT(!obj->IsFreeListElement());
1551 if (obj->IsNewObject()) {
1552 new_writing->Push(obj);
1553 if (new_writing->IsFull()) {
1554 new_marking_stack->PushBlock(new_writing);
1555 new_writing = new_marking_stack->PopNonFullBlock();
1556 }
1557 } else {
1558 old_writing->Push(obj);
1559 if (old_writing->IsFull()) {
1560 old_marking_stack->PushBlock(old_writing);
1561 old_writing = old_marking_stack->PopNonFullBlock();
1562 }
1563 }
1564 }
1565
1566 MarkingStackBlock* next = reading->next();
1567 reading->Reset();
1568 old_marking_stack->PushBlock(reading);
1569 reading = next;
1570 }
1571 old_marking_stack->PushBlock(old_writing);
1572 new_marking_stack->PushBlock(new_writing);
1573}
1574
1576 ASSERT(abort_);
1577
1578 class ReverseMarkStack : public ObjectPointerVisitor {
1579 public:
1580 explicit ReverseMarkStack(IsolateGroup* group)
1582
1583 void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
1584 for (ObjectPtr* p = first; p <= last; p++) {
1585 ObjectPtr obj = *p;
1586#if defined(DEBUG)
1587 if (obj->IsNewObject()) {
1590 }
1591#endif
1592 if (obj->IsForwardingCorpse()) {
1593 // Promoted object was pushed to mark list but reversed.
1594 *p = reinterpret_cast<ForwardingCorpse*>(UntaggedObject::ToAddr(obj))
1595 ->target();
1596 }
1597 }
1598 }
1599#if defined(DART_COMPRESSED_POINTERS)
1600 void VisitCompressedPointers(uword heap_base,
1601 CompressedObjectPtr* first,
1602 CompressedObjectPtr* last) override {
1603 UNREACHABLE();
1604 }
1605#endif
1606 };
1607
1608 ReverseMarkStack visitor(heap_->isolate_group());
1609 heap_->old_space()->marker()->deferred_marking_stack_.VisitObjectPointers(
1610 &visitor);
1611}
1612
1614 ASSERT(!abort_);
1615 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "PruneNewMarkingStack");
1616 MarkingStackBlock* reading;
1617 GCMarker* marker = heap_->old_space()->marker();
1618 MarkingStack* old_marking_stack = &marker->old_marking_stack_;
1619 MarkingStack* new_marking_stack = &marker->new_marking_stack_;
1620 MarkingStackBlock* old_writing = old_marking_stack->PopNonFullBlock();
1621 MarkingStackBlock* new_writing = new_marking_stack->PopNonFullBlock();
1622 for (;;) {
1623 {
1624 MutexLocker ml(&space_lock_);
1625 reading = new_blocks_;
1626 if (reading == nullptr) break;
1627 new_blocks_ = reading->next();
1628 }
1629 // Generated code appends to marking stacks; tell MemorySanitizer.
1630 MSAN_UNPOISON(reading, sizeof(*reading));
1631 while (!reading->IsEmpty()) {
1632 ObjectPtr obj = reading->Pop();
1633 ASSERT(obj->IsHeapObject());
1634 if (obj->IsNewObject()) {
1636 if (!IsForwarding(header)) continue;
1637 obj = ForwardedObj(header);
1638 }
1639 ASSERT(!obj->IsForwardingCorpse());
1640 ASSERT(!obj->IsFreeListElement());
1641 if (obj->IsNewObject()) {
1642 new_writing->Push(obj);
1643 if (new_writing->IsFull()) {
1644 new_marking_stack->PushBlock(new_writing);
1645 new_writing = new_marking_stack->PopNonFullBlock();
1646 }
1647 } else {
1648 old_writing->Push(obj);
1649 if (old_writing->IsFull()) {
1650 old_marking_stack->PushBlock(old_writing);
1651 old_writing = old_marking_stack->PopNonFullBlock();
1652 }
1653 }
1654 }
1655 reading->Reset();
1656 new_marking_stack->PushBlock(reading);
1657 }
1658 old_marking_stack->PushBlock(old_writing);
1659 new_marking_stack->PushBlock(new_writing);
1660}
1661
1663 ASSERT(!abort_);
1664 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "PruneDeferredMarkingStack");
1665 MarkingStackBlock* reading;
1666 GCMarker* marker = heap_->old_space()->marker();
1667 MarkingStack* marking_stack = &marker->deferred_marking_stack_;
1668 MarkingStackBlock* writing = marking_stack->PopNonFullBlock();
1669 for (;;) {
1670 {
1671 MutexLocker ml(&space_lock_);
1672 reading = deferred_blocks_;
1673 if (reading == nullptr) break;
1674 deferred_blocks_ = reading->next();
1675 }
1676 // Generated code appends to marking stacks; tell MemorySanitizer.
1677 MSAN_UNPOISON(reading, sizeof(*reading));
1678 while (!reading->IsEmpty()) {
1679 ObjectPtr obj = reading->Pop();
1680 ASSERT(obj->IsHeapObject());
1681 if (obj->IsNewObject()) {
1683 if (!IsForwarding(header)) continue;
1684 obj = ForwardedObj(header);
1685 }
1686 ASSERT(!obj->IsForwardingCorpse());
1687 ASSERT(!obj->IsFreeListElement());
1688 writing->Push(obj);
1689 if (writing->IsFull()) {
1690 marking_stack->PushBlock(writing);
1691 writing = marking_stack->PopNonFullBlock();
1692 }
1693 }
1694 reading->Reset();
1695 marking_stack->PushBlock(reading);
1696 }
1697 marking_stack->PushBlock(writing);
1698}
1699
1701 ASSERT(!abort_);
1703 PruneWeak(&deferred->weak_properties);
1704 PruneWeak(&deferred->weak_references);
1705 PruneWeak(&deferred->weak_arrays);
1706 PruneWeak(&deferred->finalizer_entries);
1707}
1708
1709template <typename Type, typename PtrType>
1711 PtrType weak = list->Release();
1712 while (weak != Object::null()) {
1713 PtrType next;
1714 if (weak->IsOldObject()) {
1715 ASSERT(weak->GetClassId() == Type::kClassId);
1716 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1717 weak->untag()->next_seen_by_gc_ = Type::null();
1718 list->Enqueue(weak);
1719 } else {
1721 if (IsForwarding(header)) {
1722 weak = static_cast<PtrType>(ForwardedObj(header));
1723 ASSERT(weak->GetClassId() == Type::kClassId);
1724 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1725 weak->untag()->next_seen_by_gc_ = Type::null();
1726 list->Enqueue(weak);
1727 } else {
1728 // Collected in this scavenge.
1729 ASSERT(weak->GetClassId() == Type::kClassId);
1730 next = weak->untag()->next_seen_by_gc_.Decompress(weak->heap_base());
1731 }
1732 }
1733
1734 weak = next;
1735 }
1736}
1737
1738// Returns whether the object referred to in `slot` was GCed this GC.
1739template <bool parallel>
1741 ObjectPtr parent,
1742 CompressedObjectPtr* slot) {
1743 ObjectPtr target = slot->Decompress(parent->heap_base());
1744 if (target->IsImmediateObject()) {
1745 // Object already null (which is old) or not touched during this GC.
1746 return false;
1747 }
1748 if (target->IsOldObject()) {
1749 if (parent->IsOldObject() && target->untag()->IsEvacuationCandidate()) {
1750 if (!parent->untag()->IsCardRemembered()) {
1751 if (parent->untag()->TryAcquireRememberedBit()) {
1753 }
1754 }
1755 }
1756 return false;
1757 }
1759 if (IsForwarding(header)) {
1760 // Get the new location of the object.
1762 *slot = target;
1763 if (target->IsNewObject() && parent->IsOldObject() &&
1764 parent->untag()->TryAcquireRememberedBit()) {
1766 }
1767 return false;
1768 }
1769 ASSERT(target->IsHeapObject());
1770 ASSERT(target->IsNewObject());
1771 *slot = Object::null();
1772 return true;
1773}
1774
1776 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
1777 (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
1778 (Thread::Current()->task_kind() == Thread::kCompactorTask));
1779 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1780 page->VisitObjectPointers(visitor);
1781 }
1782}
1783
1785 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
1786 (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
1788 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1789 page->VisitObjects(visitor);
1790 }
1791}
1792
1794 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1795 set->AddRegion(page->start(), page->end());
1796 }
1797}
1798
1799void Scavenger::TryAllocateNewTLAB(Thread* thread,
1800 intptr_t min_size,
1801 bool can_safepoint) {
1802 ASSERT(heap_ != Dart::vm_isolate_group()->heap());
1803 ASSERT(!scavenging_);
1804
1805#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1806 // Find the remaining space available in the TLAB before abandoning it so we
1807 // can reset the heap sampling offset in the new TLAB.
1808 intptr_t remaining = thread->true_end() - thread->top();
1809 const bool heap_sampling_enabled = thread->end() != thread->true_end();
1810 const bool is_first_tlab = thread->true_end() == 0;
1811 if (heap_sampling_enabled && remaining > min_size) {
1812 // This is a sampling point and the TLAB isn't actually full.
1813 thread->heap_sampler().SampleNewSpaceAllocation(min_size);
1814 return;
1815 }
1816#endif
1817
1818 intptr_t allocated = AbandonRemainingTLAB(thread);
1819 if (can_safepoint && !thread->force_growth()) {
1820 ASSERT(thread->no_safepoint_scope_depth() == 0);
1821 heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace, allocated);
1822 }
1823
1824 MutexLocker ml(&space_lock_);
1825 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1826 if (page->owner() != nullptr) continue;
1827 intptr_t available =
1828 (page->end() - kAllocationRedZoneSize) - page->object_end();
1829 if (available >= min_size) {
1830 page->Acquire(thread);
1831#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1832 thread->heap_sampler().HandleNewTLAB(remaining, /*is_first_tlab=*/false);
1833#endif
1834 return;
1835 }
1836 }
1837
1838 Page* page = to_->TryAllocatePageLocked(true);
1839 if (page == nullptr) {
1840 return;
1841 }
1842 page->Acquire(thread);
1843#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
1844 thread->heap_sampler().HandleNewTLAB(remaining, is_first_tlab);
1845#endif
1846
1848}
1849
1851 // Allocate any remaining space so the TLAB won't be reused. Write a filler
1852 // object so it remains iterable.
1853 uword top = thread->top();
1854 intptr_t size = thread->end() - thread->top();
1855 if (size > 0) {
1856 thread->set_top(top + size);
1858 }
1859
1860 AbandonRemainingTLAB(thread);
1861}
1862
1864 if (thread->top() == 0) return 0;
1865
1866 Page* page = Page::Of(thread->top() - 1);
1867 intptr_t allocated;
1868 {
1869 if (thread->is_marking()) {
1871 }
1872 MutexLocker ml(&space_lock_);
1873 allocated = page->Release(thread);
1874 }
1875 ASSERT(thread->top() == 0);
1876 return allocated;
1877}
1878
1879template <bool parallel>
1881 Page* page;
1882 {
1883 MutexLocker ml(&scavenger_->space_lock_);
1884 page = scavenger_->to_->TryAllocatePageLocked(false);
1885 }
1886 if (page == nullptr) {
1887 return 0;
1888 }
1889
1890 if (head_ == nullptr) {
1891 head_ = scan_ = page;
1892 } else {
1893 ASSERT(scan_ != nullptr);
1894 tail_->set_next(page);
1895 }
1896 tail_ = page;
1897
1898 return tail_->TryAllocateGC(size);
1899}
1900
1903
1904 ASSERT(thread->OwnsGCSafepoint());
1905
1906 // Scavenging is not reentrant. Make sure that is the case.
1907 ASSERT(!scavenging_);
1908 scavenging_ = true;
1909
1910 if (type == GCType::kEvacuate) {
1911 // Forces the next scavenge to promote all the objects in the new space.
1912 early_tenure_ = true;
1913 }
1914
1915 if (FLAG_verify_before_gc) {
1916 heap_->WaitForSweeperTasksAtSafepoint(thread);
1917 heap_->VerifyGC("Verifying before Scavenge",
1918 thread->is_marking() ? kAllowMarked : kForbidMarked);
1919 }
1920
1921 // Prepare for a scavenge.
1922 failed_to_promote_ = false;
1923 abort_ = false;
1924 root_slices_started_ = 0;
1925 weak_slices_started_ = 0;
1926 freed_in_words_ = 0;
1927 intptr_t abandoned_bytes = 0; // TODO(rmacnak): Count fragmentation?
1928 SpaceUsage usage_before = GetCurrentUsage();
1929 intptr_t promo_candidate_words = 0;
1930 for (Page* page = to_->head(); page != nullptr; page = page->next()) {
1931 page->Release();
1932 if (early_tenure_) {
1933 page->EarlyTenure();
1934 }
1935 promo_candidate_words += page->promo_candidate_words();
1936 }
1938 SemiSpace* from = Prologue(reason);
1939
1940 intptr_t bytes_promoted;
1941 if (FLAG_scavenger_tasks == 0) {
1942 bytes_promoted = SerialScavenge(from);
1943 } else {
1944 bytes_promoted = ParallelScavenge(from);
1945 }
1946 if (abort_) {
1947 ReverseScavenge(&from);
1948 bytes_promoted = 0;
1949 } else {
1950 if ((ThresholdInWords() - UsedInWords()) < KBInWords) {
1951 // Don't scavenge again until the next old-space GC has occurred. Prevents
1952 // performing one scavenge per allocation as the heap limit is approached.
1953 heap_->assume_scavenge_will_fail_ = true;
1954 }
1955 }
1956 ASSERT(promotion_stack_.IsEmpty());
1957
1958 // Scavenge finished. Run accounting.
1960 stats_history_.Add(ScavengeStats(
1961 start, end, usage_before, GetCurrentUsage(), promo_candidate_words,
1962 bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2));
1963 Epilogue(from);
1965
1966 if (FLAG_verify_after_gc) {
1967 heap_->WaitForSweeperTasksAtSafepoint(thread);
1968 heap_->VerifyGC("Verifying after Scavenge...",
1969 thread->is_marking() ? kAllowMarked : kForbidMarked);
1970 }
1971
1972 // Done scavenging. Reset the marker.
1973 ASSERT(scavenging_);
1974 scavenging_ = false;
1975
1976 // It is possible for objects to stay in the new space
1977 // if the VM cannot create more pages for these objects.
1978 ASSERT((type != GCType::kEvacuate) || (UsedInWords() == 0) ||
1979 failed_to_promote_);
1980}
1981
1982intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
1983 FreeList* freelist = heap_->old_space()->DataFreeList(0);
1984 SerialScavengerVisitor visitor(heap_->isolate_group(), this, from, freelist,
1985 &promotion_stack_);
1986 visitor.ProcessRoots();
1987 visitor.ProcessAll();
1988 visitor.ProcessWeak();
1989 visitor.Finalize(heap_->isolate_group()->store_buffer());
1990 to_->AddList(visitor.head(), visitor.tail());
1991 return visitor.bytes_promoted();
1992}
1993
1994intptr_t Scavenger::ParallelScavenge(SemiSpace* from) {
1995 intptr_t bytes_promoted = 0;
1996 const intptr_t num_tasks = FLAG_scavenger_tasks;
1997 ASSERT(num_tasks > 0);
1998
1999 ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
2000 RelaxedAtomic<uintptr_t> num_busy = 0;
2001
2002 ParallelScavengerVisitor** visitors =
2003 new ParallelScavengerVisitor*[num_tasks];
2004 for (intptr_t i = 0; i < num_tasks; i++) {
2005 FreeList* freelist = heap_->old_space()->DataFreeList(i);
2006 visitors[i] = new ParallelScavengerVisitor(
2007 heap_->isolate_group(), this, from, freelist, &promotion_stack_);
2008 if (i < (num_tasks - 1)) {
2009 // Begin scavenging on a helper thread.
2010 bool result = Dart::thread_pool()->Run<ParallelScavengerTask>(
2011 heap_->isolate_group(), barrier, visitors[i], &num_busy);
2012 ASSERT(result);
2013 } else {
2014 // Last worker is the main thread.
2015 ParallelScavengerTask task(heap_->isolate_group(), barrier, visitors[i],
2016 &num_busy);
2017 task.RunEnteredIsolateGroup();
2018 barrier->Sync();
2019 barrier->Release();
2020 }
2021 }
2022
2023 StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
2024 for (intptr_t i = 0; i < num_tasks; i++) {
2025 ParallelScavengerVisitor* visitor = visitors[i];
2026 visitor->Finalize(store_buffer);
2027 to_->AddList(visitor->head(), visitor->tail());
2028 bytes_promoted += visitor->bytes_promoted();
2029 delete visitor;
2030 }
2031
2032 delete[] visitors;
2033 return bytes_promoted;
2034}
2035
2036void Scavenger::ReverseScavenge(SemiSpace** from) {
2037 Thread* thread = Thread::Current();
2038 TIMELINE_FUNCTION_GC_DURATION(thread, "ReverseScavenge");
2039
2040 class ReverseFromForwardingVisitor : public ObjectVisitor {
2041 void VisitObject(ObjectPtr from_obj) override {
2042 uword from_header = ReadHeaderRelaxed(from_obj);
2043 if (IsForwarding(from_header)) {
2044 ObjectPtr to_obj = ForwardedObj(from_header);
2045 uword to_header = ReadHeaderRelaxed(to_obj);
2046 intptr_t size = to_obj->untag()->HeapSize();
2047
2048 // Reset the ages bits in case this was a promotion.
2049 uword from_header = static_cast<uword>(to_header);
2050 from_header =
2053 true, from_header);
2054
2055 WriteHeaderRelaxed(from_obj, from_header);
2056
2058 ->set_target(from_obj);
2059 }
2060 }
2061 };
2062
2063 ReverseFromForwardingVisitor visitor;
2064 for (Page* page = (*from)->head(); page != nullptr; page = page->next()) {
2065 page->VisitObjects(&visitor);
2066 }
2067
2068 // Swap from-space and to-space. The abandoned to-space will be deleted in
2069 // the epilogue.
2070 {
2071 MutexLocker ml(&space_lock_);
2072 SemiSpace* temp = to_;
2073 to_ = *from;
2074 *from = temp;
2075 }
2076
2077 // Release any remaining part of the promotion worklist that wasn't completed.
2078 promotion_stack_.Reset();
2079
2080 // Release any remaining part of the remembered set that wasn't completed.
2081 StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
2082 StoreBufferBlock* pending = blocks_;
2083 while (pending != nullptr) {
2084 StoreBufferBlock* next = pending->next();
2085 pending->Reset();
2086 // Return the emptied block for recycling (no need to check threshold).
2087 store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
2088 pending = next;
2089 }
2090 blocks_ = nullptr;
2091
2092 // Reverse the partial forwarding from the aborted scavenge. This also
2093 // rebuilds the remembered set.
2094 heap_->WaitForSweeperTasksAtSafepoint(thread);
2096
2097 heap_->old_space()->ResetProgressBars();
2098
2099 GCMarker* marker = heap_->old_space()->marker();
2100 if (marker != nullptr) {
2101 marker->new_marking_stack_.PushAll(new_blocks_);
2102 new_blocks_ = nullptr;
2103 marker->deferred_marking_stack_.PushAll(deferred_blocks_);
2104 deferred_blocks_ = nullptr;
2105 // Not redundant with the flush at the beginning of the scavenge because
2106 // the scavenge workers may add promoted objects to the mark stack.
2108
2109 MarkingStackBlock* old = marker->old_marking_stack_.PopAll();
2110 MarkingStackBlock* neu = marker->old_marking_stack_.PopAll();
2111 MarkingStackBlock* tlab = marker->old_marking_stack_.PopAll();
2112 Forward(old);
2113 Forward(neu);
2114 Forward(tlab);
2116 }
2117
2118 // Restore write-barrier assumptions. Must occur after mark list fixups.
2120
2121 // Don't scavenge again until the next old-space GC has occurred. Prevents
2122 // performing one scavenge per allocation as the heap limit is approached.
2123 heap_->assume_scavenge_will_fail_ = true;
2124}
2125
2126void Scavenger::WriteProtect(bool read_only) {
2127 ASSERT(!scavenging_);
2128 to_->WriteProtect(read_only);
2129}
2130
2131#ifndef PRODUCT
2133 auto isolate_group = IsolateGroup::Current();
2134 ASSERT(isolate_group != nullptr);
2135 JSONObject space(object, "new");
2136 space.AddProperty("type", "HeapSpace");
2137 space.AddProperty("name", "new");
2138 space.AddProperty("vmName", "Scavenger");
2139 space.AddProperty("collections", collections());
2140 if (collections() > 0) {
2141 int64_t run_time = isolate_group->UptimeMicros();
2142 run_time = Utils::Maximum(run_time, static_cast<int64_t>(0));
2143 double run_time_millis = MicrosecondsToMilliseconds(run_time);
2144 double avg_time_between_collections =
2145 run_time_millis / static_cast<double>(collections());
2146 space.AddProperty("avgCollectionPeriodMillis",
2147 avg_time_between_collections);
2148 } else {
2149 space.AddProperty("avgCollectionPeriodMillis", 0.0);
2150 }
2151 space.AddProperty64("used", UsedInWords() * kWordSize);
2152 space.AddProperty64("capacity", CapacityInWords() * kWordSize);
2153 space.AddProperty64("external", ExternalInWords() * kWordSize);
2155}
2156#endif // !PRODUCT
2157
2158} // namespace dart
static float next(float f)
static const char marker[]
SI void store(P *ptr, const T &val)
SI T load(const P *ptr)
Definition: Transform_inl.h:98
SI F table(const skcms_Curve *curve, F v)
#define UNREACHABLE()
Definition: assert.h:248
#define RELEASE_ASSERT_WITH_MSG(cond, msg)
Definition: assert.h:332
#define ASSERT_EQUAL(expected, actual)
Definition: assert.h:309
GLenum type
static constexpr bool UseCardMarkingForAllocation(const intptr_t array_length)
Definition: object.h:10818
static void FollowForwardingPointers(Thread *thread)
Definition: become.cc:330
static constexpr ClassIdTagType decode(uword value)
Definition: bitfield.h:171
static constexpr uword update(bool value, uword original)
Definition: bitfield.h:188
Block * PopNonFullBlock()
void VisitObjectPointers(ObjectPointerVisitor *visitor)
void Push(ObjectPtr raw_obj)
bool WaitForWork(RelaxedAtomic< uintptr_t > *num_busy, bool abort=false)
void VisitPointers(ObjectPtr *from, ObjectPtr *to) override
Definition: scavenger.cc:954
CheckStoreBufferScavengeVisitor(ObjectSet *in_store_buffer, const SemiSpace *to, const char *msg)
Definition: scavenger.cc:925
void VisitObject(ObjectPtr obj) override
Definition: scavenger.cc:934
void VisitPointers(ObjectPtr *from, ObjectPtr *to) override
Definition: scavenger.cc:890
CollectStoreBufferScavengeVisitor(ObjectSet *in_store_buffer, const char *msg)
Definition: scavenger.cc:885
static ThreadPool * thread_pool()
Definition: dart.h:73
static IsolateGroup * vm_isolate_group()
Definition: dart.h:69
void UpdateUnreachable(IsolateGroup *isolate_group)
void UpdateRelocated(IsolateGroup *isolate_group)
static intptr_t InstanceSize()
Definition: object.h:13012
static ForwardingCorpse * AsForwarder(uword addr, intptr_t size)
Definition: become.cc:20
void set_target(ObjectPtr target)
Definition: become.h:28
static FreeListElement * AsElement(uword addr, intptr_t size)
Definition: freelist.cc:16
void Enqueue(PtrType ptr)
Definition: gc_shared.h:40
PtrType Release()
Definition: gc_shared.h:65
Thread * thread() const
void HandleNewTLAB(intptr_t old_tlab_remaining_space, bool is_first_tlab)
Definition: sampler.cc:162
static Dart_HeapSamplingDeleteCallback delete_callback()
Definition: sampler.h:54
void SampleNewSpaceAllocation(intptr_t allocation_size)
Definition: sampler.cc:213
WeakSelector
Definition: heap.h:43
@ kHeapSamplingData
Definition: heap.h:52
@ kNumWeakSelectors
Definition: heap.h:54
@ kNew
Definition: heap.h:38
@ kOld
Definition: heap.h:39
IsolateGroup * isolate_group() const
Definition: heap.h:273
void CheckConcurrentMarking(Thread *thread, GCReason reason, intptr_t size)
Definition: heap.cc:594
PageSpace * old_space()
Definition: heap.h:63
void WaitForSweeperTasksAtSafepoint(Thread *thread)
Definition: heap.cc:680
WeakTable * GetWeakTable(Space space, WeakSelector selector) const
Definition: heap.h:225
void SetWeakTable(Space space, WeakSelector selector, WeakTable *value)
Definition: heap.h:232
void UpdateGlobalMaxUsed()
Definition: heap.cc:689
StoreBuffer * store_buffer() const
Definition: isolate.h:509
void ForEachIsolate(std::function< void(Isolate *isolate)> function, bool at_safepoint=false)
Definition: isolate.cc:2841
static IsolateGroup * Current()
Definition: isolate.h:539
void VisitObjectIdRingPointers(ObjectPointerVisitor *visitor)
Definition: isolate.cc:2987
void VisitObjectPointers(ObjectPointerVisitor *visitor, ValidationPolicy validate_frames)
Definition: isolate.cc:2912
void ReleaseStoreBuffers()
Definition: isolate.cc:2776
void RememberLiveTemporaries()
Definition: isolate.cc:3008
intptr_t MutatorCount() const
Definition: isolate.h:546
void VisitWeakPersistentHandles(HandleVisitor *visitor)
Definition: isolate.cc:2998
void FlushMarkingStacks()
Definition: isolate.cc:2780
void AddProperty64(const char *name, int64_t i) const
Definition: json_stream.h:401
void AddProperty(const char *name, bool b) const
Definition: json_stream.h:395
DART_FORCE_INLINE void Process(Lambda action)
DART_NORETURN void Jump(int value, const Error &error)
Definition: longjump.cc:22
jmp_buf * Set()
Definition: longjump.cc:16
void PushBlock(Block *block)
static int64_t GetCurrentMonotonicMicros()
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
IsolateGroup * isolate_group() const
Definition: visitor.h:25
void VisitCompressedPointers(uword heap_base, CompressedObjectPtr *first, CompressedObjectPtr *last)
Definition: visitor.h:43
bool IsFreeListElement() const
ObjectPtr Decompress(uword heap_base) const
UntaggedObject * untag() const
uword heap_base() const
bool IsForwardingCorpse() const
intptr_t GetClassId() const
Definition: raw_object.h:885
bool IsPseudoObject() const
void Add(ObjectPtr raw_obj)
Definition: object_set.h:75
bool Contains(ObjectPtr raw_obj) const
Definition: object_set.h:66
static const ClassId kClassId
Definition: object.h:606
static ObjectPtr null()
Definition: object.h:433
static intptr_t tags_offset()
Definition: object.h:346
FreeList * DataFreeList(intptr_t i=0)
Definition: pages.h:301
DART_FORCE_INLINE uword TryAllocatePromoLocked(FreeList *freelist, intptr_t size)
Definition: pages.h:151
void AcquireLock(FreeList *freelist)
Definition: pages.cc:432
void PushDependencyToConcurrentMarking()
Definition: pages.h:310
void VisitObjects(ObjectVisitor *visitor) const
Definition: pages.cc:651
void ReleaseLock(FreeList *freelist)
Definition: pages.cc:436
void VisitRememberedCards(PredicateObjectPointerVisitor *visitor) const
Definition: pages.cc:685
void ResumeConcurrentMarking()
Definition: pages.cc:452
void PauseConcurrentMarking()
Definition: pages.cc:443
void ResetProgressBars() const
Definition: pages.cc:716
GCMarker * marker() const
Definition: pages.h:358
void set_next(Page *next)
Definition: page.h:103
static Page * Of(ObjectPtr obj)
Definition: page.h:162
bool IsResolved() const
Definition: page.h:272
@ kNew
Definition: page.h:73
void Unallocate(uword addr, intptr_t size)
Definition: page.h:257
void sub_live_bytes(intptr_t value)
Definition: page.h:127
ParallelScavengerTask(IsolateGroup *isolate_group, ThreadBarrier *barrier, ParallelScavengerVisitor *visitor, RelaxedAtomic< uintptr_t > *num_busy)
Definition: scavenger.cc:674
void Push(ObjectPtr obj)
Definition: pointer_block.h:37
PointerBlock< Size > * next() const
Definition: pointer_block.h:30
bool IsFull() const
Definition: pointer_block.h:34
bool IsEmpty() const
Definition: pointer_block.h:35
bool PredicateVisitCompressedPointers(uword heap_base, CompressedObjectPtr *first, CompressedObjectPtr *last)
Definition: visitor.h:100
T load(std::memory_order order=std::memory_order_relaxed) const
Definition: atomic.h:21
T fetch_add(T arg, std::memory_order order=std::memory_order_relaxed)
Definition: atomic.h:35
void set_pending(StoreBufferBlock *pending)
Definition: scavenger.cc:382
bool PredicateVisitPointers(ObjectPtr *first, ObjectPtr *last) override
Definition: scavenger.cc:234
void Finalize(StoreBuffer *store_buffer)
Definition: scavenger.cc:358
intptr_t bytes_promoted() const
Definition: scavenger.cc:277
bool WaitForWork(RelaxedAtomic< uintptr_t > *num_busy)
Definition: scavenger.cc:335
DART_FORCE_INLINE intptr_t ProcessObject(ObjectPtr obj)
Definition: scavenger.cc:1407
void VisitTypedDataViewPointers(TypedDataViewPtr view, CompressedObjectPtr *first, CompressedObjectPtr *last) override
Definition: scavenger.cc:154
void VisitingOldObject(ObjectPtr obj)
Definition: scavenger.cc:267
static bool ForwardOrSetNullIfCollected(ObjectPtr parent, CompressedObjectPtr *ptr_address)
Definition: scavenger.cc:1740
ScavengerVisitorBase(IsolateGroup *isolate_group, Scavenger *scavenger, SemiSpace *from, FreeList *freelist, PromotionStack *promotion_stack)
Definition: scavenger.cc:133
void VisitPointers(ObjectPtr *first, ObjectPtr *last) override
Definition: scavenger.cc:223
void VisitHandle(uword addr) override
Definition: scavenger.cc:657
ScavengerWeakVisitor(Thread *thread)
Definition: scavenger.cc:655
void Scavenge(Thread *thread, GCType type, GCReason reason)
Definition: scavenger.cc:1901
void ForwardDeferred()
Definition: scavenger.cc:1575
intptr_t ExternalInWords() const
Definition: scavenger.h:168
void VisitObjects(ObjectVisitor *visitor) const
Definition: scavenger.cc:1784
void VisitObjectPointers(ObjectPointerVisitor *visitor) const
Definition: scavenger.cc:1775
void PruneDeferred()
Definition: scavenger.cc:1662
void PruneWeak(GCLinkedLists *delayed)
Definition: scavenger.cc:1700
void WriteProtect(bool read_only)
Definition: scavenger.cc:2126
void AbandonRemainingTLABForDebugging(Thread *thread)
Definition: scavenger.cc:1850
bool ShouldPerformIdleScavenge(int64_t deadline)
Definition: scavenger.cc:1170
void AddRegionsToObjectSet(ObjectSet *set) const
Definition: scavenger.cc:1793
intptr_t CapacityInWords() const
Definition: scavenger.h:164
intptr_t UsedInWords() const
Definition: scavenger.h:160
SpaceUsage GetCurrentUsage() const
Definition: scavenger.h:169
int64_t gc_time_micros() const
Definition: scavenger.h:189
intptr_t AbandonRemainingTLAB(Thread *thread)
Definition: scavenger.cc:1863
intptr_t ThresholdInWords() const
Definition: scavenger.h:176
Scavenger(Heap *heap, intptr_t max_semi_capacity_in_words)
Definition: scavenger.cc:824
intptr_t collections() const
Definition: scavenger.h:193
void Forward(MarkingStackBlock *blocks)
Definition: scavenger.cc:1524
void PrintToJSONObject(JSONObject *object) const
Definition: scavenger.cc:2132
Page * head() const
Definition: scavenger.h:57
void AddList(Page *head, Page *tail)
Definition: scavenger.cc:805
void WriteProtect(bool read_only)
Definition: scavenger.cc:799
Page * TryAllocatePageLocked(bool link)
Definition: scavenger.cc:772
bool Contains(uword addr) const
Definition: scavenger.cc:792
intptr_t capacity_in_words() const
Definition: scavenger.h:54
SemiSpace(intptr_t gc_threshold_in_words)
Definition: scavenger.cc:760
intptr_t Value() const
Definition: object.h:9990
void PushBlock(Block *block, ThresholdPolicy policy)
bool Run(Args &&... args)
Definition: thread_pool.h:45
LongJumpScope * long_jump_base() const
Definition: thread_state.h:47
bool force_growth() const
Definition: thread.h:633
@ kScavengerTask
Definition: thread.h:352
@ kMarkerTask
Definition: thread.h:349
@ kIncrementalCompactorTask
Definition: thread.h:354
@ kCompactorTask
Definition: thread.h:351
static Thread * Current()
Definition: thread.h:362
bool OwnsGCSafepoint() const
Definition: thread.cc:1352
int32_t no_safepoint_scope_depth() const
Definition: thread.h:718
uword end() const
Definition: thread.h:710
bool is_marking() const
Definition: thread.h:676
uword top() const
Definition: thread.h:709
static void ExitIsolateGroupAsHelper(bool bypass_safepoint)
Definition: thread.cc:499
IsolateGroup * isolate_group() const
Definition: thread.h:541
void StoreBufferAddObjectGC(ObjectPtr obj)
Definition: thread.cc:804
void set_top(uword top)
Definition: thread.h:712
HeapProfileSampler & heap_sampler()
Definition: thread.h:1141
uword true_end() const
Definition: thread.h:711
static bool EnterIsolateGroupAsHelper(IsolateGroup *isolate_group, TaskKind kind, bool bypass_safepoint)
Definition: thread.cc:481
void DeferredMarkLiveTemporaries()
Definition: thread.cc:1144
static ObjectPtr FromAddr(uword addr)
Definition: raw_object.h:516
bool IsCardRemembered() const
Definition: raw_object.h:385
DART_FORCE_INLINE intptr_t VisitPointersNonvirtual(V *visitor)
Definition: raw_object.h:480
static uword ToAddr(const UntaggedObject *raw_obj)
Definition: raw_object.h:522
bool TryAcquireRememberedBit()
Definition: raw_object.h:365
intptr_t VisitPointers(ObjectPointerVisitor *visitor)
Definition: raw_object.h:447
bool IsRemembered() const
Definition: raw_object.h:361
static bool IsEvacuationCandidate(uword tags)
Definition: raw_object.h:329
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static T Minimum(T x, T y)
Definition: utils.h:36
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:92
static intptr_t InstanceSize()
Definition: object.h:6742
static intptr_t InstanceSize()
Definition: object.h:12932
static intptr_t InstanceSize()
Definition: object.h:12958
static WeakTable * NewFrom(WeakTable *original)
Definition: weak_table.h:45
void(* Dart_HeapSamplingDeleteCallback)(void *data)
Definition: dart_api.h:1288
#define ASSERT(E)
static bool b
struct MyStruct a[10]
#define FATAL(error)
glong glong end
GAsyncResult * result
uint32_t * target
size_t length
#define MSAN_UNPOISON(ptr, len)
def link(from_root, to_root)
Definition: dart_pkg.py:44
Definition: dart_vm.cc:33
bool IsTypedDataViewClassId(intptr_t index)
Definition: class_id.h:439
bool IsTypedDataClassId(intptr_t index)
Definition: class_id.h:433
StoreBuffer::Block StoreBufferBlock
ScavengerVisitorBase< true > ParallelScavengerVisitor
Definition: scavenger.cc:637
static constexpr intptr_t kNewObjectAlignmentOffset
WeakSlices
Definition: marker.cc:780
@ kPruneWeak
Definition: scavenger.cc:1278
@ kProgressBars
Definition: scavenger.cc:1276
@ kRememberLiveTemporaries
Definition: scavenger.cc:1277
@ kWeakTables
Definition: marker.cc:782
@ kNumWeakSlices
Definition: marker.cc:784
@ kWeakHandles
Definition: marker.cc:781
constexpr double MicrosecondsToSeconds(int64_t micros)
Definition: globals.h:571
static constexpr intptr_t kPageSizeInWords
Definition: page.h:28
GCType
Definition: spaces.h:32
static DART_FORCE_INLINE uword ReadHeaderRelaxed(ObjectPtr obj)
Definition: scavenger.cc:118
static constexpr intptr_t kPageSize
Definition: page.h:27
static constexpr intptr_t kConservativeInitialScavengeSpeed
Definition: scavenger.cc:822
static bool IsUnreachable(const ObjectPtr obj)
Definition: marker.cc:702
intptr_t RawSmiValue(const SmiPtr raw_value)
MarkingStack::Block MarkingStackBlock
static DART_FORCE_INLINE bool IsForwarding(uword header)
Definition: scavenger.cc:66
bool IsUnmodifiableTypedDataViewClassId(intptr_t index)
Definition: class_id.h:453
constexpr intptr_t kWordSizeLog2
Definition: globals.h:507
static bool IsScavengeSurvivor(ObjectPtr obj)
Definition: scavenger.cc:1401
uintptr_t uword
Definition: globals.h:501
constexpr intptr_t MBInWords
Definition: globals.h:537
bool IsAllocatableInNewSpace(intptr_t size)
Definition: spaces.h:57
BlockWorkList< PromotionStack > PromotionWorkList
static DART_FORCE_INLINE ObjectPtr ForwardedObj(uword header)
Definition: scavenger.cc:73
ScavengerVisitorBase< false > SerialScavengerVisitor
Definition: scavenger.cc:636
@ kNotForwarded
Definition: scavenger.cc:56
@ kForwardingMask
Definition: scavenger.cc:55
@ kForwarded
Definition: scavenger.cc:57
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
GCReason
Definition: spaces.h:40
const intptr_t cid
static constexpr intptr_t kObjectAlignmentMask
raw_obj untag() -> num_entries()) VARIABLE_COMPRESSED_VISITOR(Array, Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(TypedData, TypedData::ElementSizeInBytes(raw_obj->GetClassId()) *Smi::Value(raw_obj->untag() ->length())) VARIABLE_COMPRESSED_VISITOR(Record, RecordShape(raw_obj->untag() ->shape()).num_fields()) VARIABLE_NULL_VISITOR(CompressedStackMaps, CompressedStackMaps::PayloadSizeOf(raw_obj)) VARIABLE_NULL_VISITOR(OneByteString, Smi::Value(raw_obj->untag() ->length())) VARIABLE_NULL_VISITOR(TwoByteString, Smi::Value(raw_obj->untag() ->length())) intptr_t UntaggedField::VisitFieldPointers(FieldPtr raw_obj, ObjectPointerVisitor *visitor)
Definition: raw_object.cc:558
@ kAllowMarked
Definition: verifier.h:21
@ kForbidMarked
Definition: verifier.h:21
static DART_FORCE_INLINE void WriteHeaderRelaxed(ObjectPtr obj, uword header)
Definition: scavenger.cc:124
void MournFinalizerEntry(GCVisitorType *visitor, FinalizerEntryPtr current_entry)
Definition: gc_shared.h:162
static void objcpy(void *dst, const void *src, size_t size)
static constexpr intptr_t kAllocationRedZoneSize
Definition: page.h:41
constexpr intptr_t kWordSize
Definition: globals.h:509
static constexpr intptr_t kObjectAlignment
constexpr double MicrosecondsToMilliseconds(int64_t micros)
Definition: globals.h:574
RootSlices
Definition: marker.cc:733
@ kObjectIdRing
Definition: marker.cc:735
@ kIsolate
Definition: marker.cc:734
@ kNumRootSlices
Definition: scavenger.cc:1246
constexpr intptr_t KBInWords
Definition: globals.h:535
@ kHeapObjectTag
bool IsExternalTypedDataClassId(intptr_t index)
Definition: class_id.h:447
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
ObjectPtr CompressedObjectPtr
static DART_FORCE_INLINE uword ForwardingHeader(ObjectPtr target)
Definition: scavenger.cc:79
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
DEF_SWITCHES_START aot vmservice shared library Name of the *so containing AOT compiled Dart assets for launching the service isolate vm snapshot The VM snapshot data that will be memory mapped as read only SnapshotAssetPath must be present isolate snapshot The isolate snapshot data that will be memory mapped as read only SnapshotAssetPath must be present cache dir Path to the cache directory This is different from the persistent_cache_path in embedder which is used for Skia shader cache icu native lib Path to the library file that exports the ICU data vm service The hostname IP address on which the Dart VM Service should be served If not set
Definition: switches.h:76
dst
Definition: cp.py:12
#define LIKELY(cond)
Definition: globals.h:260
#define Px
Definition: globals.h:410
#define UNLIKELY(cond)
Definition: globals.h:261
static const char header[]
Definition: skpbench.cpp:88
#define NO_SANITIZE_THREAD
#define TIMELINE_FUNCTION_GC_DURATION(thread, name)
Definition: timeline.h:41