Flutter Engine
The Flutter Engine
pages.cc
Go to the documentation of this file.
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/pages.h"
6
7#include "platform/assert.h"
10#include "vm/dart.h"
11#include "vm/heap/become.h"
12#include "vm/heap/compactor.h"
13#include "vm/heap/marker.h"
14#include "vm/heap/safepoint.h"
15#include "vm/heap/sweeper.h"
16#include "vm/lockers.h"
17#include "vm/log.h"
18#include "vm/object.h"
19#include "vm/object_set.h"
20#include "vm/os_thread.h"
22#include "vm/virtual_memory.h"
23
24namespace dart {
25
26 DEFINE_FLAG(int,
27 old_gen_growth_space_ratio,
28 20,
29 "The desired maximum percentage of free space after old gen GC");
30 DEFINE_FLAG(int,
31 old_gen_growth_time_ratio,
32 3,
33 "The desired maximum percentage of time spent in old gen GC");
34 DEFINE_FLAG(int,
35 old_gen_growth_rate,
36 280,
37 "The max number of pages the old generation can grow at a time");
38 DEFINE_FLAG(bool,
39 print_free_list_before_gc,
40 false,
41 "Print free list statistics before a GC");
42 DEFINE_FLAG(bool,
43 print_free_list_after_gc,
44 false,
45 "Print free list statistics after a GC");
46DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions.");
47
48// The initial estimate of how many words we can mark per microsecond (usage
49// before / mark-sweep time). This is a conservative value observed running
50// Flutter on a Nexus 4. After the first mark-sweep, we instead use a value
51// based on the device's actual speed.
52static constexpr intptr_t kConservativeInitialMarkSpeed = 20;
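// (For scale: at 20 words/us, marking an 8 MW live set -- 64 MB on a 64-bit
// target -- is estimated at roughly 400 ms, so underestimating the speed biases
// the idle-GC heuristics below away from starting work that could overrun an
// idle deadline.)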
53
54PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
55 : heap_(heap),
56 num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1),
57 freelists_(new FreeList[num_freelists_]),
58 pages_lock_(),
59 max_capacity_in_words_(max_capacity_in_words),
60 usage_(),
61 allocated_black_in_words_(0),
62 tasks_lock_(),
63 tasks_(0),
64 concurrent_marker_tasks_(0),
65 concurrent_marker_tasks_active_(0),
66 pause_concurrent_marking_(0),
67 phase_(kDone),
68#if defined(DEBUG)
69 iterating_thread_(nullptr),
70#endif
71 page_space_controller_(heap,
72 FLAG_old_gen_growth_space_ratio,
73 FLAG_old_gen_growth_rate,
74 FLAG_old_gen_growth_time_ratio),
75 marker_(nullptr),
76 gc_time_micros_(0),
77 collections_(0),
78 mark_words_per_micro_(kConservativeInitialMarkSpeed),
79 enable_concurrent_mark_(FLAG_concurrent_mark) {
80 ASSERT(heap != nullptr);
81
82 // We aren't holding the lock but no one can reference us yet.
83 UpdateMaxCapacityLocked();
84 UpdateMaxUsed();
85
86 for (intptr_t i = 0; i < num_freelists_; i++) {
87 freelists_[i].Reset();
88 }
89
90 TryReserveForOOM();
91 }
92
93 PageSpace::~PageSpace() {
94   {
95     MonitorLocker ml(tasks_lock());
96     AssistTasks(&ml);
97 while (tasks() > 0) {
98 ml.Wait();
99 }
100 }
101 FreePages(pages_);
102 FreePages(exec_pages_);
103 FreePages(large_pages_);
104 FreePages(image_pages_);
105 ASSERT(marker_ == nullptr);
106 delete[] freelists_;
107}
108
109intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
110 intptr_t page_size = Utils::RoundUp(size + Page::OldObjectStartOffset(),
111                                     VirtualMemory::PageSize());
112 return page_size >> kWordSizeLog2;
113}
114
115void PageSpace::AddPageLocked(Page* page) {
116 if (pages_ == nullptr) {
117 pages_ = page;
118 } else {
119 pages_tail_->set_next(page);
120 }
121 pages_tail_ = page;
122}
123
124void PageSpace::AddLargePageLocked(Page* page) {
125 if (large_pages_ == nullptr) {
126 large_pages_ = page;
127 } else {
128 large_pages_tail_->set_next(page);
129 }
130 large_pages_tail_ = page;
131}
132
133void PageSpace::AddExecPageLocked(Page* page) {
134 if (exec_pages_ == nullptr) {
135 exec_pages_ = page;
136 } else {
137 if (FLAG_write_protect_code) {
138 exec_pages_tail_->WriteProtect(false);
139 }
140 exec_pages_tail_->set_next(page);
141 if (FLAG_write_protect_code) {
142 exec_pages_tail_->WriteProtect(true);
143 }
144 }
145 exec_pages_tail_ = page;
146}
147
148void PageSpace::RemovePageLocked(Page* page, Page* previous_page) {
149 if (previous_page != nullptr) {
150 previous_page->set_next(page->next());
151 } else {
152 pages_ = page->next();
153 }
154 if (page == pages_tail_) {
155 pages_tail_ = previous_page;
156 }
157}
158
159void PageSpace::RemoveLargePageLocked(Page* page, Page* previous_page) {
160 if (previous_page != nullptr) {
161 previous_page->set_next(page->next());
162 } else {
163 large_pages_ = page->next();
164 }
165 if (page == large_pages_tail_) {
166 large_pages_tail_ = previous_page;
167 }
168}
169
170void PageSpace::RemoveExecPageLocked(Page* page, Page* previous_page) {
171 if (previous_page != nullptr) {
172 previous_page->set_next(page->next());
173 } else {
174 exec_pages_ = page->next();
175 }
176 if (page == exec_pages_tail_) {
177 exec_pages_tail_ = previous_page;
178 }
179}
180
181Page* PageSpace::AllocatePage(bool is_exec, bool link) {
182 {
183 MutexLocker ml(&pages_lock_);
184 if (!CanIncreaseCapacityInWordsLocked(kPageSizeInWords)) {
185 return nullptr;
186 }
187 IncreaseCapacityInWordsLocked(kPageSizeInWords);
188 }
189 uword flags = 0;
190 if (is_exec) {
191 flags |= Page::kExecutable;
192 }
193 if ((heap_ != nullptr) && (heap_->is_vm_isolate())) {
194 flags |= Page::kVMIsolate;
195 }
196 Page* page = Page::Allocate(kPageSize, flags);
197 if (page == nullptr) {
198 RELEASE_ASSERT(!FLAG_abort_on_oom);
199 IncreaseCapacityInWords(-kPageSizeInWords);
200 return nullptr;
201 }
202
203 MutexLocker ml(&pages_lock_);
204 if (link) {
205 if (is_exec) {
206 AddExecPageLocked(page);
207 } else {
208 AddPageLocked(page);
209 }
210 }
211
212 page->set_object_end(page->memory_->end());
213 if (!is_exec && (heap_ != nullptr) && !heap_->is_vm_isolate()) {
214 page->AllocateForwardingPage();
215 }
216
217 if (is_exec) {
218 UnwindingRecords::RegisterExecutablePage(page);
219 }
220 return page;
221}
222
223Page* PageSpace::AllocateLargePage(intptr_t size, bool is_exec) {
224 const intptr_t page_size_in_words = LargePageSizeInWordsFor(
225 size + (is_exec ? UnwindingRecordsPlatform::SizeInBytes() : 0));
226 {
227 MutexLocker ml(&pages_lock_);
228 if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) {
229 return nullptr;
230 }
231 IncreaseCapacityInWordsLocked(page_size_in_words);
232 }
233 uword flags = 0;
234 if (is_exec) {
235 flags |= Page::kExecutable;
236 }
237 if ((heap_ != nullptr) && (heap_->is_vm_isolate())) {
238 flags |= Page::kVMIsolate;
239 }
240 Page* page = Page::Allocate(page_size_in_words << kWordSizeLog2, flags);
241
242 MutexLocker ml(&pages_lock_);
243 if (page == nullptr) {
244 IncreaseCapacityInWordsLocked(-page_size_in_words);
245 return nullptr;
246 } else {
247 intptr_t actual_size_in_words = page->memory_->size() >> kWordSizeLog2;
248 if (actual_size_in_words != page_size_in_words) {
249 IncreaseCapacityInWordsLocked(actual_size_in_words - page_size_in_words);
250 }
251 }
252 if (is_exec) {
253 AddExecPageLocked(page);
254 } else {
255 AddLargePageLocked(page);
256 }
257
258 if (is_exec) {
259 UnwindingRecords::RegisterExecutablePage(page);
260 }
261
262 // Only one object in this page (at least until Array::MakeFixedLength
263 // is called).
264 page->set_object_end(page->object_start() + size);
265 return page;
266}
267
268void PageSpace::TruncateLargePage(Page* page,
269 intptr_t new_object_size_in_bytes) {
270 const intptr_t old_object_size_in_bytes =
271 page->object_end() - page->object_start();
272 ASSERT(new_object_size_in_bytes <= old_object_size_in_bytes);
273 ASSERT(!page->is_executable());
274 const intptr_t new_page_size_in_words =
275 LargePageSizeInWordsFor(new_object_size_in_bytes);
276 VirtualMemory* memory = page->memory_;
277 const intptr_t old_page_size_in_words = (memory->size() >> kWordSizeLog2);
278 if (new_page_size_in_words < old_page_size_in_words) {
279 memory->Truncate(new_page_size_in_words << kWordSizeLog2);
280 IncreaseCapacityInWords(new_page_size_in_words - old_page_size_in_words);
281 page->set_object_end(page->object_start() + new_object_size_in_bytes);
282 }
283}
284
285void PageSpace::FreePage(Page* page, Page* previous_page) {
286 bool is_exec = page->is_executable();
287 {
288 MutexLocker ml(&pages_lock_);
289 IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
290 if (is_exec) {
291 RemoveExecPageLocked(page, previous_page);
292 } else {
293 RemovePageLocked(page, previous_page);
294 }
295 }
296 if (is_exec && !page->is_image()) {
297 UnwindingRecords::UnregisterExecutablePage(page);
298 }
299 page->Deallocate();
300}
301
302void PageSpace::FreeLargePage(Page* page, Page* previous_page) {
303 ASSERT(!page->is_executable());
304 MutexLocker ml(&pages_lock_);
305 IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
306 RemoveLargePageLocked(page, previous_page);
307 page->Deallocate();
308}
309
310void PageSpace::FreePages(Page* pages) {
311 Page* page = pages;
312 while (page != nullptr) {
313 Page* next = page->next();
314 if (page->is_executable() && !page->is_image()) {
315 UnwindingRecords::UnregisterExecutablePage(page);
316 }
317 page->Deallocate();
318 page = next;
319 }
320}
321
322uword PageSpace::TryAllocateInFreshPage(intptr_t size,
323 FreeList* freelist,
324 bool is_exec,
325 GrowthPolicy growth_policy,
326 bool is_locked) {
327 ASSERT(IsAllocatableViaFreeLists(size));
328
329 if (growth_policy != kForceGrowth) {
330 ASSERT(!Thread::Current()->force_growth());
331 heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace,
332                               kPageSize);
333 }
334
335 uword result = 0;
336 SpaceUsage after_allocation = GetCurrentUsage();
337 after_allocation.used_in_words += size >> kWordSizeLog2;
338 // Can we grow by one page?
339 after_allocation.capacity_in_words += kPageSizeInWords;
340 if (growth_policy == kForceGrowth ||
341 !page_space_controller_.ReachedHardThreshold(after_allocation)) {
342 Page* page = AllocatePage(is_exec);
343 if (page == nullptr) {
344 return 0;
345 }
346 // Start of the newly allocated page is the allocated object.
347 result = page->object_start();
348 // Note: usage_.capacity_in_words is increased by AllocatePage.
349 usage_.used_in_words += (size >> kWordSizeLog2);
350 // Enqueue the remainder in the free list.
351 uword free_start = result + size;
352 intptr_t free_size = page->object_end() - free_start;
353 if (free_size > 0) {
354 if (is_locked) {
355 freelist->FreeLocked(free_start, free_size);
356 } else {
357 freelist->Free(free_start, free_size);
358 }
359 }
360 }
361 return result;
362}
363
364uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
365 bool is_exec,
366 GrowthPolicy growth_policy) {
367 ASSERT(!IsAllocatableViaFreeLists(size));
368
369 if (growth_policy != kForceGrowth) {
370 ASSERT(!Thread::Current()->force_growth());
371 heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace, size);
372 }
373
374 intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
375 if ((page_size_in_words << kWordSizeLog2) < size) {
376 // On overflow we fail to allocate.
377 return 0;
378 }
379
380 uword result = 0;
381 SpaceUsage after_allocation = GetCurrentUsage();
382 after_allocation.used_in_words += size >> kWordSizeLog2;
383 after_allocation.capacity_in_words += page_size_in_words;
384 if (growth_policy == kForceGrowth ||
385 !page_space_controller_.ReachedHardThreshold(after_allocation)) {
386 Page* page = AllocateLargePage(size, is_exec);
387 if (page != nullptr) {
388 result = page->object_start();
389 // Note: usage_.capacity_in_words is increased by AllocateLargePage.
390 usage_.used_in_words += (size >> kWordSizeLog2);
391 }
392 }
393 return result;
394}
395
396uword PageSpace::TryAllocateInternal(intptr_t size,
397 FreeList* freelist,
398 bool is_exec,
399 GrowthPolicy growth_policy,
400 bool is_protected,
401 bool is_locked) {
402 ASSERT(size >= kObjectAlignment);
403 ASSERT(Utils::IsAligned(size, kObjectAlignment));
404 uword result = 0;
405 if (IsAllocatableViaFreeLists(size)) {
406 if (is_locked) {
407 result = freelist->TryAllocateLocked(size, is_protected);
408 } else {
409 result = freelist->TryAllocate(size, is_protected);
410 }
411 if (result == 0) {
412 result = TryAllocateInFreshPage(size, freelist, is_exec, growth_policy,
413 is_locked);
414 // usage_ is updated by the call above.
415 } else {
416 usage_.used_in_words += (size >> kWordSizeLog2);
417 }
418 } else {
419 result = TryAllocateInFreshLargePage(size, is_exec, growth_policy);
420 // usage_ is updated by the call above.
421 }
422 ASSERT((result & kObjectAlignmentMask) == kOldObjectAlignmentOffset);
423 return result;
424}
425
426 void PageSpace::AcquireLock(FreeList* freelist) {
427 freelist->mutex()->Lock();
428}
429
430 void PageSpace::ReleaseLock(FreeList* freelist) {
431 usage_.used_in_words +=
432     (freelist->TakeUnaccountedSizeLocked() >> kWordSizeLog2);
433 freelist->mutex()->Unlock();
434 usage_.used_in_words -= (freelist->ReleaseBumpAllocation() >> kWordSizeLog2);
435}
436
437 void PageSpace::PauseConcurrentMarking() {
438 MonitorLocker ml(&tasks_lock_);
439 ASSERT(pause_concurrent_marking_.load() == 0);
440 pause_concurrent_marking_.store(1);
441 while (concurrent_marker_tasks_active_ != 0) {
442 ml.Wait();
443 }
444}
445
446 void PageSpace::ResumeConcurrentMarking() {
447 MonitorLocker ml(&tasks_lock_);
448 ASSERT(pause_concurrent_marking_.load() != 0);
449 pause_concurrent_marking_.store(0);
450 ml.NotifyAll();
451}
452
453 void PageSpace::YieldConcurrentMarking() {
454 MonitorLocker ml(&tasks_lock_);
455 if (pause_concurrent_marking_.load() != 0) {
457 concurrent_marker_tasks_active_--;
458 if (concurrent_marker_tasks_active_ == 0) {
459 ml.NotifyAll();
460 }
461 while (pause_concurrent_marking_.load() != 0) {
462 ml.Wait();
463 }
464 concurrent_marker_tasks_active_++;
465 }
466}
467
468 class BasePageIterator : ValueObject {
469  public:
470 explicit BasePageIterator(const PageSpace* space) : space_(space) {}
471
472 Page* page() const { return page_; }
473
474 bool Done() const { return page_ == nullptr; }
475
476 void Advance() {
477 ASSERT(!Done());
478 page_ = page_->next();
479 if ((page_ == nullptr) && (list_ == kRegular)) {
480       list_ = kExecutable;
481       page_ = space_->exec_pages_;
482 }
483 if ((page_ == nullptr) && (list_ == kExecutable)) {
484 list_ = kLarge;
485 page_ = space_->large_pages_;
486 }
487 if ((page_ == nullptr) && (list_ == kLarge)) {
488 list_ = kImage;
489 page_ = space_->image_pages_;
490 }
491 ASSERT((page_ != nullptr) || (list_ == kImage));
492 }
493
494 protected:
495 enum List { kRegular, kExecutable, kLarge, kImage };
496
497 void Initialize() {
498 list_ = kRegular;
499 page_ = space_->pages_;
500 if (page_ == nullptr) {
501       list_ = kExecutable;
502       page_ = space_->exec_pages_;
503 if (page_ == nullptr) {
504 list_ = kLarge;
505 page_ = space_->large_pages_;
506 if (page_ == nullptr) {
507 list_ = kImage;
508 page_ = space_->image_pages_;
509 }
510 }
511 }
512 }
513
514 const PageSpace* space_ = nullptr;
515 List list_ = kRegular;
516 Page* page_ = nullptr;
517};
518
519// Provides unsafe access to all pages. Assumes pages are walkable.
520 class UnsafeExclusivePageIterator : public BasePageIterator {
521  public:
522   explicit UnsafeExclusivePageIterator(const PageSpace* space)
523       : BasePageIterator(space) {
524 Initialize();
525 }
526};
527
528// Provides exclusive access to all pages, and ensures they are walkable.
529 class ExclusivePageIterator : public BasePageIterator {
530  public:
531 explicit ExclusivePageIterator(const PageSpace* space)
532 : BasePageIterator(space), ml_(&space->pages_lock_) {
533 space_->MakeIterable();
534 Initialize();
535 }
536
537 private:
538 MutexLocker ml_;
539 NoSafepointScope no_safepoint;
540};
541
542// Provides exclusive access to code pages, and ensures they are walkable.
543// NOTE: This does not iterate over large pages which can contain code.
544 class ExclusiveCodePageIterator : ValueObject {
545  public:
546   explicit ExclusiveCodePageIterator(const PageSpace* space)
547       : space_(space), ml_(&space->pages_lock_) {
548 space_->MakeIterable();
549 page_ = space_->exec_pages_;
550 }
551 Page* page() const { return page_; }
552 bool Done() const { return page_ == nullptr; }
553 void Advance() {
554 ASSERT(!Done());
555 page_ = page_->next();
556 }
557
558 private:
559 const PageSpace* space_;
560 MutexLocker ml_;
561 NoSafepointScope no_safepoint;
562 Page* page_;
563};
564
565void PageSpace::MakeIterable() const {
566 // Assert not called from concurrent sweeper task.
567 // TODO(koda): Use thread/task identity when implemented.
568 ASSERT(IsolateGroup::Current()->heap() != nullptr);
569 for (intptr_t i = 0; i < num_freelists_; i++) {
570 freelists_[i].MakeIterable();
571 }
572}
573
574 void PageSpace::ReleaseBumpAllocation() {
575 for (intptr_t i = 0; i < num_freelists_; i++) {
576 size_t leftover = freelists_[i].ReleaseBumpAllocation();
577 usage_.used_in_words -= (leftover >> kWordSizeLog2);
578 }
579}
580
581 void PageSpace::AbandonMarkingForShutdown() {
582 delete marker_;
583 marker_ = nullptr;
584}
585
586 void PageSpace::UpdateMaxCapacityLocked() {
587 ASSERT(heap_ != nullptr);
588 ASSERT(heap_->isolate_group() != nullptr);
589 auto isolate_group = heap_->isolate_group();
590 isolate_group->GetHeapOldCapacityMaxMetric()->SetValue(
591 static_cast<int64_t>(usage_.capacity_in_words) * kWordSize);
592}
593
594 void PageSpace::UpdateMaxUsed() {
595 ASSERT(heap_ != nullptr);
596 ASSERT(heap_->isolate_group() != nullptr);
597 auto isolate_group = heap_->isolate_group();
598 isolate_group->GetHeapOldUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
599}
600
601bool PageSpace::Contains(uword addr) const {
602 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
603 if (it.page()->Contains(addr)) {
604 return true;
605 }
606 }
607 return false;
608}
609
610 bool PageSpace::ContainsUnsafe(uword addr) const {
611 for (UnsafeExclusivePageIterator it(this); !it.Done(); it.Advance()) {
612 if (it.page()->Contains(addr)) {
613 return true;
614 }
615 }
616 return false;
617}
618
619 bool PageSpace::CodeContains(uword addr) const {
620 for (ExclusiveCodePageIterator it(this); !it.Done(); it.Advance()) {
621 if (it.page()->Contains(addr)) {
622 return true;
623 }
624 }
625 return false;
626}
627
628 bool PageSpace::DataContains(uword addr) const {
629 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
630 if (!it.page()->is_executable() && it.page()->Contains(addr)) {
631 return true;
632 }
633 }
634 return false;
635}
636
637 void PageSpace::AddRegionsToObjectSet(ObjectSet* set) const {
638 ASSERT((pages_ != nullptr) || (exec_pages_ != nullptr) ||
639 (large_pages_ != nullptr));
640 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
641 set->AddRegion(it.page()->object_start(), it.page()->object_end());
642 }
643}
644
645 void PageSpace::VisitObjects(ObjectVisitor* visitor) const {
646 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
647 it.page()->VisitObjects(visitor);
648 }
649}
650
651 void PageSpace::VisitObjectsNoImagePages(ObjectVisitor* visitor) const {
652 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
653 if (!it.page()->is_image()) {
654 it.page()->VisitObjects(visitor);
655 }
656 }
657}
658
659 void PageSpace::VisitObjectsImagePages(ObjectVisitor* visitor) const {
660 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
661 if (it.page()->is_image()) {
662 it.page()->VisitObjects(visitor);
663 }
664 }
665}
666
667 void PageSpace::VisitObjectsUnsafe(ObjectVisitor* visitor) const {
668 for (UnsafeExclusivePageIterator it(this); !it.Done(); it.Advance()) {
669 it.page()->VisitObjectsUnsafe(visitor);
670 }
671}
672
673 void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
674 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
675 it.page()->VisitObjectPointers(visitor);
676 }
677}
678
679 void PageSpace::VisitRememberedCards(ObjectPointerVisitor* visitor) const {
680 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
681 (Thread::Current()->task_kind() == Thread::kScavengerTask));
682
683 // Wait for the sweeper to finish mutating the large page list.
684 {
685 MonitorLocker ml(tasks_lock());
686 while (phase() == kSweepingLarge) {
687 ml.Wait(); // No safepoint check.
688 }
689 }
690
691 // Large pages may be added concurrently due to promotion in another scavenge
692 // worker, so terminate the traversal when we hit the tail we saw while
693 // holding the pages lock, instead of at nullptr, otherwise we are racing when
694 // we read Page::next_ and Page::remembered_cards_.
695 Page* page;
696 Page* tail;
697 {
698 MutexLocker ml(&pages_lock_);
699 page = large_pages_;
700 tail = large_pages_tail_;
701 }
702 while (page != nullptr) {
703 page->VisitRememberedCards(visitor);
704 if (page == tail) break;
705 page = page->next();
706 }
707}
708
709 void PageSpace::ResetProgressBars() const {
710 for (Page* page = large_pages_; page != nullptr; page = page->next()) {
711 page->ResetProgressBar();
712 }
713}
714
715void PageSpace::WriteProtect(bool read_only) {
716 if (read_only) {
717 // Avoid MakeIterable trying to write to the heap.
718 ReleaseBumpAllocation();
719 }
720 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
721 if (!it.page()->is_image()) {
722 it.page()->WriteProtect(read_only);
723 }
724 }
725}
726
727#ifndef PRODUCT
728 void PageSpace::PrintToJSONObject(JSONObject* object) const {
729 auto isolate_group = IsolateGroup::Current();
730 ASSERT(isolate_group != nullptr);
731 JSONObject space(object, "old");
732 space.AddProperty("type", "HeapSpace");
733 space.AddProperty("name", "old");
734 space.AddProperty("vmName", "PageSpace");
735 space.AddProperty("collections", collections());
736 space.AddProperty64("used", UsedInWords() * kWordSize);
737 space.AddProperty64("capacity", CapacityInWords() * kWordSize);
738 space.AddProperty64("external", ExternalInWords() * kWordSize);
739 space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros()));
740 if (collections() > 0) {
741 int64_t run_time = isolate_group->UptimeMicros();
742 run_time = Utils::Maximum(run_time, static_cast<int64_t>(0));
743 double run_time_millis = MicrosecondsToMilliseconds(run_time);
744 double avg_time_between_collections =
745 run_time_millis / static_cast<double>(collections());
746 space.AddProperty("avgCollectionPeriodMillis",
747 avg_time_between_collections);
748 } else {
749 space.AddProperty("avgCollectionPeriodMillis", 0.0);
750 }
751}
752
753 class HeapMapAsJSONVisitor : public ObjectVisitor {
754  public:
755 explicit HeapMapAsJSONVisitor(JSONArray* array) : array_(array) {}
756 void VisitObject(ObjectPtr obj) override {
757 array_->AddValue(obj->untag()->HeapSize() / kObjectAlignment);
758 array_->AddValue(obj->GetClassId());
759 }
760
761 private:
762 JSONArray* array_;
763};
764
765 void PageSpace::PrintHeapMapToJSONStream(IsolateGroup* isolate_group,
766                                          JSONStream* stream) const {
767 JSONObject heap_map(stream);
768 heap_map.AddProperty("type", "HeapMap");
769 heap_map.AddProperty("freeClassId", static_cast<intptr_t>(kFreeListElement));
770 heap_map.AddProperty("unitSizeBytes",
771 static_cast<intptr_t>(kObjectAlignment));
772 heap_map.AddProperty("pageSizeBytes", kPageSizeInWords * kWordSize);
773 {
774 JSONObject class_list(&heap_map, "classList");
775 isolate_group->class_table()->PrintToJSONObject(&class_list);
776 }
777 {
778 // "pages" is an array [page0, page1, ..., pageN], each page of the form
779 // {"object_start": "0x...", "objects": [size, class id, size, ...]}
780 // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages.
782 MutexLocker ml(&pages_lock_);
783 MakeIterable();
784 JSONArray all_pages(&heap_map, "pages");
785 for (Page* page = pages_; page != nullptr; page = page->next()) {
786 JSONObject page_container(&all_pages);
787 page_container.AddPropertyF("objectStart", "0x%" Px "",
788 page->object_start());
789 JSONArray page_map(&page_container, "objects");
790 HeapMapAsJSONVisitor printer(&page_map);
791 page->VisitObjects(&printer);
792 }
793 for (Page* page = exec_pages_; page != nullptr; page = page->next()) {
794 JSONObject page_container(&all_pages);
795 page_container.AddPropertyF("objectStart", "0x%" Px "",
796 page->object_start());
797 JSONArray page_map(&page_container, "objects");
798 HeapMapAsJSONVisitor printer(&page_map);
799 page->VisitObjects(&printer);
800 }
801 }
802}
803#endif // PRODUCT
804
805void PageSpace::WriteProtectCode(bool read_only) {
806 if (FLAG_write_protect_code) {
807 MutexLocker ml(&pages_lock_);
808 NoSafepointScope no_safepoint;
809 // No need to go through all of the data pages first.
810 Page* page = exec_pages_;
811 while (page != nullptr) {
812 ASSERT(page->is_executable());
813 page->WriteProtect(read_only);
814 page = page->next();
815 }
816 page = large_pages_;
817 while (page != nullptr) {
818 if (page->is_executable()) {
819 page->WriteProtect(read_only);
820 }
821 page = page->next();
822 }
823 }
824}
825
826 bool PageSpace::ShouldStartIdleMarkSweep(int64_t deadline) {
827 // To make a consistent decision, we should not yield for a safepoint in the
828 // middle of deciding whether to perform an idle GC.
829 NoSafepointScope no_safepoint;
830
831 if (!page_space_controller_.ReachedIdleThreshold(usage_)) {
832 return false;
833 }
834
835 {
836 MonitorLocker locker(tasks_lock());
837 if (tasks() > 0) {
838 // A concurrent sweeper is running. If we start a mark sweep now
839 // we'll have to wait for it, and this wait time is not included in
840 // mark_words_per_micro_.
841 return false;
842 }
843 }
844
845 // This uses the size of new-space because the pause time to start concurrent
846 // marking is related to the size of the root set, which is mostly new-space.
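// (For example, with 1 MW used in new-space and the initial estimate of
// 20 words/us, marking is expected to finish roughly 50 ms from now.)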
847 int64_t estimated_mark_completion =
848     OS::GetCurrentMonotonicMicros() +
849     heap_->new_space()->UsedInWords() / mark_words_per_micro_;
850 return estimated_mark_completion <= deadline;
851}
852
853 bool PageSpace::ShouldPerformIdleMarkCompact(int64_t deadline) {
854 // To make a consistent decision, we should not yield for a safepoint in the
855 // middle of deciding whether to perform an idle GC.
856 NoSafepointScope no_safepoint;
857
858 // Discount two pages to account for the newest data and code pages, whose
859 // partial use doesn't indicate fragmentation.
860 const intptr_t excess_in_words =
861     usage_.capacity_in_words - usage_.used_in_words - 2 * kPageSizeInWords;
862 const double excess_ratio = static_cast<double>(excess_in_words) /
863 static_cast<double>(usage_.capacity_in_words);
864 const bool fragmented = excess_ratio > 0.05;
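// (E.g. an old generation with 100 MB of capacity is treated as fragmented
// once more than roughly 5 MB is free beyond the two discounted pages.)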
865
866 if (!fragmented && !page_space_controller_.ReachedIdleThreshold(usage_)) {
867 return false;
868 }
869
870 {
871 MonitorLocker locker(tasks_lock());
872 if (tasks() > 0) {
873 // A concurrent sweeper is running. If we start a mark sweep now
874 // we'll have to wait for it, and this wait time is not included in
875 // mark_words_per_micro_.
876 return false;
877 }
878 }
879
880 // Assuming compaction takes as long as marking.
881 intptr_t mark_compact_words_per_micro = mark_words_per_micro_ / 2;
882 if (mark_compact_words_per_micro == 0) {
883 mark_compact_words_per_micro = 1; // Prevent division by zero.
884 }
885
886 int64_t estimated_mark_compact_completion =
887     OS::GetCurrentMonotonicMicros() +
888     UsedInWords() / mark_compact_words_per_micro;
889 return estimated_mark_compact_completion <= deadline;
890}
891
892 void PageSpace::IncrementalMarkWithSizeBudget(intptr_t size) {
893 if (marker_ != nullptr) {
894 marker_->IncrementalMarkWithSizeBudget(this, size);
895 }
896}
897
898 void PageSpace::IncrementalMarkWithTimeBudget(int64_t deadline) {
899 if (marker_ != nullptr) {
900 marker_->IncrementalMarkWithTimeBudget(this, deadline);
901 }
902}
903
904 void PageSpace::AssistTasks(MonitorLocker* ml) {
905 if (phase() == PageSpace::kMarking) {
906 ml->Exit();
907 marker_->IncrementalMarkWithUnlimitedBudget(this);
908 ml->Enter();
909 }
910 if ((phase() == kSweepingLarge) || (phase() == kSweepingRegular)) {
911 ml->Exit();
912 Sweep(/*exclusive*/ false);
913 SweepLarge();
914 ml->Enter();
915 }
916}
917
918 void PageSpace::TryReleaseReservation() {
921 if (oom_reservation_ == nullptr) return;
922 uword addr = reinterpret_cast<uword>(oom_reservation_);
923 intptr_t size = oom_reservation_->HeapSize();
924 oom_reservation_ = nullptr;
925 freelists_[kDataFreelist].Free(addr, size);
926}
927
928 bool PageSpace::MarkReservation() {
929 if (oom_reservation_ == nullptr) {
930 return false;
931 }
932 UntaggedObject* ptr = reinterpret_cast<UntaggedObject*>(oom_reservation_);
933 if (!ptr->IsMarked()) {
934 ptr->SetMarkBit();
935 }
936 return true;
937}
938
939 void PageSpace::TryReserveForOOM() {
940 if (oom_reservation_ == nullptr) {
941 uword addr = TryAllocate(kOOMReservationSize, /*exec*/ false,
942 kForceGrowth /* Don't re-enter GC */);
943 if (addr != 0) {
944 oom_reservation_ = FreeListElement::AsElement(addr, kOOMReservationSize);
945 }
946 }
947}
948
949 void PageSpace::VisitRoots(ObjectPointerVisitor* visitor) {
950 if (oom_reservation_ != nullptr) {
951 // FreeListElements are generally held untagged, but ObjectPointerVisitors
952 // expect tagged pointers.
953 ObjectPtr ptr =
954 UntaggedObject::FromAddr(reinterpret_cast<uword>(oom_reservation_));
955 visitor->VisitPointer(&ptr);
956 oom_reservation_ =
957 reinterpret_cast<FreeListElement*>(UntaggedObject::ToAddr(ptr));
958 }
959}
960
961void PageSpace::CollectGarbage(Thread* thread, bool compact, bool finalize) {
962 ASSERT(!Thread::Current()->force_growth());
963
964 if (!finalize) {
965 if (!enable_concurrent_mark()) return; // Disabled.
966 if (FLAG_marker_tasks == 0) return; // Disabled.
967 }
968
969 GcSafepointOperationScope safepoint_scope(thread);
970
971 // Wait for pending tasks to complete and then account for the driver task.
972 {
973 MonitorLocker locker(tasks_lock());
974 if (!finalize &&
975 (phase() == kMarking || phase() == kAwaitingFinalization)) {
976 // Concurrent mark is already running.
977 return;
978 }
979
980 AssistTasks(&locker);
981 while (tasks() > 0) {
982 locker.Wait();
983 }
985 set_tasks(1);
986 }
987
988 // Ensure that all threads for this isolate are at a safepoint (either
989 // stopped or in native code). We have guards around Newgen GC and oldgen GC
990 // to ensure that if two threads are racing to collect at the same time the
991 // loser skips collection and goes straight to allocation.
992 CollectGarbageHelper(thread, compact, finalize);
993
994 // Done, reset the task count.
995 {
996 MonitorLocker ml(tasks_lock());
997 set_tasks(tasks() - 1);
998 ml.NotifyAll();
999 }
1000}
1001
1002void PageSpace::CollectGarbageHelper(Thread* thread,
1003 bool compact,
1004 bool finalize) {
1005 ASSERT(thread->OwnsGCSafepoint());
1006 auto isolate_group = heap_->isolate_group();
1007 ASSERT(isolate_group == IsolateGroup::Current());
1008
1009 const int64_t start = OS::GetCurrentMonotonicMicros();
1010
1011 // Perform various cleanup that relies on no tasks interfering.
1012 isolate_group->class_table_allocator()->FreePending();
1013 isolate_group->ForEachIsolate(
1014 [&](Isolate* isolate) { isolate->field_table()->FreeOldTables(); },
1015 /*at_safepoint=*/true);
1016
1017 NoSafepointScope no_safepoints(thread);
1018
1019 if (FLAG_print_free_list_before_gc) {
1020 for (intptr_t i = 0; i < num_freelists_; i++) {
1021 OS::PrintErr("Before GC: Freelist %" Pd "\n", i);
1022 freelists_[i].Print();
1023 }
1024 }
1025
1026 if (FLAG_verify_before_gc) {
1027 heap_->VerifyGC("Verifying before marking",
1028                 phase() == kDone ? kForbidMarked : kAllowMarked);
1029 }
1030
1031 // Make code pages writable.
1032 if (finalize) WriteProtectCode(false);
1033
1034 // Save old value before GCMarker visits the weak persistent handles.
1035 SpaceUsage usage_before = GetCurrentUsage();
1036
1037 // Mark all reachable old-gen objects.
1038 if (marker_ == nullptr) {
1039 ASSERT(phase() == kDone);
1040 marker_ = new GCMarker(isolate_group, heap_);
1041 } else {
1042 ASSERT(phase() == kAwaitingFinalization);
1043 }
1044
1045 if (!finalize) {
1046 ASSERT(phase() == kDone);
1047 marker_->StartConcurrentMark(this);
1048 return;
1049 }
1050
1051 // Abandon the remainder of the bump allocation block.
1052 ReleaseBumpAllocation();
1053
1054 marker_->MarkObjects(this);
1055 usage_.used_in_words = marker_->marked_words() + allocated_black_in_words_;
1056 allocated_black_in_words_ = 0;
1057 mark_words_per_micro_ = marker_->MarkedWordsPerMicro();
1058 delete marker_;
1059 marker_ = nullptr;
1060
1061 // Reset the freelists and setup sweeping.
1062 for (intptr_t i = 0; i < num_freelists_; i++) {
1063 freelists_[i].Reset();
1064 }
1065
1066 if (FLAG_verify_before_gc) {
1067 heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
1068 }
1069
1070 {
1071 // Executable pages are always swept immediately to simplify
1072 // code protection.
1073 TIMELINE_FUNCTION_GC_DURATION(thread, "SweepExecutable");
1074 GCSweeper sweeper;
1075 Page* prev_page = nullptr;
1076 Page* page = exec_pages_;
1077 FreeList* freelist = &freelists_[kExecutableFreelist];
1078 MutexLocker ml(freelist->mutex());
1079 while (page != nullptr) {
1080 Page* next_page = page->next();
1081 bool page_in_use = sweeper.SweepPage(page, freelist);
1082 if (page_in_use) {
1083 prev_page = page;
1084 } else {
1085 FreePage(page, prev_page);
1086 }
1087 // Advance to the next page.
1088 page = next_page;
1089 }
1090 }
1091
1092 bool has_reservation = MarkReservation();
1093
1094 {
1095 // Move pages to sweeper work lists.
1096 MutexLocker ml(&pages_lock_);
1097 ASSERT(sweep_large_ == nullptr);
1098 sweep_large_ = large_pages_;
1099 large_pages_ = large_pages_tail_ = nullptr;
1100 ASSERT(sweep_regular_ == nullptr);
1101 if (!compact) {
1102 sweep_regular_ = pages_;
1103 pages_ = pages_tail_ = nullptr;
1104 }
1105 }
1106
1107 bool can_verify;
1108 SweepNew();
1109 if (compact) {
1110 Compact(thread);
1111 set_phase(kDone);
1112 can_verify = true;
1113 } else if (FLAG_concurrent_sweep && has_reservation) {
1114 ConcurrentSweep(isolate_group);
1115 can_verify = false;
1116 } else {
1117 SweepLarge();
1118 Sweep(/*exclusive*/ true);
1119 set_phase(kDone);
1120 can_verify = true;
1121 }
1122
1123 if (FLAG_verify_after_gc && can_verify) {
1124 heap_->VerifyGC("Verifying after sweeping", kForbidMarked);
1125 }
1126
1127 TryReserveForOOM();
1128
1129 // Make code pages read-only.
1130 if (finalize) WriteProtectCode(true);
1131
1132 int64_t end = OS::GetCurrentMonotonicMicros();
1133
1134 // Record signals for growth control. Include size of external allocations.
1135 page_space_controller_.EvaluateGarbageCollection(
1136 usage_before, GetCurrentUsage(), start, end);
1137
1138 if (FLAG_print_free_list_after_gc) {
1139 for (intptr_t i = 0; i < num_freelists_; i++) {
1140 OS::PrintErr("After GC: Freelist %" Pd "\n", i);
1141 freelists_[i].Print();
1142 }
1143 }
1144
1145 UpdateMaxUsed();
1146 if (heap_ != nullptr) {
1147 heap_->UpdateGlobalMaxUsed();
1148 }
1149}
1150
1151void PageSpace::SweepNew() {
1152 // TODO(rmacnak): Run in parallel with SweepExecutable.
1153 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "SweepNew");
1154
1155 GCSweeper sweeper;
1156 intptr_t free = 0;
1157 for (Page* page = heap_->new_space()->head(); page != nullptr;
1158 page = page->next()) {
1159 page->Release();
1160 free += sweeper.SweepNewPage(page);
1161 }
1162 heap_->new_space()->set_freed_in_words(free >> kWordSizeLog2);
1163}
1164
1165void PageSpace::SweepLarge() {
1166 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "SweepLarge");
1167
1168 GCSweeper sweeper;
1169 MutexLocker ml(&pages_lock_);
1170 while (sweep_large_ != nullptr) {
1171 Page* page = sweep_large_;
1172 sweep_large_ = page->next();
1173 page->set_next(nullptr);
1174 ASSERT(!page->is_executable());
1175
1176 ml.Unlock();
1177 intptr_t words_to_end = sweeper.SweepLargePage(page);
1178 intptr_t size;
1179 if (words_to_end == 0) {
1180 size = page->memory_->size();
1181 page->Deallocate();
1182 ml.Lock();
1183 IncreaseCapacityInWordsLocked(-(size >> kWordSizeLog2));
1184 } else {
1185 TruncateLargePage(page, words_to_end << kWordSizeLog2);
1186 ml.Lock();
1187 AddLargePageLocked(page);
1188 }
1189 }
1190}
1191
1192void PageSpace::Sweep(bool exclusive) {
1193 TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Sweep");
1194
1195 GCSweeper sweeper;
1196
1197 intptr_t shard = 0;
1198 const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
1199 if (exclusive) {
1200 for (intptr_t i = 0; i < num_shards; i++) {
1201 DataFreeList(i)->mutex()->Lock();
1202 }
1203 }
1204
1205 MutexLocker ml(&pages_lock_);
1206 while (sweep_regular_ != nullptr) {
1207 Page* page = sweep_regular_;
1208 sweep_regular_ = page->next();
1209 page->set_next(nullptr);
1210 ASSERT(!page->is_executable());
1211
1212 ml.Unlock();
1213 // Cycle through the shards round-robin so that free space is roughly
1214 // evenly distributed among the freelists and so roughly evenly available
1215 // to each scavenger worker.
1216 shard = (shard + 1) % num_shards;
1217 FreeList* freelist = DataFreeList(shard);
1218 if (!exclusive) {
1219 freelist->mutex()->Lock();
1220 }
1221 bool page_in_use = sweeper.SweepPage(page, freelist);
1222 if (!exclusive) {
1223 freelist->mutex()->Unlock();
1224 }
1225 intptr_t size;
1226 if (!page_in_use) {
1227 size = page->memory_->size();
1228 page->Deallocate();
1229 }
1230 ml.Lock();
1231
1232 if (page_in_use) {
1233 AddPageLocked(page);
1234 } else {
1235 IncreaseCapacityInWordsLocked(-(size >> kWordSizeLog2));
1236 }
1237 }
1238
1239 if (exclusive) {
1240 for (intptr_t i = 0; i < num_shards; i++) {
1241 DataFreeList(i)->mutex()->Unlock();
1242 }
1243 }
1244}
1245
1246void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) {
1247 // Start the concurrent sweeper task now.
1248 GCSweeper::SweepConcurrent(isolate_group);
1249}
1250
1251void PageSpace::Compact(Thread* thread) {
1252 GCCompactor compactor(thread, heap_);
1253 compactor.Compact(pages_, &freelists_[kDataFreelist], &pages_lock_);
1254
1255 if (FLAG_verify_after_gc) {
1256 heap_->VerifyGC("Verifying after compacting", kForbidMarked);
1257 }
1258}
1259
1260uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
1261 ASSERT(size >= kObjectAlignment);
1262 ASSERT(Utils::IsAligned(size, kObjectAlignment));
1263
1264 if (!IsAllocatableViaFreeLists(size)) {
1265 return TryAllocateDataLocked(freelist, size, kForceGrowth);
1266 }
1267
1268 intptr_t remaining = freelist->end() - freelist->top();
1269 if (UNLIKELY(remaining < size)) {
1270 FreeListElement* block = freelist->TryAllocateLargeLocked(size);
1271 if (block == nullptr) {
1272 // Allocating from a new page (if growth policy allows) will have the
1273 // side-effect of populating the freelist with a large block. The next
1274 // bump allocation request will have a chance to consume that block.
1275 return TryAllocateInFreshPage(size, freelist, false /* exec */,
1276 kForceGrowth, true /* is_locked*/);
1277 }
1278 intptr_t block_size = block->HeapSize();
1279 if (remaining > 0) {
1280 usage_.used_in_words -= (remaining >> kWordSizeLog2);
1281 freelist->FreeLocked(freelist->top(), remaining);
1282 }
1283 freelist->set_top(reinterpret_cast<uword>(block));
1284 freelist->set_end(freelist->top() + block_size);
1285 // To avoid accounting overhead during each bump pointer allocation, we add
1286 // the size of the whole bump area here and subtract the remaining size
1287 // when switching to a new area.
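// (As a result usage_ can briefly over-count by the unused tail of the current
// bump block; ReleaseBumpAllocation() subtracts that tail back out, as in
// ReleaseLock() and PageSpace::ReleaseBumpAllocation() above.)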
1288 usage_.used_in_words += (block_size >> kWordSizeLog2);
1289 remaining = block_size;
1290 }
1291 ASSERT(remaining >= size);
1292 uword result = freelist->top();
1293 freelist->set_top(result + size);
1294
1295// Note: Remaining block is unwalkable until MakeIterable is called.
1296#ifdef DEBUG
1297 if (freelist->top() < freelist->end()) {
1298 // Fail fast if we try to walk the remaining block.
1299 COMPILE_ASSERT(kIllegalCid == 0);
1300 *reinterpret_cast<uword*>(freelist->top()) = 0;
1301 }
1302#endif // DEBUG
1303 return result;
1304}
1305
1306uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
1307 uword result = freelist->TryAllocateSmallLocked(size);
1308 if (result != 0) {
1309 freelist->AddUnaccountedSize(size);
1310 return result;
1311 }
1312 return TryAllocateDataBumpLocked(freelist, size);
1313}
1314
1315uword PageSpace::AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size) {
1316 uword result = TryAllocateDataBumpLocked(freelist, size);
1317 if (result != 0) {
1318 return result;
1319 }
1320 OUT_OF_MEMORY();
1321}
1322
1323void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
1324 // Setup a Page so precompiled Instructions can be traversed.
1325 // Instructions are contiguous at [pointer, pointer + size). Page
1326 // expects to find objects at [memory->start() + ObjectStartOffset,
1327 // memory->end()).
1328 intptr_t offset = Page::OldObjectStartOffset();
1329 pointer = reinterpret_cast<void*>(reinterpret_cast<uword>(pointer) - offset);
1331 size += offset;
1332
1333 VirtualMemory* memory = VirtualMemory::ForImagePage(pointer, size);
1334 ASSERT(memory != nullptr);
1335 Page* page = reinterpret_cast<Page*>(malloc(sizeof(Page)));
1336 uword flags = Page::kImage;
1337 if (is_executable) {
1338 flags |= Page::kExecutable;
1339 }
1340 page->flags_ = flags;
1341 page->memory_ = memory;
1342 page->next_ = nullptr;
1343 page->forwarding_page_ = nullptr;
1344 page->card_table_ = nullptr;
1345 page->progress_bar_ = 0;
1346 page->owner_ = nullptr;
1347 page->top_ = memory->end();
1348 page->end_ = memory->end();
1349 page->survivor_end_ = 0;
1350 page->resolved_top_ = 0;
1351
1352 MutexLocker ml(&pages_lock_);
1353 page->next_ = image_pages_;
1354 image_pages_ = page;
1355}
1356
1357 bool PageSpace::IsObjectFromImagePages(ObjectPtr object) {
1358 uword object_addr = UntaggedObject::ToAddr(object);
1359 Page* image_page = image_pages_;
1360 while (image_page != nullptr) {
1361 if (image_page->Contains(object_addr)) {
1362 return true;
1363 }
1364 image_page = image_page->next();
1365 }
1366 return false;
1367}
1368
1369 PageSpaceController::PageSpaceController(Heap* heap,
1370                                          int heap_growth_ratio,
1371 int heap_growth_max,
1372 int garbage_collection_time_ratio)
1373 : heap_(heap),
1374 heap_growth_ratio_(heap_growth_ratio),
1375 desired_utilization_((100.0 - heap_growth_ratio) / 100.0),
1376 heap_growth_max_(heap_growth_max),
1377 garbage_collection_time_ratio_(garbage_collection_time_ratio),
1378 idle_gc_threshold_in_words_(0) {
1379 const intptr_t growth_in_pages = heap_growth_max / 2;
1380 RecordUpdate(last_usage_, last_usage_, growth_in_pages, "initial");
1381}
1382
1383 PageSpaceController::~PageSpaceController() {}
1384
1385 bool PageSpaceController::ReachedHardThreshold(SpaceUsage after) const {
1386 if (heap_growth_ratio_ == 100) {
1387 return false;
1388 }
1389 if ((heap_ != nullptr) && (heap_->mode() == Dart_PerformanceMode_Latency)) {
1390 return false;
1391 }
1392 return after.CombinedUsedInWords() > hard_gc_threshold_in_words_;
1393}
1394
1395 bool PageSpaceController::ReachedSoftThreshold(SpaceUsage after) const {
1396 if (heap_growth_ratio_ == 100) {
1397 return false;
1398 }
1399 if ((heap_ != nullptr) && (heap_->mode() == Dart_PerformanceMode_Latency)) {
1400 return false;
1401 }
1402 return after.CombinedUsedInWords() > soft_gc_threshold_in_words_;
1403}
1404
1405 bool PageSpaceController::ReachedIdleThreshold(SpaceUsage current) const {
1406 if (heap_growth_ratio_ == 100) {
1407 return false;
1408 }
1409 return current.CombinedUsedInWords() > idle_gc_threshold_in_words_;
1410}
1411
1412 void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before,
1413                                                     SpaceUsage after,
1414 int64_t start,
1415 int64_t end) {
1416 ASSERT(end >= start);
1417 history_.AddGarbageCollectionTime(start, end);
1418 const int gc_time_fraction = history_.GarbageCollectionTimeFraction();
1419
1420 // Assume garbage increases linearly with allocation:
1421 // G = kA, and estimate k from the previous cycle.
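// (For example, if 10 MB were allocated since the previous collection and this
// collection freed 5 MB of it, then k = 0.5: about half of future allocation
// is expected to become garbage by the next collection.)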
1422 const intptr_t allocated_since_previous_gc =
1423 before.CombinedUsedInWords() - last_usage_.CombinedUsedInWords();
1424 intptr_t grow_heap;
1425 if (allocated_since_previous_gc > 0) {
1426 intptr_t garbage =
1427 before.CombinedUsedInWords() - after.CombinedUsedInWords();
1428 // Garbage may be negative when the OOM reservation is refilled.
1429 garbage = Utils::Maximum(static_cast<intptr_t>(0), garbage);
1430 // It makes no sense to expect that each kb allocated will cause more than
1431 // one kb of garbage, so we clamp k at 1.0.
1432 const double k = Utils::Minimum(
1433 1.0, garbage / static_cast<double>(allocated_since_previous_gc));
1434
1435 const int garbage_ratio = static_cast<int>(k * 100);
1436
1437 // Define GC to be 'worthwhile' iff at least fraction t of heap is garbage.
1438 double t = 1.0 - desired_utilization_;
1439 // If we spend too much time in GC, strive for even more free space.
1440 if (gc_time_fraction > garbage_collection_time_ratio_) {
1441 t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0;
1442 }
1443
1444 // Number of pages we can allocate and still be within the desired growth
1445 // ratio.
1446 const intptr_t grow_pages =
1447 (static_cast<intptr_t>(after.CombinedUsedInWords() /
1448 desired_utilization_) -
1449 (after.CombinedUsedInWords())) /
1450     kPageSizeInWords;
1451 if (garbage_ratio == 0) {
1452 // No garbage in the previous cycle so it would be hard to compute a
1453 // grow_heap size based on estimated garbage so we use growth ratio
1454 // heuristics instead.
1455 grow_heap =
1456 Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
1457 } else if (garbage_collection_time_ratio_ == 0) {
1458 // Exclude time from the growth policy decision for --deterministic.
1459 grow_heap =
1460 Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
1461 } else {
1462 // Find minimum 'grow_heap' such that after increasing capacity by
1463 // 'grow_heap' pages and filling them, we expect a GC to be worthwhile.
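// (The loop below is a binary search over [0, heap_growth_max_]: a larger
// growth allows more allocation before the next GC and, with garbage assumed
// proportional to allocation, a larger estimated garbage fraction at that
// point, so the search converges on the smallest worthwhile growth.)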
1464 intptr_t max = heap_growth_max_;
1465 intptr_t min = 0;
1466 intptr_t local_grow_heap = 0;
1467 while (min < max) {
1468 local_grow_heap = (max + min) / 2;
1469 const intptr_t limit =
1470 after.CombinedUsedInWords() + (local_grow_heap * kPageSizeInWords);
1471 const intptr_t allocated_before_next_gc =
1472 limit - (after.CombinedUsedInWords());
1473 const double estimated_garbage = k * allocated_before_next_gc;
1474 if (t <= estimated_garbage / limit) {
1475 max = local_grow_heap - 1;
1476 } else {
1477 min = local_grow_heap + 1;
1478 }
1479 }
1480 local_grow_heap = (max + min) / 2;
1481 grow_heap = local_grow_heap;
1482 ASSERT(grow_heap >= 0);
1483 // If we are going to grow by heap_grow_max_ then ensure that we
1484 // will be growing the heap at least by the growth ratio heuristics.
1485 if (grow_heap >= heap_growth_max_) {
1486 grow_heap = Utils::Maximum(grow_pages, grow_heap);
1487 }
1488 }
1489 } else {
1490 grow_heap = 0;
1491 }
1492 last_usage_ = after;
1493
1494 intptr_t max_capacity_in_words = heap_->old_space()->max_capacity_in_words_;
1495 if (max_capacity_in_words != 0) {
1496 ASSERT(grow_heap >= 0);
1497 // Fraction of asymptote used.
1498 double f = static_cast<double>(after.CombinedUsedInWords() +
1499 (kPageSizeInWords * grow_heap)) /
1500 static_cast<double>(max_capacity_in_words);
1501 ASSERT(f >= 0.0);
1502 // Increase weight at the high end.
1503 f = f * f;
1504 // Fraction of asymptote available.
1505 f = 1.0 - f;
1506 ASSERT(f <= 1.0);
1507 // Discount growth more the closer we get to the desired asymptote.
1508 grow_heap = static_cast<intptr_t>(grow_heap * f);
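// (E.g. at 50% of max capacity f = 1 - 0.25 = 0.75, so 75% of the computed
// growth is kept; at 90% of capacity only about 19% is.)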
1509 // Minimum growth step after reaching the asymptote.
1510 intptr_t min_step = (2 * MB) / kPageSize;
1511 grow_heap = Utils::Maximum(min_step, grow_heap);
1512 }
1513
1514 RecordUpdate(before, after, grow_heap, "gc");
1515}
1516
1517 void PageSpaceController::EvaluateAfterLoading(SpaceUsage after) {
1518 // Number of pages we can allocate and still be within the desired growth
1519 // ratio.
1520 intptr_t growth_in_pages;
1521 if (desired_utilization_ == 0.0) {
1522 growth_in_pages = heap_growth_max_;
1523 } else {
1524 growth_in_pages = (static_cast<intptr_t>(after.CombinedUsedInWords() /
1525 desired_utilization_) -
1526 (after.CombinedUsedInWords())) /
1527                        kPageSizeInWords;
1528 }
1529
1530 // Apply growth cap.
1531 growth_in_pages =
1532 Utils::Minimum(static_cast<intptr_t>(heap_growth_max_), growth_in_pages);
1533
1534 RecordUpdate(after, after, growth_in_pages, "loaded");
1535}
1536
1537void PageSpaceController::RecordUpdate(SpaceUsage before,
1538 SpaceUsage after,
1539 intptr_t growth_in_pages,
1540 const char* reason) {
1541 // Save final threshold compared before growing.
1542 intptr_t threshold =
1543 after.CombinedUsedInWords() + (kPageSizeInWords * growth_in_pages);
1544
1545 bool concurrent_mark = FLAG_concurrent_mark && (FLAG_marker_tasks != 0);
1546 if (concurrent_mark) {
1547 soft_gc_threshold_in_words_ = threshold;
1548 hard_gc_threshold_in_words_ = kIntptrMax / kWordSize;
1549 } else {
1550 soft_gc_threshold_in_words_ = kIntptrMax / kWordSize;
1551 hard_gc_threshold_in_words_ = threshold;
1552 }
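// (With concurrent marking enabled, crossing the threshold only starts
// background marking (soft) and the stop-the-world trigger is effectively
// unlimited; without it, the same threshold is a hard trigger for a
// synchronous mark-sweep.)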
1553
1554 // Set a tight idle threshold.
1555 idle_gc_threshold_in_words_ =
1556 after.CombinedUsedInWords() + (2 * kPageSizeInWords);
1557
1558#if defined(SUPPORT_TIMELINE)
1559 Thread* thread = Thread::Current();
1560 if (thread != nullptr) {
1561 TIMELINE_FUNCTION_GC_DURATION(thread, "UpdateGrowthLimit");
1562 tbes.SetNumArguments(6);
1563 tbes.CopyArgument(0, "Reason", reason);
1564 tbes.FormatArgument(1, "Before.CombinedUsed (kB)", "%" Pd "",
1565                     RoundWordsToKB(before.CombinedUsedInWords()));
1566 tbes.FormatArgument(2, "After.CombinedUsed (kB)", "%" Pd "",
1567                     RoundWordsToKB(after.CombinedUsedInWords()));
1568 tbes.FormatArgument(3, "Hard Threshold (kB)", "%" Pd "",
1569 RoundWordsToKB(hard_gc_threshold_in_words_));
1570 tbes.FormatArgument(4, "Soft Threshold (kB)", "%" Pd "",
1571 RoundWordsToKB(soft_gc_threshold_in_words_));
1572 tbes.FormatArgument(5, "Idle Threshold (kB)", "%" Pd "",
1573 RoundWordsToKB(idle_gc_threshold_in_words_));
1574 }
1575#endif
1576
1577 if (FLAG_log_growth || FLAG_verbose_gc) {
1578 THR_Print("%s: hard_threshold=%" Pd "MB, soft_threshold=%" Pd
1579 "MB, idle_threshold=%" Pd "MB, reason=%s\n",
1580 heap_->isolate_group()->source()->name,
1581 RoundWordsToMB(hard_gc_threshold_in_words_),
1582 RoundWordsToMB(soft_gc_threshold_in_words_),
1583 RoundWordsToMB(idle_gc_threshold_in_words_), reason);
1584 }
1585}
1586
1587 void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start,
1588                                                                  int64_t end) {
1589 Entry entry;
1590 entry.start = start;
1591 entry.end = end;
1592 history_.Add(entry);
1593}
1594
1595 int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() {
1596 int64_t gc_time = 0;
1597 int64_t total_time = 0;
1598 for (int i = 0; i < history_.Size() - 1; i++) {
1599 Entry current = history_.Get(i);
1600 Entry previous = history_.Get(i + 1);
1601 gc_time += current.end - current.start;
1602 total_time += current.end - previous.end;
1603 }
1604 if (total_time == 0) {
1605 return 0;
1606 } else {
1607 ASSERT(total_time >= gc_time);
1608 int result = static_cast<int>(
1609 (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100);
1610 return result;
1611 }
1612}
1613
1614} // namespace dart