Flutter Engine
pages.cc
1// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/pages.h"
6
7#include "platform/assert.h"
10#include "vm/dart.h"
11#include "vm/heap/become.h"
12#include "vm/heap/compactor.h"
14#include "vm/heap/marker.h"
15#include "vm/heap/safepoint.h"
16#include "vm/heap/sweeper.h"
17#include "vm/lockers.h"
18#include "vm/log.h"
19#include "vm/object.h"
20#include "vm/object_set.h"
21#include "vm/os_thread.h"
23#include "vm/virtual_memory.h"
24
25namespace dart {
26
27DEFINE_FLAG(int,
28 old_gen_growth_space_ratio,
29 20,
30 "The desired maximum percentage of free space after old gen GC");
31DEFINE_FLAG(int,
32 old_gen_growth_time_ratio,
33 3,
34 "The desired maximum percentage of time spent in old gen GC");
35DEFINE_FLAG(int,
36 old_gen_growth_rate,
37 280,
38 "The max number of pages the old generation can grow at a time");
39DEFINE_FLAG(bool,
40 print_free_list_before_gc,
41 false,
42 "Print free list statistics before a GC");
43DEFINE_FLAG(bool,
44 print_free_list_after_gc,
45 false,
46 "Print free list statistics after a GC");
47DEFINE_FLAG(bool, log_growth, false, "Log PageSpace growth policy decisions.");
48
49// The initial estimate of how many words we can mark per microsecond (usage
50// before / mark-sweep time). This is a conservative value observed running
51// Flutter on a Nexus 4. After the first mark-sweep, we instead use a value
52// based on the device's actual speed.
53static constexpr intptr_t kConservativeInitialMarkSpeed = 20;
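For orientation, here is a minimal standalone sketch (not part of pages.cc; the function names and parameters are invented for illustration) of how a words-per-microsecond estimate such as kConservativeInitialMarkSpeed feeds an idle-time decision: divide the words still to be marked by the estimated speed and compare against the remaining idle budget.

#include <cstdint>

// Estimated time to mark `words_to_mark` words at `words_per_micro` words/us.
inline int64_t EstimateMarkMicros(int64_t words_to_mark,
                                  int64_t words_per_micro) {
  if (words_per_micro <= 0) words_per_micro = 1;  // Avoid division by zero.
  return words_to_mark / words_per_micro;
}

// Start marking only if it is expected to finish before the idle deadline.
inline bool FitsInIdleBudget(int64_t now_micros,
                             int64_t deadline_micros,
                             int64_t words_to_mark,
                             int64_t words_per_micro) {
  return now_micros + EstimateMarkMicros(words_to_mark, words_per_micro) <=
         deadline_micros;
}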
54
55PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
56 : heap_(heap),
57 num_freelists_(Utils::Maximum(FLAG_scavenger_tasks, 1) + 1),
58 freelists_(new FreeList[num_freelists_]),
59 pages_lock_(),
60 max_capacity_in_words_(max_capacity_in_words),
61 usage_(),
62 allocated_black_in_words_(0),
63 tasks_lock_(),
64 tasks_(0),
65 concurrent_marker_tasks_(0),
66 concurrent_marker_tasks_active_(0),
67 pause_concurrent_marking_(0),
68 phase_(kDone),
69#if defined(DEBUG)
70 iterating_thread_(nullptr),
71#endif
72 page_space_controller_(heap,
73 FLAG_old_gen_growth_space_ratio,
74 FLAG_old_gen_growth_rate,
75 FLAG_old_gen_growth_time_ratio),
76 marker_(nullptr),
77 gc_time_micros_(0),
78 collections_(0),
79 mark_words_per_micro_(kConservativeInitialMarkSpeed),
80 enable_concurrent_mark_(FLAG_concurrent_mark) {
81 ASSERT(heap != nullptr);
82
83 // We aren't holding the lock but no one can reference us yet.
84 UpdateMaxCapacityLocked();
85 UpdateMaxUsed();
86
87 for (intptr_t i = 0; i < num_freelists_; i++) {
88 freelists_[i].Reset();
89 }
90
91 TryReserveForOOM();
92}
93
94PageSpace::~PageSpace() {
95 {
96 MonitorLocker ml(tasks_lock());
97 AssistTasks(&ml);
98 while (tasks() > 0) {
99 ml.Wait();
100 }
101 }
102 FreePages(pages_);
103 FreePages(exec_pages_);
104 FreePages(large_pages_);
105 FreePages(image_pages_);
106 ASSERT(marker_ == nullptr);
107 delete[] freelists_;
108}
109
110intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
111 intptr_t page_size = Utils::RoundUp(size + Page::OldObjectStartOffset(),
112                                     VirtualMemory::PageSize());
113 return page_size >> kWordSizeLog2;
114}
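As a worked example of the rounding above, the following standalone sketch assumes a 4 KB virtual-memory page and a 32-byte object-start offset (both assumptions for illustration only; the real values come from VirtualMemory and Page):

#include <cassert>
#include <cstdint>

// Round `size` up to a multiple of `alignment` (a power of two).
constexpr intptr_t RoundUpSketch(intptr_t size, intptr_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const intptr_t kAssumedVMPageSize = 4096;       // assumption for illustration
  const intptr_t kAssumedObjectStartOffset = 32;  // assumption for illustration
  const intptr_t kWordSize = sizeof(void*);
  // A 10000-byte object plus the page header area is 10032 bytes, which rounds
  // up to 12288 bytes, i.e. three 4 KB pages.
  intptr_t page_size =
      RoundUpSketch(10000 + kAssumedObjectStartOffset, kAssumedVMPageSize);
  assert(page_size == 12288);
  intptr_t page_size_in_words = page_size / kWordSize;
  (void)page_size_in_words;
  return 0;
}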
115
116void PageSpace::AddPageLocked(Page* page) {
117 if (pages_ == nullptr) {
118 pages_ = page;
119 } else {
120 pages_tail_->set_next(page);
121 }
122 pages_tail_ = page;
123}
124
125void PageSpace::AddLargePageLocked(Page* page) {
126 if (large_pages_ == nullptr) {
127 large_pages_ = page;
128 } else {
129 large_pages_tail_->set_next(page);
130 }
131 large_pages_tail_ = page;
132}
133
134void PageSpace::AddExecPageLocked(Page* page) {
135 if (exec_pages_ == nullptr) {
136 exec_pages_ = page;
137 } else {
138 if (FLAG_write_protect_code) {
139 exec_pages_tail_->WriteProtect(false);
140 }
141 exec_pages_tail_->set_next(page);
142 if (FLAG_write_protect_code) {
143 exec_pages_tail_->WriteProtect(true);
144 }
145 }
146 exec_pages_tail_ = page;
147}
148
149void PageSpace::RemovePageLocked(Page* page, Page* previous_page) {
150 if (previous_page != nullptr) {
151 previous_page->set_next(page->next());
152 } else {
153 pages_ = page->next();
154 }
155 if (page == pages_tail_) {
156 pages_tail_ = previous_page;
157 }
158}
159
160void PageSpace::RemoveLargePageLocked(Page* page, Page* previous_page) {
161 if (previous_page != nullptr) {
162 previous_page->set_next(page->next());
163 } else {
164 large_pages_ = page->next();
165 }
166 if (page == large_pages_tail_) {
167 large_pages_tail_ = previous_page;
168 }
169}
170
171void PageSpace::RemoveExecPageLocked(Page* page, Page* previous_page) {
172 if (previous_page != nullptr) {
173 previous_page->set_next(page->next());
174 } else {
175 exec_pages_ = page->next();
176 }
177 if (page == exec_pages_tail_) {
178 exec_pages_tail_ = previous_page;
179 }
180}
181
182Page* PageSpace::AllocatePage(bool is_exec, bool link) {
183 {
184 MutexLocker ml(&pages_lock_);
185 if (!CanIncreaseCapacityInWordsLocked(kPageSizeInWords)) {
186 return nullptr;
187 }
188 IncreaseCapacityInWordsLocked(kPageSizeInWords);
189 }
190 uword flags = 0;
191 if (is_exec) {
192 flags |= Page::kExecutable;
193 }
194 if ((heap_ != nullptr) && (heap_->is_vm_isolate())) {
195 flags |= Page::kVMIsolate;
196 }
197 Page* page = Page::Allocate(kPageSize, flags);
198 if (page == nullptr) {
199 RELEASE_ASSERT(!FLAG_abort_on_oom);
200 IncreaseCapacityInWords(-kPageSizeInWords);
201 return nullptr;
202 }
203
204 MutexLocker ml(&pages_lock_);
205 if (link) {
206 if (is_exec) {
207 AddExecPageLocked(page);
208 } else {
209 AddPageLocked(page);
210 }
211 }
212
213 page->set_object_end(page->memory_->end());
214 if (!is_exec && (heap_ != nullptr) && !heap_->is_vm_isolate()) {
215 page->AllocateForwardingPage();
216 }
217
218 if (is_exec) {
219 UnwindingRecords::RegisterExecutablePage(page);
220 }
221 return page;
222}
223
224Page* PageSpace::AllocateLargePage(intptr_t size, bool is_exec) {
225 const intptr_t page_size_in_words = LargePageSizeInWordsFor(
226 size + (is_exec ? UnwindingRecordsPlatform::SizeInBytes() : 0));
227 {
228 MutexLocker ml(&pages_lock_);
229 if (!CanIncreaseCapacityInWordsLocked(page_size_in_words)) {
230 return nullptr;
231 }
232 IncreaseCapacityInWordsLocked(page_size_in_words);
233 }
234 uword flags = Page::kLarge;
235 if (is_exec) {
236 flags |= Page::kExecutable;
237 }
238 if ((heap_ != nullptr) && (heap_->is_vm_isolate())) {
239 flags |= Page::kVMIsolate;
240 }
241 Page* page = Page::Allocate(page_size_in_words << kWordSizeLog2, flags);
242
243 MutexLocker ml(&pages_lock_);
244 if (page == nullptr) {
245 IncreaseCapacityInWordsLocked(-page_size_in_words);
246 return nullptr;
247 } else {
248 intptr_t actual_size_in_words = page->memory_->size() >> kWordSizeLog2;
249 if (actual_size_in_words != page_size_in_words) {
250 IncreaseCapacityInWordsLocked(actual_size_in_words - page_size_in_words);
251 }
252 }
253 if (is_exec) {
254 AddExecPageLocked(page);
255 } else {
256 AddLargePageLocked(page);
257 }
258
259 if (is_exec) {
260 UnwindingRecords::RegisterExecutablePage(page);
261 }
262
263 // Only one object in this page (at least until Array::MakeFixedLength
264 // is called).
265 page->set_object_end(page->object_start() + size);
266 return page;
267}
268
269void PageSpace::TruncateLargePage(Page* page,
270 intptr_t new_object_size_in_bytes) {
271 const intptr_t old_object_size_in_bytes =
272 page->object_end() - page->object_start();
273 ASSERT(new_object_size_in_bytes <= old_object_size_in_bytes);
274 ASSERT(!page->is_executable());
275 const intptr_t new_page_size_in_words =
276 LargePageSizeInWordsFor(new_object_size_in_bytes);
277 VirtualMemory* memory = page->memory_;
278 const intptr_t old_page_size_in_words = (memory->size() >> kWordSizeLog2);
279 if (new_page_size_in_words < old_page_size_in_words) {
280 memory->Truncate(new_page_size_in_words << kWordSizeLog2);
281 IncreaseCapacityInWords(new_page_size_in_words - old_page_size_in_words);
282 page->set_object_end(page->object_start() + new_object_size_in_bytes);
283 }
284}
285
286void PageSpace::FreePage(Page* page, Page* previous_page) {
287 bool is_exec = page->is_executable();
288 {
289 MutexLocker ml(&pages_lock_);
290 IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
291 if (is_exec) {
292 RemoveExecPageLocked(page, previous_page);
293 } else {
294 RemovePageLocked(page, previous_page);
295 }
296 }
297 if (is_exec && !page->is_image()) {
298 UnwindingRecords::UnregisterExecutablePage(page);
299 }
300 page->Deallocate();
301}
302
303void PageSpace::FreeLargePage(Page* page, Page* previous_page) {
304 ASSERT(!page->is_executable());
305 MutexLocker ml(&pages_lock_);
306 IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
307 RemoveLargePageLocked(page, previous_page);
308 page->Deallocate();
309}
310
311void PageSpace::FreePages(Page* pages) {
312 Page* page = pages;
313 while (page != nullptr) {
314 Page* next = page->next();
315 if (page->is_executable() && !page->is_image()) {
316 UnwindingRecords::UnregisterExecutablePage(page);
317 }
318 page->Deallocate();
319 page = next;
320 }
321}
322
323uword PageSpace::TryAllocateInFreshPage(intptr_t size,
324 FreeList* freelist,
325 bool is_exec,
326 GrowthPolicy growth_policy,
327 bool is_locked) {
328 ASSERT(Heap::IsAllocatableViaFreeLists(size));
329
330 if (growth_policy != kForceGrowth) {
331 ASSERT(!Thread::Current()->force_growth());
332 heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace,
333                               kPageSize);
334 }
335
336 uword result = 0;
337 SpaceUsage after_allocation = GetCurrentUsage();
338 after_allocation.used_in_words += size >> kWordSizeLog2;
339 // Can we grow by one page?
340 after_allocation.capacity_in_words += kPageSizeInWords;
341 if (growth_policy == kForceGrowth ||
342 !page_space_controller_.ReachedHardThreshold(after_allocation)) {
343 Page* page = AllocatePage(is_exec);
344 if (page == nullptr) {
345 return 0;
346 }
347 // Start of the newly allocated page is the allocated object.
348 result = page->object_start();
349 // Note: usage_.capacity_in_words is increased by AllocatePage.
351 usage_.used_in_words += (size >> kWordSizeLog2);
352 // Enqueue the remainder in the free list.
353 uword free_start = result + size;
354 intptr_t free_size = page->object_end() - free_start;
355 if (free_size > 0) {
356 if (is_locked) {
357 freelist->FreeLocked(free_start, free_size);
358 } else {
359 freelist->Free(free_start, free_size);
360 }
361 }
362 }
363 return result;
364}
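A condensed sketch of the decision made above (standalone; SpaceUsageSketch and the threshold parameter are illustrative stand-ins, not the VM's types): the prospective usage after taking one more page is computed first, and the page is only allocated if growth is forced or the hard threshold would not be crossed.

#include <cstdint>

struct SpaceUsageSketch {
  intptr_t used_in_words = 0;
  intptr_t capacity_in_words = 0;
};

enum GrowthPolicySketch { kControlGrowthSketch, kForceGrowthSketch };

// Returns true if a fresh page of `page_size_in_words` may be allocated for an
// object of `size_in_words`, given a hard threshold on used words.
bool MayAllocateFreshPage(SpaceUsageSketch current,
                          intptr_t size_in_words,
                          intptr_t page_size_in_words,
                          intptr_t hard_threshold_in_words,
                          GrowthPolicySketch policy) {
  SpaceUsageSketch after = current;
  after.used_in_words += size_in_words;
  after.capacity_in_words += page_size_in_words;  // Growing by one page.
  return policy == kForceGrowthSketch ||
         after.used_in_words <= hard_threshold_in_words;
}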
365
366uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
367 bool is_exec,
368 GrowthPolicy growth_policy) {
369 ASSERT(!Heap::IsAllocatableViaFreeLists(size));
370
371 if (growth_policy != kForceGrowth) {
372 ASSERT(!Thread::Current()->force_growth());
373 heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace, size);
374 }
375
376 intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
377 if ((page_size_in_words << kWordSizeLog2) < size) {
378 // On overflow we fail to allocate.
379 return 0;
380 }
381
382 uword result = 0;
383 SpaceUsage after_allocation = GetCurrentUsage();
384 after_allocation.used_in_words += size >> kWordSizeLog2;
385 after_allocation.capacity_in_words += page_size_in_words;
386 if (growth_policy == kForceGrowth ||
387 !page_space_controller_.ReachedHardThreshold(after_allocation)) {
388 Page* page = AllocateLargePage(size, is_exec);
389 if (page != nullptr) {
390 result = page->object_start();
391 // Note: usage_.capacity_in_words is increased by AllocateLargePage.
393 usage_.used_in_words += (size >> kWordSizeLog2);
394 }
395 }
396 return result;
397}
398
399uword PageSpace::TryAllocateInternal(intptr_t size,
400 FreeList* freelist,
401 bool is_exec,
402 GrowthPolicy growth_policy,
403 bool is_protected,
404 bool is_locked) {
405 ASSERT(size >= kObjectAlignment);
406 ASSERT(Utils::IsAligned(size, kObjectAlignment));
407 uword result = 0;
408 if (Heap::IsAllocatableViaFreeLists(size)) {
409 if (is_locked) {
410 result = freelist->TryAllocateLocked(size, is_protected);
411 } else {
412 result = freelist->TryAllocate(size, is_protected);
413 }
414 if (result == 0) {
415 result = TryAllocateInFreshPage(size, freelist, is_exec, growth_policy,
416 is_locked);
417 // usage_ is updated by the call above.
418 } else {
419 if (!is_protected) {
421 }
422 usage_.used_in_words += (size >> kWordSizeLog2);
423 }
424 } else {
425 result = TryAllocateInFreshLargePage(size, is_exec, growth_policy);
426 // usage_ is updated by the call above.
427 }
429 return result;
430}
431
432void PageSpace::AcquireLock(FreeList* freelist) {
433 freelist->mutex()->Lock();
434}
435
436void PageSpace::ReleaseLock(FreeList* freelist) {
437 usage_.used_in_words +=
438     (freelist->TakeUnaccountedSizeLocked() >> kWordSizeLog2);
439 freelist->mutex()->Unlock();
440 usage_.used_in_words -= (freelist->ReleaseBumpAllocation() >> kWordSizeLog2);
441}
442
443void PageSpace::PauseConcurrentMarking() {
444 MonitorLocker ml(&tasks_lock_);
445 ASSERT(pause_concurrent_marking_.load() == 0);
446 pause_concurrent_marking_.store(1);
447 while (concurrent_marker_tasks_active_ != 0) {
448 ml.Wait();
449 }
450}
451
452void PageSpace::ResumeConcurrentMarking() {
453 MonitorLocker ml(&tasks_lock_);
454 ASSERT(pause_concurrent_marking_.load() != 0);
455 pause_concurrent_marking_.store(0);
456 ml.NotifyAll();
457}
458
459void PageSpace::YieldConcurrentMarking() {
460 MonitorLocker ml(&tasks_lock_);
461 if (pause_concurrent_marking_.load() != 0) {
463 concurrent_marker_tasks_active_--;
464 if (concurrent_marker_tasks_active_ == 0) {
465 ml.NotifyAll();
466 }
467 while (pause_concurrent_marking_.load() != 0) {
468 ml.Wait();
469 }
470 concurrent_marker_tasks_active_++;
471 }
472}
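The three functions above form a simple pause/resume handshake between a coordinator and concurrent marker tasks. A self-contained analogue using standard-library primitives (a sketch, not the VM's Monitor/MonitorLocker API) looks like this:

#include <condition_variable>
#include <mutex>

class PauseGate {
 public:
  // Coordinator: request a pause and wait until no worker is active.
  void Pause() {
    std::unique_lock<std::mutex> lock(mutex_);
    paused_ = true;
    cv_.wait(lock, [&] { return active_workers_ == 0; });
  }

  // Coordinator: lift the pause and wake parked workers.
  void Resume() {
    std::lock_guard<std::mutex> lock(mutex_);
    paused_ = false;
    cv_.notify_all();
  }

  // Worker: called periodically; if a pause is requested, park here.
  void Yield() {
    std::unique_lock<std::mutex> lock(mutex_);
    if (!paused_) return;
    if (--active_workers_ == 0) cv_.notify_all();
    cv_.wait(lock, [&] { return !paused_; });
    ++active_workers_;
  }

  void WorkerStarted() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++active_workers_;
  }

  void WorkerFinished() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (--active_workers_ == 0) cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int active_workers_ = 0;
  bool paused_ = false;
};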
473
474class BasePageIterator : ValueObject {
475 public:
476 explicit BasePageIterator(const PageSpace* space) : space_(space) {}
477
478 Page* page() const { return page_; }
479
480 bool Done() const { return page_ == nullptr; }
481
482 void Advance() {
483 ASSERT(!Done());
484 page_ = page_->next();
485 if ((page_ == nullptr) && (list_ == kRegular)) {
486 list_ = kExecutable;
487 page_ = space_->exec_pages_;
488 }
489 if ((page_ == nullptr) && (list_ == kExecutable)) {
490 list_ = kLarge;
491 page_ = space_->large_pages_;
492 }
493 if ((page_ == nullptr) && (list_ == kLarge)) {
494 list_ = kImage;
495 page_ = space_->image_pages_;
496 }
497 ASSERT((page_ != nullptr) || (list_ == kImage));
498 }
499
500 protected:
501 enum List { kRegular, kExecutable, kLarge, kImage };
502
503 void Initialize() {
504 list_ = kRegular;
505 page_ = space_->pages_;
506 if (page_ == nullptr) {
507 list_ = kExecutable;
508 page_ = space_->exec_pages_;
509 if (page_ == nullptr) {
510 list_ = kLarge;
511 page_ = space_->large_pages_;
512 if (page_ == nullptr) {
513 list_ = kImage;
514 page_ = space_->image_pages_;
515 }
516 }
517 }
518 }
519
520 const PageSpace* space_ = nullptr;
521 List list_;
522 Page* page_ = nullptr;
523};
524
525// Provides unsafe access to all pages. Assumes pages are walkable.
526class UnsafeExclusivePageIterator : public BasePageIterator {
527 public:
528 explicit UnsafeExclusivePageIterator(const PageSpace* space)
529 : BasePageIterator(space) {
530 Initialize();
531 }
532};
533
534// Provides exclusive access to all pages, and ensures they are walkable.
535class ExclusivePageIterator : public BasePageIterator {
536 public:
537 explicit ExclusivePageIterator(const PageSpace* space)
538 : BasePageIterator(space), ml_(&space->pages_lock_) {
539 space_->MakeIterable();
540 Initialize();
541 }
542
543 private:
544 MutexLocker ml_;
545 NoSafepointScope no_safepoint;
546};
547
548// Provides exclusive access to code pages, and ensures they are walkable.
549// NOTE: This does not iterate over large pages which can contain code.
550class ExclusiveCodePageIterator : ValueObject {
551 public:
552 explicit ExclusiveCodePageIterator(const PageSpace* space)
553 : space_(space), ml_(&space->pages_lock_) {
554 space_->MakeIterable();
555 page_ = space_->exec_pages_;
556 }
557 Page* page() const { return page_; }
558 bool Done() const { return page_ == nullptr; }
559 void Advance() {
560 ASSERT(!Done());
561 page_ = page_->next();
562 }
563
564 private:
565 const PageSpace* space_;
566 MutexLocker ml_;
567 NoSafepointScope no_safepoint;
568 Page* page_;
569};
570
571void PageSpace::MakeIterable() const {
572 // Assert not called from concurrent sweeper task.
573 // TODO(koda): Use thread/task identity when implemented.
574 ASSERT(IsolateGroup::Current()->heap() != nullptr);
575 for (intptr_t i = 0; i < num_freelists_; i++) {
576 freelists_[i].MakeIterable();
577 }
578}
579
580void PageSpace::ReleaseBumpAllocation() {
581 for (intptr_t i = 0; i < num_freelists_; i++) {
582 size_t leftover = freelists_[i].ReleaseBumpAllocation();
583 usage_.used_in_words -= (leftover >> kWordSizeLog2);
584 }
585}
586
587void PageSpace::AbandonMarkingForShutdown() {
588 delete marker_;
589 marker_ = nullptr;
590}
591
592void PageSpace::UpdateMaxCapacityLocked() {
593 ASSERT(heap_ != nullptr);
594 ASSERT(heap_->isolate_group() != nullptr);
595 auto isolate_group = heap_->isolate_group();
596 isolate_group->GetHeapOldCapacityMaxMetric()->SetValue(
597 static_cast<int64_t>(usage_.capacity_in_words) * kWordSize);
598}
599
600void PageSpace::UpdateMaxUsed() {
601 ASSERT(heap_ != nullptr);
602 ASSERT(heap_->isolate_group() != nullptr);
603 auto isolate_group = heap_->isolate_group();
604 isolate_group->GetHeapOldUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
605}
606
607bool PageSpace::Contains(uword addr) const {
608 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
609 if (it.page()->Contains(addr)) {
610 return true;
611 }
612 }
613 return false;
614}
615
616bool PageSpace::ContainsUnsafe(uword addr) const {
617 for (UnsafeExclusivePageIterator it(this); !it.Done(); it.Advance()) {
618 if (it.page()->Contains(addr)) {
619 return true;
620 }
621 }
622 return false;
623}
624
625bool PageSpace::CodeContains(uword addr) const {
626 for (ExclusiveCodePageIterator it(this); !it.Done(); it.Advance()) {
627 if (it.page()->Contains(addr)) {
628 return true;
629 }
630 }
631 return false;
632}
633
634bool PageSpace::DataContains(uword addr) const {
635 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
636 if (!it.page()->is_executable() && it.page()->Contains(addr)) {
637 return true;
638 }
639 }
640 return false;
641}
642
643void PageSpace::AddRegionsToObjectSet(ObjectSet* set) const {
644 ASSERT((pages_ != nullptr) || (exec_pages_ != nullptr) ||
645 (large_pages_ != nullptr));
646 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
647 set->AddRegion(it.page()->object_start(), it.page()->object_end());
648 }
649}
650
651void PageSpace::VisitObjects(ObjectVisitor* visitor) const {
652 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
653 it.page()->VisitObjects(visitor);
654 }
655}
656
657void PageSpace::VisitObjectsNoImagePages(ObjectVisitor* visitor) const {
658 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
659 if (!it.page()->is_image()) {
660 it.page()->VisitObjects(visitor);
661 }
662 }
663}
664
665void PageSpace::VisitObjectsImagePages(ObjectVisitor* visitor) const {
666 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
667 if (it.page()->is_image()) {
668 it.page()->VisitObjects(visitor);
669 }
670 }
671}
672
673void PageSpace::VisitObjectsUnsafe(ObjectVisitor* visitor) const {
674 for (UnsafeExclusivePageIterator it(this); !it.Done(); it.Advance()) {
675 it.page()->VisitObjectsUnsafe(visitor);
676 }
677}
678
679void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
680 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
681 it.page()->VisitObjectPointers(visitor);
682 }
683}
684
685void PageSpace::VisitRememberedCards(
686     PredicateObjectPointerVisitor* visitor) const {
687 ASSERT(Thread::Current()->OwnsGCSafepoint() ||
688 (Thread::Current()->task_kind() == Thread::kScavengerTask));
689
690 // Wait for the sweeper to finish mutating the large page list.
691 {
692 MonitorLocker ml(tasks_lock());
693 while (phase() == kSweepingLarge) {
694 ml.Wait(); // No safepoint check.
695 }
696 }
697
698 // Large pages may be added concurrently due to promotion in another scavenge
699 // worker, so terminate the traversal when we hit the tail we saw while
700 // holding the pages lock, instead of at nullptr, otherwise we are racing when
701 // we read Page::next_ and Page::remembered_cards_.
702 Page* page;
703 Page* tail;
704 {
705 MutexLocker ml(&pages_lock_);
706 page = large_pages_;
707 tail = large_pages_tail_;
708 }
709 while (page != nullptr) {
710 page->VisitRememberedCards(visitor);
711 if (page == tail) break;
712 page = page->next();
713 }
714}
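The tail-snapshot pattern used above can be shown in isolation. The sketch below uses a hypothetical singly linked Node list rather than the VM's Page type: take the head and tail under the lock, then walk without the lock and stop at the remembered tail so nodes appended concurrently are ignored.

#include <mutex>

struct Node {
  Node* next = nullptr;
  int payload = 0;
};

struct ConcurrentList {
  std::mutex lock;
  Node* head = nullptr;
  Node* tail = nullptr;
};

// Visit only the nodes that were present when the snapshot was taken.
template <typename Visitor>
void VisitSnapshot(ConcurrentList* list, Visitor&& visit) {
  Node* node;
  Node* tail;
  {
    std::lock_guard<std::mutex> guard(list->lock);
    node = list->head;
    tail = list->tail;
  }
  while (node != nullptr) {
    visit(node);
    if (node == tail) break;  // Never read `next` past the snapshotted tail.
    node = node->next;
  }
}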
715
716void PageSpace::ResetProgressBars() const {
717 for (Page* page = large_pages_; page != nullptr; page = page->next()) {
718 page->ResetProgressBar();
719 }
720}
721
722void PageSpace::WriteProtect(bool read_only) {
723 if (read_only) {
724 // Avoid MakeIterable trying to write to the heap.
725 ReleaseBumpAllocation();
726 }
727 for (ExclusivePageIterator it(this); !it.Done(); it.Advance()) {
728 if (!it.page()->is_image()) {
729 it.page()->WriteProtect(read_only);
730 }
731 }
732}
733
734#ifndef PRODUCT
735void PageSpace::PrintToJSONObject(JSONObject* object) const {
736 auto isolate_group = IsolateGroup::Current();
737 ASSERT(isolate_group != nullptr);
738 JSONObject space(object, "old");
739 space.AddProperty("type", "HeapSpace");
740 space.AddProperty("name", "old");
741 space.AddProperty("vmName", "PageSpace");
742 space.AddProperty("collections", collections());
743 space.AddProperty64("used", UsedInWords() * kWordSize);
744 space.AddProperty64("capacity", CapacityInWords() * kWordSize);
745 space.AddProperty64("external", ExternalInWords() * kWordSize);
746 space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros()));
747 if (collections() > 0) {
748 int64_t run_time = isolate_group->UptimeMicros();
749 run_time = Utils::Maximum(run_time, static_cast<int64_t>(0));
750 double run_time_millis = MicrosecondsToMilliseconds(run_time);
751 double avg_time_between_collections =
752 run_time_millis / static_cast<double>(collections());
753 space.AddProperty("avgCollectionPeriodMillis",
754 avg_time_between_collections);
755 } else {
756 space.AddProperty("avgCollectionPeriodMillis", 0.0);
757 }
758}
759
760class HeapMapAsJSONVisitor : public ObjectVisitor {
761 public:
762 explicit HeapMapAsJSONVisitor(JSONArray* array) : array_(array) {}
763 void VisitObject(ObjectPtr obj) override {
764 array_->AddValue(obj->untag()->HeapSize() / kObjectAlignment);
765 array_->AddValue(obj->GetClassId());
766 }
767
768 private:
769 JSONArray* array_;
770};
771
772void PageSpace::PrintHeapMapToJSONStream(IsolateGroup* isolate_group,
773                                         JSONStream* stream) const {
774 JSONObject heap_map(stream);
775 heap_map.AddProperty("type", "HeapMap");
776 heap_map.AddProperty("freeClassId", static_cast<intptr_t>(kFreeListElement));
777 heap_map.AddProperty("unitSizeBytes",
778 static_cast<intptr_t>(kObjectAlignment));
779 heap_map.AddProperty("pageSizeBytes", kPageSizeInWords * kWordSize);
780 {
781 JSONObject class_list(&heap_map, "classList");
782 isolate_group->class_table()->PrintToJSONObject(&class_list);
783 }
784 {
785 // "pages" is an array [page0, page1, ..., pageN], each page of the form
786 // {"object_start": "0x...", "objects": [size, class id, size, ...]}
787 // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages.
789 MutexLocker ml(&pages_lock_);
790 MakeIterable();
791 JSONArray all_pages(&heap_map, "pages");
792 for (Page* page = pages_; page != nullptr; page = page->next()) {
793 JSONObject page_container(&all_pages);
794 page_container.AddPropertyF("objectStart", "0x%" Px "",
795 page->object_start());
796 JSONArray page_map(&page_container, "objects");
797 HeapMapAsJSONVisitor printer(&page_map);
798 page->VisitObjects(&printer);
799 }
800 for (Page* page = exec_pages_; page != nullptr; page = page->next()) {
801 JSONObject page_container(&all_pages);
802 page_container.AddPropertyF("objectStart", "0x%" Px "",
803 page->object_start());
804 JSONArray page_map(&page_container, "objects");
805 HeapMapAsJSONVisitor printer(&page_map);
806 page->VisitObjects(&printer);
807 }
808 }
809}
810#endif // PRODUCT
811
812void PageSpace::WriteProtectCode(bool read_only) {
813 if (FLAG_write_protect_code) {
814 MutexLocker ml(&pages_lock_);
815 NoSafepointScope no_safepoint;
816 // No need to go through all of the data pages first.
817 Page* page = exec_pages_;
818 while (page != nullptr) {
819 ASSERT(page->is_executable());
820 page->WriteProtect(read_only);
821 page = page->next();
822 }
823 page = large_pages_;
824 while (page != nullptr) {
825 if (page->is_executable()) {
826 page->WriteProtect(read_only);
827 }
828 page = page->next();
829 }
830 }
831}
832
833bool PageSpace::ShouldStartIdleMarkSweep(int64_t deadline) {
834 // To make a consistent decision, we should not yield for a safepoint in the
835 // middle of deciding whether to perform an idle GC.
836 NoSafepointScope no_safepoint;
837
838 if (!page_space_controller_.ReachedIdleThreshold(usage_)) {
839 return false;
840 }
841
842 {
843 MonitorLocker locker(tasks_lock());
844 if (tasks() > 0) {
845 // A concurrent sweeper is running. If we start a mark sweep now
846 // we'll have to wait for it, and this wait time is not included in
847 // mark_words_per_micro_.
848 return false;
849 }
850 }
851
852 // This uses the size of new-space because the pause time to start concurrent
853 // marking is related to the size of the root set, which is mostly new-space.
854 int64_t estimated_mark_completion =
855     OS::GetCurrentMonotonicMicros() +
856     heap_->new_space()->UsedInWords() / mark_words_per_micro_;
857 return estimated_mark_completion <= deadline;
858}
859
860bool PageSpace::ShouldPerformIdleMarkCompact(int64_t deadline) {
861 // To make a consistent decision, we should not yield for a safepoint in the
862 // middle of deciding whether to perform an idle GC.
863 NoSafepointScope no_safepoint;
864
865 // When enabled, prefer the incremental/evacuating compactor over the
866 // full/sliding compactor.
867 if (FLAG_use_incremental_compactor) {
868 return false;
869 }
870
871 // Discount two pages to account for the newest data and code pages, whose
872 // partial use doesn't indicate fragmentation.
873 const intptr_t excess_in_words =
874     usage_.capacity_in_words - usage_.used_in_words - 2 * kPageSizeInWords;
875 const double excess_ratio = static_cast<double>(excess_in_words) /
876 static_cast<double>(usage_.capacity_in_words);
877 const bool fragmented = excess_ratio > 0.05;
878
879 if (!fragmented && !page_space_controller_.ReachedIdleThreshold(usage_)) {
880 return false;
881 }
882
883 {
884 MonitorLocker locker(tasks_lock());
885 if (tasks() > 0) {
886 // A concurrent sweeper is running. If we start a mark sweep now
887 // we'll have to wait for it, and this wait time is not included in
888 // mark_words_per_micro_.
889 return false;
890 }
891 }
892
893 // Assuming compaction takes as long as marking.
894 intptr_t mark_compact_words_per_micro = mark_words_per_micro_ / 2;
895 if (mark_compact_words_per_micro == 0) {
896 mark_compact_words_per_micro = 1; // Prevent division by zero.
897 }
898
899 int64_t estimated_mark_compact_completion =
900     OS::GetCurrentMonotonicMicros() +
901     UsedInWords() / mark_compact_words_per_micro;
902 return estimated_mark_compact_completion <= deadline;
903}
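The fragmentation test above reduces to simple arithmetic; here is a standalone sketch (names and the 5% cutoff mirror the logic above but the function itself is illustrative): excess space is capacity minus usage minus two pages, and the space counts as fragmented when that excess exceeds 5% of capacity.

#include <cstdint>

// Decide whether an old space looks fragmented enough to justify compaction.
// All values are in words; two pages are discounted for the newest data and
// code pages, whose partial use does not indicate fragmentation.
bool LooksFragmented(intptr_t capacity_in_words,
                     intptr_t used_in_words,
                     intptr_t page_size_in_words) {
  if (capacity_in_words <= 0) return false;
  const intptr_t excess_in_words =
      capacity_in_words - used_in_words - 2 * page_size_in_words;
  const double excess_ratio = static_cast<double>(excess_in_words) /
                              static_cast<double>(capacity_in_words);
  return excess_ratio > 0.05;  // More than 5% of capacity is free space.
}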
904
905void PageSpace::IncrementalMarkWithSizeBudget(intptr_t size) {
906 if (marker_ != nullptr) {
907 marker_->IncrementalMarkWithSizeBudget(this, size);
908 }
909}
910
911void PageSpace::IncrementalMarkWithTimeBudget(int64_t deadline) {
912 if (marker_ != nullptr) {
913 marker_->IncrementalMarkWithTimeBudget(this, deadline);
914 }
915}
916
917void PageSpace::AssistTasks(MonitorLocker* ml) {
918 if (phase() == PageSpace::kMarking) {
919 ml->Exit();
921 ml->Enter();
922 }
923 if ((phase() == kSweepingLarge) || (phase() == kSweepingRegular)) {
924 ml->Exit();
925 Sweep(/*exclusive*/ false);
926 SweepLarge();
927 ml->Enter();
928 }
929}
930
931void PageSpace::TryReleaseReservation() {
934 if (oom_reservation_ == nullptr) return;
935 uword addr = reinterpret_cast<uword>(oom_reservation_);
936 intptr_t size = oom_reservation_->HeapSize();
937 oom_reservation_ = nullptr;
938 freelists_[kDataFreelist].Free(addr, size);
939}
940
941bool PageSpace::MarkReservation() {
942 if (oom_reservation_ == nullptr) {
943 return false;
944 }
945 UntaggedObject* ptr = reinterpret_cast<UntaggedObject*>(oom_reservation_);
946 if (!ptr->IsMarked()) {
947 ptr->SetMarkBit();
948 }
949 return true;
950}
951
952void PageSpace::TryReserveForOOM() {
953 if (oom_reservation_ == nullptr) {
954 uword addr = TryAllocate(kOOMReservationSize, /*exec*/ false,
955 kForceGrowth /* Don't re-enter GC */);
956 if (addr != 0) {
957 oom_reservation_ = FreeListElement::AsElement(addr, kOOMReservationSize);
958 }
959 }
960}
961
962void PageSpace::VisitRoots(ObjectPointerVisitor* visitor) {
963 if (oom_reservation_ != nullptr) {
964 // FreeListElements are generally held untagged, but ObjectPointerVisitors
965 // expect tagged pointers.
966 ObjectPtr ptr =
967 UntaggedObject::FromAddr(reinterpret_cast<uword>(oom_reservation_));
968 visitor->VisitPointer(&ptr);
969 oom_reservation_ =
970 reinterpret_cast<FreeListElement*>(UntaggedObject::ToAddr(ptr));
971 }
972}
973
974void PageSpace::CollectGarbage(Thread* thread, bool compact, bool finalize) {
975 ASSERT(!Thread::Current()->force_growth());
976
977 if (!finalize) {
978 if (!enable_concurrent_mark()) return; // Disabled.
979 if (FLAG_marker_tasks == 0) return; // Disabled.
980 }
981
982 GcSafepointOperationScope safepoint_scope(thread);
983
984 // Wait for pending tasks to complete and then account for the driver task.
985 {
986 MonitorLocker locker(tasks_lock());
987 if (!finalize &&
988 (phase() == kMarking || phase() == kAwaitingFinalization)) {
989 // Concurrent mark is already running.
990 return;
991 }
992
993 AssistTasks(&locker);
994 while (tasks() > 0) {
995 locker.Wait();
996 }
998 set_tasks(1);
999 }
1000
1001 // Ensure that all threads for this isolate are at a safepoint (either
1002 // stopped or in native code). We have guards around Newgen GC and oldgen GC
1003 // to ensure that if two threads are racing to collect at the same time the
1004 // loser skips collection and goes straight to allocation.
1005 CollectGarbageHelper(thread, compact, finalize);
1006
1007 // Done, reset the task count.
1008 {
1009 MonitorLocker ml(tasks_lock());
1010 set_tasks(tasks() - 1);
1011 ml.NotifyAll();
1012 }
1013}
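The task bookkeeping around the collection (wait for outstanding tasks, register the driver as one task, then release it and notify) follows a standard gate pattern. A standalone analogue with standard primitives (a sketch, not the VM's MonitorLocker API):

#include <condition_variable>
#include <mutex>

class TaskGate {
 public:
  // Wait until no helper task is running, then account for the driver task.
  void BeginExclusiveWork() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [&] { return tasks_ == 0; });
    tasks_ = 1;  // The driver itself now counts as the single running task.
  }

  // Drop the driver's task count and wake anyone waiting for quiescence.
  void EndExclusiveWork() {
    std::lock_guard<std::mutex> lock(mutex_);
    --tasks_;
    cv_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int tasks_ = 0;
};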
1014
1015void PageSpace::CollectGarbageHelper(Thread* thread,
1016 bool compact,
1017 bool finalize) {
1018 ASSERT(thread->OwnsGCSafepoint());
1019 auto isolate_group = heap_->isolate_group();
1020 ASSERT(isolate_group == IsolateGroup::Current());
1021
1022 const int64_t start = OS::GetCurrentMonotonicMicros();
1023
1024 // Perform various cleanup that relies on no tasks interfering.
1025 isolate_group->class_table_allocator()->FreePending();
1026 isolate_group->ForEachIsolate(
1027 [&](Isolate* isolate) { isolate->field_table()->FreeOldTables(); },
1028 /*at_safepoint=*/true);
1029
1030 NoSafepointScope no_safepoints(thread);
1031
1032 if (FLAG_print_free_list_before_gc) {
1033 for (intptr_t i = 0; i < num_freelists_; i++) {
1034 OS::PrintErr("Before GC: Freelist %" Pd "\n", i);
1035 freelists_[i].Print();
1036 }
1037 }
1038
1039 if (FLAG_verify_before_gc) {
1040 heap_->VerifyGC("Verifying before marking",
1042 }
1043
1044 // Make code pages writable.
1045 if (finalize) WriteProtectCode(false);
1046
1047 // Save old value before GCMarker visits the weak persistent handles.
1048 SpaceUsage usage_before = GetCurrentUsage();
1049
1050 // Mark all reachable old-gen objects.
1051 if (marker_ == nullptr) {
1052 ASSERT(phase() == kDone);
1053 marker_ = new GCMarker(isolate_group, heap_);
1054 if (FLAG_use_incremental_compactor) {
1055 GCIncrementalCompactor::Prologue(this);
1056 }
1057 } else {
1058 ASSERT(phase() == kAwaitingFinalization);
1059 }
1060
1061 if (!finalize) {
1062 ASSERT(phase() == kDone);
1063 marker_->StartConcurrentMark(this);
1064 return;
1065 }
1066
1067 // Abandon the remainder of the bump allocation block.
1068 ReleaseBumpAllocation();
1069
1070 marker_->MarkObjects(this);
1071 usage_.used_in_words = marker_->marked_words() + allocated_black_in_words_;
1072 allocated_black_in_words_ = 0;
1073 mark_words_per_micro_ = marker_->MarkedWordsPerMicro();
1074 delete marker_;
1075 marker_ = nullptr;
1076
1077 if (FLAG_verify_store_buffer) {
1078 VerifyStoreBuffers("Verifying remembered set after marking");
1079 }
1080
1081 if (FLAG_verify_before_gc) {
1082 heap_->VerifyGC("Verifying before sweeping", kAllowMarked);
1083 }
1084
1085 bool has_reservation = MarkReservation();
1086
1087 bool new_space_is_swept = false;
1088 if (FLAG_use_incremental_compactor) {
1089 new_space_is_swept = GCIncrementalCompactor::Epilogue(this);
1090 }
1091
1092 // Reset the freelists and setup sweeping.
1093 for (intptr_t i = 0; i < num_freelists_; i++) {
1094 freelists_[i].Reset();
1095 }
1096
1097 {
1098 // Executable pages are always swept immediately to simplify
1099 // code protection.
1100 TIMELINE_FUNCTION_GC_DURATION(thread, "SweepExecutable");
1101 GCSweeper sweeper;
1102 Page* prev_page = nullptr;
1103 Page* page = exec_pages_;
1104 FreeList* freelist = &freelists_[kExecutableFreelist];
1105 MutexLocker ml(freelist->mutex());
1106 while (page != nullptr) {
1107 Page* next_page = page->next();
1108 bool page_in_use = sweeper.SweepPage(page, freelist);
1109 if (page_in_use) {
1110 prev_page = page;
1111 } else {
1112 FreePage(page, prev_page);
1113 }
1114 // Advance to the next page.
1115 page = next_page;
1116 }
1117 }
1118
1119 {
1120 // Move pages to sweeper work lists.
1121 MutexLocker ml(&pages_lock_);
1122 ASSERT(sweep_large_ == nullptr);
1123 sweep_large_ = large_pages_;
1124 large_pages_ = large_pages_tail_ = nullptr;
1125 ASSERT(sweep_regular_ == nullptr);
1126 if (!compact) {
1127 sweep_regular_ = pages_;
1128 pages_ = pages_tail_ = nullptr;
1129 }
1130 }
1131
1132 if (!new_space_is_swept) {
1133 SweepNew();
1134 }
1135 bool is_concurrent_sweep_running = false;
1136 if (compact) {
1137 Compact(thread);
1139 is_concurrent_sweep_running = true;
1140 } else if (FLAG_concurrent_sweep && has_reservation) {
1141 ConcurrentSweep(isolate_group);
1142 is_concurrent_sweep_running = true;
1143 } else {
1144 SweepLarge();
1145 Sweep(/*exclusive*/ true);
1146 set_phase(kDone);
1147 }
1148
1149 if (FLAG_verify_after_gc && !is_concurrent_sweep_running) {
1150 heap_->VerifyGC("Verifying after sweeping", kForbidMarked);
1151 }
1152
1153 TryReserveForOOM();
1154
1155 // Make code pages read-only.
1156 if (finalize) WriteProtectCode(true);
1157
1158 int64_t end = OS::GetCurrentMonotonicMicros();
1159
1160 // Record signals for growth control. Include size of external allocations.
1161 page_space_controller_.EvaluateGarbageCollection(
1162 usage_before, GetCurrentUsage(), start, end);
1163
1164 if (FLAG_print_free_list_after_gc) {
1165 for (intptr_t i = 0; i < num_freelists_; i++) {
1166 OS::PrintErr("After GC: Freelist %" Pd "\n", i);
1167 freelists_[i].Print();
1168 }
1169 }
1170
1171 UpdateMaxUsed();
1172 if (heap_ != nullptr) {
1173 heap_->UpdateGlobalMaxUsed();
1174 }
1175}
1176
1177class CollectStoreBufferEvacuateVisitor : public ObjectPointerVisitor {
1178 public:
1179 CollectStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
1180 : ObjectPointerVisitor(IsolateGroup::Current()),
1181 in_store_buffer_(in_store_buffer),
1182 msg_(msg) {}
1183
1184 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1185 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1186 ObjectPtr obj = *ptr;
1188 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
1189
1191 if (obj.GetClassId() == kArrayCid) {
1192 const uword length =
1193 Smi::Value(static_cast<UntaggedArray*>(obj.untag())->length());
1194 RELEASE_ASSERT_WITH_MSG(!Array::UseCardMarkingForAllocation(length),
1195                         msg_);
1196 }
1197 in_store_buffer_->Add(obj);
1198 }
1199 }
1200
1201#if defined(DART_COMPRESSED_POINTERS)
1202 void VisitCompressedPointers(uword heap_base,
1203 CompressedObjectPtr* from,
1204 CompressedObjectPtr* to) override {
1205 UNREACHABLE(); // Store buffer blocks are not compressed.
1206 }
1207#endif
1208
1209 private:
1210 ObjectSet* const in_store_buffer_;
1211 const char* msg_;
1212
1213 DISALLOW_COPY_AND_ASSIGN(CollectStoreBufferEvacuateVisitor);
1214};
1215
1216class CheckStoreBufferEvacuateVisitor : public ObjectVisitor,
1217                                        public ObjectPointerVisitor {
1218 public:
1219 CheckStoreBufferEvacuateVisitor(ObjectSet* in_store_buffer, const char* msg)
1220 : ObjectVisitor(),
1221   ObjectPointerVisitor(IsolateGroup::Current()),
1222   in_store_buffer_(in_store_buffer),
1223 msg_(msg) {}
1224
1225 void VisitObject(ObjectPtr obj) override {
1226 if (obj->IsPseudoObject()) return;
1227 RELEASE_ASSERT_WITH_MSG(obj->IsOldObject(), msg_);
1228 if (!obj->untag()->IsMarked()) return;
1229
1230 if (obj->untag()->IsRemembered()) {
1231 RELEASE_ASSERT_WITH_MSG(in_store_buffer_->Contains(obj), msg_);
1232 } else {
1233 RELEASE_ASSERT_WITH_MSG(!in_store_buffer_->Contains(obj), msg_);
1234 }
1235
1236 visiting_ = obj;
1237 is_remembered_ = obj->untag()->IsRemembered();
1238 is_card_remembered_ = obj->untag()->IsCardRemembered();
1239 if (is_card_remembered_) {
1240 RELEASE_ASSERT_WITH_MSG(!is_remembered_, msg_);
1241 RELEASE_ASSERT_WITH_MSG(Page::Of(obj)->progress_bar_ == 0, msg_);
1242 }
1243 obj->untag()->VisitPointers(this);
1244 }
1245
1246 void VisitPointers(ObjectPtr* from, ObjectPtr* to) override {
1247 for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
1248 ObjectPtr obj = *ptr;
1249 if (obj->IsHeapObject() && obj->untag()->IsEvacuationCandidate()) {
1250 if (is_card_remembered_) {
1251 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
1252 FATAL(
1253 "%s: Old object %#" Px " references new object %#" Px
1254 ", but the "
1255 "slot's card is not remembered. Consider using rr to watch the "
1256 "slot %p and reverse-continue to find the store with a missing "
1257 "barrier.\n",
1258 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1259 ptr);
1260 }
1261 } else if (!is_remembered_) {
1262 FATAL("%s: Old object %#" Px " references new object %#" Px
1263 ", but it is "
1264 "not in any store buffer. Consider using rr to watch the "
1265 "slot %p and reverse-continue to find the store with a missing "
1266 "barrier.\n",
1267 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1268 ptr);
1269 }
1270 }
1271 }
1272 }
1273
1274#if defined(DART_COMPRESSED_POINTERS)
1275 void VisitCompressedPointers(uword heap_base,
1276 CompressedObjectPtr* from,
1277 CompressedObjectPtr* to) override {
1278 for (CompressedObjectPtr* ptr = from; ptr <= to; ptr++) {
1279 ObjectPtr obj = ptr->Decompress(heap_base);
1280 if (obj->IsHeapObject() && obj->IsNewObject()) {
1281 if (is_card_remembered_) {
1282 if (!Page::Of(visiting_)->IsCardRemembered(ptr)) {
1283 FATAL(
1284 "%s: Old object %#" Px " references new object %#" Px
1285 ", but the "
1286 "slot's card is not remembered. Consider using rr to watch the "
1287 "slot %p and reverse-continue to find the store with a missing "
1288 "barrier.\n",
1289 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1290 ptr);
1291 }
1292 } else if (!is_remembered_) {
1293 FATAL("%s: Old object %#" Px " references new object %#" Px
1294 ", but it is "
1295 "not in any store buffer. Consider using rr to watch the "
1296 "slot %p and reverse-continue to find the store with a missing "
1297 "barrier.\n",
1298 msg_, static_cast<uword>(visiting_), static_cast<uword>(obj),
1299 ptr);
1300 }
1301 }
1302 }
1303 }
1304#endif
1305
1306 private:
1307 const ObjectSet* const in_store_buffer_;
1308 ObjectPtr visiting_;
1309 bool is_remembered_;
1310 bool is_card_remembered_;
1311 const char* msg_;
1312};
1313
1314void PageSpace::VerifyStoreBuffers(const char* msg) {
1315 ASSERT(msg != nullptr);
1316 Thread* thread = Thread::Current();
1317 StackZone stack_zone(thread);
1318 Zone* zone = stack_zone.GetZone();
1319
1320 ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
1321 heap_->AddRegionsToObjectSet(in_store_buffer);
1322
1323 {
1324 CollectStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
1325 heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
1326 }
1327
1328 {
1329 CheckStoreBufferEvacuateVisitor visitor(in_store_buffer, msg);
1330 heap_->old_space()->VisitObjects(&visitor);
1331 }
1332}
1333
1334void PageSpace::SweepNew() {
1335 // TODO(rmacnak): Run in parallel with SweepExecutable.
1337
1338 GCSweeper sweeper;
1339 intptr_t free = 0;
1340 for (Page* page = heap_->new_space()->head(); page != nullptr;
1341 page = page->next()) {
1342 page->Release();
1343 free += sweeper.SweepNewPage(page);
1344 }
1345 heap_->new_space()->set_freed_in_words(free >> kWordSizeLog2);
1346}
1347
1348void PageSpace::SweepLarge() {
1350
1351 GCSweeper sweeper;
1352 MutexLocker ml(&pages_lock_);
1353 while (sweep_large_ != nullptr) {
1354 Page* page = sweep_large_;
1355 sweep_large_ = page->next();
1356 page->set_next(nullptr);
1357 ASSERT(!page->is_executable());
1358
1359 ml.Unlock();
1360 intptr_t words_to_end = sweeper.SweepLargePage(page);
1361 intptr_t size;
1362 if (words_to_end == 0) {
1363 size = page->memory_->size();
1364 page->Deallocate();
1365 ml.Lock();
1366 IncreaseCapacityInWordsLocked(-(size >> kWordSizeLog2));
1367 } else {
1368 TruncateLargePage(page, words_to_end << kWordSizeLog2);
1369 ml.Lock();
1370 AddLargePageLocked(page);
1371 }
1372 }
1373}
1374
1375void PageSpace::Sweep(bool exclusive) {
1377
1378 GCSweeper sweeper;
1379
1380 intptr_t shard = 0;
1381 const intptr_t num_shards = Utils::Maximum(FLAG_scavenger_tasks, 1);
1382 if (exclusive) {
1383 for (intptr_t i = 0; i < num_shards; i++) {
1384 DataFreeList(i)->mutex()->Lock();
1385 }
1386 }
1387
1388 MutexLocker ml(&pages_lock_);
1389 while (sweep_regular_ != nullptr) {
1390 Page* page = sweep_regular_;
1391 sweep_regular_ = page->next();
1392 page->set_next(nullptr);
1393 ASSERT(!page->is_executable());
1394
1395 ml.Unlock();
1396 // Cycle through the shards round-robin so that free space is roughly
1397 // evenly distributed among the freelists and so roughly evenly available
1398 // to each scavenger worker.
1399 shard = (shard + 1) % num_shards;
1400 FreeList* freelist = DataFreeList(shard);
1401 if (!exclusive) {
1402 freelist->mutex()->Lock();
1403 }
1404 bool page_in_use = sweeper.SweepPage(page, freelist);
1405 if (!exclusive) {
1406 freelist->mutex()->Unlock();
1407 }
1408 intptr_t size;
1409 if (!page_in_use) {
1410 size = page->memory_->size();
1411 page->Deallocate();
1412 }
1413 ml.Lock();
1414
1415 if (page_in_use) {
1416 AddPageLocked(page);
1417 } else {
1418 IncreaseCapacityInWordsLocked(-(size >> kWordSizeLog2));
1419 }
1420 }
1421
1422 if (exclusive) {
1423 for (intptr_t i = 0; i < num_shards; i++) {
1424 DataFreeList(i)->mutex()->Unlock();
1425 }
1426 }
1427}
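The round-robin shard selection above spreads swept pages across the data freelists. A trivial standalone sketch of the same distribution rule (the shard count stands in for FLAG_scavenger_tasks; names are illustrative):

#include <vector>

// Distribute `num_items` work items across `num_shards` shards in round-robin
// order so each shard receives roughly the same number of items.
std::vector<int> CountPerShard(int num_items, int num_shards) {
  if (num_shards < 1) num_shards = 1;
  std::vector<int> counts(num_shards, 0);
  int shard = 0;
  for (int i = 0; i < num_items; i++) {
    shard = (shard + 1) % num_shards;
    counts[shard]++;
  }
  return counts;
}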
1428
1429void PageSpace::ConcurrentSweep(IsolateGroup* isolate_group) {
1430 // Start the concurrent sweeper task now.
1431 GCSweeper::SweepConcurrent(isolate_group);
1432}
1433
1434void PageSpace::Compact(Thread* thread) {
1435 GCCompactor compactor(thread, heap_);
1436 compactor.Compact(pages_, &freelists_[kDataFreelist], &pages_lock_);
1437
1438 if (FLAG_verify_after_gc) {
1439 heap_->VerifyGC("Verifying after compacting", kForbidMarked);
1440 }
1441}
1442
1443uword PageSpace::TryAllocateDataBumpLocked(FreeList* freelist, intptr_t size) {
1444 ASSERT(size >= kObjectAlignment);
1445 ASSERT(Utils::IsAligned(size, kObjectAlignment));
1446
1447 if (!Heap::IsAllocatableViaFreeLists(size)) {
1448 return TryAllocateDataLocked(freelist, size, kForceGrowth);
1449 }
1450
1451 intptr_t remaining = freelist->end() - freelist->top();
1452 if (UNLIKELY(remaining < size)) {
1453 FreeListElement* block = freelist->TryAllocateLargeLocked(size);
1454 if (block == nullptr) {
1455 // Allocating from a new page (if growth policy allows) will have the
1456 // side-effect of populating the freelist with a large block. The next
1457 // bump allocation request will have a chance to consume that block.
1458 return TryAllocateInFreshPage(size, freelist, false /* exec */,
1459 kForceGrowth, true /* is_locked*/);
1460 }
1461 intptr_t block_size = block->HeapSize();
1462 if (remaining > 0) {
1463 usage_.used_in_words -= (remaining >> kWordSizeLog2);
1464 Page::Of(freelist->top())->add_live_bytes(remaining);
1465 freelist->FreeLocked(freelist->top(), remaining);
1466 }
1467 freelist->set_top(reinterpret_cast<uword>(block));
1468 freelist->set_end(freelist->top() + block_size);
1469 // To avoid accounting overhead during each bump pointer allocation, we add
1470 // the size of the whole bump area here and subtract the remaining size
1471 // when switching to a new area.
1472 usage_.used_in_words += (block_size >> kWordSizeLog2);
1473 Page::Of(block)->add_live_bytes(block_size);
1474 remaining = block_size;
1475 }
1476 ASSERT(remaining >= size);
1477 uword result = freelist->top();
1478 freelist->set_top(result + size);
1479
1480// Note: Remaining block is unwalkable until MakeIterable is called.
1481#ifdef DEBUG
1482 if (freelist->top() < freelist->end()) {
1483 // Fail fast if we try to walk the remaining block.
1484 COMPILE_ASSERT(kIllegalCid == 0);
1485 *reinterpret_cast<uword*>(freelist->top()) = 0;
1486 }
1487#endif // DEBUG
1488 return result;
1489}
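A reduced model of the bump-pointer path above (a standalone sketch; BumpRegion and Accounting are not VM types): the whole block is charged as used when it becomes the bump region, and the unused remainder of the previous region is un-charged and handed back when the region is replaced.

#include <cassert>
#include <cstdint>

struct BumpRegion {
  uintptr_t top = 0;
  uintptr_t end = 0;
};

struct Accounting {
  intptr_t used_bytes = 0;
};

// Install a new block as the bump region. Returns the start of the old
// region's leftover (0 if none) so the caller can return it to a free list.
uintptr_t InstallBlock(BumpRegion* region,
                       Accounting* usage,
                       uintptr_t block_start,
                       intptr_t block_size) {
  const intptr_t remaining = static_cast<intptr_t>(region->end - region->top);
  const uintptr_t leftover = (remaining > 0) ? region->top : 0;
  if (remaining > 0) {
    usage->used_bytes -= remaining;  // Un-charge the unused remainder.
  }
  region->top = block_start;
  region->end = block_start + block_size;
  usage->used_bytes += block_size;  // Charge the whole new block up front.
  return leftover;
}

// Bump-allocate `size` bytes; the caller has ensured the region is big enough.
uintptr_t BumpAllocate(BumpRegion* region, intptr_t size) {
  assert(region->end - region->top >= static_cast<uintptr_t>(size));
  uintptr_t result = region->top;
  region->top += size;
  return result;
}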
1490
1491uword PageSpace::TryAllocatePromoLockedSlow(FreeList* freelist, intptr_t size) {
1492 uword result = freelist->TryAllocateSmallLocked(size);
1493 if (result != 0) {
1495 freelist->AddUnaccountedSize(size);
1496 return result;
1497 }
1498 return TryAllocateDataBumpLocked(freelist, size);
1499}
1500
1501uword PageSpace::AllocateSnapshotLockedSlow(FreeList* freelist, intptr_t size) {
1502 uword result = TryAllocateDataBumpLocked(freelist, size);
1503 if (result != 0) {
1504 return result;
1505 }
1506 OUT_OF_MEMORY();
1507}
1508
1509void PageSpace::SetupImagePage(void* pointer, uword size, bool is_executable) {
1510 // Setup a Page so precompiled Instructions can be traversed.
1511 // Instructions are contiguous at [pointer, pointer + size). Page
1512 // expects to find objects at [memory->start() + ObjectStartOffset,
1513 // memory->end()).
1514 uword offset = Page::OldObjectStartOffset();
1515 pointer = reinterpret_cast<void*>(reinterpret_cast<uword>(pointer) - offset);
1517 size += offset;
1518
1519 VirtualMemory* memory = VirtualMemory::ForImagePage(pointer, size);
1520 ASSERT(memory != nullptr);
1521 Page* page = reinterpret_cast<Page*>(malloc(sizeof(Page)));
1522 uword flags = Page::kImage;
1523 if (is_executable) {
1524 flags |= Page::kExecutable;
1525 }
1526 page->flags_ = flags;
1527 page->memory_ = memory;
1528 page->next_ = nullptr;
1529 page->forwarding_page_ = nullptr;
1530 page->card_table_ = nullptr;
1531 page->progress_bar_ = 0;
1532 page->owner_ = nullptr;
1533 page->top_ = memory->end();
1534 page->end_ = memory->end();
1535 page->survivor_end_ = 0;
1536 page->resolved_top_ = 0;
1537 page->live_bytes_ = 0;
1538
1539 MutexLocker ml(&pages_lock_);
1540 page->next_ = image_pages_;
1541 image_pages_ = page;
1542}
1543
1544bool PageSpace::IsObjectFromImagePages(ObjectPtr object) {
1545 uword object_addr = UntaggedObject::ToAddr(object);
1546 Page* image_page = image_pages_;
1547 while (image_page != nullptr) {
1548 if (image_page->Contains(object_addr)) {
1549 return true;
1550 }
1551 image_page = image_page->next();
1552 }
1553 return false;
1554}
1555
1556PageSpaceController::PageSpaceController(Heap* heap,
1557                                         int heap_growth_ratio,
1558 int heap_growth_max,
1559 int garbage_collection_time_ratio)
1560 : heap_(heap),
1561 heap_growth_ratio_(heap_growth_ratio),
1562 desired_utilization_((100.0 - heap_growth_ratio) / 100.0),
1563 heap_growth_max_(heap_growth_max),
1564 garbage_collection_time_ratio_(garbage_collection_time_ratio),
1565 idle_gc_threshold_in_words_(0) {
1566 const intptr_t growth_in_pages = heap_growth_max / 2;
1567 RecordUpdate(last_usage_, last_usage_, growth_in_pages, "initial");
1568}
1569
1570PageSpaceController::~PageSpaceController() {}
1571
1572bool PageSpaceController::ReachedHardThreshold(SpaceUsage after) const {
1573 if (heap_growth_ratio_ == 100) {
1574 return false;
1575 }
1576 if ((heap_ != nullptr) && (heap_->mode() == Dart_PerformanceMode_Latency)) {
1577 return false;
1578 }
1579 return after.CombinedUsedInWords() > hard_gc_threshold_in_words_;
1580}
1581
1582bool PageSpaceController::ReachedSoftThreshold(SpaceUsage after) const {
1583 if (heap_growth_ratio_ == 100) {
1584 return false;
1585 }
1586 if ((heap_ != nullptr) && (heap_->mode() == Dart_PerformanceMode_Latency)) {
1587 return false;
1588 }
1589 return after.CombinedUsedInWords() > soft_gc_threshold_in_words_;
1590}
1591
1592bool PageSpaceController::ReachedIdleThreshold(SpaceUsage current) const {
1593 if (heap_growth_ratio_ == 100) {
1594 return false;
1595 }
1596 return current.CombinedUsedInWords() > idle_gc_threshold_in_words_;
1597}
1598
1599void PageSpaceController::EvaluateGarbageCollection(SpaceUsage before,
1600                                                    SpaceUsage after,
1601 int64_t start,
1602 int64_t end) {
1603 ASSERT(end >= start);
1604 history_.AddGarbageCollectionTime(start, end);
1605 const int gc_time_fraction = history_.GarbageCollectionTimeFraction();
1606
1607 // Assume garbage increases linearly with allocation:
1608 // G = kA, and estimate k from the previous cycle.
1609 const intptr_t allocated_since_previous_gc =
1610 before.CombinedUsedInWords() - last_usage_.CombinedUsedInWords();
1611 intptr_t grow_heap;
1612 if (allocated_since_previous_gc > 0) {
1613 intptr_t garbage =
1614 before.CombinedUsedInWords() - after.CombinedUsedInWords();
1615 // Garbage may be negative when the OOM reservation is refilled.
1616 garbage = Utils::Maximum(static_cast<intptr_t>(0), garbage);
1617 // It makes no sense to expect that each kb allocated will cause more than
1618 // one kb of garbage, so we clamp k at 1.0.
1619 const double k = Utils::Minimum(
1620 1.0, garbage / static_cast<double>(allocated_since_previous_gc));
1621
1622 const int garbage_ratio = static_cast<int>(k * 100);
1623
1624 // Define GC to be 'worthwhile' iff at least fraction t of heap is garbage.
1625 double t = 1.0 - desired_utilization_;
1626 // If we spend too much time in GC, strive for even more free space.
1627 if (gc_time_fraction > garbage_collection_time_ratio_) {
1628 t += (gc_time_fraction - garbage_collection_time_ratio_) / 100.0;
1629 }
1630
1631 // Number of pages we can allocate and still be within the desired growth
1632 // ratio.
1633 const intptr_t grow_pages =
1634 (static_cast<intptr_t>(after.CombinedUsedInWords() /
1635 desired_utilization_) -
1636 (after.CombinedUsedInWords())) /
1637 kPageSizeInWords;
1638 if (garbage_ratio == 0) {
1639 // No garbage in the previous cycle so it would be hard to compute a
1640 // grow_heap size based on estimated garbage so we use growth ratio
1641 // heuristics instead.
1642 grow_heap =
1643 Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
1644 } else if (garbage_collection_time_ratio_ == 0) {
1645 // Exclude time from the growth policy decision for --deterministic.
1646 grow_heap =
1647 Utils::Maximum(static_cast<intptr_t>(heap_growth_max_), grow_pages);
1648 } else {
1649 // Find minimum 'grow_heap' such that after increasing capacity by
1650 // 'grow_heap' pages and filling them, we expect a GC to be worthwhile.
1651 intptr_t max = heap_growth_max_;
1652 intptr_t min = 0;
1653 intptr_t local_grow_heap = 0;
1654 while (min < max) {
1655 local_grow_heap = (max + min) / 2;
1656 const intptr_t limit =
1657 after.CombinedUsedInWords() + (local_grow_heap * kPageSizeInWords);
1658 const intptr_t allocated_before_next_gc =
1659 limit - (after.CombinedUsedInWords());
1660 const double estimated_garbage = k * allocated_before_next_gc;
1661 if (t <= estimated_garbage / limit) {
1662 max = local_grow_heap - 1;
1663 } else {
1664 min = local_grow_heap + 1;
1665 }
1666 }
1667 local_grow_heap = (max + min) / 2;
1668 grow_heap = local_grow_heap;
1669 ASSERT(grow_heap >= 0);
1670 // If we are going to grow by heap_grow_max_ then ensure that we
1671 // will be growing the heap at least by the growth ratio heuristics.
1672 if (grow_heap >= heap_growth_max_) {
1673 grow_heap = Utils::Maximum(grow_pages, grow_heap);
1674 }
1675 }
1676 } else {
1677 grow_heap = 0;
1678 }
1679 last_usage_ = after;
1680
1681 intptr_t max_capacity_in_words = heap_->old_space()->max_capacity_in_words_;
1682 if (max_capacity_in_words != 0) {
1683 ASSERT(grow_heap >= 0);
1684 // Fraction of asymptote used.
1685 double f = static_cast<double>(after.CombinedUsedInWords() +
1686 (kPageSizeInWords * grow_heap)) /
1687 static_cast<double>(max_capacity_in_words);
1688 ASSERT(f >= 0.0);
1689 // Increase weight at the high end.
1690 f = f * f;
1691 // Fraction of asymptote available.
1692 f = 1.0 - f;
1693 ASSERT(f <= 1.0);
1694 // Discount growth more the closer we get to the desired asymptote.
1695 grow_heap = static_cast<intptr_t>(grow_heap * f);
1696 // Minimum growth step after reaching the asymptote.
1697 intptr_t min_step = (2 * MB) / kPageSize;
1698 grow_heap = Utils::Maximum(min_step, grow_heap);
1699 }
1700
1701 RecordUpdate(before, after, grow_heap, "gc");
1702}
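The core of the growth controller above can be exercised in isolation. The sketch below (standalone; parameter names and the helper are illustrative, and used_in_words is assumed positive) estimates k = garbage/allocated and then binary-searches for the smallest number of extra pages such that a GC at the new limit would still be "worthwhile", i.e. the expected garbage fraction reaches t.

#include <cstdint>

// Smallest page count g in [0, max_pages] such that, assuming each newly
// allocated word produces k words of garbage, a GC after filling g extra
// pages is expected to free at least fraction `t` of the heap.
intptr_t MinWorthwhileGrowth(intptr_t used_in_words,
                             intptr_t page_size_in_words,
                             double k,  // garbage per allocated word, in [0,1]
                             double t,  // desired garbage fraction
                             intptr_t max_pages) {
  intptr_t min = 0;
  intptr_t max = max_pages;
  while (min < max) {
    const intptr_t mid = (min + max) / 2;
    const intptr_t limit = used_in_words + mid * page_size_in_words;
    const intptr_t allocated_before_next_gc = limit - used_in_words;
    const double estimated_garbage = k * allocated_before_next_gc;
    if (limit > 0 && t <= estimated_garbage / limit) {
      max = mid - 1;  // Already worthwhile; try growing less.
    } else {
      min = mid + 1;  // Not enough garbage expected; grow more.
    }
  }
  return (min + max) / 2;
}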
1703
1704void PageSpaceController::EvaluateAfterLoading(SpaceUsage after) {
1705 // Number of pages we can allocate and still be within the desired growth
1706 // ratio.
1707 intptr_t growth_in_pages;
1708 if (desired_utilization_ == 0.0) {
1709 growth_in_pages = heap_growth_max_;
1710 } else {
1711 growth_in_pages = (static_cast<intptr_t>(after.CombinedUsedInWords() /
1712 desired_utilization_) -
1713 (after.CombinedUsedInWords())) /
1714 kPageSizeInWords;
1715 }
1716
1717 // Apply growth cap.
1718 growth_in_pages =
1719 Utils::Minimum(static_cast<intptr_t>(heap_growth_max_), growth_in_pages);
1720
1721 RecordUpdate(after, after, growth_in_pages, "loaded");
1722}
1723
1724void PageSpaceController::RecordUpdate(SpaceUsage before,
1725 SpaceUsage after,
1726 intptr_t growth_in_pages,
1727 const char* reason) {
1728 // Save final threshold compared before growing.
1729 intptr_t threshold =
1730 after.CombinedUsedInWords() + (kPageSizeInWords * growth_in_pages);
1731
1732 bool concurrent_mark = FLAG_concurrent_mark && (FLAG_marker_tasks != 0);
1733 if (concurrent_mark) {
1734 soft_gc_threshold_in_words_ = threshold;
1735 hard_gc_threshold_in_words_ = kIntptrMax / kWordSize;
1736 } else {
1737 soft_gc_threshold_in_words_ = kIntptrMax / kWordSize;
1738 hard_gc_threshold_in_words_ = threshold;
1739 }
1740
1741 // Set a tight idle threshold.
1742 idle_gc_threshold_in_words_ =
1743 after.CombinedUsedInWords() + (2 * kPageSizeInWords);
1744
1745#if defined(SUPPORT_TIMELINE)
1746 Thread* thread = Thread::Current();
1747 if (thread != nullptr) {
1748 TIMELINE_FUNCTION_GC_DURATION(thread, "UpdateGrowthLimit");
1749 tbes.SetNumArguments(6);
1750 tbes.CopyArgument(0, "Reason", reason);
1751 tbes.FormatArgument(1, "Before.CombinedUsed (kB)", "%" Pd "",
1752                     RoundWordsToKB(before.CombinedUsedInWords()));
1753 tbes.FormatArgument(2, "After.CombinedUsed (kB)", "%" Pd "",
1754                     RoundWordsToKB(after.CombinedUsedInWords()));
1755 tbes.FormatArgument(3, "Hard Threshold (kB)", "%" Pd "",
1756 RoundWordsToKB(hard_gc_threshold_in_words_));
1757 tbes.FormatArgument(4, "Soft Threshold (kB)", "%" Pd "",
1758 RoundWordsToKB(soft_gc_threshold_in_words_));
1759 tbes.FormatArgument(5, "Idle Threshold (kB)", "%" Pd "",
1760 RoundWordsToKB(idle_gc_threshold_in_words_));
1761 }
1762#endif
1763
1764 if (FLAG_log_growth || FLAG_verbose_gc) {
1765 THR_Print("%s: hard_threshold=%" Pd "MB, soft_threshold=%" Pd
1766 "MB, idle_threshold=%" Pd "MB, reason=%s\n",
1767 heap_->isolate_group()->source()->name,
1768 RoundWordsToMB(hard_gc_threshold_in_words_),
1769 RoundWordsToMB(soft_gc_threshold_in_words_),
1770 RoundWordsToMB(idle_gc_threshold_in_words_), reason);
1771 }
1772}
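A compact restatement of the threshold policy recorded above (a sketch; the sentinel value and struct are illustrative, and the real code caps the no-limit value at kIntptrMax / kWordSize): with concurrent marking enabled the computed limit becomes the soft threshold that starts marking early, otherwise it becomes the hard, stop-the-world threshold; the idle threshold always sits two pages above the post-GC usage.

#include <cstdint>
#include <limits>

struct Thresholds {
  intptr_t hard_in_words;
  intptr_t soft_in_words;
  intptr_t idle_in_words;
};

Thresholds ComputeThresholds(intptr_t used_after_gc_in_words,
                             intptr_t growth_in_pages,
                             intptr_t page_size_in_words,
                             bool concurrent_mark) {
  const intptr_t kNoLimit = std::numeric_limits<intptr_t>::max();
  const intptr_t threshold =
      used_after_gc_in_words + growth_in_pages * page_size_in_words;
  Thresholds result;
  if (concurrent_mark) {
    result.soft_in_words = threshold;  // Start concurrent marking here.
    result.hard_in_words = kNoLimit;   // Avoid stop-the-world triggering.
  } else {
    result.soft_in_words = kNoLimit;
    result.hard_in_words = threshold;  // Stop-the-world GC at the limit.
  }
  // Idle GCs become eligible once usage exceeds the post-GC level by two pages.
  result.idle_in_words = used_after_gc_in_words + 2 * page_size_in_words;
  return result;
}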
1773
1774void PageSpaceGarbageCollectionHistory::AddGarbageCollectionTime(int64_t start,
1775                                                                 int64_t end) {
1776 Entry entry;
1777 entry.start = start;
1778 entry.end = end;
1779 history_.Add(entry);
1780}
1781
1782int PageSpaceGarbageCollectionHistory::GarbageCollectionTimeFraction() {
1783 int64_t gc_time = 0;
1784 int64_t total_time = 0;
1785 for (int i = 0; i < history_.Size() - 1; i++) {
1786 Entry current = history_.Get(i);
1787 Entry previous = history_.Get(i + 1);
1788 gc_time += current.end - current.start;
1789 total_time += current.end - previous.end;
1790 }
1791 if (total_time == 0) {
1792 return 0;
1793 } else {
1794 ASSERT(total_time >= gc_time);
1795 int result = static_cast<int>(
1796 (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100);
1797 return result;
1798 }
1799}
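The time-fraction computation walks adjacent history entries, newest first. A standalone version over a plain vector (a sketch, not the VM's ring buffer):

#include <cstdint>
#include <vector>

struct GcEntry {
  int64_t start;
  int64_t end;
};

// Percentage of wall-clock time spent in GC, where entries are ordered from
// most recent to oldest (index 0 is the newest collection).
int GcTimeFractionPercent(const std::vector<GcEntry>& history) {
  int64_t gc_time = 0;
  int64_t total_time = 0;
  for (size_t i = 0; i + 1 < history.size(); i++) {
    const GcEntry& current = history[i];
    const GcEntry& previous = history[i + 1];
    gc_time += current.end - current.start;       // Time inside this GC.
    total_time += current.end - previous.end;     // Time since the prior GC.
  }
  if (total_time == 0) return 0;
  return static_cast<int>(100.0 * static_cast<double>(gc_time) /
                          static_cast<double>(total_time));
}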
1800
1801} // namespace dart
Definition: pages.cc:679
intptr_t ExternalInWords() const
Definition: pages.h:212
void VisitObjectsUnsafe(ObjectVisitor *visitor) const
Definition: pages.cc:673
Phase phase() const
Definition: pages.h:341
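
A minimal sketch of the allocation path through TryAllocate above, assuming VM-internal headers; old_space is a hypothetical pointer obtained from the heap elsewhere, and 0 is taken to be the failure value implied by the uword return type.

#include "vm/heap/pages.h"

namespace dart {

// Hypothetical wrapper: try a controlled-growth allocation first; on failure
// the caller would normally request a collection and retry (not shown here).
uword TryAllocateOld(PageSpace* old_space, intptr_t size) {
  uword addr = old_space->TryAllocate(size);  // kControlGrowth by default
  if (addr == 0) {
    // Out of budget under the growth policy; a GC would be triggered here.
  }
  return addr;
}

}  // namespace dart
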
bool Contains(uword addr) const
Definition: page.h:107
void set_next(Page *next)
Definition: page.h:103
static constexpr intptr_t OldObjectStartOffset()
Definition: page.h:140
void add_live_bytes(intptr_t value)
Definition: page.h:126
static Page * Of(ObjectPtr obj)
Definition: page.h:162
@ kLarge
Definition: page.h:70
@ kVMIsolate
Definition: page.h:72
@ kExecutable
Definition: page.h:69
@ kImage
Definition: page.h:71
void WriteProtect(bool read_only)
Definition: page.cc:281
Page * next() const
Definition: page.h:102
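
Pages form singly linked lists via next()/set_next() above, and Contains() is an address-range check, so a linear membership scan is straightforward. A sketch; head is a hypothetical list head supplied by the caller.

#include "vm/heap/page.h"

namespace dart {

// Hypothetical helper: linear scan of a Page list for the page holding addr.
Page* FindPageFor(Page* head, uword addr) {
  for (Page* page = head; page != nullptr; page = page->next()) {
    if (page->Contains(addr)) return page;
  }
  return nullptr;
}

}  // namespace dart
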
const T & Get(int i) const
Definition: ring_buffer.h:22
int64_t Size() const
Definition: ring_buffer.h:28
void Add(const T &t)
Definition: ring_buffer.h:19
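
The ring buffer above exposes only Add/Get/Size. A self-contained stand-in with the same surface follows; the fixed capacity, template shape, and the newest-first ordering of Get are assumptions made for this example, not guarantees about the real class.

#include <cassert>
#include <cstdint>

// Stand-in fixed-capacity ring buffer, for illustration only.
template <typename T, int64_t N>
class ExampleRingBuffer {
 public:
  void Add(const T& t) { data_[(count_++) % N] = t; }
  // Get(0) returns the most recently added element (assumed convention).
  const T& Get(int i) const { return data_[(count_ - i - 1) % N]; }
  int64_t Size() const { return count_ < N ? count_ : N; }

 private:
  T data_[N] = {};
  int64_t count_ = 0;
};

int main() {
  ExampleRingBuffer<int, 4> history;
  for (int i = 1; i <= 6; i++) history.Add(i);  // keeps only the last 4
  assert(history.Size() == 4);
  assert(history.Get(0) == 6);  // newest
  assert(history.Get(3) == 3);  // oldest retained
  return 0;
}
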
void set_freed_in_words(intptr_t value)
Definition: scavenger.h:224
intptr_t UsedInWords() const
Definition: scavenger.h:160
Page * head() const
Definition: scavenger.h:237
intptr_t Value() const
Definition: object.h:9990
intptr_t CombinedUsedInWords() const
Definition: spaces.h:27
RelaxedAtomic< intptr_t > capacity_in_words
Definition: spaces.h:20
RelaxedAtomic< intptr_t > used_in_words
Definition: spaces.h:21
@ kScavengerTask
Definition: thread.h:352
static Thread * Current()
Definition: thread.h:362
bool OwnsGCSafepoint() const
Definition: thread.cc:1352
UnsafeExclusivePageIterator(const PageSpace *space)
Definition: pages.cc:528
static ObjectPtr FromAddr(uword addr)
Definition: raw_object.h:516
bool IsCardRemembered() const
Definition: raw_object.h:385
static bool IsMarked(uword tags)
Definition: raw_object.h:303
static uword ToAddr(const UntaggedObject *raw_obj)
Definition: raw_object.h:522
intptr_t HeapSize() const
Definition: raw_object.h:401
intptr_t VisitPointers(ObjectPointerVisitor *visitor)
Definition: raw_object.h:447
bool IsRemembered() const
Definition: raw_object.h:361
static bool IsEvacuationCandidate(uword tags)
Definition: raw_object.h:329
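
FromAddr(), untag(), and HeapSize() above are enough to step object by object through a contiguous address range. A hedged sketch, assuming VM-internal headers; the [first, last) bounds come from a hypothetical caller.

#include "vm/raw_object.h"
#include "vm/visitor.h"

namespace dart {

// Hypothetical walker: visit every object in [first, last), advancing by
// each object's heap size.
void VisitRange(uword first, uword last, ObjectVisitor* visitor) {
  uword addr = first;
  while (addr < last) {
    ObjectPtr obj = UntaggedObject::FromAddr(addr);
    visitor->VisitObject(obj);
    addr += obj->untag()->HeapSize();
  }
}

}  // namespace dart
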
static void UnregisterExecutablePage(Page *page)
static void RegisterExecutablePage(Page *page)
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static T Minimum(T x, T y)
Definition: utils.h:36
static constexpr T RoundUp(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:120
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:92
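
RoundUp and IsAligned above do the usual power-of-two alignment arithmetic. A small self-contained illustration; the 16 KB page size is an arbitrary example constant, not the VM's configured size.

#include <cstdint>

// Round x up to a multiple of a power-of-two alignment (same idea as
// Utils::RoundUp with offset == 0).
constexpr intptr_t RoundUpExample(intptr_t x, intptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  constexpr intptr_t kExamplePageSize = 16 * 1024;  // arbitrary for the demo
  constexpr intptr_t request = 70 * 1024;           // 70 KB request
  constexpr intptr_t rounded = RoundUpExample(request, kExamplePageSize);
  static_assert(rounded == 80 * 1024, "rounds up to 5 whole pages");
  static_assert(rounded % kExamplePageSize == 0, "i.e. IsAligned");
  return 0;
}
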
static intptr_t PageSize()
uword end() const
static VirtualMemory * ForImagePage(void *pointer, uword size)
#define THR_Print(format,...)
Definition: log.h:20
@ Dart_PerformanceMode_Latency
Definition: dart_api.h:1380
#define ASSERT(E)
#define FATAL(error)
dart
Definition: dart_vm.cc:33
constexpr intptr_t MB
Definition: globals.h:530
static constexpr intptr_t kOldObjectAlignmentOffset
constexpr double MicrosecondsToSeconds(int64_t micros)
Definition: globals.h:571
static constexpr intptr_t kPageSizeInWords
Definition: page.h:28
static constexpr intptr_t kPageSize
Definition: page.h:27
constexpr intptr_t RoundWordsToMB(intptr_t size_in_words)
Definition: globals.h:545
void * malloc(size_t size)
Definition: allocation.cc:19
@ kIllegalCid
Definition: class_id.h:214
@ kFreeListElement
Definition: class_id.h:224
constexpr intptr_t kWordSizeLog2
Definition: globals.h:507
uintptr_t uword
Definition: globals.h:501
static constexpr intptr_t kConservativeInitialMarkSpeed
Definition: pages.cc:53
DEFINE_FLAG(bool, print_cluster_information, false, "Print information about clusters written to snapshot")
static constexpr intptr_t kObjectAlignmentMask
@ kAllowMarked
Definition: verifier.h:21
@ kForbidMarked
Definition: verifier.h:21
constexpr intptr_t kWordSize
Definition: globals.h:509
static constexpr intptr_t kObjectAlignment
constexpr double MicrosecondsToMilliseconds(int64_t micros)
Definition: globals.h:574
bool IsAllocatableViaFreeLists(intptr_t size)
Definition: spaces.h:60
constexpr intptr_t RoundWordsToKB(intptr_t size_in_words)
Definition: globals.h:542
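
The word-to-KB/MB and microsecond helpers above are simple scalings. A standalone illustration follows; the 8-byte word size and truncating division are assumptions for the example, and the real helpers may round instead.

#include <cstdint>
#include <cstdio>

// Example-only reimplementations of the unit helpers referenced above.
constexpr int64_t kExampleWordSize = 8;  // assumes a 64-bit target

constexpr int64_t WordsToKB(int64_t words) {
  return (words * kExampleWordSize) / 1024;
}
constexpr int64_t WordsToMB(int64_t words) {
  return (words * kExampleWordSize) / (1024 * 1024);
}
constexpr double MicrosToMillis(int64_t micros) { return micros / 1000.0; }

int main() {
  std::printf("%lld kB, %lld MB\n",
              static_cast<long long>(WordsToKB(4096)),              // 32 kB
              static_cast<long long>(WordsToMB(4 * 1024 * 1024)));  // 32 MB
  std::printf("%.1f ms\n", MicrosToMillis(2500));                   // 2.5 ms
  return 0;
}
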
COMPILE_ASSERT(kUnreachableReference==WeakTable::kNoValue)
constexpr intptr_t kIntptrMax
Definition: globals.h:557
#define Px
Definition: globals.h:410
#define UNLIKELY(cond)
Definition: globals.h:261
#define Pd
Definition: globals.h:408
#define TIMELINE_FUNCTION_GC_DURATION(thread, name)
Definition: timeline.h:41