compactor.cc
1// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/compactor.h"
6
7#include "platform/atomic.h"
8#include "vm/globals.h"
9#include "vm/heap/heap.h"
10#include "vm/heap/pages.h"
11#include "vm/heap/sweeper.h"
12#include "vm/thread_barrier.h"
13#include "vm/timeline.h"
14
15namespace dart {
16
17DEFINE_FLAG(bool,
18 force_evacuation,
19 false,
20 "Force compaction to move every movable object");
21
22// Each Page is divided into blocks of size kBlockSize. Each object belongs
23// to the block containing its header word (so up to kBlockSize +
24// kAllocatablePageSize - 2 * kObjectAlignment bytes belong to the same block).
25// During compaction, all live objects in the same block will slide such that
26// they all end up on the same Page, and all gaps within the block will be
27// closed. During sliding, a bitvector is computed that indicates which
28// allocation units are live, so the new address of any object in the block can
29// be found by adding the number of live allocation units before the object to
30// the block's new start address.
31// Compare CountingBlock used for heap snapshot generation.
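// For example, assuming 16-byte allocation units (kObjectAlignmentLog2 == 4),
// an object at block offset 0x30 occupies bit position 3 of its block's
// bitvector. If two of the bits below position 3 are live, Lookup() yields the
// block's new start address plus (2 << kObjectAlignmentLog2) == 32 bytes of
// preceding live data.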
32class ForwardingBlock {
33 public:
34 void Clear() {
35 new_address_ = 0;
36 live_bitvector_ = 0;
37 }
38
39 uword Lookup(uword old_addr) const {
40 uword block_offset = old_addr & ~kBlockMask;
41 intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
42 ASSERT(first_unit_position < kBitsPerWord);
43 uword preceding_live_bitmask =
44 (static_cast<uword>(1) << first_unit_position) - 1;
45 uword preceding_live_bitset = live_bitvector_ & preceding_live_bitmask;
46 uword preceding_live_bytes = Utils::CountOneBitsWord(preceding_live_bitset)
47 << kObjectAlignmentLog2;
48 return new_address_ + preceding_live_bytes;
49 }
50
51 // Marks a range of allocation units belonging to an object live by setting
52 // the corresponding bits in this ForwardingBlock. Does not update the
53 // new_address_ field; that is done after the total live size of the block is
54 // known and forwarding location is chosen. Does not mark words in subsequent
55 // ForwardingBlocks live for objects that extend into the next block.
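// For example, a 48-byte object at block offset 0x20 (three 16-byte allocation
// units starting at bit position 2) sets bits 2, 3 and 4 of live_bitvector_.
// The clamp to kBitsPerWord - 1 below only keeps the shift well-defined for
// objects spanning a whole block; any bit left unset at the top of such a
// block is never consulted, since no later object can start inside it.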
56 void RecordLive(uword old_addr, intptr_t size) {
57 intptr_t size_in_units = size >> kObjectAlignmentLog2;
58 if (size_in_units >= kBitsPerWord) {
59 size_in_units = kBitsPerWord - 1;
60 }
61 uword block_offset = old_addr & ~kBlockMask;
62 intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
63 ASSERT(first_unit_position < kBitsPerWord);
64 live_bitvector_ |= ((static_cast<uword>(1) << size_in_units) - 1)
65 << first_unit_position;
66 }
67
68 bool IsLive(uword old_addr) const {
69 uword block_offset = old_addr & ~kBlockMask;
70 intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
71 ASSERT(first_unit_position < kBitsPerWord);
72 return (live_bitvector_ & (static_cast<uword>(1) << first_unit_position)) !=
73 0;
74 }
75
76 uword new_address() const { return new_address_; }
77 void set_new_address(uword value) { new_address_ = value; }
78
79 private:
80 uword new_address_;
81 uword live_bitvector_;
82 COMPILE_ASSERT(kBitVectorWordsPerBlock == 1);
83
84 DISALLOW_COPY_AND_ASSIGN(ForwardingBlock);
85};
86
87class ForwardingPage {
88 public:
89 void Clear() {
90 for (intptr_t i = 0; i < kBlocksPerPage; i++) {
91 blocks_[i].Clear();
92 }
93 }
94
95 uword Lookup(uword old_addr) { return BlockFor(old_addr)->Lookup(old_addr); }
96
97 ForwardingBlock* BlockFor(uword old_addr) {
98 intptr_t page_offset = old_addr & ~kPageMask;
99 intptr_t block_number = page_offset / kBlockSize;
100 ASSERT(block_number >= 0);
101 ASSERT(block_number <= kBlocksPerPage);
102 return &blocks_[block_number];
103 }
104
105 private:
106 ForwardingBlock blocks_[kBlocksPerPage];
107
108 DISALLOW_ALLOCATION();
109 DISALLOW_IMPLICIT_CONSTRUCTORS(ForwardingPage);
110};
111
112void Page::AllocateForwardingPage() {
113 ASSERT(forwarding_page_ == nullptr);
114 ASSERT((object_start() + sizeof(ForwardingPage)) < object_end());
115 ASSERT(Utils::IsAligned(sizeof(ForwardingPage), kObjectAlignment));
116 top_ -= sizeof(ForwardingPage);
117 forwarding_page_ = reinterpret_cast<ForwardingPage*>(top_.load());
118}
119
120struct Partition {
121 Page* head;
122 Page* tail;
123};
124
125class CompactorTask : public ThreadPool::Task {
126 public:
127 CompactorTask(IsolateGroup* isolate_group,
128 GCCompactor* compactor,
129 ThreadBarrier* barrier,
130 RelaxedAtomic<intptr_t>* next_planning_task,
131 RelaxedAtomic<intptr_t>* next_setup_task,
132 RelaxedAtomic<intptr_t>* next_sliding_task,
133 RelaxedAtomic<intptr_t>* next_forwarding_task,
134 intptr_t num_tasks,
135 Partition* partitions,
136 FreeList* freelist)
137 : isolate_group_(isolate_group),
138 compactor_(compactor),
139 barrier_(barrier),
140 next_planning_task_(next_planning_task),
141 next_setup_task_(next_setup_task),
142 next_sliding_task_(next_sliding_task),
143 next_forwarding_task_(next_forwarding_task),
144 num_tasks_(num_tasks),
145 partitions_(partitions),
146 freelist_(freelist),
147 free_page_(nullptr),
148 free_current_(0),
149 free_end_(0) {}
150
151 void Run();
152 void RunEnteredIsolateGroup();
153
154 private:
155 void PlanPage(Page* page);
156 void SlidePage(Page* page);
157 uword PlanBlock(uword first_object, ForwardingPage* forwarding_page);
158 uword SlideBlock(uword first_object, ForwardingPage* forwarding_page);
159 void PlanMoveToContiguousSize(intptr_t size);
160
161 IsolateGroup* isolate_group_;
162 GCCompactor* compactor_;
163 ThreadBarrier* barrier_;
164 RelaxedAtomic<intptr_t>* next_planning_task_;
165 RelaxedAtomic<intptr_t>* next_setup_task_;
166 RelaxedAtomic<intptr_t>* next_sliding_task_;
167 RelaxedAtomic<intptr_t>* next_forwarding_task_;
168 intptr_t num_tasks_;
169 Partition* partitions_;
170 FreeList* freelist_;
171 Page* free_page_;
172 uword free_current_;
173 uword free_end_;
174
175 DISALLOW_COPY_AND_ASSIGN(CompactorTask);
176};
177
178// Slides live objects down past free gaps, updates pointers and frees empty
179// pages. Keeps cursors pointing to the next free and next live chunks, and
180// repeatedly moves the next live chunk to the next free chunk, one block at a
181// time, keeping blocks from spanning page boundaries (see ForwardingBlock).
182// Free space at the end of a page that is too small for the next block is
183// added to the freelist.
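// Roughly, each worker first plans its partition (filling in the
// ForwardingPage/ForwardingBlock tables), then slides marked objects to their
// planned addresses (clearing mark bits and forwarding pointers within the
// moved objects), and finally helps forward the remaining pointer sources:
// large pages, new-space, the remembered set, weak tables and weak handles.
// Stacks and roots are forwarded afterwards on the calling thread.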
184void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
185 SetupImagePageBoundaries();
186
187 Page* fixed_head = nullptr;
188 Page* fixed_tail = nullptr;
189
190 // Divide the heap, and set aside never-evacuate pages.
191 // TODO(30978): Try to divide based on live bytes or with work stealing.
192 intptr_t num_pages = 0;
193 Page* page = pages;
194 Page* prev = nullptr;
195 while (page != nullptr) {
196 Page* next = page->next();
197 if (page->is_never_evacuate()) {
198 if (prev != nullptr) {
199 prev->set_next(next);
200 } else {
201 pages = next;
202 }
203 if (fixed_tail == nullptr) {
204 fixed_tail = page;
205 }
206 page->set_next(fixed_head);
207 fixed_head = page;
208 } else {
209 prev = page;
210 num_pages++;
211 }
212 page = next;
213 }
214 fixed_pages_ = fixed_head;
215
216 intptr_t num_tasks = FLAG_compactor_tasks;
217 RELEASE_ASSERT(num_tasks >= 1);
218 if (num_pages < num_tasks) {
219 num_tasks = num_pages;
220 }
221 if (num_tasks == 0) {
222 ASSERT(pages == nullptr);
223
224 // Move pages to sweeper work lists.
225 heap_->old_space()->pages_ = nullptr;
226 heap_->old_space()->pages_tail_ = nullptr;
227 heap_->old_space()->sweep_regular_ = fixed_head;
228
229 heap_->old_space()->Sweep(/*exclusive*/ true);
230 heap_->old_space()->SweepLarge();
231 return;
232 }
233
234 Partition* partitions = new Partition[num_tasks];
235
236 {
237 const intptr_t pages_per_task = num_pages / num_tasks;
238 intptr_t task_index = 0;
239 intptr_t page_index = 0;
240 Page* page = pages;
241 Page* prev = nullptr;
242 while (task_index < num_tasks) {
243 ASSERT(!page->is_never_evacuate());
244 if (page_index % pages_per_task == 0) {
245 partitions[task_index].head = page;
246 partitions[task_index].tail = nullptr;
247 if (prev != nullptr) {
248 prev->set_next(nullptr);
249 }
250 task_index++;
251 }
252 prev = page;
253 page = page->next();
254 page_index++;
255 }
256 ASSERT(page_index <= num_pages);
257 ASSERT(task_index == num_tasks);
258 }
259
260 if (FLAG_force_evacuation) {
261 // Inject empty pages at the beginning of each worker's list to ensure all
262 // objects move and all pages that used to have an object are released.
263 // This can be helpful for finding untracked pointers because it prevents
264 // an untracked pointer from getting lucky with its target not moving.
265 bool oom = false;
266 for (intptr_t task_index = 0; task_index < num_tasks && !oom;
267 task_index++) {
268 const intptr_t pages_per_task = num_pages / num_tasks;
269 for (intptr_t j = 0; j < pages_per_task; j++) {
270 Page* page = heap_->old_space()->AllocatePage(/* exec */ false,
271 /* link */ false);
272
273 if (page == nullptr) {
274 oom = true;
275 break;
276 }
277
278 FreeListElement::AsElement(page->object_start(),
279 page->object_end() - page->object_start());
280
281 // The compactor slides down: add the empty pages to the beginning.
282 page->set_next(partitions[task_index].head);
283 partitions[task_index].head = page;
284 }
285 }
286 }
287
288 {
289 ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
290 RelaxedAtomic<intptr_t> next_planning_task = {0};
291 RelaxedAtomic<intptr_t> next_setup_task = {0};
292 RelaxedAtomic<intptr_t> next_sliding_task = {0};
293 RelaxedAtomic<intptr_t> next_forwarding_task = {0};
294
295 for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
296 if (task_index < (num_tasks - 1)) {
297 // Begin compacting on a helper thread.
298 Dart::thread_pool()->Run<CompactorTask>(
299 thread()->isolate_group(), this, barrier, &next_planning_task,
300 &next_setup_task, &next_sliding_task, &next_forwarding_task,
301 num_tasks, partitions, freelist);
302 } else {
303 // Last worker is the main thread.
304 CompactorTask task(thread()->isolate_group(), this, barrier,
305 &next_planning_task, &next_setup_task,
306 &next_sliding_task, &next_forwarding_task, num_tasks,
307 partitions, freelist);
308 task.RunEnteredIsolateGroup();
309 barrier->Sync();
310 barrier->Release();
311 }
312 }
313 }
314
315 // Update inner pointers in typed data views (needs to be done after all
316 // threads are done with sliding since we need to access fields of the
317 // view's backing store)
318 //
319 // (If the sliding compactor was single-threaded we could do this during the
320 // sliding phase: The class id of the backing store can be either accessed by
321 // looking at the already-slided-object or the not-yet-slided object. Though
322 // with parallel sliding there is no safe way to access the backing store
323 // object header.)
324 {
325 TIMELINE_FUNCTION_GC_DURATION(thread(),
326 "ForwardTypedDataViewInternalPointers");
327 const intptr_t length = typed_data_views_.length();
328 for (intptr_t i = 0; i < length; ++i) {
329 auto raw_view = typed_data_views_[i];
330 const classid_t cid =
331 raw_view->untag()->typed_data()->GetClassIdMayBeSmi();
332
333 // If we have external typed data we can simply return, since the backing
334 // store lives in C-heap and will not move. Otherwise we have to update
335 // the inner pointer.
336 if (IsTypedDataClassId(cid)) {
337 raw_view->untag()->RecomputeDataFieldForInternalTypedData();
338 } else {
339 ASSERT(IsExternalTypedDataClassId(cid));
340 }
341 }
342 }
343
344 for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
345 ASSERT(partitions[task_index].tail != nullptr);
346 }
347
348 {
349 TIMELINE_FUNCTION_GC_DURATION(thread(), "ForwardStackPointers");
350 ForwardStackPointers();
351 }
352
353 {
354 TIMELINE_FUNCTION_GC_DURATION(thread(),
355 "ForwardPostponedSuspendStatePointers");
356 // After heap sliding is complete and ObjectStore pointers are forwarded
357 // it is finally safe to visit SuspendState objects with copied frames.
358 can_visit_stack_frames_ = true;
359 const intptr_t length = postponed_suspend_states_.length();
360 for (intptr_t i = 0; i < length; ++i) {
361 auto suspend_state = postponed_suspend_states_[i];
362 suspend_state->untag()->VisitPointers(this);
363 }
364 }
365
366 heap_->old_space()->VisitRoots(this);
367
368 {
369 MutexLocker ml(pages_lock);
370
371 // Free empty pages.
372 for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
373 Page* page = partitions[task_index].tail->next();
374 while (page != nullptr) {
375 Page* next = page->next();
376 heap_->old_space()->IncreaseCapacityInWordsLocked(
377 -(page->memory_->size() >> kWordSizeLog2));
378 page->Deallocate();
379 page = next;
380 }
381 }
382
383 // Re-join the heap.
384 for (intptr_t task_index = 0; task_index < num_tasks - 1; task_index++) {
385 partitions[task_index].tail->set_next(partitions[task_index + 1].head);
386 }
387 partitions[num_tasks - 1].tail->set_next(nullptr);
388 heap_->old_space()->pages_ = pages = partitions[0].head;
389 heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;
390 if (fixed_head != nullptr) {
391 fixed_tail->set_next(heap_->old_space()->pages_);
392 heap_->old_space()->pages_ = fixed_head;
393
394 ASSERT(heap_->old_space()->pages_tail_ != nullptr);
395 }
396
397 delete[] partitions;
398 }
399}
400
401void CompactorTask::Run() {
402 if (!barrier_->TryEnter()) {
403 barrier_->Release();
404 return;
405 }
406
407 bool result =
408 Thread::EnterIsolateGroupAsHelper(isolate_group_, Thread::kCompactorTask,
409 /*bypass_safepoint=*/true);
410 ASSERT(result);
411
412 RunEnteredIsolateGroup();
413
414 Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
415
416 // This task is done. Notify the original thread.
417 barrier_->Sync();
418 barrier_->Release();
419}
420
421void CompactorTask::RunEnteredIsolateGroup() {
422#ifdef SUPPORT_TIMELINE
423 Thread* thread = Thread::Current();
424#endif
425 {
426 isolate_group_->heap()->old_space()->SweepLarge();
427
428 while (true) {
429 intptr_t planning_task = next_planning_task_->fetch_add(1u);
430 if (planning_task >= num_tasks_) break;
431
432 TIMELINE_FUNCTION_GC_DURATION(thread, "Plan");
433 Page* head = partitions_[planning_task].head;
434 free_page_ = head;
435 free_current_ = head->object_start();
436 free_end_ = head->object_end();
437
438 for (Page* page = head; page != nullptr; page = page->next()) {
439 PlanPage(page);
440 }
441 }
442
443 barrier_->Sync();
444
445 if (next_setup_task_->fetch_add(1u) == 0) {
446 compactor_->SetupLargePages();
447 }
448
449 barrier_->Sync();
450
451 while (true) {
452 intptr_t sliding_task = next_sliding_task_->fetch_add(1u);
453 if (sliding_task >= num_tasks_) break;
454
455 TIMELINE_FUNCTION_GC_DURATION(thread, "Slide");
456 Page* head = partitions_[sliding_task].head;
457 free_page_ = head;
458 free_current_ = head->object_start();
459 free_end_ = head->object_end();
460
461 for (Page* page = head; page != nullptr; page = page->next()) {
462 SlidePage(page);
463 }
464
465 // Add any leftover in the last used page to the freelist. This is
466 // required to make the page walkable during forwarding, etc.
467 intptr_t free_remaining = free_end_ - free_current_;
468 if (free_remaining != 0) {
469 freelist_->Free(free_current_, free_remaining);
470 }
471
472 ASSERT(free_page_ != nullptr);
473 partitions_[sliding_task].tail = free_page_; // Last live page.
474
475 {
476 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardLargePages");
477 compactor_->ForwardLargePages();
478 }
479 }
480
481 // Heap: Regular pages already visited during sliding. Code and image pages
482 // have no pointers to forward. Visit large pages and new-space.
483
484 bool more_forwarding_tasks = true;
485 while (more_forwarding_tasks) {
486 intptr_t forwarding_task = next_forwarding_task_->fetch_add(1u);
487 switch (forwarding_task) {
488 case 0: {
489 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardNewSpace");
490 isolate_group_->heap()->new_space()->VisitObjectPointers(compactor_);
491 break;
492 }
493 case 1: {
494 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardRememberedSet");
495 isolate_group_->store_buffer()->VisitObjectPointers(compactor_);
496 break;
497 }
498 case 2: {
499 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakTables");
500 isolate_group_->heap()->ForwardWeakTables(compactor_);
501 break;
502 }
503 case 3: {
504 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakHandles");
505 isolate_group_->VisitWeakPersistentHandles(compactor_);
506 break;
507 }
508#ifndef PRODUCT
509 case 4: {
510 TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardObjectIdRing");
511 isolate_group_->ForEachIsolate(
512 [&](Isolate* isolate) {
513 ObjectIdRing* ring = isolate->object_id_ring();
514 if (ring != nullptr) {
515 ring->VisitPointers(compactor_);
516 }
517 },
518 /*at_safepoint=*/true);
519 break;
520 }
521#endif // !PRODUCT
522 default:
523 more_forwarding_tasks = false;
524 }
525 }
526 }
527}
528
529void CompactorTask::PlanPage(Page* page) {
530 ASSERT(!page->is_never_evacuate());
531 uword current = page->object_start();
532 uword end = page->object_end();
533
534 ForwardingPage* forwarding_page = page->forwarding_page();
535 ASSERT(forwarding_page != nullptr);
536 forwarding_page->Clear();
537 while (current < end) {
538 current = PlanBlock(current, forwarding_page);
539 }
540}
541
542void CompactorTask::SlidePage(Page* page) {
543 ASSERT(!page->is_never_evacuate());
544 uword current = page->object_start();
545 uword end = page->object_end();
546
547 ForwardingPage* forwarding_page = page->forwarding_page();
548 ASSERT(forwarding_page != nullptr);
549 while (current < end) {
550 current = SlideBlock(current, forwarding_page);
551 }
552}
553
554// Plans the destination for a set of live objects starting with the first
555// live object that starts in a block, up to and including the last live
556// object that starts in that block.
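// For example, a block containing a marked 32-byte object, an unmarked
// 48-byte object and a marked 64-byte object has block_live_size == 96; the
// two survivors are planned to land contiguously at the block's forwarding
// target, the second at new_address_ + 32.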
557uword CompactorTask::PlanBlock(uword first_object,
558 ForwardingPage* forwarding_page) {
559 uword block_start = first_object & kBlockMask;
560 uword block_end = block_start + kBlockSize;
561 ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);
562
563 // 1. Compute bitvector of surviving allocation units in the block.
564 intptr_t block_live_size = 0;
565 uword current = first_object;
566 while (current < block_end) {
567 ObjectPtr obj = UntaggedObject::FromAddr(current);
568 intptr_t size = obj->untag()->HeapSize();
569 if (obj->untag()->IsMarked()) {
570 forwarding_block->RecordLive(current, size);
571 ASSERT(static_cast<intptr_t>(forwarding_block->Lookup(current)) ==
572 block_live_size);
573 block_live_size += size;
574 }
575 current += size;
576 }
577
578 // 2. Find the next contiguous space that can fit the live objects that
579 // start in the block.
580 PlanMoveToContiguousSize(block_live_size);
581 forwarding_block->set_new_address(free_current_);
582 free_current_ += block_live_size;
583
584 return current; // First object in the next block
585}
586
587uword CompactorTask::SlideBlock(uword first_object,
588 ForwardingPage* forwarding_page) {
589 uword block_start = first_object & kBlockMask;
590 uword block_end = block_start + kBlockSize;
591 ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);
592
593 uword old_addr = first_object;
594 while (old_addr < block_end) {
595 ObjectPtr old_obj = UntaggedObject::FromAddr(old_addr);
596 intptr_t size = old_obj->untag()->HeapSize();
597 if (old_obj->untag()->IsMarked()) {
598 uword new_addr = forwarding_block->Lookup(old_addr);
599 if (new_addr != free_current_) {
600 // The only situation where these two don't match is if we are moving
601 // to a new page. But if we exactly hit the end of the previous page
602 // then free_current could be at the start of the next page, so we
603 // subtract 1.
604 ASSERT(Page::Of(free_current_ - 1) != Page::Of(new_addr));
605 intptr_t free_remaining = free_end_ - free_current_;
606 // Add any leftover at the end of a page to the free list.
607 if (free_remaining > 0) {
608 freelist_->Free(free_current_, free_remaining);
609 }
610 free_page_ = free_page_->next();
611 ASSERT(free_page_ != nullptr);
612 free_current_ = free_page_->object_start();
613 free_end_ = free_page_->object_end();
614 ASSERT(free_current_ == new_addr);
615 }
616 ObjectPtr new_obj = UntaggedObject::FromAddr(new_addr);
617
618 // Fast path for no movement. There's often a large block of objects at
619 // the beginning that don't move.
620 if (new_addr != old_addr) {
621 // Slide the object down.
622 memmove(reinterpret_cast<void*>(new_addr),
623 reinterpret_cast<void*>(old_addr), size);
624
625 if (IsTypedDataClassId(new_obj->GetClassId())) {
626 static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();
627 }
628 }
629 new_obj->untag()->ClearMarkBit();
630 new_obj->untag()->VisitPointers(compactor_);
631
632 ASSERT(free_current_ == new_addr);
633 free_current_ += size;
634 } else {
635 ASSERT(!forwarding_block->IsLive(old_addr));
636 }
637 old_addr += size;
638 }
639
640 return old_addr; // First object in the next block.
641}
642
643void CompactorTask::PlanMoveToContiguousSize(intptr_t size) {
644 // Move the free cursor to ensure 'size' bytes of contiguous space.
645 ASSERT(size <= kPageSize);
646
647 // Check if the current free page has enough space.
648 intptr_t free_remaining = free_end_ - free_current_;
649 if (free_remaining < size) {
650 // Not enough; advance to the next free page.
651 free_page_ = free_page_->next();
652 ASSERT(free_page_ != nullptr);
653 free_current_ = free_page_->object_start();
654 free_end_ = free_page_->object_end();
655 free_remaining = free_end_ - free_current_;
656 ASSERT(free_remaining >= size);
657 }
658}
659
660void GCCompactor::SetupImagePageBoundaries() {
661 MallocGrowableArray<ImagePageRange> ranges(4);
662
663 Page* image_page =
664 Dart::vm_isolate_group()->heap()->old_space()->image_pages_;
665 while (image_page != nullptr) {
666 ImagePageRange range = {image_page->object_start(),
667 image_page->object_end()};
668 ranges.Add(range);
669 image_page = image_page->next();
670 }
671 image_page = heap_->old_space()->image_pages_;
672 while (image_page != nullptr) {
673 ImagePageRange range = {image_page->object_start(),
674 image_page->object_end()};
675 ranges.Add(range);
676 image_page = image_page->next();
677 }
678
679 ranges.Sort(CompareImagePageRanges);
680 intptr_t image_page_count;
681 ranges.StealBuffer(&image_page_ranges_, &image_page_count);
682 image_page_hi_ = image_page_count - 1;
683}
684
685DART_FORCE_INLINE
686void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
687 ObjectPtr old_target = *ptr;
688 if (old_target->IsImmediateOrNewObject()) {
689 return; // Not moved.
690 }
691
692 uword old_addr = UntaggedObject::ToAddr(old_target);
693 intptr_t lo = 0;
694 intptr_t hi = image_page_hi_;
695 while (lo <= hi) {
696 intptr_t mid = (hi - lo + 1) / 2 + lo;
697 ASSERT(mid >= lo);
698 ASSERT(mid <= hi);
699 if (old_addr < image_page_ranges_[mid].start) {
700 hi = mid - 1;
701 } else if (old_addr >= image_page_ranges_[mid].end) {
702 lo = mid + 1;
703 } else {
704 return; // Not moved (unaligned image page).
705 }
706 }
707
708 Page* page = Page::Of(old_target);
709 ForwardingPage* forwarding_page = page->forwarding_page();
710 if (forwarding_page == nullptr) {
711 return; // Not moved (VM isolate, large page, code page).
712 }
713 if (page->is_never_evacuate()) {
714 // Forwarding page is non-NULL since one is still reserved for use as a
715 // counting page, but it doesn't have forwarding information.
716 return;
717 }
718
719 ObjectPtr new_target =
720 UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
721 ASSERT(!new_target->IsImmediateOrNewObject());
722 *ptr = new_target;
723}
724
725DART_FORCE_INLINE
726void GCCompactor::ForwardCompressedPointer(uword heap_base,
727 CompressedObjectPtr* ptr) {
728 ObjectPtr old_target = ptr->Decompress(heap_base);
729 if (old_target->IsImmediateOrNewObject()) {
730 return; // Not moved.
731 }
732
733 uword old_addr = UntaggedObject::ToAddr(old_target);
734 intptr_t lo = 0;
735 intptr_t hi = image_page_hi_;
736 while (lo <= hi) {
737 intptr_t mid = (hi - lo + 1) / 2 + lo;
738 ASSERT(mid >= lo);
739 ASSERT(mid <= hi);
740 if (old_addr < image_page_ranges_[mid].start) {
741 hi = mid - 1;
742 } else if (old_addr >= image_page_ranges_[mid].end) {
743 lo = mid + 1;
744 } else {
745 return; // Not moved (unaligned image page).
746 }
747 }
748
749 Page* page = Page::Of(old_target);
750 ForwardingPage* forwarding_page = page->forwarding_page();
751 if (forwarding_page == nullptr) {
752 return; // Not moved (VM isolate, large page, code page).
753 }
754 if (page->is_never_evacuate()) {
755 // Forwarding page is non-NULL since one is still reserved for use as a
756 // counting page, but it doesn't have forwarding information.
757 return;
758 }
759
760 ObjectPtr new_target =
761 UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
762 ASSERT(!new_target->IsImmediateOrNewObject());
763 *ptr = new_target;
764}
765
766void GCCompactor::VisitTypedDataViewPointers(TypedDataViewPtr view,
767 CompressedObjectPtr* first,
768 CompressedObjectPtr* last) {
769 // First we forward all fields of the typed data view.
770 ObjectPtr old_backing = view->untag()->typed_data();
771 VisitCompressedPointers(view->heap_base(), first, last);
772 ObjectPtr new_backing = view->untag()->typed_data();
773
774 const bool backing_moved = old_backing != new_backing;
775 if (backing_moved) {
776 // The backing store moved, so we *might* need to update the view's inner
777 // pointer. If the backing store is internal typed data we *have* to update
778 // it, otherwise (in case of external typed data) we don't have to.
779 //
780 // Unfortunately we cannot find out whether the backing store is internal
781 // or external during sliding phase: Even though we know the old and new
782 // location of the backing store another thread might be responsible for
783 // moving it and we have no way to tell when it got moved.
784 //
785 // So instead we queue all those views up and fix their inner pointer in a
786 // final phase after compaction.
787 MutexLocker ml(&typed_data_view_mutex_);
788 typed_data_views_.Add(view);
789 } else {
790 // The backing store didn't move, we therefore don't need to update the
791 // inner pointer.
792 if (view->untag()->data_ == nullptr) {
793 ASSERT(RawSmiValue(view->untag()->offset_in_bytes()) == 0 &&
794 RawSmiValue(view->untag()->length()) == 0 &&
795 view->untag()->typed_data() == Object::null());
796 }
797 }
798}
799
800// N.B.: This pointer visitor is not idempotent. We must take care to visit
801// each pointer exactly once.
802void GCCompactor::VisitPointers(ObjectPtr* first, ObjectPtr* last) {
803 for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
804 ForwardPointer(ptr);
805 }
806}
807
808#if defined(DART_COMPRESSED_POINTERS)
809void GCCompactor::VisitCompressedPointers(uword heap_base,
810 CompressedObjectPtr* first,
811 CompressedObjectPtr* last) {
812 for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
813 ForwardCompressedPointer(heap_base, ptr);
814 }
815}
816#endif
817
818bool GCCompactor::CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) {
819 if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
820 // Visiting pointers of SuspendState objects with copied stack frame
821 // needs to query stack map, which can touch other Dart objects
822 // (such as GrowableObjectArray of InstructionsTable).
823 // Those objects may have an inconsistent state during compaction,
824 // so processing of SuspendState objects is postponed to the later
825 // stage of compaction.
826 MutexLocker ml(&postponed_suspend_states_mutex_);
827 postponed_suspend_states_.Add(suspend_state);
828 return false;
829 }
830 return true;
831}
832
833void GCCompactor::VisitHandle(uword addr) {
834 FinalizablePersistentHandle* handle =
835 reinterpret_cast<FinalizablePersistentHandle*>(addr);
836 ForwardPointer(handle->ptr_addr());
837}
838
839void GCCompactor::SetupLargePages() {
840 large_pages_ = heap_->old_space()->large_pages_;
841}
842
843void GCCompactor::ForwardLargePages() {
844 MutexLocker ml(&large_pages_mutex_);
845 while (large_pages_ != nullptr) {
846 Page* page = large_pages_;
847 large_pages_ = page->next();
848 ml.Unlock();
849 page->VisitObjectPointers(this);
850 ml.Lock();
851 }
852 while (fixed_pages_ != nullptr) {
853 Page* page = fixed_pages_;
854 fixed_pages_ = page->next();
855 ml.Unlock();
856
857 GCSweeper sweeper;
858 FreeList* freelist = heap_->old_space()->DataFreeList(0);
859 bool page_in_use;
860 {
861 MutexLocker ml(freelist->mutex());
862 page_in_use = sweeper.SweepPage(page, freelist);
863 }
864 ASSERT(page_in_use);
865
866 page->VisitObjectPointers(this);
867
868 ml.Lock();
869 }
870}
871
872void GCCompactor::ForwardStackPointers() {
873 // N.B.: Heap pointers have already been forwarded. We forward the heap before
874 // forwarding the stack to limit the number of places that need to be aware of
875 // forwarding when reading stack maps.
876 isolate_group()->VisitObjectPointers(this,
877 ValidationPolicy::kDontValidateFrames);
878}
879
880} // namespace dart