compactor.cc
// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/compactor.h"

#include "platform/atomic.h"
#include "vm/globals.h"
#include "vm/heap/become.h"
#include "vm/heap/heap.h"
#include "vm/heap/pages.h"
#include "vm/thread_barrier.h"
#include "vm/timeline.h"

namespace dart {

DEFINE_FLAG(bool,
            force_evacuation,
            false,
            "Force compaction to move every movable object");

// Each Page is divided into blocks of size kBlockSize. Each object belongs
// to the block containing its header word (so up to kBlockSize +
// kAllocatablePageSize - 2 * kObjectAlignment bytes belong to the same block).
// During compaction, all live objects in the same block will slide such that
// they all end up on the same Page, and all gaps within the block will be
// closed. During sliding, a bitvector is computed that indicates which
// allocation units are live, so the new address of any object in the block can
// be found by adding the number of live allocation units before the object to
// the block's new start address.
// Compare CountingBlock used for heap snapshot generation.
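//
// Illustrative example (assuming a 64-bit build with kObjectAlignment == 16):
// one ForwardingBlock then covers kBitsPerWord * kObjectAlignment ==
// 64 * 16 == 1024 bytes, one bit per 16-byte allocation unit. If bits 0..2 of
// the live bitvector are set and an object starts at block offset 0x40
// (unit 4), Lookup() counts 3 live units (48 bytes) before it and forwards
// the object to new_address_ + 48.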
class ForwardingBlock {
 public:
  void Clear() {
    new_address_ = 0;
    live_bitvector_ = 0;
  }

  uword Lookup(uword old_addr) const {
    uword block_offset = old_addr & ~kBlockMask;
    intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
    ASSERT(first_unit_position < kBitsPerWord);
    uword preceding_live_bitmask =
        (static_cast<uword>(1) << first_unit_position) - 1;
    uword preceding_live_bitset = live_bitvector_ & preceding_live_bitmask;
    uword preceding_live_bytes = Utils::CountOneBitsWord(preceding_live_bitset)
                                 << kObjectAlignmentLog2;
    return new_address_ + preceding_live_bytes;
  }

  // Marks a range of allocation units belonging to an object live by setting
  // the corresponding bits in this ForwardingBlock. Does not update the
  // new_address_ field; that is done after the total live size of the block is
  // known and forwarding location is chosen. Does not mark words in subsequent
  // ForwardingBlocks live for objects that extend into the next block.
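  // Illustrative example (assuming kObjectAlignment == 16): a 48-byte object
  // at block offset 32 occupies 3 allocation units starting at unit 2, so
  // RecordLive sets bits 2..4: ((1 << 3) - 1) << 2 == 0b11100.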
  void RecordLive(uword old_addr, intptr_t size) {
    intptr_t size_in_units = size >> kObjectAlignmentLog2;
    if (size_in_units >= kBitsPerWord) {
      size_in_units = kBitsPerWord - 1;
    }
    uword block_offset = old_addr & ~kBlockMask;
    intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
    ASSERT(first_unit_position < kBitsPerWord);
    live_bitvector_ |= ((static_cast<uword>(1) << size_in_units) - 1)
                       << first_unit_position;
  }

  bool IsLive(uword old_addr) const {
    uword block_offset = old_addr & ~kBlockMask;
    intptr_t first_unit_position = block_offset >> kObjectAlignmentLog2;
    ASSERT(first_unit_position < kBitsPerWord);
    return (live_bitvector_ & (static_cast<uword>(1) << first_unit_position)) !=
           0;
  }

  uword new_address() const { return new_address_; }
  void set_new_address(uword value) { new_address_ = value; }

 private:
  uword new_address_;
  uword live_bitvector_;
  COMPILE_ASSERT(kBitVectorWordsPerBlock == 1);

  DISALLOW_COPY_AND_ASSIGN(ForwardingBlock);
};

class ForwardingPage {
 public:
  void Clear() {
    for (intptr_t i = 0; i < kBlocksPerPage; i++) {
      blocks_[i].Clear();
    }
  }

  uword Lookup(uword old_addr) { return BlockFor(old_addr)->Lookup(old_addr); }

  ForwardingBlock* BlockFor(uword old_addr) {
    intptr_t page_offset = old_addr & ~kPageMask;
    intptr_t block_number = page_offset / kBlockSize;
    ASSERT(block_number >= 0);
    ASSERT(block_number <= kBlocksPerPage);
    return &blocks_[block_number];
  }

 private:
  ForwardingBlock blocks_[kBlocksPerPage];

  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(ForwardingPage);
};

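// The forwarding table for a page is stored within the page itself: the
// ForwardingPage is carved out of the unused space at the top of the Page by
// lowering top_, so it needs no separate allocation and disappears when the
// page is deallocated.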
void Page::AllocateForwardingPage() {
  ASSERT(forwarding_page_ == nullptr);
  ASSERT((object_start() + sizeof(ForwardingPage)) < object_end());
  ASSERT(Utils::IsAligned(sizeof(ForwardingPage), kObjectAlignment));
  top_ -= sizeof(ForwardingPage);
  forwarding_page_ = reinterpret_cast<ForwardingPage*>(top_.load());
}

struct Partition {
  Page* head;
  Page* tail;
};

class CompactorTask : public ThreadPool::Task {
 public:
  CompactorTask(IsolateGroup* isolate_group,
                GCCompactor* compactor,
                ThreadBarrier* barrier,
                RelaxedAtomic<intptr_t>* next_planning_task,
                RelaxedAtomic<intptr_t>* next_setup_task,
                RelaxedAtomic<intptr_t>* next_sliding_task,
                RelaxedAtomic<intptr_t>* next_forwarding_task,
                intptr_t num_tasks,
                Partition* partitions,
                FreeList* freelist)
      : isolate_group_(isolate_group),
        compactor_(compactor),
        barrier_(barrier),
        next_planning_task_(next_planning_task),
        next_setup_task_(next_setup_task),
        next_sliding_task_(next_sliding_task),
        next_forwarding_task_(next_forwarding_task),
        num_tasks_(num_tasks),
        partitions_(partitions),
        freelist_(freelist),
        free_page_(nullptr),
        free_current_(0),
        free_end_(0) {}

  void Run();
  void RunEnteredIsolateGroup();

 private:
  void PlanPage(Page* page);
  void SlidePage(Page* page);
  uword PlanBlock(uword first_object, ForwardingPage* forwarding_page);
  uword SlideBlock(uword first_object, ForwardingPage* forwarding_page);
  void PlanMoveToContiguousSize(intptr_t size);

  IsolateGroup* isolate_group_;
  GCCompactor* compactor_;
  ThreadBarrier* barrier_;
  RelaxedAtomic<intptr_t>* next_planning_task_;
  RelaxedAtomic<intptr_t>* next_setup_task_;
  RelaxedAtomic<intptr_t>* next_sliding_task_;
  RelaxedAtomic<intptr_t>* next_forwarding_task_;
  intptr_t num_tasks_;
  Partition* partitions_;
  FreeList* freelist_;
  Page* free_page_;
  uword free_current_;
  uword free_end_;

  DISALLOW_COPY_AND_ASSIGN(CompactorTask);
};

// Slides live objects down past free gaps, updates pointers and frees empty
// pages. Keeps cursors pointing to the next free and next live chunks, and
// repeatedly moves the next live chunk to the next free chunk, one block at a
// time, keeping blocks from spanning page boundaries (see ForwardingBlock).
// Free space at the end of a page that is too small for the next block is
// added to the freelist.
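//
// Rough sketch of one worker's partition (illustrative only; '#' = live data,
// '.' = dead or free space):
//
//   before sliding:  [##..#.##][.#...###][..#.....]
//   after sliding:   [########][##......][........]
//
// Within each block the survivors are packed to the new start address chosen
// during the planning phase; pages left entirely empty at the tail of each
// partition are deallocated once all workers have finished.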
void GCCompactor::Compact(Page* pages, FreeList* freelist, Mutex* pages_lock) {
  SetupImagePageBoundaries();

  // Divide the heap.
  // TODO(30978): Try to divide based on live bytes or with work stealing.
  intptr_t num_pages = 0;
  for (Page* page = pages; page != nullptr; page = page->next()) {
    num_pages++;
  }

  intptr_t num_tasks = FLAG_compactor_tasks;
  RELEASE_ASSERT(num_tasks >= 1);
  if (num_pages < num_tasks) {
    num_tasks = num_pages;
  }

  Partition* partitions = new Partition[num_tasks];

  {
    const intptr_t pages_per_task = num_pages / num_tasks;
    intptr_t task_index = 0;
    intptr_t page_index = 0;
    Page* page = pages;
    Page* prev = nullptr;
    while (task_index < num_tasks) {
      if (page_index % pages_per_task == 0) {
        partitions[task_index].head = page;
        partitions[task_index].tail = nullptr;
        if (prev != nullptr) {
          prev->set_next(nullptr);
        }
        task_index++;
      }
      prev = page;
      page = page->next();
      page_index++;
    }
    ASSERT(page_index <= num_pages);
    ASSERT(task_index == num_tasks);
  }

  if (FLAG_force_evacuation) {
    // Inject empty pages at the beginning of each worker's list to ensure all
    // objects move and all pages that used to have an object are released.
    // This can be helpful for finding untracked pointers because it prevents
    // an untracked pointer from getting lucky with its target not moving.
    bool oom = false;
    for (intptr_t task_index = 0; task_index < num_tasks && !oom;
         task_index++) {
      const intptr_t pages_per_task = num_pages / num_tasks;
      for (intptr_t j = 0; j < pages_per_task; j++) {
        Page* page = heap_->old_space()->AllocatePage(/* exec */ false,
                                                      /* link */ false);

        if (page == nullptr) {
          oom = true;
          break;
        }

        FreeListElement::AsElement(page->object_start(),
                                   page->object_end() - page->object_start());

        // The compactor slides down: add the empty pages to the beginning.
        page->set_next(partitions[task_index].head);
        partitions[task_index].head = page;
      }
    }
  }

  {
    ThreadBarrier* barrier = new ThreadBarrier(num_tasks, 1);
    RelaxedAtomic<intptr_t> next_planning_task = {0};
    RelaxedAtomic<intptr_t> next_setup_task = {0};
    RelaxedAtomic<intptr_t> next_sliding_task = {0};
    RelaxedAtomic<intptr_t> next_forwarding_task = {0};

    for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
      if (task_index < (num_tasks - 1)) {
        // Begin compacting on a helper thread.
        Dart::thread_pool()->Run<CompactorTask>(
            thread()->isolate_group(), this, barrier, &next_planning_task,
            &next_setup_task, &next_sliding_task, &next_forwarding_task,
            num_tasks, partitions, freelist);
      } else {
        // Last worker is the main thread.
        CompactorTask task(thread()->isolate_group(), this, barrier,
                           &next_planning_task, &next_setup_task,
                           &next_sliding_task, &next_forwarding_task, num_tasks,
                           partitions, freelist);
        task.RunEnteredIsolateGroup();
        barrier->Sync();
        barrier->Release();
      }
    }
  }

  // Update inner pointers in typed data views (needs to be done after all
  // threads are done with sliding since we need to access fields of the
  // view's backing store)
  //
  // (If the sliding compactor was single-threaded we could do this during the
  // sliding phase: The class id of the backing store can be either accessed by
  // looking at the already-slided-object or the not-yet-slided object. Though
  // with parallel sliding there is no safe way to access the backing store
  // object header.)
  {
    TIMELINE_FUNCTION_GC_DURATION(thread(),
                                  "ForwardTypedDataViewInternalPointers");
    const intptr_t length = typed_data_views_.length();
    for (intptr_t i = 0; i < length; ++i) {
      auto raw_view = typed_data_views_[i];
      const classid_t cid =
          raw_view->untag()->typed_data()->GetClassIdMayBeSmi();

      // If we have external typed data we can simply return, since the backing
      // store lives in C-heap and will not move. Otherwise we have to update
      // the inner pointer.
      if (IsTypedDataClassId(cid)) {
        raw_view->untag()->RecomputeDataFieldForInternalTypedData();
      } else {
        ASSERT(IsExternalTypedDataClassId(cid));
      }
    }
  }

  for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
    ASSERT(partitions[task_index].tail != nullptr);
  }

  {
    TIMELINE_FUNCTION_GC_DURATION(thread(), "ForwardStackPointers");
    ForwardStackPointers();
  }

  {
    TIMELINE_FUNCTION_GC_DURATION(thread(),
                                  "ForwardPostponedSuspendStatePointers");
    // After heap sliding is complete and ObjectStore pointers are forwarded
    // it is finally safe to visit SuspendState objects with copied frames.
    can_visit_stack_frames_ = true;
    const intptr_t length = postponed_suspend_states_.length();
    for (intptr_t i = 0; i < length; ++i) {
      auto suspend_state = postponed_suspend_states_[i];
      suspend_state->untag()->VisitPointers(this);
    }
  }

  heap_->old_space()->VisitRoots(this);

  {
    MutexLocker ml(pages_lock);

    // Free empty pages.
    for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
      Page* page = partitions[task_index].tail->next();
      while (page != nullptr) {
        Page* next = page->next();
        heap_->old_space()->IncreaseCapacityInWordsLocked(
            -(page->memory_->size() >> kWordSizeLog2));
        page->Deallocate();
        page = next;
      }
    }

    // Re-join the heap.
    for (intptr_t task_index = 0; task_index < num_tasks - 1; task_index++) {
      partitions[task_index].tail->set_next(partitions[task_index + 1].head);
    }
    partitions[num_tasks - 1].tail->set_next(nullptr);
    heap_->old_space()->pages_ = pages = partitions[0].head;
    heap_->old_space()->pages_tail_ = partitions[num_tasks - 1].tail;

    delete[] partitions;
  }
}

void CompactorTask::Run() {
  if (!barrier_->TryEnter()) {
    barrier_->Release();
    return;
  }

  bool result =
      Thread::EnterIsolateGroupAsHelper(isolate_group_, Thread::kCompactorTask,
                                        /*bypass_safepoint=*/true);
  ASSERT(result);

  RunEnteredIsolateGroup();

  Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);

  // This task is done. Notify the original thread.
  barrier_->Sync();
  barrier_->Release();
}

void CompactorTask::RunEnteredIsolateGroup() {
#ifdef SUPPORT_TIMELINE
  Thread* thread = Thread::Current();
#endif
  {
    isolate_group_->heap()->old_space()->SweepLarge();

    while (true) {
      intptr_t planning_task = next_planning_task_->fetch_add(1u);
      if (planning_task >= num_tasks_) break;

      TIMELINE_FUNCTION_GC_DURATION(thread, "Plan");
      Page* head = partitions_[planning_task].head;
      free_page_ = head;
      free_current_ = head->object_start();
      free_end_ = head->object_end();

      for (Page* page = head; page != nullptr; page = page->next()) {
        PlanPage(page);
      }
    }

    barrier_->Sync();

    if (next_setup_task_->fetch_add(1u) == 0) {
      compactor_->SetupLargePages();
    }

    barrier_->Sync();

    while (true) {
      intptr_t sliding_task = next_sliding_task_->fetch_add(1u);
      if (sliding_task >= num_tasks_) break;

      TIMELINE_FUNCTION_GC_DURATION(thread, "Slide");
      Page* head = partitions_[sliding_task].head;
      free_page_ = head;
      free_current_ = head->object_start();
      free_end_ = head->object_end();

      for (Page* page = head; page != nullptr; page = page->next()) {
        SlidePage(page);
      }

      // Add any leftover in the last used page to the freelist. This is
      // required to make the page walkable during forwarding, etc.
      intptr_t free_remaining = free_end_ - free_current_;
      if (free_remaining != 0) {
        freelist_->Free(free_current_, free_remaining);
      }

      ASSERT(free_page_ != nullptr);
      partitions_[sliding_task].tail = free_page_;  // Last live page.

      {
        TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardLargePages");
        compactor_->ForwardLargePages();
      }
    }

    // Heap: Regular pages already visited during sliding. Code and image pages
    // have no pointers to forward. Visit large pages and new-space.

    bool more_forwarding_tasks = true;
    while (more_forwarding_tasks) {
      intptr_t forwarding_task = next_forwarding_task_->fetch_add(1u);
      switch (forwarding_task) {
        case 0: {
          TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardNewSpace");
          isolate_group_->heap()->new_space()->VisitObjectPointers(compactor_);
          break;
        }
        case 1: {
          TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardRememberedSet");
          isolate_group_->store_buffer()->VisitObjectPointers(compactor_);
          break;
        }
        case 2: {
          TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakTables");
          isolate_group_->heap()->ForwardWeakTables(compactor_);
          break;
        }
        case 3: {
          TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardWeakHandles");
          isolate_group_->VisitWeakPersistentHandles(compactor_);
          break;
        }
#ifndef PRODUCT
        case 4: {
          TIMELINE_FUNCTION_GC_DURATION(thread, "ForwardObjectIdRing");
          isolate_group_->ForEachIsolate(
              [&](Isolate* isolate) {
                ObjectIdRing* ring = isolate->object_id_ring();
                if (ring != nullptr) {
                  ring->VisitPointers(compactor_);
                }
              },
              /*at_safepoint=*/true);
          break;
        }
#endif  // !PRODUCT
        default:
          more_forwarding_tasks = false;
      }
    }
  }
}

void CompactorTask::PlanPage(Page* page) {
  uword current = page->object_start();
  uword end = page->object_end();

  ForwardingPage* forwarding_page = page->forwarding_page();
  ASSERT(forwarding_page != nullptr);
  forwarding_page->Clear();
  while (current < end) {
    current = PlanBlock(current, forwarding_page);
  }
}

void CompactorTask::SlidePage(Page* page) {
  uword current = page->object_start();
  uword end = page->object_end();

  ForwardingPage* forwarding_page = page->forwarding_page();
  ASSERT(forwarding_page != nullptr);
  while (current < end) {
    current = SlideBlock(current, forwarding_page);
  }
}

// Plans the destination for a set of live objects starting with the first
// live object that starts in a block, up to and including the last live
// object that starts in that block.
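//
// Worked example (illustrative sizes): if a block contains a live 32-byte
// object, a dead 16-byte gap, and a live 64-byte object, block_live_size is
// 96, so the block is assigned the next 96 contiguous bytes at free_current_
// and the cursor advances by 96; the 16-byte gap is closed during sliding.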
uword CompactorTask::PlanBlock(uword first_object,
                               ForwardingPage* forwarding_page) {
  uword block_start = first_object & kBlockMask;
  uword block_end = block_start + kBlockSize;
  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  // 1. Compute bitvector of surviving allocation units in the block.
  intptr_t block_live_size = 0;
  uword current = first_object;
  while (current < block_end) {
    ObjectPtr obj = UntaggedObject::FromAddr(current);
    intptr_t size = obj->untag()->HeapSize();
    if (obj->untag()->IsMarked()) {
      forwarding_block->RecordLive(current, size);
      ASSERT(static_cast<intptr_t>(forwarding_block->Lookup(current)) ==
             block_live_size);
      block_live_size += size;
    }
    current += size;
  }

  // 2. Find the next contiguous space that can fit the live objects that
  //    start in the block.
  PlanMoveToContiguousSize(block_live_size);
  forwarding_block->set_new_address(free_current_);
  free_current_ += block_live_size;

  return current;  // First object in the next block
}

uword CompactorTask::SlideBlock(uword first_object,
                                ForwardingPage* forwarding_page) {
  uword block_start = first_object & kBlockMask;
  uword block_end = block_start + kBlockSize;
  ForwardingBlock* forwarding_block = forwarding_page->BlockFor(first_object);

  uword old_addr = first_object;
  while (old_addr < block_end) {
    ObjectPtr old_obj = UntaggedObject::FromAddr(old_addr);
    intptr_t size = old_obj->untag()->HeapSize();
    if (old_obj->untag()->IsMarked()) {
      uword new_addr = forwarding_block->Lookup(old_addr);
      if (new_addr != free_current_) {
        // The only situation where these two don't match is if we are moving
        // to a new page. But if we exactly hit the end of the previous page
        // then free_current could be at the start of the next page, so we
        // subtract 1.
        ASSERT(Page::Of(free_current_ - 1) != Page::Of(new_addr));
        intptr_t free_remaining = free_end_ - free_current_;
        // Add any leftover at the end of a page to the free list.
        if (free_remaining > 0) {
          freelist_->Free(free_current_, free_remaining);
        }
        free_page_ = free_page_->next();
        ASSERT(free_page_ != nullptr);
        free_current_ = free_page_->object_start();
        free_end_ = free_page_->object_end();
        ASSERT(free_current_ == new_addr);
      }
      ObjectPtr new_obj = UntaggedObject::FromAddr(new_addr);

      // Fast path for no movement. There's often a large block of objects at
      // the beginning that don't move.
      if (new_addr != old_addr) {
        // Slide the object down.
        memmove(reinterpret_cast<void*>(new_addr),
                reinterpret_cast<void*>(old_addr), size);

        if (IsTypedDataClassId(new_obj->GetClassId())) {
          static_cast<TypedDataPtr>(new_obj)->untag()->RecomputeDataField();
        }
      }
      new_obj->untag()->ClearMarkBit();
      new_obj->untag()->VisitPointers(compactor_);

      ASSERT(free_current_ == new_addr);
      free_current_ += size;
    } else {
      ASSERT(!forwarding_block->IsLive(old_addr));
    }
    old_addr += size;
  }

  return old_addr;  // First object in the next block.
}

void CompactorTask::PlanMoveToContiguousSize(intptr_t size) {
  // Move the free cursor to ensure 'size' bytes of contiguous space.
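  // For example (illustrative numbers): if only 80 bytes remain before
  // free_end_ but the block needs 96 live bytes, the cursor skips to the start
  // of the next page; the skipped 80-byte tail is added to the freelist later,
  // when the sliding pass crosses the same page boundary.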
  ASSERT(size <= kPageSize);

  // Check if the current free page has enough space.
  intptr_t free_remaining = free_end_ - free_current_;
  if (free_remaining < size) {
    // Not enough; advance to the next free page.
    free_page_ = free_page_->next();
    ASSERT(free_page_ != nullptr);
    free_current_ = free_page_->object_start();
    free_end_ = free_page_->object_end();
    free_remaining = free_end_ - free_current_;
    ASSERT(free_remaining >= size);
  }
}

void GCCompactor::SetupImagePageBoundaries() {
  MallocGrowableArray<ImagePageRange> ranges(4);

  Page* image_page =
      Dart::vm_isolate_group()->heap()->old_space()->image_pages_;
  while (image_page != nullptr) {
    ImagePageRange range = {image_page->object_start(),
                            image_page->object_end()};
    ranges.Add(range);
    image_page = image_page->next();
  }
  image_page = heap_->old_space()->image_pages_;
  while (image_page != nullptr) {
    ImagePageRange range = {image_page->object_start(),
                            image_page->object_end()};
    ranges.Add(range);
    image_page = image_page->next();
  }

  ranges.Sort(CompareImagePageRanges);
  intptr_t image_page_count;
  ranges.StealBuffer(&image_page_ranges_, &image_page_count);
  image_page_hi_ = image_page_count - 1;
}

DART_FORCE_INLINE
void GCCompactor::ForwardPointer(ObjectPtr* ptr) {
  ObjectPtr old_target = *ptr;
  if (old_target->IsImmediateOrNewObject()) {
    return;  // Not moved.
  }

  uword old_addr = UntaggedObject::ToAddr(old_target);
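  // Binary search the sorted image page ranges; objects inside image pages
  // are never moved by the compactor, so such targets are left untouched.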
  intptr_t lo = 0;
  intptr_t hi = image_page_hi_;
  while (lo <= hi) {
    intptr_t mid = (hi - lo + 1) / 2 + lo;
    ASSERT(mid >= lo);
    ASSERT(mid <= hi);
    if (old_addr < image_page_ranges_[mid].start) {
      hi = mid - 1;
    } else if (old_addr >= image_page_ranges_[mid].end) {
      lo = mid + 1;
    } else {
      return;  // Not moved (unaligned image page).
    }
  }

  Page* page = Page::Of(old_target);
  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {
    return;  // Not moved (VM isolate, large page, code page).
  }

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
  ASSERT(!new_target->IsImmediateOrNewObject());
  *ptr = new_target;
}

DART_FORCE_INLINE
void GCCompactor::ForwardCompressedPointer(uword heap_base,
                                           CompressedObjectPtr* ptr) {
  ObjectPtr old_target = ptr->Decompress(heap_base);
  if (old_target->IsImmediateOrNewObject()) {
    return;  // Not moved.
  }

  uword old_addr = UntaggedObject::ToAddr(old_target);
  intptr_t lo = 0;
  intptr_t hi = image_page_hi_;
  while (lo <= hi) {
    intptr_t mid = (hi - lo + 1) / 2 + lo;
    ASSERT(mid >= lo);
    ASSERT(mid <= hi);
    if (old_addr < image_page_ranges_[mid].start) {
      hi = mid - 1;
    } else if (old_addr >= image_page_ranges_[mid].end) {
      lo = mid + 1;
    } else {
      return;  // Not moved (unaligned image page).
    }
  }

  Page* page = Page::Of(old_target);
  ForwardingPage* forwarding_page = page->forwarding_page();
  if (forwarding_page == nullptr) {
    return;  // Not moved (VM isolate, large page, code page).
  }

  ObjectPtr new_target =
      UntaggedObject::FromAddr(forwarding_page->Lookup(old_addr));
  ASSERT(!new_target->IsImmediateOrNewObject());
  *ptr = new_target;
}

void GCCompactor::VisitTypedDataViewPointers(TypedDataViewPtr view,
                                             CompressedObjectPtr* first,
                                             CompressedObjectPtr* last) {
  // First we forward all fields of the typed data view.
  ObjectPtr old_backing = view->untag()->typed_data();
  VisitCompressedPointers(view->heap_base(), first, last);
  ObjectPtr new_backing = view->untag()->typed_data();

  const bool backing_moved = old_backing != new_backing;
  if (backing_moved) {
    // The backing store moved, so we *might* need to update the view's inner
    // pointer. If the backing store is internal typed data we *have* to update
    // it, otherwise (in case of external typed data) we don't have to.
    //
    // Unfortunately we cannot find out whether the backing store is internal
    // or external during sliding phase: Even though we know the old and new
    // location of the backing store another thread might be responsible for
    // moving it and we have no way to tell when it got moved.
    //
    // So instead we queue all those views up and fix their inner pointer in a
    // final phase after compaction.
    MutexLocker ml(&typed_data_view_mutex_);
    typed_data_views_.Add(view);
  } else {
    // The backing store didn't move, we therefore don't need to update the
    // inner pointer.
    if (view->untag()->data_ == nullptr) {
      ASSERT(RawSmiValue(view->untag()->offset_in_bytes()) == 0 &&
             RawSmiValue(view->untag()->length()) == 0 &&
             view->untag()->typed_data() == Object::null());
    }
  }
}

// N.B.: This pointer visitor is not idempotent. We must take care to visit
// each pointer exactly once.
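// (Forwarding maps an object's *old* address to its new one; once a slot has
// been updated it holds the new address, and forwarding it a second time
// would consult forwarding data for a different object.)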
void GCCompactor::VisitPointers(ObjectPtr* first, ObjectPtr* last) {
  for (ObjectPtr* ptr = first; ptr <= last; ptr++) {
    ForwardPointer(ptr);
  }
}

#if defined(DART_COMPRESSED_POINTERS)
void GCCompactor::VisitCompressedPointers(uword heap_base,
                                          CompressedObjectPtr* first,
                                          CompressedObjectPtr* last) {
  for (CompressedObjectPtr* ptr = first; ptr <= last; ptr++) {
    ForwardCompressedPointer(heap_base, ptr);
  }
}
#endif

bool GCCompactor::CanVisitSuspendStatePointers(SuspendStatePtr suspend_state) {
  if ((suspend_state->untag()->pc() != 0) && !can_visit_stack_frames_) {
    // Visiting pointers of SuspendState objects with copied stack frame
    // needs to query stack map, which can touch other Dart objects
    // (such as GrowableObjectArray of InstructionsTable).
    // Those objects may have an inconsistent state during compaction,
    // so processing of SuspendState objects is postponed to the later
    // stage of compaction.
    MutexLocker ml(&postponed_suspend_states_mutex_);
    postponed_suspend_states_.Add(suspend_state);
    return false;
  }
  return true;
}

void GCCompactor::VisitHandle(uword addr) {
  FinalizablePersistentHandle* handle =
      reinterpret_cast<FinalizablePersistentHandle*>(addr);
  ForwardPointer(handle->ptr_addr());
}

void GCCompactor::SetupLargePages() {
  large_pages_ = heap_->old_space()->large_pages_;
}

void GCCompactor::ForwardLargePages() {
  MutexLocker ml(&large_pages_mutex_);
  while (large_pages_ != nullptr) {
    Page* page = large_pages_;
    large_pages_ = page->next();
    ml.Unlock();
    page->VisitObjectPointers(this);
    ml.Lock();
  }
}

void GCCompactor::ForwardStackPointers() {
  // N.B.: Heap pointers have already been forwarded. We forward the heap before
  // forwarding the stack to limit the number of places that need to be aware of
  // forwarding when reading stack maps.
  isolate_group_->VisitObjectPointers(this,
                                      ValidationPolicy::kDontValidateFrames);
}

}  // namespace dart