Flutter Engine
sweeper.cc
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/sweeper.h"

#include "vm/globals.h"
#include "vm/heap/freelist.h"
#include "vm/heap/heap.h"
#include "vm/heap/pages.h"
#include "vm/heap/safepoint.h"
#include "vm/lockers.h"
#include "vm/thread_pool.h"
#include "vm/timeline.h"

namespace dart {

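// Sweeps a single new-space page in place: surviving (marked) objects have
// their mark bits cleared, and each run of dead objects is coalesced into one
// free-list element. Returns the number of free bytes on the page.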
intptr_t GCSweeper::SweepNewPage(Page* page) {
  ASSERT(!page->is_image());
  ASSERT(!page->is_old());
  ASSERT(!page->is_executable());

  uword start = page->object_start();
  uword end = page->object_end();
  uword current = start;
  intptr_t free = 0;
  while (current < end) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
    ASSERT(Page::Of(raw_obj) == page);
    uword tags = raw_obj->untag()->tags_.load(std::memory_order_relaxed);
    intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
    if (UntaggedObject::IsMarked(tags)) {
      // Found marked object. Clear the mark bit and update swept bytes.
      raw_obj->untag()->ClearMarkBitUnsynchronized();
      ASSERT(IsAllocatableInNewSpace(obj_size));
    } else {
      uword free_end = current + obj_size;
      while (free_end < end) {
        ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
        tags = next_obj->untag()->tags_.load(std::memory_order_relaxed);
        if (UntaggedObject::IsMarked(tags)) {
          // Reached the end of the free block.
          break;
        }
        // Expand the free block by the size of this object.
        free_end += next_obj->untag()->HeapSize(tags);
      }
      obj_size = free_end - current;
#if defined(DEBUG)
      memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
      FreeListElement::AsElementNew(current, obj_size);
      free += obj_size;
    }
    current += obj_size;
  }
  return free;
}

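// Sweeps one old-space page, returning runs of dead objects to the given
// freelist. Returns true if the page still holds live objects, false if it
// is entirely free and can be released by the caller.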
bool GCSweeper::SweepPage(Page* page, FreeList* freelist) {
  ASSERT(!page->is_image());
  // Large executable pages are handled here. We never truncate Instructions
  // objects, so we never truncate executable pages.
  ASSERT(!page->is_large() || page->is_executable());
  DEBUG_ASSERT(freelist->mutex()->IsOwnedByCurrentThread());

  // Keep track whether this page is still in use.
  intptr_t used_in_bytes = 0;

  bool is_executable = page->is_executable();
  uword start = page->object_start();
  uword end = page->object_end();
  uword current = start;
  const bool dontneed_on_sweep = FLAG_dontneed_on_sweep;
  const uword page_size = VirtualMemory::PageSize();

  while (current < end) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
    ASSERT(Page::Of(raw_obj) == page);
    // These acquire operations balance release operations in array
    // truncation, ensuring the writes creating the filler object are ordered
    // before the writes inserting the filler object into the freelist.
    uword tags = raw_obj->untag()->tags_.load(std::memory_order_acquire);
    intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
    if (UntaggedObject::IsMarked(tags)) {
      // Found marked object. Clear the mark bit and update swept bytes.
      raw_obj->untag()->ClearMarkBit();
      used_in_bytes += obj_size;
      // Large objects should never appear on regular pages.
      ASSERT(IsAllocatableViaFreeLists(obj_size) || page->is_large());
    } else {
      uword free_end = current + obj_size;
      while (free_end < end) {
        ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
        tags = next_obj->untag()->tags_.load(std::memory_order_acquire);
        if (UntaggedObject::IsMarked(tags)) {
          // Reached the end of the free block.
          break;
        }
        // Expand the free block by the size of this object.
        free_end += next_obj->untag()->HeapSize(tags);
      }
      // Only add to the free list if not covering the whole page.
      if ((current == start) && (free_end == end)) {
        return false;  // Not in use.
      }
      obj_size = free_end - current;
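      // On executable pages, fill the dead range with break instructions
      // instead of zap bytes, so a stale code address traps rather than
      // executing leftover instruction bytes.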
      if (is_executable) {
        uword cursor = current;
        uword end = current + obj_size;
        while (cursor < end) {
          *reinterpret_cast<uword*>(cursor) = kBreakInstructionFiller;
          cursor += kWordSize;
        }
      } else if (UNLIKELY(dontneed_on_sweep)) {
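        // Tell the OS it may reclaim the physical memory backing the interior
        // of this free block. The block's header is skipped so the freelist
        // element written below stays resident.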
        uword page_aligned_start = Utils::RoundUp(
            current + FreeListElement::kLargeHeaderSize, page_size);
        uword page_aligned_end = Utils::RoundDown(free_end, page_size);
        if (UNLIKELY(page_aligned_start < page_aligned_end)) {
          VirtualMemory::DontNeed(reinterpret_cast<void*>(page_aligned_start),
                                  page_aligned_end - page_aligned_start);
        }
      } else {
#if defined(DEBUG)
        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
      }
      freelist->FreeLocked(current, obj_size);
    }
    current += obj_size;
  }
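  // A fully-dead page returns false from inside the loop above, so by this
  // point at least one marked object was retained.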
  ASSERT(current == end);
  ASSERT(used_in_bytes != 0);
  return true;  // In use.
}

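// Sweeps a large (single-object) non-executable page. Returns the size of
// the surviving object in words, or 0 if the object is dead.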
intptr_t GCSweeper::SweepLargePage(Page* page) {
  ASSERT(!page->is_image());
  ASSERT(page->is_large() && !page->is_executable());

  intptr_t words_to_end = 0;
  ObjectPtr raw_obj = UntaggedObject::FromAddr(page->object_start());
  ASSERT(Page::Of(raw_obj) == page);
  if (raw_obj->untag()->IsMarked()) {
    raw_obj->untag()->ClearMarkBit();
    words_to_end = (raw_obj->untag()->HeapSize() >> kWordSizeLog2);
  }
#ifdef DEBUG
  // Array::MakeFixedLength creates trailing filler objects,
  // but they are always unreachable. Verify that they are not marked.
  uword current =
      UntaggedObject::ToAddr(raw_obj) + raw_obj->untag()->HeapSize();
  uword end = page->object_end();
  while (current < end) {
    ObjectPtr cur_obj = UntaggedObject::FromAddr(current);
    ASSERT(!cur_obj->untag()->IsMarked());
    intptr_t obj_size = cur_obj->untag()->HeapSize();
    memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
    current += obj_size;
  }
#endif  // DEBUG
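  // words_to_end is still 0 when the object was dead; a zero return tells
  // the caller the page holds no live data.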
  return words_to_end;
}

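// Background task that runs the sweep phase off the mutator thread. The page
// space phase advances kSweepingLarge -> kSweepingRegular -> kDone, with
// waiters notified on the tasks monitor at each transition.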
class ConcurrentSweeperTask : public ThreadPool::Task {
 public:
  explicit ConcurrentSweeperTask(IsolateGroup* isolate_group)
      : isolate_group_(isolate_group) {
    ASSERT(isolate_group != nullptr);
    PageSpace* old_space = isolate_group->heap()->old_space();
    MonitorLocker ml(old_space->tasks_lock());
    old_space->set_tasks(old_space->tasks() + 1);
    old_space->set_phase(PageSpace::kSweepingLarge);
  }

  virtual void Run() {
    bool result = Thread::EnterIsolateGroupAsNonMutator(isolate_group_,
                                                        Thread::kSweeperTask);
    ASSERT(result);
    PageSpace* old_space = isolate_group_->heap()->old_space();
    {
      Thread* thread = Thread::Current();
      ASSERT(thread->BypassSafepoints());  // Or we should be checking in.
      TIMELINE_FUNCTION_GC_DURATION(thread, "ConcurrentSweep");

      old_space->SweepLarge();

      {
        MonitorLocker ml(old_space->tasks_lock());
        ASSERT(old_space->phase() == PageSpace::kSweepingLarge);
        old_space->set_phase(PageSpace::kSweepingRegular);
        ml.NotifyAll();
      }

      old_space->Sweep(/*exclusive*/ false);
    }
    // Exit isolate cleanly *before* notifying it, to avoid shutdown race.
    Thread::ExitIsolateGroupAsNonMutator();
    // This sweeper task is done. Notify the original isolate.
    {
      MonitorLocker ml(old_space->tasks_lock());
      old_space->set_tasks(old_space->tasks() - 1);
      ASSERT(old_space->phase() == PageSpace::kSweepingRegular);
      old_space->set_phase(PageSpace::kDone);
      ml.NotifyAll();
    }
  }

 private:
  IsolateGroup* isolate_group_;
};

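// Kicks off a ConcurrentSweeperTask on the VM's thread pool; callers observe
// progress through the page space phase and tasks count updated above.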
void GCSweeper::SweepConcurrent(IsolateGroup* isolate_group) {
  bool result = Dart::thread_pool()->Run<ConcurrentSweeperTask>(isolate_group);
  ASSERT(result);
}

}  // namespace dart