sweeper.cc
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/sweeper.h"

#include "vm/globals.h"
#include "vm/heap/freelist.h"
#include "vm/heap/heap.h"
#include "vm/heap/pages.h"
#include "vm/heap/safepoint.h"
#include "vm/lockers.h"
#include "vm/thread_pool.h"
#include "vm/timeline.h"

namespace dart {

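// Sweeps a single new-space page in place: marked objects survive with their
// mark bits cleared, and maximal runs of unmarked objects are turned into
// free-list elements. Returns the number of bytes freed on the page.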
intptr_t GCSweeper::SweepNewPage(Page* page) {
  ASSERT(!page->is_image());
  ASSERT(!page->is_old());
  ASSERT(!page->is_executable());

  uword start = page->object_start();
  uword end = page->object_end();
  uword current = start;
  intptr_t free = 0;
  while (current < end) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
    ASSERT(Page::Of(raw_obj) == page);
    uword tags = raw_obj->untag()->tags_.load(std::memory_order_relaxed);
    intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
    if (UntaggedObject::IsMarked(tags)) {
      // Found marked object. Clear the mark bit and update swept bytes.
      raw_obj->untag()->ClearMarkBitUnsynchronized();
      ASSERT(IsAllocatableInNewSpace(obj_size));
    } else {
      uword free_end = current + obj_size;
      while (free_end < end) {
        ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
        tags = next_obj->untag()->tags_.load(std::memory_order_relaxed);
        if (UntaggedObject::IsMarked(tags)) {
          // Reached the end of the free block.
          break;
        }
        // Expand the free block by the size of this object.
        free_end += next_obj->untag()->HeapSize(tags);
      }
      obj_size = free_end - current;
#if defined(DEBUG)
      memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
      FreeListElement::AsElementNew(current, obj_size);
      free += obj_size;
    }
    current += obj_size;
  }
  return free;
}
59
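// Sweeps a single old-space page, returning runs of dead objects to the given
// freelist. Returns true if the page still contains live objects, or false if
// the whole page is free.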
bool GCSweeper::SweepPage(Page* page, FreeList* freelist) {
  ASSERT(!page->is_image());
  // Large executable pages are handled here. We never truncate Instructions
  // objects, so we never truncate executable pages.
  ASSERT(!page->is_large() || page->is_executable());
  DEBUG_ASSERT(freelist->mutex()->IsOwnedByCurrentThread());

  // Keep track whether this page is still in use.
  intptr_t used_in_bytes = 0;

  bool is_executable = page->is_executable();
  uword start = page->object_start();
  uword end = page->object_end();
  uword current = start;
  const bool dontneed_on_sweep = FLAG_dontneed_on_sweep;
  const uword page_size = VirtualMemory::PageSize();

  while (current < end) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(current);
    ASSERT(Page::Of(raw_obj) == page);
    // These acquire operations balance release operations in array
    // truncation, ensuring the writes creating the filler object are ordered
    // before the writes inserting the filler object into the freelist.
    uword tags = raw_obj->untag()->tags_.load(std::memory_order_acquire);
    intptr_t obj_size = raw_obj->untag()->HeapSize(tags);
    if (UntaggedObject::IsMarked(tags)) {
      // Found marked object. Clear the mark bit and update swept bytes.
      raw_obj->untag()->ClearMarkBit();
      used_in_bytes += obj_size;
      // Large objects should never appear on regular pages.
      ASSERT(IsAllocatableViaFreeLists(obj_size) || page->is_large());
    } else {
      uword free_end = current + obj_size;
      while (free_end < end) {
        ObjectPtr next_obj = UntaggedObject::FromAddr(free_end);
        tags = next_obj->untag()->tags_.load(std::memory_order_acquire);
        if (UntaggedObject::IsMarked(tags)) {
          // Reached the end of the free block.
          break;
        }
        // Expand the free block by the size of this object.
        free_end += next_obj->untag()->HeapSize(tags);
      }
      // Only add to the free list if not covering the whole page.
      if ((current == start) && (free_end == end)) {
        page->set_live_bytes(0);
        return false;  // Not in use.
      }
      obj_size = free_end - current;
      if (is_executable) {
        uword cursor = current;
        uword end = current + obj_size;
        while (cursor < end) {
          *reinterpret_cast<uword*>(cursor) = kBreakInstructionFiller;
          cursor += kWordSize;
        }
      } else if (UNLIKELY(dontneed_on_sweep)) {
        uword page_aligned_start = Utils::RoundUp(
            current + FreeListElement::kLargeHeaderSize, page_size);
        uword page_aligned_end = Utils::RoundDown(free_end, page_size);
        if (UNLIKELY(page_aligned_start < page_aligned_end)) {
          VirtualMemory::DontNeed(reinterpret_cast<void*>(page_aligned_start),
                                  page_aligned_end - page_aligned_start);
        }
      } else {
#if defined(DEBUG)
        memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
#endif  // DEBUG
      }
      freelist->FreeLocked(current, obj_size);
    }
    current += obj_size;
  }
  ASSERT(current == end);
  ASSERT(used_in_bytes != 0);
  page->set_live_bytes(used_in_bytes);
  return true;  // In use.
}
138
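// Sweeps a large page, which holds a single large object (possibly followed
// by unreachable filler objects from array truncation). Returns the object's
// size in words if it is still live, or 0 if the page is free.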
intptr_t GCSweeper::SweepLargePage(Page* page) {
  ASSERT(!page->is_image());
  ASSERT(page->is_large() && !page->is_executable());

  intptr_t words_to_end = 0;
  ObjectPtr raw_obj = UntaggedObject::FromAddr(page->object_start());
  ASSERT(Page::Of(raw_obj) == page);
  if (raw_obj->untag()->IsMarked()) {
    raw_obj->untag()->ClearMarkBit();
    words_to_end = (raw_obj->untag()->HeapSize() >> kWordSizeLog2);
  }
#ifdef DEBUG
  // Array::MakeFixedLength creates trailing filler objects,
  // but they are always unreachable. Verify that they are not marked.
  uword current =
      UntaggedObject::ToAddr(raw_obj) + raw_obj->untag()->HeapSize();
  uword end = page->object_end();
  while (current < end) {
    ObjectPtr cur_obj = UntaggedObject::FromAddr(current);
    ASSERT(!cur_obj->untag()->IsMarked());
    intptr_t obj_size = cur_obj->untag()->HeapSize();
    memset(reinterpret_cast<void*>(current), Heap::kZapByte, obj_size);
    current += obj_size;
  }
#endif  // DEBUG
  return words_to_end;
}
166
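// Runs the old-space sweep on a thread-pool thread: large pages are swept
// first, the phase is advanced from kSweepingLarge to kSweepingRegular so
// waiters can observe the transition, and the page space is marked kDone
// once the task finishes.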
class ConcurrentSweeperTask : public ThreadPool::Task {
 public:
  explicit ConcurrentSweeperTask(IsolateGroup* isolate_group)
      : isolate_group_(isolate_group) {
    ASSERT(isolate_group != nullptr);
    PageSpace* old_space = isolate_group->heap()->old_space();
    MonitorLocker ml(old_space->tasks_lock());
    old_space->set_tasks(old_space->tasks() + 1);
    old_space->set_phase(PageSpace::kSweepingLarge);
  }

  virtual void Run() {
    bool result = Thread::EnterIsolateGroupAsNonMutator(isolate_group_,
                                                        Thread::kSweeperTask);
    ASSERT(result);
    PageSpace* old_space = isolate_group_->heap()->old_space();
    {
      Thread* thread = Thread::Current();
      ASSERT(thread->BypassSafepoints());  // Or we should be checking in.
      TIMELINE_FUNCTION_GC_DURATION(thread, "ConcurrentSweep");

      old_space->SweepLarge();

      {
        MonitorLocker ml(old_space->tasks_lock());
        ASSERT(old_space->phase() == PageSpace::kSweepingLarge);
        old_space->set_phase(PageSpace::kSweepingRegular);
        ml.NotifyAll();
      }

      old_space->Sweep(/*exclusive*/ false);
    }
    // Exit isolate cleanly *before* notifying it, to avoid shutdown race.
    Thread::ExitIsolateGroupAsNonMutator();
    // This sweeper task is done. Notify the original isolate.
    {
      MonitorLocker ml(old_space->tasks_lock());
      old_space->set_tasks(old_space->tasks() - 1);
      ASSERT(old_space->phase() == PageSpace::kSweepingRegular);
      old_space->set_phase(PageSpace::kDone);
      ml.NotifyAll();
    }
  }

 private:
  IsolateGroup* isolate_group_;
};
214
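// Entry point: schedules a concurrent sweep of the isolate group's old space
// on the VM thread pool.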
void GCSweeper::SweepConcurrent(IsolateGroup* isolate_group) {
  bool result = Dart::thread_pool()->Run<ConcurrentSweeperTask>(isolate_group);
  ASSERT(result);
}

}  // namespace dart