page.h
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_HEAP_PAGE_H_
#define RUNTIME_VM_HEAP_PAGE_H_

#include "platform/atomic.h"
#include "vm/globals.h"
#include "vm/heap/spaces.h"
#include "vm/pointer_tagging.h"
#include "vm/raw_object.h"
#include "vm/virtual_memory.h"

namespace dart {

class ForwardingPage;
class ObjectVisitor;
class ObjectPointerVisitor;
class Thread;
class UnwindingRecords;

// Pages are allocated with kPageSize alignment so that the Page of any object
// can be computed by masking the object with kPageMask. This does not apply to
// image pages, whose address is chosen by the system loader rather than the
// Dart VM.
static constexpr intptr_t kPageSize = 512 * KB;
static constexpr intptr_t kPageSizeInWords = kPageSize / kWordSize;
static constexpr intptr_t kPageMask = ~(kPageSize - 1);
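//
// For example, the header of the Page containing any non-image object can be
// recovered by masking (this is exactly what Page::Of() below does):
//
//   Page* page = reinterpret_cast<Page*>(addr & kPageMask);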

// See ForwardingBlock and CountingBlock.
static constexpr intptr_t kBitVectorWordsPerBlock = 1;
static constexpr intptr_t kBlockSize =
    kObjectAlignment * kBitsPerWord * kBitVectorWordsPerBlock;
static constexpr intptr_t kBlockMask = ~(kBlockSize - 1);
static constexpr intptr_t kBlocksPerPage = kPageSize / kBlockSize;
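//
// Worked numbers (a sketch assuming a 64-bit build where kObjectAlignment is
// 16 bytes and kBitsPerWord is 64):
//
//   kBlockSize     = 16 * 64 * 1 = 1024 bytes
//   kBlocksPerPage = (512 * KB) / 1024 = 512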

// Simplify initialization in allocation stubs by ensuring it is safe
// to overshoot the object end by up to kAllocationRedZoneSize. (Just as the
// stack red zone allows one to overshoot the stack pointer.)
static constexpr intptr_t kAllocationRedZoneSize = kObjectAlignment;
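//
// Illustrative sketch (assumed stub behavior, not actual stub code): a stub
// clearing a new object in multi-word steps may write slightly past the
// object's end without faulting, because the red zone guarantees that memory
// is mapped:
//
//   for (uword a = result; a < result + size; a += 2 * kWordSize) {
//     /* stores here may overshoot the end by < kAllocationRedZoneSize */
//   }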

// A Page is the granularity at which the Dart heap allocates memory from the
// OS. Pages are usually of size kPageSize, except large objects are allocated
// on their own Page sized to the object.
//
// +----------------------+ <- start
// | struct Page (header) |
// +----------------------+
// | alignment gap        |
// +----------------------+ <- object_start
// | objects              |
// | ...                  |
// | ...                  |
// +----------------------+ <- object_end / top_
// | available            |
// +----------------------+ <- end_
// | red zone or          |
// | forwarding table     |
// +----------------------+ <- memory_->end()
class Page {
 public:
  static void Init();
  static void ClearCache();
  static intptr_t CachedSize();
  static void Cleanup();
67
69 kExecutable = 1 << 0,
70 kLarge = 1 << 1,
71 kImage = 1 << 2,
72 kVMIsolate = 1 << 3,
73 kNew = 1 << 4,
75 };
  bool is_executable() const { return (flags_ & kExecutable) != 0; }
  bool is_large() const { return (flags_ & kLarge) != 0; }
  bool is_image() const { return (flags_ & kImage) != 0; }
  bool is_vm_isolate() const { return (flags_ & kVMIsolate) != 0; }
  bool is_new() const { return (flags_ & kNew) != 0; }
  bool is_old() const { return !is_new(); }
  bool is_evacuation_candidate() const {
    return (flags_ & kEvacuationCandidate) != 0;
  }

  Page* next() const { return next_; }
  void set_next(Page* next) { next_ = next; }

  uword start() const { return memory_->start(); }
  uword end() const { return memory_->end(); }
  bool Contains(uword addr) const { return memory_->Contains(addr); }

  uword object_start() const {
    return is_new() ? new_object_start() : old_object_start();
  }
  uword old_object_start() const {
    return memory_->start() + OldObjectStartOffset();
  }
  uword new_object_start() const {
    return memory_->start() + NewObjectStartOffset();
  }
  uword object_end() const {
    if (owner_ != nullptr) return owner_->top();
    return top_;
  }
  intptr_t used() const { return object_end() - object_start(); }

  ForwardingPage* forwarding_page() const { return forwarding_page_; }
  void AllocateForwardingPage();
  void RegisterUnwindingRecords();
  void UnregisterUnwindingRecords();

  void VisitObjects(ObjectVisitor* visitor) const;
  void VisitObjectsUnsafe(ObjectVisitor* visitor) const;
  void VisitObjectPointers(ObjectPointerVisitor* visitor) const;

  void WriteProtect(bool read_only);

  constexpr static intptr_t OldObjectStartOffset() {
    return Utils::RoundUp(sizeof(Page), kObjectStartAlignment,
                          kOldObjectAlignmentOffset);
  }
  constexpr static intptr_t NewObjectStartOffset() {
    // Note weaker alignment because the bool/null offset tricks don't apply to
    // new-space.
    return Utils::RoundUp(sizeof(Page), kObjectAlignment,
                          kNewObjectAlignmentOffset);
  }
  // These are "original" in the sense that they reflect TLAB boundaries when
  // the TLAB was acquired, not the current boundaries. An object between
  // original_top and top may still be in use by Dart code that has eliminated
  // write barriers.
  uword original_top() const { return top_.load(std::memory_order_acquire); }
  uword original_end() const { return end_.load(std::memory_order_relaxed); }
  static intptr_t original_top_offset() { return OFFSET_OF(Page, top_); }
  static intptr_t original_end_offset() { return OFFSET_OF(Page, end_); }
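  //
  // Ordering note (a reading of the fences above, not new behavior): the
  // acquire load in original_top() pairs with the release store to top_ in
  // Release(Thread*) below, so a reader that observes a released top_ value
  // also observes the writes to objects allocated below it.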

  // Warning: This does not work for objects on image pages because image pages
  // are not aligned. However, it works for objects on large pages, because
  // only one object is allocated per large page.
  static Page* Of(ObjectPtr obj) {
    ASSERT(obj->IsHeapObject());
    return reinterpret_cast<Page*>(static_cast<uword>(obj) & kPageMask);
  }

  // Warning: This does not work for addresses on image pages or on large
  // pages.
  static Page* Of(uword addr) {
    return reinterpret_cast<Page*>(addr & kPageMask);
  }
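  //
  // Usage sketch (hypothetical caller, not VM code):
  //
  //   ObjectPtr obj = ...;               // any non-image heap object
  //   Page* page = Page::Of(obj);
  //   ASSERT(page->Contains(static_cast<uword>(obj)));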

  // 1 card = 32 slots.
  static constexpr intptr_t kSlotsPerCardLog2 = 5;
  static constexpr intptr_t kBytesPerCardLog2 =
      kSlotsPerCardLog2 + kCompressedWordSizeLog2;

  intptr_t card_table_size() const {
    return memory_->size() >> kBytesPerCardLog2;
  }
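  //
  // Worked numbers (a sketch assuming compressed pointers, where
  // kCompressedWordSizeLog2 == 2, i.e. 4-byte slots):
  //
  //   kBytesPerCardLog2 = 5 + 2 = 7      // each card covers 128 bytes
  //   card_table_size() = size >> 7      // one bit per card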

  static intptr_t card_table_offset() { return OFFSET_OF(Page, card_table_); }

  void RememberCard(ObjectPtr const* slot) {
    RememberCard(reinterpret_cast<uword>(slot));
  }
  bool IsCardRemembered(ObjectPtr const* slot) {
    return IsCardRemembered(reinterpret_cast<uword>(slot));
  }
#if defined(DART_COMPRESSED_POINTERS)
  void RememberCard(CompressedObjectPtr const* slot) {
    RememberCard(reinterpret_cast<uword>(slot));
  }
  bool IsCardRemembered(CompressedObjectPtr const* slot) {
    return IsCardRemembered(reinterpret_cast<uword>(slot));
  }
#endif
  void VisitRememberedCards(ObjectPointerVisitor* visitor);
  void ResetProgressBar();

  Thread* owner() const { return owner_; }

  // Remember the limit to which objects have been copied.
  void RecordSurvivors() { survivor_end_ = object_end(); }

  // Move survivor end to the end of the to_ space, making all surviving
  // objects candidates for promotion next time.
  void EarlyTenure() { survivor_end_ = end_; }

  uword promo_candidate_words() const {
    return (survivor_end_ - object_start()) / kWordSize;
  }

  void Acquire(Thread* thread) {
    ASSERT(owner_ == nullptr);
    owner_ = thread;
    ASSERT(thread->top() == 0);
    ASSERT(thread->end() == 0);
    thread->set_top(top_);
    thread->set_end(end_);
    thread->set_true_end(end_);
  }
  intptr_t Release(Thread* thread) {
    ASSERT(owner_ == thread);
    owner_ = nullptr;
    uword old_top = top_;
    uword new_top = thread->top();
    top_.store(new_top, std::memory_order_release);
    thread->set_top(0);
    thread->set_end(0);
    thread->set_true_end(0);
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
    thread->heap_sampler().HandleReleasedTLAB(Thread::Current());
#endif
    ASSERT(new_top >= old_top);
    return new_top - old_top;
  }
  void Release() {
    if (owner_ != nullptr) {
      Release(owner_);
    }
  }
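  //
  // TLAB lifecycle sketch (hypothetical caller, not VM code): a mutator
  // bump-allocates through the Thread between Acquire and Release; Release
  // returns the number of bytes the thread consumed from this page.
  //
  //   page->Acquire(thread);             // thread->top()/end() now set
  //   /* ... bump allocation against thread->top() ... */
  //   intptr_t consumed = page->Release(thread);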

  uword TryAllocateGC(intptr_t size) {
    ASSERT(owner_ == nullptr);
    uword result = top_;
    uword new_top = result + size;
    if (LIKELY(new_top <= end_)) {
      top_ = new_top;
      return result;
    }
    return 0;
  }

  void Unallocate(uword addr, intptr_t size) {
    ASSERT((addr + size) == top_);

#if defined(DEBUG)
    uword* cursor = reinterpret_cast<uword*>(addr);
    uword* end = reinterpret_cast<uword*>(addr + size);
    while (cursor < end) {
      *cursor++ = kAllocationCanary;
    }
#endif

    top_ -= size;
  }

  bool IsSurvivor(uword raw_addr) const { return raw_addr < survivor_end_; }
  bool IsResolved() const { return top_ == resolved_top_; }

 private:
  void RememberCard(uword slot) {
    ASSERT(Contains(slot));
    if (card_table_ == nullptr) {
      size_t size_in_bits = card_table_size();
      size_t size_in_bytes =
          Utils::RoundUp(size_in_bits, kBitsPerByte) >> kBitsPerByteLog2;
      card_table_ =
          reinterpret_cast<uword*>(calloc(size_in_bytes, sizeof(uint8_t)));
    }
    intptr_t offset = slot - reinterpret_cast<uword>(this);
    intptr_t index = offset >> kBytesPerCardLog2;
    ASSERT((index >= 0) && (index < card_table_size()));
    intptr_t word_offset = index >> kBitsPerWordLog2;
    intptr_t bit_offset = index & (kBitsPerWord - 1);
    uword bit_mask = static_cast<uword>(1) << bit_offset;
    card_table_[word_offset] |= bit_mask;
  }
  bool IsCardRemembered(uword slot) {
    ASSERT(Contains(slot));
    if (card_table_ == nullptr) {
      return false;
    }
    intptr_t offset = slot - reinterpret_cast<uword>(this);
    intptr_t index = offset >> kBytesPerCardLog2;
    ASSERT((index >= 0) && (index < card_table_size()));
    intptr_t word_offset = index >> kBitsPerWordLog2;
    intptr_t bit_offset = index & (kBitsPerWord - 1);
    uword bit_mask = static_cast<uword>(1) << bit_offset;
    return (card_table_[word_offset] & bit_mask) != 0;
  }
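  //
  // Index math example (a sketch assuming kBytesPerCardLog2 == 7 and 64-bit
  // words): a slot 4096 bytes from the page start lands in card
  // 4096 >> 7 == 32, i.e. bit 32 of card_table_[0]:
  //
  //   word_offset = 32 >> kBitsPerWordLog2  = 32 >> 6 = 0
  //   bit_offset  = 32 & (kBitsPerWord - 1) = 32 & 63 = 32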

  void set_object_end(uword value) {
    ASSERT((value & kObjectAlignmentMask) == kOldObjectAlignmentOffset);
    top_ = value;
  }

  // Returns nullptr on OOM.
  static Page* Allocate(intptr_t size, uword flags);

  // Deallocate the virtual memory backing this page. The pointer to this page
  // becomes immediately inaccessible.
  void Deallocate();

  uword flags_;
  VirtualMemory* memory_;
  Page* next_;
  ForwardingPage* forwarding_page_;
  uword* card_table_;  // Remembered set, not marking.
  RelaxedAtomic<intptr_t> progress_bar_;

  // The thread using this page for allocation, otherwise nullptr.
  Thread* owner_;

  // The address of the next allocation. If owner is non-NULL, this value is
  // stale and the current value is at owner->top_. Called "NEXT" in the
  // original Cheney paper.
  RelaxedAtomic<uword> top_;

  // The address after the last allocatable byte in this page.
  RelaxedAtomic<uword> end_;

  // Objects below this address have survived a scavenge.
  uword survivor_end_;

  // A pointer to the first unprocessed object. Resolution completes when this
  // value meets the allocation top. Called "SCAN" in the original Cheney
  // paper.
  uword resolved_top_;
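  //
  // Cheney invariant sketch (illustrative, not VM code): during a scavenge,
  // [object_start(), resolved_top_) holds copied objects whose pointers have
  // already been processed, and [resolved_top_, top_) holds copied objects
  // that still await processing; IsResolved() is true once the two meet.
  //
  //   while (!page->IsResolved()) {
  //     /* process the object at resolved_top_, advancing resolved_top_;
  //        processing may copy more objects, advancing top_ */
  //   }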

  friend class GCCompactor;
  friend class PageSpace;
  template <bool>
  friend class ScavengerVisitorBase;
  friend class SemiSpace;
  friend class UnwindingRecords;

  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(Page);
};

}  // namespace dart

#endif  // RUNTIME_VM_HEAP_PAGE_H_