page.h
1// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#ifndef RUNTIME_VM_HEAP_PAGE_H_
6#define RUNTIME_VM_HEAP_PAGE_H_
7
8#include "platform/atomic.h"
9#include "vm/globals.h"
10#include "vm/heap/spaces.h"
11#include "vm/pointer_tagging.h"
12#include "vm/raw_object.h"
13#include "vm/virtual_memory.h"
14
15namespace dart {
16
17class ForwardingPage;
18class ObjectVisitor;
19class ObjectPointerVisitor;
20class Thread;
21class UnwindingRecords;
22
23// Pages are allocated with kPageSize alignment so that the Page of any object
24// can be computed by masking the object with kPageMask. This does not apply to
25// image pages, whose address is chosen by the system loader rather than the
26// Dart VM.
27static constexpr intptr_t kPageSize = 512 * KB;
28static constexpr intptr_t kPageSizeInWords = kPageSize / kWordSize;
29static constexpr intptr_t kPageMask = ~(kPageSize - 1);
30
31// See ForwardingBlock and CountingBlock.
32static constexpr intptr_t kBitVectorWordsPerBlock = 1;
33static constexpr intptr_t kBlockSize =
34 kObjectAlignment * kBitsPerWord * kBitVectorWordsPerBlock;
35static constexpr intptr_t kBlockMask = ~(kBlockSize - 1);
36static constexpr intptr_t kBlocksPerPage = kPageSize / kBlockSize;
37
38// Simplify initialization in allocation stubs by ensuring it is safe
39// to overshoot the object end by up to kAllocationRedZoneSize. (Just as the
40// stack red zone allows one to overshoot the stack pointer.)
41static constexpr intptr_t kAllocationRedZoneSize = kObjectAlignment;
42
43// A Page is the granularity at which the Dart heap allocates memory from the OS.
44// Pages are usually of size kPageSize, except large objects are allocated on
45// their own Page sized to the object.
46//
47// +----------------------+ <- start
48// | struct Page (header) |
49// +----------------------+
50// | alignment gap |
51// +----------------------+ <- object_start
52// | objects |
53// | ... |
54// | ... |
55// +----------------------+ <- object_end / top_
56// | available |
57// +----------------------+ <- end_
58// | red zone or |
59// | forwarding table |
60// +----------------------+ <- memory_->end()
61class Page {
62 public:
63 static void Init();
64 static void ClearCache();
65 static intptr_t CachedSize();
66 static void Cleanup();
67
68 enum PageFlags : uword {
69 kExecutable = 1 << 0,
70 kLarge = 1 << 1,
71 kImage = 1 << 2,
72 kVMIsolate = 1 << 3,
73 kNew = 1 << 4,
74 kEvacuationCandidate = 1 << 5,
75 kNeverEvacuate = 1 << 6,
76 };
77 bool is_executable() const { return (flags_ & kExecutable) != 0; }
78 bool is_large() const { return (flags_ & kLarge) != 0; }
79 bool is_image() const { return (flags_ & kImage) != 0; }
80 bool is_vm_isolate() const { return (flags_ & kVMIsolate) != 0; }
81 bool is_new() const { return (flags_ & kNew) != 0; }
82 bool is_old() const { return !is_new(); }
83 bool is_evacuation_candidate() const {
84 return (flags_ & kEvacuationCandidate) != 0;
85 }
86 void set_evacuation_candidate(bool value) {
87 if (value) {
88 flags_ |= kEvacuationCandidate;
89 } else {
90 flags_ &= ~kEvacuationCandidate;
91 }
92 }
93 bool is_never_evacuate() const { return (flags_ & kNeverEvacuate) != 0; }
94 void set_never_evacuate(bool value) {
95 if (value) {
96 flags_ |= kNeverEvacuate;
97 } else {
98 flags_ &= ~kNeverEvacuate;
99 }
100 }
101
102 Page* next() const { return next_; }
103 void set_next(Page* next) { next_ = next; }
104
105 uword start() const { return memory_->start(); }
106 uword end() const { return memory_->end(); }
107 bool Contains(uword addr) const { return memory_->Contains(addr); }
108
109 uword object_start() const {
110 return is_new() ? new_object_start() : old_object_start();
111 }
112 uword old_object_start() const {
113 return memory_->start() + OldObjectStartOffset();
114 }
115 uword new_object_start() const {
116 return memory_->start() + NewObjectStartOffset();
117 }
118 uword object_end() const {
119 if (owner_ != nullptr) return owner_->top();
120 return top_;
121 }
122 intptr_t used() const { return object_end() - object_start(); }
123
124 intptr_t live_bytes() const { return live_bytes_; }
125 void set_live_bytes(intptr_t value) { live_bytes_ = value; }
126 void add_live_bytes(intptr_t value) { live_bytes_ += value; }
127 void sub_live_bytes(intptr_t value) { live_bytes_ -= value; }
128
129 ForwardingPage* forwarding_page() const { return forwarding_page_; }
130 void AllocateForwardingPage();
131 void RegisterUnwindingRecords();
132 void UnregisterUnwindingRecords();
133
134 void VisitObjects(ObjectVisitor* visitor) const;
135 void VisitObjectsUnsafe(ObjectVisitor* visitor) const;
136 void VisitObjectPointers(ObjectPointerVisitor* visitor) const;
137
138 void WriteProtect(bool read_only);
139
140 constexpr static intptr_t OldObjectStartOffset() {
141 return Utils::RoundUp(sizeof(Page), kObjectStartAlignment,
142 kOldObjectAlignmentOffset);
143 }
144 constexpr static intptr_t NewObjectStartOffset() {
145 // Note weaker alignment because the bool/null offset tricks don't apply to
146 // new-space.
147 return Utils::RoundUp(sizeof(Page), kObjectAlignment,
148 kNewObjectAlignmentOffset);
149 }
150 // These are "original" in the sense that they reflect TLAB boundaries when
151 // the TLAB was acquired, not the current boundaries. An object between
152 // original_top and top may still be in use by Dart code that has eliminated
153 // write barriers.
154 uword original_top() const { return top_.load(std::memory_order_acquire); }
155 uword original_end() const { return end_.load(std::memory_order_relaxed); }
156 static intptr_t original_top_offset() { return OFFSET_OF(Page, top_); }
157 static intptr_t original_end_offset() { return OFFSET_OF(Page, end_); }
158
159 // Warning: This does not work for objects on image pages because image pages
160 // are not aligned. However, it works for objects on large pages, because
161 // only one object is allocated per large page.
162 static Page* Of(ObjectPtr obj) {
163 ASSERT(obj->IsHeapObject());
164 return reinterpret_cast<Page*>(static_cast<uword>(obj) & kPageMask);
165 }
166 static Page* Of(uword addr) {
167 return reinterpret_cast<Page*>(addr & kPageMask);
168 }
169 static Page* Of(void* addr) {
170 return reinterpret_cast<Page*>(reinterpret_cast<uword>(addr) & kPageMask);
171 }
172
173 // 1 card = 32 slots.
174 static constexpr intptr_t kSlotsPerCardLog2 = 5;
175 static constexpr intptr_t kSlotsPerCard = 1 << kSlotsPerCardLog2;
176 static constexpr intptr_t kBytesPerCardLog2 =
177 kCompressedWordSizeLog2 + kSlotsPerCardLog2;
178
179 intptr_t card_table_size() const {
180 return memory_->size() >> kBytesPerCardLog2;
181 }
182
183 static intptr_t card_table_offset() { return OFFSET_OF(Page, card_table_); }
184
185 void RememberCard(ObjectPtr const* slot) {
186 RememberCard(reinterpret_cast<uword>(slot));
187 }
188 bool IsCardRemembered(ObjectPtr const* slot) {
189 return IsCardRemembered(reinterpret_cast<uword>(slot));
190 }
191#if defined(DART_COMPRESSED_POINTERS)
192 void RememberCard(CompressedObjectPtr const* slot) {
193 RememberCard(reinterpret_cast<uword>(slot));
194 }
195 bool IsCardRemembered(CompressedObjectPtr const* slot) {
196 return IsCardRemembered(reinterpret_cast<uword>(slot));
197 }
198#endif
199 void VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
200 bool only_marked = false);
201 void ResetProgressBar();
202
203 Thread* owner() const { return owner_; }
204
205 // Remember the limit to which objects have been copied.
206 void RecordSurvivors() { survivor_end_ = object_end(); }
207
208 // Move survivor end to the end of the to_ space, making all surviving
209 // objects candidates for promotion next time.
210 void EarlyTenure() { survivor_end_ = end_; }
211
212 uword promo_candidate_words() const {
213 return (survivor_end_ - object_start()) / kWordSize;
214 }
215
216 void Acquire(Thread* thread) {
217 ASSERT(owner_ == nullptr);
218 owner_ = thread;
219 ASSERT(thread->top() == 0);
220 ASSERT(thread->end() == 0);
221 thread->set_top(top_);
222 thread->set_end(end_);
223 thread->set_true_end(end_);
224 }
225 intptr_t Release(Thread* thread) {
226 ASSERT(owner_ == thread);
227 owner_ = nullptr;
228 uword old_top = top_;
229 uword new_top = thread->top();
230 top_.store(new_top, std::memory_order_release);
231 thread->set_top(0);
232 thread->set_end(0);
233 thread->set_true_end(0);
234#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
235 thread->heap_sampler().HandleReleasedTLAB(Thread::Current());
236#endif
237 ASSERT(new_top >= old_top);
238 return new_top - old_top;
239 }
240 void Release() {
241 if (owner_ != nullptr) {
242 Release(owner_);
243 }
244 }
245
246 uword TryAllocateGC(intptr_t size) {
247 ASSERT(owner_ == nullptr);
248 uword result = top_;
249 uword new_top = result + size;
250 if (LIKELY(new_top <= end_)) {
251 top_ = new_top;
252 return result;
253 }
254 return 0;
255 }
256
257 void Unallocate(uword addr, intptr_t size) {
258 ASSERT((addr + size) == top_);
259
260#if defined(DEBUG)
261 uword* cursor = reinterpret_cast<uword*>(addr);
262 uword* end = reinterpret_cast<uword*>(addr + size);
263 while (cursor < end) {
264 *cursor++ = kAllocationCanary;
265 }
266#endif
267
268 top_ -= size;
269 }
270
271 bool IsSurvivor(uword raw_addr) const { return raw_addr < survivor_end_; }
272 bool IsResolved() const { return top_ == resolved_top_; }
273
274 private:
275 void RememberCard(uword slot) {
276 ASSERT(Contains(slot));
277 if (card_table_ == nullptr) {
278 size_t size_in_bits = card_table_size();
279 size_t size_in_bytes =
280 Utils::RoundUp(size_in_bits, kBitsPerWord) >> kBitsPerByteLog2;
281 card_table_ =
282 reinterpret_cast<uword*>(calloc(size_in_bytes, sizeof(uint8_t)));
283 }
284 intptr_t offset = slot - reinterpret_cast<uword>(this);
285 intptr_t index = offset >> kBytesPerCardLog2;
286 ASSERT((index >= 0) && (index < card_table_size()));
287 intptr_t word_offset = index >> kBitsPerWordLog2;
288 intptr_t bit_offset = index & (kBitsPerWord - 1);
289 uword bit_mask = static_cast<uword>(1) << bit_offset;
290 reinterpret_cast<std::atomic<uword>*>(&card_table_[word_offset])
291 ->fetch_or(bit_mask, std::memory_order_relaxed);
292 }
293 bool IsCardRemembered(uword slot) {
294 ASSERT(Contains(slot));
295 if (card_table_ == nullptr) {
296 return false;
297 }
298 intptr_t offset = slot - reinterpret_cast<uword>(this);
299 intptr_t index = offset >> kBytesPerCardLog2;
300 ASSERT((index >= 0) && (index < card_table_size()));
301 intptr_t word_offset = index >> kBitsPerWordLog2;
302 intptr_t bit_offset = index & (kBitsPerWord - 1);
303 uword bit_mask = static_cast<uword>(1) << bit_offset;
304 return (card_table_[word_offset] & bit_mask) != 0;
305 }
306
307 void set_object_end(uword value) {
308 ASSERT((value & kObjectAlignmentMask) == kOldObjectAlignmentOffset);
309 top_ = value;
310 }
311
312 // Returns nullptr on OOM.
313 static Page* Allocate(intptr_t size, uword flags);
314
315 // Deallocate the virtual memory backing this page. The page pointer to this
316 // page becomes immediately inaccessible.
317 void Deallocate();
318
319 uword flags_;
320 VirtualMemory* memory_;
321 Page* next_;
322 ForwardingPage* forwarding_page_;
323 uword* card_table_; // Remembered set, not marking.
324 RelaxedAtomic<intptr_t> progress_bar_;
325
326 // The thread using this page for allocation, otherwise nullptr.
327 Thread* owner_;
328
329 // The address of the next allocation. If owner is non-NULL, this value is
330 // stale and the current value is at owner->top_. Called "NEXT" in the
331 // original Cheney paper.
332 RelaxedAtomic<uword> top_;
333
334 // The address after the last allocatable byte in this page.
335 RelaxedAtomic<uword> end_;
336
337 // Objects below this address have survived a scavenge.
338 uword survivor_end_;
339
340 // A pointer to the first unprocessed object. Resolution completes when this
341 // value meets the allocation top. Called "SCAN" in the original Cheney paper.
342 uword resolved_top_;
343
344 RelaxedAtomic<intptr_t> live_bytes_;
345
348 friend class GCCompactor;
349 friend class PageSpace;
350 template <bool>
351 friend class ScavengerVisitorBase;
352 friend class SemiSpace;
353 friend class UnwindingRecords;
354
355 DISALLOW_ALLOCATION();
356 DISALLOW_IMPLICIT_CONSTRUCTORS(Page);
357};
358
359static constexpr intptr_t kSlotsPerInterruptCheck = KB;
360static constexpr intptr_t kCardsPerInterruptCheck =
361 kSlotsPerInterruptCheck / Page::kSlotsPerCard;
362
363} // namespace dart
364
365#endif // RUNTIME_VM_HEAP_PAGE_H_
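
The alignment comment near the top of the file (lines 23-26) is what makes Page::Of a single mask operation: because ordinary pages are allocated at kPageSize-aligned addresses, clearing the low bits of any interior address yields the address of the Page header. A minimal standalone sketch of that arithmetic, using plain uintptr_t rather than the VM's ObjectPtr/Page types and assuming the same 512 KB page size (the names here are illustrative, not VM API):

// Standalone illustration of the Page::Of masking arithmetic (not VM code).
#include <cassert>
#include <cstdint>

int main() {
  constexpr uintptr_t kPageSize = 512 * 1024;
  constexpr uintptr_t kPageMask = ~(kPageSize - 1);

  // A hypothetical kPageSize-aligned page base and an object inside it.
  uintptr_t page_start = 64 * kPageSize;
  uintptr_t object_addr = page_start + 0x1230;

  // Masking any interior address recovers the page header address. Image
  // pages are excluded precisely because they are not kPageSize-aligned.
  assert((object_addr & kPageMask) == page_start);
  return 0;
}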
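
The card-table helpers RememberCard and IsCardRemembered (lines 275-305) map a slot address to a card by shifting its offset from the Page header right by kBytesPerCardLog2, then locate the corresponding bit inside the uword-based card_table_. A standalone sketch of that index arithmetic, assuming 4-byte compressed slots so that kBytesPerCardLog2 = kCompressedWordSizeLog2 (2) + kSlotsPerCardLog2 (5) = 7, i.e. one card covers 32 slots = 128 bytes; this mirrors the math only and is not VM code:

// Standalone illustration of the card-table index math (not VM code).
#include <cassert>
#include <cstdint>

int main() {
  constexpr intptr_t kSlotsPerCardLog2 = 5;        // 1 card = 32 slots
  constexpr intptr_t kCompressedWordSizeLog2 = 2;  // assumed 4-byte slots
  constexpr intptr_t kBytesPerCardLog2 =
      kCompressedWordSizeLog2 + kSlotsPerCardLog2;
  constexpr intptr_t kBitsPerWord = 8 * sizeof(uintptr_t);

  uintptr_t page_start = 0x10000000;   // hypothetical Page header address
  uintptr_t slot = page_start + 1000;  // a slot 1000 bytes into the page

  intptr_t index = (slot - page_start) >> kBytesPerCardLog2;  // card number
  intptr_t word_offset = index / kBitsPerWord;  // which uword of the table
  intptr_t bit_offset = index % kBitsPerWord;   // which bit of that uword

  assert(index == 7);  // 1000 bytes / 128 bytes per card
  assert(word_offset == 0 && bit_offset == 7);
  return 0;
}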
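
Acquire and Release (lines 216-244) hand the page's [top_, end_) range to a single thread as a TLAB: the thread bump-allocates through its cached top/end, and Release writes the thread's final top back into the page and returns how many bytes were allocated from it. A minimal sketch of that bump-allocation protocol, with a plain struct standing in for the Page/Thread pair (illustrative only, not the VM API):

// Standalone illustration of bump-pointer (TLAB-style) allocation.
#include <cassert>
#include <cstdint>

struct Tlab {
  uintptr_t top;  // next free address
  uintptr_t end;  // first address past the usable range

  // Mirrors the shape of Page::TryAllocateGC: bump top, or return 0 when the
  // requested size does not fit.
  uintptr_t TryAllocate(intptr_t size) {
    uintptr_t result = top;
    uintptr_t new_top = result + size;
    if (new_top <= end) {
      top = new_top;
      return result;
    }
    return 0;
  }
};

int main() {
  Tlab tlab{/*top=*/0x1000, /*end=*/0x1020};  // 32 bytes of space
  assert(tlab.TryAllocate(16) == 0x1000);     // first object
  assert(tlab.TryAllocate(16) == 0x1010);     // second object fills the TLAB
  assert(tlab.TryAllocate(16) == 0);          // out of space
  return 0;
}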