Flutter Engine
page.cc
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/heap/page.h"

#include "platform/assert.h"
#include "platform/leak_sanitizer.h"
#include "vm/dart.h"
#include "vm/heap/become.h"
#include "vm/heap/compactor.h"
#include "vm/heap/marker.h"
#include "vm/heap/safepoint.h"
#include "vm/heap/sweeper.h"
#include "vm/lockers.h"
#include "vm/log.h"
#include "vm/object.h"
#include "vm/object_set.h"
#include "vm/os_thread.h"
#include "vm/virtual_memory.h"

namespace dart {

// This cache needs to be at least as big as FLAG_new_gen_semi_max_size or
// munmap will noticeably impact performance.
static constexpr intptr_t kPageCacheCapacity = 8 * kWordSize;
static Mutex* page_cache_mutex = nullptr;
static VirtualMemory* page_cache[kPageCacheCapacity] = {nullptr};
static intptr_t page_cache_size = 0;

void Page::Init() {
  ASSERT(page_cache_mutex == nullptr);
  page_cache_mutex = new Mutex(NOT_IN_PRODUCT("page_cache_mutex"));
}

void Page::ClearCache() {
  MutexLocker ml(page_cache_mutex);
  ASSERT(page_cache_size >= 0);
  ASSERT(page_cache_size <= kPageCacheCapacity);
  while (page_cache_size > 0) {
    delete page_cache[--page_cache_size];
  }
}

void Page::Cleanup() {
  ClearCache();
  delete page_cache_mutex;
  page_cache_mutex = nullptr;
}
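
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: the cache managed by Init(),
// ClearCache(), and Cleanup() above is a fixed-capacity LIFO stack of
// page-sized mappings behind a single mutex. Page::Deallocate() pushes onto
// it instead of unmapping, and Page::Allocate() pops before asking the OS.
// The standalone model below (hypothetical names, heap buffers standing in
// for VirtualMemory) compiles as its own translation unit and mirrors that
// discipline.

#include <cstddef>
#include <mutex>

namespace page_cache_model {

constexpr int kCapacity = 64;                   // stand-in for kPageCacheCapacity
constexpr std::size_t kPageBytes = 512 * 1024;  // stand-in for kPageSize

std::mutex cache_mutex;
char* cache[kCapacity];
int cache_size = 0;

// Pop a cached page if one exists; otherwise "map" a fresh one.
char* AllocatePage() {
  {
    std::lock_guard<std::mutex> lock(cache_mutex);
    if (cache_size > 0) {
      return cache[--cache_size];  // Reuse: no round trip to the OS.
    }
  }
  return new char[kPageBytes];
}

// Push into the cache while there is room; otherwise really release it.
void FreePage(char* page) {
  {
    std::lock_guard<std::mutex> lock(cache_mutex);
    if (cache_size < kCapacity) {
      cache[cache_size++] = page;
      return;
    }
  }
  delete[] page;
}

}  // namespace page_cache_model
// ---------------------------------------------------------------------------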

intptr_t Page::CachedSize() {
  MutexLocker ml(page_cache_mutex);
  return page_cache_size * kPageSize;
}

static bool CanUseCache(uword flags) {
  return (flags & (Page::kExecutable | Page::kImage | Page::kLarge |
                   Page::kVMIsolate)) == 0;
}

Page* Page::Allocate(intptr_t size, uword flags) {
  const bool executable = (flags & Page::kExecutable) != 0;
  const bool compressed = !executable;
  const char* name = executable ? "dart-code" : "dart-heap";

  VirtualMemory* memory = nullptr;
  if (CanUseCache(flags)) {
    // We don't automatically use the cache based on size and type because a
    // large page that happens to be the same size as a regular page can't
    // use the cache. Large pages are expected to be zeroed on allocation but
    // cached pages are dirty.
    ASSERT(size == kPageSize);
    MutexLocker ml(page_cache_mutex);
    ASSERT(page_cache_size >= 0);
    ASSERT(page_cache_size <= kPageCacheCapacity);
    if (page_cache_size > 0) {
      memory = page_cache[--page_cache_size];
    }
  }
  if (memory == nullptr) {
    memory = VirtualMemory::AllocateAligned(size, kPageSize, executable,
                                            compressed, name);
  }
  if (memory == nullptr) {
    return nullptr;  // Out of memory.
  }

  if ((flags & kNew) != 0) {
    // Initialized by generated code.
    MSAN_UNPOISON(memory->address(), size);

#if defined(DEBUG)
    uword* cursor = reinterpret_cast<uword*>(memory->address());
    uword* end = reinterpret_cast<uword*>(memory->end());
    while (cursor < end) {
      *cursor++ = kAllocationCanary;
    }
#endif
  } else {
    // We don't zap old-gen because we rely on implicit zero-initialization
    // of large typed data arrays.
  }

  Page* result = reinterpret_cast<Page*>(memory->address());
  ASSERT(result != nullptr);
  result->flags_ = flags;
  result->memory_ = memory;
  result->next_ = nullptr;
  result->forwarding_page_ = nullptr;
  result->card_table_ = nullptr;
  result->progress_bar_ = 0;
  result->owner_ = nullptr;
  result->top_ = 0;
  result->end_ = 0;
  result->survivor_end_ = 0;
  result->resolved_top_ = 0;
  result->live_bytes_ = 0;

  if ((flags & kNew) != 0) {
    uword top = result->object_start();
    uword end =
        memory->end() - kNewObjectAlignmentOffset - kAllocationRedZoneSize;
    result->top_ = top;
    result->end_ = end;
    result->survivor_end_ = top;
    result->resolved_top_ = top;
  }

  LSAN_REGISTER_ROOT_REGION(result, sizeof(*result));

  return result;
}
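
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: for new-space pages, Allocate()
// seeds top_ and end_ so that object allocation becomes a pointer bump
// against a limit. A minimal standalone model of that fast path follows
// (hypothetical names; alignment, canaries, and the GC slow path omitted).

#include <cstdint>

namespace bump_model {

struct ModelPage {
  uintptr_t top;  // next free address, like Page::top_
  uintptr_t end;  // first address past the allocatable region, like Page::end_
};

// Returns the address of `size` fresh bytes, or 0 when the page is full and
// the caller must fetch another page (or trigger a collection).
uintptr_t TryAllocate(ModelPage* page, uintptr_t size) {
  if (page->end - page->top < size) {
    return 0;  // No room: take the slow path.
  }
  uintptr_t result = page->top;
  page->top += size;  // The whole fast path is one compare and one add.
  return result;
}

}  // namespace bump_model
// ---------------------------------------------------------------------------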

void Page::Deallocate() {
  if (is_image()) {
    delete memory_;
    // For a heap page from a snapshot, the Page object lives in the malloc
    // heap rather than the page itself.
    free(this);
    return;
  }

  free(card_table_);

  // Load before unregistering with LSAN, or LSAN will temporarily think it has
  // been leaked.
  VirtualMemory* memory = memory_;

  LSAN_UNREGISTER_ROOT_REGION(this, sizeof(*this));

  if (CanUseCache(flags_)) {
    ASSERT(memory->size() == kPageSize);
    MutexLocker ml(page_cache_mutex);
    ASSERT(page_cache_size >= 0);
    ASSERT(page_cache_size <= kPageCacheCapacity);
    if (page_cache_size < kPageCacheCapacity) {
      intptr_t size = memory->size();
#if defined(DEBUG)
      if ((flags_ & kNew) != 0) {
        memset(memory->address(), Heap::kZapByte, size);
      } else {
        // We don't zap old-gen because we rely on implicit zero-initialization
        // of large typed data arrays.
      }
#endif
      MSAN_POISON(memory->address(), size);
      page_cache[page_cache_size++] = memory;
      memory = nullptr;
    }
  }
  delete memory;
}

void Page::VisitObjects(ObjectVisitor* visitor) const {
  ASSERT(Thread::Current()->OwnsGCSafepoint() ||
         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
  NoSafepointScope no_safepoint;
  uword obj_addr = object_start();
  uword end_addr = object_end();
  while (obj_addr < end_addr) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(obj_addr);
    visitor->VisitObject(raw_obj);
    obj_addr += raw_obj->untag()->HeapSize();
  }
  ASSERT(obj_addr == end_addr);
}
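
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: the visit loops here work because
// every object's header encodes its own size (HeapSize()), so a page can be
// scanned as a dense run of variable-length records. Standalone model with a
// plain size-prefixed record (hypothetical layout):

#include <cstdint>

namespace walk_model {

struct Record {
  uint32_t size_in_bytes;  // header field playing the role of HeapSize()
  // payload bytes follow
};

void VisitAll(uint8_t* start, uint8_t* end, void (*visit)(Record*)) {
  uint8_t* cursor = start;
  while (cursor < end) {
    Record* record = reinterpret_cast<Record*>(cursor);
    visit(record);
    cursor += record->size_in_bytes;  // header says where the next one begins
  }
  // A well-formed run lands exactly on `end`, mirroring the ASSERT above.
}

}  // namespace walk_model
// ---------------------------------------------------------------------------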
187
189 uword obj_addr = object_start();
190 uword end_addr = object_end();
191 while (obj_addr < end_addr) {
192 ObjectPtr raw_obj = UntaggedObject::FromAddr(obj_addr);
193 visitor->VisitObject(raw_obj);
194 obj_addr += raw_obj->untag()->HeapSize();
195 }
196}

void Page::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
  ASSERT(Thread::Current()->OwnsGCSafepoint() ||
         (Thread::Current()->task_kind() == Thread::kCompactorTask) ||
         (Thread::Current()->task_kind() == Thread::kMarkerTask));
  NoSafepointScope no_safepoint;
  uword obj_addr = object_start();
  uword end_addr = object_end();
  while (obj_addr < end_addr) {
    ObjectPtr raw_obj = UntaggedObject::FromAddr(obj_addr);
    obj_addr += raw_obj->untag()->VisitPointers(visitor);
  }
  ASSERT(obj_addr == end_addr);
}

void Page::VisitRememberedCards(PredicateObjectPointerVisitor* visitor,
                                bool only_marked) {
  ASSERT(Thread::Current()->OwnsGCSafepoint() ||
         (Thread::Current()->task_kind() == Thread::kScavengerTask) ||
         (Thread::Current()->task_kind() == Thread::kIncrementalCompactorTask));
  NoSafepointScope no_safepoint;

  if (card_table_ == nullptr) {
    return;
  }

  ArrayPtr obj =
      static_cast<ArrayPtr>(UntaggedObject::FromAddr(object_start()));
  ASSERT(obj->IsArray() || obj->IsImmutableArray());
  ASSERT(obj->untag()->IsCardRemembered());
  if (only_marked && !obj->untag()->IsMarked()) return;
  CompressedObjectPtr* obj_from = obj->untag()->from();
  CompressedObjectPtr* obj_to =
      obj->untag()->to(Smi::Value(obj->untag()->length()));
  uword heap_base = obj.heap_base();

  const size_t size_in_bits = card_table_size();
  const size_t size_in_words =
      Utils::RoundUp(size_in_bits, kBitsPerWord) >> kBitsPerWordLog2;
  for (;;) {
    const size_t word_offset = progress_bar_.fetch_add(1);
    if (word_offset >= size_in_words) break;

    uword cell = card_table_[word_offset];
    if (cell == 0) continue;

    for (intptr_t bit_offset = 0; bit_offset < kBitsPerWord; bit_offset++) {
      const uword bit_mask = static_cast<uword>(1) << bit_offset;
      if ((cell & bit_mask) == 0) continue;
      const intptr_t i = (word_offset << kBitsPerWordLog2) + bit_offset;

      CompressedObjectPtr* card_from =
          reinterpret_cast<CompressedObjectPtr*>(this) +
          (i << kSlotsPerCardLog2);
      CompressedObjectPtr* card_to =
          reinterpret_cast<CompressedObjectPtr*>(card_from) +
          (1 << kSlotsPerCardLog2) - 1;  // Minus 1 because to is inclusive.

      if (card_from < obj_from) {
        // First card overlaps with header.
        card_from = obj_from;
      }
      if (card_to > obj_to) {
        // Last card(s) may extend past the object. Array truncation can make
        // this happen for more than one card.
        card_to = obj_to;
      }

      bool has_new_target = visitor->PredicateVisitCompressedPointers(
          heap_base, card_from, card_to);

      if (!has_new_target) {
        cell ^= bit_mask;
      }
    }
    card_table_[word_offset] = cell;
  }
}
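
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: each card-table bit covers
// 2^kSlotsPerCardLog2 compressed slots, and the bits are packed into
// word-sized cells, which is why the loop above splits a card index into a
// word_offset and a bit_offset. Standalone model of the index arithmetic;
// the constant values below are assumptions for illustration, not the VM's.

#include <cstdint>

namespace card_model {

constexpr int kSlotsPerCardLog2Model = 6;  // assumed: 64 slots per card
constexpr int kBitsPerWordLog2Model = 6;   // assumed: 64-bit words
constexpr int kBitsPerWordModel = 1 << kBitsPerWordLog2Model;

// Which card covers slot index `slot` (slot 0 = start of the page)?
inline uint64_t CardForSlot(uint64_t slot) {
  return slot >> kSlotsPerCardLog2Model;
}

// Where that card's dirty bit lives in the packed table.
inline uint64_t WordOffset(uint64_t card) {
  return card >> kBitsPerWordLog2Model;
}
inline uint64_t BitOffset(uint64_t card) {
  return card & (kBitsPerWordModel - 1);
}

// First slot covered by a card: the inverse used to compute card_from above.
inline uint64_t FirstSlotOfCard(uint64_t card) {
  return card << kSlotsPerCardLog2Model;
}

}  // namespace card_model
// ---------------------------------------------------------------------------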

void Page::ResetProgressBar() {
  progress_bar_ = 0;
}

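// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: progress_bar_ is an atomic
// counter that lets several GC workers claim card-table words with no other
// coordination; each fetch_add hands out the next index exactly once, and
// ResetProgressBar() rewinds the counter for the next pass. Standalone model
// (hypothetical names):

#include <atomic>
#include <cstddef>

namespace claim_model {

std::atomic<std::size_t> progress{0};

// Each worker loops over ClaimNext, handling one unit of work per call;
// returns false once the range [0, limit) has been fully handed out.
bool ClaimNext(std::size_t limit, std::size_t* out_index) {
  std::size_t i = progress.fetch_add(1, std::memory_order_relaxed);
  if (i >= limit) return false;
  *out_index = i;
  return true;
}

void Reset() { progress.store(0, std::memory_order_relaxed); }

}  // namespace claim_model
// ---------------------------------------------------------------------------
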
void Page::WriteProtect(bool read_only) {
  ASSERT(!is_image());

  VirtualMemory::Protection prot;
  if (read_only) {
    if (is_executable()) {
      prot = VirtualMemory::kReadExecute;
    } else {
      prot = VirtualMemory::kReadOnly;
    }
  } else {
    prot = VirtualMemory::kReadWrite;
  }
  memory_->Protect(prot);
}
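
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of page.cc: WriteProtect() funnels into the
// platform's page-protection primitive. On POSIX systems that primitive is
// mprotect(); below is a standalone model of toggling a mapping between
// read-only and read-write (hypothetical helper; error handling reduced to a
// bool). `address` must be page-aligned and `size` a multiple of the page
// size, the same precondition the VM's page-aligned mappings satisfy.

#include <cstddef>
#include <sys/mman.h>

namespace protect_model {

bool SetWritable(void* address, std::size_t size, bool writable) {
  int prot = writable ? (PROT_READ | PROT_WRITE) : PROT_READ;
  return mprotect(address, size, prot) == 0;
}

}  // namespace protect_model
// ---------------------------------------------------------------------------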

}  // namespace dart