zone.cc
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/zone.h"

#include "platform/assert.h"
#include "platform/leak_sanitizer.h"
#include "platform/utils.h"
#include "vm/dart_api_state.h"
#include "vm/flags.h"
#include "vm/handles_impl.h"
#include "vm/heap/heap.h"
#include "vm/os.h"
#include "vm/virtual_memory.h"

namespace dart {

RelaxedAtomic<intptr_t> Zone::total_size_ = {0};

// Zone segments represent chunks of memory: They have a starting
// address encoded in the this pointer and a size in bytes. They are
// chained together to form the backing storage for an expanding zone.
class Zone::Segment {
 public:
  Segment* next() const { return next_; }
  intptr_t size() const { return size_; }
  VirtualMemory* memory() const { return memory_; }

  uword start() { return address(sizeof(Segment)); }
  uword end() { return address(size_); }

  // Allocate or delete individual segments.
  static Segment* New(intptr_t size, Segment* next);
  static void DeleteSegmentList(Segment* segment);

 private:
  Segment* next_;
  intptr_t size_;
  VirtualMemory* memory_;
  void* alignment_;

  // Computes the address of the nth byte in this segment.
  uword address(intptr_t n) { return reinterpret_cast<uword>(this) + n; }

  DISALLOW_IMPLICIT_CONSTRUCTORS(Segment);
};
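
// Illustrative note (not part of the original file): because a Segment's own
// address doubles as the start of its memory, header and payload share one
// mapping. For a segment mapped at address A with size S:
//
//   s->start() == A + sizeof(Segment)  // first usable payload byte
//   s->end()   == A + S                // one past the last payload byte
//
// so each segment carries S - sizeof(Segment) bytes of usable space.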

// tcmalloc and jemalloc have both been observed to hold onto lots of free'd
// zone segments (jemalloc to the point of causing OOM), so instead of using
// malloc to allocate segments, we allocate directly from mmap/zx_vmo_create/
// VirtualAlloc, and cache a small number of the normal-sized segments.
static constexpr intptr_t kSegmentCacheCapacity = 16;  // 1 MB of Segments
static Mutex* segment_cache_mutex = nullptr;
static VirtualMemory* segment_cache[kSegmentCacheCapacity] = {nullptr};
static intptr_t segment_cache_size = 0;
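
// Sketch of the cache discipline (assuming kSegmentSize is 64 KB, which is
// what the "1 MB of Segments" note above implies for 16 entries):
// segment_cache is a mutex-guarded LIFO stack of spare mappings.
// Segment::New() pops one when available; DeleteSegmentList() pushes
// mappings back until the cache is full and only then really unmaps.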

void Zone::Init() {
  ASSERT(segment_cache_mutex == nullptr);
  segment_cache_mutex = new Mutex(NOT_IN_PRODUCT("segment_cache_mutex"));
}

void Zone::Cleanup() {
  ClearCache();
  delete segment_cache_mutex;
  segment_cache_mutex = nullptr;
}

void Zone::ClearCache() {
  MutexLocker ml(segment_cache_mutex);
  ASSERT(segment_cache_size >= 0);
  ASSERT(segment_cache_size <= kSegmentCacheCapacity);
  while (segment_cache_size > 0) {
    delete segment_cache[--segment_cache_size];
  }
}

Zone::Segment* Zone::Segment::New(intptr_t size, Zone::Segment* next) {
  size = Utils::RoundUp(size, VirtualMemory::PageSize());
  VirtualMemory* memory = nullptr;
  if (size == kSegmentSize) {
    MutexLocker ml(segment_cache_mutex);
    ASSERT(segment_cache_size >= 0);
    ASSERT(segment_cache_size <= kSegmentCacheCapacity);
    if (segment_cache_size > 0) {
      memory = segment_cache[--segment_cache_size];
    }
  }
  if (memory == nullptr) {
    bool executable = false;
    bool compressed = false;
    memory = VirtualMemory::Allocate(size, executable, compressed, "dart-zone");
    total_size_.fetch_add(size);
  }
  if (memory == nullptr) {
    OUT_OF_MEMORY();
  }
  Segment* result = reinterpret_cast<Segment*>(memory->start());
#ifdef DEBUG
  // Zap the entire allocated segment (including the header).
  ASAN_UNPOISON(reinterpret_cast<void*>(result), size);
  memset(reinterpret_cast<void*>(result), kZapUninitializedByte, size);
#endif
  result->next_ = next;
  result->size_ = size;
  result->memory_ = memory;
  result->alignment_ = nullptr;  // Avoid unused variable warnings.

  LSAN_REGISTER_ROOT_REGION(result, sizeof(*result));

  return result;
}

void Zone::Segment::DeleteSegmentList(Segment* head) {
  Segment* current = head;
  while (current != nullptr) {
    intptr_t size = current->size();
    Segment* next = current->next();
    VirtualMemory* memory = current->memory();
#ifdef DEBUG
    // Zap the entire current segment (including the header).
    ASAN_UNPOISON(reinterpret_cast<void*>(current), current->size());
    memset(reinterpret_cast<void*>(current), kZapDeletedByte, current->size());
#endif
    LSAN_UNREGISTER_ROOT_REGION(current, sizeof(*current));

    if (size == kSegmentSize) {
      MutexLocker ml(segment_cache_mutex);
      ASSERT(segment_cache_size >= 0);
      ASSERT(segment_cache_size <= kSegmentCacheCapacity);
      if (segment_cache_size < kSegmentCacheCapacity) {
        segment_cache[segment_cache_size++] = memory;
        memory = nullptr;
      }
    }
    if (memory != nullptr) {
      total_size_.fetch_sub(size);
      delete memory;
    }
    current = next;
  }
}
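
// Taken together, Segment::New() and DeleteSegmentList() turn segment_cache
// into a bounded free list for kSegmentSize mappings: deletion parks a
// mapping in the cache instead of unmapping it (until the cache is full),
// and the next New(kSegmentSize, ...) reuses it. Segments of any other size
// always go straight to VirtualMemory::Allocate() and delete.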

Zone::Zone()
    : position_(reinterpret_cast<uword>(&buffer_)),
      limit_(position_ + kInitialChunkSize),
      segments_(nullptr),
      previous_(nullptr),
      handles_() {
  ASSERT(Utils::IsAligned(position_, kAlignment));
#ifdef DEBUG
  // Zap the entire initial buffer.
  memset(&buffer_, kZapUninitializedByte, kInitialChunkSize);
#endif
}

Zone::~Zone() {
  if (FLAG_trace_zones) {
    Print();
  }
  Segment::DeleteSegmentList(segments_);
}

void Zone::Reset() {
  // Traverse the chained list of segments, zapping (in debug mode)
  // and freeing every zone segment.
  Segment::DeleteSegmentList(segments_);
  segments_ = nullptr;

#ifdef DEBUG
  ASAN_UNPOISON(&buffer_, kInitialChunkSize);
  memset(&buffer_, kZapDeletedByte, kInitialChunkSize);
#endif
  position_ = reinterpret_cast<uword>(&buffer_);
  limit_ = position_ + kInitialChunkSize;
  size_ = 0;
  small_segment_capacity_ = 0;
  previous_ = nullptr;
  handles_.Reset();
}
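
// Usage sketch (illustrative; DoTaskAllocatingIn is a hypothetical helper):
// Reset() frees all chained segments but keeps the Zone object and its
// inline buffer, so a long-lived worker can recycle one zone per task:
//
//   Zone* zone = ...;
//   for (;;) {
//     DoTaskAllocatingIn(zone);  // zone->Alloc<...>() during the task
//     zone->Reset();             // drop every task allocation at once
//   }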

uintptr_t Zone::SizeInBytes() const {
  return size_;
}

uintptr_t Zone::CapacityInBytes() const {
  uintptr_t size = kInitialChunkSize;
  for (Segment* s = segments_; s != nullptr; s = s->next()) {
    size += s->size();
  }
  return size;
}
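
// Note the distinction: SizeInBytes() counts bytes handed out to callers,
// while CapacityInBytes() counts the memory reserved to back them (the
// inline buffer plus every chained segment), so capacity should always be
// at least size, modulo alignment padding.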

void Zone::Print() const {
  intptr_t segment_size = CapacityInBytes();
  intptr_t scoped_handle_size = handles_.ScopedHandlesCapacityInBytes();
  intptr_t zone_handle_size = handles_.ZoneHandlesCapacityInBytes();
  intptr_t total_size = segment_size + scoped_handle_size + zone_handle_size;
  OS::PrintErr("Zone(%p, segments: %" Pd ", scoped_handles: %" Pd
               ", zone_handles: %" Pd ", total: %" Pd ")\n",
               this, segment_size, scoped_handle_size, zone_handle_size,
               total_size);
}

uword Zone::AllocateExpand(intptr_t size) {
  ASSERT(size >= 0);
  if (FLAG_trace_zones) {
    OS::PrintErr("*** Expanding zone 0x%" Px "\n",
                 reinterpret_cast<intptr_t>(this));
    Print();
  }
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(Utils::IsAligned(size, kAlignment));
  intptr_t free_size = (limit_ - position_);
  ASSERT(free_size < size);

  // First check to see if we should just chain it as a large segment.
  intptr_t max_size =
      Utils::RoundDown(kSegmentSize - sizeof(Segment), kAlignment);
  ASSERT(max_size > 0);
  if (size > max_size) {
    return AllocateLargeSegment(size);
  }

  const intptr_t kSuperPageSize = 2 * MB;
  intptr_t next_size;
  if (small_segment_capacity_ < kSuperPageSize) {
    // When the Zone is small, grow linearly to keep waste low and serve new
    // segments from the segment cache, avoiding expensive mmap calls.
    next_size = kSegmentSize;
  } else {
    // When the Zone is large, grow geometrically to avoid Page Table Entry
    // exhaustion, using a 1.125 growth ratio.
    next_size = Utils::RoundUp(small_segment_capacity_ >> 3, kSuperPageSize);
  }
  ASSERT(next_size >= kSegmentSize);

  // Allocate another segment and chain it up.
  segments_ = Segment::New(next_size, segments_);
  small_segment_capacity_ += next_size;

  // Recompute 'position' and 'limit' based on the new head segment.
  uword result = Utils::RoundUp(segments_->start(), kAlignment);
  position_ = result + size;
  limit_ = segments_->end();
  size_ += size;
  ASSERT(position_ <= limit_);
  return result;
}
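
// Worked example of the growth policy (illustrative): below 2 MB of small-
// segment capacity the zone keeps adding fixed kSegmentSize segments, which
// the segment cache can serve cheaply. Past that, each new segment is
// RoundUp(capacity / 8, 2 MB): at 16 MB of capacity the next segment is
// 2 MB, at 32 MB it is 4 MB, so capacity grows by roughly a 1.125 factor
// per expansion, matching the ratio mentioned above.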

uword Zone::AllocateLargeSegment(intptr_t size) {
  ASSERT(size >= 0);
  // Make sure the requested size is already properly aligned and that
  // there isn't enough room in the Zone to satisfy the request.
  ASSERT(Utils::IsAligned(size, kAlignment));
  intptr_t free_size = (limit_ - position_);
  ASSERT(free_size < size);

  // Create a new large segment and chain it up.
  // Account for bookkeeping fields in the size.
  size_ += size;
  size += Utils::RoundUp(sizeof(Segment), kAlignment);
  segments_ = Segment::New(size, segments_);

  uword result = Utils::RoundUp(segments_->start(), kAlignment);
  return result;
}

char* Zone::MakeCopyOfString(const char* str) {
  intptr_t len = strlen(str) + 1;  // '\0'-terminated.
  char* copy = Alloc<char>(len);
  strncpy(copy, str, len);
  return copy;
}
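
// Example usage (illustrative): the copy is zone-owned, so it needs no
// explicit free and stays valid until the zone is deleted or Reset():
//
//   char* name = zone->MakeCopyOfString("isolate-main");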

char* Zone::MakeCopyOfStringN(const char* str, intptr_t len) {
  ASSERT(len >= 0);
  for (intptr_t i = 0; i < len; i++) {
    if (str[i] == '\0') {
      len = i;
      break;
    }
  }
  char* copy = Alloc<char>(len + 1);  // +1 for '\0'
  strncpy(copy, str, len);
  copy[len] = '\0';
  return copy;
}

char* Zone::ConcatStrings(const char* a, const char* b, char join) {
  intptr_t a_len = (a == nullptr) ? 0 : strlen(a) + 1;
  const intptr_t b_len = strlen(b) + 1;  // '\0'-terminated.
  const intptr_t len = a_len + b_len;
  char* copy = Alloc<char>(len);
  if (a_len > 0) {
    strncpy(copy, a, a_len);
    // Insert join character in place of a's copied terminator.
    copy[a_len - 1] = join;
  }
  strncpy(&copy[a_len], b, b_len);
  return copy;
}
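
// Example (illustrative): the join character is inserted only when 'a' is
// present, so both of these are well formed:
//
//   zone->ConcatStrings("foo", "bar", '/');    // yields "foo/bar"
//   zone->ConcatStrings(nullptr, "bar", '/');  // yields "bar", no join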

void Zone::VisitObjectPointers(ObjectPointerVisitor* visitor) {
  Zone* zone = this;
  while (zone != nullptr) {
    zone->handles()->VisitObjectPointers(visitor);
    zone = zone->previous_;
  }
}

char* Zone::PrintToString(const char* format, ...) {
  va_list args;
  va_start(args, format);
  char* buffer = OS::VSCreate(this, format, args);
  va_end(args);
  return buffer;
}
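
// Example usage (illustrative): formats directly into zone-owned storage,
// so the result does not need to be freed by the caller:
//
//   char* msg = zone->PrintToString("expanded to %" Pd " bytes", size);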

char* Zone::VPrint(const char* format, va_list args) {
  return OS::VSCreate(this, format, args);
}
324
326#if defined(DART_USE_ABSL)
327 // DART_USE_ABSL encodes the use of fibers in the Dart VM for threading.
328 : StackResource(thread), zone_(new Zone()) {
329#else
330 : StackResource(thread), zone_() {
331#endif // defined(DART_USE_ABSL)
332 if (FLAG_trace_zones) {
333 OS::PrintErr("*** Starting a new Stack zone 0x%" Px "(0x%" Px ")\n",
334 reinterpret_cast<intptr_t>(this),
335 reinterpret_cast<intptr_t>(GetZone()));
336 }
337
338 // This thread must be preventing safepoints or the GC could be visiting the
339 // chain of handle blocks we're about the mutate.
340 ASSERT(Thread::Current()->MayAllocateHandles());
341
342 Zone* lzone = GetZone();
343 lzone->Link(thread->zone());
344 thread->set_zone(lzone);
345}

StackZone::~StackZone() {
  // This thread must be preventing safepoints or the GC could be visiting the
  // chain of handle blocks we're about to mutate.
  ASSERT(Thread::Current()->MayAllocateHandles());

  Zone* lzone = GetZone();
  ASSERT(thread()->zone() == lzone);
  thread()->set_zone(lzone->previous_);
  if (FLAG_trace_zones) {
    OS::PrintErr("*** Deleting Stack zone 0x%" Px "(0x%" Px ")\n",
                 reinterpret_cast<intptr_t>(this),
                 reinterpret_cast<intptr_t>(lzone));
  }

#if defined(DART_USE_ABSL)
  // DART_USE_ABSL encodes the use of fibers in the Dart VM for threading.
  delete zone_;
#endif  // defined(DART_USE_ABSL)
}
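
// Typical usage (illustrative sketch): StackZone is a scoped resource that
// links a fresh zone onto the current thread on construction and unlinks
// it again on destruction, so allocations made inside the scope die with
// the scope:
//
//   {
//     StackZone stack_zone(Thread::Current());
//     Zone* zone = stack_zone.GetZone();
//     char* tmp = zone->PrintToString("tmp-%d", 42);
//     // ... use tmp ...
//   }  // tmp's memory is reclaimed here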

}  // namespace dart