virtual_memory_posix.cc
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX) || \
    defined(DART_HOST_OS_MACOS)

#include "vm/virtual_memory.h"

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#if defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX)
#include <sys/prctl.h>
#endif

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/heap/pages.h"
#include "vm/isolate.h"
#include "vm/virtual_memory_compressed.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_INFO(msg, ...) OS::PrintErr(msg, ##__VA_ARGS__)
#else
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

// The standard MAP_FAILED macro triggers "error: use of old-style cast"
// because it is defined as ((void *) -1), so redefine it with a C++ cast.
#undef MAP_FAILED
#define MAP_FAILED reinterpret_cast<void*>(-1)

#if defined(DART_HOST_OS_IOS)
#define LARGE_RESERVATIONS_MAY_FAIL
#endif

DECLARE_FLAG(bool, write_protect_code);

#if defined(DART_TARGET_OS_LINUX)
DECLARE_FLAG(bool, generate_perf_events_symbols);
DECLARE_FLAG(bool, generate_perf_jitdump);
#endif

uword VirtualMemory::page_size_ = 0;
VirtualMemory* VirtualMemory::compressed_heap_ = nullptr;

static void* Map(void* addr,
                 size_t length,
                 int prot,
                 int flags,
                 int fd,
                 off_t offset) {
  void* result = mmap(addr, length, prot, flags, fd, offset);
  int error = errno;
  LOG_INFO("mmap(%p, 0x%" Px ", %u, ...): %p\n", addr, length, prot, result);
  if ((result == MAP_FAILED) && (error != ENOMEM)) {
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("mmap failed: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
  return result;
}

static void Unmap(uword start, uword end) {
  ASSERT(start <= end);
  uword size = end - start;
  if (size == 0) {
    return;
  }

  if (munmap(reinterpret_cast<void*>(start), size) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("munmap failed: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
}

static void* GenericMapAligned(void* hint,
                               int prot,
                               intptr_t size,
                               intptr_t alignment,
                               intptr_t allocated_size,
                               int map_flags) {
  void* address = Map(hint, allocated_size, prot, map_flags, -1, 0);
  if (address == MAP_FAILED) {
    return nullptr;
  }

  const uword base = reinterpret_cast<uword>(address);
  const uword aligned_base = Utils::RoundUp(base, alignment);

  Unmap(base, aligned_base);
  Unmap(aligned_base + size, base + allocated_size);
  return reinterpret_cast<void*>(aligned_base);
}
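
// Worked example (illustrative only, not called anywhere): obtaining a 1 MB
// region aligned to 1 MB on a system with 4 KB pages. The caller passes
// allocated_size = size + alignment - page size, and GenericMapAligned unmaps
// the misaligned head and tail of the over-sized reservation:
//
//   void* p = GenericMapAligned(/*hint=*/nullptr, PROT_READ | PROT_WRITE,
//                               /*size=*/1 * MB, /*alignment=*/1 * MB,
//                               /*allocated_size=*/2 * MB - 4 * KB,
//                               MAP_PRIVATE | MAP_ANONYMOUS);
//
// Any page-aligned base returned by mmap sits at most 1 MB - 4 KB below the
// next 1 MB boundary, so a full 1 MB span always fits in the reservation.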

intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t page_size = getpagesize();
  ASSERT(page_size != 0);
  ASSERT(Utils::IsPowerOfTwo(page_size));
  return page_size;
}

#if defined(DART_COMPRESSED_POINTERS) && defined(LARGE_RESERVATIONS_MAY_FAIL)
// Truncate to the largest subregion in [region] that doesn't cross an
// [alignment] boundary.
static MemoryRegion ClipToAlignedRegion(MemoryRegion region, size_t alignment) {
  uword base = region.start();
  uword aligned_base = Utils::RoundUp(base, alignment);
  uword size_below =
      region.end() >= aligned_base ? aligned_base - base : region.size();
  uword size_above =
      region.end() >= aligned_base ? region.end() - aligned_base : 0;
  ASSERT(size_below + size_above == region.size());
  if (size_below >= size_above) {
    Unmap(aligned_base, aligned_base + size_above);
    return MemoryRegion(reinterpret_cast<void*>(base), size_below);
  }
  Unmap(base, base + size_below);
  if (size_above > alignment) {
    Unmap(aligned_base + alignment, aligned_base + size_above);
    size_above = alignment;
  }
  return MemoryRegion(reinterpret_cast<void*>(aligned_base), size_above);
}
#endif  // LARGE_RESERVATIONS_MAY_FAIL
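
// Worked example (illustrative only): clipping a 2 GB reservation that
// straddles a 4 GB boundary, say [3 GB, 5 GB) with alignment = 4 GB:
//
//   aligned_base = 4 GB
//   size_below   = 4 GB - 3 GB = 1 GB   (portion below the boundary)
//   size_above   = 5 GB - 4 GB = 1 GB   (portion at or above it)
//
// Since size_below >= size_above, [4 GB, 5 GB) is unmapped and [3 GB, 4 GB)
// is returned: the largest subregion that does not cross the boundary.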

void VirtualMemory::Init() {
  if (FLAG_old_gen_heap_size < 0 || FLAG_old_gen_heap_size > kMaxAddrSpaceMB) {
    OS::PrintErr(
        "warning: value specified for --old_gen_heap_size %d is larger than"
        " the physically addressable range, using 0 (unlimited) instead.\n",
        FLAG_old_gen_heap_size);
    FLAG_old_gen_heap_size = 0;
  }
  if (FLAG_new_gen_semi_max_size < 0 ||
      FLAG_new_gen_semi_max_size > kMaxAddrSpaceMB) {
    OS::PrintErr(
        "warning: value specified for --new_gen_semi_max_size %d is larger"
        " than the physically addressable range, using %" Pd " instead.\n",
        FLAG_new_gen_semi_max_size, kDefaultNewGenSemiMaxSize);
    FLAG_new_gen_semi_max_size = kDefaultNewGenSemiMaxSize;
  }
  page_size_ = CalculatePageSize();
#if defined(DART_COMPRESSED_POINTERS)
  ASSERT(compressed_heap_ == nullptr);
#if defined(LARGE_RESERVATIONS_MAY_FAIL)
  // Try to reserve a region for the compressed heap by requesting decreasing
  // powers-of-two until one succeeds, and use the largest subregion that does
  // not cross a 4GB boundary. The subregion itself is not necessarily
  // 4GB-aligned.
  for (size_t allocated_size = kCompressedHeapSize + kCompressedHeapAlignment;
       allocated_size >= kCompressedPageSize; allocated_size >>= 1) {
    void* address = GenericMapAligned(
        nullptr, PROT_NONE, allocated_size, kCompressedPageSize,
        allocated_size + kCompressedPageSize,
        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE);
    if (address == nullptr) continue;

    MemoryRegion region(address, allocated_size);
    region = ClipToAlignedRegion(region, kCompressedHeapAlignment);
    compressed_heap_ = new VirtualMemory(region, region);
    break;
  }
#else
  compressed_heap_ = Reserve(kCompressedHeapSize, kCompressedHeapAlignment);
#endif
  if (compressed_heap_ == nullptr) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("Failed to reserve region for compressed heap: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
  VirtualMemoryCompressedHeap::Init(compressed_heap_->address(),
                                    compressed_heap_->size());
#endif  // defined(DART_COMPRESSED_POINTERS)

#if defined(DART_HOST_OS_LINUX) || defined(DART_HOST_OS_ANDROID)
  FILE* fp = fopen("/proc/sys/vm/max_map_count", "r");
  if (fp != nullptr) {
    size_t max_map_count = 0;
    int count = fscanf(fp, "%zu", &max_map_count);
    fclose(fp);
    if (count == 1) {
      size_t max_heap_pages = FLAG_old_gen_heap_size * MB / kPageSize;
      if (max_map_count < max_heap_pages) {
        OS::PrintErr(
            "warning: vm.max_map_count (%zu) is not large enough to support "
            "--old_gen_heap_size=%d. Consider increasing it with `sysctl -w "
            "vm.max_map_count=%zu`\n",
            max_map_count, FLAG_old_gen_heap_size, max_heap_pages);
      }
    }
  }
#endif
}
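
// Illustrative arithmetic for the warning above: max_heap_pages is
// FLAG_old_gen_heap_size (in MB) * MB / kPageSize. Assuming 512 KB heap pages
// (the kPageSize value this sketch presumes), --old_gen_heap_size=8192 (8 GB)
// needs 16384 map entries, well below the common vm.max_map_count default of
// 65530, while a 64 GB heap would need 131072 and require raising the limit.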

void VirtualMemory::Cleanup() {
#if defined(DART_COMPRESSED_POINTERS)
  delete compressed_heap_;
#endif  // defined(DART_COMPRESSED_POINTERS)
  page_size_ = 0;
#if defined(DART_COMPRESSED_POINTERS)
  compressed_heap_ = nullptr;
  VirtualMemoryCompressedHeap::Cleanup();
#endif  // defined(DART_COMPRESSED_POINTERS)
}

VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              bool is_compressed,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect.
  ASSERT(Utils::IsAligned(size, PageSize()));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, PageSize()));
  ASSERT(name != nullptr);

#if defined(DART_COMPRESSED_POINTERS)
  if (is_compressed) {
    RELEASE_ASSERT(!is_executable);
    MemoryRegion region =
        VirtualMemoryCompressedHeap::Allocate(size, alignment);
    if (region.pointer() == nullptr) {
#if defined(LARGE_RESERVATIONS_MAY_FAIL)
      // Try a fresh allocation and hope it ends up in the right region. On
      // macOS/iOS, this works surprisingly often.
      void* address =
          GenericMapAligned(nullptr, PROT_READ | PROT_WRITE, size, alignment,
                            size + alignment, MAP_PRIVATE | MAP_ANONYMOUS);
      if (address != nullptr) {
        uword ok_start = Utils::RoundDown(compressed_heap_->start(),
                                          kCompressedHeapAlignment);
        uword ok_end = ok_start + kCompressedHeapSize;
        uword start = reinterpret_cast<uword>(address);
        uword end = start + size;
        if ((start >= ok_start) && (end <= ok_end)) {
          MemoryRegion region(address, size);
          return new VirtualMemory(region, region);
        }
        munmap(address, size);
      }
#endif
      return nullptr;
    }
    Commit(region.pointer(), region.size());
    return new VirtualMemory(region, region);
  }
#endif  // defined(DART_COMPRESSED_POINTERS)

  const intptr_t allocated_size = size + alignment - PageSize();
  const int prot =
      PROT_READ | PROT_WRITE |
      ((is_executable && !FLAG_write_protect_code) ? PROT_EXEC : 0);

  int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if (defined(DART_HOST_OS_MACOS) && !defined(DART_HOST_OS_IOS))
  if (is_executable && IsAtLeastOS10_14()) {
    map_flags |= MAP_JIT;
  }
#endif  // defined(DART_HOST_OS_MACOS)
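  // Note: MAP_JIT marks the mapping as one that will hold JIT-generated code,
  // which the macOS hardened runtime requires for executable anonymous
  // memory; the flag first appeared in macOS 10.14 (Mojave), hence the
  // runtime version check above.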

  void* hint = nullptr;
  // Some 64-bit microarchitectures store only the low 32 bits of targets as
  // part of indirect branch prediction, predicting that the target's upper
  // bits will be the same as the call instruction's address. This leads to
  // misprediction for indirect calls crossing a 4GB boundary. We ask mmap to
  // place our generated code near the VM binary to avoid this.
  if (is_executable) {
    hint = reinterpret_cast<void*>(&Dart_Initialize);
  }
  void* address =
      GenericMapAligned(hint, prot, size, alignment, allocated_size, map_flags);
#if defined(DART_HOST_OS_LINUX)
  // On WSL 1, trying to allocate memory close to the binary by supplying a
  // hint fails with ENOMEM for unclear reasons. Some reports suggest that
  // this might be related to the alignment of the hint, but aligning it to
  // 64 KB does not make the issue go away in our experiments. Instead, just
  // retry without any hint.
  if (address == nullptr && hint != nullptr &&
      Utils::IsWindowsSubsystemForLinux()) {
    address = GenericMapAligned(nullptr, prot, size, alignment, allocated_size,
                                map_flags);
  }
#endif
  if (address == nullptr) {
    return nullptr;
  }

#if defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX)
  // PR_SET_VMA was only added to mainline Linux in 5.17, and some versions of
  // the Android NDK have incorrect headers, so we manually define it if absent.
#if !defined(PR_SET_VMA)
#define PR_SET_VMA 0x53564d41
#endif
#if !defined(PR_SET_VMA_ANON_NAME)
#define PR_SET_VMA_ANON_NAME 0
#endif
  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, name);
#endif
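
  // With the prctl call above, an anonymous mapping appears under its name in
  // /proc/<pid>/maps, which makes VM memory attributable in profiling tools.
  // A hypothetical example line (name and addresses made up):
  //
  //   7f1c2a000000-7f1c2a200000 rw-p 00000000 00:00 0  [anon:dart-heap]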

  MemoryRegion region(reinterpret_cast<void*>(address), size);
  return new VirtualMemory(region, region);
}
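
// Usage sketch (illustrative only; "vm-example" is a made-up region name):
// allocating a named, page-aligned, read-write region. PageSize() is the
// minimum legal alignment, and size must be a multiple of it:
//
//   VirtualMemory* memory = VirtualMemory::AllocateAligned(
//       16 * KB, VirtualMemory::PageSize(),
//       /*is_executable=*/false, /*is_compressed=*/false, "vm-example");
//   ...
//   delete memory;  // The destructor unmaps the region.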

VirtualMemory* VirtualMemory::Reserve(intptr_t size, intptr_t alignment) {
  ASSERT(Utils::IsAligned(size, PageSize()));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, PageSize()));
  intptr_t allocated_size = size + alignment - PageSize();
  void* address =
      GenericMapAligned(nullptr, PROT_NONE, size, alignment, allocated_size,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE);
  if (address == nullptr) {
    return nullptr;
  }
  MemoryRegion region(address, size);
  return new VirtualMemory(region, region);
}

void VirtualMemory::Commit(void* address, intptr_t size) {
  ASSERT(Utils::IsAligned(address, PageSize()));
  ASSERT(Utils::IsAligned(size, PageSize()));
  void* result = mmap(address, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (result == MAP_FAILED) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("Failed to commit: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
}

void VirtualMemory::Decommit(void* address, intptr_t size) {
  ASSERT(Utils::IsAligned(address, PageSize()));
  ASSERT(Utils::IsAligned(size, PageSize()));
  void* result =
      mmap(address, size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1, 0);
  if (result == MAP_FAILED) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("Failed to decommit: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
}
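
// Usage sketch (illustrative only) of the reserve/commit protocol: Reserve()
// maps PROT_NONE address space with no backing store, Commit() makes a
// subrange accessible, and Decommit() returns it to the reserved-but-
// inaccessible state:
//
//   VirtualMemory* space = VirtualMemory::Reserve(64 * MB, 2 * MB);
//   VirtualMemory::Commit(space->address(), 2 * MB);    // back the first 2 MB
//   VirtualMemory::Decommit(space->address(), 2 * MB);  // drop it again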

VirtualMemory::~VirtualMemory() {
#if defined(DART_COMPRESSED_POINTERS)
  if (VirtualMemoryCompressedHeap::Contains(reserved_.pointer()) &&
      (this != compressed_heap_)) {
    Decommit(reserved_.pointer(), reserved_.size());
    VirtualMemoryCompressedHeap::Free(reserved_.pointer(), reserved_.size());
    return;
  }
#endif  // defined(DART_COMPRESSED_POINTERS)
  if (vm_owns_region()) {
    Unmap(reserved_.start(), reserved_.end());
  }
}

bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
#if defined(DART_COMPRESSED_POINTERS)
  // Don't free the sub segment if it's managed by the compressed pointer heap.
  if (VirtualMemoryCompressedHeap::Contains(address)) {
    return false;
  }
#endif  // defined(DART_COMPRESSED_POINTERS)
  const uword start = reinterpret_cast<uword>(address);
  Unmap(start, start + size);
  return true;
}

void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
#if defined(DEBUG)
  Thread* thread = Thread::Current();
  ASSERT(thread == nullptr || thread->IsDartMutatorThread() ||
         thread->isolate() == nullptr ||
         thread->isolate()->mutator_thread()->IsAtSafepoint());
#endif
  uword start_address = reinterpret_cast<uword>(address);
  uword end_address = start_address + size;
  uword page_address = Utils::RoundDown(start_address, PageSize());
  int prot = 0;
  switch (mode) {
    case kNoAccess:
      prot = PROT_NONE;
      break;
    case kReadOnly:
      prot = PROT_READ;
      break;
    case kReadWrite:
      prot = PROT_READ | PROT_WRITE;
      break;
    case kReadExecute:
      prot = PROT_READ | PROT_EXEC;
      break;
    case kReadWriteExecute:
      prot = PROT_READ | PROT_WRITE | PROT_EXEC;
      break;
  }
  if (mprotect(reinterpret_cast<void*>(page_address),
               end_address - page_address, prot) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) failed\n", page_address,
             end_address - page_address, prot);
    FATAL("mprotect failed: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
  LOG_INFO("mprotect(0x%" Px ", 0x%" Px ", %u) ok\n", page_address,
           end_address - page_address, prot);
}
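
// Usage sketch (illustrative only; code_memory is a hypothetical
// VirtualMemory* holding generated code): the W^X flip used when
// FLAG_write_protect_code is active, so code pages are never writable and
// executable at the same time:
//
//   VirtualMemory::Protect(code_memory->address(), code_memory->size(),
//                          VirtualMemory::kReadWrite);   // patch instructions
//   // ... write the instructions ...
//   VirtualMemory::Protect(code_memory->address(), code_memory->size(),
//                          VirtualMemory::kReadExecute); // then run them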

void VirtualMemory::DontNeed(void* address, intptr_t size) {
  uword start_address = reinterpret_cast<uword>(address);
  uword end_address = start_address + size;
  uword page_address = Utils::RoundDown(start_address, PageSize());
#if defined(DART_HOST_OS_MACOS)
  int advice = MADV_FREE;
#else
  int advice = MADV_DONTNEED;
#endif
  if (madvise(reinterpret_cast<void*>(page_address),
              end_address - page_address, advice) != 0) {
    int error = errno;
    const int kBufferSize = 1024;
    char error_buf[kBufferSize];
    FATAL("madvise failed: %d (%s)", error,
          Utils::StrError(error, error_buf, kBufferSize));
  }
}
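
// Note on the advice chosen in DontNeed(): on Linux and Android,
// MADV_DONTNEED drops the pages immediately and later reads see zero-filled
// memory, while macOS treats MADV_DONTNEED as essentially a no-op for
// reducing footprint and wants MADV_FREE, which lets the kernel reclaim the
// pages lazily under memory pressure. Either way the range stays mapped.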

}  // namespace dart

#endif  // defined(DART_HOST_OS_ANDROID) || defined(DART_HOST_OS_LINUX) || \
        // defined(DART_HOST_OS_MACOS)