Flutter Engine
virtual_memory_fuchsia.cc
// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/globals.h"
#if defined(DART_HOST_OS_FUCHSIA)

#include "vm/virtual_memory.h"

#include <zircon/process.h>
#include <zircon/status.h>
#include <zircon/syscalls.h>

#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/growable_array.h"
#include "vm/isolate.h"
#include "vm/lockers.h"
#include "vm/memory_region.h"
#include "vm/os.h"
#include "vm/os_thread.h"

// #define VIRTUAL_MEMORY_LOGGING 1
#if defined(VIRTUAL_MEMORY_LOGGING)
#define LOG_ERR(msg, ...)                                                      \
  OS::PrintErr("VMVM: %s:%d: " msg, __FILE__, __LINE__, ##__VA_ARGS__)
#define LOG_INFO(msg, ...)                                                     \
  OS::PrintErr("VMVM: %s:%d: " msg, __FILE__, __LINE__, ##__VA_ARGS__)
#else
#define LOG_ERR(msg, ...)
#define LOG_INFO(msg, ...)
#endif  // defined(VIRTUAL_MEMORY_LOGGING)

namespace dart {

DECLARE_FLAG(bool, write_protect_code);

uword VirtualMemory::page_size_ = 0;

#if defined(DART_COMPRESSED_POINTERS)
static zx_handle_t compressed_heap_vmar_ = ZX_HANDLE_INVALID;
static uword compressed_heap_base_ = 0;
#endif  // defined(DART_COMPRESSED_POINTERS)
static zx_handle_t vmex_resource_ = ZX_HANDLE_INVALID;

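// The page size is queried from the OS and must be a nonzero power of two.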
intptr_t VirtualMemory::CalculatePageSize() {
  const intptr_t page_size = getpagesize();
  ASSERT(page_size != 0);
  ASSERT(Utils::IsPowerOfTwo(page_size));
  return page_size;
}

void VirtualMemory::Init(zx_handle_t vmex_resource) {
  if (FLAG_old_gen_heap_size < 0 || FLAG_old_gen_heap_size > kMaxAddrSpaceMB) {
    OS::PrintErr(
        "warning: value specified for --old_gen_heap_size %d is larger than"
        " the physically addressable range, using 0 (unlimited) instead.\n",
        FLAG_old_gen_heap_size);
    FLAG_old_gen_heap_size = 0;
  }
  if (FLAG_new_gen_semi_max_size < 0 ||
      FLAG_new_gen_semi_max_size > kMaxAddrSpaceMB) {
    OS::PrintErr(
        "warning: value specified for --new_gen_semi_max_size %d is larger"
        " than the physically addressable range, using %" Pd " instead.\n",
        FLAG_new_gen_semi_max_size, kDefaultNewGenSemiMaxSize);
    FLAG_new_gen_semi_max_size = kDefaultNewGenSemiMaxSize;
  }

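  // Reserve a single VMAR covering the whole compressed heap, aligned to
  // kCompressedHeapAlignment, so compressed-heap allocations can later be
  // mapped inside it (see AllocateAligned).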
#if defined(DART_COMPRESSED_POINTERS)
  if (compressed_heap_vmar_ == ZX_HANDLE_INVALID) {
    const zx_vm_option_t align_flag =
        Utils::ShiftForPowerOfTwo(kCompressedHeapAlignment) << ZX_VM_ALIGN_BASE;
    const zx_vm_option_t options = ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE |
                                   ZX_VM_CAN_MAP_SPECIFIC | align_flag;
    zx_vaddr_t region;
    zx_status_t status =
        zx_vmar_allocate(zx_vmar_root_self(), options, 0, kCompressedHeapSize,
                         &compressed_heap_vmar_, &region);
    if (status != ZX_OK) {
      LOG_ERR("zx_vmar_allocate(0x%lx) failed: %s\n", kCompressedHeapSize,
              zx_status_get_string(status));
    } else {
      compressed_heap_base_ = reinterpret_cast<uword>(region);
      ASSERT(Utils::IsAligned(compressed_heap_base_, kCompressedHeapAlignment));
    }
  }
#endif  // defined(DART_COMPRESSED_POINTERS)

  page_size_ = CalculatePageSize();
  vmex_resource_ = vmex_resource;
}

void VirtualMemory::Cleanup() {
  vmex_resource_ = ZX_HANDLE_INVALID;
  page_size_ = 0;

#if defined(DART_COMPRESSED_POINTERS)
  zx_vmar_destroy(compressed_heap_vmar_);
  compressed_heap_vmar_ = ZX_HANDLE_INVALID;
  compressed_heap_base_ = 0;
#endif  // defined(DART_COMPRESSED_POINTERS)
}

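// Returns the VMAR containing |address|: the compressed-heap VMAR when the
// address falls inside the reserved compressed heap, otherwise the root VMAR.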
static zx_handle_t getVmarForAddress(uword address) {
#if defined(DART_COMPRESSED_POINTERS)
  if (address - compressed_heap_base_ < kCompressedHeapSize) {
    return compressed_heap_vmar_;
  }
#endif  // defined(DART_COMPRESSED_POINTERS)
  return zx_vmar_root_self();
}

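// Unmaps [start, end) from |vmar|; an empty range is a no-op and any unmap
// failure is fatal.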
static void Unmap(zx_handle_t vmar, uword start, uword end) {
  ASSERT(start <= end);
  const uword size = end - start;
  if (size == 0) {
    return;
  }

  zx_status_t status = zx_vmar_unmap(vmar, start, size);
  if (status != ZX_OK) {
    FATAL("zx_vmar_unmap failed: %s\n", zx_status_get_string(status));
  }
}

VirtualMemory* VirtualMemory::AllocateAligned(intptr_t size,
                                              intptr_t alignment,
                                              bool is_executable,
                                              bool is_compressed,
                                              const char* name) {
  // When FLAG_write_protect_code is active, code memory (indicated by
  // is_executable = true) is allocated as non-executable and later
  // changed to executable via VirtualMemory::Protect, which requires
  // ZX_RIGHT_EXECUTE on the underlying VMO.
  ASSERT(Utils::IsAligned(size, page_size_));
  ASSERT(Utils::IsPowerOfTwo(alignment));
  ASSERT(Utils::IsAligned(alignment, page_size_));

  const zx_vm_option_t align_flag = Utils::ShiftForPowerOfTwo(alignment)
                                    << ZX_VM_ALIGN_BASE;
  ASSERT((ZX_VM_ALIGN_1KB <= align_flag) && (align_flag <= ZX_VM_ALIGN_4GB));

#if defined(DART_COMPRESSED_POINTERS)
  zx_handle_t vmar;
  if (is_compressed) {
    RELEASE_ASSERT(!is_executable);
    vmar = compressed_heap_vmar_;
  } else {
    vmar = zx_vmar_root_self();
  }
#else
  zx_handle_t vmar = zx_vmar_root_self();
#endif  // defined(DART_COMPRESSED_POINTERS)
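  // Back the allocation with an anonymous VMO. Running out of memory here is
  // reported to the caller by returning nullptr; other failures are fatal.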
  zx_handle_t vmo = ZX_HANDLE_INVALID;
  zx_status_t status = zx_vmo_create(size, 0u, &vmo);
  if (status == ZX_ERR_NO_MEMORY) {
    LOG_ERR("zx_vmo_create(0x%lx) failed: %s\n", size,
            zx_status_get_string(status));
    return nullptr;
  } else if (status != ZX_OK) {
    FATAL("zx_vmo_create(0x%lx) failed: %s\n", size,
          zx_status_get_string(status));
  }

  if (name != nullptr) {
    zx_object_set_property(vmo, ZX_PROP_NAME, name, strlen(name));
  }

  if (is_executable) {
    // Add ZX_RIGHT_EXECUTE permission to VMO, so it can be mapped
    // into memory as executable (now or later).
    status = zx_vmo_replace_as_executable(vmo, vmex_resource_, &vmo);
    if (status == ZX_ERR_NO_MEMORY) {
      LOG_ERR("zx_vmo_replace_as_executable() failed: %s\n",
              zx_status_get_string(status));
      zx_handle_close(vmo);
      return nullptr;
    } else if (status != ZX_OK) {
      FATAL("zx_vmo_replace_as_executable() failed: %s\n",
            zx_status_get_string(status));
    }
  }

  const zx_vm_option_t region_options =
      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | align_flag |
      ((is_executable && !FLAG_write_protect_code) ? ZX_VM_PERM_EXECUTE : 0);
  uword base;
  status = zx_vmar_map(vmar, region_options, 0, vmo, 0u, size, &base);
  LOG_INFO("zx_vmar_map(%u, 0x%lx, 0x%lx)\n", region_options, base, size);
  if (status != ZX_OK) {
    LOG_ERR("zx_vmar_map(%u, 0x%lx, 0x%lx) failed: %s\n", region_options, base,
            size, zx_status_get_string(status));
    zx_handle_close(vmo);
    return nullptr;
  }
  void* region_ptr = reinterpret_cast<void*>(base);
  MemoryRegion region(region_ptr, size);
  VirtualMemory* result = new VirtualMemory(region, region);
  zx_handle_close(vmo);

#if defined(DART_COMPRESSED_POINTERS)
  if (!is_executable) {
    uword offset = result->start() - compressed_heap_base_;
    ASSERT(offset < kCompressedHeapSize);
  }
#endif  // defined(DART_COMPRESSED_POINTERS)

  return result;
}

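// Unmaps the reserved region on destruction when this VirtualMemory owns it.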
VirtualMemory::~VirtualMemory() {
  // Reserved region may be empty due to VirtualMemory::Truncate.
  if (vm_owns_region() && reserved_.size() != 0) {
    Unmap(getVmarForAddress(reserved_.start()), reserved_.start(),
          reserved_.end());
    LOG_INFO("zx_vmar_unmap(0x%lx, 0x%lx) success\n", reserved_.start(),
             reserved_.size());
  }
}

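// Unmaps a sub-range of a previously allocated region.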
bool VirtualMemory::FreeSubSegment(void* address, intptr_t size) {
  const uword start = reinterpret_cast<uword>(address);
  Unmap(getVmarForAddress(start), start, start + size);
  LOG_INFO("zx_vmar_unmap(0x%p, 0x%lx) success\n", address, size);
  return true;
}

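// Applies |mode| to [address, address + size), rounding the start down to a
// page boundary; a failed zx_vmar_protect is fatal.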
void VirtualMemory::Protect(void* address, intptr_t size, Protection mode) {
#if defined(DEBUG)
  Thread* thread = Thread::Current();
  ASSERT(thread == nullptr || thread->IsDartMutatorThread() ||
         thread->isolate() == nullptr ||
         thread->isolate()->mutator_thread()->IsAtSafepoint());
#endif
  const uword start_address = reinterpret_cast<uword>(address);
  const uword end_address = start_address + size;
  const uword page_address = Utils::RoundDown(start_address, PageSize());
  uint32_t prot = 0;
  switch (mode) {
    case kNoAccess:
      prot = 0;
      break;
    case kReadOnly:
      prot = ZX_VM_PERM_READ;
      break;
    case kReadWrite:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
      break;
    case kReadExecute:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE;
      break;
    case kReadWriteExecute:
      prot = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_PERM_EXECUTE;
      break;
  }
  zx_status_t status =
      zx_vmar_protect(getVmarForAddress(page_address), prot, page_address,
                      end_address - page_address);
  LOG_INFO("zx_vmar_protect(%u, 0x%lx, 0x%lx)\n", prot, page_address,
           end_address - page_address);
  if (status != ZX_OK) {
    FATAL("zx_vmar_protect(0x%lx, 0x%lx) failed: %s\n", page_address,
          end_address - page_address, zx_status_get_string(status));
  }
}

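// Hints via ZX_VMAR_OP_DONT_NEED that the pages covering
// [address, address + size) are not needed and may be reclaimed by the kernel.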
void VirtualMemory::DontNeed(void* address, intptr_t size) {
  uword start_address = reinterpret_cast<uword>(address);
  uword end_address = start_address + size;
  uword page_address = Utils::RoundDown(start_address, PageSize());
  zx_status_t status = zx_vmar_op_range(
      getVmarForAddress(reinterpret_cast<uword>(address)), ZX_VMAR_OP_DONT_NEED,
      page_address, end_address - page_address, nullptr, 0);
  LOG_INFO("zx_vmar_op_range(DONTNEED, 0x%lx, 0x%lx)\n", page_address,
           end_address - page_address);
  if (status != ZX_OK) {
    FATAL("zx_vmar_op_range(DONTNEED, 0x%lx, 0x%lx) failed: %s\n", page_address,
          end_address - page_address, zx_status_get_string(status));
  }
}

}  // namespace dart

#endif  // defined(DART_HOST_OS_FUCHSIA)