Flutter Engine documentation — annotated source listing of freelist.cc
(the Dart VM heap free-list implementation).
Go to the documentation of this file.
1// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2// for details. All rights reserved. Use of this source code is governed by a
3// BSD-style license that can be found in the LICENSE file.
4
5#include "vm/heap/freelist.h"
6
7#include "vm/bit_set.h"
8#include "vm/hash_map.h"
9#include "vm/lockers.h"
10#include "vm/object.h"
11#include "vm/os_thread.h"
12#include "vm/raw_object.h"
13
14namespace dart {
15
// NOTE(review): this listing was extracted from generated documentation;
// the leading numbers embedded in each line are the original file's line
// numbers. Lines 16, 19-20, 25-27, 30-31 and 34 — including the signature
// of FreeListElement::AsElement(uword addr, intptr_t size) and the
// size/class-id tag updates — are missing from this extraction. Recover
// them from the original vm/heap/freelist.cc before compiling.
//
// Purpose (from the visible body): formats the memory at 'addr' as a
// free-list element of 'size' bytes by composing a header tag word,
// storing the size out-of-line when the size tag cannot hold it, and
// clearing the element's next link.
17 // Precondition: the (page containing the) header of the element is
18 // writable.
21
22 FreeListElement* result = reinterpret_cast<FreeListElement*>(addr);
23
// Compose the header tag word. Additional tag updates (size tag,
// class-id tag) appear to be on the missing lines 25-27 and 30-31.
24 uword tags = 0;
28 tags = UntaggedObject::AlwaysSetBit::update(true, tags);
29 tags = UntaggedObject::NotMarkedBit::update(true, tags);
32 result->tags_ = tags;
33
// Out-of-line size store; the guarding 'if' header (line 34, presumably
// a SizeTag-overflow check — TODO confirm) is missing from the listing.
35 *result->SizeAddress() = size;
36 }
37 result->set_next(nullptr);
38 return result;
39 // Postcondition: the (page containing the) header of the element is
40 // writable.
41}
42
// NOTE(review): body of FreeListElement::AsElementNew(uword addr,
// intptr_t size) — its signature (original lines 43-45) and several
// interior lines (50-52, 55-56, 59) are missing from this extraction.
// The visible statements parallel AsElement above: build a header tag
// word, store the size out-of-line when needed, clear the next link.
46
47 FreeListElement* result = reinterpret_cast<FreeListElement*>(addr);
48
// Header tag composition; further tag updates are on missing lines.
49 uword tags = 0;
53 tags = UntaggedObject::AlwaysSetBit::update(true, tags);
54 tags = UntaggedObject::NotMarkedBit::update(true, tags);
57 result->tags_ = tags;
58
// Out-of-line size store; the guarding 'if' header (line 59) is missing.
60 *result->SizeAddress() = size;
61 }
62 result->set_next(nullptr);
63 return result;
64}
65
// NOTE(review): two fragments left by the doc extraction. Line 69 is
// the closing brace of FreeListElement::Init() (body on missing lines
// 66-68). Lines 72/74 belong to FreeListElement::HeaderSizeFor(intptr_t
// size); its signature (line 71) and the non-zero-size return path
// (line 73) are missing from this listing. A zero-size request has a
// zero-byte header.
69}
70
72 if (size == 0) return 0;
74}
75
76FreeList::FreeList() : mutex_() {
77 Reset();
78}
79
81
82uword FreeList::TryAllocate(intptr_t size, bool is_protected) {
83 MutexLocker ml(&mutex_);
84 return TryAllocateLocked(size, is_protected);
85}
86
// Carves a block of exactly 'size' bytes off the free list; mutex_ must
// already be held by the caller. Returns 0 when no suitable element is
// found or the search budget is exhausted (the caller then allocates a
// fresh page). When 'is_protected' is true, list elements live in
// non-writable pages and must be unprotected before mutation.
// NOTE(review): extracted listing — original lines 88, 98, 119, 150,
// 171 and 176 (a debug assert plus the VirtualMemory::Protect mode
// arguments) are missing; recover from the original vm/heap/freelist.cc.
87uword FreeList::TryAllocateLocked(intptr_t size, bool is_protected) {
89 // Precondition: is_protected is false or else all free list elements are
90 // in non-writable pages.
91
92 // Postcondition: if allocation succeeds, the allocated block is writable.
93 int index = IndexForSize(size);
// Fast path: an exact-fit small bucket is non-empty.
94 if ((index != kNumLists) && free_map_.Test(index)) {
95 FreeListElement* element = DequeueElement(index);
96 if (is_protected) {
// The protection-mode argument (line 98) is missing from the listing.
97 VirtualMemory::Protect(reinterpret_cast<void*>(element), size,
99 }
100 return reinterpret_cast<uword>(element);
101 }
102
// Second chance: take the next-larger non-empty small bucket and split.
103 if ((index + 1) < kNumLists) {
104 intptr_t next_index = free_map_.Next(index + 1);
105 if (next_index != -1) {
106 // Dequeue an element from the list, split and enqueue the remainder in
107 // the appropriate list.
108 FreeListElement* element = DequeueElement(next_index);
109 if (is_protected) {
110 // Make the allocated block and the header of the remainder element
111 // writable. The remainder will be non-writable if necessary after
112 // the call to SplitElementAfterAndEnqueue.
113 // If the remainder size is zero, only the element itself needs to
114 // be made writable.
115 intptr_t remainder_size = element->HeapSize() - size;
116 intptr_t region_size =
117 size + FreeListElement::HeaderSizeFor(remainder_size);
118 VirtualMemory::Protect(reinterpret_cast<void*>(element), region_size,
120 }
121 SplitElementAfterAndEnqueue(element, size, is_protected);
122 return reinterpret_cast<uword>(element);
123 }
124 }
125
// Last resort: first-fit scan of the unbounded-size list, limited by a
// search budget so pathological lists cannot stall allocation.
126 FreeListElement* previous = nullptr;
127 FreeListElement* current = free_lists_[kNumLists];
128 // We are willing to search the freelist further for a big block.
129 // For each successful free-list search we:
130 // * increase the search budget by #allocated-words
131 // * decrease the search budget by #free-list-entries-traversed
132 // which guarantees us to not waste more than around 1 search step per
133 // word of allocation
134 //
135 // If we run out of search budget we fall back to allocating a new page and
136 // reset the search budget.
137 intptr_t tries_left = freelist_search_budget_ + (size >> kWordSizeLog2);
138 while (current != nullptr) {
139 if (current->HeapSize() >= size) {
140 // Found an element large enough to hold the requested size. Dequeue,
141 // split and enqueue the remainder.
142 intptr_t remainder_size = current->HeapSize() - size;
143 intptr_t region_size =
144 size + FreeListElement::HeaderSizeFor(remainder_size);
145 if (is_protected) {
146 // Make the allocated block and the header of the remainder element
147 // writable. The remainder will be non-writable if necessary after
148 // the call to SplitElementAfterAndEnqueue.
149 VirtualMemory::Protect(reinterpret_cast<void*>(current), region_size,
151 }
152
// Unlink 'current'; when it is not the head, writing previous->next may
// require temporarily unprotecting the page holding that field.
153 if (previous == nullptr) {
154 free_lists_[kNumLists] = current->next();
155 } else {
156 // If the previous free list element's next field is protected, it
157 // needs to be unprotected before storing to it and reprotected
158 // after.
159 bool target_is_protected = false;
160 uword target_address = 0L;
161 if (is_protected) {
162 uword writable_start = reinterpret_cast<uword>(current);
163 uword writable_end = writable_start + region_size - 1;
164 target_address = previous->next_address();
165 target_is_protected =
166 !VirtualMemory::InSamePage(target_address, writable_start) &&
167 !VirtualMemory::InSamePage(target_address, writable_end);
168 }
// Protect-call mode arguments (lines 171 and 176) are missing from the
// listing; presumably ReadWrite before the store and ReadOnly after —
// TODO confirm against the original source.
169 if (target_is_protected) {
170 VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
172 }
173 previous->set_next(current->next());
174 if (target_is_protected) {
175 VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
177 }
178 }
179 SplitElementAfterAndEnqueue(current, size, is_protected);
// Successful search: refill the budget (capped at the initial value).
180 freelist_search_budget_ =
181 Utils::Minimum(tries_left, kInitialFreeListSearchBudget);
182 return reinterpret_cast<uword>(current);
183 } else if (tries_left-- < 0) {
184 freelist_search_budget_ = kInitialFreeListSearchBudget;
185 return 0; // Trigger allocation of new page.
186 }
187 previous = current;
188 current = current->next();
189 }
190 return 0;
191}
192
// Thread-safe free: takes the list lock, then returns the block to the
// appropriate bucket.
// NOTE(review): the delegating call (original line 195, presumably
// FreeLocked(addr, size); — TODO confirm) is missing from this
// extraction.
193void FreeList::Free(uword addr, intptr_t size) {
194 MutexLocker ml(&mutex_);
196}
197
// NOTE(review): body of FreeList::FreeLocked(uword addr, intptr_t size);
// its signature (original lines 198-199) and line 206 (which converts
// the freed range into a FreeListElement — presumably via
// FreeListElement::AsElement(addr, size); TODO confirm) are missing
// from this extraction. Computes the size bucket, then enqueues the
// element there. Caller must hold mutex_.
200 // Precondition required by AsElement and EnqueueElement: the (page
201 // containing the) header of the freed block should be writable. This is
202 // the case when called for newly allocated pages because they are
203 // allocated as writable. It is the case when called during GC sweeping
204 // because the entire heap is writable.
205 intptr_t index = IndexForSize(size);
207 EnqueueElement(element, index);
208 // Postcondition: the (page containing the) header is left writable.
209}
210
// NOTE(review): body of FreeList::Reset() — the signature line
// (original line 211) is missing from this extraction. Under the lock,
// clears the bucket-occupancy bitmap, invalidates the largest-small-size
// hint (-1), and empties all kNumLists small buckets plus the final
// unbounded-size bucket.
212 MutexLocker ml(&mutex_);
213 free_map_.Reset();
214 last_free_small_size_ = -1;
215 for (int i = 0; i < (kNumLists + 1); i++) {
216 free_lists_[i] = nullptr;
217 }
218}
219
220void FreeList::EnqueueElement(FreeListElement* element, intptr_t index) {
221 FreeListElement* next = free_lists_[index];
222 if (next == nullptr && index != kNumLists) {
223 free_map_.Set(index, true);
224 last_free_small_size_ =
225 Utils::Maximum(last_free_small_size_, index << kObjectAlignmentLog2);
226 }
227 element->set_next(next);
228 free_lists_[index] = element;
229}
230
// Counts the elements in the small bucket at 'index' by walking its
// singly-linked chain. Caller must hold mutex_.
// NOTE(review): original line 232 is missing from this extraction —
// presumably a DEBUG_ASSERT that the current thread owns mutex_;
// TODO confirm against the original source.
231intptr_t FreeList::LengthLocked(int index) const {
233 ASSERT(index >= 0);
234 ASSERT(index < kNumLists);
235 intptr_t result = 0;
236 FreeListElement* element = free_lists_[index];
237 while (element != nullptr) {
238 ++result;
239 element = element->next();
240 }
241 return result;
242}
243
// Dumps one line per non-empty small bucket: element size, object count,
// bucket KB, and cumulative KB across buckets so far.
// NOTE(review): original line 253 is missing from this extraction —
// given the trailing ');' on line 258 it is presumably the opening
// 'OS::PrintErr(' call; TODO confirm against the original source.
244void FreeList::PrintSmall() const {
245 intptr_t small_bytes = 0;
246 for (int i = 0; i < kNumLists; ++i) {
247 if (free_lists_[i] == nullptr) {
248 continue;
249 }
250 intptr_t list_length = LengthLocked(i);
// Bucket i holds elements of exactly i * kObjectAlignment bytes.
251 intptr_t list_bytes = list_length * i * kObjectAlignment;
252 small_bytes += list_bytes;
254 "small %3d [%8d bytes] : "
255 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
256 i, static_cast<int>(i * kObjectAlignment), list_length,
257 list_bytes / static_cast<double>(KB),
258 small_bytes / static_cast<double>(KB));
259 }
260}
261
// A (key, value) pair of machine-word integers, used by PrintLarge() to
// histogram large free-list elements: first_ is the element size in
// bytes, second_ is the number of elements of that size.
class IntptrPair {
 public:
  // Sentinel pair; (-1, -1) marks "no entry".
  IntptrPair() : first_(-1), second_(-1) {}
  IntptrPair(intptr_t first, intptr_t second)
      : first_(first), second_(second) {}

  intptr_t first() const { return first_; }
  intptr_t second() const { return second_; }
  void set_second(intptr_t s) { second_ = s; }

  // Fix: comparison operators are now const so they can be invoked on
  // const objects and const references (they never mutate an operand).
  bool operator==(const IntptrPair& other) const {
    return (first_ == other.first_) && (second_ == other.second_);
  }

  bool operator!=(const IntptrPair& other) const {
    return !(*this == other);
  }

 private:
  intptr_t first_;
  intptr_t second_;
};
284
285void FreeList::PrintLarge() const {
286 intptr_t large_bytes = 0;
287 MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> > map;
288 FreeListElement* node;
289 for (node = free_lists_[kNumLists]; node != nullptr; node = node->next()) {
290 IntptrPair* pair = map.Lookup(node->HeapSize());
291 if (pair == nullptr) {
292 map.Insert(IntptrPair(node->HeapSize(), 1));
293 } else {
294 pair->set_second(pair->second() + 1);
295 }
296 }
297
298 MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> >::Iterator it =
299 map.GetIterator();
300 IntptrPair* pair;
301 while ((pair = it.Next()) != nullptr) {
302 intptr_t size = pair->first();
303 intptr_t list_length = pair->second();
304 intptr_t list_bytes = list_length * size;
305 large_bytes += list_bytes;
306 OS::PrintErr("large %3" Pd " [%8" Pd
307 " bytes] : "
308 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
309 size / kObjectAlignment, size, list_length,
310 list_bytes / static_cast<double>(KB),
311 large_bytes / static_cast<double>(KB));
312 }
313}
314
315void FreeList::Print() const {
316 MutexLocker ml(&mutex_);
317 PrintSmall();
318 PrintLarge();
319}
320
// Splits 'element' so the first 'size' bytes become the allocation and
// the remainder (if any) is re-formatted as a fresh free-list element
// and re-enqueued in the bucket for its size.
// NOTE(review): original lines 343, 346 and 351 are missing from this
// extraction — given the surviving argument lines they are presumably
// the 'if (!VirtualMemory::InSamePage(' condition, the opening
// 'VirtualMemory::Protect(' call, and its protection-mode argument;
// TODO confirm against the original source.
321void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element,
322 intptr_t size,
323 bool is_protected) {
324 // Precondition required by AsElement and EnqueueElement: either
325 // element->Size() == size, or else the (page containing the) header of
326 // the remainder element starting at element + size is writable.
327 intptr_t remainder_size = element->HeapSize() - size;
// Exact fit: nothing to split or re-enqueue.
328 if (remainder_size == 0) return;
329
330 uword remainder_address = reinterpret_cast<uword>(element) + size;
331 element = FreeListElement::AsElement(remainder_address, remainder_size);
332 intptr_t remainder_index = IndexForSize(remainder_size);
333 EnqueueElement(element, remainder_index);
334
335 // Postcondition: when allocating in a protected page, the fraction of the
336 // remainder element which does not share a page with the allocated element is
337 // no longer writable. This means that if the remainder's header is not fully
338 // contained in the last page of the allocation, we need to re-protect the
339 // page it ends on.
340 if (is_protected) {
341 const uword remainder_header_size =
342 FreeListElement::HeaderSizeFor(remainder_size);
344 remainder_address - 1,
345 remainder_address + remainder_header_size - 1)) {
347 reinterpret_cast<void*>(
348 Utils::RoundUp(remainder_address, VirtualMemory::PageSize())),
349 remainder_address + remainder_header_size -
350 Utils::RoundUp(remainder_address, VirtualMemory::PageSize()),
352 }
353 }
354}
355
// NOTE(review): body of
// FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) —
// the signature line (original line 356) is missing from this
// extraction. Thread-safe wrapper: takes the list lock, then delegates
// to the lock-assuming variant.
357 MutexLocker ml(&mutex_);
358 return TryAllocateLargeLocked(minimum_size);
359}
360
// NOTE(review): body of
// FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size)
// — its signature and one more line (original lines 361-362) are missing
// from this extraction. First-fit scan of the unbounded-size bucket for
// an element of at least 'minimum_size' bytes, unlinking and returning
// it; returns nullptr when nothing fits or the search budget runs out
// (the caller then allocates a fresh page). Caller must hold mutex_.
363 FreeListElement* previous = nullptr;
364 FreeListElement* current = free_lists_[kNumLists];
365 // TODO(koda): Find largest.
366 // We are willing to search the freelist further for a big block.
367 intptr_t tries_left =
368 freelist_search_budget_ + (minimum_size >> kWordSizeLog2);
369 while (current != nullptr) {
370 FreeListElement* next = current->next();
371 if (current->HeapSize() >= minimum_size) {
// Unlink 'current' from the chain (head or interior position).
372 if (previous == nullptr) {
373 free_lists_[kNumLists] = next;
374 } else {
375 previous->set_next(next);
376 }
// Successful search: refill the budget, capped at the initial value.
377 freelist_search_budget_ =
378 Utils::Minimum(tries_left, kInitialFreeListSearchBudget);
379 return current;
380 } else if (tries_left-- < 0) {
381 freelist_search_budget_ = kInitialFreeListSearchBudget;
382 return nullptr; // Trigger allocation of new page.
383 }
384 previous = current;
385 current = next;
386 }
387 return nullptr;
388}
389
390} // namespace dart
static float next(float f)
#define DEBUG_ASSERT(cond)
Definition: assert.h:321
static constexpr uword update(ClassIdTagType value, uword original)
Definition: bitfield.h:188
bool Test(intptr_t i) const
Definition: bit_set.h:31
void Set(intptr_t i, bool value)
Definition: bit_set.h:20
intptr_t Next(intptr_t i) const
Definition: bit_set.h:38
void Reset()
Definition: bit_set.h:89
FreeListElement * next() const
Definition: freelist.h:26
uword next_address() const
Definition: freelist.h:27
static void Init()
Definition: freelist.cc:66
void set_next(FreeListElement *next)
Definition: freelist.h:29
static FreeListElement * AsElementNew(uword addr, intptr_t size)
Definition: freelist.cc:43
static intptr_t HeaderSizeFor(intptr_t size)
Definition: freelist.cc:71
intptr_t HeapSize()
Definition: freelist.h:31
static FreeListElement * AsElement(uword addr, intptr_t size)
Definition: freelist.cc:16
void Reset()
Definition: freelist.cc:211
void Print() const
Definition: freelist.cc:315
void FreeLocked(uword addr, intptr_t size)
Definition: freelist.cc:198
FreeListElement * TryAllocateLarge(intptr_t minimum_size)
Definition: freelist.cc:356
uword TryAllocateLocked(intptr_t size, bool is_protected)
Definition: freelist.cc:87
uword TryAllocate(intptr_t size, bool is_protected)
Definition: freelist.cc:82
FreeListElement * TryAllocateLargeLocked(intptr_t minimum_size)
Definition: freelist.cc:361
void Free(uword addr, intptr_t size)
Definition: freelist.cc:193
bool operator!=(const IntptrPair &other)
Definition: freelist.cc:276
void set_second(intptr_t s)
Definition: freelist.cc:270
intptr_t first() const
bool operator==(const IntptrPair &other)
Definition: freelist.cc:272
intptr_t second() const
IntptrPair(intptr_t first, intptr_t second)
Definition: freelist.cc:265
bool IsOwnedByCurrentThread() const
Definition: os_thread.h:402
static void static void PrintErr(const char *format,...) PRINTF_ATTRIBUTE(1
static intptr_t tags_offset()
Definition: object.h:346
static constexpr uword update(intptr_t size, uword tag)
Definition: raw_object.h:212
static constexpr intptr_t kMaxSizeTag
Definition: raw_object.h:201
static constexpr T Maximum(T x, T y)
Definition: utils.h:41
static T Minimum(T x, T y)
Definition: utils.h:36
static constexpr T RoundUp(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:120
static constexpr bool IsAligned(T x, uintptr_t alignment, uintptr_t offset=0)
Definition: utils.h:92
static void Protect(void *address, intptr_t size, Protection mode)
static intptr_t PageSize()
static bool InSamePage(uword address0, uword address1)
#define ASSERT(E)
struct MyStruct s
GAsyncResult * result
Definition: dart_vm.cc:33
static constexpr intptr_t kOldObjectAlignmentOffset
static constexpr intptr_t kNewObjectAlignmentOffset
@ kFreeListElement
Definition: class_id.h:224
constexpr intptr_t kWordSizeLog2
Definition: globals.h:507
constexpr intptr_t KB
Definition: globals.h:528
uintptr_t uword
Definition: globals.h:501
constexpr intptr_t kWordSize
Definition: globals.h:509
static constexpr intptr_t kObjectAlignment
static constexpr intptr_t kObjectAlignmentLog2
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition: switches.h:259
SI auto map(std::index_sequence< I... >, Fn &&fn, const Args &... args) -> skvx::Vec< sizeof...(I), decltype(fn(args[0]...))>
Definition: SkVx.h:680
#define Pd
Definition: globals.h:408
#define OFFSET_OF(type, field)
Definition: globals.h:138