SkBlockAllocator.h
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkBlockAllocator_DEFINED
#define SkBlockAllocator_DEFINED

#include "include/private/base/SkASAN.h"  // IWYU pragma: keep
#include "include/private/base/SkAlign.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkMacros.h"
#include "include/private/base/SkMath.h"
#include "include/private/base/SkNoncopyable.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <new>
#include <type_traits>

/**
 * SkBlockAllocator provides low-level support for a block allocated arena with a dynamic tail that
 * tracks space reservations within each block. Its APIs provide the ability to reserve space,
 * resize reservations, and release reservations. It will automatically create new blocks if needed
 * and destroy all remaining blocks when it is destructed. It assumes that anything allocated within
 * its blocks has its destructors called externally. It is recommended that SkBlockAllocator is
 * wrapped by a higher-level allocator that uses the low-level APIs to implement a simpler,
 * purpose-focused API without having to worry as much about byte-level concerns.
 *
 * SkBlockAllocator has no limit to its total size, but each allocation is limited to 512MB (which
 * should be sufficient for Skia's use cases). This upper allocation limit allows all internal
 * operations to be performed using 'int' and avoids many overflow checks. Static asserts are used
 * to ensure that those operations would not overflow when using the largest possible values.
 *
 * Possible use modes:
 * 1. No upfront allocation, either on the stack or as a field
 *    SkBlockAllocator allocator(policy, heapAllocSize);
 *
 * 2. In-place new'd
 *    void* mem = operator new(totalSize);
 *    SkBlockAllocator* allocator = new (mem) SkBlockAllocator(policy, heapAllocSize,
 *                                                             totalSize - sizeof(SkBlockAllocator));
 *    delete allocator;
 *
 * 3. Use SkSBlockAllocator to increase the preallocation size
 *    SkSBlockAllocator<1024> allocator(policy, heapAllocSize);
 *    sizeof(allocator) == 1024;
 */
// TODO(michaelludwig) - While the API is different, this shares similarities with SkArenaAlloc
// and SkFibBlockSizes, so we should work to integrate them.
class SkBlockAllocator final : SkNoncopyable {
public:
    // Largest size that can be requested from allocate(), chosen because it's the largest pow-2
    // that is less than int32_t::max()/2.
    inline static constexpr int kMaxAllocationSize = 1 << 29;

    enum class GrowthPolicy : int {
        kFixed,       // Next block size = N
        kLinear,      //  = #blocks * N
        kFibonacci,   //  = fibonacci(#blocks) * N
        kExponential, //  = 2^#blocks * N
        kLast = kExponential
    };
    inline static constexpr int kGrowthPolicyCount = static_cast<int>(GrowthPolicy::kLast) + 1;

    class Block final {
    public:
        ~Block();
        void operator delete(void* p) { ::operator delete(p); }

        // Return the maximum allocation size with the given alignment that can fit in this block.
        template <size_t Align = 1, size_t Padding = 0>
        int avail() const { return std::max(0, fSize - this->cursor<Align, Padding>()); }

        // Return the aligned offset of the first allocation, assuming it was made with the
        // specified Align and Padding. The returned offset does not mean a valid allocation
        // starts at that offset; this is a utility function for classes built on top to manage
        // indexing into a block effectively.
        template <size_t Align = 1, size_t Padding = 0>
        int firstAlignedOffset() const { return this->alignedOffset<Align, Padding>(kDataStart); }

        // Convert an offset into this block's storage into a usable pointer.
        void* ptr(int offset) {
            SkASSERT(offset >= kDataStart && offset < fSize);
            return reinterpret_cast<char*>(this) + offset;
        }
        const void* ptr(int offset) const { return const_cast<Block*>(this)->ptr(offset); }

        // Every block has an extra 'int' for clients to use however they want. It will start
        // at 0 when a new block is made, or when the head block is reset.
        int metadata() const { return fMetadata; }
        void setMetadata(int value) { fMetadata = value; }

        /**
         * Release the byte range between offset 'start' (inclusive) and 'end' (exclusive). This
         * will return true if those bytes were successfully reclaimed, i.e. a subsequent
         * allocation request could occupy the space. Regardless of return value, the provided
         * byte range that [start, end) represents should not be used until it's re-allocated
         * with allocate<...>().
         */
        inline bool release(int start, int end);

        /**
         * Resize a previously reserved byte range of offset 'start' (inclusive) to 'end'
         * (exclusive). 'deltaBytes' is the SIGNED change to the length of the reservation.
         *
         * When negative, this means the reservation is shrunk and the new length is (end - start -
         * |deltaBytes|). If this new length would be 0, the byte range can no longer be used (as
         * if it were released instead). Asserts that it would not shrink the reservation below 0.
         *
         * If 'deltaBytes' is positive, the allocator attempts to increase the length of the
         * reservation. If 'deltaBytes' is less than or equal to avail() and it was the last
         * allocation in the block, it can be resized. If there are not enough available bytes to
         * accommodate the increase in size, or another allocation is blocking the increase in
         * size, then false will be returned and the reserved byte range is unmodified.
         */
        inline bool resize(int start, int end, int deltaBytes);
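
        // Illustrative sketch of the semantics above (not additional API), for a reservation
        // [start, end) that is the last allocation in its block (fCursor == end):
        //   resize(start, end, 4)           -> grows it to [start, end + 4) when avail() >= 4
        //   resize(start, end, start - end) -> new length 0; the range acts as if released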

    private:
        friend class SkBlockAllocator;

        Block(Block* prev, int allocationSize);

        // We poison the unallocated space in a Block to allow ASAN to catch invalid writes.
        void poisonRange(int start, int end) {
            sk_asan_poison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }
        void unpoisonRange(int start, int end) {
            sk_asan_unpoison_memory_region(reinterpret_cast<char*>(this) + start, end - start);
        }

        // Get fCursor, but aligned such that ptr(rval) satisfies Align.
        template <size_t Align, size_t Padding>
        int cursor() const { return this->alignedOffset<Align, Padding>(fCursor); }

        template <size_t Align, size_t Padding>
        int alignedOffset(int offset) const;

        bool isScratch() const { return fCursor < 0; }
        void markAsScratch() {
            fCursor = -1;
            this->poisonRange(kDataStart, fSize);
        }

        SkDEBUGCODE(uint32_t fSentinel;) // known value to check for bad back pointers to blocks

        Block* fNext; // doubly-linked list of blocks
        Block* fPrev;

        // Each block tracks its own cursor because as later blocks are released, an older block
        // may become the active tail again.
        int fSize;     // includes the size of the BlockHeader and requested metadata
        int fCursor;   // (this + fCursor) points to next available allocation
        int fMetadata;

        // On release builds, a Block's other 2 pointers and 3 int fields leave 4 bytes of padding
        // for 8- and 16-aligned systems. Currently this is only manipulated in the head block for
        // allocator-level metadata, and it is explicitly not reset when the head block is
        // "released". Down the road we could instead choose to offer multiple metadata slots per
        // block.
        int fAllocatorMetadata;
    };

    // Tuple representing a range of bytes, marking the unaligned start, the first aligned point
    // after any padding, and the upper limit depending on requested size.
    struct ByteRange {
        Block* fBlock;      // Owning block
        int fStart;         // Inclusive byte lower limit of byte range
        int fAlignedOffset; // >= start, matching alignment requirement (i.e. first real byte)
        int fEnd;           // Exclusive upper limit of byte range
    };

    // The size of the head block is determined by 'additionalPreallocBytes'. Subsequent heap
    // blocks are determined by 'policy' and 'blockIncrementBytes', although 'blockIncrementBytes'
    // will be aligned to std::max_align_t.
    //
    // When 'additionalPreallocBytes' > 0, the allocator assumes that many extra bytes immediately
    // after the allocator can be used by its inline head block. This is useful when the allocator
    // is in-place new'ed into a larger block of memory, but it should remain set to 0 if stack
    // allocated or if the class layout does not guarantee that space is present.
    SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                     size_t additionalPreallocBytes = 0);

    ~SkBlockAllocator() { this->reset(); }
    void operator delete(void* p) { ::operator delete(p); }

    /**
     * Helper to calculate the minimum number of bytes needed for a heap block, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To store N instances of T in a heap block, the 'blockIncrementBytes' should be set to
     * BlockOverhead<alignof(T)>() + N * sizeof(T) when making the SkBlockAllocator.
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t BlockOverhead();

    /**
     * Helper to calculate the minimum number of bytes needed for a preallocation, under the
     * assumption that Align will be the requested alignment of the first call to allocate().
     * Ex. To preallocate a SkSBlockAllocator to hold N instances of T, its argument should be
     * Overhead<alignof(T)>() + N * sizeof(T)
     */
    template<size_t Align = 1, size_t Padding = 0>
    static constexpr size_t Overhead();

    /**
     * Return the total number of bytes of the allocator, including its instance overhead,
     * per-block overhead, and space used for allocations.
     */
    size_t totalSize() const;
    /**
     * Return the total number of bytes usable for allocations. This includes bytes that have
     * been reserved already by a call to allocate() and bytes that are still available. It is
     * totalSize() minus all allocator and block-level overhead.
     */
    size_t totalUsableSpace() const;
    /**
     * Return the total number of usable bytes that have been reserved by allocations. This will
     * be less than or equal to totalUsableSpace().
     */
    size_t totalSpaceInUse() const;

    /**
     * Return the total number of bytes that were pre-allocated for the SkBlockAllocator. This
     * will include 'additionalPreallocBytes' passed to the constructor, and represents what the
     * total size would become after a call to reset().
     */
    size_t preallocSize() const {
        // Don't double count fHead's Block overhead in both sizeof(SkBlockAllocator) and fSize.
        return sizeof(SkBlockAllocator) + fHead.fSize - BaseHeadBlockSize();
    }
    /**
     * Return the usable size of the inline head block; this will be equal to
     * 'additionalPreallocBytes' plus any alignment padding that the system had to add to Block.
     * The returned value represents what could be allocated before a heap block is created.
     */
    size_t preallocUsableSpace() const {
        return fHead.fSize - kDataStart;
    }

    /**
     * Get the current value of the allocator-level metadata (a user-oriented slot). This is
     * separate from any block-level metadata, but can serve a similar purpose to compactly
     * support data collections on top of SkBlockAllocator.
     */
    int metadata() const { return fHead.fAllocatorMetadata; }

    /**
     * Set the current value of the allocator-level metadata.
     */
    void setMetadata(int value) { fHead.fAllocatorMetadata = value; }

    /**
     * Reserve space that will hold 'size' bytes. This will automatically allocate a new block if
     * there is not enough available space in the current block to provide 'size' bytes. The
     * returned ByteRange tuple specifies the Block owning the reserved memory, the full byte
     * range, and the aligned offset within that range to use for the user-facing pointer. The
     * following invariants hold:
     *
     *   1. block->ptr(alignedOffset) is aligned to Align
     *   2. end - alignedOffset == size
     *   3. Padding <= alignedOffset - start <= Padding + Align - 1
     *
     * Invariant #3, when Padding > 0, allows intermediate allocators to embed metadata along with
     * the allocations. If the Padding bytes are used for some 'struct Meta', then
     * ptr(alignedOffset - sizeof(Meta)) can be safely used as a Meta* if Meta's alignment
     * requirements are less than or equal to the alignment specified in allocate<>. This can be
     * easily guaranteed by using the pattern:
     *
     *   allocate<max(UserAlign, alignof(Meta)), sizeof(Meta)>(userSize);
     *
     * This ensures that ptr(alignedOffset) will always satisfy UserAlign and
     * ptr(alignedOffset - sizeof(Meta)) will always satisfy alignof(Meta). Alternatively, memcpy
     * can be used to read and write values between start and alignedOffset without worrying about
     * alignment requirements of the metadata.
     *
     * For over-aligned allocations, the alignedOffset (as an int) may not be a multiple of Align,
     * but the result of ptr(alignedOffset) will be a multiple of Align.
     */
    template <size_t Align, size_t Padding = 0>
    ByteRange allocate(size_t size);
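
    // Illustrative sketch of the metadata pattern described above. 'Meta', 'UserAlign', and
    // 'userSize' are hypothetical placeholders, not part of this API:
    //   struct Meta { int fCount; };
    //   auto br = allocator.allocate<std::max(UserAlign, alignof(Meta)), sizeof(Meta)>(userSize);
    //   void* userPtr = br.fBlock->ptr(br.fAlignedOffset);                // satisfies UserAlign
    //   Meta* meta = new (br.fBlock->ptr(br.fAlignedOffset - (int) sizeof(Meta))) Meta{0};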

    enum ReserveFlags : unsigned {
        // If provided to reserve(), the input 'size' will be rounded up to the next size
        // determined by the growth policy of the SkBlockAllocator. If not, 'size' will be aligned
        // to max_align.
        kIgnoreGrowthPolicy_Flag  = 0b01,
        // If provided to reserve(), the number of available bytes of the current block will not
        // be used to satisfy the reservation (assuming the contiguous range was long enough to
        // begin with).
        kIgnoreExistingBytes_Flag = 0b10,

        kNo_ReserveFlags          = 0b00
    };

    /**
     * Ensure the block allocator has 'size' contiguous available bytes. After calling this
     * function, currentBlock()->avail<Align, Padding>() may still report less than 'size' if the
     * reserved space was added as a scratch block. This is done so that anything remaining in
     * the current block can still be used if a smaller-than-size allocation is requested. If
     * 'size' is requested by a subsequent allocation, the scratch block will automatically be
     * activated and the request will not itself trigger any malloc.
     *
     * The optional 'flags' controls how the input size is allocated; by default it will attempt
     * to use available contiguous bytes in the current block and will respect the growth policy
     * of the allocator.
     */
    template <size_t Align = 1, size_t Padding = 0>
    void reserve(size_t size, ReserveFlags flags = kNo_ReserveFlags);

    /**
     * Return a pointer to the start of the current block. This will never be null.
     */
    const Block* currentBlock() const { return fTail; }
    Block* currentBlock() { return fTail; }

    const Block* headBlock() const { return &fHead; }
    Block* headBlock() { return &fHead; }

    /**
     * Return the block that owns the allocated 'ptr'. Assuming that an allocation was earlier
     * returned as {b, start, alignedOffset, end} and 'p = b->ptr(alignedOffset)', then
     * 'owningBlock<Align, Padding>(p, start)' will return 'b'.
     *
     * If calling code has already made a pointer to their metadata, i.e. 'm = p - Padding', then
     * 'owningBlock<Align, 0>(m, start)' will also return b, allowing you to recover the block
     * from the metadata pointer.
     *
     * If calling code has access to the original alignedOffset, this function should not be used
     * since the owning block is just 'p - alignedOffset', regardless of original Align or Padding.
     */
    template <size_t Align, size_t Padding = 0>
    Block* owningBlock(const void* ptr, int start);

    template <size_t Align, size_t Padding = 0>
    const Block* owningBlock(const void* ptr, int start) const {
        return const_cast<SkBlockAllocator*>(this)->owningBlock<Align, Padding>(ptr, start);
    }

    /**
     * Find the owning block of the allocated pointer, 'p'. Without any additional information,
     * this is O(N) on the number of allocated blocks.
     */
    Block* findOwningBlock(const void* ptr);
    const Block* findOwningBlock(const void* ptr) const {
        return const_cast<SkBlockAllocator*>(this)->findOwningBlock(ptr);
    }

    /**
     * Explicitly free an entire block, invalidating any remaining allocations from the block.
     * SkBlockAllocator will release all alive blocks automatically when it is destroyed, but this
     * function can be used to reclaim memory over the lifetime of the allocator. The provided
     * 'block' pointer must have previously come from a call to currentBlock() or allocate().
     *
     * If 'block' represents the inline-allocated head block, its cursor and metadata are instead
     * reset to their defaults.
     *
     * If the block is not the head block, it may be kept as a scratch block to be reused for
     * subsequent allocation requests, instead of making an entirely new block. A scratch block is
     * not visible when iterating over blocks but is reported in the total size of the allocator.
     */
    void releaseBlock(Block* block);

    /**
     * Detach every heap-allocated block owned by 'other' and concatenate them to this allocator's
     * list of blocks. This memory is now managed by this allocator. Since this only transfers
     * ownership of a Block, and a Block itself does not move, any previous allocations remain
     * valid and associated with their original Block instances. SkBlockAllocator-level functions
     * that accept allocated pointers (e.g. findOwningBlock) must now use this allocator and not
     * 'other' for these allocations.
     *
     * The head block of 'other' cannot be stolen, so higher-level allocators and memory
     * structures must handle that data differently.
     */
    void stealHeapBlocks(SkBlockAllocator* other);

    /**
     * Explicitly free all blocks (invalidating all allocations) and reset the head block to its
     * default state. The allocator-level metadata is reset to 0 as well.
     */
    void reset();

    /**
     * Remove any reserved scratch space, either from calling reserve() or releaseBlock().
     */
    void resetScratchSpace();

    template <bool Forward, bool Const> class BlockIter;

    /**
     * Clients can iterate over all active Blocks in the SkBlockAllocator using for loops:
     *
     * Forward iteration from head to tail block (or non-const variant):
     *   for (const Block* b : this->blocks()) { }
     * Reverse iteration from tail to head block:
     *   for (const Block* b : this->rblocks()) { }
     *
     * It is safe to call releaseBlock() on the active block while looping.
     */
    inline BlockIter<true, false> blocks();
    inline BlockIter<true, true> blocks() const;
    inline BlockIter<false, false> rblocks();
    inline BlockIter<false, true> rblocks() const;
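
    // Illustrative sketch (not additional API): each iteration step caches the next block before
    // the current one is visited, which is what makes releasing the active block safe, e.g.
    //   for (Block* b : allocator.rblocks()) {
    //       allocator.releaseBlock(b); // the head block is reset in place rather than freed
    //   }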

#ifdef SK_DEBUG
    inline static constexpr uint32_t kAssignedMarker = 0xBEEFFACE;
    inline static constexpr uint32_t kFreedMarker    = 0xCAFEBABE;

    void validate() const;
#endif

private:

    inline static constexpr int kDataStart = sizeof(Block);
    #ifdef SK_FORCE_8_BYTE_ALIGNMENT
    // This is an issue for WASM builds using emscripten, which had std::max_align_t = 16, but
    // was returning pointers only aligned to 8 bytes.
    // https://github.com/emscripten-core/emscripten/issues/10072
    //
    // Setting this to 8 will let SkBlockAllocator properly correct for the pointer address if
    // a 16-byte aligned allocation is requested in wasm (unlikely since we don't use long
    // doubles).
    static constexpr size_t kAddressAlign = 8;
    #else
    // The alignment Block addresses will be at when created using operator new (a spec-compliant
    // operator new returns pointers aligned to max_align_t).
    static constexpr size_t kAddressAlign = alignof(std::max_align_t);
    #endif

    // Calculates the size of a new Block required to store a kMaxAllocationSize request for the
    // given alignment and padding bytes. Also represents maximum valid fCursor value in a Block.
    template<size_t Align, size_t Padding>
    static constexpr size_t MaxBlockSize();

    static constexpr int BaseHeadBlockSize() {
        return sizeof(SkBlockAllocator) - offsetof(SkBlockAllocator, fHead);
    }

    // Append a new block to the end of the block linked list, updating fTail. 'minSize' must
    // have enough room for sizeof(Block). 'maxSize' is the upper limit of fSize for the new block
    // that will preserve the static guarantees SkBlockAllocator makes.
    void addBlock(int minSize, int maxSize);

    int scratchBlockSize() const { return fHead.fPrev ? fHead.fPrev->fSize : 0; }

    Block* fTail; // All non-head blocks are heap allocated; tail will never be null.

    // All remaining state is packed into 64 bits to keep SkBlockAllocator at 16 bytes + head block
    // (on a 64-bit system).

    // Growth of the block size is controlled by four factors: BlockIncrement, N0 and N1, and a
    // policy defining how N0 is updated. When a new block is needed, we calculate N1' = N0 + N1.
    // Depending on the policy, N0' = N0 (no growth or linear growth), or N0' = N1 (Fibonacci), or
    // N0' = N1' (exponential). The size of the new block is N1' * BlockIncrement * MaxAlign, after
    // which fN0 and fN1 store N0' and N1' clamped into 23 bits. With the current bit allocations,
    // N1' is limited to 2^24, and assuming MaxAlign = 16, BlockIncrement must be '2' in order to
    // eventually reach the hard 2^29 size limit of SkBlockAllocator.
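
    // Worked example of the rules above (new block sizes in units of BlockIncrement * MaxAlign),
    // starting from the initial fN0 and fN1 values documented below:
    //   kFixed:       N0 stays 0, so N1' = 0 + 1 = 1 every time    -> 1, 1, 1, 1, ...
    //   kLinear:      N0 stays 1, so N1' grows by 1 each block     -> 2, 3, 4, 5, ...
    //   kFibonacci:   N0' = N1 shifts the pair along the sequence  -> 1, 1, 2, 3, 5, 8, ...
    //   kExponential: N0' = N1' doubles the sum each block         -> 2, 4, 8, 16, ...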

    // Next heap block size = (fBlockIncrement * alignof(std::max_align_t) * (fN0 + fN1))
    uint64_t fBlockIncrement : 16;
    uint64_t fGrowthPolicy   : 2;  // GrowthPolicy
    uint64_t fN0             : 23; // = 1 for linear/exp.; = 0 for fixed/fibonacci, initially
    uint64_t fN1             : 23; // = 1 initially

    // Inline head block, must be at the end so that it can utilize any additional reserved space
    // from the initial allocation.
    // The head block's prev pointer may be non-null, which signifies a scratch block that may be
    // reused instead of allocating an entirely new block (this helps when allocate+release calls
    // bounce back and forth across the capacity of a block).
    alignas(kAddressAlign) Block fHead;

    static_assert(kGrowthPolicyCount <= 4);
};

// A wrapper around SkBlockAllocator that includes preallocated storage for the head block.
// N will be the preallocSize() reported by the allocator.
template<size_t N>
class SkSBlockAllocator : SkNoncopyable {
public:
    using GrowthPolicy = SkBlockAllocator::GrowthPolicy;

    SkSBlockAllocator() {
        new (fStorage) SkBlockAllocator(GrowthPolicy::kFixed, N, N - sizeof(SkBlockAllocator));
    }
    explicit SkSBlockAllocator(GrowthPolicy policy) {
        new (fStorage) SkBlockAllocator(policy, N, N - sizeof(SkBlockAllocator));
    }

    SkSBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes) {
        new (fStorage) SkBlockAllocator(policy, blockIncrementBytes, N - sizeof(SkBlockAllocator));
    }

    ~SkSBlockAllocator() {
        this->allocator()->~SkBlockAllocator();
    }

    SkBlockAllocator* operator->() { return this->allocator(); }
    const SkBlockAllocator* operator->() const { return this->allocator(); }

    SkBlockAllocator* allocator() { return reinterpret_cast<SkBlockAllocator*>(fStorage); }
    const SkBlockAllocator* allocator() const {
        return reinterpret_cast<const SkBlockAllocator*>(fStorage);
    }

private:
    static_assert(N >= sizeof(SkBlockAllocator));

    // Will be used to placement new the allocator
    alignas(SkBlockAllocator) char fStorage[N];
};
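
// A minimal usage sketch (illustrative only, not part of this header). 'ExamplePoint' and
// 'example_fill' are hypothetical names; the allocator never runs destructors, which is fine
// here because ExamplePoint is trivially destructible:
//
//   struct ExamplePoint { float fX, fY; };
//
//   void example_fill() {
//       SkSBlockAllocator<1024> pool; // 1024 bytes total, kFixed growth for later heap blocks
//       for (int i = 0; i < 16; ++i) {
//           SkBlockAllocator::ByteRange br =
//                   pool->allocate<alignof(ExamplePoint)>(sizeof(ExamplePoint));
//           new (br.fBlock->ptr(br.fAlignedOffset)) ExamplePoint{float(i), float(2 * i)};
//       }
//   }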

///////////////////////////////////////////////////////////////////////////////////////////////////
// Template and inline implementations

SK_MAKE_BITFIELD_OPS(SkBlockAllocator::ReserveFlags)

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::BlockOverhead() {
    static_assert(SkAlignTo(kDataStart + Padding, Align) >= sizeof(Block));
    return SkAlignTo(kDataStart + Padding, Align);
}

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::Overhead() {
    // NOTE: On most platforms, SkBlockAllocator is packed; this is not the case on debug builds
    // due to extra fields, or on WASM due to 4-byte pointers but 16-byte max alignment.
    return std::max(sizeof(SkBlockAllocator),
                    offsetof(SkBlockAllocator, fHead) + BlockOverhead<Align, Padding>());
}

template<size_t Align, size_t Padding>
constexpr size_t SkBlockAllocator::MaxBlockSize() {
    // Without loss of generality, assumes 'Align' will be the largest encountered alignment for
    // the allocator (if it's not, the largest alignment will be encountered by the compiler and
    // pass/fail the same set of static asserts).
    return BlockOverhead<Align, Padding>() + kMaxAllocationSize;
}

template<size_t Align, size_t Padding>
void SkBlockAllocator::reserve(size_t size, ReserveFlags flags) {
    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }
    int iSize = (int) size;
    if ((flags & kIgnoreExistingBytes_Flag) ||
        this->currentBlock()->avail<Align, Padding>() < iSize) {

        int blockSize = BlockOverhead<Align, Padding>() + iSize;
        int maxSize = (flags & kIgnoreGrowthPolicy_Flag) ? blockSize
                                                         : MaxBlockSize<Align, Padding>();
        SkASSERT((size_t) maxSize <= (MaxBlockSize<Align, Padding>()));

        SkDEBUGCODE(auto oldTail = fTail;)
        this->addBlock(blockSize, maxSize);
        SkASSERT(fTail != oldTail);
        // Releasing the just-added block will move it into scratch space, allowing the original
        // tail's bytes to be used first before the scratch block is activated.
        this->releaseBlock(fTail);
    }
}

template <size_t Align, size_t Padding>
SkBlockAllocator::ByteRange SkBlockAllocator::allocate(size_t size) {
    // Amount of extra space for a new block to make sure the allocation can succeed.
    static constexpr int kBlockOverhead = (int) BlockOverhead<Align, Padding>();

    // Ensures 'offset' and 'end' calculations will be valid
    static_assert((kMaxAllocationSize + SkAlignTo(MaxBlockSize<Align, Padding>(), Align))
                        <= (size_t) std::numeric_limits<int32_t>::max());
    // Ensures size + blockOverhead + addBlock's alignment operations will be valid
    static_assert(kMaxAllocationSize + kBlockOverhead + ((1 << 12) - 1) // 4K align for large blocks
                        <= std::numeric_limits<int32_t>::max());

    if (size > kMaxAllocationSize) {
        SK_ABORT("Allocation too large (%zu bytes requested)", size);
    }

    int iSize = (int) size;
    int offset = fTail->cursor<Align, Padding>();
    int end = offset + iSize;
    if (end > fTail->fSize) {
        this->addBlock(iSize + kBlockOverhead, MaxBlockSize<Align, Padding>());
        offset = fTail->cursor<Align, Padding>();
        end = offset + iSize;
    }

    // Check invariants
    SkASSERT(end <= fTail->fSize);
    SkASSERT(end - offset == iSize);
    SkASSERT(offset - fTail->fCursor >= (int) Padding &&
             offset - fTail->fCursor <= (int) (Padding + Align - 1));
    SkASSERT(reinterpret_cast<uintptr_t>(fTail->ptr(offset)) % Align == 0);

    int start = fTail->fCursor;
    fTail->fCursor = end;

    fTail->unpoisonRange(offset - Padding, end);

    return {fTail, start, offset, end};
}

template <size_t Align, size_t Padding>
SkBlockAllocator::Block* SkBlockAllocator::owningBlock(const void* p, int start) {
    // 'p' was originally formed by aligning 'block + start + Padding', producing the inequality:
    //     block + start + Padding <= p <= block + start + Padding + Align-1
    // Rearranging this yields:
    //     block <= p - start - Padding <= block + Align-1
    // Masking these terms by ~(Align-1) reconstructs 'block' if the alignment of the block is
    // greater than or equal to Align (since block & ~(Align-1) == (block + Align-1) & ~(Align-1)
    // in that case). Unfortunately, this reconstruction does not work for over-aligned
    // allocations.
    if /* constexpr */ (Align <= kAddressAlign) {
        Block* block = reinterpret_cast<Block*>(
                (reinterpret_cast<uintptr_t>(p) - start - Padding) & ~(Align - 1));
        SkASSERT(block->fSentinel == kAssignedMarker);
        return block;
    } else {
        // There's not a constant-time expression available to reconstruct the block from 'p',
        // but this is unlikely to happen frequently.
        return this->findOwningBlock(p);
    }
}

template <size_t Align, size_t Padding>
int SkBlockAllocator::Block::alignedOffset(int offset) const {
    static_assert(SkIsPow2(Align));
    // Aligning adds (Padding + Align - 1) as an intermediate step, so ensure that can't overflow
    static_assert(MaxBlockSize<Align, Padding>() + Padding + Align - 1
                        <= (size_t) std::numeric_limits<int32_t>::max());

    if /* constexpr */ (Align <= kAddressAlign) {
        // Same as SkAlignTo, but operates on ints instead of size_t
        return (offset + Padding + Align - 1) & ~(Align - 1);
    } else {
        // Must take into account that 'this' may be starting at a pointer that doesn't satisfy
        // the larger alignment request, so must align the entire pointer, not just offset
        uintptr_t blockPtr = reinterpret_cast<uintptr_t>(this);
        uintptr_t alignedPtr = (blockPtr + offset + Padding + Align - 1) & ~(Align - 1);
        SkASSERT(alignedPtr - blockPtr <= (uintptr_t) std::numeric_limits<int32_t>::max());
        return (int) (alignedPtr - blockPtr);
    }
}

bool SkBlockAllocator::Block::resize(int start, int end, int deltaBytes) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    if (deltaBytes > kMaxAllocationSize || deltaBytes < -kMaxAllocationSize) {
        // Cannot possibly satisfy the resize and could overflow subsequent math
        return false;
    }
    if (fCursor == end) {
        int nextCursor = end + deltaBytes;
        SkASSERT(nextCursor >= start);
        // We still check nextCursor >= start for release builds that wouldn't assert.
        if (nextCursor <= fSize && nextCursor >= start) {
            if (nextCursor < fCursor) {
                // The allocation got smaller; poison the space that can no longer be used.
                this->poisonRange(nextCursor + 1, end);
            } else {
                // The allocation got larger; unpoison the space that can now be used.
                this->unpoisonRange(end, nextCursor);
            }

            fCursor = nextCursor;
            return true;
        }
    }
    return false;
}

// NOTE: release is equivalent to resize(start, end, start - end), and the compiler can optimize
// most of the operations away, but it wasn't able to remove the unnecessary branch comparing the
// new cursor to the block size or old start, so release() gets a specialization.
bool SkBlockAllocator::Block::release(int start, int end) {
    SkASSERT(fSentinel == kAssignedMarker);
    SkASSERT(start >= kDataStart && end <= fSize && start < end);

    this->poisonRange(start, end);

    if (fCursor == end) {
        fCursor = start;
        return true;
    } else {
        return false;
    }
}

///////// Block iteration
template <bool Forward, bool Const>
class SkBlockAllocator::BlockIter {
private:
    using BlockT = typename std::conditional<Const, const Block, Block>::type;
    using AllocatorT =
            typename std::conditional<Const, const SkBlockAllocator, SkBlockAllocator>::type;

public:
    BlockIter(AllocatorT* allocator) : fAllocator(allocator) {}

    class Item {
    public:
        bool operator!=(const Item& other) const { return fBlock != other.fBlock; }

        BlockT* operator*() const { return fBlock; }

        Item& operator++() {
            this->advance(fNext);
            return *this;
        }

    private:
        friend BlockIter;

        Item(BlockT* block) { this->advance(block); }

        void advance(BlockT* block) {
            fBlock = block;
            fNext = block ? (Forward ? block->fNext : block->fPrev) : nullptr;
            if (!Forward && fNext && fNext->isScratch()) {
                // For reverse-iteration only, we need to stop at the head, not the scratch block
                // possibly stashed in head->prev.
                fNext = nullptr;
            }
            SkASSERT(!fNext || !fNext->isScratch());
        }

        BlockT* fBlock;
        // Cache this before operator++ so that fBlock can be released during iteration
        BlockT* fNext;
    };

    Item begin() const { return Item(Forward ? &fAllocator->fHead : fAllocator->fTail); }
    Item end() const { return Item(nullptr); }

private:
    AllocatorT* fAllocator;
};

SkBlockAllocator::BlockIter<true, false> SkBlockAllocator::blocks() {
    return BlockIter<true, false>(this);
}
SkBlockAllocator::BlockIter<true, true> SkBlockAllocator::blocks() const {
    return BlockIter<true, true>(this);
}
SkBlockAllocator::BlockIter<false, false> SkBlockAllocator::rblocks() {
    return BlockIter<false, false>(this);
}
SkBlockAllocator::BlockIter<false, true> SkBlockAllocator::rblocks() const {
    return BlockIter<false, true>(this);
}

#endif // SkBlockAllocator_DEFINED