Flutter Engine
SkBlockAllocator.cpp
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/base/SkBlockAllocator.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <new>

#ifdef SK_DEBUG
#include <vector>
#endif

SkBlockAllocator::SkBlockAllocator(GrowthPolicy policy, size_t blockIncrementBytes,
                                   size_t additionalPreallocBytes)
        : fTail(&fHead)
        // Round up to the nearest max-aligned value, and then divide so that fBlockIncrement
        // can effectively fit higher byte counts in its 16 bits of storage
        , fBlockIncrement(SkTo<uint16_t>(
                std::min(SkAlignTo(blockIncrementBytes, kAddressAlign) / kAddressAlign,
                         (size_t) std::numeric_limits<uint16_t>::max())))
        , fGrowthPolicy(static_cast<uint64_t>(policy))
        , fN0((policy == GrowthPolicy::kLinear || policy == GrowthPolicy::kExponential) ? 1 : 0)
        , fN1(1)
        // The head block always fills remaining space from SkBlockAllocator's size, because it's
        // inline, but can take over the specified number of bytes immediately after it.
        , fHead(/*prev=*/nullptr, additionalPreallocBytes + BaseHeadBlockSize()) {
    SkASSERT(fBlockIncrement >= 1);
    SkASSERT(additionalPreallocBytes <= kMaxAllocationSize);
}
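
// A minimal usage sketch (illustrative assumption, not part of this file):
// construct an allocator whose heap blocks grow along the Fibonacci sequence
// in multiples of roughly 1KB.
//
//     SkBlockAllocator alloc(SkBlockAllocator::GrowthPolicy::kFibonacci,
//                            /*blockIncrementBytes=*/1024);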

SkBlockAllocator::Block::Block(Block* prev, int allocationSize)
        : fNext(nullptr)
        , fPrev(prev)
        , fSize(allocationSize)
        , fCursor(kDataStart)
        , fMetadata(0)
        , fAllocatorMetadata(0) {
    SkASSERT(allocationSize >= (int) sizeof(Block));
    SkDEBUGCODE(fSentinel = kAssignedMarker;)

    this->poisonRange(kDataStart, fSize);
}
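
// Note on the debug machinery above (inferred from the surrounding code; see
// SkBlockAllocator.h for the definitions): fSentinel is set to kAssignedMarker
// on construction and checked before use, so SK_DEBUG builds can catch use of
// a freed or corrupted Block, while poisonRange() marks the payload beyond the
// header as off-limits (e.g. for sanitizers) until it is handed out.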

SkBlockAllocator::Block::~Block() {
    this->unpoisonRange(kDataStart, fSize);

    SkASSERT(fSentinel == kAssignedMarker);
    SkDEBUGCODE(fSentinel = kFreedMarker;) // FWIW
}

size_t SkBlockAllocator::totalSize() const {
    // Use size_t since the sum across all blocks could exceed 'int', even though each block won't
    size_t size = offsetof(SkBlockAllocator, fHead) + this->scratchBlockSize();
    for (const Block* b : this->blocks()) {
        size += b->fSize;
    }
    SkASSERT(size >= this->preallocSize());
    return size;
}

size_t SkBlockAllocator::totalUsableSpace() const {
    size_t size = this->scratchBlockSize();
    if (size > 0) {
        size -= kDataStart; // scratchBlockSize reports total block size, not usable size
    }
    for (const Block* b : this->blocks()) {
        size += (b->fSize - kDataStart);
    }
    SkASSERT(size >= this->preallocUsableSpace());
    return size;
}

size_t SkBlockAllocator::totalSpaceInUse() const {
    size_t size = 0;
    for (const Block* b : this->blocks()) {
        size += (b->fCursor - kDataStart);
    }
    SkASSERT(size <= this->totalUsableSpace());
    return size;
}
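
// Worked example of the three queries above (sizes assumed for illustration):
// a 1024-byte heap block holding 300 cursor-allocated bytes contributes 1024
// to totalSize(), (1024 - kDataStart) to totalUsableSpace(), and 300 to
// totalSpaceInUse(); across all blocks,
// totalSpaceInUse() <= totalUsableSpace() <= totalSize() always holds.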

SkBlockAllocator::Block* SkBlockAllocator::findOwningBlock(const void* p) {
    // When in doubt, search in reverse to find an overlapping block.
    uintptr_t ptr = reinterpret_cast<uintptr_t>(p);
    for (Block* b : this->rblocks()) {
        uintptr_t lowerBound = reinterpret_cast<uintptr_t>(b) + kDataStart;
        uintptr_t upperBound = reinterpret_cast<uintptr_t>(b) + b->fSize;
        if (lowerBound <= ptr && ptr < upperBound) {
            SkASSERT(b->fSentinel == kAssignedMarker);
            return b;
        }
    }
    return nullptr;
}
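
// Illustrative sketch (assumed call site, not part of this file): map a
// pointer returned by an earlier allocation back to its block, e.g. to update
// that block's per-block metadata.
//
//     if (SkBlockAllocator::Block* owner = alloc.findOwningBlock(ptr)) {
//         owner->setMetadata(owner->metadata() + 1);
//     }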

void SkBlockAllocator::releaseBlock(Block* block) {
    if (block == &fHead) {
        // Reset the cursor of the head block so that it can be reused if it becomes the new tail
        block->fCursor = kDataStart;
        block->fMetadata = 0;
        block->poisonRange(kDataStart, block->fSize);
        // Unlike in reset(), we don't set the head's next block to null because there are
        // potentially heap-allocated blocks that are still connected to it.
    } else {
        SkASSERT(block->fPrev);
        block->fPrev->fNext = block->fNext;
        if (block->fNext) {
            SkASSERT(fTail != block);
            block->fNext->fPrev = block->fPrev;
        } else {
            SkASSERT(fTail == block);
            fTail = block->fPrev;
        }

        // The released block becomes the new scratch block (if it's bigger); otherwise delete it
        if (this->scratchBlockSize() < block->fSize) {
            SkASSERT(block != fHead.fPrev); // shouldn't already be the scratch block
            if (fHead.fPrev) {
                delete fHead.fPrev;
            }
            block->markAsScratch();
            fHead.fPrev = block;
        } else {
            delete block;
        }
    }

    // Decrement growth policy (opposite of addBlock()'s increment operations)
    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    if (fN0 > 0 && (fN1 > 1 || gp == GrowthPolicy::kFibonacci)) {
        SkASSERT(gp != GrowthPolicy::kFixed); // fixed never needs undoing, fN0 is always 0
        if (gp == GrowthPolicy::kLinear) {
            fN1 = fN1 - fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            // Subtract n0 from n1 to get the prior 2 terms in the fibonacci sequence
            int temp = fN1 - fN0; // yields prior fN0
            fN1 = fN1 - temp;     // yields prior fN1
            fN0 = temp;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            // Divide by 2 to undo the 2N update from addBlock
            fN1 = fN1 >> 1;
            fN0 = fN1;
        }
    }

    SkASSERT(fN1 >= 1 && fN0 >= 0);
}
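
// Worked example of the Fibonacci decrement above (values for illustration):
// with (fN0, fN1) = (3, 5), temp = 5 - 3 = 2 (the prior fN0), then
// fN1 = 5 - 2 = 3 (the prior fN1) and fN0 becomes 2, restoring the pair to
// its state before the matching addBlock() call.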

void SkBlockAllocator::stealHeapBlocks(SkBlockAllocator* other) {
    Block* toSteal = other->fHead.fNext;
    if (toSteal) {
        // The other's next block connects back to this allocator's current tail, and this
        // allocator's new tail becomes the end of other's block linked list.
        SkASSERT(other->fTail != &other->fHead);
        toSteal->fPrev = fTail;
        fTail->fNext = toSteal;
        fTail = other->fTail;
        // The other allocator becomes just its inline head block
        other->fTail = &other->fHead;
        other->fHead.fNext = nullptr;
    } // else no block to steal
}
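
// Illustrative sketch (assumed usage, not part of this file): move all of
// src's heap blocks into dst before src is destroyed, keeping pointers into
// those blocks valid.
//
//     dst.stealHeapBlocks(&src);  // src is left with only its inline head block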

void SkBlockAllocator::reset() {
    for (Block* b : this->rblocks()) {
        if (b == &fHead) {
            // Reset metadata and cursor, tail points to the head block again
            fTail = b;
            b->fNext = nullptr;
            b->fCursor = kDataStart;
            b->fMetadata = 0;
            // For reset(), but NOT releaseBlock(), the head allocatorMetadata and scratch block
            // are reset/destroyed.
            b->fAllocatorMetadata = 0;
            b->poisonRange(kDataStart, b->fSize);
            this->resetScratchSpace();
        } else {
            delete b;
        }
    }
    SkASSERT(fTail == &fHead && fHead.fNext == nullptr && fHead.fPrev == nullptr &&
             fHead.metadata() == 0 && fHead.fCursor == kDataStart);

    GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
    fN0 = (gp == GrowthPolicy::kLinear || gp == GrowthPolicy::kExponential) ? 1 : 0;
    fN1 = 1;
}

void SkBlockAllocator::resetScratchSpace() {
    if (fHead.fPrev) {
        delete fHead.fPrev;
        fHead.fPrev = nullptr;
    }
}

void SkBlockAllocator::addBlock(int minSize, int maxSize) {
    SkASSERT(minSize > (int) sizeof(Block) && minSize <= maxSize);

    // Max positive value for uint:23 storage (decltype(fN0) picks up uint64_t, not uint:23).
    static constexpr int kMaxN = (1 << 23) - 1;
    static_assert(2 * kMaxN <= std::numeric_limits<int32_t>::max()); // Growth policy won't overflow

    auto alignAllocSize = [](int size) {
        // Round to a nice boundary since the block isn't maxing out: if allocSize > 32K, align
        // on a 4K boundary; otherwise align on max_align_t, to play nicely with jeMalloc
        // (from SkArenaAlloc).
        int mask = size > (1 << 15) ? ((1 << 12) - 1) : (kAddressAlign - 1);
        return (size + mask) & ~mask;
    };
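
    // Worked example of alignAllocSize() (values chosen for illustration):
    // 40000 > 32768 selects mask = 4095, and (40000 + 4095) & ~4095 = 40960,
    // the next 4K boundary; a smaller request such as 1000 would instead be
    // rounded up to the next multiple of kAddressAlign.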

    int allocSize;
    void* mem = nullptr;
    if (this->scratchBlockSize() >= minSize) {
        // Activate the scratch block instead of making a new block
        SkASSERT(fHead.fPrev->isScratch());
        allocSize = fHead.fPrev->fSize;
        mem = fHead.fPrev;
        fHead.fPrev = nullptr;
    } else if (minSize < maxSize) {
        // Calculate the 'next' size per growth policy sequence
        GrowthPolicy gp = static_cast<GrowthPolicy>(fGrowthPolicy);
        int nextN1 = fN0 + fN1;
        int nextN0;
        if (gp == GrowthPolicy::kFixed || gp == GrowthPolicy::kLinear) {
            nextN0 = fN0;
        } else if (gp == GrowthPolicy::kFibonacci) {
            nextN0 = fN1;
        } else {
            SkASSERT(gp == GrowthPolicy::kExponential);
            nextN0 = nextN1;
        }
        fN0 = std::min(kMaxN, nextN0);
        fN1 = std::min(kMaxN, nextN1);

        // However, must guard against overflow here, since all the size-based asserts prevented
        // alignment/addition overflows, while multiplication requires 2x bits instead of x+1.
        int sizeIncrement = fBlockIncrement * kAddressAlign;
        if (maxSize / sizeIncrement < nextN1) {
            // The growth policy would overflow, so use the max. We've already confirmed that
            // maxSize will be sufficient for the requested minimumSize
            allocSize = maxSize;
        } else {
            allocSize = std::min(alignAllocSize(std::max(minSize, sizeIncrement * nextN1)),
                                 maxSize);
        }
    } else {
        SkASSERT(minSize == maxSize);
        // Still align on a nice boundary, no max clamping since that would just undo the alignment
        allocSize = alignAllocSize(minSize);
    }

    // Create new block and append to the linked list of blocks in this allocator
    if (!mem) {
        mem = operator new(allocSize);
    }
    fTail->fNext = new (mem) Block(fTail, allocSize);
    fTail = fTail->fNext;
}
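
// Worked example of the growth sequence (values for illustration): under
// kFibonacci with sizeIncrement = 1024, successive addBlock() calls compute
// nextN1 = 1, 2, 3, 5, 8, ..., so new blocks are sized about 1KB, 2KB, 3KB,
// 5KB, 8KB, ... (subject to alignAllocSize() rounding and the maxSize clamp).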

#ifdef SK_DEBUG
void SkBlockAllocator::validate() const {
    std::vector<const Block*> blocks;
    const Block* prev = nullptr;
    for (const Block* block : this->blocks()) {
        blocks.push_back(block);

        SkASSERT(kAssignedMarker == block->fSentinel);
        if (block == &fHead) {
            // The head block's fPrev may be non-null if it holds a scratch block, but that's not
            // considered part of the linked list
            SkASSERT(!prev && (!fHead.fPrev || fHead.fPrev->isScratch()));
        } else {
            SkASSERT(prev == block->fPrev);
        }
        if (prev) {
            SkASSERT(prev->fNext == block);
        }

        SkASSERT(block->fSize >= (int) sizeof(Block));
        SkASSERT(block->fCursor >= kDataStart);
        SkASSERT(block->fCursor <= block->fSize);

        prev = block;
    }
    SkASSERT(prev == fTail);
    SkASSERT(!blocks.empty());
    SkASSERT(blocks[0] == &fHead);

    // Confirm reverse iteration matches forward iteration
    size_t j = blocks.size();
    for (const Block* b : this->rblocks()) {
        SkASSERT(b == blocks[j - 1]);
        j--;
    }
    SkASSERT(j == 0);
}
#endif