Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Classes | Typedefs | Functions
SkBlockAllocatorTest.cpp File Reference
#include "include/core/SkTypes.h"
#include "include/private/base/SkDebug.h"
#include "src/base/SkBlockAllocator.h"
#include "tests/Test.h"
#include <cstdint>
#include <cstring>
#include <new>
#include <vector>

Go to the source code of this file.

Classes

class  BlockAllocatorTestAccess
 
struct  TestMeta
 
struct  TestMetaBig
 

Typedefs

using Block = SkBlockAllocator::Block
 
using GrowthPolicy = SkBlockAllocator::GrowthPolicy
 

Functions

template<size_t N>
static int block_count (const SkSBlockAllocator< N > &pool)
 
template<size_t N>
static Block * get_block (SkSBlockAllocator< N > &pool, int blockIndex)
 
template<size_t N>
static size_t total_size (SkSBlockAllocator< N > &pool)
 
template<size_t N>
static size_t add_block (SkSBlockAllocator< N > &pool)
 
template<size_t N>
static void * alloc_byte (SkSBlockAllocator< N > &pool)
 
 DEF_TEST (SkBlockAllocatorPreallocSize, r)
 
 DEF_TEST (SkBlockAllocatorAlloc, r)
 
 DEF_TEST (SkBlockAllocatorResize, r)
 
 DEF_TEST (SkBlockAllocatorRelease, r)
 
 DEF_TEST (SkBlockAllocatorRewind, r)
 
 DEF_TEST (SkBlockAllocatorGrowthPolicy, r)
 
 DEF_TEST (SkBlockAllocatorReset, r)
 
 DEF_TEST (SkBlockAllocatorReleaseBlock, r)
 
 DEF_TEST (SkBlockAllocatorIterateAndRelease, r)
 
 DEF_TEST (SkBlockAllocatorScratchBlockReserve, r)
 
 DEF_TEST (SkBlockAllocatorStealBlocks, r)
 
 DEF_TEST (SkBlockAllocatorMetadata, r)
 
 DEF_TEST (SkBlockAllocatorAllocatorMetadata, r)
 
template<size_t Align, size_t Padding>
static void run_owning_block_test (skiatest::Reporter *r, SkBlockAllocator *pool)
 
template<size_t Padding>
static void run_owning_block_tests (skiatest::Reporter *r, SkBlockAllocator *pool)
 
 DEF_TEST (SkBlockAllocatorOwningBlock, r)
 

Typedef Documentation

◆ Block

Definition at line 18 of file SkBlockAllocatorTest.cpp.

◆ GrowthPolicy

Definition at line 19 of file SkBlockAllocatorTest.cpp.

Function Documentation

◆ add_block()

template<size_t N>
static size_t add_block ( SkSBlockAllocator< N > &  pool)
static

Definition at line 65 of file SkBlockAllocatorTest.cpp.

65 {
66 size_t currentSize = total_size(pool);
67 SkBlockAllocator::Block* current = pool->currentBlock();
68 while(pool->currentBlock() == current) {
69 pool->template allocate<4>(pool->preallocSize() / 2);
70 }
71 return total_size(pool) - currentSize;
72}
AutoreleasePool pool
static size_t total_size(SkSBlockAllocator< N > &pool)

◆ alloc_byte()

template<size_t N>
static void * alloc_byte ( SkSBlockAllocator< N > &  pool)
static

Definition at line 75 of file SkBlockAllocatorTest.cpp.

75 {
76 auto br = pool->template allocate<1>(1);
77 return br.fBlock->ptr(br.fAlignedOffset);
78}

◆ block_count()

template<size_t N>
static int block_count ( const SkSBlockAllocator< N > &  pool)
static

Definition at line 31 of file SkBlockAllocatorTest.cpp.

31 {
32 int ct = 0;
33 for (const Block* b : pool->blocks()) {
34 (void) b;
35 ct++;
36 }
37 return ct;
38}
static bool b

◆ DEF_TEST() [1/14]

DEF_TEST ( SkBlockAllocatorAlloc  ,
 
)

Definition at line 106 of file SkBlockAllocatorTest.cpp.

106 {
108 SkDEBUGCODE(pool->validate();)
109
110 // Assumes the previous pointer was in the same block
111 auto validate_ptr = [&](int align, int size,
114 uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
115 // Matches the requested align
116 REPORTER_ASSERT(r, pt % align == 0);
117 // And large enough
118 REPORTER_ASSERT(r, br.fEnd - br.fAlignedOffset >= size);
119 // And has enough padding for alignment
120 REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart >= 0);
121 REPORTER_ASSERT(r, br.fAlignedOffset - br.fStart <= align - 1);
122 // And block of the returned struct is the current block of the allocator
123 REPORTER_ASSERT(r, pool->currentBlock() == br.fBlock);
124
125 // And make sure that we're past the required end of the previous allocation
126 if (prevBR) {
127 uintptr_t prevEnd =
128 reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
129 REPORTER_ASSERT(r, pt > prevEnd);
130 }
131
132 // And make sure that the entire byte range is safe to write into (excluding the dead space
133 // between "start" and "aligned offset," which is just padding and is left poisoned)
134 std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
135 };
136
137 auto p1 = pool->allocate<1>(14);
138 validate_ptr(1, 14, p1, nullptr);
139
140 auto p2 = pool->allocate<2>(24);
141 validate_ptr(2, 24, p2, &p1);
142
143 auto p4 = pool->allocate<4>(28);
144 validate_ptr(4, 28, p4, &p2);
145
146 auto p8 = pool->allocate<8>(40);
147 validate_ptr(8, 40, p8, &p4);
148
149 auto p16 = pool->allocate<16>(64);
150 validate_ptr(16, 64, p16, &p8);
151
152 auto p32 = pool->allocate<32>(96);
153 validate_ptr(32, 96, p32, &p16);
154
155 // All of these allocations should be in the head block
156 REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
157 SkDEBUGCODE(pool->validate();)
158
159 // Requesting an allocation of avail() should not make a new block
160 size_t avail = pool->currentBlock()->avail<4>();
161 auto pAvail = pool->allocate<4>(avail);
162 validate_ptr(4, avail, pAvail, &p32);
163
164 // Remaining should be less than the alignment that was requested, and then
165 // the next allocation will make a new block
166 REPORTER_ASSERT(r, pool->currentBlock()->avail<4>() < 4);
167 auto pNextBlock = pool->allocate<4>(4);
168 validate_ptr(4, 4, pNextBlock, nullptr);
169 REPORTER_ASSERT(r, total_size(pool) > pool->preallocSize());
170
171 // Allocating more than avail() makes another block
172 size_t currentSize = total_size(pool);
173 size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
174 auto pTooBig = pool->allocate<4>(bigRequest);
175 validate_ptr(4, bigRequest, pTooBig, nullptr);
176 REPORTER_ASSERT(r, total_size(pool) > currentSize);
177
178 // Allocating more than the default growth policy (1024 in this case) will fulfill the request
179 REPORTER_ASSERT(r, total_size(pool) - currentSize < 4096);
180 currentSize = total_size(pool);
181 auto pReallyTooBig = pool->allocate<4>(4096);
182 validate_ptr(4, 4096, pReallyTooBig, nullptr);
183 REPORTER_ASSERT(r, total_size(pool) >= currentSize + 4096);
184 SkDEBUGCODE(pool->validate();)
185}
#define SkDEBUGCODE(...)
Definition SkDebug.h:23
#define REPORTER_ASSERT(r, cond,...)
Definition Test.h:286
void * ptr(int offset)

◆ DEF_TEST() [2/14]

DEF_TEST ( SkBlockAllocatorAllocatorMetadata  ,
 
)

Definition at line 606 of file SkBlockAllocatorTest.cpp.

606 {
608 SkDEBUGCODE(pool->validate();)
609
610 REPORTER_ASSERT(r, pool->metadata() == 0); // initial value
611
612 pool->setMetadata(4);
613 REPORTER_ASSERT(r, pool->metadata() == 4);
614
615 // Releasing the head block doesn't change the allocator's metadata (even though that's where
616 // it is stored).
617 pool->releaseBlock(pool->headBlock());
618 REPORTER_ASSERT(r, pool->metadata() == 4);
619
620 // But resetting the whole allocator brings things back to as if it were newly constructed
621 pool->reset();
622 REPORTER_ASSERT(r, pool->metadata() == 0);
623}

◆ DEF_TEST() [3/14]

DEF_TEST ( SkBlockAllocatorGrowthPolicy  ,
 
)

Definition at line 297 of file SkBlockAllocatorTest.cpp.

297 {
298 static constexpr int kInitSize = 128;
299 static constexpr int kBlockCount = 5;
300 static constexpr size_t kExpectedSizes[SkBlockAllocator::kGrowthPolicyCount][kBlockCount] = {
301 // kFixed -> kInitSize per block
302 { kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
303 // kLinear -> (block ct + 1) * kInitSize for next block
304 { kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
305 // kFibonacci -> 1, 1, 2, 3, 5 * kInitSize for the blocks
306 { kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
307 // kExponential -> 1, 2, 4, 8, 16 * kInitSize for the blocks
308 { kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
309 };
310
311 for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
313 SkDEBUGCODE(pool->validate();)
314
315 REPORTER_ASSERT(r, kExpectedSizes[gp][0] == total_size(pool));
316 for (int i = 1; i < kBlockCount; ++i) {
317 REPORTER_ASSERT(r, kExpectedSizes[gp][i] == add_block(pool));
318 }
319
320 SkDEBUGCODE(pool->validate();)
321 }
322}
SkBlockAllocator::GrowthPolicy GrowthPolicy
static size_t add_block(SkSBlockAllocator< N > &pool)
static constexpr int kGrowthPolicyCount

◆ DEF_TEST() [4/14]

DEF_TEST ( SkBlockAllocatorIterateAndRelease  ,
 
)

Definition at line 418 of file SkBlockAllocatorTest.cpp.

418 {
420
421 pool->headBlock()->setMetadata(1);
425
426 // Loop forward and release the blocks
427 int releaseCount = 0;
428 for (auto* b : pool->blocks()) {
429 pool->releaseBlock(b);
430 releaseCount++;
431 }
432 REPORTER_ASSERT(r, releaseCount == 4);
433 // pool should have just the head block, but was reset
434 REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
436
437 // Add more blocks
438 pool->headBlock()->setMetadata(1);
442
443 // Loop in reverse and release the blocks
444 releaseCount = 0;
445 for (auto* b : pool->rblocks()) {
446 pool->releaseBlock(b);
447 releaseCount++;
448 }
449 REPORTER_ASSERT(r, releaseCount == 4);
450 // pool should have just the head block, but was reset
451 REPORTER_ASSERT(r, pool->headBlock()->metadata() == 0);
453}
static int block_count(const SkSBlockAllocator< N > &pool)

◆ DEF_TEST() [5/14]

DEF_TEST ( SkBlockAllocatorMetadata  ,
 
)

Definition at line 568 of file SkBlockAllocatorTest.cpp.

568 {
570 SkDEBUGCODE(pool->validate();)
571
572 // Allocation where alignment of user data > alignment of metadata
573 SkASSERT(alignof(TestMeta) < 16);
574 auto p1 = pool->allocate<16, sizeof(TestMeta)>(16);
575 SkDEBUGCODE(pool->validate();)
576
577 REPORTER_ASSERT(r, p1.fAlignedOffset - p1.fStart >= (int) sizeof(TestMeta));
578 TestMeta* meta = static_cast<TestMeta*>(p1.fBlock->ptr(p1.fAlignedOffset - sizeof(TestMeta)));
579 // Confirm alignment for both pointers
580 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(meta) % alignof(TestMeta) == 0);
581 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
582 // Access fields to make sure 'meta' matches the compiler's expectations...
583 meta->fX1 = 2;
584 meta->fX2 = 5;
585
586 // Repeat, but for metadata that has a larger alignment than the allocation
587 SkASSERT(alignof(TestMetaBig) == 32);
588 auto p2 = pool->allocate<alignof(TestMetaBig), sizeof(TestMetaBig)>(16);
589 SkDEBUGCODE(pool->validate();)
590
591 REPORTER_ASSERT(r, p2.fAlignedOffset - p2.fStart >= (int) sizeof(TestMetaBig));
592 TestMetaBig* metaBig = static_cast<TestMetaBig*>(
593 p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
594 // Confirm alignment for both pointers
595 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(metaBig) % alignof(TestMetaBig) == 0);
596 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
597 // Access fields
598 metaBig->fX1 = 3;
599 metaBig->fX2 = 6;
600
601 // Ensure metadata values persist after allocations
602 REPORTER_ASSERT(r, meta->fX1 == 2 && meta->fX2 == 5);
603 REPORTER_ASSERT(r, metaBig->fX1 == 3 && metaBig->fX2 == 6);
604}
#define SkASSERT(cond)
Definition SkAssert.h:116

◆ DEF_TEST() [6/14]

DEF_TEST ( SkBlockAllocatorOwningBlock  ,
 
)

Definition at line 654 of file SkBlockAllocatorTest.cpp.

654 {
656 SkDEBUGCODE(pool->validate();)
657
658 run_owning_block_tests<1>(r, pool.allocator());
659 run_owning_block_tests<2>(r, pool.allocator());
660 run_owning_block_tests<4>(r, pool.allocator());
661 run_owning_block_tests<8>(r, pool.allocator());
662 run_owning_block_tests<16>(r, pool.allocator());
663 run_owning_block_tests<32>(r, pool.allocator());
664
665 // And some weird numbers
666 run_owning_block_tests<3>(r, pool.allocator());
667 run_owning_block_tests<9>(r, pool.allocator());
668 run_owning_block_tests<17>(r, pool.allocator());
669}

◆ DEF_TEST() [7/14]

DEF_TEST ( SkBlockAllocatorPreallocSize  ,
 
)

Definition at line 80 of file SkBlockAllocatorTest.cpp.

80 {
81 // Tests stack/member initialization, option #1 described in doc
82 SkBlockAllocator stack{GrowthPolicy::kFixed, 2048};
83 SkDEBUGCODE(stack.validate();)
84
85 REPORTER_ASSERT(r, stack.preallocSize() == sizeof(SkBlockAllocator));
86 REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());
87
88 // Tests placement new initialization to increase head block size, option #2
89 void* mem = operator new(1024);
90 SkBlockAllocator* placement = new (mem) SkBlockAllocator(GrowthPolicy::kLinear, 1024,
91 1024 - sizeof(SkBlockAllocator));
92 REPORTER_ASSERT(r, placement->preallocSize() == 1024);
93 REPORTER_ASSERT(r, placement->preallocUsableSpace() < 1024 &&
94 placement->preallocUsableSpace() >= (1024 - sizeof(SkBlockAllocator)));
95 placement->~SkBlockAllocator();
96 operator delete(mem);
97
98 // Tests inline increased preallocation, option #3
100 SkDEBUGCODE(inlined->validate();)
101 REPORTER_ASSERT(r, inlined->preallocSize() == 2048);
102 REPORTER_ASSERT(r, inlined->preallocUsableSpace() < 2048 &&
103 inlined->preallocUsableSpace() >= (2048 - sizeof(SkBlockAllocator)));
104}
size_t preallocUsableSpace() const
size_t preallocSize() const

◆ DEF_TEST() [8/14]

DEF_TEST ( SkBlockAllocatorRelease  ,
 
)

Definition at line 240 of file SkBlockAllocatorTest.cpp.

240 {
242 SkDEBUGCODE(pool->validate();)
243
244 // Successful allocate and release
245 auto p = pool->allocate<8>(32);
246 REPORTER_ASSERT(r, pool->currentBlock()->release(p.fStart, p.fEnd));
247 // Ensure the above release actually means the next allocation reuses the same space
248 auto p2 = pool->allocate<8>(32);
249 REPORTER_ASSERT(r, p.fStart == p2.fStart);
250
251 // Confirm that 'p2' cannot be released if another allocation came after it
252 auto p3 = pool->allocate<8>(64);
253 (void) p3;
254 REPORTER_ASSERT(r, !p2.fBlock->release(p2.fStart, p2.fEnd));
255
256 // Confirm that 'p4' can be released if 'p5' is released first, and confirm that 'p2' and 'p3'
257 // can be released simultaneously (equivalent to 'p3' then 'p2').
258 auto p4 = pool->allocate<8>(16);
259 auto p5 = pool->allocate<8>(96);
260 REPORTER_ASSERT(r, p5.fBlock->release(p5.fStart, p5.fEnd));
261 REPORTER_ASSERT(r, p4.fBlock->release(p4.fStart, p4.fEnd));
262 REPORTER_ASSERT(r, p2.fBlock->release(p2.fStart, p3.fEnd));
263
264 // And confirm that passing in the wrong size for the allocation fails
265 p = pool->allocate<8>(32);
266 REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd - 16));
267 REPORTER_ASSERT(r, !p.fBlock->release(p.fStart, p.fEnd + 16));
268 REPORTER_ASSERT(r, p.fBlock->release(p.fStart, p.fEnd));
269 SkDEBUGCODE(pool->validate();)
270}

◆ DEF_TEST() [9/14]

DEF_TEST ( SkBlockAllocatorReleaseBlock  ,
 
)

Definition at line 357 of file SkBlockAllocatorTest.cpp.

357 {
358 // This loops over all growth policies to make sure that the incremental releases update the
359 // sequence correctly for each policy.
360 for (int gp = 0; gp < SkBlockAllocator::kGrowthPolicyCount; ++gp) {
362 SkDEBUGCODE(pool->validate();)
363
364 void* firstAlloc = alloc_byte(pool);
365
366 size_t b1Size = total_size(pool);
367 size_t b2Size = add_block(pool);
368 size_t b3Size = add_block(pool);
369 size_t b4Size = add_block(pool);
370 SkDEBUGCODE(pool->validate();)
371
372 get_block(pool, 0)->setMetadata(1);
373 get_block(pool, 1)->setMetadata(2);
374 get_block(pool, 2)->setMetadata(3);
375 get_block(pool, 3)->setMetadata(4);
376
377 // Remove the 3 added blocks, but always remove the i = 1 to test intermediate removal (and
378 // on the last iteration, will test tail removal).
379 REPORTER_ASSERT(r, total_size(pool) == b1Size + b2Size + b3Size + b4Size);
380 pool->releaseBlock(get_block(pool, 1));
382 REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 3);
383 REPORTER_ASSERT(r, total_size(pool) == b1Size + b3Size + b4Size);
384
385 pool->releaseBlock(get_block(pool, 1));
387 REPORTER_ASSERT(r, get_block(pool, 1)->metadata() == 4);
388 REPORTER_ASSERT(r, total_size(pool) == b1Size + b4Size);
389
390 pool->releaseBlock(get_block(pool, 1));
392 REPORTER_ASSERT(r, total_size(pool) == b1Size);
393
394 // Since we're back to just the head block, if we add a new block, the growth policy should
395 // match the original sequence instead of continuing with "b5Size"
396 pool->resetScratchSpace();
397 size_t size = add_block(pool);
398 REPORTER_ASSERT(r, size == b2Size);
399 pool->releaseBlock(get_block(pool, 1));
400
401 // Explicitly release the head block and confirm it's reset
402 pool->releaseBlock(get_block(pool, 0));
403 REPORTER_ASSERT(r, total_size(pool) == pool->preallocSize());
405 REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
406 REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0); // metadata reset too
407
408 // Confirm that if we have > 1 block, but release the head block we can still access the
409 // others
412 pool->releaseBlock(get_block(pool, 0));
414 SkDEBUGCODE(pool->validate();)
415 }
416}
static void * alloc_byte(SkSBlockAllocator< N > &pool)
static Block * get_block(SkSBlockAllocator< N > &pool, int blockIndex)
void setMetadata(int value)
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition switches.h:259

◆ DEF_TEST() [10/14]

DEF_TEST ( SkBlockAllocatorReset  ,
 
)

Definition at line 324 of file SkBlockAllocatorTest.cpp.

324 {
325 static constexpr int kBlockIncrement = 1024;
326
327 SkSBlockAllocator<kBlockIncrement> pool{GrowthPolicy::kLinear};
328 SkDEBUGCODE(pool->validate();)
329
330 void* firstAlloc = alloc_byte(pool);
331
332 // Add several blocks
336 SkDEBUGCODE(pool->validate();)
337
338 REPORTER_ASSERT(r, block_count(pool) == 4); // 3 added plus the implicit head
339
340 get_block(pool, 0)->setMetadata(2);
341
342 // Reset and confirm that there's only one block, a new allocation matches 'firstAlloc' again,
343 // and new blocks are sized based on a reset growth policy.
344 pool->reset();
345 SkDEBUGCODE(pool->validate();)
346
348 REPORTER_ASSERT(r, pool->preallocSize() == pool->totalSize());
349 REPORTER_ASSERT(r, get_block(pool, 0)->metadata() == 0);
350
351 REPORTER_ASSERT(r, firstAlloc == alloc_byte(pool));
352 REPORTER_ASSERT(r, 2 * kBlockIncrement == add_block(pool));
353 REPORTER_ASSERT(r, 3 * kBlockIncrement == add_block(pool));
354 SkDEBUGCODE(pool->validate();)
355}

◆ DEF_TEST() [11/14]

DEF_TEST ( SkBlockAllocatorResize  ,
 
)

Definition at line 187 of file SkBlockAllocatorTest.cpp.

187 {
189 SkDEBUGCODE(pool->validate();)
190
191 // Fixed resize from 16 to 32
192 SkBlockAllocator::ByteRange p = pool->allocate<4>(16);
193 REPORTER_ASSERT(r, p.fBlock->avail<4>() > 16);
194 REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, 16));
195 p.fEnd += 16;
196
197 std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);
198
199 // Subsequent allocation is 32 bytes ahead of 'p' now, and 'p' cannot be resized further.
200 auto pNext = pool->allocate<4>(16);
201 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
202 reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
203 REPORTER_ASSERT(r, p.fBlock == pNext.fBlock);
204 REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, 48));
205
206 // Confirm that releasing pNext allows 'p' to be resized, and that it can be resized up to avail
207 REPORTER_ASSERT(r, p.fBlock->release(pNext.fStart, pNext.fEnd));
208 int fillBlock = p.fBlock->avail<4>();
209 REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
210 p.fEnd += fillBlock;
211
212 std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);
213
214 // Confirm that resizing when there's not enough room fails
215 REPORTER_ASSERT(r, p.fBlock->avail<4>() < fillBlock);
216 REPORTER_ASSERT(r, !p.fBlock->resize(p.fStart, p.fEnd, fillBlock));
217
218 // Confirm that we can shrink 'p' back to 32 bytes and then further allocate again
219 int shrinkTo32 = p.fStart - p.fEnd + 32;
220 REPORTER_ASSERT(r, p.fBlock->resize(p.fStart, p.fEnd, shrinkTo32));
221 p.fEnd += shrinkTo32;
222 REPORTER_ASSERT(r, p.fEnd - p.fStart == 32);
223
224 std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);
225
226 pNext = pool->allocate<4>(16);
227 REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
228 reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
229 SkDEBUGCODE(pool->validate();)
230
231 // Confirm that we can't shrink past the start of the allocation, but we can shrink it to 0
232 int shrinkTo0 = pNext.fStart - pNext.fEnd;
233#ifndef SK_DEBUG
234 // Only test for false on release builds; a negative size should assert on debug builds
235 REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
236#endif
237 REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
238}

◆ DEF_TEST() [12/14]

DEF_TEST ( SkBlockAllocatorRewind  ,
 
)

Definition at line 272 of file SkBlockAllocatorTest.cpp.

272 {
273 // Confirm that a bunch of allocations and then releases in stack order fully goes back to the
274 // start of the block (i.e. unwinds the entire stack, and not just the last cursor position)
276 SkDEBUGCODE(pool->validate();)
277
278 std::vector<SkBlockAllocator::ByteRange> ptrs;
279 ptrs.reserve(32); // silence clang-tidy performance warning
280 for (int i = 0; i < 32; ++i) {
281 ptrs.push_back(pool->allocate<4>(16));
282 }
283
284 // Release everything in reverse order
285 SkDEBUGCODE(pool->validate();)
286 for (int i = 31; i >= 0; --i) {
287 auto br = ptrs[i];
288 REPORTER_ASSERT(r, br.fBlock->release(br.fStart, br.fEnd));
289 }
290
291 // If correct, we've rewound all the way back to the start of the block, so a new allocation
292 // will have the same location as ptrs[0]
293 SkDEBUGCODE(pool->validate();)
294 REPORTER_ASSERT(r, pool->allocate<4>(16).fStart == ptrs[0].fStart);
295}

◆ DEF_TEST() [13/14]

DEF_TEST ( SkBlockAllocatorScratchBlockReserve  ,
 
)

Definition at line 455 of file SkBlockAllocatorTest.cpp.

455 {
457
458 size_t added = add_block(pool);
460 size_t total = pool->totalSize();
461 pool->releaseBlock(pool->currentBlock());
462
463 // Total size shouldn't have changed, the released block should become scratch
464 REPORTER_ASSERT(r, pool->totalSize() == total);
466
467 // But a reset definitely deletes any scratch block
468 pool->reset();
470
471 // Reserving more than what's available adds a scratch block, and current block remains avail.
472 size_t avail = pool->currentBlock()->avail();
473 size_t reserve = avail + 1;
474 pool->reserve(reserve);
475 REPORTER_ASSERT(r, (size_t) pool->currentBlock()->avail() == avail);
476 // And rounds up to the fixed size of this pool's growth policy
479
480 // Allocating more than avail activates the scratch block (so totalSize doesn't change)
481 size_t preAllocTotalSize = pool->totalSize();
482 pool->allocate<1>(avail + 1);
484 REPORTER_ASSERT(r, pool->totalSize() == preAllocTotalSize);
485
486 // When reserving less than what's still available in the current block, no scratch block is
487 // added.
488 pool->reserve(pool->currentBlock()->avail());
490
491 // Unless checking available bytes is disabled
492 pool->reserve(pool->currentBlock()->avail(), SkBlockAllocator::kIgnoreExistingBytes_Flag);
494
495 // If kIgnoreGrowthPolicy is specified, the new scratch block should not have been updated to
496 // follow the size (which in this case is a fixed 256 bytes per block).
497 pool->resetScratchSpace();
501
502 // When requesting an allocation larger than the current block and the scratch block, a new
503 // block is added, and the scratch block remains scratch.
504 SkBlockAllocator::Block* oldTail = pool->currentBlock();
505 avail = oldTail->avail();
506 size_t scratchAvail = 2 * avail;
507 pool->reserve(scratchAvail);
509
510 // This allocation request is higher than oldTail's available, and the scratch size so we
511 // should add a new block and scratch size should stay the same.
513 pool->allocate<1>(scratchAvail + 1);
514 REPORTER_ASSERT(r, pool->currentBlock() != oldTail);
516}
static size_t ScratchBlockSize(SkSBlockAllocator< N > &pool)

◆ DEF_TEST() [14/14]

DEF_TEST ( SkBlockAllocatorStealBlocks  ,
 
)

Definition at line 518 of file SkBlockAllocatorTest.cpp.

518 {
521
522 add_block(poolA);
523 add_block(poolA);
524 add_block(poolA);
525
526 add_block(poolB);
527 add_block(poolB);
528
529 char* bAlloc = (char*) alloc_byte(poolB);
530 *bAlloc = 't';
531
532 const SkBlockAllocator::Block* allocOwner = poolB->findOwningBlock(bAlloc);
533
534 REPORTER_ASSERT(r, block_count(poolA) == 4);
535 REPORTER_ASSERT(r, block_count(poolB) == 3);
536
537 size_t aSize = poolA->totalSize();
538 size_t bSize = poolB->totalSize();
539 size_t theftSize = bSize - poolB->preallocSize();
540
541 // This steal should move B's 2 heap blocks to A, bringing A to 6 and B to just its head
542 poolA->stealHeapBlocks(poolB.allocator());
543 REPORTER_ASSERT(r, block_count(poolA) == 6);
544 REPORTER_ASSERT(r, block_count(poolB) == 1);
545 REPORTER_ASSERT(r, poolB->preallocSize() == poolB->totalSize());
546 REPORTER_ASSERT(r, poolA->totalSize() == aSize + theftSize);
547
548 REPORTER_ASSERT(r, *bAlloc == 't');
549 REPORTER_ASSERT(r, (uintptr_t) poolA->findOwningBlock(bAlloc) == (uintptr_t) allocOwner);
550 REPORTER_ASSERT(r, !poolB->findOwningBlock(bAlloc));
551
552 // Redoing the steal now that B is just a head block should be a no-op
553 poolA->stealHeapBlocks(poolB.allocator());
554 REPORTER_ASSERT(r, block_count(poolA) == 6);
555 REPORTER_ASSERT(r, block_count(poolB) == 1);
556}
size_t totalSize() const
void stealHeapBlocks(SkBlockAllocator *other)
Block * findOwningBlock(const void *ptr)
SkBlockAllocator * allocator()

◆ get_block()

template<size_t N>
static Block * get_block ( SkSBlockAllocator< N > &  pool,
int  blockIndex 
)
static

Definition at line 41 of file SkBlockAllocatorTest.cpp.

41 {
42 Block* found = nullptr;
43 int i = 0;
44 for (Block* b: pool->blocks()) {
45 if (i == blockIndex) {
46 found = b;
47 break;
48 }
49 i++;
50 }
51
52 SkASSERT(found != nullptr);
53 return found;
54}

◆ run_owning_block_test()

template<size_t Align, size_t Padding>
static void run_owning_block_test ( skiatest::Reporter r,
SkBlockAllocator pool 
)
static

Definition at line 626 of file SkBlockAllocatorTest.cpp.

626 {
627 auto br = pool->allocate<Align, Padding>(1);
628
629 void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
630 void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);
631
632 Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
633 REPORTER_ASSERT(r, block == br.fBlock);
634
635 block = pool->owningBlock<Align>(metaPtr, br.fStart);
636 REPORTER_ASSERT(r, block == br.fBlock);
637
638 block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
639 REPORTER_ASSERT(r, block == br.fBlock);
640}
Align

◆ run_owning_block_tests()

template<size_t Padding>
static void run_owning_block_tests ( skiatest::Reporter r,
SkBlockAllocator pool 
)
static

Definition at line 643 of file SkBlockAllocatorTest.cpp.

643 {
644 run_owning_block_test<1, Padding>(r, pool);
645 run_owning_block_test<2, Padding>(r, pool);
646 run_owning_block_test<4, Padding>(r, pool);
647 run_owning_block_test<8, Padding>(r, pool);
648 run_owning_block_test<16, Padding>(r, pool);
649 run_owning_block_test<32, Padding>(r, pool);
650 run_owning_block_test<64, Padding>(r, pool);
651 run_owning_block_test<128, Padding>(r, pool);
652}

◆ total_size()

template<size_t N>
static size_t total_size ( SkSBlockAllocator< N > &  pool)
static

Definition at line 60 of file SkBlockAllocatorTest.cpp.

60 {
62}