template<size_t N>
static size_t ScratchBlockSize(SkSBlockAllocator<N>& pool) {
    return (size_t) pool->scratchBlockSize();
}
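// get_block() helper: presumably scans the pool's block list for the blockIndex'th block.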
Block* found = nullptr;
if (i == blockIndex) {
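// add_block() helper: allocate in half-prealloc chunks until the allocator rolls over to a new block.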
while (pool->currentBlock() == current) {
    pool->template allocate<4>(pool->preallocSize() / 2);
template<size_t N>
static void* alloc_byte(SkSBlockAllocator<N>& pool) {
    auto br = pool->template allocate<1>(1);
    return br.fBlock->ptr(br.fAlignedOffset);
}
REPORTER_ASSERT(r, stack.preallocUsableSpace() == (size_t) stack.currentBlock()->avail());
void* mem = operator new(1024);
auto validate_ptr = [&](int align, int size,
                        SkBlockAllocator::ByteRange br,
                        SkBlockAllocator::ByteRange* prevBR) {
    uintptr_t pt = reinterpret_cast<uintptr_t>(br.fBlock->ptr(br.fAlignedOffset));
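    // Each new allocation must begin past the end of the previous one.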
    uintptr_t prevEnd = reinterpret_cast<uintptr_t>(prevBR->fBlock->ptr(prevBR->fEnd - 1));
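    // The entire aligned byte range should be safe to write; scribble over it so
    // sanitizers can catch an out-of-bounds range.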
    std::memset(br.fBlock->ptr(br.fAlignedOffset), 0xFF, br.fEnd - br.fAlignedOffset);
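// Allocate with successively larger alignments and sizes, validating each against its predecessor.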
auto p1 = pool->allocate<1>(14);
validate_ptr(1, 14, p1, nullptr);

auto p2 = pool->allocate<2>(24);
validate_ptr(2, 24, p2, &p1);

auto p4 = pool->allocate<4>(28);
validate_ptr(4, 28, p4, &p2);

auto p8 = pool->allocate<8>(40);
validate_ptr(8, 40, p8, &p4);

auto p16 = pool->allocate<16>(64);
validate_ptr(16, 64, p16, &p8);

auto p32 = pool->allocate<32>(96);
validate_ptr(32, 96, p32, &p16);
size_t avail = pool->currentBlock()->avail<4>();
auto pAvail = pool->allocate<4>(avail);
validate_ptr(4, avail, pAvail, &p32);
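// The current block is now exactly full, so this allocation must come from a new block.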
auto pNextBlock = pool->allocate<4>(4);
validate_ptr(4, 4, pNextBlock, nullptr);
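// Requesting double the remaining space cannot be satisfied in the current block.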
size_t bigRequest = pool->currentBlock()->avail<4>() * 2;
auto pTooBig = pool->allocate<4>(bigRequest);
validate_ptr(4, bigRequest, pTooBig, nullptr);
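// An oversized request (4096 bytes) should still succeed, presumably via a dedicated larger block.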
auto pReallyTooBig = pool->allocate<4>(4096);
validate_ptr(4, 4096, pReallyTooBig, nullptr);
std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x11, p.fEnd - p.fAlignedOffset);
auto pNext = pool->allocate<4>(16);
REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                   reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
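// Fill out the remainder of the block, presumably to exercise resize() against a full block.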
int fillBlock = p.fBlock->avail<4>();
std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x22, p.fEnd - p.fAlignedOffset);
int shrinkTo32 = p.fStart - p.fEnd + 32;
p.fEnd += shrinkTo32;
std::memset(p.fBlock->ptr(p.fAlignedOffset), 0x33, p.fEnd - p.fAlignedOffset);
pNext = pool->allocate<4>(16);
REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(pNext.fAlignedOffset)) -
                   reinterpret_cast<uintptr_t>(pNext.fBlock->ptr(p.fAlignedOffset)) == 32);
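// A byte range can be shrunk to zero bytes, but never below zero.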
int shrinkTo0 = pNext.fStart - pNext.fEnd;
REPORTER_ASSERT(r, !pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0 - 1));
REPORTER_ASSERT(r, pNext.fBlock->resize(pNext.fStart, pNext.fEnd, shrinkTo0));
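// A series of interleaved allocations, presumably exercising block release and reuse.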
auto p = pool->allocate<8>(32);
auto p2 = pool->allocate<8>(32);
auto p3 = pool->allocate<8>(64);
auto p4 = pool->allocate<8>(16);
auto p5 = pool->allocate<8>(96);
p = pool->allocate<8>(32);
std::vector<SkBlockAllocator::ByteRange> ptrs;
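// Fill the pool with 32 small fixed-size allocations...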
for (int i = 0; i < 32; ++i) {
    ptrs.push_back(pool->allocate<4>(16));
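// ...then walk them back in reverse (LIFO) order, presumably releasing each one.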
for (int i = 31; i >= 0; --i) {
static constexpr int kInitSize = 128;
static constexpr int kBlockCount = 5;
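// Expected per-block sizes after each growth step; the rows presumably correspond to the
// kFixed, kLinear, kFibonacci, and kExponential growth policies, in that order.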
{ kInitSize, kInitSize, kInitSize, kInitSize, kInitSize },
{ kInitSize, 2 * kInitSize, 3 * kInitSize, 4 * kInitSize, 5 * kInitSize },
{ kInitSize, kInitSize, 2 * kInitSize, 3 * kInitSize, 5 * kInitSize },
{ kInitSize, 2 * kInitSize, 4 * kInitSize, 8 * kInitSize, 16 * kInitSize },
for (int i = 1; i < kBlockCount; ++i) {
static constexpr int kBlockIncrement = 1024;
pool->resetScratchSpace();
pool->headBlock()->setMetadata(1);
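// Release every block while iterating forward; blocks() iteration must tolerate this.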
int releaseCount = 0;
for (auto* b : pool->blocks()) {
    pool->releaseBlock(b);
pool->headBlock()->setMetadata(1);
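// The same release-during-iteration exercise, but in reverse via rblocks().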
for (auto* b : pool->rblocks()) {
    pool->releaseBlock(b);
size_t total = pool->totalSize();
pool->releaseBlock(pool->currentBlock());
size_t avail = pool->currentBlock()->avail();
size_t reserve = avail + 1;
pool->reserve(reserve);
size_t preAllocTotalSize = pool->totalSize();
pool->allocate<1>(avail + 1);
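// Reserving no more than what's already available should be a no-op.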
pool->reserve(pool->currentBlock()->avail());
pool->resetScratchSpace();
avail = oldTail->avail();
size_t scratchAvail = 2 * avail;
pool->reserve(scratchAvail);
pool->allocate<1>(scratchAvail + 1);
REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p1.fBlock->ptr(p1.fAlignedOffset)) % 16 == 0);
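// The metadata object sits immediately before the 16-byte-aligned user allocation.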
p2.fBlock->ptr(p2.fAlignedOffset - sizeof(TestMetaBig)));
REPORTER_ASSERT(r, reinterpret_cast<uintptr_t>(p2.fBlock->ptr(p2.fAlignedOffset)) % 16 == 0);
pool->setMetadata(4);
pool->releaseBlock(pool->headBlock());
template<size_t Align, size_t Padding>
static void run_owning_block_test(skiatest::Reporter* r, SkBlockAllocator* pool) {
    auto br = pool->allocate<Align, Padding>(1);
    void* userPtr = br.fBlock->ptr(br.fAlignedOffset);
    void* metaPtr = br.fBlock->ptr(br.fAlignedOffset - Padding);
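    // owningBlock() should map both the user pointer and the metadata pointer back to their block.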
    Block* block = pool->owningBlock<Align, Padding>(userPtr, br.fStart);
    block = pool->owningBlock<Align>(metaPtr, br.fStart);
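    // Recomputing the block address by hand should agree with owningBlock().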
    block = reinterpret_cast<Block*>(reinterpret_cast<uintptr_t>(userPtr) - br.fAlignedOffset);
template<size_t Padding>
static void run_owning_block_tests(skiatest::Reporter* r, SkBlockAllocator* pool) {
    run_owning_block_test<1, Padding>(r, pool);
    run_owning_block_test<2, Padding>(r, pool);
    run_owning_block_test<4, Padding>(r, pool);
    run_owning_block_test<8, Padding>(r, pool);
    run_owning_block_test<16, Padding>(r, pool);
    run_owning_block_test<32, Padding>(r, pool);
    run_owning_block_test<64, Padding>(r, pool);
    run_owning_block_test<128, Padding>(r, pool);
run_owning_block_tests<1>(r, pool.allocator());
run_owning_block_tests<2>(r, pool.allocator());
run_owning_block_tests<4>(r, pool.allocator());
run_owning_block_tests<8>(r, pool.allocator());
run_owning_block_tests<16>(r, pool.allocator());
run_owning_block_tests<32>(r, pool.allocator());
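// Non-power-of-two paddings should be handled as well.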
run_owning_block_tests<3>(r, pool.allocator());
run_owning_block_tests<9>(r, pool.allocator());
run_owning_block_tests<17>(r, pool.allocator());