sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers = std::make_unique<Buffer[]>(fMaxBuffersToCache);
    }
}

sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}
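
// Recycling note: a cached slot is reused only when its buffer's sole owner is
// the cache itself (fBuffer->unique()); otherwise a fresh GrCpuBuffer is made.
// A minimal caller sketch (hypothetical local names):
//
//   sk_sp<GrBufferAllocPool::CpuBufferCache> cache =
//           GrBufferAllocPool::CpuBufferCache::Make(/*maxBuffersToCache=*/4);
//   sk_sp<GrCpuBuffer> buf = cache->makeBuffer(GrBufferAllocPool::kDefaultBufferSize,
//                                              /*mustBeInitialized=*/true);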

void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)
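
// The do { ... } while (false) wrapper makes UNMAP_BUFFER(block); expand to a
// single statement so it composes safely inside unbraced if/else. The instant
// trace event records what fraction of the block was never written before the
// unmap.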

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

void GrBufferAllocPool::deleteBlocks() {
    if (!fBlocks.empty()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
        VALIDATE();
    }
}
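
// If the active block is a GPU buffer that was never mapped, the caller's
// writes landed in fCpuStagingBuffer, so unmap() uploads them with
// flushCpuData() instead of calling GrGpuBuffer::unmap().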

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.size() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.size(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }
    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.size() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

static inline size_t align_down(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}
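
// Worked example: align_up_pad(13, 4) == 3 (13 + 3 reaches the next multiple
// of 4), align_up_pad(16, 4) == 0, and align_down(13, 4) == 12.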

void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}
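
// Caller sketch (hypothetical names): the returned pointer is CPU-writable,
// while *buffer and *offset identify where the bytes land for a later draw.
//
//   sk_sp<const GrBuffer> buffer;
//   size_t offset;
//   if (void* ptr = pool.makeSpace(byteCount, /*alignment=*/4, &buffer, &offset)) {
//       // Write exactly byteCount bytes at ptr; they live at offset in buffer.
//   }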

void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    size_t usedBytes = (fBlocks.empty()) ? 0 : fBlocks.back().fBuffer->size() -
                                               fBlocks.back().fBytesFree;
    size_t pad = align_up_pad(usedBytes, alignment);
    if (!fBufferPtr || fBlocks.empty() || (minSize + pad) > fBlocks.back().fBytesFree) {
        // The current block (if any) can't fit minSize; start a new one.
        if (!this->createBlock(fallbackSize)) {
            return nullptr;
        }
        usedBytes = 0;
        pad = 0;
    }
    SkASSERT(fBufferPtr);

    // Consume the alignment padding, then hand the caller all remaining
    // aligned space in the block.
    memset(static_cast<char*>(fBufferPtr) + usedBytes, 0, pad);
    usedBytes += pad;
    fBlocks.back().fBytesFree -= pad;
    fBytesInUse += pad;

    size_t size = align_down(fBlocks.back().fBytesFree, alignment);
    *offset = usedBytes;
    *buffer = fBlocks.back().fBuffer;
    *actualSize = size;
    fBlocks.back().fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return static_cast<char*>(fBufferPtr) + usedBytes;
}
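
// Design note: once minSize fits, the caller receives every remaining aligned
// byte of the block via *actualSize; unused space is expected to be returned
// through putBack() below.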

void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();
    if (!bytes) {
        return;
    }
    SkASSERT(!fBlocks.empty());
    BufferBlock& block = fBlocks.back();
    // The caller can only put back bytes from the most recent makeSpace call,
    // so they must fit within the current block.
    SkASSERT(bytes <= (block.fBuffer->size() - block.fBytesFree));
    block.fBytesFree += bytes;
    fBytesInUse -= bytes;
    // Don't keep blocks with no used bytes; unmap and delete an emptied block.
    if (block.fBytesFree == block.fBuffer->size()) {
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(block);
        }
        this->destroyBlock();
    }
    VALIDATE();
}

bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = std::max(requestSize, kDefaultBufferSize);
    VALIDATE();

    BufferBlock& block = fBlocks.push_back();
    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.size() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }
    SkASSERT(!fBufferPtr);

    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
    } else if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
               size > fGpu->caps()->bufferMapThreshold()) {
        fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }
    VALIDATE(true);
    return true;
}
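
// Mapping policy: CPU-backed blocks expose their storage directly via data().
// GPU blocks are mapped only when the caps allow it and the block exceeds
// bufferMapThreshold(); otherwise writes accumulate in fCpuStagingBuffer and
// are uploaded later by flushCpuData().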

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, 0, flushSize, /*preserve=*/false);
    VALIDATE(true);
}
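
// The same caps threshold picks the upload path here: large flushes map,
// memcpy, and unmap; small ones use GrGpuBuffer::updateData(), which is
// typically cheaper for small transfers.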

sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    const GrCaps& caps = *fGpu->caps();
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
    if (caps.preferClientSideDynamicBuffers() ||
        (fBufferType == GrGpuBufferType::kDrawIndirect && caps.useClientSideIndirectBuffers())) {
        bool mustInitialize = caps.mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size,
                                          fBufferType,
                                          kDynamic_GrAccessPattern,
                                          GrResourceProvider::ZeroInit::kNo);
}

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize, buffer, &offset);
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(fallbackVertexCount >= minVertexCount);
    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize, buffer, &offset, &actualSize);
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);
    return ptr;
}
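
// Vertex-pool caller sketch (hypothetical names):
//
//   sk_sp<const GrBuffer> vbuf;
//   int startVertex, actualCount;
//   void* verts = vertexPool.makeSpaceAtLeast(stride, minCount, fallbackCount,
//                                             &vbuf, &startVertex, &actualCount);
//   // On success, write up to actualCount vertices of stride bytes each and
//   // draw from vbuf beginning at startVertex.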

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t), buffer, &offset);
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(fallbackIndexCount >= minIndexCount);
    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t), buffer, &offset, &actualSize);
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}
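
// Indices are 16-bit throughout this pool: byte sizes are computed with
// SkSafeMath::Mul(count, sizeof(uint16_t)), and buffer offsets convert back to
// index positions by dividing by sizeof(uint16_t).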