static constexpr size_t kVertexBufferSize  = 16 << 10;
static constexpr size_t kIndexBufferSize   =  2 << 10;
static constexpr size_t kUniformBufferSize =  2 << 10;
static constexpr size_t kStorageBufferSize =  2 << 10;

// Combined upper bound for all static vertex/index data created by the StaticBufferManager
// (checked by the assert in StaticBufferManager::finalize()).
[[maybe_unused]] static constexpr size_t kMaxStaticDataSize = 6 << 10;
size_t sufficient_block_size(size_t requiredBytes, size_t blockSize) {
    // Grow in whole blocks, but never request more than kMaxSize.
    size_t maxBlocks = kMaxSize / blockSize;
    size_t blocks = (requiredBytes / blockSize) + 1;
    size_t bufferSize = blocks > maxBlocks ? kMaxSize : (blocks * blockSize);
    SkASSERT(requiredBytes < bufferSize);
    return bufferSize;
}
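// Worked example (illustrative values, not from the original source): with blockSize = 2 << 10
// (2 KB), a request for 5000 bytes rounds up to 3 blocks = 6144 bytes; a request that would need
// more than kMaxSize / blockSize blocks is clamped to kMaxSize.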
bool can_fit(size_t requestedSize,
             size_t allocatedSize,
             size_t currentOffset,
             size_t alignment) {
    size_t startOffset = SkAlignTo(currentOffset, alignment);
    return requestedSize <= (allocatedSize - startOffset);
}
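// Illustrative example (assumed values): with alignment = 256 and currentOffset = 100, startOffset
// becomes 256, so a 1 KB request fits in a 2 KB buffer while a 2 KB request does not.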
size_t starting_alignment(BufferType type, bool useTransferBuffers, const Caps* caps) {
    // Vertex and index data are aligned to 4 bytes by default; uniform and storage data use the
    // backend-required alignments.
    size_t alignment = 4;
    if (type == BufferType::kUniform) {
        alignment = caps->requiredUniformBufferAlignment();
    } else if (type == BufferType::kStorage || type == BufferType::kVertexStorage ||
               type == BufferType::kIndexStorage || type == BufferType::kIndirect) {
        alignment = caps->requiredStorageBufferAlignment();
    }
    if (useTransferBuffers) {
        alignment = std::max(alignment, caps->requiredTransferBufferAlignment());
    }
    return alignment;
}
ScratchBuffer::ScratchBuffer(size_t size, size_t alignment, sk_sp<Buffer> buffer,
                             DrawBufferManager* owner)
        : fSize(size)
        , fAlignment(alignment)
        , fBuffer(std::move(buffer))
        , fOwner(owner) {}
ScratchBuffer::~ScratchBuffer() { this->returnToPool(); }
BindBufferInfo ScratchBuffer::suballocate(size_t requiredBytes) {
    if (!this->isValid()) {
        return {};
    }
    if (!can_fit(requiredBytes, fBuffer->size(), fOffset, fAlignment)) {
        return {};
    }
    const size_t offset = SkAlignTo(fOffset, fAlignment);
    fOffset = offset + requiredBytes;
    return {fBuffer.get(), offset};
}
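// suballocate() is a simple bump allocator: each call aligns the running offset and then advances
// it past the new allocation, so sub-allocations from one ScratchBuffer never overlap.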
void ScratchBuffer::returnToPool() {
    if (fOwner && fBuffer) {
        fOwner->fReusableScratchStorageBuffers.push_back(std::move(fBuffer));
    }
}
DrawBufferManager::DrawBufferManager(ResourceProvider* resourceProvider,
                                     const Caps* caps,
                                     UploadBufferManager* uploadManager)
        : fResourceProvider(resourceProvider)
        , fCaps(caps)
        , fUploadManager(uploadManager)
        , fCurrentBuffers{{{BufferType::kVertex,        kVertexBufferSize,  caps},
                           {BufferType::kIndex,         kIndexBufferSize,   caps},
                           {BufferType::kUniform,       kUniformBufferSize, caps},
                           {BufferType::kStorage,       kStorageBufferSize, caps},  // mapped
                           {BufferType::kStorage,       kStorageBufferSize, caps},  // GPU-only
                           {BufferType::kVertexStorage, kVertexBufferSize,  caps},
                           {BufferType::kIndexStorage,  kIndexBufferSize,   caps},
                           {BufferType::kIndirect,      kStorageBufferSize, caps}}} {}

DrawBufferManager::~DrawBufferManager() {}
DrawBufferManager::BufferInfo::BufferInfo(BufferType type, size_t blockSize, const Caps* caps)
        : fType(type)
        , fStartAlignment(starting_alignment(type, !caps->drawBufferCanBeMapped(), caps))
        , fBlockSize(SkAlignTo(blockSize, fStartAlignment)) {}
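// Rounding the block size up to the starting alignment means buffers grown in whole blocks by
// sufficient_block_size() keep sizes that are multiples of the required alignment.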
std::pair<VertexWriter, BindBufferInfo> DrawBufferManager::getVertexWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kVertexBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "VertexBuffer");
    return {VertexWriter(ptr, requiredBytes), bindInfo};
}
void DrawBufferManager::returnVertexBytes(size_t unusedBytes) {
    if (fMappingFailed) {
        // The written data went nowhere, so there is nothing to return.
        return;
    }
    SkASSERT(fCurrentBuffers[kVertexBufferIndex].fOffset >= unusedBytes);
    fCurrentBuffers[kVertexBufferIndex].fOffset -= unusedBytes;
}
std::pair<IndexWriter, BindBufferInfo> DrawBufferManager::getIndexWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndexBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "IndexBuffer");
    return {IndexWriter(ptr, requiredBytes), bindInfo};
}
std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getUniformWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kUniformBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
    return {UniformWriter(ptr, requiredBytes), bindInfo};
}
std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getSsboWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kStorageBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
    return {UniformWriter(ptr, requiredBytes), bindInfo};
}
std::pair<void*, BindBufferInfo> DrawBufferManager::getUniformPointer(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kUniformBufferIndex];
    return this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
}
std::pair<void*, BindBufferInfo> DrawBufferManager::getStoragePointer(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kStorageBufferIndex];
    return this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
}
BindBufferInfo DrawBufferManager::getStorage(size_t requiredBytes, ClearBuffer cleared) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kGpuOnlyStorageBufferIndex];
    return this->prepareBindBuffer(&info,
                                   requiredBytes,
                                   "StorageBuffer",
                                   /*supportCpuUpload=*/false,
                                   cleared);
}
BindBufferInfo DrawBufferManager::getVertexStorage(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kVertexStorageBufferIndex];
    return this->prepareBindBuffer(&info, requiredBytes, "VertexStorageBuffer");
}
BindBufferInfo DrawBufferManager::getIndexStorage(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndexStorageBufferIndex];
    return this->prepareBindBuffer(&info, requiredBytes, "IndexStorageBuffer");
}
BindBufferInfo DrawBufferManager::getIndirectStorage(size_t requiredBytes, ClearBuffer cleared) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndirectStorageBufferIndex];
    return this->prepareBindBuffer(&info,
                                   requiredBytes,
                                   "IndirectStorageBuffer",
                                   /*supportCpuUpload=*/false,
                                   cleared);
}
ScratchBuffer DrawBufferManager::getScratchStorage(size_t requiredBytes) {
    if (!requiredBytes || fMappingFailed) {
        return {};
    }

    auto& info = fCurrentBuffers[kStorageBufferIndex];
    size_t bufferSize = sufficient_block_size(requiredBytes, info.fBlockSize);

    // Reuse a pooled scratch buffer if one is large enough; otherwise create a new one.
    sk_sp<Buffer> buffer = this->findReusableSbo(bufferSize);
    if (!buffer) {
        buffer = fResourceProvider->findOrCreateBuffer(
                bufferSize, BufferType::kStorage, AccessPattern::kGpuOnly, "ScratchStorageBuffer");
        if (!buffer) {
            this->onFailedBuffer();
            return {};
        }
    }
    return {requiredBytes, info.fStartAlignment, std::move(buffer), this};
}
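// Hypothetical usage sketch (names assumed, not part of this file): a caller grabs transient
// GPU-only storage and sub-allocates from it; the ScratchBuffer's destructor returns the Buffer
// to the reuse pool via returnToPool().
//
//     ScratchBuffer scratch = bufferMgr->getScratchStorage(/*requiredBytes=*/4096);
//     BindBufferInfo slice = scratch.suballocate(256);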
void DrawBufferManager::onFailedBuffer() {
    fMappingFailed = true;

    // Clean up and unmap everything now.
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, _] : fUsedBuffers) {
        if (buffer->isMapped()) {
            buffer->unmap();
        }
    }
    fUsedBuffers.clear();

    for (auto& info : fCurrentBuffers) {
        if (info.fBuffer && info.fBuffer->isMapped()) {
            info.fBuffer->unmap();
        }
        info.fBuffer = nullptr;
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}
void DrawBufferManager::transferToRecording(Recording* recording) {
    SkASSERT(!fMappingFailed);

    if (!fClearList.empty()) {
        recording->priv().addTask(ClearBuffersTask::Make(std::move(fClearList)));
    }

    // Transfer the buffers in the reuse pool to the recording.
    for (auto& buffer : fReusableScratchStorageBuffers) {
        recording->priv().addResourceRef(std::move(buffer));
    }
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, transferBuffer] : fUsedBuffers) {
        if (transferBuffer) {
            // Copy the staged data into the GPU-private buffer; the transfer buffer itself is
            // owned by the upload manager.
            size_t copySize = buffer->size();
            recording->priv().addTask(CopyBufferToBufferTask::Make(transferBuffer.fBuffer,
                                                                   transferBuffer.fOffset,
                                                                   sk_ref_sp(buffer.get()),
                                                                   /*dstOffset=*/0,
                                                                   copySize));
        } else if (buffer->isMapped()) {
            buffer->unmap();
        }
        recording->priv().addResourceRef(std::move(buffer));
    }
    fUsedBuffers.clear();

    // The currently active buffers were never moved into fUsedBuffers, so hand them over as well.
    for (auto& info : fCurrentBuffers) {
        if (!info.fBuffer) {
            continue;
        }
        if (info.fTransferBuffer) {
            recording->priv().addTask(CopyBufferToBufferTask::Make(info.fTransferBuffer.fBuffer,
                                                                   info.fTransferBuffer.fOffset,
                                                                   sk_ref_sp(info.fBuffer.get()),
                                                                   /*dstOffset=*/0,
                                                                   info.fBuffer->size()));
        } else if (info.fBuffer->isMapped()) {
            info.fBuffer->unmap();
        }
        recording->priv().addResourceRef(std::move(info.fBuffer));
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}
std::pair<void*, BindBufferInfo> DrawBufferManager::prepareMappedBindBuffer(
        BufferInfo* info,
        size_t requiredBytes,
        std::string_view label) {
    BindBufferInfo bindInfo = this->prepareBindBuffer(info,
                                                      requiredBytes,
                                                      label,
                                                      /*supportCpuUpload=*/true);
    if (!bindInfo.fBuffer) {
        // prepareBindBuffer() already called onFailedBuffer().
        SkASSERT(fMappingFailed);
        return {nullptr, {}};
    }

    void* mapPtr = info->fTransferBuffer ? info->fTransferMapPtr : info->fBuffer->map();
    if (!mapPtr) {
        // Mapping the draw buffer directly failed.
        this->onFailedBuffer();
        return {nullptr, {}};
    }

    mapPtr = SkTAddOffset<void>(mapPtr, static_cast<ptrdiff_t>(bindInfo.fOffset));
    return {mapPtr, bindInfo};
}
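// The returned pointer targets either the staging memory of a transfer buffer (when the draw
// buffer cannot be mapped directly) or the mapped draw buffer itself; in both cases it is already
// offset to the caller's sub-allocation within that buffer.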
BindBufferInfo DrawBufferManager::prepareBindBuffer(BufferInfo* info,
                                                    size_t requiredBytes,
                                                    std::string_view label,
                                                    bool supportCpuUpload,
                                                    ClearBuffer cleared) {
    SkASSERT(info);
    SkASSERT(requiredBytes);

    if (fMappingFailed) {
        return {};
    }

    // A transfer buffer is only needed when the caller uploads CPU data but the draw buffer
    // cannot be mapped directly.
    bool useTransferBuffer = supportCpuUpload && !fCaps->drawBufferCanBeMapped();

    if (info->fBuffer &&
        !can_fit(requiredBytes, info->fBuffer->size(), info->fOffset, info->fStartAlignment)) {
        fUsedBuffers.emplace_back(std::move(info->fBuffer), info->fTransferBuffer);
        info->fTransferBuffer = {};
    }

    if (!info->fBuffer) {
        // The buffer can be GPU-only if no CPU upload is needed or the upload goes through a
        // transfer buffer.
        AccessPattern accessPattern = (useTransferBuffer || !supportCpuUpload)
                                              ? AccessPattern::kGpuOnly
                                              : AccessPattern::kHostVisible;
        size_t bufferSize = sufficient_block_size(requiredBytes, info->fBlockSize);
        info->fBuffer = fResourceProvider->findOrCreateBuffer(bufferSize,
                                                              info->fType,
                                                              accessPattern,
                                                              label);
        info->fOffset = 0;
        if (!info->fBuffer) {
            this->onFailedBuffer();
            return {};
        }
    }

    if (useTransferBuffer && !info->fTransferBuffer) {
        std::tie(info->fTransferMapPtr, info->fTransferBuffer) =
                fUploadManager->makeBindInfo(info->fBuffer->size(),
                                             fCaps->requiredTransferBufferAlignment(),
                                             "TransferForDataBuffer");
        if (!info->fTransferBuffer) {
            this->onFailedBuffer();
            return {};
        }
    }

    info->fOffset = SkAlignTo(info->fOffset, info->fStartAlignment);
    BindBufferInfo bindInfo{info->fBuffer.get(), info->fOffset};
    info->fOffset += requiredBytes;

    if (cleared == ClearBuffer::kYes) {
        fClearList.push_back({bindInfo.fBuffer, bindInfo.fOffset, requiredBytes});
    }
    return bindInfo;
}
sk_sp<Buffer> DrawBufferManager::findReusableSbo(size_t bufferSize) {
    for (int i = 0; i < fReusableScratchStorageBuffers.size(); ++i) {
        sk_sp<Buffer>* buffer = &fReusableScratchStorageBuffers[i];
        if ((*buffer)->size() >= bufferSize) {
            auto found = std::move(*buffer);
            // Fill the hole left by the move (if necessary) and shrink the pool.
            if (i < fReusableScratchStorageBuffers.size() - 1) {
                *buffer = std::move(fReusableScratchStorageBuffers.back());
            }
            fReusableScratchStorageBuffers.pop_back();
            return found;
        }
    }
    return nullptr;
}
StaticBufferManager::StaticBufferManager(ResourceProvider* resourceProvider,
                                         const Caps* caps)
        : fResourceProvider(resourceProvider)
        , fUploadManager(resourceProvider, caps)
        , fRequiredTransferAlignment(caps->requiredTransferBufferAlignment())
        , fVertexBufferInfo(BufferType::kVertex, caps)
        , fIndexBufferInfo(BufferType::kIndex, caps) {}

StaticBufferManager::BufferInfo::BufferInfo(BufferType type, const Caps* caps)
        : fBufferType(type)
        , fAlignment(starting_alignment(type, /*useTransferBuffers=*/true, caps))
        , fTotalRequiredBytes(0) {}
VertexWriter StaticBufferManager::getVertexWriter(size_t size, BindBufferInfo* binding) {
    void* data = this->prepareStaticData(&fVertexBufferInfo, size, binding);
    return VertexWriter{data, size};
}
VertexWriter StaticBufferManager::getIndexWriter(size_t size, BindBufferInfo* binding) {
    void* data = this->prepareStaticData(&fIndexBufferInfo, size, binding);
    return VertexWriter{data, size};
}
void* StaticBufferManager::prepareStaticData(BufferInfo* info,
                                             size_t size,
                                             BindBufferInfo* target) {
    // Zero out the target binding; it is only filled in once the static buffer has been created
    // and the copies are recorded in createAndUpdateBindings().
    *target = {};
    if (!size || fMappingFailed) {
        return nullptr;
    }

    // Copies must cover an aligned number of bytes so the static buffer can be packed back to
    // back without breaking alignment.
    size = SkAlignTo(size, info->fAlignment);

    auto [transferMapPtr, transferBindInfo] =
            fUploadManager.makeBindInfo(size,
                                        fRequiredTransferAlignment,
                                        "TransferForStaticBuffer");
    if (!transferMapPtr) {
        SKGPU_LOG_E("Failed to create or map transfer buffer that initializes static GPU data.");
        fMappingFailed = true;
        return nullptr;
    }

    info->fData.push_back({transferBindInfo, target, size});
    info->fTotalRequiredBytes += size;
    return transferMapPtr;
}
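// Callers write their static vertex/index data through the returned pointer into the transfer
// buffer; the data only reaches the GPU-private static buffer when finalize() replays the
// recorded copy ranges via createAndUpdateBindings().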
bool StaticBufferManager::BufferInfo::createAndUpdateBindings(
        ResourceProvider* resourceProvider,
        Context* context,
        QueueManager* queueManager,
        GlobalCache* globalCache,
        std::string_view label) const {
    if (!fTotalRequiredBytes) {
        return true; // No buffer needed
    }

    sk_sp<Buffer> staticBuffer = resourceProvider->findOrCreateBuffer(
            fTotalRequiredBytes, fBufferType, AccessPattern::kGpuOnly, label);
    if (!staticBuffer) {
        SKGPU_LOG_E("Failed to create static buffer for type %d of size %zu bytes.\n",
                    (int) fBufferType, fTotalRequiredBytes);
        return false;
    }

    size_t offset = 0;
    for (const CopyRange& data : fData) {
        // Each copy range's size is already aligned, so the static buffer can be filled by simply
        // advancing the offset.
        data.fTarget->fBuffer = staticBuffer.get();
        data.fTarget->fOffset = offset;

        auto copyTask = CopyBufferToBufferTask::Make(data.fSource.fBuffer,
                                                     data.fSource.fOffset,
                                                     staticBuffer,
                                                     offset,
                                                     data.fSize);
        if (!queueManager->addTask(copyTask.get(), context)) {
            SKGPU_LOG_E("Failed to copy data to static buffer.\n");
            return false;
        }
        offset += data.fSize;
    }

    globalCache->addStaticResource(std::move(staticBuffer));
    return true;
}
StaticBufferManager::FinishResult StaticBufferManager::finalize(Context* context,
                                                                QueueManager* queueManager,
                                                                GlobalCache* globalCache) {
    if (fMappingFailed) {
        return FinishResult::kFailure;
    }

    const size_t totalRequiredBytes = fVertexBufferInfo.fTotalRequiredBytes +
                                      fIndexBufferInfo.fTotalRequiredBytes;
    SkASSERT(totalRequiredBytes <= kMaxStaticDataSize);
    if (!totalRequiredBytes) {
        return FinishResult::kNoWork;
    }

    if (!fVertexBufferInfo.createAndUpdateBindings(fResourceProvider, context, queueManager,
                                                   globalCache, "StaticVertexBuffer")) {
        return FinishResult::kFailure;
    }
    if (!fIndexBufferInfo.createAndUpdateBindings(fResourceProvider, context, queueManager,
                                                  globalCache, "StaticIndexBuffer")) {
        return FinishResult::kFailure;
    }
    queueManager->addUploadBufferManagerRefs(&fUploadManager);

    // The copy tasks now own the transfer buffers and the GlobalCache owns the static buffers,
    // so the per-type infos can be reset for reuse.
    fVertexBufferInfo.reset();
    fIndexBufferInfo.reset();
    return FinishResult::kSuccess;
}