BufferManager.cpp
/*
 * Copyright 2021 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/BufferManager.h"

#include "include/gpu/graphite/Recording.h"
#include "src/gpu/graphite/Caps.h"
#include "src/gpu/graphite/ContextPriv.h"
#include "src/gpu/graphite/GlobalCache.h"
#include "src/gpu/graphite/Log.h"
#include "src/gpu/graphite/QueueManager.h"
#include "src/gpu/graphite/RecordingPriv.h"
#include "src/gpu/graphite/ResourceProvider.h"
#include "src/gpu/graphite/UploadBufferManager.h"
#include "src/gpu/graphite/task/ClearBuffersTask.h"
#include "src/gpu/graphite/task/CopyTask.h"

namespace skgpu::graphite {

namespace {

// TODO: Tune these values on real-world data.
static constexpr size_t kVertexBufferSize  = 16 << 10; // 16 KB
static constexpr size_t kIndexBufferSize   =  2 << 10; //  2 KB
static constexpr size_t kUniformBufferSize =  2 << 10; //  2 KB
static constexpr size_t kStorageBufferSize =  2 << 10; //  2 KB

// The limit for all data created by the StaticBufferManager. This data remains alive for
// the entire SharedContext so we want to keep it small and give a concrete upper bound to
// clients for our steady-state memory usage.
// FIXME: The current usage is 4732 bytes across the static vertex and index buffers, but that
// includes multiple copies of tessellation data and an unoptimized AnalyticRRect mesh. Once those
// issues are addressed, we can tighten this and decide on the transfer buffer sizing as well.
[[maybe_unused]] static constexpr size_t kMaxStaticDataSize = 6 << 10;

size_t sufficient_block_size(size_t requiredBytes, size_t blockSize) {
    // Always request a buffer of at least 'requiredBytes', but keep the sizes in multiples of
    // 'blockSize' for improved reuse.
    static constexpr size_t kMaxSize = std::numeric_limits<size_t>::max();
    size_t maxBlocks  = kMaxSize / blockSize;
    size_t blocks     = (requiredBytes / blockSize) + 1;
    size_t bufferSize = blocks > maxBlocks ? kMaxSize : (blocks * blockSize);
    SkASSERT(requiredBytes < bufferSize);
    return bufferSize;
}
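
// Worked example for sufficient_block_size() (hypothetical numbers, not from the source): with
// requiredBytes = 5000 and blockSize = 4096, blocks = (5000 / 4096) + 1 = 2 and the function
// returns 8192. An exact multiple still rounds up a full block (requiredBytes = 8192 returns
// 12288), which keeps the asserted post-condition requiredBytes < bufferSize strict.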

bool can_fit(size_t requestedSize,
             size_t allocatedSize,
             size_t currentOffset,
             size_t alignment) {
    size_t startOffset = SkAlignTo(currentOffset, alignment);
    return requestedSize <= (allocatedSize - startOffset);
}
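
// Worked example for can_fit() (hypothetical numbers): with allocatedSize = 16384,
// currentOffset = 1000, and alignment = 256, startOffset is aligned up to 1024, leaving
// 16384 - 1024 = 15360 bytes, so any requestedSize up to 15360 fits. Callers perform the same
// SkAlignTo() on the offset before actually suballocating.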

size_t starting_alignment(BufferType type, bool useTransferBuffers, const Caps* caps) {
    // Both vertex and index data are aligned to 4 bytes by default.
    size_t alignment = 4;
    if (type == BufferType::kUniform) {
        alignment = caps->requiredUniformBufferAlignment();
    } else if (type == BufferType::kStorage || type == BufferType::kVertexStorage ||
               type == BufferType::kIndexStorage || type == BufferType::kIndirect) {
        alignment = caps->requiredStorageBufferAlignment();
    }
    if (useTransferBuffers) {
        alignment = std::max(alignment, caps->requiredTransferBufferAlignment());
    }
    return alignment;
}
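
// For example (hypothetical caps values): a uniform buffer on a device reporting
// requiredUniformBufferAlignment() = 256 and requiredTransferBufferAlignment() = 4 starts at
// max(256, 4) = 256-byte alignment when transfer buffers are in use, while vertex and index
// data keep the 4-byte default.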

} // anonymous namespace

// ------------------------------------------------------------------------------------------------
// ScratchBuffer

ScratchBuffer::ScratchBuffer(size_t size, size_t alignment,
                             sk_sp<Buffer> buffer, DrawBufferManager* owner)
        : fSize(size)
        , fAlignment(alignment)
        , fBuffer(std::move(buffer))
        , fOwner(owner) {
    SkASSERT(fSize > 0);
    SkASSERT(fBuffer);
    SkASSERT(fOwner);
}

ScratchBuffer::~ScratchBuffer() { this->returnToPool(); }

BindBufferInfo ScratchBuffer::suballocate(size_t requiredBytes) {
    if (!this->isValid()) {
        return {};
    }
    if (!can_fit(requiredBytes, fBuffer->size(), fOffset, fAlignment)) {
        return {};
    }
    const size_t offset = SkAlignTo(fOffset, fAlignment);
    fOffset = offset + requiredBytes;
    return {fBuffer.get(), offset};
}
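
// Usage sketch (hypothetical caller, not from the source): a ScratchBuffer can be carved into
// several aligned bindings, and returns itself to the owning manager's pool on destruction:
//
//     ScratchBuffer scratch = bufferMgr->getScratchStorage(totalBytes);
//     BindBufferInfo a = scratch.suballocate(bytesA);  // {} if invalid or it cannot fit
//     BindBufferInfo b = scratch.suballocate(bytesB);  // offset is aligned to fAlignment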

void ScratchBuffer::returnToPool() {
    if (fOwner && fBuffer) {
        // TODO: Generalize the pool to other buffer types.
        fOwner->fReusableScratchStorageBuffers.push_back(std::move(fBuffer));
        SkASSERT(!fBuffer);
    }
}

// ------------------------------------------------------------------------------------------------
// DrawBufferManager

DrawBufferManager::DrawBufferManager(ResourceProvider* resourceProvider,
                                     const Caps* caps,
                                     UploadBufferManager* uploadManager)
        : fResourceProvider(resourceProvider)
        , fCaps(caps)
        , fUploadManager(uploadManager)
        , fCurrentBuffers{{
                { BufferType::kVertex,        kVertexBufferSize,  caps },
                { BufferType::kIndex,         kIndexBufferSize,   caps },
                { BufferType::kUniform,       kUniformBufferSize, caps },
                { BufferType::kStorage,       kStorageBufferSize, caps },  // mapped storage
                { BufferType::kStorage,       kStorageBufferSize, caps },  // GPU-only storage
                { BufferType::kVertexStorage, kVertexBufferSize,  caps },
                { BufferType::kIndexStorage,  kIndexBufferSize,   caps },
                { BufferType::kIndirect,      kStorageBufferSize, caps } }} {}

DrawBufferManager::~DrawBufferManager() {}

// For simplicity, if transfer buffers are being used, we align the data to the max of the final
// buffer type's alignment and the cpu->gpu transfer alignment so that the buffers are laid out
// the same way in memory.
DrawBufferManager::BufferInfo::BufferInfo(BufferType type, size_t blockSize, const Caps* caps)
        : fType(type)
        , fStartAlignment(starting_alignment(type, !caps->drawBufferCanBeMapped(), caps))
        , fBlockSize(SkAlignTo(blockSize, fStartAlignment)) {}
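
// For example (hypothetical caps values): with kUniformBufferSize = 2048, a 256-byte uniform
// alignment, and mappable draw buffers, fStartAlignment is 256 and fBlockSize stays 2048 since
// it is already a multiple; a hypothetical 4096-byte alignment would round fBlockSize up to 4096.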

std::pair<VertexWriter, BindBufferInfo> DrawBufferManager::getVertexWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kVertexBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "VertexBuffer");
    return {VertexWriter(ptr, requiredBytes), bindInfo};
}
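
// Usage sketch (hypothetical caller, not from the source; assumes VertexWriter's streaming
// operator and validity check): reserve space, write vertices into the mapped block, and record
// the returned binding for the draw pass:
//
//     auto [writer, bindInfo] = bufferMgr->getVertexWriter(4 * sizeof(SkPoint));
//     if (writer) {
//         writer << SkPoint{0, 0} << SkPoint{1, 0} << SkPoint{0, 1} << SkPoint{1, 1};
//     }
//
// Any over-reserved space can be handed back with returnVertexBytes() below.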

void DrawBufferManager::returnVertexBytes(size_t unusedBytes) {
    if (fMappingFailed) {
        // The caller may be unaware that the written data went nowhere and will still call
        // this function.
        return;
    }
    SkASSERT(fCurrentBuffers[kVertexBufferIndex].fOffset >= unusedBytes);
    fCurrentBuffers[kVertexBufferIndex].fOffset -= unusedBytes;
}

std::pair<IndexWriter, BindBufferInfo> DrawBufferManager::getIndexWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndexBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "IndexBuffer");
    return {IndexWriter(ptr, requiredBytes), bindInfo};
}

std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getUniformWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kUniformBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
    return {UniformWriter(ptr, requiredBytes), bindInfo};
}

std::pair<UniformWriter, BindBufferInfo> DrawBufferManager::getSsboWriter(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kStorageBufferIndex];
    auto [ptr, bindInfo] = this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
    return {UniformWriter(ptr, requiredBytes), bindInfo};
}

std::pair<void*, BindBufferInfo> DrawBufferManager::getUniformPointer(
        size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kUniformBufferIndex];
    return this->prepareMappedBindBuffer(&info, requiredBytes, "UniformBuffer");
}

std::pair<void*, BindBufferInfo> DrawBufferManager::getStoragePointer(
        size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kStorageBufferIndex];
    return this->prepareMappedBindBuffer(&info, requiredBytes, "StorageBuffer");
}

BindBufferInfo DrawBufferManager::getStorage(size_t requiredBytes, ClearBuffer cleared) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kGpuOnlyStorageBufferIndex];
    return this->prepareBindBuffer(&info,
                                   requiredBytes,
                                   "StorageBuffer",
                                   /*supportCpuUpload=*/false,
                                   cleared);
}

BindBufferInfo DrawBufferManager::getVertexStorage(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kVertexStorageBufferIndex];
    return this->prepareBindBuffer(&info, requiredBytes, "VertexStorageBuffer");
}

BindBufferInfo DrawBufferManager::getIndexStorage(size_t requiredBytes) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndexStorageBufferIndex];
    return this->prepareBindBuffer(&info, requiredBytes, "IndexStorageBuffer");
}

BindBufferInfo DrawBufferManager::getIndirectStorage(size_t requiredBytes, ClearBuffer cleared) {
    if (!requiredBytes) {
        return {};
    }

    auto& info = fCurrentBuffers[kIndirectStorageBufferIndex];
    return this->prepareBindBuffer(&info,
                                   requiredBytes,
                                   "IndirectStorageBuffer",
                                   /*supportCpuUpload=*/false,
                                   cleared);
}

ScratchBuffer DrawBufferManager::getScratchStorage(size_t requiredBytes) {
    if (!requiredBytes || fMappingFailed) {
        return {};
    }

    // TODO: Generalize the pool to other buffer types.
    auto& info = fCurrentBuffers[kStorageBufferIndex];
    size_t bufferSize = sufficient_block_size(requiredBytes, info.fBlockSize);
    sk_sp<Buffer> buffer = this->findReusableSbo(bufferSize);
    if (!buffer) {
        buffer = fResourceProvider->findOrCreateBuffer(
                bufferSize, BufferType::kStorage, AccessPattern::kGpuOnly, "ScratchStorageBuffer");

        if (!buffer) {
            this->onFailedBuffer();
            return {};
        }
    }
    return {requiredBytes, info.fStartAlignment, std::move(buffer), this};
}
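
// Note: unlike the per-type blocks above, scratch buffers are recycled through
// fReusableScratchStorageBuffers (see ScratchBuffer::returnToPool()), so short-lived GPU-only
// storage can be handed out repeatedly within the same Recording without growing fUsedBuffers.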

void DrawBufferManager::onFailedBuffer() {
    fMappingFailed = true;

    // Clean up and unmap everything now.
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, _] : fUsedBuffers) {
        if (buffer->isMapped()) {
            buffer->unmap();
        }
    }
    fUsedBuffers.clear();

    for (auto& info : fCurrentBuffers) {
        if (info.fBuffer && info.fBuffer->isMapped()) {
            info.fBuffer->unmap();
        }
        info.fBuffer = nullptr;
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}

void DrawBufferManager::transferToRecording(Recording* recording) {
    // We could allow this to be called when the mapping has failed, since the transfer would be
    // a no-op, but in practice the caller will want to check the error state as soon as possible
    // to limit any unnecessary resource preparation from other tasks.
    SkASSERT(!fMappingFailed);

    if (!fClearList.empty()) {
        recording->priv().addTask(ClearBuffersTask::Make(std::move(fClearList)));
    }

    // Transfer the buffers in the reuse pool to the recording.
    // TODO: Allow reuse across different Recordings?
    for (auto& buffer : fReusableScratchStorageBuffers) {
        recording->priv().addResourceRef(std::move(buffer));
    }
    fReusableScratchStorageBuffers.clear();

    for (auto& [buffer, transferBuffer] : fUsedBuffers) {
        if (transferBuffer) {
            SkASSERT(buffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            size_t copySize = buffer->size();
            recording->priv().addTask(
                    CopyBufferToBufferTask::Make(transferBuffer.fBuffer,
                                                 transferBuffer.fOffset,
                                                 std::move(buffer),
                                                 /*dstOffset=*/0,
                                                 copySize));
        } else {
            if (buffer->isMapped()) {
                buffer->unmap();
            }
            recording->priv().addResourceRef(std::move(buffer));
        }
    }
    fUsedBuffers.clear();

    // The current draw buffers have not been added to fUsedBuffers,
    // so we need to handle them as well.
    for (auto& info : fCurrentBuffers) {
        if (!info.fBuffer) {
            continue;
        }
        if (info.fTransferBuffer) {
            // A transfer buffer should always be mapped at this stage.
            SkASSERT(info.fBuffer);
            SkASSERT(!fCaps->drawBufferCanBeMapped());
            // Since the transfer buffer is managed by the UploadManager, we don't manually unmap
            // it here or need to pass a ref into CopyBufferToBufferTask.
            recording->priv().addTask(
                    CopyBufferToBufferTask::Make(info.fTransferBuffer.fBuffer,
                                                 info.fTransferBuffer.fOffset,
                                                 info.fBuffer,
                                                 /*dstOffset=*/0,
                                                 info.fBuffer->size()));
        } else {
            if (info.fBuffer->isMapped()) {
                info.fBuffer->unmap();
            }
            recording->priv().addResourceRef(std::move(info.fBuffer));
        }
        info.fTransferBuffer = {};
        info.fOffset = 0;
    }
}

std::pair<void*, BindBufferInfo> DrawBufferManager::prepareMappedBindBuffer(
        BufferInfo* info,
        size_t requiredBytes,
        std::string_view label) {
    BindBufferInfo bindInfo = this->prepareBindBuffer(info,
                                                      requiredBytes,
                                                      std::move(label),
                                                      /*supportCpuUpload=*/true);
    if (!bindInfo) {
        // prepareBindBuffer() already called onFailedBuffer()
        SkASSERT(fMappingFailed);
        return {nullptr, {}};
    }

    // If there's a transfer buffer, its mapped pointer should already have been validated
    SkASSERT(!info->fTransferBuffer || info->fTransferMapPtr);
    void* mapPtr = info->fTransferBuffer ? info->fTransferMapPtr : info->fBuffer->map();
    if (!mapPtr) {
        // Mapping a direct draw buffer failed
        this->onFailedBuffer();
        return {nullptr, {}};
    }

    mapPtr = SkTAddOffset<void>(mapPtr, static_cast<ptrdiff_t>(bindInfo.fOffset));
    return {mapPtr, bindInfo};
}

BindBufferInfo DrawBufferManager::prepareBindBuffer(BufferInfo* info,
                                                    size_t requiredBytes,
                                                    std::string_view label,
                                                    bool supportCpuUpload,
                                                    ClearBuffer cleared) {
    SkASSERT(info);
    SkASSERT(requiredBytes);

    if (fMappingFailed) {
        return {};
    }

    // A transfer buffer is not necessary if the caller does not intend to upload CPU data to it.
    bool useTransferBuffer = supportCpuUpload && !fCaps->drawBufferCanBeMapped();

    if (info->fBuffer &&
        !can_fit(requiredBytes, info->fBuffer->size(), info->fOffset, info->fStartAlignment)) {
        fUsedBuffers.emplace_back(std::move(info->fBuffer), info->fTransferBuffer);
        info->fTransferBuffer = {};
    }

    if (!info->fBuffer) {
        // This buffer can be GPU-only if
        // a) the caller does not intend to ever upload CPU data to the buffer; or
        // b) CPU data will get uploaded to fBuffer only via a transfer buffer
        AccessPattern accessPattern = (useTransferBuffer || !supportCpuUpload)
                                              ? AccessPattern::kGpuOnly
                                              : AccessPattern::kHostVisible;
        size_t bufferSize = sufficient_block_size(requiredBytes, info->fBlockSize);
        info->fBuffer = fResourceProvider->findOrCreateBuffer(bufferSize,
                                                              info->fType,
                                                              accessPattern,
                                                              std::move(label));
        info->fOffset = 0;
        if (!info->fBuffer) {
            this->onFailedBuffer();
            return {};
        }
    }

    if (useTransferBuffer && !info->fTransferBuffer) {
        std::tie(info->fTransferMapPtr, info->fTransferBuffer) =
                fUploadManager->makeBindInfo(info->fBuffer->size(),
                                             fCaps->requiredTransferBufferAlignment(),
                                             "TransferForDataBuffer");

        if (!info->fTransferBuffer) {
            this->onFailedBuffer();
            return {};
        }
        SkASSERT(info->fTransferMapPtr);
    }

    info->fOffset = SkAlignTo(info->fOffset, info->fStartAlignment);
    BindBufferInfo bindInfo{info->fBuffer.get(), info->fOffset};
    info->fOffset += requiredBytes;

    if (cleared == ClearBuffer::kYes) {
        fClearList.push_back({bindInfo.fBuffer, bindInfo.fOffset, requiredBytes});
    }

    return bindInfo;
}

sk_sp<Buffer> DrawBufferManager::findReusableSbo(size_t bufferSize) {
    SkASSERT(bufferSize);
    SkASSERT(!fMappingFailed);

    for (int i = 0; i < fReusableScratchStorageBuffers.size(); ++i) {
        sk_sp<Buffer>* buffer = &fReusableScratchStorageBuffers[i];
        if ((*buffer)->size() >= bufferSize) {
            auto found = std::move(*buffer);
            // Fill the hole left by the move (if necessary) and shrink the pool.
            if (i < fReusableScratchStorageBuffers.size() - 1) {
                *buffer = std::move(fReusableScratchStorageBuffers.back());
            }
            fReusableScratchStorageBuffers.pop_back();
            return found;
        }
    }
    return nullptr;
}

// ------------------------------------------------------------------------------------------------
// StaticBufferManager

StaticBufferManager::StaticBufferManager(ResourceProvider* resourceProvider,
                                         const Caps* caps)
        : fResourceProvider(resourceProvider)
        , fUploadManager(resourceProvider, caps)
        , fRequiredTransferAlignment(caps->requiredTransferBufferAlignment())
        , fVertexBufferInfo(BufferType::kVertex, caps)
        , fIndexBufferInfo(BufferType::kIndex, caps) {}

StaticBufferManager::~StaticBufferManager() = default;

StaticBufferManager::BufferInfo::BufferInfo(BufferType type, const Caps* caps)
        : fBufferType(type)
        , fAlignment(starting_alignment(type, /*useTransferBuffers=*/true, caps))
        , fTotalRequiredBytes(0) {}

VertexWriter StaticBufferManager::getVertexWriter(size_t size, BindBufferInfo* binding) {
    void* data = this->prepareStaticData(&fVertexBufferInfo, size, binding);
    return VertexWriter{data, size};
}

VertexWriter StaticBufferManager::getIndexWriter(size_t size, BindBufferInfo* binding) {
    void* data = this->prepareStaticData(&fIndexBufferInfo, size, binding);
    return VertexWriter{data, size};
}

void* StaticBufferManager::prepareStaticData(BufferInfo* info,
                                             size_t size,
                                             BindBufferInfo* target) {
    // Zero out the target binding in the event of any failure in actually transferring the
    // data later.
    SkASSERT(target);
    *target = {nullptr, 0};
    if (!size || fMappingFailed) {
        return nullptr;
    }

    // Both the transfer buffer and static buffers are aligned to the max required alignment for
    // the pair of buffer types involved (transfer cpu->gpu and either index or vertex). Copies
    // must also copy an aligned amount of bytes.
    size = SkAlignTo(size, info->fAlignment);

    auto [transferMapPtr, transferBindInfo] =
            fUploadManager.makeBindInfo(size,
                                        fRequiredTransferAlignment,
                                        "TransferForStaticBuffer");
    if (!transferMapPtr) {
        SKGPU_LOG_E("Failed to create or map transfer buffer that initializes static GPU data.");
        fMappingFailed = true;
        return nullptr;
    }

    info->fData.push_back({transferBindInfo, target, size});
    info->fTotalRequiredBytes += size;
    return transferMapPtr;
}
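
// Usage sketch (hypothetical caller, not from the source): static mesh data is written once, and
// the binding is patched later by createAndUpdateBindings() when finalize() runs:
//
//     BindBufferInfo binding;
//     VertexWriter writer = staticMgr->getVertexWriter(dataSize, &binding);
//     if (writer) { /* write vertices */ }
//     // 'binding' holds {nullptr, 0} until finalize() copies the data into the static buffer.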

bool StaticBufferManager::BufferInfo::createAndUpdateBindings(
        ResourceProvider* resourceProvider,
        Context* context,
        QueueManager* queueManager,
        GlobalCache* globalCache,
        std::string_view label) const {
    if (!fTotalRequiredBytes) {
        return true; // No buffer needed
    }

    sk_sp<Buffer> staticBuffer = resourceProvider->findOrCreateBuffer(
            fTotalRequiredBytes, fBufferType, AccessPattern::kGpuOnly, std::move(label));
    if (!staticBuffer) {
        SKGPU_LOG_E("Failed to create static buffer for type %d of size %zu bytes.\n",
                    (int) fBufferType, fTotalRequiredBytes);
        return false;
    }

    size_t offset = 0;
    for (const CopyRange& data : fData) {
        // Each copy range's size should be aligned to the max of the required buffer alignment
        // and the transfer alignment, so we can just increment the offset into the static buffer.
        SkASSERT(offset % fAlignment == 0);
        data.fTarget->fBuffer = staticBuffer.get();
        data.fTarget->fOffset = offset;

        auto copyTask = CopyBufferToBufferTask::Make(
                data.fSource.fBuffer, data.fSource.fOffset,
                sk_ref_sp(data.fTarget->fBuffer), data.fTarget->fOffset,
                data.fSize);
        if (!queueManager->addTask(copyTask.get(), context)) {
            SKGPU_LOG_E("Failed to copy data to static buffer.\n");
            return false;
        }

        offset += data.fSize;
    }

    SkASSERT(offset == fTotalRequiredBytes);
    globalCache->addStaticResource(std::move(staticBuffer));
    return true;
}

StaticBufferManager::FinishResult StaticBufferManager::finalize(Context* context,
                                                                QueueManager* queueManager,
                                                                GlobalCache* globalCache) {
    if (fMappingFailed) {
        return FinishResult::kFailure;
    }

    const size_t totalRequiredBytes = fVertexBufferInfo.fTotalRequiredBytes +
                                      fIndexBufferInfo.fTotalRequiredBytes;
    SkASSERT(totalRequiredBytes <= kMaxStaticDataSize);
    if (!totalRequiredBytes) {
        return FinishResult::kNoWork;
    }

    if (!fVertexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                   context,
                                                   queueManager,
                                                   globalCache,
                                                   "StaticVertexBuffer")) {
        return FinishResult::kFailure;
    }
    if (!fIndexBufferInfo.createAndUpdateBindings(fResourceProvider,
                                                  context,
                                                  queueManager,
                                                  globalCache,
                                                  "StaticIndexBuffer")) {
        return FinishResult::kFailure;
    }
    queueManager->addUploadBufferManagerRefs(&fUploadManager);

    // Reset the static buffer manager since the Recording's copy tasks now manage ownership of
    // the transfer buffers and the GlobalCache owns the final static buffers.
    fVertexBufferInfo.reset();
    fIndexBufferInfo.reset();

    return FinishResult::kSuccess;
}

} // namespace skgpu::graphite