GrVkBuffer.cpp
/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkBuffer.h"

#include "include/gpu/GrDirectContext.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/vk/GrVkDescriptorSet.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkUniformHandler.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const skgpu::VulkanAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet,
                       std::string_view label)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern, label)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {
    // We always require dynamic buffers to be mappable.
    SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
    SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
    this->registerWithCache(skgpu::Budgeted::kYes);
}

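// Grabs a uniform descriptor set from the resource provider and points it at
// the given buffer's full range. Returns nullptr if no set could be obtained.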
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    if (!descriptorSet) {
        return nullptr;
    }

    VkDescriptorBufferInfo bufferInfo;
    memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo.buffer = buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = size;

    VkWriteDescriptorSet descriptorWrite;
    memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.dstSet = *descriptorSet->descriptorSet();
    descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite.dstArrayElement = 0;
    descriptorWrite.descriptorCount = 1;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pImageInfo = nullptr;
    descriptorWrite.pBufferInfo = &bufferInfo;
    descriptorWrite.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}

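// Creates the VkBuffer, allocates and binds its backing memory, and, for
// uniform buffers, attaches a descriptor set. On any failure the partially
// created Vulkan objects are cleaned up and nullptr is returned.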
sk_sp<GrVkBuffer> GrVkBuffer::Make(GrVkGpu* gpu,
                                   size_t size,
                                   GrGpuBufferType bufferType,
                                   GrAccessPattern accessPattern) {
    VkBuffer buffer;
    skgpu::VulkanAlloc alloc;

    // The only time we don't require mappable buffers is when we have a static access pattern and
    // we're on a device where gpu only memory has faster reads on the gpu than memory that is also
    // mappable on the cpu. Protected memory always uses mappable buffers.
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;

    // Create the buffer object.
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;
    // To support SkMesh buffer updates we make Vertex and Index buffers capable of being transfer
    // dsts.
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }
    // We may not always get a mappable buffer for non dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data.
    // TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the scope
    // of buffers we set it on more than just not dynamic.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    auto checkResult = [gpu, allocUsage, shouldPersistentlyMapCpuToGpu](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocBufferMemory "
                                 "(allocUsage:%d, shouldPersistentlyMapCpuToGpu:%d)",
                                 (int)allocUsage, (int)shouldPersistentlyMapCpuToGpu);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    if (!skgpu::VulkanMemory::AllocBufferMemory(allocator,
                                                buffer,
                                                allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult,
                                                &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // Bind buffer memory.
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(),
                                                 buffer,
                                                 alloc.fMemory,
                                                 alloc.fOffset));
    if (err) {
        skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // If this is a uniform buffer we must set up a descriptor set.
    const GrVkDescriptorSet* uniformDescSet = nullptr;
    if (bufferType == GrGpuBufferType::kUniform) {
        uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
        if (!uniformDescSet) {
            VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
            skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
            return nullptr;
        }
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(
            gpu, size, bufferType, accessPattern, buffer, alloc, uniformDescSet,
            /*label=*/"MakeVkBuffer"));
}
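
// An illustrative sketch (not part of this file): creating a small vertex
// buffer and filling it through the public GrGpuBuffer::updateData() wrapper,
// which routes to onUpdateData() below. Assumes `gpu` is a valid GrVkGpu.
//
//   sk_sp<GrVkBuffer> vb = GrVkBuffer::Make(gpu,
//                                           /*size=*/4 * sizeof(SkPoint),
//                                           GrGpuBufferType::kVertex,
//                                           kStatic_GrAccessPattern);
//   SkPoint quad[4] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
//   if (vb) {
//       vb->updateData(quad, /*offset=*/0, sizeof(quad), /*preserve=*/false);
//   }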

void GrVkBuffer::vkMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        // Not every buffer will use command buffer usage refs and instead the command buffer just
        // holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
        SkASSERT(this->internalHasNoCommandBufferUsages());
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        GrVkGpu* gpu = this->getVkGpu();
        auto checkResult_mapAlloc = [gpu](VkResult result) {
            GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::MapAlloc");
            return gpu->checkVkResult(result);
        };
        auto allocator = gpu->memoryAllocator();
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult_mapAlloc);
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidateMapAlloc = [gpu, readOffset, readSize](VkResult result) {
                GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::InvalidateMappedAlloc "
                                         "(readOffset:%zu, readSize:%zu)",
                                         readOffset, readSize);
                return gpu->checkVkResult(result);
            };
            // "Invalidate" here means make device writes visible to the host. That is, it makes
            // sure any GPU writes are finished in the range we might read from.
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator,
                                                       fAlloc,
                                                       readOffset,
                                                       readSize,
                                                       checkResult_invalidateMapAlloc);
        }
    }
}

void GrVkBuffer::vkUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isVkMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    GrVkGpu* gpu = this->getVkGpu();
    auto checkResult = [gpu, flushOffset, flushSize](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::FlushMappedAlloc "
                                 "(flushOffset:%zu, flushSize:%zu)",
                                 flushOffset, flushSize);
        return gpu->checkVkResult(result);
    };
    auto allocator = this->getVkGpu()->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}

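// Writes CPU-side data into a buffer that is not host-mappable: small,
// 4-byte-aligned updates go through vkCmdUpdateBuffer; everything else is
// staged in a transfer buffer and copied on the GPU.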
void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t offset, size_t size) {
    SkASSERT(src);

    GrVkGpu* gpu = this->getVkGpu();

    // We should never call this method in protected contexts.
    SkASSERT(!gpu->protectedContext());

    // The Vulkan API restricts the use of vkCmdUpdateBuffer to updates that are at most 65536
    // bytes, with a size and offset that are both 4-byte aligned.
    if ((size <= 65536) && SkIsAlign4(size) && SkIsAlign4(offset) &&
        !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, offset, size);
    } else {
        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
                src,
                size,
                GrGpuBufferType::kXferCpuToGpu,
                kDynamic_GrAccessPattern);
        if (!transferBuffer) {
            return;
        }

        gpu->transferFromBufferToBuffer(std::move(transferBuffer),
                                        /*srcOffset=*/0,
                                        sk_ref_sp(this),
                                        offset,
                                        size);
    }
}
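
// For example, a 256-byte update at a 4-byte-aligned offset takes the
// vkCmdUpdateBuffer path, while a 1 MB upload exceeds the 65536-byte limit and
// goes through the staging-buffer path.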

void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            fBuffer,                                  // buffer
            0,                                        // offset
            this->size(),                             // size
    };

    // TODO: restrict to area of buffer we're interested in
    this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}
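
// An illustrative sketch (not from this file): making a transfer write to this
// buffer visible to a later vertex fetch. The masks below are standard Vulkan
// flags; the call shape matches addMemoryBarrier() above.
//
//   buffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
//                            VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
//                            VK_PIPELINE_STAGE_TRANSFER_BIT,
//                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
//                            /*byRegion=*/false);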

void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr) {
        this->vkUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }

    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
    VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(this->getVkGpu()->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void GrVkBuffer::onRelease() {
    this->vkRelease();
    this->GrGpuBuffer::onRelease();
}

void GrVkBuffer::onAbandon() {
    this->vkRelease();
    this->GrGpuBuffer::onAbandon();
}

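// Map/unmap ranges mirror the access type: a read map must invalidate the whole
// buffer up front, while a write-discard map passes an empty range here and
// relies on a whole-buffer flush at unmap time.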
void GrVkBuffer::onMap(MapType type) {
    this->vkMap(0, type == MapType::kRead ? this->size() : 0);
}

void GrVkBuffer::onUnmap(MapType type) {
    this->vkUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}

bool GrVkBuffer::onClearToZero() { return this->getVkGpu()->zeroBuffer(sk_ref_sp(this)); }

bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
    if (this->isVkMappable()) {
        // We won't be reading the mapped memory so pass an empty range.
        this->vkMap(0, 0);
        if (!fMapPtr) {
            return false;
        }
        memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
        // We only need to flush the updated portion so pass the true range here.
        this->vkUnmap(offset, size);
        fMapPtr = nullptr;
    } else {
        this->copyCpuDataToGpuBuffer(src, offset, size);
    }
    return true;
}

GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

const VkDescriptorSet* GrVkBuffer::uniformDescriptorSet() const {
    SkASSERT(fUniformDescriptorSet);
    return fUniformDescriptorSet->descriptorSet();
}