GrVkBuffer.cpp
/*
 * Copyright 2021 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkBuffer.h"

// The remaining includes from the original file (GrVkGpu, GrVkDescriptorSet, GrVkUtil,
// GrResourceProvider, skgpu::VulkanMemory, and related headers) are omitted from this listing.

#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)

GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
                       size_t sizeInBytes,
                       GrGpuBufferType bufferType,
                       GrAccessPattern accessPattern,
                       VkBuffer buffer,
                       const skgpu::VulkanAlloc& alloc,
                       const GrVkDescriptorSet* uniformDescriptorSet,
                       std::string_view label)
        : GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern, label)
        , fBuffer(buffer)
        , fAlloc(alloc)
        , fUniformDescriptorSet(uniformDescriptorSet) {
    // We always require dynamic buffers to be mappable
    SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
    SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
    this->registerWithCache(skgpu::Budgeted::kYes);
}
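// Note: the constructor asserts two invariants that Make() below establishes: dynamic buffers
// are always host-mappable, and uniform buffers always arrive with a pre-allocated descriptor
// set. The buffer then registers itself with the resource cache as a budgeted resource.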

static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
    const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
    if (!descriptorSet) {
        return nullptr;
    }

    VkDescriptorBufferInfo bufferInfo;
    memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
    bufferInfo.buffer = buffer;
    bufferInfo.offset = 0;
    bufferInfo.range = size;

    VkWriteDescriptorSet descriptorWrite;
    memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
    descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptorWrite.pNext = nullptr;
    descriptorWrite.dstSet = *descriptorSet->descriptorSet();
    descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
    descriptorWrite.dstArrayElement = 0;
    descriptorWrite.descriptorCount = 1;
    descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptorWrite.pImageInfo = nullptr;
    descriptorWrite.pBufferInfo = &bufferInfo;
    descriptorWrite.pTexelBufferView = nullptr;

    GR_VK_CALL(gpu->vkInterface(),
               UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
    return descriptorSet;
}
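// The write above binds the entire buffer (offset 0, range == size) as a single
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER. The descriptor set is handed back to callers through
// uniformDescriptorSet() and recycled in vkRelease() once the buffer is released.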

sk_sp<GrVkBuffer> GrVkBuffer::Make(GrVkGpu* gpu,
                                   size_t size,
                                   GrGpuBufferType bufferType,
                                   GrAccessPattern accessPattern) {
    VkBuffer buffer;
    skgpu::VulkanAlloc alloc;

    // The only time we don't require mappable buffers is when we have a static access pattern and
    // we're on a device where gpu only memory has faster reads on the gpu than memory that is also
    // mappable on the cpu. Protected memory always uses mappable buffers.
    bool requiresMappable = gpu->protectedContext() ||
                            accessPattern == kDynamic_GrAccessPattern ||
                            accessPattern == kStream_GrAccessPattern ||
                            !gpu->vkCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;
    BufferUsage allocUsage;

    // create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;
    // To support SkMesh buffer updates we make Vertex and Index buffers capable of being transfer
    // dsts.
    switch (bufferType) {
        case GrGpuBufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kDrawIndirect:
            bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
            allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
            break;
        case GrGpuBufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case GrGpuBufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case GrGpuBufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }
    // We may not always get a mappable buffer for non dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data.
    // TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the scope
    // of buffers we set it on more than just not dynamic.
    if (!requiresMappable) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult err;
    err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
    if (err) {
        return nullptr;
    }

    bool shouldPersistentlyMapCpuToGpu = gpu->vkCaps().shouldPersistentlyMapCpuToGpuBuffers();
    auto checkResult = [gpu, allocUsage, shouldPersistentlyMapCpuToGpu](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::AllocBufferMemory "
                                 "(allocUsage:%d, shouldPersistentlyMapCpuToGpu:%d)",
                                 (int)allocUsage, (int)shouldPersistentlyMapCpuToGpu);
        return gpu->checkVkResult(result);
    };
    auto allocator = gpu->memoryAllocator();
    if (!skgpu::VulkanMemory::AllocBufferMemory(allocator,
                                                buffer,
                                                allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult,
                                                &alloc)) {
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // Bind buffer
    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(),
                                                 buffer,
                                                 alloc.fMemory,
                                                 alloc.fOffset));
    if (err) {
        skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
        VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
        return nullptr;
    }

    // If this is a uniform buffer we must setup a descriptor set
    const GrVkDescriptorSet* uniformDescSet = nullptr;
    if (bufferType == GrGpuBufferType::kUniform) {
        uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
        if (!uniformDescSet) {
            VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
            skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
            return nullptr;
        }
    }

    return sk_sp<GrVkBuffer>(new GrVkBuffer(
            gpu, size, bufferType, accessPattern, buffer, alloc, uniformDescSet,
            /*label=*/"MakeVkBuffer"));
}
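// Illustrative example (the byte size is arbitrary): with a GrVkGpu* in hand, a dynamic vertex
// buffer can be created through the factory above; a null return means creation or allocation
// failed and has already been cleaned up.
//
//   sk_sp<GrVkBuffer> vbo = GrVkBuffer::Make(gpu,
//                                            /*size=*/64 * 1024,
//                                            GrGpuBufferType::kVertex,
//                                            kDynamic_GrAccessPattern);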

void GrVkBuffer::vkMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isVkMappable()) {
        // Not every buffer will use command buffer usage refs and instead the command buffer just
        // holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
        SkASSERT(this->internalHasNoCommandBufferUsages());
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        GrVkGpu* gpu = this->getVkGpu();
        auto checkResult_mapAlloc = [gpu](VkResult result) {
            GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::MapAlloc");
            return gpu->checkVkResult(result);
        };
        auto allocator = gpu->memoryAllocator();
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult_mapAlloc);
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidateMapAlloc = [gpu, readOffset, readSize](VkResult result) {
                GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::InvalidateMappedAlloc "
                                         "(readOffset:%zu, readSize:%zu)",
                                         readOffset, readSize);
                return gpu->checkVkResult(result);
            };
            // "Invalidate" here means make device writes visible to the host. That is, it makes
            // sure any GPU writes are finished in the range we might read from.
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator,
                                                       fAlloc,
                                                       readOffset,
                                                       readSize,
                                                       checkResult_invalidateMapAlloc);
        }
    }
}

void GrVkBuffer::vkUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isVkMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    GrVkGpu* gpu = this->getVkGpu();
    auto checkResult = [gpu, flushOffset, flushSize](VkResult result) {
        GR_VK_LOG_IF_NOT_SUCCESS(gpu, result, "skgpu::VulkanMemory::FlushMappedAlloc "
                                 "(flushOffset:%zu, flushSize:%zu)",
                                 flushOffset, flushSize);
        return gpu->checkVkResult(result);
    };
    auto allocator = this->getVkGpu()->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}
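// The flush-then-unmap ordering above makes host writes in [flushOffset, flushOffset + flushSize)
// visible to the device before the mapping is released; for allocations in host-coherent memory
// the allocator may treat the flush as a no-op.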

void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t offset, size_t size) {
    SkASSERT(src);

    GrVkGpu* gpu = this->getVkGpu();

    // We should never call this method in protected contexts.
    SkASSERT(!gpu->protectedContext());

    // The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
    // to 65536 bytes and a size and offset that are both 4 byte aligned.
    if ((size <= 65536) && SkIsAlign4(size) && SkIsAlign4(offset) &&
        !gpu->vkCaps().avoidUpdateBuffers()) {
        gpu->updateBuffer(sk_ref_sp(this), src, offset, size);
    } else {
        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
                src,
                size,
                GrGpuBufferType::kXferCpuToGpu,
                kDynamic_GrAccessPattern);
        if (!transferBuffer) {
            return;
        }

        gpu->transferFromBufferToBuffer(std::move(transferBuffer),
                                        /*srcOffset=*/0,
                                        sk_ref_sp(this),
                                        offset,
                                        size);
    }
}
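// Two write paths: small (<= 64 KiB), 4-byte-aligned updates go through vkCmdUpdateBuffer via
// gpu->updateBuffer(); everything else is staged in a CPU-to-GPU transfer buffer and copied on
// the GPU with transferFromBufferToBuffer().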

void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
                                  VkAccessFlags dstAccessMask,
                                  VkPipelineStageFlags srcStageMask,
                                  VkPipelineStageFlags dstStageMask,
                                  bool byRegion) const {
    VkBufferMemoryBarrier bufferMemoryBarrier = {
            VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
            nullptr,                                  // pNext
            srcAccessMask,                            // srcAccessMask
            dstAccessMask,                            // dstAccessMask
            VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
            VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
            fBuffer,                                  // buffer
            0,                                        // offset
            this->size(),                             // size
    };

    // TODO: restrict to area of buffer we're interested in
    this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
                                             &bufferMemoryBarrier);
}

void GrVkBuffer::vkRelease() {
    if (this->wasDestroyed()) {
        return;
    }

    if (fMapPtr) {
        this->vkUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    if (fUniformDescriptorSet) {
        fUniformDescriptorSet->recycle();
        fUniformDescriptorSet = nullptr;
    }

    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
    VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(this->getVkGpu()->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void GrVkBuffer::onRelease() {
    this->vkRelease();
    this->GrGpuBuffer::onRelease();
}

void GrVkBuffer::onAbandon() {
    this->vkRelease();
    this->GrGpuBuffer::onAbandon();
}

void GrVkBuffer::onMap(MapType type) {
    this->vkMap(0, type == MapType::kRead ? this->size() : 0);
}

void GrVkBuffer::onUnmap(MapType type) {
    this->vkUnmap(0, type == MapType::kWriteDiscard ? this->size() : 0);
}
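// MapType semantics: for MapType::kRead the whole buffer is invalidated at map time so device
// writes become visible to the host; for MapType::kWriteDiscard the whole buffer is flushed at
// unmap time so host writes become visible to the device.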

bool GrVkBuffer::onClearToZero() { return this->getVkGpu()->zeroBuffer(sk_ref_sp(this)); }

bool GrVkBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool /*preserve*/) {
    if (this->isVkMappable()) {
        // We won't be reading the mapped memory so pass an empty range.
        this->vkMap(0, 0);
        if (!fMapPtr) {
            return false;
        }
        memcpy(SkTAddOffset<void>(fMapPtr, offset), src, size);
        // We only need to flush the updated portion so pass the true range here.
        this->vkUnmap(offset, size);
        fMapPtr = nullptr;
    } else {
        this->copyCpuDataToGpuBuffer(src, offset, size);
    }
    return true;
}

GrVkGpu* GrVkBuffer::getVkGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrVkGpu*>(this->getGpu());
}

const VkDescriptorSet* GrVkBuffer::uniformDescriptorSet() const {
    SkASSERT(fUniformDescriptorSet);
    return fUniformDescriptorSet->descriptorSet();
}