VulkanBuffer.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/graphite/vk/VulkanBuffer.h"

#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/vk/VulkanMemory.h"

namespace skgpu::graphite {

sk_sp<Buffer> VulkanBuffer::Make(const VulkanSharedContext* sharedContext,
                                 size_t size,
                                 BufferType type,
                                 AccessPattern accessPattern,
                                 std::string_view label) {
    if (size <= 0) {
        return nullptr;
    }
    VkBuffer buffer;
    skgpu::VulkanAlloc alloc;

    // The only time we don't require mappable buffers is when we're on a device where gpu only
    // memory has faster reads on the gpu than memory that is also mappable on the cpu. Protected
    // memory always uses mappable buffers.
    bool requiresMappable = sharedContext->isProtected() == Protected::kYes ||
                            accessPattern == AccessPattern::kHostVisible ||
                            !sharedContext->vulkanCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;

    // The default usage captures use cases besides transfer buffers. GPU-only buffers are preferred
    // unless mappability is required.
    BufferUsage allocUsage =
            requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;

    // Create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;

    // To support SkMesh buffer updates we make Vertex and Index buffers capable of being transfer
    // dsts. To support rtAdjust uniform buffer updates, we make host-visible uniform buffers also
    // capable of being transfer dsts.
    switch (type) {
        case BufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kStorage:
            bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndirect:
            bufInfo.usage =
                    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kVertexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
            allocUsage = BufferUsage::kCpuWritesGpuReads;
            break;
        case BufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            allocUsage = BufferUsage::kTransfersFromCpuToGpu;
            break;
        case BufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            allocUsage = BufferUsage::kTransfersFromGpuToCpu;
            break;
    }

    // We may not always get a mappable buffer for non-dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data. It doesn't really hurt
    // to set this extra usage flag, but we could narrow the scope of buffers we set it on more than
    // just not dynamic.
    if (!requiresMappable || accessPattern == AccessPattern::kGpuOnly) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

    VkResult result;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreateBuffer(sharedContext->device(),
                                    &bufInfo,
                                    nullptr, /*const VkAllocationCallbacks*/
                                    &buffer));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    auto allocator = sharedContext->memoryAllocator();
    bool shouldPersistentlyMapCpuToGpu =
            sharedContext->vulkanCaps().shouldPersistentlyMapCpuToGpuBuffers();
    // Allocate the buffer's backing memory.
    auto checkResult = [](VkResult result) {
        return result == VK_SUCCESS;
    };
    if (!skgpu::VulkanMemory::AllocBufferMemory(allocator,
                                                buffer,
                                                allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult,
                                                &alloc)) {
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                                                              buffer,
                                                              /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }


    // Bind buffer
    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            BindBufferMemory(sharedContext->device(), buffer, alloc.fMemory, alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                                                              buffer,
                                                              /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }

    return sk_sp<Buffer>(new VulkanBuffer(sharedContext,
                                          size,
                                          type,
                                          accessPattern,
                                          std::move(buffer),
                                          alloc,
                                          bufInfo.usage,
                                          std::move(label)));
}

VulkanBuffer::VulkanBuffer(const VulkanSharedContext* sharedContext,
                           size_t size,
                           BufferType type,
                           AccessPattern accessPattern,
                           VkBuffer buffer,
                           const skgpu::VulkanAlloc& alloc,
                           const VkBufferUsageFlags usageFlags,
                           std::string_view label)
        : Buffer(sharedContext, size, std::move(label))
        , fBuffer(std::move(buffer))
        , fAlloc(alloc)
        , fBufferUsageFlags(usageFlags)
        // We assume a buffer is used for CPU reads only in the case of GPU->CPU transfer buffers.
        , fBufferUsedForCPURead(type == BufferType::kXferGpuToCpu) {}

void VulkanBuffer::freeGpuData() {
    if (fMapPtr) {
        this->internalUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    const VulkanSharedContext* sharedContext =
            static_cast<const VulkanSharedContext*>(this->sharedContext());
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
    VULKAN_CALL(sharedContext->interface(),
                DestroyBuffer(sharedContext->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(sharedContext->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}

void VulkanBuffer::internalMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isMappable()) {
        // Not every buffer will use command buffer usage refs. Instead, the command buffer just
        // holds normal refs. Systems higher up in Graphite should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
#ifdef SK_DEBUG
        SkASSERT(!this->debugHasCommandBufferRef());
#endif
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        const VulkanSharedContext* sharedContext = this->vulkanSharedContext();

        auto allocator = sharedContext->memoryAllocator();
        auto checkResult = [sharedContext](VkResult result) {
            VULKAN_LOG_IF_NOT_SUCCESS(sharedContext, result, "skgpu::VulkanMemory::MapAlloc");
            return sharedContext->checkVkResult(result);
        };
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult);
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidate = [sharedContext, readOffset, readSize](VkResult result) {
                VULKAN_LOG_IF_NOT_SUCCESS(sharedContext,
                                          result,
                                          "skgpu::VulkanMemory::InvalidateMappedAlloc "
                                          "(readOffset:%zu, readSize:%zu)",
                                          readOffset,
                                          readSize);
                return sharedContext->checkVkResult(result);
            };
            // "Invalidate" here means make device writes visible to the host. That is, it makes
            // sure any GPU writes are finished in the range we might read from.
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator,
                                                       fAlloc,
                                                       readOffset,
                                                       readSize,
                                                       checkResult_invalidate);
        }
    }
}

void VulkanBuffer::internalUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    const VulkanSharedContext* sharedContext = this->vulkanSharedContext();
    auto checkResult = [sharedContext, flushOffset, flushSize](VkResult result) {
        VULKAN_LOG_IF_NOT_SUCCESS(sharedContext,
                                  result,
                                  "skgpu::VulkanMemory::FlushMappedAlloc "
                                  "(flushOffset:%zu, flushSize:%zu)",
                                  flushOffset,
                                  flushSize);
        return sharedContext->checkVkResult(result);
    };

    auto allocator = sharedContext->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}

void VulkanBuffer::onMap() {
    SkASSERT(fBuffer);
    SkASSERT(!this->isMapped());

    // For GPU->CPU transfer buffers the CPU will read, so invalidate the full range; otherwise
    // the buffer is mapped for writing and nothing needs to be invalidated.
    this->internalMap(0, fBufferUsedForCPURead ? this->size() : 0);
}

void VulkanBuffer::onUnmap() {
    SkASSERT(fBuffer);
    SkASSERT(this->isMapped());
    // Flush CPU writes for buffers the GPU will read; CPU-read buffers have nothing to flush.
    this->internalUnmap(0, fBufferUsedForCPURead ? 0 : this->size());
}

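// Usage sketch (illustrative, not part of the original file): before the GPU consumes this
// buffer as, say, a uniform buffer, a caller might record
//     buffer->setBufferAccess(cmdBuffer,
//                             VK_ACCESS_UNIFORM_READ_BIT,
//                             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);
// which emits a VkBufferMemoryBarrier transitioning from the previously tracked access mask
// (fCurrentAccessMask) to the requested one.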
void VulkanBuffer::setBufferAccess(VulkanCommandBuffer* cmdBuffer,
                                   VkAccessFlags dstAccessMask,
                                   VkPipelineStageFlags dstStageMask) const {
    // TODO: fill out other cases where we need a barrier
    if (dstAccessMask == VK_ACCESS_HOST_READ_BIT ||
        dstAccessMask == VK_ACCESS_TRANSFER_WRITE_BIT ||
        dstAccessMask == VK_ACCESS_UNIFORM_READ_BIT) {
        VkPipelineStageFlags srcStageMask =
                VulkanBuffer::AccessMaskToPipelineSrcStageFlags(fCurrentAccessMask);

        VkBufferMemoryBarrier bufferMemoryBarrier = {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
                nullptr,                                  // pNext
                fCurrentAccessMask,                       // srcAccessMask
                dstAccessMask,                            // dstAccessMask
                VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
                VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
                fBuffer,                                  // buffer
                0,                                        // offset
                this->size(),                             // size
        };
        cmdBuffer->addBufferMemoryBarrier(srcStageMask, dstStageMask, &bufferMemoryBarrier);
    }

    fCurrentAccessMask = dstAccessMask;
}

VkPipelineStageFlags VulkanBuffer::AccessMaskToPipelineSrcStageFlags(const VkAccessFlags srcMask) {
    if (srcMask == 0) {
        return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    }
    VkPipelineStageFlags flags = 0;

    if (srcMask & VK_ACCESS_TRANSFER_WRITE_BIT || srcMask & VK_ACCESS_TRANSFER_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
    }
    if (srcMask & VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ||
        srcMask & VK_ACCESS_COLOR_ATTACHMENT_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }
    if (srcMask & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT ||
        srcMask & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    }
    if (srcMask & VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_SHADER_READ_BIT ||
        srcMask & VK_ACCESS_UNIFORM_READ_BIT) {
        // TODO(b/307577875): It is possible that uniforms could have simply been used in the vertex
        // shader and not the fragment shader, so using the fragment shader pipeline stage bit
        // indiscriminately is a bit overkill. This call should be modified to check & allow for
        // selecting VK_PIPELINE_STAGE_VERTEX_SHADER_BIT when appropriate.
        flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_SHADER_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_INDEX_READ_BIT ||
        srcMask & VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    }
    if (srcMask & VK_ACCESS_INDIRECT_COMMAND_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
    }
    if (srcMask & VK_ACCESS_HOST_READ_BIT || srcMask & VK_ACCESS_HOST_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_HOST_BIT;
    }

    return flags;
}

} // namespace skgpu::graphite
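
A minimal usage sketch (illustrative, not part of the file). It assumes a valid VulkanSharedContext* named sharedContext, and that callers reach onMap()/onUnmap() through Buffer's public map()/unmap() wrappers:

    // Hypothetical caller-side code, not from VulkanBuffer.cpp: create a host-visible
    // uniform buffer, write through the mapped pointer, and unmap to flush the write.
    sk_sp<Buffer> uniforms = VulkanBuffer::Make(sharedContext,
                                                /*size=*/256,
                                                BufferType::kUniform,
                                                AccessPattern::kHostVisible,
                                                /*label=*/"ExampleUniforms");
    if (uniforms) {
        if (void* ptr = uniforms->map()) {     // onMap(): maps; no invalidate needed for writes
            memset(ptr, 0, uniforms->size());  // CPU-side write through the mapped pointer
            uniforms->unmap();                 // onUnmap(): flushes the written range
        }
    }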