VulkanBuffer.cpp
/*
 * Copyright 2022 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

// NOTE: the include list below is reconstructed from the upstream Skia sources; exact paths may
// differ slightly in the revision pinned by the Flutter engine.
#include "src/gpu/graphite/vk/VulkanBuffer.h"

#include "include/gpu/vk/VulkanMemoryAllocator.h"
#include "src/gpu/graphite/vk/VulkanCommandBuffer.h"
#include "src/gpu/graphite/vk/VulkanGraphiteUtilsPriv.h"
#include "src/gpu/vk/VulkanMemory.h"

namespace skgpu::graphite {

sk_sp<Buffer> VulkanBuffer::Make(const VulkanSharedContext* sharedContext,
                                 size_t size,
                                 BufferType type,
                                 AccessPattern accessPattern) {
    if (size <= 0) {
        return nullptr;
    }
    VkBuffer buffer;
    skgpu::VulkanAlloc alloc;

    // The only time we don't require mappable buffers is when we're on a device where gpu only
    // memory has faster reads on the gpu than memory that is also mappable on the cpu. Protected
    // memory always uses mappable buffers.
    bool requiresMappable = sharedContext->isProtected() == Protected::kYes ||
                            accessPattern == AccessPattern::kHostVisible ||
                            !sharedContext->vulkanCaps().gpuOnlyBuffersMorePerformant();

    using BufferUsage = skgpu::VulkanMemoryAllocator::BufferUsage;

    // The default usage captures use cases besides transfer buffers. GPU-only buffers are
    // preferred unless mappability is required.
    BufferUsage allocUsage =
            requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;

    // Create the buffer object
    VkBufferCreateInfo bufInfo;
    memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
    bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufInfo.flags = 0;
    bufInfo.size = size;

    // To support SkMesh buffer updates we make Vertex and Index buffers capable of being transfer
    // dsts. To support rtAdjust uniform buffer updates, we make host-visible uniform buffers also
    // capable of being transfer dsts.
    //
    // NOTE: the case labels and usage assignments below are reconstructed from the usage bits this
    // file references; the exact flag combinations may differ in the pinned Skia revision.
    switch (type) {
        case BufferType::kVertex:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kIndex:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kStorage:
            bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndirect:
            bufInfo.usage =
                    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kVertexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kIndexStorage:
            bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
            break;
        case BufferType::kUniform:
            bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                            VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
        case BufferType::kXferCpuToGpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
            break;
        case BufferType::kXferGpuToCpu:
            bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
            break;
    }

    // We may not always get a mappable buffer for non-dynamic access buffers. Thus we set the
    // transfer dst usage bit in case we need to do a copy to write data. It doesn't really hurt
    // to set this extra usage flag, but we could narrow the scope of buffers we set it on more
    // than just not dynamic.
    if (!requiresMappable || accessPattern == AccessPattern::kGpuOnly) {
        bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    }

    bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    bufInfo.queueFamilyIndexCount = 0;
    bufInfo.pQueueFamilyIndices = nullptr;

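    // With the create info fully specified (VK_SHARING_MODE_EXCLUSIVE means only one queue family
    // accesses the buffer at a time, so no queue family indices are listed), create the VkBuffer
    // handle. On failure there is nothing to clean up yet.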
    VkResult result;
    VULKAN_CALL_RESULT(sharedContext,
                       result,
                       CreateBuffer(sharedContext->device(),
                                    &bufInfo,
                                    nullptr, /*const VkAllocationCallbacks*/
                                    &buffer));
    if (result != VK_SUCCESS) {
        return nullptr;
    }

    auto allocator = sharedContext->memoryAllocator();
    bool shouldPersistentlyMapCpuToGpu =
            sharedContext->vulkanCaps().shouldPersistentlyMapCpuToGpuBuffers();
    // AllocBufferMemory
    auto checkResult = [](VkResult result) {
        return result == VK_SUCCESS;
    };
    if (!skgpu::VulkanMemory::AllocBufferMemory(allocator,
                                                buffer,
                                                allocUsage,
                                                shouldPersistentlyMapCpuToGpu,
                                                checkResult,
                                                &alloc)) {
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                                                              buffer,
                                                              /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }

    // Bind buffer
    VULKAN_CALL_RESULT(
            sharedContext,
            result,
            BindBufferMemory(sharedContext->device(), buffer, alloc.fMemory, alloc.fOffset));
    if (result != VK_SUCCESS) {
        skgpu::VulkanMemory::FreeBufferMemory(allocator, alloc);
        VULKAN_CALL(sharedContext->interface(), DestroyBuffer(sharedContext->device(),
                                                              buffer,
                                                              /*const VkAllocationCallbacks*=*/nullptr));
        return nullptr;
    }

    return sk_sp<Buffer>(new VulkanBuffer(
            sharedContext, size, type, accessPattern, std::move(buffer), alloc, bufInfo.usage));
}
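
// Illustrative usage (not part of this file): a hedged sketch of how a caller holding a
// VulkanSharedContext might create and fill a host-visible uniform buffer. The variable names and
// the caller are assumptions; map()/unmap() come from the graphite Buffer base class.
//
//     sk_sp<Buffer> uniforms = VulkanBuffer::Make(sharedContext,
//                                                 dataSize,
//                                                 BufferType::kUniform,
//                                                 AccessPattern::kHostVisible);
//     if (uniforms) {
//         if (void* ptr = uniforms->map()) {   // drives onMap() below
//             memcpy(ptr, srcData, dataSize);
//             uniforms->unmap();               // drives onUnmap(), which flushes the write
//         }
//     }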

VulkanBuffer::VulkanBuffer(const VulkanSharedContext* sharedContext,
                           size_t size,
                           BufferType type,
                           AccessPattern accessPattern,
                           VkBuffer buffer,
                           const skgpu::VulkanAlloc& alloc,
                           const VkBufferUsageFlags usageFlags)
        : Buffer(sharedContext, size)
        , fBuffer(std::move(buffer))
        , fAlloc(alloc)
        , fBufferUsageFlags(usageFlags)
        // We assume a buffer is used for CPU reads only in the case of GPU->CPU transfer buffers.
        , fBufferUsedForCPURead(type == BufferType::kXferGpuToCpu) {}
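
// Note: fBufferUsedForCPURead steers the map/unmap logic below. Buffers created for GPU->CPU
// readback invalidate their full range when mapped (so host reads see device writes), while all
// other mappable buffers flush their full range when unmapped (so device reads see host writes).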

void VulkanBuffer::freeGpuData() {
    if (fMapPtr) {
        this->internalUnmap(0, this->size());
        fMapPtr = nullptr;
    }

    const VulkanSharedContext* sharedContext =
            static_cast<const VulkanSharedContext*>(this->sharedContext());
    SkASSERT(fBuffer);
    SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
    VULKAN_CALL(sharedContext->interface(),
                DestroyBuffer(sharedContext->device(), fBuffer, nullptr));
    fBuffer = VK_NULL_HANDLE;

    skgpu::VulkanMemory::FreeBufferMemory(sharedContext->memoryAllocator(), fAlloc);
    fAlloc.fMemory = VK_NULL_HANDLE;
    fAlloc.fBackendMemory = 0;
}
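
// Note the teardown order above: the VkBuffer handle is destroyed first, and only then is its
// backing allocation returned to the memory allocator.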

void VulkanBuffer::internalMap(size_t readOffset, size_t readSize) {
    SkASSERT(!fMapPtr);
    if (this->isMappable()) {
        // Not every buffer will use command buffer usage refs. Instead, the command buffer just
        // holds normal refs. Systems higher up in Graphite should be making sure not to reuse a
        // buffer that currently has a ref held by something else. However, we do need to make sure
        // there isn't a buffer with just a command buffer usage that is trying to be mapped.
#ifdef SK_DEBUG
        SkASSERT(!this->debugHasCommandBufferRef());
#endif
        SkASSERT(fAlloc.fSize > 0);
        SkASSERT(fAlloc.fSize >= readOffset + readSize);

        const VulkanSharedContext* sharedContext = this->vulkanSharedContext();

        auto allocator = sharedContext->memoryAllocator();
        auto checkResult = [sharedContext](VkResult result) {
            VULKAN_LOG_IF_NOT_SUCCESS(sharedContext, result, "skgpu::VulkanMemory::MapAlloc");
            return sharedContext->checkVkResult(result);
        };
        fMapPtr = skgpu::VulkanMemory::MapAlloc(allocator, fAlloc, checkResult);
        if (fMapPtr && readSize != 0) {
            auto checkResult_invalidate = [sharedContext, readOffset, readSize](VkResult result) {
                VULKAN_LOG_IF_NOT_SUCCESS(sharedContext,
                                          result,
                                          "skgpu::VulkanMemory::InvalidateMappedAlloc "
                                          "(readOffset:%zu, readSize:%zu)",
                                          readOffset,
                                          readSize);
                return sharedContext->checkVkResult(result);
            };
            // "Invalidate" here means make device writes visible to the host. That is, it makes
            // sure any GPU writes are finished in the range we might read from.
            skgpu::VulkanMemory::InvalidateMappedAlloc(allocator,
                                                       fAlloc,
                                                       readOffset,
                                                       readSize,
                                                       checkResult_invalidate);
        }
    }
}

void VulkanBuffer::internalUnmap(size_t flushOffset, size_t flushSize) {
    SkASSERT(fMapPtr && this->isMappable());

    SkASSERT(fAlloc.fSize > 0);
    SkASSERT(fAlloc.fSize >= flushOffset + flushSize);

    const VulkanSharedContext* sharedContext = this->vulkanSharedContext();
    auto checkResult = [sharedContext, flushOffset, flushSize](VkResult result) {
        VULKAN_LOG_IF_NOT_SUCCESS(sharedContext,
                                  result,
                                  "skgpu::VulkanMemory::FlushMappedAlloc "
                                  "(flushOffset:%zu, flushSize:%zu)",
                                  flushOffset,
                                  flushSize);
        return sharedContext->checkVkResult(result);
    };

    auto allocator = sharedContext->memoryAllocator();
    skgpu::VulkanMemory::FlushMappedAlloc(allocator, fAlloc, flushOffset, flushSize, checkResult);
    skgpu::VulkanMemory::UnmapAlloc(allocator, fAlloc);
}

void VulkanBuffer::onMap() {
    SkASSERT(fBuffer);
    SkASSERT(!this->isMapped());

    this->internalMap(0, fBufferUsedForCPURead ? this->size() : 0);
}

void VulkanBuffer::onUnmap() {
    SkASSERT(fBuffer);
    SkASSERT(this->isMapped());
    this->internalUnmap(0, fBufferUsedForCPURead ? 0 : this->size());
}
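
// The asymmetry above is deliberate: GPU->CPU readback buffers only need their mapped range
// invalidated at map time and have nothing to flush at unmap, whereas CPU-written buffers skip
// the invalidate and instead flush the whole buffer when they are unmapped.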

void VulkanBuffer::setBufferAccess(VulkanCommandBuffer* cmdBuffer,
                                   VkAccessFlags dstAccessMask,
                                   VkPipelineStageFlags dstStageMask) const {
    // TODO: fill out other cases where we need a barrier
    if (dstAccessMask == VK_ACCESS_HOST_READ_BIT ||
        dstAccessMask == VK_ACCESS_TRANSFER_WRITE_BIT ||
        dstAccessMask == VK_ACCESS_UNIFORM_READ_BIT) {
        VkPipelineStageFlags srcStageMask =
                VulkanBuffer::AccessMaskToPipelineSrcStageFlags(fCurrentAccessMask);

        VkBufferMemoryBarrier bufferMemoryBarrier = {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // sType
                nullptr,                                  // pNext
                fCurrentAccessMask,                       // srcAccessMask
                dstAccessMask,                            // dstAccessMask
                VK_QUEUE_FAMILY_IGNORED,                  // srcQueueFamilyIndex
                VK_QUEUE_FAMILY_IGNORED,                  // dstQueueFamilyIndex
                fBuffer,                                  // buffer
                0,                                        // offset
                this->size(),                             // size
        };
        cmdBuffer->addBufferMemoryBarrier(srcStageMask, dstStageMask, &bufferMemoryBarrier);
    }

    fCurrentAccessMask = dstAccessMask;
}
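
// setBufferAccess() records the buffer's most recent access mask and, for the destination
// accesses handled so far (host reads, transfer writes, uniform reads), emits a
// VkBufferMemoryBarrier covering the whole buffer, deriving the source stage from that previous
// access mask via AccessMaskToPipelineSrcStageFlags().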

VkPipelineStageFlags VulkanBuffer::AccessMaskToPipelineSrcStageFlags(const VkAccessFlags srcMask) {
    if (srcMask == 0) {
        return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
    }
    VkPipelineStageFlags flags = 0;

    // NOTE: several of the access-mask checks below are reconstructed from the access and stage
    // flags this file references; they may not match the pinned Skia revision line for line.
    if (srcMask & VK_ACCESS_TRANSFER_WRITE_BIT || srcMask & VK_ACCESS_TRANSFER_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_TRANSFER_BIT;
    }
    if (srcMask & VK_ACCESS_COLOR_ATTACHMENT_READ_BIT ||
        srcMask & VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
    }
    if (srcMask & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT ||
        srcMask & VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
    }
    if (srcMask & VK_ACCESS_INPUT_ATTACHMENT_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_SHADER_READ_BIT ||
        srcMask & VK_ACCESS_UNIFORM_READ_BIT) {
        // TODO(b/307577875): It is possible that uniforms could have simply been used in the
        // vertex shader and not the fragment shader, so using the fragment shader pipeline stage
        // bit indiscriminately is a bit overkill. This call should be modified to check & allow
        // for selecting VK_PIPELINE_STAGE_VERTEX_SHADER_BIT when appropriate.
        flags |= VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_SHADER_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
    }
    if (srcMask & VK_ACCESS_INDEX_READ_BIT ||
        srcMask & VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
    }
    if (srcMask & VK_ACCESS_INDIRECT_COMMAND_READ_BIT) {
        flags |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
    }
    if (srcMask & VK_ACCESS_HOST_READ_BIT || srcMask & VK_ACCESS_HOST_WRITE_BIT) {
        flags |= VK_PIPELINE_STAGE_HOST_BIT;
    }

    return flags;
}
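
// For example, a buffer last written through a mapped host pointer (VK_ACCESS_HOST_WRITE_BIT)
// yields VK_PIPELINE_STAGE_HOST_BIT as the source stage for the barrier emitted above.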

} // namespace skgpu::graphite