GrVkCommandBuffer.h
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkCommandBuffer_DEFINED
#define GrVkCommandBuffer_DEFINED

#include "src/gpu/GpuRefCnt.h"

class GrVkFramebuffer;
class GrVkImage;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkRenderPass;
class GrVkRenderTarget;

class GrVkCommandBuffer {
public:
    virtual ~GrVkCommandBuffer() {}

    void invalidateState();

    ////////////////////////////////////////////////////////////////////////////
    // CommandBuffer commands
    ////////////////////////////////////////////////////////////////////////////
    enum BarrierType {
        kBufferMemory_BarrierType,
        kImageMemory_BarrierType
    };

    void pipelineBarrier(const GrVkGpu* gpu,
                         const GrManagedResource* resource,
                         VkPipelineStageFlags srcStageMask,
                         VkPipelineStageFlags dstStageMask,
                         bool byRegion,
                         BarrierType barrierType,
                         void* barrier);
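
    // Usage sketch (illustrative, not a declaration in this header): recording an
    // image memory barrier. 'gpu', 'cb' (a GrVkCommandBuffer*), and 'image' (a
    // GrVkImage*) are assumed from caller context, as are the image->image() and
    // image->resource() accessors. The barrier is only batched here; it is
    // recorded for real when submitPipelineBarriers() runs (see the
    // fImageBarriers/fBufferBarriers members below).
    //
    //   VkImageMemoryBarrier barrier = {};
    //   barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    //   barrier.srcAccessMask = 0;
    //   barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    //   barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    //   barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    //   barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    //   barrier.image = image->image();
    //   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    //   cb->pipelineBarrier(gpu, image->resource(),
    //                       VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
    //                       VK_PIPELINE_STAGE_TRANSFER_BIT,
    //                       /*byRegion=*/false,
    //                       GrVkCommandBuffer::kImageMemory_BarrierType, &barrier);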

    void bindInputBuffer(GrVkGpu* gpu, uint32_t binding, sk_sp<const GrBuffer> buffer);

    void bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer);

    void bindPipeline(const GrVkGpu* gpu, sk_sp<const GrVkPipeline> pipeline);

    void bindDescriptorSets(const GrVkGpu* gpu,
                            VkPipelineLayout layout,
                            uint32_t firstSet,
                            uint32_t setCount,
                            const VkDescriptorSet* descriptorSets,
                            uint32_t dynamicOffsetCount,
                            const uint32_t* dynamicOffsets);
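
    // Usage sketch (illustrative): binding one descriptor set with a single
    // dynamic uniform-buffer offset; 'gpu', 'cb', 'layout', and 'set' are assumed
    // from caller context.
    //
    //   uint32_t dynamicOffset = 0;
    //   cb->bindDescriptorSets(gpu, layout, /*firstSet=*/0, /*setCount=*/1, &set,
    //                          /*dynamicOffsetCount=*/1, &dynamicOffset);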

    void pushConstants(const GrVkGpu* gpu, VkPipelineLayout layout,
                       VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                       const void* values);
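
    // Usage sketch (illustrative): pushing a 4x4 float matrix to the vertex
    // stage. The offset and size must fall within a push-constant range declared
    // in 'layout'.
    //
    //   float matrix[16] = {/* column-major 4x4 */};
    //   cb->pushConstants(gpu, layout, VK_SHADER_STAGE_VERTEX_BIT,
    //                     /*offset=*/0, /*size=*/sizeof(matrix), matrix);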

    void setViewport(const GrVkGpu* gpu,
                     uint32_t firstViewport,
                     uint32_t viewportCount,
                     const VkViewport* viewports);

    void setScissor(const GrVkGpu* gpu,
                    uint32_t firstScissor,
                    uint32_t scissorCount,
                    const VkRect2D* scissors);

    void setBlendConstants(const GrVkGpu* gpu, const float blendConstants[4]);
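
    // Usage sketch (illustrative): setting the dynamic state for a 1920x1080
    // target. The fCachedViewport/fCachedScissor/fCachedBlendConstant members
    // below suggest redundant updates are filtered out.
    //
    //   VkViewport viewport = {0.0f, 0.0f, 1920.0f, 1080.0f, 0.0f, 1.0f};
    //   cb->setViewport(gpu, /*firstViewport=*/0, /*viewportCount=*/1, &viewport);
    //   VkRect2D scissor = {{0, 0}, {1920, 1080}};
    //   cb->setScissor(gpu, /*firstScissor=*/0, /*scissorCount=*/1, &scissor);
    //   float blendConstants[4] = {1.0f, 1.0f, 1.0f, 1.0f};
    //   cb->setBlendConstants(gpu, blendConstants);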

    // Commands that only work inside of a render pass
    void clearAttachments(const GrVkGpu* gpu,
                          int numAttachments,
                          const VkClearAttachment* attachments,
                          int numRects,
                          const VkClearRect* clearRects);
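
    // Usage sketch (illustrative): clearing color attachment 0 inside the active
    // render pass; 'width' and 'height' are assumed to match the framebuffer.
    //
    //   VkClearAttachment attachment = {};
    //   attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    //   attachment.colorAttachment = 0;
    //   attachment.clearValue.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
    //   VkClearRect clearRect = {{{0, 0}, {width, height}},
    //                            /*baseArrayLayer=*/0, /*layerCount=*/1};
    //   cb->clearAttachments(gpu, 1, &attachment, 1, &clearRect);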

    void drawIndexed(const GrVkGpu* gpu,
                     uint32_t indexCount,
                     uint32_t instanceCount,
                     uint32_t firstIndex,
                     int32_t vertexOffset,
                     uint32_t firstInstance);

    void draw(const GrVkGpu* gpu,
              uint32_t vertexCount,
              uint32_t instanceCount,
              uint32_t firstVertex,
              uint32_t firstInstance);
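
    // Usage sketch (illustrative): a minimal indexed draw; the pipeline and the
    // vertex/index buffers are assumed to have been created elsewhere.
    //
    //   cb->bindPipeline(gpu, pipeline);
    //   cb->bindInputBuffer(gpu, /*binding=*/0, vertexBuffer);
    //   cb->bindIndexBuffer(gpu, indexBuffer);
    //   cb->drawIndexed(gpu, indexCount, /*instanceCount=*/1, /*firstIndex=*/0,
    //                   /*vertexOffset=*/0, /*firstInstance=*/0);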

    void drawIndirect(const GrVkGpu* gpu,
                      sk_sp<const GrBuffer> indirectBuffer,
                      VkDeviceSize offset,
                      uint32_t drawCount,
                      uint32_t stride);

    void drawIndexedIndirect(const GrVkGpu* gpu,
                             sk_sp<const GrBuffer> indirectBuffer,
                             VkDeviceSize offset,
                             uint32_t drawCount,
                             uint32_t stride);
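
    // Usage sketch (illustrative): one indirect draw whose parameters live in
    // 'indirectBuffer' as a VkDrawIndexedIndirectCommand written earlier.
    //
    //   cb->drawIndexedIndirect(gpu, indirectBuffer, /*offset=*/0, /*drawCount=*/1,
    //                           /*stride=*/sizeof(VkDrawIndexedIndirectCommand));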

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution.
    void addResource(sk_sp<const GrManagedResource> resource) {
        SkASSERT(resource);
        fTrackedResources.push_back(std::move(resource));
    }
    void addResource(const GrManagedResource* resource) {
        this->addResource(sk_ref_sp(resource));
    }
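
    // Usage sketch (illustrative): keeping a GPU object alive until this command
    // buffer has finished executing; the tracked ref is dropped in
    // releaseResources().
    //
    //   sk_sp<const GrManagedResource> res = /* e.g. a texture's managed resource */;
    //   cb->addResource(std::move(res));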

    // Add ref-counted resource that will be tracked and released when this command buffer finishes
    // execution. When it is released, it will signal that the resource can be recycled for reuse.
    void addRecycledResource(gr_rp<const GrRecycledResource> resource) {
        SkASSERT(resource);
        fTrackedRecycledResources.push_back(std::move(resource));
    }

    void addRecycledResource(const GrRecycledResource* resource) {
        this->addRecycledResource(gr_ref_rp<const GrRecycledResource>(resource));
    }

    void addGrBuffer(sk_sp<const GrBuffer> buffer) {
        fTrackedGpuBuffers.push_back(std::move(buffer));
    }

    void addGrSurface(sk_sp<const GrSurface> surface) {
        fTrackedGpuSurfaces.push_back(std::move(surface));
    }

    void releaseResources();

    void freeGPUData(const GrGpu* gpu, VkCommandPool pool) const;

    bool hasWork() const { return fHasWork; }
protected:
    GrVkCommandBuffer(VkCommandBuffer cmdBuffer, bool isWrapped = false)
            : fIsActive(isWrapped)  // All wrapped command buffers start as active
            , fCmdBuffer(cmdBuffer)
            , fIsWrapped(isWrapped) {
        this->invalidateState();
    }

    bool isWrapped() const { return fIsWrapped; }

    void addingWork(const GrVkGpu* gpu);

    void submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfDependency = false);

private:
    static constexpr int kInitialTrackedResourcesCount = 32;

161protected:
162 template <typename T>
168
    // Tracks whether we are in the middle of command buffer begin/end calls and thus can add
    // new commands to the buffer.
    bool fIsActive = false;
    bool fHasWork = false;

    // Stores a pointer to the current active render pass (i.e. begin has been called but not
    // end). A nullptr means there is no active render pass. The GrVkCommandBuffer does not own
    // the render pass.
    const GrVkRenderPass* fActiveRenderPass = nullptr;

    VkCommandBuffer fCmdBuffer;

    virtual void onReleaseResources() {}
    virtual void onFreeGPUData(const GrVkGpu* gpu) const = 0;

    static constexpr uint32_t kMaxInputBuffers = 2;

    VkBuffer fBoundInputBuffers[kMaxInputBuffers];
    VkBuffer fBoundIndexBuffer;

    // Cached values used for dynamic state updates
    VkViewport fCachedViewport;
    VkRect2D fCachedScissor;
    float fCachedBlendConstant[4];

    // Tracking of memory barriers so that we can submit them all in a batch together.
    skia_private::STArray<1, VkBufferMemoryBarrier> fBufferBarriers;
    skia_private::STArray<2, VkImageMemoryBarrier> fImageBarriers;
    bool fBarriersByRegion = false;
    VkPipelineStageFlags fSrcStageMask = 0;
    VkPipelineStageFlags fDstStageMask = 0;

    bool fIsWrapped;
};

class GrVkSecondaryCommandBuffer;

class GrVkPrimaryCommandBuffer : public GrVkCommandBuffer {
public:
    ~GrVkPrimaryCommandBuffer() override;

    static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);

    void begin(GrVkGpu* gpu);
    void end(GrVkGpu* gpu, bool abandoningBuffer = false);

    // Begins a render pass on this command buffer. The framebuffer from GrVkRenderTarget will be
    // used in the render pass.
    bool beginRenderPass(GrVkGpu* gpu,
                         const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue clearValues[],
                         const GrSurface* target,
                         const SkIRect& bounds,
                         bool forSecondaryCB);
    void endRenderPass(const GrVkGpu* gpu);
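
    // Usage sketch (illustrative): bracketing draws in a render pass. All names
    // are assumed from caller context; 'clearValues' needs one entry per
    // attachment of the render pass.
    //
    //   if (cb->beginRenderPass(gpu, renderPass, framebuffer, clearValues,
    //                           targetSurface, bounds, /*forSecondaryCB=*/false)) {
    //       // ... record draws and other in-render-pass commands ...
    //       cb->endRenderPass(gpu);
    //   }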

    void nexSubpass(GrVkGpu* gpu, bool forSecondaryCB);

    // Submits the SecondaryCommandBuffer into this command buffer. It is required that we are
    // currently inside a render pass that is compatible with the one used to create the
    // SecondaryCommandBuffer.
    void executeCommands(const GrVkGpu* gpu,
                         std::unique_ptr<GrVkSecondaryCommandBuffer> secondaryBuffer);
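
    // Usage sketch (illustrative): recording into a secondary command buffer and
    // splicing it into this primary one, inside a render pass that was begun with
    // forSecondaryCB set.
    //
    //   std::unique_ptr<GrVkSecondaryCommandBuffer> secondary = /* from the pool */;
    //   secondary->begin(gpu, framebuffer, compatibleRenderPass);
    //   // ... record draws into 'secondary' ...
    //   secondary->end(gpu);
    //   primary->executeCommands(gpu, std::move(secondary));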

    // Commands that only work outside of a render pass
    void clearColorImage(const GrVkGpu* gpu,
                         GrVkImage* image,
                         const VkClearColorValue* color,
                         uint32_t subRangeCount,
                         const VkImageSubresourceRange* subRanges);

    void clearDepthStencilImage(const GrVkGpu* gpu,
                                GrVkImage* image,
                                const VkClearDepthStencilValue* color,
                                uint32_t subRangeCount,
                                const VkImageSubresourceRange* subRanges);

    void copyImage(const GrVkGpu* gpu,
                   GrVkImage* srcImage,
                   VkImageLayout srcLayout,
                   GrVkImage* dstImage,
                   VkImageLayout dstLayout,
                   uint32_t copyRegionCount,
                   const VkImageCopy* copyRegions);

    void blitImage(const GrVkGpu* gpu,
                   const GrManagedResource* srcResource,
                   VkImage srcImage,
                   VkImageLayout srcLayout,
                   const GrManagedResource* dstResource,
                   VkImage dstImage,
                   VkImageLayout dstLayout,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void blitImage(const GrVkGpu* gpu,
                   const GrVkImage& srcImage,
                   const GrVkImage& dstImage,
                   uint32_t blitRegionCount,
                   const VkImageBlit* blitRegions,
                   VkFilter filter);

    void copyImageToBuffer(const GrVkGpu* gpu,
                           GrVkImage* srcImage,
                           VkImageLayout srcLayout,
                           sk_sp<GrGpuBuffer> dstBuffer,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);

    // All uses of copyBufferToImage are done with buffers from our staging manager. The staging
    // manager will handle making sure the command buffer refs the buffer. Thus we just pass in the
    // raw VkBuffer here and don't worry about refs.
    void copyBufferToImage(const GrVkGpu* gpu,
                           VkBuffer srcBuffer,
                           GrVkImage* dstImage,
                           VkImageLayout dstLayout,
                           uint32_t copyRegionCount,
                           const VkBufferImageCopy* copyRegions);
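
    // Usage sketch (illustrative): uploading RGBA pixels from a staging buffer;
    // 'stagingBuffer' (a raw VkBuffer) and 'dstImage' come from caller context,
    // and dstImage is assumed to already be in TRANSFER_DST_OPTIMAL layout.
    //
    //   VkBufferImageCopy region = {};
    //   region.bufferOffset = 0;
    //   region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0,
    //                              /*baseArrayLayer=*/0, /*layerCount=*/1};
    //   region.imageExtent = {width, height, 1};
    //   cb->copyBufferToImage(gpu, stagingBuffer, dstImage,
    //                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);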

    void fillBuffer(GrVkGpu* gpu,
                    sk_sp<GrGpuBuffer>,
                    VkDeviceSize offset,
                    VkDeviceSize size,
                    uint32_t data);

    void copyBuffer(GrVkGpu* gpu,
                    sk_sp<GrGpuBuffer> srcBuffer,
                    sk_sp<GrGpuBuffer> dstBuffer,
                    uint32_t regionCount,
                    const VkBufferCopy* regions);

    void updateBuffer(GrVkGpu* gpu,
                      sk_sp<GrVkBuffer> dstBuffer,
                      VkDeviceSize dstOffset,
                      VkDeviceSize dataSize,
                      const void* data);

    void resolveImage(GrVkGpu* gpu,
                      const GrVkImage& srcImage,
                      const GrVkImage& dstImage,
                      uint32_t regionCount,
                      const VkImageResolve* regions);

    bool submitToQueue(GrVkGpu* gpu, VkQueue queue,
                       skia_private::TArray<GrVkSemaphore::Resource*>& signalSemaphores,
                       skia_private::TArray<GrVkSemaphore::Resource*>& waitSemaphores);
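
    // Usage sketch (illustrative): submitting and polling for completion;
    // finished() is assumed to check the fence signaled by this submission, after
    // which tracked resources can be dropped via releaseResources().
    //
    //   skia_private::TArray<GrVkSemaphore::Resource*> signalSemaphores;
    //   skia_private::TArray<GrVkSemaphore::Resource*> waitSemaphores;
    //   if (cb->submitToQueue(gpu, queue, signalSemaphores, waitSemaphores)) {
    //       while (!cb->finished(gpu)) {
    //           // ... do other work, or use forceSync(gpu) to block ...
    //       }
    //       cb->releaseResources();
    //   }
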
    void forceSync(GrVkGpu* gpu);

    bool finished(GrVkGpu* gpu);

    void addFinishedProc(sk_sp<skgpu::RefCntedCallback> finishedProc);

    void callFinishedProcs() {
        fFinishedProcs.clear();
    }

    void recycleSecondaryCommandBuffers(GrVkCommandPool* cmdPool);

private:
    explicit GrVkPrimaryCommandBuffer(VkCommandBuffer cmdBuffer)
            : INHERITED(cmdBuffer)
            , fSubmitFence(VK_NULL_HANDLE) {}

    void onFreeGPUData(const GrVkGpu* gpu) const override;

    void onReleaseResources() override;

    skia_private::TArray<std::unique_ptr<GrVkSecondaryCommandBuffer>> fSecondaryCommandBuffers;
    VkFence fSubmitFence;
    skia_private::TArray<sk_sp<skgpu::RefCntedCallback>> fFinishedProcs;

    using INHERITED = GrVkCommandBuffer;
};

class GrVkSecondaryCommandBuffer : public GrVkCommandBuffer {
public:
    static GrVkSecondaryCommandBuffer* Create(GrVkGpu* gpu, GrVkCommandPool* cmdPool);
    // Used for wrapping an external secondary command buffer.
    static GrVkSecondaryCommandBuffer* Create(VkCommandBuffer externalSecondaryCB,
                                              const GrVkRenderPass* externalRenderPass);

    void begin(GrVkGpu* gpu, const GrVkFramebuffer* framebuffer,
               const GrVkRenderPass* compatibleRenderPass);
    void end(GrVkGpu* gpu);

    void recycle(GrVkCommandPool* cmdPool);

    VkCommandBuffer vkCommandBuffer() { return fCmdBuffer; }

private:
    explicit GrVkSecondaryCommandBuffer(VkCommandBuffer cmdBuffer,
                                        const GrVkRenderPass* externalRenderPass)
            : INHERITED(cmdBuffer, SkToBool(externalRenderPass)) {
        fActiveRenderPass = externalRenderPass;
    }

    void onFreeGPUData(const GrVkGpu* gpu) const override {}

    // Used for accessing fIsActive (on GrVkCommandBuffer)
    friend class GrVkPrimaryCommandBuffer;

    using INHERITED = GrVkCommandBuffer;
};

#endif