GrVkGpu.h
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

class GrDirectContext;
class GrPipeline;
class GrVkBuffer;
class GrVkCommandPool;
class GrVkFramebuffer;
class GrVkPipeline;
class GrVkRenderPass;
class GrVkTexture;

namespace skgpu {
class VulkanMemoryAllocator;
class VulkanMutableTextureState;
struct VulkanInterface;
}

class GrVkGpu : public GrGpu {
public:
    static std::unique_ptr<GrGpu> Make(const GrVkBackendContext&,
                                       const GrContextOptions&,
                                       GrDirectContext*);

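    // For orientation: callers do not construct a GrVkGpu directly. A minimal creation sketch,
    // assuming the public Ganesh factory in include/gpu/ganesh/vk/GrVkDirectContext.h:
    //
    //   GrVkBackendContext backendContext;
    //   backendContext.fInstance = instance;          // plus device, queue, interface, etc.
    //   sk_sp<GrDirectContext> ctx = GrDirectContexts::MakeVulkan(backendContext);
    //   // The context owns the GrVkGpu that Make() returned.
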
    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;
    bool disconnected() const { return fDisconnected; }

    void releaseUnlockedBackendObjects() override {
        fResourceProvider.releaseUnlockedBackendObjects();
    }

    GrThreadSafePipelineBuilder* pipelineBuilder() override;
    sk_sp<GrThreadSafePipelineBuilder> refPipelineBuilder() override;

    const skgpu::VulkanInterface* vkInterface() const { return fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    GrStagingBufferManager* stagingBufferManager() override { return &fStagingBufferManager; }
    void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) override;

    bool isDeviceLost() const override { return fDeviceIsLost; }

    skgpu::VulkanMemoryAllocator* memoryAllocator() const { return fMemoryAllocator.get(); }

    VkPhysicalDevice physicalDevice() const { return fPhysicalDevice; }
    VkDevice device() const { return fDevice; }
    VkQueue queue() const { return fQueue; }
    uint32_t queueIndex() const { return fQueueIndex; }
    GrVkCommandPool* cmdPool() const { return fMainCmdPool; }
    const VkPhysicalDeviceProperties& physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    const VkPhysicalDeviceMemoryProperties& physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }
    bool protectedContext() const { return fProtectedContext == skgpu::Protected::kYes; }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() const { return fMainCmdBuffer; }

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override;

    bool setBackendTextureState(const GrBackendTexture&,
                                const skgpu::MutableTextureState&,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback) override;

    bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                     const skgpu::MutableTextureState&,
                                     skgpu::MutableTextureState* previousState,
                                     sk_sp<skgpu::RefCntedCallback> finishedCallback) override;

    void deleteBackendTexture(const GrBackendTexture&) override;

    bool compile(const GrProgramDesc&, const GrProgramInfo&) override;

#if defined(GR_TEST_UTILS)
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;

    GrBackendRenderTarget createTestingOnlyBackendRenderTarget(SkISize dimensions,
                                                               GrColorType,
                                                               int sampleCnt,
                                                               GrProtected) override;
    void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) override;

    void resetShaderCacheForTesting() const override {
        fResourceProvider.resetShaderCacheForTesting();
    }
#endif

    sk_sp<GrAttachment> makeStencilAttachment(const GrBackendFormat&,
                                              SkISize dimensions, int numStencilSamples) override;

    GrBackendFormat getPreferredStencilFormat(const GrBackendFormat&) override {
        return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
    }

    sk_sp<GrAttachment> makeMSAAAttachment(SkISize dimensions,
                                           const GrBackendFormat& format,
                                           int numSamples,
                                           GrProtected isProtected,
                                           GrMemoryless isMemoryless) override;

    void addBufferMemoryBarrier(const GrManagedResource*,
                                VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(const GrManagedResource*,
                               VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    bool loadMSAAFromResolve(GrVkCommandBuffer* commandBuffer,
                             const GrVkRenderPass& renderPass,
                             GrAttachment* dst,
                             GrVkImage* src,
                             const SkIRect& srcRect);

    bool onRegenerateMipMapLevels(GrTexture* tex) override;

    void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) override;

    void submitSecondaryCommandBuffer(std::unique_ptr<GrVkSecondaryCommandBuffer>);

    void submit(GrOpsRenderPass*) override;

    [[nodiscard]] std::unique_ptr<GrSemaphore> makeSemaphore(bool isOwned) override;
    std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore&,
                                                      GrSemaphoreWrapType,
                                                      GrWrapOwnership) override;
    void insertSemaphore(GrSemaphore* semaphore) override;
    void waitSemaphore(GrSemaphore* semaphore) override;

    // These match the definitions in SkDrawable, from whence they came
    typedef void* SubmitContext;
    typedef void (*SubmitProc)(SubmitContext submitContext);

    // Adds an SkDrawable::GpuDrawHandler that we will delete the next time we submit the primary
    // command buffer to the gpu.
    void addDrawable(std::unique_ptr<SkDrawable::GpuDrawHandler> drawable);
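
    // Illustrative flow (the real call site is in the Vulkan ops render pass; the local variable
    // names here are hypothetical):
    //
    //   auto handler = skDrawable->snapGpuDrawHandler(GrBackendApi::kVulkan, matrix,
    //                                                 clipBounds, bufferInfo);
    //   if (handler) {
    //       vkGpu->addDrawable(std::move(handler));  // released after the next primary CB submit
    //   }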

    void checkFinishProcs() override { fResourceProvider.checkCommandBuffers(); }
    void finishOutstandingGpuWork() override;

    std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

    bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
                      VkDeviceSize size);

    bool zeroBuffer(sk_sp<GrGpuBuffer>);

    enum PersistentCacheKeyType : uint32_t {
        kShader_PersistentCacheKeyType = 0,
        kPipelineCache_PersistentCacheKeyType = 1,
    };

    void storeVkPipelineCacheData() override;

    bool beginRenderPass(const GrVkRenderPass*,
                         sk_sp<const GrVkFramebuffer>,
                         const VkClearValue* colorClear,
                         const GrSurface*,
                         const SkIRect& renderPassBounds,
                         bool forSecondaryCB);
    void endRenderPass(GrRenderTarget* target, GrSurfaceOrigin origin, const SkIRect& bounds);

    // Returns true if VkResult indicates success and also checks for device lost or OOM. Every
    // Vulkan call (and skgpu::VulkanMemoryAllocator call that returns VkResult) made on behalf of
    // the GrVkGpu should be processed by this function so that we respond to OOMs and lost devices.
    bool checkVkResult(VkResult);

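    // Typical usage sketch; in practice the GR_VK_CALL_RESULT-style macros in GrVkUtil.h wrap
    // this pattern so individual call sites rarely spell it out:
    //
    //   VkResult result = /* some vk* or VulkanMemoryAllocator call made for this gpu */;
    //   if (!this->checkVkResult(result)) {
    //       return false;  // an OOM or device-lost condition has been recorded on the gpu
    //   }
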
private:
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    GrVkGpu(GrDirectContext*,
            const GrVkBackendContext&,
            const sk_sp<GrVkCaps> caps,
            sk_sp<const skgpu::VulkanInterface>,
            uint32_t instanceVersion,
            uint32_t physicalDeviceVersion,
            sk_sp<skgpu::VulkanMemoryAllocator>);

    void destroyResources();

    GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                            const GrBackendFormat&,
                                            GrRenderable,
                                            skgpu::Mipmapped,
                                            GrProtected,
                                            std::string_view label) override;
    GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                      const GrBackendFormat&,
                                                      skgpu::Mipmapped,
                                                      GrProtected) override;

    bool onClearBackendTexture(const GrBackendTexture&,
                               sk_sp<skgpu::RefCntedCallback> finishedCallback,
                               std::array<float, 4> color) override;

    bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                          sk_sp<skgpu::RefCntedCallback> finishedCallback,
                                          const void* data,
                                          size_t length) override;

    bool setBackendSurfaceState(GrVkImageInfo info,
                                sk_sp<skgpu::MutableTextureState> currentState,
                                SkISize dimensions,
                                VkImageLayout newLayout,
                                uint32_t newQueueFamilyIndex,
                                skgpu::MutableTextureState* previousState,
                                sk_sp<skgpu::RefCntedCallback> finishedCallback);

    sk_sp<GrTexture> onCreateTexture(SkISize,
                                     const GrBackendFormat&,
                                     GrRenderable,
                                     int renderTargetSampleCnt,
                                     skgpu::Budgeted,
                                     GrProtected,
                                     int mipLevelCount,
                                     uint32_t levelClearMask,
                                     std::string_view label) override;
    sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                               const GrBackendFormat&,
                                               skgpu::Budgeted,
                                               skgpu::Mipmapped,
                                               GrProtected,
                                               const void* data,
                                               size_t dataSize) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                          GrWrapOwnership,
                                          GrWrapCacheable,
                                          GrIOType) override;
    sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership,
                                                    GrWrapCacheable) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                const GrVkDrawableInfo&) override;

    sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern) override;

    bool onReadPixels(GrSurface*,
                      SkIRect,
                      GrColorType surfaceColorType,
                      GrColorType dstColorType,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface*,
                       SkIRect,
                       GrColorType surfaceColorType,
                       GrColorType srcColorType,
                       const GrMipLevel[],
                       int mipLevelCount,
                       bool prepForTexSampling) override;

    bool onTransferFromBufferToBuffer(sk_sp<GrGpuBuffer> src,
                                      size_t srcOffset,
                                      sk_sp<GrGpuBuffer> dst,
                                      size_t dstOffset,
                                      size_t size) override;

    bool onTransferPixelsTo(GrTexture*,
                            SkIRect,
                            GrColorType textureColorType,
                            GrColorType bufferColorType,
                            sk_sp<GrGpuBuffer>,
                            size_t offset,
                            size_t rowBytes) override;

    bool onTransferPixelsFrom(GrSurface*,
                              SkIRect,
                              GrColorType surfaceColorType,
                              GrColorType bufferColorType,
                              sk_sp<GrGpuBuffer>,
                              size_t offset) override;

    bool onCopySurface(GrSurface* dst, const SkIRect& dstRect,
                       GrSurface* src, const SkIRect& srcRect,
                       GrSamplerState::Filter) override;

    void addFinishedProc(GrGpuFinishedProc finishedProc,
                         GrGpuFinishedContext finishedContext) override;

    void addFinishedCallback(sk_sp<skgpu::RefCntedCallback> finishedCallback);

    GrOpsRenderPass* onGetOpsRenderPass(GrRenderTarget*,
                                        bool useMSAASurface,
                                        GrAttachment* stencil,
                                        GrSurfaceOrigin,
                                        const SkIRect&,
                                        const GrOpsRenderPass::LoadAndStoreInfo&,
                                        const GrOpsRenderPass::StencilLoadAndStoreInfo&,
                                        const skia_private::TArray<GrSurfaceProxy*, true>& sampledProxies,
                                        GrXferBarrierFlags renderPassXferBarriers) override;

    void prepareSurfacesForBackendAccessAndStateUpdates(
            SkSpan<GrSurfaceProxy*> proxies,
            SkSurfaces::BackendSurfaceAccess access,
            const skgpu::MutableTextureState* newState) override;

    bool onSubmitToGpu(GrSyncCpu sync) override;

    void onReportSubmitHistograms() override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    bool submitCommandBuffer(SyncQueue sync);
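    // For orientation (a sketch of the expected call chain, not normative): GrGpu::submitToGpu()
    // lands in onSubmitToGpu(GrSyncCpu) above, which is expected to forward here with
    // kForce_SyncQueue when the caller requested a CPU sync (GrSyncCpu::kYes) and
    // kSkip_SyncQueue otherwise.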

    void copySurfaceAsCopyImage(GrSurface* dst,
                                GrSurface* src,
                                GrVkImage* dstImage,
                                GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst,
                           GrSurface* src,
                           GrVkImage* dstImage,
                           GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIRect& dstRect,
                           GrSamplerState::Filter filter);

    void copySurfaceAsResolve(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // helpers for onCreateTexture and writeTexturePixels (see the note below on when each path
    // is used)
    bool uploadTexDataLinear(GrVkImage* tex,
                             SkIRect rect,
                             GrColorType dataColorType,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkImage* tex,
                              SkIRect rect,
                              GrColorType dataColorType,
                              const GrMipLevel texels[],
                              int mipLevelCount);
    bool uploadTexDataCompressed(GrVkImage* tex,
                                 SkTextureCompressionType compression,
                                 VkFormat vkFormat,
                                 SkISize dimensions,
                                 skgpu::Mipmapped mipmapped,
                                 const void* data,
                                 size_t dataSize);
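
    // Informal note on when each upload path is used (summary of the Vulkan backend's behavior):
    //  - uploadTexDataLinear writes pixels through a mapped allocation and only works for images
    //    created with VK_IMAGE_TILING_LINEAR;
    //  - uploadTexDataOptimal stages pixels via the staging buffer manager and records a
    //    vkCmdCopyBufferToImage, the path for VK_IMAGE_TILING_OPTIMAL images;
    //  - uploadTexDataCompressed performs the same staged copy for block-compressed formats.
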
    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    bool createVkImageForBackendSurface(VkFormat,
                                        SkISize dimensions,
                                        int sampleCnt,
                                        GrTexturable,
                                        GrRenderable,
                                        skgpu::Mipmapped,
                                        GrVkImageInfo* info,
                                        GrProtected isProtected);

    sk_sp<const skgpu::VulkanInterface> fInterface;
    sk_sp<skgpu::VulkanMemoryAllocator> fMemoryAllocator;
    sk_sp<GrVkCaps> fVkCaps;
    bool fDeviceIsLost = false;

    VkPhysicalDevice fPhysicalDevice;
    VkDevice fDevice;
    VkQueue fQueue; // Must be Graphics queue
    uint32_t fQueueIndex;

    // Created by GrVkGpu
    GrVkResourceProvider fResourceProvider;
    GrStagingBufferManager fStagingBufferManager;

    GrVkMSAALoadManager fMSAALoadManager;

    GrVkCommandPool* fMainCmdPool;
    // just a raw pointer; object's lifespan is managed by fMainCmdPool
    GrVkPrimaryCommandBuffer* fMainCmdBuffer;

    VkPhysicalDeviceProperties fPhysDevProps;
    VkPhysicalDeviceMemoryProperties fPhysDevMemProps;

    // We need a bool to track whether or not we've already disconnected all the gpu resources
    // from the Vulkan context.
    bool fDisconnected;

    skgpu::Protected fProtectedContext;

    std::unique_ptr<GrVkOpsRenderPass> fCachedOpsRenderPass;

    skgpu::VulkanDeviceLostContext fDeviceLostContext;
    skgpu::VulkanDeviceLostProc fDeviceLostProc;

    using INHERITED = GrGpu;
};

#endif