Flutter Engine
The Flutter Engine
Loading...
Searching...
No Matches
Public Types | Public Member Functions | Static Public Member Functions | Private Member Functions | List of all members
GrVkGpu Class Reference

#include <GrVkGpu.h>

Inheritance diagram for GrVkGpu:
GrGpu

Public Types

enum  PersistentCacheKeyType : uint32_t { kShader_PersistentCacheKeyType = 0 , kPipelineCache_PersistentCacheKeyType = 1 }
 
typedef void * SubmitContext
 
typedef void(* SubmitProc) (SubmitContext submitContext)
 
- Public Types inherited from GrGpu
enum class  DisconnectType { kAbandon , kCleanup }
 

Public Member Functions

 ~GrVkGpu () override
 
void disconnect (DisconnectType) override
 
bool disconnected () const
 
void releaseUnlockedBackendObjects () override
 
GrThreadSafePipelineBuilder * pipelineBuilder () override
 
sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder () override
 
const skgpu::VulkanInterface * vkInterface () const
 
const GrVkCaps & vkCaps () const
 
GrStagingBufferManager * stagingBufferManager () override
 
void takeOwnershipOfBuffer (sk_sp< GrGpuBuffer >) override
 
bool isDeviceLost () const override
 
skgpu::VulkanMemoryAllocator * memoryAllocator () const
 
VkPhysicalDevice physicalDevice () const
 
VkDevice device () const
 
VkQueue queue () const
 
uint32_t queueIndex () const
 
GrVkCommandPool * cmdPool () const
 
const VkPhysicalDeviceProperties & physicalDeviceProperties () const
 
const VkPhysicalDeviceMemoryProperties & physicalDeviceMemoryProperties () const
 
bool protectedContext () const
 
GrVkResourceProvider & resourceProvider ()
 
GrVkPrimaryCommandBuffer * currentCommandBuffer () const
 
void xferBarrier (GrRenderTarget *, GrXferBarrierType) override
 
bool setBackendTextureState (const GrBackendTexture &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback) override
 
bool setBackendRenderTargetState (const GrBackendRenderTarget &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback) override
 
void deleteBackendTexture (const GrBackendTexture &) override
 
bool compile (const GrProgramDesc &, const GrProgramInfo &) override
 
sk_sp< GrAttachment > makeStencilAttachment (const GrBackendFormat &, SkISize dimensions, int numStencilSamples) override
 
GrBackendFormat getPreferredStencilFormat (const GrBackendFormat &) override
 
sk_sp< GrAttachment > makeMSAAAttachment (SkISize dimensions, const GrBackendFormat &format, int numSamples, GrProtected isProtected, GrMemoryless isMemoryless) override
 
void addBufferMemoryBarrier (const GrManagedResource *, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkBufferMemoryBarrier *barrier) const
 
void addBufferMemoryBarrier (VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkBufferMemoryBarrier *barrier) const
 
void addImageMemoryBarrier (const GrManagedResource *, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkImageMemoryBarrier *barrier) const
 
bool loadMSAAFromResolve (GrVkCommandBuffer *commandBuffer, const GrVkRenderPass &renderPass, GrAttachment *dst, GrVkImage *src, const SkIRect &srcRect)
 
bool onRegenerateMipMapLevels (GrTexture *tex) override
 
void onResolveRenderTarget (GrRenderTarget *target, const SkIRect &resolveRect) override
 
void submitSecondaryCommandBuffer (std::unique_ptr< GrVkSecondaryCommandBuffer >)
 
void submit (GrOpsRenderPass *) override
 
std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned) override
 
std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership) override
 
void insertSemaphore (GrSemaphore *semaphore) override
 
void waitSemaphore (GrSemaphore *semaphore) override
 
void addDrawable (std::unique_ptr< SkDrawable::GpuDrawHandler > drawable)
 
void checkFinishProcs () override
 
void finishOutstandingGpuWork () override
 
std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *) override
 
bool updateBuffer (sk_sp< GrVkBuffer > buffer, const void *src, VkDeviceSize offset, VkDeviceSize size)
 
bool zeroBuffer (sk_sp< GrGpuBuffer >)
 
void storeVkPipelineCacheData () override
 
bool beginRenderPass (const GrVkRenderPass *, sk_sp< const GrVkFramebuffer >, const VkClearValue *colorClear, const GrSurface *, const SkIRect &renderPassBounds, bool forSecondaryCB)
 
void endRenderPass (GrRenderTarget *target, GrSurfaceOrigin origin, const SkIRect &bounds)
 
bool checkVkResult (VkResult)
 
- Public Member Functions inherited from GrGpu
 GrGpu (GrDirectContext *direct)
 
virtual ~GrGpu ()
 
GrDirectContext * getContext ()
 
const GrDirectContext * getContext () const
 
const GrCaps * caps () const
 
sk_sp< const GrCaps > refCaps () const
 
GrRingBuffer * uniformsRingBuffer ()
 
void markContextDirty (uint32_t state=kAll_GrBackendState)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Budgeted budgeted, GrProtected isProtected, GrColorType textureColorType, GrColorType srcColorType, const GrMipLevel texels[], int texelLevelCount, std::string_view label)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Mipmapped mipmapped, skgpu::Budgeted budgeted, GrProtected isProtected, std::string_view label)
 
sk_sp< GrTexture > createCompressedTexture (SkISize dimensions, const GrBackendFormat &format, skgpu::Budgeted budgeted, skgpu::Mipmapped mipmapped, GrProtected isProtected, const void *data, size_t dataSize)
 
sk_sp< GrTexture > wrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)
 
sk_sp< GrTexture > wrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrTexture > wrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrRenderTarget > wrapBackendRenderTarget (const GrBackendRenderTarget &)
 
sk_sp< GrRenderTarget > wrapVulkanSecondaryCBAsRenderTarget (const SkImageInfo &, const GrVkDrawableInfo &)
 
sk_sp< GrGpuBuffer > createBuffer (size_t size, GrGpuBufferType intendedType, GrAccessPattern accessPattern)
 
void resolveRenderTarget (GrRenderTarget *, const SkIRect &resolveRect)
 
bool regenerateMipMapLevels (GrTexture *)
 
void resetTextureBindings ()
 
bool readPixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling=false)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const void *buffer, size_t rowBytes, bool prepForTexSampling=false)
 
bool transferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)
 
bool transferPixelsTo (GrTexture *texture, SkIRect rect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)
 
bool transferPixelsFrom (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)
 
bool copySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter filter)
 
GrOpsRenderPass * getOpsRenderPass (GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)
 
void executeFlushInfo (SkSpan< GrSurfaceProxy * >, SkSurfaces::BackendSurfaceAccess access, const GrFlushInfo &, const skgpu::MutableTextureState *newState)
 
virtual void willExecute ()
 
bool submitToGpu (GrSyncCpu sync)
 
bool checkAndResetOOMed ()
 
Stats * stats ()
 
void dumpJSON (SkJSONWriter *) const
 
GrBackendTexture createBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)
 
bool clearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)
 
GrBackendTexture createCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)
 
bool updateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)
 
virtual bool precompileShader (const SkData &key, const SkData &data)
 
void handleDirtyContext ()
 

Static Public Member Functions

static std::unique_ptr< GrGpu > Make (const GrVkBackendContext &, const GrContextOptions &, GrDirectContext *)
 

Private Member Functions

GrBackendTexture onCreateBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label) override
 
GrBackendTexture onCreateCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected) override
 
bool onClearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color) override
 
bool onUpdateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length) override
 
sk_sp< GrTexture > onCreateTexture (SkISize, const GrBackendFormat &, GrRenderable, int renderTargetSampleCnt, skgpu::Budgeted, GrProtected, int mipLevelCount, uint32_t levelClearMask, std::string_view label) override
 
sk_sp< GrTexture > onCreateCompressedTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Budgeted, skgpu::Mipmapped, GrProtected, const void *data, size_t dataSize) override
 
sk_sp< GrTexture > onWrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType) override
 
sk_sp< GrTexture > onWrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable) override
 
sk_sp< GrTexture > onWrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable) override
 
sk_sp< GrRenderTarget > onWrapBackendRenderTarget (const GrBackendRenderTarget &) override
 
sk_sp< GrRenderTarget > onWrapVulkanSecondaryCBAsRenderTarget (const SkImageInfo &, const GrVkDrawableInfo &) override
 
sk_sp< GrGpuBuffer > onCreateBuffer (size_t size, GrGpuBufferType type, GrAccessPattern) override
 
bool onReadPixels (GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes) override
 
bool onWritePixels (GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel[], int mipLevelCount, bool prepForTexSampling) override
 
bool onTransferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size) override
 
bool onTransferPixelsTo (GrTexture *, SkIRect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer >, size_t offset, size_t rowBytes) override
 
bool onTransferPixelsFrom (GrSurface *, SkIRect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer >, size_t offset) override
 
bool onCopySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter) override
 
void addFinishedProc (GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext) override
 
GrOpsRenderPass * onGetOpsRenderPass (GrRenderTarget *, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers) override
 
void prepareSurfacesForBackendAccessAndStateUpdates (SkSpan< GrSurfaceProxy * > proxies, SkSurfaces::BackendSurfaceAccess access, const skgpu::MutableTextureState *newState) override
 
bool onSubmitToGpu (GrSyncCpu sync) override
 
void onReportSubmitHistograms () override
 

Additional Inherited Members

- Protected Member Functions inherited from GrGpu
void didWriteToSurface (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
 
void setOOMed ()
 
void initCaps (sk_sp< const GrCaps > caps)
 
- Static Protected Member Functions inherited from GrGpu
static bool CompressedDataIsCorrect (SkISize dimensions, SkTextureCompressionType, skgpu::Mipmapped, const void *data, size_t length)
 
- Protected Attributes inherited from GrGpu
Stats fStats
 

Detailed Description

Definition at line 42 of file GrVkGpu.h.

Member Typedef Documentation

◆ SubmitContext

typedef void* GrVkGpu::SubmitContext

Definition at line 167 of file GrVkGpu.h.

◆ SubmitProc

typedef void(* GrVkGpu::SubmitProc) (SubmitContext submitContext)

Definition at line 168 of file GrVkGpu.h.

Member Enumeration Documentation

◆ PersistentCacheKeyType

Enumerator
kShader_PersistentCacheKeyType 
kPipelineCache_PersistentCacheKeyType 

Definition at line 184 of file GrVkGpu.h.

184 : uint32_t {
187 };
@ kPipelineCache_PersistentCacheKeyType
Definition GrVkGpu.h:186
@ kShader_PersistentCacheKeyType
Definition GrVkGpu.h:185

Constructor & Destructor Documentation

◆ ~GrVkGpu()

GrVkGpu::~GrVkGpu ( )
override

Definition at line 292 of file GrVkGpu.cpp.

292 {
293 if (!fDisconnected) {
294 this->destroyResources();
295 }
296 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
297 // clients can continue to delete backend textures even after a context has been abandoned.
298 fMemoryAllocator.reset();
299}
void reset(T *ptr=nullptr)
Definition SkRefCnt.h:310

Member Function Documentation

◆ addBufferMemoryBarrier() [1/2]

void GrVkGpu::addBufferMemoryBarrier ( const GrManagedResource * resource,
VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkBufferMemoryBarrier * barrier 
) const

Definition at line 2143 of file GrVkGpu.cpp.

2147 {
2148 if (!this->currentCommandBuffer()) {
2149 return;
2150 }
2151 SkASSERT(resource);
2153 resource,
2154 srcStageMask,
2155 dstStageMask,
2156 byRegion,
2158 barrier);
2159}
#define SkASSERT(cond)
Definition SkAssert.h:116
void pipelineBarrier(const GrVkGpu *gpu, const GrManagedResource *resource, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, BarrierType barrierType, void *barrier)
GrVkPrimaryCommandBuffer * currentCommandBuffer() const
Definition GrVkGpu.h:85

◆ addBufferMemoryBarrier() [2/2]

void GrVkGpu::addBufferMemoryBarrier ( VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkBufferMemoryBarrier * barrier 
) const

Definition at line 2160 of file GrVkGpu.cpp.

2163 {
2164 if (!this->currentCommandBuffer()) {
2165 return;
2166 }
2167 // We don't pass in a resource here to the command buffer. The command buffer only is using it
2168 // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2169 // command with the buffer on the command buffer. Thus those other commands will already cause
2170 // the command buffer to be holding a ref to the buffer.
2172 /*resource=*/nullptr,
2173 srcStageMask,
2174 dstStageMask,
2175 byRegion,
2177 barrier);
2178}

◆ addDrawable()

void GrVkGpu::addDrawable ( std::unique_ptr< SkDrawable::GpuDrawHandler > drawable)

Definition at line 2769 of file GrVkGpu.cpp.

2769 {
2770 fDrawables.emplace_back(std::move(drawable));
2771}
T & emplace_back(Args &&... args)
Definition SkTArray.h:243

◆ addFinishedProc()

void GrVkGpu::addFinishedProc ( GrGpuFinishedProc  finishedProc,
GrGpuFinishedContext  finishedContext 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 2240 of file GrVkGpu.cpp.

2241 {
2242 SkASSERT(finishedProc);
2243 this->addFinishedCallback(skgpu::RefCntedCallback::Make(finishedProc, finishedContext));
2244}
static sk_sp< RefCntedCallback > Make(Callback proc, Context ctx)

◆ addImageMemoryBarrier()

void GrVkGpu::addImageMemoryBarrier ( const GrManagedResource * resource,
VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkImageMemoryBarrier * barrier 
) const

Definition at line 2180 of file GrVkGpu.cpp.

2184 {
2185 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2186 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2187 // VkImage back to the original queue. In this state we don't submit anymore work and we may not
2188 // have a current command buffer. Thus we won't do the queue transfer.
2189 if (!this->currentCommandBuffer()) {
2190 return;
2191 }
2192 SkASSERT(resource);
2194 resource,
2195 srcStageMask,
2196 dstStageMask,
2197 byRegion,
2199 barrier);
2200}

◆ beginRenderPass()

bool GrVkGpu::beginRenderPass ( const GrVkRenderPass * renderPass,
sk_sp< const GrVkFramebuffer > framebuffer,
const VkClearValue * colorClear,
const GrSurface * target,
const SkIRect & renderPassBounds,
bool  forSecondaryCB 
)

Definition at line 2637 of file GrVkGpu.cpp.

2642 {
2643 if (!this->currentCommandBuffer()) {
2644 return false;
2645 }
2646 SkASSERT (!framebuffer->isExternal());
2647
2648#ifdef SK_DEBUG
2649 uint32_t index;
2650 bool result = renderPass->colorAttachmentIndex(&index);
2651 SkASSERT(result && 0 == index);
2652 result = renderPass->stencilAttachmentIndex(&index);
2653 if (result) {
2654 SkASSERT(1 == index);
2655 }
2656#endif
2657 VkClearValue clears[3];
2658 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2659 clears[0].color = colorClear->color;
2660 clears[stencilIndex].depthStencil.depth = 0.0f;
2661 clears[stencilIndex].depthStencil.stencil = 0;
2662
2663 return this->currentCommandBuffer()->beginRenderPass(
2664 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2665}
bool isExternal() const
bool beginRenderPass(GrVkGpu *gpu, const GrVkRenderPass *, sk_sp< const GrVkFramebuffer >, const VkClearValue clearValues[], const GrSurface *target, const SkIRect &bounds, bool forSecondaryCB)
bool colorAttachmentIndex(uint32_t *index) const
bool stencilAttachmentIndex(uint32_t *index) const
bool hasResolveAttachment() const
GAsyncResult * result
uint32_t * target
VkClearColorValue color
VkClearDepthStencilValue depthStencil

◆ checkFinishProcs()

void GrVkGpu::checkFinishProcs ( )
inlineoverridevirtual

Implements GrGpu.

Definition at line 174 of file GrVkGpu.h.

174{ fResourceProvider.checkCommandBuffers(); }

◆ checkVkResult()

bool GrVkGpu::checkVkResult ( VkResult  result)

Definition at line 2675 of file GrVkGpu.cpp.

2675 {
2676 switch (result) {
2677 case VK_SUCCESS:
2678 return true;
2680 if (!fDeviceIsLost) {
2681 // Callback should only be invoked once, and device should be marked as lost first.
2682 fDeviceIsLost = true;
2684 device(),
2685 fDeviceLostContext,
2686 fDeviceLostProc,
2687 vkCaps().supportsDeviceFaultInfo());
2688 }
2689 return false;
2692 this->setOOMed();
2693 return false;
2694 default:
2695 return false;
2696 }
2697}
void setOOMed()
Definition GrGpu.h:701
const GrVkCaps & vkCaps() const
Definition GrVkGpu.h:61
const skgpu::VulkanInterface * vkInterface() const
Definition GrVkGpu.h:60
VkDevice device() const
Definition GrVkGpu.h:71
void InvokeDeviceLostCallback(const skgpu::VulkanInterface *vulkanInterface, VkDevice vkDevice, skgpu::VulkanDeviceLostContext deviceLostContext, skgpu::VulkanDeviceLostProc deviceLostProc, bool supportsDeviceFaultInfoExtension)
@ VK_ERROR_DEVICE_LOST
@ VK_SUCCESS
@ VK_ERROR_OUT_OF_HOST_MEMORY
@ VK_ERROR_OUT_OF_DEVICE_MEMORY

◆ cmdPool()

GrVkCommandPool * GrVkGpu::cmdPool ( ) const
inline

Definition at line 74 of file GrVkGpu.h.

74{ return fMainCmdPool; }

◆ compile()

bool GrVkGpu::compile ( const GrProgramDesc & ,
const GrProgramInfo & 
)
overridevirtual

In this case we have a program descriptor and a program info but no render target.

Implements GrGpu.

Definition at line 2042 of file GrVkGpu.cpp.

2042 {
2043 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2044 GrVkRenderPass::AttachmentFlags attachmentFlags;
2046 &attachmentsDescriptor, &attachmentFlags);
2047
2049 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2051 }
2052 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2054 }
2055
2057 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2058 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2060 }
2061 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2062 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2063 if (!renderPass) {
2064 return false;
2065 }
2066
2068
2069 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2070 desc,
2071 programInfo,
2072 renderPass->vkRenderPass(),
2073 &stat);
2074 if (!pipelineState) {
2075 return false;
2076 }
2077
2079}
GrVkResourceProvider & resourceProvider()
Definition GrVkGpu.h:83
static void ReconstructAttachmentsDescriptor(const GrVkCaps &vkCaps, const GrProgramInfo &programInfo, GrVkRenderPass::AttachmentsDescriptor *desc, GrVkRenderPass::AttachmentFlags *flags)
GrVkPipelineState * findOrCreateCompatiblePipelineState(GrRenderTarget *, const GrProgramInfo &, VkRenderPass compatibleRenderPass, bool overrideSubpassForResolveLoad)

◆ currentCommandBuffer()

GrVkPrimaryCommandBuffer * GrVkGpu::currentCommandBuffer ( ) const
inline

Definition at line 85 of file GrVkGpu.h.

85{ return fMainCmdBuffer; }

◆ deleteBackendTexture()

void GrVkGpu::deleteBackendTexture ( const GrBackendTexture & )
overridevirtual

Frees a texture created by createBackendTexture(). If ownership of the backend texture has been transferred to a context using adopt semantics this should not be called.

Implements GrGpu.

Definition at line 2033 of file GrVkGpu.cpp.

2033 {
2034 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2035
2038 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2039 }
2040}
static void info(const char *fmt,...) SK_PRINTF_LIKE(1
Definition DM.cpp:213
static void DestroyImageInfo(const GrVkGpu *gpu, GrVkImageInfo *)
SK_API bool GetVkImageInfo(const GrBackendTexture &, GrVkImageInfo *)

◆ device()

VkDevice GrVkGpu::device ( ) const
inline

Definition at line 71 of file GrVkGpu.h.

71{ return fDevice; }

◆ disconnect()

void GrVkGpu::disconnect ( DisconnectType  type)
overridevirtual

Reimplemented from GrGpu.

Definition at line 302 of file GrVkGpu.cpp.

302 {
304 if (!fDisconnected) {
305 this->destroyResources();
306
307 fSemaphoresToWaitOn.clear();
308 fSemaphoresToSignal.clear();
309 fMainCmdBuffer = nullptr;
310 fDisconnected = true;
311 }
312}
virtual void disconnect(DisconnectType)
Definition GrGpu.cpp:51

◆ disconnected()

bool GrVkGpu::disconnected ( ) const
inline

Definition at line 51 of file GrVkGpu.h.

51{ return fDisconnected; }

◆ endRenderPass()

void GrVkGpu::endRenderPass ( GrRenderTarget * target,
GrSurfaceOrigin  origin,
const SkIRect & bounds 
)

Definition at line 2667 of file GrVkGpu.cpp.

2668 {
2669 // We had a command buffer when we started the render pass, we should have one now as well.
2671 this->currentCommandBuffer()->endRenderPass(this);
2672 this->didWriteToSurface(target, origin, &bounds);
2673}
void didWriteToSurface(GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
Definition GrGpu.cpp:665
void endRenderPass(const GrVkGpu *gpu)

◆ finishOutstandingGpuWork()

void GrVkGpu::finishOutstandingGpuWork ( )
overridevirtual

Implements GrGpu.

Definition at line 2263 of file GrVkGpu.cpp.

2263 {
2264 VK_CALL(QueueWaitIdle(fQueue));
2265
2266 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2267 fResourceProvider.forceSyncAllCommandBuffers();
2268 }
2269}
#define VK_CALL(GPU, X)

◆ getPreferredStencilFormat()

GrBackendFormat GrVkGpu::getPreferredStencilFormat ( const GrBackendFormat & )
inlineoverridevirtual

Implements GrGpu.

Definition at line 120 of file GrVkGpu.h.

120 {
121 return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
122 }
SK_API GrBackendFormat MakeVk(VkFormat format, bool willUseDRMFormatModifiers=false)

◆ insertSemaphore()

void GrVkGpu::insertSemaphore ( GrSemaphore * semaphore)
overridevirtual

Implements GrGpu.

Definition at line 2724 of file GrVkGpu.cpp.

2724 {
2725 SkASSERT(semaphore);
2726
2727 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2728
2729 GrVkSemaphore::Resource* resource = vkSem->getResource();
2730 if (resource->shouldSignal()) {
2731 resource->ref();
2732 fSemaphoresToSignal.push_back(resource);
2733 }
2734}
Resource * getResource()

◆ isDeviceLost()

bool GrVkGpu::isDeviceLost ( ) const
inlineoverridevirtual

Reimplemented from GrGpu.

Definition at line 66 of file GrVkGpu.h.

66{ return fDeviceIsLost; }

◆ loadMSAAFromResolve()

bool GrVkGpu::loadMSAAFromResolve ( GrVkCommandBuffer * commandBuffer,
const GrVkRenderPass & renderPass,
GrAttachment * dst,
GrVkImage * src,
const SkIRect & srcRect 
)

Definition at line 1516 of file GrVkGpu.cpp.

1520 {
1521 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1522}
bool loadMSAAFromResolve(GrVkGpu *gpu, GrVkCommandBuffer *commandBuffer, const GrVkRenderPass &renderPass, GrAttachment *dst, GrVkImage *src, const SkIRect &srcRect)

◆ Make()

std::unique_ptr< GrGpu > GrVkGpu::Make ( const GrVkBackendContext & backendContext,
const GrContextOptions & options,
GrDirectContext * direct 
)
static

Definition at line 66 of file GrVkGpu.cpp.

68 {
69 if (backendContext.fInstance == VK_NULL_HANDLE ||
70 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
71 backendContext.fDevice == VK_NULL_HANDLE ||
72 backendContext.fQueue == VK_NULL_HANDLE) {
73 return nullptr;
74 }
75 if (!backendContext.fGetProc) {
76 return nullptr;
77 }
78
79 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
80 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
81 backendContext.fGetProc("vkEnumerateInstanceVersion",
83 uint32_t instanceVersion = 0;
84 if (!localEnumerateInstanceVersion) {
85 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
86 } else {
87 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
88 if (err) {
89 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
90 return nullptr;
91 }
92 }
93
94 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
95 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
96 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
97 backendContext.fInstance,
99
100 if (!localGetPhysicalDeviceProperties) {
101 return nullptr;
102 }
103 VkPhysicalDeviceProperties physDeviceProperties;
104 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
105 uint32_t physDevVersion = physDeviceProperties.apiVersion;
106
107 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
108 : instanceVersion;
109
110 instanceVersion = std::min(instanceVersion, apiVersion);
111 physDevVersion = std::min(physDevVersion, apiVersion);
112
114
115 if (backendContext.fVkExtensions) {
116 interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
117 backendContext.fInstance,
118 backendContext.fDevice,
119 instanceVersion,
120 physDevVersion,
121 backendContext.fVkExtensions));
122 if (!interface->validate(instanceVersion, physDevVersion, backendContext.fVkExtensions)) {
123 return nullptr;
124 }
125 } else {
127 // The only extension flag that may effect the vulkan backend is the swapchain extension. We
128 // need to know if this is enabled to know if we can transition to a present layout when
129 // flushing a surface.
130 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
131 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
132 extensions.init(backendContext.fGetProc, backendContext.fInstance,
133 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
134 }
135 interface.reset(new skgpu::VulkanInterface(backendContext.fGetProc,
136 backendContext.fInstance,
137 backendContext.fDevice,
138 instanceVersion,
139 physDevVersion,
140 &extensions));
141 if (!interface->validate(instanceVersion, physDevVersion, &extensions)) {
142 return nullptr;
143 }
144 }
145
147 if (backendContext.fDeviceFeatures2) {
148 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
149 *backendContext.fDeviceFeatures2, instanceVersion, physDevVersion,
150 *backendContext.fVkExtensions, backendContext.fProtectedContext));
151 } else if (backendContext.fDeviceFeatures) {
153 features2.pNext = nullptr;
154 features2.features = *backendContext.fDeviceFeatures;
155 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
156 features2, instanceVersion, physDevVersion,
157 *backendContext.fVkExtensions, backendContext.fProtectedContext));
158 } else {
160 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
161 features.pNext = nullptr;
162 if (backendContext.fFeatures & kGeometryShader_GrVkFeatureFlag) {
163 features.features.geometryShader = true;
164 }
165 if (backendContext.fFeatures & kDualSrcBlend_GrVkFeatureFlag) {
166 features.features.dualSrcBlend = true;
167 }
168 if (backendContext.fFeatures & kSampleRateShading_GrVkFeatureFlag) {
169 features.features.sampleRateShading = true;
170 }
172 // The only extension flag that may effect the vulkan backend is the swapchain extension. We
173 // need to know if this is enabled to know if we can transition to a present layout when
174 // flushing a surface.
175 if (backendContext.fExtensions & kKHR_swapchain_GrVkExtensionFlag) {
176 const char* swapChainExtName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
177 extensions.init(backendContext.fGetProc, backendContext.fInstance,
178 backendContext.fPhysicalDevice, 0, nullptr, 1, &swapChainExtName);
179 }
180 caps.reset(new GrVkCaps(options, interface.get(), backendContext.fPhysicalDevice,
181 features, instanceVersion, physDevVersion, extensions,
182 backendContext.fProtectedContext));
183 }
184
185 if (!caps) {
186 return nullptr;
187 }
188
190 if (!memoryAllocator) {
191 // We were not given a memory allocator at creation
193 backendContext.fPhysicalDevice,
194 backendContext.fDevice,
195 physDevVersion,
196 backendContext.fVkExtensions,
197 interface.get(),
198 /*=threadSafe=*/false);
199 }
200 if (!memoryAllocator) {
201 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
202 return nullptr;
203 }
204
205 std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
206 backendContext,
207 std::move(caps),
208 interface,
209 instanceVersion,
210 physDevVersion,
211 std::move(memoryAllocator)));
212 if (backendContext.fProtectedContext == GrProtected::kYes &&
213 !vkGpu->vkCaps().supportsProtectedContent()) {
214 return nullptr;
215 }
216 return vkGpu;
217}
const char * options
@ kKHR_swapchain_GrVkExtensionFlag
@ kSampleRateShading_GrVkFeatureFlag
@ kDualSrcBlend_GrVkFeatureFlag
@ kGeometryShader_GrVkFeatureFlag
#define SkDEBUGFAIL(message)
Definition SkAssert.h:118
void SK_SPI SkDebugf(const char format[],...) SK_PRINTF_LIKE(1
const GrCaps * caps() const
Definition GrGpu.h:73
skgpu::VulkanMemoryAllocator * memoryAllocator() const
Definition GrVkGpu.h:68
T * get() const
Definition SkRefCnt.h:303
static sk_sp< VulkanMemoryAllocator > Make(VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device, uint32_t physicalDeviceVersion, const VulkanExtensions *extensions, const VulkanInterface *interface, bool threadSafe)
const skgpu::VulkanExtensions * fVkExtensions
skgpu::Protected fProtectedContext
const VkPhysicalDeviceFeatures * fDeviceFeatures
VkPhysicalDevice fPhysicalDevice
skgpu::VulkanGetProc fGetProc
const VkPhysicalDeviceFeatures2 * fDeviceFeatures2
sk_sp< skgpu::VulkanMemoryAllocator > fMemoryAllocator
VkPhysicalDeviceFeatures features
void(VKAPI_PTR * PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties *pProperties)
#define VK_MAKE_VERSION(major, minor, patch)
Definition vulkan_core.h:78
VkResult
#define VK_NULL_HANDLE
Definition vulkan_core.h:46
VkResult(VKAPI_PTR * PFN_vkEnumerateInstanceVersion)(uint32_t *pApiVersion)
#define VK_KHR_SWAPCHAIN_EXTENSION_NAME

◆ makeMSAAAttachment()

sk_sp< GrAttachment > GrVkGpu::makeMSAAAttachment ( SkISize  dimensions,
const GrBackendFormat format,
int  numSamples,
GrProtected  isProtected,
GrMemoryless  isMemoryless 
)
overridevirtual

Implements GrGpu.

Definition at line 1625 of file GrVkGpu.cpp.

1629 {
1630 VkFormat pixelFormat;
1633 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1634
1636 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1637}
#define SkAssertResult(cond)
Definition SkAssert.h:123
void incMSAAAttachmentCreates()
Definition GrGpu.h:540
Stats fStats
Definition GrGpu.h:703
static sk_sp< GrVkImage > MakeMSAA(GrVkGpu *gpu, SkISize dimensions, int numSamples, VkFormat format, GrProtected isProtected, GrMemoryless memoryless)
Definition GrVkImage.cpp:39
uint32_t uint32_t * format
SK_API bool AsVkFormat(const GrBackendFormat &, VkFormat *)
static constexpr bool VkFormatIsCompressed(VkFormat vkFormat)
VkFormat

◆ makeSemaphore()

std::unique_ptr< GrSemaphore > GrVkGpu::makeSemaphore ( bool  isOwned)
overridevirtual

Implements GrGpu.

Definition at line 2713 of file GrVkGpu.cpp.

2713 {
2714 return GrVkSemaphore::Make(this, isOwned);
2715}
static std::unique_ptr< GrVkSemaphore > Make(GrVkGpu *gpu, bool isOwned)

◆ makeStencilAttachment()

sk_sp< GrAttachment > GrVkGpu::makeStencilAttachment ( const GrBackendFormat ,
SkISize  dimensions,
int  numStencilSamples 
)
overridevirtual

Implements GrGpu.

Definition at line 1617 of file GrVkGpu.cpp.

1618 {
1619 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1620
1622 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1623}
void incStencilAttachmentCreates()
Definition GrGpu.h:539
VkFormat preferredStencilFormat() const
Definition GrVkCaps.h:104
static sk_sp< GrVkImage > MakeStencil(GrVkGpu *gpu, SkISize dimensions, int sampleCnt, VkFormat format)
Definition GrVkImage.cpp:21

◆ memoryAllocator()

skgpu::VulkanMemoryAllocator * GrVkGpu::memoryAllocator ( ) const
inline

Definition at line 68 of file GrVkGpu.h.

68{ return fMemoryAllocator.get(); }

◆ onClearBackendTexture()

bool GrVkGpu::onClearBackendTexture ( const GrBackendTexture backendTexture,
sk_sp< skgpu::RefCntedCallback finishedCallback,
std::array< float, 4 >  color 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1730 of file GrVkGpu.cpp.

1732 {
1735
1736 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1737 SkASSERT(mutableState);
1739 GrVkTexture::MakeWrappedTexture(this, backendTexture.dimensions(),
1741 kRW_GrIOType, info, std::move(mutableState));
1742 if (!texture) {
1743 return false;
1744 }
1745 GrVkImage* texImage = texture->textureImage();
1746
1748 if (!cmdBuffer) {
1749 return false;
1750 }
1751
1752 texImage->setImageLayout(this,
1756 false);
1757
1758 // CmdClearColorImage doesn't work for compressed formats
1760
1761 VkClearColorValue vkColor;
1762 // If we ever support SINT or UINT formats this needs to be updated to use the int32 and
1763 // uint32 union members in those cases.
1764 vkColor.float32[0] = color[0];
1765 vkColor.float32[1] = color[1];
1766 vkColor.float32[2] = color[2];
1767 vkColor.float32[3] = color[3];
1770 range.baseArrayLayer = 0;
1771 range.baseMipLevel = 0;
1772 range.layerCount = 1;
1773 range.levelCount = info.fLevelCount;
1774 cmdBuffer->clearColorImage(this, texImage, &vkColor, 1, &range);
1775
1776 // Change image layout to shader read since if we use this texture as a borrowed
1777 // texture within Ganesh we require that its layout be set to that
1780 false);
1781
1782 if (finishedCallback) {
1783 this->addFinishedCallback(std::move(finishedCallback));
1784 }
1785 return true;
1786}
@ kRW_GrIOType
@ kBorrow_GrWrapOwnership
Definition GrTypesPriv.h:78
SkColor4f color
SkISize dimensions() const
void setImageLayout(const GrVkGpu *gpu, VkImageLayout newLayout, VkAccessFlags dstAccessMask, VkPipelineStageFlags dstStageMask, bool byRegion)
Definition GrVkImage.h:143
void clearColorImage(const GrVkGpu *gpu, GrVkImage *image, const VkClearColorValue *color, uint32_t subRangeCount, const VkImageSubresourceRange *subRanges)
static sk_sp< GrVkTexture > MakeWrappedTexture(GrVkGpu *, SkISize dimensions, GrWrapOwnership, GrWrapCacheable, GrIOType, const GrVkImageInfo &, sk_sp< skgpu::MutableTextureState >)
FlTexture * texture
VkImageAspectFlags aspectMask
@ VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
@ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
@ VK_IMAGE_ASPECT_COLOR_BIT
@ VK_ACCESS_TRANSFER_WRITE_BIT
@ VK_ACCESS_SHADER_READ_BIT
@ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
@ VK_PIPELINE_STAGE_TRANSFER_BIT

◆ onCopySurface()

bool GrVkGpu::onCopySurface ( GrSurface dst,
const SkIRect dstRect,
GrSurface src,
const SkIRect srcRect,
GrSamplerState::Filter  filter 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 2433 of file GrVkGpu.cpp.

2435 {
2436#ifdef SK_DEBUG
2437 if (GrVkRenderTarget* srcRT = static_cast<GrVkRenderTarget*>(src->asRenderTarget())) {
2438 SkASSERT(!srcRT->wrapsSecondaryCommandBuffer());
2439 }
2440 if (GrVkRenderTarget* dstRT = static_cast<GrVkRenderTarget*>(dst->asRenderTarget())) {
2441 SkASSERT(!dstRT->wrapsSecondaryCommandBuffer());
2442 }
2443#endif
2444 if (src->isProtected() && !dst->isProtected()) {
2445 SkDebugf("Can't copy from protected memory to non-protected");
2446 return false;
2447 }
2448
2449 GrVkImage* dstImage;
2450 GrVkImage* srcImage;
2451 GrRenderTarget* dstRT = dst->asRenderTarget();
2452 if (dstRT) {
2453 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(dstRT);
2454 if (vkRT->wrapsSecondaryCommandBuffer()) {
2455 return false;
2456 }
2457 // This will technically return true for single sample rts that used DMSAA in which case we
2458 // don't have to pick the resolve attachment. But in that case the resolve and color
2459 // attachments will be the same anyways.
2460 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2461 dstImage = vkRT->resolveAttachment();
2462 } else {
2463 dstImage = vkRT->colorAttachment();
2464 }
2465 } else if (dst->asTexture()) {
2466 dstImage = static_cast<GrVkTexture*>(dst->asTexture())->textureImage();
2467 } else {
2468 // The surface is a GrAttachment already
2469 dstImage = static_cast<GrVkImage*>(dst);
2470 }
2471 GrRenderTarget* srcRT = src->asRenderTarget();
2472 if (srcRT) {
2473 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(srcRT);
2474 // This will technically return true for single sample rts that used DMSAA in which case we
2475 // don't have to pick the resolve attachment. But in that case the resolve and color
2476 // attachments will be the same anyways.
2477 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
2478 srcImage = vkRT->resolveAttachment();
2479 } else {
2480 srcImage = vkRT->colorAttachment();
2481 }
2482 } else if (src->asTexture()) {
2483 SkASSERT(src->asTexture());
2484 srcImage = static_cast<GrVkTexture*>(src->asTexture())->textureImage();
2485 } else {
2486 // The surface is a GrAttachment already
2487 srcImage = static_cast<GrVkImage*>(src);
2488 }
2489
2490 VkFormat dstFormat = dstImage->imageFormat();
2491 VkFormat srcFormat = srcImage->imageFormat();
2492
2493 int dstSampleCnt = dstImage->numSamples();
2494 int srcSampleCnt = srcImage->numSamples();
2495
2496 bool dstHasYcbcr = dstImage->ycbcrConversionInfo().isValid();
2497 bool srcHasYcbcr = srcImage->ycbcrConversionInfo().isValid();
2498
2499 if (srcRect.size() == dstRect.size()) {
2500 // Prefer resolves or copy-image commands when there is no scaling
2501 const SkIPoint dstPoint = dstRect.topLeft();
2502 if (this->vkCaps().canCopyAsResolve(dstFormat, dstSampleCnt, dstHasYcbcr,
2503 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2504 this->copySurfaceAsResolve(dst, src, srcRect, dstPoint);
2505 return true;
2506 }
2507
2508 if (this->vkCaps().canCopyImage(dstFormat, dstSampleCnt, dstHasYcbcr,
2509 srcFormat, srcSampleCnt, srcHasYcbcr)) {
2510 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint);
2511 return true;
2512 }
2513 }
2514
2515 if (this->vkCaps().canCopyAsBlit(dstFormat,
2516 dstSampleCnt,
2517 dstImage->isLinearTiled(),
2518 dstHasYcbcr,
2519 srcFormat,
2520 srcSampleCnt,
2521 srcImage->isLinearTiled(),
2522 srcHasYcbcr)) {
2523 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstRect, filter);
2524 return true;
2525 }
2526
2527 return false;
2528}
int numSamples() const
bool canCopyAsResolve(VkFormat dstConfig, int dstSampleCnt, bool dstHasYcbcr, VkFormat srcConfig, int srcSamplecnt, bool srcHasYcbcr) const
Definition GrVkCaps.cpp:192
bool renderTargetSupportsDiscardableMSAA(const GrVkRenderTarget *) const
bool isLinearTiled() const
Definition GrVkImage.h:122
const GrVkYcbcrConversionInfo & ycbcrConversionInfo() const
Definition GrVkImage.h:94
VkFormat imageFormat() const
Definition GrVkImage.h:82
bool wrapsSecondaryCommandBuffer() const
GrVkImage * colorAttachment() const
GrVkImage * resolveAttachment() const
dst
Definition cp.py:12
constexpr SkISize size() const
Definition SkRect.h:172
constexpr SkIPoint topLeft() const
Definition SkRect.h:151

◆ onCreateBackendTexture()

GrBackendTexture GrVkGpu::onCreateBackendTexture ( SkISize  dimensions,
const GrBackendFormat format,
GrRenderable  renderable,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected,
std::string_view  label 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1788 of file GrVkGpu.cpp.

1793 {
1794 const GrVkCaps& caps = this->vkCaps();
1795
1796 if (fProtectedContext != isProtected) {
1797 return {};
1798 }
1799
1800 VkFormat vkFormat;
1801 if (!GrBackendFormats::AsVkFormat(format, &vkFormat)) {
1802 return {};
1803 }
1804
1805 // TODO: move the texturability check up to GrGpu::createBackendTexture and just assert here
1806 if (!caps.isVkFormatTexturable(vkFormat)) {
1807 return {};
1808 }
1809
1810 if (skgpu::VkFormatNeedsYcbcrSampler(vkFormat)) {
1811 return {};
1812 }
1813
1815 if (!this->createVkImageForBackendSurface(vkFormat, dimensions, 1, GrTexturable::kYes,
1816 renderable, mipmapped, &info, isProtected)) {
1817 return {};
1818 }
1819
1820 return GrBackendTextures::MakeVk(dimensions.width(), dimensions.height(), info);
1821}
SK_API GrBackendTexture MakeVk(int width, int height, const GrVkImageInfo &, std::string_view label={})
static constexpr bool VkFormatNeedsYcbcrSampler(VkFormat format)
constexpr int32_t width() const
Definition SkSize.h:36
constexpr int32_t height() const
Definition SkSize.h:37

◆ onCreateBuffer()

sk_sp< GrGpuBuffer > GrVkGpu::onCreateBuffer ( size_t  size,
GrGpuBufferType  type,
GrAccessPattern  accessPattern 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 468 of file GrVkGpu.cpp.

470 {
471#ifdef SK_DEBUG
472 switch (type) {
476 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
477 accessPattern == kStatic_GrAccessPattern);
478 break;
480 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
481 break;
483 SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
484 accessPattern == kStream_GrAccessPattern);
485 break;
487 SkASSERT(accessPattern == kDynamic_GrAccessPattern);
488 break;
489 }
490#endif
491 return GrVkBuffer::Make(this, size, type, accessPattern);
492}
@ kDynamic_GrAccessPattern
@ kStatic_GrAccessPattern
@ kStream_GrAccessPattern
static sk_sp< GrVkBuffer > Make(GrVkGpu *gpu, size_t size, GrGpuBufferType bufferType, GrAccessPattern accessPattern)

◆ onCreateCompressedBackendTexture()

GrBackendTexture GrVkGpu::onCreateCompressedBackendTexture ( SkISize  dimensions,
const GrBackendFormat format,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1823 of file GrVkGpu.cpp.

1826 {
1827 return this->onCreateBackendTexture(dimensions,
1828 format,
1829 GrRenderable::kNo,
1830 mipmapped,
1831 isProtected,
1832 /*label=*/"VkGpu_CreateCompressedBackendTexture");
1833}
GrBackendTexture onCreateBackendTexture(SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label) override
Definition GrVkGpu.cpp:1788

◆ onCreateCompressedTexture()

sk_sp< GrTexture > GrVkGpu::onCreateCompressedTexture ( SkISize  dimensions,
const GrBackendFormat format,
skgpu::Budgeted  budgeted,
skgpu::Mipmapped  mipmapped,
GrProtected  isProtected,
const void *  data,
size_t  dataSize 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1202 of file GrVkGpu.cpp.

1208 {
1209 VkFormat pixelFormat;
1212
1213 int numMipLevels = 1;
1214 if (mipmapped == skgpu::Mipmapped::kYes) {
1215 numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1;
1216 }
1217
1218 GrMipmapStatus mipmapStatus = (mipmapped == skgpu::Mipmapped::kYes)
1221
1222 auto tex = GrVkTexture::MakeNewTexture(this,
1223 budgeted,
1224 dimensions,
1225 pixelFormat,
1226 numMipLevels,
1227 isProtected,
1228 mipmapStatus,
1229 /*label=*/"VkGpu_CreateCompressedTexture");
1230 if (!tex) {
1231 return nullptr;
1232 }
1233
1235 if (!this->uploadTexDataCompressed(tex->textureImage(), compression, pixelFormat,
1236 dimensions, mipmapped, data, dataSize)) {
1237 return nullptr;
1238 }
1239
1240 return tex;
1241}
SkTextureCompressionType GrBackendFormatToCompressionType(const GrBackendFormat &format)
GrMipmapStatus
static sk_sp< GrVkTexture > MakeNewTexture(GrVkGpu *, skgpu::Budgeted budgeted, SkISize dimensions, VkFormat format, uint32_t mipLevels, GrProtected, GrMipmapStatus, std::string_view label)
static int ComputeLevelCount(int baseWidth, int baseHeight)
Definition SkMipmap.cpp:134

◆ onCreateTexture()

sk_sp< GrTexture > GrVkGpu::onCreateTexture ( SkISize  dimensions,
const GrBackendFormat format,
GrRenderable  renderable,
int  renderTargetSampleCnt,
skgpu::Budgeted  budgeted,
GrProtected  isProtected,
int  mipLevelCount,
uint32_t  levelClearMask,
std::string_view  label 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 1137 of file GrVkGpu.cpp.

1145 {
1146 VkFormat pixelFormat;
1149 SkASSERT(mipLevelCount > 0);
1150
1151 GrMipmapStatus mipmapStatus =
1153
1155 if (renderable == GrRenderable::kYes) {
1157 this, budgeted, dimensions, pixelFormat, mipLevelCount, renderTargetSampleCnt,
1158 mipmapStatus, isProtected, label);
1159 } else {
1160 tex = GrVkTexture::MakeNewTexture(this, budgeted, dimensions, pixelFormat,
1161 mipLevelCount, isProtected, mipmapStatus, label);
1162 }
1163
1164 if (!tex) {
1165 return nullptr;
1166 }
1167
1168 if (levelClearMask) {
1169 if (!this->currentCommandBuffer()) {
1170 return nullptr;
1171 }
1173 bool inRange = false;
1174 GrVkImage* texImage = tex->textureImage();
1175 for (uint32_t i = 0; i < texImage->mipLevels(); ++i) {
1176 if (levelClearMask & (1U << i)) {
1177 if (inRange) {
1178 ranges.back().levelCount++;
1179 } else {
1180 auto& range = ranges.push_back();
1181 range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
1182 range.baseArrayLayer = 0;
1183 range.baseMipLevel = i;
1184 range.layerCount = 1;
1185 range.levelCount = 1;
1186 inRange = true;
1187 }
1188 } else if (inRange) {
1189 inRange = false;
1190 }
1191 }
1192 SkASSERT(!ranges.empty());
1193 static constexpr VkClearColorValue kZeroClearColor = {};
1196 this->currentCommandBuffer()->clearColorImage(this, texImage, &kZeroClearColor,
1197 ranges.size(), ranges.begin());
1198 }
1199 return tex;
1200}
uint32_t mipLevels() const
Definition GrVkImage.h:93
static sk_sp< GrVkTextureRenderTarget > MakeNewTextureRenderTarget(GrVkGpu *gpu, skgpu::Budgeted budgeted, SkISize dimensions, VkFormat format, uint32_t mipLevels, int sampleCnt, GrMipmapStatus mipmapStatus, GrProtected isProtected, std::string_view label)
bool empty() const
Definition SkTArray.h:194
int size() const
Definition SkTArray.h:416

◆ onGetOpsRenderPass()

GrOpsRenderPass * GrVkGpu::onGetOpsRenderPass ( GrRenderTarget rt,
bool  useMSAASurface,
GrAttachment stencil,
GrSurfaceOrigin  origin,
const SkIRect bounds,
const GrOpsRenderPass::LoadAndStoreInfo colorInfo,
const GrOpsRenderPass::StencilLoadAndStoreInfo stencilInfo,
const skia_private::TArray< GrSurfaceProxy *, true > &  sampledProxies,
GrXferBarrierFlags  renderPassXferBarriers 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 324 of file GrVkGpu.cpp.

333 {
334 if (!fCachedOpsRenderPass) {
335 fCachedOpsRenderPass = std::make_unique<GrVkOpsRenderPass>(this);
336 }
337
338 // For the given render target and requested render pass features we need to find a compatible
339 // framebuffer to use for the render pass. Technically it is the underlying VkRenderPass that
340 // is compatible, but that is part of the framebuffer that we get here.
341 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
342
343 SkASSERT(!useMSAASurface ||
344 rt->numSamples() > 1 ||
345 (this->vkCaps().supportsDiscardableMSAAForDMSAA() &&
346 vkRT->resolveAttachment() &&
348
349 // Convert the GrXferBarrierFlags into render pass self dependency flags
351 if (renderPassXferBarriers & GrXferBarrierFlags::kBlend) {
353 }
354 if (renderPassXferBarriers & GrXferBarrierFlags::kTexture) {
356 }
357
358 // Figure out if we need a resolve attachment for this render pass. A resolve attachment is
359 // needed if we are using msaa to draw with a discardable msaa attachment. If we are in this
360 // case we also need to update the color load/store ops since we don't want to ever load or
361 // store the msaa color attachment, but may need to for the resolve attachment.
362 GrOpsRenderPass::LoadAndStoreInfo localColorInfo = colorInfo;
363 bool withResolve = false;
366 if (useMSAASurface && this->vkCaps().renderTargetSupportsDiscardableMSAA(vkRT)) {
367 withResolve = true;
368 localColorInfo.fStoreOp = GrStoreOp::kDiscard;
369 if (colorInfo.fLoadOp == GrLoadOp::kLoad) {
371 localColorInfo.fLoadOp = GrLoadOp::kDiscard;
372 } else {
373 resolveInfo.fLoadOp = GrLoadOp::kDiscard;
374 }
375 }
376
377 // Get the framebuffer to use for the render pass
378 sk_sp<GrVkFramebuffer> framebuffer;
379 if (vkRT->wrapsSecondaryCommandBuffer()) {
380 framebuffer = vkRT->externalFramebuffer();
381 } else {
382 auto fb = vkRT->getFramebuffer(withResolve, SkToBool(stencil), selfDepFlags,
383 loadFromResolve);
384 framebuffer = sk_ref_sp(fb);
385 }
386 if (!framebuffer) {
387 return nullptr;
388 }
389
390 if (!fCachedOpsRenderPass->set(rt, std::move(framebuffer), origin, bounds, localColorInfo,
391 stencilInfo, resolveInfo, selfDepFlags, loadFromResolve,
392 sampledProxies)) {
393 return nullptr;
394 }
395 return fCachedOpsRenderPass.get();
396}
sk_sp< T > sk_ref_sp(T *obj)
Definition SkRefCnt.h:381
static constexpr bool SkToBool(const T &x)
Definition SkTo.h:35
int numSamples() const
bool supportsInputAttachmentUsage() const
Definition GrVkImage.h:101
sk_sp< GrVkFramebuffer > externalFramebuffer() const
const GrVkFramebuffer * getFramebuffer(bool withResolve, bool withStencil, SelfDependencyFlags selfDepFlags, LoadFromResolve)

◆ onReadPixels()

bool GrVkGpu::onReadPixels ( GrSurface surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  dstColorType,
void *  buffer,
size_t  rowBytes 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 2530 of file GrVkGpu.cpp.

2535 {
2536 if (surface->isProtected()) {
2537 return false;
2538 }
2539
2540 if (!this->currentCommandBuffer()) {
2541 return false;
2542 }
2543
2544 GrVkImage* image = nullptr;
2545 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget());
2546 if (rt) {
2547 // Reading from render targets that wrap a secondary command buffer is not allowed since
2548 // it would require us to know the VkImage, which we don't have, as well as need us to
2549 // stop and start the VkRenderPass which we don't have access to.
2550 if (rt->wrapsSecondaryCommandBuffer()) {
2551 return false;
2552 }
2553 image = rt->nonMSAAAttachment();
2554 } else {
2555 image = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
2556 }
2557
2558 if (!image) {
2559 return false;
2560 }
2561
2562 if (dstColorType == GrColorType::kUnknown ||
2563 dstColorType != this->vkCaps().transferColorType(image->imageFormat(), surfaceColorType)) {
2564 return false;
2565 }
2566
2567 // Change layout of our target so it can be used as copy
2568 image->setImageLayout(this,
2572 false);
2573
2574 size_t bpp = GrColorTypeBytesPerPixel(dstColorType);
2575 if (skgpu::VkFormatBytesPerBlock(image->imageFormat()) != bpp) {
2576 return false;
2577 }
2578 size_t tightRowBytes = bpp*rect.width();
2579
2581 memset(&region, 0, sizeof(VkBufferImageCopy));
2582 VkOffset3D offset = { rect.left(), rect.top(), 0 };
2583 region.imageOffset = offset;
2584 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
2585
2586 size_t transBufferRowBytes = bpp * region.imageExtent.width;
2587 size_t imageRows = region.imageExtent.height;
2589 sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
2590 transBufferRowBytes * imageRows,
2594
2595 if (!transferBuffer) {
2596 return false;
2597 }
2598
2599 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
2600
2601 // Copy the image to a buffer so we can map it to cpu memory
2602 region.bufferOffset = 0;
2603 region.bufferRowLength = 0; // Forces RowLength to be width. We handle the rowBytes below.
2604 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only useful for 3d images.
2605 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
2606
2608 image,
2610 transferBuffer,
2611 1,
2612 &region);
2613
2614 // make sure the copy to buffer has finished
2619 false);
2620
2621 // We need to submit the current command buffer to the Queue and make sure it finishes before
2622 // we can copy the data out of the buffer.
2623 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
2624 return false;
2625 }
2626 void* mappedMemory = transferBuffer->map();
2627 if (!mappedMemory) {
2628 return false;
2629 }
2630
2631 SkRectMemcpy(buffer, rowBytes, mappedMemory, transBufferRowBytes, tightRowBytes, rect.height());
2632
2633 transferBuffer->unmap();
2634 return true;
2635}
static constexpr size_t GrColorTypeBytesPerPixel(GrColorType ct)
static void SkRectMemcpy(void *dst, size_t dstRB, const void *src, size_t srcRB, size_t trimRowBytes, int rowCount)
GrResourceProvider * resourceProvider()
GrDirectContextPriv priv()
GrDirectContext * getContext()
Definition GrGpu.h:67
void addMemoryBarrier(VkAccessFlags srcAccessMask, VkAccessFlags dstAccesMask, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion) const
GrColorType transferColorType(VkFormat, GrColorType surfaceColorType) const
void copyImageToBuffer(const GrVkGpu *gpu, GrVkImage *srcImage, VkImageLayout srcLayout, sk_sp< GrGpuBuffer > dstBuffer, uint32_t copyRegionCount, const VkBufferImageCopy *copyRegions)
GrVkImage * nonMSAAAttachment() const
VkSurfaceKHR surface
Definition main.cc:49
sk_sp< SkImage > image
Definition examples.cpp:29
static const uint8_t buffer[]
ClipOpAndAA opAA SkRegion region
Definition SkRecords.h:238
sk_sp< SkBlender > blender SkRect rect
Definition SkRecords.h:350
static constexpr size_t VkFormatBytesPerBlock(VkFormat vkFormat)
Point offset
@ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
@ VK_ACCESS_HOST_READ_BIT
@ VK_ACCESS_TRANSFER_READ_BIT
@ VK_PIPELINE_STAGE_HOST_BIT

◆ onRegenerateMipMapLevels()

bool GrVkGpu::onRegenerateMipMapLevels ( GrTexture tex)
overridevirtual

Implements GrGpu.

Definition at line 1524 of file GrVkGpu.cpp.

1524 {
1525 if (!this->currentCommandBuffer()) {
1526 return false;
1527 }
1528 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1529 // don't do anything for linearly tiled textures (can't have mipmaps)
1530 if (vkTex->isLinearTiled()) {
1531 SkDebugf("Trying to create mipmap for linear tiled texture");
1532 return false;
1533 }
1535
1536 // determine if we can blit to and from this format
1537 const GrVkCaps& caps = this->vkCaps();
1538 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1539 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1540 !caps.mipmapSupport()) {
1541 return false;
1542 }
1543
1544 int width = tex->width();
1545 int height = tex->height();
1546 VkImageBlit blitRegion;
1547 memset(&blitRegion, 0, sizeof(VkImageBlit));
1548
1549 // SkMipmap doesn't include the base level in the level count so we have to add 1
1550 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1551 SkASSERT(levelCount == vkTex->mipLevels());
1552
1553 // change layout of the layers so we can write to them.
1556
1557 // setup memory barrier
1558 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1559 VkImageMemoryBarrier imageMemoryBarrier = {
1561 nullptr, // pNext
1562 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1563 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1566 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1567 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1568 vkTex->image(), // image
1569 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1570 };
1571
1572 // Blit the miplevels
1573 uint32_t mipLevel = 1;
1574 while (mipLevel < levelCount) {
1575 int prevWidth = width;
1576 int prevHeight = height;
1577 width = std::max(1, width / 2);
1578 height = std::max(1, height / 2);
1579
1580 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1582 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1583
1584 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1585 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1586 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1587 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1588 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1589 blitRegion.dstOffsets[1] = { width, height, 1 };
1590 this->currentCommandBuffer()->blitImage(this,
1591 vkTex->resource(),
1592 vkTex->image(),
1594 vkTex->resource(),
1595 vkTex->image(),
1597 1,
1598 &blitRegion,
1600 ++mipLevel;
1601 }
1602 if (levelCount > 1) {
1603 // This barrier logically is not needed, but it changes the final level to the same layout
1604 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1605 // layouts and future layout changes easier. The alternative here would be to track layout
1606 // and memory accesses per layer which doesn't seem worth it.
1607 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1609 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1610 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1611 }
1612 return true;
1613}
bool GrVkFormatIsSupported(VkFormat format)
Definition GrVkUtil.cpp:21
bool mipmapSupport() const
Definition GrCaps.h:72
int height() const
Definition GrSurface.h:37
int width() const
Definition GrSurface.h:32
GrTextureType textureType() const
Definition GrTexture.h:55
bool formatCanBeDstofBlit(VkFormat format, bool linearTiled) const
Definition GrVkCaps.h:71
void addImageMemoryBarrier(const GrManagedResource *, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkImageMemoryBarrier *barrier) const
Definition GrVkGpu.cpp:2180
void blitImage(const GrVkGpu *gpu, const GrManagedResource *srcResource, VkImage srcImage, VkImageLayout srcLayout, const GrManagedResource *dstResource, VkImage dstImage, VkImageLayout dstLayout, uint32_t blitRegionCount, const VkImageBlit *blitRegions, VkFilter filter)
int32_t height
int32_t width
VkOffset3D srcOffsets[2]
VkImageSubresourceLayers srcSubresource
VkOffset3D dstOffsets[2]
VkImageSubresourceLayers dstSubresource
VkImageSubresourceRange subresourceRange
@ VK_FILTER_LINEAR
#define VK_QUEUE_FAMILY_IGNORED
@ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER

◆ onReportSubmitHistograms()

void GrVkGpu::onReportSubmitHistograms ( )
overrideprivatevirtual

Reimplemented from GrGpu.

Definition at line 2271 of file GrVkGpu.cpp.

2271 {
2272#if SK_HISTOGRAMS_ENABLED
2273 uint64_t allocatedMemory = 0, usedMemory = 0;
2274 std::tie(allocatedMemory, usedMemory) = fMemoryAllocator->totalAllocatedAndUsedMemory();
2275 SkASSERT(usedMemory <= allocatedMemory);
2276 if (allocatedMemory > 0) {
2277 SK_HISTOGRAM_PERCENTAGE("VulkanMemoryAllocator.PercentUsed",
2278 (usedMemory * 100) / allocatedMemory);
2279 }
2280 // allocatedMemory is in bytes and needs to be reported in kilobytes. SK_HISTOGRAM_MEMORY_KB
2281 // supports samples up to around 500MB which should support the amounts of memory we allocate.
2282 SK_HISTOGRAM_MEMORY_KB("VulkanMemoryAllocator.AmountAllocated", allocatedMemory >> 10);
2283#endif // SK_HISTOGRAMS_ENABLED
2284}
#define SK_HISTOGRAM_MEMORY_KB(name, sample)
Definition SkTypes.h:119
#define SK_HISTOGRAM_PERCENTAGE(name, percent_as_int)
Definition SkTypes.h:122
virtual std::pair< uint64_t, uint64_t > totalAllocatedAndUsedMemory() const =0

◆ onResolveRenderTarget()

void GrVkGpu::onResolveRenderTarget ( GrRenderTarget target,
const SkIRect resolveRect 
)
overridevirtual

Implements GrGpu.

Definition at line 822 of file GrVkGpu.cpp.

822 {
823 SkASSERT(target->numSamples() > 1);
824 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
826
827 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
828 // We would have resolved the RT during the render pass;
829 return;
830 }
831
832 this->resolveImage(target, rt, resolveRect,
833 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
834}
const GrVkImageView * resolveAttachmentView() const
const GrVkImageView * colorAttachmentView() const
static constexpr SkIPoint Make(int32_t x, int32_t y)
constexpr int32_t x() const
Definition SkRect.h:141
constexpr int32_t y() const
Definition SkRect.h:148

◆ onSubmitToGpu()

bool GrVkGpu::onSubmitToGpu ( GrSyncCpu  sync)
overrideprivatevirtual

Implements GrGpu.

Definition at line 2255 of file GrVkGpu.cpp.

2255 {
2256 if (sync == GrSyncCpu::kYes) {
2257 return this->submitCommandBuffer(kForce_SyncQueue);
2258 } else {
2259 return this->submitCommandBuffer(kSkip_SyncQueue);
2260 }
2261}

◆ onTransferFromBufferToBuffer()

bool GrVkGpu::onTransferFromBufferToBuffer ( sk_sp< GrGpuBuffer src,
size_t  srcOffset,
sk_sp< GrGpuBuffer dst,
size_t  dstOffset,
size_t  size 
)
overrideprivatevirtual

Implements GrGpu.

Definition at line 608 of file GrVkGpu.cpp.

612 {
613 if (!this->currentCommandBuffer()) {
614 return false;
615 }
616
617 VkBufferCopy copyRegion;
618 copyRegion.srcOffset = srcOffset;
619 copyRegion.dstOffset = dstOffset;
620 copyRegion.size = size;
621
623 static_cast<GrVkBuffer*>(dst.get()),
624 dstOffset,
625 size,
626 /*after=*/false);
627 this->currentCommandBuffer()->copyBuffer(this, std::move(src), dst, 1, &copyRegion);
629 static_cast<GrVkBuffer*>(dst.get()),
630 dstOffset,
631 size,
632 /*after=*/true);
633
634 return true;
635}
static void add_transfer_dst_buffer_mem_barrier(GrVkGpu *gpu, GrVkBuffer *dst, size_t offset, size_t size, bool after)
Definition GrVkGpu.cpp:566
void copyBuffer(GrVkGpu *gpu, sk_sp< GrGpuBuffer > srcBuffer, sk_sp< GrGpuBuffer > dstBuffer, uint32_t regionCount, const VkBufferCopy *regions)
it will be possible to load the file into Perfetto s trace viewer disable asset Prevents usage of any non test fonts unless they were explicitly Loaded via prefetched default font Indicates whether the embedding started a prefetch of the default font manager before creating the engine run In non interactive keep the shell running after the Dart script has completed enable serial On low power devices with low core running concurrent GC tasks on threads can cause them to contend with the UI thread which could potentially lead to jank This option turns off all concurrent GC activities domain network JSON encoded network policy per domain This overrides the DisallowInsecureConnections switch Embedder can specify whether to allow or disallow insecure connections at a domain level old gen heap size
Definition switches.h:259
VkDeviceSize dstOffset
VkDeviceSize size
VkDeviceSize srcOffset

◆ onTransferPixelsFrom()

bool GrVkGpu::onTransferPixelsFrom ( GrSurface surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  bufferColorType,
sk_sp< GrGpuBuffer transferBuffer,
size_t  offset 
)
override private virtual

Implements GrGpu.

Definition at line 710 of file GrVkGpu.cpp.

715 {
716 if (!this->currentCommandBuffer()) {
717 return false;
718 }
719 SkASSERT(surface);
720 SkASSERT(transferBuffer);
721 if (fProtectedContext == GrProtected::kYes) {
722 return false;
723 }
724
725 GrVkImage* srcImage;
726 if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
727 // Reading from render targets that wrap a secondary command buffer is not allowed since
728 // it would require us to know the VkImage, which we don't have, as well as need us to
729 // stop and start the VkRenderPass which we don't have access to.
730 if (rt->wrapsSecondaryCommandBuffer()) {
731 return false;
732 }
733 if (!rt->nonMSAAAttachment()) {
734 return false;
735 }
736 srcImage = rt->nonMSAAAttachment();
737 } else {
738 SkASSERT(surface->asTexture());
739 srcImage = static_cast<GrVkTexture*>(surface->asTexture())->textureImage();
740 }
741
742 VkFormat format = srcImage->imageFormat();
743 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
744 return false;
745 }
747
748 // Set up copy region
750 memset(&region, 0, sizeof(VkBufferImageCopy));
751 region.bufferOffset = offset;
752 region.bufferRowLength = rect.width();
753 region.bufferImageHeight = 0;
754 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
755 region.imageOffset = {rect.left(), rect.top(), 0};
756 region.imageExtent = {(uint32_t)rect.width(), (uint32_t)rect.height(), 1};
757
758 srcImage->setImageLayout(this,
762 false);
763
764 this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
766 transferBuffer, 1, &region);
767
768 GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
769 // Make sure the copy to buffer has finished.
774 false);
775 return true;
776}

◆ onTransferPixelsTo()

bool GrVkGpu::onTransferPixelsTo ( GrTexture texture,
SkIRect  rect,
GrColorType  textureColorType,
GrColorType  bufferColorType,
sk_sp< GrGpuBuffer transferBuffer,
size_t  offset,
size_t  rowBytes 
)
override private virtual

Implements GrGpu.

Definition at line 637 of file GrVkGpu.cpp.

643 {
644 if (!this->currentCommandBuffer()) {
645 return false;
646 }
647
648 size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
649 if (GrBackendFormatBytesPerPixel(texture->backendFormat()) != bpp) {
650 return false;
651 }
652
653 // Vulkan only supports offsets that are both 4-byte aligned and aligned to a pixel.
654 if ((bufferOffset & 0x3) || (bufferOffset % bpp)) {
655 return false;
656 }
657 GrVkTexture* tex = static_cast<GrVkTexture*>(texture);
658 if (!tex) {
659 return false;
660 }
661 GrVkImage* vkImage = tex->textureImage();
662 VkFormat format = vkImage->imageFormat();
663
664 // Can't transfer compressed data
666
667 if (!transferBuffer) {
668 return false;
669 }
670
671 if (bufferColorType != this->vkCaps().transferColorType(format, surfaceColorType)) {
672 return false;
673 }
675
676 SkASSERT(SkIRect::MakeSize(texture->dimensions()).contains(rect));
677
678 // Set up copy region
680 memset(&region, 0, sizeof(VkBufferImageCopy));
681 region.bufferOffset = bufferOffset;
682 region.bufferRowLength = (uint32_t)(rowBytes/bpp);
683 region.bufferImageHeight = 0;
684 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
685 region.imageOffset = { rect.left(), rect.top(), 0 };
686 region.imageExtent = { (uint32_t)rect.width(), (uint32_t)rect.height(), 1 };
687
688 // Change layout of our target so it can be copied to
689 vkImage->setImageLayout(this,
693 false);
694
695 const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
696
697 // Copy the buffer to the image.
699 vkBuffer->vkBuffer(),
700 vkImage,
702 1,
703 &region);
704 this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));
705
706 tex->markMipmapsDirty();
707 return true;
708}
size_t GrBackendFormatBytesPerPixel(const GrBackendFormat &format)
void markMipmapsDirty()
Definition GrTexture.cpp:25
VkBuffer vkBuffer() const
Definition GrVkBuffer.h:24
void addGrBuffer(sk_sp< const GrBuffer > buffer)
void copyBufferToImage(const GrVkGpu *gpu, VkBuffer srcBuffer, GrVkImage *dstImage, VkImageLayout dstLayout, uint32_t copyRegionCount, const VkBufferImageCopy *copyRegions)
GrVkImage * textureImage() const
Definition GrVkTexture.h:50
static constexpr SkIRect MakeSize(const SkISize &size)
Definition SkRect.h:66
bool contains(int32_t x, int32_t y) const
Definition SkRect.h:463

◆ onUpdateCompressedBackendTexture()

bool GrVkGpu::onUpdateCompressedBackendTexture ( const GrBackendTexture backendTexture,
sk_sp< skgpu::RefCntedCallback finishedCallback,
const void *  data,
size_t  length 
)
override private virtual

Implements GrGpu.

Definition at line 1835 of file GrVkGpu.cpp.

1838 {
1841
1842 sk_sp<skgpu::MutableTextureState> mutableState = backendTexture.getMutableState();
1843 SkASSERT(mutableState);
1845 backendTexture.dimensions(),
1849 info,
1850 std::move(mutableState));
1851 if (!texture) {
1852 return false;
1853 }
1854
1856 if (!cmdBuffer) {
1857 return false;
1858 }
1859 GrVkImage* image = texture->textureImage();
1860 image->setImageLayout(this,
1864 false);
1865
1866 SkTextureCompressionType compression =
1868
1870 TArray<size_t> individualMipOffsets;
1872
1873 fill_in_compressed_regions(&fStagingBufferManager,
1874 &regions,
1875 &individualMipOffsets,
1876 &slice,
1877 compression,
1878 info.fFormat,
1879 backendTexture.dimensions(),
1880 backendTexture.fMipmapped);
1881
1882 if (!slice.fBuffer) {
1883 return false;
1884 }
1885
1886 memcpy(slice.fOffsetMapPtr, data, size);
1887
1888 cmdBuffer->addGrSurface(texture);
1889 // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
1890 // because we don't need the command buffer to ref the buffer here. The reason being is that
1891 // the buffer is coming from the staging manager and the staging manager will make sure the
1892 // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
1893 // every upload in the frame.
1894 cmdBuffer->copyBufferToImage(this,
1895 static_cast<GrVkBuffer*>(slice.fBuffer)->vkBuffer(),
1896 image,
1897 image->currentLayout(),
1898 regions.size(),
1899 regions.begin());
1900
1901 // Change image layout to shader read since if we use this texture as a borrowed
1902 // texture within Ganesh we require that its layout be set to that
1903 image->setImageLayout(this,
1907 false);
1908
1909 if (finishedCallback) {
1910 this->addFinishedCallback(std::move(finishedCallback));
1911 }
1912 return true;
1913}
static size_t fill_in_compressed_regions(GrStagingBufferManager *stagingBufferManager, TArray< VkBufferImageCopy > *regions, TArray< size_t > *individualMipOffsets, GrStagingBufferManager::Slice *slice, SkTextureCompressionType compression, VkFormat vkFormat, SkISize dimensions, skgpu::Mipmapped mipmapped)
Definition GrVkGpu.cpp:897
GrBackendFormat getBackendFormat() const
void addGrSurface(sk_sp< const GrSurface > surface)

◆ onWrapBackendRenderTarget()

sk_sp< GrRenderTarget > GrVkGpu::onWrapBackendRenderTarget ( const GrBackendRenderTarget backendRT)
override private virtual

Implements GrGpu.

Definition at line 1462 of file GrVkGpu.cpp.

1462 {
1464 if (!GrBackendRenderTargets::GetVkImageInfo(backendRT, &info)) {
1465 return nullptr;
1466 }
1467
1468 if (!check_image_info(this->vkCaps(), info, false, this->queueIndex())) {
1469 return nullptr;
1470 }
1471
1472 // We will always render directly to this VkImage.
1473 static bool kResolveOnly = false;
1474 if (!check_rt_image_info(this->vkCaps(), info, kResolveOnly)) {
1475 return nullptr;
1476 }
1477
1478 if (backendRT.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1479 return nullptr;
1480 }
1481
1482 sk_sp<skgpu::MutableTextureState> mutableState = backendRT.getMutableState();
1483 SkASSERT(mutableState);
1484
1486 this, backendRT.dimensions(), backendRT.sampleCnt(), info, std::move(mutableState));
1487
1488 // We don't allow the client to supply a premade stencil buffer. We always create one if needed.
1489 SkASSERT(!backendRT.stencilBits());
1490 if (tgt) {
1491 SkASSERT(tgt->canAttemptStencilAttachment(tgt->numSamples() > 1));
1492 }
1493
1494 return tgt;
1495}
static bool check_image_info(const GrVkCaps &caps, const GrVkImageInfo &info, bool needsAllocation, uint32_t graphicsQueueIndex)
Definition GrVkGpu.cpp:1291
static bool check_rt_image_info(const GrVkCaps &caps, const GrVkImageInfo &info, bool resolveOnly)
Definition GrVkGpu.cpp:1379
SkISize dimensions() const
uint32_t queueIndex() const
Definition GrVkGpu.h:73
static sk_sp< GrVkRenderTarget > MakeWrappedRenderTarget(GrVkGpu *, SkISize, int sampleCnt, const GrVkImageInfo &, sk_sp< skgpu::MutableTextureState >)
SK_API bool GetVkImageInfo(const GrBackendRenderTarget &, GrVkImageInfo *)

◆ onWrapBackendTexture()

sk_sp< GrTexture > GrVkGpu::onWrapBackendTexture ( const GrBackendTexture backendTex,
GrWrapOwnership  ownership,
GrWrapCacheable  cacheable,
GrIOType  ioType 
)
override private virtual

Implements GrGpu.

Definition at line 1389 of file GrVkGpu.cpp.

1392 {
1393 GrVkImageInfo imageInfo;
1394 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1395 return nullptr;
1396 }
1397
1398 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1399 this->queueIndex())) {
1400 return nullptr;
1401 }
1402
1403 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1404 return nullptr;
1405 }
1406
1407 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1408 return nullptr;
1409 }
1410
1411 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1412 SkASSERT(mutableState);
1413 return GrVkTexture::MakeWrappedTexture(this, backendTex.dimensions(), ownership, cacheable,
1414 ioType, imageInfo, std::move(mutableState));
1415}
@ kAdopt_GrWrapOwnership
Definition GrTypesPriv.h:81
static bool check_tex_image_info(const GrVkCaps &caps, const GrVkImageInfo &info)
Definition GrVkGpu.cpp:1337

◆ onWrapCompressedBackendTexture()

sk_sp< GrTexture > GrVkGpu::onWrapCompressedBackendTexture ( const GrBackendTexture beTex,
GrWrapOwnership  ownership,
GrWrapCacheable  cacheable 
)
override private virtual

Implements GrGpu.

Definition at line 1417 of file GrVkGpu.cpp.

1419 {
1420 return this->onWrapBackendTexture(beTex, ownership, cacheable, kRead_GrIOType);
1421}
@ kRead_GrIOType
sk_sp< GrTexture > onWrapBackendTexture(const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType) override
Definition GrVkGpu.cpp:1389

◆ onWrapRenderableBackendTexture()

sk_sp< GrTexture > GrVkGpu::onWrapRenderableBackendTexture ( const GrBackendTexture backendTex,
int  sampleCnt,
GrWrapOwnership  ownership,
GrWrapCacheable  cacheable 
)
override private virtual

Implements GrGpu.

Definition at line 1423 of file GrVkGpu.cpp.

1426 {
1427 GrVkImageInfo imageInfo;
1428 if (!GrBackendTextures::GetVkImageInfo(backendTex, &imageInfo)) {
1429 return nullptr;
1430 }
1431
1432 if (!check_image_info(this->vkCaps(), imageInfo, kAdopt_GrWrapOwnership == ownership,
1433 this->queueIndex())) {
1434 return nullptr;
1435 }
1436
1437 if (!check_tex_image_info(this->vkCaps(), imageInfo)) {
1438 return nullptr;
1439 }
1440 // If sampleCnt is > 1 we will create an intermediate MSAA VkImage and then resolve into
1441 // the wrapped VkImage.
1442 bool resolveOnly = sampleCnt > 1;
1443 if (!check_rt_image_info(this->vkCaps(), imageInfo, resolveOnly)) {
1444 return nullptr;
1445 }
1446
1447 if (backendTex.isProtected() && (fProtectedContext == GrProtected::kNo)) {
1448 return nullptr;
1449 }
1450
1451 sampleCnt = this->vkCaps().getRenderTargetSampleCount(sampleCnt, imageInfo.fFormat);
1452
1453 sk_sp<skgpu::MutableTextureState> mutableState = backendTex.getMutableState();
1454 SkASSERT(mutableState);
1455
1457 sampleCnt, ownership, cacheable,
1458 imageInfo,
1459 std::move(mutableState));
1460}
int getRenderTargetSampleCount(int requestedCount, const GrBackendFormat &) const override
static sk_sp< GrVkTextureRenderTarget > MakeWrappedTextureRenderTarget(GrVkGpu *, SkISize dimensions, int sampleCnt, GrWrapOwnership, GrWrapCacheable, const GrVkImageInfo &, sk_sp< skgpu::MutableTextureState >)
VkFormat fFormat
Definition GrVkTypes.h:30

◆ onWrapVulkanSecondaryCBAsRenderTarget()

sk_sp< GrRenderTarget > GrVkGpu::onWrapVulkanSecondaryCBAsRenderTarget ( const SkImageInfo imageInfo,
const GrVkDrawableInfo vkInfo 
)
override private virtual

Reimplemented from GrGpu.

Definition at line 1497 of file GrVkGpu.cpp.

1498 {
1499 int maxSize = this->caps()->maxTextureSize();
1500 if (imageInfo.width() > maxSize || imageInfo.height() > maxSize) {
1501 return nullptr;
1502 }
1503
1504 GrBackendFormat backendFormat = GrBackendFormats::MakeVk(vkInfo.fFormat);
1505 if (!backendFormat.isValid()) {
1506 return nullptr;
1507 }
1508 int sampleCnt = this->vkCaps().getRenderTargetSampleCount(1, vkInfo.fFormat);
1509 if (!sampleCnt) {
1510 return nullptr;
1511 }
1512
1513 return GrVkRenderTarget::MakeSecondaryCBRenderTarget(this, imageInfo.dimensions(), vkInfo);
1514}
bool isValid() const
int maxTextureSize() const
Definition GrCaps.h:229
static sk_sp< GrVkRenderTarget > MakeSecondaryCBRenderTarget(GrVkGpu *, SkISize, const GrVkDrawableInfo &vkInfo)
VkFormat fFormat
Definition GrVkTypes.h:88
SkISize dimensions() const
int width() const
int height() const

◆ onWritePixels()

bool GrVkGpu::onWritePixels ( GrSurface surface,
SkIRect  rect,
GrColorType  surfaceColorType,
GrColorType  srcColorType,
const GrMipLevel  texels[],
int  mipLevelCount,
bool  prepForTexSampling 
)
override private virtual

Implements GrGpu.

Definition at line 494 of file GrVkGpu.cpp.

500 {
501 GrVkTexture* texture = static_cast<GrVkTexture*>(surface->asTexture());
502 if (!texture) {
503 return false;
504 }
505 GrVkImage* texImage = texture->textureImage();
506
507 // Make sure we have at least the base level
508 if (!mipLevelCount || !texels[0].fPixels) {
509 return false;
510 }
511
513 bool success = false;
514 bool linearTiling = texImage->isLinearTiled();
515 if (linearTiling) {
516 if (mipLevelCount > 1) {
517 SkDebugf("Can't upload mipmap data to linear tiled texture");
518 return false;
519 }
521 // Need to change the layout to general in order to perform a host write
522 texImage->setImageLayout(this,
526 false);
527 if (!this->submitCommandBuffer(kForce_SyncQueue)) {
528 return false;
529 }
530 }
531 success = this->uploadTexDataLinear(texImage,
532 rect,
533 srcColorType,
534 texels[0].fPixels,
535 texels[0].fRowBytes);
536 } else {
537 SkASSERT(mipLevelCount <= (int)texImage->mipLevels());
538 success = this->uploadTexDataOptimal(texImage,
539 rect,
540 srcColorType,
541 texels,
542 mipLevelCount);
543 if (1 == mipLevelCount) {
544 texture->markMipmapsDirty();
545 }
546 }
547
548 if (prepForTexSampling) {
549 texImage->setImageLayout(this,
553 false);
554 }
555
556 return success;
557}
VkImageLayout currentLayout() const
Definition GrVkImage.h:132
@ VK_IMAGE_LAYOUT_PREINITIALIZED
@ VK_IMAGE_LAYOUT_GENERAL
@ VK_ACCESS_HOST_WRITE_BIT

◆ physicalDevice()

VkPhysicalDevice GrVkGpu::physicalDevice ( ) const
inline

Definition at line 70 of file GrVkGpu.h.

70{ return fPhysicalDevice; }

◆ physicalDeviceMemoryProperties()

const VkPhysicalDeviceMemoryProperties & GrVkGpu::physicalDeviceMemoryProperties ( ) const
inline

Definition at line 78 of file GrVkGpu.h.

78 {
79 return fPhysDevMemProps;
80 }

◆ physicalDeviceProperties()

const VkPhysicalDeviceProperties & GrVkGpu::physicalDeviceProperties ( ) const
inline

Definition at line 75 of file GrVkGpu.h.

75 {
76 return fPhysDevProps;
77 }

◆ pipelineBuilder()

GrThreadSafePipelineBuilder * GrVkGpu::pipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 314 of file GrVkGpu.cpp.

314 {
315 return fResourceProvider.pipelineStateCache();
316}
GrThreadSafePipelineBuilder * pipelineStateCache()

◆ prepareSurfacesForBackendAccessAndStateUpdates()

void GrVkGpu::prepareSurfacesForBackendAccessAndStateUpdates ( SkSpan< GrSurfaceProxy * >  proxies,
SkSurfaces::BackendSurfaceAccess  access,
const skgpu::MutableTextureState newState 
)
override private virtual

Reimplemented from GrGpu.

Definition at line 2202 of file GrVkGpu.cpp.

2205 {
2206 // Submit the current command buffer to the Queue. Whether we inserted semaphores or not does
2207 // not affect what we do here.
2208 if (!proxies.empty() && (access == SkSurfaces::BackendSurfaceAccess::kPresent || newState)) {
2209 // We currently don't support passing in new surface state for multiple proxies here. The
2210 // only time we have multiple proxies is if we are flushing a yuv SkImage which won't have
2211 // state updates anyway. Additionally if we have a newState then we must not have any
2212 // BackendSurfaceAccess.
2213 SkASSERT(!newState || proxies.size() == 1);
2216 for (GrSurfaceProxy* proxy : proxies) {
2217 SkASSERT(proxy->isInstantiated());
2218 if (GrTexture* tex = proxy->peekTexture()) {
2219 image = static_cast<GrVkTexture*>(tex)->textureImage();
2220 } else {
2221 GrRenderTarget* rt = proxy->peekRenderTarget();
2222 SkASSERT(rt);
2223 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2224 image = vkRT->externalAttachment();
2225 }
2226 if (newState) {
2227 VkImageLayout newLayout =
2229 uint32_t newIndex =
2231 set_layout_and_queue_from_mutable_state(this, image, newLayout, newIndex);
2232 } else {
2234 image->prepareForPresent(this);
2235 }
2236 }
2237 }
2238}
void set_layout_and_queue_from_mutable_state(GrVkGpu *gpu, GrVkImage *image, VkImageLayout newLayout, uint32_t newQueueFamilyIndex)
Definition GrVkGpu.cpp:1915
GrVkImage * externalAttachment() const
constexpr bool empty() const
Definition SkSpan_impl.h:96
constexpr size_t size() const
Definition SkSpan_impl.h:95
@ kPresent
back-end surface will be used for presenting to screen
@ kNoAccess
back-end surface will not be used by client
SK_API uint32_t GetVkQueueFamilyIndex(const MutableTextureState &state)
SK_API VkImageLayout GetVkImageLayout(const MutableTextureState &state)
VkImageLayout

◆ prepareTextureForCrossContextUsage()

std::unique_ptr< GrSemaphore > GrVkGpu::prepareTextureForCrossContextUsage ( GrTexture )
override virtual

Put this texture in a safe and known state for use across multiple contexts. Depending on the backend, this may return a GrSemaphore. If so, other contexts should wait on that semaphore before using this texture.

Implements GrGpu.

Definition at line 2748 of file GrVkGpu.cpp.

2748 {
2750 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2751 vkTexture->setImageLayout(this,
2755 false);
2756 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2757 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2758 // Eventually we will abandon the whole GPU if this fails.
2760
2761 // The image layout change serves as a barrier, so no semaphore is needed.
2762 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2763 // thread safe so that only the first thread that tries to use the semaphore actually submits
2764 // it. This additionally would also require thread safety in command buffer submissions to
2765 // queues in general.
2766 return nullptr;
2767}
bool submitToGpu(GrSyncCpu sync)
Definition GrGpu.cpp:748

◆ protectedContext()

bool GrVkGpu::protectedContext ( ) const
inline

Definition at line 81 of file GrVkGpu.h.

81{ return fProtectedContext == skgpu::Protected::kYes; }

◆ queue()

VkQueue GrVkGpu::queue ( ) const
inline

Definition at line 72 of file GrVkGpu.h.

72{ return fQueue; }

◆ queueIndex()

uint32_t GrVkGpu::queueIndex ( ) const
inline

Definition at line 73 of file GrVkGpu.h.

73{ return fQueueIndex; }

◆ refPipelineBuilder()

sk_sp< GrThreadSafePipelineBuilder > GrVkGpu::refPipelineBuilder ( )
override virtual

Implements GrGpu.

Definition at line 318 of file GrVkGpu.cpp.

318 {
319 return fResourceProvider.refPipelineStateCache();
320}
sk_sp< GrThreadSafePipelineBuilder > refPipelineStateCache()

◆ releaseUnlockedBackendObjects()

void GrVkGpu::releaseUnlockedBackendObjects ( )
inline override virtual

Frees any backend-specific objects that are not currently in use by the GPU. This is called when the client is trying to free up as much GPU memory as possible. We will not release resources connected to programs/pipelines since the cost to recreate those is significantly higher than that of other resources.

Reimplemented from GrGpu.

Definition at line 53 of file GrVkGpu.h.

53 {
54 fResourceProvider.releaseUnlockedBackendObjects();
55 }

◆ resourceProvider()

GrVkResourceProvider & GrVkGpu::resourceProvider ( )
inline

Definition at line 83 of file GrVkGpu.h.

83{ return fResourceProvider; }

◆ setBackendRenderTargetState()

bool GrVkGpu::setBackendRenderTargetState ( const GrBackendRenderTarget backendRenderTarget,
const skgpu::MutableTextureState newState,
skgpu::MutableTextureState previousState,
sk_sp< skgpu::RefCntedCallback finishedCallback 
)
override virtual

Reimplemented from GrGpu.

Definition at line 1988 of file GrVkGpu.cpp.

1991 {
1994 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1995 SkASSERT(currentState);
1997 return this->setBackendSurfaceState(info, std::move(currentState),
1998 backendRenderTarget.dimensions(),
2001 previousState, std::move(finishedCallback));
2002}

◆ setBackendTextureState()

bool GrVkGpu::setBackendTextureState ( const GrBackendTexture backendTeture,
const skgpu::MutableTextureState newState,
skgpu::MutableTextureState previousState,
sk_sp< skgpu::RefCntedCallback finishedCallback 
)
override virtual

Reimplemented from GrGpu.

Definition at line 1972 of file GrVkGpu.cpp.

1975 {
1978 sk_sp<skgpu::MutableTextureState> currentState = backendTeture.getMutableState();
1979 SkASSERT(currentState);
1980 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1981 return this->setBackendSurfaceState(info, std::move(currentState), backendTeture.dimensions(),
1984 previousState,
1985 std::move(finishedCallback));
1986}

◆ stagingBufferManager()

GrStagingBufferManager * GrVkGpu::stagingBufferManager ( )
inline override virtual

Reimplemented from GrGpu.

Definition at line 63 of file GrVkGpu.h.

63{ return &fStagingBufferManager; }

◆ storeVkPipelineCacheData()

void GrVkGpu::storeVkPipelineCacheData ( )
override virtual

Reimplemented from GrGpu.

Definition at line 2773 of file GrVkGpu.cpp.

2773 {
2774 if (this->getContext()->priv().getPersistentCache()) {
2776 }
2777}
FlPixelBufferTexturePrivate * priv

◆ submit()

void GrVkGpu::submit ( GrOpsRenderPass renderPass)
override virtual

Implements GrGpu.

Definition at line 2706 of file GrVkGpu.cpp.

2706 {
2707 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2708
2709 fCachedOpsRenderPass->submit();
2710 fCachedOpsRenderPass->reset();
2711}

◆ submitSecondaryCommandBuffer()

void GrVkGpu::submitSecondaryCommandBuffer ( std::unique_ptr< GrVkSecondaryCommandBuffer buffer)

Definition at line 2699 of file GrVkGpu.cpp.

2699 {
2700 if (!this->currentCommandBuffer()) {
2701 return;
2702 }
2703 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2704}
void executeCommands(const GrVkGpu *gpu, std::unique_ptr< GrVkSecondaryCommandBuffer > secondaryBuffer)

◆ takeOwnershipOfBuffer()

void GrVkGpu::takeOwnershipOfBuffer ( sk_sp< GrGpuBuffer buffer)
override virtual

Reimplemented from GrGpu.

Definition at line 2251 of file GrVkGpu.cpp.

2251 {
2252 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2253}

◆ updateBuffer()

bool GrVkGpu::updateBuffer ( sk_sp< GrVkBuffer buffer,
const void *  src,
VkDeviceSize  offset,
VkDeviceSize  size 
)

Definition at line 1245 of file GrVkGpu.cpp.

1246 {
1247 if (!this->currentCommandBuffer()) {
1248 return false;
1249 }
1251 static_cast<GrVkBuffer*>(buffer.get()),
1252 offset,
1253 size,
1254 /*after=*/false);
1255 this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1257 static_cast<GrVkBuffer*>(buffer.get()),
1258 offset,
1259 size,
1260 /*after=*/true);
1261
1262 return true;
1263}
void updateBuffer(GrVkGpu *gpu, sk_sp< GrVkBuffer > dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *data)

◆ vkCaps()

const GrVkCaps & GrVkGpu::vkCaps ( ) const
inline

Definition at line 61 of file GrVkGpu.h.

61{ return *fVkCaps; }

◆ vkInterface()

const skgpu::VulkanInterface * GrVkGpu::vkInterface ( ) const
inline

Definition at line 60 of file GrVkGpu.h.

60{ return fInterface.get(); }

◆ waitSemaphore()

void GrVkGpu::waitSemaphore ( GrSemaphore semaphore)
override virtual

Implements GrGpu.

Definition at line 2736 of file GrVkGpu.cpp.

2736 {
2737 SkASSERT(semaphore);
2738
2739 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2740
2741 GrVkSemaphore::Resource* resource = vkSem->getResource();
2742 if (resource->shouldWait()) {
2743 resource->ref();
2744 fSemaphoresToWaitOn.push_back(resource);
2745 }
2746}

◆ wrapBackendSemaphore()

std::unique_ptr< GrSemaphore > GrVkGpu::wrapBackendSemaphore ( const GrBackendSemaphore semaphore,
GrSemaphoreWrapType  wrapType,
GrWrapOwnership  ownership 
)
override virtual

Implements GrGpu.

Definition at line 2717 of file GrVkGpu.cpp.

2719 {
2721 wrapType, ownership);
2722}
static std::unique_ptr< GrVkSemaphore > MakeWrapped(GrVkGpu *, VkSemaphore, GrSemaphoreWrapType, GrWrapOwnership)
SK_API VkSemaphore GetVkSemaphore(const GrBackendSemaphore &)

◆ xferBarrier()

void GrVkGpu::xferBarrier ( GrRenderTarget rt,
GrXferBarrierType  barrierType 
)
override virtual

Implements GrGpu.

Definition at line 2004 of file GrVkGpu.cpp.

2004 {
2005 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
2006 VkPipelineStageFlags dstStage;
2007 VkAccessFlags dstAccess;
2008 if (barrierType == kBlend_GrXferBarrierType) {
2011 } else {
2012 SkASSERT(barrierType == kTexture_GrXferBarrierType);
2015 }
2016 GrVkImage* image = vkRT->colorAttachment();
2017 VkImageMemoryBarrier barrier;
2019 barrier.pNext = nullptr;
2021 barrier.dstAccessMask = dstAccess;
2022 barrier.oldLayout = image->currentLayout();
2023 barrier.newLayout = barrier.oldLayout;
2026 barrier.image = image->image();
2027 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2028 this->addImageMemoryBarrier(image->resource(),
2030 dstStage, true, &barrier);
2031}
@ kTexture_GrXferBarrierType
@ kBlend_GrXferBarrierType
const Resource * resource() const
Definition GrVkImage.h:118
VkAccessFlags dstAccessMask
VkAccessFlags srcAccessMask
VkStructureType sType
VkImageLayout newLayout
VkImageLayout oldLayout
VkFlags VkPipelineStageFlags
VkFlags VkAccessFlags
@ VK_ACCESS_INPUT_ATTACHMENT_READ_BIT
@ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
@ VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT
@ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT

◆ zeroBuffer()

bool GrVkGpu::zeroBuffer ( sk_sp< GrGpuBuffer buffer)

Definition at line 1265 of file GrVkGpu.cpp.

1265 {
1266 if (!this->currentCommandBuffer()) {
1267 return false;
1268 }
1269
1271 static_cast<GrVkBuffer*>(buffer.get()),
1272 /*offset=*/0,
1273 buffer->size(),
1274 /*after=*/false);
1275 this->currentCommandBuffer()->fillBuffer(this,
1276 buffer,
1277 /*offset=*/0,
1278 buffer->size(),
1279 /*data=*/0);
1281 static_cast<GrVkBuffer*>(buffer.get()),
1282 /*offset=*/0,
1283 buffer->size(),
1284 /*after=*/true);
1285
1286 return true;
1287}
size_t size() const final
Definition GrGpuBuffer.h:34
void fillBuffer(GrVkGpu *gpu, sk_sp< GrGpuBuffer >, VkDeviceSize offset, VkDeviceSize size, uint32_t data)

The documentation for this class was generated from the following files: