GrVkGpu Class Reference

#include <GrVkGpu.h>

Inheritance diagram for GrVkGpu: GrVkGpu inherits GrGpu.

Public Types

enum  PersistentCacheKeyType : uint32_t { kShader_PersistentCacheKeyType = 0 , kPipelineCache_PersistentCacheKeyType = 1 }
 
typedef void * SubmitContext
 
typedef void(* SubmitProc) (SubmitContext submitContext)
 
- Public Types inherited from GrGpu
enum class  DisconnectType { kAbandon , kCleanup }
 

Public Member Functions

 ~GrVkGpu () override
 
void disconnect (DisconnectType) override
 
bool disconnected () const
 
void releaseUnlockedBackendObjects () override
 
GrThreadSafePipelineBuilder * pipelineBuilder () override
 
sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder () override
 
const skgpu::VulkanInterface * vkInterface () const
 
const GrVkCaps & vkCaps () const
 
GrStagingBufferManager * stagingBufferManager () override
 
void takeOwnershipOfBuffer (sk_sp< GrGpuBuffer >) override
 
bool isDeviceLost () const override
 
skgpu::VulkanMemoryAllocator * memoryAllocator () const
 
VkPhysicalDevice physicalDevice () const
 
VkDevice device () const
 
VkQueue queue () const
 
uint32_t queueIndex () const
 
GrVkCommandPool * cmdPool () const
 
const VkPhysicalDeviceProperties & physicalDeviceProperties () const
 
const VkPhysicalDeviceMemoryProperties & physicalDeviceMemoryProperties () const
 
bool protectedContext () const
 
GrVkResourceProvider & resourceProvider ()
 
GrVkPrimaryCommandBuffer * currentCommandBuffer () const
 
void xferBarrier (GrRenderTarget *, GrXferBarrierType) override
 
bool setBackendTextureState (const GrBackendTexture &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback) override
 
bool setBackendRenderTargetState (const GrBackendRenderTarget &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback) override
 
void deleteBackendTexture (const GrBackendTexture &) override
 
bool compile (const GrProgramDesc &, const GrProgramInfo &) override
 
sk_sp< GrAttachment > makeStencilAttachment (const GrBackendFormat &, SkISize dimensions, int numStencilSamples) override
 
GrBackendFormat getPreferredStencilFormat (const GrBackendFormat &) override
 
sk_sp< GrAttachment > makeMSAAAttachment (SkISize dimensions, const GrBackendFormat &format, int numSamples, GrProtected isProtected, GrMemoryless isMemoryless) override
 
void addBufferMemoryBarrier (const GrManagedResource *, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkBufferMemoryBarrier *barrier) const
 
void addBufferMemoryBarrier (VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkBufferMemoryBarrier *barrier) const
 
void addImageMemoryBarrier (const GrManagedResource *, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, VkImageMemoryBarrier *barrier) const
 
bool loadMSAAFromResolve (GrVkCommandBuffer *commandBuffer, const GrVkRenderPass &renderPass, GrAttachment *dst, GrVkImage *src, const SkIRect &srcRect)
 
bool onRegenerateMipMapLevels (GrTexture *tex) override
 
void onResolveRenderTarget (GrRenderTarget *target, const SkIRect &resolveRect) override
 
void submitSecondaryCommandBuffer (std::unique_ptr< GrVkSecondaryCommandBuffer >)
 
void submit (GrOpsRenderPass *) override
 
std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned) override
 
std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership) override
 
void insertSemaphore (GrSemaphore *semaphore) override
 
void waitSemaphore (GrSemaphore *semaphore) override
 
void addDrawable (std::unique_ptr< SkDrawable::GpuDrawHandler > drawable)
 
void checkFinishProcs () override
 
void finishOutstandingGpuWork () override
 
std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *) override
 
bool updateBuffer (sk_sp< GrVkBuffer > buffer, const void *src, VkDeviceSize offset, VkDeviceSize size)
 
bool zeroBuffer (sk_sp< GrGpuBuffer >)
 
void storeVkPipelineCacheData () override
 
bool beginRenderPass (const GrVkRenderPass *, sk_sp< const GrVkFramebuffer >, const VkClearValue *colorClear, const GrSurface *, const SkIRect &renderPassBounds, bool forSecondaryCB)
 
void endRenderPass (GrRenderTarget *target, GrSurfaceOrigin origin, const SkIRect &bounds)
 
bool checkVkResult (VkResult)
 
- Public Member Functions inherited from GrGpu
 GrGpu (GrDirectContext *direct)
 
virtual ~GrGpu ()
 
GrDirectContext * getContext ()
 
const GrDirectContext * getContext () const
 
const GrCaps * caps () const
 
sk_sp< const GrCaps > refCaps () const
 
virtual GrStagingBufferManager * stagingBufferManager ()
 
virtual GrRingBuffer * uniformsRingBuffer ()
 
virtual void disconnect (DisconnectType)
 
virtual GrThreadSafePipelineBuilder * pipelineBuilder ()=0
 
virtual sk_sp< GrThreadSafePipelineBuilder > refPipelineBuilder ()=0
 
virtual bool isDeviceLost () const
 
void markContextDirty (uint32_t state=kAll_GrBackendState)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Budgeted budgeted, GrProtected isProtected, GrColorType textureColorType, GrColorType srcColorType, const GrMipLevel texels[], int texelLevelCount, std::string_view label)
 
sk_sp< GrTexture > createTexture (SkISize dimensions, const GrBackendFormat &format, GrTextureType textureType, GrRenderable renderable, int renderTargetSampleCnt, skgpu::Mipmapped mipmapped, skgpu::Budgeted budgeted, GrProtected isProtected, std::string_view label)
 
sk_sp< GrTexture > createCompressedTexture (SkISize dimensions, const GrBackendFormat &format, skgpu::Budgeted budgeted, skgpu::Mipmapped mipmapped, GrProtected isProtected, const void *data, size_t dataSize)
 
sk_sp< GrTexture > wrapBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable, GrIOType)
 
sk_sp< GrTexture > wrapCompressedBackendTexture (const GrBackendTexture &, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrTexture > wrapRenderableBackendTexture (const GrBackendTexture &, int sampleCnt, GrWrapOwnership, GrWrapCacheable)
 
sk_sp< GrRenderTarget > wrapBackendRenderTarget (const GrBackendRenderTarget &)
 
sk_sp< GrRenderTarget > wrapVulkanSecondaryCBAsRenderTarget (const SkImageInfo &, const GrVkDrawableInfo &)
 
sk_sp< GrGpuBuffer > createBuffer (size_t size, GrGpuBufferType intendedType, GrAccessPattern accessPattern)
 
void resolveRenderTarget (GrRenderTarget *, const SkIRect &resolveRect)
 
bool regenerateMipMapLevels (GrTexture *)
 
void resetTextureBindings ()
 
bool readPixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType dstColorType, void *buffer, size_t rowBytes)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling=false)
 
bool writePixels (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType srcColorType, const void *buffer, size_t rowBytes, bool prepForTexSampling=false)
 
bool transferFromBufferToBuffer (sk_sp< GrGpuBuffer > src, size_t srcOffset, sk_sp< GrGpuBuffer > dst, size_t dstOffset, size_t size)
 
bool transferPixelsTo (GrTexture *texture, SkIRect rect, GrColorType textureColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset, size_t rowBytes)
 
bool transferPixelsFrom (GrSurface *surface, SkIRect rect, GrColorType surfaceColorType, GrColorType bufferColorType, sk_sp< GrGpuBuffer > transferBuffer, size_t offset)
 
bool copySurface (GrSurface *dst, const SkIRect &dstRect, GrSurface *src, const SkIRect &srcRect, GrSamplerState::Filter filter)
 
GrOpsRenderPass * getOpsRenderPass (GrRenderTarget *renderTarget, bool useMSAASurface, GrAttachment *stencil, GrSurfaceOrigin, const SkIRect &bounds, const GrOpsRenderPass::LoadAndStoreInfo &, const GrOpsRenderPass::StencilLoadAndStoreInfo &, const skia_private::TArray< GrSurfaceProxy *, true > &sampledProxies, GrXferBarrierFlags renderPassXferBarriers)
 
void executeFlushInfo (SkSpan< GrSurfaceProxy * >, SkSurfaces::BackendSurfaceAccess access, const GrFlushInfo &, const skgpu::MutableTextureState *newState)
 
virtual void willExecute ()
 
bool submitToGpu (GrSyncCpu sync)
 
virtual void submit (GrOpsRenderPass *)=0
 
virtual std::unique_ptr< GrSemaphore > makeSemaphore (bool isOwned=true)=0
 
virtual std::unique_ptr< GrSemaphore > wrapBackendSemaphore (const GrBackendSemaphore &, GrSemaphoreWrapType, GrWrapOwnership)=0
 
virtual void insertSemaphore (GrSemaphore *semaphore)=0
 
virtual void waitSemaphore (GrSemaphore *semaphore)=0
 
virtual void addFinishedProc (GrGpuFinishedProc finishedProc, GrGpuFinishedContext finishedContext)=0
 
virtual void checkFinishProcs ()=0
 
virtual void finishOutstandingGpuWork ()=0
 
virtual void takeOwnershipOfBuffer (sk_sp< GrGpuBuffer >)
 
bool checkAndResetOOMed ()
 
virtual std::unique_ptr< GrSemaphore > prepareTextureForCrossContextUsage (GrTexture *)=0
 
virtual void releaseUnlockedBackendObjects ()
 
Stats * stats ()
 
void dumpJSON (SkJSONWriter *) const
 
GrBackendTexture createBackendTexture (SkISize dimensions, const GrBackendFormat &, GrRenderable, skgpu::Mipmapped, GrProtected, std::string_view label)
 
bool clearBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, std::array< float, 4 > color)
 
GrBackendTexture createCompressedBackendTexture (SkISize dimensions, const GrBackendFormat &, skgpu::Mipmapped, GrProtected)
 
bool updateCompressedBackendTexture (const GrBackendTexture &, sk_sp< skgpu::RefCntedCallback > finishedCallback, const void *data, size_t length)
 
virtual bool setBackendTextureState (const GrBackendTexture &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
virtual bool setBackendRenderTargetState (const GrBackendRenderTarget &, const skgpu::MutableTextureState &, skgpu::MutableTextureState *previousState, sk_sp< skgpu::RefCntedCallback > finishedCallback)
 
virtual void deleteBackendTexture (const GrBackendTexture &)=0
 
virtual bool compile (const GrProgramDesc &, const GrProgramInfo &)=0
 
virtual bool precompileShader (const SkData &key, const SkData &data)
 
virtual sk_sp< GrAttachment > makeStencilAttachment (const GrBackendFormat &colorFormat, SkISize dimensions, int numStencilSamples)=0
 
virtual GrBackendFormat getPreferredStencilFormat (const GrBackendFormat &)=0
 
virtual sk_sp< GrAttachment > makeMSAAAttachment (SkISize dimensions, const GrBackendFormat &format, int numSamples, GrProtected isProtected, GrMemoryless isMemoryless)=0
 
void handleDirtyContext ()
 
virtual void storeVkPipelineCacheData ()
 
virtual void xferBarrier (GrRenderTarget *, GrXferBarrierType)=0
 

Static Public Member Functions

static std::unique_ptr< GrGpu > Make (const skgpu::VulkanBackendContext &, const GrContextOptions &, GrDirectContext *)
 

Additional Inherited Members

- Protected Member Functions inherited from GrGpu
void didWriteToSurface (GrSurface *surface, GrSurfaceOrigin origin, const SkIRect *bounds, uint32_t mipLevels=1) const
 
void setOOMed ()
 
void initCaps (sk_sp< const GrCaps > caps)
 
- Static Protected Member Functions inherited from GrGpu
static bool CompressedDataIsCorrect (SkISize dimensions, SkTextureCompressionType, skgpu::Mipmapped, const void *data, size_t length)
 
- Protected Attributes inherited from GrGpu
Stats fStats
 

Detailed Description

Definition at line 42 of file GrVkGpu.h.

Member Typedef Documentation

◆ SubmitContext

typedef void* GrVkGpu::SubmitContext

Definition at line 167 of file GrVkGpu.h.

◆ SubmitProc

typedef void(* GrVkGpu::SubmitProc) (SubmitContext submitContext)

Definition at line 168 of file GrVkGpu.h.

Member Enumeration Documentation

◆ PersistentCacheKeyType

Enumerator
kShader_PersistentCacheKeyType 
kPipelineCache_PersistentCacheKeyType 

Definition at line 184 of file GrVkGpu.h.

184 enum PersistentCacheKeyType : uint32_t {
185     kShader_PersistentCacheKeyType = 0,
186     kPipelineCache_PersistentCacheKeyType = 1,
187 };

Constructor & Destructor Documentation

◆ ~GrVkGpu()

GrVkGpu::~GrVkGpu ( )
override

Definition at line 276 of file GrVkGpu.cpp.

276 {
277 if (!fDisconnected) {
278 this->destroyResources();
279 }
280 // We don't delete the memory allocator until the very end of the GrVkGpu lifetime so that
281 // clients can continue to delete backend textures even after a context has been abandoned.
282 fMemoryAllocator.reset();
283}

Member Function Documentation

◆ addBufferMemoryBarrier() [1/2]

void GrVkGpu::addBufferMemoryBarrier ( const GrManagedResource *  resource,
VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkBufferMemoryBarrier *  barrier 
) const
 
Definition at line 2127 of file GrVkGpu.cpp.
 
2131 {
2132 if (!this->currentCommandBuffer()) {
2133 return;
2134 }
2135 SkASSERT(resource);
2136 this->currentCommandBuffer()->pipelineBarrier(this,
2137 resource,
2138 srcStageMask,
2139 dstStageMask,
2140 byRegion,
2141 GrVkCommandBuffer::kBufferMemory_BarrierType,
2142 barrier);
2143}

◆ addBufferMemoryBarrier() [2/2]

void GrVkGpu::addBufferMemoryBarrier ( VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkBufferMemoryBarrier *  barrier 
) const
 
Definition at line 2144 of file GrVkGpu.cpp.
 
2147 {
2148 if (!this->currentCommandBuffer()) {
2149 return;
2150 }
2151 // We don't pass in a resource here to the command buffer. The command buffer only is using it
2152 // to hold a ref, but every place where we add a buffer memory barrier we are doing some other
2153 // command with the buffer on the command buffer. Thus those other commands will already cause
2154 // the command buffer to be holding a ref to the buffer.
2155 this->currentCommandBuffer()->pipelineBarrier(this,
2156 /*resource=*/nullptr,
2157 srcStageMask,
2158 dstStageMask,
2159 byRegion,
2160 GrVkCommandBuffer::kBufferMemory_BarrierType,
2161 barrier);
2162}
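The two overloads above record a VkBufferMemoryBarrier on the current primary command buffer; the resource-less overload relies on some other command already holding a ref to the buffer. Below is a minimal usage sketch, not taken from Skia itself: the gpu pointer, VkBuffer handle, size, and destination stage are assumed inputs supplied by the surrounding code.

#include <vulkan/vulkan.h>
#include "GrVkGpu.h"

// Hypothetical helper: make a finished transfer write to `buffer` visible to
// later vertex-shader reads recorded on the same GrVkGpu.
static void syncTransferWriteForShaderRead(GrVkGpu* gpu, VkBuffer buffer, VkDeviceSize size) {
    VkBufferMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = buffer;
    barrier.offset = 0;
    barrier.size = size;

    // Resource-less overload: assumes another command on the current command
    // buffer already keeps `buffer` alive (see the comment in the source above).
    gpu->addBufferMemoryBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                /*byRegion=*/false,
                                &barrier);
}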

◆ addDrawable()

void GrVkGpu::addDrawable ( std::unique_ptr< SkDrawable::GpuDrawHandler drawable)

Definition at line 2753 of file GrVkGpu.cpp.

2753 {
2754 fDrawables.emplace_back(std::move(drawable));
2755}

◆ addImageMemoryBarrier()

void GrVkGpu::addImageMemoryBarrier ( const GrManagedResource *  resource,
VkPipelineStageFlags  srcStageMask,
VkPipelineStageFlags  dstStageMask,
bool  byRegion,
VkImageMemoryBarrier *  barrier 
) const
 
Definition at line 2164 of file GrVkGpu.cpp.
 
2168 {
2169 // If we are in the middle of destroying or abandoning the context we may hit a release proc
2170 // that triggers the destruction of a GrVkImage. This could cause us to try and transfer the
2171 // VkImage back to the original queue. In this state we don't submit anymore work and we may not
2172 // have a current command buffer. Thus we won't do the queue transfer.
2173 if (!this->currentCommandBuffer()) {
2174 return;
2175 }
2176 SkASSERT(resource);
2177 this->currentCommandBuffer()->pipelineBarrier(this,
2178 resource,
2179 srcStageMask,
2180 dstStageMask,
2181 byRegion,
2182 GrVkCommandBuffer::kImageMemory_BarrierType,
2183 barrier);
2184}
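A sketch of typical usage, assuming the caller already holds a GrManagedResource that keeps the VkImage alive and knows its current layout; none of the handles below come from this page.

#include <vulkan/vulkan.h>
#include "GrVkGpu.h"

// Hypothetical helper: transition the base mip level of `image` from
// TRANSFER_DST_OPTIMAL to SHADER_READ_ONLY_OPTIMAL via addImageMemoryBarrier.
static void transitionToShaderRead(GrVkGpu* gpu, const GrManagedResource* resource, VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};

    gpu->addImageMemoryBarrier(resource,
                               VK_PIPELINE_STAGE_TRANSFER_BIT,
                               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                               /*byRegion=*/false,
                               &barrier);
}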

◆ beginRenderPass()

bool GrVkGpu::beginRenderPass ( const GrVkRenderPass *  renderPass,
sk_sp< const GrVkFramebuffer >  framebuffer,
const VkClearValue *  colorClear,
const GrSurface *  target,
const SkIRect &  renderPassBounds,
bool  forSecondaryCB 
)

Definition at line 2621 of file GrVkGpu.cpp.

2626 {
2627 if (!this->currentCommandBuffer()) {
2628 return false;
2629 }
2630 SkASSERT (!framebuffer->isExternal());
2631
2632#ifdef SK_DEBUG
2633 uint32_t index;
2634 bool result = renderPass->colorAttachmentIndex(&index);
2635 SkASSERT(result && 0 == index);
2636 result = renderPass->stencilAttachmentIndex(&index);
2637 if (result) {
2638 SkASSERT(1 == index);
2639 }
2640#endif
2641 VkClearValue clears[3];
2642 int stencilIndex = renderPass->hasResolveAttachment() ? 2 : 1;
2643 clears[0].color = colorClear->color;
2644 clears[stencilIndex].depthStencil.depth = 0.0f;
2645 clears[stencilIndex].depthStencil.stencil = 0;
2646
2647 return this->currentCommandBuffer()->beginRenderPass(
2648 this, renderPass, std::move(framebuffer), clears, target, renderPassBounds, forSecondaryCB);
2649}

◆ checkFinishProcs()

void GrVkGpu::checkFinishProcs ( )
inlineoverridevirtual

Implements GrGpu.

Definition at line 174 of file GrVkGpu.h.

174{ fResourceProvider.checkCommandBuffers(); }

◆ checkVkResult()

bool GrVkGpu::checkVkResult ( VkResult  result)

Definition at line 2659 of file GrVkGpu.cpp.

2659 {
2660 switch (result) {
2661 case VK_SUCCESS:
2662 return true;
2663 case VK_ERROR_DEVICE_LOST:
2664 if (!fDeviceIsLost) {
2665 // Callback should only be invoked once, and device should be marked as lost first.
2666 fDeviceIsLost = true;
2667 skgpu::InvokeDeviceLostCallback(vkInterface(),
2668 device(),
2669 fDeviceLostContext,
2670 fDeviceLostProc,
2671 vkCaps().supportsDeviceFaultInfo());
2672 }
2673 return false;
2674 case VK_ERROR_OUT_OF_DEVICE_MEMORY:
2675 case VK_ERROR_OUT_OF_HOST_MEMORY:
2676 this->setOOMed();
2677 return false;
2678 default:
2679 return false;
2680 }
2681}
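Callers route every VkResult through this method so that device-lost and out-of-memory conditions are recorded on the GrVkGpu. A sketch of that pattern; calling vkWaitForFences directly is an illustration only, since Skia dispatches through its skgpu::VulkanInterface table.

#include <vulkan/vulkan.h>
#include "GrVkGpu.h"

bool waitOnFence(GrVkGpu* gpu, VkFence fence) {
    VkResult result = vkWaitForFences(gpu->device(), 1, &fence, VK_TRUE,
                                      /*timeout=*/~0ULL);
    if (!gpu->checkVkResult(result)) {
        // Device loss or OOM has now been recorded on the GrVkGpu; treat the
        // wait as failed and let the caller unwind.
        return false;
    }
    return result == VK_SUCCESS;
}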

◆ cmdPool()

GrVkCommandPool * GrVkGpu::cmdPool ( ) const
inline

Definition at line 74 of file GrVkGpu.h.

74{ return fMainCmdPool; }

◆ compile()

bool GrVkGpu::compile ( const GrProgramDesc & ,
const GrProgramInfo &  
)
overridevirtual

In this case we have a program descriptor and a program info but no render target.

Implements GrGpu.

Definition at line 2026 of file GrVkGpu.cpp.

2026 {
2027 GrVkRenderPass::AttachmentsDescriptor attachmentsDescriptor;
2028 GrVkRenderPass::AttachmentFlags attachmentFlags;
2030 &attachmentsDescriptor, &attachmentFlags);
2031
2033 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kBlend) {
2035 }
2036 if (programInfo.renderPassBarriers() & GrXferBarrierFlags::kTexture) {
2038 }
2039
2041 if (this->vkCaps().programInfoWillUseDiscardableMSAA(programInfo) &&
2042 programInfo.colorLoadOp() == GrLoadOp::kLoad) {
2044 }
2045 sk_sp<const GrVkRenderPass> renderPass(this->resourceProvider().findCompatibleRenderPass(
2046 &attachmentsDescriptor, attachmentFlags, selfDepFlags, loadFromResolve));
2047 if (!renderPass) {
2048 return false;
2049 }
2050
2052
2053 auto pipelineState = this->resourceProvider().findOrCreateCompatiblePipelineState(
2054 desc,
2055 programInfo,
2056 renderPass->vkRenderPass(),
2057 &stat);
2058 if (!pipelineState) {
2059 return false;
2060 }
2061
2063}

◆ currentCommandBuffer()

GrVkPrimaryCommandBuffer * GrVkGpu::currentCommandBuffer ( ) const
inline

Definition at line 85 of file GrVkGpu.h.

85{ return fMainCmdBuffer; }

◆ deleteBackendTexture()

void GrVkGpu::deleteBackendTexture ( const GrBackendTexture & )
override virtual
 
Frees a texture created by createBackendTexture(). If ownership of the backend texture has been transferred to a context using adopt semantics, this should not be called.
 
Implements GrGpu.
 
Definition at line 2017 of file GrVkGpu.cpp.
 
2017 {
2018 SkASSERT(GrBackendApi::kVulkan == tex.fBackend);
2019
2020 GrVkImageInfo info;
2021 if (GrBackendTextures::GetVkImageInfo(tex, &info)) {
2022 GrVkImage::DestroyImageInfo(this, const_cast<GrVkImageInfo*>(&info));
2023 }
2024}
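A sketch of the create/delete pairing using the inherited GrGpu::createBackendTexture entry point; the format, size, and label are illustrative values, and additional headers beyond GrVkGpu.h are omitted.

#include "GrVkGpu.h"

void roundTripBackendTexture(GrVkGpu* gpu) {
    GrBackendTexture tex = gpu->createBackendTexture(
            /*dimensions=*/{256, 256},
            GrBackendFormats::MakeVk(VK_FORMAT_R8G8B8A8_UNORM),
            GrRenderable::kNo,
            skgpu::Mipmapped::kNo,
            GrProtected::kNo,
            /*label=*/"ExampleBackendTexture");
    if (tex.isValid()) {
        // ... share the texture with another context, sample from it, etc. ...
        gpu->deleteBackendTexture(tex);  // skip this if ownership was adopted elsewhere
    }
}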

◆ device()

VkDevice GrVkGpu::device ( ) const
inline

Definition at line 71 of file GrVkGpu.h.

71{ return fDevice; }

◆ disconnect()

void GrVkGpu::disconnect ( DisconnectType  type)
overridevirtual

Reimplemented from GrGpu.

Definition at line 286 of file GrVkGpu.cpp.

286 {
287 GrGpu::disconnect(type);
288 if (!fDisconnected) {
289 this->destroyResources();
290
291 fSemaphoresToWaitOn.clear();
292 fSemaphoresToSignal.clear();
293 fMainCmdBuffer = nullptr;
294 fDisconnected = true;
295 }
296}

◆ disconnected()

bool GrVkGpu::disconnected ( ) const
inline

Definition at line 51 of file GrVkGpu.h.

51{ return fDisconnected; }

◆ endRenderPass()

void GrVkGpu::endRenderPass ( GrRenderTarget *  target,
GrSurfaceOrigin  origin,
const SkIRect &  bounds 
)

Definition at line 2651 of file GrVkGpu.cpp.

2652 {
2653 // We had a command buffer when we started the render pass, we should have one now as well.
2654 SkASSERT(this->currentCommandBuffer());
2655 this->currentCommandBuffer()->endRenderPass(this);
2656 this->didWriteToSurface(target, origin, &bounds);
2657}

◆ finishOutstandingGpuWork()

void GrVkGpu::finishOutstandingGpuWork ( )
overridevirtual

Implements GrGpu.

Definition at line 2247 of file GrVkGpu.cpp.

2247 {
2248 VK_CALL(QueueWaitIdle(fQueue));
2249
2250 if (this->vkCaps().mustSyncCommandBuffersWithQueue()) {
2251 fResourceProvider.forceSyncAllCommandBuffers();
2252 }
2253}

◆ getPreferredStencilFormat()

GrBackendFormat GrVkGpu::getPreferredStencilFormat ( const GrBackendFormat & )
inlineoverridevirtual

Implements GrGpu.

Definition at line 120 of file GrVkGpu.h.

120 {
121 return GrBackendFormats::MakeVk(this->vkCaps().preferredStencilFormat());
122 }

◆ insertSemaphore()

void GrVkGpu::insertSemaphore ( GrSemaphore *  semaphore)
override virtual
 
Implements GrGpu.
 
Definition at line 2708 of file GrVkGpu.cpp.
 
2708 {
2709 SkASSERT(semaphore);
2710
2711 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2712
2713 GrVkSemaphore::Resource* resource = vkSem->getResource();
2714 if (resource->shouldSignal()) {
2715 resource->ref();
2716 fSemaphoresToSignal.push_back(resource);
2717 }
2718}

◆ isDeviceLost()

bool GrVkGpu::isDeviceLost ( ) const
inlineoverridevirtual

Reimplemented from GrGpu.

Definition at line 66 of file GrVkGpu.h.

66{ return fDeviceIsLost; }

◆ loadMSAAFromResolve()

bool GrVkGpu::loadMSAAFromResolve ( GrVkCommandBuffer *  commandBuffer,
const GrVkRenderPass &  renderPass,
GrAttachment *  dst,
GrVkImage *  src,
const SkIRect &  srcRect 
)

Definition at line 1500 of file GrVkGpu.cpp.

1504 {
1505 return fMSAALoadManager.loadMSAAFromResolve(this, commandBuffer, renderPass, dst, src, srcRect);
1506}

◆ Make()

std::unique_ptr< GrGpu > GrVkGpu::Make ( const skgpu::VulkanBackendContext &  backendContext,
const GrContextOptions &  options,
GrDirectContext *  direct 
)
static

Definition at line 70 of file GrVkGpu.cpp.

72 {
73 if (backendContext.fInstance == VK_NULL_HANDLE ||
74 backendContext.fPhysicalDevice == VK_NULL_HANDLE ||
75 backendContext.fDevice == VK_NULL_HANDLE ||
76 backendContext.fQueue == VK_NULL_HANDLE) {
77 return nullptr;
78 }
79 if (!backendContext.fGetProc) {
80 return nullptr;
81 }
82
83 PFN_vkEnumerateInstanceVersion localEnumerateInstanceVersion =
84 reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
85 backendContext.fGetProc("vkEnumerateInstanceVersion",
87 uint32_t instanceVersion = 0;
88 if (!localEnumerateInstanceVersion) {
89 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
90 } else {
91 VkResult err = localEnumerateInstanceVersion(&instanceVersion);
92 if (err) {
93 SkDebugf("Failed to enumerate instance version. Err: %d\n", err);
94 return nullptr;
95 }
96 }
97
98 PFN_vkGetPhysicalDeviceProperties localGetPhysicalDeviceProperties =
99 reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
100 backendContext.fGetProc("vkGetPhysicalDeviceProperties",
101 backendContext.fInstance,
103
104 if (!localGetPhysicalDeviceProperties) {
105 return nullptr;
106 }
107 VkPhysicalDeviceProperties physDeviceProperties;
108 localGetPhysicalDeviceProperties(backendContext.fPhysicalDevice, &physDeviceProperties);
109 uint32_t physDevVersion = physDeviceProperties.apiVersion;
110
111 uint32_t apiVersion = backendContext.fMaxAPIVersion ? backendContext.fMaxAPIVersion
112 : instanceVersion;
113
114 instanceVersion = std::min(instanceVersion, apiVersion);
115 physDevVersion = std::min(physDevVersion, apiVersion);
116
117 skgpu::VulkanExtensions noExtensions;
119 backendContext.fVkExtensions ? backendContext.fVkExtensions : &noExtensions;
120
121 auto interface = sk_make_sp<skgpu::VulkanInterface>(backendContext.fGetProc,
122 backendContext.fInstance,
123 backendContext.fDevice,
124 instanceVersion,
125 physDevVersion,
126 extensions);
127 SkASSERT(interface);
128 if (!interface->validate(instanceVersion, physDevVersion, extensions)) {
129 return nullptr;
130 }
131
133 if (backendContext.fDeviceFeatures2) {
134 caps.reset(new GrVkCaps(options,
135 interface.get(),
136 backendContext.fPhysicalDevice,
137 *backendContext.fDeviceFeatures2,
138 instanceVersion,
139 physDevVersion,
140 *extensions,
141 backendContext.fProtectedContext));
142 } else if (backendContext.fDeviceFeatures) {
144 features2.pNext = nullptr;
145 features2.features = *backendContext.fDeviceFeatures;
146 caps.reset(new GrVkCaps(options,
147 interface.get(),
148 backendContext.fPhysicalDevice,
149 features2,
150 instanceVersion,
151 physDevVersion,
152 *extensions,
153 backendContext.fProtectedContext));
154 } else {
156 memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
157
158 caps.reset(new GrVkCaps(options,
159 interface.get(),
160 backendContext.fPhysicalDevice,
161 features,
162 instanceVersion,
163 physDevVersion,
164 *extensions,
165 backendContext.fProtectedContext));
166 }
167
168 if (!caps) {
169 return nullptr;
170 }
171
173#if defined(SK_USE_VMA)
174 if (!memoryAllocator) {
175 // We were not given a memory allocator at creation
177 backendContext.fPhysicalDevice,
178 backendContext.fDevice,
179 physDevVersion,
181 interface.get(),
183 }
184#endif
185 if (!memoryAllocator) {
186 SkDEBUGFAIL("No supplied vulkan memory allocator and unable to create one internally.");
187 return nullptr;
188 }
189
190 std::unique_ptr<GrVkGpu> vkGpu(new GrVkGpu(direct,
191 backendContext,
192 std::move(caps),
193 interface,
194 instanceVersion,
195 physDevVersion,
196 std::move(memoryAllocator)));
197 if (backendContext.fProtectedContext == GrProtected::kYes &&
198 !vkGpu->vkCaps().supportsProtectedContent()) {
199 return nullptr;
200 }
201 return vkGpu;
202}
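Make() is the factory the rest of Ganesh uses to stand up the Vulkan backend. The sketch below shows what a caller has to populate, with field names as they appear in skgpu::VulkanBackendContext; the Vulkan handles and getProc come from the embedder's own setup and remain owned by it, so treat the specifics here as assumptions rather than a fixed recipe.

#include "GrVkGpu.h"

std::unique_ptr<GrGpu> makeVkGpu(VkInstance instance,
                                 VkPhysicalDevice physDev,
                                 VkDevice device,
                                 VkQueue graphicsQueue,
                                 uint32_t graphicsQueueIndex,
                                 skgpu::VulkanGetProc getProc,
                                 GrDirectContext* direct) {
    skgpu::VulkanBackendContext backendContext;
    backendContext.fInstance = instance;
    backendContext.fPhysicalDevice = physDev;
    backendContext.fDevice = device;
    backendContext.fQueue = graphicsQueue;
    backendContext.fGraphicsQueueIndex = graphicsQueueIndex;
    backendContext.fGetProc = getProc;  // required; Make() returns nullptr without it

    GrContextOptions options;
    // Returns nullptr if any required handle is VK_NULL_HANDLE, if the
    // interface fails validation, or if no memory allocator can be made.
    return GrVkGpu::Make(backendContext, options, direct);
}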

◆ makeMSAAAttachment()

sk_sp< GrAttachment > GrVkGpu::makeMSAAAttachment ( SkISize  dimensions,
const GrBackendFormat &  format,
int  numSamples,
GrProtected  isProtected,
GrMemoryless  isMemoryless 
)
overridevirtual

Implements GrGpu.

Definition at line 1609 of file GrVkGpu.cpp.

1613 {
1614 VkFormat pixelFormat;
1617 SkASSERT(this->vkCaps().isFormatRenderable(pixelFormat, numSamples));
1618
1620 return GrVkImage::MakeMSAA(this, dimensions, numSamples, pixelFormat, isProtected, memoryless);
1621}

◆ makeSemaphore()

std::unique_ptr< GrSemaphore > GrVkGpu::makeSemaphore ( bool  isOwned)
overridevirtual

Implements GrGpu.

Definition at line 2697 of file GrVkGpu.cpp.

2697 {
2698 return GrVkSemaphore::Make(this, isOwned);
2699}

◆ makeStencilAttachment()

sk_sp< GrAttachment > GrVkGpu::makeStencilAttachment ( const GrBackendFormat & ,
SkISize  dimensions,
int  numStencilSamples 
)
overridevirtual

Implements GrGpu.

Definition at line 1601 of file GrVkGpu.cpp.

1602 {
1603 VkFormat sFmt = this->vkCaps().preferredStencilFormat();
1604
1605 fStats.incStencilAttachmentCreates();
1606 return GrVkImage::MakeStencil(this, dimensions, numStencilSamples, sFmt);
1607}

◆ memoryAllocator()

skgpu::VulkanMemoryAllocator * GrVkGpu::memoryAllocator ( ) const
inline

Definition at line 68 of file GrVkGpu.h.

68{ return fMemoryAllocator.get(); }

◆ onRegenerateMipMapLevels()

bool GrVkGpu::onRegenerateMipMapLevels ( GrTexture *  tex)
overridevirtual

Implements GrGpu.

Definition at line 1508 of file GrVkGpu.cpp.

1508 {
1509 if (!this->currentCommandBuffer()) {
1510 return false;
1511 }
1512 auto* vkTex = static_cast<GrVkTexture*>(tex)->textureImage();
1513 // don't do anything for linearly tiled textures (can't have mipmaps)
1514 if (vkTex->isLinearTiled()) {
1515 SkDebugf("Trying to create mipmap for linear tiled texture");
1516 return false;
1517 }
1519
1520 // determine if we can blit to and from this format
1521 const GrVkCaps& caps = this->vkCaps();
1522 if (!caps.formatCanBeDstofBlit(vkTex->imageFormat(), false) ||
1523 !caps.formatCanBeSrcofBlit(vkTex->imageFormat(), false) ||
1524 !caps.mipmapSupport()) {
1525 return false;
1526 }
1527
1528 int width = tex->width();
1529 int height = tex->height();
1530 VkImageBlit blitRegion;
1531 memset(&blitRegion, 0, sizeof(VkImageBlit));
1532
1533 // SkMipmap doesn't include the base level in the level count so we have to add 1
1534 uint32_t levelCount = SkMipmap::ComputeLevelCount(tex->width(), tex->height()) + 1;
1535 SkASSERT(levelCount == vkTex->mipLevels());
1536
1537 // change layout of the layers so we can write to them.
1540
1541 // setup memory barrier
1542 SkASSERT(GrVkFormatIsSupported(vkTex->imageFormat()));
1543 VkImageMemoryBarrier imageMemoryBarrier = {
1545 nullptr, // pNext
1546 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1547 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1550 VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
1551 VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
1552 vkTex->image(), // image
1553 {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1} // subresourceRange
1554 };
1555
1556 // Blit the miplevels
1557 uint32_t mipLevel = 1;
1558 while (mipLevel < levelCount) {
1559 int prevWidth = width;
1560 int prevHeight = height;
1561 width = std::max(1, width / 2);
1562 height = std::max(1, height / 2);
1563
1564 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1566 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1567
1568 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel - 1, 0, 1 };
1569 blitRegion.srcOffsets[0] = { 0, 0, 0 };
1570 blitRegion.srcOffsets[1] = { prevWidth, prevHeight, 1 };
1571 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
1572 blitRegion.dstOffsets[0] = { 0, 0, 0 };
1573 blitRegion.dstOffsets[1] = { width, height, 1 };
1574 this->currentCommandBuffer()->blitImage(this,
1575 vkTex->resource(),
1576 vkTex->image(),
1578 vkTex->resource(),
1579 vkTex->image(),
1581 1,
1582 &blitRegion,
1584 ++mipLevel;
1585 }
1586 if (levelCount > 1) {
1587 // This barrier logically is not needed, but it changes the final level to the same layout
1588 // as all the others, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL. This makes tracking of the
1589 // layouts and future layout changes easier. The alternative here would be to track layout
1590 // and memory accesses per layer which doesn't seem work it.
1591 imageMemoryBarrier.subresourceRange.baseMipLevel = mipLevel - 1;
1593 VK_PIPELINE_STAGE_TRANSFER_BIT, false, &imageMemoryBarrier);
1594 vkTex->updateImageLayout(VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
1595 }
1596 return true;
1597}
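The blit loop above halves width and height per level with a floor of one pixel, and SkMipmap::ComputeLevelCount excludes the base level, hence the +1. A standalone illustration of that level math (plain C++, independent of Skia):

#include <algorithm>
#include <cstdio>

int main() {
    int width = 100, height = 7;
    int level = 0;
    std::printf("level %d: %dx%d\n", level, width, height);
    while (width > 1 || height > 1) {
        width = std::max(1, width / 2);
        height = std::max(1, height / 2);
        std::printf("level %d: %dx%d\n", ++level, width, height);
    }
    // `level` now equals the number of levels past the base; the blit loop in
    // onRegenerateMipMapLevels runs once per such level.
    return 0;
}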

◆ onResolveRenderTarget()

void GrVkGpu::onResolveRenderTarget ( GrRenderTarget *  target,
const SkIRect &  resolveRect 
)
overridevirtual

Implements GrGpu.

Definition at line 806 of file GrVkGpu.cpp.

806 {
807 SkASSERT(target->numSamples() > 1);
808 GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(target);
810
811 if (this->vkCaps().renderTargetSupportsDiscardableMSAA(rt)) {
812 // We would have resolved the RT during the render pass;
813 return;
814 }
815
816 this->resolveImage(target, rt, resolveRect,
817 SkIPoint::Make(resolveRect.x(), resolveRect.y()));
818}

◆ physicalDevice()

VkPhysicalDevice GrVkGpu::physicalDevice ( ) const
inline

Definition at line 70 of file GrVkGpu.h.

70{ return fPhysicalDevice; }

◆ physicalDeviceMemoryProperties()

const VkPhysicalDeviceMemoryProperties & GrVkGpu::physicalDeviceMemoryProperties ( ) const
inline

Definition at line 78 of file GrVkGpu.h.

78 {
79 return fPhysDevMemProps;
80 }

◆ physicalDeviceProperties()

const VkPhysicalDeviceProperties & GrVkGpu::physicalDeviceProperties ( ) const
inline

Definition at line 75 of file GrVkGpu.h.

75 {
76 return fPhysDevProps;
77 }

◆ pipelineBuilder()

GrThreadSafePipelineBuilder * GrVkGpu::pipelineBuilder ( )
overridevirtual

Implements GrGpu.

Definition at line 298 of file GrVkGpu.cpp.

298 {
299 return fResourceProvider.pipelineStateCache();
300}

◆ prepareTextureForCrossContextUsage()

std::unique_ptr< GrSemaphore > GrVkGpu::prepareTextureForCrossContextUsage ( GrTexture * )
overridevirtual

Put this texture in a safe and known state for use across multiple contexts. Depending on the backend, this may return a GrSemaphore. If so, other contexts should wait on that semaphore before using this texture.

Implements GrGpu.

Definition at line 2732 of file GrVkGpu.cpp.

2732 {
2734 GrVkImage* vkTexture = static_cast<GrVkTexture*>(texture)->textureImage();
2735 vkTexture->setImageLayout(this,
2739 false);
2740 // TODO: should we have a way to notify the caller that this has failed? Currently if the submit
2741 // fails (caused by DEVICE_LOST) this will just cause us to fail the next use of the gpu.
2742 // Eventually we will abandon the whole GPU if this fails.
2744
2745 // The image layout change serves as a barrier, so no semaphore is needed.
2746 // If we ever decide we need to return a semaphore here, we need to make sure GrVkSemaphore is
2747 // thread safe so that only the first thread that tries to use the semaphore actually submits
2748 // it. This additionally would also require thread safety in command buffer submissions to
2749 // queues in general.
2750 return nullptr;
2751}

◆ protectedContext()

bool GrVkGpu::protectedContext ( ) const
inline

Definition at line 81 of file GrVkGpu.h.

81{ return fProtectedContext == skgpu::Protected::kYes; }

◆ queue()

VkQueue GrVkGpu::queue ( ) const
inline

Definition at line 72 of file GrVkGpu.h.

72{ return fQueue; }

◆ queueIndex()

uint32_t GrVkGpu::queueIndex ( ) const
inline

Definition at line 73 of file GrVkGpu.h.

73{ return fQueueIndex; }

◆ refPipelineBuilder()

sk_sp< GrThreadSafePipelineBuilder > GrVkGpu::refPipelineBuilder ( )
overridevirtual

Implements GrGpu.

Definition at line 302 of file GrVkGpu.cpp.

302 {
303 return fResourceProvider.refPipelineStateCache();
304}

◆ releaseUnlockedBackendObjects()

void GrVkGpu::releaseUnlockedBackendObjects ( )
inlineoverridevirtual

Frees any backend-specific objects that are not currently in use by the GPU. This is called when the client is trying to free up as much GPU memory as possible. We will not release resources connected to programs/pipelines, since the cost to recreate those is significantly higher than for other resources.

Reimplemented from GrGpu.

Definition at line 53 of file GrVkGpu.h.

53 {
54 fResourceProvider.releaseUnlockedBackendObjects();
55 }

◆ resourceProvider()

GrVkResourceProvider & GrVkGpu::resourceProvider ( )
inline

Definition at line 83 of file GrVkGpu.h.

83{ return fResourceProvider; }

◆ setBackendRenderTargetState()

bool GrVkGpu::setBackendRenderTargetState ( const GrBackendRenderTarget &  backendRenderTarget,
const skgpu::MutableTextureState &  newState,
skgpu::MutableTextureState *  previousState,
sk_sp< skgpu::RefCntedCallback >  finishedCallback 
)
overridevirtual

Reimplemented from GrGpu.

Definition at line 1972 of file GrVkGpu.cpp.

1975 {
1978 sk_sp<skgpu::MutableTextureState> currentState = backendRenderTarget.getMutableState();
1979 SkASSERT(currentState);
1981 return this->setBackendSurfaceState(info, std::move(currentState),
1982 backendRenderTarget.dimensions(),
1985 previousState, std::move(finishedCallback));
1986}

◆ setBackendTextureState()

bool GrVkGpu::setBackendTextureState ( const GrBackendTexture &  backendTeture,
const skgpu::MutableTextureState &  newState,
skgpu::MutableTextureState *  previousState,
sk_sp< skgpu::RefCntedCallback >  finishedCallback 
)
overridevirtual

Reimplemented from GrGpu.

Definition at line 1956 of file GrVkGpu.cpp.

1959 {
1962 sk_sp<skgpu::MutableTextureState> currentState = backendTeture.getMutableState();
1963 SkASSERT(currentState);
1964 SkASSERT(newState.isValid() && newState.backend() == skgpu::BackendApi::kVulkan);
1965 return this->setBackendSurfaceState(info, std::move(currentState), backendTeture.dimensions(),
1968 previousState,
1969 std::move(finishedCallback));
1970}
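A sketch of driving this from the client side, assuming the skgpu::MutableTextureStates::MakeVulkan factory that accompanies the GetVkImageLayout/GetVkQueueFamilyIndex helpers; the wrapped texture and present queue index are assumed inputs, not part of this page.

#include "GrVkGpu.h"

// Hypothetical helper: ask the backend to move a wrapped texture into
// PRESENT_SRC_KHR before it is handed back to a swapchain.
bool prepareForPresent(GrVkGpu* gpu,
                       const GrBackendTexture& backendTexture,
                       uint32_t presentQueueIndex) {
    skgpu::MutableTextureState newState =
            skgpu::MutableTextureStates::MakeVulkan(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                                                    presentQueueIndex);
    // No previous-state output and no finished callback; the layout change is
    // recorded on the GrVkGpu's current command buffer.
    return gpu->setBackendTextureState(backendTexture, newState,
                                       /*previousState=*/nullptr,
                                       /*finishedCallback=*/nullptr);
}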

◆ stagingBufferManager()

GrStagingBufferManager * GrVkGpu::stagingBufferManager ( )
inlineoverridevirtual

Reimplemented from GrGpu.

Definition at line 63 of file GrVkGpu.h.

63{ return &fStagingBufferManager; }

◆ storeVkPipelineCacheData()

void GrVkGpu::storeVkPipelineCacheData ( )
overridevirtual

Reimplemented from GrGpu.

Definition at line 2757 of file GrVkGpu.cpp.

2757 {
2758 if (this->getContext()->priv().getPersistentCache()) {
2760 }
2761}

◆ submit()

void GrVkGpu::submit ( GrOpsRenderPass *  renderPass)
overridevirtual

Implements GrGpu.

Definition at line 2690 of file GrVkGpu.cpp.

2690 {
2691 SkASSERT(fCachedOpsRenderPass.get() == renderPass);
2692
2693 fCachedOpsRenderPass->submit();
2694 fCachedOpsRenderPass->reset();
2695}

◆ submitSecondaryCommandBuffer()

void GrVkGpu::submitSecondaryCommandBuffer ( std::unique_ptr< GrVkSecondaryCommandBuffer >  buffer)

Definition at line 2683 of file GrVkGpu.cpp.

2683 {
2684 if (!this->currentCommandBuffer()) {
2685 return;
2686 }
2687 this->currentCommandBuffer()->executeCommands(this, std::move(buffer));
2688}

◆ takeOwnershipOfBuffer()

void GrVkGpu::takeOwnershipOfBuffer ( sk_sp< GrGpuBuffer >  buffer)
overridevirtual

Reimplemented from GrGpu.

Definition at line 2235 of file GrVkGpu.cpp.

2235 {
2236 this->currentCommandBuffer()->addGrBuffer(std::move(buffer));
2237}

◆ updateBuffer()

bool GrVkGpu::updateBuffer ( sk_sp< GrVkBuffer >  buffer,
const void *  src,
VkDeviceSize  offset,
VkDeviceSize  size 
)

Definition at line 1229 of file GrVkGpu.cpp.

1230 {
1231 if (!this->currentCommandBuffer()) {
1232 return false;
1233 }
1234 add_transfer_dst_buffer_mem_barrier(this,
1235 static_cast<GrVkBuffer*>(buffer.get()),
1236 offset,
1237 size,
1238 /*after=*/false);
1239 this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
1240 add_transfer_dst_buffer_mem_barrier(this,
1241 static_cast<GrVkBuffer*>(buffer.get()),
1242 offset,
1243 size,
1244 /*after=*/true);
1245
1246 return true;
1247}
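A short usage sketch: updateBuffer only succeeds while a current command buffer exists, and the barriers shown in the source bracket the copy. The buffer and data here are assumed inputs.

#include "GrVkGpu.h"

bool uploadUniforms(GrVkGpu* gpu, sk_sp<GrVkBuffer> buffer, const float values[4]) {
    const VkDeviceSize offset = 0;
    const VkDeviceSize size = 4 * sizeof(float);
    return gpu->updateBuffer(std::move(buffer), values, offset, size);
}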

◆ vkCaps()

const GrVkCaps & GrVkGpu::vkCaps ( ) const
inline

Definition at line 61 of file GrVkGpu.h.

61{ return *fVkCaps; }

◆ vkInterface()

const skgpu::VulkanInterface * GrVkGpu::vkInterface ( ) const
inline

Definition at line 60 of file GrVkGpu.h.

60{ return fInterface.get(); }

◆ waitSemaphore()

void GrVkGpu::waitSemaphore ( GrSemaphore *  semaphore)
overridevirtual

Implements GrGpu.

Definition at line 2720 of file GrVkGpu.cpp.

2720 {
2721 SkASSERT(semaphore);
2722
2723 GrVkSemaphore* vkSem = static_cast<GrVkSemaphore*>(semaphore);
2724
2725 GrVkSemaphore::Resource* resource = vkSem->getResource();
2726 if (resource->shouldWait()) {
2727 resource->ref();
2728 fSemaphoresToWaitOn.push_back(resource);
2729 }
2730}

◆ wrapBackendSemaphore()

std::unique_ptr< GrSemaphore > GrVkGpu::wrapBackendSemaphore ( const GrBackendSemaphore &  semaphore,
GrSemaphoreWrapType  wrapType,
GrWrapOwnership  ownership 
)
overridevirtual

Implements GrGpu.

Definition at line 2701 of file GrVkGpu.cpp.

2703 {
2704 return GrVkSemaphore::MakeWrapped(this, GrBackendSemaphores::GetVkSemaphore(semaphore),
2705 wrapType, ownership);
2706}
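Taken together, makeSemaphore/insertSemaphore queue a signal on this context's next submission, while wrapBackendSemaphore/waitSemaphore make another context's submission wait. A hedged sketch of that flow; the enum values used and the assumption that backendSem refers to the VkSemaphore the producer signals are not established by this page.

#include "GrVkGpu.h"

void signalAndWait(GrVkGpu* producer, GrVkGpu* consumer, const GrBackendSemaphore& backendSem) {
    // Producer side: create an owned semaphore and have the next submission signal it.
    std::unique_ptr<GrSemaphore> sem = producer->makeSemaphore(/*isOwned=*/true);
    producer->insertSemaphore(sem.get());
    producer->submitToGpu(GrSyncCpu::kNo);

    // Consumer side: wrap the externally shared semaphore (borrowed) and make
    // the next submission wait on it. backendSem is assumed to wrap the same
    // VkSemaphore the producer just signaled.
    std::unique_ptr<GrSemaphore> wrapped =
            consumer->wrapBackendSemaphore(backendSem,
                                           GrSemaphoreWrapType::kWillWait,
                                           kBorrow_GrWrapOwnership);
    if (wrapped) {
        consumer->waitSemaphore(wrapped.get());
    }
}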

◆ xferBarrier()

void GrVkGpu::xferBarrier ( GrRenderTarget *  rt,
GrXferBarrierType  barrierType 
)
overridevirtual

Implements GrGpu.

Definition at line 1988 of file GrVkGpu.cpp.

1988 {
1989 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1990 VkPipelineStageFlags dstStage;
1991 VkAccessFlags dstAccess;
1992 if (barrierType == kBlend_GrXferBarrierType) {
1995 } else {
1996 SkASSERT(barrierType == kTexture_GrXferBarrierType);
1999 }
2000 GrVkImage* image = vkRT->colorAttachment();
2001 VkImageMemoryBarrier barrier;
2003 barrier.pNext = nullptr;
2005 barrier.dstAccessMask = dstAccess;
2006 barrier.oldLayout = image->currentLayout();
2007 barrier.newLayout = barrier.oldLayout;
2010 barrier.image = image->image();
2011 barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, image->mipLevels(), 0, 1};
2012 this->addImageMemoryBarrier(image->resource(),
2014 dstStage, true, &barrier);
2015}

◆ zeroBuffer()

bool GrVkGpu::zeroBuffer ( sk_sp< GrGpuBuffer >  buffer)

Definition at line 1249 of file GrVkGpu.cpp.

1249 {
1250 if (!this->currentCommandBuffer()) {
1251 return false;
1252 }
1253
1254 add_transfer_dst_buffer_mem_barrier(this,
1255 static_cast<GrVkBuffer*>(buffer.get()),
1256 /*offset=*/0,
1257 buffer->size(),
1258 /*after=*/false);
1259 this->currentCommandBuffer()->fillBuffer(this,
1260 buffer,
1261 /*offset=*/0,
1262 buffer->size(),
1263 /*data=*/0);
1264 add_transfer_dst_buffer_mem_barrier(this,
1265 static_cast<GrVkBuffer*>(buffer.get()),
1266 /*offset=*/0,
1267 buffer->size(),
1268 /*after=*/true);
1269
1270 return true;
1271}

The documentation for this class was generated from the following files: